diff --git a/go.mod b/go.mod
index 023055fa..51a70dc7 100644
--- a/go.mod
+++ b/go.mod
@@ -6,7 +6,7 @@ require (
github.com/cli/go-gh/v2 v2.7.0
github.com/go-errors/errors v1.5.1
github.com/onsi/gomega v1.32.0
- github.com/sigstore/cosign/v2 v2.2.3
+ github.com/sigstore/cosign/v2 v2.2.4
github.com/spf13/cobra v1.8.0
github.com/spf13/pflag v1.0.5
github.com/tektoncd/cli v0.36.0
@@ -24,22 +24,21 @@ require (
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
github.com/google/go-github/v55 v55.0.0 // indirect
- github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
go.opentelemetry.io/otel/metric v1.24.0 // indirect
gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
)
require (
- cloud.google.com/go/compute v1.23.4 // indirect
+ cloud.google.com/go/compute v1.25.0 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d // indirect
contrib.go.opencensus.io/exporter/prometheus v0.4.2 // indirect
- cuelabs.dev/go/oci/ociregistry v0.0.0-20231103182354-93e78c079a13 // indirect
- cuelang.org/go v0.7.0 // indirect
+ cuelabs.dev/go/oci/ociregistry v0.0.0-20240314152124-224736b49f2e // indirect
+ cuelang.org/go v0.8.1 // indirect
filippo.io/edwards25519 v1.1.0 // indirect
github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0 // indirect
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
@@ -69,19 +68,19 @@ require (
github.com/aliyun/credentials-go v1.3.1 // indirect
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
- github.com/aws/aws-sdk-go-v2 v1.25.2 // indirect
- github.com/aws/aws-sdk-go-v2/config v1.27.4 // indirect
- github.com/aws/aws-sdk-go-v2/credentials v1.17.4 // indirect
- github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2 // indirect
+ github.com/aws/aws-sdk-go-v2 v1.26.0 // indirect
+ github.com/aws/aws-sdk-go-v2/config v1.27.9 // indirect
+ github.com/aws/aws-sdk-go-v2/credentials v1.17.9 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.0 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.4 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.4 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
github.com/aws/aws-sdk-go-v2/service/ecr v1.20.2 // indirect
github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2 // indirect
- github.com/aws/aws-sdk-go-v2/service/sso v1.20.1 // indirect
- github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1 // indirect
- github.com/aws/aws-sdk-go-v2/service/sts v1.28.1 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.6 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.20.3 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.3 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.28.5 // indirect
github.com/aws/smithy-go v1.20.1 // indirect
github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8 // indirect
github.com/aymanbagabas/go-osc52 v1.0.3 // indirect
@@ -101,7 +100,7 @@ require (
github.com/cockroachdb/apd/v3 v3.2.1 // indirect
github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be // indirect
github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
- github.com/coreos/go-oidc/v3 v3.9.0 // indirect
+ github.com/coreos/go-oidc/v3 v3.10.0 // indirect
github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect
@@ -111,6 +110,7 @@ require (
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/docker v25.0.5+incompatible // indirect
github.com/docker/docker-credential-helpers v0.8.0 // indirect
+ github.com/dustin/go-humanize v1.0.1 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/emicklei/proto v1.12.1 // indirect
github.com/evanphx/json-patch/v5 v5.7.0 // indirect
@@ -119,20 +119,21 @@ require (
github.com/go-chi/chi v4.1.2+incompatible // indirect
github.com/go-ini/ini v1.67.0 // indirect
github.com/go-jose/go-jose/v3 v3.0.3 // indirect
+ github.com/go-jose/go-jose/v4 v4.0.1 // indirect
github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
- github.com/go-openapi/analysis v0.22.0 // indirect
- github.com/go-openapi/errors v0.21.0 // indirect
- github.com/go-openapi/jsonpointer v0.20.2 // indirect
- github.com/go-openapi/jsonreference v0.20.4 // indirect
- github.com/go-openapi/loads v0.21.5 // indirect
- github.com/go-openapi/runtime v0.27.1 // indirect
- github.com/go-openapi/spec v0.20.13 // indirect
- github.com/go-openapi/strfmt v0.22.0 // indirect
- github.com/go-openapi/swag v0.22.9 // indirect
- github.com/go-openapi/validate v0.22.4 // indirect
+ github.com/go-openapi/analysis v0.23.0 // indirect
+ github.com/go-openapi/errors v0.22.0 // indirect
+ github.com/go-openapi/jsonpointer v0.21.0 // indirect
+ github.com/go-openapi/jsonreference v0.21.0 // indirect
+ github.com/go-openapi/loads v0.22.0 // indirect
+ github.com/go-openapi/runtime v0.28.0 // indirect
+ github.com/go-openapi/spec v0.21.0 // indirect
+ github.com/go-openapi/strfmt v0.23.0 // indirect
+ github.com/go-openapi/swag v0.23.0 // indirect
+ github.com/go-openapi/validate v0.24.0 // indirect
github.com/go-piv/piv-go v1.11.0 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
@@ -141,7 +142,7 @@ require (
github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/cel-go v0.20.0 // indirect
- github.com/google/certificate-transparency-go v1.1.7 // indirect
+ github.com/google/certificate-transparency-go v1.1.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/go-containerregistry v0.19.1 // indirect
github.com/google/go-querystring v1.1.0 // indirect
@@ -150,7 +151,7 @@ require (
github.com/google/uuid v1.6.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/gorilla/mux v1.8.1 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
@@ -166,7 +167,7 @@ require (
github.com/jonboulle/clockwork v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.17.3 // indirect
+ github.com/klauspost/compress v1.17.4 // indirect
github.com/letsencrypt/boulder v0.0.0-20231026200631-000cd05d5491 // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
@@ -181,41 +182,41 @@ require (
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/mozillazg/docker-credential-acr-helper v0.3.0 // indirect
- github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de // indirect
github.com/muesli/termenv v0.13.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/oleiade/reflections v1.0.1 // indirect
- github.com/open-policy-agent/opa v0.61.0 // indirect
+ github.com/open-policy-agent/opa v0.63.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pborman/uuid v1.2.1 // indirect
github.com/pelletier/go-toml/v2 v2.1.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
- github.com/prometheus/client_golang v1.18.0 // indirect
- github.com/prometheus/client_model v0.5.0 // indirect
- github.com/prometheus/common v0.45.0 // indirect
+ github.com/prometheus/client_golang v1.19.0 // indirect
+ github.com/prometheus/client_model v0.6.0 // indirect
+ github.com/prometheus/common v0.51.1 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/prometheus/statsd_exporter v0.22.7 // indirect
github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf // indirect
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
+ github.com/rogpeppe/go-internal v1.12.0 // indirect
github.com/sassoftware/relic v7.2.1+incompatible // indirect
github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
github.com/segmentio/ksuid v1.0.4 // indirect
github.com/shibumi/go-pathspec v1.3.0 // indirect
- github.com/sigstore/fulcio v1.4.3 // indirect
- github.com/sigstore/rekor v1.3.4 // indirect
- github.com/sigstore/sigstore v1.8.2 // indirect
- github.com/sigstore/timestamp-authority v1.2.1 // indirect
+ github.com/sigstore/fulcio v1.4.5 // indirect
+ github.com/sigstore/rekor v1.3.6 // indirect
+ github.com/sigstore/sigstore v1.8.3 // indirect
+ github.com/sigstore/timestamp-authority v1.2.2 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.6.0 // indirect
github.com/spf13/viper v1.18.2 // indirect
- github.com/spiffe/go-spiffe/v2 v2.1.7 // indirect
+ github.com/spiffe/go-spiffe/v2 v2.2.0 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
@@ -230,36 +231,35 @@ require (
github.com/tjfoc/gmsm v1.4.1 // indirect
github.com/transparency-dev/merkle v0.0.2 // indirect
github.com/vbatts/tar-split v0.11.5 // indirect
- github.com/xanzy/go-gitlab v0.96.0 // indirect
+ github.com/xanzy/go-gitlab v0.102.0 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/yashtewari/glob-intersection v0.2.0 // indirect
github.com/zeebo/errs v1.3.0 // indirect
- go.mongodb.org/mongo-driver v1.13.1 // indirect
+ go.mongodb.org/mongo-driver v1.14.0 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel v1.24.0 // indirect
go.opentelemetry.io/otel/sdk v1.24.0 // indirect
go.opentelemetry.io/otel/trace v1.24.0 // indirect
- go.step.sm/crypto v0.42.1 // indirect
+ go.step.sm/crypto v0.44.2 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
- golang.org/x/crypto v0.20.0 // indirect
+ golang.org/x/crypto v0.22.0 // indirect
golang.org/x/exp v0.0.0-20231108232855-2478ac86f678 // indirect
- golang.org/x/mod v0.14.0 // indirect
- golang.org/x/net v0.21.0 // indirect
- golang.org/x/oauth2 v0.17.0 // indirect
- golang.org/x/sync v0.6.0 // indirect
- golang.org/x/sys v0.18.0 // indirect
- golang.org/x/term v0.18.0 // indirect
+ golang.org/x/mod v0.16.0 // indirect
+ golang.org/x/net v0.22.0 // indirect
+ golang.org/x/oauth2 v0.19.0 // indirect
+ golang.org/x/sync v0.7.0 // indirect
+ golang.org/x/sys v0.19.0 // indirect
+ golang.org/x/term v0.19.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.5.0 // indirect
- golang.org/x/tools v0.17.0 // indirect
+ golang.org/x/tools v0.19.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
- google.golang.org/api v0.167.0 // indirect
- google.golang.org/appengine v1.6.8 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 // indirect
+ google.golang.org/api v0.172.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
google.golang.org/grpc v1.62.1 // indirect
google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
@@ -267,7 +267,7 @@ require (
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/api v0.29.3 // indirect
k8s.io/apiextensions-apiserver v0.27.6 // indirect
- k8s.io/klog/v2 v2.110.1 // indirect
+ k8s.io/klog/v2 v2.120.1 // indirect
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
knative.dev/pkg v0.0.0-20231103161548-f5b42e8dea44 // indirect
diff --git a/go.sum b/go.sum
index 99a1ca37..7e1fac83 100644
--- a/go.sum
+++ b/go.sum
@@ -13,23 +13,23 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
+cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute v1.23.4 h1:EBT9Nw4q3zyE7G45Wvv3MzolIrCJEuHys5muLY0wvAw=
-cloud.google.com/go/compute v1.23.4/go.mod h1:/EJMj55asU6kAFnuZET8zqgwgJ9FvXWXOkkfQZa4ioI=
+cloud.google.com/go/compute v1.25.0 h1:H1/4SqSUhjPFE7L5ddzHOfY2bCAvjwNRZPNl6Ni5oYU=
+cloud.google.com/go/compute v1.25.0/go.mod h1:GR7F0ZPZH8EhChlMo9FkLd7eUTwEymjqQagxzilIxIE=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc=
cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI=
-cloud.google.com/go/kms v1.15.7 h1:7caV9K3yIxvlQPAcaFffhlT7d1qpxjB1wHBtjWa13SM=
-cloud.google.com/go/kms v1.15.7/go.mod h1:ub54lbsa6tDkUwnu4W7Yt1aAIFLnspgh0kPGToDukeI=
+cloud.google.com/go/kms v1.15.8 h1:szIeDCowID8th2i8XE4uRev5PMxQFqW+JjwYxL9h6xs=
+cloud.google.com/go/kms v1.15.8/go.mod h1:WoUHcDjD9pluCg7pNds131awnH429QGvRM3N/4MyoVs=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -43,21 +43,21 @@ contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d h
contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d/go.mod h1:IshRmMJBhDfFj5Y67nVhMYTTIze91RUeT73ipWKs/GY=
contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxaWJs2/OwXtiWwew3oAg=
contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod h1:dvEHbiKmgvbr5pjaF9fpw1KeYcjrnC1J8B+JKjsZyRQ=
-cuelabs.dev/go/oci/ociregistry v0.0.0-20231103182354-93e78c079a13 h1:zkiIe8AxZ/kDjqQN+mDKc5BxoVJOqioSdqApjc+eB1I=
-cuelabs.dev/go/oci/ociregistry v0.0.0-20231103182354-93e78c079a13/go.mod h1:XGKYSMtsJWfqQYPwq51ZygxAPqpEUj/9bdg16iDPTAA=
-cuelang.org/go v0.7.0 h1:gMztinxuKfJwMIxtboFsNc6s8AxwJGgsJV+3CuLffHI=
-cuelang.org/go v0.7.0/go.mod h1:ix+3dM/bSpdG9xg6qpCgnJnpeLtciZu+O/rDbywoMII=
+cuelabs.dev/go/oci/ociregistry v0.0.0-20240314152124-224736b49f2e h1:GwCVItFUPxwdsEYnlUcJ6PJxOjTeFFCKOh6QWg4oAzQ=
+cuelabs.dev/go/oci/ociregistry v0.0.0-20240314152124-224736b49f2e/go.mod h1:ApHceQLLwcOkCEXM1+DyCXTHEJhNGDpJ2kmV6axsx24=
+cuelang.org/go v0.8.1 h1:VFYsxIFSPY5KgSaH1jQ2GxHOrbu6Ga3kEI70yCZwnOg=
+cuelang.org/go v0.8.1/go.mod h1:CoDbYolfMms4BhWUlhD+t5ORnihR7wvjcfgyO9lL5FI=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
-github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230618160516-e936619f9f18 h1:rd389Q26LMy03gG4anandGFC2LW/xvjga5GezeeaxQk=
-github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230618160516-e936619f9f18/go.mod h1:fgJuSBrJP5qZtKqaMJE0hmhS2tmRH+44IkfZvjtaf1M=
+github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg=
+github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM=
github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0 h1:8+4G8JaejP8Xa6W46PzJEwisNgBXMvFcz78N6zG/ARw=
github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0/go.mod h1:GgeIE+1be8Ivm7Sh4RgwI42aTtC9qrcj+Y9Y6CjJhJs=
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 h1:c4k2FIYIh4xtwqrQwV0Ct1v5+ehlNXj5NI/MWVsiTkQ=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2/go.mod h1:5FDJtLEO/GxwNgUxbwrY3LP0pEoThTQJtk2oysdXHxM=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ=
@@ -89,8 +89,8 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
@@ -160,23 +160,23 @@ github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/aws/aws-sdk-go v1.50.0 h1:HBtrLeO+QyDKnc3t1+5DR1RxodOHCGr8ZcrHudpv7jI=
-github.com/aws/aws-sdk-go v1.50.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go v1.51.6 h1:Ld36dn9r7P9IjU8WZSaswQ8Y/XUCRpewim5980DwYiU=
+github.com/aws/aws-sdk-go v1.51.6/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go-v2 v1.21.2/go.mod h1:ErQhvNuEMhJjweavOYhxVkn2RUx7kQXVATHrjKtxIpM=
-github.com/aws/aws-sdk-go-v2 v1.25.2 h1:/uiG1avJRgLGiQM9X3qJM8+Qa6KRGK5rRPuXE0HUM+w=
-github.com/aws/aws-sdk-go-v2 v1.25.2/go.mod h1:Evoc5AsmtveRt1komDwIsjHFyrP5tDuF1D1U+6z6pNo=
-github.com/aws/aws-sdk-go-v2/config v1.27.4 h1:AhfWb5ZwimdsYTgP7Od8E9L1u4sKmDW2ZVeLcf2O42M=
-github.com/aws/aws-sdk-go-v2/config v1.27.4/go.mod h1:zq2FFXK3A416kiukwpsd+rD4ny6JC7QSkp4QdN1Mp2g=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.4 h1:h5Vztbd8qLppiPwX+y0Q6WiwMZgpd9keKe2EAENgAuI=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.4/go.mod h1:+30tpwrkOgvkJL1rUZuRLoxcJwtI/OkeBLYnHxJtVe0=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2 h1:AK0J8iYBFeUk2Ax7O8YpLtFsfhdOByh2QIkHmigpRYk=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2/go.mod h1:iRlGzMix0SExQEviAyptRWRGdYNo3+ufW/lCzvKVTUc=
+github.com/aws/aws-sdk-go-v2 v1.26.0 h1:/Ce4OCiM3EkpW7Y+xUnfAFpchU78K7/Ug01sZni9PgA=
+github.com/aws/aws-sdk-go-v2 v1.26.0/go.mod h1:35hUlJVYd+M++iLI3ALmVwMOyRYMmRqUXpTtRGW+K9I=
+github.com/aws/aws-sdk-go-v2/config v1.27.9 h1:gRx/NwpNEFSk+yQlgmk1bmxxvQ5TyJ76CWXs9XScTqg=
+github.com/aws/aws-sdk-go-v2/config v1.27.9/go.mod h1:dK1FQfpwpql83kbD873E9vz4FyAxuJtR22wzoXn3qq0=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.9 h1:N8s0/7yW+h8qR8WaRlPQeJ6czVMNQVNtNdUqf6cItao=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.9/go.mod h1:446YhIdmSV0Jf/SLafGZalQo+xr2iw7/fzXGDPTU1yQ=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.0 h1:af5YzcLf80tv4Em4jWVD75lpnOHSBkPUZxZfGkrI3HI=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.0/go.mod h1:nQ3how7DMnFMWiU1SpECohgC82fpn4cKZ875NDMmwtA=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43/go.mod h1:auo+PiyLl0n1l8A0e8RIeR8tOzYPfZZH/JNlrJ8igTQ=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2 h1:bNo4LagzUKbjdxE0tIcR9pMzLR2U/Tgie1Hq1HQ3iH8=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2/go.mod h1:wRQv0nN6v9wDXuWThpovGQjqF1HFdcgWjporw14lS8k=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.4 h1:0ScVK/4qZ8CIW0k8jOeFVsyS/sAiXpYxRBLolMkuLQM=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.4/go.mod h1:84KyjNZdHC6QZW08nfHI6yZgPd+qRgaWcYsyLUo3QY8=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37/go.mod h1:Qe+2KtKml+FEsQF/DHmDV+xjtche/hwoF75EG4UlHW8=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2 h1:EtOU5jsPdIQNP+6Q2C5e3d65NKT1PeCiQk+9OdzO12Q=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2/go.mod h1:tyF5sKccmDz0Bv4NrstEr+/9YkSPJHrcO7UsUKf7pWM=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.4 h1:sHmMWWX5E7guWEFQ9SVo6A3S4xpPrWnd77a6y4WM6PU=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.4/go.mod h1:WjpDrhWisWOIoS9n3nk67A3Ll1vfULJ9Kq6h29HTD48=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
github.com/aws/aws-sdk-go-v2/service/ecr v1.20.2 h1:y6LX9GUoEA3mO0qpFl1ZQHj1rFyPWVphlzebiSt2tKE=
@@ -185,16 +185,16 @@ github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2 h1:PpbXaecV3sLAS6rjQiaKw4
github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2/go.mod h1:fUHpGXr4DrXkEDpGAjClPsviWf+Bszeb0daKE0blxv8=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 h1:EyBZibRTVAs6ECHZOw5/wlylS9OcTzwyjeQMudmREjE=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1/go.mod h1:JKpmtYhhPs7D97NL/ltqz7yCkERFW5dOlHyVl66ZYF8=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2 h1:5ffmXjPtwRExp1zc7gENLgCPyHFbhEPwVTkTiH9niSk=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2/go.mod h1:Ru7vg1iQ7cR4i7SZ/JTLYN9kaXtbL69UdgG0OQWQxW0=
-github.com/aws/aws-sdk-go-v2/service/kms v1.29.1 h1:OdjJjUWFlMZLAMl54ASxIpZdGEesY4BH3/c0HAPSFdI=
-github.com/aws/aws-sdk-go-v2/service/kms v1.29.1/go.mod h1:Cbx2uxEX0bAB7SlSY+ys05ZBkEb8IbmuAOcGVmDfJFs=
-github.com/aws/aws-sdk-go-v2/service/sso v1.20.1 h1:utEGkfdQ4L6YW/ietH7111ZYglLJvS+sLriHJ1NBJEQ=
-github.com/aws/aws-sdk-go-v2/service/sso v1.20.1/go.mod h1:RsYqzYr2F2oPDdpy+PdhephuZxTfjHQe7SOBcZGoAU8=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1 h1:9/GylMS45hGGFCcMrUZDVayQE1jYSIN6da9jo7RAYIw=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1/go.mod h1:YjAPFn4kGFqKC54VsHs5fn5B6d+PCY2tziEa3U/GB5Y=
-github.com/aws/aws-sdk-go-v2/service/sts v1.28.1 h1:3I2cBEYgKhrWlwyZgfpSO2BpaMY1LHPqXYk/QGlu2ew=
-github.com/aws/aws-sdk-go-v2/service/sts v1.28.1/go.mod h1:uQ7YYKZt3adCRrdCBREm1CD3efFLOUNH77MrUCvx5oA=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.6 h1:b+E7zIUHMmcB4Dckjpkapoy47W6C9QBv/zoUP+Hn8Kc=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.6/go.mod h1:S2fNV0rxrP78NhPbCZeQgY8H9jdDMeGtwcfZIRxzBqU=
+github.com/aws/aws-sdk-go-v2/service/kms v1.30.0 h1:yS0JkEdV6h9JOo8sy2JSpjX+i7vsKifU8SIeHrqiDhU=
+github.com/aws/aws-sdk-go-v2/service/kms v1.30.0/go.mod h1:+I8VUUSVD4p5ISQtzpgSva4I8cJ4SQ4b1dcBcof7O+g=
+github.com/aws/aws-sdk-go-v2/service/sso v1.20.3 h1:mnbuWHOcM70/OFUlZZ5rcdfA8PflGXXiefU/O+1S3+8=
+github.com/aws/aws-sdk-go-v2/service/sso v1.20.3/go.mod h1:5HFu51Elk+4oRBZVxmHrSds5jFXmFj8C3w7DVF2gnrs=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.3 h1:uLq0BKatTmDzWa/Nu4WO0M1AaQDaPpwTKAeByEc6WFM=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.3/go.mod h1:b+qdhjnxj8GSR6t5YfphOffeoQSQ1KmpoVVuBn+PWxs=
+github.com/aws/aws-sdk-go-v2/service/sts v1.28.5 h1:J/PpTf/hllOjx8Xu9DMflff3FajfLxqM5+tepvVXmxg=
+github.com/aws/aws-sdk-go-v2/service/sts v1.28.5/go.mod h1:0ih0Z83YDH/QeQ6Ori2yGE2XvWYv/Xm+cZc01LC6oK0=
github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/aws/smithy-go v1.20.1 h1:4SZlSlMr36UEqC7XOyRVb27XMeZubNcBNN+9IgEPIQw=
github.com/aws/smithy-go v1.20.1/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
@@ -261,8 +261,8 @@ github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be h1:J5BL
github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be/go.mod h1:mk5IQ+Y0ZeO87b858TlA645sVcEcbiX6YqP98kt+7+w=
github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k=
github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o=
-github.com/coreos/go-oidc/v3 v3.9.0 h1:0J/ogVOd4y8P0f0xUh8l9t07xRP/d8tccvjHl2dcsSo=
-github.com/coreos/go-oidc/v3 v3.9.0/go.mod h1:rTKz2PYwftcrtoCzV5g5kvfJoWcm0Mk8AF8y1iAQro4=
+github.com/coreos/go-oidc/v3 v3.10.0 h1:tDnXHnLyiTVyT/2zLDGj09pFPkhND8Gl8lnTRhoEaJU=
+github.com/coreos/go-oidc/v3 v3.10.0/go.mod h1:5j11xcw0D3+SGxn6Z/WFADsgcWVMyNAlSQupk0KK3ac=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 h1:2Dx4IHfC1yHWI12AxQDJM1QbRCDfk6M+blLzlZCXdrc=
github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
@@ -315,8 +315,8 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
-github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI=
-github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4=
+github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI=
+github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -336,6 +336,8 @@ github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k=
github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
+github.com/go-jose/go-jose/v4 v4.0.1 h1:QVEPDE3OluqXBQZDcnNvQrInro2h0e4eqNbnZSWqS6U=
+github.com/go-jose/go-jose/v4 v4.0.1/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
@@ -349,31 +351,30 @@ github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE
github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-openapi/analysis v0.22.0 h1:wQ/d07nf78HNj4u+KiSY0sT234IAyePPbMgpUjUJQR0=
-github.com/go-openapi/analysis v0.22.0/go.mod h1:acDnkkCI2QxIo8sSIPgmp1wUlRohV7vfGtAIVae73b0=
-github.com/go-openapi/errors v0.21.0 h1:FhChC/duCnfoLj1gZ0BgaBmzhJC2SL/sJr8a2vAobSY=
-github.com/go-openapi/errors v0.21.0/go.mod h1:jxNTMUxRCKj65yb/okJGEtahVd7uvWnuWfj53bse4ho=
-github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q=
-github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs=
-github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU=
-github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4=
-github.com/go-openapi/loads v0.21.5 h1:jDzF4dSoHw6ZFADCGltDb2lE4F6De7aWSpe+IcsRzT0=
-github.com/go-openapi/loads v0.21.5/go.mod h1:PxTsnFBoBe+z89riT+wYt3prmSBP6GDAQh2l9H1Flz8=
-github.com/go-openapi/runtime v0.27.1 h1:ae53yaOoh+fx/X5Eaq8cRmavHgDma65XPZuvBqvJYto=
-github.com/go-openapi/runtime v0.27.1/go.mod h1:fijeJEiEclyS8BRurYE1DE5TLb9/KZl6eAdbzjsrlLU=
-github.com/go-openapi/spec v0.20.13 h1:XJDIN+dLH6vqXgafnl5SUIMnzaChQ6QTo0/UPMbkIaE=
-github.com/go-openapi/spec v0.20.13/go.mod h1:8EOhTpBoFiask8rrgwbLC3zmJfz4zsCUueRuPM6GNkw=
-github.com/go-openapi/strfmt v0.22.0 h1:Ew9PnEYc246TwrEspvBdDHS4BVKXy/AOVsfqGDgAcaI=
-github.com/go-openapi/strfmt v0.22.0/go.mod h1:HzJ9kokGIju3/K6ap8jL+OlGAbjpSv27135Yr9OivU4=
-github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE=
-github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE=
-github.com/go-openapi/validate v0.22.4 h1:5v3jmMyIPKTR8Lv9syBAIRxG6lY0RqeBPB1LKEijzk8=
-github.com/go-openapi/validate v0.22.4/go.mod h1:qm6O8ZIcPVdSY5219468Jv7kBdGvkiZLPOmqnqTUZ2A=
+github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU=
+github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo=
+github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w=
+github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE=
+github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
+github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
+github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
+github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
+github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco=
+github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs=
+github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ=
+github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc=
+github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY=
+github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
+github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c=
+github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=
+github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
+github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
+github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
+github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
github.com/go-piv/piv-go v1.11.0 h1:5vAaCdRTFSIW4PeqMbnsDlUZ7odMYWnHBDGdmtU/Zhg=
github.com/go-piv/piv-go v1.11.0/go.mod h1:NZ2zmjVkfFaL/CF8cVQ/pXdXtuj110zEKGdJM6fJZZM=
github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
@@ -397,8 +398,8 @@ github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw
github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
-github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw=
-github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
+github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
+github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
@@ -432,15 +433,14 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/cel-go v0.20.0 h1:h4n6DOCppEMpWERzllyNkntl7JrDyxoE543KWS6BLpc=
github.com/google/cel-go v0.20.0/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
-github.com/google/certificate-transparency-go v1.1.7 h1:IASD+NtgSTJLPdzkthwvAG1ZVbF2WtFg4IvoA68XGSw=
-github.com/google/certificate-transparency-go v1.1.7/go.mod h1:FSSBo8fyMVgqptbfF6j5p/XNdgQftAhSmXcIxV9iphE=
+github.com/google/certificate-transparency-go v1.1.8 h1:LGYKkgZF7satzgTak9R4yzfJXEeYVAjV6/EAEJOf1to=
+github.com/google/certificate-transparency-go v1.1.8/go.mod h1:bV/o8r0TBKRf1X//iiiSgWrvII4d7/8OiA+3vG26gI8=
github.com/google/flatbuffers v2.0.8+incompatible h1:ivUb1cGomAB101ZM1T0nOiWz9pSrTMoa9+EiY7igmkM=
github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU=
@@ -486,8 +486,8 @@ github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
github.com/google/tink/go v1.7.0 h1:6Eox8zONGebBFcCBqkVmt60LaWZa6xg1cl/DwAh/J1w=
github.com/google/tink/go v1.7.0/go.mod h1:GAUOd+QE3pgj9q8VKIGTCP33c/B7eb4NhxLcgTJZStM=
-github.com/google/trillian v1.5.3 h1:3ioA5p09qz+U9/t2riklZtaQdZclaStp0/eQNfewNRg=
-github.com/google/trillian v1.5.3/go.mod h1:p4tcg7eBr7aT6DxrAoILpc3uXNfcuAvZSnQKonVg+Eo=
+github.com/google/trillian v1.6.0 h1:jMBeDBIkINFvS2n6oV5maDqfRlxREAc6CW9QYWQ0qT4=
+github.com/google/trillian v1.6.0/go.mod h1:Yu3nIMITzNhhMJEHjAtp6xKiu+H/iHu2Oq5FjV2mCWI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -496,15 +496,15 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.12.1 h1:9F8GV9r9ztXyAi00gsMQHNoF51xPZm8uj1dpYt2ZETM=
-github.com/googleapis/gax-go/v2 v2.12.1/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc=
+github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA=
+github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -533,8 +533,8 @@ github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iP
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM=
github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM=
-github.com/hashicorp/vault/api v1.12.0 h1:meCpJSesvzQyao8FCOgk2fGdoADAnbDu2WPJN1lDLJ4=
-github.com/hashicorp/vault/api v1.12.0/go.mod h1:si+lJCYO7oGkIoNPAN8j3azBLTn9SjMGS+jFaHd1Cck=
+github.com/hashicorp/vault/api v1.12.2 h1:7YkCTE5Ni90TcmYHDBExdt4WGJxhpzaHqR6uGbQb/rE=
+github.com/hashicorp/vault/api v1.12.2/go.mod h1:LSGf1NGT1BnvFFnKVtnvcaLBM2Lz+gJdpL6HUYed8KE=
github.com/henvic/httpretty v0.0.6 h1:JdzGzKZBajBfnvlMALXXMVQWxWMF/ofTy8C3/OSUTxs=
github.com/henvic/httpretty v0.0.6/go.mod h1:X38wLjWXHkXT7r2+uK8LjCMne9rsuNaBLJ+5cU2/Pmo=
github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM=
@@ -577,9 +577,8 @@ github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dv
github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA=
-github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
+github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
+github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -611,10 +610,8 @@ github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
-github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
-github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo=
-github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
+github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM=
+github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk=
github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
@@ -631,11 +628,8 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/mozillazg/docker-credential-acr-helper v0.3.0 h1:DVWFZ3/O8BP6Ue3iS/Olw+G07u1hCq1EOVCDZZjCIBI=
github.com/mozillazg/docker-credential-acr-helper v0.3.0/go.mod h1:cZlu3tof523ujmLuiNUb6JsjtHcNA70u1jitrrdnuyA=
-github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de h1:D5x39vF5KCwKQaw+OC9ZPiLVHXz3UFw2+psEX+gYcto=
-github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de/go.mod h1:kJun4WP5gFuHZgRjZUWWuH1DTxCtxbHDOIJsudS8jzY=
github.com/muesli/termenv v0.13.0 h1:wK20DRpJdDX8b7Ek2QfhvqhRQFZ237RGRO0RQ/Iqdy0=
github.com/muesli/termenv v0.13.0/go.mod h1:sP1+uffeLaEYpyOTb8pLCUctGcGLnoFjSn4YJK5e2bc=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
@@ -668,8 +662,8 @@ github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAl
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk=
github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg=
-github.com/open-policy-agent/opa v0.61.0 h1:nhncQ2CAYtQTV/SMBhDDPsCpCQsUW+zO/1j+T5V7oZg=
-github.com/open-policy-agent/opa v0.61.0/go.mod h1:7OUuzJnsS9yHf8lw0ApfcbrnaRG1EkN3J2fuuqi4G/E=
+github.com/open-policy-agent/opa v0.63.0 h1:ztNNste1v8kH0/vJMJNquE45lRvqwrM5mY9Ctr9xIXw=
+github.com/open-policy-agent/opa v0.63.0/go.mod h1:9VQPqEfoB2N//AToTxzZ1pVTVPUoF2Mhd64szzjWPpU=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
@@ -698,22 +692,22 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
-github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
-github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
+github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
+github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
-github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
+github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
+github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
-github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
-github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
+github.com/prometheus/common v0.51.1 h1:eIjN50Bwglz6a/c3hAgSMcofL3nD+nFQkV6Dd4DsQCw=
+github.com/prometheus/common v0.51.1/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
@@ -733,8 +727,8 @@ github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.11.1-0.20231026093722-fa6a31e0812c h1:fPpdjePK1atuOg28PXfNSqgwf9I/qD1Hlo39JFwKBXk=
-github.com/rogpeppe/go-internal v1.11.1-0.20231026093722-fa6a31e0812c/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
@@ -744,32 +738,32 @@ github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6g
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A=
github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk=
-github.com/sassoftware/relic/v7 v7.6.1 h1:O5s8ewCgq5QYNpv45dK4u6IpBmDM9RIcsbf/G1uXepQ=
-github.com/sassoftware/relic/v7 v7.6.1/go.mod h1:NxwtWxWxlUa9as2qZi635Ye6bBT/tGnMALLq7dSfOOU=
+github.com/sassoftware/relic/v7 v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4=
+github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PKM6vTAsyDPY+k=
github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA=
github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU=
github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c=
github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI=
github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE=
-github.com/sigstore/cosign/v2 v2.2.3 h1:WX7yawI+EXu9h7S5bZsfYCbB9XW6Jc43ctKy/NoOSiA=
-github.com/sigstore/cosign/v2 v2.2.3/go.mod h1:WpMn4MBt0cI23GdHsePwO4NxhX1FOz1ITGB3ALUjFaI=
-github.com/sigstore/fulcio v1.4.3 h1:9JcUCZjjVhRF9fmhVuz6i1RyhCc/EGCD7MOl+iqCJLQ=
-github.com/sigstore/fulcio v1.4.3/go.mod h1:BQPWo7cfxmJwgaHlphUHUpFkp5+YxeJes82oo39m5og=
-github.com/sigstore/rekor v1.3.4 h1:RGIia1iOZU7fOiiP2UY/WFYhhp50S5aUm7YrM8aiA6E=
-github.com/sigstore/rekor v1.3.4/go.mod h1:1GubPVO2yO+K0m0wt/3SHFqnilr/hWbsjSOe7Vzxrlg=
-github.com/sigstore/sigstore v1.8.2 h1:0Ttjcn3V0fVQXlYq7+oHaaHkGFIt3ywm7SF4JTU/l8c=
-github.com/sigstore/sigstore v1.8.2/go.mod h1:CHVcSyknCcjI4K2ZhS1SI28r0tcQyBlwtALG536x1DY=
-github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.2 h1:e0EtUcE7cqWBxxME7h6upA3EA0IR3EOE3F1t+WHOdTc=
-github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.2/go.mod h1:07qBxPjI9bsgdQRiBz27Ai+gl6hgr//vwXMZzTX87Us=
-github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.2 h1:Fgt4dC9OozkLEtMO6JYfFgqNdSDG1y1uAdiJgrtZYN4=
-github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.2/go.mod h1:BT+jh/GK55djPRHqTYu937eq29Zzusf1t0qVbrcn4Aw=
-github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.2 h1:aX6hLH5v3JdOQJJ6+uCMmeDjcwyfQMLmXKJVl6HtzAg=
-github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.2/go.mod h1:OEFPub6XKsX6Fl/PpeIpQTsukG3I0CFWb9saHINV72U=
-github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.2 h1:hRC8sGPQtnTcoOqWbCNAvLpW1pHL4CQl7FT55IrEof8=
-github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.2/go.mod h1:frWJBbYRRHnbLE9h1fH349Mde84NZh6hDrnKqhPgMNU=
-github.com/sigstore/timestamp-authority v1.2.1 h1:j9RmqSAdvKgSofeltPO4x7d+1M3AXaROBzUJ+AA7L5Q=
-github.com/sigstore/timestamp-authority v1.2.1/go.mod h1:Ce+vWWEf0QaKLY2u6mpwEJbmYXEVeOfUk4fQ69kE6ck=
+github.com/sigstore/cosign/v2 v2.2.4 h1:iY4vtEacmu2hkNj1Fh+8EBqBwKs2DHM27/lbNWDFJro=
+github.com/sigstore/cosign/v2 v2.2.4/go.mod h1:JZlRD2uaEjVAvZ1XJ3QkkZJhTqSDVtLaet+C/TMR81Y=
+github.com/sigstore/fulcio v1.4.5 h1:WWNnrOknD0DbruuZWCbN+86WRROpEl3Xts+WT2Ek1yc=
+github.com/sigstore/fulcio v1.4.5/go.mod h1:oz3Qwlma8dWcSS/IENR/6SjbW4ipN0cxpRVfgdsjMU8=
+github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8=
+github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc=
+github.com/sigstore/sigstore v1.8.3 h1:G7LVXqL+ekgYtYdksBks9B38dPoIsbscjQJX/MGWkA4=
+github.com/sigstore/sigstore v1.8.3/go.mod h1:mqbTEariiGA94cn6G3xnDiV6BD8eSLdL/eA7bvJ0fVs=
+github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.3 h1:LTfPadUAo+PDRUbbdqbeSl2OuoFQwUFTnJ4stu+nwWw=
+github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.3/go.mod h1:QV/Lxlxm0POyhfyBtIbTWxNeF18clMlkkyL9mu45y18=
+github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.3 h1:xgbPRCr2npmmsuVVteJqi/ERw9+I13Wou7kq0Yk4D8g=
+github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.3/go.mod h1:G4+I83FILPX6MtnoaUdmv/bRGEVtR3JdLeJa/kXdk/0=
+github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.3 h1:vDl2fqPT0h3D/k6NZPlqnKFd1tz3335wm39qjvpZNJc=
+github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.3/go.mod h1:9uOJXbXEXj+M6QjMKH5PaL5WDMu43rHfbIMgXzA8eKI=
+github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.3 h1:h9G8j+Ds21zqqulDbA/R/ft64oQQIyp8S7wJYABYSlg=
+github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.3/go.mod h1:zgCeHOuqF6k7A7TTEvftcA9V3FRzB7mrPtHOhXAQBnc=
+github.com/sigstore/timestamp-authority v1.2.2 h1:X4qyutnCQqJ0apMewFyx+3t7Tws00JQ/JonBiu3QvLE=
+github.com/sigstore/timestamp-authority v1.2.2/go.mod h1:nEah4Eq4wpliDjlY342rXclGSO7Kb9hoRrl9tqLW13A=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@@ -794,8 +788,8 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
-github.com/spiffe/go-spiffe/v2 v2.1.7 h1:VUkM1yIyg/x8X7u1uXqSRVRCdMdfRIEdFBzpqoeASGk=
-github.com/spiffe/go-spiffe/v2 v2.1.7/go.mod h1:QJDGdhXllxjxvd5B+2XnhhXB/+rC8gr+lNrtOryiWeE=
+github.com/spiffe/go-spiffe/v2 v2.2.0 h1:9Vf06UsvsDbLYK/zJ4sYsIsHmMFknUD+feA7IYoWMQY=
+github.com/spiffe/go-spiffe/v2 v2.2.0/go.mod h1:Urzb779b3+IwDJD2ZbN8fVl3Aa8G4N/PiUe6iXC0XxU=
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -854,18 +848,14 @@ github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG
github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A=
github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
-github.com/xanzy/go-gitlab v0.96.0 h1:LGkZ+wSNMRtHIBaYE4Hq3dZVjprwHv3Y1+rhKU3WETs=
-github.com/xanzy/go-gitlab v0.96.0/go.mod h1:ETg8tcj4OhrB84UEgeE8dSuV/0h4BBL1uOV/qK0vlyI=
-github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
-github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
-github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
+github.com/xanzy/go-gitlab v0.102.0 h1:ExHuJ1OTQ2yt25zBMMj0G96ChBirGYv8U7HyUiYkZ+4=
+github.com/xanzy/go-gitlab v0.102.0/go.mod h1:ETg8tcj4OhrB84UEgeE8dSuV/0h4BBL1uOV/qK0vlyI=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg=
github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok=
-github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ=
github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns=
github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ=
@@ -882,12 +872,12 @@ github.com/yuin/goldmark v1.1.30/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-github.com/zalando/go-keyring v0.2.2 h1:f0xmpYiSrHtSNAVgwip93Cg8tuF45HJM6rHq/A5RI/4=
-github.com/zalando/go-keyring v0.2.2/go.mod h1:sI3evg9Wvpw3+n4SqplGSJUMwtDeROfD4nsFz4z9PG0=
+github.com/zalando/go-keyring v0.2.3 h1:v9CUu9phlABObO4LPWycf+zwMG7nlbb3t/B5wa97yms=
+github.com/zalando/go-keyring v0.2.3/go.mod h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk=
github.com/zeebo/errs v1.3.0 h1:hmiaKqgYZzcVgRL1Vkc1Mn2914BbzB0IBxs+ebeutGs=
github.com/zeebo/errs v1.3.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
-go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk=
-go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo=
+go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
+go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -896,10 +886,10 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 h1:P+/g8GpuJGYbOp2tAdKrIPUX9JO02q8Q0YNlHolpibA=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0/go.mod h1:tIKj3DbO8N9Y2xo52og3irLsPI4GW02DSMtrVgNMgxg=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 h1:doUP+ExOpH3spVTLS0FcWGLnQrPct/hD/bCPbDRUEAU=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0/go.mod h1:rdENBZMT2OE6Ne/KLwpiXudnAsbdrdBaqBvTN8M8BgA=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 h1:t6wl9SPayj+c7lEIFgm4ooDBZVb01IhLB4InpomhRw8=
@@ -914,8 +904,8 @@ go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y
go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI=
go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY=
-go.step.sm/crypto v0.42.1 h1:OmwHm3GJO8S4VGWL3k4+I+Q4P/F2s+j8msvTyGnh1Vg=
-go.step.sm/crypto v0.42.1/go.mod h1:yNcTLFQBnYCA75fC5bklBoTAT7y0dRZsB1TkinB8JMs=
+go.step.sm/crypto v0.44.2 h1:t3p3uQ7raP2jp2ha9P6xkQF85TJZh+87xmjSLaib+jk=
+go.step.sm/crypto v0.44.2/go.mod h1:x1439EnFhadzhkuaGX7sz03LEMQ+jV4gRamf5LCZJQQ=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
@@ -938,15 +928,14 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
-golang.org/x/crypto v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg=
-golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
+golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
+golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -981,8 +970,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
-golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
+golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1031,8 +1020,8 @@ golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ=
-golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
-golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
+golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1040,8 +1029,8 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ=
-golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA=
+golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg=
+golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1055,8 +1044,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
-golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1116,8 +1105,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
-golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
+golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
@@ -1126,8 +1115,8 @@ golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
-golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
-golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
+golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
+golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1135,7 +1124,6 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
@@ -1195,8 +1183,8 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
-golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
+golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
+golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1221,16 +1209,14 @@ google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.167.0 h1:CKHrQD1BLRii6xdkatBDXyKzM0mkawt2QP+H3LtPmSE=
-google.golang.org/api v0.167.0/go.mod h1:4FcBc686KFi7QI/U51/2GKKevfZMpM17sCdibqe/bSA=
+google.golang.org/api v0.172.0 h1:/1OcMZGPmW1rX2LCu2CmGUD1KXK1+pfzxotxyRUCCdk=
+google.golang.org/api v0.172.0/go.mod h1:+fJZq6QXWfa9pXhnIzsjx4yI22d4aI9ZpLb58gvXjis=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
-google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -1262,12 +1248,12 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 h1:g/4bk7P6TPMkAUbUhquq98xey1slwvuVJPosdBqYJlU=
-google.golang.org/genproto v0.0.0-20240205150955-31a09d347014/go.mod h1:xEgQu1e4stdSSsxPDK8Azkrk/ECl5HvdPf6nbZrTS5M=
-google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 h1:x9PwdEgd11LgK+orcck69WVRo7DezSO4VUMPI4xpc8A=
-google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014/go.mod h1:rbHMSEDyoYX62nRVLOCc4Qt1HbsdytAYoVwgjiOhF3I=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 h1:hZB7eLIaYlW9qXRfCq/qDaPdbeY3757uARz5Vvfv+cY=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:YUWgXUFRPfoYK1IHMuxH5K6nPEXSCzIMljnQ59lLRCk=
+google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7 h1:ImUcDPHjTrAqNhlOkSocDLfG9rrNHH7w7uoKWPaWZ8s=
+google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7/go.mod h1:/3XmxOjePkvmKrHuBy4zNFw7IzxJXtAgdpXi8Ll990U=
+google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7 h1:oqta3O3AnlWbmIE3bFnWbu4bRxZjfbWCp0cKSuZh01E=
+google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -1348,8 +1334,8 @@ k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU=
k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU=
k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg=
k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0=
-k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
-k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
+k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
+k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
@@ -1373,5 +1359,5 @@ sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+s
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
-software.sslmate.com/src/go-pkcs12 v0.2.0 h1:nlFkj7bTysH6VkC4fGphtjXRbezREPgrHuJG20hBGPE=
-software.sslmate.com/src/go-pkcs12 v0.2.0/go.mod h1:23rNcYsMabIc1otwLpTkCCPwUq6kQsTyowttG/as0kQ=
+software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k=
+software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go
index 27a1970b..f494e95b 100644
--- a/vendor/cloud.google.com/go/compute/internal/version.go
+++ b/vendor/cloud.google.com/go/compute/internal/version.go
@@ -15,4 +15,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "1.23.4"
+const Version = "1.25.0"
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/README.md b/vendor/cuelabs.dev/go/oci/ociregistry/README.md
index 2ef7faf1..d6d7c183 100644
--- a/vendor/cuelabs.dev/go/oci/ociregistry/README.md
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/README.md
@@ -10,9 +10,9 @@ and an HTTP server that implements the [OCI registry protocol](https://github.co
The server currently passes the [conformance tests](https://pkg.go.dev/github.com/opencontainers/distribution-spec/conformance).
-That said, it is in total flux at the moment! Do not use it as a dependency, as the API is changing hourly.
-
-The aim, however, is to provide an ergonomic interface for defining and layering
+The aim is to provide an ergonomic interface for defining and layering
OCI registry implementations.
+Although the API is fairly stable, it's still in v0 currently, so incompatible changes can't be ruled out.
+
The code was originally derived from the [go-containerregistry](https://pkg.go.dev/github.com/google/go-containerregistry/pkg/registry) registry, but has considerably diverged since then.
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/func.go b/vendor/cuelabs.dev/go/oci/ociregistry/func.go
index e916ac82..cb1fafb5 100644
--- a/vendor/cuelabs.dev/go/oci/ociregistry/func.go
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/func.go
@@ -20,6 +20,8 @@ import (
"io"
)
+var _ Interface = (*Funcs)(nil)
+
// Funcs implements Interface by calling its member functions: there's one field
// for every corresponding method of [Interface].
//
@@ -52,9 +54,9 @@ type Funcs struct {
DeleteBlob_ func(ctx context.Context, repo string, digest Digest) error
DeleteManifest_ func(ctx context.Context, repo string, digest Digest) error
DeleteTag_ func(ctx context.Context, repo string, name string) error
- Repositories_ func(ctx context.Context) Iter[string]
- Tags_ func(ctx context.Context, repo string) Iter[string]
- Referrers_ func(ctx context.Context, repo string, digest Digest, artifactType string) Iter[Descriptor]
+ Repositories_ func(ctx context.Context, startAfter string) Seq[string]
+ Tags_ func(ctx context.Context, repo string, startAfter string) Seq[string]
+ Referrers_ func(ctx context.Context, repo string, digest Digest, artifactType string) Seq[Descriptor]
}
// This blesses Funcs as the canonical Interface implementation.
@@ -172,21 +174,21 @@ func (f *Funcs) DeleteTag(ctx context.Context, repo string, name string) error {
return f.newError(ctx, "DeleteTag", repo)
}
-func (f *Funcs) Repositories(ctx context.Context) Iter[string] {
+func (f *Funcs) Repositories(ctx context.Context, startAfter string) Seq[string] {
if f != nil && f.Repositories_ != nil {
- return f.Repositories_(ctx)
+ return f.Repositories_(ctx, startAfter)
}
return ErrorIter[string](f.newError(ctx, "Repositories", ""))
}
-func (f *Funcs) Tags(ctx context.Context, repo string) Iter[string] {
+func (f *Funcs) Tags(ctx context.Context, repo string, startAfter string) Seq[string] {
if f != nil && f.Tags_ != nil {
- return f.Tags_(ctx, repo)
+ return f.Tags_(ctx, repo, startAfter)
}
return ErrorIter[string](f.newError(ctx, "Tags", repo))
}
-func (f *Funcs) Referrers(ctx context.Context, repo string, digest Digest, artifactType string) Iter[Descriptor] {
+func (f *Funcs) Referrers(ctx context.Context, repo string, digest Digest, artifactType string) Seq[Descriptor] {
if f != nil && f.Referrers_ != nil {
return f.Referrers_(ctx, repo, digest, artifactType)
}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/interface.go b/vendor/cuelabs.dev/go/oci/ociregistry/interface.go
index 1df3fd9e..f110e817 100644
--- a/vendor/cuelabs.dev/go/oci/ociregistry/interface.go
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/interface.go
@@ -210,20 +210,24 @@ type Deleter interface {
// Lister defines registry operations that enumerate objects within the registry.
// TODO support resumption from a given point.
type Lister interface {
- // Repositories returns an iterator that can be used to iterate over all the repositories
- // in the registry.
- Repositories(ctx context.Context) Iter[string]
-
- // Tags returns an iterator that can be used to iterate over all the tags
- // in the given repository.
- Tags(ctx context.Context, repo string) Iter[string]
+ // Repositories returns an iterator that can be used to iterate
+ // over all the repositories in the registry in lexical order.
+ // If startAfter is non-empty, the iteration starts lexically
+ // after, but not including, that repository.
+ Repositories(ctx context.Context, startAfter string) Seq[string]
+
+ // Tags returns an iterator that can be used to iterate over all
+ // the tags in the given repository in lexical order. If
+ // startAfter is non-empty, the tags start lexically after, but
+ // not including that tag.
+ Tags(ctx context.Context, repo string, startAfter string) Seq[string]
// Referrers returns an iterator that can be used to iterate over all
// the manifests that have the given digest as their Subject.
// If artifactType is non-zero, the results will be restricted to
// only manifests with that type.
// TODO is it possible to ask for multiple artifact types?
- Referrers(ctx context.Context, repo string, digest Digest, artifactType string) Iter[Descriptor]
+ Referrers(ctx context.Context, repo string, digest Digest, artifactType string) Seq[Descriptor]
}
// BlobWriter provides a handle for uploading a blob to a registry.
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/internal/exp/constraints/constraints.go b/vendor/cuelabs.dev/go/oci/ociregistry/internal/exp/constraints/constraints.go
new file mode 100644
index 00000000..2c033dff
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/internal/exp/constraints/constraints.go
@@ -0,0 +1,50 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package constraints defines a set of useful constraints to be used
+// with type parameters.
+package constraints
+
+// Signed is a constraint that permits any signed integer type.
+// If future releases of Go add new predeclared signed integer types,
+// this constraint will be modified to include them.
+type Signed interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64
+}
+
+// Unsigned is a constraint that permits any unsigned integer type.
+// If future releases of Go add new predeclared unsigned integer types,
+// this constraint will be modified to include them.
+type Unsigned interface {
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
+}
+
+// Integer is a constraint that permits any integer type.
+// If future releases of Go add new predeclared integer types,
+// this constraint will be modified to include them.
+type Integer interface {
+ Signed | Unsigned
+}
+
+// Float is a constraint that permits any floating-point type.
+// If future releases of Go add new predeclared floating-point types,
+// this constraint will be modified to include them.
+type Float interface {
+ ~float32 | ~float64
+}
+
+// Complex is a constraint that permits any complex numeric type.
+// If future releases of Go add new predeclared complex numeric types,
+// this constraint will be modified to include them.
+type Complex interface {
+ ~complex64 | ~complex128
+}
+
+// Ordered is a constraint that permits any ordered type: any type
+// that supports the operators < <= >= >.
+// If future releases of Go add new ordered types,
+// this constraint will be modified to include them.
+type Ordered interface {
+ Integer | Float | ~string
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/internal/exp/slices/cmp.go b/vendor/cuelabs.dev/go/oci/ociregistry/internal/exp/slices/cmp.go
new file mode 100644
index 00000000..56ee1910
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/internal/exp/slices/cmp.go
@@ -0,0 +1,44 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import "cuelabs.dev/go/oci/ociregistry/internal/exp/constraints"
+
+// min is a version of the predeclared function from the Go 1.21 release.
+func min[T constraints.Ordered](a, b T) T {
+ if a < b || isNaN(a) {
+ return a
+ }
+ return b
+}
+
+// max is a version of the predeclared function from the Go 1.21 release.
+func max[T constraints.Ordered](a, b T) T {
+ if a > b || isNaN(a) {
+ return a
+ }
+ return b
+}
+
+// cmpLess is a copy of cmp.Less from the Go 1.21 release.
+func cmpLess[T constraints.Ordered](x, y T) bool {
+ return (isNaN(x) && !isNaN(y)) || x < y
+}
+
+// cmpCompare is a copy of cmp.Compare from the Go 1.21 release.
+func cmpCompare[T constraints.Ordered](x, y T) int {
+ xNaN := isNaN(x)
+ yNaN := isNaN(y)
+ if xNaN && yNaN {
+ return 0
+ }
+ if xNaN || x < y {
+ return -1
+ }
+ if yNaN || x > y {
+ return +1
+ }
+ return 0
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/internal/exp/slices/slices.go b/vendor/cuelabs.dev/go/oci/ociregistry/internal/exp/slices/slices.go
new file mode 100644
index 00000000..e4152708
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/internal/exp/slices/slices.go
@@ -0,0 +1,499 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package slices defines various functions useful with slices of any type.
+package slices
+
+import (
+ "unsafe"
+
+ "cuelabs.dev/go/oci/ociregistry/internal/exp/constraints"
+)
+
+// Equal reports whether two slices are equal: the same length and all
+// elements equal. If the lengths are different, Equal returns false.
+// Otherwise, the elements are compared in increasing index order, and the
+// comparison stops at the first unequal pair.
+// Floating point NaNs are not considered equal.
+func Equal[S ~[]E, E comparable](s1, s2 S) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i := range s1 {
+ if s1[i] != s2[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// EqualFunc reports whether two slices are equal using an equality
+// function on each pair of elements. If the lengths are different,
+// EqualFunc returns false. Otherwise, the elements are compared in
+// increasing index order, and the comparison stops at the first index
+// for which eq returns false.
+func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if !eq(v1, v2) {
+ return false
+ }
+ }
+ return true
+}
+
+// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair
+// of elements. The elements are compared sequentially, starting at index 0,
+// until one element is not equal to the other.
+// The result of comparing the first non-matching elements is returned.
+// If both slices are equal until one of them ends, the shorter slice is
+// considered less than the longer one.
+// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
+func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int {
+ for i, v1 := range s1 {
+ if i >= len(s2) {
+ return +1
+ }
+ v2 := s2[i]
+ if c := cmpCompare(v1, v2); c != 0 {
+ return c
+ }
+ }
+ if len(s1) < len(s2) {
+ return -1
+ }
+ return 0
+}
+
+// CompareFunc is like [Compare] but uses a custom comparison function on each
+// pair of elements.
+// The result is the first non-zero result of cmp; if cmp always
+// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
+// and +1 if len(s1) > len(s2).
+func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int {
+ for i, v1 := range s1 {
+ if i >= len(s2) {
+ return +1
+ }
+ v2 := s2[i]
+ if c := cmp(v1, v2); c != 0 {
+ return c
+ }
+ }
+ if len(s1) < len(s2) {
+ return -1
+ }
+ return 0
+}
+
+// Index returns the index of the first occurrence of v in s,
+// or -1 if not present.
+func Index[S ~[]E, E comparable](s S, v E) int {
+ for i := range s {
+ if v == s[i] {
+ return i
+ }
+ }
+ return -1
+}
+
+// IndexFunc returns the first index i satisfying f(s[i]),
+// or -1 if none do.
+func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int {
+ for i := range s {
+ if f(s[i]) {
+ return i
+ }
+ }
+ return -1
+}
+
+// Contains reports whether v is present in s.
+func Contains[S ~[]E, E comparable](s S, v E) bool {
+ return Index(s, v) >= 0
+}
+
+// ContainsFunc reports whether at least one
+// element e of s satisfies f(e).
+func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
+ return IndexFunc(s, f) >= 0
+}
+
+// Insert inserts the values v... into s at index i,
+// returning the modified slice.
+// The elements at s[i:] are shifted up to make room.
+// In the returned slice r, r[i] == v[0],
+// and r[i+len(v)] == value originally at r[i].
+// Insert panics if i is out of range.
+// This function is O(len(s) + len(v)).
+func Insert[S ~[]E, E any](s S, i int, v ...E) S {
+ m := len(v)
+ if m == 0 {
+ return s
+ }
+ n := len(s)
+ if i == n {
+ return append(s, v...)
+ }
+ if n+m > cap(s) {
+ // Use append rather than make so that we bump the size of
+ // the slice up to the next storage class.
+ // This is what Grow does but we don't call Grow because
+ // that might copy the values twice.
+ s2 := append(s[:i], make(S, n+m-i)...)
+ copy(s2[i:], v)
+ copy(s2[i+m:], s[i:])
+ return s2
+ }
+ s = s[:n+m]
+
+ // before:
+ // s: aaaaaaaabbbbccccccccdddd
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ // after:
+ // s: aaaaaaaavvvvbbbbcccccccc
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ //
+ // a are the values that don't move in s.
+ // v are the values copied in from v.
+ // b and c are the values from s that are shifted up in index.
+ // d are the values that get overwritten, never to be seen again.
+
+ if !overlaps(v, s[i+m:]) {
+ // Easy case - v does not overlap either the c or d regions.
+ // (It might be in some of a or b, or elsewhere entirely.)
+ // The data we copy up doesn't write to v at all, so just do it.
+
+ copy(s[i+m:], s[i:])
+
+ // Now we have
+ // s: aaaaaaaabbbbbbbbcccccccc
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ // Note the b values are duplicated.
+
+ copy(s[i:], v)
+
+ // Now we have
+ // s: aaaaaaaavvvvbbbbcccccccc
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ // That's the result we want.
+ return s
+ }
+
+ // The hard case - v overlaps c or d. We can't just shift up
+ // the data because we'd move or clobber the values we're trying
+ // to insert.
+ // So instead, write v on top of d, then rotate.
+ copy(s[n:], v)
+
+ // Now we have
+ // s: aaaaaaaabbbbccccccccvvvv
+ // ^ ^ ^ ^
+ // i i+m n n+m
+
+ rotateRight(s[i:], m)
+
+ // Now we have
+ // s: aaaaaaaavvvvbbbbcccccccc
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ // That's the result we want.
+ return s
+}
+
+// Delete removes the elements s[i:j] from s, returning the modified slice.
+// Delete panics if s[i:j] is not a valid slice of s.
+// Delete is O(len(s)-j), so if many items must be deleted, it is better to
+// make a single call deleting them all together than to delete one at a time.
+// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those
+// elements contain pointers you might consider zeroing those elements so that
+// objects they reference can be garbage collected.
+func Delete[S ~[]E, E any](s S, i, j int) S {
+ _ = s[i:j] // bounds check
+
+ return append(s[:i], s[j:]...)
+}
+
+// DeleteFunc removes any elements from s for which del returns true,
+// returning the modified slice.
+// When DeleteFunc removes m elements, it might not modify the elements
+// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
+// zeroing those elements so that objects they reference can be garbage
+// collected.
+func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
+ i := IndexFunc(s, del)
+ if i == -1 {
+ return s
+ }
+ // Don't start copying elements until we find one to delete.
+ for j := i + 1; j < len(s); j++ {
+ if v := s[j]; !del(v) {
+ s[i] = v
+ i++
+ }
+ }
+ return s[:i]
+}
+
+// Replace replaces the elements s[i:j] by the given v, and returns the
+// modified slice. Replace panics if s[i:j] is not a valid slice of s.
+func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
+ _ = s[i:j] // verify that i:j is a valid subslice
+
+ if i == j {
+ return Insert(s, i, v...)
+ }
+ if j == len(s) {
+ return append(s[:i], v...)
+ }
+
+ tot := len(s[:i]) + len(v) + len(s[j:])
+ if tot > cap(s) {
+ // Too big to fit, allocate and copy over.
+ s2 := append(s[:i], make(S, tot-i)...) // See Insert
+ copy(s2[i:], v)
+ copy(s2[i+len(v):], s[j:])
+ return s2
+ }
+
+ r := s[:tot]
+
+ if i+len(v) <= j {
+ // Easy, as v fits in the deleted portion.
+ copy(r[i:], v)
+ if i+len(v) != j {
+ copy(r[i+len(v):], s[j:])
+ }
+ return r
+ }
+
+ // We are expanding (v is bigger than j-i).
+ // The situation is something like this:
+ // (example has i=4,j=8,len(s)=16,len(v)=6)
+ // s: aaaaxxxxbbbbbbbbyy
+ // ^ ^ ^ ^
+ // i j len(s) tot
+ // a: prefix of s
+ // x: deleted range
+ // b: more of s
+ // y: area to expand into
+
+ if !overlaps(r[i+len(v):], v) {
+ // Easy, as v is not clobbered by the first copy.
+ copy(r[i+len(v):], s[j:])
+ copy(r[i:], v)
+ return r
+ }
+
+ // This is a situation where we don't have a single place to which
+ // we can copy v. Parts of it need to go to two different places.
+ // We want to copy the prefix of v into y and the suffix into x, then
+ // rotate |y| spots to the right.
+ //
+ // v[2:] v[:2]
+ // | |
+ // s: aaaavvvvbbbbbbbbvv
+ // ^ ^ ^ ^
+ // i j len(s) tot
+ //
+ // If either of those two destinations don't alias v, then we're good.
+ y := len(v) - (j - i) // length of y portion
+
+ if !overlaps(r[i:j], v) {
+ copy(r[i:j], v[y:])
+ copy(r[len(s):], v[:y])
+ rotateRight(r[i:], y)
+ return r
+ }
+ if !overlaps(r[len(s):], v) {
+ copy(r[len(s):], v[:y])
+ copy(r[i:j], v[y:])
+ rotateRight(r[i:], y)
+ return r
+ }
+
+ // Now we know that v overlaps both x and y.
+ // That means that the entirety of b is *inside* v.
+ // So we don't need to preserve b at all; instead we
+ // can copy v first, then copy the b part of v out of
+ // v to the right destination.
+ k := startIdx(v, s[j:])
+ copy(r[i:], v)
+ copy(r[i+len(v):], r[i+k:])
+ return r
+}
+
+// Clone returns a copy of the slice.
+// The elements are copied using assignment, so this is a shallow clone.
+func Clone[S ~[]E, E any](s S) S {
+ // Preserve nil in case it matters.
+ if s == nil {
+ return nil
+ }
+ return append(S([]E{}), s...)
+}
+
+// Compact replaces consecutive runs of equal elements with a single copy.
+// This is like the uniq command found on Unix.
+// Compact modifies the contents of the slice s and returns the modified slice,
+// which may have a smaller length.
+// When Compact discards m elements in total, it might not modify the elements
+// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
+// zeroing those elements so that objects they reference can be garbage collected.
+func Compact[S ~[]E, E comparable](s S) S {
+ if len(s) < 2 {
+ return s
+ }
+ i := 1
+ for k := 1; k < len(s); k++ {
+ if s[k] != s[k-1] {
+ if i != k {
+ s[i] = s[k]
+ }
+ i++
+ }
+ }
+ return s[:i]
+}
+
+// CompactFunc is like [Compact] but uses an equality function to compare elements.
+// For runs of elements that compare equal, CompactFunc keeps the first one.
+func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
+ if len(s) < 2 {
+ return s
+ }
+ i := 1
+ for k := 1; k < len(s); k++ {
+ if !eq(s[k], s[k-1]) {
+ if i != k {
+ s[i] = s[k]
+ }
+ i++
+ }
+ }
+ return s[:i]
+}
+
+// Grow increases the slice's capacity, if necessary, to guarantee space for
+// another n elements. After Grow(n), at least n elements can be appended
+// to the slice without another allocation. If n is negative or too large to
+// allocate the memory, Grow panics.
+func Grow[S ~[]E, E any](s S, n int) S {
+ if n < 0 {
+ panic("cannot be negative")
+ }
+ if n -= cap(s) - len(s); n > 0 {
+ // TODO(https://go.dev/issue/53888): Make using []E instead of S
+ // to workaround a compiler bug where the runtime.growslice optimization
+ // does not take effect. Revert when the compiler is fixed.
+ s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)]
+ }
+ return s
+}
+
+// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
+func Clip[S ~[]E, E any](s S) S {
+ return s[:len(s):len(s)]
+}
+
+// Rotation algorithm explanation:
+//
+// rotate left by 2
+// start with
+// 0123456789
+// split up like this
+// 01 234567 89
+// swap first 2 and last 2
+// 89 234567 01
+// join first parts
+// 89234567 01
+// recursively rotate first left part by 2
+// 23456789 01
+// join at the end
+// 2345678901
+//
+// rotate left by 8
+// start with
+// 0123456789
+// split up like this
+// 01 234567 89
+// swap first 2 and last 2
+// 89 234567 01
+// join last parts
+// 89 23456701
+// recursively rotate second part left by 6
+// 89 01234567
+// join at the end
+// 8901234567
+
+// TODO: There are other rotate algorithms.
+// This algorithm has the desirable property that it moves each element exactly twice.
+// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes.
+// The follow-cycles algorithm can be 1-write but it is not very cache friendly.
+
+// rotateLeft rotates b left by n spaces.
+// s_final[i] = s_orig[i+r], wrapping around.
+func rotateLeft[E any](s []E, r int) {
+ for r != 0 && r != len(s) {
+ if r*2 <= len(s) {
+ swap(s[:r], s[len(s)-r:])
+ s = s[:len(s)-r]
+ } else {
+ swap(s[:len(s)-r], s[r:])
+ s, r = s[len(s)-r:], r*2-len(s)
+ }
+ }
+}
+func rotateRight[E any](s []E, r int) {
+ rotateLeft(s, len(s)-r)
+}
+
+// swap swaps the contents of x and y. x and y must be equal length and disjoint.
+func swap[E any](x, y []E) {
+ for i := 0; i < len(x); i++ {
+ x[i], y[i] = y[i], x[i]
+ }
+}
+
+// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap.
+func overlaps[E any](a, b []E) bool {
+ if len(a) == 0 || len(b) == 0 {
+ return false
+ }
+ elemSize := unsafe.Sizeof(a[0])
+ if elemSize == 0 {
+ return false
+ }
+ // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445.
+ // Also see crypto/internal/alias/alias.go:AnyOverlap
+ return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) &&
+ uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1)
+}
+
+// startIdx returns the index in haystack where the needle starts.
+// prerequisite: the needle must be aliased entirely inside the haystack.
+func startIdx[E any](haystack, needle []E) int {
+ p := &needle[0]
+ for i := range haystack {
+ if p == &haystack[i] {
+ return i
+ }
+ }
+ // TODO: what if the overlap is by a non-integral number of Es?
+ panic("needle not found")
+}
+
+// Reverse reverses the elements of the slice in place.
+func Reverse[S ~[]E, E any](s S) {
+ for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
+ s[i], s[j] = s[j], s[i]
+ }
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/internal/exp/slices/sort.go b/vendor/cuelabs.dev/go/oci/ociregistry/internal/exp/slices/sort.go
new file mode 100644
index 00000000..cc8d4c5f
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/internal/exp/slices/sort.go
@@ -0,0 +1,193 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import (
+ "math/bits"
+
+ "cuelabs.dev/go/oci/ociregistry/internal/exp/constraints"
+)
+
+// Sort sorts a slice of any ordered type in ascending order.
+// When sorting floating-point numbers, NaNs are ordered before other values.
+func Sort[S ~[]E, E constraints.Ordered](x S) {
+ n := len(x)
+ pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
+}
+
+// SortFunc sorts the slice x in ascending order as determined by the cmp
+// function. This sort is not guaranteed to be stable.
+// cmp(a, b) should return a negative number when a < b, a positive number when
+// a > b and zero when a == b.
+//
+// SortFunc requires that cmp is a strict weak ordering.
+// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
+func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
+ n := len(x)
+ pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp)
+}
+
+// SortStableFunc sorts the slice x while keeping the original order of equal
+// elements, using cmp to compare elements in the same way as [SortFunc].
+func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
+ stableCmpFunc(x, len(x), cmp)
+}
+
+// IsSorted reports whether x is sorted in ascending order.
+func IsSorted[S ~[]E, E constraints.Ordered](x S) bool {
+ for i := len(x) - 1; i > 0; i-- {
+ if cmpLess(x[i], x[i-1]) {
+ return false
+ }
+ }
+ return true
+}
+
+// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the
+// comparison function as defined by [SortFunc].
+func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool {
+ for i := len(x) - 1; i > 0; i-- {
+ if cmp(x[i], x[i-1]) < 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// Min returns the minimal value in x. It panics if x is empty.
+// For floating-point numbers, Min propagates NaNs (any NaN value in x
+// forces the output to be NaN).
+func Min[S ~[]E, E constraints.Ordered](x S) E {
+ if len(x) < 1 {
+ panic("slices.Min: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ m = min(m, x[i])
+ }
+ return m
+}
+
+// MinFunc returns the minimal value in x, using cmp to compare elements.
+// It panics if x is empty. If there is more than one minimal element
+// according to the cmp function, MinFunc returns the first one.
+func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
+ if len(x) < 1 {
+ panic("slices.MinFunc: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ if cmp(x[i], m) < 0 {
+ m = x[i]
+ }
+ }
+ return m
+}
+
+// Max returns the maximal value in x. It panics if x is empty.
+// For floating-point E, Max propagates NaNs (any NaN value in x
+// forces the output to be NaN).
+func Max[S ~[]E, E constraints.Ordered](x S) E {
+ if len(x) < 1 {
+ panic("slices.Max: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ m = max(m, x[i])
+ }
+ return m
+}
+
+// MaxFunc returns the maximal value in x, using cmp to compare elements.
+// It panics if x is empty. If there is more than one maximal element
+// according to the cmp function, MaxFunc returns the first one.
+func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
+ if len(x) < 1 {
+ panic("slices.MaxFunc: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ if cmp(x[i], m) > 0 {
+ m = x[i]
+ }
+ }
+ return m
+}
+
+// BinarySearch searches for target in a sorted slice and returns the position
+// where target is found, or the position where target would appear in the
+// sort order; it also returns a bool saying whether the target is really found
+// in the slice. The slice must be sorted in increasing order.
+func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) {
+ // Inlining is faster than calling BinarySearchFunc with a lambda.
+ n := len(x)
+ // Define x[-1] < target and x[n] >= target.
+ // Invariant: x[i-1] < target, x[j] >= target.
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if cmpLess(x[h], target) {
+ i = h + 1 // preserves x[i-1] < target
+ } else {
+ j = h // preserves x[j] >= target
+ }
+ }
+ // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
+ return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target)))
+}
+
+// BinarySearchFunc works like [BinarySearch], but uses a custom comparison
+// function. The slice must be sorted in increasing order, where "increasing"
+// is defined by cmp. cmp should return 0 if the slice element matches
+// the target, a negative number if the slice element precedes the target,
+// or a positive number if the slice element follows the target.
+// cmp must implement the same ordering as the slice, such that if
+// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice.
+func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) {
+ n := len(x)
+ // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 .
+ // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if cmp(x[h], target) < 0 {
+ i = h + 1 // preserves cmp(x[i - 1], target) < 0
+ } else {
+ j = h // preserves cmp(x[j], target) >= 0
+ }
+ }
+ // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i.
+ return i, i < n && cmp(x[i], target) == 0
+}
+
+type sortedHint int // hint for pdqsort when choosing the pivot
+
+const (
+ unknownHint sortedHint = iota
+ increasingHint
+ decreasingHint
+)
+
+// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
+type xorshift uint64
+
+func (r *xorshift) Next() uint64 {
+ *r ^= *r << 13
+ *r ^= *r >> 17
+ *r ^= *r << 5
+ return uint64(*r)
+}
+
+func nextPowerOfTwo(length int) uint {
+ return 1 << bits.Len(uint(length))
+}
+
+// isNaN reports whether x is a NaN without requiring the math package.
+// This will always return false if T is not floating-point.
+func isNaN[T constraints.Ordered](x T) bool {
+ return x != x
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/internal/exp/slices/zsortanyfunc.go b/vendor/cuelabs.dev/go/oci/ociregistry/internal/exp/slices/zsortanyfunc.go
new file mode 100644
index 00000000..06f2c7a2
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/internal/exp/slices/zsortanyfunc.go
@@ -0,0 +1,479 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+// insertionSortCmpFunc sorts data[a:b] using insertion sort.
+func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// siftDownCmpFunc implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) {
+ child++
+ }
+ if !(cmp(data[first+root], data[first+child]) < 0) {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+
+func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDownCmpFunc(data, i, hi, first, cmp)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDownCmpFunc(data, lo, i, first, cmp)
+ }
+}
+
+// pdqsortCmpFunc sorts data[a:b].
+// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSortCmpFunc(data, a, b, cmp)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSortCmpFunc(data, a, b, cmp)
+ return
+ }
+
+ // If the last partitioning was imbalanced, we need to breaking patterns.
+ if !wasBalanced {
+ breakPatternsCmpFunc(data, a, b, cmp)
+ limit--
+ }
+
+ pivot, hint := choosePivotCmpFunc(data, a, b, cmp)
+ if hint == decreasingHint {
+ reverseRangeCmpFunc(data, a, b, cmp)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSortCmpFunc(data, a, b, cmp) {
+ return
+ }
+ }
+
+ // Probably the slice contains many duplicate elements, partition the slice into
+ // elements equal to and elements greater than the pivot.
+ if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) {
+ mid := partitionEqualCmpFunc(data, a, b, pivot, cmp)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsortCmpFunc(data, a, mid, limit, cmp)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsortCmpFunc(data, mid+1, b, limit, cmp)
+ b = mid
+ }
+ }
+}
+
+// partitionCmpFunc does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && (cmp(data[i], data[a]) < 0) {
+ i++
+ }
+ for i <= j && !(cmp(data[j], data[a]) < 0) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && (cmp(data[i], data[a]) < 0) {
+ i++
+ }
+ for i <= j && !(cmp(data[j], data[a]) < 0) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
+func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !(cmp(data[a], data[i]) < 0) {
+ i++
+ }
+ for i <= j && (cmp(data[a], data[j]) < 0) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !(cmp(data[i], data[i-1]) < 0) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !(cmp(data[j], data[j-1]) < 0) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !(cmp(data[j], data[j-1]) < 0) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotCmpFunc chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentCmpFunc(data, i, &swaps, cmp)
+ j = medianAdjacentCmpFunc(data, j, &swaps, cmp)
+ k = medianAdjacentCmpFunc(data, k, &swaps, cmp)
+ }
+ // Find the median among i, j, k and stores it into j.
+ j = medianCmpFunc(data, i, j, k, &swaps, cmp)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) {
+ if cmp(data[b], data[a]) < 0 {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int {
+ a, b = order2CmpFunc(data, a, b, swaps, cmp)
+ b, c = order2CmpFunc(data, b, c, swaps, cmp)
+ a, b = order2CmpFunc(data, a, b, swaps, cmp)
+ return b
+}
+
+// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
+func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int {
+ return medianCmpFunc(data, a-1, a, a+1, swaps, cmp)
+}
+
+func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ i := a
+ j := b - 1
+ for i < j {
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+}
+
+func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) {
+ for i := 0; i < n; i++ {
+ data[a+i], data[b+i] = data[b+i], data[a+i]
+ }
+}
+
+func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSortCmpFunc(data, a, b, cmp)
+ a = b
+ b += blockSize
+ }
+ insertionSortCmpFunc(data, a, n, cmp)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMergeCmpFunc(data, a, a+blockSize, b, cmp)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMergeCmpFunc(data, a, m, n, cmp)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-n. Wolog M < N.
+// The recursion depth is bound by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if cmp(data[h], data[a]) < 0 {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data[k], data[k+1] = data[k+1], data[k]
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !(cmp(data[m], data[h]) < 0) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data[k], data[k-1] = data[k-1], data[k]
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !(cmp(data[p-c], data[c]) < 0) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotateCmpFunc(data, start, m, end, cmp)
+ }
+ if a < start && start < mid {
+ symMergeCmpFunc(data, a, start, mid, cmp)
+ }
+ if mid < end && end < b {
+ symMergeCmpFunc(data, mid, end, b, cmp)
+ }
+}
+
+// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRangeCmpFunc(data, m-i, m, j, cmp)
+ i -= j
+ } else {
+ swapRangeCmpFunc(data, m-i, m+j-i, i, cmp)
+ j -= i
+ }
+ }
+ // i == j
+ swapRangeCmpFunc(data, m-i, m, i, cmp)
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/internal/exp/slices/zsortordered.go b/vendor/cuelabs.dev/go/oci/ociregistry/internal/exp/slices/zsortordered.go
new file mode 100644
index 00000000..ee63d398
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/internal/exp/slices/zsortordered.go
@@ -0,0 +1,481 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import "cuelabs.dev/go/oci/ociregistry/internal/exp/constraints"
+
+// insertionSortOrdered sorts data[a:b] using insertion sort.
+func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && cmpLess(data[j], data[j-1]); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// siftDownOrdered implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) {
+ child++
+ }
+ if !cmpLess(data[first+root], data[first+child]) {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+
+func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDownOrdered(data, i, hi, first)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDownOrdered(data, lo, i, first)
+ }
+}
+
+// pdqsortOrdered sorts data[a:b].
+// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSortOrdered(data, a, b)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSortOrdered(data, a, b)
+ return
+ }
+
+ // If the last partitioning was imbalanced, we need to breaking patterns.
+ if !wasBalanced {
+ breakPatternsOrdered(data, a, b)
+ limit--
+ }
+
+ pivot, hint := choosePivotOrdered(data, a, b)
+ if hint == decreasingHint {
+ reverseRangeOrdered(data, a, b)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSortOrdered(data, a, b) {
+ return
+ }
+ }
+
+ // Probably the slice contains many duplicate elements, partition the slice into
+ // elements equal to and elements greater than the pivot.
+ if a > 0 && !cmpLess(data[a-1], data[pivot]) {
+ mid := partitionEqualOrdered(data, a, b, pivot)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsortOrdered(data, a, mid, limit)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsortOrdered(data, mid+1, b, limit)
+ b = mid
+ }
+ }
+}
+
+// partitionOrdered does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && cmpLess(data[i], data[a]) {
+ i++
+ }
+ for i <= j && !cmpLess(data[j], data[a]) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && cmpLess(data[i], data[a]) {
+ i++
+ }
+ for i <= j && !cmpLess(data[j], data[a]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
+func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !cmpLess(data[a], data[i]) {
+ i++
+ }
+ for i <= j && cmpLess(data[a], data[j]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !cmpLess(data[i], data[i-1]) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !cmpLess(data[j], data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !cmpLess(data[j], data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotOrdered chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentOrdered(data, i, &swaps)
+ j = medianAdjacentOrdered(data, j, &swaps)
+ k = medianAdjacentOrdered(data, k, &swaps)
+ }
+ // Find the median among i, j, k and stores it into j.
+ j = medianOrdered(data, i, j, k, &swaps)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
+ if cmpLess(data[b], data[a]) {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
+ a, b = order2Ordered(data, a, b, swaps)
+ b, c = order2Ordered(data, b, c, swaps)
+ a, b = order2Ordered(data, a, b, swaps)
+ return b
+}
+
+// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
+func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
+ return medianOrdered(data, a-1, a, a+1, swaps)
+}
+
+func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
+ i := a
+ j := b - 1
+ for i < j {
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+}
+
+func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
+ for i := 0; i < n; i++ {
+ data[a+i], data[b+i] = data[b+i], data[a+i]
+ }
+}
+
+func stableOrdered[E constraints.Ordered](data []E, n int) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSortOrdered(data, a, b)
+ a = b
+ b += blockSize
+ }
+ insertionSortOrdered(data, a, n)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMergeOrdered(data, a, a+blockSize, b)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMergeOrdered(data, a, m, n)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-n. Wolog M < N.
+// The recursion depth is bound by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if cmpLess(data[h], data[a]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data[k], data[k+1] = data[k+1], data[k]
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !cmpLess(data[m], data[h]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data[k], data[k-1] = data[k-1], data[k]
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !cmpLess(data[p-c], data[c]) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotateOrdered(data, start, m, end)
+ }
+ if a < start && start < mid {
+ symMergeOrdered(data, a, start, mid)
+ }
+ if mid < end && end < b {
+ symMergeOrdered(data, mid, end, b)
+ }
+}
+
+// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRangeOrdered(data, m-i, m, j)
+ i -= j
+ } else {
+ swapRangeOrdered(data, m-i, m+j-i, i)
+ j -= i
+ }
+ }
+ // i == j
+ swapRangeOrdered(data, m-i, m, i)
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/internal/ocirequest/create.go b/vendor/cuelabs.dev/go/oci/ociregistry/internal/ocirequest/create.go
new file mode 100644
index 00000000..3a1e9b4d
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/internal/ocirequest/create.go
@@ -0,0 +1,111 @@
+// Copyright 2023 CUE Labs AG
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ocirequest
+
+import (
+ "encoding/base64"
+ "fmt"
+ "net/url"
+)
+
+func (req *Request) Construct() (method string, ustr string, err error) {
+ method, ustr = req.construct()
+ u, err := url.Parse(ustr)
+ if err != nil {
+ return "", "", fmt.Errorf("invalid OCI request: %v", err)
+ }
+ if _, err := Parse(method, u); err != nil {
+ return "", "", fmt.Errorf("invalid OCI request: %v", err)
+ }
+ return method, ustr, nil
+}
+
+func (req *Request) MustConstruct() (method string, ustr string) {
+ method, ustr, err := req.Construct()
+ if err != nil {
+ panic(err)
+ }
+ return method, ustr
+}
+
+func (req *Request) construct() (method string, url string) {
+ switch req.Kind {
+ case ReqPing:
+ return "GET", "/v2/"
+ case ReqBlobGet:
+ return "GET", "/v2/" + req.Repo + "/blobs/" + req.Digest
+ case ReqBlobHead:
+ return "HEAD", "/v2/" + req.Repo + "/blobs/" + req.Digest
+ case ReqBlobDelete:
+ return "DELETE", "/v2/" + req.Repo + "/blobs/" + req.Digest
+ case ReqBlobStartUpload:
+ return "POST", "/v2/" + req.Repo + "/blobs/uploads/"
+ case ReqBlobUploadBlob:
+ return "POST", "/v2/" + req.Repo + "/blobs/uploads/?digest=" + req.Digest
+ case ReqBlobMount:
+ return "POST", "/v2/" + req.Repo + "/blobs/uploads/?mount=" + req.Digest + "&from=" + req.FromRepo
+ case ReqBlobUploadInfo:
+ // Note: this is specific to the ociserver implementation.
+ return "GET", req.uploadPath()
+ case ReqBlobUploadChunk:
+ // Note: this is specific to the ociserver implementation.
+ return "PATCH", req.uploadPath()
+ case ReqBlobCompleteUpload:
+ // Note: this is specific to the ociserver implementation.
+ // TODO this is bogus when the upload ID contains query parameters.
+ return "PUT", req.uploadPath() + "?digest=" + req.Digest
+ case ReqManifestGet:
+ return "GET", "/v2/" + req.Repo + "/manifests/" + req.tagOrDigest()
+ case ReqManifestHead:
+ return "HEAD", "/v2/" + req.Repo + "/manifests/" + req.tagOrDigest()
+ case ReqManifestPut:
+ return "PUT", "/v2/" + req.Repo + "/manifests/" + req.tagOrDigest()
+ case ReqManifestDelete:
+ return "DELETE", "/v2/" + req.Repo + "/manifests/" + req.tagOrDigest()
+ case ReqTagsList:
+ return "GET", "/v2/" + req.Repo + "/tags/list" + req.listParams()
+ case ReqReferrersList:
+ return "GET", "/v2/" + req.Repo + "/referrers/" + req.Digest
+ case ReqCatalogList:
+ return "GET", "/v2/_catalog" + req.listParams()
+ default:
+ panic("invalid request kind")
+ }
+}
+
+func (req *Request) uploadPath() string {
+ return "/v2/" + req.Repo + "/blobs/uploads/" + base64.RawURLEncoding.EncodeToString([]byte(req.UploadID))
+}
+
+func (req *Request) listParams() string {
+ q := make(url.Values)
+ if req.ListN >= 0 {
+ q.Set("n", fmt.Sprint(req.ListN))
+ }
+ if req.ListLast != "" {
+ q.Set("last", req.ListLast)
+ }
+ if len(q) > 0 {
+ return "?" + q.Encode()
+ }
+ return ""
+}
+
+func (req *Request) tagOrDigest() string {
+ if req.Tag != "" {
+ return req.Tag
+ }
+ return req.Digest
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/internal/ocirequest/request.go b/vendor/cuelabs.dev/go/oci/ociregistry/internal/ocirequest/request.go
new file mode 100644
index 00000000..f2adebc0
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/internal/ocirequest/request.go
@@ -0,0 +1,449 @@
+// Copyright 2023 CUE Labs AG
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ocirequest
+
+import (
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "net/url"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+
+ "cuelabs.dev/go/oci/ociregistry"
+)
+
+// ParseError represents an error that can happen when parsing.
+// The Err field holds one of the possible error values below.
+type ParseError struct {
+ Err error
+}
+
+func (e *ParseError) Error() string {
+ return e.Err.Error()
+}
+
+func (e *ParseError) Unwrap() error {
+ return e.Err
+}
+
+var (
+ ErrNotFound = errors.New("page not found")
+ ErrBadlyFormedDigest = errors.New("badly formed digest")
+ ErrMethodNotAllowed = errors.New("method not allowed")
+ ErrBadRequest = errors.New("bad request")
+)
+
+func badAPIUseError(f string, a ...any) error {
+ return ociregistry.NewError(fmt.Sprintf(f, a...), ociregistry.ErrUnsupported.Code(), nil)
+}
+
+type Request struct {
+ Kind Kind
+
+ // Repo holds the repository name. Valid for all request kinds
+ // except ReqCatalogList and ReqPing.
+ Repo string
+
+ // Digest holds the digest being used in the request.
+ // Valid for:
+ // ReqBlobMount
+ // ReqBlobUploadBlob
+ // ReqBlobGet
+ // ReqBlobHead
+ // ReqBlobDelete
+ // ReqBlobCompleteUpload
+ // ReqReferrersList
+ //
+ // Valid for these manifest requests when they're referring to a digest
+ // rather than a tag:
+ // ReqManifestGet
+ // ReqManifestHead
+ // ReqManifestPut
+ // ReqManifestDelete
+ Digest string
+
+ // Tag holds the tag being used in the request. Valid for
+ // these manifest requests when they're referring to a tag:
+ // ReqManifestGet
+ // ReqManifestHead
+ // ReqManifestPut
+ // ReqManifestDelete
+ Tag string
+
+ // FromRepo holds the repository name to mount from
+ // for ReqBlobMount.
+ FromRepo string
+
+ // UploadID holds the upload identifier as used for
+ // chunked uploads.
+ // Valid for:
+ // ReqBlobUploadInfo
+ // ReqBlobUploadChunk
+ UploadID string
+
+ // ListN holds the maximum count for listing.
+ // It's -1 to specify that all items should be returned.
+ //
+ // Valid for:
+ // ReqTagsList
+ // ReqCatalog
+ // ReqReferrers
+ ListN int
+
+ // listLast holds the item to start just after
+ // when listing.
+ //
+ // Valid for:
+ // ReqTagsList
+ // ReqCatalog
+ // ReqReferrers
+ ListLast string
+}
+
+type Kind int
+
+const (
+ // end-1 GET /v2/ 200 404/401
+ ReqPing = Kind(iota)
+
+ // Blob-related endpoints
+
+	// end-2	GET	/v2/<name>/blobs/<digest>	200	404
+ ReqBlobGet
+
+	// end-2	HEAD	/v2/<name>/blobs/<digest>	200	404
+ ReqBlobHead
+
+	// end-10	DELETE	/v2/<name>/blobs/<digest>	202	404/405
+ ReqBlobDelete
+
+	// end-4a	POST	/v2/<name>/blobs/uploads/	202	404
+ ReqBlobStartUpload
+
+	// end-4b	POST	/v2/<name>/blobs/uploads/?digest=<digest>	201/202	404/400
+ ReqBlobUploadBlob
+
+	// end-11	POST	/v2/<name>/blobs/uploads/?mount=<digest>&from=<other_name>	201	404
+ ReqBlobMount
+
+	// end-13	GET	/v2/<name>/blobs/uploads/<reference>	204	404
+ // NOTE: despite being described in the distribution spec, this
+ // isn't really part of the OCI spec.
+ ReqBlobUploadInfo
+
+	// end-5	PATCH	/v2/<name>/blobs/uploads/<reference>	202	404/416
+ // NOTE: despite being described in the distribution spec, this
+ // isn't really part of the OCI spec.
+ ReqBlobUploadChunk
+
+	// end-6	PUT	/v2/<name>/blobs/uploads/<reference>?digest=<digest>	201	404/400
+ // NOTE: despite being described in the distribution spec, this
+ // isn't really part of the OCI spec.
+ ReqBlobCompleteUpload
+
+ // Manifest-related endpoints
+
+	// end-3	GET	/v2/<name>/manifests/<reference>	200	404
+ ReqManifestGet
+
+	// end-3	HEAD	/v2/<name>/manifests/<reference>	200	404
+ ReqManifestHead
+
+	// end-7	PUT	/v2/<name>/manifests/<reference>	201	404
+ ReqManifestPut
+
+	// end-9	DELETE	/v2/<name>/manifests/<reference>	202	404/400/405
+ ReqManifestDelete
+
+ // Tag-related endpoints
+
+	// end-8a	GET	/v2/<name>/tags/list	200	404
+	// end-8b	GET	/v2/<name>/tags/list?n=<integer>&last=<integer>	200	404
+ ReqTagsList
+
+ // Referrer-related endpoints
+
+	// end-12a	GET	/v2/<name>/referrers/<digest>	200	404/400
+ ReqReferrersList
+
+ // Catalog endpoints (out-of-spec)
+ // GET /v2/_catalog
+ ReqCatalogList
+)
+
+// Parse parses the given HTTP method and URL as an OCI registry request.
+// It understands the endpoints described in the [distribution spec].
+//
+// If it returns an error, it will be of type *ParseError.
+//
+// [distribution spec]: https://github.com/opencontainers/distribution-spec/blob/main/spec.md#endpoints
+func Parse(method string, u *url.URL) (*Request, error) {
+ req, err := parse(method, u)
+ if err != nil {
+ return nil, &ParseError{err}
+ }
+ return req, nil
+}
+
+func parse(method string, u *url.URL) (*Request, error) {
+ path := u.Path
+ urlq, err := url.ParseQuery(u.RawQuery)
+ if err != nil {
+ return nil, err
+ }
+
+ var rreq Request
+ if path == "/v2" || path == "/v2/" {
+ rreq.Kind = ReqPing
+ return &rreq, nil
+ }
+ path, ok := strings.CutPrefix(path, "/v2/")
+ if !ok {
+ return nil, ociregistry.NewError("unknown URL path", ociregistry.ErrNameUnknown.Code(), nil)
+ }
+ if path == "_catalog" {
+ if method != "GET" {
+ return nil, ErrMethodNotAllowed
+ }
+ rreq.Kind = ReqCatalogList
+ setListQueryParams(&rreq, urlq)
+ return &rreq, nil
+ }
+ uploadPath, ok := strings.CutSuffix(path, "/blobs/uploads/")
+ if !ok {
+ uploadPath, ok = strings.CutSuffix(path, "/blobs/uploads")
+ }
+ if ok {
+ rreq.Repo = uploadPath
+ if !ociregistry.IsValidRepoName(rreq.Repo) {
+ return nil, ociregistry.ErrNameInvalid
+ }
+ if method != "POST" {
+ return nil, ErrMethodNotAllowed
+ }
+ if d := urlq.Get("mount"); d != "" {
+ // end-11
+ rreq.Digest = d
+ if !ociregistry.IsValidDigest(rreq.Digest) {
+ return nil, ociregistry.ErrDigestInvalid
+ }
+ rreq.FromRepo = urlq.Get("from")
+ if rreq.FromRepo == "" {
+ // There's no "from" argument so fall back to
+ // a regular chunked upload.
+ rreq.Kind = ReqBlobStartUpload
+ // TODO does the "mount" query argument actually take effect in some way?
+ rreq.Digest = ""
+ return &rreq, nil
+ }
+ if !ociregistry.IsValidRepoName(rreq.FromRepo) {
+ return nil, ociregistry.ErrNameInvalid
+ }
+ rreq.Kind = ReqBlobMount
+ return &rreq, nil
+ }
+ if d := urlq.Get("digest"); d != "" {
+ // end-4b
+ rreq.Digest = d
+ if !ociregistry.IsValidDigest(d) {
+ return nil, ErrBadlyFormedDigest
+ }
+ rreq.Kind = ReqBlobUploadBlob
+ return &rreq, nil
+ }
+ // end-4a
+ rreq.Kind = ReqBlobStartUpload
+ return &rreq, nil
+ }
+ path, last, ok := cutLast(path, "/")
+ if !ok {
+ return nil, ErrNotFound
+ }
+ path, lastButOne, ok := cutLast(path, "/")
+ if !ok {
+ return nil, ErrNotFound
+ }
+ switch lastButOne {
+ case "blobs":
+ rreq.Repo = path
+ if !ociregistry.IsValidDigest(last) {
+ return nil, ErrBadlyFormedDigest
+ }
+ if !ociregistry.IsValidRepoName(rreq.Repo) {
+ return nil, ociregistry.ErrNameInvalid
+ }
+ rreq.Digest = last
+ switch method {
+ case "GET":
+ rreq.Kind = ReqBlobGet
+ case "HEAD":
+ rreq.Kind = ReqBlobHead
+ case "DELETE":
+ rreq.Kind = ReqBlobDelete
+ default:
+ return nil, ErrMethodNotAllowed
+ }
+ return &rreq, nil
+ case "uploads":
+ // Note: this section is all specific to ociserver and
+ // isn't part of the OCI registry spec.
+ repo, ok := strings.CutSuffix(path, "/blobs")
+ if !ok {
+ return nil, ErrNotFound
+ }
+ rreq.Repo = repo
+ if !ociregistry.IsValidRepoName(rreq.Repo) {
+ return nil, ociregistry.ErrNameInvalid
+ }
+ uploadID64 := last
+ if uploadID64 == "" {
+ return nil, ErrNotFound
+ }
+ uploadID, err := base64.RawURLEncoding.DecodeString(uploadID64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid upload ID %q (cannot decode)", uploadID64)
+ }
+ if !utf8.Valid(uploadID) {
+ return nil, fmt.Errorf("upload ID %q decoded to invalid utf8", uploadID64)
+ }
+ rreq.UploadID = string(uploadID)
+
+ switch method {
+ case "GET":
+ rreq.Kind = ReqBlobUploadInfo
+ case "PATCH":
+ rreq.Kind = ReqBlobUploadChunk
+ case "PUT":
+ rreq.Kind = ReqBlobCompleteUpload
+ rreq.Digest = urlq.Get("digest")
+ if !ociregistry.IsValidDigest(rreq.Digest) {
+ return nil, ErrBadlyFormedDigest
+ }
+ default:
+ return nil, ErrMethodNotAllowed
+ }
+ return &rreq, nil
+ case "manifests":
+ rreq.Repo = path
+ if !ociregistry.IsValidRepoName(rreq.Repo) {
+ return nil, ociregistry.ErrNameInvalid
+ }
+ switch {
+ case ociregistry.IsValidDigest(last):
+ rreq.Digest = last
+ case ociregistry.IsValidTag(last):
+ rreq.Tag = last
+ default:
+ return nil, ErrNotFound
+ }
+ switch method {
+ case "GET":
+ rreq.Kind = ReqManifestGet
+ case "HEAD":
+ rreq.Kind = ReqManifestHead
+ case "PUT":
+ rreq.Kind = ReqManifestPut
+ case "DELETE":
+ rreq.Kind = ReqManifestDelete
+ default:
+ return nil, ErrMethodNotAllowed
+ }
+ return &rreq, nil
+
+ case "tags":
+ if last != "list" {
+ return nil, ErrNotFound
+ }
+ if err := setListQueryParams(&rreq, urlq); err != nil {
+ return nil, err
+ }
+ if method != "GET" {
+ return nil, ErrMethodNotAllowed
+ }
+ rreq.Repo = path
+ if !ociregistry.IsValidRepoName(rreq.Repo) {
+ return nil, ociregistry.ErrNameInvalid
+ }
+ rreq.Kind = ReqTagsList
+ return &rreq, nil
+ case "referrers":
+ if !ociregistry.IsValidDigest(last) {
+ return nil, ErrBadlyFormedDigest
+ }
+ if method != "GET" {
+ return nil, ErrMethodNotAllowed
+ }
+ rreq.Repo = path
+ if !ociregistry.IsValidRepoName(rreq.Repo) {
+ return nil, ociregistry.ErrNameInvalid
+ }
+ // TODO is there any kind of pagination for referrers?
+ // We'll set ListN to be future-proof.
+ rreq.ListN = -1
+ rreq.Digest = last
+ rreq.Kind = ReqReferrersList
+ return &rreq, nil
+ }
+ return nil, ErrNotFound
+}
+
+func setListQueryParams(rreq *Request, urlq url.Values) error {
+ rreq.ListN = -1
+ if nstr := urlq.Get("n"); nstr != "" {
+ n, err := strconv.Atoi(nstr)
+ if err != nil {
+ return fmt.Errorf("n is not a valid integer: %w", ErrBadRequest)
+ }
+ rreq.ListN = n
+ }
+ rreq.ListLast = urlq.Get("last")
+ return nil
+}
+
+func cutLast(s, sep string) (before, after string, found bool) {
+ if i := strings.LastIndex(s, sep); i >= 0 {
+ return s[:i], s[i+len(sep):], true
+ }
+ return "", s, false
+}
+
+// ParseRange extracts the start and end offsets from a Content-Range string.
+// The resulting start is inclusive and the end exclusive, to match Go convention,
+// whereas Content-Range is inclusive on both ends.
+func ParseRange(s string) (start, end int64, ok bool) {
+ p0s, p1s, ok := strings.Cut(s, "-")
+ if !ok {
+ return 0, 0, false
+ }
+ p0, err0 := strconv.ParseInt(p0s, 10, 64)
+ p1, err1 := strconv.ParseInt(p1s, 10, 64)
+ if p1 > 0 {
+ p1++
+ }
+ return p0, p1, err0 == nil && err1 == nil
+}
+
+// RangeString formats a pair of start and end offsets in the Content-Range form.
+// The input start is inclusive and the end exclusive, to match Go convention,
+// whereas Content-Range is inclusive on both ends.
+func RangeString(start, end int64) string {
+ end--
+ if end < 0 {
+ end = 0
+ }
+ return fmt.Sprintf("%d-%d", start, end)
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/iter.go b/vendor/cuelabs.dev/go/oci/ociregistry/iter.go
index 43e43763..f44247a7 100644
--- a/vendor/cuelabs.dev/go/oci/ociregistry/iter.go
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/iter.go
@@ -14,21 +14,26 @@
package ociregistry
-type Iter[T any] interface {
- Close()
- Next() (T, bool)
- Error() error
-}
+// TODO(go1.23) when we can depend on Go 1.23, this should be:
+// type Seq[T any] = iter.Seq2[T, error]
+
+// Seq defines the type of an iterator sequence returned from
+// the iterator functions. In general, a non-nil
+// error means that the item is the last in the sequence.
+type Seq[T any] func(yield func(T, error) bool)
-func All[T any](it Iter[T]) ([]T, error) {
+func All[T any](it Seq[T]) (_ []T, _err error) {
xs := []T{}
- for {
- x, ok := it.Next()
- if !ok {
- return xs, it.Error()
+ // TODO(go1.23) for x, err := range it
+ it(func(x T, err error) bool {
+ if err != nil {
+ _err = err
+ return false
}
xs = append(xs, x)
- }
+ return true
+ })
+ return xs, _err
}
type sliceIter[T any] struct {
@@ -36,43 +41,20 @@ type sliceIter[T any] struct {
xs []T
}
-func SliceIter[T any](xs []T) Iter[T] {
- return &sliceIter[T]{
- xs: xs,
- }
-}
-
-func (it *sliceIter[T]) Close() {}
-
-func (it *sliceIter[T]) Next() (T, bool) {
- if it.i >= len(it.xs) {
- return *new(T), false
+func SliceIter[T any](xs []T) Seq[T] {
+ return func(yield func(T, error) bool) {
+ for _, x := range xs {
+ if !yield(x, nil) {
+ return
+ }
+ }
}
- x := it.xs[it.i]
- it.i++
- return x, true
-}
-
-func (it *sliceIter[T]) Error() error {
- return nil
}
// ErrorIter returns an iterator that has no
// items and always returns the given error.
-func ErrorIter[T any](err error) Iter[T] {
- return errorIter[T]{err}
-}
-
-type errorIter[T any] struct {
- err error
-}
-
-func (it errorIter[T]) Close() {}
-
-func (it errorIter[T]) Next() (T, bool) {
- return *new(T), false
-}
-
-func (it errorIter[T]) Error() error {
- return it.err
+func ErrorIter[T any](err error) Seq[T] {
+ return func(yield func(T, error) bool) {
+ yield(*new(T), err)
+ }
}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/auth.go b/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/auth.go
new file mode 100644
index 00000000..77c9427d
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/auth.go
@@ -0,0 +1,505 @@
+package ociauth
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+ "time"
+
+ "cuelabs.dev/go/oci/ociregistry/internal/exp/slices"
+)
+
+// TODO decide on a good value for this.
+const oauthClientID = "cuelabs-ociauth"
+
+var ErrNoAuth = fmt.Errorf("no authorization token available to add to request")
+
+// stdTransport implements [http.RoundTripper] by acquiring authorization tokens
+// using the flows implemented
+// by the usual docker clients. Note that this is _not_ documented as
+// part of any official OCI spec.
+//
+// See https://distribution.github.io/distribution/spec/auth/token/ for an overview.
+type stdTransport struct {
+ config Config
+ transport http.RoundTripper
+ mu sync.Mutex
+ registries map[string]*registry
+}
+
+type StdTransportParams struct {
+ // Config represents the underlying configuration file information.
+ // It is consulted for authorization information on the hosts
+ // to which the HTTP requests are made.
+ Config Config
+
+ // Transport is used to make the underlying HTTP requests.
+ // If it's nil, [http.DefaultTransport] will be used.
+ Transport http.RoundTripper
+}
+
+// NewStdTransport returns an [http.RoundTripper] implementation that
+// acquires authorization tokens using the flows implemented by the
+// usual docker clients. Note that this is _not_ documented as part of
+// any official OCI spec.
+//
+// See https://distribution.github.io/distribution/spec/auth/token/ for an overview.
+//
+// The RoundTrip method acquires authorization before invoking the
+// request. It may invoke the request more than once, and can
+// use [http.Request.GetBody] to reset the request body if it gets
+// consumed.
+//
+// It ensures that the authorization token used will have at least the
+// capability to execute operations in the required scope associated
+// with the request context (see [ContextWithRequestInfo]). Any other
+// auth scope inside the context (see [ContextWithScope]) may also be
+// taken into account when acquiring new tokens.
+func NewStdTransport(p StdTransportParams) http.RoundTripper {
+ if p.Config == nil {
+ p.Config = emptyConfig{}
+ }
+ if p.Transport == nil {
+ p.Transport = http.DefaultTransport
+ }
+ return &stdTransport{
+ config: p.Config,
+ transport: p.Transport,
+ registries: make(map[string]*registry),
+ }
+}
+
+// registry holds currently known auth information for a registry.
+type registry struct {
+ host string
+ transport http.RoundTripper
+ config Config
+ initOnce sync.Once
+ initErr error
+
+ // mu guards the fields that follow it.
+ mu sync.Mutex
+
+ // wwwAuthenticate holds the Www-Authenticate header from
+ // the most recent 401 response. If there was a 401 response
+ // that didn't hold such a header, this will still be non-nil
+ // but hold a zero authHeader.
+ wwwAuthenticate *authHeader
+
+ accessTokens []*scopedToken
+ refreshToken string
+ basic *userPass
+}
+
+type scopedToken struct {
+ // scope holds the scope that the token is good for.
+ scope Scope
+ // token holds the actual access token.
+ token string
+ // expires holds when the token expires.
+ expires time.Time
+}
+
+type userPass struct {
+ username string
+ password string
+}
+
+var forever = time.Date(99999, time.January, 1, 0, 0, 0, 0, time.UTC)
+
+// RoundTrip implements [http.RoundTripper.RoundTrip].
+func (a *stdTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ // From the [http.RoundTripper] docs:
+ // RoundTrip should not modify the request, except for
+ // consuming and closing the Request's Body.
+ req = req.Clone(req.Context())
+
+ // From the [http.RoundTripper] docs:
+ // RoundTrip must always close the body, including on errors, [...]
+ needBodyClose := true
+ defer func() {
+ if needBodyClose && req.Body != nil {
+ req.Body.Close()
+ }
+ }()
+
+ a.mu.Lock()
+ r := a.registries[req.URL.Host]
+ if r == nil {
+ r = &registry{
+ host: req.URL.Host,
+ config: a.config,
+ transport: a.transport,
+ }
+ a.registries[r.host] = r
+ }
+ a.mu.Unlock()
+ if err := r.init(); err != nil {
+ return nil, err
+ }
+
+ ctx := req.Context()
+ requiredScope := RequestInfoFromContext(ctx).RequiredScope
+ wantScope := ScopeFromContext(ctx)
+
+ if err := r.setAuthorization(ctx, req, requiredScope, wantScope); err != nil {
+ return nil, err
+ }
+ resp, err := r.transport.RoundTrip(req)
+
+ // The underlying transport should now have closed the request body
+ // so we don't have to.
+ needBodyClose = false
+ if err != nil {
+ return nil, err
+ }
+ if resp.StatusCode != http.StatusUnauthorized {
+ return resp, nil
+ }
+ challenge := challengeFromResponse(resp)
+ if challenge == nil {
+ return resp, nil
+ }
+ authAdded, err := r.setAuthorizationFromChallenge(ctx, req, challenge, requiredScope, wantScope)
+ if err != nil {
+ resp.Body.Close()
+ return nil, err
+ }
+ if !authAdded {
+ // Couldn't acquire any more authorization than we had initially.
+ return resp, nil
+ }
+ resp.Body.Close()
+ // rewind request body if needed and possible.
+ if req.GetBody != nil {
+ req.Body, err = req.GetBody()
+ if err != nil {
+ return nil, err
+ }
+ }
+ return r.transport.RoundTrip(req)
+}
+
+// setAuthorization sets up authorization on the given request using any
+// auth information currently available.
+func (r *registry) setAuthorization(ctx context.Context, req *http.Request, requiredScope, wantScope Scope) error {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ // Remove tokens that have expired or will expire soon so that
+ // the caller doesn't start using a token only for it to expire while it's
+ // making the request.
+ r.deleteExpiredTokens(time.Now().UTC().Add(time.Second))
+
+ if accessToken := r.accessTokenForScope(requiredScope); accessToken != nil {
+ // We have a potentially valid access token. Use it.
+ req.Header.Set("Authorization", "Bearer "+accessToken.token)
+ return nil
+ }
+ if r.wwwAuthenticate == nil {
+ // We haven't seen a 401 response yet. Avoid putting any
+ // basic authorization in the request, because that can mean that
+ // the server sends a 401 response without a Www-Authenticate
+ // header.
+ return nil
+ }
+ if r.refreshToken != "" && r.wwwAuthenticate.scheme == "bearer" {
+ // We've got a refresh token that we can use to try to
+ // acquire an access token and we've seen a Www-Authenticate response
+ // that tells us how we can use it.
+
+ // TODO we're holding the lock (r.mu) here, which is precluding
+ // acquiring several tokens concurrently. We should relax the lock
+ // to allow that.
+
+ accessToken, err := r.acquireAccessToken(ctx, requiredScope, wantScope)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Authorization", "Bearer "+accessToken)
+ return nil
+ }
+ if r.wwwAuthenticate.scheme != "bearer" && r.basic != nil {
+ req.SetBasicAuth(r.basic.username, r.basic.password)
+ return nil
+ }
+ return nil
+}
+
+func (r *registry) setAuthorizationFromChallenge(ctx context.Context, req *http.Request, challenge *authHeader, requiredScope, wantScope Scope) (bool, error) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ r.wwwAuthenticate = challenge
+
+ switch {
+ case r.wwwAuthenticate.scheme == "bearer":
+ scope := ParseScope(r.wwwAuthenticate.params["scope"])
+ accessToken, err := r.acquireAccessToken(ctx, scope, wantScope.Union(requiredScope))
+ if err != nil {
+ return false, err
+ }
+ req.Header.Set("Authorization", "Bearer "+accessToken)
+ return true, nil
+ case r.basic != nil:
+ req.SetBasicAuth(r.basic.username, r.basic.password)
+ return true, nil
+ }
+ return false, nil
+}
+
+// init initializes the registry instance by acquiring auth information from
+// the Config, if available. As this might be slow (invoking EntryForRegistry
+// can end up invoking slow external commands), we ensure that it's only
+// done once.
+// TODO it's possible that this could take a very long time, during which
+// the outer context is cancelled, but we'll ignore that. We probably shouldn't.
+func (r *registry) init() error {
+ inner := func() error {
+ info, err := r.config.EntryForRegistry(r.host)
+ if err != nil {
+ return fmt.Errorf("cannot acquire auth info for registry %q: %v", r.host, err)
+ }
+ r.refreshToken = info.RefreshToken
+ if info.AccessToken != "" {
+ r.accessTokens = append(r.accessTokens, &scopedToken{
+ scope: UnlimitedScope(),
+ token: info.AccessToken,
+ expires: forever,
+ })
+ }
+ if info.Username != "" && info.Password != "" {
+ r.basic = &userPass{
+ username: info.Username,
+ password: info.Password,
+ }
+ }
+ return nil
+ }
+ r.initOnce.Do(func() {
+ r.initErr = inner()
+ })
+ return r.initErr
+}
+
+// acquireAccessToken tries to acquire an access token for authorizing a request.
+// The requiredScope parameter indicates the scope that's definitely
+// required. Note that apparently some servers are picky
+// about getting exactly the same scope in the auth request that was
+// returned in the challenge. The wantScope parameter indicates
+// what scope might be required in the future.
+//
+// This method assumes that there has been a previous 401 response with
+// a Www-Authenticate: Bearer... header.
+func (r *registry) acquireAccessToken(ctx context.Context, requiredScope, wantScope Scope) (string, error) {
+ scope := requiredScope.Union(wantScope)
+ tok, err := r.acquireToken(ctx, scope)
+ if err != nil {
+ var rerr *responseError
+ if !errors.As(err, &rerr) || rerr.statusCode != http.StatusUnauthorized {
+ return "", err
+ }
+ // The documentation says this:
+ //
+ // If the client only has a subset of the requested
+ // access it _must not be considered an error_ as it is
+ // not the responsibility of the token server to
+ // indicate authorization errors as part of this
+ // workflow.
+ //
+ // However it's apparently not uncommon for servers to reject
+ // such requests anyway, so if we've got an unauthorized error
+ // and wantScope goes beyond requiredScope, it may be because
+ // the server is rejecting the request.
+ scope = requiredScope
+ tok, err = r.acquireToken(ctx, scope)
+ if err != nil {
+ return "", err
+ }
+ // TODO mark the registry as picky about tokens so we don't
+ // attempt twice every time?
+ }
+ if tok.RefreshToken != "" {
+ r.refreshToken = tok.RefreshToken
+ }
+ accessToken := tok.Token
+ if accessToken == "" {
+ accessToken = tok.AccessToken
+ }
+ if accessToken == "" {
+ return "", fmt.Errorf("no access token found in auth server response")
+ }
+ var expires time.Time
+ now := time.Now().UTC()
+ if tok.ExpiresIn == 0 {
+ expires = now.Add(60 * time.Second) // TODO link to where this is mentioned
+ } else {
+ expires = now.Add(time.Duration(tok.ExpiresIn) * time.Second)
+ }
+ r.accessTokens = append(r.accessTokens, &scopedToken{
+ scope: scope,
+ token: accessToken,
+ expires: expires,
+ })
+ // TODO persist the access token to save round trips when doing
+ // the authorization flow in a newly run executable.
+ return accessToken, nil
+}
+
+func (r *registry) acquireToken(ctx context.Context, scope Scope) (*wireToken, error) {
+ realm := r.wwwAuthenticate.params["realm"]
+ if realm == "" {
+ return nil, fmt.Errorf("malformed Www-Authenticate header (missing realm)")
+ }
+ if r.refreshToken != "" {
+ v := url.Values{}
+ v.Set("scope", scope.String())
+ if service := r.wwwAuthenticate.params["service"]; service != "" {
+ v.Set("service", service)
+ }
+ v.Set("client_id", oauthClientID)
+ v.Set("grant_type", "refresh_token")
+ v.Set("refresh_token", r.refreshToken)
+ req, err := http.NewRequestWithContext(ctx, "POST", realm, strings.NewReader(v.Encode()))
+ if err != nil {
+ return nil, fmt.Errorf("cannot form HTTP request to %q: %v", realm, err)
+ }
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ tok, err := r.doTokenRequest(req)
+ if err == nil {
+ return tok, nil
+ }
+ var rerr *responseError
+ if !errors.As(err, &rerr) || rerr.statusCode != http.StatusNotFound {
+ return tok, err
+ }
+ // The token endpoint returned 404 for the POST request.
+ // Note: Not all token servers implement oauth2, so fall
+ // back to using a GET with basic auth.
+ // See the Token documentation for the HTTP GET method supported by all token servers.
+ // TODO where in that documentation is this documented?
+ }
+ u, err := url.Parse(realm)
+ if err != nil {
+ return nil, fmt.Errorf("malformed Www-Authenticate header (malformed realm %q): %v", realm, err)
+ }
+ v := u.Query()
+ // TODO where is it documented that we should send multiple scope
+ // attributes rather than a single space-separated attribute as
+ // the POST method does?
+ v["scope"] = strings.Split(scope.String(), " ")
+ if service := r.wwwAuthenticate.params["service"]; service != "" {
+ // TODO the containerregistry code sets this even if it's empty.
+ // Is that better?
+ v.Set("service", service)
+ }
+ u.RawQuery = v.Encode()
+ req, err := http.NewRequest("GET", u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+ // TODO if there's an unlimited-scope access token, the original code
+ // will use it as Bearer authorization at this point. If
+ // that's valid, why are we even acquiring another token?
+ if r.basic != nil {
+ req.SetBasicAuth(r.basic.username, r.basic.password)
+ }
+ return r.doTokenRequest(req)
+}
+
+// wireToken describes the JSON encoding used in the response to a token
+// acquisition method. The comments are taken from the [token docs]
+// and made available here for ease of reference.
+//
+// [token docs]: https://distribution.github.io/distribution/spec/auth/token/#token-response-fields
+type wireToken struct {
+ // Token holds an opaque Bearer token that clients should supply
+ // to subsequent requests in the Authorization header.
+ // AccessToken is provided for compatibility with OAuth 2.0: it's equivalent to Token.
+ // At least one of these fields must be specified, but both may also appear (for compatibility with older clients).
+ // When both are specified, they should be equivalent; if they differ the client's choice is undefined.
+ Token string `json:"token"`
+ AccessToken string `json:"access_token,omitempty"`
+
+ // Refresh token optionally holds a token which can be used to
+ // get additional access tokens for the same subject with different scopes.
+ // This token should be kept secure by the client and only sent
+ // to the authorization server which issues bearer tokens. This
+ // field will only be set when `offline_token=true` is provided
+ // in the request.
+ RefreshToken string `json:"refresh_token"`
+
+ // ExpiresIn holds the duration in seconds since the token was
+ // issued that it will remain valid. When omitted, this defaults
+ // to 60 seconds. For compatibility with older clients, a token
+ // should never be returned with less than 60 seconds to live.
+ ExpiresIn int `json:"expires_in"`
+}
+
+func (r *registry) doTokenRequest(req *http.Request) (*wireToken, error) {
+ client := &http.Client{
+ Transport: r.transport,
+ }
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return nil, errorFromResponse(resp)
+ }
+ data, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("cannot read response body: %v", err)
+ }
+ var tok wireToken
+ if err := json.Unmarshal(data, &tok); err != nil {
+ return nil, fmt.Errorf("malformed JSON token in response: %v", err)
+ }
+ return &tok, nil
+}
+
+type responseError struct {
+ statusCode int
+ msg string
+}
+
+func errorFromResponse(resp *http.Response) error {
+ // TODO include body of response in error message.
+ return &responseError{
+ statusCode: resp.StatusCode,
+ }
+}
+
+func (e *responseError) Error() string {
+ return fmt.Sprintf("unexpected HTTP response %d", e.statusCode)
+}
+
+// deleteExpiredTokens removes all tokens from r that have expired at the
+// given time.
+// TODO ask the store to remove expired tokens?
+func (r *registry) deleteExpiredTokens(now time.Time) {
+ r.accessTokens = slices.DeleteFunc(r.accessTokens, func(tok *scopedToken) bool {
+ return now.After(tok.expires)
+ })
+}
+
+func (r *registry) accessTokenForScope(scope Scope) *scopedToken {
+ for _, tok := range r.accessTokens {
+ if tok.scope.Contains(scope) {
+ // TODO prefer tokens with less scope?
+ return tok
+ }
+ }
+ return nil
+}
+
+type emptyConfig struct{}
+
+func (emptyConfig) EntryForRegistry(host string) (ConfigEntry, error) {
+ return ConfigEntry{}, nil
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/authfile.go b/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/authfile.go
new file mode 100644
index 00000000..d9c6b684
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/authfile.go
@@ -0,0 +1,358 @@
+package ociauth
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strings"
+)
+
+// Config represents access to system level (e.g. config-file or command-execution based)
+// configuration information.
+//
+// It's OK to call EntryForRegistry concurrently.
+type Config interface {
+ // EntryForRegistry returns auth information for the given host.
+ // If there's no information available, it should return the zero ConfigEntry
+ // and nil.
+ EntryForRegistry(host string) (ConfigEntry, error)
+}
+
+// ConfigEntry holds auth information for a registry.
+// It mirrors the information obtainable from the .docker/config.json
+// file and from the docker credential helper protocol
+type ConfigEntry struct {
+ // RefreshToken holds a token that can be used to obtain an access token.
+ RefreshToken string
+ // AccessToken holds a bearer token to be sent to a registry.
+ AccessToken string
+ // Username holds the username for use with basic auth.
+ Username string
+ // Password holds the password for use with Username.
+ Password string
+}
+
+// ConfigFile holds auth information for OCI registries as read from a configuration file.
+// It implements [Config].
+type ConfigFile struct {
+ data configData
+ runner HelperRunner
+}
+
+var ErrHelperNotFound = errors.New("helper not found")
+
+// HelperRunner is the function used to execute auth "helper"
+// commands. It's passed the helper name as specified in the configuration file,
+// without the "docker-credential-helper-" prefix.
+//
+// If the credentials are not found, it should return the zero ConfigEntry
+// and no error.
+//
+// If the helper doesn't exist, it should return an [ErrHelperNotFound] error.
+type HelperRunner = func(helperName string, serverURL string) (ConfigEntry, error)
+
+// configData holds the part of ~/.docker/config.json that pertains to auth.
+type configData struct {
+ Auths map[string]authConfig `json:"auths"`
+ CredsStore string `json:"credsStore,omitempty"`
+ CredHelpers map[string]string `json:"credHelpers,omitempty"`
+}
+
+// authConfig contains authorization information for connecting to a Registry.
+type authConfig struct {
+ // derivedFrom records the entries from which this one was derived.
+ // If this is empty, the entry was explicitly present.
+ derivedFrom []string
+
+ Username string `json:"username,omitempty"`
+ Password string `json:"password,omitempty"`
+ // Auth is an alternative way of specifying username and password
+// (in base64(username:password) form).
+ Auth string `json:"auth,omitempty"`
+
+ // IdentityToken is used to authenticate the user and get
+ // an access token for the registry.
+ IdentityToken string `json:"identitytoken,omitempty"`
+
+ // RegistryToken is a bearer token to be sent to a registry
+ RegistryToken string `json:"registrytoken,omitempty"`
+}
+
+// LoadWithEnv is like [Load] but takes environment variables in the form
+// returned by [os.Environ] instead of calling [os.Getenv]. If env
+// is nil, the current process's environment will be used.
+func LoadWithEnv(runner HelperRunner, env []string) (*ConfigFile, error) {
+ if runner == nil {
+ runner = ExecHelperWithEnv(env)
+ }
+ getenv := os.Getenv
+ if env != nil {
+ getenv = getenvFunc(env)
+ }
+ for _, f := range configFileLocations {
+ filename := f(getenv)
+ if filename == "" {
+ continue
+ }
+ data, err := os.ReadFile(filename)
+ if err != nil {
+ if os.IsNotExist(err) {
+ continue
+ }
+ return nil, err
+ }
+ f, err := decodeConfigFile(data)
+ if err != nil {
+ return nil, fmt.Errorf("invalid config file %q: %v", filename, err)
+ }
+ return &ConfigFile{
+ data: f,
+ runner: runner,
+ }, nil
+ }
+ return &ConfigFile{
+ runner: runner,
+ }, nil
+}
+
+// Load loads the auth configuration from the first location it can find.
+// It uses runner to run any external helper commands; if runner
+// is nil, [ExecHelper] will be used.
+//
+// In order it tries:
+// - $DOCKER_CONFIG/config.json
+// - ~/.docker/config.json
+// - $XDG_RUNTIME_DIR/containers/auth.json
+func Load(runner HelperRunner) (*ConfigFile, error) {
+ return LoadWithEnv(runner, nil)
+}
+
+func getenvFunc(env []string) func(string) string {
+ return func(key string) string {
+ for i := len(env) - 1; i >= 0; i-- {
+ if e := env[i]; len(e) >= len(key)+1 && e[len(key)] == '=' && e[:len(key)] == key {
+ return e[len(key)+1:]
+ }
+ }
+ return ""
+ }
+}
+
+var configFileLocations = []func(func(string) string) string{
+ func(getenv func(string) string) string {
+ if d := getenv("DOCKER_CONFIG"); d != "" {
+ return filepath.Join(d, "config.json")
+ }
+ return ""
+ },
+ func(getenv func(string) string) string {
+ if home := userHomeDir(getenv); home != "" {
+ return filepath.Join(home, ".docker", "config.json")
+ }
+ return ""
+ },
+ // If neither of the above locations was found, look for Podman's auth at
+ // $XDG_RUNTIME_DIR/containers/auth.json and attempt to load it as a
+ // Docker config.
+ func(getenv func(string) string) string {
+ if d := getenv("XDG_RUNTIME_DIR"); d != "" {
+ return filepath.Join(d, "containers", "auth.json")
+ }
+ return ""
+ },
+}
+
+// userHomeDir returns the current user's home directory.
+// The logic in this is directly derived from the logic in
+// [os.UserHomeDir] as of go 1.22.0.
+//
+// It's defined as a variable so it can be patched in tests.
+var userHomeDir = func(getenv func(string) string) string {
+ env := "HOME"
+ switch runtime.GOOS {
+ case "windows":
+ env = "USERPROFILE"
+ case "plan9":
+ env = "home"
+ }
+ if v := getenv(env); v != "" {
+ return v
+ }
+ // On some geese the home directory is not always defined.
+ switch runtime.GOOS {
+ case "android":
+ return "/sdcard"
+ case "ios":
+ return "/"
+ }
+ return ""
+}
+
+// EntryForRegistry implements [Config.EntryForRegistry].
+// If no registry is found, it returns the zero [ConfigEntry] and a nil error.
+func (c *ConfigFile) EntryForRegistry(registryHostname string) (ConfigEntry, error) {
+ helper, ok := c.data.CredHelpers[registryHostname]
+ explicit := true
+ if !ok {
+ helper = c.data.CredsStore
+ explicit = false
+ }
+ if helper != "" {
+ entry, err := c.runner(helper, registryHostname)
+ if err == nil || explicit || !errors.Is(err, ErrHelperNotFound) {
+ return entry, err
+ }
+ // The helper command isn't found and it's a fallback default.
+ // Don't treat that as an error, because it's common for
+ // a helper default to be set up without the helper actually
+ // existing. See https://github.com/cue-lang/cue/issues/2934.
+ }
+ auth := c.data.Auths[registryHostname]
+ if auth.IdentityToken != "" && auth.Username != "" {
+ return ConfigEntry{}, fmt.Errorf("ambiguous auth credentials")
+ }
+ if len(auth.derivedFrom) > 1 {
+ return ConfigEntry{}, fmt.Errorf("more than one auths entry for %q (%s)", registryHostname, strings.Join(auth.derivedFrom, ", "))
+ }
+
+ return ConfigEntry{
+ RefreshToken: auth.IdentityToken,
+ AccessToken: auth.RegistryToken,
+ Username: auth.Username,
+ Password: auth.Password,
+ }, nil
+}
+
+func decodeConfigFile(data []byte) (configData, error) {
+ var f configData
+ if err := json.Unmarshal(data, &f); err != nil {
+ return configData{}, fmt.Errorf("decode failed: %v", err)
+ }
+ for addr, ac := range f.Auths {
+ if ac.Auth != "" {
+ var err error
+ ac.Username, ac.Password, err = decodeAuth(ac.Auth)
+ if err != nil {
+ return configData{}, fmt.Errorf("cannot decode auth field for %q: %v", addr, err)
+ }
+ }
+ f.Auths[addr] = ac
+ if !strings.Contains(addr, "//") {
+ continue
+ }
+ // It looks like it might be a URL, so follow the original logic
+ // and extract the host name for later lookup. Explicit
+ // entries override implicit, and if several entries map to
+ // the same host, we record that so we can return an error
+ // later if that host is looked up (this avoids the nondeterministic
+ // behavior found in the original code when this happens).
+ addr1 := urlHost(addr)
+ if addr1 == addr {
+ continue
+ }
+ if ac1, ok := f.Auths[addr1]; ok {
+ if len(ac1.derivedFrom) == 0 {
+ // Don't override an explicit entry.
+ continue
+ }
+ ac = ac1
+ }
+ ac.derivedFrom = append(ac.derivedFrom, addr)
+ sort.Strings(ac.derivedFrom)
+ f.Auths[addr1] = ac
+ }
+ return f, nil
+}
+
+// urlHost returns the host part of a registry URL.
+// Mimics [github.com/docker/docker/registry.ConvertToHostname]
+// to keep the logic the same as that.
+func urlHost(url string) string {
+ stripped := url
+ if strings.HasPrefix(url, "http://") {
+ stripped = strings.TrimPrefix(url, "http://")
+ } else if strings.HasPrefix(url, "https://") {
+ stripped = strings.TrimPrefix(url, "https://")
+ }
+
+ hostName, _, _ := strings.Cut(stripped, "/")
+ return hostName
+}
+
+// decodeAuth decodes a base64 encoded string and returns username and password
+func decodeAuth(authStr string) (string, string, error) {
+ s, err := base64.StdEncoding.DecodeString(authStr)
+ if err != nil {
+ return "", "", fmt.Errorf("invalid base64-encoded string")
+ }
+ username, password, ok := strings.Cut(string(s), ":")
+ if !ok || username == "" {
+ return "", "", errors.New("no username found")
+ }
+ // The zero-byte-trimming logic here mimics the logic in the
+ // docker CLI configfile package.
+ return username, strings.Trim(password, "\x00"), nil
+}
+
+// ExecHelper executes an external program to get the credentials from a native store.
+// It implements [HelperRunner].
+func ExecHelper(helperName string, serverURL string) (ConfigEntry, error) {
+ return ExecHelperWithEnv(nil)(helperName, serverURL)
+}
+
+// ExecHelperWithEnv returns a [HelperRunner] that behaves like [ExecHelper]
+// except that, if env is non-nil, it will be used as the set of environment
+// variables to pass to the executed helper command. If env is nil,
+// the current process's environment will be used.
+func ExecHelperWithEnv(env []string) HelperRunner {
+ return func(helperName string, serverURL string) (ConfigEntry, error) {
+ var out bytes.Buffer
+ cmd := exec.Command("docker-credential-"+helperName, "get")
+ // TODO this doesn't produce a decent error message for
+ // other helpers such as gcloud that print errors to stderr.
+ cmd.Stdin = strings.NewReader(serverURL)
+ cmd.Stdout = &out
+ cmd.Stderr = &out
+ cmd.Env = env
+ if err := cmd.Run(); err != nil {
+ if !errors.As(err, new(*exec.ExitError)) {
+ if errors.Is(err, exec.ErrNotFound) {
+ return ConfigEntry{}, fmt.Errorf("%w: %v", ErrHelperNotFound, err)
+ }
+ return ConfigEntry{}, fmt.Errorf("cannot run auth helper: %v", err)
+ }
+ t := strings.TrimSpace(out.String())
+ if t == "credentials not found in native keychain" {
+ return ConfigEntry{}, nil
+ }
+ return ConfigEntry{}, fmt.Errorf("error getting credentials: %s", t)
+ }
+
+ // helperCredentials defines the JSON encoding of the data printed
+ // by credentials helper programs.
+ type helperCredentials struct {
+ Username string
+ Secret string
+ }
+ var creds helperCredentials
+ if err := json.Unmarshal(out.Bytes(), &creds); err != nil {
+ return ConfigEntry{}, err
+ }
+ if creds.Username == "" {
+ return ConfigEntry{
+ RefreshToken: creds.Secret,
+ }, nil
+ }
+ return ConfigEntry{
+ Password: creds.Secret,
+ Username: creds.Username,
+ }, nil
+ }
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/challenge.go b/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/challenge.go
new file mode 100644
index 00000000..8ca1a301
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/challenge.go
@@ -0,0 +1,167 @@
+package ociauth
+
+import (
+ "net/http"
+ "strings"
+)
+
+// Octet types from RFC 2616.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+ isToken octetType = 1 << iota
+ isSpace
+)
+
+func init() {
+ // OCTET = <any 8-bit sequence of data>
+ // CHAR = <any US-ASCII character (octets 0 - 127)>
+ // CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+ // CR = <US-ASCII CR, carriage return (13)>
+ // LF = <US-ASCII LF, linefeed (10)>
+ // SP = <US-ASCII SP, space (32)>
+ // HT = <US-ASCII HT, horizontal-tab (9)>
+ // <"> = <US-ASCII double-quote mark (34)>
+ // CRLF = CR LF
+ // LWS = [CRLF] 1*( SP | HT )
+ // TEXT = <any OCTET except CTLs, but including LWS>
+ // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+ // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+ // token = 1*<any CHAR except CTLs or separators>
+ // qdtext = <any TEXT except <">>
+
+ for c := 0; c < 256; c++ {
+ var t octetType
+ isCtl := c <= 31 || c == 127
+ isChar := 0 <= c && c <= 127
+ isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+ if strings.ContainsRune(" \t\r\n", rune(c)) {
+ t |= isSpace
+ }
+ if isChar && !isCtl && !isSeparator {
+ t |= isToken
+ }
+ octetTypes[c] = t
+ }
+}
+
+// authHeader holds the parsed contents of a Www-Authenticate HTTP header.
+type authHeader struct {
+ scheme string
+ params map[string]string
+}
+
+func challengeFromResponse(resp *http.Response) *authHeader {
+ var h *authHeader
+ for _, chalStr := range resp.Header["Www-Authenticate"] {
+ h1 := parseWWWAuthenticate(chalStr)
+ if h1 == nil {
+ continue
+ }
+ if h1.scheme != "basic" && h1.scheme != "bearer" {
+ continue
+ }
+ if h == nil {
+ h = h1
+ } else if h1.scheme == "basic" && h.scheme == "bearer" {
+ // We prefer basic auth to bearer auth.
+ h = h1
+ }
+ }
+ return h
+}
+
+// parseWWWAuthenticate parses the contents of a Www-Authenticate HTTP header.
+// It returns nil if the parsing fails.
+func parseWWWAuthenticate(header string) *authHeader {
+ var h authHeader
+ h.params = make(map[string]string)
+
+ scheme, s := expectToken(header)
+ if scheme == "" {
+ return nil
+ }
+ h.scheme = strings.ToLower(scheme)
+ s = skipSpace(s)
+ for len(s) > 0 {
+ var pkey, pvalue string
+ pkey, s = expectToken(skipSpace(s))
+ if pkey == "" {
+ return nil
+ }
+ if !strings.HasPrefix(s, "=") {
+ return nil
+ }
+ pvalue, s = expectTokenOrQuoted(s[1:])
+ if pvalue == "" {
+ return nil
+ }
+ h.params[strings.ToLower(pkey)] = pvalue
+ s = skipSpace(s)
+ if !strings.HasPrefix(s, ",") {
+ break
+ }
+ s = s[1:]
+ }
+ if len(s) > 0 {
+ return nil
+ }
+ return &h
+}
+
+func skipSpace(s string) (rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isSpace == 0 {
+ break
+ }
+ }
+ return s[i:]
+}
+
+func expectToken(s string) (token, rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isToken == 0 {
+ break
+ }
+ }
+ return s[:i], s[i:]
+}
+
+func expectTokenOrQuoted(s string) (value string, rest string) {
+ if !strings.HasPrefix(s, "\"") {
+ return expectToken(s)
+ }
+ s = s[1:]
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '"':
+ return s[:i], s[i+1:]
+ case '\\':
+ p := make([]byte, len(s)-1)
+ j := copy(p, s[:i])
+ escape := true
+ for i = i + 1; i < len(s); i++ {
+ b := s[i]
+ switch {
+ case escape:
+ escape = false
+ p[j] = b
+ j++
+ case b == '\\':
+ escape = true
+ case b == '"':
+ return string(p[:j]), s[i+1:]
+ default:
+ p[j] = b
+ j++
+ }
+ }
+ return "", ""
+ }
+ }
+ return "", ""
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/context.go b/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/context.go
new file mode 100644
index 00000000..cd691d61
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/context.go
@@ -0,0 +1,53 @@
+package ociauth
+
+import (
+ "context"
+)
+
+// scopeKey is the context key used by ContextWithScope and ScopeFromContext.
+type scopeKey struct{}
+
+// ContextWithScope returns ctx annotated with the given
+// scope. When the ociauth transport receives a request with a scope in the context,
+// it will treat it as "desired authorization scope"; new authorization tokens
+// will be acquired with that scope as well as any scope required by
+// the operation.
+func ContextWithScope(ctx context.Context, s Scope) context.Context {
+	return context.WithValue(ctx, scopeKey{}, s)
+}
+
+// ScopeFromContext returns any scope associated with the context
+// by [ContextWithScope]. It returns the zero Scope if none is present.
+func ScopeFromContext(ctx context.Context) Scope {
+	s, _ := ctx.Value(scopeKey{}).(Scope)
+	return s
+}
+
+// requestInfoKey is the context key used by ContextWithRequestInfo
+// and RequestInfoFromContext.
+type requestInfoKey struct{}
+
+// RequestInfo provides information about the OCI request that
+// is currently being made. It is expected to be attached to an HTTP
+// request context. The [ociclient] package will add this to all
+// requests that it makes.
+type RequestInfo struct {
+	// RequiredScope holds the authorization scope that's required
+	// by the request. The ociauth logic will reuse any available
+	// auth token that has this scope. When acquiring a new token,
+	// it will add any scope found in [ScopeFromContext] too.
+	RequiredScope Scope
+}
+
+// ContextWithRequestInfo returns ctx annotated with the given
+// request information. When ociclient receives a request with
+// this attached, it will respect info.RequiredScope to determine
+// what auth tokens to reuse. When it acquires a new token,
+// it will ask for the union of info.RequiredScope and [ScopeFromContext].
+func ContextWithRequestInfo(ctx context.Context, info RequestInfo) context.Context {
+	return context.WithValue(ctx, requestInfoKey{}, info)
+}
+
+// RequestInfoFromContext returns any request information associated with the context
+// by [ContextWithRequestInfo]. It returns the zero RequestInfo if none is present.
+func RequestInfoFromContext(ctx context.Context) RequestInfo {
+	info, _ := ctx.Value(requestInfoKey{}).(RequestInfo)
+	return info
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/scope.go b/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/scope.go
new file mode 100644
index 00000000..773dc438
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/ociauth/scope.go
@@ -0,0 +1,493 @@
+package ociauth
+
+import (
+ "math/bits"
+ "strings"
+
+ "cuelabs.dev/go/oci/ociregistry/internal/exp/slices"
+)
+
+// knownAction represents an action that we know about
+// and use a more efficient internal representation for.
+type knownAction byte
+
+const (
+	unknownAction knownAction = iota
+	// Note: ordered by lexical string representation.
+	pullAction
+	pushAction
+	// numActions is a count sentinel, not a valid action itself.
+	numActions
+)
+
+const (
+	// Known resource types.
+	TypeRepository = "repository"
+	TypeRegistry   = "registry"
+
+	// Known action types.
+	ActionPull = "pull"
+	ActionPush = "push"
+)
+
+// String returns the wire representation of a; any value other
+// than pullAction or pushAction renders as "unknown".
+func (a knownAction) String() string {
+	switch a {
+	case pullAction:
+		return ActionPull
+	case pushAction:
+		return ActionPush
+	default:
+		return "unknown"
+	}
+}
+
+// CatalogScope defines the resource scope used to allow
+// listing all the items in a registry.
+var CatalogScope = ResourceScope{
+	ResourceType: TypeRegistry,
+	Resource:     "catalog",
+	Action:       "*",
+}
+
+// ResourceScope defines a component of an authorization scope
+// associated with a single resource and action only.
+// See [Scope] for a way of combining multiple ResourceScopes
+// into a single value.
+type ResourceScope struct {
+	// ResourceType holds the type of resource the scope refers to.
+	// Known values for this include TypeRegistry and TypeRepository.
+	// When a scope does not conform to the standard resourceType:resource:actions
+	// syntax, ResourceType will hold the entire scope.
+	ResourceType string
+
+	// Resource names the resource the scope pertains to.
+	// For resource type TypeRepository, this will be the name of the repository.
+	Resource string
+
+	// Action names an action that can be performed on the resource.
+	// This is usually ActionPush or ActionPull.
+	Action string
+}
+
+// Equal reports whether rs1 and rs2 hold identical fields.
+func (rs1 ResourceScope) Equal(rs2 ResourceScope) bool {
+	return rs1.Compare(rs2) == 0
+}
+
+// Compare returns -1, 0 or 1 depending on whether
+// rs1 compares less than, equal, or greater than, rs2.
+//
+// In most to least precedence, the fields are compared in the order
+// ResourceType, Resource, Action.
+func (rs1 ResourceScope) Compare(rs2 ResourceScope) int {
+	if c := strings.Compare(rs1.ResourceType, rs2.ResourceType); c != 0 {
+		return c
+	}
+	if c := strings.Compare(rs1.Resource, rs2.Resource); c != 0 {
+		return c
+	}
+	return strings.Compare(rs1.Action, rs2.Action)
+}
+
+// isKnown reports whether rs has a dedicated internal representation:
+// a repository pull/push scope or the catalog scope.
+func (rs ResourceScope) isKnown() bool {
+	switch rs.ResourceType {
+	case TypeRepository:
+		return parseKnownAction(rs.Action) != unknownAction
+	case TypeRegistry:
+		return rs == CatalogScope
+	}
+	return false
+}
+
+// Scope holds a set of [ResourceScope] values. The zero value
+// represents the empty set.
+type Scope struct {
+ // original holds the original string from which
+ // this Scope was parsed. This maintains the string
+ // representation unchanged as far as possible.
+ original string
+
+ // unlimited holds whether this scope is considered to include all
+ // other scopes.
+ unlimited bool
+
+ // repositories holds all the repositories that the scope
+ // refers to. An empty repository name implies a CatalogScope
+ // entry. The elements of this are maintained in sorted order.
+ repositories []string
+
+ // actions holds an element for each element in repositories
+ // defining the set of allowed actions for that repository
+ // as a bitmask of 1< 0 && s.repositories[i-1] == rs.Resource {
+ s.actions[i-1] |= actionMask
+ } else {
+ s.repositories = append(s.repositories, rs.Resource)
+ s.actions = append(s.actions, actionMask)
+ }
+ }
+ slices.SortFunc(s.others, ResourceScope.Compare)
+ s.others = slices.Compact(s.others)
+ return s
+}
+
+// Len returns the number of ResourceScopes in the scope set.
+// Each repository contributes one entry per action bit set for it;
+// each "other" (non-repository) scope contributes one entry.
+// It panics if the scope is unlimited.
+func (s Scope) Len() int {
+	if s.IsUnlimited() {
+		panic("Len called on unlimited scope")
+	}
+	n := len(s.others)
+	for _, b := range s.actions {
+		n += bits.OnesCount8(b)
+	}
+	return n
+}
+
+// UnlimitedScope returns a scope that contains all other
+// scopes. This is not representable in the docker scope syntax,
+// but it's useful to represent the scope of tokens that can
+// be used for arbitrary access.
+func UnlimitedScope() Scope {
+	return Scope{
+		unlimited: true,
+	}
+}
+
+// IsUnlimited reports whether s is unlimited in scope.
+func (s Scope) IsUnlimited() bool {
+	return s.unlimited
+}
+
+// IsEmpty reports whether the scope holds the empty set.
+func (s Scope) IsEmpty() bool {
+	return len(s.repositories) == 0 &&
+		len(s.others) == 0 &&
+		!s.unlimited
+}
+
+// Iter returns an iterator over all the individual scopes that are
+// part of s. The items will be produced according to [Scope.Compare]
+// ordering.
+//
+// The unlimited scope does not yield any scopes.
+func (s Scope) Iter() func(yield func(ResourceScope) bool) {
+ return func(yield0 func(ResourceScope) bool) {
+ if s.unlimited {
+ return
+ }
+ others := s.others
+ yield := func(scope ResourceScope) bool {
+ // Yield any scopes from others that are ready to
+ // be produced, thus preserving ordering of all
+ // values in the iterator.
+ for len(others) > 0 && others[0].Compare(scope) < 0 {
+ if !yield0(others[0]) {
+ return false
+ }
+ others = others[1:]
+ }
+ return yield0(scope)
+ }
+ for i, repo := range s.repositories {
+ if repo == "" {
+ if !yield(CatalogScope) {
+ return
+ }
+ continue
+ }
+ acts := s.actions[i]
+ for k := knownAction(0); k < numActions; k++ {
+ if acts&(1< 0 {
+ buf.WriteByte(' ')
+ }
+ buf.WriteString(s.ResourceType)
+ if s.Resource != "" || s.Action != "" {
+ buf.WriteByte(':')
+ buf.WriteString(s.Resource)
+ buf.WriteByte(':')
+ buf.WriteString(s.Action)
+ }
+ return true
+ })
+ return buf.String()
+}
+
+// parseKnownAction maps an action string to its compact internal
+// representation, returning unknownAction for anything unrecognized.
+func parseKnownAction(s string) knownAction {
+	switch s {
+	case ActionPull:
+		return pullAction
+	case ActionPush:
+		return pushAction
+	default:
+		return unknownAction
+	}
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/client.go b/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/client.go
new file mode 100644
index 00000000..842f5241
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/client.go
@@ -0,0 +1,412 @@
+// Copyright 2023 CUE Labs AG
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ociclient provides an implementation of ociregistry.Interface that
+// uses HTTP to talk to the remote registry.
+package ociclient
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "hash"
+ "io"
+ "log"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "sync/atomic"
+
+ "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+
+ "cuelabs.dev/go/oci/ociregistry"
+ "cuelabs.dev/go/oci/ociregistry/internal/ocirequest"
+ "cuelabs.dev/go/oci/ociregistry/ociauth"
+)
+
+// debug enables logging.
+// TODO this should be configurable in the API.
+const debug = false
+
+// Options holds the options accepted by [New].
+// The zero value is a valid configuration.
+type Options struct {
+	// DebugID is used to prefix any log messages printed by the client.
+	DebugID string
+
+	// Transport is used to make HTTP requests. The context passed
+	// to its RoundTrip method will have an appropriate
+	// [ociauth.RequestInfo] value added, suitable for consumption
+	// by the transport created by [ociauth.NewStdTransport]. If
+	// Transport is nil, [http.DefaultTransport] will be used.
+	Transport http.RoundTripper
+
+	// Insecure specifies whether an http scheme will be used to
+	// address the host instead of https.
+	Insecure bool
+
+	// ListPageSize configures the maximum number of results
+	// requested when making list requests. If it's <= zero, it
+	// defaults to DefaultListPageSize.
+	ListPageSize int
+}
+
+// See https://github.com/google/go-containerregistry/issues/1091
+// for an early report of the issue alluded to below.
+
+// DefaultListPageSize holds the default number of results
+// to request when using the list endpoints.
+// It's not more than 1000 because AWS ECR complains
+// if it's more than that.
+const DefaultListPageSize = 1000
+
+// debugID is incremented atomically to derive a unique default
+// DebugID for each client created without an explicit one.
+var debugID int32
+
+// New returns a registry implementation that uses the OCI
+// HTTP API. A nil opts parameter is equivalent to a pointer
+// to zero Options.
+//
+// The host specifies the host name to talk to; it may
+// optionally be a host:port pair.
+func New(host string, opts0 *Options) (ociregistry.Interface, error) {
+	var opts Options
+	if opts0 != nil {
+		opts = *opts0
+	}
+	if opts.DebugID == "" {
+		opts.DebugID = fmt.Sprintf("id%d", atomic.AddInt32(&debugID, 1))
+	}
+	if opts.Transport == nil {
+		opts.Transport = http.DefaultTransport
+	}
+	// Check that it's a valid host by forming a URL from it and checking that it matches.
+	u, err := url.Parse("https://" + host + "/path")
+	if err != nil {
+		return nil, fmt.Errorf("invalid host %q", host)
+	}
+	if u.Host != host {
+		return nil, fmt.Errorf("invalid host %q (does not correctly form a host part of a URL)", host)
+	}
+	if opts.Insecure {
+		u.Scheme = "http"
+	}
+	if opts.ListPageSize == 0 {
+		opts.ListPageSize = DefaultListPageSize
+	}
+	return &client{
+		httpHost:   host,
+		httpScheme: u.Scheme,
+		httpClient: &http.Client{
+			Transport: opts.Transport,
+		},
+		debugID:      opts.DebugID,
+		listPageSize: opts.ListPageSize,
+	}, nil
+}
+
+// client implements the OCI registry API over HTTP.
+// The embedded *ociregistry.Funcs presumably supplies default
+// implementations for methods not defined here —
+// NOTE(review): confirm against the ociregistry package.
+type client struct {
+	*ociregistry.Funcs
+	httpScheme   string
+	httpHost     string
+	httpClient   *http.Client
+	debugID      string
+	listPageSize int
+}
+
+// descriptorFromResponse tries to form a descriptor from an HTTP response,
+// filling in the Digest field using knownDigest if it's not present.
+//
+// When requireSize is true, the size is taken from the total in the
+// Content-Range header for a 206 response, and from Content-Length
+// otherwise; an unknown size is an error.
+//
+// Note: this implies that the Digest field will be empty if there is no
+// digest in the response and knownDigest is empty.
+func descriptorFromResponse(resp *http.Response, knownDigest digest.Digest, requireSize bool) (ociregistry.Descriptor, error) {
+	contentType := resp.Header.Get("Content-Type")
+	if contentType == "" {
+		contentType = "application/octet-stream"
+	}
+	size := int64(0)
+	if requireSize {
+		if resp.StatusCode == http.StatusPartialContent {
+			contentRange := resp.Header.Get("Content-Range")
+			if contentRange == "" {
+				return ociregistry.Descriptor{}, fmt.Errorf("no Content-Range in partial content response")
+			}
+			i := strings.LastIndex(contentRange, "/")
+			if i == -1 {
+				return ociregistry.Descriptor{}, fmt.Errorf("malformed Content-Range %q", contentRange)
+			}
+			contentSize, err := strconv.ParseInt(contentRange[i+1:], 10, 64)
+			if err != nil {
+				return ociregistry.Descriptor{}, fmt.Errorf("malformed Content-Range %q", contentRange)
+			}
+			size = contentSize
+		} else {
+			if resp.ContentLength < 0 {
+				return ociregistry.Descriptor{}, fmt.Errorf("unknown content length")
+			}
+			size = resp.ContentLength
+		}
+	}
+	// Note: the local digest variable shadows the digest package from here on.
+	digest := digest.Digest(resp.Header.Get("Docker-Content-Digest"))
+	if digest != "" {
+		if !ociregistry.IsValidDigest(string(digest)) {
+			return ociregistry.Descriptor{}, fmt.Errorf("bad digest %q found in response", digest)
+		}
+	} else {
+		digest = knownDigest
+	}
+	return ociregistry.Descriptor{
+		Digest:    digest,
+		MediaType: contentType,
+		Size:      size,
+	}, nil
+}
+
+// newBlobReader returns a reader that hashes the blob content as it
+// is read and, at EOF, verifies the size and digest against desc.
+func newBlobReader(r io.ReadCloser, desc ociregistry.Descriptor) *blobReader {
+	return &blobReader{
+		r:        r,
+		digester: desc.Digest.Algorithm().Hash(),
+		desc:     desc,
+		verify:   true,
+	}
+}
+
+// newBlobReaderUnverified is like newBlobReader but disables the
+// EOF size/digest verification (oversize content is still rejected).
+func newBlobReaderUnverified(r io.ReadCloser, desc ociregistry.Descriptor) *blobReader {
+	br := newBlobReader(r, desc)
+	br.verify = false
+	return br
+}
+
+// blobReader wraps a blob body, tracking how much has been read
+// and optionally verifying it against the expected descriptor.
+type blobReader struct {
+	r        io.ReadCloser
+	n        int64 // bytes read so far
+	digester hash.Hash
+	desc     ociregistry.Descriptor
+	verify   bool
+}
+
+// Descriptor returns the descriptor the content is checked against.
+func (r *blobReader) Descriptor() ociregistry.Descriptor {
+	return r.desc
+}
+
+// Read implements io.Reader, feeding all data through the digester.
+// At EOF, when verify is set, it checks both the total size and the
+// computed digest against the descriptor.
+func (r *blobReader) Read(buf []byte) (int, error) {
+	n, err := r.r.Read(buf)
+	r.n += int64(n)
+	r.digester.Write(buf[:n])
+	if err == nil {
+		if r.n > r.desc.Size {
+			// Fail early when the blob is too big; we can do that even
+			// when we're not verifying for other use cases.
+			return n, fmt.Errorf("blob size exceeds content length %d: %w", r.desc.Size, ociregistry.ErrSizeInvalid)
+		}
+		return n, nil
+	}
+	if err != io.EOF {
+		return n, err
+	}
+	if !r.verify {
+		return n, io.EOF
+	}
+	if r.n != r.desc.Size {
+		return n, fmt.Errorf("blob size mismatch (%d/%d): %w", r.n, r.desc.Size, ociregistry.ErrSizeInvalid)
+	}
+	gotDigest := digest.NewDigest(r.desc.Digest.Algorithm(), r.digester)
+	if gotDigest != r.desc.Digest {
+		return n, fmt.Errorf("digest mismatch when reading blob")
+	}
+	return n, io.EOF
+}
+
+// Close closes the underlying body.
+func (r *blobReader) Close() error {
+	return r.r.Close()
+}
+
+// TODO make this list configurable.
+var knownManifestMediaTypes = []string{
+	ocispec.MediaTypeImageManifest,
+	ocispec.MediaTypeImageIndex,
+	"application/vnd.oci.artifact.manifest.v1+json", // deprecated.
+	"application/vnd.docker.distribution.manifest.v1+json",
+	"application/vnd.docker.distribution.manifest.v2+json",
+	"application/vnd.docker.distribution.manifest.list.v2+json",
+	// Technically this wildcard should be sufficient, but it isn't
+	// recognized by some registries.
+	"*/*",
+}
+
+// doRequest performs the given OCI request (with no body), returning
+// the response when its status is acceptable (see do). Any other
+// response is converted to an error via makeError and its body closed.
+func (c *client) doRequest(ctx context.Context, rreq *ocirequest.Request, okStatuses ...int) (*http.Response, error) {
+	req, err := newRequest(ctx, rreq, nil)
+	if err != nil {
+		return nil, err
+	}
+	if rreq.Kind == ocirequest.ReqManifestGet || rreq.Kind == ocirequest.ReqManifestHead {
+		// When getting manifests, some servers won't return
+		// the content unless there's an Accept header, so
+		// add all the manifest kinds that we know about.
+		req.Header["Accept"] = knownManifestMediaTypes
+	}
+	resp, err := c.do(req, okStatuses...)
+	if err != nil {
+		return nil, err
+	}
+	if resp.StatusCode/100 == 2 {
+		return resp, nil
+	}
+	defer resp.Body.Close()
+	return nil, makeError(resp)
+}
+
+// do fills in any missing scheme/host on the request URL, sends the
+// request, and returns the response when its status is one of
+// okStatuses (or is 200 when okStatuses is empty). Other statuses are
+// turned into errors: non-2xx via makeError, unexpected 2xx via
+// unexpectedStatusError; in both cases the body is closed.
+func (c *client) do(req *http.Request, okStatuses ...int) (*http.Response, error) {
+	if req.URL.Scheme == "" {
+		req.URL.Scheme = c.httpScheme
+	}
+	if req.URL.Host == "" {
+		req.URL.Host = c.httpHost
+	}
+	if req.Body != nil {
+		// Ensure that the body isn't consumed until the
+		// server has responded that it will receive it.
+		// This means that we can retry requests even when we've
+		// got a consume-once-only io.Reader, such as
+		// when pushing blobs.
+		req.Header.Set("Expect", "100-continue")
+	}
+	var buf bytes.Buffer
+	if debug {
+		fmt.Fprintf(&buf, "client.Do: %s %s {{\n", req.Method, req.URL)
+		fmt.Fprintf(&buf, "\tBODY: %#v\n", req.Body)
+		for k, v := range req.Header {
+			fmt.Fprintf(&buf, "\t%s: %q\n", k, v)
+		}
+		c.logf("%s", buf.Bytes())
+	}
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("cannot do HTTP request: %w", err)
+	}
+	if debug {
+		buf.Reset()
+		fmt.Fprintf(&buf, "} -> %s {\n", resp.Status)
+		for k, v := range resp.Header {
+			fmt.Fprintf(&buf, "\t%s: %q\n", k, v)
+		}
+		// Read the whole body for logging, then restore it for the caller.
+		data, _ := io.ReadAll(resp.Body)
+		if len(data) > 0 {
+			fmt.Fprintf(&buf, "\tBODY: %q\n", data)
+		}
+		fmt.Fprintf(&buf, "}}\n")
+		resp.Body.Close()
+		resp.Body = io.NopCloser(bytes.NewReader(data))
+		c.logf("%s", buf.Bytes())
+	}
+	if len(okStatuses) == 0 && resp.StatusCode == http.StatusOK {
+		return resp, nil
+	}
+	for _, status := range okStatuses {
+		if resp.StatusCode == status {
+			return resp, nil
+		}
+	}
+	defer resp.Body.Close()
+	if !isOKStatus(resp.StatusCode) {
+		return nil, makeError(resp)
+	}
+	return nil, unexpectedStatusError(resp.StatusCode)
+}
+
+// logf emits a debug log line prefixed with the client's debug ID.
+func (c *client) logf(f string, a ...any) {
+	log.Printf("ociclient %s: %s", c.debugID, fmt.Sprintf(f, a...))
+}
+
+// locationFromResponse returns the Location header of resp resolved
+// relative to the originating request URL.
+func locationFromResponse(resp *http.Response) (*url.URL, error) {
+	location := resp.Header.Get("Location")
+	if location == "" {
+		return nil, fmt.Errorf("no Location found in response")
+	}
+	u, err := url.Parse(location)
+	if err != nil {
+		return nil, fmt.Errorf("invalid Location URL found in response")
+	}
+	return resp.Request.URL.ResolveReference(u), nil
+}
+
+// isOKStatus reports whether code is in the 2xx range.
+func isOKStatus(code int) bool {
+	return code/100 == 2
+}
+
+// closeOnError closes r if *err is non-nil. It is intended to be
+// deferred with a pointer to a named error return.
+func closeOnError(err *error, r io.Closer) {
+	if *err != nil {
+		r.Close()
+	}
+}
+
+// unexpectedStatusError returns an error describing an HTTP response
+// code the caller did not expect.
+func unexpectedStatusError(code int) error {
+	return fmt.Errorf("unexpected HTTP response code %d", code)
+}
+
+// scopeForRequest returns the authorization scope needed to execute r:
+// pull for read operations, push for write operations, push on the
+// target plus pull on the source for cross-repository blob mounts,
+// and the catalog scope for listing repositories. It panics on an
+// unknown request kind.
+func scopeForRequest(r *ocirequest.Request) ociauth.Scope {
+	switch r.Kind {
+	case ocirequest.ReqPing:
+		return ociauth.Scope{}
+	case ocirequest.ReqBlobGet,
+		ocirequest.ReqBlobHead,
+		ocirequest.ReqManifestGet,
+		ocirequest.ReqManifestHead,
+		ocirequest.ReqTagsList,
+		ocirequest.ReqReferrersList:
+		return ociauth.NewScope(ociauth.ResourceScope{
+			ResourceType: ociauth.TypeRepository,
+			Resource:     r.Repo,
+			Action:       ociauth.ActionPull,
+		})
+	case ocirequest.ReqBlobDelete,
+		ocirequest.ReqBlobStartUpload,
+		ocirequest.ReqBlobUploadBlob,
+		ocirequest.ReqBlobUploadInfo,
+		ocirequest.ReqBlobUploadChunk,
+		ocirequest.ReqBlobCompleteUpload,
+		ocirequest.ReqManifestPut,
+		ocirequest.ReqManifestDelete:
+		return ociauth.NewScope(ociauth.ResourceScope{
+			ResourceType: ociauth.TypeRepository,
+			Resource:     r.Repo,
+			Action:       ociauth.ActionPush,
+		})
+	case ocirequest.ReqBlobMount:
+		return ociauth.NewScope(ociauth.ResourceScope{
+			ResourceType: ociauth.TypeRepository,
+			Resource:     r.Repo,
+			Action:       ociauth.ActionPush,
+		}, ociauth.ResourceScope{
+			ResourceType: ociauth.TypeRepository,
+			Resource:     r.FromRepo,
+			Action:       ociauth.ActionPull,
+		})
+	case ocirequest.ReqCatalogList:
+		return ociauth.NewScope(ociauth.CatalogScope)
+	default:
+		panic(fmt.Errorf("unexpected request kind %v", r.Kind))
+	}
+}
+
+// newRequest constructs an HTTP request for rreq with the given body,
+// attaching the request's required authorization scope to the context
+// via [ociauth.ContextWithRequestInfo] so the auth transport can use it.
+func newRequest(ctx context.Context, rreq *ocirequest.Request, body io.Reader) (*http.Request, error) {
+	method, u, err := rreq.Construct()
+	if err != nil {
+		return nil, err
+	}
+	ctx = ociauth.ContextWithRequestInfo(ctx, ociauth.RequestInfo{
+		RequiredScope: scopeForRequest(rreq),
+	})
+	return http.NewRequestWithContext(ctx, method, u, body)
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/deleter.go b/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/deleter.go
new file mode 100644
index 00000000..9a66c21b
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/deleter.go
@@ -0,0 +1,56 @@
+// Copyright 2023 CUE Labs AG
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ociclient
+
+import (
+ "context"
+ "net/http"
+
+ "cuelabs.dev/go/oci/ociregistry"
+ "cuelabs.dev/go/oci/ociregistry/internal/ocirequest"
+)
+
+// DeleteBlob deletes the blob with the given digest from the repository.
+func (c *client) DeleteBlob(ctx context.Context, repoName string, digest ociregistry.Digest) error {
+	return c.delete(ctx, &ocirequest.Request{
+		Kind:   ocirequest.ReqBlobDelete,
+		Repo:   repoName,
+		Digest: string(digest),
+	})
+}
+
+// DeleteManifest deletes the manifest with the given digest from the repository.
+func (c *client) DeleteManifest(ctx context.Context, repoName string, digest ociregistry.Digest) error {
+	return c.delete(ctx, &ocirequest.Request{
+		Kind:   ocirequest.ReqManifestDelete,
+		Repo:   repoName,
+		Digest: string(digest),
+	})
+}
+
+// DeleteTag deletes the manifest tagged with tagName from the repository.
+func (c *client) DeleteTag(ctx context.Context, repoName string, tagName string) error {
+	return c.delete(ctx, &ocirequest.Request{
+		Kind: ocirequest.ReqManifestDelete,
+		Repo: repoName,
+		Tag:  tagName,
+	})
+}
+
+// delete performs rreq, expecting a 202 Accepted status, and
+// discards the response body.
+func (c *client) delete(ctx context.Context, rreq *ocirequest.Request) error {
+	resp, err := c.doRequest(ctx, rreq, http.StatusAccepted)
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/error.go b/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/error.go
new file mode 100644
index 00000000..71d3cdd3
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/error.go
@@ -0,0 +1,195 @@
+// Copyright 2023 CUE Labs AG
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ociclient
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "mime"
+ "net/http"
+ "strconv"
+ "strings"
+ "unicode"
+
+ "cuelabs.dev/go/oci/ociregistry"
+)
+
+// errorBodySizeLimit holds the maximum number of response bytes allowed in
+// the server's error response. A typical error message is around 200
+// bytes. Hence, 8 KiB should be sufficient.
+const errorBodySizeLimit = 8 * 1024
+
+// wireError holds a single error entry from an OCI error response
+// body. The trailing-underscore field names avoid colliding with the
+// Code and Detail methods.
+type wireError struct {
+	Code_   string          `json:"code"`
+	Message string          `json:"message,omitempty"`
+	Detail_ json.RawMessage `json:"detail,omitempty"`
+}
+
+// Error implements error, rendering the code in human-friendly form
+// (lower-cased, underscores become spaces) followed by the message
+// and any non-null detail.
+func (e *wireError) Error() string {
+	var buf strings.Builder
+	for _, r := range e.Code_ {
+		if r == '_' {
+			buf.WriteByte(' ')
+		} else {
+			buf.WriteRune(unicode.ToLower(r))
+		}
+	}
+	if buf.Len() == 0 {
+		buf.WriteString("(no code)")
+	}
+	if e.Message != "" {
+		buf.WriteString(": ")
+		buf.WriteString(e.Message)
+	}
+	if len(e.Detail_) != 0 && !bytes.Equal(e.Detail_, []byte("null")) {
+		buf.WriteString("; detail: ")
+		buf.Write(e.Detail_)
+	}
+	return buf.String()
+}
+
+// Code implements [ociregistry.Error.Code].
+func (e *wireError) Code() string {
+	return e.Code_
+}
+
+// Detail implements [ociregistry.Error.Detail].
+func (e *wireError) Detail() any {
+	if len(e.Detail_) == 0 {
+		return nil
+	}
+	// TODO do this once only?
+	var d any
+	json.Unmarshal(e.Detail_, &d)
+	return d
+}
+
+// Is makes it possible for users to write `if errors.Is(err, ociregistry.ErrBlobUnknown)`
+// even when the error hasn't exactly wrapped that error.
+func (e *wireError) Is(err error) bool {
+	var rerr ociregistry.Error
+	return errors.As(err, &rerr) && rerr.Code() == e.Code()
+}
+
+// wireErrors holds a full OCI error response body together with the
+// HTTP status code it arrived with.
+type wireErrors struct {
+	httpStatusCode int
+	Errors         []wireError `json:"errors"`
+}
+
+// Unwrap exposes each element of Errors so errors.Is/As can
+// match against the individual wireError values.
+func (e *wireErrors) Unwrap() []error {
+	// TODO we could do this only once.
+	errs := make([]error, len(e.Errors))
+	for i := range e.Errors {
+		errs[i] = &e.Errors[i]
+	}
+	return errs
+}
+
+// Is makes it possible for users to write `if errors.Is(err, ociregistry.ErrRangeInvalid)`
+// even when the error hasn't exactly wrapped that error.
+func (e *wireErrors) Is(err error) bool {
+	switch e.httpStatusCode {
+	case http.StatusRequestedRangeNotSatisfiable:
+		return err == ociregistry.ErrRangeInvalid
+	}
+	return false
+}
+
+// Error implements error. It assumes Errors is non-empty;
+// makeError guarantees that before constructing a wireErrors.
+func (e *wireErrors) Error() string {
+	var buf strings.Builder
+	buf.WriteString(strconv.Itoa(e.httpStatusCode))
+	buf.WriteString(" ")
+	buf.WriteString(http.StatusText(e.httpStatusCode))
+	buf.WriteString(": ")
+	buf.WriteString(e.Errors[0].Error())
+	for i := range e.Errors[1:] {
+		buf.WriteString("; ")
+		buf.WriteString(e.Errors[i+1].Error())
+	}
+	return buf.String()
+}
+
+// makeError forms an error from a non-OK response.
+func makeError(resp *http.Response) error {
+	if resp.Request.Method == "HEAD" {
+		// When we've made a HEAD request, we can't see any of
+		// the actual error, so we'll have to make up something
+		// from the HTTP status.
+		var err error
+		switch resp.StatusCode {
+		case http.StatusNotFound:
+			err = ociregistry.ErrNameUnknown
+		case http.StatusUnauthorized:
+			err = ociregistry.ErrUnauthorized
+		case http.StatusForbidden:
+			err = ociregistry.ErrDenied
+		case http.StatusTooManyRequests:
+			err = ociregistry.ErrTooManyRequests
+		case http.StatusBadRequest:
+			err = ociregistry.ErrUnsupported
+		default:
+			return fmt.Errorf("error response: %v", resp.Status)
+		}
+		return fmt.Errorf("error response: %v: %w", resp.Status, err)
+	}
+	// NOTE(review): the Method == "HEAD" clause below is unreachable —
+	// HEAD requests were already handled and returned above.
+	if !isJSONMediaType(resp.Header.Get("Content-Type")) || resp.Request.Method == "HEAD" {
+		// TODO include some of the body in this case?
+		data, _ := io.ReadAll(resp.Body)
+		return fmt.Errorf("error response: %v; body: %q", resp.Status, data)
+	}
+	data, err := io.ReadAll(io.LimitReader(resp.Body, errorBodySizeLimit+1))
+	if err != nil {
+		return fmt.Errorf("%s: cannot read error body: %v", resp.Status, err)
+	}
+	if len(data) > errorBodySizeLimit {
+		// TODO include some part of the body
+		return fmt.Errorf("error body too large")
+	}
+	var errs wireErrors
+	if err := json.Unmarshal(data, &errs); err != nil {
+		return fmt.Errorf("%s: malformed error response: %v", resp.Status, err)
+	}
+	if len(errs.Errors) == 0 {
+		return fmt.Errorf("%s: no errors in body (probably a server issue)", resp.Status)
+	}
+	errs.httpStatusCode = resp.StatusCode
+	return &errs
+}
+
+// isJSONMediaType reports whether the content type implies
+// that the content is JSON. For example, both "application/json"
+// and "application/vnd.oci.image.index.v1+json" qualify.
+func isJSONMediaType(contentType string) bool {
+	mediaType, _, _ := mime.ParseMediaType(contentType)
+	m := strings.TrimPrefix(mediaType, "application/")
+	if len(m) == len(mediaType) {
+		// Not an application/* media type.
+		return false
+	}
+	// Look for +json suffix. See https://tools.ietf.org/html/rfc6838#section-4.2.8
+	// We recognize multiple suffixes too (e.g. application/something+json+other)
+	// as that seems to be a possibility.
+	for {
+		i := strings.Index(m, "+")
+		if i == -1 {
+			return m == "json"
+		}
+		if m[0:i] == "json" {
+			return true
+		}
+		m = m[i+1:]
+	}
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/lister.go b/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/lister.go
new file mode 100644
index 00000000..eeeab8a3
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/lister.go
@@ -0,0 +1,180 @@
+// Copyright 2023 CUE Labs AG
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ociclient
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+
+ "cuelabs.dev/go/oci/ociregistry"
+ "cuelabs.dev/go/oci/ociregistry/internal/ocirequest"
+)
+
+// Repositories returns an iterator over the registry's repository
+// names, starting after startAfter, fetching up to listPageSize
+// entries per catalog request.
+func (c *client) Repositories(ctx context.Context, startAfter string) ociregistry.Seq[string] {
+	return c.pager(ctx, &ocirequest.Request{
+		Kind:     ocirequest.ReqCatalogList,
+		ListN:    c.listPageSize,
+		ListLast: startAfter,
+	}, func(resp *http.Response) ([]string, error) {
+		data, err := io.ReadAll(resp.Body)
+		if err != nil {
+			return nil, err
+		}
+		var catalog struct {
+			Repos []string `json:"repositories"`
+		}
+		if err := json.Unmarshal(data, &catalog); err != nil {
+			return nil, fmt.Errorf("cannot unmarshal catalog response: %v", err)
+		}
+		return catalog.Repos, nil
+	})
+}
+
+// Tags returns an iterator over the tags of the given repository,
+// starting after startAfter, fetching up to listPageSize entries
+// per request.
+func (c *client) Tags(ctx context.Context, repoName, startAfter string) ociregistry.Seq[string] {
+	return c.pager(ctx, &ocirequest.Request{
+		Kind:     ocirequest.ReqTagsList,
+		Repo:     repoName,
+		ListN:    c.listPageSize,
+		ListLast: startAfter,
+	}, func(resp *http.Response) ([]string, error) {
+		data, err := io.ReadAll(resp.Body)
+		if err != nil {
+			return nil, err
+		}
+		var tagsResponse struct {
+			Repo string   `json:"name"`
+			Tags []string `json:"tags"`
+		}
+		if err := json.Unmarshal(data, &tagsResponse); err != nil {
+			return nil, fmt.Errorf("cannot unmarshal tags list response: %v", err)
+		}
+		return tagsResponse.Tags, nil
+	})
+}
+
+// Referrers returns an iterator over the descriptors of manifests
+// that refer to the given digest.
+// NOTE(review): the artifactType argument is accepted but not sent
+// with the request, and paging is not yet implemented (see TODO).
+func (c *client) Referrers(ctx context.Context, repoName string, digest ociregistry.Digest, artifactType string) ociregistry.Seq[ociregistry.Descriptor] {
+	// TODO paging
+	resp, err := c.doRequest(ctx, &ocirequest.Request{
+		Kind:   ocirequest.ReqReferrersList,
+		Repo:   repoName,
+		Digest: string(digest),
+		ListN:  c.listPageSize,
+	})
+	if err != nil {
+		return ociregistry.ErrorIter[ociregistry.Descriptor](err)
+	}
+
+	data, err := io.ReadAll(resp.Body)
+	resp.Body.Close()
+	if err != nil {
+		return ociregistry.ErrorIter[ociregistry.Descriptor](err)
+	}
+	var referrersResponse ocispec.Index
+	if err := json.Unmarshal(data, &referrersResponse); err != nil {
+		return ociregistry.ErrorIter[ociregistry.Descriptor](fmt.Errorf("cannot unmarshal referrers response: %v", err))
+	}
+	return ociregistry.SliceIter(referrersResponse.Manifests)
+}
+
+// pager returns an iterator for a list entry point. It starts by sending the given
+// initial request and parses each response into its component items using
+// parseResponse. It tries to use the Link header in each response to continue
+// the iteration, falling back to using the "last" query parameter.
+func (c *client) pager(ctx context.Context, initialReq *ocirequest.Request, parseResponse func(*http.Response) ([]string, error)) ociregistry.Seq[string] {
+	return func(yield func(string, error) bool) {
+		// We assume that the same scope is applicable to all page requests.
+		req, err := newRequest(ctx, initialReq, nil)
+		if err != nil {
+			yield("", err)
+			return
+		}
+		for {
+			resp, err := c.do(req)
+			if err != nil {
+				yield("", err)
+				return
+			}
+			items, err := parseResponse(resp)
+			resp.Body.Close()
+			if err != nil {
+				yield("", err)
+				return
+			}
+			// TODO sanity check that items are in lexical order?
+			for _, item := range items {
+				if !yield(item, nil) {
+					return
+				}
+			}
+			if len(items) < initialReq.ListN {
+				// From the distribution spec:
+				// The response to such a request MAY return fewer than
+				// <n> results, but only when the total number of tags
+				// attached to the repository is less than <n>.
+				return
+			}
+			req, err = nextLink(ctx, resp, initialReq, items[len(items)-1])
+			if err != nil {
+				yield("", fmt.Errorf("invalid Link header in response: %v", err))
+				return
+			}
+		}
+	}
+}
+
+// nextLink tries to form a GET request that can be sent to obtain the next page
+// in a set of list results.
+// The given response holds the response received from the previous
+// list request; initialReq holds the request that initiated the listing,
+// and last holds the final item returned in the previous response.
+func nextLink(ctx context.Context, resp *http.Response, initialReq *ocirequest.Request, last string) (*http.Request, error) {
+	link0 := resp.Header.Get("Link")
+	if link0 == "" {
+		// This is beyond the first page and there was no Link
+		// in the previous response (the standard doesn't mandate
+		// one), so add a "last" parameter to the initial request.
+		rreq := *initialReq
+		rreq.ListLast = last
+		req, err := newRequest(ctx, &rreq, nil)
+		if err != nil {
+			// Given that we could form the initial request, this should
+			// never happen.
+			return nil, fmt.Errorf("cannot form next request: %v", err)
+		}
+		return req, nil
+	}
+	// Parse the link header according to RFC 5988.
+	// TODO perhaps we shouldn't ignore the relation type?
+	link, ok := strings.CutPrefix(link0, "<")
+	if !ok {
+		return nil, fmt.Errorf("no initial < character in Link=%q", link0)
+	}
+	link, _, ok = strings.Cut(link, ">")
+	if !ok {
+		return nil, fmt.Errorf("no > character in Link=%q", link0)
+	}
+	// Parse it with respect to the originating request, as it's probably relative.
+	linkURL, err := resp.Request.URL.Parse(link)
+	if err != nil {
+		return nil, fmt.Errorf("invalid URL in Link=%q", link0)
+	}
+	return http.NewRequestWithContext(ctx, "GET", linkURL.String(), nil)
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/reader.go b/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/reader.go
new file mode 100644
index 00000000..068252e0
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/reader.go
@@ -0,0 +1,186 @@
+// Copyright 2023 CUE Labs AG
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ociclient
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+
+ "cuelabs.dev/go/oci/ociregistry"
+ "cuelabs.dev/go/oci/ociregistry/internal/ocirequest"
+ "github.com/opencontainers/go-digest"
+)
+
+func (c *client) GetBlob(ctx context.Context, repo string, digest ociregistry.Digest) (ociregistry.BlobReader, error) {
+ return c.read(ctx, &ocirequest.Request{
+ Kind: ocirequest.ReqBlobGet,
+ Repo: repo,
+ Digest: string(digest),
+ })
+}
+
+func (c *client) GetBlobRange(ctx context.Context, repo string, digest ociregistry.Digest, o0, o1 int64) (_ ociregistry.BlobReader, _err error) {
+ if o0 == 0 && o1 < 0 {
+ return c.GetBlob(ctx, repo, digest)
+ }
+ rreq := &ocirequest.Request{
+ Kind: ocirequest.ReqBlobGet,
+ Repo: repo,
+ Digest: string(digest),
+ }
+ req, err := newRequest(ctx, rreq, nil)
+ if o1 < 0 {
+ req.Header.Set("Range", fmt.Sprintf("bytes=%d-", o0))
+ } else {
+ req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", o0, o1-1))
+ }
+ resp, err := c.do(req, http.StatusOK, http.StatusPartialContent)
+ if err != nil {
+ return nil, err
+ }
+ // TODO this is wrong when the server returns a 200 response.
+ // Fix that either by returning ErrUnsupported or by reading the whole
+ // blob and returning only the required portion.
+ defer closeOnError(&_err, resp.Body)
+ desc, err := descriptorFromResponse(resp, ociregistry.Digest(rreq.Digest), true)
+ if err != nil {
+ return nil, fmt.Errorf("invalid descriptor in response: %v", err)
+ }
+ return newBlobReaderUnverified(resp.Body, desc), nil
+}
+
+func (c *client) ResolveBlob(ctx context.Context, repo string, digest ociregistry.Digest) (ociregistry.Descriptor, error) {
+ return c.resolve(ctx, &ocirequest.Request{
+ Kind: ocirequest.ReqBlobHead,
+ Repo: repo,
+ Digest: string(digest),
+ })
+}
+
+func (c *client) ResolveManifest(ctx context.Context, repo string, digest ociregistry.Digest) (ociregistry.Descriptor, error) {
+ return c.resolve(ctx, &ocirequest.Request{
+ Kind: ocirequest.ReqManifestHead,
+ Repo: repo,
+ Digest: string(digest),
+ })
+}
+
+func (c *client) ResolveTag(ctx context.Context, repo string, tag string) (ociregistry.Descriptor, error) {
+ return c.resolve(ctx, &ocirequest.Request{
+ Kind: ocirequest.ReqManifestHead,
+ Repo: repo,
+ Tag: tag,
+ })
+}
+
+func (c *client) resolve(ctx context.Context, rreq *ocirequest.Request) (ociregistry.Descriptor, error) {
+ resp, err := c.doRequest(ctx, rreq)
+ if err != nil {
+ return ociregistry.Descriptor{}, err
+ }
+ resp.Body.Close()
+ desc, err := descriptorFromResponse(resp, "", true)
+ if err != nil {
+ return ociregistry.Descriptor{}, fmt.Errorf("invalid descriptor in response: %v", err)
+ }
+ return desc, nil
+}
+
+func (c *client) GetManifest(ctx context.Context, repo string, digest ociregistry.Digest) (ociregistry.BlobReader, error) {
+ return c.read(ctx, &ocirequest.Request{
+ Kind: ocirequest.ReqManifestGet,
+ Repo: repo,
+ Digest: string(digest),
+ })
+}
+
+func (c *client) GetTag(ctx context.Context, repo string, tagName string) (ociregistry.BlobReader, error) {
+ return c.read(ctx, &ocirequest.Request{
+ Kind: ocirequest.ReqManifestGet,
+ Repo: repo,
+ Tag: tagName,
+ })
+}
+
+// inMemThreshold holds the maximum number of bytes of manifest content
+// that we'll hold in memory to obtain a digest before falling back do
+// doing a HEAD request.
+//
+// This is hopefully large enough to be considerably larger than most
+// manifests but small enough to fit comfortably into RAM on most
+// platforms.
+//
+// Note: this is only used when talking to registries that fail to return
+// a digest when doing a GET on a tag.
+const inMemThreshold = 128 * 1024
+
+func (c *client) read(ctx context.Context, rreq *ocirequest.Request) (_ ociregistry.BlobReader, _err error) {
+ resp, err := c.doRequest(ctx, rreq)
+ if err != nil {
+ return nil, err
+ }
+ defer closeOnError(&_err, resp.Body)
+ desc, err := descriptorFromResponse(resp, ociregistry.Digest(rreq.Digest), true)
+ if err != nil {
+ return nil, fmt.Errorf("invalid descriptor in response: %v", err)
+ }
+ if desc.Digest == "" {
+ // Returning a digest isn't mandatory according to the spec, and
+ // at least one registry (AWS's ECR) fails to return a digest
+ // when doing a GET of a tag.
+ // We know the request must be a tag-getting
+ // request because all other requests take a digest not a tag
+ // but sanity check anyway.
+ if rreq.Kind != ocirequest.ReqManifestGet {
+ return nil, fmt.Errorf("internal error: no digest available for non-tag request")
+ }
+
+ // If the manifest is of a reasonable size, just read it into memory
+ // and calculate the digest that way, otherwise issue a HEAD
+ // request which should hopefully (and does in the ECR case)
+ // give us the digest we need.
+ if desc.Size <= inMemThreshold {
+ data, err := io.ReadAll(io.LimitReader(resp.Body, desc.Size+1))
+ if err != nil {
+ return nil, fmt.Errorf("failed to read body to determine digest: %v", err)
+ }
+ if int64(len(data)) != desc.Size {
+ return nil, fmt.Errorf("body size mismatch")
+ }
+ desc.Digest = digest.FromBytes(data)
+ resp.Body.Close()
+ resp.Body = io.NopCloser(bytes.NewReader(data))
+ } else {
+ rreq1 := rreq
+ rreq1.Kind = ocirequest.ReqManifestHead
+ resp1, err := c.doRequest(ctx, rreq1)
+ if err != nil {
+ return nil, err
+ }
+ resp1.Body.Close()
+ desc, err = descriptorFromResponse(resp1, "", true)
+ if err != nil {
+ return nil, err
+ }
+ if desc.Digest == "" {
+ return nil, fmt.Errorf("no digest header found in response")
+ }
+ }
+ }
+ return newBlobReader(resp.Body, desc), nil
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/writer.go b/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/writer.go
new file mode 100644
index 00000000..dc83774e
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/ociclient/writer.go
@@ -0,0 +1,425 @@
+// Copyright 2023 CUE Labs AG
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ociclient
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "sync"
+
+ "github.com/opencontainers/go-digest"
+
+ "cuelabs.dev/go/oci/ociregistry"
+ "cuelabs.dev/go/oci/ociregistry/internal/ocirequest"
+ "cuelabs.dev/go/oci/ociregistry/ociauth"
+)
+
+// This file implements the ociregistry.Writer methods.
+
+func (c *client) PushManifest(ctx context.Context, repo string, tag string, contents []byte, mediaType string) (ociregistry.Descriptor, error) {
+ if mediaType == "" {
+ return ociregistry.Descriptor{}, fmt.Errorf("PushManifest called with empty mediaType")
+ }
+ desc := ociregistry.Descriptor{
+ Digest: digest.FromBytes(contents),
+ Size: int64(len(contents)),
+ MediaType: mediaType,
+ }
+
+ rreq := &ocirequest.Request{
+ Kind: ocirequest.ReqManifestPut,
+ Repo: repo,
+ Tag: tag,
+ Digest: string(desc.Digest),
+ }
+ req, err := newRequest(ctx, rreq, bytes.NewReader(contents))
+ req.Header.Set("Content-Type", mediaType)
+ req.ContentLength = desc.Size
+ resp, err := c.do(req, http.StatusCreated)
+ if err != nil {
+ return ociregistry.Descriptor{}, err
+ }
+ resp.Body.Close()
+ return desc, nil
+}
+
+func (c *client) MountBlob(ctx context.Context, fromRepo, toRepo string, dig ociregistry.Digest) (ociregistry.Descriptor, error) {
+ rreq := &ocirequest.Request{
+ Kind: ocirequest.ReqBlobMount,
+ Repo: toRepo,
+ FromRepo: fromRepo,
+ Digest: string(dig),
+ }
+ resp, err := c.doRequest(ctx, rreq, http.StatusCreated, http.StatusAccepted)
+ if err != nil {
+ return ociregistry.Descriptor{}, err
+ }
+ resp.Body.Close()
+ if resp.StatusCode == http.StatusAccepted {
+ // Mount isn't supported and technically the upload session has begun,
+ // but we aren't in a great position to be able to continue it, so let's just
+ // return Unsupported.
+ return ociregistry.Descriptor{}, fmt.Errorf("registry does not support mounts: %w", ociregistry.ErrUnsupported)
+ }
+ return descriptorFromResponse(resp, dig, false)
+}
+
+func (c *client) PushBlob(ctx context.Context, repo string, desc ociregistry.Descriptor, r io.Reader) (_ ociregistry.Descriptor, _err error) {
+ // TODO use the single-post blob-upload method (ReqBlobUploadBlob)
+ // See:
+ // https://github.com/distribution/distribution/issues/4065
+ // https://github.com/golang/go/issues/63152
+ rreq := &ocirequest.Request{
+ Kind: ocirequest.ReqBlobStartUpload,
+ Repo: repo,
+ }
+ req, err := newRequest(ctx, rreq, nil)
+ if err != nil {
+ return ociregistry.Descriptor{}, err
+ }
+ resp, err := c.do(req, http.StatusAccepted)
+ if err != nil {
+ return ociregistry.Descriptor{}, err
+ }
+ resp.Body.Close()
+ location, err := locationFromResponse(resp)
+ if err != nil {
+ return ociregistry.Descriptor{}, err
+ }
+
+ // We've got the upload location. Now PUT the content.
+
+ ctx = ociauth.ContextWithRequestInfo(ctx, ociauth.RequestInfo{
+ RequiredScope: scopeForRequest(rreq),
+ })
+ // Note: we can't use ocirequest.Request here because that's
+ // specific to the ociserver implementation in this case.
+ req, err = http.NewRequestWithContext(ctx, "PUT", "", r)
+ if err != nil {
+ return ociregistry.Descriptor{}, err
+ }
+ req.URL = urlWithDigest(location, string(desc.Digest))
+ req.ContentLength = desc.Size
+ req.Header.Set("Content-Type", "application/octet-stream")
+ // TODO: per the spec, the content-range header here is unnecessary.
+ req.Header.Set("Content-Range", ocirequest.RangeString(0, desc.Size))
+ resp, err = c.do(req, http.StatusCreated)
+ if err != nil {
+ return ociregistry.Descriptor{}, err
+ }
+ defer closeOnError(&_err, resp.Body)
+ resp.Body.Close()
+ return desc, nil
+}
+
+// TODO is this a reasonable default? We have to
+// weigh up in-memory cost vs round-trip overhead.
+// TODO: make this default configurable.
+const defaultChunkSize = 64 * 1024
+
+func (c *client) PushBlobChunked(ctx context.Context, repo string, chunkSize int) (ociregistry.BlobWriter, error) {
+ if chunkSize <= 0 {
+ chunkSize = defaultChunkSize
+ }
+ resp, err := c.doRequest(ctx, &ocirequest.Request{
+ Kind: ocirequest.ReqBlobStartUpload,
+ Repo: repo,
+ }, http.StatusAccepted)
+ if err != nil {
+ return nil, err
+ }
+ resp.Body.Close()
+ location, err := locationFromResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+ ctx = ociauth.ContextWithRequestInfo(ctx, ociauth.RequestInfo{
+ RequiredScope: ociauth.NewScope(ociauth.ResourceScope{
+ ResourceType: "repository",
+ Resource: repo,
+ Action: "push",
+ }),
+ })
+ return &blobWriter{
+ ctx: ctx,
+ client: c,
+ chunkSize: chunkSizeFromResponse(resp, chunkSize),
+ chunk: make([]byte, 0, chunkSize),
+ location: location,
+ }, nil
+}
+
+func (c *client) PushBlobChunkedResume(ctx context.Context, repo string, id string, offset int64, chunkSize int) (ociregistry.BlobWriter, error) {
+ if id == "" {
+ return nil, fmt.Errorf("id must be non-empty to resume a chunked upload")
+ }
+ if chunkSize <= 0 {
+ chunkSize = defaultChunkSize
+ }
+ var location *url.URL
+ switch {
+ case offset == -1:
+ // Try to find what offset we're meant to be writing at
+ // by doing a GET to the location.
+ // TODO does resuming an upload require push or pull scope or both?
+ ctx := ociauth.ContextWithRequestInfo(ctx, ociauth.RequestInfo{
+ RequiredScope: ociauth.NewScope(ociauth.ResourceScope{
+ ResourceType: "repository",
+ Resource: repo,
+ Action: "push",
+ }, ociauth.ResourceScope{
+ ResourceType: "repository",
+ Resource: repo,
+ Action: "pull",
+ }),
+ })
+ req, err := http.NewRequestWithContext(ctx, "GET", id, nil)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := c.do(req, http.StatusNoContent)
+ if err != nil {
+ return nil, fmt.Errorf("cannot recover chunk offset: %v", err)
+ }
+ location, err = locationFromResponse(resp)
+ if err != nil {
+ return nil, fmt.Errorf("cannot get location from response: %v", err)
+ }
+ rangeStr := resp.Header.Get("Range")
+ p0, p1, ok := ocirequest.ParseRange(rangeStr)
+ if !ok {
+ return nil, fmt.Errorf("invalid range %q in response", rangeStr)
+ }
+ if p0 != 0 {
+ return nil, fmt.Errorf("range %q does not start with 0", rangeStr)
+ }
+ chunkSize = chunkSizeFromResponse(resp, chunkSize)
+ offset = p1
+ case offset < 0:
+ return nil, fmt.Errorf("invalid offset; must be -1 or non-negative")
+ default:
+ var err error
+ location, err = url.Parse(id) // Note that this mirrors [BlobWriter.ID].
+ if err != nil {
+ return nil, fmt.Errorf("provided ID is not a valid location URL")
+ }
+ }
+ ctx = ociauth.ContextWithRequestInfo(ctx, ociauth.RequestInfo{
+ RequiredScope: ociauth.NewScope(ociauth.ResourceScope{
+ ResourceType: "repository",
+ Resource: repo,
+ Action: "push",
+ }),
+ })
+ return &blobWriter{
+ ctx: ctx,
+ client: c,
+ chunkSize: chunkSize,
+ size: offset,
+ flushed: offset,
+ location: location,
+ }, nil
+}
+
+type blobWriter struct {
+ client *client
+ chunkSize int
+ ctx context.Context
+
+ // mu guards the fields below it.
+ mu sync.Mutex
+ closed bool
+ chunk []byte
+ closeErr error
+
+ // size holds the size of the entire upload as seen from the
+ // client perspective. Each call to Write increases this immediately.
+ size int64
+
+ // flushed holds the size of the upload as flushed to the server.
+ // Each successfully flushed chunk increases this.
+ flushed int64
+ location *url.URL
+}
+
+type doResult struct {
+ resp *http.Response
+ err error
+}
+
+func (w *blobWriter) Write(buf []byte) (int, error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ // We use > rather than >= here so that using a chunk size of 100
+ // and writing 100 bytes does not actually flush, which would result in a PATCH
+ // then followed by an empty-bodied PUT with the call to Commit.
+ // Instead, we want the writes to not flush at all, and Commit to PUT the entire chunk.
+ if len(w.chunk)+len(buf) > w.chunkSize {
+ if err := w.flush(buf, ""); err != nil {
+ return 0, err
+ }
+ } else {
+ if w.chunk == nil {
+ w.chunk = make([]byte, 0, w.chunkSize)
+ }
+ w.chunk = append(w.chunk, buf...)
+ }
+ w.size += int64(len(buf))
+ return len(buf), nil
+}
+
+// flush flushes any outstanding upload data to the server.
+// If commitDigest is non-empty, this is the final segment of data in the blob:
+// the blob is being committed and the digest should hold the digest of the entire blob content.
+func (w *blobWriter) flush(buf []byte, commitDigest ociregistry.Digest) error {
+ if commitDigest == "" && len(buf)+len(w.chunk) == 0 {
+ return nil
+ }
+ // Start a new PATCH request to send the currently outstanding data.
+ method := "PATCH"
+ expect := http.StatusAccepted
+ reqURL := w.location
+ if commitDigest != "" {
+ // This is the final piece of data, so send it as the final PUT request
+ // (committing the whole blob) which avoids an extra round trip.
+ method = "PUT"
+ expect = http.StatusCreated
+ reqURL = urlWithDigest(reqURL, string(commitDigest))
+ }
+ req, err := http.NewRequestWithContext(w.ctx, method, "", concatBody(w.chunk, buf))
+ if err != nil {
+ return fmt.Errorf("cannot make PATCH request: %v", err)
+ }
+ req.URL = reqURL
+ req.ContentLength = int64(len(w.chunk) + len(buf))
+ // TODO: per the spec, the content-range header here is unnecessary
+ // if we are doing a final PUT without a body.
+ req.Header.Set("Content-Range", ocirequest.RangeString(w.flushed, w.flushed+req.ContentLength))
+ resp, err := w.client.do(req, expect)
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ location, err := locationFromResponse(resp)
+ if err != nil {
+ return fmt.Errorf("bad Location in response: %v", err)
+ }
+ // TODO is there something we could be doing with the Range header in the response?
+ w.location = location
+ w.flushed += req.ContentLength
+ w.chunk = w.chunk[:0]
+ return nil
+}
+
+func concatBody(b1, b2 []byte) io.Reader {
+ if len(b1)+len(b2) == 0 {
+ return nil // note that net/http treats a nil request body differently
+ }
+ if len(b1) == 0 {
+ return bytes.NewReader(b2)
+ }
+ if len(b2) == 0 {
+ return bytes.NewReader(b1)
+ }
+ return io.MultiReader(
+ bytes.NewReader(b1),
+ bytes.NewReader(b2),
+ )
+}
+
+func (w *blobWriter) Close() error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ if w.closed {
+ return w.closeErr
+ }
+ err := w.flush(nil, "")
+ w.closed = true
+ w.closeErr = err
+ return err
+}
+
+func (w *blobWriter) Size() int64 {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ return w.size
+}
+
+func (w *blobWriter) ChunkSize() int {
+ return w.chunkSize
+}
+
+func (w *blobWriter) ID() string {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ return w.location.String()
+}
+
+func (w *blobWriter) Commit(digest ociregistry.Digest) (ociregistry.Descriptor, error) {
+ if digest == "" {
+ return ociregistry.Descriptor{}, fmt.Errorf("cannot commit with an empty digest")
+ }
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ if err := w.flush(nil, digest); err != nil {
+ return ociregistry.Descriptor{}, fmt.Errorf("cannot flush data before commit: %v", err)
+ }
+ return ociregistry.Descriptor{
+ MediaType: "application/octet-stream",
+ Size: w.size,
+ Digest: digest,
+ }, nil
+}
+
+func (w *blobWriter) Cancel() error {
+ return nil
+}
+
+// urlWithDigest returns u with the digest query parameter set, taking care not
+// to disrupt the initial URL (thus avoiding the charge of "manually
+// assembing the location; see [here].
+//
+// [here]: https://github.com/opencontainers/distribution-spec/blob/main/spec.md#post-then-put
+func urlWithDigest(u0 *url.URL, digest string) *url.URL {
+ u := *u0
+ digest = url.QueryEscape(digest)
+ switch {
+ case u.ForceQuery:
+ // The URL already ended in a "?" with no actual query parameters.
+ u.RawQuery = "digest=" + digest
+ u.ForceQuery = false
+ case u.RawQuery != "":
+ // There's already a query parameter present.
+ u.RawQuery += "&digest=" + digest
+ default:
+ u.RawQuery = "digest=" + digest
+ }
+ return &u
+}
+
+// See https://github.com/opencontainers/distribution-spec/blob/main/spec.md#pushing-a-blob-in-chunks
+func chunkSizeFromResponse(resp *http.Response, chunkSize int) int {
+ minChunkSize, err := strconv.Atoi(resp.Header.Get("OCI-Chunk-Min-Length"))
+ if err == nil && minChunkSize > chunkSize {
+ return minChunkSize
+ }
+ return chunkSize
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/ociref/reference.go b/vendor/cuelabs.dev/go/oci/ociregistry/ociref/reference.go
new file mode 100644
index 00000000..650b041a
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/ociref/reference.go
@@ -0,0 +1,233 @@
+// Copyright 2023 CUE Labs AG
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ociref supports parsing cross-registry OCI registry references.
+package ociref
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+
+ "cuelabs.dev/go/oci/ociregistry"
+)
+
+// The following regular expressions derived from code in the
+// [github.com/distribution/distribution/v3/reference] package.
+const (
+ // alphanumeric defines the alphanumeric atom, typically a
+ // component of names. This only allows lower case characters and digits.
+ alphanumeric = `[a-z0-9]+`
+
+ // separator defines the separators allowed to be embedded in name
+ // components. This allows one period, one or two underscore and multiple
+ // dashes. Repeated dashes and underscores are intentionally treated
+ // differently. In order to support valid hostnames as name components,
+ // supporting repeated dash was added. Additionally double underscore is
+ // now allowed as a separator to loosen the restriction for previously
+ // supported names.
+ // TODO the distribution spec doesn't allow these variations.
+ separator = `(?:[._]|__|[-]+)`
+
+ // domainNameComponent restricts the registry domain component of a
+ // repository name to start with a component as defined by DomainRegexp.
+ domainNameComponent = `(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?)`
+
+ // ipv6address are enclosed between square brackets and may be represented
+ // in many ways, see rfc5952. Only IPv6 in compressed or uncompressed format
+ // are allowed, IPv6 zone identifiers (rfc6874) or Special addresses such as
+ // IPv4-Mapped are deliberately excluded.
+ ipv6address = `(?:\[[a-fA-F0-9:]+\])`
+
+ // optionalPort matches an optional port-number including the port separator
+ // (e.g. ":80").
+ port = `[0-9]+`
+
+ // domainName defines the structure of potential domain components
+ // that may be part of image names. This is purposely a subset of what is
+ // allowed by DNS to ensure backwards compatibility with Docker image
+ // names. This includes IPv4 addresses on decimal format.
+ //
+ // Note: we purposely exclude domain names without dots here,
+ // because otherwise we can't tell if the first component is
+ // a host name or not when it doesn't have a port.
+ // When it does have a port, the distinction is clear.
+ //
+ domainName = `(?:` + domainNameComponent + `(?:\.` + domainNameComponent + `)+` + `)`
+
+ // host defines the structure of potential domains based on the URI
+ // Host subcomponent on rfc3986. It may be a subset of DNS domain name,
+ // or an IPv4 address in decimal format, or an IPv6 address between square
+ // brackets (excluding zone identifiers as defined by rfc6874 or special
+ // addresses such as IPv4-Mapped).
+ host = `(?:` + domainName + `|` + ipv6address + `)`
+
+ // allowed by the URI Host subcomponent on rfc3986 to ensure backwards
+ // compatibility with Docker image names.
+ // Note: that we require the port when the host name looks like a regular
+ // name component.
+ domainAndPort = `(?:` + host + `(?:` + `:` + port + `)?` + `|` + domainNameComponent + `:` + port + `)`
+
+ // pathComponent restricts path-components to start with an alphanumeric
+ // character, with following parts able to be separated by a separator
+ // (one period, one or two underscore and multiple dashes).
+ pathComponent = `(?:` + alphanumeric + `(?:` + separator + alphanumeric + `)*` + `)`
+
+ // repoName matches the name of a repository. It consists of one
+ // or more forward slash (/) delimited path-components:
+ //
+ // pathComponent[[/pathComponent] ...] // e.g., "library/ubuntu"
+ repoName = pathComponent + `(?:` + `/` + pathComponent + `)*`
+)
+
+var referencePat = regexp.MustCompile(
+ `^(?:` +
+ `(?:` + `(` + domainAndPort + `)` + `/` + `)?` + // capture 1: host
+ `(` + repoName + `)` + // capture 2: repository name
+ `(?:` + `:([^@]+))?` + // capture 3: tag; rely on Go logic to test validity.
+ `(?:` + `@(.+))?` + // capture 4: digest; rely on go-digest to find issues
+ `)$`,
+)
+
+var hostPat = regexp.MustCompile(`^(?:` + domainAndPort + `)$`)
+var repoPat = regexp.MustCompile(`^(?:` + repoName + `)$`)
+
+// Reference represents an entry in an OCI repository.
+type Reference struct {
+ // Host holds the host name of the registry
+ // within which the repository is stored, optionally in
+ // the form host:port. This might be empty.
+ Host string
+
+ // Repository holds the repository name.
+ Repository string
+
+ // Tag holds the TAG part of a :TAG or :TAG@DIGEST reference.
+ // When Digest is set as well as Tag, the tag will be verified
+ // to exist and have the expected digest.
+ Tag string
+
+ // Digest holds the DIGEST part of an @DIGEST reference
+ // or of a :TAG@DIGEST reference.
+ Digest ociregistry.Digest
+}
+
+// IsValidHost reports whether s is a valid host (or host:port) part of a reference string.
+func IsValidHost(s string) bool {
+ return hostPat.MatchString(s)
+}
+
+// IsValidHost reports whether s is a valid repository part
+// of a reference string.
+func IsValidRepository(s string) bool {
+ return repoPat.MatchString(s)
+}
+
+// IsValidTag reports whether s is a valid reference tag.
+func IsValidTag(s string) bool {
+ return checkTag(s) == nil
+}
+
+// Parse parses a reference string that must include
+// a host name (or host:port pair) component.
+//
+// It is represented in string form as HOST[:PORT]/NAME[:TAG|@DIGEST]
+// form: the same syntax accepted by "docker pull".
+// Unlike "docker pull" however, there is no default registry: when
+// presented with a bare repository name, Parse will return an error.
+func Parse(refStr string) (Reference, error) {
+ ref, err := ParseRelative(refStr)
+ if err != nil {
+ return Reference{}, err
+ }
+ if ref.Host == "" {
+ return Reference{}, fmt.Errorf("reference does not contain host name")
+ }
+ return ref, nil
+}
+
+// ParseRelative parses a reference string that may
+// or may not include a host name component.
+//
+// It is represented in string form as [HOST[:PORT]/]NAME[:TAG|@DIGEST]
+// form: the same syntax accepted by "docker pull".
+// Unlike "docker pull" however, there is no default registry: when
+// presented with a bare repository name, the Host field will be empty.
+func ParseRelative(refStr string) (Reference, error) {
+ m := referencePat.FindStringSubmatch(refStr)
+ if m == nil {
+ return Reference{}, fmt.Errorf("invalid reference syntax (%q)", refStr)
+ }
+ var ref Reference
+ ref.Host, ref.Repository, ref.Tag, ref.Digest = m[1], m[2], m[3], ociregistry.Digest(m[4])
+ // Check lengths and digest: we don't check these as part of the regexp
+ // because it's more efficient to do it in Go and we get
+ // nicer error messages as a result.
+ if len(ref.Digest) > 0 {
+ if err := ref.Digest.Validate(); err != nil {
+ return Reference{}, fmt.Errorf("invalid digest %q: %v", ref.Digest, err)
+ }
+ }
+ if len(ref.Tag) > 0 {
+ if err := checkTag(ref.Tag); err != nil {
+ return Reference{}, err
+ }
+ }
+ if len(ref.Repository) > 255 {
+ return Reference{}, fmt.Errorf("repository name too long")
+ }
+ return ref, nil
+}
+
+func checkTag(s string) error {
+ if len(s) > 128 {
+ return fmt.Errorf("tag too long")
+ }
+ if !isWord(s[0]) {
+ return fmt.Errorf("tag %q does not start with word character", s)
+ }
+ for i := 1; i < len(s); i++ {
+ c := s[i]
+ if !isWord(c) && c != '.' && c != '-' {
+ return fmt.Errorf("tag %q contains invalid invalid character %q", s, c)
+ }
+ }
+ return nil
+}
+
+func isWord(c byte) bool {
+ return c == '_' || ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || ('0' <= c && c <= '9')
+}
+
+// String returns the string form of a reference in the form
+//
+// [HOST/]NAME[:TAG|@DIGEST]
+func (ref Reference) String() string {
+ var buf strings.Builder
+ buf.Grow(len(ref.Host) + 1 + len(ref.Repository) + 1 + len(ref.Tag) + 1 + len(ref.Digest))
+ if ref.Host != "" {
+ buf.WriteString(ref.Host)
+ buf.WriteByte('/')
+ }
+ buf.WriteString(ref.Repository)
+ if len(ref.Tag) > 0 {
+ buf.WriteByte(':')
+ buf.WriteString(ref.Tag)
+ }
+ if len(ref.Digest) > 0 {
+ buf.WriteByte('@')
+ buf.WriteString(string(ref.Digest))
+ }
+ return buf.String()
+}
diff --git a/vendor/cuelabs.dev/go/oci/ociregistry/valid.go b/vendor/cuelabs.dev/go/oci/ociregistry/valid.go
new file mode 100644
index 00000000..7486fa19
--- /dev/null
+++ b/vendor/cuelabs.dev/go/oci/ociregistry/valid.go
@@ -0,0 +1,30 @@
+package ociregistry
+
+import (
+ "regexp"
+
+ "github.com/opencontainers/go-digest"
+)
+
+var (
+ tagPattern = regexp.MustCompile(`^[a-zA-Z0-9_][a-zA-Z0-9._-]{0,127}$`)
+ repoNamePattern = regexp.MustCompile(`^[a-z0-9]+([._-][a-z0-9]+)*(/[a-z0-9]+([._-][a-z0-9]+)*)*$`)
+)
+
+// IsValidRepoName reports whether the given repository
+// name is valid according to the specification.
+func IsValidRepoName(repoName string) bool {
+ return repoNamePattern.MatchString(repoName)
+}
+
+// IsValidTag reports whether the digest d is valid
+// according to the specification.
+func IsValidTag(tag string) bool {
+ return tagPattern.MatchString(tag)
+}
+
+// IsValidDigest reports whether the digest d is well formed.
+func IsValidDigest(d string) bool {
+ _, err := digest.Parse(d)
+ return err == nil
+}
diff --git a/vendor/cuelang.org/go/cue/ast/ast.go b/vendor/cuelang.org/go/cue/ast/ast.go
index 0f1e2afa..5b9a3b54 100644
--- a/vendor/cuelang.org/go/cue/ast/ast.go
+++ b/vendor/cuelang.org/go/cue/ast/ast.go
@@ -48,10 +48,10 @@ type Node interface {
// the node or nil if there is no such position.
pos() *token.Pos
- // Deprecated: use ast.Comments
+ // Deprecated: use [Comments]
Comments() []*CommentGroup
- // Deprecated: use ast.AddComment
+ // Deprecated: use [AddComment]
AddComment(*CommentGroup)
commentInfo() *comments
}
diff --git a/vendor/cuelang.org/go/cue/errors/errors.go b/vendor/cuelang.org/go/cue/errors/errors.go
index 8fb87039..f09b9f6f 100644
--- a/vendor/cuelang.org/go/cue/errors/errors.go
+++ b/vendor/cuelang.org/go/cue/errors/errors.go
@@ -20,15 +20,15 @@
package errors // import "cuelang.org/go/cue/errors"
import (
+ "cmp"
"errors"
"fmt"
"io"
"path/filepath"
+ "slices"
"sort"
"strings"
- "github.com/mpvl/unique"
-
"cuelang.org/go/cue/token"
)
@@ -133,12 +133,11 @@ func Positions(err error) []token.Pos {
a := make([]token.Pos, 0, 3)
- sortOffset := 0
pos := e.Position()
if pos.IsValid() {
a = append(a, pos)
- sortOffset = 1
}
+ sortOffset := len(a)
for _, p := range e.InputPositions() {
if p.IsValid() && p != pos {
@@ -146,19 +145,10 @@ func Positions(err error) []token.Pos {
}
}
- byPos := byPos(a[sortOffset:])
- sort.Sort(byPos)
- k := unique.ToFront(byPos)
- return a[:k+sortOffset]
+ slices.SortFunc(a[sortOffset:], comparePos)
+ return slices.Compact(a)
}
-type byPos []token.Pos
-
-func (s *byPos) Truncate(n int) { (*s) = (*s)[:n] }
-func (s byPos) Len() int { return len(s) }
-func (s byPos) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s byPos) Less(i, j int) bool { return comparePos(s[i], s[j]) == -1 }
-
// Path returns the path of an Error if err is of that type.
func Path(err error) []string {
if e := Error(nil); errors.As(err, &e) {
@@ -274,13 +264,12 @@ var _ Error = &posError{}
// the offending token, and the error condition is described
// by Msg.
type posError struct {
- pos token.Pos
- inputs []token.Pos
+ pos token.Pos
Message
}
func (e *posError) Path() []string { return nil }
-func (e *posError) InputPositions() []token.Pos { return e.inputs }
+func (e *posError) InputPositions() []token.Pos { return nil }
func (e *posError) Position() token.Pos { return e.pos }
// Append combines two errors, flattening Lists as necessary.
@@ -385,22 +374,15 @@ func (p list) Less(i, j int) bool {
return p[i].Error() < p[j].Error()
}
-func lessOrMore(isLess bool) int {
- if isLess {
- return -1
- }
- return 1
-}
-
func comparePos(a, b token.Pos) int {
if a.Filename() != b.Filename() {
- return lessOrMore(a.Filename() < b.Filename())
+ return cmp.Compare(a.Filename(), b.Filename())
}
if a.Line() != b.Line() {
- return lessOrMore(a.Line() < b.Line())
+ return cmp.Compare(a.Line(), b.Line())
}
if a.Column() != b.Column() {
- return lessOrMore(a.Column() < b.Column())
+ return cmp.Compare(a.Column(), b.Column())
}
return 0
}
diff --git a/vendor/cuelang.org/go/cue/instance.go b/vendor/cuelang.org/go/cue/instance.go
index ee1f74fa..09ebe96b 100644
--- a/vendor/cuelang.org/go/cue/instance.go
+++ b/vendor/cuelang.org/go/cue/instance.go
@@ -289,10 +289,6 @@ func (inst *hiddenInstance) Build(p *build.Instance) *Instance {
return i
}
-func (inst *Instance) value() Value {
- return newVertexRoot(inst.index, newContext(inst.index), inst.root)
-}
-
// Lookup reports the value at a path starting from the top level struct. The
// Exists method of the returned value will report false if the path did not
// exist. The Err method reports if any error occurred during evaluation. The
@@ -301,7 +297,7 @@ func (inst *Instance) value() Value {
//
// Deprecated: use Value.LookupPath
func (inst *hiddenInstance) Lookup(path ...string) Value {
- return inst.value().Lookup(path...)
+ return inst.Value().Lookup(path...)
}
// LookupDef reports the definition with the given name within struct v. The
@@ -310,7 +306,7 @@ func (inst *hiddenInstance) Lookup(path ...string) Value {
//
// Deprecated: use Value.LookupPath
func (inst *hiddenInstance) LookupDef(path string) Value {
- return inst.value().LookupDef(path)
+ return inst.Value().LookupDef(path)
}
// LookupField reports a Field at a path starting from v, or an error if the
@@ -323,7 +319,7 @@ func (inst *hiddenInstance) LookupDef(path string) Value {
//
// Deprecated: use Value.LookupPath
func (inst *hiddenInstance) LookupField(path ...string) (f FieldInfo, err error) {
- v := inst.value()
+ v := inst.Value()
for _, k := range path {
s, err := v.Struct()
if err != nil {
diff --git a/vendor/cuelang.org/go/cue/load/config.go b/vendor/cuelang.org/go/cue/load/config.go
index 8d131dcf..073ae5c4 100644
--- a/vendor/cuelang.org/go/cue/load/config.go
+++ b/vendor/cuelang.org/go/cue/load/config.go
@@ -15,25 +15,27 @@
package load
import (
+ "context"
"io"
"os"
"path/filepath"
"strings"
- "cuelabs.dev/go/oci/ociregistry"
"cuelang.org/go/cue/ast"
"cuelang.org/go/cue/build"
"cuelang.org/go/cue/errors"
"cuelang.org/go/cue/token"
"cuelang.org/go/internal"
- "cuelang.org/go/internal/mod/modfile"
+ "cuelang.org/go/internal/cueexperiment"
+ "cuelang.org/go/mod/modconfig"
+ "cuelang.org/go/mod/modfile"
+ "cuelang.org/go/mod/module"
)
const (
cueSuffix = ".cue"
modDir = "cue.mod"
moduleFile = "module.cue"
- pkgDir = "pkg"
)
// FromArgsUsage is a partial usage message that applications calling
@@ -277,11 +279,20 @@ type Config struct {
// Registry is used to fetch CUE module dependencies.
//
- // When nil, dependencies will be resolved in legacy mode:
- // reading from cue.mod/pkg, cue.mod/usr, and cue.mod/gen.
+ // When nil, if the modules experiment is enabled
+ // (CUE_EXPERIMENT=modules), [modconfig.NewRegistry]
+ // will be used to create a registry instance using the
+ // usual cmd/cue conventions for environment variables
+ // (but see the Env field below).
//
- // THIS IS EXPERIMENTAL FOR NOW. DO NOT USE.
- Registry ociregistry.Interface
+ // THIS IS EXPERIMENTAL. API MIGHT CHANGE.
+ Registry modconfig.Registry
+
+ // Env provides environment variables for use in the configuration.
+ // Currently this is only used in the construction of the Registry
+ // value (see above). If this is nil, the current process's environment
+ // will be used.
+ Env []string
fileSystem fileSystem
}
@@ -293,10 +304,6 @@ func (c *Config) stdin() io.Reader {
return c.Stdin
}
-func toImportPath(dir string) importPath {
- return importPath(filepath.ToSlash(dir))
-}
-
type importPath string
type fsPath string
@@ -352,7 +359,7 @@ func (c Config) complete() (cfg *Config, err error) {
// TODO: we could populate this already with absolute file paths,
// but relative paths cannot be added. Consider what is reasonable.
- if err := c.fileSystem.init(&c); err != nil {
+ if err := c.fileSystem.init(c.Dir, c.Overlay); err != nil {
return nil, err
}
@@ -368,12 +375,72 @@ func (c Config) complete() (cfg *Config, err error) {
} else if !filepath.IsAbs(c.ModuleRoot) {
c.ModuleRoot = filepath.Join(c.Dir, c.ModuleRoot)
}
+ // Note: if cueexperiment.Flags.Modules _isn't_ set but c.Registry
+ // is, we consider that a good enough hint that modules support
+ // should be enabled and hence don't return an error in that case.
+ if cueexperiment.Flags.Modules && c.Registry == nil {
+ registry, err := modconfig.NewRegistry(&modconfig.Config{
+ Env: c.Env,
+ })
+ if err != nil {
+ // If there's an error in the registry configuration,
+ // don't error immediately, but only when we actually
+ // need to resolve modules.
+ registry = errorRegistry{err}
+ }
+ c.Registry = registry
+ }
if err := c.loadModule(); err != nil {
return nil, err
}
return &c, nil
}
+// loadModule loads the module file, resolves and downloads module
+// dependencies. It sets c.Module if it's empty or checks it for
+// consistency with the module file otherwise.
+func (c *Config) loadModule() error {
+ // TODO: also make this work if run from outside the module?
+ mod := filepath.Join(c.ModuleRoot, modDir)
+ info, cerr := c.fileSystem.stat(mod)
+ if cerr != nil {
+ return nil
+ }
+ // TODO remove support for legacy non-directory module.cue file
+ // by returning an error if info.IsDir is false.
+ if info.IsDir() {
+ mod = filepath.Join(mod, moduleFile)
+ }
+ f, cerr := c.fileSystem.openFile(mod)
+ if cerr != nil {
+ return nil
+ }
+ defer f.Close()
+ data, err := io.ReadAll(f)
+ if err != nil {
+ return err
+ }
+ parseModFile := modfile.ParseNonStrict
+ if c.Registry == nil {
+ parseModFile = modfile.ParseLegacy
+ }
+ mf, err := parseModFile(data, mod)
+ if err != nil {
+ return err
+ }
+ c.modFile = mf
+ if mf.Module == "" {
+ // Backward compatibility: allow empty module.cue file.
+ // TODO maybe check that the rest of the fields are empty too?
+ return nil
+ }
+ if c.Module != "" && c.Module != mf.Module {
+ return errors.Newf(token.NoPos, "inconsistent modules: got %q, want %q", mf.Module, c.Module)
+ }
+ c.Module = mf.Module
+ return nil
+}
+
func (c Config) isRoot(dir string) bool {
fs := &c.fileSystem
// Note: cue.mod used to be a file. We still allow both to match.
@@ -384,8 +451,6 @@ func (c Config) isRoot(dir string) bool {
// findRoot returns the module root that's ancestor
// of the given absolute directory path, or "" if none was found.
func (c Config) findRoot(absDir string) string {
- fs := &c.fileSystem
-
abs := absDir
for {
if c.isRoot(abs) {
@@ -398,21 +463,7 @@ func (c Config) findRoot(absDir string) string {
return ""
}
if len(d) >= len(abs) {
- break // reached top of file system, no cue.mod
- }
- abs = d
- }
- abs = absDir
-
- // TODO(legacy): remove this capability at some point.
- for {
- info, err := fs.stat(filepath.Join(abs, pkgDir))
- if err == nil && info.IsDir() {
- return abs
- }
- d := filepath.Dir(abs)
- if len(d) >= len(abs) {
- return "" // reached top of file system, no pkg dir.
+ return "" // reached top of file system, no cue.mod
}
abs = d
}
@@ -425,3 +476,20 @@ func (c *Config) newErrInstance(err error) *build.Instance {
i.Err = errors.Promote(err, "instance")
return i
}
+
+// errorRegistry implements [modconfig.Registry] by returning err from all methods.
+type errorRegistry struct {
+ err error
+}
+
+func (r errorRegistry) Requirements(ctx context.Context, m module.Version) ([]module.Version, error) {
+ return nil, r.err
+}
+
+func (r errorRegistry) Fetch(ctx context.Context, m module.Version) (module.SourceLoc, error) {
+ return module.SourceLoc{}, r.err
+}
+
+func (r errorRegistry) ModuleVersions(ctx context.Context, mpath string) ([]string, error) {
+ return nil, r.err
+}
diff --git a/vendor/cuelang.org/go/cue/load/fs.go b/vendor/cuelang.org/go/cue/load/fs.go
index 41f38836..5ff11777 100644
--- a/vendor/cuelang.org/go/cue/load/fs.go
+++ b/vendor/cuelang.org/go/cue/load/fs.go
@@ -16,6 +16,7 @@ package load
import (
"bytes"
+ "fmt"
"io"
iofs "io/fs"
"os"
@@ -27,6 +28,7 @@ import (
"cuelang.org/go/cue/ast"
"cuelang.org/go/cue/errors"
"cuelang.org/go/cue/token"
+ "cuelang.org/go/mod/module"
)
type overlayFile struct {
@@ -37,9 +39,14 @@ type overlayFile struct {
isDir bool
}
-func (f *overlayFile) Name() string { return f.basename }
-func (f *overlayFile) Size() int64 { return int64(len(f.contents)) }
-func (f *overlayFile) Mode() os.FileMode { return 0644 }
+func (f *overlayFile) Name() string { return f.basename }
+func (f *overlayFile) Size() int64 { return int64(len(f.contents)) }
+func (f *overlayFile) Mode() iofs.FileMode {
+ if f.isDir {
+ return iofs.ModeDir | 0o555
+ }
+ return 0o444
+}
func (f *overlayFile) ModTime() time.Time { return f.modtime }
func (f *overlayFile) IsDir() bool { return f.isDir }
func (f *overlayFile) Sys() interface{} { return nil }
@@ -60,19 +67,42 @@ func (fs *fileSystem) getDir(dir string, create bool) map[string]*overlayFile {
return m
}
-func (fs *fileSystem) init(c *Config) error {
- fs.cwd = c.Dir
+// ioFS returns an implementation of [io/fs.FS] that holds
+// the contents of fs under the given filepath root.
+//
+// Note: we can't return an FS implementation that covers the
+// entirety of fs because the overlay paths may not all share
+// a common root.
+//
+// Note also: the returned FS also implements
+// [modpkgload.OSRootFS] so that we can map
+// the resulting source locations back to the filesystem
+// paths required by most of the `cue/load` package
+// implementation.
+func (fs *fileSystem) ioFS(root string) iofs.FS {
+ dir := fs.getDir(root, false)
+ if dir == nil {
+ return module.OSDirFS(root)
+ }
+ return &ioFS{
+ fs: fs,
+ root: root,
+ }
+}
- overlay := c.Overlay
+func (fs *fileSystem) init(cwd string, overlay map[string]Source) error {
+ fs.cwd = cwd
fs.overlayDirs = map[string]map[string]*overlayFile{}
// Organize overlay
for filename, src := range overlay {
+ if !filepath.IsAbs(filename) {
+ return fmt.Errorf("non-absolute file path %q in overlay", filename)
+ }
// TODO: do we need to further clean the path or check that the
// specified files are within the root/ absolute files?
dir, base := filepath.Split(filename)
m := fs.getDir(dir, true)
-
b, file, err := src.contents()
if err != nil {
return err
@@ -107,19 +137,11 @@ func (fs *fileSystem) joinPath(elem ...string) string {
return filepath.Join(elem...)
}
-func (fs *fileSystem) splitPathList(s string) []string {
- return filepath.SplitList(s)
-}
-
-func (fs *fileSystem) isAbsPath(path string) bool {
- return filepath.IsAbs(path)
-}
-
func (fs *fileSystem) makeAbs(path string) string {
- if fs.isAbsPath(path) {
+ if filepath.IsAbs(path) {
return path
}
- return filepath.Clean(filepath.Join(fs.cwd, path))
+ return filepath.Join(fs.cwd, path)
}
func (fs *fileSystem) isDir(path string) bool {
@@ -131,40 +153,6 @@ func (fs *fileSystem) isDir(path string) bool {
return err == nil && fi.IsDir()
}
-func (fs *fileSystem) hasSubdir(root, dir string) (rel string, ok bool) {
- // Try using paths we received.
- if rel, ok = hasSubdir(root, dir); ok {
- return
- }
-
- // Try expanding symlinks and comparing
- // expanded against unexpanded and
- // expanded against expanded.
- rootSym, _ := filepath.EvalSymlinks(root)
- dirSym, _ := filepath.EvalSymlinks(dir)
-
- if rel, ok = hasSubdir(rootSym, dir); ok {
- return
- }
- if rel, ok = hasSubdir(root, dirSym); ok {
- return
- }
- return hasSubdir(rootSym, dirSym)
-}
-
-func hasSubdir(root, dir string) (rel string, ok bool) {
- const sep = string(filepath.Separator)
- root = filepath.Clean(root)
- if !strings.HasSuffix(root, sep) {
- root += sep
- }
- dir = filepath.Clean(dir)
- if !strings.HasPrefix(dir, root) {
- return "", false
- }
- return filepath.ToSlash(dir[len(root):]), true
-}
-
func (fs *fileSystem) readDir(path string) ([]iofs.DirEntry, errors.Error) {
path = fs.makeAbs(path)
m := fs.getDir(path, false)
@@ -174,23 +162,24 @@ func (fs *fileSystem) readDir(path string) ([]iofs.DirEntry, errors.Error) {
return nil, errors.Wrapf(err, token.NoPos, "readDir")
}
}
- if m != nil {
- done := map[string]bool{}
- for i, fi := range items {
- done[fi.Name()] = true
- if o := m[fi.Name()]; o != nil {
- items[i] = iofs.FileInfoToDirEntry(o)
- }
+ if m == nil {
+ return items, nil
+ }
+ done := map[string]bool{}
+ for i, fi := range items {
+ done[fi.Name()] = true
+ if o := m[fi.Name()]; o != nil {
+ items[i] = iofs.FileInfoToDirEntry(o)
}
- for _, o := range m {
- if !done[o.Name()] {
- items = append(items, iofs.FileInfoToDirEntry(o))
- }
+ }
+ for _, o := range m {
+ if !done[o.Name()] {
+ items = append(items, iofs.FileInfoToDirEntry(o))
}
- sort.Slice(items, func(i, j int) bool {
- return items[i].Name() < items[j].Name()
- })
}
+ sort.Slice(items, func(i, j int) bool {
+ return items[i].Name() < items[j].Name()
+ })
return items, nil
}
@@ -202,7 +191,7 @@ func (fs *fileSystem) getOverlay(path string) *overlayFile {
return nil
}
-func (fs *fileSystem) stat(path string) (os.FileInfo, errors.Error) {
+func (fs *fileSystem) stat(path string) (iofs.FileInfo, errors.Error) {
path = fs.makeAbs(path)
if fi := fs.getOverlay(path); fi != nil {
return fi, nil
@@ -214,7 +203,7 @@ func (fs *fileSystem) stat(path string) (os.FileInfo, errors.Error) {
return fi, nil
}
-func (fs *fileSystem) lstat(path string) (os.FileInfo, errors.Error) {
+func (fs *fileSystem) lstat(path string) (iofs.FileInfo, errors.Error) {
path = fs.makeAbs(path)
if fi := fs.getOverlay(path); fi != nil {
return fi, nil
@@ -290,3 +279,120 @@ func (fs *fileSystem) walkRec(path string, entry iofs.DirEntry, f walkFunc) erro
}
return nil
}
+
+var _ interface {
+ iofs.FS
+ iofs.ReadDirFS
+ iofs.ReadFileFS
+ module.OSRootFS
+} = (*ioFS)(nil)
+
+type ioFS struct {
+ fs *fileSystem
+ root string
+}
+
+func (fs *ioFS) OSRoot() string {
+ return fs.root
+}
+
+func (fs *ioFS) Open(name string) (iofs.File, error) {
+ fpath, err := fs.absPathFromFSPath(name)
+ if err != nil {
+ return nil, err
+ }
+ r, err := fs.fs.openFile(fpath)
+ if err != nil {
+ return nil, err // TODO convert filepath in error to fs path
+ }
+ return &ioFSFile{
+ fs: fs.fs,
+ path: fpath,
+ rc: r,
+ }, nil
+}
+
+func (fs *ioFS) absPathFromFSPath(name string) (string, error) {
+ if !iofs.ValidPath(name) {
+ return "", fmt.Errorf("invalid io/fs path %q", name)
+ }
+ // Technically we should mimic Go's internal/safefilepath.fromFS
+ // functionality here, but as we're using this in a relatively limited
+ // context, we can just prohibit some characters.
+ if strings.ContainsAny(name, ":\\") {
+ return "", fmt.Errorf("invalid io/fs path %q", name)
+ }
+ return filepath.Join(fs.root, name), nil
+}
+
+// ReadDir implements [io/fs.ReadDirFS].
+func (fs *ioFS) ReadDir(name string) ([]iofs.DirEntry, error) {
+ fpath, err := fs.absPathFromFSPath(name)
+ if err != nil {
+ return nil, err
+ }
+ return fs.fs.readDir(fpath)
+}
+
+// ReadFile implements [io/fs.ReadFileFS].
+func (fs *ioFS) ReadFile(name string) ([]byte, error) {
+ fpath, err := fs.absPathFromFSPath(name)
+ if err != nil {
+ return nil, err
+ }
+ if fi := fs.fs.getOverlay(fpath); fi != nil {
+ return bytes.Clone(fi.contents), nil
+ }
+ return os.ReadFile(fpath)
+}
+
+// ioFSFile implements [io/fs.File] for the overlay filesystem.
+type ioFSFile struct {
+ fs *fileSystem
+ path string
+ rc io.ReadCloser
+ entries []iofs.DirEntry
+}
+
+var _ interface {
+ iofs.File
+ iofs.ReadDirFile
+} = (*ioFSFile)(nil)
+
+func (f *ioFSFile) Stat() (iofs.FileInfo, error) {
+ return f.fs.stat(f.path)
+}
+
+func (f *ioFSFile) Read(buf []byte) (int, error) {
+ return f.rc.Read(buf)
+}
+
+func (f *ioFSFile) Close() error {
+ return f.rc.Close()
+}
+
+func (f *ioFSFile) ReadDir(n int) ([]iofs.DirEntry, error) {
+ if f.entries == nil {
+ entries, err := f.fs.readDir(f.path)
+ if err != nil {
+ return entries, err
+ }
+ if entries == nil {
+ entries = []iofs.DirEntry{}
+ }
+ f.entries = entries
+ }
+ if n <= 0 {
+ entries := f.entries
+ f.entries = f.entries[len(f.entries):]
+ return entries, nil
+ }
+ var err error
+ if n >= len(f.entries) {
+ n = len(f.entries)
+ err = io.EOF
+ }
+ entries := f.entries[:n]
+ f.entries = f.entries[n:]
+ return entries, err
+}
diff --git a/vendor/cuelang.org/go/cue/load/import.go b/vendor/cuelang.org/go/cue/load/import.go
index 2842d48c..e074c2b6 100644
--- a/vendor/cuelang.org/go/cue/load/import.go
+++ b/vendor/cuelang.org/go/cue/load/import.go
@@ -15,7 +15,6 @@
package load
import (
- "context"
"fmt"
"os"
pathpkg "path"
@@ -28,7 +27,7 @@ import (
"cuelang.org/go/cue/errors"
"cuelang.org/go/cue/token"
"cuelang.org/go/internal/filetypes"
- "cuelang.org/go/internal/mod/module"
+ "cuelang.org/go/mod/module"
)
// importPkg returns details about the CUE package named by the import path,
@@ -97,20 +96,16 @@ func (l *loader) importPkg(pos token.Pos, p *build.Instance) []*build.Instance {
genDir := GenPath(cfg.ModuleRoot)
if strings.HasPrefix(p.Dir, genDir) {
dirs = append(dirs, [2]string{genDir, p.Dir})
- // TODO(legacy): don't support "pkg"
// && p.PkgName != "_"
- if filepath.Base(genDir) != "pkg" {
- for _, sub := range []string{"pkg", "usr"} {
- rel, err := filepath.Rel(genDir, p.Dir)
- if err != nil {
- // should not happen
- return retErr(
- errors.Wrapf(err, token.NoPos, "invalid path"))
- }
- base := filepath.Join(cfg.ModuleRoot, modDir, sub)
- dir := filepath.Join(base, rel)
- dirs = append(dirs, [2]string{base, dir})
+ for _, sub := range []string{"pkg", "usr"} {
+ rel, err := filepath.Rel(genDir, p.Dir)
+ if err != nil {
+ // should not happen
+ return retErr(errors.Wrapf(err, token.NoPos, "invalid path"))
}
+ base := filepath.Join(cfg.ModuleRoot, modDir, sub)
+ dir := filepath.Join(base, rel)
+ dirs = append(dirs, [2]string{base, dir})
}
} else {
dirs = append(dirs, [2]string{cfg.ModuleRoot, p.Dir})
@@ -165,7 +160,7 @@ func (l *loader) importPkg(pos token.Pos, p *build.Instance) []*build.Instance {
})
continue // skip unrecognized file types
}
- fp.add(pos, dir, file, importComment)
+ fp.add(dir, file, importComment)
}
if p.PkgName == "" || !inModule || l.cfg.isRoot(dir) || dir == d[0] {
@@ -239,7 +234,6 @@ func (l *loader) newRelInstance(pos token.Pos, path, pkgName string) *build.Inst
if !isLocalImport(path) {
panic(fmt.Errorf("non-relative import path %q passed to newRelInstance", path))
}
- fs := l.cfg.fileSystem
var err errors.Error
dir := path
@@ -266,7 +260,7 @@ func (l *loader) newRelInstance(pos token.Pos, path, pkgName string) *build.Inst
p.Dir = dir
- if fs.isAbsPath(path) || strings.HasPrefix(path, "/") {
+ if filepath.IsAbs(path) || strings.HasPrefix(path, "/") {
err = errors.Append(err, errors.Newf(pos,
"absolute import path %q not allowed", path))
}
@@ -299,14 +293,6 @@ func (l *loader) importPathFromAbsDir(absDir fsPath, key string) (importPath, er
"invalid package %q (root of %s)", key, modDir)
}
- // TODO(legacy): remove.
- case strings.HasPrefix(pkg, "/pkg/"):
- pkg = pkg[len("/pkg/"):]
- if pkg == "" {
- return "", errors.Newf(token.NoPos,
- "invalid package %q (root of %s)", key, pkgDir)
- }
-
case l.cfg.Module == "":
return "", errors.Newf(token.NoPos,
"cannot determine import path for %q (no module)", key)
@@ -345,28 +331,11 @@ func (l *loader) absDirFromImportPath(pos token.Pos, p importPath) (absDir, name
if l.cfg.ModuleRoot == "" {
return "", "", errors.Newf(pos, "cannot import %q (root undefined)", p)
}
-
+ origp := p
// Extract the package name.
-
- name = string(p)
- switch i := strings.LastIndexAny(name, "/:"); {
- case i < 0:
- case p[i] == ':':
- name = string(p[i+1:])
- p = p[:i]
-
- default: // p[i] == '/'
- mp, _, ok := module.SplitPathVersion(string(p))
- if ok {
- // import of the form: example.com/foo/bar@v1
- if i := strings.LastIndex(mp, "/"); i >= 0 {
- name = mp[i+1:]
- }
- } else {
- name = string(p[i+1:])
- }
- }
- // TODO: fully test that name is a valid identifier.
+ parts := module.ParseImportPath(string(p))
+ name = parts.Qualifier
+ p = importPath(parts.Unqualified().String())
if name == "" {
err = errors.Newf(pos, "empty package name in import path %q", p)
} else if strings.IndexByte(name, '.') >= 0 {
@@ -376,8 +345,43 @@ func (l *loader) absDirFromImportPath(pos token.Pos, p importPath) (absDir, name
err = errors.Newf(pos,
"implied package identifier %q from import path %q is not valid", name, p)
}
+ if l.cfg.Registry != nil {
+ if l.pkgs == nil {
+ return "", name, errors.Newf(pos, "imports are unavailable because there is no cue.mod/module.cue file")
+ }
+ // TODO predicate registry-aware lookup on module.cue-declared CUE version?
+
+ // Note: use the original form of the import path because
+ // that's the form passed to modpkgload.LoadPackages
+ // and hence it's available by that name via Pkg.
+ pkg := l.pkgs.Pkg(string(origp))
+ if pkg == nil {
+ return "", name, errors.Newf(pos, "no dependency found for package %q", p)
+ }
+ if err := pkg.Error(); err != nil {
+ return "", name, errors.Newf(pos, "cannot find package %q: %v", p, err)
+ }
+ if mv := pkg.Mod(); mv.IsLocal() {
+ // It's a local package that's present inside one or both of the gen, usr or pkg
+ // directories. Even though modpkgload tells us exactly what those directories
+ // are, the rest of the cue/load logic expects only a single directory for now,
+ // so just use that.
+ absDir = filepath.Join(GenPath(l.cfg.ModuleRoot), parts.Path)
+ } else {
+ locs := pkg.Locations()
+ if len(locs) > 1 {
+ return "", "", errors.Newf(pos, "package %q unexpectedly found in multiple locations", p)
+ }
+ var err error
+ absDir, err = absPathForSourceLoc(locs[0])
+ if err != nil {
+ return "", name, errors.Newf(pos, "cannot determine source directory for package %q: %v", p, err)
+ }
+ }
+ return absDir, name, nil
+ }
- // Determine the directory.
+ // Determine the directory without using the registry.
sub := filepath.FromSlash(string(p))
switch hasPrefix := strings.HasPrefix(string(p), l.cfg.Module); {
@@ -388,34 +392,19 @@ func (l *loader) absDirFromImportPath(pos token.Pos, p importPath) (absDir, name
absDir = filepath.Join(l.cfg.ModuleRoot, sub[len(l.cfg.Module)+1:])
default:
- // TODO predicate registry-aware lookup on module.cue-declared CUE version?
- if l.cfg.Registry != nil {
- var err error
- absDir, err = l.externalPackageDir(p)
- if err != nil {
- // TODO why can't we use %w ?
- return "", name, errors.Newf(token.NoPos, "cannot get directory for external module %q: %v", p, err)
- }
- } else {
- absDir = filepath.Join(GenPath(l.cfg.ModuleRoot), sub)
- }
+ absDir = filepath.Join(GenPath(l.cfg.ModuleRoot), sub)
}
-
return absDir, name, err
}
-func (l *loader) externalPackageDir(p importPath) (dir string, err error) {
- if l.deps == nil {
- return "", fmt.Errorf("no dependency found for import path %q (no dependencies at all)", p)
+func absPathForSourceLoc(loc module.SourceLoc) (string, error) {
+ osfs, ok := loc.FS.(module.OSRootFS)
+ if !ok {
+ return "", fmt.Errorf("cannot get absolute path for FS of type %T", loc.FS)
}
- m, subPath, err := l.deps.lookup(p)
- if err != nil {
- return "", err
- }
-
- dir, err = l.regClient.getModContents(context.TODO(), m)
- if err != nil {
- return "", fmt.Errorf("cannot get contents for %v: %v", m, err)
+ osPath := osfs.OSRoot()
+ if osPath == "" {
+ return "", fmt.Errorf("cannot get absolute path for FS of type %T", loc.FS)
}
- return filepath.Join(dir, filepath.FromSlash(subPath)), nil
+ return filepath.Join(osPath, loc.Dir), nil
}
diff --git a/vendor/cuelang.org/go/cue/load/instances.go b/vendor/cuelang.org/go/cue/load/instances.go
index b6500c9d..d8fcc778 100644
--- a/vendor/cuelang.org/go/cue/load/instances.go
+++ b/vendor/cuelang.org/go/cue/load/instances.go
@@ -20,12 +20,17 @@ package load
// - go/build
import (
+ "context"
"fmt"
- "os"
"cuelang.org/go/cue/ast"
"cuelang.org/go/cue/build"
+ "cuelang.org/go/internal/cueexperiment"
"cuelang.org/go/internal/filetypes"
+ "cuelang.org/go/internal/mod/modimports"
+ "cuelang.org/go/internal/mod/modpkgload"
+ "cuelang.org/go/internal/mod/modrequirements"
+ "cuelang.org/go/mod/module"
// Trigger the unconditional loading of all core builtin packages if load
// is used. This was deemed the simplest way to avoid having to import
@@ -40,36 +45,41 @@ import (
// instance, but errors that occur loading dependencies are recorded in these
// dependencies.
func Instances(args []string, c *Config) []*build.Instance {
+ ctx := context.TODO()
if c == nil {
c = &Config{}
}
+ // We want to consult the CUE_EXPERIMENT flag to see whether
+ // to consult external registries by default.
+ if err := cueexperiment.Init(); err != nil {
+ return []*build.Instance{c.newErrInstance(err)}
+ }
newC, err := c.complete()
if err != nil {
return []*build.Instance{c.newErrInstance(err)}
}
c = newC
- // TODO use predictable location
- var deps *dependencies
- var regClient *registryClient
- if c.Registry != nil {
- // TODO use configured cache directory.
- tmpDir, err := os.MkdirTemp("", "cue-load-")
- if err != nil {
- return []*build.Instance{c.newErrInstance(err)}
- }
- regClient, err = newRegistryClient(c.Registry, tmpDir)
- if err != nil {
- return []*build.Instance{c.newErrInstance(fmt.Errorf("cannot make registry client: %v", err))}
- }
- deps1, err := resolveDependencies(c.modFile, regClient)
- if err != nil {
- return []*build.Instance{c.newErrInstance(fmt.Errorf("cannot resolve dependencies: %v", err))}
- }
- deps = deps1
+ // TODO: This requires packages to be placed before files. At some point this
+ // could be relaxed.
+ i := 0
+ for ; i < len(args) && filetypes.IsPackage(args[i]); i++ {
+ }
+ pkgArgs := args[:i]
+ otherArgs := args[i:]
+
+ // Pass all arguments that look like packages to loadPackages
+ // so that they'll be available when looking up the packages
+ // that are specified on the command line.
+ // Relative import paths create a package with an associated
+ // error but it turns out that's actually OK because the cue/load
+ // logic resolves such paths without consulting pkgs.
+ pkgs, err := loadPackages(ctx, c, pkgArgs)
+ if err != nil {
+ return []*build.Instance{c.newErrInstance(err)}
}
tg := newTagger(c)
- l := newLoader(c, tg, deps, regClient)
+ l := newLoader(c, tg, pkgs)
if c.Context == nil {
c.Context = build.NewContext(
@@ -78,16 +88,9 @@ func Instances(args []string, c *Config) []*build.Instance {
)
}
- // TODO: require packages to be placed before files. At some point this
- // could be relaxed.
- i := 0
- for ; i < len(args) && filetypes.IsPackage(args[i]); i++ {
- }
-
a := []*build.Instance{}
-
if len(args) == 0 || i > 0 {
- for _, m := range l.importPaths(args[:i]) {
+ for _, m := range l.importPaths(pkgArgs) {
if m.Err != nil {
inst := c.newErrInstance(m.Err)
a = append(a, inst)
@@ -97,8 +100,8 @@ func Instances(args []string, c *Config) []*build.Instance {
}
}
- if args = args[i:]; len(args) > 0 {
- files, err := filetypes.ParseArgs(args)
+ if len(otherArgs) > 0 {
+ files, err := filetypes.ParseArgs(otherArgs)
if err != nil {
return []*build.Instance{c.newErrInstance(err)}
}
@@ -140,3 +143,34 @@ func Instances(args []string, c *Config) []*build.Instance {
return a
}
+
+func loadPackages(ctx context.Context, cfg *Config, extraPkgs []string) (*modpkgload.Packages, error) {
+ if cfg.Registry == nil || cfg.modFile == nil || cfg.modFile.Module == "" {
+ return nil, nil
+ }
+ reqs := modrequirements.NewRequirements(
+ cfg.modFile.Module,
+ cfg.Registry,
+ cfg.modFile.DepVersions(),
+ cfg.modFile.DefaultMajorVersions(),
+ )
+ mainModLoc := module.SourceLoc{
+ FS: cfg.fileSystem.ioFS(cfg.ModuleRoot),
+ Dir: ".",
+ }
+ allImports, err := modimports.AllImports(modimports.AllModuleFiles(mainModLoc.FS, mainModLoc.Dir))
+ if err != nil {
+ return nil, fmt.Errorf("cannot enumerate all module imports: %v", err)
+ }
+ // Add any packages specified on the command line so they're always
+ // available.
+ allImports = append(allImports, extraPkgs...)
+ return modpkgload.LoadPackages(
+ ctx,
+ cfg.Module,
+ mainModLoc,
+ reqs,
+ cfg.Registry,
+ allImports,
+ ), nil
+}
diff --git a/vendor/cuelang.org/go/cue/load/loader.go b/vendor/cuelang.org/go/cue/load/loader.go
index e2f07523..992c17a8 100644
--- a/vendor/cuelang.org/go/cue/load/loader.go
+++ b/vendor/cuelang.org/go/cue/load/loader.go
@@ -26,6 +26,7 @@ import (
"cuelang.org/go/cue/errors"
"cuelang.org/go/cue/token"
"cuelang.org/go/internal/encoding"
+ "cuelang.org/go/internal/mod/modpkgload"
// Trigger the unconditional loading of all core builtin packages if load
// is used. This was deemed the simplest way to avoid having to import
@@ -35,20 +36,18 @@ import (
)
type loader struct {
- cfg *Config
- tagger *tagger
- stk importStack
- loadFunc build.LoadFunc
- deps *dependencies
- regClient *registryClient
+ cfg *Config
+ tagger *tagger
+ stk importStack
+ loadFunc build.LoadFunc
+ pkgs *modpkgload.Packages
}
-func newLoader(c *Config, tg *tagger, deps *dependencies, regClient *registryClient) *loader {
+func newLoader(c *Config, tg *tagger, pkgs *modpkgload.Packages) *loader {
l := &loader{
- cfg: c,
- tagger: tg,
- deps: deps,
- regClient: regClient,
+ cfg: c,
+ tagger: tg,
+ pkgs: pkgs,
}
l.loadFunc = l._loadFunc
return l
@@ -101,7 +100,7 @@ func (l *loader) cueFilesPackage(files []*build.File) *build.Instance {
fp := newFileProcessor(cfg, pkg, l.tagger)
for _, file := range files {
- fp.add(token.NoPos, cfg.Dir, file, allowAnonymous)
+ fp.add(cfg.Dir, file, allowAnonymous)
}
// TODO: ModImportFromFiles(files)
diff --git a/vendor/cuelang.org/go/cue/load/loader_common.go b/vendor/cuelang.org/go/cue/load/loader_common.go
index 40689480..5a11063b 100644
--- a/vendor/cuelang.org/go/cue/load/loader_common.go
+++ b/vendor/cuelang.org/go/cue/load/loader_common.go
@@ -16,7 +16,6 @@ package load
import (
"bytes"
- "path"
pathpkg "path"
"path/filepath"
"sort"
@@ -37,16 +36,11 @@ import (
type importMode uint
const (
- // If findOnly is set, Import stops after locating the directory
- // that should contain the sources for a package. It does not
- // read any files in the directory.
- findOnly importMode = 1 << iota
-
// If importComment is set, parse import comments on package statements.
// Import returns an error if it finds a comment it cannot understand
// or finds conflicting comments in multiple source files.
// See golang.org/s/go14customimport for more information.
- importComment
+ importComment importMode = 1 << iota
allowAnonymous
)
@@ -167,7 +161,7 @@ func (fp *fileProcessor) finalize(p *build.Instance) errors.Error {
return nil
}
-func (fp *fileProcessor) add(pos token.Pos, root string, file *build.File, mode importMode) (added bool) {
+func (fp *fileProcessor) add(root string, file *build.File, mode importMode) (added bool) {
fullPath := file.Filename
if fullPath != "-" {
if !filepath.IsAbs(fullPath) {
@@ -471,12 +465,12 @@ func cleanPatterns(patterns []string) []string {
// Put argument in canonical form, but preserve leading ./.
if strings.HasPrefix(a, "./") {
- a = "./" + path.Clean(a)
+ a = "./" + pathpkg.Clean(a)
if a == "./." {
a = "."
}
} else {
- a = path.Clean(a)
+ a = pathpkg.Clean(a)
}
out = append(out, a)
}
@@ -488,22 +482,6 @@ func isMetaPackage(name string) bool {
return name == "std" || name == "cmd" || name == "all"
}
-// hasPathPrefix reports whether the path s begins with the
-// elements in prefix.
-func hasPathPrefix(s, prefix string) bool {
- switch {
- default:
- return false
- case len(s) == len(prefix):
- return s == prefix
- case len(s) > len(prefix):
- if prefix != "" && prefix[len(prefix)-1] == '/' {
- return strings.HasPrefix(s, prefix)
- }
- return s[len(prefix)] == '/' && s[:len(prefix)] == prefix
- }
-}
-
// hasFilepathPrefix reports whether the path s begins with the
// elements in prefix.
func hasFilepathPrefix(s, prefix string) bool {
@@ -519,100 +497,3 @@ func hasFilepathPrefix(s, prefix string) bool {
return s[len(prefix)] == filepath.Separator && s[:len(prefix)] == prefix
}
}
-
-// isStandardImportPath reports whether $GOROOT/src/path should be considered
-// part of the standard distribution. For historical reasons we allow people to add
-// their own code to $GOROOT instead of using $GOPATH, but we assume that
-// code will start with a domain name (dot in the first element).
-//
-// Note that this function is meant to evaluate whether a directory found in GOROOT
-// should be treated as part of the standard library. It should not be used to decide
-// that a directory found in GOPATH should be rejected: directories in GOPATH
-// need not have dots in the first element, and they just take their chances
-// with future collisions in the standard library.
-func isStandardImportPath(path string) bool {
- i := strings.Index(path, "/")
- if i < 0 {
- i = len(path)
- }
- elem := path[:i]
- return !strings.Contains(elem, ".")
-}
-
-// isRelativePath reports whether pattern should be interpreted as a directory
-// path relative to the current directory, as opposed to a pattern matching
-// import paths.
-func isRelativePath(pattern string) bool {
- return strings.HasPrefix(pattern, "./") || strings.HasPrefix(pattern, "../") || pattern == "." || pattern == ".."
-}
-
-// inDir checks whether path is in the file tree rooted at dir.
-// If so, inDir returns an equivalent path relative to dir.
-// If not, inDir returns an empty string.
-// inDir makes some effort to succeed even in the presence of symbolic links.
-// TODO(rsc): Replace internal/test.inDir with a call to this function for Go 1.12.
-func inDir(path, dir string) string {
- if rel := inDirLex(path, dir); rel != "" {
- return rel
- }
- xpath, err := filepath.EvalSymlinks(path)
- if err != nil || xpath == path {
- xpath = ""
- } else {
- if rel := inDirLex(xpath, dir); rel != "" {
- return rel
- }
- }
-
- xdir, err := filepath.EvalSymlinks(dir)
- if err == nil && xdir != dir {
- if rel := inDirLex(path, xdir); rel != "" {
- return rel
- }
- if xpath != "" {
- if rel := inDirLex(xpath, xdir); rel != "" {
- return rel
- }
- }
- }
- return ""
-}
-
-// inDirLex is like inDir but only checks the lexical form of the file names.
-// It does not consider symbolic links.
-// TODO(rsc): This is a copy of str.HasFilePathPrefix, modified to
-// return the suffix. Most uses of str.HasFilePathPrefix should probably
-// be calling InDir instead.
-func inDirLex(path, dir string) string {
- pv := strings.ToUpper(filepath.VolumeName(path))
- dv := strings.ToUpper(filepath.VolumeName(dir))
- path = path[len(pv):]
- dir = dir[len(dv):]
- switch {
- default:
- return ""
- case pv != dv:
- return ""
- case len(path) == len(dir):
- if path == dir {
- return "."
- }
- return ""
- case dir == "":
- return path
- case len(path) > len(dir):
- if dir[len(dir)-1] == filepath.Separator {
- if path[:len(dir)] == dir {
- return path[len(dir):]
- }
- return ""
- }
- if path[len(dir)] == filepath.Separator && path[:len(dir)] == dir {
- if len(path) == len(dir)+1 {
- return "."
- }
- return path[len(dir)+1:]
- }
- return ""
- }
-}
diff --git a/vendor/cuelang.org/go/cue/load/match.go b/vendor/cuelang.org/go/cue/load/match.go
index 487948d0..e817defa 100644
--- a/vendor/cuelang.org/go/cue/load/match.go
+++ b/vendor/cuelang.org/go/cue/load/match.go
@@ -17,7 +17,6 @@ package load
import (
"io"
"path/filepath"
- "regexp"
"strings"
"cuelang.org/go/cue/build"
@@ -98,87 +97,3 @@ func matchFile(cfg *Config, file *build.File, returnImports, allFiles bool, allT
return true, data, nil
}
-
-// treeCanMatchPattern(pattern)(name) reports whether
-// name or children of name can possibly match pattern.
-// Pattern is the same limited glob accepted by matchPattern.
-func treeCanMatchPattern(pattern string) func(name string) bool {
- wildCard := false
- if i := strings.Index(pattern, "..."); i >= 0 {
- wildCard = true
- pattern = pattern[:i]
- }
- return func(name string) bool {
- return len(name) <= len(pattern) && hasPathPrefix(pattern, name) ||
- wildCard && strings.HasPrefix(name, pattern)
- }
-}
-
-// matchPattern(pattern)(name) reports whether
-// name matches pattern. Pattern is a limited glob
-// pattern in which '...' means 'any string' and there
-// is no other special syntax.
-// Unfortunately, there are two special cases. Quoting "go help packages":
-//
-// First, /... at the end of the pattern can match an empty string,
-// so that net/... matches both net and packages in its subdirectories, like net/http.
-// Second, any slash-separted pattern element containing a wildcard never
-// participates in a match of the "vendor" element in the path of a vendored
-// package, so that ./... does not match packages in subdirectories of
-// ./vendor or ./mycode/vendor, but ./vendor/... and ./mycode/vendor/... do.
-// Note, however, that a directory named vendor that itself contains code
-// is not a vendored package: cmd/vendor would be a command named vendor,
-// and the pattern cmd/... matches it.
-func matchPattern(pattern string) func(name string) bool {
- // Convert pattern to regular expression.
- // The strategy for the trailing /... is to nest it in an explicit ? expression.
- // The strategy for the vendor exclusion is to change the unmatchable
- // vendor strings to a disallowed code point (vendorChar) and to use
- // "(anything but that codepoint)*" as the implementation of the ... wildcard.
- // This is a bit complicated but the obvious alternative,
- // namely a hand-written search like in most shell glob matchers,
- // is too easy to make accidentally exponential.
- // Using package regexp guarantees linear-time matching.
-
- const vendorChar = "\x00"
-
- if strings.Contains(pattern, vendorChar) {
- return func(name string) bool { return false }
- }
-
- re := regexp.QuoteMeta(pattern)
- re = replaceVendor(re, vendorChar)
- switch {
- case strings.HasSuffix(re, `/`+vendorChar+`/\.\.\.`):
- re = strings.TrimSuffix(re, `/`+vendorChar+`/\.\.\.`) + `(/vendor|/` + vendorChar + `/\.\.\.)`
- case re == vendorChar+`/\.\.\.`:
- re = `(/vendor|/` + vendorChar + `/\.\.\.)`
- case strings.HasSuffix(re, `/\.\.\.`):
- re = strings.TrimSuffix(re, `/\.\.\.`) + `(/\.\.\.)?`
- }
- re = strings.Replace(re, `\.\.\.`, `[^`+vendorChar+`]*`, -1)
-
- reg := regexp.MustCompile(`^` + re + `$`)
-
- return func(name string) bool {
- if strings.Contains(name, vendorChar) {
- return false
- }
- return reg.MatchString(replaceVendor(name, vendorChar))
- }
-}
-
-// replaceVendor returns the result of replacing
-// non-trailing vendor path elements in x with repl.
-func replaceVendor(x, repl string) string {
- if !strings.Contains(x, "vendor") {
- return x
- }
- elem := strings.Split(x, "/")
- for i := 0; i < len(elem)-1; i++ {
- if elem[i] == "vendor" {
- elem[i] = repl
- }
- }
- return strings.Join(elem, "/")
-}
diff --git a/vendor/cuelang.org/go/cue/load/module.go b/vendor/cuelang.org/go/cue/load/module.go
deleted file mode 100644
index 2c85570a..00000000
--- a/vendor/cuelang.org/go/cue/load/module.go
+++ /dev/null
@@ -1,194 +0,0 @@
-package load
-
-import (
- "context"
- "fmt"
- "io"
- "path/filepath"
- "strings"
-
- "cuelang.org/go/cue/errors"
- "cuelang.org/go/cue/token"
- "cuelang.org/go/internal/mod/modfile"
- "cuelang.org/go/internal/mod/module"
- "cuelang.org/go/internal/mod/mvs"
- "cuelang.org/go/internal/mod/semver"
-)
-
-// loadModule loads the module file, resolves and downloads module
-// dependencies. It sets c.Module if it's empty or checks it for
-// consistency with the module file otherwise.
-func (c *Config) loadModule() error {
- // TODO: also make this work if run from outside the module?
- mod := filepath.Join(c.ModuleRoot, modDir)
- info, cerr := c.fileSystem.stat(mod)
- if cerr != nil {
- return nil
- }
- // TODO remove support for legacy non-directory module.cue file
- // by returning an error if info.IsDir is false.
- if info.IsDir() {
- mod = filepath.Join(mod, moduleFile)
- }
- f, cerr := c.fileSystem.openFile(mod)
- if cerr != nil {
- return nil
- }
- defer f.Close()
- data, err := io.ReadAll(f)
- if err != nil {
- return err
- }
- parseModFile := modfile.ParseNonStrict
- if c.Registry == nil {
- parseModFile = modfile.ParseLegacy
- }
- mf, err := parseModFile(data, mod)
- if err != nil {
- return err
- }
- c.modFile = mf
- if mf.Module == "" {
- // Backward compatibility: allow empty module.cue file.
- // TODO maybe check that the rest of the fields are empty too?
- return nil
- }
- if c.Module != "" && c.Module != mf.Module {
- return errors.Newf(token.NoPos, "inconsistent modules: got %q, want %q", mf.Module, c.Module)
- }
- c.Module = mf.Module
- return nil
-}
-
-type dependencies struct {
- mainModule *modfile.File
- versions []module.Version
-}
-
-// lookup returns the module corresponding to the given import path, and the relative path
-// of the package beneath that.
-//
-// It assumes that modules are not nested.
-func (deps *dependencies) lookup(pkgPath importPath) (v module.Version, subPath string, err error) {
- type answer struct {
- v module.Version
- subPath string
- }
- var possible []answer
- for _, dep := range deps.versions {
- if subPath, ok := isParent(dep, pkgPath); ok {
- possible = append(possible, answer{dep, subPath})
- }
- }
- switch len(possible) {
- case 0:
- return module.Version{}, "", fmt.Errorf("no dependency found for import path %q", pkgPath)
- case 1:
- return possible[0].v, possible[0].subPath, nil
- }
- var found *answer
- for i, a := range possible {
- dep, ok := deps.mainModule.Deps[a.v.Path()]
- if ok && dep.Default {
- if found != nil {
- // More than one default.
- // TODO this should be impossible and checked by modfile.
- return module.Version{}, "", fmt.Errorf("more than one default module for import path %q", pkgPath)
- }
- found = &possible[i]
- }
- }
- if found == nil {
- return module.Version{}, "", fmt.Errorf("no default module found for import path %q", pkgPath)
- }
- return found.v, found.subPath, nil
-}
-
-// resolveDependencies resolves all the versions of all the modules in the given module file,
-// using regClient to fetch dependency information.
-func resolveDependencies(mainModFile *modfile.File, regClient *registryClient) (*dependencies, error) {
- vs, err := mvs.BuildList[module.Version](mainModFile.DepVersions(), &mvsReqs{
- mainModule: mainModFile,
- regClient: regClient,
- })
- if err != nil {
- return nil, err
- }
- return &dependencies{
- mainModule: mainModFile,
- versions: vs,
- }, nil
-}
-
-// mvsReqs implements mvs.Reqs by fetching information using
-// regClient.
-type mvsReqs struct {
- module.Versions
- mainModule *modfile.File
- regClient *registryClient
-}
-
-// Required implements mvs.Reqs.Required.
-func (reqs *mvsReqs) Required(m module.Version) (vs []module.Version, err error) {
- if m.Path() == reqs.mainModule.Module {
- return reqs.mainModule.DepVersions(), nil
- }
- mf, err := reqs.regClient.fetchModFile(context.TODO(), m)
- if err != nil {
- return nil, err
- }
- return mf.DepVersions(), nil
-}
-
-// Required implements mvs.Reqs.Max.
-func (reqs *mvsReqs) Max(v1, v2 string) string {
- if cmpVersion(v1, v2) < 0 {
- return v2
- }
- return v1
-}
-
-// cmpVersion implements the comparison for versions in the module loader.
-//
-// It is consistent with semver.Compare except that as a special case,
-// the version "" is considered higher than all other versions.
-// The main module (also known as the target) has no version and must be chosen
-// over other versions of the same module in the module dependency graph.
-func cmpVersion(v1, v2 string) int {
- if v2 == "" {
- if v1 == "" {
- return 0
- }
- return -1
- }
- if v1 == "" {
- return 1
- }
- return semver.Compare(v1, v2)
-}
-
-// isParent reports whether the module modv contains the package with the given
-// path, and if so, returns its relative path within that module.
-func isParent(modv module.Version, pkgPath importPath) (subPath string, ok bool) {
- modBase := modv.BasePath()
- pkgBase, pkgMajor, pkgHasVersion := module.SplitPathVersion(string(pkgPath))
- if !pkgHasVersion {
- pkgBase = string(pkgPath)
- }
-
- if !strings.HasPrefix(pkgBase, modBase) {
- return "", false
- }
- if len(pkgBase) == len(modBase) {
- subPath = "."
- } else if pkgBase[len(modBase)] != '/' {
- return "", false
- } else {
- subPath = pkgBase[len(modBase)+1:]
- }
- // It's potentially a match, but we need to check the major version too.
- if !pkgHasVersion || semver.Major(modv.Version()) == pkgMajor {
- return subPath, true
- }
- return "", false
-}
diff --git a/vendor/cuelang.org/go/cue/load/package.go b/vendor/cuelang.org/go/cue/load/package.go
index 79759a5a..af3edfaf 100644
--- a/vendor/cuelang.org/go/cue/load/package.go
+++ b/vendor/cuelang.org/go/cue/load/package.go
@@ -14,10 +14,6 @@
package load
-import (
- "unicode/utf8"
-)
-
// Package rules:
//
// - the package clause defines a namespace.
@@ -30,20 +26,3 @@ import (
// The contents of a namespace depends on the directory that is selected as the
// starting point to load a package. An instance defines a package-directory
// pair.
-
-// safeArg reports whether arg is a "safe" command-line argument,
-// meaning that when it appears in a command-line, it probably
-// doesn't have some special meaning other than its own name.
-// Obviously args beginning with - are not safe (they look like flags).
-// Less obviously, args beginning with @ are not safe (they look like
-// GNU binutils flagfile specifiers, sometimes called "response files").
-// To be conservative, we reject almost any arg beginning with non-alphanumeric ASCII.
-// We accept leading . _ and / as likely in file system paths.
-// There is a copy of this function in cmd/compile/internal/gc/noder.go.
-func safeArg(name string) bool {
- if name == "" {
- return false
- }
- c := name[0]
- return '0' <= c && c <= '9' || 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || c == '.' || c == '_' || c == '/' || c >= utf8.RuneSelf
-}
diff --git a/vendor/cuelang.org/go/cue/load/registry.go b/vendor/cuelang.org/go/cue/load/registry.go
index c2efeb59..a4ac0562 100644
--- a/vendor/cuelang.org/go/cue/load/registry.go
+++ b/vendor/cuelang.org/go/cue/load/registry.go
@@ -1,95 +1 @@
package load
-
-import (
- "context"
- "fmt"
- "io"
- "os"
- "path"
- "path/filepath"
-
- "cuelabs.dev/go/oci/ociregistry"
- "cuelang.org/go/internal/mod/modfile"
- "cuelang.org/go/internal/mod/modregistry"
- "cuelang.org/go/internal/mod/module"
- "cuelang.org/go/internal/mod/modzip"
-)
-
-// registryClient implements the protocol for talking to
-// the registry server.
-type registryClient struct {
- // TODO caching
- client *modregistry.Client
- cacheDir string
-}
-
-// newRegistryClient returns a registry client that talks to
-// the given base URL and stores downloaded module information
-// in the given cache directory. It assumes that information
-// in the registry is immutable, so if it's in the cache, a module
-// will not be downloaded again.
-func newRegistryClient(registry ociregistry.Interface, cacheDir string) (*registryClient, error) {
- return ®istryClient{
- client: modregistry.NewClient(registry),
- cacheDir: cacheDir,
- }, nil
-}
-
-// fetchModFile returns the parsed contents of the cue.mod/module.cue file
-// for the given module.
-func (c *registryClient) fetchModFile(ctx context.Context, m module.Version) (*modfile.File, error) {
- data, err := c.fetchRawModFile(ctx, m)
- if err != nil {
- return nil, err
- }
- mf, err := modfile.Parse(data, path.Join(m.Path(), "cue.mod/module.cue"))
- if err != nil {
- return nil, err
- }
- return mf, nil
-}
-
-// fetchModFile returns the contents of the cue.mod/module.cue file
-// for the given module without parsing it.
-func (c *registryClient) fetchRawModFile(ctx context.Context, mv module.Version) ([]byte, error) {
- m, err := c.client.GetModule(ctx, mv)
- if err != nil {
- return nil, err
- }
- return m.ModuleFile(ctx)
-}
-
-// getModContents downloads the module with the given version
-// and returns the directory where it's stored.
-func (c *registryClient) getModContents(ctx context.Context, mv module.Version) (string, error) {
- modPath := filepath.Join(c.cacheDir, mv.String())
- if _, err := os.Stat(modPath); err == nil {
- return modPath, nil
- }
- m, err := c.client.GetModule(ctx, mv)
- if err != nil {
- return "", err
- }
- r, err := m.GetZip(ctx)
- if err != nil {
- return "", err
- }
- defer r.Close()
- zipfile := filepath.Join(c.cacheDir, mv.String()+".zip")
- if err := os.MkdirAll(filepath.Dir(zipfile), 0o777); err != nil {
- return "", fmt.Errorf("cannot create parent directory for zip file: %v", err)
- }
- f, err := os.Create(zipfile)
- if err != nil {
- return "", fmt.Errorf("cannot create zipfile: %v", err)
- }
-
- defer f.Close() // TODO check error on close
- if _, err := io.Copy(f, r); err != nil {
- return "", fmt.Errorf("cannot copy data to zip file %q: %v", zipfile, err)
- }
- if err := modzip.Unzip(modPath, mv, zipfile); err != nil {
- return "", fmt.Errorf("cannot unzip %v: %v", mv, err)
- }
- return modPath, nil
-}
diff --git a/vendor/cuelang.org/go/cue/load/search.go b/vendor/cuelang.org/go/cue/load/search.go
index c821d1b7..acf80e96 100644
--- a/vendor/cuelang.org/go/cue/load/search.go
+++ b/vendor/cuelang.org/go/cue/load/search.go
@@ -150,14 +150,12 @@ func (l *loader) matchPackagesInFS(pattern, pkgName string) *match {
}
pkgDir := filepath.Join(root, modDir)
- // TODO(legacy): remove
- pkgDir2 := filepath.Join(root, "pkg")
_ = c.fileSystem.walk(root, func(path string, entry fs.DirEntry, err errors.Error) errors.Error {
if err != nil || !entry.IsDir() {
return nil
}
- if path == pkgDir || path == pkgDir2 {
+ if path == pkgDir {
return skipDir
}
diff --git a/vendor/cuelang.org/go/cue/marshal.go b/vendor/cuelang.org/go/cue/marshal.go
index dabf86d9..6e1bbf91 100644
--- a/vendor/cuelang.org/go/cue/marshal.go
+++ b/vendor/cuelang.org/go/cue/marshal.go
@@ -128,19 +128,20 @@ func (r *Runtime) Unmarshal(b []byte) ([]*Instance, error) {
//
// The stored instances are functionally the same, but preserving of file
// information is only done on a best-effort basis.
-func (r *Runtime) Marshal(instances ...*Instance) (b []byte, err error) {
+func (r *Runtime) Marshal(values ...InstanceOrValue) (b []byte, err error) {
staged := []instanceData{}
done := map[string]int{}
var errs errors.Error
- var stageInstance func(i *Instance) (pos int)
- stageInstance = func(i *Instance) (pos int) {
- if p, ok := done[i.ImportPath]; ok {
+ var stageInstance func(i Value) (pos int)
+ stageInstance = func(i Value) (pos int) {
+ inst := i.BuildInstance()
+ if p, ok := done[inst.ImportPath]; ok {
return p
}
// TODO: support exporting instance
- file, _ := export.Def(r.runtime(), i.inst.ID(), i.root)
+ file, _ := export.Def(r.runtime(), inst.ID(), i.instance().root)
imports := []string{}
file.VisitImports(func(i *ast.ImportDecl) {
for _, spec := range i.Specs {
@@ -149,15 +150,14 @@ func (r *Runtime) Marshal(instances ...*Instance) (b []byte, err error) {
}
})
- if i.PkgName != "" {
- p, name, _ := internal.PackageInfo(file)
- if p == nil {
- pkg := &ast.Package{Name: ast.NewIdent(i.PkgName)}
+ if inst.PkgName != "" {
+ pi := internal.GetPackageInfo(file)
+ if pi.Package == nil {
+ pkg := &ast.Package{Name: ast.NewIdent(inst.PkgName)}
file.Decls = append([]ast.Decl{pkg}, file.Decls...)
- } else if name != i.PkgName {
- // p is guaranteed to be generated by Def, so it is "safe" to
- // modify.
- p.Name = ast.NewIdent(i.PkgName)
+ } else if pi.Name != inst.PkgName {
+ // pi is guaranteed to be generated by Def, so it is "safe" to modify.
+ pi.Package.Name = ast.NewIdent(inst.PkgName)
}
}
@@ -165,12 +165,12 @@ func (r *Runtime) Marshal(instances ...*Instance) (b []byte, err error) {
errs = errors.Append(errs, errors.Promote(err, "marshal"))
filename := "unmarshal"
- if i.inst != nil && len(i.inst.Files) == 1 {
- filename = i.inst.Files[0].Filename
+ if len(inst.Files) == 1 {
+ filename = inst.Files[0].Filename
- dir := i.Dir
- if i.inst != nil && i.inst.Root != "" {
- dir = i.inst.Root
+ dir := inst.Dir
+ if inst.Root != "" {
+ dir = inst.Root
}
if dir != "" {
filename = filepath.FromSlash(filename)
@@ -180,7 +180,7 @@ func (r *Runtime) Marshal(instances ...*Instance) (b []byte, err error) {
}
// TODO: this should probably be changed upstream, but as the path
// is for reference purposes only, this is safe.
- importPath := filepath.ToSlash(i.ImportPath)
+ importPath := filepath.ToSlash(i.instance().ImportPath)
staged = append(staged, instanceData{
Path: importPath,
@@ -194,14 +194,14 @@ func (r *Runtime) Marshal(instances ...*Instance) (b []byte, err error) {
if i == nil || !strings.Contains(imp, ".") {
continue // a builtin package.
}
- stageInstance(i)
+ stageInstance(i.Value())
}
return p
}
- for _, i := range instances {
- staged[stageInstance(i)].Root = true
+ for _, val := range values {
+ staged[stageInstance(val.Value())].Root = true
}
buf := &bytes.Buffer{}
diff --git a/vendor/cuelang.org/go/cue/parser/interface.go b/vendor/cuelang.org/go/cue/parser/interface.go
index af498773..6afca039 100644
--- a/vendor/cuelang.org/go/cue/parser/interface.go
+++ b/vendor/cuelang.org/go/cue/parser/interface.go
@@ -118,8 +118,10 @@ const (
)
// FileOffset specifies the File position info to use.
+//
+// Deprecated: this has no effect.
func FileOffset(pos int) Option {
- return func(p *parser) { p.offset = pos }
+ return func(p *parser) {}
}
// A mode value is a set of flags (or 0).
diff --git a/vendor/cuelang.org/go/cue/parser/parser.go b/vendor/cuelang.org/go/cue/parser/parser.go
index 5a3e336b..c10e28e4 100644
--- a/vendor/cuelang.org/go/cue/parser/parser.go
+++ b/vendor/cuelang.org/go/cue/parser/parser.go
@@ -33,7 +33,6 @@ var debugStr = astinternal.DebugStr
// The parser structure holds the parser's internal state.
type parser struct {
file *token.File
- offset int
errors errors.Error
scanner scanner.Scanner
@@ -68,11 +67,10 @@ type parser struct {
}
func (p *parser) init(filename string, src []byte, mode []Option) {
- p.offset = -1
for _, f := range mode {
f(p)
}
- p.file = token.NewFile(filename, p.offset, len(src))
+ p.file = token.NewFile(filename, -1, len(src))
var m scanner.Mode
if p.mode&parseCommentsMode != 0 {
diff --git a/vendor/cuelang.org/go/cue/path.go b/vendor/cuelang.org/go/cue/path.go
index 41cfd475..cfa03c3d 100644
--- a/vendor/cuelang.org/go/cue/path.go
+++ b/vendor/cuelang.org/go/cue/path.go
@@ -467,9 +467,9 @@ func isHiddenOrDefinition(s string) bool {
return strings.HasPrefix(s, "#") || strings.HasPrefix(s, "_")
}
-// Hid returns a selector for a hidden field. It panics is pkg is empty.
+// Hid returns a selector for a hidden field. It panics if pkg is empty.
// Hidden fields are scoped by package, and pkg indicates for which package
-// the hidden field must apply.For anonymous packages, it must be set to "_".
+// the hidden field must apply. For anonymous packages, it must be set to "_".
func Hid(name, pkg string) Selector {
if !ast.IsValidIdent(name) {
panic(fmt.Sprintf("invalid identifier %s", name))
diff --git a/vendor/cuelang.org/go/cue/scanner/fuzz.go b/vendor/cuelang.org/go/cue/scanner/fuzz.go
index a8b560e4..3ad29c54 100644
--- a/vendor/cuelang.org/go/cue/scanner/fuzz.go
+++ b/vendor/cuelang.org/go/cue/scanner/fuzz.go
@@ -27,7 +27,7 @@ func Fuzz(b []byte) int {
}
var s Scanner
- s.Init(token.NewFile("", 1, len(b)), b, eh, ScanComments)
+ s.Init(token.NewFile("", -1, len(b)), b, eh, ScanComments)
for {
_, tok, _ := s.Scan()
diff --git a/vendor/cuelang.org/go/cue/token/position.go b/vendor/cuelang.org/go/cue/token/position.go
index 577254f8..675f70b1 100644
--- a/vendor/cuelang.org/go/cue/token/position.go
+++ b/vendor/cuelang.org/go/cue/token/position.go
@@ -199,26 +199,35 @@ func toPos(x index) int {
// -----------------------------------------------------------------------------
// File
+// index represents an offset into the file.
+// It's 1-based rather than zero-based so that
+// we can distinguish the zero Pos from a Pos that
+// just has a zero offset.
type index int
// A File has a name, size, and line offset table.
type File struct {
mutex sync.RWMutex
name string // file name as provided to AddFile
- base index // Pos index range for this file is [base...base+size]
- size index // file size as provided to AddFile
+ // base is deprecated and stored only so that [File.Base]
+ // can continue to return the same value passed to [NewFile].
+ base index
+ size index // file size as provided to AddFile
// lines and infos are protected by set.mutex
lines []index // lines contains the offset of the first character for each line (the first entry is always 0)
infos []lineInfo
}
-// NewFile returns a new file.
-func NewFile(filename string, base, size int) *File {
- if base < 0 {
- base = 1
+// NewFile returns a new file with the given OS file name. The size provides the
+// size of the whole file.
+//
+// The second argument is deprecated. It has no effect.
+func NewFile(filename string, deprecatedBase, size int) *File {
+ if deprecatedBase < 0 {
+ deprecatedBase = 1
}
- return &File{sync.RWMutex{}, filename, index(base), index(size), []index{0}, nil}
+ return &File{sync.RWMutex{}, filename, index(deprecatedBase), index(size), []index{0}, nil}
}
// Name returns the file name of file f as registered with AddFile.
@@ -226,12 +235,14 @@ func (f *File) Name() string {
return f.name
}
-// Base returns the base offset of file f as registered with AddFile.
+// Base returns the base offset of file f as passed to NewFile.
+//
+// Deprecated: this method just returns the (deprecated) second argument passed to NewFile.
func (f *File) Base() int {
return int(f.base)
}
-// Size returns the size of file f as registered with AddFile.
+// Size returns the size of file f as passed to NewFile.
func (f *File) Size() int {
return int(f.size)
}
@@ -359,7 +370,7 @@ func (f *File) Pos(offset int, rel RelPos) Pos {
if index(offset) > f.size {
panic("illegal file offset")
}
- return Pos{f, toPos(f.base+index(offset)) + int(rel)}
+ return Pos{f, toPos(1+index(offset)) + int(rel)}
}
// Offset returns the offset for the given file position p;
@@ -367,10 +378,10 @@ func (f *File) Pos(offset int, rel RelPos) Pos {
// f.Offset(f.Pos(offset)) == offset.
func (f *File) Offset(p Pos) int {
x := p.index()
- if x < f.base || x > f.base+index(f.size) {
+ if x < 1 || x > 1+index(f.size) {
panic("illegal Pos value")
}
- return int(x - f.base)
+ return int(x - 1)
}
// Line returns the line number for the given file position p;
@@ -405,7 +416,7 @@ func (f *File) unpack(offset index, adjusted bool) (filename string, line, colum
}
func (f *File) position(p Pos, adjusted bool) (pos Position) {
- offset := p.index() - f.base
+ offset := p.index() - 1
pos.Offset = int(offset)
pos.Filename, pos.Line, pos.Column = f.unpack(offset, adjusted)
return
@@ -418,7 +429,7 @@ func (f *File) position(p Pos, adjusted bool) (pos Position) {
func (f *File) PositionFor(p Pos, adjusted bool) (pos Position) {
x := p.index()
if p != NoPos {
- if x < f.base || x > f.base+f.size {
+ if x < 1 || x > 1+f.size {
panic("illegal Pos value")
}
pos = f.position(p, adjusted)
diff --git a/vendor/cuelang.org/go/cue/types.go b/vendor/cuelang.org/go/cue/types.go
index 015118e0..3aebe38d 100644
--- a/vendor/cuelang.org/go/cue/types.go
+++ b/vendor/cuelang.org/go/cue/types.go
@@ -645,12 +645,17 @@ func Dereference(v Value) Value {
return v
}
- c := n.Conjuncts[0]
- r, _ := c.Expr().(adt.Resolver)
+ env, expr := n.Conjuncts[0].EnvExpr()
+
+ // TODO: consider supporting unwrapping of structs or comprehensions around
+ // a single embedded reference.
+ r, _ := expr.(adt.Resolver)
if r == nil {
return v
}
+ c := adt.MakeRootConjunct(env, expr)
+
ctx := v.ctx()
n, b := ctx.Resolve(c, r)
if b != nil {
@@ -1067,7 +1072,8 @@ func (v hiddenValue) Split() []Value {
}
a := []Value{}
for _, x := range v.v.Conjuncts {
- a = append(a, remakeValue(v, x.Env, x.Expr()))
+ env, expr := x.EnvExpr()
+ a = append(a, remakeValue(v, env, expr))
}
return a
}
@@ -1983,7 +1989,9 @@ func (v Value) ReferencePath() (root Value, p Path) {
ctx := v.ctx()
c := v.v.Conjuncts[0]
- x, path := reference(v.idx, ctx, c.Env, c.Expr())
+ env, expr := c.EnvExpr()
+
+ x, path := reference(v.idx, ctx, env, expr)
if x == nil {
return Value{}, Path{}
}
@@ -2319,8 +2327,7 @@ func (v Value) Expr() (Op, []Value) {
case 1:
// the default case, processed below.
c := v.v.Conjuncts[0]
- env = c.Env
- expr = c.Expr()
+ env, expr = c.EnvExpr()
if w, ok := expr.(*adt.Vertex); ok {
return Value{v.idx, w, v.parent_}.Expr()
}
diff --git a/vendor/cuelang.org/go/encoding/json/json.go b/vendor/cuelang.org/go/encoding/json/json.go
index 69c89865..51c1e71a 100644
--- a/vendor/cuelang.org/go/encoding/json/json.go
+++ b/vendor/cuelang.org/go/encoding/json/json.go
@@ -99,19 +99,17 @@ func extract(path string, b []byte) (ast.Expr, error) {
// The runtime may be nil if Decode isn't used.
func NewDecoder(r *cue.Runtime, path string, src io.Reader) *Decoder {
return &Decoder{
- r: r,
- path: path,
- dec: json.NewDecoder(src),
- offset: 1,
+ r: r,
+ path: path,
+ dec: json.NewDecoder(src),
}
}
// A Decoder converts JSON values to CUE.
type Decoder struct {
- r *cue.Runtime
- path string
- dec *json.Decoder
- offset int
+ r *cue.Runtime
+ path string
+ dec *json.Decoder
}
// Extract converts the current JSON value to a CUE ast. It returns io.EOF
@@ -131,13 +129,12 @@ func (d *Decoder) extract() (ast.Expr, error) {
if err == io.EOF {
return nil, err
}
- offset := d.offset
- d.offset += len(raw)
if err != nil {
- pos := token.NewFile(d.path, offset, len(raw)).Pos(0, 0)
+ pos := token.NewFile(d.path, -1, len(raw)).Pos(0, 0)
return nil, errors.Wrapf(err, pos, "invalid JSON for file %q", d.path)
}
- expr, err := parser.ParseExpr(d.path, []byte(raw), parser.FileOffset(offset))
+ expr, err := parser.ParseExpr(d.path, []byte(raw))
+
if err != nil {
return nil, err
}
diff --git a/vendor/cuelang.org/go/encoding/protobuf/parse.go b/vendor/cuelang.org/go/encoding/protobuf/parse.go
index 8881a031..93e15cb7 100644
--- a/vendor/cuelang.org/go/encoding/protobuf/parse.go
+++ b/vendor/cuelang.org/go/encoding/protobuf/parse.go
@@ -61,7 +61,7 @@ func (s *Extractor) parse(filename string, src interface{}) (p *protoConverter,
return nil, errors.Newf(token.NoPos, "protobuf: %v", err)
}
- tfile := token.NewFile(filename, 0, len(b))
+ tfile := token.NewFile(filename, -1, len(b))
tfile.SetLinesForContent(b)
p = &protoConverter{
diff --git a/vendor/cuelang.org/go/encoding/protobuf/protobuf.go b/vendor/cuelang.org/go/encoding/protobuf/protobuf.go
index 2d4a35ef..46071511 100644
--- a/vendor/cuelang.org/go/encoding/protobuf/protobuf.go
+++ b/vendor/cuelang.org/go/encoding/protobuf/protobuf.go
@@ -87,11 +87,9 @@ package protobuf
import (
"os"
"path/filepath"
- "sort"
+ "slices"
"strings"
- "github.com/mpvl/unique"
-
"cuelang.org/go/cue/ast"
"cuelang.org/go/cue/build"
"cuelang.org/go/cue/errors"
@@ -300,20 +298,20 @@ func (b *Extractor) Instances() (instances []*build.Instance, err error) {
for _, p := range b.imports {
instances = append(instances, p)
- sort.Strings(p.ImportPaths)
- unique.Strings(&p.ImportPaths)
+ slices.Sort(p.ImportPaths)
+ p.ImportPaths = slices.Compact(p.ImportPaths)
for _, i := range p.ImportPaths {
if imp := b.imports[i]; imp != nil {
p.Imports = append(p.Imports, imp)
}
}
- sort.Slice(p.Files, func(i, j int) bool {
- return p.Files[i].Filename < p.Files[j].Filename
+ slices.SortFunc(p.Files, func(a, b *ast.File) int {
+ return strings.Compare(a.Filename, b.Filename)
})
}
- sort.Slice(instances, func(i, j int) bool {
- return instances[i].ImportPath < instances[j].ImportPath
+ slices.SortFunc(instances, func(a, b *build.Instance) int {
+ return strings.Compare(a.ImportPath, b.ImportPath)
})
if err != nil {
diff --git a/vendor/cuelang.org/go/encoding/protobuf/textproto/decoder.go b/vendor/cuelang.org/go/encoding/protobuf/textproto/decoder.go
index d5f95472..e917342d 100644
--- a/vendor/cuelang.org/go/encoding/protobuf/textproto/decoder.go
+++ b/vendor/cuelang.org/go/encoding/protobuf/textproto/decoder.go
@@ -77,7 +77,7 @@ func (d *Decoder) Parse(schema cue.Value, filename string, b []byte) (ast.Expr,
// dec.errs = nil
- f := token.NewFile(filename, 0, len(b))
+ f := token.NewFile(filename, -1, len(b))
f.SetLinesForContent(b)
dec.file = f
diff --git a/vendor/cuelang.org/go/internal/attrs.go b/vendor/cuelang.org/go/internal/attrs.go
index 44707bb1..6e50e3f1 100644
--- a/vendor/cuelang.org/go/internal/attrs.go
+++ b/vendor/cuelang.org/go/internal/attrs.go
@@ -148,7 +148,7 @@ func ParseAttrBody(pos token.Pos, s string) (a Attr) {
// Create temporary token.File so that scanner has something
// to work with.
// TODO it's probably possible to do this without allocations.
- tmpFile := token.NewFile("", 0, len(s))
+ tmpFile := token.NewFile("", -1, len(s))
if len(s) > 0 {
tmpFile.AddLine(len(s) - 1)
}
diff --git a/vendor/cuelang.org/go/internal/cli/cli.go b/vendor/cuelang.org/go/internal/cli/cli.go
index f6ffd251..d4702e4c 100644
--- a/vendor/cuelang.org/go/internal/cli/cli.go
+++ b/vendor/cuelang.org/go/internal/cli/cli.go
@@ -32,7 +32,7 @@ func ParseValue(pos token.Pos, name, str string, k cue.Kind) (x ast.Expr, errs e
expr, err = parser.ParseExpr(name, str)
if err != nil {
errs = errors.Wrapf(err, pos,
- "invalid number for environment variable %s", name)
+ "invalid number for injection tag %q", name)
}
}
@@ -41,7 +41,7 @@ func ParseValue(pos token.Pos, name, str string, k cue.Kind) (x ast.Expr, errs e
b, ok := boolValues[str]
if !ok {
errs = errors.Append(errs, errors.Newf(pos,
- "invalid boolean value %q for environment variable %s", str, name))
+ "invalid boolean value %q for injection tag %q", str, name))
} else if expr != nil || k&cue.StringKind != 0 {
// Convert into an expression
bl := ast.NewBool(b)
@@ -70,7 +70,7 @@ func ParseValue(pos token.Pos, name, str string, k cue.Kind) (x ast.Expr, errs e
return x, nil
case errs == nil:
return nil, errors.Newf(pos,
- "invalid type for environment variable %s", name)
+ "invalid type for injection tag %q", name)
}
return nil, errs
}
diff --git a/vendor/cuelang.org/go/internal/core/adt/adt.go b/vendor/cuelang.org/go/internal/core/adt/adt.go
index 5d114b83..1d6ff0d4 100644
--- a/vendor/cuelang.org/go/internal/core/adt/adt.go
+++ b/vendor/cuelang.org/go/internal/core/adt/adt.go
@@ -39,7 +39,7 @@ func Resolve(ctx *OpContext, c Conjunct) *Vertex {
v = x
case Resolver:
- r, err := ctx.resolveState(c, x, finalized)
+ r, err := ctx.resolveState(c, x, attempt(finalized, allKnown))
if err != nil {
v = err
break
@@ -108,14 +108,14 @@ type Evaluator interface {
// evaluate evaluates the underlying expression. If the expression
// is incomplete, it may record the error in ctx and return nil.
- evaluate(ctx *OpContext, state vertexStatus) Value
+ evaluate(ctx *OpContext, state combinedFlags) Value
}
// A Resolver represents a reference somewhere else within a tree that resolves
// a value.
type Resolver interface {
Node
- resolve(ctx *OpContext, state vertexStatus) *Vertex
+ resolve(ctx *OpContext, state combinedFlags) *Vertex
}
type YieldFunc func(env *Environment)
@@ -251,6 +251,9 @@ func (x *Ellipsis) expr() Expr {
}
return x.Value
}
+func (*ConjunctGroup) declNode() {}
+func (*ConjunctGroup) elemNode() {}
+func (*ConjunctGroup) expr() {}
var top = &Top{}
@@ -338,6 +341,7 @@ func (*Comprehension) elemNode() {}
func (*Vertex) node() {}
func (*Conjunction) node() {}
+func (*ConjunctGroup) node() {}
func (*Disjunction) node() {}
func (*BoundValue) node() {}
func (*Builtin) node() {}
diff --git a/vendor/cuelang.org/go/internal/core/adt/closed2.go b/vendor/cuelang.org/go/internal/core/adt/closed2.go
index c2c1b611..ce25d470 100644
--- a/vendor/cuelang.org/go/internal/core/adt/closed2.go
+++ b/vendor/cuelang.org/go/internal/core/adt/closed2.go
@@ -32,6 +32,8 @@ func isComplexStruct(ctx *OpContext, v *Vertex) bool {
// TODO: cleanup code and error messages. Reduce duplication in some related
// code.
func verifyArc2(ctx *OpContext, f Feature, v *Vertex, isClosed bool) (found bool, err *Bottom) {
+ unreachableForDev(ctx)
+
// Don't check computed, temporary vertices.
if v.Label == InvalidLabel {
return true, nil
diff --git a/vendor/cuelang.org/go/internal/core/adt/composite.go b/vendor/cuelang.org/go/internal/core/adt/composite.go
index d315e282..657851a9 100644
--- a/vendor/cuelang.org/go/internal/core/adt/composite.go
+++ b/vendor/cuelang.org/go/internal/core/adt/composite.go
@@ -127,7 +127,7 @@ func (e *Environment) evalCached(c *OpContext, x Expr) Value {
// Save and restore errors to ensure that only relevant errors are
// associated with the cash.
err := c.errs
- v = c.evalState(x, partial) // TODO: should this be finalized?
+ v = c.evalState(x, require(partial, allKnown)) // TODO: should this be finalized?
c.e, c.src = env, src
c.errs = err
if b, ok := v.(*Bottom); !ok || !b.IsIncomplete() {
@@ -157,12 +157,16 @@ type Vertex struct {
// eval: *, BaseValue: * -- finalized
//
state *nodeContext
- // TODO: move the following status fields to nodeContext.
+
+ // cc manages the closedness logic for this Vertex. It is created
+ // by rootCloseContext.
+ // TODO: move back to nodeContext, but be sure not to clone it.
+ cc *closeContext
// Label is the feature leading to this vertex.
Label Feature
- // TODO: move the following status fields to nodeContext.
+ // TODO: move the following fields to nodeContext.
// status indicates the evaluation progress of this vertex.
status vertexStatus
@@ -192,11 +196,6 @@ type Vertex struct {
// or any other operation that relies on the set of arcs being constant.
LockArcs bool
- // disallowedField means that this arc is not allowed according
- // to the closedness rules. This is used to avoid duplicate error reporting.
- // TODO: perhaps rename to notAllowedErrorEmitted.
- disallowedField bool
-
// IsDynamic signifies whether this struct is computed as part of an
// expression and not part of the static evaluation tree.
// Used for cycle detection.
@@ -245,6 +244,21 @@ type Vertex struct {
Structs []*StructInfo
}
+// rootCloseContext creates a closeContext for this Vertex or returns the
+// existing one.
+func (v *Vertex) rootCloseContext() *closeContext {
+ if v.cc == nil {
+ v.cc = &closeContext{
+ group: &ConjunctGroup{},
+ parent: nil,
+ src: v,
+ parentConjuncts: v,
+ }
+ v.cc.incDependent(ROOT, nil) // matched in REF(decrement:nodeDone)
+ }
+ return v.cc
+}
+
// newInlineVertex creates a Vertex that is needed for computation, but for
// which there is no CUE path defined from the root Vertex.
func (ctx *OpContext) newInlineVertex(parent *Vertex, v BaseValue, a ...Conjunct) *Vertex {
@@ -259,9 +273,21 @@ func (ctx *OpContext) newInlineVertex(parent *Vertex, v BaseValue, a ...Conjunct
// updateArcType updates v.ArcType if t is more restrictive.
func (v *Vertex) updateArcType(t ArcType) {
- if t < v.ArcType {
- v.ArcType = t
+ if t >= v.ArcType {
+ return
+ }
+ if v.ArcType == ArcNotPresent {
+ return
}
+ if s := v.state; s != nil && s.ctx.isDevVersion() {
+ c := s.ctx
+ if s.scheduler.frozen.meets(arcTypeKnown) {
+ parent := v.Parent
+ parent.reportFieldCycleError(c, c.Source().Pos(), v.Label)
+ return
+ }
+ }
+ v.ArcType = t
}
// isDefined indicates whether this arc is a "value" field, and not a constraint
@@ -324,6 +350,22 @@ const (
// We could also define types for required fields and potentially lets.
)
+func (a ArcType) String() string {
+ switch a {
+ case ArcMember:
+ return "Member"
+ case ArcOptional:
+ return "Optional"
+ case ArcRequired:
+ return "Required"
+ case ArcPending:
+ return "Pending"
+ case ArcNotPresent:
+ return "NotPresent"
+ }
+ return fmt.Sprintf("ArcType(%d)", a)
+}
+
// definitelyExists reports whether an arc is a constraint or member arc.
// TODO: we should check that users of this call ensure there are no
// ArcPendings.
@@ -647,13 +689,13 @@ func (v *Vertex) Finalize(c *OpContext) {
// case the caller did not handle existing errors in the context.
err := c.errs
c.errs = nil
- c.unify(v, finalized)
+ c.unify(v, final(finalized, allKnown))
c.errs = err
}
// CompleteArcs ensures the set of arcs has been computed.
func (v *Vertex) CompleteArcs(c *OpContext) {
- c.unify(v, conjuncts)
+ c.unify(v, final(conjuncts, allKnown))
}
func (v *Vertex) AddErr(ctx *OpContext, b *Bottom) {
@@ -667,6 +709,7 @@ func (v *Vertex) SetValue(ctx *OpContext, value BaseValue) *Bottom {
func (v *Vertex) setValue(ctx *OpContext, state vertexStatus, value BaseValue) *Bottom {
v.BaseValue = value
+ // TODO: should not set status here for new evaluator.
v.updateStatus(state)
return nil
}
@@ -887,6 +930,8 @@ func (v *Vertex) Elems() []*Vertex {
// GetArc returns a Vertex for the outgoing arc with label f. It creates and
// ads one if it doesn't yet exist.
func (v *Vertex) GetArc(c *OpContext, f Feature, t ArcType) (arc *Vertex, isNew bool) {
+ unreachableForDev(c)
+
arc = v.Lookup(f)
if arc != nil {
arc.updateArcType(t)
@@ -951,7 +996,11 @@ func (v *Vertex) hasConjunct(c Conjunct) (added bool) {
default:
v.ArcType = ArcMember
}
- for _, x := range v.Conjuncts {
+ return hasConjunct(v.Conjuncts, c)
+}
+
+func hasConjunct(cs []Conjunct, c Conjunct) bool {
+ for _, x := range cs {
// TODO: disregard certain fields from comparison (e.g. Refs)?
if x.CloseInfo.closeInfo == c.CloseInfo.closeInfo &&
x.x == c.x &&
@@ -963,6 +1012,8 @@ func (v *Vertex) hasConjunct(c Conjunct) (added bool) {
}
func (n *nodeContext) addConjunction(c Conjunct, index int) {
+ unreachableForDev(n.ctx)
+
// NOTE: This does not split binary expressions for comprehensions.
// TODO: split for comprehensions and rewrap?
if x, ok := c.Elem().(*BinaryExpr); ok && x.Op == AndOp {
@@ -978,7 +1029,7 @@ func (n *nodeContext) addConjunction(c Conjunct, index int) {
func (v *Vertex) addConjunctUnchecked(c Conjunct) {
index := len(v.Conjuncts)
v.Conjuncts = append(v.Conjuncts, c)
- if n := v.state; n != nil {
+ if n := v.state; n != nil && !n.ctx.isDevVersion() {
n.addConjunction(c, index)
// TODO: can we remove notifyConjunct here? This method is only
@@ -992,6 +1043,8 @@ func (v *Vertex) addConjunctUnchecked(c Conjunct) {
// addConjunctDynamic adds a conjunct to a vertex and immediately evaluates
// it, whilst doing the same for any vertices on the notify list, recursively.
func (n *nodeContext) addConjunctDynamic(c Conjunct) {
+ unreachableForDev(n.ctx)
+
n.node.Conjuncts = append(n.node.Conjuncts, c)
n.addExprConjunct(c, partial)
n.notifyConjunct(c)
@@ -999,6 +1052,8 @@ func (n *nodeContext) addConjunctDynamic(c Conjunct) {
}
func (n *nodeContext) notifyConjunct(c Conjunct) {
+ unreachableForDev(n.ctx)
+
for _, rec := range n.notify {
arc := rec.v
if !arc.hasConjunct(c) {
@@ -1109,12 +1164,39 @@ func (c *Conjunct) Elem() Elem {
}
}
-// Expr retrieves the expression form of the contained conjunct.
-// If it is a field or comprehension, it will return its associated value.
+// Expr retrieves the expression form of the contained conjunct. If it is a
+// field or comprehension, it will return its associated value. This is only to
+// be used for syntactic operations where evaluation of the expression is not
+// required. To get an expression paired with the correct environment, use
+// EnvExpr.
+//
+// TODO: rename to RawExpr.
func (c *Conjunct) Expr() Expr {
return ToExpr(c.x)
}
+// EnvExpr returns the expression form of the contained conjunct alongside an
+// Environment in which this expression should be evaluated.
+func (c Conjunct) EnvExpr() (*Environment, Expr) {
+ return EnvExpr(c.Env, c.Elem())
+}
+
+// EnvExpr returns the expression represented by Elem alongside an Environment
+// with the necessary adjustments in which the resulting expression can be
+// evaluated.
+func EnvExpr(env *Environment, elem Elem) (*Environment, Expr) {
+ for {
+ if c, ok := elem.(*Comprehension); ok {
+ env = linkChildren(env, c)
+ c := MakeConjunct(env, c.Value, CloseInfo{})
+ elem = c.Elem()
+ continue
+ }
+ break
+ }
+ return env, ToExpr(elem)
+}
+
// ToExpr extracts the underlying expression for a Node. If something is already
// an Expr, it will return it as is, if it is a field, it will return its value,
// and for comprehensions it returns the yielded struct.
diff --git a/vendor/cuelang.org/go/internal/core/adt/comprehension.go b/vendor/cuelang.org/go/internal/core/adt/comprehension.go
index 0f0cbeb7..31ccd805 100644
--- a/vendor/cuelang.org/go/internal/core/adt/comprehension.go
+++ b/vendor/cuelang.org/go/internal/core/adt/comprehension.go
@@ -156,8 +156,10 @@ func (n *nodeContext) insertComprehension(
x := c.Value
- ci = ci.SpawnEmbed(c)
- ci.closeInfo.span |= ComprehensionSpan
+ if !n.ctx.isDevVersion() {
+ ci = ci.SpawnEmbed(c)
+ ci.closeInfo.span |= ComprehensionSpan
+ }
var decls []Decl
switch v := ToExpr(x).(type) {
@@ -184,7 +186,6 @@ func (n *nodeContext) insertComprehension(
conjunct := MakeConjunct(env, c, ci)
n.node.state.insertFieldUnchecked(f.Label, ArcPending, conjunct)
fields = append(fields, f)
- // TODO: adjust ci to embed?
case *LetField:
// TODO: consider merging this case with the LetField case.
@@ -251,13 +252,19 @@ func (n *nodeContext) insertComprehension(
}
}
- n.comprehensions = append(n.comprehensions, envYield{
- envComprehension: ec,
- leaf: c,
- env: env,
- id: ci,
- expr: x,
- })
+ if n.ctx.isDevVersion() {
+ t := n.scheduleTask(handleComprehension, env, x, ci)
+ t.comp = ec
+ t.leaf = c
+ } else {
+ n.comprehensions = append(n.comprehensions, envYield{
+ envComprehension: ec,
+ leaf: c,
+ env: env,
+ id: ci,
+ expr: x,
+ })
+ }
}
type compState struct {
@@ -274,14 +281,14 @@ func (c *OpContext) yield(
node *Vertex, // errors are associated with this node
env *Environment, // env for field for which this yield is called
comp *Comprehension,
- state vertexStatus,
+ state combinedFlags,
f YieldFunc, // called for every result
) *Bottom {
s := &compState{
ctx: c,
comp: comp,
f: f,
- state: state,
+ state: state.vertexStatus(),
}
y := comp.Clauses[0]
@@ -321,6 +328,8 @@ func (s *compState) yield(env *Environment) (ok bool) {
// embeddings before inserting the results to ensure that the order of
// evaluation does not matter.
func (n *nodeContext) injectComprehensions(state vertexStatus) (progress bool) {
+ unreachableForDev(n.ctx)
+
workRemaining := false
// We use variables, instead of range, as the list may grow dynamically.
@@ -371,6 +380,8 @@ func (n *nodeContext) injectComprehensions(state vertexStatus) (progress bool) {
// as iterating over the node in which they are defined. Such comprehensions
// are legal as long as they do not modify the arc set of the node.
func (n *nodeContext) injectSelfComprehensions(state vertexStatus) {
+ unreachableForDev(n.ctx)
+
// We use variables, instead of range, as the list may grow dynamically.
for i := 0; i < len(n.selfComprehensions); i++ {
n.processComprehension(&n.selfComprehensions[i], state)
@@ -391,7 +402,7 @@ func (n *nodeContext) processComprehension(d *envYield, state vertexStatus) *Bot
envs = append(envs, env)
}
- if err := ctx.yield(d.vertex, d.env, d.comp, state, f); err != nil {
+ if err := ctx.yield(d.vertex, d.env, d.comp, oldOnly(state), f); err != nil {
if err.IsIncomplete() {
return err
}
@@ -426,14 +437,34 @@ func (n *nodeContext) processComprehension(d *envYield, state vertexStatus) *Bot
v := n.node
for c := d.leaf; c.parent != nil; c = c.parent {
v.updateArcType(c.arcType)
+ if v.ArcType == ArcNotPresent {
+ parent := v.Parent
+ b := parent.reportFieldCycleError(ctx, d.comp.Syntax.Pos(), v.Label)
+ d.envComprehension.vertex.state.addBottom(b)
+ ctx.current().err = b
+ ctx.current().state = taskFAILED
+ return nil
+ }
v = c.arc
}
id := d.id
for _, env := range d.envs {
+ if n.node.ArcType == ArcNotPresent {
+ b := n.node.reportFieldCycleError(ctx, d.comp.Syntax.Pos(), n.node.Label)
+ ctx.current().err = b
+ n.yield()
+ return nil
+ }
+
env = linkChildren(env, d.leaf)
- n.addExprConjunct(Conjunct{env, d.expr, id}, state)
+
+ if ctx.isDevVersion() {
+ n.scheduleConjunct(Conjunct{env, d.expr, id}, id)
+ } else {
+ n.addExprConjunct(Conjunct{env, d.expr, id}, state)
+ }
}
return nil
diff --git a/vendor/cuelang.org/go/internal/core/adt/conjunct.go b/vendor/cuelang.org/go/internal/core/adt/conjunct.go
new file mode 100644
index 00000000..816dce93
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/conjunct.go
@@ -0,0 +1,594 @@
+// Copyright 2023 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+import "fmt"
+
+// This file contains functionality for processing conjuncts to insert the
+// corresponding values in the Vertex.
+//
+// Conjuncts are divided into two classes:
+// - literal values that need no evaluation: these are inserted directly into
+// the Vertex.
+// - field or value expressions that need to be evaluated: these are inserted
+// as a task into the Vertex' associated scheduler for later evaluation.
+// The implementation of these tasks can be found in tasks.go.
+//
+// The main entrypoint is scheduleConjunct.
+
+// scheduleConjunct splits c into parts to be incrementally processed and queues
+// these parts up for processing. It will itself not cause recursive processing.
+func (n *nodeContext) scheduleConjunct(c Conjunct, id CloseInfo) {
+ // Explanation of switch statement:
+ //
+ // A Conjunct can be a leaf or, through a ConjunctGroup, a tree. The tree
+ // reflects the history of how the conjunct was inserted in terms of
+ // definitions and embeddings. This, in turn, is used to compute closedness.
+ //
+ // Once all conjuncts for a Vertex have been collected, this tree contains
+ // all the information needed to trace its histroy: if a Vertex is
+ // referenced in an expression, this tree can be used to insert the
+ // conjuncts keeping closedness in mind.
+ //
+ // In the collection phase, however, this is not sufficient. CUE computes
+ // conjuncts "out of band". This means that conjuncts accumulate in
+ // different parts of the tree in an indeterminate order. closeContext is
+ // used to account for this.
+ //
+ // Basically, if the closeContext associated with c belongs to n, we take
+ // it that the conjunct needs to be inserted at the point in the tree
+ // associated by this closeContext. If, on the other hand, the closeContext
+ // is not defined or does not belong to this node, we take this conjunct
+ // is inserted by means of a reference. In this case we assume that the
+ // computation of the tree has completed and the tree can be used to reflect
+ // the closedness structure.
+ //
+ // TODO: once the evaluator is done and all tests pass, consider having
+ // two different entry points to account for these cases.
+ switch cc := c.CloseInfo.cc; {
+ case cc == nil || cc.src != n.node:
+ // In this case, a Conjunct is inserted from another Arc. If the
+ // conjunct represents an embedding or definition, we need to create a
+ // new closeContext to represent this.
+ if id.cc == nil {
+ id.cc = n.node.rootCloseContext()
+ }
+ if id.cc == cc {
+ panic("inconsistent state")
+ }
+ var t closeNodeType
+ if c.CloseInfo.FromDef {
+ t |= closeDef
+ }
+ if c.CloseInfo.FromEmbed {
+ t |= closeEmbed
+ }
+ if t != 0 {
+ id, _ = id.spawnCloseContext(t)
+ }
+ if !id.cc.done {
+ id.cc.incDependent(DEFER, nil)
+ defer id.cc.decDependent(n.ctx, DEFER, nil)
+ }
+
+ if id.cc.src != n.node {
+ panic("inconsistent state")
+ }
+ default:
+
+ // In this case, the conjunct is inserted as the result of an expansion
+ // of a conjunct in place, not a reference. In this case, we must use
+ // the cached closeContext.
+ id.cc = cc
+
+ // Note this subtlety: we MUST take the cycle info from c when this is
+ // an in place evaluated node, otherwise we must take that of id.
+ id.CycleInfo = c.CloseInfo.CycleInfo
+ }
+
+ if id.cc.needsCloseInSchedule != nil {
+ dep := id.cc.needsCloseInSchedule
+ id.cc.needsCloseInSchedule = nil
+ defer id.cc.decDependent(n.ctx, EVAL, dep)
+ }
+
+ env := c.Env
+
+ if id.cc.isDef {
+ n.node.Closed = true
+ }
+
+ switch x := c.Elem().(type) {
+ case *ConjunctGroup:
+ for _, c := range *x {
+ // TODO(perf): can be one loop
+
+ cc := c.CloseInfo.cc
+ if cc.src == n.node && cc.needsCloseInSchedule != nil {
+ // We need to handle this specifically within the ConjunctGroup
+ // loop, because multiple conjuncts may be using the same root
+ // closeContext. This can be merged once Vertex.Conjuncts is an
+ // interface, requiring any list to be a root conjunct.
+
+ dep := cc.needsCloseInSchedule
+ cc.needsCloseInSchedule = nil
+ defer cc.decDependent(n.ctx, EVAL, dep)
+ }
+ }
+ for _, c := range *x {
+ n.scheduleConjunct(c, id)
+ }
+
+ case *Vertex:
+ if x.IsData() {
+ n.insertValueConjunct(env, x, id)
+ } else {
+ n.scheduleVertexConjuncts(c, x, id)
+ }
+
+ case Value:
+ n.insertValueConjunct(env, x, id)
+
+ case *BinaryExpr:
+ if x.Op == AndOp {
+ n.scheduleConjunct(MakeConjunct(env, x.X, id), id)
+ n.scheduleConjunct(MakeConjunct(env, x.Y, id), id)
+ return
+ }
+ // Even though disjunctions and conjunctions are excluded, the result
+		// may still be a list in the case of list arithmetic. This could
+ // be a scalar value only once this is no longer supported.
+ n.scheduleTask(handleExpr, env, x, id)
+
+ case *StructLit:
+ n.scheduleStruct(env, x, id)
+
+ case *ListLit:
+ env := &Environment{
+ Up: env,
+ Vertex: n.node,
+ }
+ n.scheduleTask(handleListLit, env, x, id)
+
+ case *DisjunctionExpr:
+ panic("unimplemented")
+ // n.addDisjunction(env, x, id)
+
+ case *Comprehension:
+ // always a partial comprehension.
+ n.insertComprehension(env, x, id)
+
+ case Resolver:
+ n.scheduleTask(handleResolver, env, x, id)
+
+ case Evaluator:
+ // Interpolation, UnaryExpr, CallExpr
+ n.scheduleTask(handleExpr, env, x, id)
+
+ default:
+ panic("unreachable")
+ }
+
+ n.ctx.stats.Conjuncts++
+}
+
+// scheduleStruct records all elements of this conjunct in the structure and
+// then processes it. If an element needs to be inserted for evaluation,
+// it may be scheduled.
+func (n *nodeContext) scheduleStruct(env *Environment,
+ s *StructLit,
+ ci CloseInfo) {
+ n.updateCyclicStatus(ci)
+
+ // NOTE: This is a crucial point in the code:
+ // Unification dereferencing happens here. The child nodes are set to
+ // an Environment linked to the current node. Together with the De Bruijn
+ // indices, this determines to which Vertex a reference resolves.
+
+ childEnv := &Environment{
+ Up: env,
+ Vertex: n.node,
+ }
+
+ hasEmbed := false
+ hasEllipsis := false
+
+ // shouldClose := ci.cc.isDef || ci.cc.isClosedOnce
+ // s.Init()
+
+ // TODO: do we still need to AddStruct and do we still need to Disable?
+ parent := n.node.AddStruct(s, childEnv, ci)
+ parent.Disable = true // disable until processing is done.
+ ci.IsClosed = false
+
+ // TODO: precompile
+loop1:
+ for _, d := range s.Decls {
+ switch d.(type) {
+ case *Ellipsis:
+ hasEllipsis = true
+ break loop1
+ }
+ }
+
+ // TODO(perf): precompile whether struct has embedding.
+loop2:
+ for _, d := range s.Decls {
+ switch d.(type) {
+ case *Comprehension, Expr:
+ // No need to increment and decrement, as there will be at least
+ // one entry.
+ ci, _ = ci.spawnCloseContext(0)
+ // Note: adding a count is not needed here, as there will be an
+ // embed spawn below.
+ hasEmbed = true
+ break loop2
+ }
+ }
+
+ // First add fixed fields and schedule expressions.
+ for _, d := range s.Decls {
+ switch x := d.(type) {
+ case *Field:
+ if x.Label.IsString() && x.ArcType == ArcMember {
+ n.aStruct = s
+ n.aStructID = ci
+ }
+ fc := MakeConjunct(childEnv, x, ci)
+ // fc.CloseInfo.cc = nil // TODO: should we add this?
+ n.insertArc(x.Label, x.ArcType, fc, ci, true)
+
+ case *LetField:
+ lc := MakeConjunct(childEnv, x, ci)
+ n.insertArc(x.Label, ArcMember, lc, ci, true)
+
+ case *Comprehension:
+ ci, cc := ci.spawnCloseContext(closeEmbed)
+ cc.incDependent(DEFER, nil)
+ defer cc.decDependent(n.ctx, DEFER, nil)
+ n.insertComprehension(childEnv, x, ci)
+ hasEmbed = true
+
+ case *Ellipsis:
+ // Can be added unconditionally to patterns.
+ ci.cc.isDef = false
+ ci.cc.isClosed = false
+
+ case *DynamicField:
+ if x.ArcType == ArcMember {
+ n.aStruct = s
+ n.aStructID = ci
+ }
+ n.scheduleTask(handleDynamic, childEnv, x, ci)
+
+ case *BulkOptionalField:
+
+ // All do not depend on each other, so can be added at once.
+ n.scheduleTask(handlePatternConstraint, childEnv, x, ci)
+
+ case Expr:
+ // TODO: perhaps special case scalar Values to avoid creating embedding.
+ ci, cc := ci.spawnCloseContext(closeEmbed)
+
+ // TODO: do we need to increment here?
+ cc.incDependent(DEFER, nil) // decrement deferred below
+ defer cc.decDependent(n.ctx, DEFER, nil)
+
+ ec := MakeConjunct(childEnv, x, ci)
+ n.scheduleConjunct(ec, ci)
+ hasEmbed = true
+ }
+ }
+ if hasEllipsis {
+ ci.cc.hasEllipsis = true
+ }
+ if !hasEmbed {
+ n.aStruct = s
+ n.aStructID = ci
+ ci.cc.hasNonTop = true
+ }
+
+ // TODO: probably no longer necessary.
+ parent.Disable = false
+}
+
+// scheduleVertexConjuncts injects the conjuncts of src n. If src was not fully
+// evaluated, it subscribes dst for future updates.
+func (n *nodeContext) scheduleVertexConjuncts(c Conjunct, arc *Vertex, closeInfo CloseInfo) {
+ // Don't add conjuncts if a node is referring to itself.
+ if n.node == arc {
+ return
+ }
+
+ // We need to ensure that each arc is only unified once (or at least) a
+	// bounded time, with each conjunct. Comprehensions, for instance, may
+ // distribute a value across many values that get unified back into the
+	// same value. If such a value is a disjunction, then a disjunction of N
+ // disjuncts will result in a factor N more unifications for each
+ // occurrence of such value, resulting in exponential running time. This
+	// is especially common for values that are used as a type.
+ //
+ // However, unification is idempotent, so each such conjunct only needs
+ // to be unified once. This cache checks for this and prevents an
+ // exponential blowup in such case.
+ //
+ // TODO(perf): this cache ensures the conjuncts of an arc at most once
+ // per ID. However, we really need to add the conjuncts of an arc only
+ // once total, and then add the close information once per close ID
+ // (pointer can probably be shared). Aside from being more performant,
+ // this is probably the best way to guarantee that conjunctions are
+ // linear in this case.
+
+ ciKey := closeInfo
+ ciKey.Refs = nil
+ ciKey.Inline = false
+ key := arcKey{arc, ciKey}
+ for _, k := range n.arcMap {
+ if key == k {
+ return
+ }
+ }
+ n.arcMap = append(n.arcMap, key)
+
+ if IsDef(c.Expr()) {
+ // TODO: or should we always insert the wrapper (for errors)?
+ ci, dc := closeInfo.spawnCloseContext(closeDef)
+ closeInfo = ci
+
+ dc.incDependent(DEFER, nil) // decrement deferred below
+ defer dc.decDependent(n.ctx, DEFER, nil)
+ }
+
+ if state := arc.getState(n.ctx); state != nil {
+ state.addNotify2(n.node, closeInfo)
+ }
+
+ for i := 0; i < len(arc.Conjuncts); i++ {
+ c := arc.Conjuncts[i]
+
+ // Note that we are resetting the tree here. We hereby assume that
+ // closedness conflicts resulting from unifying the referenced arc were
+ // already caught there and that we can ignore further errors here.
+ // c.CloseInfo = closeInfo
+
+ // We can use the original, but we know it will not be used
+
+ n.scheduleConjunct(c, closeInfo)
+ }
+}
+
+func (n *nodeContext) addNotify2(v *Vertex, c CloseInfo) []receiver {
+ n.completeNodeTasks()
+
+ // No need to do the notification mechanism if we are already complete.
+ old := n.notify
+ if n.meets(allAncestorsProcessed) {
+ return old
+ }
+
+ // Create a "root" closeContext to reflect the entry point of the
+ // reference into n.node relative to cc within v. After that, we can use
+ // assignConjunct to add new conjuncts.
+
+ // TODO: dedup: only add if t does not already exist. First check if this
+ // is even possible by adding a panic.
+ root := n.node.rootCloseContext()
+ if root.isDecremented {
+ return old
+ }
+
+ for _, r := range n.notify {
+ if r.v == v && r.cc == c.cc {
+ return old
+ }
+ }
+
+ cc := c.cc
+
+ if root.linkNotify(v, cc, c.CycleInfo) {
+ n.notify = append(n.notify, receiver{v, cc})
+ n.completeNodeTasks()
+ }
+
+ return old
+}
+
+// Literal conjuncts
+
+func (n *nodeContext) insertValueConjunct(env *Environment, v Value, id CloseInfo) {
+ n.updateCyclicStatus(id)
+
+ ctx := n.ctx
+
+ switch x := v.(type) {
+ case *Vertex:
+ if m, ok := x.BaseValue.(*StructMarker); ok {
+ n.aStruct = x
+ n.aStructID = id
+ if m.NeedClose {
+ // TODO: In the new evaluator this is used to mark a struct
+ // as closed in the debug output. Once the old evaluator is
+ // gone, we could simplify this.
+ id.IsClosed = true
+ if ctx.isDevVersion() {
+ var cc *closeContext
+ id, cc = id.spawnCloseContext(0)
+ cc.isClosedOnce = true
+ }
+ }
+ }
+
+ if !x.IsData() {
+ c := MakeConjunct(env, x, id)
+ n.scheduleVertexConjuncts(c, x, id)
+ return
+ }
+
+ // TODO: evaluate value?
+ switch v := x.BaseValue.(type) {
+ default:
+ panic(fmt.Sprintf("invalid type %T", x.BaseValue))
+
+ case *ListMarker:
+ // TODO: arguably we know now that the type _must_ be a list.
+ n.scheduleTask(handleListVertex, env, x, id)
+
+ return
+
+ case *StructMarker:
+ for _, a := range x.Arcs {
+ if a.ArcType != ArcMember {
+ continue
+ }
+ // TODO(errors): report error when this is a regular field.
+ c := MakeConjunct(nil, a, id)
+ n.insertArc(a.Label, a.ArcType, c, id, true)
+ }
+
+ case Value:
+ n.insertValueConjunct(env, v, id)
+ }
+
+ return
+
+ case *Bottom:
+ id.cc.hasNonTop = true
+ n.addBottom(x)
+ return
+
+ case *Builtin:
+ id.cc.hasNonTop = true
+ if v := x.BareValidator(); v != nil {
+ n.insertValueConjunct(env, v, id)
+ return
+ }
+ }
+
+ if !n.updateNodeType(v.Kind(), v, id) {
+ return
+ }
+
+ switch x := v.(type) {
+ case *Disjunction:
+ n.addDisjunctionValue(env, x, id)
+
+ case *Conjunction:
+ for _, x := range x.Values {
+ n.insertValueConjunct(env, x, id)
+ }
+
+ case *Top:
+ n.hasTop = true
+ id.cc.hasTop = true
+
+ case *BasicType:
+ id.cc.hasNonTop = true
+
+ case *BoundValue:
+ id.cc.hasNonTop = true
+ switch x.Op {
+ case LessThanOp, LessEqualOp:
+ if y := n.upperBound; y != nil {
+ n.upperBound = nil
+ v := SimplifyBounds(ctx, n.kind, x, y)
+ if err := valueError(v); err != nil {
+ err.AddPosition(v)
+ err.AddPosition(n.upperBound)
+ err.AddClosedPositions(id)
+ }
+ n.insertValueConjunct(env, v, id)
+ return
+ }
+ n.upperBound = x
+
+ case GreaterThanOp, GreaterEqualOp:
+ if y := n.lowerBound; y != nil {
+ n.lowerBound = nil
+ v := SimplifyBounds(ctx, n.kind, x, y)
+ if err := valueError(v); err != nil {
+ err.AddPosition(v)
+ err.AddPosition(n.lowerBound)
+ err.AddClosedPositions(id)
+ }
+ n.insertValueConjunct(env, v, id)
+ return
+ }
+ n.lowerBound = x
+
+ case EqualOp, NotEqualOp, MatchOp, NotMatchOp:
+ // This check serves as simplifier, but also to remove duplicates.
+ k := 0
+ match := false
+ for _, c := range n.checks {
+ if y, ok := c.(*BoundValue); ok {
+ switch z := SimplifyBounds(ctx, n.kind, x, y); {
+ case z == y:
+ match = true
+ case z == x:
+ continue
+ }
+ }
+ n.checks[k] = c
+ k++
+ }
+ n.checks = n.checks[:k]
+ if !match {
+ n.checks = append(n.checks, x)
+ }
+ return
+ }
+
+ case Validator:
+ // This check serves as simplifier, but also to remove duplicates.
+ for i, y := range n.checks {
+ if b := SimplifyValidator(ctx, x, y); b != nil {
+ n.checks[i] = b
+ return
+ }
+ }
+ n.updateNodeType(x.Kind(), x, id)
+ n.checks = append(n.checks, x)
+
+ case *Vertex:
+ // handled above.
+
+ case Value: // *NullLit, *BoolLit, *NumLit, *StringLit, *BytesLit, *Builtin
+ if y := n.scalar; y != nil {
+ if b, ok := BinOp(ctx, EqualOp, x, y).(*Bool); !ok || !b.B {
+ n.reportConflict(x, y, x.Kind(), y.Kind(), n.scalarID, id)
+ }
+ break
+ }
+ n.scalar = x
+ n.scalarID = id
+ n.signal(scalarKnown)
+
+ default:
+ panic(fmt.Sprintf("unknown value type %T", x))
+ }
+
+ if n.lowerBound != nil && n.upperBound != nil {
+ if u := SimplifyBounds(ctx, n.kind, n.lowerBound, n.upperBound); u != nil {
+ if err := valueError(u); err != nil {
+ err.AddPosition(n.lowerBound)
+ err.AddPosition(n.upperBound)
+ err.AddClosedPositions(id)
+ }
+ n.lowerBound = nil
+ n.upperBound = nil
+ n.insertValueConjunct(env, u, id)
+ }
+ }
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/constraints.go b/vendor/cuelang.org/go/internal/core/adt/constraints.go
index c2e07239..ee5708a7 100644
--- a/vendor/cuelang.org/go/internal/core/adt/constraints.go
+++ b/vendor/cuelang.org/go/internal/core/adt/constraints.go
@@ -109,8 +109,8 @@ func (n *nodeContext) insertConstraint(pattern Value, c Conjunct) bool {
// matchPattern reports whether f matches pattern. The result reflects
// whether unification of pattern with f converted to a CUE value succeeds.
-func matchPattern(n *nodeContext, pattern Value, f Feature) bool {
- if pattern == nil {
+func matchPattern(ctx *OpContext, pattern Value, f Feature) bool {
+ if pattern == nil || !f.IsRegular() {
return false
}
@@ -124,10 +124,10 @@ func matchPattern(n *nodeContext, pattern Value, f Feature) bool {
if int64(f.Index()) == MaxIndex {
f = 0
} else if f.IsString() {
- label = f.ToValue(n.ctx)
+ label = f.ToValue(ctx)
}
- return matchPatternValue(n.ctx, pattern, f, label)
+ return matchPatternValue(ctx, pattern, f, label)
}
// matchPatternValue matches a concrete value against f. label must be the
@@ -150,7 +150,7 @@ func matchPatternValue(ctx *OpContext, pattern Value, f Feature, label Value) (r
if x == cycle {
err := ctx.NewPosf(pos(pattern), "cyclic pattern constraint")
for _, c := range ctx.vertex.Conjuncts {
- err.AddPosition(c.Elem())
+ addPositions(err, c)
}
ctx.AddBottom(&Bottom{
Err: err,
diff --git a/vendor/cuelang.org/go/internal/core/adt/context.go b/vendor/cuelang.org/go/internal/core/adt/context.go
index d3f2e345..528eb8c5 100644
--- a/vendor/cuelang.org/go/internal/core/adt/context.go
+++ b/vendor/cuelang.org/go/internal/core/adt/context.go
@@ -30,6 +30,7 @@ import (
"cuelang.org/go/cue/errors"
"cuelang.org/go/cue/stats"
"cuelang.org/go/cue/token"
+ "cuelang.org/go/internal"
)
// Debug sets whether extra aggressive checking should be done.
@@ -146,7 +147,7 @@ func (c *OpContext) Logf(v *Vertex, format string, args ...interface{}) {
}
// PathToString creates a pretty-printed path of the given list of features.
-func (c *OpContext) PathToString(r Runtime, path []Feature) string {
+func (c *OpContext) PathToString(path []Feature) string {
var b strings.Builder
for i, f := range path {
if i > 0 {
@@ -174,6 +175,8 @@ type Runtime interface {
// LoadType retrieves a previously stored CUE expression for a given Go
// type if available.
LoadType(t reflect.Type) (src ast.Expr, expr Expr, ok bool)
+
+ EvaluatorVersion() internal.EvaluatorVersion
}
type Config struct {
@@ -187,9 +190,11 @@ func New(v *Vertex, cfg *Config) *OpContext {
panic("nil Runtime")
}
ctx := &OpContext{
- Runtime: cfg.Runtime,
- Format: cfg.Format,
- vertex: v,
+ Runtime: cfg.Runtime,
+ Format: cfg.Format,
+ vertex: v,
+ Version: cfg.Runtime.EvaluatorVersion(),
+ taskContext: schedConfig,
}
if v != nil {
ctx.e = &Environment{Up: nil, Vertex: v}
@@ -197,15 +202,9 @@ func New(v *Vertex, cfg *Config) *OpContext {
return ctx
}
-type EvaluatorVersion int
-
-const (
- DefaultVersion EvaluatorVersion = iota
-
- // The DevVersion is used for new implementations of the evaluator that
- // do not cover all features of the CUE language yet.
- DevVersion
-)
+func (c *OpContext) isDevVersion() bool {
+ return c.Version == internal.DevVersion
+}
// An OpContext implements CUE's unification operation. It only
// operates on values that are created with the Runtime with which an OpContext
@@ -215,7 +214,9 @@ type OpContext struct {
Runtime
Format func(Node) string
- Version EvaluatorVersion
+ Version internal.EvaluatorVersion // Copied from Runtime
+
+ taskContext
nest int
@@ -268,6 +269,10 @@ type OpContext struct {
// as an error if this is true.
// TODO: strictly separate validators and functions.
IsValidator bool
+
+ // ErrorGraphs contains an analysis, represented as a Mermaid graph, for
+ // each node that has an error.
+ ErrorGraphs map[string]string
}
func (c *OpContext) CloseInfo() CloseInfo { return c.ci }
@@ -324,7 +329,7 @@ func (c *OpContext) Env(upCount int32) *Environment {
func (c *OpContext) relNode(upCount int32) *Vertex {
e := c.e.up(c, upCount)
- c.unify(e.Vertex, partial)
+ c.unify(e.Vertex, oldOnly(partial))
return e.Vertex
}
@@ -471,10 +476,10 @@ func (c *OpContext) PopArc(saved *Vertex) {
// Should only be used to insert Conjuncts. TODO: perhaps only return Conjuncts
// and error.
func (c *OpContext) Resolve(x Conjunct, r Resolver) (*Vertex, *Bottom) {
- return c.resolveState(x, r, finalized)
+ return c.resolveState(x, r, final(finalized, allKnown))
}
-func (c *OpContext) resolveState(x Conjunct, r Resolver, state vertexStatus) (*Vertex, *Bottom) {
+func (c *OpContext) resolveState(x Conjunct, r Resolver, state combinedFlags) (*Vertex, *Bottom) {
s := c.PushConjunct(x)
arc := r.resolve(c, state)
@@ -497,7 +502,7 @@ func (c *OpContext) resolveState(x Conjunct, r Resolver, state vertexStatus) (*V
func (c *OpContext) Lookup(env *Environment, r Resolver) (*Vertex, *Bottom) {
s := c.PushState(env, r.Source())
- arc := r.resolve(c, partial)
+ arc := r.resolve(c, oldOnly(partial))
err := c.PopState(s)
@@ -527,8 +532,11 @@ func (c *OpContext) Validate(check Validator, value Value) *Bottom {
// concrete returns the concrete value of x after evaluating it.
// msg is used to mention the context in which an error occurred, if any.
func (c *OpContext) concrete(env *Environment, x Expr, msg interface{}) (result Value, complete bool) {
+ s := c.PushState(env, x.Source())
- w, complete := c.Evaluate(env, x)
+ state := require(partial, concreteKnown)
+ w := c.evalState(x, state)
+ _ = c.PopState(s)
w, ok := c.getDefault(w)
if !ok {
@@ -536,6 +544,7 @@ func (c *OpContext) concrete(env *Environment, x Expr, msg interface{}) (result
}
v := Unwrap(w)
+ complete = w != nil
if !IsConcrete(v) {
complete = false
b := c.NewErrf("non-concrete value %v in operand to %s", w, msg)
@@ -543,11 +552,7 @@ func (c *OpContext) concrete(env *Environment, x Expr, msg interface{}) (result
v = b
}
- if !complete {
- return v, complete
- }
-
- return v, true
+ return v, complete
}
// getDefault resolves a disjunction to a single value. If there is no default
@@ -590,7 +595,7 @@ func (c *OpContext) getDefault(v Value) (result Value, ok bool) {
func (c *OpContext) Evaluate(env *Environment, x Expr) (result Value, complete bool) {
s := c.PushState(env, x.Source())
- val := c.evalState(x, partial)
+ val := c.evalState(x, final(partial, concreteKnown))
complete = true
@@ -616,7 +621,7 @@ func (c *OpContext) Evaluate(env *Environment, x Expr) (result Value, complete b
return val, true
}
-func (c *OpContext) evaluateRec(v Conjunct, state vertexStatus) Value {
+func (c *OpContext) evaluateRec(v Conjunct, state combinedFlags) Value {
x := v.Expr()
s := c.PushConjunct(v)
@@ -637,7 +642,7 @@ func (c *OpContext) evaluateRec(v Conjunct, state vertexStatus) Value {
// value evaluates expression v within the current environment. The result may
// be nil if the result is incomplete. value leaves errors untouched to that
// they can be collected by the caller.
-func (c *OpContext) value(x Expr, state vertexStatus) (result Value) {
+func (c *OpContext) value(x Expr, state combinedFlags) (result Value) {
v := c.evalState(x, state)
v, _ = c.getDefault(v)
@@ -645,7 +650,7 @@ func (c *OpContext) value(x Expr, state vertexStatus) (result Value) {
return v
}
-func (c *OpContext) evalState(v Expr, state vertexStatus) (result Value) {
+func (c *OpContext) evalState(v Expr, state combinedFlags) (result Value) {
savedSrc := c.src
c.src = v.Source()
err := c.errs
@@ -659,7 +664,7 @@ func (c *OpContext) evalState(v Expr, state vertexStatus) (result Value) {
switch b.Code {
case IncompleteError:
case CycleError:
- if state == partial {
+ if state.vertexStatus() == partial || c.isDevVersion() {
break
}
fallthrough
@@ -706,6 +711,43 @@ func (c *OpContext) evalState(v Expr, state vertexStatus) (result Value) {
c.ci, _ = n.markCycle(arc, nil, x, c.ci)
}
c.ci.Inline = true
+
+ if c.isDevVersion() {
+ if s := arc.getState(c); s != nil {
+ needs := state.conditions()
+ runMode := state.runMode()
+
+ arc.unify(c, needs|arcTypeKnown, attemptOnly) // to set scalar
+
+ if runMode == finalize {
+ // arc.unify(c, needs, attemptOnly) // to set scalar
+ // Freeze node.
+ arc.state.freeze(needs)
+ } else {
+ arc.unify(c, needs, runMode)
+ }
+
+ v := arc
+ if v.ArcType == ArcPending {
+ if v.status == evaluating {
+ for ; v.Parent != nil && v.ArcType == ArcPending; v = v.Parent {
+ }
+ err := c.Newf("cycle with field %v", x)
+ b := &Bottom{Code: CycleError, Err: err}
+ v.setValue(c, v.status, b)
+ return b
+ // TODO: use this instead, as is usual for incomplete errors,
+ // and also move this block one scope up to also apply to
+ // defined arcs. In both cases, though, doing so results in
+ // some errors to be misclassified as evaluation error.
+ // c.AddBottom(b)
+ // return nil
+ }
+ c.undefinedFieldError(v, IncompleteError)
+ return nil
+ }
+ }
+ }
v := c.evaluate(arc, x, state)
c.ci = saved
return v
@@ -732,7 +774,7 @@ func (c *OpContext) wrapCycleError(src ast.Node, b *Bottom) *Bottom {
// unifyNode returns a possibly partially evaluated node value.
//
// TODO: maybe return *Vertex, *Bottom
-func (c *OpContext) unifyNode(v Expr, state vertexStatus) (result Value) {
+func (c *OpContext) unifyNode(v Expr, state combinedFlags) (result Value) {
savedSrc := c.src
c.src = v.Source()
err := c.errs
@@ -782,8 +824,15 @@ func (c *OpContext) unifyNode(v Expr, state vertexStatus) (result Value) {
return nil
}
- if v.isUndefined() || state > v.status {
- c.unify(v, state)
+ if c.isDevVersion() {
+ if n := v.getState(c); n != nil {
+ // Always yield to not get spurious errors.
+ n.process(arcTypeKnown, yield)
+ }
+ } else {
+ if v.isUndefined() || state.vertexStatus() > v.status {
+ c.unify(v, state)
+ }
}
return v
@@ -794,7 +843,13 @@ func (c *OpContext) unifyNode(v Expr, state vertexStatus) (result Value) {
}
}
-func (c *OpContext) lookup(x *Vertex, pos token.Pos, l Feature, state vertexStatus) *Vertex {
+func (c *OpContext) lookup(x *Vertex, pos token.Pos, l Feature, flags combinedFlags) *Vertex {
+ if c.isDevVersion() {
+ return x.lookup(c, pos, l, flags)
+ }
+
+ state := flags.vertexStatus()
+
if l == InvalidLabel || x == nil {
// TODO: is it possible to have an invalid label here? Maybe through the
// API?
@@ -863,9 +918,9 @@ func (c *OpContext) lookup(x *Vertex, pos token.Pos, l Feature, state vertexStat
// hasAllConjuncts, but that are finalized too early, get conjuncts
// processed beforehand.
if state > a.status {
- c.unify(a, state)
+ c.unify(a, deprecated(c, state))
} else if a.state != nil {
- c.unify(a, partial)
+ c.unify(a, deprecated(c, partial))
}
if a.IsConstraint() {
@@ -973,7 +1028,7 @@ func pos(x Node) token.Pos {
return x.Source().Pos()
}
-func (c *OpContext) node(orig Node, x Expr, scalar bool, state vertexStatus) *Vertex {
+func (c *OpContext) node(orig Node, x Expr, scalar bool, state combinedFlags) *Vertex {
// TODO: always get the vertex. This allows a whole bunch of trickery
// down the line.
v := c.unifyNode(x, state)
@@ -1002,14 +1057,8 @@ func (c *OpContext) node(orig Node, x Expr, scalar bool, state vertexStatus) *Ve
switch nv := v.(type) {
case nil:
- switch orig.(type) {
- case *ForClause:
- c.addErrf(IncompleteError, pos(x),
- "cannot range over %s (incomplete)", x)
- default:
- c.addErrf(IncompleteError, pos(x),
- "%s undefined (%s is incomplete)", orig, x)
- }
+ c.addErrf(IncompleteError, pos(x),
+ "%s undefined (%s is incomplete)", orig, x)
return emptyNode
case *Bottom:
@@ -1026,14 +1075,8 @@ func (c *OpContext) node(orig Node, x Expr, scalar bool, state vertexStatus) *Ve
default:
if kind := v.Kind(); kind&StructKind != 0 {
- switch orig.(type) {
- case *ForClause:
- c.addErrf(IncompleteError, pos(x),
- "cannot range over %s (incomplete type %s)", x, kind)
- default:
- c.addErrf(IncompleteError, pos(x),
- "%s undefined as %s is incomplete (type %s)", orig, x, kind)
- }
+ c.addErrf(IncompleteError, pos(x),
+ "%s undefined as %s is incomplete (type %s)", orig, x, kind)
return emptyNode
} else if !ok {
diff --git a/vendor/cuelang.org/go/internal/core/adt/cycle.go b/vendor/cuelang.org/go/internal/core/adt/cycle.go
index bfa7dae9..4536cf56 100644
--- a/vendor/cuelang.org/go/internal/core/adt/cycle.go
+++ b/vendor/cuelang.org/go/internal/core/adt/cycle.go
@@ -455,9 +455,7 @@ outer:
a := p.Conjuncts
count := 0
for _, c := range a {
- if !c.CloseInfo.IsCyclic {
- count++
- }
+ count += getNonCyclicCount(c)
}
if !alreadyCycle {
count--
@@ -478,13 +476,36 @@ outer:
return ci, false
}
+func getNonCyclicCount(c Conjunct) int {
+ switch a, ok := c.x.(*ConjunctGroup); {
+ case ok:
+ count := 0
+ for _, c := range *a {
+ count += getNonCyclicCount(c)
+ }
+ return count
+
+ case !c.CloseInfo.IsCyclic:
+ return 1
+
+ default:
+ return 0
+ }
+}
+
// updateCyclicStatus looks for proof of non-cyclic conjuncts to override
// a structural cycle.
func (n *nodeContext) updateCyclicStatus(c CloseInfo) {
if !c.IsCyclic {
n.hasNonCycle = true
for _, c := range n.cyclicConjuncts {
- n.addVertexConjuncts(c.c, c.arc, false)
+ if n.ctx.isDevVersion() {
+ ci := c.c.CloseInfo
+ ci.cc = n.node.rootCloseContext()
+ n.scheduleVertexConjuncts(c.c, c.arc, ci)
+ } else {
+ n.addVertexConjuncts(c.c, c.arc, false)
+ }
}
n.cyclicConjuncts = n.cyclicConjuncts[:0]
}
diff --git a/vendor/cuelang.org/go/internal/core/adt/debug.go b/vendor/cuelang.org/go/internal/core/adt/debug.go
new file mode 100644
index 00000000..54ad8ffb
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/debug.go
@@ -0,0 +1,678 @@
+// Copyright 2023 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "io"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+)
+
+// RecordDebugGraph records debug output in ctx if there was an anomaly
+// discovered.
+func RecordDebugGraph(ctx *OpContext, v *Vertex, name string) {
+ graph, hasError := CreateMermaidGraph(ctx, v, true)
+ if hasError {
+ if ctx.ErrorGraphs == nil {
+ ctx.ErrorGraphs = map[string]string{}
+ }
+ path := ctx.PathToString(v.Path())
+ ctx.ErrorGraphs[path] = graph
+ }
+}
+
+// OpenNodeGraph takes a given mermaid graph and opens it in the system default
+// browser.
+func OpenNodeGraph(title, path, code, out, graph string) {
+ err := os.MkdirAll(path, 0755)
+ if err != nil {
+ log.Fatal(err)
+ }
+ url := filepath.Join(path, "graph.html")
+
+ w, err := os.Create(url)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer w.Close()
+
+ data := struct {
+ Title string
+ Code string
+ Out string
+ Graph string
+ }{
+ Title: title,
+ Code: code,
+ Out: out,
+ Graph: graph,
+ }
+
+ tmpl := template.Must(template.New("").Parse(`
+
+
+
+ {{.Title}}
+
+
+
+
+
+ {{.Graph}}
+
+
+
+`))
+
+ err = tmpl.Execute(w, data)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ openBrowser(url)
+}
+
+// openDebugGraph opens a browser with a graph of the state of the given Vertex
+// and all its dependencies that have not completed processing.
+// DO NOT DELETE: this is used to insert during debugging of the evaluator
+// to inspect a node.
+func openDebugGraph(ctx *OpContext, v *Vertex, name string) {
+ graph, _ := CreateMermaidGraph(ctx, v, true)
+ path := filepath.Join(".debug", "TestX", name)
+ OpenNodeGraph(name, path, "in", "out", graph)
+}
+
+// depKind is a type of dependency that is tracked with incDependent and
+// decDependent. For each there should be matching pairs passed to these
+// functions. The debugger, when used, tracks and verifies that these
+// dependencies are balanced.
+type depKind int
+
+const (
+ // PARENT dependencies are used to track the completion of parent
+ // closedContexts within the closedness tree.
+ PARENT depKind = iota + 1
+
+ // ARC dependencies are used to track the completion of corresponding
+ // closedContexts in parent Vertices.
+ ARC
+
+ // NOTIFY dependencies keep a note while dependent conjuncts are collected
+ NOTIFY // root node of source
+
+ // TASK dependencies are used to track the completion of a task.
+ TASK
+
+ // EVAL tracks that the conjunct associated with a closeContext has been
+ // inserted using scheduleConjunct. A closeContext may not be deleted
+ // as long as the conjunct has not been evaluated yet.
+ // This prevents a node from being released if an ARC decrement happens
+ // before a node is evaluated.
+ EVAL
+
+ // ROOT dependencies are used to track that all nodes of parents are
+ // added to a tree.
+ ROOT // Always refers to self.
+
+ // INIT dependencies are used to hold ownership of a closeContext during
+ // initialization and prevent it from being finalized when scheduling a
+ // node's conjuncts.
+ INIT
+
+ // DEFER is used to track recursive processing of a node.
+ DEFER // Always refers to self.
+
+ // TEST is used for testing notifications.
+ TEST // Always refers to self.
+)
+
+func (k depKind) String() string {
+ switch k {
+ case PARENT:
+ return "PARENT"
+ case ARC:
+ return "ARC"
+ case NOTIFY:
+ return "NOTIFY"
+ case TASK:
+ return "TASK"
+ case EVAL:
+ return "EVAL"
+ case ROOT:
+ return "ROOT"
+
+ case INIT:
+ return "INIT"
+ case DEFER:
+ return "DEFER"
+ case TEST:
+ return "TEST"
+ }
+ panic("unreachable")
+}
+
+// ccDep is used to record counters which is used for debugging only.
+// It is purpose is to be precise about matching inc/dec as well as to be able
+// to traverse dependency.
+type ccDep struct {
+ dependency *closeContext
+ kind depKind
+ decremented bool
+
+ // task keeps a reference to a task for TASK dependencies.
+ task *task
+ // taskID indicates the sequence number of a task within a scheduler.
+ taskID int
+}
+
+// DebugDeps enables dependency tracking for debugging purposes.
+// It is off by default, as it adds a significant overhead.
+//
+// TODO: hook this init CUE_DEBUG, once we have set this up as a single
+// environment variable. For instance, CUE_DEBUG=matchdeps=1.
+var DebugDeps = false
+
+func (c *closeContext) addDependent(kind depKind, dependant *closeContext) *ccDep {
+ if !DebugDeps {
+ return nil
+ }
+
+ if dependant == nil {
+ dependant = c
+ }
+
+ if Verbosity > 1 {
+ var state *nodeContext
+ if c.src != nil && c.src.state != nil {
+ state = c.src.state
+ } else if dependant != nil && dependant.src != nil && dependant.src.state != nil {
+ state = dependant.src.state
+ }
+ if state != nil {
+ state.Logf("INC(%s, %d) %v; %p (parent: %p) <= %p\n", kind, c.conjunctCount, c.Label(), c, c.parent, dependant)
+ } else {
+ log.Printf("INC(%s) %v %p parent: %p %d\n", kind, c.Label(), c, c.parent, c.conjunctCount)
+ }
+ }
+
+ dep := &ccDep{kind: kind, dependency: dependant}
+ c.dependencies = append(c.dependencies, dep)
+
+ return dep
+}
+
+// matchDecrement checks that this decrement matches a previous increment.
+func (c *closeContext) matchDecrement(v *Vertex, kind depKind, dependant *closeContext) {
+ if !DebugDeps {
+ return
+ }
+
+ if dependant == nil {
+ dependant = c
+ }
+
+ if Verbosity > 1 {
+ if v.state != nil {
+ v.state.Logf("DEC(%s) %v %p %d\n", kind, c.Label(), c, c.conjunctCount)
+ } else {
+ log.Printf("DEC(%s) %v %p %d\n", kind, c.Label(), c, c.conjunctCount)
+ }
+ }
+
+ for _, d := range c.dependencies {
+ if d.kind != kind {
+ continue
+ }
+ if d.dependency != dependant {
+ continue
+ }
+ // Only one typ-dependant pair possible.
+ if d.decremented {
+ // There might be a duplicate entry, so continue searching.
+ continue
+ }
+
+ d.decremented = true
+ return
+ }
+
+ panic(fmt.Sprintf("unmatched decrement: %s", kind))
+}
+
+// mermaidContext is used to create a dependency analysis for a node.
+type mermaidContext struct {
+ ctx *OpContext
+ v *Vertex
+
+ all bool
+
+ hasError bool
+
+ // roots maps the root closeContext of any Vertex to the analysis data
+ // for that Vertex.
+ roots map[*closeContext]*mermaidVertex
+
+ // processed indicates whether the node in question has been processed
+ // by the dependency analysis.
+ processed map[*closeContext]bool
+
+ // inConjuncts indicates whether a node is explicitly referenced by
+ // a Conjunct. These nodes are visualized with an additional circle.
+ inConjuncts map[*closeContext]bool
+
+ // ccID maps a closeContext to a unique ID.
+ ccID map[*closeContext]string
+
+ w io.Writer
+
+ // vertices lists an analysis of all nodes related to the analyzed node.
+ // The first node is the node being analyzed itself.
+ vertices []*mermaidVertex
+}
+
+type mermaidVertex struct {
+ f Feature
+ w *bytes.Buffer
+ tasks *bytes.Buffer
+ intra *bytes.Buffer
+}
+
+// CreateMermaidGraph creates an analysis of relations and values involved in
+// nodes with unbalanced increments. The graph is in Mermaid format.
+func CreateMermaidGraph(ctx *OpContext, v *Vertex, all bool) (graph string, hasError bool) {
+ if !DebugDeps {
+ return "", false
+ }
+
+ buf := &strings.Builder{}
+
+ m := &mermaidContext{
+ ctx: ctx,
+ v: v,
+ roots: map[*closeContext]*mermaidVertex{},
+ processed: map[*closeContext]bool{},
+ inConjuncts: map[*closeContext]bool{},
+ ccID: map[*closeContext]string{},
+ w: buf,
+ all: all,
+ }
+
+ io.WriteString(m.w, "graph TD\n")
+ io.WriteString(m.w, " classDef err fill:#e01010,stroke:#000000,stroke-width:3,font-size:medium\n")
+
+ indent(m.w, 1)
+ fmt.Fprintf(m.w, "style %s stroke-width:5\n\n", m.vertexID(v))
+ // Trigger descent on first vertex. This may include other vertices when
+ // traversing closeContexts if they have dependencies on such vertices.
+ m.vertex(v)
+
+ // Close and flush all collected vertices.
+ for i, v := range m.vertices {
+ v.closeVertex()
+ if i == 0 || len(m.ccID) > 0 {
+ m.w.Write(v.w.Bytes())
+ }
+ }
+
+ return buf.String(), m.hasError
+}
+
+// vertex creates a blob of Mermaid graph representing one vertex. It has
+// the following shape (where ptr(x) means pointer of x):
+//
+// subgraph ptr(v)
+// %% root note if ROOT has not been decremented.
+// root((cc1)) -|R|-> ptr(cc1)
+//
+// %% closedness graph dependencies
+// ptr(cc1)
+// ptr(cc2) -|P|-> ptr(cc1)
+// ptr(cc2) -|E|-> ptr(cc1) %% mid schedule
+//
+// %% tasks
+// subgraph tasks
+// ptr(cc3)
+// ptr(cc4)
+// ptr(cc5)
+// end
+//
+// %% outstanding tasks and the contexts they depend on
+// ptr(cc3) -|T|-> ptr(cc2)
+//
+// subgraph notifications
+// ptr(cc6)
+// ptr(cc7)
+// end
+// end
+// %% arcs from nodes to nodes in other vertices
+// ptr(cc1) -|A|-> ptr(cc10)
+// ptr(vx) -|N|-> ptr(cc11)
+//
+//
+// A vertex has the following name: path(v); done
+//
+// Each closeContext has the following info: ptr(cc); cc.count
+func (m *mermaidContext) vertex(v *Vertex) *mermaidVertex {
+ root := v.rootCloseContext()
+
+ vc := m.roots[root]
+ if vc != nil {
+ return vc
+ }
+
+ vc = &mermaidVertex{
+ f: v.Label,
+ w: &bytes.Buffer{},
+ intra: &bytes.Buffer{},
+ }
+ m.vertices = append(m.vertices, vc)
+
+ m.tagReferencedConjuncts(v.Conjuncts)
+
+ m.roots[root] = vc
+ w := vc.w
+
+ var status string
+ switch {
+ case v.status == finalized:
+ status = "finalized"
+ case v.state == nil:
+ status = "ready"
+ default:
+ status = v.state.scheduler.state.String()
+ }
+ path := m.vertexPath(v)
+ if v.ArcType != ArcMember {
+ path += fmt.Sprintf("/%v", v.ArcType)
+ }
+
+ indentOnNewline(w, 1)
+ fmt.Fprintf(w, "subgraph %s[%s: %s]\n", m.vertexID(v), path, status)
+
+ m.cc(root)
+
+ return vc
+}
+
+func (m *mermaidContext) tagReferencedConjuncts(a []Conjunct) {
+ for _, c := range a {
+ m.inConjuncts[c.CloseInfo.cc] = true
+
+ if g, ok := c.x.(*ConjunctGroup); ok {
+ m.tagReferencedConjuncts([]Conjunct(*g))
+ }
+ }
+}
+
+func (v *mermaidVertex) closeVertex() {
+ w := v.w
+
+ if v.tasks != nil {
+ indent(v.tasks, 2)
+ fmt.Fprintf(v.tasks, "end\n")
+ w.Write(v.tasks.Bytes())
+ }
+
+ // TODO: write all notification sources (or is this just the node?)
+
+ indent(w, 1)
+ fmt.Fprintf(w, "end\n")
+}
+
+func (m *mermaidContext) task(d *ccDep) string {
+ v := d.dependency.src
+
+ // This must already exist.
+ vc := m.vertex(v)
+
+ if vc.tasks == nil {
+ vc.tasks = &bytes.Buffer{}
+ indentOnNewline(vc.tasks, 2)
+ fmt.Fprintf(vc.tasks, "subgraph %s_tasks[tasks]\n", m.vertexID(v))
+ }
+
+ if v != d.task.node.node {
+ panic("inconsistent task")
+ }
+ taskID := fmt.Sprintf("%s_%d", m.vertexID(v), d.taskID)
+ var state string
+ var completes condition
+ var kind string
+ if d.task != nil {
+ state = d.task.state.String()[:2]
+ completes = d.task.completes
+ kind = d.task.run.name
+ }
+ indentOnNewline(vc.tasks, 3)
+ fmt.Fprintf(vc.tasks, "%s(%d", taskID, d.taskID)
+ indentOnNewline(vc.tasks, 4)
+ io.WriteString(vc.tasks, state)
+ indentOnNewline(vc.tasks, 4)
+ io.WriteString(vc.tasks, kind)
+ indentOnNewline(vc.tasks, 4)
+ fmt.Fprintf(vc.tasks, "%x)\n", completes)
+
+ if s := d.task.blockedOn; s != nil {
+ m.vertex(s.node.node)
+ fmt.Fprintf(m.w, "%s_tasks == BLOCKED ==> %s\n", m.vertexID(s.node.node), taskID)
+ }
+
+ return taskID
+}
+
+func (m *mermaidContext) cc(cc *closeContext) {
+ if m.processed[cc] {
+ return
+ }
+ m.processed[cc] = true
+
+ // This must already exist.
+ v := m.vertex(cc.src)
+
+ // Dependencies at different scope levels.
+ global := m.w
+ node := v.w
+
+ for _, d := range cc.dependencies {
+ indentLevel := 2
+ var w io.Writer
+ var name, link string
+
+ switch {
+ case !d.decremented:
+ link = fmt.Sprintf(`--%s-->`, d.kind.String())
+ case m.all:
+ link = fmt.Sprintf("-. %s .->", d.kind.String()[0:1])
+ default:
+ continue
+ }
+
+ // Only include still outstanding nodes.
+ switch d.kind {
+ case PARENT:
+ w = node
+ name = m.pstr(d.dependency)
+ case EVAL:
+ if cc.Label().IsLet() {
+ // Do not show eval links for let nodes, as they never depend
+ // on the parent node. Alternatively, link them to the root
+ // node instead.
+ return
+ }
+ fallthrough
+ case ARC, NOTIFY:
+ w = global
+ indentLevel = 1
+ name = m.pstr(d.dependency)
+
+ case TASK:
+ w = node
+ taskID := m.task(d)
+ name = fmt.Sprintf("%s((%d))", taskID, d.taskID)
+ case ROOT, INIT:
+ w = node
+ src := cc.src
+ if v.f != src.Label {
+ panic("incompatible labels")
+ }
+ name = fmt.Sprintf("root_%s", m.vertexID(src))
+ }
+
+ if w != nil {
+ dst := m.pstr(cc)
+ indent(w, indentLevel)
+ fmt.Fprintf(w, "%s %s %s\n", name, link, dst)
+ }
+
+ // If the references count is 0, all direct dependencies must have
+ // completed as well. In this case, descending into each of them should
+ // not end up printing anything. In case of any bugs, these nodes will
+ // show up as unattached nodes.
+
+ if dep := d.dependency; dep != nil && dep != cc {
+ m.cc(dep)
+ }
+ }
+}
+
+func (m *mermaidContext) vertexPath(v *Vertex) string {
+ path := m.ctx.PathToString(v.Path())
+ if path == "" {
+ return "_"
+ }
+ return path
+}
+
+const sigPtrLen = 6
+
+func (m *mermaidContext) vertexID(v *Vertex) string {
+ s := fmt.Sprintf("%p", v)
+ return "v" + s[len(s)-sigPtrLen:]
+}
+
+func (m *mermaidContext) pstr(cc *closeContext) string {
+ if id, ok := m.ccID[cc]; ok {
+ return id
+ }
+
+ ptr := fmt.Sprintf("%p", cc)
+ ptr = ptr[len(ptr)-sigPtrLen:]
+ id := fmt.Sprintf("cc%s", ptr)
+ m.ccID[cc] = id
+
+ v := m.vertex(cc.src)
+
+ w := v.w
+
+ indent(w, 2)
+ w.WriteString(id)
+
+ var open, close = "((", "))"
+ if m.inConjuncts[cc] {
+ open, close = "(((", ")))"
+ }
+
+ w.WriteString(open)
+ w.WriteString("cc")
+ if cc.conjunctCount > 0 {
+ fmt.Fprintf(w, " c:%d", cc.conjunctCount)
+ }
+ indentOnNewline(w, 3)
+ w.WriteString(ptr)
+
+ flags := &bytes.Buffer{}
+ addFlag := func(test bool, flag byte) {
+ if test {
+ flags.WriteByte(flag)
+ }
+ }
+ addFlag(cc.isDef, '#')
+ addFlag(cc.isEmbed, 'E')
+ addFlag(cc.isClosed, 'c')
+ addFlag(cc.isClosedOnce, 'C')
+ addFlag(cc.hasEllipsis, 'o')
+ io.Copy(w, flags)
+
+ w.WriteString(close)
+
+ if cc.conjunctCount > 0 {
+ fmt.Fprintf(w, ":::err")
+ if cc.src == m.v {
+ m.hasError = true
+ }
+ }
+
+ w.WriteString("\n")
+
+ return id
+}
+
+func indentOnNewline(w io.Writer, level int) {
+ w.Write([]byte{'\n'})
+ indent(w, level)
+}
+
+func indent(w io.Writer, level int) {
+ for i := 0; i < level; i++ {
+ io.WriteString(w, " ")
+ }
+}
+
+// openBrowser opens the given URL in the default browser.
+func openBrowser(url string) {
+ var cmd *exec.Cmd
+
+ switch runtime.GOOS {
+ case "windows":
+ cmd = exec.Command("cmd", "/c", "start", url)
+ case "darwin":
+ cmd = exec.Command("open", url)
+ default:
+ cmd = exec.Command("xdg-open", url)
+ }
+
+ err := cmd.Start()
+ if err != nil {
+ log.Fatal(err)
+ }
+ go cmd.Wait()
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/dev.go b/vendor/cuelang.org/go/internal/core/adt/dev.go
new file mode 100644
index 00000000..28db747d
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/dev.go
@@ -0,0 +1,79 @@
+// Copyright 2023 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+// This file contains types to help in the transition from the old to new
+// evaluation model.
+
+func unreachableForDev(c *OpContext) {
+ if c.isDevVersion() {
+ panic("unreachable for development version")
+ }
+}
+
+type combinedFlags uint32
+
+// oldOnly indicates that a Vertex should only be evaluated for the old
+// evaluator.
+func oldOnly(state vertexStatus) combinedFlags {
+ return combinedFlags(state) |
+ combinedFlags(ignore)<<8 |
+ combinedFlags(allKnown)<<16
+}
+
+func combineMode(cond condition, mode runMode) combinedFlags {
+ return combinedFlags(mode)<<8 | combinedFlags(cond)<<16
+}
+
+func attempt(state vertexStatus, cond condition) combinedFlags {
+ return combinedFlags(state) | combineMode(cond, attemptOnly)
+}
+
+func require(state vertexStatus, cond condition) combinedFlags {
+ return combinedFlags(state) | combineMode(cond, yield)
+}
+
+func final(state vertexStatus, cond condition) combinedFlags {
+ return combinedFlags(state) | combineMode(cond, finalize)
+}
+
+func deprecated(c *OpContext, state vertexStatus) combinedFlags {
+ // if c.isDevVersion() {
+ // panic("calling function may not be used in new evaluator")
+ // }
+ return combinedFlags(state)
+}
+
+func (f combinedFlags) vertexStatus() vertexStatus {
+ return vertexStatus(f & 0xff)
+}
+
+func (f combinedFlags) withVertexStatus(x vertexStatus) combinedFlags {
+ f &^= 0xff
+ f |= combinedFlags(x)
+ return f
+}
+
+func (f combinedFlags) conditions() condition {
+ return condition(f >> 16)
+}
+
+func (f combinedFlags) runMode() runMode {
+ return runMode(f>>8) & 0xff
+}
+
+func (f combinedFlags) ignore() bool {
+ return f&(combinedFlags(ignore)<<8) != 0
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/disjunct.go b/vendor/cuelang.org/go/internal/core/adt/disjunct.go
index 45abffcb..5f2ca85e 100644
--- a/vendor/cuelang.org/go/internal/core/adt/disjunct.go
+++ b/vendor/cuelang.org/go/internal/core/adt/disjunct.go
@@ -124,6 +124,8 @@ func (n *nodeContext) expandDisjuncts(
parentMode defaultMode, // default mode of this disjunct
recursive, last bool) {
+ unreachableForDev(n.ctx)
+
n.ctx.stats.Disjuncts++
// refNode is used to collect cyclicReferences for all disjuncts to be
diff --git a/vendor/cuelang.org/go/internal/core/adt/errors.go b/vendor/cuelang.org/go/internal/core/adt/errors.go
index 4de2a91c..c8ded3e8 100644
--- a/vendor/cuelang.org/go/internal/core/adt/errors.go
+++ b/vendor/cuelang.org/go/internal/core/adt/errors.go
@@ -206,6 +206,22 @@ func CombineErrors(src ast.Node, x, y Value) *Bottom {
}
}
+func addPositions(err *ValueError, c Conjunct) {
+ switch x := c.x.(type) {
+ case *Field:
+ // if x.ArcType == ArcRequired {
+ err.AddPosition(c.x)
+ // }
+ case *ConjunctGroup:
+ for _, c := range *x {
+ addPositions(err, c)
+ }
+ }
+ if c.CloseInfo.closeInfo != nil {
+ err.AddPosition(c.CloseInfo.location)
+ }
+}
+
func NewRequiredNotPresentError(ctx *OpContext, v *Vertex) *Bottom {
saved := ctx.PushArc(v)
err := ctx.Newf("field is required but not present")
@@ -230,9 +246,7 @@ func newRequiredFieldInComprehensionError(ctx *OpContext, x *ForClause, v *Verte
err := ctx.Newf("missing required field in for comprehension: %v", v.Label)
err.AddPosition(x.Src)
for _, c := range v.Conjuncts {
- if f, ok := c.x.(*Field); ok && f.ArcType == ArcRequired {
- err.AddPosition(c.x)
- }
+ addPositions(err, c)
}
return &Bottom{
Code: IncompleteError,
@@ -240,6 +254,41 @@ func newRequiredFieldInComprehensionError(ctx *OpContext, x *ForClause, v *Verte
}
}
+func (v *Vertex) reportFieldIndexError(c *OpContext, pos token.Pos, f Feature) {
+ v.reportFieldError(c, pos, f,
+ "index out of range [%d] with length %d",
+ "undefined field: %s")
+}
+
+func (v *Vertex) reportFieldCycleError(c *OpContext, pos token.Pos, f Feature) *Bottom {
+ const msg = "cyclic reference to field %[1]v"
+ b := v.reportFieldError(c, pos, f, msg, msg)
+ return b
+}
+
+func (v *Vertex) reportFieldError(c *OpContext, pos token.Pos, f Feature, intMsg, stringMsg string) *Bottom {
+ code := IncompleteError
+ if !v.Accept(c, f) {
+ code = EvalError
+ }
+
+ label := f.SelectorString(c.Runtime)
+
+ var err errors.Error
+ if f.IsInt() {
+ err = c.NewPosf(pos, intMsg, f.Index(), len(v.Elems()))
+ } else {
+ err = c.NewPosf(pos, stringMsg, label)
+ }
+ b := &Bottom{
+ Code: code,
+ Err: err,
+ }
+ // TODO: yield failure
+ c.AddBottom(b) // TODO: unify error mechanism.
+ return b
+}
+
// A ValueError is returned as a result of evaluating a value.
type ValueError struct {
r Runtime
@@ -301,7 +350,15 @@ func appendNodePositions(a []token.Pos, n Node) []token.Pos {
}
if v, ok := n.(*Vertex); ok {
for _, c := range v.Conjuncts {
- a = appendNodePositions(a, c.Elem())
+ switch x := c.x.(type) {
+ case *ConjunctGroup:
+ for _, c := range *x {
+ a = appendNodePositions(a, c.Elem())
+ }
+
+ default:
+ a = appendNodePositions(a, c.Elem())
+ }
}
}
return a
diff --git a/vendor/cuelang.org/go/internal/core/adt/eval.go b/vendor/cuelang.org/go/internal/core/adt/eval.go
index 4f40363f..7d04633b 100644
--- a/vendor/cuelang.org/go/internal/core/adt/eval.go
+++ b/vendor/cuelang.org/go/internal/core/adt/eval.go
@@ -73,7 +73,7 @@ var incompleteSentinel = &Bottom{
// error.
//
// TODO: return *Vertex
-func (c *OpContext) evaluate(v *Vertex, r Resolver, state vertexStatus) Value {
+func (c *OpContext) evaluate(v *Vertex, r Resolver, state combinedFlags) Value {
if v.isUndefined() {
// Use node itself to allow for cycle detection.
c.unify(v, state)
@@ -150,7 +150,13 @@ func (c *OpContext) evaluate(v *Vertex, r Resolver, state vertexStatus) Value {
// state can be used to indicate to which extent processing should continue.
// state == finalized means it is evaluated to completion. See vertexStatus
// for more details.
-func (c *OpContext) unify(v *Vertex, state vertexStatus) {
+func (c *OpContext) unify(v *Vertex, flags combinedFlags) {
+ if c.isDevVersion() {
+ requires, mode := flags.conditions(), flags.runMode()
+ v.unify(c, requires, mode)
+ return
+ }
+
// defer c.PopVertex(c.PushVertex(v))
if Debug {
c.nest++
@@ -166,6 +172,8 @@ func (c *OpContext) unify(v *Vertex, state vertexStatus) {
n := v.getNodeContext(c, 1)
defer v.freeNode(n)
+ state := flags.vertexStatus()
+
// TODO(cycle): verify this happens in all cases when we need it.
if n != nil && v.Parent != nil && v.Parent.state != nil {
n.depth = v.Parent.state.depth + 1
@@ -370,6 +378,8 @@ func (c *OpContext) unify(v *Vertex, state vertexStatus) {
// insertConjuncts inserts conjuncts previously not inserted.
func (n *nodeContext) insertConjuncts(state vertexStatus) bool {
+ unreachableForDev(n.ctx)
+
// Exit early if we have a concrete value and only need partial results.
if state == partial {
for n.conjunctsPartialPos < len(n.conjuncts) {
@@ -461,13 +471,14 @@ func (n *nodeContext) doNotify() {
func (n *nodeContext) postDisjunct(state vertexStatus) {
ctx := n.ctx
+ unreachableForDev(ctx)
for {
// Use maybeSetCache for cycle breaking
for n.maybeSetCache(); n.expandOne(state); n.maybeSetCache() {
}
- if !n.addLists(state) {
+ if !n.addLists(oldOnly(state)) {
break
}
}
@@ -735,6 +746,8 @@ func (n *nodeContext) incompleteErrors(final bool) *Bottom {
// though, that any potential performance issues are eliminated for
// Protobuf-like oneOf fields.
func (n *nodeContext) checkClosed(state vertexStatus) bool {
+ unreachableForDev(n.ctx)
+
ignore := state != finalized || n.skipNonMonotonicChecks()
v := n.node
@@ -754,6 +767,8 @@ func (n *nodeContext) checkClosed(state vertexStatus) bool {
}
func (n *nodeContext) completeArcs(state vertexStatus) {
+ unreachableForDev(n.ctx)
+
if DebugSort > 0 {
DebugSortArcs(n.ctx, n.node)
}
@@ -790,7 +805,7 @@ func (n *nodeContext) completeArcs(state vertexStatus) {
wasVoid := a.ArcType == ArcPending
- ctx.unify(a, finalized)
+ ctx.unify(a, oldOnly(finalized))
if a.ArcType == ArcPending {
continue
@@ -844,7 +859,7 @@ func (n *nodeContext) completeArcs(state vertexStatus) {
// TODO(errors): make Validate return bottom and generate
// optimized conflict message. Also track and inject IDs
// to determine origin location.s
- v := ctx.evalState(c.expr, finalized)
+ v := ctx.evalState(c.expr, oldOnly(finalized))
v, _ = ctx.getDefault(v)
v = Unwrap(v)
@@ -952,17 +967,13 @@ type nodeContext struct {
nextFree *nodeContext
refCount int
- // Keep these two out of the nodeContextState to make them more accessible
+ // Keep node out of the nodeContextState to make them more accessible
// for source-level debuggers.
- ctx *OpContext
node *Vertex
nodeContextState
- // rootCloseContext should not be cloned as clones need to get their own
- // copies of this. For this reason it is not included in nodeContextState,
- // as it prevents it from being set "by default".
- rootCloseContext *closeContext
+ scheduler
// Below are slices that need to be managed when cloning and reclaiming
// nodeContexts for reuse. We want to ensure that, instead of setting
@@ -1046,6 +1057,11 @@ type nodeContextState struct {
aStruct Expr
aStructID CloseInfo
+ // List fields
+ listIsClosed bool
+ maxListLen int
+ maxNode Expr
+
lowerBound *BoundValue // > or >=
upperBound *BoundValue // < or <=
errs *Bottom
@@ -1059,6 +1075,8 @@ type nodeContextState struct {
// conjunctsPartialPos is like conjunctsPos, but for the 'partial' phase
// of processing where conjuncts are only processed as concrete scalars.
conjunctsPartialPos int
+
+ arcPos int
}
// A receiver receives notifications.
@@ -1086,6 +1104,8 @@ type defaultInfo struct {
}
func (n *nodeContext) addNotify(v *Vertex, cc *closeContext) {
+ unreachableForDev(n.ctx)
+
if v != nil && !n.node.hasAllConjuncts {
n.notify = append(n.notify, receiver{v, cc})
}
@@ -1104,6 +1124,8 @@ func (n *nodeContext) clone() *nodeContext {
d.arcMap = append(d.arcMap, n.arcMap...)
d.notify = append(d.notify, n.notify...)
+ n.scheduler.cloneInto(&d.scheduler)
+
d.conjuncts = append(d.conjuncts, n.conjuncts...)
d.cyclicConjuncts = append(d.cyclicConjuncts, n.cyclicConjuncts...)
d.dynamicFields = append(d.dynamicFields, n.dynamicFields...)
@@ -1129,8 +1151,8 @@ func (c *OpContext) newNodeContext(node *Vertex) *nodeContext {
c.freeListNode = n.nextFree
*n = nodeContext{
- ctx: c,
- node: node,
+ scheduler: scheduler{ctx: c},
+ node: node,
nodeContextState: nodeContextState{
kind: TopKind,
},
@@ -1152,20 +1174,28 @@ func (c *OpContext) newNodeContext(node *Vertex) *nodeContext {
disjuncts: n.disjuncts[:0],
buffer: n.buffer[:0],
}
+ n.scheduler.clear()
+ n.scheduler.node = n
return n
}
c.stats.Allocs++
- return &nodeContext{
- ctx: c,
+ n := &nodeContext{
+ scheduler: scheduler{
+ ctx: c,
+ },
node: node,
nodeContextState: nodeContextState{kind: TopKind},
}
+ n.scheduler.node = n
+ return n
}
func (v *Vertex) getNodeContext(c *OpContext, ref int) *nodeContext {
+ unreachableForDev(c)
+
if v.state == nil {
if v.status == finalized {
return nil
@@ -1219,6 +1249,7 @@ func (c *OpContext) freeNodeContext(n *nodeContext) {
c.freeListNode = n
n.node = nil
n.refCount = 0
+ n.scheduler.clear()
}
// TODO(perf): return a dedicated ConflictError that can track original
@@ -1321,6 +1352,8 @@ func (n *nodeContext) updateNodeType(k Kind, v Expr, id CloseInfo) bool {
}
func (n *nodeContext) done() bool {
+ // TODO(v0.7): verify that done() is checking for the right conditions in
+ // the new evaluator implementation.
return len(n.dynamicFields) == 0 &&
len(n.comprehensions) == 0 &&
len(n.exprs) == 0
@@ -1329,6 +1362,7 @@ func (n *nodeContext) done() bool {
// finalDone is like done, but allows for cycle errors, which can be ignored
// as they essentially indicate a = a & _.
func (n *nodeContext) finalDone() bool {
+ // TODO(v0.7): update for new evaluator?
for _, x := range n.exprs {
if x.err.Code != CycleError {
return false
@@ -1474,6 +1508,8 @@ func (n *nodeContext) addErr(err errors.Error) {
// into the nodeContext if successful or queue it for later evaluation if it is
// incomplete or is not value.
func (n *nodeContext) addExprConjunct(v Conjunct, state vertexStatus) {
+ unreachableForDev(n.ctx)
+
env := v.Env
id := v.CloseInfo
@@ -1525,6 +1561,8 @@ func (n *nodeContext) addExprConjunct(v Conjunct, state vertexStatus) {
// evalExpr is only called by addExprConjunct. If an error occurs, it records
// the error in n and returns nil.
func (n *nodeContext) evalExpr(v Conjunct, state vertexStatus) {
+ unreachableForDev(n.ctx)
+
// Require an Environment.
ctx := n.ctx
@@ -1539,7 +1577,7 @@ func (n *nodeContext) evalExpr(v Conjunct, state vertexStatus) {
if state == finalized {
state = conjuncts
}
- arc, err := ctx.resolveState(v, x, state)
+ arc, err := ctx.resolveState(v, x, oldOnly(state))
if err != nil && (!err.IsIncomplete() || err.Permanent) {
n.addBottom(err)
break
@@ -1576,7 +1614,7 @@ func (n *nodeContext) evalExpr(v Conjunct, state vertexStatus) {
case Evaluator:
// Interpolation, UnaryExpr, BinaryExpr, CallExpr
// Could be unify?
- val := ctx.evaluateRec(v, partial)
+ val := ctx.evaluateRec(v, oldOnly(partial))
if b, ok := val.(*Bottom); ok &&
b.IsIncomplete() {
n.exprs = append(n.exprs, envExpr{v, b})
@@ -1616,6 +1654,8 @@ func (n *nodeContext) evalExpr(v Conjunct, state vertexStatus) {
}
func (n *nodeContext) addVertexConjuncts(c Conjunct, arc *Vertex, inline bool) {
+ unreachableForDev(n.ctx)
+
closeInfo := c.CloseInfo
// We need to ensure that each arc is only unified once (or at least) a
@@ -1689,7 +1729,7 @@ func (n *nodeContext) addVertexConjuncts(c Conjunct, arc *Vertex, inline bool) {
// is necessary to prevent lookups in unevaluated structs.
// TODO(cycles): this can probably most easily be fixed with a
// having a more recursive implementation.
- n.ctx.unify(arc, partial)
+ n.ctx.unify(arc, oldOnly(partial))
}
// Don't add conjuncts if a node is referring to itself.
@@ -2021,6 +2061,10 @@ func (n *nodeContext) addStruct(
// disjunctions.
func (n *nodeContext) insertField(f Feature, mode ArcType, x Conjunct) *Vertex {
ctx := n.ctx
+ if ctx.isDevVersion() {
+ return n.insertArc(f, mode, x, x.CloseInfo, true)
+ }
+
arc, isNew := n.node.GetArc(ctx, f, mode)
if f.IsLet() && !isNew {
arc.MultiLet = true
@@ -2050,6 +2094,10 @@ func (n *nodeContext) insertField(f Feature, mode ArcType, x Conjunct) *Vertex {
func (n *nodeContext) insertFieldUnchecked(f Feature, mode ArcType, x Conjunct) *Vertex {
ctx := n.ctx
+ if ctx.isDevVersion() {
+ return n.insertArc(f, mode, x, x.CloseInfo, false)
+ }
+
arc, isNew := n.node.GetArc(ctx, f, mode)
if f.IsLet() && !isNew {
arc.MultiLet = true
@@ -2073,6 +2121,8 @@ func (n *nodeContext) insertFieldUnchecked(f Feature, mode ArcType, x Conjunct)
// TODO(errors): detect when a field is added to a struct that is already used
// in a for clause.
func (n *nodeContext) expandOne(state vertexStatus) (done bool) {
+ unreachableForDev(n.ctx)
+
// Don't expand incomplete expressions if we detected a cycle.
if n.done() || (n.hasCycle && !n.hasNonCycle) {
return false
@@ -2110,6 +2160,8 @@ func (n *nodeContext) expandOne(state vertexStatus) (done bool) {
// injectDynamic evaluates and inserts dynamic declarations.
func (n *nodeContext) injectDynamic() (progress bool) {
+ unreachableForDev(n.ctx)
+
ctx := n.ctx
k := 0
@@ -2119,7 +2171,7 @@ func (n *nodeContext) injectDynamic() (progress bool) {
x := d.field.Key
// Push state to capture and remove errors.
s := ctx.PushState(d.env, x.Source())
- v := ctx.evalState(x, finalized)
+ v := ctx.evalState(x, oldOnly(finalized))
b := ctx.PopState(s)
if b != nil && b.IsIncomplete() {
@@ -2160,7 +2212,7 @@ func (n *nodeContext) injectDynamic() (progress bool) {
//
// TODO(embeddedScalars): for embedded scalars, there should be another pass
// of evaluation expressions after expanding lists.
-func (n *nodeContext) addLists(state vertexStatus) (progress bool) {
+func (n *nodeContext) addLists(state combinedFlags) (progress bool) {
if len(n.lists) == 0 && len(n.vLists) == 0 {
return false
}
@@ -2246,7 +2298,7 @@ outer:
if err != nil {
if err.ForCycle && !l.self {
// The list has a comprehension that refers to the list
- // itself. This means we should postpone evalauting this
+ // itself. This means we should postpone evaluating this
// list until all other lists have been evaluated.
n.lists[i].ignore = true
l.self = true
diff --git a/vendor/cuelang.org/go/internal/core/adt/expr.go b/vendor/cuelang.org/go/internal/core/adt/expr.go
index 8112e3d0..b2765569 100644
--- a/vendor/cuelang.org/go/internal/core/adt/expr.go
+++ b/vendor/cuelang.org/go/internal/core/adt/expr.go
@@ -27,6 +27,16 @@ import (
"cuelang.org/go/cue/token"
)
+var _ Elem = &ConjunctGroup{}
+
+// A ConjunctGroup is an Elem that is used for internal grouping of Conjuncts
+// only.
+type ConjunctGroup []Conjunct
+
+func (g *ConjunctGroup) Source() ast.Node {
+ return nil
+}
+
// A StructLit represents an unevaluated struct literal or file body.
type StructLit struct {
Src ast.Node // ast.File or ast.StructLit
@@ -71,7 +81,7 @@ func (x *StructLit) HasOptional() bool {
func (x *StructLit) Source() ast.Node { return x.Src }
-func (x *StructLit) evaluate(c *OpContext, state vertexStatus) Value {
+func (x *StructLit) evaluate(c *OpContext, state combinedFlags) Value {
e := c.Env(0)
v := c.newInlineVertex(e.Vertex, nil, Conjunct{e, x, c.ci})
// evaluate may not finalize a field, as the resulting value may be
@@ -281,7 +291,7 @@ func (x *ListLit) Source() ast.Node {
return x.Src
}
-func (x *ListLit) evaluate(c *OpContext, state vertexStatus) Value {
+func (x *ListLit) evaluate(c *OpContext, state combinedFlags) Value {
e := c.Env(0)
v := c.newInlineVertex(e.Vertex, nil, Conjunct{e, x, c.ci})
v.CompleteArcs(c)
@@ -446,8 +456,10 @@ func (x *BoundExpr) Source() ast.Node {
return x.Src
}
-func (x *BoundExpr) evaluate(ctx *OpContext, state vertexStatus) Value {
- v := ctx.value(x.Expr, partial)
+func (x *BoundExpr) evaluate(ctx *OpContext, state combinedFlags) Value {
+ // scalarKnown is used here to ensure we know the value. The result does
+ // not have to be concrete, though.
+ v := ctx.value(x.Expr, require(partial, scalarKnown))
if isError(v) {
return v
}
@@ -679,7 +691,7 @@ func (x *NodeLink) Kind() Kind {
}
func (x *NodeLink) Source() ast.Node { return x.Node.Source() }
-func (x *NodeLink) resolve(c *OpContext, state vertexStatus) *Vertex {
+func (x *NodeLink) resolve(c *OpContext, state combinedFlags) *Vertex {
return x.Node
}
@@ -699,7 +711,7 @@ func (x *FieldReference) Source() ast.Node {
return x.Src
}
-func (x *FieldReference) resolve(c *OpContext, state vertexStatus) *Vertex {
+func (x *FieldReference) resolve(c *OpContext, state combinedFlags) *Vertex {
n := c.relNode(x.UpCount)
pos := pos(x)
return c.lookup(n, pos, x.Label, state)
@@ -723,7 +735,7 @@ func (x *ValueReference) Source() ast.Node {
return x.Src
}
-func (x *ValueReference) resolve(c *OpContext, state vertexStatus) *Vertex {
+func (x *ValueReference) resolve(c *OpContext, state combinedFlags) *Vertex {
if x.UpCount == 0 {
return c.vertex
}
@@ -750,7 +762,7 @@ func (x *LabelReference) Source() ast.Node {
return x.Src
}
-func (x *LabelReference) evaluate(ctx *OpContext, state vertexStatus) Value {
+func (x *LabelReference) evaluate(ctx *OpContext, state combinedFlags) Value {
label := ctx.relLabel(x.UpCount)
if label == 0 {
// There is no label. This may happen if a LabelReference is evaluated
@@ -794,15 +806,15 @@ func (x *DynamicReference) Source() ast.Node {
func (x *DynamicReference) EvaluateLabel(ctx *OpContext, env *Environment) Feature {
env = env.up(ctx, x.UpCount)
frame := ctx.PushState(env, x.Src)
- v := ctx.value(x.Label, partial)
+ v := ctx.value(x.Label, require(partial, scalarKnown))
ctx.PopState(frame)
return ctx.Label(x, v)
}
-func (x *DynamicReference) resolve(ctx *OpContext, state vertexStatus) *Vertex {
+func (x *DynamicReference) resolve(ctx *OpContext, state combinedFlags) *Vertex {
e := ctx.Env(x.UpCount)
frame := ctx.PushState(e, x.Src)
- v := ctx.value(x.Label, partial)
+ v := ctx.value(x.Label, require(partial, scalarKnown))
ctx.PopState(frame)
f := ctx.Label(x.Label, v)
return ctx.lookup(e.Vertex, pos(x), f, state)
@@ -828,7 +840,7 @@ func (x *ImportReference) Source() ast.Node {
return x.Src
}
-func (x *ImportReference) resolve(ctx *OpContext, state vertexStatus) *Vertex {
+func (x *ImportReference) resolve(ctx *OpContext, state combinedFlags) *Vertex {
path := x.ImportPath.StringValue(ctx)
v := ctx.Runtime.LoadImport(path)
if v == nil {
@@ -856,14 +868,14 @@ func (x *LetReference) Source() ast.Node {
return x.Src
}
-func (x *LetReference) resolve(ctx *OpContext, state vertexStatus) *Vertex {
+func (x *LetReference) resolve(ctx *OpContext, state combinedFlags) *Vertex {
e := ctx.Env(x.UpCount)
n := e.Vertex
// No need to Unify n, as Let references can only result from evaluating
// an expression within n, in which case evaluation must already have
// started.
- if n.status < evaluating {
+ if n.status < evaluating && !ctx.isDevVersion() {
panic("unexpected node state < Evaluating")
}
@@ -926,8 +938,14 @@ func (x *LetReference) resolve(ctx *OpContext, state vertexStatus) *Vertex {
}
v = n
e.cache[key] = n
- nc := n.getNodeContext(ctx, 0)
- nc.hasNonCycle = true // Allow a first cycle to be skipped.
+ if ctx.isDevVersion() {
+ nc := n.getState(ctx)
+ nc.hasNonCycle = true // Allow a first cycle to be skipped.
+ nc.free()
+ } else {
+ nc := n.getNodeContext(ctx, 0)
+ nc.hasNonCycle = true // Allow a first cycle to be skipped.
+ }
// Parents cannot add more conjuncts to a let expression, so set of
// conjuncts is always complete.
@@ -959,16 +977,17 @@ func (x *SelectorExpr) Source() ast.Node {
return x.Src
}
-func (x *SelectorExpr) resolve(c *OpContext, state vertexStatus) *Vertex {
+func (x *SelectorExpr) resolve(c *OpContext, state combinedFlags) *Vertex {
// TODO: the node should really be evaluated as AllConjunctsDone, but the
// order of evaluation is slightly off, causing too much to be evaluated.
// This may especially result in incorrect results when using embedded
// scalars.
- n := c.node(x, x.X, x.Sel.IsRegular(), partial)
+ // In the new evaluator, evaluation of the node is done in lookup.
+ n := c.node(x, x.X, x.Sel.IsRegular(), attempt(partial, needFieldSetKnown))
if n == emptyNode {
return n
}
- if n.status == partial {
+ if n.status == partial && !c.isDevVersion() {
if b := n.state.incompleteErrors(false); b != nil && b.Code < CycleError {
c.AddBottom(b)
return n
@@ -978,7 +997,8 @@ func (x *SelectorExpr) resolve(c *OpContext, state vertexStatus) *Vertex {
// will otherwise be discarded and there will be no other chance to check
// the struct is valid.
- return c.lookup(n, x.Src.Sel.Pos(), x.Sel, state)
+ pos := x.Src.Sel.Pos()
+ return c.lookup(n, pos, x.Sel, state)
}
// IndexExpr is like a selector, but selects an index.
@@ -997,18 +1017,18 @@ func (x *IndexExpr) Source() ast.Node {
return x.Src
}
-func (x *IndexExpr) resolve(ctx *OpContext, state vertexStatus) *Vertex {
+func (x *IndexExpr) resolve(ctx *OpContext, state combinedFlags) *Vertex {
// TODO: support byte index.
// TODO: the node should really be evaluated as AllConjunctsDone, but the
// order of evaluation is slightly off, causing too much to be evaluated.
// This may especially result in incorrect results when using embedded
// scalars.
- n := ctx.node(x, x.X, true, partial)
- i := ctx.value(x.Index, partial)
+ n := ctx.node(x, x.X, true, attempt(partial, needFieldSetKnown))
+ i := ctx.value(x.Index, require(partial, scalarKnown))
if n == emptyNode {
return n
}
- if n.status == partial {
+ if n.status == partial && !ctx.isDevVersion() {
if b := n.state.incompleteErrors(false); b != nil && b.Code < CycleError {
ctx.AddBottom(b)
return n
@@ -1019,7 +1039,17 @@ func (x *IndexExpr) resolve(ctx *OpContext, state vertexStatus) *Vertex {
// the struct is valid.
f := ctx.Label(x.Index, i)
- return ctx.lookup(n, x.Src.Index.Pos(), f, state)
+
+ // Within lookup, errors collected in ctx may be associated with n. This is
+ // correct if the error is generated within lookup, but not if it has
+ // already been generated at this point. We therefore bail out early here if
+ // we already have an error.
+ // TODO: this code can probably go once we have cleaned up error generation.
+ if ctx.errs != nil {
+ return nil
+ }
+ pos := x.Src.Index.Pos()
+ return ctx.lookup(n, pos, f, state)
}
// A SliceExpr represents a slice operation. (Not currently in spec.)
@@ -1040,10 +1070,10 @@ func (x *SliceExpr) Source() ast.Node {
return x.Src
}
-func (x *SliceExpr) evaluate(c *OpContext, state vertexStatus) Value {
+func (x *SliceExpr) evaluate(c *OpContext, state combinedFlags) Value {
// TODO: strides
- v := c.value(x.X, partial)
+ v := c.value(x.X, require(partial, fieldSetKnown))
const as = "slice index"
switch v := v.(type) {
@@ -1060,10 +1090,10 @@ func (x *SliceExpr) evaluate(c *OpContext, state vertexStatus) Value {
hi = uint64(len(v.Arcs))
)
if x.Lo != nil {
- lo = c.uint64(c.value(x.Lo, partial), as)
+ lo = c.uint64(c.value(x.Lo, require(partial, scalarKnown)), as)
}
if x.Hi != nil {
- hi = c.uint64(c.value(x.Hi, partial), as)
+ hi = c.uint64(c.value(x.Hi, require(partial, scalarKnown)), as)
if hi > uint64(len(v.Arcs)) {
return c.NewErrf("index %d out of range", hi)
}
@@ -1093,10 +1123,10 @@ func (x *SliceExpr) evaluate(c *OpContext, state vertexStatus) Value {
hi = uint64(len(v.B))
)
if x.Lo != nil {
- lo = c.uint64(c.value(x.Lo, partial), as)
+ lo = c.uint64(c.value(x.Lo, require(partial, scalarKnown)), as)
}
if x.Hi != nil {
- hi = c.uint64(c.value(x.Hi, partial), as)
+ hi = c.uint64(c.value(x.Hi, require(partial, scalarKnown)), as)
if hi > uint64(len(v.B)) {
return c.NewErrf("index %d out of range", hi)
}
@@ -1129,10 +1159,10 @@ func (x *Interpolation) Source() ast.Node {
return x.Src
}
-func (x *Interpolation) evaluate(c *OpContext, state vertexStatus) Value {
+func (x *Interpolation) evaluate(c *OpContext, state combinedFlags) Value {
buf := bytes.Buffer{}
for _, e := range x.Parts {
- v := c.value(e, partial)
+ v := c.value(e, require(partial, scalarKnown))
if x.K == BytesKind {
buf.Write(c.ToBytes(v))
} else {
@@ -1171,11 +1201,11 @@ func (x *UnaryExpr) Source() ast.Node {
return x.Src
}
-func (x *UnaryExpr) evaluate(c *OpContext, state vertexStatus) Value {
+func (x *UnaryExpr) evaluate(c *OpContext, state combinedFlags) Value {
if !c.concreteIsPossible(x.Op, x.X) {
return nil
}
- v := c.value(x.X, partial)
+ v := c.value(x.X, require(partial, scalarKnown))
if isError(v) {
return v
}
@@ -1232,7 +1262,7 @@ func (x *BinaryExpr) Source() ast.Node {
return x.Src
}
-func (x *BinaryExpr) evaluate(c *OpContext, state vertexStatus) Value {
+func (x *BinaryExpr) evaluate(c *OpContext, state combinedFlags) Value {
env := c.Env(0)
if x.Op == AndOp {
v := c.newInlineVertex(nil, nil, makeAnonymousConjunct(env, x, c.ci.Refs))
@@ -1284,14 +1314,18 @@ func (x *BinaryExpr) evaluate(c *OpContext, state vertexStatus) Value {
return BinOp(c, x.Op, left, right)
}
-func (c *OpContext) validate(env *Environment, src ast.Node, x Expr, op Op, state vertexStatus) (r Value) {
+func (c *OpContext) validate(env *Environment, src ast.Node, x Expr, op Op, flags combinedFlags) (r Value) {
+ state := flags.vertexStatus()
+
s := c.PushState(env, src)
match := op != EqualOp // non-error case
// Like value(), but retain the original, unwrapped result.
c.inValidator++
- v := c.evalState(x, state)
+ req := flags
+ req = final(state, needTasksDone)
+ v := c.evalState(x, req)
c.inValidator--
u, _ := c.getDefault(v)
u = Unwrap(u)
@@ -1316,7 +1350,7 @@ func (c *OpContext) validate(env *Environment, src ast.Node, x Expr, op Op, stat
return nil
case IncompleteError:
- c.evalState(x, finalized)
+ c.evalState(x, oldOnly(finalized))
// We have a nonmonotonic use of a failure. Referenced fields should
// not be added anymore.
@@ -1381,7 +1415,7 @@ func (c *OpContext) validate(env *Environment, src ast.Node, x Expr, op Op, stat
match = op == EqualOp
}
- c.evalState(x, partial)
+ c.evalState(x, require(state, needTasksDone))
}
c.PopState(s)
@@ -1428,8 +1462,8 @@ func (x *CallExpr) Source() ast.Node {
return x.Src
}
-func (x *CallExpr) evaluate(c *OpContext, state vertexStatus) Value {
- fun := c.value(x.Fun, partial)
+func (x *CallExpr) evaluate(c *OpContext, state combinedFlags) Value {
+ fun := c.value(x.Fun, require(partial, concreteKnown))
var b *Builtin
switch f := fun.(type) {
case *Builtin:
@@ -1459,6 +1493,12 @@ func (x *CallExpr) evaluate(c *OpContext, state vertexStatus) Value {
for i, a := range x.Args {
saved := c.errs
c.errs = nil
+ // XXX: XXX: clear id.closeContext per argument and remove from runTask?
+
+ runMode := state.runMode()
+ cond := state.conditions() | allAncestorsProcessed | concreteKnown
+ state = combineMode(cond, runMode).withVertexStatus(state.vertexStatus())
+
expr := c.value(a, state)
switch v := expr.(type) {
@@ -1489,7 +1529,7 @@ func (x *CallExpr) evaluate(c *OpContext, state vertexStatus) Value {
if result == nil {
return nil
}
- return c.evalState(result, partial)
+ return c.evalState(result, state.withVertexStatus(partial))
}
// A Builtin is a value representing a native function call.
@@ -1728,7 +1768,7 @@ func (x *DisjunctionExpr) Source() ast.Node {
return x.Src
}
-func (x *DisjunctionExpr) evaluate(c *OpContext, state vertexStatus) Value {
+func (x *DisjunctionExpr) evaluate(c *OpContext, state combinedFlags) Value {
e := c.Env(0)
v := c.newInlineVertex(nil, nil, Conjunct{e, x, c.ci})
v.Finalize(c) // TODO: also partial okay?
@@ -1850,39 +1890,115 @@ func (x *ForClause) Source() ast.Node {
return x.Syntax
}
+func (c *OpContext) forSource(x Expr) *Vertex {
+ state := require(conjuncts, needFieldSetKnown)
+
+ // TODO: always get the vertex. This allows a whole bunch of trickery
+ // down the line.
+ v := c.unifyNode(x, state)
+
+ node, ok := v.(*Vertex)
+ if ok && c.isDevVersion() {
+ node.unify(c, state.conditions(), yield)
+ }
+
+ v, ok = c.getDefault(v)
+
+ if !ok {
+ // Error already generated by getDefault.
+ return emptyNode
+ }
+
+ // TODO: skip in new evaluator? Check once we introduce disjunctions.
+ if w := Unwrap(v); !isCyclePlaceholder(w) {
+ v = w
+ }
+ node, ok = v.(*Vertex)
+ if ok && !isCyclePlaceholder(node.BaseValue) {
+ v = node.Value()
+ }
+
+ switch nv := v.(type) {
+ case nil:
+ c.addErrf(IncompleteError, pos(x),
+ "cannot range over %s (incomplete)", x)
+ return emptyNode
+
+ case *Bottom:
+ // TODO: this is a bit messy. In some cases errors are already added
+ // and in some cases not. Not a huge deal, as errors will be uniqued
+ // down the line, but could be better.
+ c.AddBottom(nv)
+ return emptyNode
+
+ case *Vertex:
+ if node == nil {
+ panic("unexpected markers with nil node")
+ }
+
+ default:
+ if kind := v.Kind(); kind&StructKind != 0 {
+ c.addErrf(IncompleteError, pos(x),
+ "cannot range over %s (incomplete type %s)", x, kind)
+ return emptyNode
+
+ } else if !ok {
+ c.addErrf(0, pos(x), // TODO(error): better message.
+ "cannot range over %s (found %s, want list or struct)",
+ x.Source(), v.Kind())
+ return emptyNode
+ }
+ }
+
+ return node
+}
+
func (x *ForClause) yield(s *compState) {
c := s.ctx
- n := c.node(x, x.Src, true, conjuncts)
- if n.status == evaluating && !n.LockArcs {
- c.AddBottom(&Bottom{
- Code: CycleError,
- ForCycle: true,
- Value: n,
- Err: errors.Newf(pos(x.Src), "comprehension source references itself"),
- })
- return
- }
- if c.HasErr() {
- return
+ n := c.forSource(x.Src)
+
+ if c.isDevVersion() {
+ if s := n.getState(c); s != nil {
+ s.freeze(fieldSetKnown)
+ }
+ } else {
+ if n.status == evaluating && !n.LockArcs {
+ c.AddBottom(&Bottom{
+ Code: CycleError,
+ ForCycle: true,
+ Value: n,
+ Err: errors.Newf(pos(x.Src), "comprehension source references itself"),
+ })
+ return
+ }
+ if c.HasErr() {
+ return
+ }
+ n.LockArcs = true
}
- n.LockArcs = true
+
for _, a := range n.Arcs {
if !a.Label.IsRegular() {
continue
}
- if !a.isDefined() {
- a.Finalize(c)
- switch a.ArcType {
- case ArcMember:
- case ArcRequired:
- c.AddBottom(newRequiredFieldInComprehensionError(c, x, a))
- continue
- default:
+
+ if c.isDevVersion() {
+ c.require(a, arcTypeKnown)
+ } else {
+ if !a.isDefined() {
+ a.Finalize(c)
+ }
+ if !a.definitelyExists() {
continue
}
}
- if !a.definitelyExists() {
+ switch a.ArcType {
+ case ArcMember:
+ case ArcRequired:
+ c.AddBottom(newRequiredFieldInComprehensionError(c, x, a))
+ continue
+ default:
continue
}
@@ -1943,7 +2059,7 @@ func (x *IfClause) Source() ast.Node {
func (x *IfClause) yield(s *compState) {
ctx := s.ctx
- if ctx.BoolValue(ctx.value(x.Condition, s.state)) {
+ if ctx.BoolValue(ctx.value(x.Condition, require(s.state, scalarKnown))) {
s.yield(ctx.e)
}
}
diff --git a/vendor/cuelang.org/go/internal/core/adt/fields.go b/vendor/cuelang.org/go/internal/core/adt/fields.go
index 29e33a09..2041ba5a 100644
--- a/vendor/cuelang.org/go/internal/core/adt/fields.go
+++ b/vendor/cuelang.org/go/internal/core/adt/fields.go
@@ -14,6 +14,10 @@
package adt
+import (
+ "fmt"
+)
+
// This file holds the logic for the insertion of fields and pattern
// constraints, including tracking closedness.
//
@@ -153,6 +157,16 @@ type closeContext struct {
// Used to recursively insert Vertices.
parent *closeContext
+ dependencies []*ccDep // For testing only. See debug.go
+
+ // externalDeps lists the closeContexts associated with a root node for
+ // which there are outstanding decrements (can only be NOTIFY or ARC). This
+ // is used to break counter cycles, if necessary.
+ //
+ // This is only used for root closedContext and only for debugging.
+ // TODO: move to nodeContext.
+ externalDeps []ccArcRef
+
// child links to a sequence which additional patterns need to be verified
// against (&&). If there are more than one, these additional nodes are
// linked with next. Only closed nodes with patterns are added. Arc sets are
@@ -174,6 +188,19 @@ type closeContext struct {
// definition.
isDef bool
+ // hasEllipsis indicates whether the node contains an ellipsis.
+ hasEllipsis bool
+
+ // hasTop indicates a node has at least one top conjunct.
+ hasTop bool
+
+ // hasNonTop indicates a node has at least one conjunct that is not top.
+ hasNonTop bool
+
+ // isClosedOnce is true if this closeContext is the result of calling the
+ // close builtin.
+ isClosedOnce bool
+
// isEmbed indicates whether the closeContext is created as part of an
// embedding.
isEmbed bool
@@ -189,7 +216,36 @@ type closeContext struct {
// values.
isTotal bool
- Arcs []*Vertex // TODO: also link to parent.src Vertex?
+ // done is true if all dependencies have been decremented.
+ done bool
+
+ // isDecremented is used to keep track of whether the evaluator decremented
+ // a closedContext for the ROOT depKind.
+ isDecremented bool
+
+ // needsCloseInSchedule is non-nil if a closeContext that was created
+ // as an arc still needs to be decremented. It points to the creating arc
+ // for reporting purposes.
+ needsCloseInSchedule *closeContext
+
+ // parentConjuncts represent the parent of this embedding or definition.
+ // Any closeContext is represented by a ConjunctGroup in parent of the
+ // expression tree.
+ parentConjuncts conjunctGrouper
+ // TODO: Only needed if more than one conjuncts.
+
+ // arcs represents closeContexts for sub fields and notification targets
+ // associated with this node that reflect the same point in the expression
+ // tree as this closeContext. In both cases the are keyed by Vertex.
+ arcs []ccArc
+
+ // parentIndex is the position in the parent's arcs slice that corresponds
+ // to this closeContext. This is currently unused. The intention is to use
+ // this to allow groups with single elements (which will be the majority)
+ // to be represented in place in the parent.
+ parentIndex int
+
+ group *ConjunctGroup
// Patterns contains all patterns of the current closeContext.
// It is used in the construction of Expr.
@@ -202,6 +258,165 @@ type closeContext struct {
Expr Value
}
+// Label is a convenience function to return the label of the associated Vertex.
+func (c *closeContext) Label() Feature {
+ return c.src.Label
+}
+
+type ccArc struct {
+ kind depKind
+ decremented bool
+ key *closeContext
+ cc *closeContext
+}
+
+// A ccArcRef x refers to the x.src.arcs[x.index].
+// We use this instead of pointers, because the address may change when
+// growing a slice. We use this instead mechanism instead of a pointers so
+// that we do not need to maintain separate free buffers once we use pools of
+// closeContext.
+type ccArcRef struct {
+ src *closeContext
+ index int
+}
+
+type conjunctGrouper interface {
+ assignConjunct(root *closeContext, c Conjunct, check, checkClosed bool) (arc *closeContext, pos int, added bool)
+}
+
+func (n *nodeContext) getArc(f Feature, mode ArcType) (arc *Vertex, isNew bool) {
+ v := n.node
+ for _, a := range v.Arcs {
+ if a.Label == f {
+ if f.IsLet() {
+ a.MultiLet = true
+ // TODO: add return here?
+ }
+ a.updateArcType(mode)
+ return a, false
+ }
+ }
+
+ arc = &Vertex{
+ Parent: v,
+ Label: f,
+ ArcType: mode,
+ nonRooted: v.IsDynamic || v.Label.IsLet() || v.nonRooted,
+ }
+ if n.scheduler.frozen&fieldSetKnown != 0 {
+ b := n.ctx.NewErrf("field %v not allowed by earlier comprehension or reference cycle", f)
+ n.ctx.AddBottom(b)
+ // This may panic for list arithmetic. Safer to leave out for now.
+ arc.ArcType = ArcNotPresent
+ }
+ v.Arcs = append(v.Arcs, arc)
+ return arc, true
+}
+
+func (v *Vertex) assignConjunct(root *closeContext, c Conjunct, check, checkClosed bool) (a *closeContext, pos int, added bool) {
+ // TODO: consider clearing CloseInfo.cc.
+ // c.CloseInfo.cc = nil
+
+ arc := root.src
+
+ pos = len(arc.Conjuncts)
+
+ added = !check || !arc.hasConjunct(c)
+ if added {
+ c.CloseInfo.cc = root
+ arc.addConjunctUnchecked(c)
+ }
+
+ return root, pos, added
+}
+
+func (cc *closeContext) getKeyedCC(key *closeContext, c CycleInfo, checkClosed bool) *closeContext {
+ for _, a := range cc.arcs {
+ if a.key == key {
+ return a.cc
+ }
+ }
+
+ group := &ConjunctGroup{}
+
+ if cc.parentConjuncts == cc {
+ panic("parent is self")
+ }
+
+ parent, pos, _ := cc.parentConjuncts.assignConjunct(key, Conjunct{
+ CloseInfo: CloseInfo{
+ FromDef: cc.isDef,
+ FromEmbed: cc.isEmbed,
+ CycleInfo: c,
+ },
+ x: group,
+ }, false, checkClosed)
+
+ arc := &closeContext{
+ parent: parent,
+ parentConjuncts: parent,
+ parentIndex: pos,
+
+ src: key.src,
+ group: group,
+
+ isDef: cc.isDef,
+ isEmbed: cc.isEmbed,
+ needsCloseInSchedule: cc,
+ }
+
+ arc.parent.incDependent(PARENT, arc)
+
+ // If the parent, w.r.t. the subfield relation was already processed,
+ // there is no need to register the notification.
+ arc.incDependent(EVAL, cc) // matched in REF(decrement:nodeDone)
+
+ // A let field never depends on its parent. So it is okay to filter here.
+ if !arc.Label().IsLet() {
+ // prevent a dependency on self.
+ if key.src != cc.src {
+ cc.addDependency(ARC, key, arc, key)
+ }
+ }
+
+ v := key.src
+ if checkClosed && v.Parent != nil && v.Parent.state != nil {
+ v.Parent.state.checkArc(cc, v)
+ }
+
+ return arc
+}
+
+func (cc *closeContext) linkNotify(dst *Vertex, key *closeContext, c CycleInfo) bool {
+ for _, a := range cc.arcs {
+ if a.key == key {
+ return false
+ }
+ }
+
+ cc.addDependency(NOTIFY, key, key, dst.cc)
+ return true
+}
+
+func (cc *closeContext) assignConjunct(root *closeContext, c Conjunct, check, checkClosed bool) (arc *closeContext, pos int, added bool) {
+ arc = cc.getKeyedCC(root, c.CloseInfo.CycleInfo, checkClosed)
+
+ pos = len(*arc.group)
+
+ c.CloseInfo.cc = nil
+ added = !check || !hasConjunct(*arc.group, c)
+ if added {
+ c.CloseInfo.cc = arc
+
+ if c.CloseInfo.cc.src != arc.src {
+ panic("Inconsistent src")
+ }
+ *arc.group = append(*arc.group, c)
+ }
+
+ return arc, pos, added
+}
+
// spawnCloseContext wraps the closeContext in c with a new one and returns
// this new context along with an updated CloseInfo. The new values reflect
// that the set of fields represented by c are now, for instance, enclosed in
@@ -209,12 +424,19 @@ type closeContext struct {
//
// This call is used when preparing ADT values for evaluation.
func (c CloseInfo) spawnCloseContext(t closeNodeType) (CloseInfo, *closeContext) {
- c.cc.incDependent()
+ cc := c.cc
+ if cc == nil {
+ panic("nil closeContext")
+ }
c.cc = &closeContext{
- parent: c.cc,
+ parent: cc,
+ src: cc.src,
+ parentConjuncts: cc,
}
+ cc.incDependent(PARENT, c.cc) // REF(decrement: spawn)
+
switch t {
case closeDef:
c.cc.isDef = true
@@ -225,32 +447,109 @@ func (c CloseInfo) spawnCloseContext(t closeNodeType) (CloseInfo, *closeContext)
return c, c.cc
}
+// addDependency adds a dependent arc to c. If child is an arc, child.src == key
+func (c *closeContext) addDependency(kind depKind, key, child, root *closeContext) {
+ // NOTE: do not increment
+ // - either root closeContext or otherwise resulting from sub closeContext
+ // all conjuncts will be added now, notified, or scheduled as task.
+
+ child.incDependent(kind, c) // matched in decDependent REF(arcs)
+
+ for _, a := range c.arcs {
+ if a.key == key {
+ panic("addArc: Label already exists")
+ }
+ }
+
+ // TODO: this tests seems sensible, but panics. Investigate what could
+ // trigger this.
+ // if child.src.Parent != c.src {
+ // panic("addArc: inconsistent parent")
+ // }
+ if child.src.cc != root.src.cc {
+ panic("addArc: inconsistent root")
+ }
+ c.arcs = append(c.arcs, ccArc{
+ kind: kind,
+ key: key,
+ cc: child,
+ })
+ root.externalDeps = append(root.externalDeps, ccArcRef{
+ src: c,
+ index: len(c.arcs) - 1,
+ })
+}
+
// incDependent needs to be called for any conjunct or child closeContext
// scheduled for c that is queued for later processing and not scheduled
// immediately.
-func (c *closeContext) incDependent() {
+func (c *closeContext) incDependent(kind depKind, dependant *closeContext) (debug *ccDep) {
+ if c.src == nil {
+ panic("incDependent: unexpected nil state")
+ }
+
+ debug = c.addDependent(kind, dependant)
+
+ if c.done {
+ ctx := c.src.state.ctx
+ openDebugGraph(ctx, c.src, "incDependent: already checked")
+
+ panic(fmt.Sprintf("incDependent: already closed: %p", c))
+ }
+
c.conjunctCount++
+ return debug
}
// decDependent needs to be called for any conjunct or child closeContext for
// which a corresponding incDependent was called after it has been successfully
// processed.
-func (c *closeContext) decDependent(n *nodeContext) {
+func (c *closeContext) decDependent(ctx *OpContext, kind depKind, dependant *closeContext) {
+ v := c.src
+
+ c.matchDecrement(v, kind, dependant)
+
+ if c.conjunctCount == 0 {
+ panic(fmt.Sprintf("negative reference counter %d %p", c.conjunctCount, c))
+ }
+
c.conjunctCount--
if c.conjunctCount > 0 {
return
}
- c.finalizePattern(n)
+ c.done = true
+
+ p := c.parent
- if c.isDef {
+ if c.isDef && !c.hasEllipsis && (!c.hasTop || c.hasNonTop) {
c.isClosed = true
+ if p != nil {
+ p.isDef = true
+ }
}
- p := c.parent
+ if c.isClosedOnce {
+ c.isClosed = true
+ if p != nil {
+ p.isClosedOnce = true
+ }
+ }
+
+ for i, a := range c.arcs {
+ cc := a.cc
+ if a.decremented {
+ continue
+ }
+ c.arcs[i].decremented = true
+ cc.decDependent(ctx, a.kind, c) // REF(arcs)
+ }
+
+ c.finalizePattern()
+
if p == nil {
// Root pattern, set allowed patterns.
- if pcs := n.node.PatternConstraints; pcs != nil {
+ if pcs := v.PatternConstraints; pcs != nil {
if pcs.Allowed != nil {
panic("unexpected allowed set")
}
@@ -260,10 +559,20 @@ func (c *closeContext) decDependent(n *nodeContext) {
return
}
+ if c.hasEllipsis {
+ p.hasEllipsis = true
+ }
+ if c.hasTop {
+ p.hasTop = true
+ }
+ if c.hasNonTop {
+ p.hasNonTop = true
+ }
+
if !c.isEmbed && c.isClosed {
// Merge the two closeContexts and ensure that the patterns and fields
// are mutually compatible according to the closedness rules.
- injectClosed(n, c, p)
+ injectClosed(ctx, c, p)
p.Expr = mergeConjunctions(p.Expr, c.Expr)
} else {
// Do not check closedness of fields for embeddings.
@@ -272,7 +581,17 @@ func (c *closeContext) decDependent(n *nodeContext) {
p.linkPatterns(c)
}
- p.decDependent(n)
+ p.decDependent(ctx, PARENT, c) // REF(decrement: spawn)
+
+ // If we have started decrementing a child closeContext, the parent started
+ // as well. If it is still marked as needing an EVAL decrement, which can
+ // happen if processing started before the node was added, it is safe to
+ // decrement it now. In this case the NOTIFY and ARC dependencies will keep
+ // the nodes alive until they can be completed.
+ if dep := p.needsCloseInSchedule; dep != nil {
+ p.needsCloseInSchedule = nil
+ p.decDependent(ctx, EVAL, dep)
+ }
}
// linkPatterns merges the patterns of child into c, if needed.
@@ -283,107 +602,150 @@ func (c *closeContext) linkPatterns(child *closeContext) {
}
}
+// checkArc validates that the node corresponding to cc allows a field with
+// label v.Label.
+func (n *nodeContext) checkArc(cc *closeContext, v *Vertex) *Vertex {
+ f := v.Label
+ ctx := n.ctx
+
+ if f.IsHidden() || f.IsLet() {
+ return v
+ }
+
+ if cc.isClosed && !matchPattern(ctx, cc.Expr, f) {
+ ctx.notAllowedError(n.node, v)
+ }
+ if n.scheduler.frozen&fieldSetKnown != 0 {
+ for _, a := range n.node.Arcs {
+ if a.Label == f {
+ return v
+ }
+ }
+ var b *Bottom
+ // TODO: include cycle data and improve error message.
+ if f.IsInt() {
+ b = ctx.NewErrf(
+ "element at index %v not allowed by earlier comprehension or reference cycle", f)
+ } else {
+ b = ctx.NewErrf(
+ "field %v not allowed by earlier comprehension or reference cycle", f)
+ }
+ v.SetValue(ctx, b)
+ }
+
+ return v
+}
+
+// insertConjunct inserts conjunct c into cc.
+func (cc *closeContext) insertConjunct(key *closeContext, c Conjunct, id CloseInfo, check, checkClosed bool) bool {
+ arc, _, added := cc.assignConjunct(key, c, check, checkClosed)
+ if key.src != arc.src {
+ panic("inconsistent src")
+ }
+
+ if !added {
+ return false
+ }
+
+ if n := key.src.state; n != nil {
+ c.CloseInfo.cc = nil
+ id.cc = arc
+ n.scheduleConjunct(c, id)
+
+ for _, rec := range n.notify {
+ if n.node.ArcType == ArcPending {
+ panic("unexpected pending arc")
+ }
+ cc.insertConjunct(rec.cc, c, id, check, checkClosed)
+ }
+ }
+
+ return true
+}
+
// insertArc inserts conjunct c into n. If check is true it will not add c if it
// was already added.
-func (n *nodeContext) insertArc(f Feature, mode ArcType, c Conjunct, check bool) {
+// Returns the arc of n.node with label f.
+func (n *nodeContext) insertArc(f Feature, mode ArcType, c Conjunct, id CloseInfo, check bool) *Vertex {
if n == nil {
panic("nil nodeContext")
}
if n.node == nil {
panic("nil node")
}
- cc := c.CloseInfo.cc
+ cc := id.cc
if cc == nil {
panic("nil closeContext")
}
- if _, isNew := n.insertArc1(f, mode, c, check); !isNew {
- return // Patterns were already added.
+ v, insertedArc := n.getArc(f, mode)
+
+ if v.ArcType == ArcNotPresent {
+ n.node.reportFieldCycleError(n.ctx, c.Source().Pos(), f)
+ return v
+ }
+
+ if !cc.insertConjunct(v.rootCloseContext(), c, id, check, true) {
+ return v
+ }
+
+ if !insertedArc {
+ return v
}
// Match and insert patterns.
if pcs := n.node.PatternConstraints; pcs != nil {
for _, pc := range pcs.Pairs {
- if matchPattern(n, pc.Pattern, f) {
+ if matchPattern(n.ctx, pc.Pattern, f) {
for _, c := range pc.Constraint.Conjuncts {
- n.insertArc1(f, mode, c, check)
+ n.addConstraint(v, mode, c, check)
}
}
}
}
-}
-
-// insertArc1 inserts conjunct c into its associated closeContext. If the
-// closeContext did not yet have a Vertex for f, it is created and it is ensured
-// that the grouping is associated with all the parent closeContexts. If it is
-// newly added to the root closeContext, the outer grouping is also added to
-// n.node, the top-level Vertex itself.
-//
-// insertArc1 is exclusively used by insertArc to insert conjuncts for regular
-// fields as well as pattern constraints.
-func (n *nodeContext) insertArc1(f Feature, mode ArcType, c Conjunct, check bool) (v *Vertex, isNew bool) {
- cc := c.CloseInfo.cc
-
- // Locate or create the arc in the current context.
- v, isNew = cc.insertArc(n, f, mode, c, check)
- if !isNew {
- return v, false
- }
- i := cc.parent
- for prev := cc; i != nil && isNew; prev, i = i, i.parent {
- vc := MakeRootConjunct(nil, v)
- vc.CloseInfo.FromDef = prev.isDef
- vc.CloseInfo.FromEmbed = prev.isEmbed
- v, isNew = i.insertArc(n, f, mode, vc, check)
- }
- if isNew && i == nil {
- n.node.Arcs = append(n.node.Arcs, v)
- }
-
- if cc.isClosed && !v.disallowedField && !matchPattern(n, cc.Expr, f) {
- n.notAllowedError(f)
- }
-
- return v, isNew
+ return v
}
-// insertArc is exclusively called from nodeContext.insertArc1 and is used to
-// insert a conjunct in closeContext, along with other conjuncts from the same
-// origin. It does not recursively insert conjuncts into parent closeContexts,
-// which is done by insertArc1.
-//
-// If check is true it will not add c if it was already added.
-func (cc *closeContext) insertArc(
- n *nodeContext, f Feature, mode ArcType, c Conjunct, check bool) (v *Vertex, isNew bool) {
- c.CloseInfo.cc = nil
-
- for _, a := range cc.Arcs {
- if a.Label != f {
- continue
- }
-
- if f.IsLet() {
- a.MultiLet = true
- return a, false
- }
- if check {
- a.AddConjunct(c)
- } else {
- a.addConjunctUnchecked(c)
- }
- // TODO: possibly add positions to error.
- return a, false
- }
+// addConstraint adds a constraint to arc of n.
+//
+// In order to resolve LabelReferences, it is not always possible to walk up
+// the parent Vertex chain to determan the label, because a label reference
+// may point past a point of referral. For instance,
+//
+// test: [ID=_]: name: ID
+// test: A: {}
+// B: test.A & {} // B.name should be "A", not "B".
+//
+// The arc must be the node arc to which the conjunct is added.
+func (n *nodeContext) addConstraint(arc *Vertex, mode ArcType, c Conjunct, check bool) {
+ // TODO(perf): avoid cloning the Environment, if:
+ // - the pattern constraint has no LabelReference
+ // (require compile-time support)
+ // - there are no references in the conjunct pointing to this node.
+ // - consider adding this value to the Conjunct struct
+ f := arc.Label
+ bulkEnv := *c.Env
+ bulkEnv.DynamicLabel = f
+ c.Env = &bulkEnv
+
+ // TODO(constraintNode): this should ideally be
+ // cc := id.cc
+ // or
+ // cc := c.CloseInfo.cc.src.cc
+ //
+ // Where id is the closeContext corresponding to the field, or the root
+ // context. But it is a bit hard to figure out how to account for this, as
+ // either this information is not available or the root context results in
+ // errors for the other use of addConstraint. For this reason, we keep
+ // things symmetric for now and will keep things as is, just avoiding the
+ // closedness check.
+ cc := c.CloseInfo.cc
- v = &Vertex{Parent: cc.src, Label: f, ArcType: mode}
- if mode == ArcPending {
- cc.src.hasPendingArc = true
- }
- v.Conjuncts = append(v.Conjuncts, c)
- cc.Arcs = append(cc.Arcs, v)
+ arc, _ = n.getArc(f, mode)
- return v, true
+ root := arc.rootCloseContext()
+ cc.insertConjunct(root, c, c.CloseInfo, check, false)
}
func (n *nodeContext) insertPattern(pattern Value, c Conjunct) {
@@ -405,9 +767,9 @@ func (n *nodeContext) insertPattern(pattern Value, c Conjunct) {
// from arcs and patterns are grouped under the same vertex.
// TODO: verify. See test Pattern 1b
for _, a := range n.node.Arcs {
- if matchPattern(n, pattern, a.Label) {
+ if matchPattern(n.ctx, pattern, a.Label) {
// TODO: is it necessary to check for uniqueness here?
- n.insertArc(a.Label, a.ArcType, c, true)
+ n.addConstraint(a, a.ArcType, c, true)
}
}
@@ -448,21 +810,23 @@ func isTotal(p Value) bool {
// It first ensures that the fields contained in dst are allowed by the fields
// and patterns defined in closed. It reports an error in the nodeContext if
// this is not the case.
-func injectClosed(n *nodeContext, closed, dst *closeContext) {
+func injectClosed(ctx *OpContext, closed, dst *closeContext) {
// TODO: check that fields are not void arcs.
outer:
- for _, a := range dst.Arcs {
- for _, b := range closed.Arcs {
- if a.Label == b.Label {
- if b.disallowedField {
- // Error was already reported.
- break
- }
+ for _, a := range dst.arcs {
+ ca := a.cc
+ f := ca.Label()
+ if f.IsHidden() || f.IsLet() {
+ continue
+ }
+ for _, b := range closed.arcs {
+ cb := b.cc
+ if f == cb.Label() {
continue outer
}
}
- if !matchPattern(n, closed.Expr, a.Label) {
- n.notAllowedError(a.Label)
+ if !matchPattern(ctx, closed.Expr, ca.Label()) {
+ ctx.notAllowedError(closed.src, ca.src)
continue
}
}
@@ -480,16 +844,45 @@ outer:
}
}
-// notAllowedError reports a "field not allowed" error in n and sets the value
+func (ctx *OpContext) addPositions(c Conjunct) {
+ if x, ok := c.x.(*ConjunctGroup); ok {
+ for _, c := range *x {
+ ctx.addPositions(c)
+ }
+ }
+ if pos := c.Field(); pos != nil {
+ ctx.AddPosition(pos)
+ }
+}
+
+// notAllowedError reports a field not allowed error in n and sets the value
// for arc f to that error.
-func (n *nodeContext) notAllowedError(f Feature) {
- // Set the error on the same arc as the old implementation
- // and using the same path.
- arc := n.node.Lookup(f)
- v := n.ctx.PushArc(arc)
- n.node.SetValue(n.ctx, n.ctx.NewErrf("field not allowed"))
- arc.disallowedField = true // Is this necessary?
- n.ctx.PopArc(v)
+func (ctx *OpContext) notAllowedError(v, arc *Vertex) {
+ defer ctx.PopArc(ctx.PushArc(arc))
+
+ defer ctx.ReleasePositions(ctx.MarkPositions())
+
+ for _, c := range arc.Conjuncts {
+ ctx.addPositions(c)
+ }
+ // XXX(0.7): Find another way to get this provenance information. Not
+ // currently stored in new evaluator.
+ // for _, s := range x.Structs {
+ // s.AddPositions(ctx)
+ // }
+
+ if arc.ArcType == ArcPending {
+ arc.ArcType = ArcNotPresent
+ return
+ }
+ // TODO: setting arc instead of n.node eliminates subfields. This may be
+ // desirable or not, but it differs, at least from <=v0.6 behavior.
+ arc.SetValue(ctx, ctx.NewErrf("field not allowed"))
+
+ // TODO: remove? We are now setting it on both fields, which seems to be
+ // necessary for now. But we should remove this as it often results in
+ // a duplicate error.
+ // v.SetValue(ctx, ctx.NewErrf("field not allowed"))
// TODO: create a special kind of error that gets the positions
// of the relevant locations upon request from the arc.
@@ -533,7 +926,7 @@ func mergeConjunctions(a, b Value) Value {
// children is closed, the result will be a conjunction of all these closed
// values. Otherwise it will be a disjunction of all its children. A nil value
// represents all values.
-func (c *closeContext) finalizePattern(n *nodeContext) {
+func (c *closeContext) finalizePattern() {
switch {
case c.Expr != nil: // Patterns and expression are already set.
if !c.isClosed {
diff --git a/vendor/cuelang.org/go/internal/core/adt/sched.go b/vendor/cuelang.org/go/internal/core/adt/sched.go
new file mode 100644
index 00000000..d4028cfa
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/sched.go
@@ -0,0 +1,713 @@
+// Copyright 2023 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+import (
+ "math/bits"
+)
+
+// The CUE scheduler schedules tasks for evaluation.
+//
+// A task is a computation unit associated with a single node. Each task may
+// depend on knowing certain properties of one or more fields, namely:
+//
+// - whether the field exists
+// - the scalar value of a field, if any
+// - the set of all conjuncts
+// - the set of all sub fields
+// - the recursively evaluated value
+//
+// Each task, in turn, may mark itself as providing knowledge about one or more
+// of these properties. If it is not known upfront whether a task may contribute
+// to a certain property, it must mark itself as (potentially) contributing to
+// this property.
+//
+//
+// DEPENDENCY GRAPH
+//
+// A task may depend on zero or more fields, including the field for which it
+// is defined. The graph of all dependencies is defined as follows:
+//
+// - Each task and each pair is a node in the graph.
+// - A task T for field F that (possibly) computes property P for F is
+// represented by an edge from to T.
+// - A task T for field F that depends on property P of field G is represented
+// by an edge from to T.
+//
+// It is an evaluation cycle for a task T if there is a path from any task T to
+// itself in the dependency graph. Processing will stop in the even of such a
+// cycle. In such case, the scheduler will commence an unblocking mechanism.
+//
+// As a general rule, once a node is detected to be blocking, it may no longer
+// become more specific. In other words, it is "frozen".
+// The unblocking consists of two phases: the scheduler will first freeze and
+// unblock all blocked nodes for the properties marked as autoUnblock-ing in
+// taskContext. Subsequently all tasks that are unblocked by this will run.
+// In the next phase all remaining tasks are unblocked.
+// See taskContext.autoUnblock for more information.
+//
+// Note that some tasks, like references, may depend on other fields without
+// requiring a certain property. These do not count as dependencies.
+
+// A taskContext manages the task memory and task stack.
+// It is typically associated with an OpContext.
+type taskContext struct {
+ // stack tracks the current execution of tasks. This is a stack as tasks
+ // may trigger the evaluation of other tasks to complete.
+ stack []*task
+
+ // blocking lists all tasks that were blocked during a round of evaluation.
+ // Evaluation finalized one node at a time, which includes the evaluation
+ // of all nodes necessary to evaluate that node. Any task that is blocked
+ // during such a round of evaluation is recorded here. Any mutual cycles
+ // will result in unresolved tasks. At the end of such a round, computation
+ // can be frozen and the tasks unblocked.
+ blocking []*task
+
+ // counterMask marks which conditions use counters. Other conditions are
+ // handled by signals only.
+ counterMask condition
+
+ // autoUnblock marks the flags that get unblocked automatically when there
+ // is a deadlock between nodes. These are properties that may become
+ // meaningful once it is known that a value may not become more specific.
+ // An example of this is the property "scalar". If something is not a scalar
+ // yet, and it is known that the value may never become more specific, it is
+ // known that this value is never will become a scalar, thus effectively
+ // making it known.
+ autoUnblock condition
+
+ // This is called upon completion of states, allowing other states to be
+ // updated atomically.
+ complete func(s *scheduler) condition
+}
+
+func (p *taskContext) current() *task {
+ return p.stack[len(p.stack)-1]
+}
+
+func (p *taskContext) pushTask(t *task) {
+ p.stack = append(p.stack, t)
+}
+
+func (p *taskContext) popTask() {
+ p.stack = p.stack[:len(p.stack)-1]
+}
+
+func (p *taskContext) newTask() *task {
+ // TODO: allocate from pool.
+ return &task{}
+}
+
+type taskState uint8
+
+const (
+ taskREADY taskState = iota
+
+ taskRUNNING // processing conjunct(s)
+ taskWAITING // task is blocked on a property of an arc to hold
+ taskSUCCESS
+ taskFAILED
+)
+
+type schedState uint8
+
+const (
+ schedREADY schedState = iota
+
+ schedRUNNING // processing conjunct(s)
+ schedFINALIZING // all tasks completed, run new tasks immediately
+ schedSUCCESS
+ schedFAILED
+)
+
+func (s schedState) done() bool { return s >= schedSUCCESS }
+
+func (s taskState) String() string {
+ switch s {
+ case taskREADY:
+ return "READY"
+ case taskRUNNING:
+ return "RUNNING"
+ case taskWAITING:
+ return "WAITING"
+ case taskSUCCESS:
+ return "SUCCESS"
+ case taskFAILED:
+ return "FAILED"
+ default:
+ return "UNKNOWN"
+ }
+}
+
+func (s schedState) String() string {
+ switch s {
+ case schedREADY:
+ return "READY"
+ case schedRUNNING:
+ return "RUNNING"
+ case schedFINALIZING:
+ return "FINALIZING"
+ case schedSUCCESS:
+ return "SUCCESS"
+ case schedFAILED:
+ return "FAILED"
+ default:
+ return "UNKNOWN"
+ }
+}
+
+// runMode indicates how to proceed after a condition could not be met.
+type runMode uint8
+
+const (
+ // ignore indicates that the new evaluator should not do any processing.
+ // This is mostly used in the transition from old to new evaluator and
+ // should probably eventually be removed.
+ ignore runMode = 1 + iota
+
+ // attemptOnly indicates that execution should continue even if the
+ // condition is not met.
+ attemptOnly
+
+ // yield means that execution should be yielded if the condition is not met.
+ // That is, the task is marked as a dependency and control is returned to
+ // the runloop. The task will resume once the dependency is met.
+ yield
+
+ // finalize means that uncompleted tasks should be turned into errors to
+ // complete the evaluation of a Vertex.
+ finalize
+)
+
+func (r runMode) String() string {
+ switch r {
+ case ignore:
+ return "ignore"
+ case attemptOnly:
+ return "attemptOnly"
+ case yield:
+ return "yield"
+ case finalize:
+ return "finalize"
+ }
+ return "unknown"
+}
+
+// condition is a bit mask of states that a task may depend on.
+//
+// There are generally two types of states: states that are met if all tasks
+// that contribute to that state are completed (counter states), and states that
+// are met if some global set of conditions are met.
+type condition uint16
+
+const (
+ // allKnown indicates that all possible states are completed.
+ allKnown condition = 0x7fff
+
+ // neverKnown is a special condition that is never met. It can be used to
+ // mark a task as impossible to complete.
+ neverKnown condition = 0x8000
+)
+
+func (c condition) meets(x condition) bool {
+ return c&x == x
+}
+
+const numCompletionStates = 10 // TODO: make this configurable
+
+// A scheduler represents the set of outstanding tasks for a node.
+type scheduler struct {
+ ctx *OpContext
+ node *nodeContext
+
+ state schedState
+
+ // completed is bit set of completed states.
+ completed condition
+
+ // needs specifies all the states needed to complete tasks in this scheduler.
+ needs condition
+
+ // provided specifies all the states that are provided by tasks added
+ // to this scheduler.
+ provided condition // TODO: rename to "provides"? To be consistent with "needs".
+
+ // frozen indicates all states that are frozen. These bits should be checked
+ // before making a node more specific.
+ // TODO: do we need a separate field for this, or can we use completed?
+ frozen condition
+
+ // isFrozen indicates if freeze was called explicitly.
+ //
+ // TODO: rename to isExplicitlyFrozen if it turns out we need both frozen
+ // and isFrozen. We probably do not. Check once the implementation of the
+ // new evaluator is complete.
+ isFrozen bool
+
+ // counters keeps track of the number of uncompleted tasks that are
+ // outstanding for each of the possible conditions. A state is
+ // considered completed if the corresponding counter reaches zero.
+ counters [numCompletionStates]int
+
+ // tasks lists all tasks that were scheduled for this scheduler.
+ // The list only contains tasks that are associated with this node.
+ // TODO: rename to queue and taskPos to nextQueueIndex.
+ tasks []*task
+ taskPos int
+
+ // blocking is a list of tasks that are blocked on the completion of
+ // the indicate conditions. This can hold tasks from other nodes or tasks
+ // originating from this node itself.
+ blocking []*task
+}
+
+func (s *scheduler) clear() {
+ // TODO(perf): free tasks into task pool
+
+ *s = scheduler{
+ ctx: s.ctx,
+ tasks: s.tasks[:0],
+ blocking: s.blocking[:0],
+ }
+}
+
+// cloneInto initializes the state of dst to be the same as s.
+//
+// NOTE: this is deliberately not a pointer receiver: this approach allows
+// cloning s into dst while preserving the buffers of dst and not having to
+// explicitly clone any non-buffer fields.
+func (s scheduler) cloneInto(dst *scheduler) {
+ s.tasks = append(dst.tasks, s.tasks...)
+ s.blocking = append(dst.blocking, s.blocking...)
+
+ *dst = s
+}
+
+// incrementCounts adds the counters for each condition.
+// See also decrementCounts.
+func (s *scheduler) incrementCounts(x condition) {
+ x &= s.ctx.counterMask
+
+ for {
+ n := bits.TrailingZeros16(uint16(x))
+ if n == 16 {
+ break
+ }
+ bit := condition(1 << n)
+ x &^= bit
+
+ s.counters[n]++
+ }
+}
+
+// decrementCounts decrements the counters for each condition. If a counter for
+// a condition reaches zero, it means that condition is met and all blocking
+// tasks depending on that state can be run.
+func (s *scheduler) decrementCounts(x condition) {
+ x &= s.ctx.counterMask
+
+ var completed condition
+ for {
+ n := bits.TrailingZeros16(uint16(x))
+ if n == 16 {
+ break
+ }
+ bit := condition(1 << n)
+ x &^= bit
+
+ s.counters[n]--
+ if s.counters[n] == 0 {
+ completed |= bit
+ }
+ }
+
+ s.signal(completed)
+}
+
+// finalize runs all tasks and signals that the scheduler is done upon
+// completion for the given signals.
+func (s *scheduler) finalize(completed condition) {
+ // Do not panic on cycle detection. Instead, post-process the tasks
+ // by collecting and marking cycle errors.
+ s.process(allKnown, finalize)
+ s.signal(completed)
+ if s.state == schedRUNNING {
+ if s.meets(s.needs) {
+ s.state = schedSUCCESS
+ } else {
+ s.state = schedFAILED
+ }
+ }
+}
+
+// process advances a scheduler by executing tasks that are required.
+// Depending on mode, if the scheduler is blocked on a condition, it will
+// forcefully unblock the tasks.
+func (s *scheduler) process(needs condition, mode runMode) bool {
+ c := s.ctx
+
+ // Update completions, if necessary.
+ if f := c.taskContext.complete; f != nil {
+ s.signal(f(s))
+ }
+
+ if Debug && len(s.tasks) > 0 {
+ if v := s.tasks[0].node.node; v != nil {
+ c.nest++
+ c.Logf(v, "START Process %v -- mode: %v", v.Label, mode)
+ defer func() {
+ c.Logf(v, "END Process")
+ c.nest--
+ }()
+ }
+ }
+
+ // hasRunning := false
+ s.state = schedRUNNING
+ // Use variable instead of range, because s.tasks may grow during processes.
+
+processNextTask:
+ for s.taskPos < len(s.tasks) {
+ t := s.tasks[s.taskPos]
+ s.taskPos++
+
+ if t.state != taskREADY {
+ // TODO(perf): Figure out how it is possible to reach this and if we
+ // should optimize.
+ // panic("task not READY")
+ }
+
+ switch {
+ case t.state == taskRUNNING:
+ // TODO: we could store the current referring node that caused
+ // the cycle and then proceed up the stack to mark all tasks
+ // that are involved in the cycle as well. Further, we could
+ // mark the cycle as a generation counter, instead of a boolean
+ // value, so that it will be trivial to reconstruct a detailed cycle
+ // report when generating an error message.
+
+ case t.state != taskREADY:
+
+ default:
+ runTask(t, mode)
+ }
+ }
+
+ switch mode {
+ default: // case attemptOnly:
+ return s.meets(needs)
+
+ case yield:
+ if s.meets(needs) {
+ return true
+ }
+ c.current().waitFor(s, needs)
+ s.yield()
+ panic("unreachable")
+
+ case finalize:
+ // remainder of function
+ }
+
+unblockTasks:
+ // Unblocking proceeds in three stages. Each of the stages may cause
+ // formerly blocked tasks to become unblocked. To ensure that unblocking
+ // tasks do not happen in an order-dependent way, we want to ensure that we
+ // have unblocked all tasks from one phase, before commencing to the next.
+
+ // The types of the node can no longer be altered. We can unblock the
+ // relevant states first to finish up any tasks that were just waiting for
+ // types, such as lists.
+ for _, t := range c.blocking {
+ if t.blockedOn != nil {
+ t.blockedOn.signal(s.ctx.autoUnblock)
+ }
+ }
+
+ // Mark all remaining conditions as "frozen" before actually running the
+ // tasks. Doing this before running the remaining tasks ensures that we get
+ // the same errors, regardless of the order in which tasks are unblocked.
+ for _, t := range c.blocking {
+ if t.blockedOn != nil {
+ t.blockedOn.freeze(t.blockCondition)
+ t.unblocked = true
+ }
+ }
+
+ // Run the remaining blocked tasks.
+ numBlocked := len(c.blocking)
+ for _, t := range c.blocking {
+ if t.blockedOn != nil {
+ n, cond := t.blockedOn, t.blockCondition
+ t.blockedOn, t.blockCondition = nil, neverKnown
+ n.signal(cond)
+ runTask(t, attemptOnly) // Does this need to be final? Probably not if we do a fixed point computation.
+ }
+ }
+
+ // The running of tasks above may result in more tasks being added to the
+ // queue. Process these first before continuing.
+ if s.taskPos < len(s.tasks) {
+ goto processNextTask
+ }
+
+ // Similarly, the running of tasks may result in more tasks being blocked.
+ // Ensure we processed them all.
+ if numBlocked < len(c.blocking) {
+ goto unblockTasks
+ }
+
+ c.blocking = c.blocking[:0]
+
+ return true
+}
+
+// yield causes the current task to be suspended until the given conditions
+// are met.
+func (s *scheduler) yield() {
+ panic(s)
+}
+
+// meets reports whether all needed completion states in s are met.
+func (s *scheduler) meets(needs condition) bool {
+ if s.state != schedREADY {
+ // Automatically qualify for conditions that are not provided by this node.
+ // NOTE: in the evaluator this is generally not the case, as tasks may still
+ // be added during evaluation until all ancestor nodes are evaluated. This
+ // can be encoded by the scheduler by adding a state "ancestorsCompleted".
+ // which all other conditions depend on.
+ needs &= s.provided
+ }
+ return s.completed&needs == needs
+}
+
+// blockOn marks a state as uncompleted.
+func (s *scheduler) blockOn(cond condition) {
+ // TODO: should we allow this to be used for counters states?
+ // if s.ctx.counterMask&cond != 0 {
+ // panic("cannot block on counter states")
+ // }
+ s.provided |= cond
+}
+
+// signal causes tasks that are blocking on the given completion to be run
+// for this scheduler. Tasks are only run if the completion state was not
+// already reached before.
+func (s *scheduler) signal(completed condition) {
+ was := s.completed
+ s.completed |= completed
+ if was == s.completed {
+ s.frozen |= completed
+ return
+ }
+
+ s.completed |= s.ctx.complete(s)
+ s.frozen |= completed
+
+ // TODO: this could benefit from a linked list where tasks are removed
+ // from the list before being run.
+ for _, t := range s.blocking {
+ if t.blockCondition&s.completed == t.blockCondition {
+ // Prevent task from running again.
+ t.blockCondition = neverKnown
+ t.blockedOn = nil
+ runTask(t, attemptOnly) // TODO: does this ever need to be final?
+ // TODO: should only be run once for each blocking queue.
+ }
+ }
+}
+
+// freeze indicates no more tasks satisfying the given condition may be added.
+// It is also used to freeze certain elements of the task.
+func (s *scheduler) freeze(c condition) {
+ s.frozen |= c
+ s.completed |= c
+ s.ctx.complete(s)
+ s.isFrozen = true
+}
+
+// signalDoneAdding signals that no more tasks will be added to this scheduler.
+// This allows unblocking tasks that depend on states for which there are no
+// tasks in this scheduler.
+func (s *scheduler) signalDoneAdding() {
+ s.signal(s.needs &^ s.provided)
+}
+
+// runner defines properties of a type of task, including a function to run it.
+type runner struct {
+ name string
+
+ // The mode argument indicates whether the scheduler
+ // of this field is finalizing. It is passed as a component of the required
+ // state to various evaluation methods.
+ f func(ctx *OpContext, t *task, mode runMode)
+
+ // completes indicates which states this tasks contributes to.
+ completes condition
+
+ // needs indicates which states of the corresponding node need to be
+ // completed before this task can be run.
+ needs condition
+}
+
+type task struct {
+ state taskState
+
+ completes condition // cycles may alter the completion mask. TODO: is this still true?
+
+ // unblocked indicates this task was unblocked by force.
+ unblocked bool
+
+ // The following fields indicate what this task is blocked on, including
+ // the scheduler, which conditions it is blocking on, and the stack of
+ // tasks executed leading to the block.
+
+ blockedOn *scheduler
+ blockCondition condition
+ blockStack []*task // TODO: use; for error reporting.
+
+ err *Bottom
+
+ // The node from which this conjunct originates.
+ node *nodeContext
+
+ run *runner // TODO: use struct to make debugging easier?
+
+ // The Conjunct processed by this task.
+ env *Environment
+ id CloseInfo // TODO: rename to closeInfo?
+ x Node // The conjunct Expression or Value.
+
+ // For Comprehensions:
+ comp *envComprehension
+ leaf *Comprehension
+}
+
+func (s *scheduler) insertTask(t *task) {
+ completes := t.run.completes
+ needs := t.run.needs
+
+ s.needs |= needs
+ s.provided |= completes
+
+ if needs&completes != 0 {
+ panic("task depends on its own completion")
+ }
+ t.completes = completes
+
+ if s.state == schedFINALIZING {
+ runTask(t, finalize)
+ return
+ }
+
+ s.incrementCounts(completes)
+ if cc := t.id.cc; cc != nil {
+ // may be nil for "group" tasks, such as processLists.
+ dep := cc.incDependent(TASK, nil)
+ if dep != nil {
+ dep.taskID = len(s.tasks)
+ dep.task = t
+ }
+ }
+ s.tasks = append(s.tasks, t)
+ if s.completed&needs != needs {
+ t.waitFor(s, needs)
+ }
+}
+
+func runTask(t *task, mode runMode) {
+ ctx := t.node.ctx
+
+ switch t.state {
+ case taskSUCCESS, taskFAILED:
+ return
+ case taskRUNNING:
+ // TODO: should we mark this as a cycle?
+ }
+
+ defer func() {
+ switch r := recover().(type) {
+ case nil:
+ case *scheduler:
+ // Task must be WAITING.
+ if t.state == taskRUNNING {
+ t.state = taskSUCCESS // XXX: something else? Do we know the dependency?
+ if t.err != nil {
+ t.state = taskFAILED
+ }
+ }
+ default:
+ panic(r)
+ }
+ }()
+
+ defer ctx.PopArc(ctx.PushArc(t.node.node))
+
+ // TODO: merge these two mechanisms once we get rid of the old evaluator.
+ ctx.pushTask(t)
+ defer ctx.popTask()
+ if t.env != nil {
+ id := t.id
+ id.cc = nil // this is done to avoid struct args from passing fields up.
+ s := ctx.PushConjunct(MakeConjunct(t.env, t.x, id))
+ defer ctx.PopState(s)
+ }
+
+ t.state = taskRUNNING
+ // A task may have recorded an error on a previous try. Clear it.
+ t.err = nil
+
+ t.run.f(ctx, t, mode)
+
+ if t.state != taskWAITING {
+ t.blockedOn = nil
+ t.blockCondition = neverKnown
+
+ // TODO: always reporting errors in the current task would avoid us
+ // having to collect and assign errors here.
+ t.err = CombineErrors(nil, t.err, ctx.Err())
+ if t.err == nil {
+ t.state = taskSUCCESS
+ } else {
+ t.state = taskFAILED
+ }
+ t.node.addBottom(t.err) // TODO: replace with something more principled.
+
+ if t.id.cc != nil {
+ t.id.cc.decDependent(ctx, TASK, nil)
+ }
+ t.node.decrementCounts(t.completes)
+ t.completes = 0 // safety
+ }
+}
+
+// waitFor blocks task t until the needs for scheduler s are met.
+func (t *task) waitFor(s *scheduler, needs condition) {
+ if s.meets(needs) {
+ panic("waiting for condition that already completed")
+ }
+ // TODO: this line causes the scheduler state to fail if tasks are blocking
+ // on it. Is this desirable? At the very least we should then ensure that
+ // the scheduler where the tasks originate from will fail in that case.
+ s.needs |= needs
+
+ t.state = taskWAITING
+
+ t.blockCondition = needs
+ t.blockedOn = s
+ s.blocking = append(s.blocking, t)
+ s.ctx.blocking = append(s.ctx.blocking, t)
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/states.go b/vendor/cuelang.org/go/internal/core/adt/states.go
new file mode 100644
index 00000000..ba7eae94
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/states.go
@@ -0,0 +1,321 @@
+// Copyright 2023 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+// TODO: clean up following notes:
+
+// Used in expr.go:
+// - ctx.value (uses: nonconcrete scalar allowed, concrete scalar, concrete composite)
+// - evalState
+// - ctx.node (need to know all fields)
+// - ctx.lookup
+// - ctx.concrete
+//
+// - ctx.relLabel
+// OK: always exists
+// - ctx.relNode (upcount + unify(partial))
+// OK: node always exists.
+//
+// - ctx.evalState (in validation of comparison against bottom)
+//
+// - arc.Finalize (finalized)
+// - CompleteArcs (conjuncts)
+//
+
+// lookup in p1
+// - process remaining field todos
+
+// lookup:
+// if node is currently processing, just look up directly and create
+// field with notification.
+//
+// if node has not been processed, process once.
+//
+// Any dynamic fields should have been triggered by the existence of a new
+// arc. This will either cascade the evaluation or not.
+
+// p1: {
+// (p1.baz): "bar" // t1
+// (p1.foo): "baz" // t2
+// baz: "foo"
+// }
+//
+// -> t1 ->
+// -> t1
+// -> t2 ->
+
+// p2: {
+// (p2[p2.baz]): "bar"
+// (p2.foo): "baz"
+// baz: "qux"
+// qux: "foo"
+// }
+
+// b -> a - > b: detected cycle in b:
+//
+// xxx register expression (a-10) being processed as a post constraint.
+// add task to pending.
+// register value as waiting for scalar to be completed later.
+// return with cycle/ in complete error.
+//
+// - in b:
+// xxx register expression (b+10) as post constraint.
+// add task to pending
+// register value as waiting for scalar to be completed later.
+// 5 is processed and set
+// this completes the task in b
+// this sets a scalar in b
+// this completes the expression in a
+//
+// b: a - 10
+// a: b + 10
+// a: 5
+//
+// a: a
+// a: 5
+//
+
+// These are the condition types of the CUE evaluator. A scheduler
+// is associated with a single Vertex. So when these states refer to a Vertex,
+// it is the Vertex associated with the scheduler.
+//
+// There are core conditions and condition sets. The core conditions are
+// determined during operation as conditions are met. The condition sets are
+// used to indicate a set of required or provided conditions.
+//
+// Core conditions can be signal conditions or counter conditions. A counter
+// condition is triggered if all conjuncts that contribute to the computation
+// of this condition have been met. A signal condition is triggered as soon as
+// evidence is found that this condition is met. Unless otherwise specified,
+// conditions are counter conditions.
+const (
+ // allAncestorsProcessed indicates that all conjuncts that could be added
+ // to the Vertex by any of its ancestors have been added. In other words,
+ // all ancestor schedulers have reached the state fieldConjunctsKnown.
+ //
+ // This is a signal condition. It is explicitly set in unify when a
+ // parent meets fieldConjunctsKnown|allAncestorsProcessed.
+ allAncestorsProcessed condition = 1 << iota
+
+ // Really: all ancestor subfield tasks processed.
+
+ // arcTypeKnown means that the ArcType value of a Vertex is fully
+ // determined. The ArcType of all fields of a Vertex need to be known
+ // before the complete set of fields of this Vertex can be known.
+ arcTypeKnown
+
+ // valueKnown means that it is known what the "type" of the value would be
+ // if present.
+ valueKnown
+
+ // scalarKnown indicates that a Vertex has either a concrete scalar value or
+ // that it is known that it will never have a scalar value.
+ //
+ // This is a signal condition that is reached when:
+ // - a node is set to a concrete scalar value
+ // - a node is set to an error
+ // - or if XXXstate is reached.
+ //
+ // TODO: rename to something better?
+ scalarKnown
+
+ // listTypeKnown indicates that it is known that lists unified with this
+ // Vertex should be interpreted as integer indexed lists, as associative
+ // lists, or an error.
+ //
+ // This is a signal condition that is reached when:
+ // - allFieldsKnown is reached (all expressions have )
+ // - it is unified with an associative list type
+ listTypeKnown
+
+ // fieldConjunctsKnown means that all the conjuncts of all fields are
+ // known.
+ fieldConjunctsKnown
+
+ // fieldSetKnown means that all fields of this node are known. This is true
+ // if all tasks that can add a field have been processed and if
+ // all pending arcs have been resolved.
+ fieldSetKnown
+
+ // // allConjunctsKnown means that all conjuncts have been registered as a
+ // // task. allParentsProcessed must be true for this to be true.
+ // allConjunctsKnown
+
+ // allTasksCompleted means that all tasks of a Vertex have been completed
+ // with the exception of validation tasks. A Vertex may still not be
+ // finalized.
+ allTasksCompleted
+
+ // subFieldsProcessed means that all tasks of a Vertex, including those of
+ // its arcs have been completed.
+ //
+ // This is a signal condition that is met if all arcs have reached
+ // the state finalStateKnown.
+ //
+ subFieldsProcessed
+
+ leftOfMaxCoreCondition
+
+ finalStateKnown condition = leftOfMaxCoreCondition - 1
+
+ preValidation condition = finalStateKnown //&^ validationCompleted
+
+ conditionsUsingCounters = arcTypeKnown |
+ valueKnown |
+ fieldConjunctsKnown |
+ allTasksCompleted
+
+ // The xConjunct condition sets indicate a conjunct MAY contribute to the
+ // final result. For some conjuncts it may not be known what the
+ // contribution will be. In such cases the set that reflects all possible
+ // contributions should be used. For instance, an embedded reference may
+ // resolve to a scalar or struct.
+ //
+ // All conjunct states include allTasksCompleted.
+
+ // a genericConjunct is one for which the contributions to the states
+ // are not known in advance. For instance, an embedded reference can be
+ // anything. In such case, all conditions are included.
+ genericConjunct = allTasksCompleted |
+ scalarKnown |
+ valueKnown |
+ fieldConjunctsKnown
+
+ // a fieldConjunct is one that only adds a new field to the struct.
+ fieldConjunct = allTasksCompleted |
+ fieldConjunctsKnown
+
+ // a scalarConjunct is one that is guaranteed to result in a scalar or
+ // list value.
+ scalarConjunct = allTasksCompleted |
+ scalarKnown |
+ valueKnown
+
+ // needsX condition sets are used to indicate which conditions need to be
+ // met.
+
+ needFieldConjunctsKnown = fieldConjunctsKnown |
+ allAncestorsProcessed
+
+ needFieldSetKnown = fieldSetKnown |
+ allAncestorsProcessed
+
+ needTasksDone = allAncestorsProcessed | allTasksCompleted
+
+ // concreteKnown means that we know whether a value is concrete or not.
+ // At the moment this is equal to 'scalarKnown'.
+ concreteKnown = scalarKnown
+)
+
+// schedConfig configures a taskContext with the states needed for the
+// CUE evaluator. It is used in OpContext.New as a template for creating
+// new taskContexts.
+var schedConfig = taskContext{
+ counterMask: conditionsUsingCounters,
+ autoUnblock: listTypeKnown | scalarKnown | arcTypeKnown,
+ complete: stateCompletions,
+}
+
+// stateCompletions indicates the completion of conditions based on the
+// completions of other conditions.
+func stateCompletions(s *scheduler) condition {
+ x := s.completed
+ v := s.node.node
+ s.node.Logf("=== stateCompletions: %v %v", v.Label, s.completed)
+ if x.meets(allAncestorsProcessed) {
+ x |= conditionsUsingCounters &^ s.provided
+ // If we have a pending arc, a sub arc may still cause the arc to
+ // become not pending. For instance, if 'a' is pending in the following
+ // if x != _!_ {
+ // a: b: 1
+ // }
+ // it may still become not pending if 'b' becomes a regular arc.
+ if s.counters[arcTypeKnown] == 0 && x.meets(subFieldsProcessed) {
+ x |= arcTypeKnown
+ }
+ }
+ switch {
+ case v.ArcType == ArcMember, v.ArcType == ArcNotPresent:
+ x |= arcTypeKnown
+ case x&arcTypeKnown != 0 && v.ArcType == ArcPending:
+ v.ArcType = ArcNotPresent
+ }
+
+ if x.meets(valueKnown) {
+ // NOTE: in this case, scalarKnown is not the same as concreteKnown,
+ // especially if this arc is Pending, as it may still become concrete.
+ // We probably want to separate this out.
+ if v.ArcType == ArcMember || v.ArcType == ArcNotPresent {
+ x |= scalarKnown
+ }
+ x |= listTypeKnown
+ }
+
+ if x.meets(needFieldConjunctsKnown | needTasksDone) {
+ switch {
+ case x.meets(subFieldsProcessed):
+ x |= fieldSetKnown
+ default:
+ for _, a := range v.Arcs {
+ if a.ArcType == ArcPending {
+ return x
+ }
+ }
+ x |= fieldSetKnown
+ }
+ }
+ return x
+}
+
+// allChildConjunctsKnown indicates that all conjuncts have been added by
+// the parents and every conjunct that may add fields to subfields have been
+// processed.
+func (v *Vertex) allChildConjunctsKnown() bool {
+ if v == nil {
+ return true
+ }
+
+ return v.state.meets(fieldConjunctsKnown | allAncestorsProcessed)
+}
+
+func (n *nodeContext) scheduleTask(r *runner, env *Environment, x Node, ci CloseInfo) *task {
+ t := &task{
+ run: r,
+ node: n,
+
+ env: env,
+ id: ci,
+ x: x,
+ }
+ n.insertTask(t)
+ return t
+}
+
+// require ensures that a given condition is met for the given Vertex by
+// evaluating it. It yields execution back to the scheduler if it cannot
+// be completed at this point.
+func (c *OpContext) require(v *Vertex, needs condition) {
+ state := v.getState(c)
+ if state == nil {
+ return
+ }
+ state.process(needs, yield)
+}
+
+// scalarValue evaluates the given expression and either returns a
+// concrete value or schedules the task for later evaluation.
+func (ctx *OpContext) scalarValue(t *task, x Expr) Value {
+ return ctx.value(x, require(0, scalarKnown))
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/tasks.go b/vendor/cuelang.org/go/internal/core/adt/tasks.go
new file mode 100644
index 00000000..5de63aaa
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/tasks.go
@@ -0,0 +1,315 @@
+// Copyright 2023 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+import (
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/token"
+)
+
+var (
+ handleExpr *runner
+ handleResolver *runner
+ handleDynamic *runner
+ handlePatternConstraint *runner
+ handleComprehension *runner
+ handleListLit *runner
+ handleListVertex *runner
+ handleDisjunction *runner
+)
+
+// Use init to avoid a (spurious?) cyclic dependency in Go.
+func init() {
+ handleExpr = &runner{
+ name: "Expr",
+ f: processExpr,
+ completes: genericConjunct,
+ }
+ handleResolver = &runner{
+ name: "Resolver",
+ f: processResolver,
+ completes: genericConjunct,
+ }
+ handleDynamic = &runner{
+ name: "Dynamic",
+ f: processDynamic,
+ completes: fieldConjunct,
+ }
+ handlePatternConstraint = &runner{
+ name: "PatternConstraint",
+ f: processPatternConstraint,
+ completes: allTasksCompleted | fieldConjunctsKnown,
+ }
+ handleComprehension = &runner{
+ name: "Comprehension",
+ f: processComprehension,
+ completes: valueKnown | allTasksCompleted | fieldConjunctsKnown,
+ }
+ handleListLit = &runner{
+ name: "ListLit",
+ f: processListLit,
+ completes: fieldConjunct,
+ needs: listTypeKnown,
+ }
+ handleListVertex = &runner{
+ name: "ListVertex",
+ f: processListVertex,
+ completes: fieldConjunct,
+ needs: listTypeKnown,
+ }
+}
+
+// This file contains task runners (func(ctx *OpContext, t *task, mode runMode)).
+
+func processExpr(ctx *OpContext, t *task, mode runMode) {
+ x := t.x.(Expr)
+
+ state := combineMode(concreteKnown, mode)
+ v := ctx.evalState(x, state)
+ t.node.insertValueConjunct(t.env, v, t.id)
+}
+
+func processResolver(ctx *OpContext, t *task, mode runMode) {
+ r := t.x.(Resolver)
+
+ arc := r.resolve(ctx, oldOnly(0))
+ if arc == nil {
+ // TODO: yield instead?
+ return
+ }
+ // A reference that points to itself indicates equality. In that case
+ // we are done computing and we can return the arc as is.
+ ci, skip := t.node.markCycle(arc, t.env, r, t.id)
+ if skip {
+ return
+ }
+
+ c := MakeConjunct(t.env, t.x, ci)
+ t.node.scheduleVertexConjuncts(c, arc, ci)
+}
+
+func processDynamic(ctx *OpContext, t *task, mode runMode) {
+ n := t.node
+
+ field := t.x.(*DynamicField)
+
+ v := ctx.scalarValue(t, field.Key)
+ if v == nil {
+ return
+ }
+
+ if v.Concreteness() != Concrete {
+ n.addBottom(&Bottom{
+ Code: IncompleteError,
+ Err: ctx.NewPosf(pos(field.Key),
+ "key value of dynamic field must be concrete, found %v", v),
+ })
+ return
+ }
+
+ f := ctx.Label(field.Key, v)
+ // TODO: remove this restriction.
+ if f.IsInt() {
+ n.addErr(ctx.NewPosf(pos(field.Key), "integer fields not supported"))
+ return
+ }
+
+ c := MakeConjunct(t.env, field, t.id)
+ c.CloseInfo.cc = nil
+ n.insertArc(f, field.ArcType, c, t.id, true)
+}
+
+func processPatternConstraint(ctx *OpContext, t *task, mode runMode) {
+ n := t.node
+
+ field := t.x.(*BulkOptionalField)
+
+ // Note that the result may be a disjunction. Be sure to not take the
+ // default value as we want to retain the options of the disjunction.
+ v := ctx.evalState(field.Filter, require(0, scalarKnown))
+ if v == nil {
+ return
+ }
+
+ n.insertPattern(v, MakeConjunct(t.env, t.x, t.id))
+}
+
+func processComprehension(ctx *OpContext, t *task, mode runMode) {
+ n := t.node
+
+ y := &envYield{
+ envComprehension: t.comp,
+ leaf: t.leaf,
+ env: t.env,
+ id: t.id,
+ expr: t.x,
+ }
+
+ err := n.processComprehension(y, 0)
+ t.err = CombineErrors(nil, t.err, err)
+ t.comp.vertex.state.addBottom(err)
+}
+
+func processListLit(c *OpContext, t *task, mode runMode) {
+ n := t.node
+
+ l := t.x.(*ListLit)
+
+ n.updateCyclicStatus(t.id)
+
+ var ellipsis Node
+
+ index := int64(0)
+ hasComprehension := false
+ for j, elem := range l.Elems {
+ // TODO: Terminate early in case of runaway comprehension.
+
+ switch x := elem.(type) {
+ case *Comprehension:
+ err := c.yield(nil, t.env, x, 0, func(e *Environment) {
+ label, err := MakeLabel(x.Source(), index, IntLabel)
+ n.addErr(err)
+ index++
+ c := MakeConjunct(e, x.Value, t.id)
+ n.insertArc(label, ArcMember, c, t.id, true)
+ })
+ hasComprehension = true
+ if err != nil {
+ n.addBottom(err)
+ return
+ }
+
+ case *Ellipsis:
+ if j != len(l.Elems)-1 {
+ n.addErr(c.Newf("ellipsis must be last element in list"))
+ return
+ }
+
+ elem := x.Value
+ if elem == nil {
+ elem = &Top{}
+ }
+
+ c := MakeConjunct(t.env, elem, t.id)
+ pat := &BoundValue{
+ Op: GreaterEqualOp,
+ Value: n.ctx.NewInt64(index, x),
+ }
+ n.insertPattern(pat, c)
+ ellipsis = x
+
+ default:
+ label, err := MakeLabel(x.Source(), index, IntLabel)
+ n.addErr(err)
+ index++
+ c := MakeConjunct(t.env, x, t.id)
+ n.insertArc(label, ArcMember, c, t.id, true)
+ }
+
+ if max := n.maxListLen; n.listIsClosed && int(index) > max {
+ n.invalidListLength(max, len(l.Elems), n.maxNode, l)
+ return
+ }
+ }
+
+ isClosed := ellipsis == nil
+
+ switch max := n.maxListLen; {
+ case int(index) < max:
+ if isClosed {
+ n.invalidListLength(int(index), max, l, n.maxNode)
+ return
+ }
+
+ case int(index) > max,
+ isClosed && !n.listIsClosed,
+ (isClosed == n.listIsClosed) && !hasComprehension:
+ n.maxListLen = int(index)
+ n.maxNode = l
+ n.listIsClosed = isClosed
+ }
+
+ n.updateListType(l, t.id, isClosed, ellipsis)
+}
+
+func processListVertex(c *OpContext, t *task, mode runMode) {
+ n := t.node
+
+ l := t.x.(*Vertex)
+
+ elems := l.Elems()
+ isClosed := l.IsClosedList()
+
+ // TODO: Share with code above.
+ switch max := n.maxListLen; {
+ case len(elems) < max:
+ if isClosed {
+ n.invalidListLength(len(elems), max, l, n.maxNode)
+ return
+ }
+
+ case len(elems) > max:
+ if n.listIsClosed {
+ n.invalidListLength(max, len(elems), n.maxNode, l)
+ return
+ }
+ n.listIsClosed = isClosed
+ n.maxListLen = len(elems)
+ n.maxNode = l
+
+ case isClosed:
+ n.listIsClosed = true
+ n.maxNode = l
+ }
+
+ for _, a := range elems {
+ if a.Conjuncts == nil {
+ c := MakeRootConjunct(nil, a)
+ n.insertArc(a.Label, ArcMember, c, CloseInfo{}, true)
+ continue
+ }
+ for _, c := range a.Conjuncts {
+ c.CloseInfo.cc = t.id.cc
+ n.insertArc(a.Label, ArcMember, c, t.id, true)
+ }
+ }
+
+ n.updateListType(l, t.id, isClosed, nil)
+}
+
+func (n *nodeContext) updateListType(list Expr, id CloseInfo, isClosed bool, ellipsis Node) {
+ m, ok := n.node.BaseValue.(*ListMarker)
+ if !ok {
+ m = &ListMarker{
+ IsOpen: true,
+ }
+ n.node.setValue(n.ctx, partial, m)
+ }
+ m.IsOpen = m.IsOpen && !isClosed
+
+ if ellipsis != nil {
+ if src, _ := ellipsis.Source().(ast.Expr); src != nil {
+ if m.Src == nil {
+ m.Src = src
+ } else {
+ m.Src = ast.NewBinExpr(token.AND, m.Src, src)
+ }
+ }
+ }
+
+ if n.kind != ListKind {
+ n.updateNodeType(ListKind, list, id)
+ }
+}
diff --git a/vendor/cuelang.org/go/internal/core/adt/unify.go b/vendor/cuelang.org/go/internal/core/adt/unify.go
new file mode 100644
index 00000000..2f30e8c9
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/core/adt/unify.go
@@ -0,0 +1,547 @@
+// Copyright 2023 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+import (
+ "fmt"
+
+ "cuelang.org/go/cue/token"
+)
+
+func (v *Vertex) getState(c *OpContext) *nodeContext {
+ if v.status == finalized { // TODO: use BaseValue != nil
+ return nil
+ }
+ if v.state == nil {
+ v.state = c.newNodeContext(v)
+ v.state.initNode()
+ v.state.refCount = 1
+ }
+
+ // An additional refCount for the current user.
+ v.state.refCount += 1
+
+ // TODO: see if we can get rid of ref counting after new evaluator is done:
+ // the recursive nature of the new evaluator should make this unnecessary.
+
+ return v.state
+}
+
+// initNode initializes a nodeContext for the evaluation of the given Vertex.
+func (n *nodeContext) initNode() {
+ v := n.node
+ if v.Parent != nil && v.Parent.state != nil {
+ v.state.depth = v.Parent.state.depth + 1
+ n.blockOn(allAncestorsProcessed)
+ }
+
+ n.blockOn(scalarKnown | listTypeKnown | arcTypeKnown)
+
+ if v.Label.IsDef() {
+ v.Closed = true
+ }
+
+ if v.Parent != nil {
+ if v.Parent.Closed {
+ v.Closed = true
+ }
+ }
+
+ ctx := n.ctx
+
+ ctx.stats.Unifications++
+
+ // Set the cache to a cycle error to ensure a cyclic reference will result
+ // in an error if applicable. A cyclic error may be ignored for
+ // non-expression references. The cycle error may also be removed as soon
+ // as there is evidence what a correct value must be, but before all
+ // validation has taken place.
+ //
+ // TODO(cycle): having a more recursive algorithm would make this
+ // special cycle handling unnecessary.
+ v.BaseValue = cycle
+
+ defer ctx.PopArc(ctx.PushArc(v))
+
+ root := n.node.rootCloseContext()
+ root.incDependent(INIT, nil) // decremented below
+
+ for _, c := range v.Conjuncts {
+ ci := c.CloseInfo
+ ci.cc = root
+ n.scheduleConjunct(c, ci)
+ }
+
+ root.decDependent(ctx, INIT, nil)
+}
+
+func (v *Vertex) unify(c *OpContext, needs condition, mode runMode) bool {
+ if Debug {
+ c.nest++
+ c.Logf(v, "Unify %v", fmt.Sprintf("%p", v))
+ defer func() {
+ c.Logf(v, "END Unify")
+ c.nest--
+ }()
+ }
+
+ if mode == ignore {
+ return false
+ }
+
+ n := v.getState(c)
+ if n == nil {
+ return true // already completed
+ }
+ defer n.free()
+
+ // Typically a node processes all conjuncts before processing its fields.
+ // So this condition is very likely to trigger. If for some reason the
+ // parent has not been processed yet, we could attempt to process more
+ // of its tasks to increase the chances of being able to find the
+ // information we are looking for here. For now we just continue as is,
+ // though.
+ // For dynamic nodes, the parent only exists to provide a path context.
+ if v.Label.IsLet() || v.IsDynamic || v.Parent.allChildConjunctsKnown() {
+ n.signal(allAncestorsProcessed)
+ }
+
+ defer c.PopArc(c.PushArc(v))
+
+ nodeOnlyNeeds := needs &^ (subFieldsProcessed)
+ n.process(nodeOnlyNeeds, mode)
+ n.updateScalar()
+
+ // First process all but the subfields.
+ switch {
+ case n.meets(nodeOnlyNeeds):
+ // pass through next phase.
+ case mode != finalize:
+ return false
+ }
+
+ if isCyclePlaceholder(n.node.BaseValue) {
+ n.node.BaseValue = nil
+ }
+ if n.aStruct != nil {
+ n.updateNodeType(StructKind, n.aStruct, n.aStructID)
+ }
+
+ n.validateValue(finalized)
+
+ if err, ok := n.node.BaseValue.(*Bottom); ok {
+ for _, arc := range n.node.Arcs {
+ if arc.Label.IsLet() {
+ continue
+ }
+ c := MakeConjunct(nil, err, c.CloseInfo())
+ if arc.state != nil {
+ arc.state.scheduleConjunct(c, c.CloseInfo)
+ }
+ }
+ }
+
+ if n.node.Label.IsLet() || n.meets(allAncestorsProcessed) {
+ if cc := v.rootCloseContext(); !cc.isDecremented { // TODO: use v.cc
+ cc.decDependent(c, ROOT, nil) // REF(decrement:nodeDone)
+ cc.isDecremented = true
+ }
+ }
+
+ // At this point, no more conjuncts will be added, so we could decrement
+ // the notification counters.
+
+ switch {
+ case n.completed&subFieldsProcessed != 0:
+ // done
+
+ case needs&subFieldsProcessed != 0:
+ if DebugSort > 0 {
+ DebugSortArcs(n.ctx, n.node)
+ }
+
+ switch {
+ case assertStructuralCycle(n):
+ case n.completeAllArcs(needs, mode):
+ }
+
+ n.signal(subFieldsProcessed)
+
+ if v.BaseValue == nil {
+ v.BaseValue = n.getValidators(finalized)
+ }
+ if v := n.node.Value(); v != nil && IsConcrete(v) {
+ // Ensure that checks are not run again when this value is used
+ // in a validator.
+ checks := n.checks
+ n.checks = n.checks[:0]
+ for _, v := range checks {
+ // TODO(errors): make Validate return bottom and generate
+ // optimized conflict message. Also track and inject IDs
+ // to determine origin location.s
+ if b := c.Validate(v, n.node); b != nil {
+ n.addBottom(b)
+ }
+ }
+ }
+
+ case needs&fieldSetKnown != 0:
+ n.evalArcTypes()
+ }
+
+ if err := n.getErr(); err != nil {
+ n.errs = nil
+ if b, _ := n.node.BaseValue.(*Bottom); b != nil {
+ err = CombineErrors(nil, b, err)
+ }
+ n.node.BaseValue = err
+ }
+
+ if mask := n.completed & needs; mask != 0 {
+ // TODO: phase3: validation
+ n.signal(mask)
+ }
+
+ // validationCompleted
+ if n.completed&(subFieldsProcessed) != 0 {
+ n.node.updateStatus(finalized)
+
+ for _, r := range n.node.cc.externalDeps {
+ src := r.src
+ a := &src.arcs[r.index]
+ if a.decremented {
+ continue
+ }
+ a.decremented = true
+ if n := src.src.getState(n.ctx); n != nil {
+ n.completeNodeConjuncts()
+ }
+ src.src.unify(n.ctx, needTasksDone, attemptOnly)
+ a.cc.decDependent(c, a.kind, src) // REF(arcs)
+ }
+
+ if DebugDeps {
+ RecordDebugGraph(n.ctx, n.node, "Finalize")
+ }
+ }
+
+ return n.meets(needs)
+}
+
+// Once returning, all arcs plus conjuncts that can be known are known.
+//
+// Proof:
+// - if there is a cycle, all completeNodeConjuncts will be called
+// repeatedly for all nodes in this cycle, and all tasks on the cycle
+// will have run at least once.
+// - any tasks that were blocking on values on this circle to be completed
+// will thus have to be completed at some point in time if they can.
+// - any tasks that were blocking on values outside of this ring will have
+// initiated its own execution, which is either not cyclic, and thus
+// completes, or is on a different cycle, in which case it completes as
+// well.
+func (n *nodeContext) completeNodeConjuncts() {
+ const conjunctsKnown = fieldConjunctsKnown | valueKnown // | fieldSetKnown
+
+ if n.meets(conjunctsKnown) {
+ return
+ }
+
+ if p := n.node.Parent; p != nil && p.state != nil {
+ p.state.completeNodeConjuncts()
+ }
+
+ // This only attempts, but it ensures that all references are processed.
+ n.process(conjunctsKnown, attemptOnly)
+}
+
+// Goal:
+// - complete notifications
+// - decrement reference counts for root and notify.
+// NOT:
+// - complete value. That is reserved for Unify.
+func (n *nodeContext) completeNodeTasks() (ok bool) {
+ v := n.node
+ c := n.ctx
+
+ if Debug {
+ c.nest++
+ defer func() {
+ c.nest--
+ }()
+ }
+
+ if p := v.Parent; p != nil && p.state != nil {
+ if !v.IsDynamic && n.completed&allAncestorsProcessed == 0 {
+ p.state.completeNodeTasks()
+ }
+ }
+
+ if v.IsDynamic || v.Parent.allChildConjunctsKnown() {
+ n.signal(allAncestorsProcessed)
+ }
+
+ if len(n.scheduler.tasks) != n.scheduler.taskPos {
+ // TODO: do we need any more requirements here?
+ const needs = valueKnown | fieldConjunctsKnown
+
+ n.process(needs, attemptOnly)
+ n.updateScalar()
+ }
+
+ // As long as ancestors are not processed, it is still possible for
+ // conjuncts to be inserted. Until that time, it is not okay to decrement
+ // theroot. It is not necessary to wait on tasks to complete, though,
+ // as pending tasks will have their own dependencies on root, meaning it
+ // is safe to decrement here.
+ if !n.meets(allAncestorsProcessed) && !n.node.Label.IsLet() {
+ return false
+ }
+
+ // At this point, no more conjuncts will be added, so we could decrement
+ // the notification counters.
+
+ if cc := v.rootCloseContext(); !cc.isDecremented { // TODO: use v.cc
+ cc.isDecremented = true
+
+ cc.decDependent(n.ctx, ROOT, nil) // REF(decrement:nodeDone)
+ }
+
+ return true
+}
+
+func (n *nodeContext) updateScalar() {
+ // Set BaseValue to scalar, but only if it was not set before. Most notably,
+ // errors should not be discarded.
+ _, isErr := n.node.BaseValue.(*Bottom)
+ if n.scalar != nil && (!isErr || isCyclePlaceholder(n.node.BaseValue)) {
+ n.node.BaseValue = n.scalar
+ n.signal(scalarKnown)
+ }
+}
+
+func (n *nodeContext) completeAllArcs(needs condition, mode runMode) bool {
+ if n.node.status == evaluatingArcs {
+ // NOTE: this was an "incomplete" error pre v0.6. If this is a problem
+ // we could make this a CycleError. Technically, this may be correct,
+ // as it is possible to make the values exactly as the inserted
+ // values. It seems more user friendly to just disallow this, though.
+ // TODO: make uniform error messages
+ // see compbottom2.cue:
+ n.ctx.addErrf(CycleError, pos(n.node), "mutual dependency")
+ }
+
+ n.node.updateStatus(evaluatingArcs)
+
+ // XXX(0.7): only set success if needs complete arcs.
+ success := true
+ // Visit arcs recursively to validate and compute error.
+ for n.arcPos < len(n.node.Arcs) {
+ a := n.node.Arcs[n.arcPos]
+ n.arcPos++
+
+ if !a.unify(n.ctx, needs, finalize) {
+ success = false
+ }
+
+ // At this point we need to ensure that all notification cycles
+ // for Arc a have been processed.
+
+ if a.ArcType == ArcPending {
+ // TODO: cancel tasks?
+ a.ArcType = ArcNotPresent
+ continue
+ }
+
+ // Errors are allowed in let fields. Handle errors and failure to
+ // complete accordingly.
+ if !a.Label.IsLet() && a.ArcType <= ArcRequired {
+ if err, _ := a.BaseValue.(*Bottom); err != nil {
+ n.node.AddChildError(err)
+ }
+ success = true // other arcs are irrelevant
+ }
+
+ // TODO: harmonize this error with "cannot combine"
+ switch {
+ case a.ArcType > ArcRequired, !a.Label.IsString():
+ case n.kind&StructKind == 0:
+ if !n.node.IsErr() {
+ n.reportFieldMismatch(pos(a.Value()), nil, a.Label, n.node.Value())
+ }
+ // case !wasVoid:
+ // case n.kind == TopKind:
+ // // Theoretically it may be possible that a "void" arc references
+ // // this top value where it really should have been a struct. One
+ // // way to solve this is to have two passes over the arcs, where
+ // // the first pass additionally analyzes whether comprehensions
+ // // will yield values and "un-voids" an arc ahead of the rest.
+ // //
+ // // At this moment, though, I fail to see a possibility to create
+ // // faulty CUE using this mechanism, though. At most error
+ // // messages are a bit unintuitive. This may change once we have
+ // // functionality to reflect on types.
+ // if _, ok := n.node.BaseValue.(*Bottom); !ok {
+ // n.node.BaseValue = &StructMarker{}
+ // n.kind = StructKind
+ // }
+ }
+ }
+
+ k := 0
+ for _, a := range n.node.Arcs {
+ if a.ArcType != ArcNotPresent {
+ n.node.Arcs[k] = a
+ k++
+ }
+ }
+ n.node.Arcs = n.node.Arcs[:k]
+
+ return success
+}
+
+func (n *nodeContext) evalArcTypes() {
+ for _, a := range n.node.Arcs {
+ if a.ArcType != ArcPending {
+ continue
+ }
+ a.unify(n.ctx, arcTypeKnown, yield)
+ // Ensure the arc is processed up to the desired level
+ if a.ArcType == ArcPending {
+ // TODO: cancel tasks?
+ a.ArcType = ArcNotPresent
+ }
+ }
+}
+
+func (v *Vertex) lookup(c *OpContext, pos token.Pos, f Feature, flags combinedFlags) *Vertex {
+ task := c.current()
+ needs := flags.conditions()
+ runMode := flags.runMode()
+
+ c.Logf(c.vertex, "LOOKUP %v", f)
+
+ state := v.getState(c)
+ if state != nil {
+ // If the scheduler associated with this vertex was already running,
+ // it means we have encountered a cycle. In that case, we allow to
+ // proceed with partial data, in which case a "pending" arc will be
+ // created to be completed later.
+
+ // Report error for now.
+ if state.hasErr() {
+ c.AddBottom(state.getErr())
+ }
+ state.completeNodeTasks()
+ }
+
+ // TODO: remove because unnecessary?
+ if task.state != taskRUNNING {
+ return nil // abort, task is blocked or terminated in a cycle.
+ }
+
+ // TODO: verify lookup types.
+
+ arc := v.Lookup(f)
+ // TODO: clean up this logic:
+ // - signal arcTypeKnown when ArcMember or ArcNotPresent is set,
+ // similarly to scalarKnown.
+ // - make it clear we want to yield if it is now known if a field exists.
+
+ var arcState *nodeContext
+ switch {
+ case arc != nil:
+ if arc.ArcType == ArcMember {
+ return arc
+ }
+ arcState = arc.getState(c)
+
+ case state == nil || state.meets(needTasksDone):
+ // This arc cannot exist.
+ v.reportFieldIndexError(c, pos, f)
+ return nil
+
+ default:
+ arc = &Vertex{Parent: state.node, Label: f, ArcType: ArcPending}
+ v.Arcs = append(v.Arcs, arc)
+ arcState = arc.getState(c)
+ }
+
+ if arcState != nil && (!arcState.meets(needTasksDone) || !arcState.meets(arcTypeKnown)) {
+ needs |= arcTypeKnown
+ // If this arc is not ArcMember, which it is not at this point,
+ // any pending arcs could influence the field set.
+ for _, a := range arc.Arcs {
+ if a.ArcType == ArcPending {
+ needs |= fieldSetKnown
+ break
+ }
+ }
+ arcState.completeNodeTasks()
+
+ // Child nodes, if pending and derived from a comprehension, may
+ // still cause this arc to become not pending.
+ if arc.ArcType != ArcMember {
+ for _, a := range arcState.node.Arcs {
+ if a.ArcType == ArcPending {
+ a.unify(c, arcTypeKnown, runMode)
+ }
+ }
+ }
+
+ switch runMode {
+ case ignore, attemptOnly:
+ // TODO: should we avoid notifying ArcPending vertices here?
+ arcState.addNotify2(task.node.node, task.id)
+ return arc
+
+ case yield:
+ arcState.process(needs, yield)
+ // continue processing, as successful processing may still result
+ // in an invalid field.
+
+ case finalize:
+ // TODO: should we try to use finalize? Using it results in errors and this works. It would be more principled, though.
+ arcState.process(needs, yield)
+ }
+ }
+
+ switch arc.ArcType {
+ case ArcMember:
+ return arc
+
+ case ArcOptional, ArcRequired:
+ label := f.SelectorString(c.Runtime)
+ b := &Bottom{
+ Code: IncompleteError,
+ Err: c.NewPosf(pos,
+ "cannot reference optional field: %s", label),
+ }
+ c.AddBottom(b)
+ // TODO: yield failure
+ return nil
+
+ case ArcNotPresent:
+ v.reportFieldCycleError(c, pos, f)
+ return nil
+
+ case ArcPending:
+ // should not happen.
+ panic("unreachable")
+ }
+
+ v.reportFieldIndexError(c, pos, f)
+ return nil
+}
diff --git a/vendor/cuelang.org/go/internal/core/convert/go.go b/vendor/cuelang.org/go/internal/core/convert/go.go
index 4d068636..4556a04d 100644
--- a/vendor/cuelang.org/go/internal/core/convert/go.go
+++ b/vendor/cuelang.org/go/internal/core/convert/go.go
@@ -275,6 +275,7 @@ func convertRec(ctx *adt.OpContext, nilIsTop bool, x interface{}) adt.Value {
res, _ := internal.BaseContext.RoundToIntegralExact(&d, v)
if !res.Inexact() {
kind = adt.IntKind
+ v = &d
}
n := &adt.Num{Src: ctx.Source(), K: kind}
n.X = *v
diff --git a/vendor/cuelang.org/go/internal/core/debug/compact.go b/vendor/cuelang.org/go/internal/core/debug/compact.go
index 20c695e8..3ed9aed6 100644
--- a/vendor/cuelang.org/go/internal/core/debug/compact.go
+++ b/vendor/cuelang.org/go/internal/core/debug/compact.go
@@ -307,6 +307,16 @@ func (w *compactPrinter) node(n adt.Node) {
w.node(c)
}
+ case *adt.ConjunctGroup:
+ w.string("&[")
+ for i, c := range *x {
+ if i > 0 {
+ w.string(" & ")
+ }
+ w.node(c.Expr())
+ }
+ w.string("]")
+
case *adt.Disjunction:
for i, c := range x.Values {
if i > 0 {
diff --git a/vendor/cuelang.org/go/internal/core/debug/debug.go b/vendor/cuelang.org/go/internal/core/debug/debug.go
index 717c3a38..fe32fd4e 100644
--- a/vendor/cuelang.org/go/internal/core/debug/debug.go
+++ b/vendor/cuelang.org/go/internal/core/debug/debug.go
@@ -249,6 +249,16 @@ func (w *printer) node(n adt.Node) {
w.indent += "// "
w.string("// ")
for i, c := range x.Conjuncts {
+ if c.CloseInfo.FromDef || c.CloseInfo.FromEmbed {
+ w.string("[")
+ if c.CloseInfo.FromDef {
+ w.string("d")
+ }
+ if c.CloseInfo.FromEmbed {
+ w.string("e")
+ }
+ w.string("]")
+ }
if i > 0 {
w.string(" & ")
}
@@ -510,6 +520,16 @@ func (w *printer) node(n adt.Node) {
}
w.string(")")
+ case *adt.ConjunctGroup:
+ w.string("&[")
+ for i, c := range *x {
+ if i > 0 {
+ w.string(", ")
+ }
+ w.node(c.Expr())
+ }
+ w.string("]")
+
case *adt.Disjunction:
w.string("|(")
for i, c := range x.Values {
diff --git a/vendor/cuelang.org/go/internal/core/dep/dep.go b/vendor/cuelang.org/go/internal/core/dep/dep.go
index f9de8e21..b8cde15d 100644
--- a/vendor/cuelang.org/go/internal/core/dep/dep.go
+++ b/vendor/cuelang.org/go/internal/core/dep/dep.go
@@ -621,6 +621,7 @@ func (c *visitor) markComprehension(env *adt.Environment, y *adt.Comprehension)
for i := y.Nest(); i > 0; i-- {
env = &adt.Environment{Up: env, Vertex: empty}
}
+ // TODO: consider using adt.EnvExpr and remove the above loop.
c.markExpr(env, adt.ToExpr(y.Value))
}
diff --git a/vendor/cuelang.org/go/internal/core/export/adt.go b/vendor/cuelang.org/go/internal/core/export/adt.go
index 26fcfc0d..2d2a14c4 100644
--- a/vendor/cuelang.org/go/internal/core/export/adt.go
+++ b/vendor/cuelang.org/go/internal/core/export/adt.go
@@ -260,6 +260,14 @@ func (e *exporter) adt(env *adt.Environment, expr adt.Elem) ast.Expr {
}
return ast.NewBinExpr(token.OR, a...)
+ case *adt.ConjunctGroup:
+ a := []ast.Expr{}
+ for _, c := range *x {
+ v := e.expr(c.EnvExpr())
+ a = append(a, v)
+ }
+ return ast.NewBinExpr(token.AND, a...)
+
case *adt.Comprehension:
if !x.DidResolve() {
return dummyTop
@@ -285,6 +293,7 @@ func (e *exporter) adt(env *adt.Environment, expr adt.Elem) ast.Expr {
env = &adt.Environment{Up: env, Vertex: empty}
}
+ // TODO: consider using adt.EnvExpr.
return e.adt(env, adt.ToExpr(x.Value))
default:
@@ -696,6 +705,7 @@ func (e *exporter) comprehension(env *adt.Environment, comp *adt.Comprehension)
env = &adt.Environment{Up: env, Vertex: empty}
}
+ // TODO: consider using adt.EnvExpr.
v := e.expr(env, adt.ToExpr(comp.Value))
if _, ok := v.(*ast.StructLit); !ok {
v = ast.NewStruct(ast.Embed(v))
diff --git a/vendor/cuelang.org/go/internal/core/export/export.go b/vendor/cuelang.org/go/internal/core/export/export.go
index c7f44cd7..a007d7f2 100644
--- a/vendor/cuelang.org/go/internal/core/export/export.go
+++ b/vendor/cuelang.org/go/internal/core/export/export.go
@@ -571,7 +571,7 @@ func (e *exporter) resolveLet(env *adt.Environment, x *adt.LetReference) ast.Exp
return e.expr(env, x.X)
}
- return e.expr(env, ref.Conjuncts[0].Expr())
+ return e.expr(ref.Conjuncts[0].EnvExpr())
case let.Expr == nil:
label := e.uniqueLetIdent(x.Label, x.X)
diff --git a/vendor/cuelang.org/go/internal/core/export/self.go b/vendor/cuelang.org/go/internal/core/export/self.go
index 267b98f6..11a633ce 100644
--- a/vendor/cuelang.org/go/internal/core/export/self.go
+++ b/vendor/cuelang.org/go/internal/core/export/self.go
@@ -447,7 +447,7 @@ func (p *pivotter) addExternal(d *depData) {
ast.SetRelPos(let, token.NewSection)
- path := p.x.ctx.PathToString(p.x.ctx, d.node().Path())
+ path := p.x.ctx.PathToString(d.node().Path())
var msg string
if d.dstImport == nil {
msg = fmt.Sprintf("//cue:path: %s", path)
diff --git a/vendor/cuelang.org/go/internal/core/runtime/extern.go b/vendor/cuelang.org/go/internal/core/runtime/extern.go
index d286d4ee..5cf9c0b3 100644
--- a/vendor/cuelang.org/go/internal/core/runtime/extern.go
+++ b/vendor/cuelang.org/go/internal/core/runtime/extern.go
@@ -40,19 +40,22 @@ func (r *Runtime) SetInterpreter(i Interpreter) {
// Interpreter defines an entrypoint for creating per-package interpreters.
type Interpreter interface {
// NewCompiler creates a compiler for b and reports any errors.
- NewCompiler(b *build.Instance) (Compiler, errors.Error)
+ NewCompiler(b *build.Instance, r *Runtime) (Compiler, errors.Error)
// Kind returns the string to be used in the file-level @extern attribute.
Kind() string
}
-// A Compiler composes an adt.Builtin for an external function implementation.
+// A Compiler fills in an adt.Expr for fields marked with `@extern(kind)`.
type Compiler interface {
- // Compile creates a builtin for the given function name and attribute.
- // funcName is the name of the function to compile, taken from altName in
- // @extern(name=altName), or from the field name if that's not defined.
- // Other than "name", the fields in a are implementation specific.
- Compile(funcName string, a *internal.Attr) (*adt.Builtin, errors.Error)
+ // Compile creates an adt.Expr (usually a builtin) for the
+ // given external named resource (usually a function). name
+ // is the name of the resource to compile, taken from altName
+ // in `@extern(name=altName)`, or from the field name if that's
+ // not defined. Scope is the struct that contains the field.
+ // Other than "name", the fields in a are implementation
+ // specific.
+ Compile(name string, scope adt.Value, a *internal.Attr) (adt.Expr, errors.Error)
}
// injectImplementations modifies v to include implementations of functions
@@ -72,7 +75,7 @@ func (r *Runtime) injectImplementations(b *build.Instance, v *adt.Vertex) (errs
}
for _, c := range v.Conjuncts {
- d.decorateConjunct(c.Elem())
+ d.decorateConjunct(c.Elem(), v)
}
return d.errs
@@ -215,7 +218,7 @@ func (d *externDecorator) initCompiler(kind string, pos token.Pos) (ok bool, err
return false, errors.Newf(pos, "no interpreter defined for %q", kind)
}
- c, err := x.NewCompiler(d.pkg)
+ c, err := x.NewCompiler(d.pkg, d.runtime)
if err != nil {
return false, err
}
@@ -289,14 +292,16 @@ func (d *externDecorator) markExternFieldAttr(kind string, decls []ast.Decl) (er
return errs
}
-func (d *externDecorator) decorateConjunct(e adt.Elem) {
- w := walk.Visitor{Before: d.processADTNode}
+func (d *externDecorator) decorateConjunct(e adt.Elem, scope *adt.Vertex) {
+ w := walk.Visitor{Before: func(n adt.Node) bool {
+ return d.processADTNode(n, scope)
+ }}
w.Elem(e)
}
// processADTNode injects a builtin conjunct into n if n is an adt.Field and
// has a marked ast.Field associated with it.
-func (d *externDecorator) processADTNode(n adt.Node) bool {
+func (d *externDecorator) processADTNode(n adt.Node, scope *adt.Vertex) bool {
f, ok := n.(*adt.Field)
if !ok {
return true
@@ -324,7 +329,7 @@ func (d *externDecorator) processADTNode(n adt.Node) bool {
name = str
}
- b, err := c.Compile(name, &attr)
+ b, err := c.Compile(name, scope, &attr)
if err != nil {
err = errors.Newf(info.attr.Pos(), "can't load from external module: %v", err)
d.errs = errors.Append(d.errs, err)
diff --git a/vendor/cuelang.org/go/internal/core/runtime/runtime.go b/vendor/cuelang.org/go/internal/core/runtime/runtime.go
index 0ab92e85..1eac1d7c 100644
--- a/vendor/cuelang.org/go/internal/core/runtime/runtime.go
+++ b/vendor/cuelang.org/go/internal/core/runtime/runtime.go
@@ -16,6 +16,7 @@ package runtime
import (
"cuelang.org/go/cue/build"
+ "cuelang.org/go/internal"
)
// A Runtime maintains data structures for indexing and reuse for evaluation.
@@ -27,6 +28,12 @@ type Runtime struct {
// interpreters implement extern functionality. The map key corresponds to
// the kind in a file-level @extern(kind) attribute.
interpreters map[string]Interpreter
+
+ version internal.EvaluatorVersion
+}
+
+func (r *Runtime) EvaluatorVersion() internal.EvaluatorVersion {
+ return r.version
}
func (r *Runtime) SetBuildData(b *build.Instance, x interface{}) {
@@ -38,14 +45,21 @@ func (r *Runtime) BuildData(b *build.Instance) (x interface{}, ok bool) {
return x, ok
}
-// New creates a new Runtime. The builtins registered with RegisterBuiltin
-// are available for
+// New is a wrapper for NewVersioned(internal.DefaultVersion).
func New() *Runtime {
r := &Runtime{}
r.Init()
return r
}
+// NewVersioned creates a new Runtime using the given runtime version.
+// The builtins registered with RegisterBuiltin are available for evaluation.
+func NewVersioned(v internal.EvaluatorVersion) *Runtime {
+ r := &Runtime{version: v}
+ r.Init()
+ return r
+}
+
// IsInitialized reports whether the runtime has been initialized.
func (r *Runtime) IsInitialized() bool {
return r.index != nil
diff --git a/vendor/cuelang.org/go/internal/core/walk/walk.go b/vendor/cuelang.org/go/internal/core/walk/walk.go
index 20002fdb..88b7ef2d 100644
--- a/vendor/cuelang.org/go/internal/core/walk/walk.go
+++ b/vendor/cuelang.org/go/internal/core/walk/walk.go
@@ -62,6 +62,11 @@ func (w *Visitor) node(n adt.Node) {
// TODO: special-case Vertex?
case adt.Value:
+ case *adt.ConjunctGroup:
+ for _, x := range *x {
+ w.Elem(x.Elem())
+ }
+
case *adt.ListLit:
for _, x := range x.Elems {
w.node(x)
diff --git a/vendor/cuelang.org/go/internal/cueconfig/config.go b/vendor/cuelang.org/go/internal/cueconfig/config.go
new file mode 100644
index 00000000..3cd350c1
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/cueconfig/config.go
@@ -0,0 +1,160 @@
+// Package cueconfig holds internal API relating to CUE configuration.
+package cueconfig
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "time"
+
+ "cuelang.org/go/internal/mod/modresolve"
+ "golang.org/x/oauth2"
+)
+
+// Logins holds the login information as stored in $CUE_CONFIG_DIR/logins.cue.
+type Logins struct {
+ // TODO: perhaps add a version string to simplify making changes in the future
+
+ // TODO: Sooner or later we will likely need more than one token per registry,
+ // such as when our central registry starts using scopes.
+
+ Registries map[string]RegistryLogin `json:"registries"`
+}
+
+// RegistryLogin holds the login information for one registry.
+type RegistryLogin struct {
+ // These fields mirror [oauth2.Token].
+ // We don't directly reference the type so we can be in control of our file format.
+ // Note that Expiry is a pointer, so omitempty can work as intended.
+
+ AccessToken string `json:"access_token"`
+
+ TokenType string `json:"token_type,omitempty"`
+
+ RefreshToken string `json:"refresh_token,omitempty"`
+
+ Expiry *time.Time `json:"expiry,omitempty"`
+}
+
+func LoginConfigPath(getenv func(string) string) (string, error) {
+ configDir, err := ConfigDir(getenv)
+ if err != nil {
+ return "", err
+ }
+ return filepath.Join(configDir, "logins.json"), nil
+}
+
+func ConfigDir(getenv func(string) string) (string, error) {
+ if dir := getenv("CUE_CONFIG_DIR"); dir != "" {
+ return dir, nil
+ }
+ dir, err := os.UserConfigDir()
+ if err != nil {
+ return "", fmt.Errorf("cannot determine system config directory: %v", err)
+ }
+ return filepath.Join(dir, "cue"), nil
+}
+
+func CacheDir(getenv func(string) string) (string, error) {
+ if dir := getenv("CUE_CACHE_DIR"); dir != "" {
+ return dir, nil
+ }
+ dir, err := os.UserCacheDir()
+ if err != nil {
+ return "", fmt.Errorf("cannot determine system cache directory: %v", err)
+ }
+ return filepath.Join(dir, "cue"), nil
+}
+
+func ReadLogins(path string) (*Logins, error) {
+ body, err := os.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+ logins := &Logins{
+ // Initialize the map so we can insert entries.
+ Registries: map[string]RegistryLogin{},
+ }
+ if err := json.Unmarshal(body, logins); err != nil {
+ return nil, err
+ }
+ return logins, nil
+}
+
+func WriteLogins(path string, logins *Logins) error {
+ // Indenting and a trailing newline are not necessary, but nicer to humans.
+ body, err := json.MarshalIndent(logins, "", "\t")
+ if err != nil {
+ return err
+ }
+ body = append(body, '\n')
+
+ if err := os.MkdirAll(filepath.Dir(path), 0o777); err != nil {
+ return err
+ }
+ // Discourage other users from reading this file.
+ if err := os.WriteFile(path, body, 0o600); err != nil {
+ return err
+ }
+ return nil
+}
+
+// RegistryOAuthConfig returns the oauth2 configuration
+// suitable for talking to the central registry.
+func RegistryOAuthConfig(host modresolve.Host) oauth2.Config {
+ // For now, we use the OAuth endpoints as implemented by registry.cue.works,
+ // but other OCI registries may support the OAuth device flow with different ones.
+ //
+ // TODO: Query /.well-known/oauth-authorization-server to obtain
+ // token_endpoint and device_authorization_endpoint per the Oauth RFCs:
+ // * https://datatracker.ietf.org/doc/html/rfc8414#section-3
+ // * https://datatracker.ietf.org/doc/html/rfc8628#section-4
+ scheme := "https://"
+ if host.Insecure {
+ scheme = "http://"
+ }
+ return oauth2.Config{
+ Endpoint: oauth2.Endpoint{
+ DeviceAuthURL: scheme + host.Name + "/login/device/code",
+ TokenURL: scheme + host.Name + "/login/oauth/token",
+ },
+ }
+}
+
+// TODO: Encrypt the JSON file if the system has a secret store available,
+// such as libsecret on Linux. Such secret stores tend to have low size limits,
+// so rather than store the entire JSON blob there, store an encryption key.
+// There are a number of Go packages which integrate with multiple OS keychains.
+//
+// The encrypted form of logins.json can be logins.json.enc, for example.
+// If a user has an existing logins.json file and encryption is available,
+// we should replace the file with logins.json.enc transparently.
+
+// TODO: When running "cue login", try to prevent overwriting concurrent changes
+// when writing to the file on disk. For example, grab a lock, or check if the size
+// changed between reading and writing the file.
+
+func TokenFromLogin(login RegistryLogin) *oauth2.Token {
+ tok := &oauth2.Token{
+ AccessToken: login.AccessToken,
+ TokenType: login.TokenType,
+ RefreshToken: login.RefreshToken,
+ }
+ if login.Expiry != nil {
+ tok.Expiry = *login.Expiry
+ }
+ return tok
+}
+
+func LoginFromToken(tok *oauth2.Token) RegistryLogin {
+ login := RegistryLogin{
+ AccessToken: tok.AccessToken,
+ TokenType: tok.TokenType,
+ RefreshToken: tok.RefreshToken,
+ }
+ if !tok.Expiry.IsZero() {
+ login.Expiry = &tok.Expiry
+ }
+ return login
+}
diff --git a/vendor/cuelang.org/go/internal/cueexperiment/exp.go b/vendor/cuelang.org/go/internal/cueexperiment/exp.go
new file mode 100644
index 00000000..302b6ac2
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/cueexperiment/exp.go
@@ -0,0 +1,26 @@
+package cueexperiment
+
+import (
+ "sync"
+
+ "cuelang.org/go/internal/envflag"
+)
+
+// Flags holds the set of CUE_EXPERIMENT flags. It is initialized
+// by Init.
+var Flags struct {
+ Modules bool
+}
+
+// Init initializes Flags. Note: this isn't named "init" because we
+// don't always want it to be called (for example we don't want it to be
+// called when running "cue help"), and also because we want the failure
+// mode to be one of error not panic, which would be the only option if
+// it was a top level init function.
+func Init() error {
+ return initOnce()
+}
+
+var initOnce = sync.OnceValue(func() error {
+ return envflag.Init(&Flags, "CUE_EXPERIMENT")
+})
diff --git a/vendor/cuelang.org/go/internal/cueimports/read.go b/vendor/cuelang.org/go/internal/cueimports/read.go
new file mode 100644
index 00000000..af0ed945
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/cueimports/read.go
@@ -0,0 +1,225 @@
+// Copyright 2023 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cueimports provides support for reading the import
+// section of a CUE file without needing to read the rest of it.
+package cueimports
+
+import (
+ "bufio"
+ "io"
+ "unicode/utf8"
+
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/token"
+)
+
+// importReader is a stateful scanner over the head of a CUE file.
+// It accumulates every byte it consumes into buf so the caller can
+// recover the exact prefix of the input that was read.
+type importReader struct {
+	// b is the underlying buffered input.
+	b *bufio.Reader
+	// buf holds every byte consumed so far.
+	buf []byte
+	// peek is the lookahead byte; 0 means no byte is buffered.
+	peek byte
+	// err records the first error encountered and is sticky.
+	err errors.Error
+	// eof records whether end of input has been reached.
+	eof bool
+	// nerr counts calls made after err was set, guarding against
+	// infinite loops (see peekByte).
+	nerr int
+}
+
+// isIdent reports whether c may appear in an identifier.
+// Any byte >= utf8.RuneSelf is accepted, since multi-byte UTF-8
+// sequences may encode identifier runes.
+func isIdent(c byte) bool {
+	switch {
+	case c >= utf8.RuneSelf:
+		return true
+	case 'a' <= c && c <= 'z', 'A' <= c && c <= 'Z':
+		return true
+	case '0' <= c && c <= '9':
+		return true
+	default:
+		return c == '_'
+	}
+}
+
+// Sentinel errors used by the import reader. Read compares r.err
+// against errSyntax by identity to decide whether to fall back to
+// consuming the whole input.
+var (
+	errSyntax = errors.Newf(token.NoPos, "syntax error") // TODO: remove
+	errNUL    = errors.Newf(token.NoPos, "unexpected NUL in input")
+)
+
+// syntaxError records a syntax error, but only if an I/O error has not already been recorded.
+func (r *importReader) syntaxError() {
+	if r.err == nil {
+		r.err = errSyntax
+	}
+}
+
+// readByte reads the next byte from the input, saves it in buf, and returns it.
+// If an error occurs, readByte records the error in r.err and returns 0.
+func (r *importReader) readByte() byte {
+	c, err := r.b.ReadByte()
+	if err == nil {
+		// Record every consumed byte so that Read can hand back the
+		// exact prefix of the input that was scanned.
+		r.buf = append(r.buf, c)
+		if c == 0 {
+			// A NUL byte is reported as an error (see errNUL).
+			err = errNUL
+		}
+	}
+	if err != nil {
+		if err == io.EOF {
+			// EOF is tracked via r.eof, not r.err, so callers can
+			// distinguish clean end-of-input from real failures.
+			r.eof = true
+		} else if r.err == nil {
+			r.err = errors.Wrapf(err, token.NoPos, "readByte")
+		}
+		c = 0
+	}
+	return c
+}
+
+// peekByte returns the next byte from the input reader but does not advance beyond it.
+// If skipSpace is set, peekByte skips leading spaces and comments.
+//
+// Once r.err is set, peekByte keeps returning 0; nerr bounds the number
+// of such calls so a caller stuck in a loop panics instead of spinning
+// forever.
+func (r *importReader) peekByte(skipSpace bool) byte {
+	if r.err != nil {
+		if r.nerr++; r.nerr > 10000 {
+			panic("import reader looping")
+		}
+		return 0
+	}
+
+	// Use r.peek as first input byte.
+	// Don't just return r.peek here: it might have been left by peekByte(false)
+	// and this might be peekByte(true).
+	c := r.peek
+	if c == 0 {
+		c = r.readByte()
+	}
+	for r.err == nil && !r.eof {
+		if skipSpace {
+			// For the purposes of this reader, commas are never necessary to
+			// understand the input and are treated as spaces.
+			switch c {
+			case ' ', '\f', '\t', '\r', '\n', ',':
+				c = r.readByte()
+				continue
+
+			case '/':
+				// Only "//" line comments are recognized; any other use
+				// of '/' at this position is a syntax error.
+				c = r.readByte()
+				if c == '/' {
+					// Consume the comment through the end of the line.
+					for c != '\n' && r.err == nil && !r.eof {
+						c = r.readByte()
+					}
+				} else {
+					r.syntaxError()
+				}
+				c = r.readByte()
+				continue
+			}
+		}
+		break
+	}
+	r.peek = c
+	return r.peek
+}
+
+// nextByte is like peekByte but advances beyond the returned byte.
+func (r *importReader) nextByte(skipSpace bool) byte {
+	c := r.peekByte(skipSpace)
+	// Clearing the lookahead makes the next peek/read fetch a fresh byte.
+	r.peek = 0
+	return c
+}
+
+// readKeyword reads the given keyword from the input.
+// If the keyword is not present, readKeyword records a syntax error.
+func (r *importReader) readKeyword(kw string) {
+	// Skip any leading spaces, commas and comments.
+	r.peekByte(true)
+	for i := 0; i < len(kw); i++ {
+		if r.nextByte(false) != kw[i] {
+			r.syntaxError()
+			return
+		}
+	}
+	// The keyword must end here: a following identifier byte would mean
+	// we only matched a prefix of a longer identifier.
+	if isIdent(r.peekByte(false)) {
+		r.syntaxError()
+	}
+}
+
+// readIdent reads an identifier from the input.
+// If an identifier is not present, readIdent records a syntax error.
+func (r *importReader) readIdent() {
+	c := r.peekByte(true)
+	if !isIdent(c) {
+		r.syntaxError()
+		return
+	}
+	// Consume identifier bytes until a non-identifier byte is seen.
+	// Bytes >= utf8.RuneSelf are accepted without checking that they
+	// form well-formed UTF-8; presumably the cue/parser diagnoses
+	// malformed input later.
+	for isIdent(r.peekByte(false)) {
+		r.peek = 0
+	}
+}
+
+// readString reads a quoted string literal from the input.
+// If an identifier is not present, readString records a syntax error.
+func (r *importReader) readString() {
+	switch r.nextByte(true) {
+	case '"':
+		// Note: although the syntax in the specification only allows
+		// a simple string literal here, the cue/parser package also
+		// allows #"..."# and """ literals, so there's some impedance-mismatch here.
+		for r.err == nil {
+			c := r.nextByte(false)
+			if c == '"' {
+				break
+			}
+			// The literal may not run into EOF or span lines.
+			if r.eof || c == '\n' {
+				r.syntaxError()
+			}
+			// A backslash escapes the next byte, so an escaped quote
+			// does not terminate the literal.
+			if c == '\\' {
+				r.nextByte(false)
+			}
+		}
+	default:
+		r.syntaxError()
+	}
+}
+
+// readImport reads one import clause from the input: an optional
+// identifier (an import alias) followed by a quoted import path.
+func (r *importReader) readImport() {
+	// The leading identifier is optional; only consume one if the next
+	// non-space byte could start an identifier.
+	if isIdent(r.peekByte(true)) {
+		r.readIdent()
+	}
+	r.readString()
+}
+
+// Read is like io.ReadAll, except that it expects a CUE file as
+// input and stops reading the input once the imports have completed.
+func Read(f io.Reader) ([]byte, errors.Error) {
+	r := &importReader{b: bufio.NewReader(f)}
+
+	// The expected prefix is a package clause followed by zero or more
+	// import declarations, each either a single clause or a
+	// parenthesized group; anything else records errSyntax.
+	r.readKeyword("package")
+	r.readIdent()
+	for r.peekByte(true) == 'i' {
+		r.readKeyword("import")
+		if r.peekByte(true) == '(' {
+			r.nextByte(false)
+			for r.peekByte(true) != ')' && r.err == nil {
+				r.readImport()
+			}
+			r.nextByte(false)
+		} else {
+			r.readImport()
+		}
+	}
+
+	// If we stopped successfully before EOF, we read a byte that told us we were done.
+	// Return all but that last byte, which would cause a syntax error if we let it through.
+	if r.err == nil && !r.eof {
+		return r.buf[:len(r.buf)-1], nil
+	}
+
+	// If we stopped for a syntax error, consume the whole file so that
+	// we are sure we don't change the errors that the cue/parser returns.
+	if r.err == errSyntax {
+		r.err = nil
+		for r.err == nil && !r.eof {
+			r.readByte()
+		}
+	}
+
+	return r.buf, r.err
+}
diff --git a/vendor/cuelang.org/go/internal/cueversion/transport.go b/vendor/cuelang.org/go/internal/cueversion/transport.go
new file mode 100644
index 00000000..d11c13c8
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/cueversion/transport.go
@@ -0,0 +1,34 @@
+package cueversion
+
+import (
+ "maps"
+ "net/http"
+)
+
+// NewTransport returns an [http.RoundTripper] that wraps next and
+// stamps the result of UserAgent(clientType) into the "User-Agent"
+// header of every request it forwards. A nil next falls back to
+// [http.DefaultTransport].
+func NewTransport(clientType string, next http.RoundTripper) http.RoundTripper {
+	transport := &userAgentTransport{
+		next:      next,
+		userAgent: UserAgent(clientType),
+	}
+	if transport.next == nil {
+		transport.next = http.DefaultTransport
+	}
+	return transport
+}
+
+// userAgentTransport decorates an http.RoundTripper by forcing a fixed
+// User-Agent header on every request it forwards.
+type userAgentTransport struct {
+	next      http.RoundTripper
+	userAgent string
+}
+
+// RoundTrip implements [http.RoundTripper]. It forwards req to the
+// wrapped transport with the User-Agent header replaced, leaving the
+// caller's request unmodified.
+func (t *userAgentTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	// RoundTrip isn't allowed to modify the request, but we
+	// can avoid doing a full clone: a shallow copy plus a cloned
+	// header map gives enough isolation.
+	req1 := *req
+	req1.Header = maps.Clone(req.Header)
+	if req1.Header == nil {
+		// maps.Clone(nil) returns nil, and Header.Set would panic on a
+		// nil map. Requests built via http.NewRequest always carry a
+		// non-nil Header, but hand-constructed ones may not.
+		req1.Header = make(http.Header)
+	}
+	req1.Header.Set("User-Agent", t.userAgent)
+	return t.next.RoundTrip(&req1)
+}
diff --git a/vendor/cuelang.org/go/internal/cueversion/version.go b/vendor/cuelang.org/go/internal/cueversion/version.go
new file mode 100644
index 00000000..0501d1ae
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/cueversion/version.go
@@ -0,0 +1,59 @@
+// Package cueversion provides access to the version of the
+// cuelang.org/go module.
+package cueversion
+
+import (
+ "fmt"
+ "runtime"
+ "runtime/debug"
+ "strings"
+ "sync"
+)
+
+// fallbackVersion is used when there isn't a recorded main module version,
+// for example when building via `go install ./cmd/cue`.
+// It should reflect the last release in the current branch.
+//
+// TODO: remove once Go stamps local builds with a main module version
+// derived from the local VCS information per https://go.dev/issue/50603.
+const fallbackVersion = "v0.8.1"
+
+// Version returns the version of the cuelang.org/go module as best as can
+// reasonably be determined. The result is always a valid Go semver version.
+func Version() string {
+	return versionOnce()
+}
+
+// versionOnce computes the module version a single time, preferring the
+// version stamped into the build info and falling back to
+// fallbackVersion whenever the recorded version is absent or a
+// placeholder.
+var versionOnce = sync.OnceValue(func() string {
+	bi, ok := debug.ReadBuildInfo()
+	if !ok {
+		return fallbackVersion
+	}
+	v := bi.Main.Version
+	// These placeholder values all mean "no usable version recorded":
+	// an empty string, a local "(devel)" build, and the pseudo-version
+	// produced by a directory replace directive.
+	if v == "" || v == "(devel)" || v == "v0.0.0-00010101000000-000000000000" {
+		return fallbackVersion
+	}
+	return v
+})
+
+// UserAgent returns a string suitable for use as the User-Agent
+// header in an HTTP request. The clientType argument specifies
+// how CUE is being used: if this is empty it defaults to "cuelang.org/go".
+//
+// Example:
+//
+//	Cue/v0.8.0 (cuelang.org/go) Go/go1.22.0 (linux/amd64)
+func UserAgent(clientType string) string {
+	if clientType == "" {
+		clientType = "cuelang.org/go"
+	}
+	// A Component/Version pair must not contain spaces, but the Go
+	// runtime version can; map spaces to underscores. The runtime
+	// version never contains underscores itself, so this is reversible.
+	goVersion := strings.Map(func(r rune) rune {
+		if r == ' ' {
+			return '_'
+		}
+		return r
+	}, runtime.Version())
+	parts := []string{
+		"Cue/" + Version(),
+		"(" + clientType + ")",
+		"Go/" + goVersion,
+		"(" + runtime.GOOS + "/" + runtime.GOARCH + ")",
+	}
+	return strings.Join(parts, " ")
+}
diff --git a/vendor/cuelang.org/go/internal/encoding/encoder.go b/vendor/cuelang.org/go/internal/encoding/encoder.go
index 9d151e34..77500dcb 100644
--- a/vendor/cuelang.org/go/internal/encoding/encoder.go
+++ b/vendor/cuelang.org/go/internal/encoding/encoder.go
@@ -149,7 +149,15 @@ func NewEncoder(f *build.File, cfg *Config) (*Encoder, error) {
// Casting an ast.Expr to an ast.File ensures that it always ends
// with a newline.
- b, err := format.Node(internal.ToFile(n), opts...)
+ f := internal.ToFile(n)
+ if e.cfg.PkgName != "" && f.PackageName() == "" {
+ f.Decls = append([]ast.Decl{
+ &ast.Package{
+ Name: ast.NewIdent(e.cfg.PkgName),
+ },
+ }, f.Decls...)
+ }
+ b, err := format.Node(f, opts...)
if err != nil {
return err
}
diff --git a/vendor/cuelang.org/go/internal/encoding/yaml/encode.go b/vendor/cuelang.org/go/internal/encoding/yaml/encode.go
index 55871dab..87aec6e4 100644
--- a/vendor/cuelang.org/go/internal/encoding/yaml/encode.go
+++ b/vendor/cuelang.org/go/internal/encoding/yaml/encode.go
@@ -164,7 +164,7 @@ func shouldQuote(str string) bool {
// This regular expression conservatively matches any date, time string,
// or base60 float.
-var useQuote = regexp.MustCompile(`^[\-+0-9:\. \t]+([-:]|[tT])[\-+0-9:\. \t]+[zZ]?$`)
+var useQuote = regexp.MustCompile(`^[\-+0-9:\. \t]+([-:]|[tT])[\-+0-9:\. \t]+[zZ]?$|^0x[a-fA-F0-9]+$`)
// legacyStrings contains a map of fixed strings with special meaning for any
// type in the YAML Tag registry (https://yaml.org/type/index.html) as used
diff --git a/vendor/cuelang.org/go/internal/envflag/flag.go b/vendor/cuelang.org/go/internal/envflag/flag.go
new file mode 100644
index 00000000..68bb798c
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/envflag/flag.go
@@ -0,0 +1,33 @@
+package envflag
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+ "strings"
+)
+
+// Init initializes the fields in flags from the contents of the given
+// environment variable, which contains a comma-separated
+// list of names representing the boolean fields in the struct type T.
+// Names are treated case insensitively.
+func Init[T any](flags *T, envVar string) error {
+	env := os.Getenv(envVar)
+	if env == "" {
+		return nil
+	}
+	// Index the struct's field positions by lower-cased field name.
+	names := make(map[string]int)
+	fv := reflect.ValueOf(flags).Elem()
+	ft := fv.Type()
+	for i := 0; i < ft.NumField(); i++ {
+		names[strings.ToLower(ft.Field(i).Name)] = i
+	}
+	for _, name := range strings.Split(env, ",") {
+		// Fold the supplied name to lower case so that lookups are
+		// case-insensitive, as documented. (Previously only the field
+		// names were folded, so mixed-case input was wrongly rejected.)
+		index, ok := names[strings.ToLower(name)]
+		if !ok {
+			return fmt.Errorf("unknown %s %s", envVar, name)
+		}
+		fv.Field(index).SetBool(true)
+	}
+	return nil
+}
diff --git a/vendor/cuelang.org/go/internal/filetypes/filetypes.go b/vendor/cuelang.org/go/internal/filetypes/filetypes.go
index 27e90b59..7402ddb3 100644
--- a/vendor/cuelang.org/go/internal/filetypes/filetypes.go
+++ b/vendor/cuelang.org/go/internal/filetypes/filetypes.go
@@ -177,6 +177,17 @@ func ParseArgs(args []string) (files []*build.File, err error) {
hasFiles = true
continue
}
+
+ // The CUE command works just fine without this (how?),
+ // but the API tests require this for some reason.
+ //
+ // This is almost certainly wrong, and in the wrong place.
+ //
+ // TODO(aram): why do we need this here?
+ if len(a) == 1 && strings.HasSuffix(a[0], ".wasm") {
+ continue
+ }
+
inst, v, err = parseType("", Input)
if err != nil {
return nil, err
diff --git a/vendor/cuelang.org/go/internal/internal.go b/vendor/cuelang.org/go/internal/internal.go
index 9c85d0f7..c8c31f3d 100644
--- a/vendor/cuelang.org/go/internal/internal.go
+++ b/vendor/cuelang.org/go/internal/internal.go
@@ -22,7 +22,6 @@ package internal // import "cuelang.org/go/internal"
import (
"bufio"
"fmt"
- "os"
"path/filepath"
"strings"
@@ -111,6 +110,16 @@ func Version(minor, patch int) int {
return -1000 + 100*minor + patch
}
+// EvaluatorVersion selects which implementation of the CUE evaluator
+// is used.
+type EvaluatorVersion int
+
+const (
+	// DefaultVersion selects the stable, default evaluator.
+	DefaultVersion EvaluatorVersion = iota
+
+	// The DevVersion is used for new implementations of the evaluator that
+	// do not cover all features of the CUE language yet.
+	DevVersion
+)
+
// ListEllipsis reports the list type and remaining elements of a list. If we
// ever relax the usage of ellipsis, this function will likely change. Using
// this function will ensure keeping correct behavior or causing a compiler
@@ -435,17 +444,6 @@ func IsEllipsis(x ast.Decl) bool {
// GenPath reports the directory in which to store generated files.
func GenPath(root string) string {
- info, err := os.Stat(filepath.Join(root, "cue.mod"))
- if os.IsNotExist(err) || !info.IsDir() {
- // Try legacy pkgDir mode
- pkgDir := filepath.Join(root, "pkg")
- if err == nil && !info.IsDir() {
- return pkgDir
- }
- if info, err := os.Stat(pkgDir); err == nil && info.IsDir() {
- return pkgDir
- }
- }
return filepath.Join(root, "cue.mod", "gen")
}
diff --git a/vendor/cuelang.org/go/internal/mod/modfile/modfile.go b/vendor/cuelang.org/go/internal/mod/modfile/modfile.go
deleted file mode 100644
index 353b92dc..00000000
--- a/vendor/cuelang.org/go/internal/mod/modfile/modfile.go
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2023 CUE Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package modfile
-
-import (
- _ "embed"
- "fmt"
- "sync"
-
- "cuelang.org/go/internal/mod/semver"
-
- "cuelang.org/go/cue"
- "cuelang.org/go/cue/cuecontext"
- "cuelang.org/go/cue/errors"
- "cuelang.org/go/cue/parser"
- "cuelang.org/go/cue/token"
- "cuelang.org/go/internal/mod/module"
-)
-
-//go:embed schema.cue
-var moduleSchemaData []byte
-
-type File struct {
- Module string `json:"module"`
- Language Language `json:"language"`
- Deps map[string]*Dep `json:"deps,omitempty"`
- versions []module.Version
-}
-
-type Language struct {
- Version string `json:"version"`
-}
-
-type Dep struct {
- Version string `json:"v"`
- Default bool `json:"default,omitempty"`
-}
-
-type noDepsFile struct {
- Module string `json:"module"`
-}
-
-var (
- moduleSchemaOnce sync.Once
- _moduleSchema cue.Value
-)
-
-func moduleSchema() cue.Value {
- moduleSchemaOnce.Do(func() {
- ctx := cuecontext.New()
- schemav := ctx.CompileBytes(moduleSchemaData, cue.Filename("cuelang.org/go/internal/mod/modfile/schema.cue"))
- schemav = lookup(schemav, cue.Def("#File"))
- //schemav = schemav.Unify(lookup(schemav, cue.Hid("#Strict", "_")))
- if err := schemav.Validate(); err != nil {
- panic(fmt.Errorf("internal error: invalid CUE module.cue schema: %v", errors.Details(err, nil)))
- }
- _moduleSchema = schemav
- })
- return _moduleSchema
-}
-
-func lookup(v cue.Value, sels ...cue.Selector) cue.Value {
- return v.LookupPath(cue.MakePath(sels...))
-}
-
-// Parse verifies that the module file has correct syntax.
-// The file name is used for error messages.
-// All dependencies must be specified correctly: with major
-// versions in the module paths and canonical dependency
-// versions.
-func Parse(modfile []byte, filename string) (*File, error) {
- return parse(modfile, filename, true)
-}
-
-// ParseLegacy parses the legacy version of the module file
-// that only supports the single field "module" and ignores all other
-// fields.
-func ParseLegacy(modfile []byte, filename string) (*File, error) {
- v := moduleSchema().Context().CompileBytes(modfile, cue.Filename(filename))
- if err := v.Err(); err != nil {
- return nil, errors.Wrapf(err, token.NoPos, "invalid module.cue file")
- }
- var f noDepsFile
- if err := v.Decode(&f); err != nil {
- return nil, newCUEError(err, filename)
- }
- return &File{
- Module: f.Module,
- }, nil
-}
-
-// ParseNonStrict is like Parse but allows some laxity in the parsing:
-// - if a module path lacks a version, it's taken from the version.
-// - if a non-canonical version is used, it will be canonicalized.
-//
-// The file name is used for error messages.
-func ParseNonStrict(modfile []byte, filename string) (*File, error) {
- return parse(modfile, filename, false)
-}
-
-func parse(modfile []byte, filename string, strict bool) (*File, error) {
- file, err := parser.ParseFile(filename, modfile)
- if err != nil {
- return nil, errors.Wrapf(err, token.NoPos, "invalid module.cue file syntax")
- }
- // TODO disallow non-data-mode CUE.
-
- v := moduleSchema().Context().BuildFile(file)
- if err := v.Validate(cue.Concrete(true)); err != nil {
- return nil, errors.Wrapf(err, token.NoPos, "invalid module.cue file value")
- }
- v = v.Unify(moduleSchema())
- if err := v.Validate(); err != nil {
- return nil, newCUEError(err, filename)
- }
- var mf File
- if err := v.Decode(&mf); err != nil {
- return nil, errors.Wrapf(err, token.NoPos, "internal error: cannot decode into modFile struct")
- }
- if strict {
- _, v, ok := module.SplitPathVersion(mf.Module)
- if !ok {
- return nil, fmt.Errorf("module path %q in %s does not contain major version", mf.Module, filename)
- }
- if semver.Major(v) != v {
- return nil, fmt.Errorf("module path %s in %q should contain the major version only", mf.Module, filename)
- }
- }
- if v := mf.Language.Version; v != "" && !semver.IsValid(v) {
- return nil, fmt.Errorf("language version %q in %s is not well formed", v, filename)
- }
- var versions []module.Version
- // Check that major versions match dependency versions.
- for m, dep := range mf.Deps {
- v, err := module.NewVersion(m, dep.Version)
- if err != nil {
- return nil, fmt.Errorf("invalid module.cue file %s: cannot make version from module %q, version %q: %v", filename, m, dep.Version, err)
- }
- versions = append(versions, v)
- if strict && v.Path() != m {
- return nil, fmt.Errorf("invalid module.cue file %s: no major version in %q", filename, m)
- }
- }
-
- mf.versions = versions[:len(versions):len(versions)]
- module.Sort(mf.versions)
- return &mf, nil
-}
-
-func newCUEError(err error, filename string) error {
- // TODO we have some potential to improve error messages here.
- return err
-}
-
-// DepVersions returns the versions of all the modules depended on by the
-// file. The caller should not modify the returned slice.
-func (f *File) DepVersions() []module.Version {
- return f.versions
-}
diff --git a/vendor/cuelang.org/go/internal/mod/modimports/modimports.go b/vendor/cuelang.org/go/internal/mod/modimports/modimports.go
new file mode 100644
index 00000000..9d0c23cc
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/mod/modimports/modimports.go
@@ -0,0 +1,159 @@
+package modimports
+
+import (
+ "errors"
+ "fmt"
+ "io/fs"
+ "path"
+ "sort"
+ "strconv"
+ "strings"
+
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/parser"
+ "cuelang.org/go/internal/cueimports"
+ "cuelang.org/go/mod/module"
+)
+
+// ModuleFile describes a single file found within a module, together
+// with the parsed prefix of its contents.
+type ModuleFile struct {
+	// FilePath holds the path of the module file
+	// relative to the root of the fs. This will be
+	// valid even if there's an associated error.
+	//
+	// If there's an error, it might not be a CUE file.
+	FilePath string
+
+	// Syntax includes only the portion of the file up to and including
+	// the imports. It will be nil if there was an error reading the file.
+	Syntax *ast.File
+}
+
+// AllImports returns a sorted list of all the package paths
+// imported by the module files produced by modFilesIter
+// in canonical form.
+//
+// Iteration stops at the first error, which is then returned.
+func AllImports(modFilesIter func(func(ModuleFile, error) bool)) (_ []string, retErr error) {
+	pkgPaths := make(map[string]bool)
+	modFilesIter(func(mf ModuleFile, err error) bool {
+		if err != nil {
+			// Record the error and stop the iteration by returning false.
+			retErr = fmt.Errorf("cannot read %q: %v", mf.FilePath, err)
+			return false
+		}
+		// TODO look at build tags and omit files with "ignore" tags.
+		for _, imp := range mf.Syntax.Imports {
+			pkgPath, err := strconv.Unquote(imp.Path.Value)
+			if err != nil {
+				// TODO location formatting
+				retErr = fmt.Errorf("invalid import path %q in %s", imp.Path.Value, mf.FilePath)
+				return false
+			}
+			// Canonicalize the path.
+			pkgPath = module.ParseImportPath(pkgPath).Canonical().String()
+			pkgPaths[pkgPath] = true
+		}
+		return true
+	})
+	if retErr != nil {
+		return nil, retErr
+	}
+	// TODO use maps.Keys when we can.
+	pkgPathSlice := make([]string, 0, len(pkgPaths))
+	for p := range pkgPaths {
+		pkgPathSlice = append(pkgPathSlice, p)
+	}
+	sort.Strings(pkgPathSlice)
+	return pkgPathSlice, nil
+}
+
+// PackageFiles returns an iterator that produces all the CUE files
+// inside the package with the given name at the given location.
+// If pkgQualifier is "*", files from all packages in the directory will be produced.
+func PackageFiles(fsys fs.FS, dir string, pkgQualifier string) func(func(ModuleFile, error) bool) {
+	return func(yield func(ModuleFile, error) bool) {
+		entries, err := fs.ReadDir(fsys, dir)
+		if err != nil {
+			// Report the unreadable directory itself as the file path.
+			yield(ModuleFile{FilePath: dir}, err)
+			return
+		}
+		for _, entry := range entries {
+			fpath := path.Join(dir, entry.Name())
+			if !yieldPackageFile(fsys, fpath, pkgQualifier, yield) {
+				return
+			}
+		}
+	}
+}
+
+// AllModuleFiles returns an iterator that produces all the CUE files inside the
+// module at the given root.
+//
+// The cue.mod directory, directories whose name starts with "." or "_",
+// and nested modules (directories containing their own cue.mod entry)
+// are not descended into.
+func AllModuleFiles(fsys fs.FS, root string) func(func(ModuleFile, error) bool) {
+	return func(yield func(ModuleFile, error) bool) {
+		// The error result of WalkDir is deliberately ignored: the
+		// callback only ever returns nil, fs.SkipDir or fs.SkipAll, and
+		// all real errors are delivered through yield.
+		fs.WalkDir(fsys, root, func(fpath string, d fs.DirEntry, err error) (_err error) {
+			if err != nil {
+				if !yield(ModuleFile{
+					FilePath: fpath,
+				}, err) {
+					return fs.SkipAll
+				}
+				return nil
+			}
+			// Skip the module's metadata directory entirely.
+			if path.Base(fpath) == "cue.mod" {
+				return fs.SkipDir
+			}
+			if d.IsDir() {
+				if fpath == root {
+					return nil
+				}
+				base := path.Base(fpath)
+				if strings.HasPrefix(base, ".") || strings.HasPrefix(base, "_") {
+					return fs.SkipDir
+				}
+				// A cue.mod entry marks the root of a nested module whose
+				// files belong to that module, not this one.
+				_, err := fs.Stat(fsys, path.Join(fpath, "cue.mod"))
+				if err == nil {
+					// TODO is it enough to have a cue.mod directory
+					// or should we look for cue.mod/module.cue too?
+					return fs.SkipDir
+				}
+				if !errors.Is(err, fs.ErrNotExist) {
+					// We haven't got a package file to produce with the
+					// error here. Should we just ignore the error or produce
+					// a ModuleFile with an empty path?
+					yield(ModuleFile{}, err)
+					return fs.SkipAll
+				}
+				return nil
+			}
+			if !yieldPackageFile(fsys, fpath, "*", yield) {
+				return fs.SkipAll
+			}
+			return nil
+		})
+	}
+}
+
+// yieldPackageFile reads the import section of the CUE file at fpath,
+// parses it, and passes the result to yield. It reports whether
+// iteration should continue: the value returned by yield, or true when
+// the file was filtered out without being yielded.
+func yieldPackageFile(fsys fs.FS, fpath, pkgQualifier string, yield func(ModuleFile, error) bool) bool {
+	// Non-CUE files are ignored without being yielded.
+	if !strings.HasSuffix(fpath, ".cue") {
+		return true
+	}
+	pf := ModuleFile{
+		FilePath: fpath,
+	}
+	f, err := fsys.Open(fpath)
+	if err != nil {
+		return yield(pf, err)
+	}
+	defer f.Close()
+	// Only the prefix of the file up to the end of its imports is read
+	// and parsed; the rest of the file is never touched.
+	data, err := cueimports.Read(f)
+	if err != nil {
+		return yield(pf, err)
+	}
+	syntax, err := parser.ParseFile(fpath, data, parser.ParseComments)
+	if err != nil {
+		return yield(pf, err)
+	}
+	// Filter by package name unless all packages were requested.
+	if pkgQualifier != "*" && syntax.PackageName() != pkgQualifier {
+		return true
+	}
+	pf.Syntax = syntax
+	return yield(pf, nil)
+}
diff --git a/vendor/cuelang.org/go/internal/mod/modload/query.go b/vendor/cuelang.org/go/internal/mod/modload/query.go
new file mode 100644
index 00000000..45a73c10
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/mod/modload/query.go
@@ -0,0 +1,135 @@
+package modload
+
+import (
+ "context"
+ "fmt"
+ "path"
+ "runtime"
+ "sync"
+
+ "cuelang.org/go/internal/mod/modpkgload"
+ "cuelang.org/go/internal/mod/modrequirements"
+ "cuelang.org/go/internal/mod/semver"
+ "cuelang.org/go/internal/par"
+ "cuelang.org/go/mod/module"
+)
+
+// queryImport attempts to locate a module that can be added to the
+// current build list to provide the package with the given import path.
+// It also reports whether a default major version will be required
+// to select the candidates (this will be true if pkgPath lacks
+// a major version).
+//
+// It avoids results that are already in the given requirements.
+func (ld *loader) queryImport(ctx context.Context, pkgPath string, rs *modrequirements.Requirements) (candidates []module.Version, needsDefault bool, err error) {
+	if modpkgload.IsStdlibPackage(pkgPath) {
+		// queryImport is only reached for packages that could not
+		// already be resolved, so:
+		// This package isn't in the standard library and isn't in any module already
+		// in the build list.
+		//
+		// Moreover, the import path is reserved for the standard library, so
+		// QueryPattern cannot possibly find a module containing this package.
+		//
+		// Instead of trying QueryPattern, report an ImportMissingError immediately.
+		return nil, false, &modpkgload.ImportMissingError{Path: pkgPath}
+	}
+
+	// Look up module containing the package, for addition to the build list.
+	// Goal is to determine the module, download it to dir,
+	// and return m, dir, ImportMissingError.
+
+	// TODO this should probably be a non-debug log message.
+	logf("cue: finding module for package %s", pkgPath)
+
+	candidates, needsDefault, err = ld.queryLatestModules(ctx, pkgPath, rs)
+	if err != nil {
+		return nil, false, err
+	}
+	if len(candidates) == 0 {
+		// NOTE(review): wrapping with %v flattens the error, so callers
+		// cannot recover the ImportMissingError via errors.As here —
+		// confirm whether this is intended (the branch above returns
+		// the error unwrapped).
+		return nil, false, fmt.Errorf("%v", &modpkgload.ImportMissingError{Path: pkgPath})
+	}
+	return candidates, needsDefault, nil
+}
+
+// queryLatestModules looks for potential modules that might contain the given
+// package by looking for the latest module version of all viable prefixes of pkgPath.
+// It does not return modules that are already present in the given requirements.
+// It also reports whether a default major version will be required
+// (true when pkgPath itself carries no major version).
+func (ld *loader) queryLatestModules(ctx context.Context, pkgPath string, rs *modrequirements.Requirements) ([]module.Version, bool, error) {
+	parts := module.ParseImportPath(pkgPath)
+	// latestModuleForPrefix resolves one candidate module path (a prefix
+	// of pkgPath). It returns the zero Version when the prefix should be
+	// skipped: ambiguous default major version, or already required.
+	latestModuleForPrefix := func(prefix string) (module.Version, error) {
+		mv := parts.Version
+		if mv == "" {
+			var status modrequirements.MajorVersionDefaultStatus
+			mv, status = rs.DefaultMajorVersion(prefix)
+			if status == modrequirements.AmbiguousDefault {
+				// There are already multiple possibilities and
+				// we don't have any way of choosing one.
+				return module.Version{}, nil
+			}
+		}
+		mpath := prefix
+		if mv != "" {
+			mpath = prefix + "@" + mv
+			if _, ok := rs.RootSelected(mpath); ok {
+				// Already present in current requirements.
+				return module.Version{}, nil
+			}
+		}
+
+		versions, err := ld.registry.ModuleVersions(ctx, mpath)
+		logf("getting module versions for %q (prefix %q) -> %q, %v", mpath, prefix, versions, err)
+		if err != nil {
+			return module.Version{}, err
+		}
+		logf("-> %q", versions)
+		if v := latestVersion(versions); v != "" {
+			return module.NewVersion(prefix, v)
+		}
+		return module.Version{}, nil
+	}
+	// Query every prefix concurrently, bounded by GOMAXPROCS workers.
+	work := par.NewQueue(runtime.GOMAXPROCS(0))
+	var (
+		mu sync.Mutex
+		candidates []module.Version
+		queryErr error
+	)
+	logf("initial module path %q", parts.Path)
+	for prefix := parts.Path; prefix != "."; prefix = path.Dir(prefix) {
+		// Shadow the loop variable: it is captured by the closure below
+		// (required under pre-Go-1.22 loop-variable semantics).
+		prefix := prefix
+		work.Add(func() {
+			v, err := latestModuleForPrefix(prefix)
+			mu.Lock()
+			defer mu.Unlock()
+			if err != nil {
+				// Keep only the first error encountered.
+				if queryErr == nil {
+					queryErr = err
+				}
+				return
+			}
+			if v.IsValid() {
+				candidates = append(candidates, v)
+			}
+		})
+	}
+	// Wait for all queued work to finish before reading the results.
+	<-work.Idle()
+	return candidates, parts.Version == "", queryErr
+}
+
+// latestVersion returns the latest of any of the given versions,
+// ignoring prerelease versions if there is any stable version.
+func latestVersion(versions []string) string {
+ maxStable := ""
+ maxAny := ""
+ for _, v := range versions {
+ if semver.Prerelease(v) == "" && (maxStable == "" || semver.Compare(v, maxStable) > 0) {
+ maxStable = v
+ }
+ if maxAny == "" || semver.Compare(v, maxAny) > 0 {
+ maxAny = v
+ }
+ }
+ if maxStable != "" {
+ return maxStable
+ }
+ return maxAny
+}
diff --git a/vendor/cuelang.org/go/internal/mod/modload/tidy.go b/vendor/cuelang.org/go/internal/mod/modload/tidy.go
new file mode 100644
index 00000000..40fb97b1
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/mod/modload/tidy.go
@@ -0,0 +1,672 @@
+package modload
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io/fs"
+ "log"
+ "maps"
+ "path"
+ "runtime"
+ "slices"
+
+ "cuelang.org/go/internal/mod/modimports"
+ "cuelang.org/go/internal/mod/modpkgload"
+ "cuelang.org/go/internal/mod/modrequirements"
+ "cuelang.org/go/internal/mod/semver"
+ "cuelang.org/go/internal/par"
+ "cuelang.org/go/mod/modfile"
+ "cuelang.org/go/mod/module"
+)
+
+const logging = false // TODO hook this up to CUE_DEBUG
+
+// Registry is modload's view of a module registry.
+type Registry interface {
+ modrequirements.Registry
+ modpkgload.Registry
+ // ModuleVersions returns all the versions for the module with the given path
+ // sorted in semver order.
+ // If mpath has a major version suffix, only versions with that major version will
+ // be returned.
+ ModuleVersions(ctx context.Context, mpath string) ([]string, error)
+}
+
+type loader struct {
+ mainModule module.Version
+ mainModuleLoc module.SourceLoc
+ registry Registry
+ checkTidy bool
+}
+
+// CheckTidy checks that the module file in the given main module is considered tidy.
+// A module file is considered tidy when:
+// - it can be parsed OK by [modfile.ParseStrict].
+// - it contains a language version in canonical semver form
+// - it includes valid modules for all of its dependencies
+// - it does not include any unnecessary dependencies.
+func CheckTidy(ctx context.Context, fsys fs.FS, modRoot string, reg Registry) error {
+ _, err := tidy(ctx, fsys, modRoot, reg, "", true)
+ return err
+}
+
+// Tidy evaluates all the requirements of the given main module, using the given
+// registry to download requirements and returns a resolved and tidied module file.
+// If there's no language version in the module file and cueVers is non-empty
+// it will be used to populate the language version field.
+func Tidy(ctx context.Context, fsys fs.FS, modRoot string, reg Registry, cueVers string) (*modfile.File, error) {
+ return tidy(ctx, fsys, modRoot, reg, cueVers, false)
+}
+
+func tidy(ctx context.Context, fsys fs.FS, modRoot string, reg Registry, cueVers string, checkTidy bool) (*modfile.File, error) {
+ mainModuleVersion, mf, err := readModuleFile(ctx, fsys, modRoot)
+ if err != nil {
+ return nil, err
+ }
+ if checkTidy {
+ // This is the cheapest check, so do it first.
+ if mf.Language == nil || mf.Language.Version == "" {
+ return nil, fmt.Errorf("no language version found in cue.mod/module.cue")
+ }
+ }
+ // TODO check that module path is well formed etc
+ origRs := modrequirements.NewRequirements(mf.Module, reg, mf.DepVersions(), mf.DefaultMajorVersions())
+ rootPkgPaths, err := modimports.AllImports(modimports.AllModuleFiles(fsys, modRoot))
+ if err != nil {
+ return nil, err
+ }
+ ld := &loader{
+ mainModule: mainModuleVersion,
+ registry: reg,
+ mainModuleLoc: module.SourceLoc{
+ FS: fsys,
+ Dir: modRoot,
+ },
+ checkTidy: checkTidy,
+ }
+
+ rs, pkgs, err := ld.resolveDependencies(ctx, rootPkgPaths, origRs)
+ if err != nil {
+ return nil, err
+ }
+ for _, pkg := range pkgs.All() {
+ if pkg.Error() != nil {
+ return nil, fmt.Errorf("failed to resolve %q: %v", pkg.ImportPath(), pkg.Error())
+ }
+ }
+ // TODO check whether it's changed or not.
+ rs, err = ld.tidyRoots(ctx, rs, pkgs)
+ if err != nil {
+ return nil, fmt.Errorf("cannot tidy requirements: %v", err)
+ }
+ if ld.checkTidy && !equalRequirements(origRs, rs) {
+ // TODO be more specific in this error?
+ return nil, fmt.Errorf("module is not tidy")
+ }
+ return modfileFromRequirements(mf, rs, cueVers), nil
+}
+
+func equalRequirements(rs0, rs1 *modrequirements.Requirements) bool {
+ return slices.Equal(rs0.RootModules(), rs1.RootModules()) &&
+ maps.Equal(rs0.DefaultMajorVersions(), rs1.DefaultMajorVersions())
+}
+
+func readModuleFile(ctx context.Context, fsys fs.FS, modRoot string) (module.Version, *modfile.File, error) {
+ modFilePath := path.Join(modRoot, "cue.mod/module.cue")
+ data, err := fs.ReadFile(fsys, modFilePath)
+ if err != nil {
+ return module.Version{}, nil, fmt.Errorf("cannot read cue.mod file: %v", err)
+ }
+ mf, err := modfile.ParseNonStrict(data, modFilePath)
+ if err != nil {
+ return module.Version{}, nil, err
+ }
+ mainModuleVersion, err := module.NewVersion(mf.Module, "")
+ if err != nil {
+ return module.Version{}, nil, fmt.Errorf("invalid module path %q: %v", mf.Module, err)
+ }
+ return mainModuleVersion, mf, nil
+}
+
+func modfileFromRequirements(old *modfile.File, rs *modrequirements.Requirements, cueVers string) *modfile.File {
+ mf := &modfile.File{
+ Module: old.Module,
+ Language: old.Language,
+ Deps: make(map[string]*modfile.Dep),
+ }
+ if cueVers != "" && (mf.Language == nil || mf.Language.Version == "") {
+ mf.Language = &modfile.Language{
+ Version: cueVers,
+ }
+ }
+ defaults := rs.DefaultMajorVersions()
+ for _, v := range rs.RootModules() {
+ if v.IsLocal() {
+ continue
+ }
+ mf.Deps[v.Path()] = &modfile.Dep{
+ Version: v.Version(),
+ Default: defaults[v.BasePath()] == semver.Major(v.Version()),
+ }
+ }
+ return mf
+}
+
+func (ld *loader) resolveDependencies(ctx context.Context, rootPkgPaths []string, rs *modrequirements.Requirements) (*modrequirements.Requirements, *modpkgload.Packages, error) {
+ for {
+ logf("---- LOADING from requirements %q", rs.RootModules())
+ pkgs := modpkgload.LoadPackages(ctx, ld.mainModule.Path(), ld.mainModuleLoc, rs, ld.registry, rootPkgPaths)
+ if ld.checkTidy {
+ for _, pkg := range pkgs.All() {
+ if err := pkg.Error(); err != nil {
+ return nil, nil, fmt.Errorf("module is not tidy: %v", err)
+ }
+ }
+ // All packages could be loaded OK so there are no new
+ // dependencies to be resolved and nothing to do.
+ // Specifically, if there are no packages in error, then
+ // resolveMissingImports will never return any entries
+ // in modAddedBy and the default major versions won't
+ // change.
+ return rs, pkgs, nil
+ }
+
+ // TODO the original code calls updateRequirements at this point.
+ // /home/rogpeppe/go/src/cmd/go/internal/modload/load.go:1124
+
+ modAddedBy, defaultMajorVersions := ld.resolveMissingImports(ctx, pkgs, rs)
+ if !maps.Equal(defaultMajorVersions, rs.DefaultMajorVersions()) {
+ rs = rs.WithDefaultMajorVersions(defaultMajorVersions)
+ }
+ if len(modAddedBy) == 0 {
+ // The roots are stable, and we've resolved all of the missing packages
+ // that we can.
+ logf("dependencies are stable at %q", rs.RootModules())
+ return rs, pkgs, nil
+ }
+ toAdd := make([]module.Version, 0, len(modAddedBy))
+ // TODO use maps.Keys when we can.
+ for m, p := range modAddedBy {
+ logf("added: %v (by %v)", modAddedBy, p.ImportPath())
+ toAdd = append(toAdd, m)
+ }
+ module.Sort(toAdd) // to make errors deterministic
+ oldRs := rs
+ var err error
+ rs, err = ld.updateRoots(ctx, rs, pkgs, toAdd)
+ if err != nil {
+ return nil, nil, err
+ }
+ if slices.Equal(rs.RootModules(), oldRs.RootModules()) {
+ // Something is deeply wrong. resolveMissingImports gave us a non-empty
+ // set of modules to add to the graph, but adding those modules had no
+ // effect — either they were already in the graph, or updateRoots did not
+ // add them as requested.
+ panic(fmt.Sprintf("internal error: adding %v to module graph had no effect on root requirements (%v)", toAdd, rs.RootModules()))
+ }
+ logf("after loading, requirements: %v", rs.RootModules())
+ }
+}
+
+// updatePrunedRoots returns a set of root requirements that maintains the
+// invariants of the cue.mod/module.cue file needed to support graph pruning:
+//
+// 1. The selected version of the module providing each package marked with
+// either pkgInAll or pkgIsRoot is included as a root.
+// Note that certain root patterns (such as '...') may explode the root set
+// to contain every module that provides any package imported (or merely
+// required) by any other module.
+// 2. Each root appears only once, at the selected version of its path
+// (if rs.graph is non-nil) or at the highest version otherwise present as a
+// root (otherwise).
+// 3. Every module path that appears as a root in rs remains a root.
+// 4. Every version in add is selected at its given version unless upgraded by
+// (the dependencies of) an existing root or another module in add.
+//
+// The packages in pkgs are assumed to have been loaded from either the roots of
+// rs or the modules selected in the graph of rs.
+//
+// The above invariants together imply the graph-pruning invariants for the
+// go.mod file:
+//
+// 1. (The import invariant.) Every module that provides a package transitively
+// imported by any package or test in the main module is included as a root.
+// This follows by induction from (1) and (3) above. Transitively-imported
+// packages loaded during this invocation are marked with pkgInAll (1),
+// and by hypothesis any transitively-imported packages loaded in previous
+// invocations were already roots in rs (3).
+//
+// 2. (The argument invariant.) Every module that provides a package matching
+// an explicit package pattern is included as a root. This follows directly
+// from (1): packages matching explicit package patterns are marked with
+// pkgIsRoot.
+//
+// 3. (The completeness invariant.) Every module that contributed any package
+// to the build is required by either the main module or one of the modules
+// it requires explicitly. This invariant is left up to the caller, who must
+// not load packages from outside the module graph but may add roots to the
+// graph, but is facilitated by (3). If the caller adds roots to the graph in
+// order to resolve missing packages, then updatePrunedRoots will retain them,
+// the selected versions of those roots cannot regress, and they will
+// eventually be written back to the main module's go.mod file.
+//
+// (See https://golang.org/design/36460-lazy-module-loading#invariants for more
+// detail.)
+func (ld *loader) updateRoots(ctx context.Context, rs *modrequirements.Requirements, pkgs *modpkgload.Packages, add []module.Version) (*modrequirements.Requirements, error) {
+ roots := rs.RootModules()
+ rootsUpgraded := false
+
+ spotCheckRoot := map[module.Version]bool{}
+
+ // “The selected version of the module providing each package marked with
+ // either pkgInAll or pkgIsRoot is included as a root.”
+ needSort := false
+ for _, pkg := range pkgs.All() {
+ if !pkg.Mod().IsValid() || !pkg.FromExternalModule() {
+ // pkg was not loaded from a module dependency, so we don't need
+ // to do anything special to maintain that dependency.
+ continue
+ }
+
+ switch {
+ case pkg.HasFlags(modpkgload.PkgInAll):
+ // pkg is transitively imported by a package or test in the main module.
+ // We need to promote the module that maintains it to a root: if some
+ // other module depends on the main module, and that other module also
+ // uses a pruned module graph, it will expect to find all of our
+ // transitive dependencies by reading just our go.mod file, not the go.mod
+ // files of everything we depend on.
+ //
+ // (This is the “import invariant” that makes graph pruning possible.)
+
+ case pkg.HasFlags(modpkgload.PkgIsRoot):
+ // pkg is a root of the package-import graph. (Generally this means that
+ // it matches a command-line argument.) We want future invocations of the
+ // 'go' command — such as 'go test' on the same package — to continue to
+ // use the same versions of its dependencies that we are using right now.
+ // So we need to bring this package's dependencies inside the pruned
+ // module graph.
+ //
+ // Making the module containing this package a root of the module graph
+ // does exactly that: if the module containing the package supports graph
+ // pruning then it should satisfy the import invariant itself, so all of
+ // its dependencies should be in its go.mod file, and if the module
+ // containing the package does not support pruning then if we make it a
+ // root we will load all of its (unpruned) transitive dependencies into
+ // the module graph.
+ //
+ // (This is the “argument invariant”, and is important for
+ // reproducibility.)
+
+ default:
+ // pkg is a dependency of some other package outside of the main module.
+ // As far as we know it's not relevant to the main module (and thus not
+ // relevant to consumers of the main module either), and its dependencies
+ // should already be in the module graph — included in the dependencies of
+ // the package that imported it.
+ continue
+ }
+ if _, ok := rs.RootSelected(pkg.Mod().Path()); ok {
+ // It is possible that the main module's go.mod file is incomplete or
+ // otherwise erroneous — for example, perhaps the author forgot to 'git
+ // add' their updated go.mod file after adding a new package import, or
+ // perhaps they made an edit to the go.mod file using a third-party tool
+ // ('git merge'?) that doesn't maintain consistency for module
+ // dependencies. If that happens, ideally we want to detect the missing
+ // requirements and fix them up here.
+ //
+ // However, we also need to be careful not to be too aggressive. For
+ // transitive dependencies of external tests, the go.mod file for the
+ // module containing the test itself is expected to provide all of the
+ // relevant dependencies, and we explicitly don't want to pull in
+ // requirements on *irrelevant* requirements that happen to occur in the
+ // go.mod files for these transitive-test-only dependencies. (See the test
+ // in mod_lazy_test_horizon.txt for a concrete example).
+ //
+ // The “goldilocks zone” seems to be to spot-check exactly the same
+ // modules that we promote to explicit roots: namely, those that provide
+ // packages transitively imported by the main module, and those that
+ // provide roots of the package-import graph. That will catch erroneous
+ // edits to the main module's go.mod file and inconsistent requirements in
+ // dependencies that provide imported packages, but will ignore erroneous
+ // or misleading requirements in dependencies that aren't obviously
+ // relevant to the packages in the main module.
+ spotCheckRoot[pkg.Mod()] = true
+ } else {
+ roots = append(roots, pkg.Mod())
+ rootsUpgraded = true
+ // The roots slice was initially sorted because rs.rootModules was sorted,
+ // but the root we just added could be out of order.
+ needSort = true
+ }
+ }
+
+ for _, m := range add {
+ if !m.IsValid() {
+ panic("add contains invalid module")
+ }
+ if v, ok := rs.RootSelected(m.Path()); !ok || semver.Compare(v, m.Version()) < 0 {
+ roots = append(roots, m)
+ rootsUpgraded = true
+ needSort = true
+ }
+ }
+ if needSort {
+ module.Sort(roots)
+ }
+
+	// “Each root appears only once, at the selected version of its path ….”
+ for {
+ var mg *modrequirements.ModuleGraph
+ if rootsUpgraded {
+ // We've added or upgraded one or more roots, so load the full module
+ // graph so that we can update those roots to be consistent with other
+ // requirements.
+
+ rs = modrequirements.NewRequirements(ld.mainModule.Path(), ld.registry, roots, rs.DefaultMajorVersions())
+ var err error
+ mg, err = rs.Graph(ctx)
+ if err != nil {
+ return rs, err
+ }
+ } else {
+ // Since none of the roots have been upgraded, we have no reason to
+ // suspect that they are inconsistent with the requirements of any other
+ // roots. Only look at the full module graph if we've already loaded it;
+ // otherwise, just spot-check the explicit requirements of the roots from
+ // which we loaded packages.
+ if rs.GraphIsLoaded() {
+ // We've already loaded the full module graph, which includes the
+ // requirements of all of the root modules — even the transitive
+ // requirements, if they are unpruned!
+ mg, _ = rs.Graph(ctx)
+ } else if !ld.spotCheckRoots(ctx, rs, spotCheckRoot) {
+ // We spot-checked the explicit requirements of the roots that are
+ // relevant to the packages we've loaded. Unfortunately, they're
+ // inconsistent in some way; we need to load the full module graph
+ // so that we can fix the roots properly.
+ var err error
+ mg, err = rs.Graph(ctx)
+ if err != nil {
+ return rs, err
+ }
+ }
+ }
+
+ roots = make([]module.Version, 0, len(rs.RootModules()))
+ rootsUpgraded = false
+ inRootPaths := map[string]bool{
+ ld.mainModule.Path(): true,
+ }
+ for _, m := range rs.RootModules() {
+ if inRootPaths[m.Path()] {
+ // This root specifies a redundant path. We already retained the
+ // selected version of this path when we saw it before, so omit the
+ // redundant copy regardless of its version.
+ //
+ // When we read the full module graph, we include the dependencies of
+ // every root even if that root is redundant. That better preserves
+ // reproducibility if, say, some automated tool adds a redundant
+ // 'require' line and then runs 'go mod tidy' to try to make everything
+ // consistent, since the requirements of the older version are carried
+ // over.
+ //
+ // So omitting a root that was previously present may *reduce* the
+ // selected versions of non-roots, but merely removing a requirement
+ // cannot *increase* the selected versions of other roots as a result —
+ // we don't need to mark this change as an upgrade. (This particular
+ // change cannot invalidate any other roots.)
+ continue
+ }
+
+ var v string
+ if mg == nil {
+ v, _ = rs.RootSelected(m.Path())
+ } else {
+ v = mg.Selected(m.Path())
+ }
+ mv, err := module.NewVersion(m.Path(), v)
+ if err != nil {
+ return nil, fmt.Errorf("internal error: cannot form module version from %q@%q", m.Path(), v)
+ }
+ roots = append(roots, mv)
+ inRootPaths[m.Path()] = true
+ if v != m.Version() {
+ rootsUpgraded = true
+ }
+ }
+ // Note that rs.rootModules was already sorted by module path and version,
+ // and we appended to the roots slice in the same order and guaranteed that
+ // each path has only one version, so roots is also sorted by module path
+ // and (trivially) version.
+
+ if !rootsUpgraded {
+ // The root set has converged: every root going into this iteration was
+			// already at its selected version, although we have removed other
+ // (redundant) roots for the same path.
+ break
+ }
+ }
+
+ if slices.Equal(roots, rs.RootModules()) {
+ // The root set is unchanged and rs was already pruned, so keep rs to
+ // preserve its cached ModuleGraph (if any).
+ return rs, nil
+ }
+ return modrequirements.NewRequirements(ld.mainModule.Path(), ld.registry, roots, rs.DefaultMajorVersions()), nil
+}
+
+// resolveMissingImports returns a set of modules that could be added as
+// dependencies in order to resolve missing packages from pkgs.
+//
+// It returns a map from each new module version to
+// the first missing package that module would resolve.
+func (ld *loader) resolveMissingImports(ctx context.Context, pkgs *modpkgload.Packages, rs *modrequirements.Requirements) (modAddedBy map[module.Version]*modpkgload.Package, defaultMajorVersions map[string]string) {
+ type pkgMod struct {
+ pkg *modpkgload.Package
+ needsDefault *bool
+ mods *[]module.Version
+ }
+ var pkgMods []pkgMod
+ work := par.NewQueue(runtime.GOMAXPROCS(0))
+ for _, pkg := range pkgs.All() {
+ pkg := pkg
+ if pkg.Error() == nil {
+ continue
+ }
+ if !errors.As(pkg.Error(), new(*modpkgload.ImportMissingError)) {
+ // Leave other errors to be reported outside of the module resolution logic.
+ continue
+ }
+ logf("querying %q", pkg.ImportPath())
+ var mods []module.Version // updated asynchronously.
+ var needsDefault bool
+ work.Add(func() {
+ var err error
+ mods, needsDefault, err = ld.queryImport(ctx, pkg.ImportPath(), rs)
+ if err != nil {
+ // pkg.err was already non-nil, so we can reasonably attribute the error
+ // for pkg to either the original error or the one returned by
+ // queryImport. The existing error indicates only that we couldn't find
+ // the package, whereas the query error also explains why we didn't fix
+ // the problem — so we prefer the latter.
+ pkg.SetError(err)
+ }
+
+ // err is nil, but we intentionally leave pkg.err non-nil: we still haven't satisfied other invariants of a
+ // successfully-loaded package, such as scanning and loading the imports
+ // of that package. If we succeed in resolving the new dependency graph,
+ // the caller can reload pkg and update the error at that point.
+ //
+ // Even then, the package might not be loaded from the version we've
+ // identified here. The module may be upgraded by some other dependency,
+ // or by a transitive dependency of mod itself, or — less likely — the
+ // package may be rejected by an AllowPackage hook or rendered ambiguous
+ // by some other newly-added or newly-upgraded dependency.
+ })
+
+ pkgMods = append(pkgMods, pkgMod{pkg: pkg, mods: &mods, needsDefault: &needsDefault})
+ }
+ <-work.Idle()
+
+ modAddedBy = map[module.Version]*modpkgload.Package{}
+ defaultMajorVersions = make(map[string]string)
+ for m, v := range rs.DefaultMajorVersions() {
+ defaultMajorVersions[m] = v
+ }
+ for _, pm := range pkgMods {
+ pkg, mods, needsDefault := pm.pkg, *pm.mods, *pm.needsDefault
+ for _, mod := range mods {
+ // TODO support logging progress messages like this but without printing to stderr?
+ logf("cue: found potential %s in %v", pkg.ImportPath(), mod)
+ if modAddedBy[mod] == nil {
+ modAddedBy[mod] = pkg
+ }
+ if needsDefault {
+ defaultMajorVersions[mod.BasePath()] = semver.Major(mod.Version())
+ }
+ }
+ }
+
+ return modAddedBy, defaultMajorVersions
+}
+
+// tidyRoots returns a minimal set of root requirements that maintains the
+// invariants of the cue.mod/module.cue file needed to support graph pruning for the given
+// packages:
+//
+// 1. For each package marked with PkgInAll, the module path that provided that
+// package is included as a root.
+// 2. For all packages, the module that provided that package either remains
+// selected at the same version or is upgraded by the dependencies of a
+// root.
+//
+// If any module that provided a package has been upgraded above its previous
+// version, the caller may need to reload and recompute the package graph.
+//
+// To ensure that the loading process eventually converges, the caller should
+// add any needed roots from the tidy root set (without removing existing untidy
+// roots) until the set of roots has converged.
+func (ld *loader) tidyRoots(ctx context.Context, old *modrequirements.Requirements, pkgs *modpkgload.Packages) (*modrequirements.Requirements, error) {
+ var (
+ roots []module.Version
+ pathIsRoot = map[string]bool{ld.mainModule.Path(): true}
+ )
+ // We start by adding roots for every package in "all".
+ //
+ // Once that is done, we may still need to add more roots to cover upgraded or
+ // otherwise-missing test dependencies for packages in "all". For those test
+ // dependencies, we prefer to add roots for packages with shorter import
+ // stacks first, on the theory that the module requirements for those will
+ // tend to fill in the requirements for their transitive imports (which have
+ // deeper import stacks). So we add the missing dependencies for one depth at
+ // a time, starting with the packages actually in "all" and expanding outwards
+ // until we have scanned every package that was loaded.
+ var (
+ queue []*modpkgload.Package
+ queued = map[*modpkgload.Package]bool{}
+ )
+ for _, pkg := range pkgs.All() {
+ if !pkg.HasFlags(modpkgload.PkgInAll) {
+ continue
+ }
+ if pkg.FromExternalModule() && !pathIsRoot[pkg.Mod().Path()] {
+ roots = append(roots, pkg.Mod())
+ pathIsRoot[pkg.Mod().Path()] = true
+ }
+ queue = append(queue, pkg)
+ queued[pkg] = true
+ }
+ module.Sort(roots)
+ tidy := modrequirements.NewRequirements(ld.mainModule.Path(), ld.registry, roots, old.DefaultMajorVersions())
+
+ for len(queue) > 0 {
+ roots = tidy.RootModules()
+ mg, err := tidy.Graph(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ prevQueue := queue
+ queue = nil
+ for _, pkg := range prevQueue {
+ m := pkg.Mod()
+ if m.Path() == "" {
+ continue
+ }
+ for _, dep := range pkg.Imports() {
+ if !queued[dep] {
+ queue = append(queue, dep)
+ queued[dep] = true
+ }
+ }
+ if !pathIsRoot[m.Path()] {
+ if s := mg.Selected(m.Path()); semver.Compare(s, m.Version()) < 0 {
+ roots = append(roots, m)
+ pathIsRoot[m.Path()] = true
+ }
+ }
+ }
+
+ if tidyRoots := tidy.RootModules(); len(roots) > len(tidyRoots) {
+ module.Sort(roots)
+ tidy = modrequirements.NewRequirements(ld.mainModule.Path(), ld.registry, roots, tidy.DefaultMajorVersions())
+ }
+ }
+
+ if _, err := tidy.Graph(ctx); err != nil {
+ return nil, err
+ }
+
+ // TODO the original code had some logic I don't properly understand,
+ // related to https://go.dev/issue/60313, that _may_ be relevant only
+ // to test-only dependencies, which we don't have, so leave it out for now.
+
+ return tidy, nil
+}
+
+// spotCheckRoots reports whether the versions of the roots in rs satisfy the
+// explicit requirements of the modules in mods.
+func (ld *loader) spotCheckRoots(ctx context.Context, rs *modrequirements.Requirements, mods map[module.Version]bool) bool {
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ work := par.NewQueue(runtime.GOMAXPROCS(0))
+ for m := range mods {
+ m := m
+ work.Add(func() {
+ if ctx.Err() != nil {
+ return
+ }
+
+ require, err := ld.registry.Requirements(ctx, m)
+ if err != nil {
+ cancel()
+ return
+ }
+
+ for _, r := range require {
+ if v, ok := rs.RootSelected(r.Path()); ok && semver.Compare(v, r.Version()) < 0 {
+ cancel()
+ return
+ }
+ }
+ })
+ }
+ <-work.Idle()
+
+ if ctx.Err() != nil {
+ // Either we failed a spot-check, or the caller no longer cares about our
+ // answer anyway.
+ return false
+ }
+
+ return true
+}
+
+func logf(f string, a ...any) {
+ if logging {
+ log.Printf(f, a...)
+ }
+}
diff --git a/vendor/cuelang.org/go/internal/mod/modload/update.go b/vendor/cuelang.org/go/internal/mod/modload/update.go
new file mode 100644
index 00000000..cede6439
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/mod/modload/update.go
@@ -0,0 +1,175 @@
+package modload
+
+import (
+ "context"
+ "fmt"
+ "io/fs"
+ "runtime"
+ "strings"
+ "sync/atomic"
+
+ "cuelang.org/go/internal/mod/modrequirements"
+ "cuelang.org/go/internal/mod/semver"
+ "cuelang.org/go/internal/par"
+ "cuelang.org/go/mod/modfile"
+ "cuelang.org/go/mod/module"
+)
+
+// UpdateVersions returns the main module's module file with the specified module versions
+// updated if possible and added if not already present. It returns an error if asked
+// to downgrade a module below a version already required by an external dependency.
+//
+// A module in the versions slice can be specified as one of the following:
+// - $module@$fullVersion: a specific exact version
+// - $module@$partialVersion: a non-canonical version
+// specifies the latest version that has the same major/minor numbers.
+// - $module@latest: the latest non-prerelease version, or latest prerelease version if
+// there is no non-prerelease version
+// - $module: equivalent to $module@latest if $module doesn't have a default major
+// version or $module@$majorVersion if it does, where $majorVersion is the
+// default major version for $module.
+func UpdateVersions(ctx context.Context, fsys fs.FS, modRoot string, reg Registry, versions []string) (*modfile.File, error) {
+ mainModuleVersion, mf, err := readModuleFile(ctx, fsys, modRoot)
+ if err != nil {
+ return nil, err
+ }
+ rs := modrequirements.NewRequirements(mf.Module, reg, mf.DepVersions(), mf.DefaultMajorVersions())
+ mversions, err := resolveUpdateVersions(ctx, reg, rs, mainModuleVersion, versions)
+ if err != nil {
+ return nil, err
+ }
+ // Now we know what versions we want to update to, make a new set of
+ // requirements with these versions in place.
+
+ mversionsMap := make(map[string]module.Version)
+ for _, v := range mversions {
+ // Check existing membership of the map: if the same module has been specified
+		// twice, the two occurrences must agree on the version (checked below).
+ if v1, ok := mversionsMap[v.Path()]; ok && v1.Version() != v.Version() {
+ // The same module has been specified twice with different requirements.
+ // Treat it as an error (an alternative approach might be to choose the greater
+ // version, but making it an error seems more appropriate to the "choose exact
+			// version" semantics of UpdateVersions.)
+ return nil, fmt.Errorf("conflicting version update requirements %v vs %v", v1, v)
+ }
+ mversionsMap[v.Path()] = v
+ }
+ g, err := rs.Graph(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("cannot determine module graph: %v", err)
+ }
+ var newVersions []module.Version
+ for _, v := range g.BuildList() {
+ if v.Path() == mainModuleVersion.Path() {
+ continue
+ }
+ if newv, ok := mversionsMap[v.Path()]; ok {
+ newVersions = append(newVersions, newv)
+ delete(mversionsMap, v.Path())
+ } else {
+ newVersions = append(newVersions, v)
+ }
+ }
+ for _, v := range mversionsMap {
+ newVersions = append(newVersions, v)
+ }
+ rs = modrequirements.NewRequirements(mf.Module, reg, newVersions, mf.DefaultMajorVersions())
+ g, err = rs.Graph(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("cannot determine new module graph: %v", err)
+ }
+ // Now check that the resulting versions are the ones we wanted.
+ for _, v := range mversions {
+ actualVers := g.Selected(v.Path())
+ if actualVers != v.Version() {
+ return nil, fmt.Errorf("other requirements prevent changing module %v to version %v (actual selected version: %v)", v.Path(), v.Version(), actualVers)
+ }
+ }
+ // Make a new requirements with the selected versions of the above as roots.
+ var finalVersions []module.Version
+ for _, v := range g.BuildList() {
+ if v.Path() != mainModuleVersion.Path() {
+ finalVersions = append(finalVersions, v)
+ }
+ }
+ rs = modrequirements.NewRequirements(mf.Module, reg, finalVersions, mf.DefaultMajorVersions())
+ return modfileFromRequirements(mf, rs, ""), nil
+}
+
+// resolveUpdateVersions resolves a set of version strings as accepted by [UpdateVersions]
+// into the actual module versions they represent.
+func resolveUpdateVersions(ctx context.Context, reg Registry, rs *modrequirements.Requirements, mainModuleVersion module.Version, versions []string) ([]module.Version, error) {
+ work := par.NewQueue(runtime.GOMAXPROCS(0))
+ mversions := make([]module.Version, len(versions))
+ var queryErr atomic.Pointer[error]
+ setError := func(err error) {
+ queryErr.CompareAndSwap(nil, &err)
+ }
+ for i, v := range versions {
+ i, v := i, v
+ if mv, err := module.ParseVersion(v); err == nil {
+ // It's already canonical: nothing more to do.
+ mversions[i] = mv
+ continue
+ }
+ mpath, vers, ok := strings.Cut(v, "@")
+ if !ok {
+ if major, status := rs.DefaultMajorVersion(mpath); status == modrequirements.ExplicitDefault {
+ // TODO allow a non-explicit default too?
+ vers = major
+ } else {
+ vers = "latest"
+ }
+ }
+ if err := module.CheckPathWithoutVersion(mpath); err != nil {
+ return nil, fmt.Errorf("invalid module path in %q", v)
+ }
+ versionPrefix := ""
+ if vers != "latest" {
+ if !semver.IsValid(vers) {
+ return nil, fmt.Errorf("%q does not specify a valid semantic version", v)
+ }
+ if semver.Build(vers) != "" {
+ return nil, fmt.Errorf("build version suffixes not supported (%v)", v)
+ }
+ // It's a valid version but has no build suffix and it's not canonical,
+ // which means it must be either a major-only or major-minor, so
+ // the conforming canonical versions must have it as a prefix, with
+ // a dot separating the last component and the next.
+ versionPrefix = vers + "."
+ }
+ work.Add(func() {
+ allVersions, err := reg.ModuleVersions(ctx, mpath)
+ if err != nil {
+ setError(err)
+ return
+ }
+ possibleVersions := make([]string, 0, len(allVersions))
+ for _, v := range allVersions {
+ if strings.HasPrefix(v, versionPrefix) {
+ possibleVersions = append(possibleVersions, v)
+ }
+ }
+ chosen := latestVersion(possibleVersions)
+ mv, err := module.NewVersion(mpath, chosen)
+ if err != nil {
+ // Should never happen, because we've checked that
+ // mpath is valid and ModuleVersions
+ // should always return valid semver versions.
+ setError(err)
+ return
+ }
+ mversions[i] = mv
+ })
+ }
+ <-work.Idle()
+ if errPtr := queryErr.Load(); errPtr != nil {
+ return nil, *errPtr
+ }
+ for _, v := range mversions {
+ if v.Path() == mainModuleVersion.Path() {
+ return nil, fmt.Errorf("cannot update version of main module")
+ }
+ }
+ return mversions, nil
+}
diff --git a/vendor/cuelang.org/go/internal/mod/modpkgload/import.go b/vendor/cuelang.org/go/internal/mod/modpkgload/import.go
new file mode 100644
index 00000000..258dc63c
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/mod/modpkgload/import.go
@@ -0,0 +1,323 @@
+package modpkgload
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io/fs"
+ "path"
+ "path/filepath"
+ "slices"
+ "strings"
+
+ "cuelang.org/go/internal/mod/modrequirements"
+ "cuelang.org/go/mod/module"
+)
+
+// importFromModules finds the module and source location in the dependency graph of
+// pkgs containing the package with the given import path.
+//
+// The answer must be unique: importFromModules returns an error if multiple
+// modules are observed to provide the same package.
+//
+// importFromModules can return a zero module version for packages in
+// the standard library.
+//
+// If the package is not present in any module selected from the requirement
+// graph, importFromModules returns an *ImportMissingError.
+//
+// If the package is present in exactly one module, importFromModules will
+// return the module, its root directory, and a list of other modules that
+// lexically could have provided the package but did not.
+func (pkgs *Packages) importFromModules(ctx context.Context, pkgPath string) (m module.Version, pkgLocs []module.SourceLoc, altMods []module.Version, err error) {
+ fail := func(err error) (module.Version, []module.SourceLoc, []module.Version, error) {
+ return module.Version{}, []module.SourceLoc(nil), nil, err
+ }
+ failf := func(format string, args ...interface{}) (module.Version, []module.SourceLoc, []module.Version, error) {
+ return fail(fmt.Errorf(format, args...))
+ }
+ // Note: we don't care about the package qualifier at this point
+ // because any directory with CUE files in counts as a possible
+ // candidate, regardless of what packages are in it.
+ pathParts := module.ParseImportPath(pkgPath)
+ pkgPathOnly := pathParts.Path
+
+ if filepath.IsAbs(pkgPathOnly) || path.IsAbs(pkgPathOnly) {
+ return failf("%q is not a package path", pkgPath)
+ }
+ // TODO check that the path isn't relative.
+ // TODO check it's not a meta package name, such as "all".
+
+ // Before any further lookup, check that the path is valid.
+ if err := module.CheckImportPath(pkgPath); err != nil {
+ return fail(err)
+ }
+
+ // Check each module on the build list.
+ var locs [][]module.SourceLoc
+ var mods []module.Version
+ var mg *modrequirements.ModuleGraph
+ localPkgLocs, err := pkgs.findLocalPackage(pkgPathOnly)
+ if err != nil {
+ return fail(err)
+ }
+ // Packages found under the main module's cue.mod/{pkg,gen,usr}
+ // directories are recorded under the special "local" module so that
+ // they take part in the ambiguity checks below like any other candidate.
+ if len(localPkgLocs) > 0 {
+ mods = append(mods, module.MustNewVersion("local", ""))
+ locs = append(locs, localPkgLocs)
+ }
+
+ // Iterate over possible modules for the path, not all selected modules.
+ // Iterating over selected modules would make the overall loading time
+ // O(M × P) for M modules providing P imported packages, whereas iterating
+ // over path prefixes is only O(P × k) with maximum path depth k. For
+ // large projects both M and P may be very large (note that M ≤ P), but k
+ // will tend to remain smallish (if for no other reason than filesystem
+ // path limitations).
+ //
+ // We perform this iteration either one or two times.
+ // Firstly we attempt to load the package using only the main module and
+ // its root requirements. If that does not identify the package, then we attempt
+ // to load the package using the full
+ // requirements in mg.
+ for {
+ // Note: this declaration shadows the altMods result parameter so
+ // that each pass starts with an empty candidate list; the value
+ // returned below is this inner one.
+ var altMods []module.Version
+ // TODO we could probably do this loop concurrently.
+
+ for prefix := pkgPathOnly; prefix != "."; prefix = path.Dir(prefix) {
+ var (
+ v string
+ ok bool
+ )
+ pkgVersion := pathParts.Version
+ if pkgVersion == "" {
+ if pkgVersion, _ = pkgs.requirements.DefaultMajorVersion(prefix); pkgVersion == "" {
+ continue
+ }
+ }
+ prefixPath := prefix + "@" + pkgVersion
+ if mg == nil {
+ // First pass: consult only the root requirements.
+ v, ok = pkgs.requirements.RootSelected(prefixPath)
+ } else {
+ v, ok = mg.Selected(prefixPath), true
+ }
+ // A selected version of "none" means the module is not in the graph.
+ if !ok || v == "none" {
+ continue
+ }
+ m, err := module.NewVersion(prefixPath, v)
+ if err != nil {
+ // Not all package paths are valid module versions,
+ // but a parent might be.
+ continue
+ }
+ mloc, isLocal, err := pkgs.fetch(ctx, m)
+ if err != nil {
+ // Report fetch error.
+ // Note that we don't know for sure this module is necessary,
+ // but it certainly _could_ provide the package, and even if we
+ // continue the loop and find the package in some other module,
+ // we need to look at this module to make sure the import is
+ // not ambiguous.
+ return fail(fmt.Errorf("cannot fetch %v: %v", m, err))
+ }
+ if loc, ok, err := locInModule(pkgPathOnly, prefix, mloc, isLocal); err != nil {
+ return fail(fmt.Errorf("cannot find package: %v", err))
+ } else if ok {
+ mods = append(mods, m)
+ locs = append(locs, []module.SourceLoc{loc})
+ } else {
+ altMods = append(altMods, m)
+ }
+ }
+
+ if len(mods) > 1 {
+ // We produce the list of directories from longest to shortest candidate
+ // module path, but the AmbiguousImportError should report them from
+ // shortest to longest. Reverse them now.
+ slices.Reverse(mods)
+ slices.Reverse(locs)
+ return fail(&AmbiguousImportError{ImportPath: pkgPath, Locations: locs, Modules: mods})
+ }
+
+ if len(mods) == 1 {
+ // We've found the unique module containing the package.
+ return mods[0], locs[0], altMods, nil
+ }
+
+ if mg != nil {
+ // We checked the full module graph and still didn't find the
+ // requested package.
+ return fail(&ImportMissingError{Path: pkgPath})
+ }
+
+ // So far we've checked the root dependencies.
+ // Load the full module graph and try again.
+ mg, err = pkgs.requirements.Graph(ctx)
+ if err != nil {
+ // We might be missing one or more transitive (implicit) dependencies from
+ // the module graph, so we can't return an ImportMissingError here — one
+ // of the missing modules might actually contain the package in question,
+ // in which case we shouldn't go looking for it in some new dependency.
+ return fail(fmt.Errorf("cannot expand module graph: %v", err))
+ }
+ }
+}
+
+// locInModule returns the location that would hold the package named by the given path,
+// if it were in the module with module path mpath and root location mloc.
+// If pkgPath is syntactically not within mpath,
+// or if mloc is a local file tree (isLocal == true) and the directory
+// that would hold path is in a nested module (marked by a cue.mod/module.cue
+// file below mloc.Dir), locInModule returns a zero location, false, nil.
+//
+// Otherwise, locInModule returns the name of the directory where
+// CUE source files would be expected, along with a boolean indicating
+// whether there are in fact CUE source files in that directory.
+// A non-nil error indicates that the existence of the directory and/or
+// source files could not be determined, for example due to a permission error.
+func locInModule(pkgPath, mpath string, mloc module.SourceLoc, isLocal bool) (loc module.SourceLoc, haveCUEFiles bool, err error) {
+ loc.FS = mloc.FS
+
+ // Determine where to expect the package.
+ if pkgPath == mpath {
+ loc = mloc
+ } else if len(pkgPath) > len(mpath) && pkgPath[len(mpath)] == '/' && pkgPath[:len(mpath)] == mpath {
+ // pkgPath is strictly below mpath: resolve relative to the module root.
+ loc.Dir = path.Join(mloc.Dir, pkgPath[len(mpath)+1:])
+ } else {
+ return module.SourceLoc{}, false, nil
+ }
+
+ // Check that there aren't other modules in the way.
+ // This check is unnecessary inside the module cache.
+ // So we only check local module trees
+ // (the main module and, in the future, any directory trees pointed at by replace directives).
+ if isLocal {
+ for d := loc.Dir; d != mloc.Dir && len(d) > len(mloc.Dir); {
+ _, err := fs.Stat(mloc.FS, path.Join(d, "cue.mod/module.cue"))
+ // TODO should we count it as a module file if it's a directory?
+ haveCUEMod := err == nil
+ if haveCUEMod {
+ return module.SourceLoc{}, false, nil
+ }
+ parent := path.Dir(d)
+ if parent == d {
+ // Break the loop, as otherwise we'd loop
+ // forever if d=="." and mloc.Dir=="".
+ break
+ }
+ d = parent
+ }
+ }
+
+ // Are there CUE source files in the directory?
+ // We don't care about build tags, not even "ignore".
+ // We're just looking for a plausible directory.
+ haveCUEFiles, err = isDirWithCUEFiles(loc)
+ if err != nil {
+ return module.SourceLoc{}, false, err
+ }
+ return loc, haveCUEFiles, err
+}
+
+// localPkgDirs lists the directories within the main module that may hold
+// "local" packages, in the order in which they are searched.
+var localPkgDirs = []string{"cue.mod/gen", "cue.mod/usr", "cue.mod/pkg"}
+
+// findLocalPackage returns the source locations inside the main module's
+// cue.mod directories (see localPkgDirs) that contain CUE files for pkgPath.
+// It returns nil if no such directory exists.
+func (pkgs *Packages) findLocalPackage(pkgPath string) ([]module.SourceLoc, error) {
+ var locs []module.SourceLoc
+ for _, d := range localPkgDirs {
+ loc := pkgs.mainModuleLoc
+ loc.Dir = path.Join(loc.Dir, d, pkgPath)
+ ok, err := isDirWithCUEFiles(loc)
+ if err != nil {
+ return nil, err
+ }
+ if ok {
+ locs = append(locs, loc)
+ }
+ }
+ return locs, nil
+}
+
+// isDirWithCUEFiles reports whether loc refers to an existing directory
+// that directly contains at least one regular file with a ".cue" suffix.
+// A non-existent path is not treated as an error: it reports (false, nil).
+func isDirWithCUEFiles(loc module.SourceLoc) (bool, error) {
+ // It would be nice if we could inspect the error returned from
+ // ReadDir to see if it's failing because it's not a directory,
+ // but unfortunately that doesn't seem to be something defined
+ // by the Go fs interface.
+ fi, err := fs.Stat(loc.FS, loc.Dir)
+ if err != nil {
+ if !errors.Is(err, fs.ErrNotExist) {
+ return false, err
+ }
+ return false, nil
+ }
+ if !fi.IsDir() {
+ return false, nil
+ }
+ entries, err := fs.ReadDir(loc.FS, loc.Dir)
+ if err != nil {
+ return false, err
+ }
+ for _, e := range entries {
+ // Only regular files count; directories and symlinks named *.cue do not.
+ if strings.HasSuffix(e.Name(), ".cue") && e.Type().IsRegular() {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// fetch downloads the given module (or its replacement)
+// and returns its location.
+//
+// The isLocal return value reports whether the replacement,
+// if any, is within the local main module.
+//
+// Note: replacements are not implemented here yet, so as written
+// isLocal is true only for the main module itself.
+func (pkgs *Packages) fetch(ctx context.Context, mod module.Version) (loc module.SourceLoc, isLocal bool, err error) {
+ if mod == pkgs.mainModuleVersion {
+ return pkgs.mainModuleLoc, true, nil
+ }
+
+ loc, err = pkgs.registry.Fetch(ctx, mod)
+ return loc, false, err
+}
+
+// An AmbiguousImportError indicates an import of a package found in multiple
+// modules in the build list, or found in both the main module and its vendor
+// directory.
+type AmbiguousImportError struct {
+ ImportPath string
+ Locations [][]module.SourceLoc
+ Modules []module.Version // Either empty or 1:1 with Locations.
+}
+
+// Error implements the error interface, listing every module/location
+// that provides the package.
+func (e *AmbiguousImportError) Error() string {
+ locType := "modules"
+ if len(e.Modules) == 0 {
+ locType = "locations"
+ }
+
+ var buf strings.Builder
+ fmt.Fprintf(&buf, "ambiguous import: found package %s in multiple %s:", e.ImportPath, locType)
+
+ for i, loc := range e.Locations {
+ buf.WriteString("\n\t")
+ if i < len(e.Modules) {
+ m := e.Modules[i]
+ buf.WriteString(m.Path())
+ if m.Version() != "" {
+ fmt.Fprintf(&buf, " %s", m.Version())
+ }
+ // TODO work out how to present source locations in error messages.
+ // For now, show only the first location of each candidate.
+ fmt.Fprintf(&buf, " (%s)", loc[0].Dir)
+ } else {
+ buf.WriteString(loc[0].Dir)
+ }
+ }
+
+ return buf.String()
+}
+
+// ImportMissingError is used for errors where an imported package cannot be found.
+type ImportMissingError struct {
+ Path string
+}
+
+func (e *ImportMissingError) Error() string {
+ return "cannot find module providing package " + e.Path
+}
diff --git a/vendor/cuelang.org/go/internal/mod/modpkgload/pkgload.go b/vendor/cuelang.org/go/internal/mod/modpkgload/pkgload.go
new file mode 100644
index 00000000..68f0831f
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/mod/modpkgload/pkgload.go
@@ -0,0 +1,348 @@
+package modpkgload
+
+import (
+ "context"
+ "fmt"
+ "runtime"
+ "slices"
+ "sort"
+ "strings"
+ "sync/atomic"
+
+ "cuelang.org/go/internal/mod/modimports"
+ "cuelang.org/go/internal/mod/modrequirements"
+ "cuelang.org/go/internal/par"
+ "cuelang.org/go/mod/module"
+)
+
+// Registry represents a module registry, or at least this package's view of it.
+type Registry interface {
+ // Fetch returns the location of the contents for the given module
+ // version, downloading it if necessary.
+ Fetch(ctx context.Context, m module.Version) (module.SourceLoc, error)
+}
+
+// Flags is a set of flags tracking metadata about a package.
+type Flags int8
+
+const (
+ // PkgInAll indicates that the package is in the "all" package pattern,
+ // regardless of whether we are loading the "all" package pattern.
+ //
+ // When the PkgInAll flag and PkgImportsLoaded flags are both set, the caller
+ // who set the last of those flags must propagate the PkgInAll marking to all
+ // of the imports of the marked package.
+ PkgInAll Flags = 1 << iota
+
+ // PkgIsRoot indicates that the package matches one of the root package
+ // patterns requested by the caller.
+ PkgIsRoot
+
+ // PkgFromRoot indicates that the package is in the transitive closure of
+ // imports starting at the roots. (Note that every package marked as PkgIsRoot
+ // is also trivially marked PkgFromRoot.)
+ PkgFromRoot
+
+ // PkgImportsLoaded indicates that the imports field of a
+ // Package has been populated.
+ PkgImportsLoaded
+)
+
+// String returns a comma-separated human-readable rendering of the
+// flags set in f, e.g. "inAll,isRoot".
+func (f Flags) String() string {
+ var buf strings.Builder
+ set := func(f1 Flags, s string) {
+ if (f & f1) == 0 {
+ return
+ }
+ if buf.Len() > 0 {
+ buf.WriteString(",")
+ }
+ buf.WriteString(s)
+ f &^= f1
+ }
+ set(PkgInAll, "inAll")
+ set(PkgIsRoot, "isRoot")
+ set(PkgFromRoot, "fromRoot")
+ set(PkgImportsLoaded, "importsLoaded")
+ if f != 0 {
+ // Any remaining (unknown) bits are rendered in hex.
+ set(f, fmt.Sprintf("extra%x", int(f)))
+ }
+ return buf.String()
+}
+
+// has reports whether all of the flags in cond are set in f.
+func (f Flags) has(cond Flags) bool {
+ return f&cond == cond
+}
+
+// Packages holds the result of loading a set of root packages and all
+// their transitive imports. It is constructed by LoadPackages.
+type Packages struct {
+ mainModuleVersion module.Version // the main module (always with an empty version)
+ mainModuleLoc module.SourceLoc // root source location of the main module
+ pkgCache par.Cache[string, *Package] // one *Package per import path; deduplicates loads
+ pkgs []*Package // all loaded packages; populated by buildStacks
+ rootPkgs []*Package // packages for the requested root import paths
+ work *par.Queue // queue of pending package-load tasks
+ requirements *modrequirements.Requirements
+ registry Registry
+}
+
+type Package struct {
+ // Populated at construction time:
+ path string // import path
+
+ // Populated at construction time and updated by [loader.applyPkgFlags]:
+ flags atomicLoadPkgFlags
+
+ // Populated by [loader.load].
+ mod module.Version // module providing package
+ locs []module.SourceLoc // location of source code directories
+ err error // error loading package
+ imports []*Package // packages imported by this one
+ inStd bool
+ fromExternal bool
+ altMods []module.Version // modules that could have contained the package but did not
+
+ // Populated by postprocessing in [Packages.buildStacks]:
+ stack *Package // package importing this one in minimal import stack for this pkg
+}
+
+// ImportPath returns the package's import path.
+func (pkg *Package) ImportPath() string {
+ return pkg.path
+}
+
+// FromExternalModule reports whether the package was resolved from
+// outside the main module.
+func (pkg *Package) FromExternalModule() bool {
+ return pkg.fromExternal
+}
+
+// Locations returns the source directories that hold the package's CUE files.
+func (pkg *Package) Locations() []module.SourceLoc {
+ return pkg.locs
+}
+
+// Error returns the error encountered while loading the package, if any.
+func (pkg *Package) Error() error {
+ return pkg.err
+}
+
+// SetError records err as the package's load error.
+func (pkg *Package) SetError(err error) {
+ pkg.err = err
+}
+
+// HasFlags reports whether all the given flags are set on the package.
+func (pkg *Package) HasFlags(flags Flags) bool {
+ return pkg.flags.has(flags)
+}
+
+// Imports returns the packages directly imported by this one.
+func (pkg *Package) Imports() []*Package {
+ return pkg.imports
+}
+
+// Flags returns the package's current flag set.
+func (pkg *Package) Flags() Flags {
+ return pkg.flags.get()
+}
+
+// Mod returns the module that provides the package.
+func (pkg *Package) Mod() module.Version {
+ return pkg.mod
+}
+
+// LoadPackages loads information about all the given packages and the
+// packages they import, recursively, using modules from the given
+// requirements to determine which modules they might be obtained from,
+// and reg to download module contents.
+//
+// It returns only once every package and its transitive imports have
+// been loaded (possibly with errors recorded on individual packages).
+func LoadPackages(
+ ctx context.Context,
+ mainModulePath string,
+ mainModuleLoc module.SourceLoc,
+ rs *modrequirements.Requirements,
+ reg Registry,
+ rootPkgPaths []string,
+) *Packages {
+ pkgs := &Packages{
+ mainModuleVersion: module.MustNewVersion(mainModulePath, ""),
+ mainModuleLoc: mainModuleLoc,
+ requirements: rs,
+ registry: reg,
+ work: par.NewQueue(runtime.GOMAXPROCS(0)),
+ }
+ inRoots := map[*Package]bool{}
+ pkgs.rootPkgs = make([]*Package, 0, len(rootPkgPaths))
+ for _, p := range rootPkgPaths {
+ // TODO the original logic didn't add PkgInAll here. Not sure why,
+ // and that might be a lurking problem.
+ if root := pkgs.addPkg(ctx, p, PkgIsRoot|PkgInAll); !inRoots[root] {
+ pkgs.rootPkgs = append(pkgs.rootPkgs, root)
+ inRoots[root] = true
+ }
+ }
+ // Wait for every queued load task, including those spawned transitively.
+ <-pkgs.work.Idle()
+ pkgs.buildStacks()
+ return pkgs
+}
+
+// buildStacks computes minimal import stacks for each package,
+// for use in error messages. When it completes, packages that
+// are part of the original root set have pkg.stack == nil,
+// and other packages have pkg.stack pointing at the next
+// package up the import stack in their minimal chain.
+// As a side effect, buildStacks also constructs pkgs.pkgs,
+// the list of all packages loaded.
+func (pkgs *Packages) buildStacks() {
+ for _, pkg := range pkgs.rootPkgs {
+ pkg.stack = pkg // sentinel to avoid processing in next loop
+ pkgs.pkgs = append(pkgs.pkgs, pkg)
+ }
+ for i := 0; i < len(pkgs.pkgs); i++ { // not range: appending to pkgs.pkgs in loop
+ pkg := pkgs.pkgs[i]
+ for _, next := range pkg.imports {
+ if next.stack == nil {
+ next.stack = pkg
+ pkgs.pkgs = append(pkgs.pkgs, next)
+ }
+ }
+ }
+ for _, pkg := range pkgs.rootPkgs {
+ pkg.stack = nil
+ }
+}
+
+// Roots returns the packages corresponding to the root package paths
+// passed to LoadPackages. The slice is clipped, so appending to it
+// will not modify the internal state.
+func (pkgs *Packages) Roots() []*Package {
+ return slices.Clip(pkgs.rootPkgs)
+}
+
+// All returns all the loaded packages, roots first, in the
+// breadth-first order computed by buildStacks.
+func (pkgs *Packages) All() []*Package {
+ return slices.Clip(pkgs.pkgs)
+}
+
+// Pkg returns the package with the given import path,
+// or nil if it has not been loaded.
+func (pkgs *Packages) Pkg(pkgPath string) *Package {
+ pkg, _ := pkgs.pkgCache.Get(pkgPath)
+ return pkg
+}
+
+// addPkg returns the Package for the given import path, creating it and
+// scheduling its load on the work queue the first time the path is seen.
+// The given flags are applied (and propagated) whether or not the package
+// already existed.
+func (pkgs *Packages) addPkg(ctx context.Context, pkgPath string, flags Flags) *Package {
+ pkg := pkgs.pkgCache.Do(pkgPath, func() *Package {
+ pkg := &Package{
+ path: pkgPath,
+ }
+ pkgs.applyPkgFlags(ctx, pkg, flags)
+
+ pkgs.work.Add(func() { pkgs.load(ctx, pkg) })
+ return pkg
+ })
+
+ // Ensure the flags apply even if the package already existed.
+ pkgs.applyPkgFlags(ctx, pkg, flags)
+ return pkg
+}
+
+// load resolves pkg to a providing module and its source locations, reads
+// the package's imports, and queues those imports for loading in turn.
+// It runs on the pkgs.work queue; failures are recorded in pkg.err rather
+// than returned.
+func (pkgs *Packages) load(ctx context.Context, pkg *Package) {
+ if IsStdlibPackage(pkg.path) {
+ pkg.inStd = true
+ return
+ }
+ pkg.mod, pkg.locs, pkg.altMods, pkg.err = pkgs.importFromModules(ctx, pkg.path)
+ if pkg.err != nil {
+ return
+ }
+ // Compute fromExternal only after importFromModules has populated
+ // pkg.mod: computing it beforehand would compare the zero version and
+ // wrongly mark every package — including the main module's own — as
+ // coming from an external module.
+ pkg.fromExternal = pkg.mod != pkgs.mainModuleVersion
+ if pkgs.mainModuleVersion.Path() == pkg.mod.Path() {
+ pkgs.applyPkgFlags(ctx, pkg, PkgInAll)
+ }
+ pkgQual := module.ParseImportPath(pkg.path).Qualifier
+ importsMap := make(map[string]bool)
+ for _, loc := range pkg.locs {
+ imports, err := modimports.AllImports(modimports.PackageFiles(loc.FS, loc.Dir, pkgQual))
+ if err != nil {
+ pkg.err = fmt.Errorf("cannot get imports: %v", err)
+ return
+ }
+ for _, imp := range imports {
+ importsMap[imp] = true
+ }
+ }
+ imports := make([]string, 0, len(importsMap))
+ for imp := range importsMap {
+ imports = append(imports, imp)
+ }
+ sort.Strings(imports) // Make the algorithm deterministic for tests.
+
+ pkg.imports = make([]*Package, 0, len(imports))
+ var importFlags Flags
+ if pkg.flags.has(PkgInAll) {
+ importFlags = PkgInAll
+ }
+ for _, path := range imports {
+ pkg.imports = append(pkg.imports, pkgs.addPkg(ctx, path, importFlags))
+ }
+ pkgs.applyPkgFlags(ctx, pkg, PkgImportsLoaded)
+}
+
+// applyPkgFlags updates pkg.flags to set the given flags and propagate the
+// (transitive) effects of those flags, possibly loading or enqueueing further
+// packages as a result.
+//
+// The recursion terminates because flags are only ever added: once update
+// reports that no new flag was set, propagation stops.
+func (pkgs *Packages) applyPkgFlags(ctx context.Context, pkg *Package, flags Flags) {
+ if flags == 0 {
+ return
+ }
+
+ if flags.has(PkgInAll) {
+ // This package matches a root pattern by virtue of being in "all".
+ flags |= PkgIsRoot
+ }
+ if flags.has(PkgIsRoot) {
+ flags |= PkgFromRoot
+ }
+
+ old := pkg.flags.update(flags)
+ new := old | flags
+ if new == old || !new.has(PkgImportsLoaded) {
+ // We either didn't change the state of pkg, or we don't know anything about
+ // its dependencies yet. Either way, we can't usefully load its test or
+ // update its dependencies.
+ return
+ }
+
+ if new.has(PkgInAll) && !old.has(PkgInAll|PkgImportsLoaded) {
+ // We have just marked pkg with pkgInAll, or we have just loaded its
+ // imports, or both. Now is the time to propagate pkgInAll to the imports.
+ for _, dep := range pkg.imports {
+ pkgs.applyPkgFlags(ctx, dep, PkgInAll)
+ }
+ }
+
+ if new.has(PkgFromRoot) && !old.has(PkgFromRoot|PkgImportsLoaded) {
+ for _, dep := range pkg.imports {
+ pkgs.applyPkgFlags(ctx, dep, PkgFromRoot)
+ }
+ }
+}
+
+// An atomicLoadPkgFlags stores a loadPkgFlags for which individual flags can be
+// added atomically.
+type atomicLoadPkgFlags struct {
+ bits atomic.Int32
+}
+
+// update sets the given flags in af (in addition to any flags already set).
+//
+// update returns the previous flag state so that the caller may determine which
+// flags were newly-set.
+func (af *atomicLoadPkgFlags) update(flags Flags) (old Flags) {
+ // Standard CAS loop: retry until we either observe all flags already
+ // set or successfully install the new combination.
+ for {
+ old := af.bits.Load()
+ new := old | int32(flags)
+ if new == old || af.bits.CompareAndSwap(old, new) {
+ return Flags(old)
+ }
+ }
+}
+
+// get returns the current flag state.
+func (af *atomicLoadPkgFlags) get() Flags {
+ return Flags(af.bits.Load())
+}
+
+// has reports whether all of the flags in cond are set in af.
+func (af *atomicLoadPkgFlags) has(cond Flags) bool {
+ return Flags(af.bits.Load())&cond == cond
+}
+
+// IsStdlibPackage reports whether pkgPath should be treated as a
+// standard-library import path: the first element of the path contains
+// no dot, so it cannot name a module hosted under a domain name.
+func IsStdlibPackage(pkgPath string) bool {
+ firstElem, _, _ := strings.Cut(pkgPath, "/")
+ return !strings.Contains(firstElem, ".")
+}
diff --git a/vendor/cuelang.org/go/internal/mod/modrequirements/requirements.go b/vendor/cuelang.org/go/internal/mod/modrequirements/requirements.go
new file mode 100644
index 00000000..c62347da
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/mod/modrequirements/requirements.go
@@ -0,0 +1,420 @@
+package modrequirements
+
+import (
+ "context"
+ "fmt"
+ "runtime"
+ "slices"
+ "sync"
+ "sync/atomic"
+
+ "cuelang.org/go/internal/mod/mvs"
+ "cuelang.org/go/internal/mod/semver"
+ "cuelang.org/go/internal/par"
+ "cuelang.org/go/mod/module"
+)
+
+// majorVersionDefault records the default major version chosen for a
+// (major-version-less) module path.
+type majorVersionDefault struct {
+ version string // the default major version, e.g. "v1"
+ explicitDefault bool // true if the default was passed in by the caller
+ ambiguousDefault bool // true if several majors are candidates and none is explicit
+}
+
+// Requirements holds a set of module requirements. It does not
+// initially load the full module graph, as that can be expensive.
+// Instead the [Requirements.Graph] method can be used to lazily construct
+// that.
+type Requirements struct {
+ registry Registry
+ mainModuleVersion module.Version
+
+ // rootModules is the set of root modules of the graph, sorted and capped to
+ // length. It may contain duplicates, and may contain multiple versions for a
+ // given module path. The root modules are the main module's direct requirements.
+ rootModules []module.Version
+ maxRootVersion map[string]string // highest root version seen per module path
+
+ // origDefaultMajorVersions holds the original map passed to NewRequirements.
+ origDefaultMajorVersions map[string]string
+
+ // defaultMajorVersions is derived from the above information,
+ // also holding modules that have a default due to being unique
+ // in the roots.
+ defaultMajorVersions map[string]majorVersionDefault
+
+ graphOnce sync.Once // guards writes to (but not reads from) graph
+ graph atomic.Pointer[cachedGraph]
+}
+
+// Registry holds the contents of a registry. It's expected that this will
+// cache any results that it returns.
+type Registry interface {
+ Requirements(ctx context.Context, m module.Version) ([]module.Version, error)
+}
+
+// A cachedGraph is a non-nil *ModuleGraph, together with any error discovered
+// while loading that graph.
+type cachedGraph struct {
+ mg *ModuleGraph
+ err error // If err is non-nil, mg may be incomplete (but must still be non-nil).
+}
+
+// NewRequirements returns a new requirement set with the given root modules.
+// The dependencies of the roots will be loaded lazily from the given
+// Registry value at the first call to the Graph method.
+//
+// The rootModules slice must be sorted according to [module.Sort].
+//
+// The defaultMajorVersions slice holds the default major version for (major-version-less)
+// module paths, if any have been specified. For example {"foo.com/bar": "v0"} specifies
+// that the default major version for the module `foo.com/bar` is `v0`.
+//
+// The caller must not modify rootModules or defaultMajorVersions after passing
+// them to NewRequirements.
+func NewRequirements(mainModulePath string, reg Registry, rootModules []module.Version, defaultMajorVersions map[string]string) *Requirements {
+ mainModuleVersion := module.MustNewVersion(mainModulePath, "")
+ // TODO add direct, so we can tell which modules are directly used by the
+ // main module.
+ for i, v := range rootModules {
+ if v.Path() == mainModulePath {
+ panic(fmt.Sprintf("NewRequirements called with untrimmed build list: rootModules[%v] is a main module", i))
+ }
+ if !v.IsValid() {
+ panic("NewRequirements with invalid zero version")
+ }
+ }
+ rs := &Requirements{
+ registry: reg,
+ mainModuleVersion: mainModuleVersion,
+ rootModules: rootModules,
+ maxRootVersion: make(map[string]string, len(rootModules)),
+ }
+ for i, m := range rootModules {
+ if i > 0 {
+ // Enforce the documented [module.Sort] precondition.
+ prev := rootModules[i-1]
+ if prev.Path() > m.Path() || (prev.Path() == m.Path() && semver.Compare(prev.Version(), m.Version()) > 0) {
+ panic(fmt.Sprintf("NewRequirements called with unsorted roots: %v", rootModules))
+ }
+ }
+ if v, ok := rs.maxRootVersion[m.Path()]; !ok || semver.Compare(v, m.Version()) < 0 {
+ rs.maxRootVersion[m.Path()] = m.Version()
+ }
+ }
+ rs.initDefaultMajorVersions(defaultMajorVersions)
+ return rs
+}
+
+// WithDefaultMajorVersions returns rs but with the given default major versions.
+// The caller should not modify the map after calling this method.
+//
+// The returned value shares rootModules and maxRootVersion with rs;
+// neither is mutated after construction.
+func (rs *Requirements) WithDefaultMajorVersions(defaults map[string]string) *Requirements {
+ rs1 := &Requirements{
+ registry: rs.registry,
+ mainModuleVersion: rs.mainModuleVersion,
+ rootModules: rs.rootModules,
+ maxRootVersion: rs.maxRootVersion,
+ }
+ // Initialize graph and graphOnce in rs1 to mimic their state in rs.
+ // We can't copy the sync.Once, so if it's already triggered, we'll
+ // run the Once with a no-op function to get the same effect.
+ rs1.graph.Store(rs.graph.Load())
+ if rs1.GraphIsLoaded() {
+ rs1.graphOnce.Do(func() {})
+ }
+ rs1.initDefaultMajorVersions(defaults)
+ return rs1
+}
+
+// initDefaultMajorVersions populates rs.defaultMajorVersions from the
+// explicitly-supplied defaults plus defaults implied by the roots: a base
+// path with exactly one major version among the roots and no explicit
+// default gets that major version as a non-explicit default.
+func (rs *Requirements) initDefaultMajorVersions(defaultMajorVersions map[string]string) {
+ rs.origDefaultMajorVersions = defaultMajorVersions
+ rs.defaultMajorVersions = make(map[string]majorVersionDefault)
+ for mpath, v := range defaultMajorVersions {
+ if _, _, ok := module.SplitPathVersion(mpath); ok {
+ panic(fmt.Sprintf("NewRequirements called with major version in defaultMajorVersions %q", mpath))
+ }
+ if semver.Major(v) != v {
+ panic(fmt.Sprintf("NewRequirements called with invalid major version %q for module %q", v, mpath))
+ }
+ rs.defaultMajorVersions[mpath] = majorVersionDefault{
+ version: v,
+ explicitDefault: true,
+ }
+ }
+ // Add defaults for all modules that have exactly one major version
+ // and no existing default.
+ for _, m := range rs.rootModules {
+ if m.IsLocal() {
+ continue
+ }
+ mpath := m.BasePath()
+ d, ok := rs.defaultMajorVersions[mpath]
+ if !ok {
+ rs.defaultMajorVersions[mpath] = majorVersionDefault{
+ version: semver.Major(m.Version()),
+ }
+ continue
+ }
+ if d.explicitDefault {
+ continue
+ }
+ // A second candidate major version with no explicit choice:
+ // the default becomes ambiguous.
+ d.ambiguousDefault = true
+ rs.defaultMajorVersions[mpath] = d
+ }
+}
+
+// RootSelected returns the version of the root dependency with the given module
+// path, or the empty string and ok=false if the module is not a root
+// dependency. The main module itself is reported with an empty version
+// and ok=true.
+//
+// NOTE(review): mpath is matched against module.Version.Path of the roots,
+// which appears to include the major-version suffix — confirm against callers.
+func (rs *Requirements) RootSelected(mpath string) (version string, ok bool) {
+ if mpath == rs.mainModuleVersion.Path() {
+ return "", true
+ }
+ if v, ok := rs.maxRootVersion[mpath]; ok {
+ return v, true
+ }
+ return "", false
+}
+
+// DefaultMajorVersions returns the defaults that the requirements was
+// created with. The returned map should not be modified.
+func (rs *Requirements) DefaultMajorVersions() map[string]string {
+ return rs.origDefaultMajorVersions
+}
+
+// MajorVersionDefaultStatus describes how (or whether) a default major
+// version was determined for a module path.
+type MajorVersionDefaultStatus byte
+
+const (
+ // ExplicitDefault: the default was supplied directly to NewRequirements.
+ ExplicitDefault MajorVersionDefaultStatus = iota
+ // NonExplicitDefault: the default was inferred from a unique root module.
+ NonExplicitDefault
+ // NoDefault: no default major version is known for the path.
+ NoDefault
+ // AmbiguousDefault: several major versions are candidates and none
+ // was made explicit.
+ AmbiguousDefault
+)
+
+// DefaultMajorVersion returns the default major version for the given
+// module path (which should not itself contain a major version).
+//
+// It also returns information about the default.
+func (rs *Requirements) DefaultMajorVersion(mpath string) (string, MajorVersionDefaultStatus) {
+ d, ok := rs.defaultMajorVersions[mpath]
+ switch {
+ case !ok:
+ return "", NoDefault
+ case d.ambiguousDefault:
+ return "", AmbiguousDefault
+ case d.explicitDefault:
+ return d.version, ExplicitDefault
+ default:
+ return d.version, NonExplicitDefault
+ }
+}
+
+// RootModules returns the set of root modules of the graph, sorted and capped to
+// length. It may contain duplicates, and may contain multiple versions for a
+// given module path.
+func (rs *Requirements) RootModules() []module.Version {
+ return slices.Clip(rs.rootModules)
+}
+
+// Graph returns the graph of module requirements loaded from the current
+// root modules (as reported by RootModules).
+//
+// Graph always makes a best effort to load the requirement graph despite any
+// errors, and always returns a non-nil *ModuleGraph.
+//
+// If the requirements of any relevant module fail to load, Graph also
+// returns a non-nil error of type *mvs.BuildListError.
+func (rs *Requirements) Graph(ctx context.Context) (*ModuleGraph, error) {
+ rs.graphOnce.Do(func() {
+ mg, mgErr := rs.readModGraph(ctx)
+ rs.graph.Store(&cachedGraph{mg, mgErr})
+ })
+ cached := rs.graph.Load()
+ return cached.mg, cached.err
+}
+
+// GraphIsLoaded reports whether Graph has been called previously.
+func (rs *Requirements) GraphIsLoaded() bool {
+ return rs.graph.Load() != nil
+}
+
+// A ModuleGraph represents the complete graph of module dependencies
+// of a main module.
+//
+// If the main module supports module graph pruning, the graph does not include
+// transitive dependencies of non-root (implicit) dependencies.
+type ModuleGraph struct {
+ g *mvs.Graph[module.Version]
+
+ buildListOnce sync.Once // guards the lazy computation of buildList
+ buildList []module.Version
+}
+
+// cueModSummary returns a summary of the cue.mod/module.cue file for module m,
+// taking into account any replacements for m, exclusions of its dependencies,
+// and/or vendoring.
+//
+// m must be a version in the module graph, reachable from the main (target)
+// module. cueModSummary must not be called for the main module
+// itself, as its requirements may change.
+//
+// The caller must not modify the returned summary.
+func (rs *Requirements) cueModSummary(ctx context.Context, m module.Version) (*modFileSummary, error) {
+ require, err := rs.registry.Requirements(ctx, m)
+ if err != nil {
+ return nil, err
+ }
+ // TODO account for replacements, exclusions, etc.
+ return &modFileSummary{
+ module: m,
+ require: require,
+ }, nil
+}
+
+// modFileSummary holds the parts of a module.cue file needed for graph
+// construction.
+type modFileSummary struct {
+ module module.Version // the module the summary describes
+ require []module.Version // its direct requirements
+}
+
+// readModGraph reads and returns the module dependency graph starting at the
+// given roots.
+//
+// readModGraph does not attempt to diagnose or update inconsistent roots.
+func (rs *Requirements) readModGraph(ctx context.Context) (*ModuleGraph, error) {
+ var (
+ mu sync.Mutex // guards mg.g and hasError during loading
+ hasError bool
+ mg = &ModuleGraph{
+ g: mvs.NewGraph[module.Version](module.Versions{}, cmpVersion, []module.Version{rs.mainModuleVersion}),
+ }
+ )
+
+ mg.g.Require(rs.mainModuleVersion, rs.rootModules)
+
+ var (
+ loadQueue = par.NewQueue(runtime.GOMAXPROCS(0))
+ loading sync.Map // module.Version → nil; the set of modules that have been or are being loaded
+ loadCache par.ErrCache[module.Version, *modFileSummary]
+ )
+
+ // loadOne synchronously loads the explicit requirements for module m.
+ // It does not load the transitive requirements of m.
+ // loadCache ensures each module version is fetched at most once.
+ loadOne := func(m module.Version) (*modFileSummary, error) {
+ return loadCache.Do(m, func() (*modFileSummary, error) {
+ summary, err := rs.cueModSummary(ctx, m)
+
+ mu.Lock()
+ if err == nil {
+ mg.g.Require(m, summary.require)
+ } else {
+ hasError = true
+ }
+ mu.Unlock()
+
+ return summary, err
+ })
+ }
+
+ for _, m := range rs.rootModules {
+ m := m // pin the loop variable (required before Go 1.22)
+ if !m.IsValid() {
+ panic("root module version is invalid")
+ }
+ if m.IsLocal() || m.Version() == "none" {
+ continue
+ }
+
+ if _, dup := loading.LoadOrStore(m, nil); dup {
+ // m has already been enqueued for loading. Since unpruned loading may
+ // follow cycles in the requirement graph, we need to return early
+ // to avoid making the load queue infinitely long.
+ continue
+ }
+
+ loadQueue.Add(func() {
+ loadOne(m)
+ // If there's an error, findError will report it later.
+ })
+ }
+ <-loadQueue.Idle()
+
+ if hasError {
+ return mg, mg.findError(&loadCache)
+ }
+ return mg, nil
+}
+
+// RequiredBy returns the dependencies required by module m in the graph,
+// or ok=false if module m's dependencies are pruned out.
+//
+// The caller must not modify the returned slice, but may safely append to it
+// and may rely on it not to be modified.
+func (mg *ModuleGraph) RequiredBy(m module.Version) (reqs []module.Version, ok bool) {
+ return mg.g.RequiredBy(m)
+}
+
+// Selected returns the selected version of the module with the given path.
+//
+// If no version is selected, Selected returns version "none".
+func (mg *ModuleGraph) Selected(path string) (version string) {
+ return mg.g.Selected(path)
+}
+
+// WalkBreadthFirst invokes f once, in breadth-first order, for each module
+// version other than "none" that appears in the graph, regardless of whether
+// that version is selected.
+func (mg *ModuleGraph) WalkBreadthFirst(f func(m module.Version)) {
+ mg.g.WalkBreadthFirst(f)
+}
+
+// BuildList returns the selected versions of all modules present in the graph,
+// beginning with the main module.
+//
+// The order of the remaining elements in the list is deterministic
+// but arbitrary.
+//
+// The caller must not modify the returned list, but may safely append to it
+// and may rely on it not to be modified.
+func (mg *ModuleGraph) BuildList() []module.Version {
+ mg.buildListOnce.Do(func() {
+ mg.buildList = slices.Clip(mg.g.BuildList())
+ })
+ return mg.buildList
+}
+
+// findError locates a module whose requirements failed to load (as recorded
+// in loadCache) and wraps the failure in a *mvs.BuildListError with the
+// requirement path that led to it. It returns nil if no failure is found.
+func (mg *ModuleGraph) findError(loadCache *par.ErrCache[module.Version, *modFileSummary]) error {
+ errStack := mg.g.FindPath(func(m module.Version) bool {
+ _, err := loadCache.Get(m)
+ return err != nil && err != par.ErrCacheEntryNotFound
+ })
+ if len(errStack) > 0 {
+ // TODO it seems that this stack can never be more than one
+ // element long, because readModGraph never goes more
+ // than one depth level below the root requirements.
+ // Given that the top of the stack will always be the main
+ // module and that BuildListError elides the first element
+ // in this case, is it really worth using FindPath?
+ _, err := loadCache.Get(errStack[len(errStack)-1])
+ var noUpgrade func(from, to module.Version) bool
+ return mvs.NewBuildListError[module.Version](err, errStack, module.Versions{}, noUpgrade)
+ }
+
+ return nil
+}
+
+// cmpVersion implements the comparison for versions in the module loader.
+//
+// It is consistent with semver.Compare except that as a special case,
+// the version "" is considered higher than all other versions.
+// The main module (also known as the target) has no version and must be chosen
+// over other versions of the same module in the module dependency graph.
+//
+// It is used by readModGraph as the version comparison when constructing
+// the module graph.
+func cmpVersion(v1, v2 string) int {
+ if v2 == "" {
+ if v1 == "" {
+ return 0
+ }
+ return -1
+ }
+ if v1 == "" {
+ return 1
+ }
+ return semver.Compare(v1, v2)
+}
diff --git a/vendor/cuelang.org/go/internal/mod/modresolve/resolve.go b/vendor/cuelang.org/go/internal/mod/modresolve/resolve.go
new file mode 100644
index 00000000..bf37fbcc
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/mod/modresolve/resolve.go
@@ -0,0 +1,522 @@
+// Copyright 2024 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package modresolve
+
+import (
+ "crypto/sha256"
+ _ "embed"
+ "fmt"
+ "net"
+ "net/netip"
+ "path"
+ "sort"
+ "strings"
+ "sync"
+
+ "cuelabs.dev/go/oci/ociregistry/ociref"
+
+ "cuelang.org/go/cue"
+ "cuelang.org/go/cue/cuecontext"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/mod/module"
+)
+
+// pathEncoding represents one of the possible types of
+// encoding for module paths within a registry.
+// It reflects the #registry.pathEncoding disjunction
+// in schema.cue.
+// TODO it would be nice if this could be auto-generated
+// from the schema.
+type pathEncoding string
+
+const (
+ encPath pathEncoding = "path"
+ encHashAsRepo pathEncoding = "hashAsRepo"
+ encHashAsTag pathEncoding = "hashAsTag"
+)
+
+// LocationResolver resolves module paths to a location
+// consisting of a host name of a registry and where
+// in that registry the module is to be found.
+//
+// Note: The implementation in this package operates entirely lexically,
+// which is why [Location] contains only a host name and not an actual
+// [ociregistry.Interface] implementation.
+type LocationResolver interface {
+ // ResolveToLocation resolves a base module path (without a version
+ // suffix, a.k.a. OCI repository name) and optional version to
+ // the location for that path. It reports whether it can find
+	// an appropriate location for the module.
+ //
+ // If the version is empty, the Tag in the returned Location
+ // will hold the prefix that all versions of the module in its
+ // repository have. That prefix will be followed by the version
+ // itself.
+ ResolveToLocation(path string, vers string) (Location, bool)
+
+ // AllHosts returns all the registry hosts that the resolver
+ // might resolve to, ordered lexically by hostname.
+ AllHosts() []Host
+}
+
+// Host represents a registry host name.
+type Host struct {
+ // Name holds the IP host name of the registry.
+ // If it's an IP v6 address, it will be surrounded with
+ // square brackets ([, ]).
+ Name string
+ // Insecure holds whether this host should be connected
+	// to insecurely (with an HTTP rather than HTTPS connection).
+ Insecure bool
+}
+
+// Location represents the location for a given module version or versions.
+type Location struct {
+ // Host holds the host or host:port of the registry.
+ Host string
+
+ // Insecure holds whether an insecure connection
+ // should be used when connecting to the registry.
+ Insecure bool
+
+ // Repository holds the repository to store the module in.
+ Repository string
+
+ // Tag holds the tag for the module version.
+ // If an empty version was passed to
+ // Resolve, it holds the prefix shared by all version
+ // tags for the module.
+ Tag string
+}
+
+// config mirrors the #File definition in schema.cue.
+// TODO it would be nice to be able to generate this
+// type directly from the schema.
+type config struct {
+ ModuleRegistries map[string]*registryConfig `json:"moduleRegistries,omitempty"`
+ DefaultRegistry *registryConfig `json:"defaultRegistry,omitempty"`
+}
+
+func (cfg *config) init() error {
+ for prefix, reg := range cfg.ModuleRegistries {
+ if err := module.CheckPathWithoutVersion(prefix); err != nil {
+ return fmt.Errorf("invalid module path %q: %v", prefix, err)
+ }
+ if err := reg.init(); err != nil {
+ return fmt.Errorf("invalid registry configuration in %q: %v", prefix, err)
+ }
+ }
+ if cfg.DefaultRegistry != nil {
+ if err := cfg.DefaultRegistry.init(); err != nil {
+ return fmt.Errorf("invalid default registry configuration: %v", err)
+ }
+ }
+ return nil
+}
+
+type registryConfig struct {
+ Registry string `json:"registry,omitempty"`
+ PathEncoding pathEncoding `json:"pathEncoding,omitempty"`
+ PrefixForTags string `json:"prefixForTags,omitempty"`
+ StripPrefix bool `json:"stripPrefix,omitempty"`
+
+ // The following fields are filled in from Registry after parsing.
+ host string
+ repository string
+ insecure bool
+}
+
+func (r *registryConfig) init() error {
+ r1, err := parseRegistry(r.Registry)
+ if err != nil {
+ return err
+ }
+ r.host, r.repository, r.insecure = r1.host, r1.repository, r1.insecure
+
+ if r.PrefixForTags != "" {
+ if !ociref.IsValidTag(r.PrefixForTags) {
+ return fmt.Errorf("invalid tag prefix %q", r.PrefixForTags)
+ }
+ }
+ if r.PathEncoding == "" {
+ // Shouldn't happen because default should apply.
+ return fmt.Errorf("empty pathEncoding")
+ }
+ if r.StripPrefix {
+ if r.PathEncoding != encPath {
+ // TODO we could relax this to allow storing of naked tags
+ // when the module path matches exactly and hash tags
+ // otherwise.
+ return fmt.Errorf("cannot strip prefix unless using path encoding")
+ }
+ if r.repository == "" {
+ return fmt.Errorf("use of stripPrefix requires a non-empty repository within the registry")
+ }
+ }
+ return nil
+}
+
+var (
+ configSchemaOnce sync.Once // guards the creation of _configSchema
+ // TODO remove this mutex when https://cuelang.org/issue/2733 is fixed.
+ configSchemaMutex sync.Mutex // guards any use of _configSchema
+ _configSchema cue.Value
+)
+
+//go:embed schema.cue
+var configSchemaData []byte
+
+// RegistryConfigSchema returns the CUE schema
+// for the configuration parsed by [ParseConfig].
+func RegistryConfigSchema() string {
+ // Cut out the copyright header and the header that's
+ // not pure schema.
+ schema := string(configSchemaData)
+ i := strings.Index(schema, "\n// #file ")
+ if i == -1 {
+ panic("no file definition found in schema")
+ }
+ i++
+ return schema[i:]
+}
+
+// ParseConfig parses the registry configuration with the given contents and file name.
+// If there is no default registry, then the single registry specified in catchAllDefault
+// will be used as a default.
+func ParseConfig(configFile []byte, filename string, catchAllDefault string) (LocationResolver, error) {
+ configSchemaOnce.Do(func() {
+ ctx := cuecontext.New()
+ schemav := ctx.CompileBytes(configSchemaData, cue.Filename("cuelang.org/go/internal/mod/modresolve/schema.cue"))
+ schemav = schemav.LookupPath(cue.MakePath(cue.Def("#file")))
+ if err := schemav.Validate(); err != nil {
+ panic(fmt.Errorf("internal error: invalid CUE registry config schema: %v", errors.Details(err, nil)))
+ }
+ _configSchema = schemav
+ })
+ configSchemaMutex.Lock()
+ defer configSchemaMutex.Unlock()
+
+ v := _configSchema.Context().CompileBytes(configFile, cue.Filename(filename))
+ if err := v.Err(); err != nil {
+ return nil, errors.Wrapf(err, token.NoPos, "invalid registry configuration file")
+ }
+ v = v.Unify(_configSchema)
+ if err := v.Err(); err != nil {
+ return nil, errors.Wrapf(err, token.NoPos, "invalid configuration file")
+ }
+ var cfg config
+ if err := v.Decode(&cfg); err != nil {
+ return nil, errors.Wrapf(err, token.NoPos, "internal error: cannot decode into registry config struct")
+ }
+ if err := cfg.init(); err != nil {
+ return nil, err
+ }
+ if cfg.DefaultRegistry == nil {
+ if catchAllDefault == "" {
+ return nil, fmt.Errorf("no default catch-all registry provided")
+ }
+ // TODO is it too limiting to have the catch-all registry specified as a simple string?
+ reg, err := parseRegistry(catchAllDefault)
+ if err != nil {
+ return nil, fmt.Errorf("invalid catch-all registry %q: %v", catchAllDefault, err)
+ }
+ cfg.DefaultRegistry = reg
+ }
+ r := &resolver{
+ cfg: cfg,
+ }
+ if err := r.initHosts(); err != nil {
+ return nil, err
+ }
+ return r, nil
+}
+
+// ParseCUERegistry parses a registry routing specification that
+// maps module prefixes to the registry that should be used to
+// fetch that module.
+//
+// The specification consists of an order-independent, comma-separated list.
+//
+// Each element either maps a module prefix to the registry that will be used
+// for all modules that have that prefix (prefix=registry), or a catch-all registry to be used
+// for modules that do not match any prefix (registry).
+//
+// For example:
+//
+// myorg.com=myregistry.com/m,catchallregistry.example.org
+//
+// Any module with a matching prefix will be routed to the given registry.
+// A prefix only matches whole path elements.
+// In the above example, module myorg.com/foo/bar@v0 will be looked up
+// in myregistry.com in the repository m/myorg.com/foo/bar,
+// whereas github.com/x/y will be looked up in catchallregistry.example.org.
+//
+// The registry part is syntactically similar to a [docker reference]
+// except that the repository is optional and no tag or digest is allowed.
+// Additionally, a +secure or +insecure suffix may be used to indicate
+// whether to use a secure or insecure connection. Without that,
+// localhost, 127.0.0.1 and [::1] will default to insecure, and anything
+// else to secure.
+//
+// If s does not declare a catch-all registry location, catchAllDefault is
+// used. It is an error if s fails to declare a catch-all registry location
+// and no catchAllDefault is provided.
+//
+// [docker reference]: https://pkg.go.dev/github.com/distribution/reference
+func ParseCUERegistry(s string, catchAllDefault string) (LocationResolver, error) {
+ if s == "" && catchAllDefault == "" {
+ return nil, fmt.Errorf("no catch-all registry or default")
+ }
+ if s == "" {
+ s = catchAllDefault
+ }
+ cfg := config{
+ ModuleRegistries: make(map[string]*registryConfig),
+ }
+ parts := strings.Split(s, ",")
+ for _, part := range parts {
+ key, val, ok := strings.Cut(part, "=")
+ if !ok {
+ if part == "" {
+ // TODO or just ignore it?
+ return nil, fmt.Errorf("empty registry part")
+ }
+ if _, ok := cfg.ModuleRegistries[""]; ok {
+ return nil, fmt.Errorf("duplicate catch-all registry")
+ }
+ key, val = "", part
+ } else {
+ if key == "" {
+ return nil, fmt.Errorf("empty module prefix")
+ }
+ if val == "" {
+ return nil, fmt.Errorf("empty registry reference")
+ }
+ if err := module.CheckPathWithoutVersion(key); err != nil {
+ return nil, fmt.Errorf("invalid module path %q: %v", key, err)
+ }
+ if _, ok := cfg.ModuleRegistries[key]; ok {
+ return nil, fmt.Errorf("duplicate module prefix %q", key)
+ }
+ }
+ reg, err := parseRegistry(val)
+ if err != nil {
+ return nil, fmt.Errorf("invalid registry %q: %v", val, err)
+ }
+ cfg.ModuleRegistries[key] = reg
+ }
+ if _, ok := cfg.ModuleRegistries[""]; !ok {
+ if catchAllDefault == "" {
+ return nil, fmt.Errorf("no default catch-all registry provided")
+ }
+ reg, err := parseRegistry(catchAllDefault)
+ if err != nil {
+ return nil, fmt.Errorf("invalid catch-all registry %q: %v", catchAllDefault, err)
+ }
+ cfg.ModuleRegistries[""] = reg
+ }
+ cfg.DefaultRegistry = cfg.ModuleRegistries[""]
+ delete(cfg.ModuleRegistries, "")
+
+ r := &resolver{
+ cfg: cfg,
+ }
+ if err := r.initHosts(); err != nil {
+ return nil, err
+ }
+ return r, nil
+}
+
+type resolver struct {
+ allHosts []Host
+ cfg config
+}
+
+func (r *resolver) initHosts() error {
+ hosts := make(map[string]bool)
+ addHost := func(reg *registryConfig) error {
+ if insecure, ok := hosts[reg.host]; ok {
+ if insecure != reg.insecure {
+ return fmt.Errorf("registry host %q is specified both as secure and insecure", reg.host)
+ }
+ } else {
+ hosts[reg.host] = reg.insecure
+ }
+ return nil
+ }
+ for _, reg := range r.cfg.ModuleRegistries {
+ if err := addHost(reg); err != nil {
+ return err
+ }
+ }
+
+ if reg := r.cfg.DefaultRegistry; reg != nil {
+ if err := addHost(reg); err != nil {
+ return err
+ }
+ }
+ allHosts := make([]Host, 0, len(hosts))
+ for host, insecure := range hosts {
+ allHosts = append(allHosts, Host{
+ Name: host,
+ Insecure: insecure,
+ })
+ }
+ sort.Slice(allHosts, func(i, j int) bool {
+ return allHosts[i].Name < allHosts[j].Name
+ })
+ r.allHosts = allHosts
+ return nil
+}
+
+// AllHosts implements Resolver.AllHosts.
+func (r *resolver) AllHosts() []Host {
+ return r.allHosts
+}
+
+func (r *resolver) ResolveToLocation(mpath, vers string) (Location, bool) {
+ if mpath == "" {
+ return Location{}, false
+ }
+ bestMatch := ""
+ // Note: there's always a wildcard match.
+ bestMatchReg := r.cfg.DefaultRegistry
+ for pat, reg := range r.cfg.ModuleRegistries {
+ if pat == mpath {
+ bestMatch = pat
+ bestMatchReg = reg
+ break
+ }
+ if !strings.HasPrefix(mpath, pat) {
+ continue
+ }
+ if len(bestMatch) > len(pat) {
+ // We've already found a more specific match.
+ continue
+ }
+ if mpath[len(pat)] != '/' {
+ // The path doesn't have a separator at the end of
+ // the prefix, which means that it doesn't match.
+ // For example, foo.com/bar does not match foo.com/ba.
+ continue
+ }
+ // It's a possible match but not necessarily the longest one.
+ bestMatch, bestMatchReg = pat, reg
+ }
+ if bestMatchReg == nil {
+ return Location{}, false
+ }
+ reg := bestMatchReg
+ loc := Location{
+ Host: reg.host,
+ Insecure: reg.insecure,
+ Tag: vers,
+ }
+ switch reg.PathEncoding {
+ case encPath:
+ if reg.StripPrefix {
+ mpath = strings.TrimPrefix(mpath, bestMatch)
+ mpath = strings.TrimPrefix(mpath, "/")
+ }
+ loc.Repository = path.Join(reg.repository, mpath)
+ case encHashAsRepo:
+ loc.Repository = fmt.Sprintf("%s/%x", reg.repository, sha256.Sum256([]byte(mpath)))
+ case encHashAsTag:
+ loc.Repository = reg.repository
+ default:
+ panic("unreachable")
+ }
+ if reg.PathEncoding == encHashAsTag {
+ loc.Tag = fmt.Sprintf("%s%x-%s", reg.PrefixForTags, sha256.Sum256([]byte(mpath)), vers)
+ } else {
+ loc.Tag = reg.PrefixForTags + vers
+ }
+ return loc, true
+}
+
+func parseRegistry(env0 string) (*registryConfig, error) {
+ env := env0
+ var suffix string
+ if i := strings.LastIndex(env, "+"); i > 0 {
+ suffix = env[i:]
+ env = env[:i]
+ }
+ var r ociref.Reference
+ if !strings.Contains(env, "/") {
+ // OCI references don't allow a host name on its own without a repo,
+ // but we do.
+ r.Host = env
+ if !ociref.IsValidHost(r.Host) {
+ return nil, fmt.Errorf("invalid host name %q in registry", r.Host)
+ }
+ } else {
+ var err error
+ r, err = ociref.Parse(env)
+ if err != nil {
+ return nil, err
+ }
+ if r.Tag != "" || r.Digest != "" {
+ return nil, fmt.Errorf("cannot have an associated tag or digest")
+ }
+ }
+ if suffix == "" {
+ if isInsecureHost(r.Host) {
+ suffix = "+insecure"
+ } else {
+ suffix = "+secure"
+ }
+ }
+ insecure := false
+ switch suffix {
+ case "+insecure":
+ insecure = true
+ case "+secure":
+ default:
+ return nil, fmt.Errorf("unknown suffix (%q), need +insecure, +secure or no suffix)", suffix)
+ }
+ return ®istryConfig{
+ Registry: env0,
+ PathEncoding: encPath,
+ host: r.Host,
+ repository: r.Repository,
+ insecure: insecure,
+ }, nil
+}
+
+var (
+ ipV4Localhost = netip.MustParseAddr("127.0.0.1")
+ ipV6Localhost = netip.MustParseAddr("::1")
+)
+
+func isInsecureHost(hostPort string) bool {
+ host, _, err := net.SplitHostPort(hostPort)
+ if err != nil {
+ host = hostPort
+ if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
+ host = host[1 : len(host)-1]
+ }
+ }
+ if host == "localhost" {
+ return true
+ }
+ addr, err := netip.ParseAddr(host)
+ if err != nil {
+ return false
+ }
+ // TODO other clients have logic for RFC1918 too, amongst other
+ // things. Maybe we should do that too.
+ return addr == ipV4Localhost || addr == ipV6Localhost
+}
diff --git a/vendor/cuelang.org/go/internal/mod/modresolve/schema.cue b/vendor/cuelang.org/go/internal/mod/modresolve/schema.cue
new file mode 100644
index 00000000..a6bfdeac
--- /dev/null
+++ b/vendor/cuelang.org/go/internal/mod/modresolve/schema.cue
@@ -0,0 +1,104 @@
+// Copyright 2024 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This aspect of #registry encodes the defaults used by the resolver
+// parser. It's kept separate because it's technically bad practice to
+// define regular fields as part of a schema, and by defining it this
+// way, the pure schema can be read independently as such.
+//
+// TODO work out a nice way of doing this such that we don't have to
+// mirror the fields in #file that mention #registry
+#registry: {
+ pathEncoding: *"path" | _
+}
+
+// Note: public part of schema (included in help output) starts
+// at "// #file" below.
+
+// #file represents the registry configuration schema.
+#file: {
+ // moduleRegistries specifies a mapping from module path prefix
+ // (excluding any version suffix) to the registry to be used for
+ // all modules under that path.
+ //
+ // A prefix is considered to match if a non-zero number of
+ // initial path elements (sequences of non-slash characters) in
+ // a module path match the prefix.
+ //
+ // If there are multiple matching prefixes, the longest
+ // is chosen.
+ moduleRegistries?: [#modulePath]: #registry
+
+ // defaultRegistry specifies a fallback registry to be used if no
+ // prefix from moduleRegistry matches.
+ // If it's not present, a system default will be used.
+ defaultRegistry?: #registry
+}
+
+#registry: {
+ // registry specifies the registry host name and optionally, the
+ // repository prefix to use for all modules in the repository,
+ // and the security to use when accessing the host.
+ //
+ // It is in the form:
+ // hostname[:port][/repoPrefix][+insecure]
+ //
+ // The hostname must be specified in square brackets if it's an
+ // IPv6 address.
+ //
+ // Connections will be secure unless explicitly specified
+ // otherwise, except for localhost connections which default to
+ // insecure.
+ //
+ // See the doc comment on pathEncoding for details as to how
+ // repoPrefix is used to determine the repository to use for a
+ // specific module.
+ //
+ // Examples:
+ // "localhost:1234"
+ // "myregistry.example/my-modules+secure"
+ registry!: string
+
+ // pathEncoding specifies how module versions map to
+ // repositories within a registry.
+ // Possible values are:
+ // - "path": the repository is used as a prefix to the unencoded
+ // module path. The version of the module is used as a tag.
+	//   - "hashAsRepo": the hex-encoded SHA256 hash of the path is
+ // used as a suffix to the above repository value. The version
+ // of the module is used as a tag.
+ // - "hashAsTag": the repository is used as is: the hex-encoded
+ // SHA256 hash of the path followed by a hyphen and the version
+ // is used as a tag.
+ pathEncoding?: "path" | "hashAsRepo" | "hashAsTag"
+
+ // prefixForTags specifies an arbitrary prefix that's added to
+ // all tags. This can be used to disambiguate tags when there
+ // might be some possibility of confusion with tags in use for
+ // other purposes.
+ prefixForTags?: #tag
+
+ // TODO we could encode the invariant below in CUE but that
+ // would result in poor error messages. With an error builtin,
+ // that could perhaps be improved.
+
+ // stripPrefix specifies that the pattern prefix should be
+ // stripped from the module path before using as a repository
+ // path. This only applies when pathEncoding is "path".
+ stripPrefix?: bool
+}
+
+// TODO more specific schemas below
+#modulePath: string
+#tag: string
diff --git a/vendor/cuelang.org/go/internal/mod/module/module.go b/vendor/cuelang.org/go/internal/mod/module/module.go
deleted file mode 100644
index 8ffeadaf..00000000
--- a/vendor/cuelang.org/go/internal/mod/module/module.go
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package module defines the module.Version type along with support code.
-//
-// The module.Version type is a simple Path, Version pair:
-//
-// type Version struct {
-// Path string
-// Version string
-// }
-//
-// There are no restrictions imposed directly by use of this structure,
-// but additional checking functions, most notably Check, verify that
-// a particular path, version pair is valid.
-package module
-
-// IMPORTANT NOTE
-//
-// This file essentially defines the set of valid import paths for the cue command.
-// There are many subtle considerations, including Unicode ambiguity,
-// security, network, and file system representations.
-
-import (
- "fmt"
- "sort"
- "strings"
-
- "cuelang.org/go/internal/mod/semver"
-)
-
-// A Version (for clients, a module.Version) is defined by a module path and version pair.
-// These are stored in their plain (unescaped) form.
-// This type is comparable.
-type Version struct {
- path string
- version string
-}
-
-// Path returns the module path part of the Version,
-// which always includes the major version suffix
-// unless a module path, like "github.com/foo/bar@v0".
-// Note that in general the path should include the major version suffix
-// even though it's implied from the version. The Canonical
-// method can be used to add the major version suffix if not present.
-// The BasePath method can be used to obtain the path without
-// the suffix.
-func (m Version) Path() string {
- return m.path
-}
-
-func (m Version) Equal(m1 Version) bool {
- return m.path == m1.path && m.version == m1.version
-}
-
-func (m Version) BasePath() string {
- basePath, _, ok := SplitPathVersion(m.path)
- if !ok {
- panic(fmt.Errorf("broken invariant: failed to split version in %q", m.path))
- }
- return basePath
-}
-
-func (m Version) Version() string {
- return m.version
-}
-
-// String returns the string form of the Version:
-// (Path@Version, or just Path if Version is empty).
-func (m Version) String() string {
- if m.version == "" {
- return m.path
- }
- return m.BasePath() + "@" + m.version
-}
-
-func MustParseVersion(s string) Version {
- v, err := ParseVersion(s)
- if err != nil {
- panic(err)
- }
- return v
-}
-
-// ParseVersion parses a $module@$version
-// string into a Version.
-// The version must be canonical (i.e. it can't be
-// just a major version).
-func ParseVersion(s string) (Version, error) {
- basePath, vers, ok := SplitPathVersion(s)
- if !ok {
- return Version{}, fmt.Errorf("invalid module path@version %q", s)
- }
- if semver.Canonical(vers) != vers {
- return Version{}, fmt.Errorf("module version in %q is not canonical", s)
- }
- return Version{basePath + "@" + semver.Major(vers), vers}, nil
-}
-
-func MustNewVersion(path string, vers string) Version {
- v, err := NewVersion(path, vers)
- if err != nil {
- panic(err)
- }
- return v
-}
-
-// NewVersion forms a Version from the given path and version.
-// The version must be canonical, empty or "none".
-// If the path doesn't have a major version suffix, one will be added
-// if the version isn't empty; if the version is empty, it's an error.
-func NewVersion(path string, vers string) (Version, error) {
- if vers != "" && vers != "none" {
- if !semver.IsValid(vers) {
- return Version{}, fmt.Errorf("version %q (of module %q) is not well formed", vers, path)
- }
- if semver.Canonical(vers) != vers {
- return Version{}, fmt.Errorf("version %q (of module %q) is not canonical", vers, path)
- }
- maj := semver.Major(vers)
- _, vmaj, ok := SplitPathVersion(path)
- if ok && maj != vmaj {
- return Version{}, fmt.Errorf("mismatched major version suffix in %q (version %v)", path, vers)
- }
- if !ok {
- fullPath := path + "@" + maj
- if _, _, ok := SplitPathVersion(fullPath); !ok {
- return Version{}, fmt.Errorf("cannot form version path from %q, version %v", path, vers)
- }
- path = fullPath
- }
- } else {
- if _, _, ok := SplitPathVersion(path); !ok {
- return Version{}, fmt.Errorf("path %q has no major version", path)
- }
- }
- if vers == "" {
- if err := CheckPath(path); err != nil {
- return Version{}, err
- }
- } else {
- if err := Check(path, vers); err != nil {
- return Version{}, err
- }
- }
- return Version{
- path: path,
- version: vers,
- }, nil
-}
-
-// Sort sorts the list by Path, breaking ties by comparing Version fields.
-// The Version fields are interpreted as semantic versions (using semver.Compare)
-// optionally followed by a tie-breaking suffix introduced by a slash character,
-// like in "v0.0.1/module.cue".
-func Sort(list []Version) {
- sort.Slice(list, func(i, j int) bool {
- mi := list[i]
- mj := list[j]
- if mi.path != mj.path {
- return mi.path < mj.path
- }
- // To help go.sum formatting, allow version/file.
- // Compare semver prefix by semver rules,
- // file by string order.
- vi := mi.version
- vj := mj.version
- var fi, fj string
- if k := strings.Index(vi, "/"); k >= 0 {
- vi, fi = vi[:k], vi[k:]
- }
- if k := strings.Index(vj, "/"); k >= 0 {
- vj, fj = vj[:k], vj[k:]
- }
- if vi != vj {
- return semver.Compare(vi, vj) < 0
- }
- return fi < fj
- })
-}
diff --git a/vendor/cuelang.org/go/internal/mod/mvs/graph.go b/vendor/cuelang.org/go/internal/mod/mvs/graph.go
index bff8bba4..6cab7c3e 100644
--- a/vendor/cuelang.org/go/internal/mod/mvs/graph.go
+++ b/vendor/cuelang.org/go/internal/mod/mvs/graph.go
@@ -6,9 +6,8 @@ package mvs
import (
"fmt"
+ "slices"
"sort"
-
- "cuelang.org/go/internal/slices"
)
// Versions is an interface that should be provided by implementations
diff --git a/vendor/cuelang.org/go/internal/mod/mvs/mvs.go b/vendor/cuelang.org/go/internal/mod/mvs/mvs.go
index 0a9cd134..ef4a3e2d 100644
--- a/vendor/cuelang.org/go/internal/mod/mvs/mvs.go
+++ b/vendor/cuelang.org/go/internal/mod/mvs/mvs.go
@@ -12,7 +12,7 @@ import (
"sort"
"sync"
- "cuelang.org/go/internal/mod/internal/par"
+ "cuelang.org/go/internal/par"
)
// A Reqs is the requirement graph on which Minimal Version Selection (MVS) operates.
diff --git a/vendor/cuelang.org/go/internal/mod/internal/par/queue.go b/vendor/cuelang.org/go/internal/par/queue.go
similarity index 100%
rename from vendor/cuelang.org/go/internal/mod/internal/par/queue.go
rename to vendor/cuelang.org/go/internal/par/queue.go
diff --git a/vendor/cuelang.org/go/internal/mod/internal/par/work.go b/vendor/cuelang.org/go/internal/par/work.go
similarity index 100%
rename from vendor/cuelang.org/go/internal/mod/internal/par/work.go
rename to vendor/cuelang.org/go/internal/par/work.go
diff --git a/vendor/cuelang.org/go/internal/pkg/types.go b/vendor/cuelang.org/go/internal/pkg/types.go
index 06871a25..e3182027 100644
--- a/vendor/cuelang.org/go/internal/pkg/types.go
+++ b/vendor/cuelang.org/go/internal/pkg/types.go
@@ -62,6 +62,12 @@ func (s *Struct) IsOpen() bool {
if !s.node.IsClosedStruct() {
return true
}
+ // Technically this is not correct, but it is in the context of where
+ // it is used.
+ if s.node.PatternConstraints != nil && len(s.node.PatternConstraints.Pairs) > 0 {
+ return true
+ }
+ // The equivalent code for the old implementation.
ot := s.node.OptionalTypes()
if ot&^adt.HasDynamic != 0 {
return true
diff --git a/vendor/cuelang.org/go/internal/slices/slices.go b/vendor/cuelang.org/go/internal/slices/slices.go
deleted file mode 100644
index a0adcf49..00000000
--- a/vendor/cuelang.org/go/internal/slices/slices.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// TODO: Replace with slices package when it lands in standard library.
-
-package slices
-
-// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
-func Clip[S ~[]E, E any](s S) S {
- return s[:len(s):len(s)]
-}
diff --git a/vendor/cuelang.org/go/mod/modcache/cache.go b/vendor/cuelang.org/go/mod/modcache/cache.go
new file mode 100644
index 00000000..6accdb21
--- /dev/null
+++ b/vendor/cuelang.org/go/mod/modcache/cache.go
@@ -0,0 +1,173 @@
+// Package modcache provides a file-based cache for modules.
+//
+// WARNING: THIS PACKAGE IS EXPERIMENTAL.
+// ITS API MAY CHANGE AT ANY TIME.
+package modcache
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+
+ "github.com/rogpeppe/go-internal/lockedfile"
+ "github.com/rogpeppe/go-internal/robustio"
+
+ "cuelang.org/go/mod/module"
+)
+
+var errNotCached = fmt.Errorf("not in cache")
+
+// readDiskModFile reads a cached go.mod file from disk,
+// returning the name of the cache file and the result.
+// If the read fails, the caller can use
+// writeDiskModFile(file, data) to write a new cache entry.
+func (c *cache) readDiskModFile(ctx context.Context, mv module.Version) (file string, data []byte, err error) {
+ return c.readDiskCache(ctx, mv, "mod")
+}
+
+// writeDiskModFile writes a cue.mod/module.cue cache entry.
+// The file name must have been returned by a previous call to readDiskModFile.
+func (c *cache) writeDiskModFile(ctx context.Context, file string, text []byte) error {
+ return c.writeDiskCache(ctx, file, text)
+}
+
+// readDiskCache is the generic "read from a cache file" implementation.
+// It takes the revision and an identifying suffix for the kind of data being cached.
+// It returns the name of the cache file and the content of the file.
+// If the read fails, the caller can use
+// writeDiskCache(file, data) to write a new cache entry.
+func (c *cache) readDiskCache(ctx context.Context, mv module.Version, suffix string) (file string, data []byte, err error) {
+ file, err = c.cachePath(ctx, mv, suffix)
+ if err != nil {
+ return "", nil, errNotCached
+ }
+ data, err = robustio.ReadFile(file)
+ if err != nil {
+ return file, nil, errNotCached
+ }
+ return file, data, nil
+}
+
+// writeDiskCache is the generic "write to a cache file" implementation.
+// The file must have been returned by a previous call to readDiskCache.
+func (c *cache) writeDiskCache(ctx context.Context, file string, data []byte) error {
+ if file == "" {
+ return nil
+ }
+ // Make sure directory for file exists.
+ if err := os.MkdirAll(filepath.Dir(file), 0777); err != nil {
+ return err
+ }
+
+ // Write the file to a temporary location, and then rename it to its final
+ // path to reduce the likelihood of a corrupt file existing at that final path.
+ f, err := tempFile(ctx, filepath.Dir(file), filepath.Base(file), 0666)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ // Only call os.Remove on f.Name() if we failed to rename it: otherwise,
+ // some other process may have created a new file with the same name after
+ // the rename completed.
+ if err != nil {
+ f.Close()
+ os.Remove(f.Name())
+ }
+ }()
+
+ if _, err := f.Write(data); err != nil {
+ return err
+ }
+ if err := f.Close(); err != nil {
+ return err
+ }
+ if err := robustio.Rename(f.Name(), file); err != nil {
+ return err
+ }
+ return nil
+}
+
+// downloadDir returns the directory for storing the module's extracted contents.
+// An error will be returned if the module path or version cannot be escaped.
+// An error satisfying errors.Is(err, fs.ErrNotExist) will be returned
+// along with the directory if the directory does not exist or if the directory
+// is not completely populated.
+func (c *cache) downloadDir(ctx context.Context, m module.Version) (string, error) {
+ if !m.IsCanonical() {
+ return "", fmt.Errorf("non-semver module version %q", m.Version())
+ }
+ enc, err := module.EscapePath(m.BasePath())
+ if err != nil {
+ return "", err
+ }
+ encVer, err := module.EscapeVersion(m.Version())
+ if err != nil {
+ return "", err
+ }
+
+ // Check whether the directory itself exists.
+ dir := filepath.Join(c.dir, enc+"@"+encVer)
+ if fi, err := os.Stat(dir); os.IsNotExist(err) {
+ return dir, err
+ } else if err != nil {
+ return dir, &downloadDirPartialError{dir, err}
+ } else if !fi.IsDir() {
+ return dir, &downloadDirPartialError{dir, errors.New("not a directory")}
+ }
+
+ // Check if a .partial file exists. This is created at the beginning of
+ // a download and removed after the zip is extracted.
+ partialPath, err := c.cachePath(ctx, m, "partial")
+ if err != nil {
+ return dir, err
+ }
+ if _, err := os.Stat(partialPath); err == nil {
+ return dir, &downloadDirPartialError{dir, errors.New("not completely extracted")}
+ } else if !os.IsNotExist(err) {
+ return dir, err
+ }
+ return dir, nil
+}
+
+func (c *cache) cachePath(ctx context.Context, m module.Version, suffix string) (string, error) {
+ if !m.IsValid() || m.Version() == "" {
+ return "", fmt.Errorf("non-semver module version %q", m)
+ }
+ esc, err := module.EscapePath(m.BasePath())
+ if err != nil {
+ return "", err
+ }
+ encVer, err := module.EscapeVersion(m.Version())
+ if err != nil {
+ return "", err
+ }
+ return filepath.Join(c.dir, "cache/download", esc, "/@v", encVer+"."+suffix), nil
+}
+
+// downloadDirPartialError is returned by DownloadDir if a module directory
+// exists but was not completely populated.
+//
+// downloadDirPartialError is equivalent to fs.ErrNotExist.
+type downloadDirPartialError struct {
+ Dir string
+ Err error
+}
+
+func (e *downloadDirPartialError) Error() string { return fmt.Sprintf("%s: %v", e.Dir, e.Err) }
+func (e *downloadDirPartialError) Is(err error) bool { return err == fs.ErrNotExist }
+
+// lockVersion locks a file within the module cache that guards the downloading
+// and extraction of module data for the given module version.
+func (c *cache) lockVersion(ctx context.Context, mod module.Version) (unlock func(), err error) {
+ path, err := c.cachePath(ctx, mod, "lock")
+ if err != nil {
+ return nil, err
+ }
+ if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil {
+ return nil, err
+ }
+ return lockedfile.MutexAt(path).Lock()
+}
diff --git a/vendor/cuelang.org/go/mod/modcache/fetch.go b/vendor/cuelang.org/go/mod/modcache/fetch.go
new file mode 100644
index 00000000..62d95545
--- /dev/null
+++ b/vendor/cuelang.org/go/mod/modcache/fetch.go
@@ -0,0 +1,367 @@
+package modcache
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "log"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/rogpeppe/go-internal/robustio"
+
+ "cuelang.org/go/internal/mod/modload"
+ "cuelang.org/go/internal/par"
+ "cuelang.org/go/mod/modfile"
+ "cuelang.org/go/mod/modregistry"
+ "cuelang.org/go/mod/module"
+ "cuelang.org/go/mod/modzip"
+)
+
+const logging = false // TODO hook this up to CUE_DEBUG
+
+// New returns r wrapped inside a caching layer that
+// stores persistent cached content inside the given
+// OS directory.
+//
+// The `module.SourceLoc.FS` fields in the locations
+// returned by the registry implement the `OSRootFS` interface,
+// allowing a caller to find the native OS filepath where modules
+// are stored.
+func New(registry *modregistry.Client, dir string) (modload.Registry, error) {
+ info, err := os.Stat(dir)
+ if err == nil && !info.IsDir() {
+ return nil, fmt.Errorf("%q is not a directory", dir)
+ }
+ return &cache{
+ dir: dir,
+ reg: registry,
+ }, nil
+}
+
// cache implements [modload.Registry] by caching content fetched
// from reg inside the OS directory dir.
type cache struct {
	// dir holds the root of the on-disk cache.
	dir string
	// reg is used to fetch content that is not yet cached.
	reg *modregistry.Client
	// downloadZipCache de-duplicates concurrent downloads of the same
	// module zip; the cached value is the zip file's path.
	downloadZipCache par.ErrCache[module.Version, string]
	// modFileCache de-duplicates fetches of module.cue contents,
	// keyed by the module version string.
	modFileCache par.ErrCache[string, []byte]
}
+
// Requirements returns the module versions that the given module
// version depends on, as listed in its module.cue file. The module
// file is fetched (and cached) if necessary.
func (c *cache) Requirements(ctx context.Context, mv module.Version) ([]module.Version, error) {
	data, err := c.downloadModFile(ctx, mv)
	if err != nil {
		return nil, err
	}
	mf, err := modfile.Parse(data, mv.String())
	if err != nil {
		return nil, fmt.Errorf("cannot parse module file from %v: %v", mv, err)
	}
	return mf.DepVersions(), nil
}
+
// Fetch returns the location of the contents for the given module
// version, downloading it if necessary.
func (c *cache) Fetch(ctx context.Context, mv module.Version) (module.SourceLoc, error) {
	dir, err := c.downloadDir(ctx, mv)
	if err == nil {
		// The directory has already been completely extracted (no .partial file exists).
		return c.dirToLocation(dir), nil
	}
	// downloadDir returns the directory path even on error; only proceed
	// when the failure means "not (fully) extracted yet".
	if dir == "" || !errors.Is(err, fs.ErrNotExist) {
		return module.SourceLoc{}, err
	}

	// To avoid cluttering the cache with extraneous files,
	// DownloadZip uses the same lockfile as Download.
	// Invoke DownloadZip before locking the file.
	zipfile, err := c.downloadZip(ctx, mv)
	if err != nil {
		return module.SourceLoc{}, err
	}

	unlock, err := c.lockVersion(ctx, mv)
	if err != nil {
		return module.SourceLoc{}, err
	}
	defer unlock()

	// Check whether the directory was populated while we were waiting on the lock.
	_, dirErr := c.downloadDir(ctx, mv)
	if dirErr == nil {
		return c.dirToLocation(dir), nil
	}
	// A *downloadDirPartialError means the directory exists but holds the
	// remains of an interrupted extraction; any other error means absent.
	_, dirExists := dirErr.(*downloadDirPartialError)

	// Clean up any partially extracted directories (indicated by
	// DownloadDirPartialError, usually because of a .partial file). This is only
	// safe to do because the lock file ensures that their writers are no longer
	// active.
	parentDir := filepath.Dir(dir)
	tmpPrefix := filepath.Base(dir) + ".tmp-"

	entries, _ := os.ReadDir(parentDir)
	for _, entry := range entries {
		if strings.HasPrefix(entry.Name(), tmpPrefix) {
			RemoveAll(filepath.Join(parentDir, entry.Name())) // best effort
		}
	}
	if dirExists {
		if err := RemoveAll(dir); err != nil {
			return module.SourceLoc{}, err
		}
	}

	partialPath, err := c.cachePath(ctx, mv, "partial")
	if err != nil {
		return module.SourceLoc{}, err
	}

	// Extract the module zip directory at its final location.
	//
	// To prevent other processes from reading the directory if we crash,
	// create a .partial file before extracting the directory, and delete
	// the .partial file afterward (all while holding the lock).
	//
	// A technique used previously was to extract to a temporary directory with a random name
	// then rename it into place with os.Rename. On Windows, this can fail with
	// ERROR_ACCESS_DENIED when another process (usually an anti-virus scanner)
	// opened files in the temporary directory.
	if err := os.MkdirAll(parentDir, 0777); err != nil {
		return module.SourceLoc{}, err
	}
	if err := os.WriteFile(partialPath, nil, 0666); err != nil {
		return module.SourceLoc{}, err
	}
	if err := modzip.Unzip(dir, mv, zipfile); err != nil {
		// Only remove the .partial marker if the failed extraction was
		// itself cleaned up; otherwise the marker must stay so a later
		// Fetch sees the directory as incomplete.
		if rmErr := RemoveAll(dir); rmErr == nil {
			os.Remove(partialPath)
		}
		return module.SourceLoc{}, err
	}
	if err := os.Remove(partialPath); err != nil {
		return module.SourceLoc{}, err
	}
	// Best-effort: discourage modification of the extracted module.
	makeDirsReadOnly(dir)
	return c.dirToLocation(dir), nil
}
+
// ModuleVersions implements [modload.Registry.ModuleVersions].
// It currently queries the underlying registry on every call.
func (c *cache) ModuleVersions(ctx context.Context, mpath string) ([]string, error) {
	// TODO should this do any kind of short-term caching?
	return c.reg.ModuleVersions(ctx, mpath)
}
+
// downloadZip returns the path of the cached zip file for the given
// module version, downloading it first if it is not already present.
// Concurrent calls for the same version are de-duplicated through
// downloadZipCache.
func (c *cache) downloadZip(ctx context.Context, mv module.Version) (zipfile string, err error) {
	return c.downloadZipCache.Do(mv, func() (string, error) {
		zipfile, err := c.cachePath(ctx, mv, "zip")
		if err != nil {
			return "", err
		}

		// Return without locking if the zip file exists.
		if _, err := os.Stat(zipfile); err == nil {
			return zipfile, nil
		}
		logf("cue: downloading %s", mv)
		unlock, err := c.lockVersion(ctx, mv)
		if err != nil {
			return "", err
		}
		defer unlock()

		// The actual download happens with the version lock held;
		// downloadZip1 re-checks for the file after acquiring it.
		if err := c.downloadZip1(ctx, mv, zipfile); err != nil {
			return "", err
		}
		return zipfile, nil
	})
}
+
// downloadZip1 fetches the zip archive for mod from the registry and
// atomically installs it at zipfile via a temporary file and rename.
// It must be called with the version lock held (see lockVersion).
func (c *cache) downloadZip1(ctx context.Context, mod module.Version, zipfile string) (err error) {
	// Double-check that the zipfile was not created while we were waiting for
	// the lock in downloadZip.
	if _, err := os.Stat(zipfile); err == nil {
		return nil
	}

	// Create parent directories.
	if err := os.MkdirAll(filepath.Dir(zipfile), 0777); err != nil {
		return err
	}

	// Clean up any remaining tempfiles from previous runs.
	// This is only safe to do because the lock file ensures that their
	// writers are no longer active.
	tmpPattern := filepath.Base(zipfile) + "*.tmp"
	if old, err := filepath.Glob(filepath.Join(quoteGlob(filepath.Dir(zipfile)), tmpPattern)); err == nil {
		for _, path := range old {
			os.Remove(path) // best effort
		}
	}

	// From here to the os.Rename call below is functionally almost equivalent to
	// renameio.WriteToFile. We avoid using that so that we have control over the
	// names of the temporary files (see the cleanup above) and to avoid adding
	// renameio as an extra dependency.
	f, err := tempFile(ctx, filepath.Dir(zipfile), filepath.Base(zipfile), 0666)
	if err != nil {
		return err
	}
	// On any failure below, remove the temp file so it cannot be
	// mistaken for a complete download.
	defer func() {
		if err != nil {
			f.Close()
			os.Remove(f.Name())
		}
	}()

	// TODO cache the result of GetModule so we don't have to do
	// an extra round trip when we've already fetched the module file.
	m, err := c.reg.GetModule(ctx, mod)
	if err != nil {
		return err
	}
	r, err := m.GetZip(ctx)
	if err != nil {
		return err
	}
	defer r.Close()
	if _, err := io.Copy(f, r); err != nil {
		return fmt.Errorf("failed to get module zip contents: %v", err)
	}
	// Close before renaming so the contents are flushed to disk.
	if err := f.Close(); err != nil {
		return err
	}
	if err := os.Rename(f.Name(), zipfile); err != nil {
		return err
	}
	// TODO should we check the zip file for well-formedness?
	// TODO: Should we make the .zip file read-only to discourage tampering?
	return nil
}
+
// downloadModFile returns the contents of the module.cue file for the
// given module version, consulting the on-disk cache first and
// downloading and caching the file when absent. Concurrent calls for
// the same version are de-duplicated through modFileCache.
func (c *cache) downloadModFile(ctx context.Context, mod module.Version) ([]byte, error) {
	return c.modFileCache.Do(mod.String(), func() ([]byte, error) {
		// modfile here is the on-disk path where the file is cached.
		modfile, data, err := c.readDiskModFile(ctx, mod)
		if err == nil {
			return data, nil
		}
		logf("cue: downloading %s", mod)
		unlock, err := c.lockVersion(ctx, mod)
		if err != nil {
			return nil, err
		}
		defer unlock()
		// Double-check that the file hasn't been created while we were
		// acquiring the lock.
		_, data, err = c.readDiskModFile(ctx, mod)
		if err == nil {
			return data, nil
		}
		return c.downloadModFile1(ctx, mod, modfile)
	})
}
+
+func (c *cache) downloadModFile1(ctx context.Context, mod module.Version, modfile string) ([]byte, error) {
+ m, err := c.reg.GetModule(ctx, mod)
+ if err != nil {
+ return nil, err
+ }
+ data, err := m.ModuleFile(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if err := c.writeDiskModFile(ctx, modfile, data); err != nil {
+ return nil, err
+ }
+ return data, nil
+}
+
+func (c *cache) dirToLocation(fpath string) module.SourceLoc {
+ return module.SourceLoc{
+ FS: module.OSDirFS(fpath),
+ Dir: ".",
+ }
+}
+
+// makeDirsReadOnly makes a best-effort attempt to remove write permissions for dir
+// and its transitive contents.
+func makeDirsReadOnly(dir string) {
+ type pathMode struct {
+ path string
+ mode fs.FileMode
+ }
+ var dirs []pathMode // in lexical order
+ filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
+ if err == nil && d.IsDir() {
+ info, err := d.Info()
+ if err == nil && info.Mode()&0222 != 0 {
+ dirs = append(dirs, pathMode{path, info.Mode()})
+ }
+ }
+ return nil
+ })
+
+ // Run over list backward to chmod children before parents.
+ for i := len(dirs) - 1; i >= 0; i-- {
+ os.Chmod(dirs[i].path, dirs[i].mode&^0222)
+ }
+}
+
// RemoveAll removes a directory written by the cache, first applying
// any permission changes needed to do so.
func RemoveAll(dir string) error {
	// Module cache has 0555 directories; make them writable in order to remove content.
	filepath.WalkDir(dir, func(path string, info fs.DirEntry, err error) error {
		if err != nil {
			return nil // ignore errors walking in file system
		}
		if info.IsDir() {
			os.Chmod(path, 0777)
		}
		return nil
	})
	// robustio.RemoveAll retries on the spurious failures that plain
	// os.RemoveAll can hit on some platforms; it reports any real error.
	return robustio.RemoveAll(dir)
}
+
// quoteGlob returns s with all Glob metacharacters quoted.
// We don't try to handle backslash here, as that can appear in a
// file path on Windows.
func quoteGlob(s string) string {
	// Fast path: nothing to quote.
	if !strings.ContainsAny(s, `*?[]`) {
		return s
	}
	var out strings.Builder
	for _, r := range s {
		if r == '*' || r == '?' || r == '[' || r == ']' {
			out.WriteByte('\\')
		}
		out.WriteRune(r)
	}
	return out.String()
}
+
+// tempFile creates a new temporary file with given permission bits.
+func tempFile(ctx context.Context, dir, prefix string, perm fs.FileMode) (f *os.File, err error) {
+ for i := 0; i < 10000; i++ {
+ name := filepath.Join(dir, prefix+strconv.Itoa(rand.Intn(1000000000))+".tmp")
+ f, err = os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, perm)
+ if os.IsExist(err) {
+ if ctx.Err() != nil {
+ return nil, ctx.Err()
+ }
+ continue
+ }
+ break
+ }
+ return
+}
+
+func logf(f string, a ...any) {
+ if logging {
+ log.Printf(f, a...)
+ }
+}
diff --git a/vendor/cuelang.org/go/mod/modconfig/modconfig.go b/vendor/cuelang.org/go/mod/modconfig/modconfig.go
new file mode 100644
index 00000000..7c6aa05c
--- /dev/null
+++ b/vendor/cuelang.org/go/mod/modconfig/modconfig.go
@@ -0,0 +1,327 @@
+// Package modconfig provides access to the standard CUE
+// module configuration, including registry access and authorization.
+package modconfig
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io/fs"
+ "net/http"
+ "os"
+ "strings"
+ "sync"
+
+ "cuelabs.dev/go/oci/ociregistry"
+ "cuelabs.dev/go/oci/ociregistry/ociauth"
+ "cuelabs.dev/go/oci/ociregistry/ociclient"
+ "golang.org/x/oauth2"
+
+ "cuelang.org/go/internal/cueconfig"
+ "cuelang.org/go/internal/cueversion"
+ "cuelang.org/go/internal/mod/modload"
+ "cuelang.org/go/internal/mod/modresolve"
+ "cuelang.org/go/mod/modcache"
+ "cuelang.org/go/mod/modregistry"
+ "cuelang.org/go/mod/module"
+)
+
// Registry is used to access CUE modules from external sources.
//
// Note: this interface is deliberately kept interchangeable with
// [modload.Registry]; see the static assertions below.
type Registry interface {
	// Requirements returns a list of the modules required by the given module
	// version.
	Requirements(ctx context.Context, m module.Version) ([]module.Version, error)

	// Fetch returns the location of the contents for the given module
	// version, downloading it if necessary.
	Fetch(ctx context.Context, m module.Version) (module.SourceLoc, error)

	// ModuleVersions returns all the versions for the module with the
	// given path, which should contain a major version.
	ModuleVersions(ctx context.Context, mpath string) ([]string, error)
}
+
// We don't want to make modload part of the cue/load API,
// so we define the above type independently, but we want
// it to be interchangeable, so check that statically here.
var (
	_ Registry = modload.Registry(nil)
	_ modload.Registry = Registry(nil)
)

// DefaultRegistry is the default registry host, used when
// $CUE_REGISTRY does not specify one.
const DefaultRegistry = "registry.cue.works"
+
// Resolver implements [modregistry.Resolver] in terms of the
// CUE registry configuration file and auth configuration.
type Resolver struct {
	// resolver maps module paths to registry locations.
	resolver modresolve.LocationResolver
	// newRegistry creates the registry client for a given host.
	// It is set once by NewResolver and read-only afterwards.
	newRegistry func(host string, insecure bool) (ociregistry.Interface, error)

	// mu guards registries.
	mu sync.Mutex
	// registries caches one client per registry host.
	registries map[string]ociregistry.Interface
}
+
// Config provides the starting point for the configuration.
// A nil *Config is treated the same as a pointer to a zero Config
// (see NewResolver and NewRegistry).
type Config struct {
	// TODO allow for a custom resolver to be passed in.

	// Transport is used to make the underlying HTTP requests.
	// If it's nil, [http.DefaultTransport] will be used.
	Transport http.RoundTripper

	// Env provides environment variable values. If this is nil,
	// the current process's environment will be used.
	Env []string

	// ClientType is used as part of the User-Agent header
	// that's added in each outgoing HTTP request.
	// If it's empty, it defaults to "cuelang.org/go".
	ClientType string
}
+
// NewResolver returns an implementation of [modregistry.Resolver]
// that uses cfg to guide registry resolution. If cfg is nil, it's
// equivalent to passing pointer to a zero Config struct.
//
// It consults the same environment variables used by the
// cue command.
//
// The contents of the configuration will not be mutated.
func NewResolver(cfg *Config) (*Resolver, error) {
	// Copy cfg so the caller's struct is never mutated.
	cfg = newRef(cfg)
	cfg.Transport = cueversion.NewTransport(cfg.ClientType, cfg.Transport)
	getenv := getenvFunc(cfg.Env)
	var configData []byte
	var configPath string
	cueRegistry := getenv("CUE_REGISTRY")
	// $CUE_REGISTRY may take the forms "file:<path>", "inline:<config>"
	// or "simple:<registry>"; any other value is treated as a plain
	// registry string (the default case below).
	kind, rest, _ := strings.Cut(cueRegistry, ":")
	switch kind {
	case "file":
		data, err := os.ReadFile(rest)
		if err != nil {
			return nil, err
		}
		configData, configPath = data, rest
	case "inline":
		configData, configPath = []byte(rest), "$CUE_REGISTRY"
	case "simple":
		cueRegistry = rest
	}
	var resolver modresolve.LocationResolver
	var err error
	if configPath != "" {
		resolver, err = modresolve.ParseConfig(configData, configPath, DefaultRegistry)
	} else {
		resolver, err = modresolve.ParseCUERegistry(cueRegistry, DefaultRegistry)
	}
	if err != nil {
		return nil, fmt.Errorf("bad value for $CUE_REGISTRY: %v", err)
	}
	return &Resolver{
		resolver: resolver,
		// newRegistry lazily creates one OCI client per host (see
		// ResolveToRegistry), authenticating via cueLoginsTransport.
		newRegistry: func(host string, insecure bool) (ociregistry.Interface, error) {
			return ociclient.New(host, &ociclient.Options{
				Insecure: insecure,
				Transport: &cueLoginsTransport{
					getenv: getenv,
					cfg:    cfg,
				},
			})
		},
		registries: make(map[string]ociregistry.Interface),
	}, nil
}
+
// Host represents a registry host name and whether
// it should be accessed via a secure connection or not.
type Host = modresolve.Host

// AllHosts returns all the registry hosts that the resolver might resolve to,
// ordered lexically by hostname. The result is computed by the
// underlying location resolver.
func (r *Resolver) AllHosts() []Host {
	return r.resolver.AllHosts()
}
+
// HostLocation represents a registry host and a location with it.
type HostLocation = modresolve.Location

// ResolveToLocation returns the host location for the given module path and version
// without creating a Registry instance for it. The second result reports
// whether the path could be resolved.
func (r *Resolver) ResolveToLocation(mpath string, version string) (HostLocation, bool) {
	return r.resolver.ResolveToLocation(mpath, version)
}
+
// ResolveToRegistry implements [modregistry.Resolver].ResolveToRegistry,
// creating (and caching, per host) the OCI registry client needed to
// access the resolved location.
func (r *Resolver) ResolveToRegistry(mpath string, version string) (modregistry.RegistryLocation, error) {
	loc, ok := r.resolver.ResolveToLocation(mpath, version)
	if !ok {
		// This can only happen when mpath is invalid, which should not
		// happen in practice, as the only caller is modregistry which
		// vets module paths before calling Resolve.
		return modregistry.RegistryLocation{}, fmt.Errorf("cannot resolve %s (version %s) to registry", mpath, version)
	}
	// One client per host, created lazily and cached under the lock.
	r.mu.Lock()
	defer r.mu.Unlock()
	reg := r.registries[loc.Host]
	if reg == nil {
		reg1, err := r.newRegistry(loc.Host, loc.Insecure)
		if err != nil {
			return modregistry.RegistryLocation{}, fmt.Errorf("cannot make client: %v", err)
		}
		r.registries[loc.Host] = reg1
		reg = reg1
	}
	return modregistry.RegistryLocation{
		Registry:   reg,
		Repository: loc.Repository,
		Tag:        loc.Tag,
	}, nil
}
+
// cueLoginsTransport implements [http.RoundTripper] by using
// tokens from the CUE login information when available, falling
// back to using the standard [ociauth] transport implementation.
type cueLoginsTransport struct {
	cfg *Config
	// getenv reads configuration from the environment (possibly the
	// fake environment provided in Config.Env).
	getenv func(string) string

	// initOnce guards initErr, logins, and transport.
	initOnce sync.Once
	initErr error
	logins *cueconfig.Logins
	// transport holds the underlying transport. This wraps
	// t.cfg.Transport.
	transport http.RoundTripper

	// mu guards the fields below.
	mu sync.Mutex

	// cachedTransports holds a transport per host.
	// This is needed because the oauth2 API requires a
	// different client for each host. Each of these transports
	// wraps the transport above.
	cachedTransports map[string]http.RoundTripper
}
+
// RoundTrip implements [http.RoundTripper]. When the request's host has
// an entry in the `cue login` logins configuration, the request is sent
// through a per-host OAuth2 transport; otherwise it goes through the
// plain underlying transport.
func (t *cueLoginsTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	// Return an error lazily on the first request because if the
	// user isn't doing anything that requires a registry, we
	// shouldn't complain about reading a bad configuration file.
	if err := t.init(); err != nil {
		return nil, err
	}
	if t.logins == nil {
		return t.transport.RoundTrip(req)
	}
	// TODO: note that a CUE registry may include a path prefix,
	// so using solely the host will not work with such a path.
	// Can we do better here, perhaps keeping the path prefix up to "/v2/"?
	host := req.URL.Host
	login, ok := t.logins.Registries[host]
	if !ok {
		return t.transport.RoundTrip(req)
	}

	t.mu.Lock()
	transport := t.cachedTransports[host]
	if transport == nil {
		tok := cueconfig.TokenFromLogin(login)
		oauthCfg := cueconfig.RegistryOAuthConfig(Host{
			Name:     host,
			Insecure: req.URL.Scheme == "http",
		})
		// TODO: When this client refreshes an access token,
		// we should store the refreshed token on disk.

		// Make the oauth client use the transport that was set up
		// in init.
		ctx := context.WithValue(req.Context(), oauth2.HTTPClient, &http.Client{
			Transport: t.transport,
		})
		transport = oauthCfg.Client(ctx, tok).Transport
		t.cachedTransports[host] = transport
	}
	// Unlock immediately so we don't hold the lock for the entire
	// request, which would preclude any concurrency when
	// making HTTP requests.
	t.mu.Unlock()
	return transport.RoundTrip(req)
}
+
// init performs lazy one-time initialization of the transport state,
// recording any error so it is returned consistently on every call.
func (t *cueLoginsTransport) init() error {
	t.initOnce.Do(func() {
		t.initErr = t._init()
	})
	return t.initErr
}
+
// _init does the actual work for init: it builds the fallback
// ociauth transport and loads `cue login` credentials if present.
func (t *cueLoginsTransport) _init() error {
	// If a registry was authenticated via `cue login`, use that.
	// If not, fall back to authentication via Docker's config.json.
	// Note that the order below is backwards, since we layer interfaces.

	config, err := ociauth.LoadWithEnv(nil, t.cfg.Env)
	if err != nil {
		return fmt.Errorf("cannot load OCI auth configuration: %v", err)
	}
	t.transport = ociauth.NewStdTransport(ociauth.StdTransportParams{
		Config:    config,
		Transport: t.cfg.Transport,
	})

	// If we can't locate a logins.json file at all, continue without
	// `cue login` credentials; we only refuse to continue if we find
	// an invalid logins.json file.
	loginsPath, err := cueconfig.LoginConfigPath(t.getenv)
	if err != nil {
		return nil
	}
	logins, err := cueconfig.ReadLogins(loginsPath)
	if errors.Is(err, fs.ErrNotExist) {
		return nil
	}
	if err != nil {
		return fmt.Errorf("cannot load CUE registry logins: %v", err)
	}
	t.logins = logins
	t.cachedTransports = make(map[string]http.RoundTripper)
	return nil
}
+
// NewRegistry returns an implementation of the Registry
// interface suitable for passing to [load.Instances].
// It uses the standard CUE cache directory.
func NewRegistry(cfg *Config) (Registry, error) {
	// Copy cfg so the caller's struct is never mutated.
	cfg = newRef(cfg)
	resolver, err := NewResolver(cfg)
	if err != nil {
		return nil, err
	}
	cacheDir, err := cueconfig.CacheDir(getenvFunc(cfg.Env))
	if err != nil {
		return nil, err
	}
	// Wrap the registry client in a persistent on-disk cache.
	return modcache.New(modregistry.NewClientWithResolver(resolver), cacheDir)
}
+
// getenvFunc returns a lookup function over the given "KEY=value"
// environment slice, where the last entry for a key wins. When env is
// nil, the real process environment (os.Getenv) is used instead.
func getenvFunc(env []string) func(string) string {
	if env == nil {
		return os.Getenv
	}
	return func(key string) string {
		prefix := key + "="
		// Scan from the end so later entries override earlier ones.
		for i := len(env) - 1; i >= 0; i-- {
			if v, ok := strings.CutPrefix(env[i], prefix); ok {
				return v
			}
		}
		return ""
	}
}
+
// newRef returns a pointer to a shallow copy of *x, or a pointer to a
// zero T when x is nil. The caller can freely mutate the result without
// affecting *x.
func newRef[T any](x *T) *T {
	out := new(T)
	if x != nil {
		*out = *x
	}
	return out
}
diff --git a/vendor/cuelang.org/go/mod/modfile/modfile.go b/vendor/cuelang.org/go/mod/modfile/modfile.go
new file mode 100644
index 00000000..9b28805f
--- /dev/null
+++ b/vendor/cuelang.org/go/mod/modfile/modfile.go
@@ -0,0 +1,269 @@
+// Copyright 2023 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package modfile provides functionality for reading and parsing
+// the CUE module file, cue.mod/module.cue.
+//
+// WARNING: THIS PACKAGE IS EXPERIMENTAL.
+// ITS API MAY CHANGE AT ANY TIME.
+package modfile
+
+import (
+ _ "embed"
+ "fmt"
+ "slices"
+ "strings"
+ "sync"
+
+ "cuelang.org/go/internal/mod/semver"
+
+ "cuelang.org/go/cue"
+ "cuelang.org/go/cue/ast"
+ "cuelang.org/go/cue/cuecontext"
+ "cuelang.org/go/cue/errors"
+ "cuelang.org/go/cue/format"
+ "cuelang.org/go/cue/parser"
+ "cuelang.org/go/cue/token"
+ "cuelang.org/go/mod/module"
+)
+
//go:embed schema.cue
// moduleSchemaData holds the CUE schema used to validate module.cue files.
var moduleSchemaData []byte

// File represents the contents of a cue.mod/module.cue file.
type File struct {
	// Module holds the module path, including its major version suffix.
	Module string `json:"module"`
	// Language records the CUE language version, if specified.
	Language *Language `json:"language,omitempty"`
	// Deps maps module paths to their dependency requirements.
	Deps map[string]*Dep `json:"deps,omitempty"`
	// versions caches the sorted dependency versions derived by parse.
	versions []module.Version
	// defaultMajorVersions maps from module base path to the
	// major version default for that path.
	defaultMajorVersions map[string]string
}
+
// Format returns a formatted representation of f
// in CUE syntax.
func (f *File) Format() ([]byte, error) {
	if len(f.Deps) == 0 && f.Deps != nil {
		// There's no way to get the CUE encoder to omit an empty
		// but non-nil map (despite the current doc comment on
		// [cue.Context.Encode]), so make a copy of f to allow us
		// to do that.
		f1 := *f
		f1.Deps = nil
		f = &f1
	}
	// TODO this could be better:
	// - it should omit the outer braces
	v := cuecontext.New().Encode(f)
	if err := v.Validate(cue.Concrete(true)); err != nil {
		return nil, err
	}
	n := v.Syntax(cue.Concrete(true)).(*ast.StructLit)

	// Format the struct's fields as a top-level file (dropping the
	// enclosing braces).
	data, err := format.Node(&ast.File{
		Decls: n.Elts,
	})
	if err != nil {
		return nil, fmt.Errorf("cannot format: %v", err)
	}
	// Sanity check that it can be parsed.
	// TODO this could be more efficient by checking all the file fields
	// before formatting the output.
	if _, err := ParseNonStrict(data, "-"); err != nil {
		return nil, fmt.Errorf("cannot round-trip module file: %v", strings.TrimSuffix(errors.Details(err, nil), "\n"))
	}
	return data, err
}
+
// Language indicates the CUE language version of a module file.
type Language struct {
	Version string `json:"version,omitempty"`
}

// Dep holds a single dependency requirement in a module file.
type Dep struct {
	// Version holds the required version of the dependency.
	Version string `json:"v"`
	// Default reports whether this dependency's major version is the
	// default for its module base path.
	Default bool `json:"default,omitempty"`
}

// noDepsFile is used by ParseLegacy to decode only the module path.
type noDepsFile struct {
	Module string `json:"module"`
}

var (
	moduleSchemaOnce sync.Once // guards the creation of _moduleSchema
	// TODO remove this mutex when https://cuelang.org/issue/2733 is fixed.
	moduleSchemaMutex sync.Mutex // guards any use of _moduleSchema
	_moduleSchema cue.Value
)
+
// moduleSchemaDo runs f with the compiled module.cue schema, compiling
// the schema on first use. All uses are serialized through
// moduleSchemaMutex (see the TODO on that mutex above).
func moduleSchemaDo[T any](f func(moduleSchema cue.Value) (T, error)) (T, error) {
	moduleSchemaOnce.Do(func() {
		ctx := cuecontext.New()
		schemav := ctx.CompileBytes(moduleSchemaData, cue.Filename("cuelang.org/go/mod/modfile/schema.cue"))
		schemav = lookup(schemav, cue.Def("#File"))
		//schemav = schemav.Unify(lookup(schemav, cue.Hid("#Strict", "_")))
		// The embedded schema is part of this package, so failure to
		// compile it is a programmer error.
		if err := schemav.Validate(); err != nil {
			panic(fmt.Errorf("internal error: invalid CUE module.cue schema: %v", errors.Details(err, nil)))
		}
		_moduleSchema = schemav
	})
	moduleSchemaMutex.Lock()
	defer moduleSchemaMutex.Unlock()
	return f(_moduleSchema)
}
+
+func lookup(v cue.Value, sels ...cue.Selector) cue.Value {
+ return v.LookupPath(cue.MakePath(sels...))
+}
+
// Parse verifies that the module file has correct syntax.
// The file name is used for error messages.
// All dependencies must be specified correctly: with major
// versions in the module paths and canonical dependency
// versions.
//
// It is equivalent to calling parse in strict mode; see
// ParseNonStrict for a laxer variant.
func Parse(modfile []byte, filename string) (*File, error) {
	return parse(modfile, filename, true)
}
+
// ParseLegacy parses the legacy version of the module file
// that only supports the single field "module" and ignores all other
// fields.
func ParseLegacy(modfile []byte, filename string) (*File, error) {
	return moduleSchemaDo(func(schema cue.Value) (*File, error) {
		v := schema.Context().CompileBytes(modfile, cue.Filename(filename))
		if err := v.Err(); err != nil {
			return nil, errors.Wrapf(err, token.NoPos, "invalid module.cue file")
		}
		// Decode only the module path; every other field is ignored.
		var f noDepsFile
		if err := v.Decode(&f); err != nil {
			return nil, newCUEError(err, filename)
		}
		return &File{
			Module: f.Module,
		}, nil
	})
}
+
// ParseNonStrict is like Parse but allows some laxity in the parsing:
//   - if a module path lacks a version, it's taken from the version.
//   - if a non-canonical version is used, it will be canonicalized.
//
// The file name is used for error messages.
func ParseNonStrict(modfile []byte, filename string) (*File, error) {
	return parse(modfile, filename, false)
}
+
+func parse(modfile []byte, filename string, strict bool) (*File, error) {
+ file, err := parser.ParseFile(filename, modfile)
+ if err != nil {
+ return nil, errors.Wrapf(err, token.NoPos, "invalid module.cue file syntax")
+ }
+ // TODO disallow non-data-mode CUE.
+
+ mf, err := moduleSchemaDo(func(schema cue.Value) (*File, error) {
+ v := schema.Context().BuildFile(file)
+ if err := v.Validate(cue.Concrete(true)); err != nil {
+ return nil, errors.Wrapf(err, token.NoPos, "invalid module.cue file value")
+ }
+ v = v.Unify(schema)
+ if err := v.Validate(); err != nil {
+ return nil, newCUEError(err, filename)
+ }
+ var mf File
+ if err := v.Decode(&mf); err != nil {
+ return nil, errors.Wrapf(err, token.NoPos, "internal error: cannot decode into modFile struct")
+ }
+ return &mf, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ mainPath, mainMajor, ok := module.SplitPathVersion(mf.Module)
+ if strict && !ok {
+ return nil, fmt.Errorf("module path %q in %s does not contain major version", mf.Module, filename)
+ }
+ if ok {
+ if semver.Major(mainMajor) != mainMajor {
+ return nil, fmt.Errorf("module path %s in %q should contain the major version only", mf.Module, filename)
+ }
+ } else if mainPath = mf.Module; mainPath != "" {
+ if err := module.CheckPathWithoutVersion(mainPath); err != nil {
+ return nil, fmt.Errorf("module path %q in %q is not valid: %v", mainPath, filename, err)
+ }
+ // There's no main module major version: default to v0.
+ mainMajor = "v0"
+ // TODO perhaps we'd be better preserving the original?
+ mf.Module += "@v0"
+ }
+ if mf.Language != nil {
+ vers := mf.Language.Version
+ if !semver.IsValid(vers) {
+ return nil, fmt.Errorf("language version %q in %s is not well formed", vers, filename)
+ }
+ if semver.Canonical(vers) != vers {
+ return nil, fmt.Errorf("language version %v in %s is not canonical", vers, filename)
+ }
+ }
+ var versions []module.Version
+ // The main module is always the default for its own major version.
+ defaultMajorVersions := map[string]string{
+ mainPath: mainMajor,
+ }
+ // Check that major versions match dependency versions.
+ for m, dep := range mf.Deps {
+ vers, err := module.NewVersion(m, dep.Version)
+ if err != nil {
+ return nil, fmt.Errorf("invalid module.cue file %s: cannot make version from module %q, version %q: %v", filename, m, dep.Version, err)
+ }
+ versions = append(versions, vers)
+ if strict && vers.Path() != m {
+ return nil, fmt.Errorf("invalid module.cue file %s: no major version in %q", filename, m)
+ }
+ if dep.Default {
+ mp := vers.BasePath()
+ if _, ok := defaultMajorVersions[mp]; ok {
+ return nil, fmt.Errorf("multiple default major versions found for %v", mp)
+ }
+ defaultMajorVersions[mp] = semver.Major(vers.Version())
+ }
+ }
+
+ if len(defaultMajorVersions) > 0 {
+ mf.defaultMajorVersions = defaultMajorVersions
+ }
+ mf.versions = versions[:len(versions):len(versions)]
+ module.Sort(mf.versions)
+ return mf, nil
+}
+
// newCUEError converts a CUE validation error into the error reported
// to the user. It currently returns the error unchanged.
func newCUEError(err error, filename string) error {
	// TODO we have some potential to improve error messages here.
	return err
}
+
// DepVersions returns the versions of all the modules depended on by the
// file. The caller should not modify the returned slice.
//
// This always returns the same value, even if the contents
// of f are changed. If f was not created with [Parse], it returns nil.
// (ParseNonStrict populates the versions too, as both go through the
// same internal parse function.)
func (f *File) DepVersions() []module.Version {
	// Clip prevents appends by the caller from touching our backing array.
	return slices.Clip(f.versions)
}

// DefaultMajorVersions returns a map from module base path
// to the major version that's specified as the default for that module.
// The caller should not modify the returned map.
func (f *File) DefaultMajorVersions() map[string]string {
	return f.defaultMajorVersions
}
diff --git a/vendor/cuelang.org/go/internal/mod/modfile/schema.cue b/vendor/cuelang.org/go/mod/modfile/schema.cue
similarity index 100%
rename from vendor/cuelang.org/go/internal/mod/modfile/schema.cue
rename to vendor/cuelang.org/go/mod/modfile/schema.cue
diff --git a/vendor/cuelang.org/go/internal/mod/modregistry/client.go b/vendor/cuelang.org/go/mod/modregistry/client.go
similarity index 58%
rename from vendor/cuelang.org/go/internal/mod/modregistry/client.go
rename to vendor/cuelang.org/go/mod/modregistry/client.go
index f378323b..70e06641 100644
--- a/vendor/cuelang.org/go/internal/mod/modregistry/client.go
+++ b/vendor/cuelang.org/go/mod/modregistry/client.go
@@ -12,6 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+// Package modregistry provides functionality for reading and writing
+// CUE modules from an OCI registry.
+//
+// WARNING: THIS PACKAGE IS EXPERIMENTAL.
+// ITS API MAY CHANGE AT ANY TIME.
package modregistry
import (
@@ -30,9 +35,9 @@ import (
specs "github.com/opencontainers/image-spec/specs-go"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
- "cuelang.org/go/internal/mod/modfile"
- "cuelang.org/go/internal/mod/module"
- "cuelang.org/go/internal/mod/modzip"
+ "cuelang.org/go/mod/modfile"
+ "cuelang.org/go/mod/module"
+ "cuelang.org/go/mod/modzip"
)
var ErrNotFound = fmt.Errorf("module not found")
@@ -40,7 +45,34 @@ var ErrNotFound = fmt.Errorf("module not found")
// Client represents a OCI-registry-backed client that
// provides a store for CUE modules.
type Client struct {
- registry ociregistry.Interface
+ resolver Resolver
+}
+
+// Resolver resolves module paths to a registry and a location
+// within that registry.
+type Resolver interface {
+ // ResolveToRegistry resolves a base module path (without a version)
+ // and optional version to the location for that path.
+ //
+ // If the version is empty, the Tag in the returned Location
+ // will hold the prefix that all versions of the module in its
+ // repository have. That prefix will be followed by the version
+ // itself.
+ ResolveToRegistry(mpath, vers string) (RegistryLocation, error)
+}
+
+// RegistryLocation holds a registry and a location within it
+// that a specific module (or set of versions for a module)
+// will be stored.
+type RegistryLocation struct {
+ // Registry holds the registry to use to access the module.
+ Registry ociregistry.Interface
+ // Repository holds the repository where the module is located.
+ Repository string
+ // Tag holds the tag for the module version. If an empty version
+ // was passed to Resolve, it holds the prefix shared by all
+ // version tags for the module.
+ Tag string
}
const (
@@ -53,7 +85,15 @@ const (
// hostname.
func NewClient(registry ociregistry.Interface) *Client {
return &Client{
- registry: registry,
+ resolver: singleResolver{registry},
+ }
+}
+
+// NewClientWithResolver returns a new client that uses the given
+// resolver to decide which registries to fetch from or push to.
+func NewClientWithResolver(resolver Resolver) *Client {
+ return &Client{
+ resolver: resolver,
}
}
@@ -61,20 +101,42 @@ func NewClient(registry ociregistry.Interface) *Client {
// It returns an error that satisfies errors.Is(ErrNotFound) if the
// module is not present in the store at this version.
func (c *Client) GetModule(ctx context.Context, m module.Version) (*Module, error) {
- repoName := c.repoName(m.Path())
- modDesc, err := c.registry.ResolveTag(ctx, repoName, m.Version())
+ loc, err := c.resolve(m)
+ if err != nil {
+ return nil, err
+ }
+ rd, err := loc.Registry.GetTag(ctx, loc.Repository, loc.Tag)
if err != nil {
if errors.Is(err, ociregistry.ErrManifestUnknown) {
return nil, fmt.Errorf("module %v: %w", m, ErrNotFound)
}
return nil, fmt.Errorf("module %v: %v", m, err)
}
- manifest, err := fetchManifest(ctx, c.registry, repoName, modDesc)
+ defer rd.Close()
+ data, err := io.ReadAll(rd)
if err != nil {
- return nil, fmt.Errorf("cannot unmarshal manifest data: %v", err)
+ return nil, err
+ }
+
+ return c.GetModuleWithManifest(ctx, m, data, rd.Descriptor().MediaType)
+}
+
+// GetModuleWithManifest returns a module instance given
+// the top level manifest contents, without querying its tag.
+// It assumes that the module will be tagged with the given
+// version.
+func (c *Client) GetModuleWithManifest(ctx context.Context, m module.Version, contents []byte, mediaType string) (*Module, error) {
+ loc, err := c.resolve(m)
+ if err != nil {
+ return nil, err
+ }
+
+ manifest, err := unmarshalManifest(ctx, contents, mediaType)
+ if err != nil {
+ return nil, fmt.Errorf("module %v: %v", m, err)
}
if !isModule(manifest) {
- return nil, fmt.Errorf("%v does not resolve to a manifest (media type is %q)", m, modDesc.MediaType)
+ return nil, fmt.Errorf("%v does not resolve to a manifest (media type is %q)", m, mediaType)
}
// TODO check type of manifest too.
if n := len(manifest.Layers); n != 2 {
@@ -85,44 +147,57 @@ func (c *Client) GetModule(ctx context.Context, m module.Version) (*Module, erro
}
// TODO check that the other blobs are of the expected type (application/zip).
return &Module{
- client: c,
- repo: repoName,
- manifest: *manifest,
+ client: c,
+ loc: loc,
+ version: m,
+ manifest: *manifest,
+ manifestDigest: digest.FromBytes(contents),
}, nil
}
-func (c *Client) repoName(modPath string) string {
- path, _, _ := module.SplitPathVersion(modPath)
- return path
-}
-
-// ModuleVersions returns all the versions for the module with the given path.
+// ModuleVersions returns all the versions for the module with the given path
+// sorted in semver order.
+// If m has a major version suffix, only versions with that major version will
+// be returned.
func (c *Client) ModuleVersions(ctx context.Context, m string) ([]string, error) {
- _, major, ok := module.SplitPathVersion(m)
- if !ok {
- return nil, fmt.Errorf("non-canonical module path %q", m)
- }
- var tags []string
- iter := c.registry.Tags(ctx, c.repoName(m))
- for {
- tag, ok := iter.Next()
- if !ok {
- break
- }
- if semver.IsValid(tag) && semver.Major(tag) == major {
- tags = append(tags, tag)
- }
+ mpath, major, hasMajor := module.SplitPathVersion(m)
+ if !hasMajor {
+ mpath = m
}
- if err := iter.Error(); err != nil {
+ loc, err := c.resolver.ResolveToRegistry(mpath, "")
+ if err != nil {
return nil, err
}
- return tags, nil
+ versions := []string{}
+ // Note: do not use c.repoName because that always expects
+ // a module path with a major version.
+ iter := loc.Registry.Tags(ctx, loc.Repository, "")
+ var _err error
+ iter(func(tag string, err error) bool {
+ if err != nil {
+ _err = err
+ return false
+ }
+ vers, ok := strings.CutPrefix(tag, loc.Tag)
+ if !ok || !semver.IsValid(vers) {
+ return true
+ }
+ if !hasMajor || semver.Major(vers) == major {
+ versions = append(versions, vers)
+ }
+ return true
+ })
+ if _err != nil && !isNotExist(_err) {
+ return nil, _err
+ }
+ semver.Sort(versions)
+ return versions, nil
}
-// CheckedModule represents module content that has passed the same
+// checkedModule represents module content that has passed the same
// checks made by [Client.PutModule]. The caller should not mutate
// any of the values returned by its methods.
-type CheckedModule struct {
+type checkedModule struct {
mv module.Version
blobr io.ReaderAt
size int64
@@ -131,37 +206,20 @@ type CheckedModule struct {
modFileContent []byte
}
-// Version returns the version that the module will be tagged as.
-func (m *CheckedModule) Version() module.Version {
- return m.mv
-}
-
-// Version returns the parsed contents of the modules cue.mod/module.cue file.
-func (m *CheckedModule) ModFile() *modfile.File {
- return m.modFile
-}
-
-// ModFileContent returns the raw contents of the modules cue.mod/module.cue file.
-func (m *CheckedModule) ModFileContent() []byte {
- return m.modFileContent
-}
-
-// Zip returns the reader for the module's zip archive.
-func (m *CheckedModule) Zip() *zip.Reader {
- return m.zipr
-}
-
-// PutCheckedModule is like [Client.PutModule] except that it allows the
+// putCheckedModule is like [Client.PutModule] except that it allows the
// caller to do some additional checks (see [CheckModule] for more info).
-func (c *Client) PutCheckedModule(ctx context.Context, m *CheckedModule) error {
- repoName := c.repoName(m.mv.Path())
+func (c *Client) putCheckedModule(ctx context.Context, m *checkedModule) error {
+ loc, err := c.resolve(m.mv)
+ if err != nil {
+ return err
+ }
selfDigest, err := digest.FromReader(io.NewSectionReader(m.blobr, 0, m.size))
if err != nil {
return fmt.Errorf("cannot read module zip file: %v", err)
}
// Upload the actual module's content
// TODO should we use a custom media type for this?
- configDesc, err := c.scratchConfig(ctx, repoName, moduleArtifactType)
+ configDesc, err := c.scratchConfig(ctx, loc, moduleArtifactType)
if err != nil {
return fmt.Errorf("cannot make scratch config: %v", err)
}
@@ -183,17 +241,17 @@ func (c *Client) PutCheckedModule(ctx context.Context, m *CheckedModule) error {
}},
}
- if _, err := c.registry.PushBlob(ctx, repoName, manifest.Layers[0], io.NewSectionReader(m.blobr, 0, m.size)); err != nil {
+ if _, err := loc.Registry.PushBlob(ctx, loc.Repository, manifest.Layers[0], io.NewSectionReader(m.blobr, 0, m.size)); err != nil {
return fmt.Errorf("cannot push module contents: %v", err)
}
- if _, err := c.registry.PushBlob(ctx, repoName, manifest.Layers[1], bytes.NewReader(m.modFileContent)); err != nil {
+ if _, err := loc.Registry.PushBlob(ctx, loc.Repository, manifest.Layers[1], bytes.NewReader(m.modFileContent)); err != nil {
return fmt.Errorf("cannot push cue.mod/module.cue contents: %v", err)
}
manifestData, err := json.Marshal(manifest)
if err != nil {
return fmt.Errorf("cannot marshal manifest: %v", err)
}
- if _, err := c.registry.PushManifest(ctx, repoName, m.mv.Version(), manifestData, ocispec.MediaTypeImageManifest); err != nil {
+ if _, err := loc.Registry.PushManifest(ctx, loc.Repository, loc.Tag, manifestData, ocispec.MediaTypeImageManifest); err != nil {
return fmt.Errorf("cannot tag %v: %v", m.mv, err)
}
return nil
@@ -206,21 +264,21 @@ func (c *Client) PutCheckedModule(ctx context.Context, m *CheckedModule) error {
// TODO check deps are resolved correctly? Or is that too domain-specific for this package?
// Is it a problem to call zip.CheckZip twice?
func (c *Client) PutModule(ctx context.Context, m module.Version, r io.ReaderAt, size int64) error {
- cm, err := CheckModule(m, r, size)
+ cm, err := checkModule(m, r, size)
if err != nil {
return err
}
- return c.PutCheckedModule(ctx, cm)
+ return c.putCheckedModule(ctx, cm)
}
-// CheckModule checks a module's zip file before uploading it.
+// checkModule checks a module's zip file before uploading it.
// This does the same checks that [Client.PutModule] does, so
// can be used to avoid doing duplicate work when an uploader
// wishes to do more checks that are implemented by that method.
//
// Note that the returned [CheckedModule] value contains r, so will
// be invalidated if r is closed.
-func CheckModule(m module.Version, blobr io.ReaderAt, size int64) (*CheckedModule, error) {
+func checkModule(m module.Version, blobr io.ReaderAt, size int64) (*checkedModule, error) {
zipr, modf, _, err := modzip.CheckZip(m, blobr, size)
if err != nil {
return nil, fmt.Errorf("module zip file check failed: %v", err)
@@ -229,7 +287,7 @@ func CheckModule(m module.Version, blobr io.ReaderAt, size int64) (*CheckedModul
if err != nil {
return nil, fmt.Errorf("module.cue file check failed: %v", err)
}
- return &CheckedModule{
+ return &checkedModule{
mv: m,
blobr: blobr,
size: size,
@@ -281,14 +339,20 @@ func checkModFile(m module.Version, f *zip.File) ([]byte, *modfile.File, error)
// Module represents a CUE module instance.
type Module struct {
- client *Client
- repo string
- manifest ocispec.Manifest
+ client *Client
+ loc RegistryLocation
+ version module.Version
+ manifest ocispec.Manifest
+ manifestDigest ociregistry.Digest
+}
+
+func (m *Module) Version() module.Version {
+ return m.version
}
// ModuleFile returns the contents of the cue.mod/module.cue file.
func (m *Module) ModuleFile(ctx context.Context) ([]byte, error) {
- r, err := m.client.registry.GetBlob(ctx, m.repo, m.manifest.Layers[1].Digest)
+ r, err := m.loc.Registry.GetBlob(ctx, m.loc.Repository, m.manifest.Layers[1].Digest)
if err != nil {
return nil, err
}
@@ -301,29 +365,47 @@ func (m *Module) ModuleFile(ctx context.Context) ([]byte, error) {
// and the contents should not be assumed to be correct until the close
// error has been checked.
func (m *Module) GetZip(ctx context.Context) (io.ReadCloser, error) {
- return m.client.registry.GetBlob(ctx, m.repo, m.manifest.Layers[0].Digest)
+ return m.loc.Registry.GetBlob(ctx, m.loc.Repository, m.manifest.Layers[0].Digest)
}
-func fetchManifest(ctx context.Context, r ociregistry.Interface, repoName string, desc ocispec.Descriptor) (*ociregistry.Manifest, error) {
- if !isJSON(desc.MediaType) {
- return nil, fmt.Errorf("expected JSON media type but %q does not look like JSON", desc.MediaType)
- }
- rd, err := r.GetManifest(ctx, repoName, desc.Digest)
+// ManifestDigest returns the digest of the manifest representing
+// the module.
+func (m *Module) ManifestDigest() ociregistry.Digest {
+ return m.manifestDigest
+}
+
+func (c *Client) resolve(m module.Version) (RegistryLocation, error) {
+ loc, err := c.resolver.ResolveToRegistry(m.BasePath(), m.Version())
if err != nil {
- return nil, err
+ return RegistryLocation{}, err
}
- defer rd.Close()
- data, err := io.ReadAll(rd)
- if err != nil {
- return nil, err
+ if loc.Registry == nil {
+ return RegistryLocation{}, fmt.Errorf("module %v unexpectedly resolved to nil registry", m)
+ }
+ if loc.Repository == "" {
+ return RegistryLocation{}, fmt.Errorf("module %v unexpectedly resolved to empty location", m)
+ }
+ if loc.Tag == "" {
+ return RegistryLocation{}, fmt.Errorf("module %v unexpectedly resolved to empty tag", m)
+ }
+ return loc, nil
+}
+
+func unmarshalManifest(ctx context.Context, data []byte, mediaType string) (*ociregistry.Manifest, error) {
+ if !isJSON(mediaType) {
+ return nil, fmt.Errorf("expected JSON media type but %q does not look like JSON", mediaType)
}
var m ociregistry.Manifest
if err := json.Unmarshal(data, &m); err != nil {
- return nil, fmt.Errorf("cannot decode %s content as manifest: %v", desc.MediaType, err)
+ return nil, fmt.Errorf("cannot decode %s content as manifest: %v", mediaType, err)
}
return &m, nil
}
+func isNotExist(err error) bool {
+ return errors.Is(err, ociregistry.ErrNameUnknown) || errors.Is(err, ociregistry.ErrNameInvalid)
+}
+
func isModule(m *ocispec.Manifest) bool {
// TODO check m.ArtifactType too when that's defined?
// See https://github.com/opencontainers/image-spec/blob/main/manifest.md#image-manifest-property-descriptions
@@ -344,7 +426,7 @@ func isJSON(mediaType string) bool {
// scratchConfig returns a dummy configuration consisting only of the
// two-byte configuration {}.
// https://github.com/opencontainers/image-spec/blob/main/manifest.md#example-of-a-scratch-config-or-layer-descriptor
-func (c *Client) scratchConfig(ctx context.Context, repoName string, mediaType string) (ocispec.Descriptor, error) {
+func (c *Client) scratchConfig(ctx context.Context, loc RegistryLocation, mediaType string) (ocispec.Descriptor, error) {
// TODO check if it exists already to avoid push?
content := []byte("{}")
desc := ocispec.Descriptor{
@@ -352,8 +434,23 @@ func (c *Client) scratchConfig(ctx context.Context, repoName string, mediaType s
MediaType: mediaType,
Size: int64(len(content)),
}
- if _, err := c.registry.PushBlob(ctx, repoName, desc, bytes.NewReader(content)); err != nil {
+ if _, err := loc.Registry.PushBlob(ctx, loc.Repository, desc, bytes.NewReader(content)); err != nil {
return ocispec.Descriptor{}, err
}
return desc, nil
}
+
+// singleResolver implements Resolver by always returning R,
+// and mapping module paths directly to repository paths in
+// the registry.
+type singleResolver struct {
+ R ociregistry.Interface
+}
+
+func (r singleResolver) ResolveToRegistry(mpath, vers string) (RegistryLocation, error) {
+ return RegistryLocation{
+ Registry: r.R,
+ Repository: mpath,
+ Tag: vers,
+ }, nil
+}
diff --git a/vendor/cuelang.org/go/mod/module/dirfs.go b/vendor/cuelang.org/go/mod/module/dirfs.go
new file mode 100644
index 00000000..42cbbbd1
--- /dev/null
+++ b/vendor/cuelang.org/go/mod/module/dirfs.go
@@ -0,0 +1,56 @@
+package module
+
+import (
+ "io/fs"
+ "os"
+)
+
+// SourceLoc represents the location of some CUE source code.
+type SourceLoc struct {
+ // FS is the filesystem containing the source.
+ FS fs.FS
+ // Dir is the directory within the above filesystem.
+ Dir string
+}
+
+// OSRootFS can be implemented by an [fs.FS]
+// implementation to return its root directory as
+// an OS file path.
+type OSRootFS interface {
+ fs.FS
+
+ // OSRoot returns the root directory of the FS
+ // as an OS file path. If it wasn't possible to do that,
+ // it returns the empty string.
+ OSRoot() string
+}
+
+// OSDirFS is like [os.DirFS] but the returned value implements
+// [OSRootFS] by returning p.
+func OSDirFS(p string) fs.FS {
+ return dirFSImpl{
+ augmentedFS: os.DirFS(p).(augmentedFS),
+ osRoot: p,
+ }
+}
+
+var _ interface {
+ augmentedFS
+ OSRootFS
+} = dirFSImpl{}
+
+type augmentedFS interface {
+ fs.FS
+ fs.StatFS
+ fs.ReadDirFS
+ fs.ReadFileFS
+}
+
+type dirFSImpl struct {
+ osRoot string
+ augmentedFS
+}
+
+func (fsys dirFSImpl) OSRoot() string {
+ return fsys.osRoot
+}
diff --git a/vendor/cuelang.org/go/internal/mod/module/error.go b/vendor/cuelang.org/go/mod/module/error.go
similarity index 79%
rename from vendor/cuelang.org/go/internal/mod/module/error.go
rename to vendor/cuelang.org/go/mod/module/error.go
index db3ef1b2..3ab91a44 100644
--- a/vendor/cuelang.org/go/internal/mod/module/error.go
+++ b/vendor/cuelang.org/go/mod/module/error.go
@@ -1,7 +1,6 @@
package module
import (
- "errors"
"fmt"
)
@@ -12,20 +11,6 @@ type ModuleError struct {
Err error
}
-// VersionError returns a ModuleError derived from a Version and error,
-// or err itself if it is already such an error.
-func VersionError(v Version, err error) error {
- var mErr *ModuleError
- if errors.As(err, &mErr) && mErr.Path == v.Path() && mErr.Version == v.Version() {
- return err
- }
- return &ModuleError{
- Path: v.Path(),
- Version: v.Version(),
- Err: err,
- }
-}
-
func (e *ModuleError) Error() string {
if v, ok := e.Err.(*InvalidVersionError); ok {
return fmt.Sprintf("%s@%s: invalid version: %v", e.Path, v.Version, v.Err)
diff --git a/vendor/cuelang.org/go/mod/module/escape.go b/vendor/cuelang.org/go/mod/module/escape.go
new file mode 100644
index 00000000..7c0c4420
--- /dev/null
+++ b/vendor/cuelang.org/go/mod/module/escape.go
@@ -0,0 +1,68 @@
+package module
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ "cuelang.org/go/internal/mod/semver"
+)
+
+// EscapePath returns the escaped form of the given module path
+// (without the major version suffix).
+// It fails if the module path is invalid.
+func EscapePath(path string) (escaped string, err error) {
+ if err := CheckPathWithoutVersion(path); err != nil {
+ return "", err
+ }
+ // Technically there's no need to escape capital letters because CheckPath
+ // doesn't allow them, but let's be defensive.
+ return escapeString(path)
+}
+
+// EscapeVersion returns the escaped form of the given module version.
+// Versions must be in (possibly non-canonical) semver form and must be valid file names
+// and not contain exclamation marks.
+func EscapeVersion(v string) (escaped string, err error) {
+ if !semver.IsValid(v) {
+ return "", &InvalidVersionError{
+ Version: v,
+ Err: fmt.Errorf("version is not in semver syntax"),
+ }
+ }
+ if err := checkElem(v, filePath); err != nil || strings.Contains(v, "!") {
+ return "", &InvalidVersionError{
+ Version: v,
+ Err: fmt.Errorf("disallowed version string"),
+ }
+ }
+ return escapeString(v)
+}
+
+func escapeString(s string) (escaped string, err error) {
+ haveUpper := false
+ for _, r := range s {
+ if r == '!' || r >= utf8.RuneSelf {
+ // This should be disallowed by CheckPath, but diagnose anyway.
+ // The correctness of the escaping loop below depends on it.
+ return "", fmt.Errorf("internal error: inconsistency in EscapePath")
+ }
+ if 'A' <= r && r <= 'Z' {
+ haveUpper = true
+ }
+ }
+
+ if !haveUpper {
+ return s, nil
+ }
+
+ var buf []byte
+ for _, r := range s {
+ if 'A' <= r && r <= 'Z' {
+ buf = append(buf, '!', byte(r+'a'-'A'))
+ } else {
+ buf = append(buf, byte(r))
+ }
+ }
+ return string(buf), nil
+}
diff --git a/vendor/cuelang.org/go/mod/module/module.go b/vendor/cuelang.org/go/mod/module/module.go
new file mode 100644
index 00000000..1807edc6
--- /dev/null
+++ b/vendor/cuelang.org/go/mod/module/module.go
@@ -0,0 +1,272 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package module defines the [Version] type along with support code.
+//
+// WARNING: THIS PACKAGE IS EXPERIMENTAL.
+// ITS API MAY CHANGE AT ANY TIME.
+//
+// The [Version] type holds a pair of module path and version.
+// The module path conforms to the checks implemented by [Check].
+//
+// # Escaped Paths
+//
+// Module versions appear as substrings of file system paths (as stored by
+// the modcache package).
+// In general we cannot rely on file systems to be case-sensitive. Although
+// module paths cannot currently contain upper case characters because
+// OCI registries forbid that, versions can. That
+// is, we cannot rely on the file system to keep foo.com/v@v1.0.0-PRE and
+// foo.com/v@v1.0.0-PRE separate. Windows and macOS don't. Instead, we must
+// never require two different casings of a file path.
+//
+// One possibility would be to make the escaped form be the lowercase
+// hexadecimal encoding of the actual path bytes. This would avoid ever
+// needing different casings of a file path, but it would be fairly illegible
+// to most programmers when those paths appeared in the file system
+// (including in file paths in compiler errors and stack traces)
+// in web server logs, and so on. Instead, we want a safe escaped form that
+// leaves most paths unaltered.
+//
+// The safe escaped form is to replace every uppercase letter
+// with an exclamation mark followed by the letter's lowercase equivalent.
+//
+// For example,
+//
+// foo.com/v@v1.0.0-PRE -> foo.com/v@v1.0.0-!p!r!e
+//
+// Versions that avoid upper-case letters are left unchanged.
+// Note that because import paths are ASCII-only and avoid various
+// problematic punctuation (like : < and >), the escaped form is also ASCII-only
+// and avoids the same problematic punctuation.
+//
+// Neither versions nor module paths allow exclamation marks, so there is no
+// need to define how to escape a literal !.
+//
+// # Unicode Restrictions
+//
+// Today, paths are disallowed from using Unicode.
+//
+// Although paths are currently disallowed from using Unicode,
+// we would like at some point to allow Unicode letters as well, to assume that
+// file systems and URLs are Unicode-safe (storing UTF-8), and apply
+// the !-for-uppercase convention for escaping them in the file system.
+// But there are at least two subtle considerations.
+//
+// First, note that not all case-fold equivalent distinct runes
+// form an upper/lower pair.
+// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin)
+// are three distinct runes that case-fold to each other.
+// When we do add Unicode letters, we must not assume that upper/lower
+// are the only case-equivalent pairs.
+// Perhaps the Kelvin symbol would be disallowed entirely, for example.
+// Or perhaps it would escape as "!!k", or perhaps as "(212A)".
+//
+// Second, it would be nice to allow Unicode marks as well as letters,
+// but marks include combining marks, and then we must deal not
+// only with case folding but also normalization: both U+00E9 ('é')
+// and U+0065 U+0301 ('e' followed by combining acute accent)
+// look the same on the page and are treated by some file systems
+// as the same path. If we do allow Unicode marks in paths, there
+// must be some kind of normalization to allow only one canonical
+// encoding of any character used in an import path.
+package module
+
+// IMPORTANT NOTE
+//
+// This file essentially defines the set of valid import paths for the cue command.
+// There are many subtle considerations, including Unicode ambiguity,
+// security, network, and file system representations.
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "cuelang.org/go/internal/mod/semver"
+)
+
+// A Version (for clients, a module.Version) is defined by a module path and version pair.
+// These are stored in their plain (unescaped) form.
+// This type is comparable.
+type Version struct {
+ path string
+ version string
+}
+
+// Path returns the module path part of the Version,
+// which always includes the major version suffix
+// unless a module path, like "github.com/foo/bar@v0".
+// Note that in general the path should include the major version suffix
+// even though it's implied from the version. The Canonical
+// method can be used to add the major version suffix if not present.
+// The BasePath method can be used to obtain the path without
+// the suffix.
+func (m Version) Path() string {
+ return m.path
+}
+
+// Equal reports whether m is equal to m1.
+func (m Version) Equal(m1 Version) bool {
+ return m.path == m1.path && m.version == m1.version
+}
+
+// BasePath returns the path part of m without its major version suffix.
+func (m Version) BasePath() string {
+ if m.IsLocal() {
+ return m.path
+ }
+ basePath, _, ok := SplitPathVersion(m.path)
+ if !ok {
+ panic(fmt.Errorf("broken invariant: failed to split version in %q", m.path))
+ }
+ return basePath
+}
+
+// Version returns the version part of m. This is either
+// a canonical semver version or "none" or the empty string.
+func (m Version) Version() string {
+ return m.version
+}
+
+// IsValid reports whether m is non-zero.
+func (m Version) IsValid() bool {
+ return m.path != ""
+}
+
+// IsCanonical reports whether m is valid and has a canonical
+// semver version.
+func (m Version) IsCanonical() bool {
+ return m.IsValid() && m.version != "" && m.version != "none"
+}
+
+func (m Version) IsLocal() bool {
+ return m.path == "local"
+}
+
+// String returns the string form of the Version:
+// (Path@Version, or just Path if Version is empty).
+func (m Version) String() string {
+ if m.version == "" {
+ return m.path
+ }
+ return m.BasePath() + "@" + m.version
+}
+
+func MustParseVersion(s string) Version {
+ v, err := ParseVersion(s)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// ParseVersion parses a $module@$version
+// string into a Version.
+// The version must be canonical (i.e. it can't be
+// just a major version).
+func ParseVersion(s string) (Version, error) {
+ basePath, vers, ok := SplitPathVersion(s)
+ if !ok {
+ return Version{}, fmt.Errorf("invalid module path@version %q", s)
+ }
+ if semver.Canonical(vers) != vers {
+ return Version{}, fmt.Errorf("module version in %q is not canonical", s)
+ }
+ return Version{basePath + "@" + semver.Major(vers), vers}, nil
+}
+
+func MustNewVersion(path string, version string) Version {
+ v, err := NewVersion(path, version)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// NewVersion forms a Version from the given path and version.
+// The version must be canonical, empty or "none".
+// If the path doesn't have a major version suffix, one will be added
+// if the version isn't empty; if the version is empty, it's an error.
+//
+// As a special case, the path "local" is used to mean all packages
+// held in the gen, pkg and usr directories.
+func NewVersion(path string, version string) (Version, error) {
+ switch {
+ case path == "local":
+ if version != "" {
+ return Version{}, fmt.Errorf("module 'local' cannot have version")
+ }
+ case version != "" && version != "none":
+ if !semver.IsValid(version) {
+ return Version{}, fmt.Errorf("version %q (of module %q) is not well formed", version, path)
+ }
+ if semver.Canonical(version) != version {
+ return Version{}, fmt.Errorf("version %q (of module %q) is not canonical", version, path)
+ }
+ maj := semver.Major(version)
+ _, vmaj, ok := SplitPathVersion(path)
+ if ok && maj != vmaj {
+ return Version{}, fmt.Errorf("mismatched major version suffix in %q (version %v)", path, version)
+ }
+ if !ok {
+ fullPath := path + "@" + maj
+ if _, _, ok := SplitPathVersion(fullPath); !ok {
+ return Version{}, fmt.Errorf("cannot form version path from %q, version %v", path, version)
+ }
+ path = fullPath
+ }
+ default:
+ base, _, ok := SplitPathVersion(path)
+ if !ok {
+ return Version{}, fmt.Errorf("path %q has no major version", path)
+ }
+ if base == "local" {
+ return Version{}, fmt.Errorf("module 'local' cannot have version")
+ }
+ }
+ if version == "" {
+ if err := CheckPath(path); err != nil {
+ return Version{}, err
+ }
+ } else {
+ if err := Check(path, version); err != nil {
+ return Version{}, err
+ }
+ }
+ return Version{
+ path: path,
+ version: version,
+ }, nil
+}
+
+// Sort sorts the list by Path, breaking ties by comparing Version fields.
+// The Version fields are interpreted as semantic versions (using semver.Compare)
+// optionally followed by a tie-breaking suffix introduced by a slash character,
+// like in "v0.0.1/module.cue".
+func Sort(list []Version) {
+ sort.Slice(list, func(i, j int) bool {
+ mi := list[i]
+ mj := list[j]
+ if mi.path != mj.path {
+ return mi.path < mj.path
+ }
+ // To help go.sum formatting, allow version/file.
+ // Compare semver prefix by semver rules,
+ // file by string order.
+ vi := mi.version
+ vj := mj.version
+ var fi, fj string
+ if k := strings.Index(vi, "/"); k >= 0 {
+ vi, fi = vi[:k], vi[k:]
+ }
+ if k := strings.Index(vj, "/"); k >= 0 {
+ vj, fj = vj[:k], vj[k:]
+ }
+ if vi != vj {
+ return semver.Compare(vi, vj) < 0
+ }
+ return fi < fj
+ })
+}
diff --git a/vendor/cuelang.org/go/internal/mod/module/path.go b/vendor/cuelang.org/go/mod/module/path.go
similarity index 81%
rename from vendor/cuelang.org/go/internal/mod/module/path.go
rename to vendor/cuelang.org/go/mod/module/path.go
index c63e6011..b3cfd84d 100644
--- a/vendor/cuelang.org/go/internal/mod/module/path.go
+++ b/vendor/cuelang.org/go/mod/module/path.go
@@ -152,10 +152,10 @@ func CheckPathWithoutVersion(basePath string) (err error) {
// (ASCII digits) and must not begin with a leading zero.
//
// Third, no path element may begin with a dot.
-//
-// TODO we probably need function to check module paths that
-// may not contain a major version.
func CheckPath(mpath string) (err error) {
+ if mpath == "local" {
+ return nil
+ }
defer func() {
if err != nil {
err = &InvalidPathError{Kind: "module", Path: mpath, Err: err}
@@ -192,21 +192,17 @@ func CheckPath(mpath string) (err error) {
//
// The element prefix up to the first dot must not be a reserved file name
// on Windows, regardless of case (CON, com1, NuL, and so on).
-func CheckImportPath(path0 string) error {
- path := path0
- basePath, vers, ok := SplitPathVersion(path)
- if ok {
- if semver.Major(vers) != vers {
- return &InvalidPathError{
- Kind: "import",
- Path: path,
- Err: fmt.Errorf("import paths can only contain a major version specifier"),
- }
+func CheckImportPath(path string) error {
+ parts := ParseImportPath(path)
+ if semver.Major(parts.Version) != parts.Version {
+ return &InvalidPathError{
+ Kind: "import",
+ Path: path,
+ Err: fmt.Errorf("import paths can only contain a major version specifier"),
}
- path = basePath
}
- if err := checkPath(path, importPath); err != nil {
- return &InvalidPathError{Kind: "import", Path: path0, Err: err}
+ if err := checkPath(parts.Path, importPath); err != nil {
+ return &InvalidPathError{Kind: "import", Path: path, Err: err}
}
return nil
}
@@ -397,12 +393,87 @@ func SplitPathVersion(path string) (prefix, version string, ok bool) {
return path[:split], path[split+1:], true
}
-// MatchPathMajor reports whether the semantic version v
-// matches the path major version pathMajor.
-//
-// MatchPathMajor returns true if and only if CheckPathMajor returns nil.
-func MatchPathMajor(v, pathMajor string) bool {
- return CheckPathMajor(v, pathMajor) == nil
+// ImportPath holds the various components of an import path.
+type ImportPath struct {
+ // Path holds the base package/directory path, similar
+ // to that returned by [Version.BasePath].
+ Path string
+
+ // Version holds the version of the import
+ // or empty if not present. Note: in general this
+ // will contain a major version only, but there's no
+ // guarantee of that.
+ Version string
+
+ // Qualifier holds the package qualifier within the path.
+ // This will be derived from the last component of Path
+ // if it wasn't explicitly present in the import path.
+ // This is not guaranteed to be a valid CUE identifier.
+ Qualifier string
+
+ // ExplicitQualifier holds whether the qualifier was explicitly
+ // present in the import path.
+ ExplicitQualifier bool
+}
+
+// Canonical returns the canonical form of the import path.
+// Specifically, it will only include the package qualifier
+// if it's different from the last component of parts.Path.
+func (parts ImportPath) Canonical() ImportPath {
+ if i := strings.LastIndex(parts.Path, "/"); i >= 0 && parts.Path[i+1:] == parts.Qualifier {
+ parts.Qualifier = ""
+ parts.ExplicitQualifier = false
+ }
+ return parts
+}
+
+// Unqualified returns the import path without any package qualifier.
+func (parts ImportPath) Unqualified() ImportPath {
+ parts.Qualifier = ""
+ parts.ExplicitQualifier = false
+ return parts
+}
+
+func (parts ImportPath) String() string {
+ if parts.Version == "" && !parts.ExplicitQualifier {
+ // Fast path.
+ return parts.Path
+ }
+ var buf strings.Builder
+ buf.WriteString(parts.Path)
+ if parts.Version != "" {
+ buf.WriteByte('@')
+ buf.WriteString(parts.Version)
+ }
+ if parts.ExplicitQualifier {
+ buf.WriteByte(':')
+ buf.WriteString(parts.Qualifier)
+ }
+ return buf.String()
+}
+
+// ParseImportPath returns the various components of an import path.
+func ParseImportPath(p string) ImportPath {
+ var parts ImportPath
+ pathWithoutQualifier := p
+ if i := strings.LastIndexAny(p, "/:"); i >= 0 && p[i] == ':' {
+ pathWithoutQualifier = p[:i]
+ parts.Qualifier = p[i+1:]
+ parts.ExplicitQualifier = true
+ }
+ parts.Path = pathWithoutQualifier
+ if path, version, ok := SplitPathVersion(pathWithoutQualifier); ok {
+ parts.Version = version
+ parts.Path = path
+ }
+ if !parts.ExplicitQualifier {
+ if i := strings.LastIndex(parts.Path, "/"); i >= 0 {
+ parts.Qualifier = parts.Path[i+1:]
+ } else {
+ parts.Qualifier = parts.Path
+ }
+ }
+ return parts
}
// CheckPathMajor returns a non-nil error if the semantic version v
diff --git a/vendor/cuelang.org/go/internal/mod/module/versions.go b/vendor/cuelang.org/go/mod/module/versions.go
similarity index 100%
rename from vendor/cuelang.org/go/internal/mod/module/versions.go
rename to vendor/cuelang.org/go/mod/module/versions.go
diff --git a/vendor/cuelang.org/go/internal/mod/modzip/zip.go b/vendor/cuelang.org/go/mod/modzip/zip.go
similarity index 97%
rename from vendor/cuelang.org/go/internal/mod/modzip/zip.go
rename to vendor/cuelang.org/go/mod/modzip/zip.go
index ef381f86..c23a2ea0 100644
--- a/vendor/cuelang.org/go/internal/mod/modzip/zip.go
+++ b/vendor/cuelang.org/go/mod/modzip/zip.go
@@ -4,11 +4,14 @@
// Package modzip provides functions for creating and extracting module zip files.
//
+// WARNING: THIS PACKAGE IS EXPERIMENTAL.
+// ITS API MAY CHANGE AT ANY TIME.
+//
// Module zip files have several restrictions listed below. These are necessary
// to ensure that module zip files can be extracted consistently on supported
// platforms and file systems.
//
-// • All file paths within a zip file must be valid (see cuelang.org/go/internal/mod/module.CheckFilePath).
+// • All file paths within a zip file must be valid (see cuelang.org/go/mod/module.CheckFilePath).
//
// • No two file paths may be equal under Unicode case-folding (see
// strings.EqualFold).
@@ -50,7 +53,7 @@ import (
"unicode"
"unicode/utf8"
- "cuelang.org/go/internal/mod/module"
+ "cuelang.org/go/mod/module"
)
const (
@@ -71,6 +74,9 @@ const (
// File provides an abstraction for a file in a directory, zip, or anything
// else that looks like a file - it knows how to open files represented
// as a particular type without being a file itself.
+//
+// Deprecated: this will be removed in a future API iteration that reduces
+// dependence on zip archives.
type FileIO[F any] interface {
// Path returns a clean slash-separated relative path from the module root
// directory to the file.
@@ -189,6 +195,9 @@ var (
// Note that CheckFiles will not open any files, so Create may still fail when
// CheckFiles is successful due to I/O errors, reported size differences
// or an invalid module.cue file.
+//
+// Deprecated: this will be removed in a future API iteration that reduces
+// dependence on zip archives.
func CheckFiles[F any](files []F, fio FileIO[F]) (CheckedFiles, error) {
cf, _, _ := checkFiles(files, fio)
return cf, cf.Err()
@@ -354,6 +363,9 @@ func checkFiles[F any](files []F, fio FileIO[F]) (cf CheckedFiles, validFiles []
//
// Note that CheckDir will not open any files, so CreateFromDir may still fail
// when CheckDir is successful due to I/O errors.
+//
+// Deprecated: this will be removed in a future API iteration that reduces
+// dependence on zip archives.
func CheckDir(dir string) (CheckedFiles, error) {
// List files (as CreateFromDir would) and check which ones are omitted
// or invalid.
@@ -408,8 +420,6 @@ func CheckZipFile(m module.Version, zipFile string) (CheckedFiles, error) {
//
// Note that checkZip does not read individual files, so zip.Unzip may still fail
// when checkZip is successful due to I/O errors.
-//
-// TODO update this for new semantics (no top level module directory).
func CheckZip(m module.Version, r io.ReaderAt, zipSize int64) (*zip.Reader, *zip.File, CheckedFiles, error) {
if zipSize > MaxZipFile {
cf := CheckedFiles{SizeError: fmt.Errorf("module zip file is too large (%d bytes; limit is %d bytes)", zipSize, MaxZipFile)}
@@ -508,6 +518,9 @@ func CheckZip(m module.Version, r io.ReaderAt, zipSize int64) (*zip.Reader, *zip
// In particular, Create will not include files in modules found in
// subdirectories, most files in vendor directories, or irregular files (such
// as symbolic links) in the output archive.
+//
+// Deprecated: this will be removed in a future API iteration that reduces
+// dependence on zip archives.
func Create[F any](w io.Writer, m module.Version, files []F, fio FileIO[F]) (err error) {
defer func() {
if err != nil {
@@ -862,20 +875,3 @@ func splitCUEMod(p string) (string, string) {
s = dir
}
}
-
-// ZipFileIO implements FileIO for *zip.File.
-type ZipFileIO struct {
- // StripPrefix causes the given prefix to be stripped from
- // all file names with that prefix.
- StripPrefix string
-}
-
-func (fio ZipFileIO) Path(f *zip.File) string {
- return strings.TrimPrefix(f.Name, fio.StripPrefix)
-}
-func (ZipFileIO) Lstat(f *zip.File) (os.FileInfo, error) {
- return f.FileInfo(), nil
-}
-func (ZipFileIO) Open(f *zip.File) (io.ReadCloser, error) {
- return f.Open()
-}
diff --git a/vendor/cuelang.org/go/pkg/net/host.go b/vendor/cuelang.org/go/pkg/net/host.go
index 70be5f2b..aef0ad99 100644
--- a/vendor/cuelang.org/go/pkg/net/host.go
+++ b/vendor/cuelang.org/go/pkg/net/host.go
@@ -56,7 +56,7 @@ func JoinHostPort(host, port cue.Value) (string, error) {
switch host.Kind() {
case cue.ListKind:
ipdata := netGetIP(host)
- if len(ipdata) != 4 && len(ipdata) != 16 {
+ if !ipdata.IsValid() {
err = fmt.Errorf("invalid host %s", host)
}
hostStr = ipdata.String()
diff --git a/vendor/cuelang.org/go/pkg/net/ip.go b/vendor/cuelang.org/go/pkg/net/ip.go
index 4fbb6179..89c587be 100644
--- a/vendor/cuelang.org/go/pkg/net/ip.go
+++ b/vendor/cuelang.org/go/pkg/net/ip.go
@@ -17,7 +17,7 @@ package net
import (
"fmt"
- "net"
+ "net/netip"
"cuelang.org/go/cue"
)
@@ -28,76 +28,81 @@ const (
IPv6len = 16
)
-func netGetIP(ip cue.Value) (goip net.IP) {
+func netGetIP(ip cue.Value) (goip netip.Addr) {
switch ip.Kind() {
case cue.StringKind:
s, err := ip.String()
if err != nil {
- return nil
+ return netip.Addr{}
}
- goip := net.ParseIP(s)
- if goip == nil {
- return nil
+ goip, err := netip.ParseAddr(s)
+ if err != nil {
+ return netip.Addr{}
}
return goip
case cue.BytesKind:
b, err := ip.Bytes()
if err != nil {
- return nil
+ return netip.Addr{}
}
- goip := net.ParseIP(string(b))
- if goip == nil {
- return nil
+ goip, err := netip.ParseAddr(string(b))
+ if err != nil {
+ return netip.Addr{}
}
return goip
case cue.ListKind:
iter, err := ip.List()
if err != nil {
- return nil
+ return netip.Addr{}
}
+ var bytes []byte
for iter.Next() {
v, err := iter.Value().Int64()
if err != nil {
- return nil
+ return netip.Addr{}
}
if v < 0 || 255 < v {
- return nil
+ return netip.Addr{}
}
- goip = append(goip, byte(v))
+ bytes = append(bytes, byte(v))
+ }
+ goip, ok := netip.AddrFromSlice(bytes)
+ if !ok {
+ return netip.Addr{}
}
return goip
default:
// TODO: return canonical invalid type.
- return nil
+ return netip.Addr{}
}
}
-func netGetIPCIDR(ip cue.Value) (gonet *net.IPNet, err error) {
+func netGetIPCIDR(ip cue.Value) (gonet *netip.Prefix, err error) {
switch ip.Kind() {
case cue.StringKind:
s, err := ip.String()
if err != nil {
return nil, err
}
- _, gonet, err := net.ParseCIDR(s)
+ cidr, err := netip.ParsePrefix(s)
if err != nil {
return nil, err
}
- return gonet, nil
+ return &cidr, nil
case cue.BytesKind:
b, err := ip.Bytes()
if err != nil {
return nil, err
}
- _, gonet, err := net.ParseCIDR(string(b))
+ cidr, err := netip.ParsePrefix(string(b))
if err != nil {
return nil, err
}
- return gonet, nil
+ return &cidr, nil
default:
// TODO: return canonical invalid type.
@@ -111,14 +116,14 @@ func netGetIPCIDR(ip cue.Value) (gonet *net.IPNet, err error) {
// If s is not a valid textual representation of an IP address,
// ParseIP returns nil.
func ParseIP(s string) ([]uint, error) {
- goip := net.ParseIP(s)
- if goip == nil {
+ goip, err := netip.ParseAddr(s)
+ if err != nil {
return nil, fmt.Errorf("invalid IP address %q", s)
}
- return netToList(goip), nil
+ return netToList(goip.AsSlice()), nil
}
-func netToList(ip net.IP) []uint {
+func netToList(ip []byte) []uint {
a := make([]uint, len(ip))
for i, p := range ip {
a[i] = uint(p)
@@ -126,20 +131,27 @@ func netToList(ip net.IP) []uint {
return a
}
-// IPv4 reports whether s is a valid IPv4 address.
+// IPv4 reports whether ip is a valid IPv4 address.
//
// The address may be a string or list of bytes.
func IPv4(ip cue.Value) bool {
// TODO: convert to native CUE.
- return netGetIP(ip).To4() != nil
+ return netGetIP(ip).Is4()
+}
+
+// IPv6 reports whether ip is a valid IPv6 address.
+//
+// The address may be a string or list of bytes.
+func IPv6(ip cue.Value) bool {
+ return netGetIP(ip).Is6()
}
-// IP reports whether s is a valid IPv4 or IPv6 address.
+// IP reports whether ip is a valid IPv4 or IPv6 address.
//
// The address may be a string or list of bytes.
func IP(ip cue.Value) bool {
// TODO: convert to native CUE.
- return netGetIP(ip) != nil
+ return netGetIP(ip).IsValid()
}
// IPCIDR reports whether ip is a valid IPv4 or IPv6 address with CIDR subnet notation.
@@ -196,24 +208,25 @@ func UnspecifiedIP(ip cue.Value) bool {
// 4-byte representation.
func ToIP4(ip cue.Value) ([]uint, error) {
ipdata := netGetIP(ip)
- if ipdata == nil {
+ if !ipdata.IsValid() {
return nil, fmt.Errorf("invalid IP %q", ip)
}
- ipv4 := ipdata.To4()
- if ipv4 == nil {
+ if !ipdata.Is4() {
return nil, fmt.Errorf("cannot convert %q to IPv4", ipdata)
}
- return netToList(ipv4), nil
+ as4 := ipdata.As4()
+ return netToList(as4[:]), nil
}
// ToIP16 converts a given IP address, which may be a string or a list, to its
// 16-byte representation.
func ToIP16(ip cue.Value) ([]uint, error) {
ipdata := netGetIP(ip)
- if ipdata == nil {
+ if !ipdata.IsValid() {
return nil, fmt.Errorf("invalid IP %q", ip)
}
- return netToList(ipdata), nil
+ as16 := ipdata.As16()
+ return netToList(as16[:]), nil
}
// IPString returns the string form of the IP address ip. It returns one of 4 forms:
@@ -224,7 +237,7 @@ func ToIP16(ip cue.Value) ([]uint, error) {
// - the hexadecimal form of ip, without punctuation, if no other cases apply
func IPString(ip cue.Value) (string, error) {
ipdata := netGetIP(ip)
- if ipdata == nil {
+ if !ipdata.IsValid() {
return "", fmt.Errorf("invalid IP %q", ip)
}
return ipdata.String(), nil
diff --git a/vendor/cuelang.org/go/pkg/net/pkg.go b/vendor/cuelang.org/go/pkg/net/pkg.go
index 4cd172cb..a1371998 100644
--- a/vendor/cuelang.org/go/pkg/net/pkg.go
+++ b/vendor/cuelang.org/go/pkg/net/pkg.go
@@ -81,6 +81,18 @@ var p = &pkg.Package{
c.Ret = IPv4(ip)
}
},
+ }, {
+ Name: "IPv6",
+ Params: []pkg.Param{
+ {Kind: adt.TopKind},
+ },
+ Result: adt.BoolKind,
+ Func: func(c *pkg.CallCtxt) {
+ ip := c.Value(0)
+ if c.Do() {
+ c.Ret = IPv6(ip)
+ }
+ },
}, {
Name: "IP",
Params: []pkg.Param{
diff --git a/vendor/cuelang.org/go/pkg/path/path.go b/vendor/cuelang.org/go/pkg/path/path.go
index 4d1647c1..247dd7b3 100644
--- a/vendor/cuelang.org/go/pkg/path/path.go
+++ b/vendor/cuelang.org/go/pkg/path/path.go
@@ -16,13 +16,9 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package filepath implements utility routines for manipulating filename paths
-// in a way compatible with the target operating system-defined file paths.
-//
-// The filepath package uses either forward slashes or backslashes,
-// depending on the operating system. To process paths such as URLs
-// that always use forward slashes regardless of the operating
-// system, see the path package.
+// Package path implements utility routines for manipulating filename paths as
+// defined by targetted operating systems, and also paths that always use
+// forward slashes regardless of the operating system, such as URLs.
package path
import (
diff --git a/vendor/cuelang.org/go/pkg/tool/exec/exec.cue b/vendor/cuelang.org/go/pkg/tool/exec/exec.cue
index 62e3af53..c9800837 100644
--- a/vendor/cuelang.org/go/pkg/tool/exec/exec.cue
+++ b/vendor/cuelang.org/go/pkg/tool/exec/exec.cue
@@ -29,7 +29,7 @@ Run: {
// If the value is a list, the entries mus be of the form key=value,
// where the last value takes precendence in the case of multiple
// occurrances of the same key.
- env: [string]: string | [...=~"="]
+ env: {[string]: string} | [...=~"="]
// stdout captures the output from stdout if it is of type bytes or string.
// The default value of null indicates it is redirected to the stdout of the
diff --git a/vendor/cuelang.org/go/pkg/tool/exec/exec.go b/vendor/cuelang.org/go/pkg/tool/exec/exec.go
index 5c5f5ee3..a596a8ca 100644
--- a/vendor/cuelang.org/go/pkg/tool/exec/exec.go
+++ b/vendor/cuelang.org/go/pkg/tool/exec/exec.go
@@ -45,13 +45,8 @@ func (c *execCmd) Run(ctx *task.Context) (res interface{}, err error) {
// TODO: set environment variables, if defined.
stream := func(name string) (stream cue.Value, ok bool) {
- c := ctx.Obj.Lookup(name)
- // Although the schema defines a default versions, older implementations
- // may not use it yet.
- if !c.Exists() {
- return
- }
- if err := c.Null(); ctx.Err != nil || err == nil {
+ c := ctx.Obj.LookupPath(cue.ParsePath(name))
+ if err := c.Null(); c.Err() != nil || err == nil {
return
}
return c, true
@@ -71,14 +66,10 @@ func (c *execCmd) Run(ctx *task.Context) (res interface{}, err error) {
cmd.Stderr = ctx.Stderr
}
- // TODO(mvdan): exec.Run declares mustSucceed as a regular field with a default of true.
- // We should be able to rely on that here, removing the need for Exists and repeating the default.
- mustSucceed := true
- if v := ctx.Obj.LookupPath(cue.ParsePath("mustSucceed")); v.Exists() {
- mustSucceed, err = v.Bool()
- if err != nil {
- return nil, errors.Wrapf(err, v.Pos(), "invalid bool value")
- }
+ v := ctx.Obj.LookupPath(cue.ParsePath("mustSucceed"))
+ mustSucceed, err := v.Bool()
+ if err != nil {
+ return nil, errors.Wrapf(err, v.Pos(), "invalid bool value")
}
update := map[string]interface{}{}
@@ -102,6 +93,7 @@ func (c *execCmd) Run(ctx *task.Context) (res interface{}, err error) {
update["stderr"] = err.Error()
}
}
+
if !mustSucceed {
return update, nil
}
@@ -152,13 +144,14 @@ func mkCommand(ctx *task.Context) (c *exec.Cmd, doc string, err error) {
cmd := exec.CommandContext(ctx.Context, bin, args...)
- cmd.Dir, _ = ctx.Obj.Lookup("dir").String()
+ cmd.Dir, _ = ctx.Obj.LookupPath(cue.ParsePath("dir")).String()
- env := ctx.Obj.Lookup("env")
+ env := ctx.Obj.LookupPath(cue.ParsePath("env"))
// List case.
for iter, _ := env.List(); iter.Next(); {
- str, err := iter.Value().String()
+ v, _ := iter.Value().Default()
+ str, err := v.String()
if err != nil {
return nil, "", errors.Wrapf(err, v.Pos(),
"invalid environment variable value %q", v)
@@ -167,9 +160,9 @@ func mkCommand(ctx *task.Context) (c *exec.Cmd, doc string, err error) {
}
// Struct case.
- for iter, _ := ctx.Obj.Lookup("env").Fields(); iter.Next(); {
+ for iter, _ := env.Fields(); iter.Next(); {
label := iter.Label()
- v := iter.Value()
+ v, _ := iter.Value().Default()
var str string
switch v.Kind() {
case cue.StringKind:
diff --git a/vendor/cuelang.org/go/pkg/tool/exec/pkg.go b/vendor/cuelang.org/go/pkg/tool/exec/pkg.go
index 690b8de7..c20f7299 100644
--- a/vendor/cuelang.org/go/pkg/tool/exec/pkg.go
+++ b/vendor/cuelang.org/go/pkg/tool/exec/pkg.go
@@ -19,7 +19,7 @@
// // If the value is a list, the entries mus be of the form key=value,
// // where the last value takes precendence in the case of multiple
// // occurrances of the same key.
-// env: [string]: string | [...=~"="]
+// env: {[string]: string} | [...=~"="]
//
// // stdout captures the output from stdout if it is of type bytes or string.
// // The default value of null indicates it is redirected to the stdout of the
@@ -64,8 +64,8 @@ var p = &pkg.Package{
cmd: string | [string, ...string]
dir?: string
env: {
- [string]: string | [...=~"="]
- }
+ [string]: string
+ } | [...=~"="]
stdout: *null | string | bytes
stderr: *null | string | bytes
stdin: *null | string | bytes
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
index 12a33149..847cc51a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
@@ -3,4 +3,4 @@
package aws
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.25.2"
+const goModuleVersion = "1.26.0"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go
new file mode 100644
index 00000000..8c783641
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go
@@ -0,0 +1,20 @@
+package ratelimit
+
+import "context"
+
+// None implements a no-op rate limiter which effectively disables client-side
+// rate limiting (also known as "retry quotas").
+//
+// GetToken does nothing and always returns a nil error. The returned
+// token-release function does nothing, and always returns a nil error.
+//
+// AddTokens does nothing and always returns a nil error.
+var None = &none{}
+
+type none struct{}
+
+func (*none) GetToken(ctx context.Context, cost uint) (func() error, error) {
+ return func() error { return nil }, nil
+}
+
+func (*none) AddTokens(v uint) error { return nil }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go
index 25abffc8..d5ea9322 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go
@@ -123,6 +123,17 @@ type StandardOptions struct {
// Provides the rate limiting strategy for rate limiting attempt retries
// across all attempts the retryer is being used with.
+ //
+ // A RateLimiter operates as a token bucket with a set capacity, where
+ // attempt failures events consume tokens. A retry attempt that attempts to
+ // consume more tokens than what's available results in operation failure.
+ // The default implementation is parameterized as follows:
+ // - a capacity of 500 (DefaultRetryRateTokens)
+ // - a retry caused by a timeout costs 10 tokens (DefaultRetryCost)
+ // - a retry caused by other errors costs 5 tokens (DefaultRetryTimeoutCost)
+ // - an operation that succeeds on the 1st attempt adds 1 token (DefaultNoRetryIncrement)
+ //
+ // You can disable rate limiting by setting this field to ratelimit.None.
RateLimiter RateLimiter
// The cost to deduct from the RateLimiter's token bucket per retry.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
index 38b390aa..d5e6071f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
@@ -1,3 +1,24 @@
+# v1.27.9 (2024-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.8 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.7 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.6 (2024-03-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.5 (2024-03-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.27.4 (2024-02-23)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
index 1e60413c..00ee2049 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
@@ -3,4 +3,4 @@
package config
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.27.4"
+const goModuleVersion = "1.27.9"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
index 0f571bce..399f0896 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
@@ -1,3 +1,24 @@
+# v1.17.9 (2024-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.8 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.7 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.6 (2024-03-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.5 (2024-03-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.17.4 (2024-02-23)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
index ca8e4d24..2b4ff389 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
@@ -3,4 +3,4 @@
package credentials
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.17.4"
+const goModuleVersion = "1.17.9"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
index 2ba9a260..e07fb5ca 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
@@ -1,3 +1,16 @@
+# v1.16.0 (2024-03-21)
+
+* **Feature**: Add config switch `DisableDefaultTimeout` that allows you to disable the default operation timeout (5 seconds) for IMDS calls.
+
+# v1.15.4 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.3 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.15.2 (2024-02-23)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go
index 46e144d9..3f4a10e2 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go
@@ -185,6 +185,10 @@ type Options struct {
// [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
EnableFallback aws.Ternary
+ // By default, all IMDS client operations enforce a 5-second timeout. You
+ // can disable that behavior with this setting.
+ DisableDefaultTimeout bool
+
// provides the caching of API tokens used for operation calls. If unset,
// the API token will not be retrieved for the operation.
tokenProvider *tokenProvider
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go
index bacdb5d2..d5765c36 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go
@@ -3,8 +3,9 @@
//
// All Client operation calls have a default timeout. If the operation is not
// completed before this timeout expires, the operation will be canceled. This
-// timeout can be overridden by providing Context with a timeout or deadline
-// with calling the client's operations.
+// timeout can be overridden through the following:
+// - Set the options flag DisableDefaultTimeout
+// - Provide a Context with a timeout or deadline with calling the client's operations.
//
// See the EC2 IMDS user guide for more information on using the API.
// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
index 8010ded7..a44cd1b7 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
@@ -3,4 +3,4 @@
package imds
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.15.2"
+const goModuleVersion = "1.16.0"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go
index fc948c27..90cf4aeb 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go
@@ -56,6 +56,7 @@ func addRequestMiddleware(stack *middleware.Stack,
// Operation timeout
err = stack.Initialize.Add(&operationTimeout{
+ Disabled: options.DisableDefaultTimeout,
DefaultTimeout: defaultOperationTimeout,
}, middleware.Before)
if err != nil {
@@ -260,6 +261,7 @@ const (
// Otherwise the timeout cleanup will race the resource being consumed
// upstream.
type operationTimeout struct {
+ Disabled bool
DefaultTimeout time.Duration
}
@@ -270,6 +272,10 @@ func (m *operationTimeout) HandleInitialize(
) (
output middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
+ if m.Disabled {
+ return next.HandleInitialize(ctx, input)
+ }
+
if _, ok := ctx.Deadline(); !ok && m.DefaultTimeout != 0 {
var cancelFn func()
ctx, cancelFn = context.WithTimeout(ctx, m.DefaultTimeout)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
index b62d57cb..86f5b137 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
@@ -1,3 +1,11 @@
+# v1.3.4 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.3 (2024-03-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.3.2 (2024-02-23)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
index a99e10d8..d25782e9 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
@@ -3,4 +3,4 @@
package configsources
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.3.2"
+const goModuleVersion = "1.3.4"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
index b95cd39f..5bb02f57 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
@@ -1,3 +1,12 @@
+# v2.6.4 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.3 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v2.6.2 (2024-02-23)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
index 833b9115..bb857bcb 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
@@ -3,4 +3,4 @@
package endpoints
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "2.6.2"
+const goModuleVersion = "2.6.4"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
index 38b0de28..ead169d5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
@@ -1,3 +1,20 @@
+# v1.11.6 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.5 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.4 (2024-03-05)
+
+* **Bug Fix**: Restore typo'd API `AddAsIsInternalPresigingMiddleware` as an alias for backwards compatibility.
+
+# v1.11.3 (2024-03-04)
+
+* **Bug Fix**: Correct a typo in internal AddAsIsPresigningMiddleware API.
+
# v1.11.2 (2024-02-23)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go
index cc919701..5d5286f9 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go
@@ -27,13 +27,21 @@ func GetIsPresigning(ctx context.Context) bool {
type isPresigningKey struct{}
-// AddAsIsPresigingMiddleware adds a middleware to the head of the stack that
+// AddAsIsPresigningMiddleware adds a middleware to the head of the stack that
// will update the stack's context to be flagged as being invoked for the
// purpose of presigning.
-func AddAsIsPresigingMiddleware(stack *middleware.Stack) error {
+func AddAsIsPresigningMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(asIsPresigningMiddleware{}, middleware.Before)
}
+// AddAsIsPresigingMiddleware is an alias for backwards compatibility.
+//
+// Deprecated: This API was released with a typo. Use
+// [AddAsIsPresigningMiddleware] instead.
+func AddAsIsPresigingMiddleware(stack *middleware.Stack) error {
+ return AddAsIsPresigningMiddleware(stack)
+}
+
type asIsPresigningMiddleware struct{}
func (asIsPresigningMiddleware) ID() string { return "AsIsPresigningMiddleware" }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
index 0af263c5..98bea53b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
@@ -3,4 +3,4 @@
package presignedurl
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.11.2"
+const goModuleVersion = "1.11.6"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
index f0a4e60d..5a508309 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
@@ -1,3 +1,12 @@
+# v1.20.3 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.2 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.20.1 (2024-02-23)
* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json
index 62aba0d0..936253d7 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json
@@ -3,8 +3,7 @@
"github.com/aws/aws-sdk-go-v2": "v1.4.0",
"github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000",
"github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000",
- "github.com/aws/smithy-go": "v1.4.0",
- "github.com/google/go-cmp": "v0.5.4"
+ "github.com/aws/smithy-go": "v1.4.0"
},
"files": [
"api_client.go",
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
index 250762b7..e98c0f32 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
@@ -3,4 +3,4 @@
package sso
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.20.1"
+const goModuleVersion = "1.20.3"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go
index c8f7c09e..0a00b256 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go
@@ -211,6 +211,14 @@ var defaultPartitions = endpoints.Partitions{
Region: "ap-southeast-3",
},
},
+ endpoints.EndpointKey{
+ Region: "ap-southeast-4",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.ap-southeast-4.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
endpoints.EndpointKey{
Region: "ca-central-1",
}: endpoints.Endpoint{
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
index 13cffac4..c6d5ae92 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
@@ -1,3 +1,12 @@
+# v1.23.3 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.2 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.23.1 (2024-02-23)
* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json
index 62007829..b2a52633 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json
@@ -3,8 +3,7 @@
"github.com/aws/aws-sdk-go-v2": "v1.4.0",
"github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000",
"github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000",
- "github.com/aws/smithy-go": "v1.4.0",
- "github.com/google/go-cmp": "v0.5.4"
+ "github.com/aws/smithy-go": "v1.4.0"
},
"files": [
"api_client.go",
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
index 8c5f455b..e81f202f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
@@ -3,4 +3,4 @@
package ssooidc
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.23.1"
+const goModuleVersion = "1.23.3"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go
index cbd77fd2..aa207253 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go
@@ -211,6 +211,14 @@ var defaultPartitions = endpoints.Partitions{
Region: "ap-southeast-3",
},
},
+ endpoints.EndpointKey{
+ Region: "ap-southeast-4",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.ap-southeast-4.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
endpoints.EndpointKey{
Region: "ca-central-1",
}: endpoints.Endpoint{
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
index 0f4845a5..1c503194 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
@@ -1,3 +1,21 @@
+# v1.28.5 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.4 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.3 (2024-03-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.2 (2024-03-04)
+
+* **Bug Fix**: Update internal/presigned-url dependency for corrected API name.
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.28.1 (2024-02-23)
* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go
index 8b38a505..4d18dc86 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go
@@ -643,7 +643,7 @@ func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, op
if err != nil {
return err
}
- err = presignedurlcust.AddAsIsPresigingMiddleware(stack)
+ err = presignedurlcust.AddAsIsPresigningMiddleware(stack)
if err != nil {
return err
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json
index 54fac4bd..6b6e839e 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json
@@ -5,8 +5,7 @@
"github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000",
"github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding": "v1.0.5",
"github.com/aws/aws-sdk-go-v2/service/internal/presigned-url": "v1.0.7",
- "github.com/aws/smithy-go": "v1.4.0",
- "github.com/google/go-cmp": "v0.5.4"
+ "github.com/aws/smithy-go": "v1.4.0"
},
"files": [
"api_client.go",
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
index 06957274..8bba9b7d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
@@ -3,4 +3,4 @@
package sts
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.28.1"
+const goModuleVersion = "1.28.5"
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go b/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go
index b7bd0927..f42d37d4 100644
--- a/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go
+++ b/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go
@@ -1,5 +1,7 @@
package oidc
+import jose "github.com/go-jose/go-jose/v4"
+
// JOSE asymmetric signing algorithm values as defined by RFC 7518
//
// see: https://tools.ietf.org/html/rfc7518#section-3.1
@@ -15,3 +17,16 @@ const (
PS512 = "PS512" // RSASSA-PSS using SHA512 and MGF1-SHA512
EdDSA = "EdDSA" // Ed25519 using SHA-512
)
+
+var allAlgs = []jose.SignatureAlgorithm{
+ jose.RS256,
+ jose.RS384,
+ jose.RS512,
+ jose.ES256,
+ jose.ES384,
+ jose.ES512,
+ jose.PS256,
+ jose.PS384,
+ jose.PS512,
+ jose.EdDSA,
+}
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go b/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go
index b1e3f7e3..9a70c143 100644
--- a/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go
+++ b/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go
@@ -13,7 +13,7 @@ import (
"sync"
"time"
- jose "github.com/go-jose/go-jose/v3"
+ jose "github.com/go-jose/go-jose/v4"
)
// StaticKeySet is a verifier that validates JWT against a static set of public keys.
@@ -25,7 +25,9 @@ type StaticKeySet struct {
// VerifySignature compares the signature against a static set of public keys.
func (s *StaticKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) {
- jws, err := jose.ParseSigned(jwt)
+ // Algorithms are already checked by Verifier, so this parse method accepts
+ // any algorithm.
+ jws, err := jose.ParseSigned(jwt, allAlgs)
if err != nil {
return nil, fmt.Errorf("parsing jwt: %v", err)
}
@@ -127,8 +129,13 @@ var parsedJWTKey contextKey
func (r *RemoteKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) {
jws, ok := ctx.Value(parsedJWTKey).(*jose.JSONWebSignature)
if !ok {
+ // The algorithm values are already enforced by the Validator, which also sets
+ // the context value above to pre-parsed signature.
+ //
+ // Practically, this codepath isn't called in normal use of this package, but
+ // if it is, the algorithms have already been checked.
var err error
- jws, err = jose.ParseSigned(jwt)
+ jws, err = jose.ParseSigned(jwt, allAlgs)
if err != nil {
return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
}
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go b/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go
index b7db3c73..17419f38 100644
--- a/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go
+++ b/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go
@@ -79,7 +79,7 @@ func getClient(ctx context.Context) *http.Client {
// provider, err := oidc.NewProvider(ctx, discoveryBaseURL)
//
// This is insecure because validating the correct issuer is critical for multi-tenant
-// proivders. Any overrides here MUST be carefully reviewed.
+// providers. Any overrides here MUST be carefully reviewed.
func InsecureIssuerURLContext(ctx context.Context, issuerURL string) context.Context {
return context.WithValue(ctx, issuerURLKey, issuerURL)
}
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go b/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go
index 0bca49a8..0ac58d29 100644
--- a/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go
+++ b/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go
@@ -12,7 +12,7 @@ import (
"strings"
"time"
- jose "github.com/go-jose/go-jose/v3"
+ jose "github.com/go-jose/go-jose/v4"
"golang.org/x/oauth2"
)
@@ -310,7 +310,16 @@ func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDTok
return t, nil
}
- jws, err := jose.ParseSigned(rawIDToken)
+ var supportedSigAlgs []jose.SignatureAlgorithm
+ for _, alg := range v.config.SupportedSigningAlgs {
+ supportedSigAlgs = append(supportedSigAlgs, jose.SignatureAlgorithm(alg))
+ }
+ if len(supportedSigAlgs) == 0 {
+ // If no algorithms were specified by both the config and discovery, default
+ // to the one mandatory algorithm "RS256".
+ supportedSigAlgs = []jose.SignatureAlgorithm{jose.RS256}
+ }
+ jws, err := jose.ParseSigned(rawIDToken, supportedSigAlgs)
if err != nil {
return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
}
@@ -322,17 +331,7 @@ func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDTok
default:
return nil, fmt.Errorf("oidc: multiple signatures on id token not supported")
}
-
sig := jws.Signatures[0]
- supportedSigAlgs := v.config.SupportedSigningAlgs
- if len(supportedSigAlgs) == 0 {
- supportedSigAlgs = []string{RS256}
- }
-
- if !contains(supportedSigAlgs, sig.Header.Algorithm) {
- return nil, fmt.Errorf("oidc: id token signed with unsupported algorithm, expected %q got %q", supportedSigAlgs, sig.Header.Algorithm)
- }
-
t.sigAlgorithm = sig.Header.Algorithm
ctx = context.WithValue(ctx, parsedJWTKey, jws)
diff --git a/vendor/github.com/dustin/go-humanize/.travis.yml b/vendor/github.com/dustin/go-humanize/.travis.yml
new file mode 100644
index 00000000..ac12e485
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/.travis.yml
@@ -0,0 +1,21 @@
+sudo: false
+language: go
+go_import_path: github.com/dustin/go-humanize
+go:
+ - 1.13.x
+ - 1.14.x
+ - 1.15.x
+ - 1.16.x
+ - stable
+ - master
+matrix:
+ allow_failures:
+ - go: master
+ fast_finish: true
+install:
+ - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
+script:
+ - diff -u <(echo -n) <(gofmt -d -s .)
+ - go vet .
+ - go install -v -race ./...
+ - go test -v -race ./...
diff --git a/vendor/github.com/mpvl/unique/LICENSE b/vendor/github.com/dustin/go-humanize/LICENSE
similarity index 84%
rename from vendor/github.com/mpvl/unique/LICENSE
rename to vendor/github.com/dustin/go-humanize/LICENSE
index 60b39f27..8d9a94a9 100644
--- a/vendor/github.com/mpvl/unique/LICENSE
+++ b/vendor/github.com/dustin/go-humanize/LICENSE
@@ -1,6 +1,4 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 Marcel van Lohuizen
+Copyright (c) 2005-2008 Dustin Sallings
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -9,8 +7,8 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
@@ -20,3 +18,4 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
+
diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown
new file mode 100644
index 00000000..7d0b16b3
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/README.markdown
@@ -0,0 +1,124 @@
+# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize)
+
+Just a few functions for helping humanize times and sizes.
+
+`go get` it as `github.com/dustin/go-humanize`, import it as
+`"github.com/dustin/go-humanize"`, use it as `humanize`.
+
+See [godoc](https://pkg.go.dev/github.com/dustin/go-humanize) for
+complete documentation.
+
+## Sizes
+
+This lets you take numbers like `82854982` and convert them to useful
+strings like, `83 MB` or `79 MiB` (whichever you prefer).
+
+Example:
+
+```go
+fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB.
+```
+
+## Times
+
+This lets you take a `time.Time` and spit it out in relative terms.
+For example, `12 seconds ago` or `3 days from now`.
+
+Example:
+
+```go
+fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago.
+```
+
+Thanks to Kyle Lemons for the time implementation from an IRC
+conversation one day. It's pretty neat.
+
+## Ordinals
+
+From a [mailing list discussion][odisc] where a user wanted to be able
+to label ordinals.
+
+ 0 -> 0th
+ 1 -> 1st
+ 2 -> 2nd
+ 3 -> 3rd
+ 4 -> 4th
+ [...]
+
+Example:
+
+```go
+fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend.
+```
+
+## Commas
+
+Want to shove commas into numbers? Be my guest.
+
+ 0 -> 0
+ 100 -> 100
+ 1000 -> 1,000
+ 1000000000 -> 1,000,000,000
+ -100000 -> -100,000
+
+Example:
+
+```go
+fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491.
+```
+
+## Ftoa
+
+Nicer float64 formatter that removes trailing zeros.
+
+```go
+fmt.Printf("%f", 2.24) // 2.240000
+fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24
+fmt.Printf("%f", 2.0) // 2.000000
+fmt.Printf("%s", humanize.Ftoa(2.0)) // 2
+```
+
+## SI notation
+
+Format numbers with [SI notation][sinotation].
+
+Example:
+
+```go
+humanize.SI(0.00000000223, "M") // 2.23 nM
+```
+
+## English-specific functions
+
+The following functions are in the `humanize/english` subpackage.
+
+### Plurals
+
+Simple English pluralization
+
+```go
+english.PluralWord(1, "object", "") // object
+english.PluralWord(42, "object", "") // objects
+english.PluralWord(2, "bus", "") // buses
+english.PluralWord(99, "locus", "loci") // loci
+
+english.Plural(1, "object", "") // 1 object
+english.Plural(42, "object", "") // 42 objects
+english.Plural(2, "bus", "") // 2 buses
+english.Plural(99, "locus", "loci") // 99 loci
+```
+
+### Word series
+
+Format comma-separated words lists with conjuctions:
+
+```go
+english.WordSeries([]string{"foo"}, "and") // foo
+english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar
+english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz
+
+english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz
+```
+
+[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion
+[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix
diff --git a/vendor/github.com/dustin/go-humanize/big.go b/vendor/github.com/dustin/go-humanize/big.go
new file mode 100644
index 00000000..f49dc337
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/big.go
@@ -0,0 +1,31 @@
+package humanize
+
+import (
+ "math/big"
+)
+
+// order of magnitude (to a max order)
+func oomm(n, b *big.Int, maxmag int) (float64, int) {
+ mag := 0
+ m := &big.Int{}
+ for n.Cmp(b) >= 0 {
+ n.DivMod(n, b, m)
+ mag++
+ if mag == maxmag && maxmag >= 0 {
+ break
+ }
+ }
+ return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
+}
+
+// total order of magnitude
+// (same as above, but with no upper limit)
+func oom(n, b *big.Int) (float64, int) {
+ mag := 0
+ m := &big.Int{}
+ for n.Cmp(b) >= 0 {
+ n.DivMod(n, b, m)
+ mag++
+ }
+ return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
+}
diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go
new file mode 100644
index 00000000..3b015fd5
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/bigbytes.go
@@ -0,0 +1,189 @@
+package humanize
+
+import (
+ "fmt"
+ "math/big"
+ "strings"
+ "unicode"
+)
+
+var (
+ bigIECExp = big.NewInt(1024)
+
+ // BigByte is one byte in bit.Ints
+ BigByte = big.NewInt(1)
+ // BigKiByte is 1,024 bytes in bit.Ints
+ BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp)
+ // BigMiByte is 1,024 k bytes in bit.Ints
+ BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp)
+ // BigGiByte is 1,024 m bytes in bit.Ints
+ BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp)
+ // BigTiByte is 1,024 g bytes in bit.Ints
+ BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp)
+ // BigPiByte is 1,024 t bytes in bit.Ints
+ BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp)
+ // BigEiByte is 1,024 p bytes in bit.Ints
+ BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp)
+ // BigZiByte is 1,024 e bytes in bit.Ints
+ BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
+ // BigYiByte is 1,024 z bytes in bit.Ints
+ BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
+ // BigRiByte is 1,024 y bytes in bit.Ints
+ BigRiByte = (&big.Int{}).Mul(BigYiByte, bigIECExp)
+ // BigQiByte is 1,024 r bytes in bit.Ints
+ BigQiByte = (&big.Int{}).Mul(BigRiByte, bigIECExp)
+)
+
+var (
+ bigSIExp = big.NewInt(1000)
+
+ // BigSIByte is one SI byte in big.Ints
+ BigSIByte = big.NewInt(1)
+ // BigKByte is 1,000 SI bytes in big.Ints
+ BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp)
+ // BigMByte is 1,000 SI k bytes in big.Ints
+ BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp)
+ // BigGByte is 1,000 SI m bytes in big.Ints
+ BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp)
+ // BigTByte is 1,000 SI g bytes in big.Ints
+ BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp)
+ // BigPByte is 1,000 SI t bytes in big.Ints
+ BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp)
+ // BigEByte is 1,000 SI p bytes in big.Ints
+ BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp)
+ // BigZByte is 1,000 SI e bytes in big.Ints
+ BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
+ // BigYByte is 1,000 SI z bytes in big.Ints
+ BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
+ // BigRByte is 1,000 SI y bytes in big.Ints
+ BigRByte = (&big.Int{}).Mul(BigYByte, bigSIExp)
+ // BigQByte is 1,000 SI r bytes in big.Ints
+ BigQByte = (&big.Int{}).Mul(BigRByte, bigSIExp)
+)
+
+var bigBytesSizeTable = map[string]*big.Int{
+ "b": BigByte,
+ "kib": BigKiByte,
+ "kb": BigKByte,
+ "mib": BigMiByte,
+ "mb": BigMByte,
+ "gib": BigGiByte,
+ "gb": BigGByte,
+ "tib": BigTiByte,
+ "tb": BigTByte,
+ "pib": BigPiByte,
+ "pb": BigPByte,
+ "eib": BigEiByte,
+ "eb": BigEByte,
+ "zib": BigZiByte,
+ "zb": BigZByte,
+ "yib": BigYiByte,
+ "yb": BigYByte,
+ "rib": BigRiByte,
+ "rb": BigRByte,
+ "qib": BigQiByte,
+ "qb": BigQByte,
+ // Without suffix
+ "": BigByte,
+ "ki": BigKiByte,
+ "k": BigKByte,
+ "mi": BigMiByte,
+ "m": BigMByte,
+ "gi": BigGiByte,
+ "g": BigGByte,
+ "ti": BigTiByte,
+ "t": BigTByte,
+ "pi": BigPiByte,
+ "p": BigPByte,
+ "ei": BigEiByte,
+ "e": BigEByte,
+ "z": BigZByte,
+ "zi": BigZiByte,
+ "y": BigYByte,
+ "yi": BigYiByte,
+ "r": BigRByte,
+ "ri": BigRiByte,
+ "q": BigQByte,
+ "qi": BigQiByte,
+}
+
+var ten = big.NewInt(10)
+
+func humanateBigBytes(s, base *big.Int, sizes []string) string {
+ if s.Cmp(ten) < 0 {
+ return fmt.Sprintf("%d B", s)
+ }
+ c := (&big.Int{}).Set(s)
+ val, mag := oomm(c, base, len(sizes)-1)
+ suffix := sizes[mag]
+ f := "%.0f %s"
+ if val < 10 {
+ f = "%.1f %s"
+ }
+
+ return fmt.Sprintf(f, val, suffix)
+
+}
+
+// BigBytes produces a human readable representation of an SI size.
+//
+// See also: ParseBigBytes.
+//
+// BigBytes(82854982) -> 83 MB
+func BigBytes(s *big.Int) string {
+ sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", "RB", "QB"}
+ return humanateBigBytes(s, bigSIExp, sizes)
+}
+
+// BigIBytes produces a human readable representation of an IEC size.
+//
+// See also: ParseBigBytes.
+//
+// BigIBytes(82854982) -> 79 MiB
+func BigIBytes(s *big.Int) string {
+ sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", "RiB", "QiB"}
+ return humanateBigBytes(s, bigIECExp, sizes)
+}
+
+// ParseBigBytes parses a string representation of bytes into the number
+// of bytes it represents.
+//
+// See also: BigBytes, BigIBytes.
+//
+// ParseBigBytes("42 MB") -> 42000000, nil
+// ParseBigBytes("42 mib") -> 44040192, nil
+func ParseBigBytes(s string) (*big.Int, error) {
+ lastDigit := 0
+ hasComma := false
+ for _, r := range s {
+ if !(unicode.IsDigit(r) || r == '.' || r == ',') {
+ break
+ }
+ if r == ',' {
+ hasComma = true
+ }
+ lastDigit++
+ }
+
+ num := s[:lastDigit]
+ if hasComma {
+ num = strings.Replace(num, ",", "", -1)
+ }
+
+ val := &big.Rat{}
+ _, err := fmt.Sscanf(num, "%f", val)
+ if err != nil {
+ return nil, err
+ }
+
+ extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
+ if m, ok := bigBytesSizeTable[extra]; ok {
+ mv := (&big.Rat{}).SetInt(m)
+ val.Mul(val, mv)
+ rv := &big.Int{}
+ rv.Div(val.Num(), val.Denom())
+ return rv, nil
+ }
+
+ return nil, fmt.Errorf("unhandled size name: %v", extra)
+}
diff --git a/vendor/github.com/dustin/go-humanize/bytes.go b/vendor/github.com/dustin/go-humanize/bytes.go
new file mode 100644
index 00000000..0b498f48
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/bytes.go
@@ -0,0 +1,143 @@
+package humanize
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+// IEC Sizes.
+// kibis of bits
+const (
+ Byte = 1 << (iota * 10)
+ KiByte
+ MiByte
+ GiByte
+ TiByte
+ PiByte
+ EiByte
+)
+
+// SI Sizes.
+const (
+ IByte = 1
+ KByte = IByte * 1000
+ MByte = KByte * 1000
+ GByte = MByte * 1000
+ TByte = GByte * 1000
+ PByte = TByte * 1000
+ EByte = PByte * 1000
+)
+
+var bytesSizeTable = map[string]uint64{
+ "b": Byte,
+ "kib": KiByte,
+ "kb": KByte,
+ "mib": MiByte,
+ "mb": MByte,
+ "gib": GiByte,
+ "gb": GByte,
+ "tib": TiByte,
+ "tb": TByte,
+ "pib": PiByte,
+ "pb": PByte,
+ "eib": EiByte,
+ "eb": EByte,
+ // Without suffix
+ "": Byte,
+ "ki": KiByte,
+ "k": KByte,
+ "mi": MiByte,
+ "m": MByte,
+ "gi": GiByte,
+ "g": GByte,
+ "ti": TiByte,
+ "t": TByte,
+ "pi": PiByte,
+ "p": PByte,
+ "ei": EiByte,
+ "e": EByte,
+}
+
+func logn(n, b float64) float64 {
+ return math.Log(n) / math.Log(b)
+}
+
+func humanateBytes(s uint64, base float64, sizes []string) string {
+ if s < 10 {
+ return fmt.Sprintf("%d B", s)
+ }
+ e := math.Floor(logn(float64(s), base))
+ suffix := sizes[int(e)]
+ val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10
+ f := "%.0f %s"
+ if val < 10 {
+ f = "%.1f %s"
+ }
+
+ return fmt.Sprintf(f, val, suffix)
+}
+
+// Bytes produces a human readable representation of an SI size.
+//
+// See also: ParseBytes.
+//
+// Bytes(82854982) -> 83 MB
+func Bytes(s uint64) string {
+ sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"}
+ return humanateBytes(s, 1000, sizes)
+}
+
+// IBytes produces a human readable representation of an IEC size.
+//
+// See also: ParseBytes.
+//
+// IBytes(82854982) -> 79 MiB
+func IBytes(s uint64) string {
+ sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}
+ return humanateBytes(s, 1024, sizes)
+}
+
+// ParseBytes parses a string representation of bytes into the number
+// of bytes it represents.
+//
+// See Also: Bytes, IBytes.
+//
+// ParseBytes("42 MB") -> 42000000, nil
+// ParseBytes("42 mib") -> 44040192, nil
+func ParseBytes(s string) (uint64, error) {
+ lastDigit := 0
+ hasComma := false
+ for _, r := range s {
+ if !(unicode.IsDigit(r) || r == '.' || r == ',') {
+ break
+ }
+ if r == ',' {
+ hasComma = true
+ }
+ lastDigit++
+ }
+
+ num := s[:lastDigit]
+ if hasComma {
+ num = strings.Replace(num, ",", "", -1)
+ }
+
+ f, err := strconv.ParseFloat(num, 64)
+ if err != nil {
+ return 0, err
+ }
+
+ extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
+ if m, ok := bytesSizeTable[extra]; ok {
+ f *= float64(m)
+ if f >= math.MaxUint64 {
+ return 0, fmt.Errorf("too large: %v", s)
+ }
+ return uint64(f), nil
+ }
+
+ return 0, fmt.Errorf("unhandled size name: %v", extra)
+}
diff --git a/vendor/github.com/dustin/go-humanize/comma.go b/vendor/github.com/dustin/go-humanize/comma.go
new file mode 100644
index 00000000..520ae3e5
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/comma.go
@@ -0,0 +1,116 @@
+package humanize
+
+import (
+ "bytes"
+ "math"
+ "math/big"
+ "strconv"
+ "strings"
+)
+
+// Comma produces a string form of the given number in base 10 with
+// commas after every three orders of magnitude.
+//
+// e.g. Comma(834142) -> 834,142
+func Comma(v int64) string {
+ sign := ""
+
+ // Min int64 can't be negated to a usable value, so it has to be special cased.
+ if v == math.MinInt64 {
+ return "-9,223,372,036,854,775,808"
+ }
+
+ if v < 0 {
+ sign = "-"
+ v = 0 - v
+ }
+
+ parts := []string{"", "", "", "", "", "", ""}
+ j := len(parts) - 1
+
+ for v > 999 {
+ parts[j] = strconv.FormatInt(v%1000, 10)
+ switch len(parts[j]) {
+ case 2:
+ parts[j] = "0" + parts[j]
+ case 1:
+ parts[j] = "00" + parts[j]
+ }
+ v = v / 1000
+ j--
+ }
+ parts[j] = strconv.Itoa(int(v))
+ return sign + strings.Join(parts[j:], ",")
+}
+
+// Commaf produces a string form of the given number in base 10 with
+// commas after every three orders of magnitude.
+//
+// e.g. Commaf(834142.32) -> 834,142.32
+func Commaf(v float64) string {
+ buf := &bytes.Buffer{}
+ if v < 0 {
+ buf.Write([]byte{'-'})
+ v = 0 - v
+ }
+
+ comma := []byte{','}
+
+ parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".")
+ pos := 0
+ if len(parts[0])%3 != 0 {
+ pos += len(parts[0]) % 3
+ buf.WriteString(parts[0][:pos])
+ buf.Write(comma)
+ }
+ for ; pos < len(parts[0]); pos += 3 {
+ buf.WriteString(parts[0][pos : pos+3])
+ buf.Write(comma)
+ }
+ buf.Truncate(buf.Len() - 1)
+
+ if len(parts) > 1 {
+ buf.Write([]byte{'.'})
+ buf.WriteString(parts[1])
+ }
+ return buf.String()
+}
+
+// CommafWithDigits works like the Commaf but limits the resulting
+// string to the given number of decimal places.
+//
+// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3
+func CommafWithDigits(f float64, decimals int) string {
+ return stripTrailingDigits(Commaf(f), decimals)
+}
+
+// BigComma produces a string form of the given big.Int in base 10
+// with commas after every three orders of magnitude.
+func BigComma(b *big.Int) string {
+ sign := ""
+ if b.Sign() < 0 {
+ sign = "-"
+ b.Abs(b)
+ }
+
+ athousand := big.NewInt(1000)
+ c := (&big.Int{}).Set(b)
+ _, m := oom(c, athousand)
+ parts := make([]string, m+1)
+ j := len(parts) - 1
+
+ mod := &big.Int{}
+ for b.Cmp(athousand) >= 0 {
+ b.DivMod(b, athousand, mod)
+ parts[j] = strconv.FormatInt(mod.Int64(), 10)
+ switch len(parts[j]) {
+ case 2:
+ parts[j] = "0" + parts[j]
+ case 1:
+ parts[j] = "00" + parts[j]
+ }
+ j--
+ }
+ parts[j] = strconv.Itoa(int(b.Int64()))
+ return sign + strings.Join(parts[j:], ",")
+}
diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go
new file mode 100644
index 00000000..2bc83a03
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/commaf.go
@@ -0,0 +1,41 @@
+//go:build go1.6
+// +build go1.6
+
+package humanize
+
+import (
+ "bytes"
+ "math/big"
+ "strings"
+)
+
+// BigCommaf produces a string form of the given big.Float in base 10
+// with commas after every three orders of magnitude.
+//
+// NOTE(review): v.Abs(v) below mutates the caller's big.Float when the
+// value is negative (matches upstream go-humanize behavior); pass a copy
+// if the original sign must be preserved.
+func BigCommaf(v *big.Float) string {
+ buf := &bytes.Buffer{}
+ if v.Sign() < 0 {
+ buf.Write([]byte{'-'})
+ v.Abs(v)
+ }
+
+ comma := []byte{','}
+
+ // parts[0] holds the integer digits, parts[1] (if present) the fraction.
+ parts := strings.Split(v.Text('f', -1), ".")
+ pos := 0
+ // Leading group of 1 or 2 digits when the digit count isn't a multiple of 3.
+ if len(parts[0])%3 != 0 {
+ pos += len(parts[0]) % 3
+ buf.WriteString(parts[0][:pos])
+ buf.Write(comma)
+ }
+ // Remaining digits in groups of three, each followed by a comma.
+ for ; pos < len(parts[0]); pos += 3 {
+ buf.WriteString(parts[0][pos : pos+3])
+ buf.Write(comma)
+ }
+ // Drop the comma written after the final group.
+ buf.Truncate(buf.Len() - 1)
+
+ // Re-attach the fractional part, if the value had one.
+ if len(parts) > 1 {
+ buf.Write([]byte{'.'})
+ buf.WriteString(parts[1])
+ }
+ return buf.String()
+}
diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go
new file mode 100644
index 00000000..bce923f3
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/ftoa.go
@@ -0,0 +1,49 @@
+package humanize
+
+import (
+ "strconv"
+ "strings"
+)
+
+// stripTrailingZeros removes trailing zeros — and a then-dangling decimal
+// point — from a decimal string. Strings without a '.' pass through as-is.
+func stripTrailingZeros(s string) string {
+ if !strings.ContainsRune(s, '.') {
+ return s
+ }
+ // Walk backwards over the zero tail; if the walk reaches the '.',
+ // drop the point as well.
+ offset := len(s) - 1
+ for offset > 0 {
+ if s[offset] == '.' {
+ offset--
+ break
+ }
+ if s[offset] != '0' {
+ break
+ }
+ offset--
+ }
+ return s[:offset+1]
+}
+
+// stripTrailingDigits truncates (does not round) s to at most the given
+// number of decimal places; digits <= 0 removes the decimal point entirely.
+func stripTrailingDigits(s string, digits int) string {
+ if i := strings.Index(s, "."); i >= 0 {
+ if digits <= 0 {
+ return s[:i]
+ }
+ i++
+ // Fewer fractional digits than requested: nothing to trim.
+ if i+digits >= len(s) {
+ return s
+ }
+ return s[:i+digits]
+ }
+ return s
+}
+
+// Ftoa converts a float to a string with no trailing zeros.
+func Ftoa(num float64) string {
+ // Format with a fixed 6 decimal places, then strip the zero tail.
+ return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64))
+}
+
+// FtoaWithDigits converts a float to a string but limits the resulting string
+// to the given number of decimal places, and no trailing zeros.
+func FtoaWithDigits(num float64, digits int) string {
+ return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits))
+}
diff --git a/vendor/github.com/dustin/go-humanize/humanize.go b/vendor/github.com/dustin/go-humanize/humanize.go
new file mode 100644
index 00000000..a2c2da31
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/humanize.go
@@ -0,0 +1,8 @@
+/*
+Package humanize converts boring ugly numbers to human-friendly strings and back.
+
+Durations can be turned into strings such as "3 days ago", numbers
+representing sizes like 82854982 into useful strings like, "83 MB" or
+"79 MiB" (whichever you prefer).
+*/
+package humanize
diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go
new file mode 100644
index 00000000..6470d0d4
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/number.go
@@ -0,0 +1,192 @@
+package humanize
+
+/*
+Slightly adapted from the source to fit go-humanize.
+
+Author: https://github.com/gorhill
+Source: https://gist.github.com/gorhill/5285193
+
+*/
+
+import (
+ "math"
+ "strconv"
+)
+
+var (
+ // renderFloatPrecisionMultipliers[p] is 10^p, used by FormatFloat to
+ // shift the fractional part into the integer range for precision p.
+ // Indexed directly by precision, so p must stay in [0, 9].
+ renderFloatPrecisionMultipliers = [...]float64{
+ 1,
+ 10,
+ 100,
+ 1000,
+ 10000,
+ 100000,
+ 1000000,
+ 10000000,
+ 100000000,
+ 1000000000,
+ }
+
+ // renderFloatPrecisionRounders[p] is half of one unit in the last place
+ // at precision p; FormatFloat adds it so truncation rounds half up.
+ renderFloatPrecisionRounders = [...]float64{
+ 0.5,
+ 0.05,
+ 0.005,
+ 0.0005,
+ 0.00005,
+ 0.000005,
+ 0.0000005,
+ 0.00000005,
+ 0.000000005,
+ 0.0000000005,
+ }
+)
+
+// FormatFloat produces a formatted number as string based on the following user-specified criteria:
+// * thousands separator
+// * decimal separator
+// * decimal precision
+//
+// Usage: s := RenderFloat(format, n)
+// The format parameter tells how to render the number n.
+//
+// See examples: http://play.golang.org/p/LXc1Ddm1lJ
+//
+// Examples of format strings, given n = 12345.6789:
+// "#,###.##" => "12,345.67"
+// "#,###." => "12,345"
+// "#,###" => "12345,678"
+// "#\u202F###,##" => "12 345,68"
+// "#.###,######" => 12.345,678900
+// "" (aka default format) => 12,345.67
+//
+// The highest precision allowed is 9 digits after the decimal symbol.
+// There is also a version for integer number, FormatInteger(),
+// which is convenient for calls within template.
+//
+// NOTE(review): the precision parsed from the format string is not clamped
+// to 9 — a format with more than 9 digit specifiers after the decimal
+// separator would index past the rounder/multiplier tables and panic.
+// Likewise int64(intf) below overflows for |n| >= 2^63. Confirm callers
+// constrain their inputs (upstream go-humanize code, left as-is).
+func FormatFloat(format string, n float64) string {
+ // Special cases:
+ // NaN = "NaN"
+ // +Inf = "+Infinity"
+ // -Inf = "-Infinity"
+ if math.IsNaN(n) {
+ return "NaN"
+ }
+ if n > math.MaxFloat64 {
+ return "Infinity"
+ }
+ if n < (0.0 - math.MaxFloat64) {
+ return "-Infinity"
+ }
+
+ // default format
+ precision := 2
+ decimalStr := "."
+ thousandStr := ","
+ positiveStr := ""
+ negativeStr := "-"
+
+ if len(format) > 0 {
+ format := []rune(format)
+
+ // If there is an explicit format directive,
+ // then default values are these:
+ precision = 9
+ thousandStr = ""
+
+ // collect indices of meaningful formatting directives
+ // (anything that is not a '#'/'0' digit specifier)
+ formatIndx := []int{}
+ for i, char := range format {
+ if char != '#' && char != '0' {
+ formatIndx = append(formatIndx, i)
+ }
+ }
+
+ if len(formatIndx) > 0 {
+ // Directive at index 0:
+ // Must be a '+'
+ // Raise an error if not the case
+ // index: 0123456789
+ // +0.000,000
+ // +000,000.0
+ // +0000.00
+ // +0000
+ if formatIndx[0] == 0 {
+ if format[formatIndx[0]] != '+' {
+ panic("RenderFloat(): invalid positive sign directive")
+ }
+ positiveStr = "+"
+ formatIndx = formatIndx[1:]
+ }
+
+ // Two directives:
+ // First is thousands separator
+ // Raise an error if not followed by 3-digit
+ // 0123456789
+ // 0.000,000
+ // 000,000.00
+ if len(formatIndx) == 2 {
+ if (formatIndx[1] - formatIndx[0]) != 4 {
+ panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers")
+ }
+ thousandStr = string(format[formatIndx[0]])
+ formatIndx = formatIndx[1:]
+ }
+
+ // One directive:
+ // Directive is decimal separator
+ // The number of digit-specifier following the separator indicates wanted precision
+ // 0123456789
+ // 0.00
+ // 000,0000
+ if len(formatIndx) == 1 {
+ decimalStr = string(format[formatIndx[0]])
+ precision = len(format) - formatIndx[0] - 1
+ }
+ }
+ }
+
+ // generate sign part
+ // (magnitudes below 1e-9 are flushed to exactly zero, unsigned)
+ var signStr string
+ if n >= 0.000000001 {
+ signStr = positiveStr
+ } else if n <= -0.000000001 {
+ signStr = negativeStr
+ n = -n
+ } else {
+ signStr = ""
+ n = 0.0
+ }
+
+ // split number into integer and fractional parts
+ // (the rounder nudges the value so the truncation below rounds half up)
+ intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision])
+
+ // generate integer part string
+ intStr := strconv.FormatInt(int64(intf), 10)
+
+ // add thousand separator if required
+ if len(thousandStr) > 0 {
+ for i := len(intStr); i > 3; {
+ i -= 3
+ intStr = intStr[:i] + thousandStr + intStr[i:]
+ }
+ }
+
+ // no fractional part, we can leave now
+ if precision == 0 {
+ return signStr + intStr
+ }
+
+ // generate fractional part
+ fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision]))
+ // may need padding
+ if len(fracStr) < precision {
+ fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr
+ }
+
+ return signStr + intStr + decimalStr + fracStr
+}
+
+// FormatInteger produces a formatted number as string.
+// See FormatFloat.
+func FormatInteger(format string, n int) string {
+ return FormatFloat(format, float64(n))
+}
diff --git a/vendor/github.com/dustin/go-humanize/ordinals.go b/vendor/github.com/dustin/go-humanize/ordinals.go
new file mode 100644
index 00000000..43d88a86
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/ordinals.go
@@ -0,0 +1,25 @@
+package humanize
+
+import "strconv"
+
+// Ordinal gives you the input number in a rank/ordinal format.
+//
+// Ordinal(3) -> 3rd
+//
+// NOTE(review): negative inputs always get "th", since Go's % keeps the
+// sign of the dividend so no case below matches.
+func Ordinal(x int) string {
+ suffix := "th"
+ // 1st/2nd/3rd — except the teens (11th, 12th, 13th), which keep "th".
+ switch x % 10 {
+ case 1:
+ if x%100 != 11 {
+ suffix = "st"
+ }
+ case 2:
+ if x%100 != 12 {
+ suffix = "nd"
+ }
+ case 3:
+ if x%100 != 13 {
+ suffix = "rd"
+ }
+ }
+ return strconv.Itoa(x) + suffix
+}
diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go
new file mode 100644
index 00000000..8b850198
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/si.go
@@ -0,0 +1,127 @@
+package humanize
+
+import (
+ "errors"
+ "math"
+ "regexp"
+ "strconv"
+)
+
+// siPrefixTable maps a base-10 exponent (always a multiple of 3) to its
+// SI prefix. Keys are float64 so ComputeSI can index the table directly
+// with the result of its math.Floor arithmetic.
+var siPrefixTable = map[float64]string{
+ -30: "q", // quecto
+ -27: "r", // ronto
+ -24: "y", // yocto
+ -21: "z", // zepto
+ -18: "a", // atto
+ -15: "f", // femto
+ -12: "p", // pico
+ -9: "n", // nano
+ -6: "µ", // micro
+ -3: "m", // milli
+ 0: "",
+ 3: "k", // kilo
+ 6: "M", // mega
+ 9: "G", // giga
+ 12: "T", // tera
+ 15: "P", // peta
+ 18: "E", // exa
+ 21: "Z", // zetta
+ 24: "Y", // yotta
+ 27: "R", // ronna
+ 30: "Q", // quetta
+}
+
+// revSIPrefixTable maps prefix -> multiplier (e.g. "k" -> 1e3) for ParseSI.
+var revSIPrefixTable = revfmap(siPrefixTable)
+
+// revfmap reverses the map and precomputes the power multiplier
+func revfmap(in map[float64]string) map[string]float64 {
+ rv := map[string]float64{}
+ for k, v := range in {
+ rv[v] = math.Pow(10, k)
+ }
+ return rv
+}
+
+// riParseRegex matches "<number> <optional SI prefix><unit>"; built in init.
+var riParseRegex *regexp.Regexp
+
+func init() {
+ // Build a character class from every prefix. Map iteration order is
+ // random but irrelevant inside [...]; the empty "" entry contributes
+ // nothing, and the trailing '?' makes the prefix optional.
+ ri := `^([\-0-9.]+)\s?([`
+ for _, v := range siPrefixTable {
+ ri += v
+ }
+ ri += `]?)(.*)`
+
+ riParseRegex = regexp.MustCompile(ri)
+}
+
+// ComputeSI finds the most appropriate SI prefix for the given number
+// and returns the prefix along with the value adjusted to be within
+// that prefix.
+//
+// See also: SI, ParseSI.
+//
+// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p")
+func ComputeSI(input float64) (float64, string) {
+ if input == 0 {
+ return 0, ""
+ }
+ mag := math.Abs(input)
+ // Round the base-10 exponent down to a multiple of 3 (the SI steps).
+ // logn is a helper defined elsewhere in this package (log of mag in
+ // the given base — here base 10).
+ exponent := math.Floor(logn(mag, 10))
+ exponent = math.Floor(exponent/3) * 3
+
+ value := mag / math.Pow(10, exponent)
+
+ // Handle special case where value is exactly 1000.0
+ // Should return 1 M instead of 1000 k
+ if value == 1000.0 {
+ exponent += 3
+ value = mag / math.Pow(10, exponent)
+ }
+
+ // Restore the original sign on the scaled value.
+ value = math.Copysign(value, input)
+
+ // Exponents outside the table fall back to the empty prefix.
+ prefix := siPrefixTable[exponent]
+ return value, prefix
+}
+
+// SI returns a string with default formatting.
+//
+// SI uses Ftoa to format float value, removing trailing zeros.
+//
+// See also: ComputeSI, ParseSI.
+//
+// e.g. SI(1000000, "B") -> 1 MB
+// e.g. SI(2.2345e-12, "F") -> 2.2345 pF
+func SI(input float64, unit string) string {
+ value, prefix := ComputeSI(input)
+ // A single space always separates the number from prefix+unit.
+ return Ftoa(value) + " " + prefix + unit
+}
+
+// SIWithDigits works like SI but limits the resulting string to the
+// given number of decimal places.
+//
+// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB
+// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF
+func SIWithDigits(input float64, decimals int, unit string) string {
+ value, prefix := ComputeSI(input)
+ // FtoaWithDigits truncates to `decimals` places, then strips zero tails.
+ return FtoaWithDigits(value, decimals) + " " + prefix + unit
+}
+
+// errInvalid is returned by ParseSI when the input doesn't match the
+// expected "<number> <prefix><unit>" shape.
+var errInvalid = errors.New("invalid input")
+
+// ParseSI parses an SI string back into the number and unit.
+//
+// See also: SI, ComputeSI.
+//
+// e.g. ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil)
+func ParseSI(input string) (float64, string, error) {
+ // found = [whole match, number, optional prefix, unit].
+ found := riParseRegex.FindStringSubmatch(input)
+ if len(found) != 4 {
+ return 0, "", errInvalid
+ }
+ // A missing prefix captures "", which maps to a multiplier of 1.
+ mag := revSIPrefixTable[found[2]]
+ unit := found[3]
+
+ base, err := strconv.ParseFloat(found[1], 64)
+ return base * mag, unit, err
+}
diff --git a/vendor/github.com/dustin/go-humanize/times.go b/vendor/github.com/dustin/go-humanize/times.go
new file mode 100644
index 00000000..dd3fbf5e
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/times.go
@@ -0,0 +1,117 @@
+package humanize
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "time"
+)
+
+// Seconds-based time units
+//
+// NOTE(review): these are deliberate calendar approximations — Month is a
+// fixed 30 days and Year is 12*Month = 360 days — so long spans drift from
+// calendar reality. LongTime (37 years) is the "a long while" cutoff used
+// by defaultMagnitudes.
+const (
+ Day = 24 * time.Hour
+ Week = 7 * Day
+ Month = 30 * Day
+ Year = 12 * Month
+ LongTime = 37 * Year
+)
+
+// Time formats a time into a relative string.
+//
+// Time(someT) -> "3 weeks ago"
+func Time(then time.Time) string {
+ return RelTime(then, time.Now(), "ago", "from now")
+}
+
+// A RelTimeMagnitude struct contains a relative time point at which
+// the relative format of time will switch to a new format string. A
+// slice of these in ascending order by their "D" field is passed to
+// CustomRelTime to format durations.
+//
+// The Format field is a string that may contain a "%s" which will be
+// replaced with the appropriate signed label (e.g. "ago" or "from
+// now") and a "%d" that will be replaced by the quantity.
+//
+// The DivBy field is the amount of time the time difference must be
+// divided by in order to display correctly.
+//
+// e.g. if D is 2*time.Minute and you want to display "%d minutes %s"
+// DivBy should be time.Minute so whatever the duration is will be
+// expressed in minutes.
+type RelTimeMagnitude struct {
+ D time.Duration
+ Format string
+ DivBy time.Duration
+}
+
+// defaultMagnitudes is the table used by RelTime, ordered by ascending
+// threshold D as CustomRelTime's binary search requires. Entries whose
+// Format spells out the quantity ("1 second", "2 years") have no %d verb
+// and therefore use DivBy 1.
+var defaultMagnitudes = []RelTimeMagnitude{
+ {time.Second, "now", time.Second},
+ {2 * time.Second, "1 second %s", 1},
+ {time.Minute, "%d seconds %s", time.Second},
+ {2 * time.Minute, "1 minute %s", 1},
+ {time.Hour, "%d minutes %s", time.Minute},
+ {2 * time.Hour, "1 hour %s", 1},
+ {Day, "%d hours %s", time.Hour},
+ {2 * Day, "1 day %s", 1},
+ {Week, "%d days %s", Day},
+ {2 * Week, "1 week %s", 1},
+ {Month, "%d weeks %s", Week},
+ {2 * Month, "1 month %s", 1},
+ {Year, "%d months %s", Month},
+ {18 * Month, "1 year %s", 1},
+ {2 * Year, "2 years %s", 1},
+ {LongTime, "%d years %s", Year},
+ {math.MaxInt64, "a long while %s", 1},
+}
+
+// RelTime formats a time into a relative string.
+//
+// It takes two times and two labels. In addition to the generic time
+// delta string (e.g. 5 minutes), the labels are applied so that
+// the label corresponding to the smaller time is applied.
+//
+// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier"
+func RelTime(a, b time.Time, albl, blbl string) string {
+ return CustomRelTime(a, b, albl, blbl, defaultMagnitudes)
+}
+
+// CustomRelTime formats a time into a relative string.
+//
+// It takes two times, two labels and a table of relative time formats.
+// In addition to the generic time delta string (e.g. 5 minutes), the
+// labels are applied so that the label corresponding to the
+// smaller time is applied.
+func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string {
+ // Assume a precedes b; flip the label and delta if it doesn't.
+ lbl := albl
+ diff := b.Sub(a)
+
+ if a.After(b) {
+ lbl = blbl
+ diff = a.Sub(b)
+ }
+
+ // magnitudes must be sorted ascending by D: binary-search for the
+ // first entry whose threshold exceeds the delta.
+ n := sort.Search(len(magnitudes), func(i int) bool {
+ return magnitudes[i].D > diff
+ })
+
+ // Delta beyond the last threshold: fall back to the final format.
+ if n >= len(magnitudes) {
+ n = len(magnitudes) - 1
+ }
+ mag := magnitudes[n]
+ args := []interface{}{}
+ escaped := false
+ // Scan the format string and build the Sprintf args in the order the
+ // verbs appear: %s -> label, %d -> delta divided by DivBy.
+ for _, ch := range mag.Format {
+ if escaped {
+ switch ch {
+ case 's':
+ args = append(args, lbl)
+ case 'd':
+ args = append(args, diff/mag.DivBy)
+ }
+ escaped = false
+ } else {
+ escaped = ch == '%'
+ }
+ }
+ return fmt.Sprintf(mag.Format, args...)
+}
diff --git a/vendor/github.com/go-jose/go-jose/v4/.gitignore b/vendor/github.com/go-jose/go-jose/v4/.gitignore
new file mode 100644
index 00000000..eb29ebae
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/.gitignore
@@ -0,0 +1,2 @@
+jose-util/jose-util
+jose-util.t.err
\ No newline at end of file
diff --git a/vendor/github.com/go-jose/go-jose/v4/.golangci.yml b/vendor/github.com/go-jose/go-jose/v4/.golangci.yml
new file mode 100644
index 00000000..2a577a8f
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/.golangci.yml
@@ -0,0 +1,53 @@
+# https://github.com/golangci/golangci-lint
+
+run:
+ skip-files:
+ - doc_test.go
+ modules-download-mode: readonly
+
+linters:
+ enable-all: true
+ disable:
+ - gochecknoglobals
+ - goconst
+ - lll
+ - maligned
+ - nakedret
+ - scopelint
+ - unparam
+ - funlen # added in 1.18 (requires go-jose changes before it can be enabled)
+
+linters-settings:
+ gocyclo:
+ min-complexity: 35
+
+issues:
+ exclude-rules:
+ - text: "don't use ALL_CAPS in Go names"
+ linters:
+ - golint
+ - text: "hardcoded credentials"
+ linters:
+ - gosec
+ - text: "weak cryptographic primitive"
+ linters:
+ - gosec
+ - path: json/
+ linters:
+ - dupl
+ - errcheck
+ - gocritic
+ - gocyclo
+ - golint
+ - govet
+ - ineffassign
+ - staticcheck
+ - structcheck
+ - stylecheck
+ - unused
+ - path: _test\.go
+ linters:
+ - scopelint
+ - path: jwk.go
+ linters:
+ - gocyclo
diff --git a/vendor/github.com/go-jose/go-jose/v4/.travis.yml b/vendor/github.com/go-jose/go-jose/v4/.travis.yml
new file mode 100644
index 00000000..48de631b
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/.travis.yml
@@ -0,0 +1,33 @@
+language: go
+
+matrix:
+ fast_finish: true
+ allow_failures:
+ - go: tip
+
+go:
+ - "1.13.x"
+ - "1.14.x"
+ - tip
+
+before_script:
+ - export PATH=$HOME/.local/bin:$PATH
+
+before_install:
+ - go get -u github.com/mattn/goveralls github.com/wadey/gocovmerge
+ - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.18.0
+ - pip install cram --user
+
+script:
+ - go test -v -covermode=count -coverprofile=profile.cov .
+ - go test -v -covermode=count -coverprofile=cryptosigner/profile.cov ./cryptosigner
+ - go test -v -covermode=count -coverprofile=cipher/profile.cov ./cipher
+ - go test -v -covermode=count -coverprofile=jwt/profile.cov ./jwt
+ - go test -v ./json # no coverage for forked encoding/json package
+ - golangci-lint run
+ - cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util
+ - cd ..
+
+after_success:
+ - gocovmerge *.cov */*.cov > merged.coverprofile
+ - goveralls -coverprofile merged.coverprofile -service=travis-ci
diff --git a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md
new file mode 100644
index 00000000..28bdd2fc
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md
@@ -0,0 +1,72 @@
+# v4.0.1
+
+## Fixed
+
+ - An attacker could send a JWE containing compressed data that used large
+ amounts of memory and CPU when decompressed by `Decrypt` or `DecryptMulti`.
+ Those functions now return an error if the decompressed data would exceed
+ 250kB or 10x the compressed size (whichever is larger). Thanks to
+ Enze Wang@Alioth and Jianjun Chen@Zhongguancun Lab (@zer0yu and @chenjj)
+ for reporting.
+
+# v4.0.0
+
+This release makes some breaking changes in order to more thoroughly
+address the vulnerabilities discussed in [Three New Attacks Against JSON Web
+Tokens][1], "Sign/encrypt confusion", "Billion hash attack", and "Polyglot
+token".
+
+## Changed
+
+ - Limit JWT encryption types (exclude password or public key types) (#78)
+ - Enforce minimum length for HMAC keys (#85)
+ - jwt: match any audience in a list, rather than requiring all audiences (#81)
+ - jwt: accept only Compact Serialization (#75)
+ - jws: Add expected algorithms for signatures (#74)
+ - Require specifying expected algorithms for ParseEncrypted,
+ ParseSigned, ParseDetached, jwt.ParseEncrypted, jwt.ParseSigned,
+ jwt.ParseSignedAndEncrypted (#69, #74)
+ - Usually there is a small, known set of appropriate algorithms for a program
+ to use and it's a mistake to allow unexpected algorithms. For instance the
+ "billion hash attack" relies in part on programs accepting the PBES2
+ encryption algorithm and doing the necessary work even if they weren't
+ specifically configured to allow PBES2.
+ - Revert "Strip padding off base64 strings" (#82)
+ - The specs require base64url encoding without padding.
+ - Minimum supported Go version is now 1.21
+
+## Added
+
+ - ParseSignedCompact, ParseSignedJSON, ParseEncryptedCompact, ParseEncryptedJSON.
+ - These allow parsing a specific serialization, as opposed to ParseSigned and
+ ParseEncrypted, which try to automatically detect which serialization was
+ provided. It's common to require a specific serialization for a specific
+ protocol - for instance JWT requires Compact serialization.
+
+[1]: https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf
+
+# v3.0.2
+
+## Fixed
+
+ - DecryptMulti: handle decompression error (#19)
+
+## Changed
+
+ - jwe/CompactSerialize: improve performance (#67)
+ - Increase the default number of PBKDF2 iterations to 600k (#48)
+ - Return the proper algorithm for ECDSA keys (#45)
+
+## Added
+
+ - Add Thumbprint support for opaque signers (#38)
+
+# v3.0.1
+
+## Fixed
+
+ - Security issue: an attacker specifying a large "p2c" value can cause
+ JSONWebEncryption.Decrypt and JSONWebEncryption.DecryptMulti to consume large
+ amounts of CPU, causing a DoS. Thanks to Matt Schwager (@mschwager) for the
+ disclosure and to Tom Tervoort for originally publishing the category of attack.
+ https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf
diff --git a/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md b/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md
new file mode 100644
index 00000000..b63e1f8f
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md
@@ -0,0 +1,15 @@
+# Contributing
+
+If you would like to contribute code to go-jose you can do so through GitHub by
+forking the repository and sending a pull request.
+
+When submitting code, please make every effort to follow existing conventions
+and style in order to keep the code as readable as possible. Please also make
+sure all tests pass by running `go test`, and format your code with `go fmt`.
+We also recommend using `golint` and `errcheck`.
+
+Before your code can be accepted into the project you must also sign the
+Individual Contributor License Agreement. We use [cla-assistant.io][1] and you
+will be prompted to sign once a pull request is opened.
+
+[1]: https://cla-assistant.io/
diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/github.com/go-jose/go-jose/v4/LICENSE
similarity index 100%
rename from vendor/google.golang.org/appengine/LICENSE
rename to vendor/github.com/go-jose/go-jose/v4/LICENSE
diff --git a/vendor/github.com/go-jose/go-jose/v4/README.md b/vendor/github.com/go-jose/go-jose/v4/README.md
new file mode 100644
index 00000000..79a7c5ec
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/README.md
@@ -0,0 +1,114 @@
+# Go JOSE
+
+[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4)
+[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4/jwt.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4/jwt)
+[![license](https://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE)
+[![test](https://img.shields.io/github/checks-status/go-jose/go-jose/v4)](https://github.com/go-jose/go-jose/actions)
+
+Package jose aims to provide an implementation of the Javascript Object Signing
+and Encryption set of standards. This includes support for JSON Web Encryption,
+JSON Web Signature, and JSON Web Token standards.
+
+**Disclaimer**: This library contains encryption software that is subject to
+the U.S. Export Administration Regulations. You may not export, re-export,
+transfer or download this code or any part of it in violation of any United
+States law, directive or regulation. In particular this software may not be
+exported or re-exported in any form or on any media to Iran, North Sudan,
+Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any
+US maintained blocked list.
+
+## Overview
+
+The implementation follows the
+[JSON Web Encryption](https://dx.doi.org/10.17487/RFC7516) (RFC 7516),
+[JSON Web Signature](https://dx.doi.org/10.17487/RFC7515) (RFC 7515), and
+[JSON Web Token](https://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications.
+Tables of supported algorithms are shown below. The library supports both
+the compact and JWS/JWE JSON Serialization formats, and has optional support for
+multiple recipients. It also comes with a small command-line utility
+([`jose-util`](https://pkg.go.dev/github.com/go-jose/go-jose/jose-util))
+for dealing with JOSE messages in a shell.
+
+**Note**: We use a forked version of the `encoding/json` package from the Go
+standard library which uses case-sensitive matching for member names (instead
+of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)).
+This is to avoid differences in interpretation of messages between go-jose and
+libraries in other languages.
+
+### Versions
+
+[Version 4](https://github.com/go-jose/go-jose)
+([branch](https://github.com/go-jose/go-jose/tree/main),
+[doc](https://pkg.go.dev/github.com/go-jose/go-jose/v4), [releases](https://github.com/go-jose/go-jose/releases)) is the current stable version:
+
+ import "github.com/go-jose/go-jose/v4"
+
+The old [square/go-jose](https://github.com/square/go-jose) repo contains the prior v1 and v2 versions, which
+are still usable but not actively developed anymore.
+
+Version 3, in this repo, is still receiving security fixes but not functionality
+updates.
+
+### Supported algorithms
+
+See below for a table of supported algorithms. Algorithm identifiers match
+the names in the [JSON Web Algorithms](https://dx.doi.org/10.17487/RFC7518)
+standard where possible. The Godoc reference has a list of constants.
+
+ Key encryption | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSA-PKCS#1v1.5 | RSA1_5
+ RSA-OAEP | RSA-OAEP, RSA-OAEP-256
+ AES key wrap | A128KW, A192KW, A256KW
+ AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW
+ ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW
+ ECDH-ES (direct) | ECDH-ES1
+ Direct encryption | dir1
+
+1. Not supported in multi-recipient mode
+
+ Signing / MAC | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSASSA-PKCS#1v1.5 | RS256, RS384, RS512
+ RSASSA-PSS | PS256, PS384, PS512
+ HMAC | HS256, HS384, HS512
+ ECDSA | ES256, ES384, ES512
+ Ed25519 | EdDSA2
+
+2. Only available in version 2 of the package
+
+ Content encryption | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512
+ AES-GCM | A128GCM, A192GCM, A256GCM
+
+ Compression | Algorithm identifiers(s)
+ :------------------------- | -------------------------------
+ DEFLATE (RFC 1951) | DEF
+
+### Supported key types
+
+See below for a table of supported key types. These are understood by the
+library, and can be passed to corresponding functions such as `NewEncrypter` or
+`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which
+allows attaching a key id.
+
+ Algorithm(s) | Corresponding types
+ :------------------------- | -------------------------------
+ RSA | *[rsa.PublicKey](https://pkg.go.dev/crypto/rsa/#PublicKey), *[rsa.PrivateKey](https://pkg.go.dev/crypto/rsa/#PrivateKey)
+ ECDH, ECDSA | *[ecdsa.PublicKey](https://pkg.go.dev/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](https://pkg.go.dev/crypto/ecdsa/#PrivateKey)
+ EdDSA1 | [ed25519.PublicKey](https://pkg.go.dev/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://pkg.go.dev/crypto/ed25519#PrivateKey)
+ AES, HMAC | []byte
+
+1. Only available in version 2 or later of the package
+
+## Examples
+
+[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4)
+[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4/jwt.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4/jwt)
+
+Examples can be found in the Godoc
+reference for this package. The
+[`jose-util`](https://github.com/go-jose/go-jose/tree/v4/jose-util)
+subdirectory also contains a small command-line utility which might be useful
+as an example as well.
diff --git a/vendor/github.com/go-jose/go-jose/v4/SECURITY.md b/vendor/github.com/go-jose/go-jose/v4/SECURITY.md
new file mode 100644
index 00000000..2f18a75a
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/SECURITY.md
@@ -0,0 +1,13 @@
+# Security Policy
+This document explains how to contact the Let's Encrypt security team to report security vulnerabilities.
+
+## Supported Versions
+| Version | Supported |
+| ------- | ----------|
+| >= v3 | ✓ |
+| v2 | ✗ |
+| v1 | ✗ |
+
+## Reporting a vulnerability
+
+Please see [https://letsencrypt.org/contact/#security](https://letsencrypt.org/contact/#security) for the email address to report a vulnerability. Ensure that the subject line for your report contains the word `vulnerability` and is descriptive. Your email should be acknowledged within 24 hours. If you do not receive a response within 24 hours, please follow-up again with another email.
diff --git a/vendor/github.com/go-jose/go-jose/v4/asymmetric.go b/vendor/github.com/go-jose/go-jose/v4/asymmetric.go
new file mode 100644
index 00000000..f8d5774e
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/asymmetric.go
@@ -0,0 +1,595 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto"
+ "crypto/aes"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "math/big"
+
+ josecipher "github.com/go-jose/go-jose/v4/cipher"
+ "github.com/go-jose/go-jose/v4/json"
+)
+
+// A generic RSA-based encrypter/verifier
+type rsaEncrypterVerifier struct {
+ publicKey *rsa.PublicKey
+}
+
+// A generic RSA-based decrypter/signer
+type rsaDecrypterSigner struct {
+ privateKey *rsa.PrivateKey
+}
+
+// A generic EC-based encrypter/verifier
+type ecEncrypterVerifier struct {
+ publicKey *ecdsa.PublicKey
+}
+
+type edEncrypterVerifier struct {
+ publicKey ed25519.PublicKey
+}
+
+// A key generator for ECDH-ES
+type ecKeyGenerator struct {
+ size int
+ algID string
+ publicKey *ecdsa.PublicKey
+}
+
+// A generic EC-based decrypter/signer
+type ecDecrypterSigner struct {
+ privateKey *ecdsa.PrivateKey
+}
+
+type edDecrypterSigner struct {
+ privateKey ed25519.PrivateKey
+}
+
+// newRSARecipient creates recipientKeyInfo based on the given key.
+func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) {
+ // Verify that key management algorithm is supported by this encrypter
+ switch keyAlg {
+ case RSA1_5, RSA_OAEP, RSA_OAEP_256:
+ default:
+ return recipientKeyInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if publicKey == nil {
+ return recipientKeyInfo{}, errors.New("invalid public key")
+ }
+
+ return recipientKeyInfo{
+ keyAlg: keyAlg,
+ keyEncrypter: &rsaEncrypterVerifier{
+ publicKey: publicKey,
+ },
+ }, nil
+}
+
+// newRSASigner creates a recipientSigInfo based on the given key.
+func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) {
+ // Verify that key management algorithm is supported by this encrypter
+ switch sigAlg {
+ case RS256, RS384, RS512, PS256, PS384, PS512:
+ default:
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if privateKey == nil {
+ return recipientSigInfo{}, errors.New("invalid private key")
+ }
+
+ return recipientSigInfo{
+ sigAlg: sigAlg,
+ publicKey: staticPublicKey(&JSONWebKey{
+ Key: privateKey.Public(),
+ }),
+ signer: &rsaDecrypterSigner{
+ privateKey: privateKey,
+ },
+ }, nil
+}
+
+func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) {
+ if sigAlg != EdDSA {
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if privateKey == nil {
+ return recipientSigInfo{}, errors.New("invalid private key")
+ }
+ return recipientSigInfo{
+ sigAlg: sigAlg,
+ publicKey: staticPublicKey(&JSONWebKey{
+ Key: privateKey.Public(),
+ }),
+ signer: &edDecrypterSigner{
+ privateKey: privateKey,
+ },
+ }, nil
+}
+
+// newECDHRecipient creates recipientKeyInfo based on the given key.
+func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) {
+ // Verify that key management algorithm is supported by this encrypter
+ switch keyAlg {
+ case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
+ default:
+ return recipientKeyInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
+ return recipientKeyInfo{}, errors.New("invalid public key")
+ }
+
+ return recipientKeyInfo{
+ keyAlg: keyAlg,
+ keyEncrypter: &ecEncrypterVerifier{
+ publicKey: publicKey,
+ },
+ }, nil
+}
+
+// newECDSASigner creates a recipientSigInfo based on the given key.
+func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) {
+ // Verify that key management algorithm is supported by this encrypter
+ switch sigAlg {
+ case ES256, ES384, ES512:
+ default:
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if privateKey == nil {
+ return recipientSigInfo{}, errors.New("invalid private key")
+ }
+
+ return recipientSigInfo{
+ sigAlg: sigAlg,
+ publicKey: staticPublicKey(&JSONWebKey{
+ Key: privateKey.Public(),
+ }),
+ signer: &ecDecrypterSigner{
+ privateKey: privateKey,
+ },
+ }, nil
+}
+
+// Encrypt the given payload and update the object.
+func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
+ encryptedKey, err := ctx.encrypt(cek, alg)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ return recipientInfo{
+ encryptedKey: encryptedKey,
+ header: &rawHeader{},
+ }, nil
+}
+
+// Encrypt the given payload. Based on the key encryption algorithm,
+// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
+func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) {
+ switch alg {
+ case RSA1_5:
+ return rsa.EncryptPKCS1v15(RandReader, ctx.publicKey, cek)
+ case RSA_OAEP:
+ return rsa.EncryptOAEP(sha1.New(), RandReader, ctx.publicKey, cek, []byte{})
+ case RSA_OAEP_256:
+ return rsa.EncryptOAEP(sha256.New(), RandReader, ctx.publicKey, cek, []byte{})
+ }
+
+ return nil, ErrUnsupportedAlgorithm
+}
+
+// Decrypt the given payload and return the content encryption key.
+func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
+ return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator)
+}
+
+// Decrypt the given payload. Based on the key encryption algorithm,
+// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
+func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) {
+ // Note: The random reader on decrypt operations is only used for blinding,
+ // so stubbing is meanlingless (hence the direct use of rand.Reader).
+ switch alg {
+ case RSA1_5:
+ defer func() {
+ // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload
+ // because of an index out of bounds error, which we want to ignore.
+ // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover()
+ // only exists for preventing crashes with unpatched versions.
+ // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k
+ // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33
+ _ = recover()
+ }()
+
+ // Perform some input validation.
+ keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8
+ if keyBytes != len(jek) {
+ // Input size is incorrect, the encrypted payload should always match
+ // the size of the public modulus (e.g. using a 2048 bit key will
+ // produce 256 bytes of output). Reject this since it's invalid input.
+ return nil, ErrCryptoFailure
+ }
+
+ cek, _, err := generator.genKey()
+ if err != nil {
+ return nil, ErrCryptoFailure
+ }
+
+ // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to
+ // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing
+ // the Million Message Attack on Cryptographic Message Syntax". We are
+ // therefore deliberately ignoring errors here.
+ _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek)
+
+ return cek, nil
+ case RSA_OAEP:
+ // Use rand.Reader for RSA blinding
+ return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{})
+ case RSA_OAEP_256:
+ // Use rand.Reader for RSA blinding
+ return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{})
+ }
+
+ return nil, ErrUnsupportedAlgorithm
+}
+
+// Sign the given payload
+func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ var hash crypto.Hash
+
+ switch alg {
+ case RS256, PS256:
+ hash = crypto.SHA256
+ case RS384, PS384:
+ hash = crypto.SHA384
+ case RS512, PS512:
+ hash = crypto.SHA512
+ default:
+ return Signature{}, ErrUnsupportedAlgorithm
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ var out []byte
+ var err error
+
+ switch alg {
+ case RS256, RS384, RS512:
+ // TODO(https://github.com/go-jose/go-jose/issues/40): As of go1.20, the
+ // random parameter is legacy and ignored, and it can be nil.
+ // https://cs.opensource.google/go/go/+/refs/tags/go1.20:src/crypto/rsa/pkcs1v15.go;l=263;bpv=0;bpt=1
+ out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed)
+ case PS256, PS384, PS512:
+ out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ })
+ }
+
+ if err != nil {
+ return Signature{}, err
+ }
+
+ return Signature{
+ Signature: out,
+ protected: &rawHeader{},
+ }, nil
+}
+
+// Verify the given payload
+func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
+ var hash crypto.Hash
+
+ switch alg {
+ case RS256, PS256:
+ hash = crypto.SHA256
+ case RS384, PS384:
+ hash = crypto.SHA384
+ case RS512, PS512:
+ hash = crypto.SHA512
+ default:
+ return ErrUnsupportedAlgorithm
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ switch alg {
+ case RS256, RS384, RS512:
+ return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature)
+ case PS256, PS384, PS512:
+ return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil)
+ }
+
+ return ErrUnsupportedAlgorithm
+}
+
+// Encrypt the given payload and update the object.
+func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
+ switch alg {
+ case ECDH_ES:
+ // ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key.
+ return recipientInfo{
+ header: &rawHeader{},
+ }, nil
+ case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
+ default:
+ return recipientInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ generator := ecKeyGenerator{
+ algID: string(alg),
+ publicKey: ctx.publicKey,
+ }
+
+ switch alg {
+ case ECDH_ES_A128KW:
+ generator.size = 16
+ case ECDH_ES_A192KW:
+ generator.size = 24
+ case ECDH_ES_A256KW:
+ generator.size = 32
+ }
+
+ kek, header, err := generator.genKey()
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ block, err := aes.NewCipher(kek)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ jek, err := josecipher.KeyWrap(block, cek)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ return recipientInfo{
+ encryptedKey: jek,
+ header: &header,
+ }, nil
+}
+
+// Get key size for EC key generator
+func (ctx ecKeyGenerator) keySize() int {
+ return ctx.size
+}
+
+// Get a content encryption key for ECDH-ES
+func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) {
+ priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, RandReader)
+ if err != nil {
+ return nil, rawHeader{}, err
+ }
+
+ out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size)
+
+ b, err := json.Marshal(&JSONWebKey{
+ Key: &priv.PublicKey,
+ })
+ if err != nil {
+ return nil, nil, err
+ }
+
+ headers := rawHeader{
+ headerEPK: makeRawMessage(b),
+ }
+
+ return out, headers, nil
+}
+
+// Decrypt the given payload and return the content encryption key.
+func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
+ epk, err := headers.getEPK()
+ if err != nil {
+ return nil, errors.New("go-jose/go-jose: invalid epk header")
+ }
+ if epk == nil {
+ return nil, errors.New("go-jose/go-jose: missing epk header")
+ }
+
+ publicKey, ok := epk.Key.(*ecdsa.PublicKey)
+ if publicKey == nil || !ok {
+ return nil, errors.New("go-jose/go-jose: invalid epk header")
+ }
+
+ if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
+ return nil, errors.New("go-jose/go-jose: invalid public key in epk header")
+ }
+
+ apuData, err := headers.getAPU()
+ if err != nil {
+ return nil, errors.New("go-jose/go-jose: invalid apu header")
+ }
+ apvData, err := headers.getAPV()
+ if err != nil {
+ return nil, errors.New("go-jose/go-jose: invalid apv header")
+ }
+
+ deriveKey := func(algID string, size int) []byte {
+ return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size)
+ }
+
+ var keySize int
+
+ algorithm := headers.getAlgorithm()
+ switch algorithm {
+ case ECDH_ES:
+ // ECDH-ES uses direct key agreement, no key unwrapping necessary.
+ return deriveKey(string(headers.getEncryption()), generator.keySize()), nil
+ case ECDH_ES_A128KW:
+ keySize = 16
+ case ECDH_ES_A192KW:
+ keySize = 24
+ case ECDH_ES_A256KW:
+ keySize = 32
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+
+ key := deriveKey(string(algorithm), keySize)
+ block, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+
+ return josecipher.KeyUnwrap(block, recipient.encryptedKey)
+}
+
+func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ if alg != EdDSA {
+ return Signature{}, ErrUnsupportedAlgorithm
+ }
+
+ sig, err := ctx.privateKey.Sign(RandReader, payload, crypto.Hash(0))
+ if err != nil {
+ return Signature{}, err
+ }
+
+ return Signature{
+ Signature: sig,
+ protected: &rawHeader{},
+ }, nil
+}
+
+func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
+ if alg != EdDSA {
+ return ErrUnsupportedAlgorithm
+ }
+ ok := ed25519.Verify(ctx.publicKey, payload, signature)
+ if !ok {
+ return errors.New("go-jose/go-jose: ed25519 signature failed to verify")
+ }
+ return nil
+}
+
+// Sign the given payload
+func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ var expectedBitSize int
+ var hash crypto.Hash
+
+ switch alg {
+ case ES256:
+ expectedBitSize = 256
+ hash = crypto.SHA256
+ case ES384:
+ expectedBitSize = 384
+ hash = crypto.SHA384
+ case ES512:
+ expectedBitSize = 521
+ hash = crypto.SHA512
+ }
+
+ curveBits := ctx.privateKey.Curve.Params().BitSize
+ if expectedBitSize != curveBits {
+ return Signature{}, fmt.Errorf("go-jose/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits)
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ r, s, err := ecdsa.Sign(RandReader, ctx.privateKey, hashed)
+ if err != nil {
+ return Signature{}, err
+ }
+
+ keyBytes := curveBits / 8
+ if curveBits%8 > 0 {
+ keyBytes++
+ }
+
+ // We serialize the outputs (r and s) into big-endian byte arrays and pad
+ // them with zeros on the left to make sure the sizes work out. Both arrays
+ // must be keyBytes long, and the output must be 2*keyBytes long.
+ rBytes := r.Bytes()
+ rBytesPadded := make([]byte, keyBytes)
+ copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
+
+ sBytes := s.Bytes()
+ sBytesPadded := make([]byte, keyBytes)
+ copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
+
+ out := append(rBytesPadded, sBytesPadded...)
+
+ return Signature{
+ Signature: out,
+ protected: &rawHeader{},
+ }, nil
+}
+
+// Verify the given payload
+func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
+ var keySize int
+ var hash crypto.Hash
+
+ switch alg {
+ case ES256:
+ keySize = 32
+ hash = crypto.SHA256
+ case ES384:
+ keySize = 48
+ hash = crypto.SHA384
+ case ES512:
+ keySize = 66
+ hash = crypto.SHA512
+ default:
+ return ErrUnsupportedAlgorithm
+ }
+
+ if len(signature) != 2*keySize {
+ return fmt.Errorf("go-jose/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize)
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ r := big.NewInt(0).SetBytes(signature[:keySize])
+ s := big.NewInt(0).SetBytes(signature[keySize:])
+
+ match := ecdsa.Verify(ctx.publicKey, hashed, r, s)
+ if !match {
+ return errors.New("go-jose/go-jose: ecdsa signature failed to verify")
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go b/vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go
new file mode 100644
index 00000000..af029cec
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go
@@ -0,0 +1,196 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "bytes"
+ "crypto/cipher"
+ "crypto/hmac"
+ "crypto/sha256"
+ "crypto/sha512"
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+ "hash"
+)
+
+const (
+ nonceBytes = 16
+)
+
+// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC.
+func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) {
+ keySize := len(key) / 2
+ integrityKey := key[:keySize]
+ encryptionKey := key[keySize:]
+
+ blockCipher, err := newBlockCipher(encryptionKey)
+ if err != nil {
+ return nil, err
+ }
+
+ var hash func() hash.Hash
+ switch keySize {
+ case 16:
+ hash = sha256.New
+ case 24:
+ hash = sha512.New384
+ case 32:
+ hash = sha512.New
+ }
+
+ return &cbcAEAD{
+ hash: hash,
+ blockCipher: blockCipher,
+ authtagBytes: keySize,
+ integrityKey: integrityKey,
+ }, nil
+}
+
+// An AEAD based on CBC+HMAC
+type cbcAEAD struct {
+ hash func() hash.Hash
+ authtagBytes int
+ integrityKey []byte
+ blockCipher cipher.Block
+}
+
+func (ctx *cbcAEAD) NonceSize() int {
+ return nonceBytes
+}
+
+func (ctx *cbcAEAD) Overhead() int {
+ // Maximum overhead is block size (for padding) plus auth tag length, where
+ // the length of the auth tag is equivalent to the key size.
+ return ctx.blockCipher.BlockSize() + ctx.authtagBytes
+}
+
+// Seal encrypts and authenticates the plaintext.
+func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte {
+ // Output buffer -- must take care not to mangle plaintext input.
+ ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)]
+ copy(ciphertext, plaintext)
+ ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize())
+
+ cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce)
+
+ cbc.CryptBlocks(ciphertext, ciphertext)
+ authtag := ctx.computeAuthTag(data, nonce, ciphertext)
+
+ ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag)))
+ copy(out, ciphertext)
+ copy(out[len(ciphertext):], authtag)
+
+ return ret
+}
+
+// Open decrypts and authenticates the ciphertext.
+func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
+ if len(ciphertext) < ctx.authtagBytes {
+ return nil, errors.New("go-jose/go-jose: invalid ciphertext (too short)")
+ }
+
+ offset := len(ciphertext) - ctx.authtagBytes
+ expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset])
+ match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:])
+ if match != 1 {
+ return nil, errors.New("go-jose/go-jose: invalid ciphertext (auth tag mismatch)")
+ }
+
+ cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce)
+
+ // Make copy of ciphertext buffer, don't want to modify in place
+ buffer := append([]byte{}, ciphertext[:offset]...)
+
+ if len(buffer)%ctx.blockCipher.BlockSize() > 0 {
+ return nil, errors.New("go-jose/go-jose: invalid ciphertext (invalid length)")
+ }
+
+ cbc.CryptBlocks(buffer, buffer)
+
+ // Remove padding
+ plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize())
+ if err != nil {
+ return nil, err
+ }
+
+ ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext)))
+ copy(out, plaintext)
+
+ return ret, nil
+}
+
+// Compute an authentication tag
+func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte {
+ buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8)
+ n := 0
+ n += copy(buffer, aad)
+ n += copy(buffer[n:], nonce)
+ n += copy(buffer[n:], ciphertext)
+ binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8)
+
+ // According to documentation, Write() on hash.Hash never fails.
+ hmac := hmac.New(ctx.hash, ctx.integrityKey)
+ _, _ = hmac.Write(buffer)
+
+ return hmac.Sum(nil)[:ctx.authtagBytes]
+}
+
+// resize ensures that the given slice has a capacity of at least n bytes.
+// If the capacity of the slice is less than n, a new slice is allocated
+// and the existing data will be copied.
+func resize(in []byte, n uint64) (head, tail []byte) {
+ if uint64(cap(in)) >= n {
+ head = in[:n]
+ } else {
+ head = make([]byte, n)
+ copy(head, in)
+ }
+
+ tail = head[len(in):]
+ return
+}
+
+// Apply padding
+func padBuffer(buffer []byte, blockSize int) []byte {
+ missing := blockSize - (len(buffer) % blockSize)
+ ret, out := resize(buffer, uint64(len(buffer))+uint64(missing))
+ padding := bytes.Repeat([]byte{byte(missing)}, missing)
+ copy(out, padding)
+ return ret
+}
+
+// Remove padding
+func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) {
+ if len(buffer)%blockSize != 0 {
+ return nil, errors.New("go-jose/go-jose: invalid padding")
+ }
+
+ last := buffer[len(buffer)-1]
+ count := int(last)
+
+ if count == 0 || count > blockSize || count > len(buffer) {
+ return nil, errors.New("go-jose/go-jose: invalid padding")
+ }
+
+ padding := bytes.Repeat([]byte{last}, count)
+ if !bytes.HasSuffix(buffer, padding) {
+ return nil, errors.New("go-jose/go-jose: invalid padding")
+ }
+
+ return buffer[:len(buffer)-count], nil
+}
diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go b/vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go
new file mode 100644
index 00000000..f62c3bdb
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go
@@ -0,0 +1,75 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "crypto"
+ "encoding/binary"
+ "hash"
+ "io"
+)
+
+type concatKDF struct {
+ z, info []byte
+ i uint32
+ cache []byte
+ hasher hash.Hash
+}
+
+// NewConcatKDF builds a KDF reader based on the given inputs.
+func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader {
+ buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo)))
+ n := 0
+ n += copy(buffer, algID)
+ n += copy(buffer[n:], ptyUInfo)
+ n += copy(buffer[n:], ptyVInfo)
+ n += copy(buffer[n:], supPubInfo)
+ copy(buffer[n:], supPrivInfo)
+
+ hasher := hash.New()
+
+ return &concatKDF{
+ z: z,
+ info: buffer,
+ hasher: hasher,
+ cache: []byte{},
+ i: 1,
+ }
+}
+
+func (ctx *concatKDF) Read(out []byte) (int, error) {
+ copied := copy(out, ctx.cache)
+ ctx.cache = ctx.cache[copied:]
+
+ for copied < len(out) {
+ ctx.hasher.Reset()
+
+ // Write on a hash.Hash never fails
+ _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i)
+ _, _ = ctx.hasher.Write(ctx.z)
+ _, _ = ctx.hasher.Write(ctx.info)
+
+ hash := ctx.hasher.Sum(nil)
+ chunkCopied := copy(out[copied:], hash)
+ copied += chunkCopied
+ ctx.cache = hash[chunkCopied:]
+
+ ctx.i++
+ }
+
+ return copied, nil
+}
diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go b/vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go
new file mode 100644
index 00000000..093c6467
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go
@@ -0,0 +1,86 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "encoding/binary"
+)
+
+// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA.
+// It is an error to call this function with a private/public key that are not on the same
+// curve. Callers must ensure that the keys are valid before calling this function. Output
+// size may be at most 1<<16 bytes (64 KiB).
+func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte {
+ if size > 1<<16 {
+ panic("ECDH-ES output size too large, must be less than or equal to 1<<16")
+ }
+
+ // algId, partyUInfo, partyVInfo inputs must be prefixed with the length
+ algID := lengthPrefixed([]byte(alg))
+ ptyUInfo := lengthPrefixed(apuData)
+ ptyVInfo := lengthPrefixed(apvData)
+
+ // suppPubInfo is the encoded length of the output size in bits
+ supPubInfo := make([]byte, 4)
+ binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8)
+
+ if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) {
+ panic("public key not on same curve as private key")
+ }
+
+ z, _ := priv.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes())
+ zBytes := z.Bytes()
+
+ // Note that calling z.Bytes() on a big.Int may strip leading zero bytes from
+ // the returned byte array. This can lead to a problem where zBytes will be
+ // shorter than expected which breaks the key derivation. Therefore we must pad
+ // to the full length of the expected coordinate here before calling the KDF.
+ octSize := dSize(priv.Curve)
+ if len(zBytes) != octSize {
+ zBytes = append(bytes.Repeat([]byte{0}, octSize-len(zBytes)), zBytes...)
+ }
+
+ reader := NewConcatKDF(crypto.SHA256, zBytes, algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{})
+ key := make([]byte, size)
+
+ // Read on the KDF will never fail
+ _, _ = reader.Read(key)
+
+ return key
+}
+
+// dSize returns the size in octets for a coordinate on a elliptic curve.
+func dSize(curve elliptic.Curve) int {
+ order := curve.Params().P
+ bitLen := order.BitLen()
+ size := bitLen / 8
+ if bitLen%8 != 0 {
+ size++
+ }
+ return size
+}
+
+func lengthPrefixed(data []byte) []byte {
+ out := make([]byte, len(data)+4)
+ binary.BigEndian.PutUint32(out, uint32(len(data)))
+ copy(out[4:], data)
+ return out
+}
diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go b/vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go
new file mode 100644
index 00000000..b9effbca
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go
@@ -0,0 +1,109 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "crypto/cipher"
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+)
+
+var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6}
+
+// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher.
+func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) {
+ if len(cek)%8 != 0 {
+ return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks")
+ }
+
+ n := len(cek) / 8
+ r := make([][]byte, n)
+
+ for i := range r {
+ r[i] = make([]byte, 8)
+ copy(r[i], cek[i*8:])
+ }
+
+ buffer := make([]byte, 16)
+ tBytes := make([]byte, 8)
+ copy(buffer, defaultIV)
+
+ for t := 0; t < 6*n; t++ {
+ copy(buffer[8:], r[t%n])
+
+ block.Encrypt(buffer, buffer)
+
+ binary.BigEndian.PutUint64(tBytes, uint64(t+1))
+
+ for i := 0; i < 8; i++ {
+ buffer[i] ^= tBytes[i]
+ }
+ copy(r[t%n], buffer[8:])
+ }
+
+ out := make([]byte, (n+1)*8)
+ copy(out, buffer[:8])
+ for i := range r {
+ copy(out[(i+1)*8:], r[i])
+ }
+
+ return out, nil
+}
+
+// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher.
+func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) {
+ if len(ciphertext)%8 != 0 {
+ return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks")
+ }
+
+ n := (len(ciphertext) / 8) - 1
+ r := make([][]byte, n)
+
+ for i := range r {
+ r[i] = make([]byte, 8)
+ copy(r[i], ciphertext[(i+1)*8:])
+ }
+
+ buffer := make([]byte, 16)
+ tBytes := make([]byte, 8)
+ copy(buffer[:8], ciphertext[:8])
+
+ for t := 6*n - 1; t >= 0; t-- {
+ binary.BigEndian.PutUint64(tBytes, uint64(t+1))
+
+ for i := 0; i < 8; i++ {
+ buffer[i] ^= tBytes[i]
+ }
+ copy(buffer[8:], r[t%n])
+
+ block.Decrypt(buffer, buffer)
+
+ copy(r[t%n], buffer[8:])
+ }
+
+ if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 {
+ return nil, errors.New("go-jose/go-jose: failed to unwrap key")
+ }
+
+ out := make([]byte, n*8)
+ for i := range r {
+ copy(out[i*8:], r[i])
+ }
+
+ return out, nil
+}
diff --git a/vendor/github.com/go-jose/go-jose/v4/crypter.go b/vendor/github.com/go-jose/go-jose/v4/crypter.go
new file mode 100644
index 00000000..aba08424
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/crypter.go
@@ -0,0 +1,593 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "errors"
+ "fmt"
+
+ "github.com/go-jose/go-jose/v4/json"
+)
+
+// Encrypter represents an encrypter which produces an encrypted JWE object.
+type Encrypter interface {
+ Encrypt(plaintext []byte) (*JSONWebEncryption, error)
+ EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error)
+ Options() EncrypterOptions
+}
+
+// A generic content cipher
+type contentCipher interface {
+ keySize() int
+ encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error)
+ decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error)
+}
+
+// A key generator (for generating/getting a CEK)
+type keyGenerator interface {
+ keySize() int
+ genKey() ([]byte, rawHeader, error)
+}
+
+// A generic key encrypter
+type keyEncrypter interface {
+ encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key
+}
+
+// A generic key decrypter
+type keyDecrypter interface {
+ decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key
+}
+
+// A generic encrypter based on the given key encrypter and content cipher.
+type genericEncrypter struct {
+ contentAlg ContentEncryption
+ compressionAlg CompressionAlgorithm
+ cipher contentCipher
+ recipients []recipientKeyInfo
+ keyGenerator keyGenerator
+ extraHeaders map[HeaderKey]interface{}
+}
+
+type recipientKeyInfo struct {
+ keyID string
+ keyAlg KeyAlgorithm
+ keyEncrypter keyEncrypter
+}
+
+// EncrypterOptions represents options that can be set on new encrypters.
+type EncrypterOptions struct {
+ Compression CompressionAlgorithm
+
+ // Optional map of name/value pairs to be inserted into the protected
+ // header of a JWS object. Some specifications which make use of
+ // JWS require additional values here.
+ //
+ // Values will be serialized by [json.Marshal] and must be valid inputs to
+ // that function.
+ //
+ // [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal
+ ExtraHeaders map[HeaderKey]interface{}
+}
+
+// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it
+// if necessary, and returns the updated EncrypterOptions.
+//
+// The v parameter will be serialized by [json.Marshal] and must be a valid
+// input to that function.
+//
+// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal
+func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions {
+ if eo.ExtraHeaders == nil {
+ eo.ExtraHeaders = map[HeaderKey]interface{}{}
+ }
+ eo.ExtraHeaders[k] = v
+ return eo
+}
+
+// WithContentType adds a content type ("cty") header and returns the updated
+// EncrypterOptions.
+func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions {
+ return eo.WithHeader(HeaderContentType, contentType)
+}
+
+// WithType adds a type ("typ") header and returns the updated EncrypterOptions.
+func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions {
+ return eo.WithHeader(HeaderType, typ)
+}
+
+// Recipient represents an algorithm/key to encrypt messages to.
+//
+// PBES2Count and PBES2Salt correspond with the "p2c" and "p2s" headers used
+// on the password-based encryption algorithms PBES2-HS256+A128KW,
+// PBES2-HS384+A192KW, and PBES2-HS512+A256KW. If they are not provided a safe
+// default of 100000 will be used for the count and a 128-bit random salt will
+// be generated.
+type Recipient struct {
+ Algorithm KeyAlgorithm
+ // Key must have one of these types:
+ // - ed25519.PublicKey
+ // - *ecdsa.PublicKey
+ // - *rsa.PublicKey
+ // - *JSONWebKey
+ // - JSONWebKey
+ // - []byte (a symmetric key)
+ // - Any type that satisfies the OpaqueKeyEncrypter interface
+ //
+ // The type of Key must match the value of Algorithm.
+ Key interface{}
+ KeyID string
+ PBES2Count int
+ PBES2Salt []byte
+}
+
+// NewEncrypter creates an appropriate encrypter based on the key type
+func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) {
+ encrypter := &genericEncrypter{
+ contentAlg: enc,
+ recipients: []recipientKeyInfo{},
+ cipher: getContentCipher(enc),
+ }
+ if opts != nil {
+ encrypter.compressionAlg = opts.Compression
+ encrypter.extraHeaders = opts.ExtraHeaders
+ }
+
+ if encrypter.cipher == nil {
+ return nil, ErrUnsupportedAlgorithm
+ }
+
+ var keyID string
+ var rawKey interface{}
+ switch encryptionKey := rcpt.Key.(type) {
+ case JSONWebKey:
+ keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key
+ case *JSONWebKey:
+ keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key
+ case OpaqueKeyEncrypter:
+ keyID, rawKey = encryptionKey.KeyID(), encryptionKey
+ default:
+ rawKey = encryptionKey
+ }
+
+ switch rcpt.Algorithm {
+ case DIRECT:
+ // Direct encryption mode must be treated differently
+ keyBytes, ok := rawKey.([]byte)
+ if !ok {
+ return nil, ErrUnsupportedKeyType
+ }
+ if encrypter.cipher.keySize() != len(keyBytes) {
+ return nil, ErrInvalidKeySize
+ }
+ encrypter.keyGenerator = staticKeyGenerator{
+ key: keyBytes,
+ }
+ recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, keyBytes)
+ recipientInfo.keyID = keyID
+ if rcpt.KeyID != "" {
+ recipientInfo.keyID = rcpt.KeyID
+ }
+ encrypter.recipients = []recipientKeyInfo{recipientInfo}
+ return encrypter, nil
+ case ECDH_ES:
+ // ECDH-ES (w/o key wrapping) is similar to DIRECT mode
+ keyDSA, ok := rawKey.(*ecdsa.PublicKey)
+ if !ok {
+ return nil, ErrUnsupportedKeyType
+ }
+ encrypter.keyGenerator = ecKeyGenerator{
+ size: encrypter.cipher.keySize(),
+ algID: string(enc),
+ publicKey: keyDSA,
+ }
+ recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, keyDSA)
+ recipientInfo.keyID = keyID
+ if rcpt.KeyID != "" {
+ recipientInfo.keyID = rcpt.KeyID
+ }
+ encrypter.recipients = []recipientKeyInfo{recipientInfo}
+ return encrypter, nil
+ default:
+ // Can just add a standard recipient
+ encrypter.keyGenerator = randomKeyGenerator{
+ size: encrypter.cipher.keySize(),
+ }
+ err := encrypter.addRecipient(rcpt)
+ return encrypter, err
+ }
+}
+
+// NewMultiEncrypter creates a multi-encrypter based on the given parameters
+func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) {
+ cipher := getContentCipher(enc)
+
+ if cipher == nil {
+ return nil, ErrUnsupportedAlgorithm
+ }
+ if len(rcpts) == 0 {
+ return nil, fmt.Errorf("go-jose/go-jose: recipients is nil or empty")
+ }
+
+ encrypter := &genericEncrypter{
+ contentAlg: enc,
+ recipients: []recipientKeyInfo{},
+ cipher: cipher,
+ keyGenerator: randomKeyGenerator{
+ size: cipher.keySize(),
+ },
+ }
+
+ if opts != nil {
+ encrypter.compressionAlg = opts.Compression
+ encrypter.extraHeaders = opts.ExtraHeaders
+ }
+
+ for _, recipient := range rcpts {
+ err := encrypter.addRecipient(recipient)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return encrypter, nil
+}
+
+func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) {
+ var recipientInfo recipientKeyInfo
+
+ switch recipient.Algorithm {
+ case DIRECT, ECDH_ES:
+ return fmt.Errorf("go-jose/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm)
+ }
+
+ recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key)
+ if recipient.KeyID != "" {
+ recipientInfo.keyID = recipient.KeyID
+ }
+
+ switch recipient.Algorithm {
+ case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW:
+ if sr, ok := recipientInfo.keyEncrypter.(*symmetricKeyCipher); ok {
+ sr.p2c = recipient.PBES2Count
+ sr.p2s = recipient.PBES2Salt
+ }
+ }
+
+ if err == nil {
+ ctx.recipients = append(ctx.recipients, recipientInfo)
+ }
+ return err
+}
+
+func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) {
+ switch encryptionKey := encryptionKey.(type) {
+ case *rsa.PublicKey:
+ return newRSARecipient(alg, encryptionKey)
+ case *ecdsa.PublicKey:
+ return newECDHRecipient(alg, encryptionKey)
+ case []byte:
+ return newSymmetricRecipient(alg, encryptionKey)
+ case string:
+ return newSymmetricRecipient(alg, []byte(encryptionKey))
+ case *JSONWebKey:
+ recipient, err := makeJWERecipient(alg, encryptionKey.Key)
+ recipient.keyID = encryptionKey.KeyID
+ return recipient, err
+ case OpaqueKeyEncrypter:
+ return newOpaqueKeyEncrypter(alg, encryptionKey)
+ }
+ return recipientKeyInfo{}, ErrUnsupportedKeyType
+}
+
+// newDecrypter creates an appropriate decrypter based on the key type
+func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) {
+ switch decryptionKey := decryptionKey.(type) {
+ case *rsa.PrivateKey:
+ return &rsaDecrypterSigner{
+ privateKey: decryptionKey,
+ }, nil
+ case *ecdsa.PrivateKey:
+ return &ecDecrypterSigner{
+ privateKey: decryptionKey,
+ }, nil
+ case []byte:
+ return &symmetricKeyCipher{
+ key: decryptionKey,
+ }, nil
+ case string:
+ return &symmetricKeyCipher{
+ key: []byte(decryptionKey),
+ }, nil
+ case JSONWebKey:
+ return newDecrypter(decryptionKey.Key)
+ case *JSONWebKey:
+ return newDecrypter(decryptionKey.Key)
+ case OpaqueKeyDecrypter:
+ return &opaqueKeyDecrypter{decrypter: decryptionKey}, nil
+ default:
+ return nil, ErrUnsupportedKeyType
+ }
+}
+
+// Implementation of encrypt method producing a JWE object.
+func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) {
+ return ctx.EncryptWithAuthData(plaintext, nil)
+}
+
+// Implementation of encrypt method producing a JWE object.
+func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) {
+ obj := &JSONWebEncryption{}
+ obj.aad = aad
+
+ obj.protected = &rawHeader{}
+ err := obj.protected.set(headerEncryption, ctx.contentAlg)
+ if err != nil {
+ return nil, err
+ }
+
+ obj.recipients = make([]recipientInfo, len(ctx.recipients))
+
+ if len(ctx.recipients) == 0 {
+ return nil, fmt.Errorf("go-jose/go-jose: no recipients to encrypt to")
+ }
+
+ cek, headers, err := ctx.keyGenerator.genKey()
+ if err != nil {
+ return nil, err
+ }
+
+ obj.protected.merge(&headers)
+
+ for i, info := range ctx.recipients {
+ recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg)
+ if err != nil {
+ return nil, err
+ }
+
+ err = recipient.header.set(headerAlgorithm, info.keyAlg)
+ if err != nil {
+ return nil, err
+ }
+
+ if info.keyID != "" {
+ err = recipient.header.set(headerKeyID, info.keyID)
+ if err != nil {
+ return nil, err
+ }
+ }
+ obj.recipients[i] = recipient
+ }
+
+ if len(ctx.recipients) == 1 {
+ // Move per-recipient headers into main protected header if there's
+ // only a single recipient.
+ obj.protected.merge(obj.recipients[0].header)
+ obj.recipients[0].header = nil
+ }
+
+ if ctx.compressionAlg != NONE {
+ plaintext, err = compress(ctx.compressionAlg, plaintext)
+ if err != nil {
+ return nil, err
+ }
+
+ err = obj.protected.set(headerCompression, ctx.compressionAlg)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ for k, v := range ctx.extraHeaders {
+ b, err := json.Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ (*obj.protected)[k] = makeRawMessage(b)
+ }
+
+ authData := obj.computeAuthData()
+ parts, err := ctx.cipher.encrypt(cek, authData, plaintext)
+ if err != nil {
+ return nil, err
+ }
+
+ obj.iv = parts.iv
+ obj.ciphertext = parts.ciphertext
+ obj.tag = parts.tag
+
+ return obj, nil
+}
+
+func (ctx *genericEncrypter) Options() EncrypterOptions {
+ return EncrypterOptions{
+ Compression: ctx.compressionAlg,
+ ExtraHeaders: ctx.extraHeaders,
+ }
+}
+
+// Decrypt and validate the object and return the plaintext. This
+// function does not support multi-recipient. If you desire multi-recipient
+// decryption use DecryptMulti instead.
+//
+// The decryptionKey argument must contain a private or symmetric key
+// and must have one of these types:
+// - *ecdsa.PrivateKey
+// - *rsa.PrivateKey
+// - *JSONWebKey
+// - JSONWebKey
+// - *JSONWebKeySet
+// - JSONWebKeySet
+// - []byte (a symmetric key)
+// - string (a symmetric key)
+// - Any type that satisfies the OpaqueKeyDecrypter interface.
+//
+// Note that ed25519 is only available for signatures, not encryption, so is
+// not an option here.
+//
+// Automatically decompresses plaintext, but returns an error if the decompressed
+// data would be >250kB or >10x the size of the compressed data, whichever is larger.
+func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) {
+ headers := obj.mergedHeaders(nil)
+
+ if len(obj.recipients) > 1 {
+ return nil, errors.New("go-jose/go-jose: too many recipients in payload; expecting only one")
+ }
+
+ critical, err := headers.getCritical()
+ if err != nil {
+ return nil, fmt.Errorf("go-jose/go-jose: invalid crit header")
+ }
+
+ if len(critical) > 0 {
+ return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
+ }
+
+ key := tryJWKS(decryptionKey, obj.Header)
+ decrypter, err := newDecrypter(key)
+ if err != nil {
+ return nil, err
+ }
+
+ cipher := getContentCipher(headers.getEncryption())
+ if cipher == nil {
+ return nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(headers.getEncryption()))
+ }
+
+ generator := randomKeyGenerator{
+ size: cipher.keySize(),
+ }
+
+ parts := &aeadParts{
+ iv: obj.iv,
+ ciphertext: obj.ciphertext,
+ tag: obj.tag,
+ }
+
+ authData := obj.computeAuthData()
+
+ var plaintext []byte
+ recipient := obj.recipients[0]
+ recipientHeaders := obj.mergedHeaders(&recipient)
+
+ cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
+ if err == nil {
+ // Found a valid CEK -- let's try to decrypt.
+ plaintext, err = cipher.decrypt(cek, authData, parts)
+ }
+
+ if plaintext == nil {
+ return nil, ErrCryptoFailure
+ }
+
+ // The "zip" header parameter may only be present in the protected header.
+ if comp := obj.protected.getCompression(); comp != "" {
+ plaintext, err = decompress(comp, plaintext)
+ if err != nil {
+ return nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err)
+ }
+ }
+
+ return plaintext, nil
+}
+
+// DecryptMulti decrypts and validates the object and returns the plaintexts,
+// with support for multiple recipients. It returns the index of the recipient
+// for which the decryption was successful, the merged headers for that recipient,
+// and the plaintext.
+//
+// The decryptionKey argument must have one of the types allowed for the
+// decryptionKey argument of Decrypt().
+//
+// Automatically decompresses plaintext, but returns an error if the decompressed
+// data would be >250kB or >10x the size of the compressed data, whichever is larger.
+func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) {
+ globalHeaders := obj.mergedHeaders(nil)
+
+ critical, err := globalHeaders.getCritical()
+ if err != nil {
+ return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: invalid crit header")
+ }
+
+ if len(critical) > 0 {
+ return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
+ }
+
+ key := tryJWKS(decryptionKey, obj.Header)
+ decrypter, err := newDecrypter(key)
+ if err != nil {
+ return -1, Header{}, nil, err
+ }
+
+ encryption := globalHeaders.getEncryption()
+ cipher := getContentCipher(encryption)
+ if cipher == nil {
+ return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(encryption))
+ }
+
+ generator := randomKeyGenerator{
+ size: cipher.keySize(),
+ }
+
+ parts := &aeadParts{
+ iv: obj.iv,
+ ciphertext: obj.ciphertext,
+ tag: obj.tag,
+ }
+
+ authData := obj.computeAuthData()
+
+ index := -1
+ var plaintext []byte
+ var headers rawHeader
+
+ for i, recipient := range obj.recipients {
+ recipientHeaders := obj.mergedHeaders(&recipient)
+
+ cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
+ if err == nil {
+ // Found a valid CEK -- let's try to decrypt.
+ plaintext, err = cipher.decrypt(cek, authData, parts)
+ if err == nil {
+ index = i
+ headers = recipientHeaders
+ break
+ }
+ }
+ }
+
+ if plaintext == nil {
+ return -1, Header{}, nil, ErrCryptoFailure
+ }
+
+ // The "zip" header parameter may only be present in the protected header.
+ if comp := obj.protected.getCompression(); comp != "" {
+ plaintext, err = decompress(comp, plaintext)
+ if err != nil {
+ return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err)
+ }
+ }
+
+ sanitized, err := headers.sanitized()
+ if err != nil {
+ return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to sanitize header: %v", err)
+ }
+
+ return index, sanitized, plaintext, err
+}
diff --git a/vendor/github.com/go-jose/go-jose/v4/doc.go b/vendor/github.com/go-jose/go-jose/v4/doc.go
new file mode 100644
index 00000000..0ad40ca0
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/doc.go
@@ -0,0 +1,25 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+Package jose aims to provide an implementation of the Javascript Object Signing
+and Encryption set of standards. It implements encryption and signing based on
+the JSON Web Encryption and JSON Web Signature standards, with optional JSON Web
+Token support available in a sub-package. The library supports both the compact
+and JWS/JWE JSON Serialization formats, and has optional support for multiple
+recipients.
+*/
+package jose
diff --git a/vendor/github.com/go-jose/go-jose/v4/encoding.go b/vendor/github.com/go-jose/go-jose/v4/encoding.go
new file mode 100644
index 00000000..4f6e0d4a
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/encoding.go
@@ -0,0 +1,228 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "bytes"
+ "compress/flate"
+ "encoding/base64"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math/big"
+ "strings"
+ "unicode"
+
+ "github.com/go-jose/go-jose/v4/json"
+)
+
+// Helper function to serialize known-good objects.
+// Precondition: value is not a nil pointer.
+func mustSerializeJSON(value interface{}) []byte {
+ out, err := json.Marshal(value)
+ if err != nil {
+ panic(err)
+ }
+ // We never want to serialize the top-level value "null," since it's not a
+ // valid JOSE message. But if a caller passes in a nil pointer to this method,
+ // MarshalJSON will happily serialize it as the top-level value "null". If
+ // that value is then embedded in another operation, for instance by being
+ // base64-encoded and fed as input to a signing algorithm
+ // (https://github.com/go-jose/go-jose/issues/22), the result will be
+ // incorrect. Because this method is intended for known-good objects, and a nil
+ // pointer is not a known-good object, we are free to panic in this case.
+ // Note: It's not possible to directly check whether the data pointed at by an
+ // interface is a nil pointer, so we do this hacky workaround.
+ // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I
+ if string(out) == "null" {
+ panic("Tried to serialize a nil pointer.")
+ }
+ return out
+}
+
+// Strip all newlines and whitespace
+func stripWhitespace(data string) string {
+ buf := strings.Builder{}
+ buf.Grow(len(data))
+ for _, r := range data {
+ if !unicode.IsSpace(r) {
+ buf.WriteRune(r)
+ }
+ }
+ return buf.String()
+}
+
+// Perform compression based on algorithm
+func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
+ switch algorithm {
+ case DEFLATE:
+ return deflate(input)
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+}
+
+// Perform decompression based on algorithm
+func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
+ switch algorithm {
+ case DEFLATE:
+ return inflate(input)
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+}
+
+// deflate compresses the input.
+func deflate(input []byte) ([]byte, error) {
+ output := new(bytes.Buffer)
+
+ // Writing to byte buffer, err is always nil
+ writer, _ := flate.NewWriter(output, 1)
+ _, _ = io.Copy(writer, bytes.NewBuffer(input))
+
+ err := writer.Close()
+ return output.Bytes(), err
+}
+
+// inflate decompresses the input.
+//
+// Errors if the decompressed data would be >250kB or >10x the size of the
+// compressed data, whichever is larger.
+func inflate(input []byte) ([]byte, error) {
+ output := new(bytes.Buffer)
+ reader := flate.NewReader(bytes.NewBuffer(input))
+
+ maxCompressedSize := max(250_000, 10*int64(len(input)))
+
+ limit := maxCompressedSize + 1
+ n, err := io.CopyN(output, reader, limit)
+ if err != nil && err != io.EOF {
+ return nil, err
+ }
+ if n == limit {
+ return nil, fmt.Errorf("uncompressed data would be too large (>%d bytes)", maxCompressedSize)
+ }
+
+ err = reader.Close()
+ return output.Bytes(), err
+}
+
+// byteBuffer represents a slice of bytes that can be serialized to url-safe base64.
+type byteBuffer struct {
+ data []byte
+}
+
+func newBuffer(data []byte) *byteBuffer {
+ if data == nil {
+ return nil
+ }
+ return &byteBuffer{
+ data: data,
+ }
+}
+
+func newFixedSizeBuffer(data []byte, length int) *byteBuffer {
+ if len(data) > length {
+ panic("go-jose/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)")
+ }
+ pad := make([]byte, length-len(data))
+ return newBuffer(append(pad, data...))
+}
+
+func newBufferFromInt(num uint64) *byteBuffer {
+ data := make([]byte, 8)
+ binary.BigEndian.PutUint64(data, num)
+ return newBuffer(bytes.TrimLeft(data, "\x00"))
+}
+
+func (b *byteBuffer) MarshalJSON() ([]byte, error) {
+ return json.Marshal(b.base64())
+}
+
+func (b *byteBuffer) UnmarshalJSON(data []byte) error {
+ var encoded string
+ err := json.Unmarshal(data, &encoded)
+ if err != nil {
+ return err
+ }
+
+ if encoded == "" {
+ return nil
+ }
+
+ decoded, err := base64.RawURLEncoding.DecodeString(encoded)
+ if err != nil {
+ return err
+ }
+
+ *b = *newBuffer(decoded)
+
+ return nil
+}
+
+func (b *byteBuffer) base64() string {
+ return base64.RawURLEncoding.EncodeToString(b.data)
+}
+
+func (b *byteBuffer) bytes() []byte {
+ // Handling nil here allows us to transparently handle nil slices when serializing.
+ if b == nil {
+ return nil
+ }
+ return b.data
+}
+
+func (b byteBuffer) bigInt() *big.Int {
+ return new(big.Int).SetBytes(b.data)
+}
+
+func (b byteBuffer) toInt() int {
+ return int(b.bigInt().Int64())
+}
+
+func base64EncodeLen(sl []byte) int {
+ return base64.RawURLEncoding.EncodedLen(len(sl))
+}
+
+func base64JoinWithDots(inputs ...[]byte) string {
+ if len(inputs) == 0 {
+ return ""
+ }
+
+ // Count of dots.
+ totalCount := len(inputs) - 1
+
+ for _, input := range inputs {
+ totalCount += base64EncodeLen(input)
+ }
+
+ out := make([]byte, totalCount)
+ startEncode := 0
+ for i, input := range inputs {
+ base64.RawURLEncoding.Encode(out[startEncode:], input)
+
+ if i == len(inputs)-1 {
+ continue
+ }
+
+ startEncode += base64EncodeLen(input)
+ out[startEncode] = '.'
+ startEncode++
+ }
+
+ return string(out)
+}
diff --git a/vendor/github.com/go-jose/go-jose/v4/json/LICENSE b/vendor/github.com/go-jose/go-jose/v4/json/LICENSE
new file mode 100644
index 00000000..74487567
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/json/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/go-jose/go-jose/v4/json/README.md b/vendor/github.com/go-jose/go-jose/v4/json/README.md
new file mode 100644
index 00000000..86de5e55
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/json/README.md
@@ -0,0 +1,13 @@
+# Safe JSON
+
+This repository contains a fork of the `encoding/json` package from Go 1.6.
+
+The following changes were made:
+
+* Object deserialization uses case-sensitive member name matching instead of
+ [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html).
+ This is to avoid differences in the interpretation of JOSE messages between
+ go-jose and libraries written in other languages.
+* When deserializing a JSON object, we check for duplicate keys and reject the
+ input whenever we detect a duplicate. Rather than trying to work with malformed
+ data, we prefer to reject it right away.
diff --git a/vendor/github.com/go-jose/go-jose/v4/json/decode.go b/vendor/github.com/go-jose/go-jose/v4/json/decode.go
new file mode 100644
index 00000000..50634dd8
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/json/decode.go
@@ -0,0 +1,1216 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Represents JSON data structure using native Go types: booleans, floats,
+// strings, arrays, and maps.
+
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "runtime"
+ "strconv"
+ "unicode"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+// Unmarshal parses the JSON-encoded data and stores the result
+// in the value pointed to by v.
+//
+// Unmarshal uses the inverse of the encodings that
+// Marshal uses, allocating maps, slices, and pointers as necessary,
+// with the following additional rules:
+//
+// To unmarshal JSON into a pointer, Unmarshal first handles the case of
+// the JSON being the JSON literal null. In that case, Unmarshal sets
+// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
+// the value pointed at by the pointer. If the pointer is nil, Unmarshal
+// allocates a new value for it to point to.
+//
+// To unmarshal JSON into a struct, Unmarshal matches incoming object
+// keys to the keys used by Marshal (either the struct field name or its tag),
+// preferring an exact match but also accepting a case-insensitive match.
+// Unmarshal will only set exported fields of the struct.
+//
+// To unmarshal JSON into an interface value,
+// Unmarshal stores one of these in the interface value:
+//
+// bool, for JSON booleans
+// float64, for JSON numbers
+// string, for JSON strings
+// []interface{}, for JSON arrays
+// map[string]interface{}, for JSON objects
+// nil for JSON null
+//
+// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
+// to zero and then appends each element to the slice.
+// As a special case, to unmarshal an empty JSON array into a slice,
+// Unmarshal replaces the slice with a new empty slice.
+//
+// To unmarshal a JSON array into a Go array, Unmarshal decodes
+// JSON array elements into corresponding Go array elements.
+// If the Go array is smaller than the JSON array,
+// the additional JSON array elements are discarded.
+// If the JSON array is smaller than the Go array,
+// the additional Go array elements are set to zero values.
+//
+// To unmarshal a JSON object into a string-keyed map, Unmarshal first
+// establishes a map to use, If the map is nil, Unmarshal allocates a new map.
+// Otherwise Unmarshal reuses the existing map, keeping existing entries.
+// Unmarshal then stores key-value pairs from the JSON object into the map.
+//
+// If a JSON value is not appropriate for a given target type,
+// or if a JSON number overflows the target type, Unmarshal
+// skips that field and completes the unmarshaling as best it can.
+// If no more serious errors are encountered, Unmarshal returns
+// an UnmarshalTypeError describing the earliest such error.
+//
+// The JSON null value unmarshals into an interface, map, pointer, or slice
+// by setting that Go value to nil. Because null is often used in JSON to mean
+// “not present,” unmarshaling a JSON null into any other Go type has no effect
+// on the value and produces no error.
+//
+// When unmarshaling quoted strings, invalid UTF-8 or
+// invalid UTF-16 surrogate pairs are not treated as an error.
+// Instead, they are replaced by the Unicode replacement
+// character U+FFFD.
+func Unmarshal(data []byte, v interface{}) error {
+ // Check for well-formedness.
+ // Avoids filling out half a data structure
+ // before discovering a JSON syntax error.
+ var d decodeState
+ err := checkValid(data, &d.scan)
+ if err != nil {
+ return err
+ }
+
+ d.init(data)
+ return d.unmarshal(v)
+}
+
+// Unmarshaler is the interface implemented by objects
+// that can unmarshal a JSON description of themselves.
+// The input can be assumed to be a valid encoding of
+// a JSON value. UnmarshalJSON must copy the JSON data
+// if it wishes to retain the data after returning.
+type Unmarshaler interface {
+ UnmarshalJSON([]byte) error
+}
+
+// An UnmarshalTypeError describes a JSON value that was
+// not appropriate for a value of a specific Go type.
+type UnmarshalTypeError struct {
+ Value string // description of JSON value - "bool", "array", "number -5"
+ Type reflect.Type // type of Go value it could not be assigned to
+ Offset int64 // error occurred after reading Offset bytes
+}
+
+func (e *UnmarshalTypeError) Error() string {
+ return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// An UnmarshalFieldError describes a JSON object key that
+// led to an unexported (and therefore unwritable) struct field.
+// (No longer used; kept for compatibility.)
+type UnmarshalFieldError struct {
+ Key string
+ Type reflect.Type
+ Field reflect.StructField
+}
+
+func (e *UnmarshalFieldError) Error() string {
+ return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
+}
+
+// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+// (The argument to Unmarshal must be a non-nil pointer.)
+type InvalidUnmarshalError struct {
+ Type reflect.Type
+}
+
+func (e *InvalidUnmarshalError) Error() string {
+ if e.Type == nil {
+ return "json: Unmarshal(nil)"
+ }
+
+ if e.Type.Kind() != reflect.Ptr {
+ return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
+ }
+ return "json: Unmarshal(nil " + e.Type.String() + ")"
+}
+
+func (d *decodeState) unmarshal(v interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ err = r.(error)
+ }
+ }()
+
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ return &InvalidUnmarshalError{reflect.TypeOf(v)}
+ }
+
+ d.scan.reset()
+ // We decode rv not rv.Elem because the Unmarshaler interface
+ // test must be applied at the top level of the value.
+ d.value(rv)
+ return d.savedError
+}
+
+// A Number represents a JSON number literal.
+type Number string
+
+// String returns the literal text of the number.
+func (n Number) String() string { return string(n) }
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+ return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+ return strconv.ParseInt(string(n), 10, 64)
+}
+
+// isValidNumber reports whether s is a valid JSON number literal.
+func isValidNumber(s string) bool {
+ // This function implements the JSON numbers grammar.
+ // See https://tools.ietf.org/html/rfc7159#section-6
+ // and http://json.org/number.gif
+
+ if s == "" {
+ return false
+ }
+
+ // Optional -
+ if s[0] == '-' {
+ s = s[1:]
+ if s == "" {
+ return false
+ }
+ }
+
+ // Digits
+ switch {
+ default:
+ return false
+
+ case s[0] == '0':
+ s = s[1:]
+
+ case '1' <= s[0] && s[0] <= '9':
+ s = s[1:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // . followed by 1 or more digits.
+ if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
+ s = s[2:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // e or E followed by an optional - or + and
+ // 1 or more digits.
+ if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+ s = s[1:]
+ if s[0] == '+' || s[0] == '-' {
+ s = s[1:]
+ if s == "" {
+ return false
+ }
+ }
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // Make sure we are at the end.
+ return s == ""
+}
+
+type NumberUnmarshalType int
+
+const (
+ // unmarshal a JSON number into an interface{} as a float64
+ UnmarshalFloat NumberUnmarshalType = iota
+ // unmarshal a JSON number into an interface{} as a `json.Number`
+ UnmarshalJSONNumber
+ // unmarshal a JSON number into an interface{} as a int64
+ // if value is an integer otherwise float64
+ UnmarshalIntOrFloat
+)
+
+// decodeState represents the state while decoding a JSON value.
+type decodeState struct {
+ data []byte
+ off int // read offset in data
+ scan scanner
+ nextscan scanner // for calls to nextValue
+ savedError error
+ numberType NumberUnmarshalType
+}
+
+// errPhase is used for errors that should not happen unless
+// there is a bug in the JSON decoder or something is editing
+// the data slice while the decoder executes.
+var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?")
+
+func (d *decodeState) init(data []byte) *decodeState {
+ d.data = data
+ d.off = 0
+ d.savedError = nil
+ return d
+}
+
+// error aborts the decoding by panicking with err.
+func (d *decodeState) error(err error) {
+ panic(err)
+}
+
+// saveError saves the first err it is called with,
+// for reporting at the end of the unmarshal.
+func (d *decodeState) saveError(err error) {
+ if d.savedError == nil {
+ d.savedError = err
+ }
+}
+
+// next cuts off and returns the next full JSON value in d.data[d.off:].
+// The next value is known to be an object or array, not a literal.
+func (d *decodeState) next() []byte {
+ c := d.data[d.off]
+ item, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+ if err != nil {
+ d.error(err)
+ }
+ d.off = len(d.data) - len(rest)
+
+ // Our scanner has seen the opening brace/bracket
+ // and thinks we're still in the middle of the object.
+ // invent a closing brace/bracket to get it out.
+ if c == '{' {
+ d.scan.step(&d.scan, '}')
+ } else {
+ d.scan.step(&d.scan, ']')
+ }
+
+ return item
+}
+
+// scanWhile processes bytes in d.data[d.off:] until it
+// receives a scan code not equal to op.
+// It updates d.off and returns the new scan code.
+func (d *decodeState) scanWhile(op int) int {
+ var newOp int
+ for {
+ if d.off >= len(d.data) {
+ newOp = d.scan.eof()
+ d.off = len(d.data) + 1 // mark processed EOF with len+1
+ } else {
+ c := d.data[d.off]
+ d.off++
+ newOp = d.scan.step(&d.scan, c)
+ }
+ if newOp != op {
+ break
+ }
+ }
+ return newOp
+}
+
+// value decodes a JSON value from d.data[d.off:] into the value.
+// it updates d.off to point past the decoded value.
+func (d *decodeState) value(v reflect.Value) {
+ if !v.IsValid() {
+ _, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+ if err != nil {
+ d.error(err)
+ }
+ d.off = len(d.data) - len(rest)
+
+ // d.scan thinks we're still at the beginning of the item.
+ // Feed in an empty string - the shortest, simplest value -
+ // so that it knows we got to the end of the value.
+ if d.scan.redo {
+ // rewind.
+ d.scan.redo = false
+ d.scan.step = stateBeginValue
+ }
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '"')
+
+ n := len(d.scan.parseState)
+ if n > 0 && d.scan.parseState[n-1] == parseObjectKey {
+ // d.scan thinks we just read an object key; finish the object
+ d.scan.step(&d.scan, ':')
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '}')
+ }
+
+ return
+ }
+
+ switch op := d.scanWhile(scanSkipSpace); op {
+ default:
+ d.error(errPhase)
+
+ case scanBeginArray:
+ d.array(v)
+
+ case scanBeginObject:
+ d.object(v)
+
+ case scanBeginLiteral:
+ d.literal(v)
+ }
+}
+
+type unquotedValue struct{}
+
+// valueQuoted is like value but decodes a
+// quoted string literal or literal null into an interface value.
+// If it finds anything other than a quoted string literal or null,
+// valueQuoted returns unquotedValue{}.
+func (d *decodeState) valueQuoted() interface{} {
+ switch op := d.scanWhile(scanSkipSpace); op {
+ default:
+ d.error(errPhase)
+
+ case scanBeginArray:
+ d.array(reflect.Value{})
+
+ case scanBeginObject:
+ d.object(reflect.Value{})
+
+ case scanBeginLiteral:
+ switch v := d.literalInterface().(type) {
+ case nil, string:
+ return v
+ }
+ }
+ return unquotedValue{}
+}
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+ // If v is a named type and is addressable,
+ // start with its address, so that if the type has pointer methods,
+ // we find them.
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ }
+ for {
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ e := v.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+ v = e
+ continue
+ }
+ }
+
+ if v.Kind() != reflect.Ptr {
+ break
+ }
+
+ if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+ break
+ }
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ if v.Type().NumMethod() > 0 {
+ if u, ok := v.Interface().(Unmarshaler); ok {
+ return u, nil, reflect.Value{}
+ }
+ if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+ return nil, u, reflect.Value{}
+ }
+ }
+ v = v.Elem()
+ }
+ return nil, nil, v
+}
+
+// array consumes an array from d.data[d.off-1:], decoding into the value v.
+// the first byte of the array ('[') has been read already.
+func (d *decodeState) array(v reflect.Value) {
+ // Check for unmarshaler.
+ u, ut, pv := d.indirect(v, false)
+ if u != nil {
+ d.off--
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
+ d.off--
+ d.next()
+ return
+ }
+
+ v = pv
+
+ // Check type of target.
+ switch v.Kind() {
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ // Decoding into nil interface? Switch to non-reflect code.
+ v.Set(reflect.ValueOf(d.arrayInterface()))
+ return
+ }
+ // Otherwise it's invalid.
+ fallthrough
+ default:
+ d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
+ d.off--
+ d.next()
+ return
+ case reflect.Array:
+ case reflect.Slice:
+ break
+ }
+
+ i := 0
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ // Get element of array, growing if necessary.
+ if v.Kind() == reflect.Slice {
+ // Grow slice if necessary
+ if i >= v.Cap() {
+ newcap := v.Cap() + v.Cap()/2
+ if newcap < 4 {
+ newcap = 4
+ }
+ newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
+ reflect.Copy(newv, v)
+ v.Set(newv)
+ }
+ if i >= v.Len() {
+ v.SetLen(i + 1)
+ }
+ }
+
+ if i < v.Len() {
+ // Decode into element.
+ d.value(v.Index(i))
+ } else {
+ // Ran out of fixed array: skip.
+ d.value(reflect.Value{})
+ }
+ i++
+
+ // Next token must be , or ].
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+ if op != scanArrayValue {
+ d.error(errPhase)
+ }
+ }
+
+ if i < v.Len() {
+ if v.Kind() == reflect.Array {
+ // Array. Zero the rest.
+ z := reflect.Zero(v.Type().Elem())
+ for ; i < v.Len(); i++ {
+ v.Index(i).Set(z)
+ }
+ } else {
+ v.SetLen(i)
+ }
+ }
+ if i == 0 && v.Kind() == reflect.Slice {
+ v.Set(reflect.MakeSlice(v.Type(), 0, 0))
+ }
+}
+
+var nullLiteral = []byte("null")
+
+// object consumes an object from d.data[d.off-1:], decoding into the value v.
+// the first byte ('{') of the object has been read already.
+func (d *decodeState) object(v reflect.Value) {
+ // Check for unmarshaler.
+ u, ut, pv := d.indirect(v, false)
+ if u != nil {
+ d.off--
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+ v = pv
+
+ // Decoding into nil interface? Switch to non-reflect code.
+ if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(d.objectInterface()))
+ return
+ }
+
+ // Check type of target: struct or map[string]T
+ switch v.Kind() {
+ case reflect.Map:
+ // map must have string kind
+ t := v.Type()
+ if t.Key().Kind() != reflect.String {
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+ if v.IsNil() {
+ v.Set(reflect.MakeMap(t))
+ }
+ case reflect.Struct:
+
+ default:
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+
+ var mapElem reflect.Value
+ keys := map[string]bool{}
+
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if op != scanBeginLiteral {
+ d.error(errPhase)
+ }
+
+ // Read key.
+ start := d.off - 1
+ op = d.scanWhile(scanContinue)
+ item := d.data[start : d.off-1]
+ key, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+
+ // Check for duplicate keys.
+ _, ok = keys[key]
+ if !ok {
+ keys[key] = true
+ } else {
+ d.error(fmt.Errorf("json: duplicate key '%s' in object", key))
+ }
+
+ // Figure out field corresponding to key.
+ var subv reflect.Value
+ destring := false // whether the value is wrapped in a string to be decoded first
+
+ if v.Kind() == reflect.Map {
+ elemType := v.Type().Elem()
+ if !mapElem.IsValid() {
+ mapElem = reflect.New(elemType).Elem()
+ } else {
+ mapElem.Set(reflect.Zero(elemType))
+ }
+ subv = mapElem
+ } else {
+ var f *field
+ fields := cachedTypeFields(v.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if bytes.Equal(ff.nameBytes, []byte(key)) {
+ f = ff
+ break
+ }
+ }
+ if f != nil {
+ subv = v
+ destring = f.quoted
+ for _, i := range f.index {
+ if subv.Kind() == reflect.Ptr {
+ if subv.IsNil() {
+ subv.Set(reflect.New(subv.Type().Elem()))
+ }
+ subv = subv.Elem()
+ }
+ subv = subv.Field(i)
+ }
+ }
+ }
+
+ // Read : before value.
+ if op == scanSkipSpace {
+ op = d.scanWhile(scanSkipSpace)
+ }
+ if op != scanObjectKey {
+ d.error(errPhase)
+ }
+
+ // Read value.
+ if destring {
+ switch qv := d.valueQuoted().(type) {
+ case nil:
+ d.literalStore(nullLiteral, subv, false)
+ case string:
+ d.literalStore([]byte(qv), subv, true)
+ default:
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
+ }
+ } else {
+ d.value(subv)
+ }
+
+ // Write value back to map;
+ // if using struct, subv points into struct already.
+ if v.Kind() == reflect.Map {
+ kv := reflect.ValueOf(key).Convert(v.Type().Key())
+ v.SetMapIndex(kv, subv)
+ }
+
+ // Next token must be , or }.
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ break
+ }
+ if op != scanObjectValue {
+ d.error(errPhase)
+ }
+ }
+}
+
+// literal consumes a literal from d.data[d.off-1:], decoding into the value v.
+// The first byte of the literal has been read already
+// (that's how the caller knows it's a literal).
+func (d *decodeState) literal(v reflect.Value) {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(scanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+
+ d.literalStore(d.data[start:d.off], v, false)
+}
+
+// convertNumber converts the number literal s to a float64, int64 or a Number
+// depending on d.numberDecodeType.
+func (d *decodeState) convertNumber(s string) (interface{}, error) {
+ switch d.numberType {
+
+ case UnmarshalJSONNumber:
+ return Number(s), nil
+ case UnmarshalIntOrFloat:
+ v, err := strconv.ParseInt(s, 10, 64)
+ if err == nil {
+ return v, nil
+ }
+
+ // tries to parse integer number in scientific notation
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
+ }
+
+ // if it has no decimal value use int64
+ if fi, fd := math.Modf(f); fd == 0.0 {
+ return int64(fi), nil
+ }
+ return f, nil
+ default:
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
+ }
+ return f, nil
+ }
+
+}
+
+var numberType = reflect.TypeOf(Number(""))
+
+// literalStore decodes a literal stored in item into v.
+//
+// fromQuoted indicates whether this literal came from unwrapping a
+// string from the ",string" struct tag option. this is used only to
+// produce more helpful error messages.
+func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) {
+ // Check for unmarshaler.
+ if len(item) == 0 {
+ //Empty string given
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ return
+ }
+ wantptr := item[0] == 'n' // null
+ u, ut, pv := d.indirect(v, wantptr)
+ if u != nil {
+ err := u.UnmarshalJSON(item)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ if item[0] != '"' {
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ }
+ return
+ }
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ err := ut.UnmarshalText(s)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+
+ v = pv
+
+ switch c := item[0]; c {
+ case 'n': // null
+ switch v.Kind() {
+ case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ v.Set(reflect.Zero(v.Type()))
+ // otherwise, ignore null for primitives/string
+ }
+ case 't', 'f': // true, false
+ value := c == 't'
+ switch v.Kind() {
+ default:
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+ }
+ case reflect.Bool:
+ v.SetBool(value)
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(value))
+ } else {
+ d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+ }
+ }
+
+ case '"': // string
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ switch v.Kind() {
+ default:
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ case reflect.Slice:
+ if v.Type().Elem().Kind() != reflect.Uint8 {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ break
+ }
+ b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
+ n, err := base64.StdEncoding.Decode(b, s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ v.SetBytes(b[:n])
+ case reflect.String:
+ v.SetString(string(s))
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(string(s)))
+ } else {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ }
+ }
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ s := string(item)
+ switch v.Kind() {
+ default:
+ if v.Kind() == reflect.String && v.Type() == numberType {
+ v.SetString(s)
+ if !isValidNumber(s) {
+ d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item))
+ }
+ break
+ }
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+ }
+ case reflect.Interface:
+ n, err := d.convertNumber(s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ if v.NumMethod() != 0 {
+ d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+ break
+ }
+ v.Set(reflect.ValueOf(n))
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ n, err := strconv.ParseInt(s, 10, 64)
+ if err != nil || v.OverflowInt(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetInt(n)
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil || v.OverflowUint(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetUint(n)
+
+ case reflect.Float32, reflect.Float64:
+ n, err := strconv.ParseFloat(s, v.Type().Bits())
+ if err != nil || v.OverflowFloat(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetFloat(n)
+ }
+ }
+}
+
+// The xxxInterface routines build up a value to be stored
+// in an empty interface. They are not strictly necessary,
+// but they avoid the weight of reflection in this common case.
+
+// valueInterface is like value but returns interface{}
+func (d *decodeState) valueInterface() interface{} {
+ switch d.scanWhile(scanSkipSpace) {
+ default:
+ d.error(errPhase)
+ panic("unreachable")
+ case scanBeginArray:
+ return d.arrayInterface()
+ case scanBeginObject:
+ return d.objectInterface()
+ case scanBeginLiteral:
+ return d.literalInterface()
+ }
+}
+
+// arrayInterface is like array but returns []interface{}.
+func (d *decodeState) arrayInterface() []interface{} {
+ var v = make([]interface{}, 0)
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ v = append(v, d.valueInterface())
+
+ // Next token must be , or ].
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+ if op != scanArrayValue {
+ d.error(errPhase)
+ }
+ }
+ return v
+}
+
+// objectInterface is like object but returns map[string]interface{}.
+func (d *decodeState) objectInterface() map[string]interface{} {
+ m := make(map[string]interface{})
+ keys := map[string]bool{}
+
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if op != scanBeginLiteral {
+ d.error(errPhase)
+ }
+
+ // Read string key.
+ start := d.off - 1
+ op = d.scanWhile(scanContinue)
+ item := d.data[start : d.off-1]
+ key, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+
+ // Check for duplicate keys.
+ _, ok = keys[key]
+ if !ok {
+ keys[key] = true
+ } else {
+ d.error(fmt.Errorf("json: duplicate key '%s' in object", key))
+ }
+
+ // Read : before value.
+ if op == scanSkipSpace {
+ op = d.scanWhile(scanSkipSpace)
+ }
+ if op != scanObjectKey {
+ d.error(errPhase)
+ }
+
+ // Read value.
+ m[key] = d.valueInterface()
+
+ // Next token must be , or }.
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ break
+ }
+ if op != scanObjectValue {
+ d.error(errPhase)
+ }
+ }
+ return m
+}
+
+// literalInterface is like literal but returns an interface value.
+func (d *decodeState) literalInterface() interface{} {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(scanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+ item := d.data[start:d.off]
+
+ switch c := item[0]; c {
+ case 'n': // null
+ return nil
+
+ case 't', 'f': // true, false
+ return c == 't'
+
+ case '"': // string
+ s, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+ return s
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ d.error(errPhase)
+ }
+ n, err := d.convertNumber(string(item))
+ if err != nil {
+ d.saveError(err)
+ }
+ return n
+ }
+}
+
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or it returns -1.
+func getu4(s []byte) rune {
+ if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+ return -1
+ }
+ r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
+ if err != nil {
+ return -1
+ }
+ return rune(r)
+}
+
+// unquote converts a quoted JSON string literal s into an actual string t.
+// The rules are different than for Go, so cannot use strconv.Unquote.
+func unquote(s []byte) (t string, ok bool) {
+ s, ok = unquoteBytes(s)
+ t = string(s)
+ return
+}
+
+func unquoteBytes(s []byte) (t []byte, ok bool) {
+ if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
+ return
+ }
+ s = s[1 : len(s)-1]
+
+ // Check for unusual characters. If there are none,
+ // then no unquoting is needed, so return a slice of the
+ // original bytes.
+ r := 0
+ for r < len(s) {
+ c := s[r]
+ if c == '\\' || c == '"' || c < ' ' {
+ break
+ }
+ if c < utf8.RuneSelf {
+ r++
+ continue
+ }
+ rr, size := utf8.DecodeRune(s[r:])
+ if rr == utf8.RuneError && size == 1 {
+ break
+ }
+ r += size
+ }
+ if r == len(s) {
+ return s, true
+ }
+
+ b := make([]byte, len(s)+2*utf8.UTFMax)
+ w := copy(b, s[0:r])
+ for r < len(s) {
+ // Out of room? Can only happen if s is full of
+ // malformed UTF-8 and we're replacing each
+ // byte with RuneError.
+ if w >= len(b)-2*utf8.UTFMax {
+ nb := make([]byte, (len(b)+utf8.UTFMax)*2)
+ copy(nb, b[0:w])
+ b = nb
+ }
+ switch c := s[r]; {
+ case c == '\\':
+ r++
+ if r >= len(s) {
+ return
+ }
+ switch s[r] {
+ default:
+ return
+ case '"', '\\', '/', '\'':
+ b[w] = s[r]
+ r++
+ w++
+ case 'b':
+ b[w] = '\b'
+ r++
+ w++
+ case 'f':
+ b[w] = '\f'
+ r++
+ w++
+ case 'n':
+ b[w] = '\n'
+ r++
+ w++
+ case 'r':
+ b[w] = '\r'
+ r++
+ w++
+ case 't':
+ b[w] = '\t'
+ r++
+ w++
+ case 'u':
+ r--
+ rr := getu4(s[r:])
+ if rr < 0 {
+ return
+ }
+ r += 6
+ if utf16.IsSurrogate(rr) {
+ rr1 := getu4(s[r:])
+ if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+ // A valid pair; consume.
+ r += 6
+ w += utf8.EncodeRune(b[w:], dec)
+ break
+ }
+ // Invalid surrogate; fall back to replacement rune.
+ rr = unicode.ReplacementChar
+ }
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+
+ // Quote, control characters are invalid.
+ case c == '"', c < ' ':
+ return
+
+ // ASCII
+ case c < utf8.RuneSelf:
+ b[w] = c
+ r++
+ w++
+
+ // Coerce to well-formed UTF-8.
+ default:
+ rr, size := utf8.DecodeRune(s[r:])
+ r += size
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+ }
+ return b[0:w], true
+}
diff --git a/vendor/github.com/go-jose/go-jose/v4/json/encode.go b/vendor/github.com/go-jose/go-jose/v4/json/encode.go
new file mode 100644
index 00000000..98de68ce
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/json/encode.go
@@ -0,0 +1,1197 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package json implements encoding and decoding of JSON objects as defined in
+// RFC 4627. The mapping between JSON objects and Go values is described
+// in the documentation for the Marshal and Unmarshal functions.
+//
+// See "JSON and Go" for an introduction to this package:
+// https://golang.org/doc/articles/json_and_go.html
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "math"
+ "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Marshal returns the JSON encoding of v.
+//
+// Marshal traverses the value v recursively.
+// If an encountered value implements the Marshaler interface
+// and is not a nil pointer, Marshal calls its MarshalJSON method
+// to produce JSON. If no MarshalJSON method is present but the
+// value implements encoding.TextMarshaler instead, Marshal calls
+// its MarshalText method.
+// The nil pointer exception is not strictly necessary
+// but mimics a similar, necessary exception in the behavior of
+// UnmarshalJSON.
+//
+// Otherwise, Marshal uses the following type-dependent default encodings:
+//
+// Boolean values encode as JSON booleans.
+//
+// Floating point, integer, and Number values encode as JSON numbers.
+//
+// String values encode as JSON strings coerced to valid UTF-8,
+// replacing invalid bytes with the Unicode replacement rune.
+// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
+// to keep some browsers from misinterpreting JSON output as HTML.
+// Ampersand "&" is also escaped to "\u0026" for the same reason.
+//
+// Array and slice values encode as JSON arrays, except that
+// []byte encodes as a base64-encoded string, and a nil slice
+// encodes as the null JSON object.
+//
+// Struct values encode as JSON objects. Each exported struct field
+// becomes a member of the object unless
+// - the field's tag is "-", or
+// - the field is empty and its tag specifies the "omitempty" option.
+//
+// The empty values are false, 0, any
+// nil pointer or interface value, and any array, slice, map, or string of
+// length zero. The object's default key string is the struct field name
+// but can be specified in the struct field's tag value. The "json" key in
+// the struct field's tag value is the key name, followed by an optional comma
+// and options. Examples:
+//
+// // Field is ignored by this package.
+// Field int `json:"-"`
+//
+// // Field appears in JSON as key "myName".
+// Field int `json:"myName"`
+//
+// // Field appears in JSON as key "myName" and
+// // the field is omitted from the object if its value is empty,
+// // as defined above.
+// Field int `json:"myName,omitempty"`
+//
+// // Field appears in JSON as key "Field" (the default), but
+// // the field is skipped if empty.
+// // Note the leading comma.
+// Field int `json:",omitempty"`
+//
+// The "string" option signals that a field is stored as JSON inside a
+// JSON-encoded string. It applies only to fields of string, floating point,
+// integer, or boolean types. This extra level of encoding is sometimes used
+// when communicating with JavaScript programs:
+//
+// Int64String int64 `json:",string"`
+//
+// The key name will be used if it's a non-empty string consisting of
+// only Unicode letters, digits, dollar signs, percent signs, hyphens,
+// underscores and slashes.
+//
+// Anonymous struct fields are usually marshaled as if their inner exported fields
+// were fields in the outer struct, subject to the usual Go visibility rules amended
+// as described in the next paragraph.
+// An anonymous struct field with a name given in its JSON tag is treated as
+// having that name, rather than being anonymous.
+// An anonymous struct field of interface type is treated the same as having
+// that type as its name, rather than being anonymous.
+//
+// The Go visibility rules for struct fields are amended for JSON when
+// deciding which field to marshal or unmarshal. If there are
+// multiple fields at the same level, and that level is the least
+// nested (and would therefore be the nesting level selected by the
+// usual Go rules), the following extra rules apply:
+//
+// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
+// even if there are multiple untagged fields that would otherwise conflict.
+// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
+// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
+//
+// Handling of anonymous struct fields is new in Go 1.1.
+// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
+// an anonymous struct field in both current and earlier versions, give the field
+// a JSON tag of "-".
+//
+// Map values encode as JSON objects.
+// The map's key type must be string; the map keys are used as JSON object
+// keys, subject to the UTF-8 coercion described for string values above.
+//
+// Pointer values encode as the value pointed to.
+// A nil pointer encodes as the null JSON object.
+//
+// Interface values encode as the value contained in the interface.
+// A nil interface value encodes as the null JSON object.
+//
+// Channel, complex, and function values cannot be encoded in JSON.
+// Attempting to encode such a value causes Marshal to return
+// an UnsupportedTypeError.
+//
+// JSON cannot represent cyclic data structures and Marshal does not
+// handle them. Passing cyclic structures to Marshal will result in
+// an infinite recursion.
+func Marshal(v interface{}) ([]byte, error) {
+	// Encode into a fresh encodeState; any failure during
+	// traversal aborts with a nil result.
+	e := &encodeState{}
+	if err := e.marshal(v); err != nil {
+		return nil, err
+	}
+	return e.Bytes(), nil
+}
+
+// MarshalIndent is like Marshal but applies Indent to format the output.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+	// Produce compact JSON first, then re-indent the result.
+	b, err := Marshal(v)
+	if err != nil {
+		return nil, err
+	}
+	var buf bytes.Buffer
+	if err := Indent(&buf, b, prefix, indent); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
+// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
+// so that the JSON will be safe to embed inside HTML