From 74d7e7950c01399a93b0d1bd33e64a0d285f5b23 Mon Sep 17 00:00:00 2001 From: zimbatm Date: Sat, 27 Jul 2024 18:44:23 +0200 Subject: [PATCH 1/3] vendor https://github.com/nix-community/go-nix/pull/120 Fixes the issues with cachix --- api/unpack/index.go | 6 +- default.nix | 2 +- go.mod | 68 ++-- go.sum | 166 +++----- {libstore => pkg/libstore}/binary_cache.go | 0 {libstore => pkg/libstore}/doc.go | 0 .../libstore}/file_binary_cache_store.go | 0 .../libstore}/gcs_binary_cache_store.go | 0 .../libstore}/http_binary_cache_store.go | 0 .../libstore}/s3_binary_cache_store.go | 0 pkg/nar/doc.go | 8 + pkg/nar/dump.go | 150 +++++++ pkg/nar/dump_nonwindows_test.go | 32 ++ pkg/nar/dump_test.go | 195 +++++++++ pkg/nar/fixtures_test.go | 277 +++++++++++++ pkg/nar/header.go | 79 ++++ pkg/nar/header_mode.go | 29 ++ pkg/nar/header_mode_windows.go | 25 ++ pkg/nar/header_test.go | 72 ++++ pkg/nar/ls/doc.go | 7 + pkg/nar/ls/list.go | 95 +++++ pkg/nar/ls/list_test.go | 59 +++ pkg/nar/reader.go | 385 ++++++++++++++++++ pkg/nar/reader_test.go | 367 +++++++++++++++++ pkg/nar/types.go | 19 + pkg/nar/util.go | 39 ++ pkg/nar/util_test.go | 61 +++ pkg/nar/writer.go | 337 +++++++++++++++ pkg/nar/writer_test.go | 333 +++++++++++++++ pkg/narinfo/check.go | 49 +++ pkg/narinfo/narinfo_test.go | 214 ++++++++++ pkg/narinfo/parser.go | 119 ++++++ pkg/narinfo/signature.go | 53 +++ pkg/narinfo/signature_test.go | 48 +++ pkg/narinfo/types.go | 94 +++++ pkg/nixbase32/doc.go | 10 + pkg/nixbase32/nixbase32.go | 123 ++++++ pkg/nixbase32/nixbase32_test.go | 115 ++++++ pkg/nixhash/algo.go | 67 +++ pkg/nixhash/algo_test.go | 60 +++ pkg/nixhash/encoding.go | 21 + pkg/nixhash/hash.go | 66 +++ pkg/nixhash/hash_test.go | 120 ++++++ pkg/nixhash/hash_with_encoding.go | 34 ++ pkg/nixhash/parse.go | 102 +++++ pkg/nixhash/util.go | 15 + pkg/nixpath/nixpath.go | 116 ++++++ pkg/nixpath/nixpath_test.go | 120 ++++++ pkg/nixpath/references/refs.go | 105 +++++ pkg/nixpath/references/refs_test.go | 96 +++++ pkg/wire/bytes_reader.go | 59 +++ pkg/wire/bytes_writer.go | 72 ++++ pkg/wire/read.go | 103 +++++ pkg/wire/read_test.go | 157 +++++++ pkg/wire/wire.go | 12 + pkg/wire/write.go | 76 ++++ pkg/wire/write_test.go | 121 ++++++ tests/integration_test.go | 2 +- 58 files changed, 5015 insertions(+), 145 deletions(-) rename {libstore => pkg/libstore}/binary_cache.go (100%) rename {libstore => pkg/libstore}/doc.go (100%) rename {libstore => pkg/libstore}/file_binary_cache_store.go (100%) rename {libstore => pkg/libstore}/gcs_binary_cache_store.go (100%) rename {libstore => pkg/libstore}/http_binary_cache_store.go (100%) rename {libstore => pkg/libstore}/s3_binary_cache_store.go (100%) create mode 100644 pkg/nar/doc.go create mode 100644 pkg/nar/dump.go create mode 100644 pkg/nar/dump_nonwindows_test.go create mode 100644 pkg/nar/dump_test.go create mode 100644 pkg/nar/fixtures_test.go create mode 100644 pkg/nar/header.go create mode 100644 pkg/nar/header_mode.go create mode 100644 pkg/nar/header_mode_windows.go create mode 100644 pkg/nar/header_test.go create mode 100644 pkg/nar/ls/doc.go create mode 100644 pkg/nar/ls/list.go create mode 100644 pkg/nar/ls/list_test.go create mode 100644 pkg/nar/reader.go create mode 100644 pkg/nar/reader_test.go create mode 100644 pkg/nar/types.go create mode 100644 pkg/nar/util.go create mode 100644 pkg/nar/util_test.go create mode 100644 pkg/nar/writer.go create mode 100644 pkg/nar/writer_test.go create mode 100644 pkg/narinfo/check.go create mode 100644 pkg/narinfo/narinfo_test.go create mode 100644 
pkg/narinfo/parser.go create mode 100644 pkg/narinfo/signature.go create mode 100644 pkg/narinfo/signature_test.go create mode 100644 pkg/narinfo/types.go create mode 100644 pkg/nixbase32/doc.go create mode 100644 pkg/nixbase32/nixbase32.go create mode 100644 pkg/nixbase32/nixbase32_test.go create mode 100644 pkg/nixhash/algo.go create mode 100644 pkg/nixhash/algo_test.go create mode 100644 pkg/nixhash/encoding.go create mode 100644 pkg/nixhash/hash.go create mode 100644 pkg/nixhash/hash_test.go create mode 100644 pkg/nixhash/hash_with_encoding.go create mode 100644 pkg/nixhash/parse.go create mode 100644 pkg/nixhash/util.go create mode 100644 pkg/nixpath/nixpath.go create mode 100644 pkg/nixpath/nixpath_test.go create mode 100644 pkg/nixpath/references/refs.go create mode 100644 pkg/nixpath/references/refs_test.go create mode 100644 pkg/wire/bytes_reader.go create mode 100644 pkg/wire/bytes_writer.go create mode 100644 pkg/wire/read.go create mode 100644 pkg/wire/read_test.go create mode 100644 pkg/wire/wire.go create mode 100644 pkg/wire/write.go create mode 100644 pkg/wire/write_test.go diff --git a/api/unpack/index.go b/api/unpack/index.go index e744a40..8beec6c 100644 --- a/api/unpack/index.go +++ b/api/unpack/index.go @@ -11,9 +11,9 @@ import ( "path/filepath" "strings" - "github.com/nix-community/go-nix/pkg/nar" - "github.com/nix-community/go-nix/pkg/narinfo" - "github.com/numtide/nar-serve/libstore" + "github.com/numtide/nar-serve/pkg/libstore" + "github.com/numtide/nar-serve/pkg/nar" + "github.com/numtide/nar-serve/pkg/narinfo" "github.com/ulikunitz/xz" ) diff --git a/default.nix b/default.nix index 69be858..4c3bcc2 100644 --- a/default.nix +++ b/default.nix @@ -6,7 +6,7 @@ pname = "nar-serve"; version = "latest"; src = nixpkgs.lib.cleanSource ./.; - vendorHash = "sha256-HTWCOnK81xLP0HKcpmzGlkexIl3s6p1d9aYCx3fz5x4="; + vendorHash = "sha256-KZ7dOwx52+2ljfedAMUR1FRv3kAO7Kl4y6wvjJeWdKc="; doCheck = false; }; diff --git a/go.mod b/go.mod index 0155495..e486040 100644 --- a/go.mod +++ b/go.mod @@ -1,59 +1,53 @@ module github.com/numtide/nar-serve -go 1.19 +go 1.21 + +toolchain go1.22.5 require ( - cloud.google.com/go/storage v1.39.0 - github.com/aws/aws-sdk-go v1.50.30 - github.com/nix-community/go-nix v0.0.0-20231219074122-93cb24a86856 + cloud.google.com/go/storage v1.43.0 + github.com/aws/aws-sdk-go v1.55.3 + github.com/google/go-cmp v0.6.0 github.com/stretchr/testify v1.9.0 github.com/ulikunitz/xz v0.5.12 github.com/urfave/negroni v1.0.0 ) require ( - cloud.google.com/go v0.112.0 // indirect - cloud.google.com/go/compute v1.24.0 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.6 // indirect + cloud.google.com/go v0.115.0 // indirect + cloud.google.com/go/auth v0.7.2 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect + cloud.google.com/go/compute/metadata v0.5.0 // indirect + cloud.google.com/go/iam v1.1.12 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/google/s2a-go v0.1.7 // indirect + github.com/google/s2a-go v0.1.8 // indirect github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.1 // 
indirect + github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/klauspost/cpuid/v2 v2.0.9 // indirect - github.com/minio/sha256-simd v1.0.0 // indirect - github.com/mr-tron/base58 v1.2.0 // indirect - github.com/multiformats/go-multihash v0.2.1 // indirect - github.com/multiformats/go-varint v0.0.6 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spaolacci/murmur3 v1.1.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 // indirect - go.opentelemetry.io/otel v1.23.0 // indirect - go.opentelemetry.io/otel/metric v1.23.0 // indirect - go.opentelemetry.io/otel/trace v1.23.0 // indirect - golang.org/x/crypto v0.21.0 // indirect - golang.org/x/net v0.23.0 // indirect - golang.org/x/oauth2 v0.17.0 // indirect - golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect + go.opentelemetry.io/otel v1.28.0 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/trace v1.28.0 // indirect + golang.org/x/crypto v0.25.0 // indirect + golang.org/x/net v0.27.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.22.0 // indirect + golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect - google.golang.org/api v0.166.0 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 // indirect - google.golang.org/grpc v1.61.1 // indirect - google.golang.org/protobuf v1.33.0 // indirect + google.golang.org/api v0.189.0 // indirect + google.golang.org/genproto v0.0.0-20240725223205-93522f1f2a9f // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240725223205-93522f1f2a9f // indirect + google.golang.org/grpc v1.65.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - lukechampine.com/blake3 v1.1.6 // indirect ) diff --git a/go.sum b/go.sum index c6c56d3..860af39 100644 --- a/go.sum +++ b/go.sum @@ -1,21 +1,24 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= -cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= -cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= -cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc= -cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI= -cloud.google.com/go/storage v1.39.0 
h1:brbjUa4hbDHhpQf48tjqMaXEV+f1OGoaTmQau9tmCsA= -cloud.google.com/go/storage v1.39.0/go.mod h1:OAEj/WZwUYjA3YHQ10/YcN9ttGuEpLwvaoyBXIPikEk= +cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= +cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= +cloud.google.com/go/auth v0.7.2 h1:uiha352VrCDMXg+yoBtaD0tUF4Kv9vrtrWPYXwutnDE= +cloud.google.com/go/auth v0.7.2/go.mod h1:VEc4p5NNxycWQTMQEDQF0bd6aTMb6VgYDXEwiJJQAbs= +cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI= +cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I= +cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= +cloud.google.com/go/iam v1.1.12 h1:JixGLimRrNGcxvJEQ8+clfLxPlbeZA6MuRJ+qJNQ5Xw= +cloud.google.com/go/iam v1.1.12/go.mod h1:9LDX8J7dN5YRyzVHxwQzrQs9opFFqn0Mxs9nAeB+Hhg= +cloud.google.com/go/longrunning v0.5.10 h1:eB/BniENNRKhjz/xgiillrdcH3G74TGSl3BXinGlI7E= +cloud.google.com/go/longrunning v0.5.10/go.mod h1:tljz5guTr5oc/qhlUjBlk7UAIFMOGuPNxkNDZXlLics= +cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= +cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/aws/aws-sdk-go v1.50.30 h1:2OelKH1eayeaH7OuL1Y9Ombfw4HK+/k0fEnJNWjyLts= -github.com/aws/aws-sdk-go v1.50.30/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.55.3 h1:0B5hOX+mIx7I5XPOrjrHlKSDQV/+ypFZpIHOx5LOk3E= +github.com/aws/aws-sdk-go v1.55.3/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 h1:7To3pQ+pZo0i3dsWEbinPNFs5gPSBOsJtx3wTT94VBY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -23,12 +26,11 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= 
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -45,50 +47,34 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= +github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.1 h1:9F8GV9r9ztXyAi00gsMQHNoF51xPZm8uj1dpYt2ZETM= -github.com/googleapis/gax-go/v2 v2.12.1/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= +github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= +github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/jmespath/go-jmespath v0.4.0 
h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4= -github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= -github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= -github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= -github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108= -github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= -github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY= -github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= -github.com/nix-community/go-nix v0.0.0-20231219074122-93cb24a86856 h1:CHnKW7ZH43KDkO9vDazQefi82Z0l1smKhSOpMsV1A9I= -github.com/nix-community/go-nix v0.0.0-20231219074122-93cb24a86856/go.mod h1:0FdXufC8BrrWsr65fGYC0fI6hlk4ku+JHGUiYhX/6g4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -97,74 +83,58 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= -github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/negroni v1.0.0 h1:kIimOitoypq34K7TG7DUaJ9kq/N4Ofuwi1sjz0KipXc= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 
h1:P+/g8GpuJGYbOp2tAdKrIPUX9JO02q8Q0YNlHolpibA= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0/go.mod h1:tIKj3DbO8N9Y2xo52og3irLsPI4GW02DSMtrVgNMgxg= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 h1:doUP+ExOpH3spVTLS0FcWGLnQrPct/hD/bCPbDRUEAU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0/go.mod h1:rdENBZMT2OE6Ne/KLwpiXudnAsbdrdBaqBvTN8M8BgA= -go.opentelemetry.io/otel v1.23.0 h1:Df0pqjqExIywbMCMTxkAwzjLZtRf+bBKLbUcpxO2C9E= -go.opentelemetry.io/otel v1.23.0/go.mod h1:YCycw9ZeKhcJFrb34iVSkyT0iczq/zYDtZYFufObyB0= -go.opentelemetry.io/otel/metric v1.23.0 h1:pazkx7ss4LFVVYSxYew7L5I6qvLXHA0Ap2pwV+9Cnpo= -go.opentelemetry.io/otel/metric v1.23.0/go.mod h1:MqUW2X2a6Q8RN96E2/nqNoT+z9BSms20Jb7Bbp+HiTo= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/trace v1.23.0 h1:37Ik5Ib7xfYVb4V1UtnT97T1jI+AoIYkJyPkuL4iJgI= -go.opentelemetry.io/otel/trace v1.23.0/go.mod h1:GSGTbIClEsuZrGIzoEHqsVfxgn5UkggkflQwDScNUsk= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= +go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= -golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term 
v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -172,33 +142,27 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= -google.golang.org/api v0.166.0 h1:6m4NUwrZYhAaVIHZWxaKjw1L1vNAjtMwORmKRyEEo24= -google.golang.org/api v0.166.0/go.mod h1:4FcBc686KFi7QI/U51/2GKKevfZMpM17sCdibqe/bSA= +google.golang.org/api v0.189.0 h1:equMo30LypAkdkLMBqfeIqtyAnlyig1JSZArl4XPwdI= +google.golang.org/api v0.189.0/go.mod h1:FLWGJKb0hb+pU2j+rJqwbnsF+ym+fQs73rbJ+KAUgy8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= -google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= -google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c h1:9g7erC9qu44ks7UK4gDNlnk4kOxZG707xKm4jVniy6o= -google.golang.org/genproto/googleapis/api 
v0.0.0-20240221002015-b0ce06bbee7c/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 h1:hZB7eLIaYlW9qXRfCq/qDaPdbeY3757uARz5Vvfv+cY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:YUWgXUFRPfoYK1IHMuxH5K6nPEXSCzIMljnQ59lLRCk= +google.golang.org/genproto v0.0.0-20240725223205-93522f1f2a9f h1:htT2I9bZvGm+110zq8bIErMX+WgBWxCzV3ChwbvnKnc= +google.golang.org/genproto v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:Sk3mLpoDFTAp6R4OvlcUgaG4ISTspKeFsIAXMn9Bm4Y= +google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk= +google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240725223205-93522f1f2a9f h1:RARaIm8pxYuxyNPbBQf5igT7XdOyCNtat1qAT2ZxjU4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY= -google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -208,10 +172,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= @@ -221,5 +183,3 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -lukechampine.com/blake3 v1.1.6 h1:H3cROdztr7RCfoaTpGZFQsrqvweFLrqS73j7L7cmR5c= -lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= diff --git a/libstore/binary_cache.go b/pkg/libstore/binary_cache.go similarity index 100% rename from libstore/binary_cache.go rename to pkg/libstore/binary_cache.go diff --git a/libstore/doc.go b/pkg/libstore/doc.go similarity index 100% rename from libstore/doc.go rename to pkg/libstore/doc.go diff --git a/libstore/file_binary_cache_store.go b/pkg/libstore/file_binary_cache_store.go similarity index 100% rename from libstore/file_binary_cache_store.go rename to pkg/libstore/file_binary_cache_store.go diff --git a/libstore/gcs_binary_cache_store.go b/pkg/libstore/gcs_binary_cache_store.go similarity index 100% rename from libstore/gcs_binary_cache_store.go rename to pkg/libstore/gcs_binary_cache_store.go diff --git a/libstore/http_binary_cache_store.go b/pkg/libstore/http_binary_cache_store.go similarity index 100% rename from libstore/http_binary_cache_store.go rename to pkg/libstore/http_binary_cache_store.go diff --git a/libstore/s3_binary_cache_store.go b/pkg/libstore/s3_binary_cache_store.go similarity index 100% rename from libstore/s3_binary_cache_store.go rename to pkg/libstore/s3_binary_cache_store.go diff --git a/pkg/nar/doc.go b/pkg/nar/doc.go new file mode 100644 index 0000000..33dbb4b --- /dev/null +++ b/pkg/nar/doc.go @@ -0,0 +1,8 @@ +// Package nar implements access to .nar files. +// +// Nix Archive (nar) is a file format for storing a directory or a single file +// in a binary reproducible format. This is the format that is being used to +// pack and distribute Nix build results. It doesn't store any timestamps or +// similar fields available in conventional filesystems. .nar files can be read +// and written in a streaming manner. +package nar diff --git a/pkg/nar/dump.go b/pkg/nar/dump.go new file mode 100644 index 0000000..29a2e4e --- /dev/null +++ b/pkg/nar/dump.go @@ -0,0 +1,150 @@ +package nar + +import ( + "fmt" + "io" + "os" + "path/filepath" + "syscall" +) + +// SourceFilterFunc is the interface for creating source filters. +// If the function returns true, the file is copied to the Nix store, otherwise it is omitted, +// this mimics the behaviour of the Nix function builtins.filterSource. +type SourceFilterFunc func(path string, nodeType NodeType) bool + +// DumpPath will serialize a path on the local file system to NAR format, +// and write it to the passed writer. +func DumpPath(w io.Writer, path string) error { + return DumpPathFilter(w, path, nil) +} + +// DumpPathFilter will serialize a path on the local file system to NAR format, +// and write it to the passed writer, filtering out any files where the filter +// function returns false. +func DumpPathFilter(w io.Writer, path string, filter SourceFilterFunc) error { + // initialize the nar writer + nw, err := NewWriter(w) + if err != nil { + return err + } + + // make sure the NAR writer is always closed, so the underlying goroutine is stopped + defer nw.Close() + + err = dumpPath(nw, path, "/", filter) + if err != nil { + return err + } + + return nw.Close() +} + +// dumpPath recursively calls itself for every node in the path. 
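For orientation, a minimal usage sketch of the exported DumpPath/DumpPathFilter entry points and the SourceFilterFunc callback introduced above. The output file name, source directory and filter predicate are illustrative assumptions, not part of the vendored code.

package main

import (
	"log"
	"os"
	"strings"

	"github.com/numtide/nar-serve/pkg/nar"
)

func main() {
	// Output file that receives the NAR stream (illustrative path).
	out, err := os.Create("example.nar")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// Serialize ./some-dir, skipping anything under a .git directory,
	// similar in spirit to builtins.filterSource.
	err = nar.DumpPathFilter(out, "./some-dir", func(p string, _ nar.NodeType) bool {
		return !strings.Contains(p, "/.git")
	})
	if err != nil {
		log.Fatal(err)
	}
}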
+func dumpPath(nw *Writer, path string, subpath string, filter SourceFilterFunc) error { + // assemble the full path. + p := filepath.Join(path, subpath) + + // peek at the path + fi, err := os.Lstat(p) + if err != nil { + return err + } + + var nodeType NodeType + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + nodeType = TypeSymlink + } else if fi.IsDir() { + nodeType = TypeDirectory + } else if fi.Mode().IsRegular() { + nodeType = TypeRegular + } else { + return fmt.Errorf("unknown type for %v", p) + } + + if filter != nil && !filter(p, nodeType) { + return nil + } + + switch nodeType { + case TypeSymlink: + linkTarget, err := os.Readlink(p) + if err != nil { + return err + } + + // write the symlink node + err = nw.WriteHeader(&Header{ + Path: subpath, + Type: TypeSymlink, + LinkTarget: linkTarget, + }) + if err != nil { + return err + } + + return nil + + case TypeDirectory: + // write directory node + err := nw.WriteHeader(&Header{ + Path: subpath, + Type: TypeDirectory, + }) + if err != nil { + return err + } + + // look at the children + files, err := os.ReadDir(filepath.Join(path, subpath)) + if err != nil { + return err + } + + // loop over all elements + for _, file := range files { + err := dumpPath(nw, path, filepath.Join(subpath, file.Name()), filter) + if err != nil { + return err + } + } + + return nil + + case TypeRegular: + // write regular node + err := nw.WriteHeader(&Header{ + Path: subpath, + Type: TypeRegular, + Size: fi.Size(), + // If it's executable by the user, it'll become executable. + // This matches nix's dump() function behaviour. + Executable: fi.Mode()&syscall.S_IXUSR != 0, + }) + if err != nil { + return err + } + + // open the file + f, err := os.Open(p) + if err != nil { + return err + } + defer f.Close() + + // read in contents + n, err := io.Copy(nw, f) + if err != nil { + return err + } + + // check if read bytes matches fi.Size() + if n != fi.Size() { + return fmt.Errorf("read %v, expected %v bytes while reading %v", n, fi.Size(), p) + } + + return nil + } + + return fmt.Errorf("unknown type for file %v", p) +} diff --git a/pkg/nar/dump_nonwindows_test.go b/pkg/nar/dump_nonwindows_test.go new file mode 100644 index 0000000..42cac06 --- /dev/null +++ b/pkg/nar/dump_nonwindows_test.go @@ -0,0 +1,32 @@ +//go:build !windows +// +build !windows + +package nar_test + +import ( + "bytes" + "path/filepath" + "syscall" + "testing" + + "github.com/numtide/nar-serve/pkg/nar" + "github.com/stretchr/testify/assert" +) + +// TestDumpPathUnknown makes sure calling DumpPath on a path with a fifo +// doesn't panic, but returns an error. 
+func TestDumpPathUnknown(t *testing.T) { + tmpDir := t.TempDir() + p := filepath.Join(tmpDir, "a") + + err := syscall.Mkfifo(p, 0o644) + if err != nil { + panic(err) + } + + var buf bytes.Buffer + + err = nar.DumpPath(&buf, p) + assert.Error(t, err) + assert.Containsf(t, err.Error(), "unknown type", "error should complain about unknown type") +} diff --git a/pkg/nar/dump_test.go b/pkg/nar/dump_test.go new file mode 100644 index 0000000..a69d743 --- /dev/null +++ b/pkg/nar/dump_test.go @@ -0,0 +1,195 @@ +package nar_test + +import ( + "bytes" + "io" + "os" + "path/filepath" + "runtime" + "syscall" + "testing" + + "github.com/numtide/nar-serve/pkg/nar" + "github.com/stretchr/testify/assert" +) + +func TestDumpPathEmptyDir(t *testing.T) { + var buf bytes.Buffer + + err := nar.DumpPath(&buf, t.TempDir()) + if assert.NoError(t, err) { + assert.Equal(t, genEmptyDirectoryNar(), buf.Bytes()) + } +} + +func TestDumpPathOneByteRegular(t *testing.T) { + t.Run("non-executable", func(t *testing.T) { + tmpDir := t.TempDir() + p := filepath.Join(tmpDir, "a") + + err := os.WriteFile(p, []byte{0x1}, os.ModePerm&syscall.S_IRUSR) + if err != nil { + panic(err) + } + + var buf bytes.Buffer + + err = nar.DumpPath(&buf, p) + if assert.NoError(t, err) { + assert.Equal(t, genOneByteRegularNar(), buf.Bytes()) + } + }) + + t.Run("executable", func(t *testing.T) { + // This writes to the filesystem and looks at the attributes. + // As you can't represent the executable bit on windows, it would fail. + if runtime.GOOS == "windows" { + return + } + tmpDir := t.TempDir() + p := filepath.Join(tmpDir, "a") + + err := os.WriteFile(p, []byte{0x1}, os.ModePerm&(syscall.S_IRUSR|syscall.S_IXUSR)) + if err != nil { + panic(err) + } + + var buf bytes.Buffer + + // call dump path on it again + err = nar.DumpPath(&buf, p) + if assert.NoError(t, err) { + // We don't have a fixture with executable bit set, + // so pipe the nar into a reader and check the returned first header. + nr, err := nar.NewReader(&buf) + if err != nil { + panic(err) + } + + hdr, err := nr.Next() + if err != nil { + panic(err) + } + assert.True(t, hdr.Executable, "regular should be executable") + } + }) +} + +func TestDumpPathSymlink(t *testing.T) { + tmpDir := t.TempDir() + p := filepath.Join(tmpDir, "a") + + err := os.Symlink("/nix/store/somewhereelse", p) + if err != nil { + panic(err) + } + + var buf bytes.Buffer + + err = nar.DumpPath(&buf, p) + if assert.NoError(t, err) { + assert.Equal(t, genSymlinkNar(), buf.Bytes()) + } +} + +func TestDumpPathRecursion(t *testing.T) { + tmpDir := t.TempDir() + p := filepath.Join(tmpDir, "a") + + err := os.WriteFile(p, []byte{0x1}, os.ModePerm&syscall.S_IRUSR) + if err != nil { + panic(err) + } + + var buf bytes.Buffer + + err = nar.DumpPath(&buf, tmpDir) + if assert.NoError(t, err) { + // We don't have a fixture for the created path + // so pipe the nar into a reader and check the headers returned. 
+ nr, err := nar.NewReader(&buf) + if err != nil { + panic(err) + } + + // read in first node + hdr, err := nr.Next() + assert.NoError(t, err) + assert.Equal(t, &nar.Header{ + Path: "/", + Type: nar.TypeDirectory, + }, hdr) + + // read in second node + hdr, err = nr.Next() + assert.NoError(t, err) + assert.Equal(t, &nar.Header{ + Path: "/a", + Type: nar.TypeRegular, + Size: 1, + }, hdr) + + // read in contents + contents, err := io.ReadAll(nr) + assert.NoError(t, err) + assert.Equal(t, []byte{0x1}, contents) + + // we should be done + _, err = nr.Next() + assert.Equal(t, io.EOF, err) + } +} + +func TestDumpPathFilter(t *testing.T) { + t.Run("unfiltered", func(t *testing.T) { + tmpDir := t.TempDir() + p := filepath.Join(tmpDir, "a") + + err := os.WriteFile(p, []byte{0x1}, os.ModePerm&syscall.S_IRUSR) + if err != nil { + panic(err) + } + + var buf bytes.Buffer + + err = nar.DumpPathFilter(&buf, p, func(name string, nodeType nar.NodeType) bool { + assert.Equal(t, name, p) + assert.Equal(t, nodeType, nar.TypeRegular) + + return true + }) + if assert.NoError(t, err) { + assert.Equal(t, genOneByteRegularNar(), buf.Bytes()) + } + }) + + t.Run("filtered", func(t *testing.T) { + tmpDir := t.TempDir() + p := filepath.Join(tmpDir, "a") + + err := os.WriteFile(p, []byte{0x1}, os.ModePerm&syscall.S_IRUSR) + if err != nil { + panic(err) + } + + var buf bytes.Buffer + + err = nar.DumpPathFilter(&buf, tmpDir, func(name string, nodeType nar.NodeType) bool { + return name != p + }) + if assert.NoError(t, err) { + assert.NotEqual(t, genOneByteRegularNar(), buf.Bytes()) + } + }) +} + +func BenchmarkDumpPath(b *testing.B) { + b.Run("testdata", func(b *testing.B) { + for i := 0; i < b.N; i++ { + err := nar.DumpPath(io.Discard, "../../test/testdata") + if err != nil { + panic(err) + } + } + }) +} diff --git a/pkg/nar/fixtures_test.go b/pkg/nar/fixtures_test.go new file mode 100644 index 0000000..3649de5 --- /dev/null +++ b/pkg/nar/fixtures_test.go @@ -0,0 +1,277 @@ +package nar_test + +import ( + "bytes" + + "github.com/numtide/nar-serve/pkg/wire" +) + +// genEmptyNar returns just the magic header, without any actual nodes +// this is no valid NAR file, as it needs to contain at least a root. +func genEmptyNar() []byte { + var expectedBuf bytes.Buffer + + err := wire.WriteString(&expectedBuf, "nix-archive-1") + if err != nil { + panic(err) + } + + return expectedBuf.Bytes() +} + +// genEmptyDirectoryNar returns the bytes of a NAR file only containing an empty directory. +func genEmptyDirectoryNar() []byte { + var expectedBuf bytes.Buffer + + err := wire.WriteString(&expectedBuf, "nix-archive-1") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, "(") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, "type") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, "directory") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, ")") + if err != nil { + panic(err) + } + + return expectedBuf.Bytes() +} + +// genOneByteRegularNar returns the bytes of a NAR only containing a single file at the root. 
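The fixture helpers in this test file assemble NARs token by token via wire.WriteString. Per the NAR format, each token is a little-endian 64-bit length followed by the raw bytes, zero-padded to the next 8-byte boundary. The standalone sketch below reproduces that framing with only the standard library, as an illustration of what the fixtures are building up; it is not the wire package's implementation.

package main

import (
	"encoding/binary"
	"fmt"
)

// frame encodes one NAR token: little-endian uint64 length, the raw
// bytes, then zero padding up to the next multiple of eight bytes.
func frame(s string) []byte {
	buf := make([]byte, 8, 8+len(s)+7)
	binary.LittleEndian.PutUint64(buf, uint64(len(s)))
	buf = append(buf, s...)
	if pad := len(s) % 8; pad != 0 {
		buf = append(buf, make([]byte, 8-pad)...)
	}
	return buf
}

func main() {
	// The magic string every NAR starts with, as written by genEmptyNar above.
	fmt.Printf("% x\n", frame("nix-archive-1"))
}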
+func genOneByteRegularNar() []byte { + var expectedBuf bytes.Buffer + + err := wire.WriteString(&expectedBuf, "nix-archive-1") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, "(") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, "type") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, "regular") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, "contents") + if err != nil { + panic(err) + } + + err = wire.WriteBytes(&expectedBuf, []byte{0x1}) + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, ")") + if err != nil { + panic(err) + } + + return expectedBuf.Bytes() +} + +// genSymlinkNar returns the bytes of a NAR only containing a single symlink at the root. +func genSymlinkNar() []byte { + var expectedBuf bytes.Buffer + + err := wire.WriteString(&expectedBuf, "nix-archive-1") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, "(") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, "type") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, "symlink") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, "target") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, "/nix/store/somewhereelse") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, ")") + if err != nil { + panic(err) + } + + return expectedBuf.Bytes() +} + +// genInvalidOrderNAR returns the bytes of a NAR file that contains a folder +// with a and b directories inside, but in the wrong order (b comes first). +func genInvalidOrderNAR() []byte { + var expectedBuf bytes.Buffer + + err := wire.WriteString(&expectedBuf, "nix-archive-1") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, "(") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, "type") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, "directory") + if err != nil { + panic(err) + } + + // first entry begin + err = wire.WriteString(&expectedBuf, "entry") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, "(") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, "name") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, "b") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, "node") + if err != nil { + panic(err) + } + + // begin + err = wire.WriteString(&expectedBuf, "(") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, "type") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, "directory") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, ")") + if err != nil { + panic(err) + } + // end + + err = wire.WriteString(&expectedBuf, ")") + if err != nil { + panic(err) + } + // first entry end + + // second entry begin + err = wire.WriteString(&expectedBuf, "entry") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, "(") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, "name") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, "a") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, "node") + if err != nil { + panic(err) + } + + // begin + err = wire.WriteString(&expectedBuf, "(") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, 
"type") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, "directory") + if err != nil { + panic(err) + } + + err = wire.WriteString(&expectedBuf, ")") + if err != nil { + panic(err) + } + // end + + err = wire.WriteString(&expectedBuf, ")") + if err != nil { + panic(err) + } + // second entry end + + err = wire.WriteString(&expectedBuf, ")") + if err != nil { + panic(err) + } + + return expectedBuf.Bytes() +} diff --git a/pkg/nar/header.go b/pkg/nar/header.go new file mode 100644 index 0000000..3659454 --- /dev/null +++ b/pkg/nar/header.go @@ -0,0 +1,79 @@ +package nar + +import ( + "fmt" + "io/fs" + "path/filepath" + "strings" + "time" +) + +// Header represents a single header in a NAR archive. Some fields may not +// be populated depending on the Type. +type Header struct { + Path string // Path of the file entry, relative inside the NAR + Type NodeType // Typeflag is the type of header entry. + LinkTarget string // Target of symlink (valid for TypeSymlink) + Size int64 // Logical file size in bytes + Executable bool // Set to true for files that are executable +} + +// Validate does some consistency checking of the header structure, such as +// checking for valid paths and inconsistent fields, and returns an error if it +// fails validation. +func (h *Header) Validate() error { + // Path needs to start with a /, and must not contain null bytes + // as we might get passed windows paths, ToSlash them first. + if p := filepath.ToSlash(h.Path); len(h.Path) < 1 || p[0:1] != "/" { + return fmt.Errorf("path must start with a /") + } + + if strings.ContainsAny(h.Path, "\u0000") { + return fmt.Errorf("path may not contain null bytes") + } + + // Regular files and directories may not have LinkTarget set. + if h.Type == TypeRegular || h.Type == TypeDirectory { + if h.LinkTarget != "" { + return fmt.Errorf("type is %v, but LinkTarget is not empty", h.Type.String()) + } + } + + // Directories and Symlinks may not have Size and Executable set. + if h.Type == TypeDirectory || h.Type == TypeSymlink { + if h.Size != 0 { + return fmt.Errorf("type is %v, but Size is not 0", h.Type.String()) + } + + if h.Executable { + return fmt.Errorf("type is %v, but Executable is true", h.Type.String()) + } + } + + // Symlinks need to specify a target. + if h.Type == TypeSymlink { + if h.LinkTarget == "" { + return fmt.Errorf("type is symlink, but LinkTarget is empty") + } + } + + return nil +} + +// FileInfo returns an fs.FileInfo for the Header. +func (h *Header) FileInfo() fs.FileInfo { + return headerFileInfo{h} +} + +type headerFileInfo struct { + h *Header +} + +func (fi headerFileInfo) Size() int64 { return fi.h.Size } +func (fi headerFileInfo) IsDir() bool { return fi.h.Type == TypeDirectory } +func (fi headerFileInfo) ModTime() time.Time { return time.Unix(0, 0) } +func (fi headerFileInfo) Sys() interface{} { return fi.h } + +// Name of the file. +// Will be an empty string, if this describes the root of a NAR. +func (fi headerFileInfo) Name() string { return fi.h.Path } diff --git a/pkg/nar/header_mode.go b/pkg/nar/header_mode.go new file mode 100644 index 0000000..8d97acb --- /dev/null +++ b/pkg/nar/header_mode.go @@ -0,0 +1,29 @@ +//go:build !windows +// +build !windows + +package nar + +import ( + "io/fs" + "syscall" +) + +func (fi headerFileInfo) Mode() fs.FileMode { + // everything in the nix store is readable by user, group and other. 
+ var mode fs.FileMode + + switch fi.h.Type { + case TypeRegular: + mode = syscall.S_IRUSR | syscall.S_IRGRP | syscall.S_IROTH + if fi.h.Executable { + mode |= (syscall.S_IXUSR | syscall.S_IXGRP | syscall.S_IXOTH) + } + case TypeDirectory: + mode = syscall.S_IRUSR | syscall.S_IRGRP | syscall.S_IROTH + mode |= (syscall.S_IXUSR | syscall.S_IXGRP | syscall.S_IXOTH) + case TypeSymlink: + mode = fs.ModePerm | fs.ModeSymlink + } + + return mode +} diff --git a/pkg/nar/header_mode_windows.go b/pkg/nar/header_mode_windows.go new file mode 100644 index 0000000..669777c --- /dev/null +++ b/pkg/nar/header_mode_windows.go @@ -0,0 +1,25 @@ +package nar + +import ( + "io/fs" +) + +func (fi headerFileInfo) Mode() fs.FileMode { + // On Windows, create a very basic variant of Mode(). + // we use fs.FileMode and clear the 0200 bit. + // Per https://golang.org/pkg/os/#Chmod: + // “On Windows, only the 0200 bit (owner writable) of mode is used; it + // controls whether the file's read-only attribute is set or cleared.” + var mode fs.FileMode + + switch fi.h.Type { + case TypeRegular: + mode = fs.ModePerm + case TypeDirectory: + mode = fs.ModeDir + case TypeSymlink: + mode = fs.ModeSymlink + } + + return mode & ^fs.FileMode(0200) +} diff --git a/pkg/nar/header_test.go b/pkg/nar/header_test.go new file mode 100644 index 0000000..3893025 --- /dev/null +++ b/pkg/nar/header_test.go @@ -0,0 +1,72 @@ +package nar_test + +import ( + "testing" + + "github.com/numtide/nar-serve/pkg/nar" + "github.com/stretchr/testify/assert" +) + +func TestHeaderValidate(t *testing.T) { + headerRegular := &nar.Header{ + Path: "/foo/bar", + Type: nar.TypeRegular, + LinkTarget: "", + Size: 0, + Executable: false, + } + + t.Run("valid", func(t *testing.T) { + vHeader := *headerRegular + assert.NoError(t, vHeader.Validate()) + }) + + t.Run("invalid path", func(t *testing.T) { + invHeader := *headerRegular + invHeader.Path = "foo/bar" + assert.Error(t, invHeader.Validate()) + + invHeader.Path = "/foo/bar\000/" + assert.Error(t, invHeader.Validate()) + }) + + t.Run("LinkTarget set on regulars or directories", func(t *testing.T) { + invHeader := *headerRegular + invHeader.LinkTarget = "foo" + + assert.Error(t, invHeader.Validate()) + + invHeader.Type = nar.TypeDirectory + assert.Error(t, invHeader.Validate()) + }) + + t.Run("Size set on directories or symlinks", func(t *testing.T) { + invHeader := *headerRegular + invHeader.Type = nar.TypeDirectory + invHeader.Size = 1 + assert.Error(t, invHeader.Validate()) + + invHeader = *headerRegular + invHeader.Type = nar.TypeSymlink + invHeader.Size = 1 + assert.Error(t, invHeader.Validate()) + }) + + t.Run("Executable set on directories or symlinks", func(t *testing.T) { + invHeader := *headerRegular + invHeader.Type = nar.TypeDirectory + invHeader.Executable = true + assert.Error(t, invHeader.Validate()) + + invHeader = *headerRegular + invHeader.Type = nar.TypeSymlink + invHeader.Executable = true + assert.Error(t, invHeader.Validate()) + }) + + t.Run("No LinkTarget set on symlinks", func(t *testing.T) { + invHeader := *headerRegular + invHeader.Type = nar.TypeSymlink + assert.Error(t, invHeader.Validate()) + }) +} diff --git a/pkg/nar/ls/doc.go b/pkg/nar/ls/doc.go new file mode 100644 index 0000000..8bae554 --- /dev/null +++ b/pkg/nar/ls/doc.go @@ -0,0 +1,7 @@ +// Package ls implements a parser for the .ls file format, which provides an +// index into .nar files. 
+ +// It is provided on cache.nixos.org, and more generally, written when +// write-nar-listing=1 is passed while copying build results into a binary +// cache. +package ls diff --git a/pkg/nar/ls/list.go b/pkg/nar/ls/list.go new file mode 100644 index 0000000..19fedd6 --- /dev/null +++ b/pkg/nar/ls/list.go @@ -0,0 +1,95 @@ +package ls + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/numtide/nar-serve/pkg/nar" +) + +// Root represents the .ls file root entry. +type Root struct { + Version int `json:"version"` + Root Node +} + +// Node represents one of the entries in a .ls file. +type Node struct { + Type nar.NodeType `json:"type"` + Entries map[string]*Node `json:"entries"` + Size int64 `json:"size"` + LinkTarget string `json:"target"` + Executable bool `json:"executable"` + NAROffset int64 `json:"narOffset"` +} + +// validateNode runs some consistency checks on a node and all its child +// entries. It returns an error on failure. +func validateNode(node *Node) error { + // ensure the name of each entry is valid + for k, v := range node.Entries { + if !nar.IsValidNodeName(k) { + return fmt.Errorf("invalid entry name: %v", k) + } + + // Regular files and directories may not have LinkTarget set. + if node.Type == nar.TypeRegular || node.Type == nar.TypeDirectory { + if node.LinkTarget != "" { + return fmt.Errorf("type is %v, but LinkTarget is not empty", node.Type.String()) + } + } + + // Directories and Symlinks may not have Size and Executable set. + if node.Type == nar.TypeDirectory || node.Type == nar.TypeSymlink { + if node.Size != 0 { + return fmt.Errorf("type is %v, but Size is not 0", node.Type.String()) + } + + if node.Executable { + return fmt.Errorf("type is %v, but Executable is true", node.Type.String()) + } + } + + // Symlinks need to specify a target. + if node.Type == nar.TypeSymlink { + if node.LinkTarget == "" { + return fmt.Errorf("type is symlink, but LinkTarget is empty") + } + } + + // verify children + err := validateNode(v) + if err != nil { + return err + } + } + + return nil +} + +// ParseLS parses the NAR .ls file format. +// It returns a tree-like structure for all the entries. 
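+//
+// A minimal usage sketch (error handling shortened; `f` is assumed to be an
+// io.Reader over a .ls file, e.g. an *os.File):
+//
+//	root, err := ls.ParseLS(f)
+//	if err != nil {
+//		return err
+//	}
+//	if bin, ok := root.Root.Entries["bin"]; ok {
+//		fmt.Println(bin.Type, bin.Size, bin.NAROffset)
+//	}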
+func ParseLS(r io.Reader) (*Root, error) { + root := Root{} + + dec := json.NewDecoder(r) + dec.DisallowUnknownFields() + + err := dec.Decode(&root) + if err != nil { + return nil, err + } + + if root.Version != 1 { + return nil, fmt.Errorf("invalid version %d", root.Version) + } + + // ensure the nodes are valid + err = validateNode(&root.Root) + if err != nil { + return nil, err + } + + return &root, err +} diff --git a/pkg/nar/ls/list_test.go b/pkg/nar/ls/list_test.go new file mode 100644 index 0000000..b34fc15 --- /dev/null +++ b/pkg/nar/ls/list_test.go @@ -0,0 +1,59 @@ +package ls_test + +import ( + "strings" + "testing" + + "github.com/numtide/nar-serve/pkg/nar" + "github.com/numtide/nar-serve/pkg/nar/ls" + "github.com/stretchr/testify/assert" +) + +const fixture = ` +{ + "version": 1, + "root": { + "type": "directory", + "entries": { + "bin": { + "type": "directory", + "entries": { + "curl": { + "type": "regular", + "size": 182520, + "executable": true, + "narOffset": 400 + } + } + } + } + } +} +` + +func TestLS(t *testing.T) { + r := strings.NewReader(fixture) + root, err := ls.ParseLS(r) + assert.NoError(t, err) + + expectedRoot := &ls.Root{ + Version: 1, + Root: ls.Node{ + Type: nar.TypeDirectory, + Entries: map[string]*ls.Node{ + "bin": { + Type: nar.TypeDirectory, + Entries: map[string]*ls.Node{ + "curl": { + Type: nar.TypeRegular, + Size: 182520, + Executable: true, + NAROffset: 400, + }, + }, + }, + }, + }, + } + assert.Equal(t, expectedRoot, root) +} diff --git a/pkg/nar/reader.go b/pkg/nar/reader.go new file mode 100644 index 0000000..096bf31 --- /dev/null +++ b/pkg/nar/reader.go @@ -0,0 +1,385 @@ +package nar + +import ( + "bytes" + "fmt" + "io" + "math" + "path" + + "github.com/numtide/nar-serve/pkg/wire" +) + +const ( + // for small tokens, + // we use this to limit how large an invalid token we'll read. + tokenLenMax = 32 + // maximum length for a single path element + // NAME_MAX is 255 on Linux. + nameLenMax = 255 + // maximum length for a relative path + // PATH_MAX is 4096 on Linux, but that includes a null byte. + pathLenMax = 4096 - 1 +) + +// Reader implements io.ReadCloser. +var _ io.ReadCloser = &Reader{} + +// Reader providers sequential access to the contents of a NAR archive. +// Reader.Next advances to the next file in the archive (including the first), +// and then Reader can be treated as an io.Reader to access the file's data. +type Reader struct { + r io.Reader + contentReader io.ReadCloser + + // channels to communicate with the parser goroutine + + // channel used by the parser to communicate back headers and erorrs + headers chan *Header + errors chan error + + // whenever we once got back an error from the parser, we blow a fuse, + // store the error here, and Next() won't resume the parser anymore. + err error + + // NarReader uses this to resume the parser + next chan bool + + // keep a record of the previously received hdr.Path. + // Only read and updated in the Next() method, receiving from the channel + // populated by the goroutine, not the goroutine itself. + // We do this to bail out if we receive a header from the channel that's + // lexicographically smaller than the previous one. + // Elements in NAR files need to be ordered for reproducibility. + previousHdrPath string +} + +// NewReader creates a new Reader reading from r. +// It'll try to detect the magic header and will fail if it can't be read. 
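+//
+// A typical read loop, sketched with most error handling elided
+// (`f` is assumed to be an io.Reader over a NAR file):
+//
+//	nr, _ := nar.NewReader(f)
+//	for {
+//		hdr, err := nr.Next()
+//		if err == io.EOF {
+//			break
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		// hdr.Path, hdr.Type, hdr.Size, ... describe the entry; regular
+//		// file contents can then be read from nr itself.
+//		_, _ = io.Copy(io.Discard, nr)
+//	}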
+func NewReader(r io.Reader) (*Reader, error) { + err := expectString(r, narVersionMagic1) + if err != nil { + return nil, fmt.Errorf("invalid nar version magic: %w", err) + } + + narReader := &Reader{ + r: r, + // create a dummy reader for lm, that'll return EOF immediately, + // so reading from Reader before Next is called won't oops. + contentReader: io.NopCloser(io.LimitReader(bytes.NewReader([]byte{}), 0)), + + headers: make(chan *Header), + errors: make(chan error), + err: nil, + next: make(chan bool), + } + + // kick off the goroutine + go func() { + // wait for the first Next() call + next := <-narReader.next + // immediate Close(), without ever calling Next() + if !next { + return + } + + err := narReader.parseNode("/") + if err != nil { + narReader.errors <- err + } else { + narReader.errors <- io.EOF + } + + close(narReader.headers) + close(narReader.errors) + }() + + return narReader, nil +} + +func (nr *Reader) parseNode(p string) error { + // accept a opening ( + err := expectString(nr.r, "(") + if err != nil { + return err + } + + // accept a type + err = expectString(nr.r, "type") + if err != nil { + return err + } + + var currentToken string + + // switch on the type label + currentToken, err = wire.ReadString(nr.r, tokenLenMax) + if err != nil { + return err + } + + switch currentToken { + case "regular": + // we optionally see executable, marking the file as executable, + // and then contents, with the contents afterwards + currentToken, err = wire.ReadString(nr.r, uint64(len("executable"))) + if err != nil { + return err + } + + executable := false + if currentToken == "executable" { + executable = true + + // These seems to be 8 null bytes after the executable field, + // which can be seen as an empty string field. + _, err := wire.ReadBytesFull(nr.r, 0) + if err != nil { + return fmt.Errorf("error reading placeholder: %w", err) + } + + currentToken, err = wire.ReadString(nr.r, tokenLenMax) + if err != nil { + return err + } + } + + if currentToken != "contents" { + return fmt.Errorf("invalid token: %v, expected 'contents'", currentToken) + } + + // peek at the bytes field + contentLength, contentReader, err := wire.ReadBytes(nr.r) + if err != nil { + return err + } + + if contentLength > math.MaxInt64 { + return fmt.Errorf("content length of %v is larger than MaxInt64", contentLength) + } + + nr.contentReader = contentReader + + nr.headers <- &Header{ + Path: p, + Type: TypeRegular, + LinkTarget: "", + Size: int64(contentLength), + Executable: executable, + } + + // wait for the Next() call + next := <-nr.next + if !next { + return nil + } + + // seek to the end of the bytes field - the consumer might not have read all of it + err = nr.contentReader.Close() + if err != nil { + return err + } + + // consume the next token + currentToken, err = wire.ReadString(nr.r, tokenLenMax) + if err != nil { + return err + } + + case "symlink": + // accept the `target` keyword + err := expectString(nr.r, "target") + if err != nil { + return err + } + + // read in the target + target, err := wire.ReadString(nr.r, pathLenMax) + if err != nil { + return err + } + + // set nr.contentReader to a empty reader, we can't read from symlinks! 
+ nr.contentReader = io.NopCloser(io.LimitReader(bytes.NewReader([]byte{}), 0)) + + // yield back the header + nr.headers <- &Header{ + Path: p, + Type: TypeSymlink, + LinkTarget: target, + Size: 0, + Executable: false, + } + + // wait for the Next() call + next := <-nr.next + if !next { + return nil + } + + // consume the next token + currentToken, err = wire.ReadString(nr.r, tokenLenMax) + if err != nil { + return err + } + + case "directory": + // set nr.contentReader to a empty reader, we can't read from directories! + nr.contentReader = io.NopCloser(io.LimitReader(bytes.NewReader([]byte{}), 0)) + nr.headers <- &Header{ + Path: p, + Type: TypeDirectory, + LinkTarget: "", + Size: 0, + Executable: false, + } + + // wait for the Next() call + next := <-nr.next + if !next { + return nil + } + + // there can be none, one or multiple `entry ( name foo node )` + + for { + // read the next token + currentToken, err = wire.ReadString(nr.r, tokenLenMax) + if err != nil { + return err + } + + if currentToken == "entry" { //nolint:nestif + // ( name foo node ) + err = expectString(nr.r, "(") + if err != nil { + return err + } + + err = expectString(nr.r, "name") + if err != nil { + return err + } + + currentToken, err = wire.ReadString(nr.r, nameLenMax) + if err != nil { + return err + } + + // ensure the name is valid + if !IsValidNodeName(currentToken) { + return fmt.Errorf("name `%v` is invalid", currentToken) + } + + newPath := path.Join(p, currentToken) + + err = expectString(nr.r, "node") + if err != nil { + return err + } + + // , recurse + err = nr.parseNode(newPath) + if err != nil { + return err + } + + err = expectString(nr.r, ")") + if err != nil { + return err + } + } + + if currentToken == ")" { + break + } + } + } + + if currentToken != ")" { + return fmt.Errorf("unexpected token: %v, expected `)`", currentToken) + } + + return nil +} + +// Next advances to the next entry in the NAR archive. The Header.Size +// determines how many bytes can be read for the next file. Any remaining data +// in the current file is automatically discarded. +// +// io.EOF is returned at the end of input. +// Errors are returned in case invalid data was read. +// This includes non-canonically sorted NAR files. +func (nr *Reader) Next() (*Header, error) { + // if there's an error already stored, keep returning it + if nr.err != nil { + return nil, nr.err + } + + // else, resume the parser + nr.next <- true + + // return either an error or headers + select { + case hdr := <-nr.headers: + if !PathIsLexicographicallyOrdered(nr.previousHdrPath, hdr.Path) { + err := fmt.Errorf("received header in the wrong order, %v <= %v", hdr.Path, nr.previousHdrPath) + + // blow fuse + nr.err = err + + return nil, err + } + + nr.previousHdrPath = hdr.Path + + return hdr, nil + + case err := <-nr.errors: + if err != nil { + // blow fuse + nr.err = err + } + + return nil, err + } +} + +// Read reads from the current file in the NAR archive. It returns (0, io.EOF) +// when it reaches the end of that file, until Next is called to advance to +// the next file. +// +// Calling Read on special types like TypeSymlink or TypeDir returns (0, +// io.EOF). +func (nr *Reader) Read(b []byte) (int, error) { + return nr.contentReader.Read(b) +} + +// Close does all internal cleanup. It doesn't close the underlying reader (which can be any io.Reader). +func (nr *Reader) Close() error { + if nr.err != io.EOF { + // Signal the parser there won't be any next. 
+ close(nr.next) + } + + return nil +} + +// expectString reads a string field from a reader, expecting a certain result, +// and errors out if the reader ends unexpected, or didn't read the expected. +func expectString(r io.Reader, expected string) error { + s, err := wire.ReadString(r, uint64(len(expected))) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + + return err + } + + if s != expected { + return fmt.Errorf("expected '%v' got '%v'", expected, s) + } + + return nil +} diff --git a/pkg/nar/reader_test.go b/pkg/nar/reader_test.go new file mode 100644 index 0000000..bb0cdeb --- /dev/null +++ b/pkg/nar/reader_test.go @@ -0,0 +1,367 @@ +package nar_test + +import ( + "bytes" + "io" + "os" + "testing" + + "github.com/numtide/nar-serve/pkg/nar" + "github.com/stretchr/testify/assert" +) + +func TestReaderEmpty(t *testing.T) { + nr, err := nar.NewReader(bytes.NewBuffer(genEmptyNar())) + assert.NoError(t, err) + + hdr, err := nr.Next() + // first Next() should return an non-nil error that's != io.EOF, + // as an empty NAR is invalid. + assert.Error(t, err, "first Next() on an empty NAR should return an error") + assert.NotEqual(t, io.EOF, err, "first Next() on an empty NAR shouldn't return io.EOF") + assert.Nil(t, hdr, "returned header should be nil") + + assert.NotPanics(t, func() { + nr.Close() + }, "closing the reader shouldn't panic") +} + +func TestReaderEmptyDirectory(t *testing.T) { + nr, err := nar.NewReader(bytes.NewBuffer(genEmptyDirectoryNar())) + assert.NoError(t, err) + + // get first header + hdr, err := nr.Next() + assert.NoError(t, err) + assert.Equal(t, &nar.Header{ + Path: "/", + Type: nar.TypeDirectory, + }, hdr) + + hdr, err = nr.Next() + assert.Equal(t, io.EOF, err, "Next() should return io.EOF as error") + assert.Nil(t, hdr, "returned header should be nil") + + assert.NotPanics(t, func() { + nr.Close() + }, "closing the reader shouldn't panic") +} + +func TestReaderOneByteRegular(t *testing.T) { + nr, err := nar.NewReader(bytes.NewBuffer(genOneByteRegularNar())) + assert.NoError(t, err) + + // get first header + hdr, err := nr.Next() + assert.NoError(t, err) + assert.Equal(t, &nar.Header{ + Path: "/", + Type: nar.TypeRegular, + Size: 1, + Executable: false, + }, hdr) + + // read contents + contents, err := io.ReadAll(nr) + assert.NoError(t, err) + assert.Equal(t, []byte{0x1}, contents) + + hdr, err = nr.Next() + assert.Equal(t, io.EOF, err, "Next() should return io.EOF as error") + assert.Nil(t, hdr, "returned header should be nil") + + assert.NotPanics(t, func() { + nr.Close() + }, "closing the reader shouldn't panic") +} + +func TestReaderSymlink(t *testing.T) { + nr, err := nar.NewReader(bytes.NewBuffer(genSymlinkNar())) + assert.NoError(t, err) + + // get first header + hdr, err := nr.Next() + assert.NoError(t, err) + assert.Equal(t, &nar.Header{ + Path: "/", + Type: nar.TypeSymlink, + LinkTarget: "/nix/store/somewhereelse", + Size: 0, + Executable: false, + }, hdr) + + // read contents should only return an empty byte slice + contents, err := io.ReadAll(nr) + assert.NoError(t, err) + assert.Equal(t, []byte{}, contents) + + hdr, err = nr.Next() + assert.Equal(t, io.EOF, err, "Next() should return io.EOF as error") + assert.Nil(t, hdr, "returned header should be nil") + + assert.NotPanics(t, func() { + nr.Close() + }, "closing the reader shouldn't panic") +} + +// TODO: various early close cases + +func TestReaderInvalidOrder(t *testing.T) { + nr, err := nar.NewReader(bytes.NewBuffer(genInvalidOrderNAR())) + assert.NoError(t, err) + + // get 
first header (/) + hdr, err := nr.Next() + assert.NoError(t, err) + assert.Equal(t, &nar.Header{ + Path: "/", + Type: nar.TypeDirectory, + }, hdr) + + // get first element inside / (/b) + hdr, err = nr.Next() + assert.NoError(t, err) + assert.Equal(t, &nar.Header{ + Path: "/b", + Type: nar.TypeDirectory, + }, hdr) + + // get second element inside / (/a) should fail + _, err = nr.Next() + assert.Error(t, err) + assert.NotErrorIs(t, err, io.EOF, "should not be io.EOF") +} + +func TestReaderSmoketest(t *testing.T) { + f, err := os.Open("../../test/testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar") + if !assert.NoError(t, err) { + return + } + + nr, err := nar.NewReader(f) + assert.NoError(t, err, "instantiating the NAR Reader shouldn't error") + + // check premature reading doesn't do any harm + n, err := nr.Read(make([]byte, 1000)) + assert.Equal(t, 0, n) + assert.Equal(t, io.EOF, err) + + headers := []nar.Header{ + {Type: nar.TypeDirectory, Path: "/"}, + {Type: nar.TypeDirectory, Path: "/bin"}, + { + Type: nar.TypeRegular, + Path: "/bin/arp", + Executable: true, + Size: 55288, + }, + { + Type: nar.TypeSymlink, + Path: "/bin/dnsdomainname", + LinkTarget: "hostname", + }, + { + Type: nar.TypeSymlink, + Path: "/bin/domainname", + LinkTarget: "hostname", + }, + { + Type: nar.TypeRegular, + Path: "/bin/hostname", + Executable: true, + Size: 17704, + }, + { + Type: nar.TypeRegular, + Path: "/bin/ifconfig", + Executable: true, + Size: 72576, + }, + { + Type: nar.TypeRegular, + Path: "/bin/nameif", + Executable: true, + Size: 18776, + }, + { + Type: nar.TypeRegular, + Path: "/bin/netstat", + Executable: true, + Size: 131784, + }, + { + Type: nar.TypeSymlink, + Path: "/bin/nisdomainname", + LinkTarget: "hostname", + }, + { + Type: nar.TypeRegular, + Path: "/bin/plipconfig", + Executable: true, + Size: 13160, + }, + { + Type: nar.TypeRegular, + Path: "/bin/rarp", + Executable: true, + Size: 30384, + }, + { + Type: nar.TypeRegular, + Path: "/bin/route", + Executable: true, + Size: 61928, + }, + { + Type: nar.TypeRegular, + Path: "/bin/slattach", + Executable: true, + Size: 35672, + }, + { + Type: nar.TypeSymlink, + Path: "/bin/ypdomainname", + LinkTarget: "hostname", + }, + { + Type: nar.TypeSymlink, + Path: "/sbin", + LinkTarget: "bin", + }, + { + Type: nar.TypeDirectory, + Path: "/share", + }, + { + Type: nar.TypeDirectory, + Path: "/share/man", + }, + { + Type: nar.TypeDirectory, + Path: "/share/man/man1", + }, + { + Type: nar.TypeRegular, + Path: "/share/man/man1/dnsdomainname.1.gz", + Size: 40, + }, + { + Type: nar.TypeRegular, + Path: "/share/man/man1/domainname.1.gz", + Size: 40, + }, + { + Type: nar.TypeRegular, + Path: "/share/man/man1/hostname.1.gz", + Size: 1660, + }, + { + Type: nar.TypeRegular, + Path: "/share/man/man1/nisdomainname.1.gz", + Size: 40, + }, + { + Type: nar.TypeRegular, + Path: "/share/man/man1/ypdomainname.1.gz", + Size: 40, + }, + { + Type: nar.TypeDirectory, + Path: "/share/man/man5", + }, + { + Type: nar.TypeRegular, + Path: "/share/man/man5/ethers.5.gz", + Size: 563, + }, + { + Type: nar.TypeDirectory, + Path: "/share/man/man8", + }, + { + Type: nar.TypeRegular, + Path: "/share/man/man8/arp.8.gz", + Size: 2464, + }, + { + Type: nar.TypeRegular, + Path: "/share/man/man8/ifconfig.8.gz", + Size: 3382, + }, + { + Type: nar.TypeRegular, + Path: "/share/man/man8/nameif.8.gz", + Size: 523, + }, + { + Type: nar.TypeRegular, + Path: "/share/man/man8/netstat.8.gz", + Size: 4284, + }, + { + Type: nar.TypeRegular, + Path: "/share/man/man8/plipconfig.8.gz", + 
Size: 889, + }, + { + Type: nar.TypeRegular, + Path: "/share/man/man8/rarp.8.gz", + Size: 1198, + }, + { + Type: nar.TypeRegular, + Path: "/share/man/man8/route.8.gz", + Size: 3525, + }, + { + Type: nar.TypeRegular, + Path: "/share/man/man8/slattach.8.gz", + Size: 1441, + }, + } + + for i, expectH := range headers { + hdr, e := nr.Next() + if !assert.NoError(t, e, i) { + return + } + + // read one of the files + if hdr.Path == "/bin/arp" { + f, err := os.Open("../../test/testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar_bin_arp") + assert.NoError(t, err) + + defer f.Close() + + expectedContents, err := io.ReadAll(f) + assert.NoError(t, err) + + actualContents, err := io.ReadAll(nr) + if assert.NoError(t, err) { + assert.Equal(t, expectedContents, actualContents) + } + } + + // ensure reading from symlinks or directories doesn't return any actual contents + // we pick examples that previously returned a regular file, so there might + // previously have been a reader pointing to something. + if hdr.Path == "/bin/dnsdomainname" || hdr.Path == "/share/man/man5" { + actualContents, err := io.ReadAll(nr) + if assert.NoError(t, err) { + assert.Equal(t, []byte{}, actualContents) + } + } + + assert.Equal(t, expectH, *hdr) + } + + hdr, err := nr.Next() + // expect to return io.EOF at the end, and no more headers + assert.Nil(t, hdr) + assert.Equal(t, io.EOF, err) + + assert.NoError(t, nr.Close(), nil, "closing the reader shouldn't error") + assert.NotPanics(t, func() { + _ = nr.Close() + }, "closing the reader multiple times shouldn't panic") +} diff --git a/pkg/nar/types.go b/pkg/nar/types.go new file mode 100644 index 0000000..e9687f8 --- /dev/null +++ b/pkg/nar/types.go @@ -0,0 +1,19 @@ +package nar + +const narVersionMagic1 = "nix-archive-1" + +// Enum of all the node types possible. +type NodeType string + +const ( + // TypeRegular represents a regular file. + TypeRegular = NodeType("regular") + // TypeDirectory represents a directory entry. + TypeDirectory = NodeType("directory") + // TypeSymlink represents a file symlink. + TypeSymlink = NodeType("symlink") +) + +func (t NodeType) String() string { + return string(t) +} diff --git a/pkg/nar/util.go b/pkg/nar/util.go new file mode 100644 index 0000000..814d976 --- /dev/null +++ b/pkg/nar/util.go @@ -0,0 +1,39 @@ +package nar + +import "strings" + +// IsValidNodeName checks the name of a node +// it may not contain null bytes or slashes. +func IsValidNodeName(nodeName string) bool { + return !strings.Contains(nodeName, "/") && !strings.ContainsAny(nodeName, "\u0000") +} + +// PathIsLexicographicallyOrdered checks if two paths are lexicographically ordered component by component. +func PathIsLexicographicallyOrdered(path1 string, path2 string) bool { + if path1 <= path2 { + return true + } + + // n is the lower number of characters of the two paths. 
+ var n int + if len(path1) < len(path2) { + n = len(path1) + } else { + n = len(path2) + } + + for i := 0; i < n; i++ { + if path1[i] == path2[i] { + continue + } + + if path1[i] == '/' && path2[i] != '/' { + return true + } + + return path1[i] < path2[i] + } + + // Cover cases like where path1 is a prefix of path2 (path1=/arp-foo path2=/arp) + return len(path2) >= len(path1) +} diff --git a/pkg/nar/util_test.go b/pkg/nar/util_test.go new file mode 100644 index 0000000..94d3492 --- /dev/null +++ b/pkg/nar/util_test.go @@ -0,0 +1,61 @@ +package nar_test + +import ( + "fmt" + "testing" + + "github.com/numtide/nar-serve/pkg/nar" + "github.com/stretchr/testify/assert" +) + +// nolint:gochecknoglobals +var cases = []struct { + path1 string + path2 string + expected bool +}{ + { + path1: "/foo", + path2: "/foo", + expected: true, + }, + { + path1: "/fooa", + path2: "/foob", + expected: true, + }, + { + path1: "/foob", + path2: "/fooa", + expected: false, + }, + { + path1: "/cmd/structlayout/main.go", + path2: "/cmd/structlayout-optimize", + expected: true, + }, + { + path1: "/cmd/structlayout-optimize", + path2: "/cmd/structlayout-ao/main.go", + expected: false, + }, +} + +func TestLexicographicallyOrdered(t *testing.T) { + for i, testCase := range cases { + t.Run(fmt.Sprint(i), func(t *testing.T) { + result := nar.PathIsLexicographicallyOrdered(testCase.path1, testCase.path2) + assert.Equal(t, result, testCase.expected) + }) + } +} + +func BenchmarkLexicographicallyOrdered(b *testing.B) { + for i, testCase := range cases { + b.Run(fmt.Sprint(i), func(b *testing.B) { + for i := 0; i < b.N; i++ { + nar.PathIsLexicographicallyOrdered(testCase.path1, testCase.path2) + } + }) + } +} diff --git a/pkg/nar/writer.go b/pkg/nar/writer.go new file mode 100644 index 0000000..87dae49 --- /dev/null +++ b/pkg/nar/writer.go @@ -0,0 +1,337 @@ +package nar + +import ( + "bytes" + "fmt" + "io" + "path/filepath" + + "github.com/numtide/nar-serve/pkg/wire" +) + +// Writer provides sequential writing of a NAR (Nix Archive) file. +// Writer.WriteHeader begins a new file with the provided Header, +// and then Writer can be treated as an io.Writer to supply that +// file's data. +type Writer struct { + w io.Writer + contentWriter io.WriteCloser + + // channels used by the goroutine to communicate back to WriteHeader and Close. + doneWritingHeader chan struct{} // goroutine is done writing that header, WriteHeader() can return. + errors chan error // there were errors while writing + + // whether we closed + closed bool + + // this is used to send new headers to write to the emitter + headers chan *Header +} + +// NewWriter creates a new Writer writing to w. +func NewWriter(w io.Writer) (*Writer, error) { + // write magic + err := wire.WriteString(w, narVersionMagic1) + if err != nil { + return nil, err + } + + narWriter := &Writer{ + w: w, + + doneWritingHeader: make(chan struct{}), + errors: make(chan error), + + closed: false, + + headers: make(chan *Header), + } + + // kick off the goroutine + go func() { + // wait for the first WriteHeader() call + header, ok := <-narWriter.headers + // immediate Close(), without ever calling WriteHeader() + // as an empty nar is invalid, we return an error + if !ok { + narWriter.errors <- fmt.Errorf("unexpected Close()") + close(narWriter.errors) + + return + } + + // ensure the first item received always has a "/" as path. 
+ if header.Path != "/" { + narWriter.errors <- fmt.Errorf("first header always needs to have a / as path") + close(narWriter.errors) + + return + } + + excessHdr, err := narWriter.emitNode(header) + if err != nil { + narWriter.errors <- err + } + + if excessHdr != nil { + narWriter.errors <- fmt.Errorf("additional header detected: %+v", excessHdr) + } + + close(narWriter.errors) + }() + + return narWriter, nil +} + +// emitNode writes one NAR node. It'll internally consume one or more headers. +// in case the header received a header that's not inside its own jurisdiction, +// it'll return it, assuming an upper level will handle it. +func (nw *Writer) emitNode(currentHeader *Header) (*Header, error) { + // write a opening ( + err := wire.WriteString(nw.w, "(") + if err != nil { + return nil, err + } + + // write type + err = wire.WriteString(nw.w, "type") + if err != nil { + return nil, err + } + + // store the current type in a var, we access it more often later. + currentType := currentHeader.Type + + err = wire.WriteString(nw.w, currentType.String()) + if err != nil { + return nil, err + } + + if currentType == TypeRegular { //nolint:nestif + // if the executable bit is set… + if currentHeader.Executable { + // write the executable token. + err = wire.WriteString(nw.w, "executable") + if err != nil { + return nil, err + } + + // write the placeholder + err = wire.WriteBytes(nw.w, []byte{}) + if err != nil { + return nil, err + } + } + + // write the contents keyword + err = wire.WriteString(nw.w, "contents") + if err != nil { + return nil, err + } + + nw.contentWriter, err = wire.NewBytesWriter(nw.w, uint64(currentHeader.Size)) + if err != nil { + return nil, err + } + } + + // The directory case doesn't write anything special after ( type directory . + // We need to inspect the next header before figuring out whether to list entries or not. + if currentType == TypeSymlink || currentType == TypeDirectory { // nolint:nestif + if currentType == TypeSymlink { + // write the target keyword + err = wire.WriteString(nw.w, "target") + if err != nil { + return nil, err + } + + // write the target location. Make sure to convert slashes. + err = wire.WriteString(nw.w, filepath.ToSlash(currentHeader.LinkTarget)) + if err != nil { + return nil, err + } + } + + // setup a dummy content write, that's not connected to the main writer, + // and will fail if you write anything to it. + var b bytes.Buffer + + nw.contentWriter, err = wire.NewBytesWriter(&b, 0) + if err != nil { + return nil, err + } + } + + // return from WriteHeader() + nw.doneWritingHeader <- struct{}{} + + // wait till we receive a new header + nextHeader, ok := <-nw.headers + + // Close the content writer to finish the packet and write possible padding + // This is a no-op for symlinks and directories, as the contentWriter is limited to 0 bytes, + // and not connected to the main writer. + // The writer itself will already ensure we wrote the right amount of bytes + err = nw.contentWriter.Close() + if err != nil { + return nil, err + } + + // if this was the last header, write the closing ) and return + if !ok { + err = wire.WriteString(nw.w, ")") + if err != nil { + return nil, err + } + + return nil, err + } + + // This is a loop, as nextHeader can either be what we received above, + // or in the case of a directory, something returned when recursing up. 
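+	// Each iteration either terminates this node (nextHeader is nil, or it
+	// describes a parent/sibling, i.e. the relative path starts with ".."),
+	// or it emits one `entry ( name <name> node ... )` for a child of this
+	// directory and continues with whatever header the recursion handed back.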
+ for { + // if this was the last header, write the closing ) and return + if nextHeader == nil { + err = wire.WriteString(nw.w, ")") + if err != nil { + return nil, err + } + + return nil, err + } + + // compare Path of the received header. + // It needs to be lexicographically greater the previous one. + if !PathIsLexicographicallyOrdered(currentHeader.Path, nextHeader.Path) { + return nil, fmt.Errorf( + "received %v, which isn't lexicographically greater than the previous one %v", + nextHeader.Path, + currentHeader.Path, + ) + } + + // calculate the relative path between the previous and now-read header, + // which will become the new node name. + nodeName, err := filepath.Rel(currentHeader.Path, nextHeader.Path) + if err != nil { + return nil, err + } + + // make sure we're using slashes + nodeName = filepath.ToSlash(nodeName) + + // if the received header is something further up, or a sibling, we're done here. + if len(nodeName) > 2 && (nodeName[0:2] == "..") { + // write the closing ) + err = wire.WriteString(nw.w, ")") + if err != nil { + return nil, err + } + + // bounce further work up to above + return nextHeader, nil + } + + // in other cases, it describes something below. + // This only works if we previously were in a directory. + if currentHeader.Type != TypeDirectory { + return nil, fmt.Errorf("received descending path %v, but we're a %v", nextHeader.Path, currentHeader.Type.String()) + } + + // ensure the name is valid. At this point, there should be no more slashes, + // as we already recursed up. + if !IsValidNodeName(nodeName) { + return nil, fmt.Errorf("name `%v` is invalid, as it contains a slash", nodeName) + } + + // write the entry keyword + err = wire.WriteString(nw.w, "entry") + if err != nil { + return nil, err + } + + // write a opening ( + err = wire.WriteString(nw.w, "(") + if err != nil { + return nil, err + } + + // write a opening name + err = wire.WriteString(nw.w, "name") + if err != nil { + return nil, err + } + + // write the node name + err = wire.WriteString(nw.w, nodeName) + if err != nil { + return nil, err + } + + // write the node keyword + err = wire.WriteString(nw.w, "node") + if err != nil { + return nil, err + } + + // Emit the node inside. It'll consume another node, which is what we'll + // handle in the next loop iteration. + nextHeader, err = nw.emitNode(nextHeader) + if err != nil { + return nil, err + } + + // write the closing ) (from entry) + err = wire.WriteString(nw.w, ")") + if err != nil { + return nil, err + } + } +} + +// WriteHeader writes hdr and prepares to accept the file's contents. The +// Header.Size determines how many bytes can be written for the next file. If +// the current file is not fully written, then this returns an error. This +// implicitly flushes any padding necessary before writing the header. +func (nw *Writer) WriteHeader(hdr *Header) error { + if err := hdr.Validate(); err != nil { + return fmt.Errorf("unable to write header: %w", err) + } + + nw.headers <- hdr + select { + case err := <-nw.errors: + return err + case <-nw.doneWritingHeader: + } + + return nil +} + +// Write writes to the current file in the NAR. +// Write returns the ErrWriteTooLong if more than Header.Size bytes +// are written after WriteHeader. +// +// Calling Write on special types like TypeLink, TypeSymlink, TypeChar, +// TypeBlock, TypeDir, and TypeFifo returns (0, ErrWriteTooLong) regardless of +// what the Header.Size claims. 
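+//
+// A minimal (sketched) write sequence, with error handling elided
+// (`buf` is assumed to be a bytes.Buffer):
+//
+//	nw, _ := nar.NewWriter(&buf)
+//	_ = nw.WriteHeader(&nar.Header{Path: "/", Type: nar.TypeRegular, Size: 3})
+//	_, _ = nw.Write([]byte("foo"))
+//	_ = nw.Close()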
+func (nw *Writer) Write(b []byte) (int, error) { + return nw.contentWriter.Write(b) +} + +// Close closes the NAR file. +// If the current file (from a prior call to WriteHeader) is not fully +// written, then this returns an error. +func (nw *Writer) Close() error { + if nw.closed { + return fmt.Errorf("already closed") + } + + // signal the emitter this was the last one + close(nw.headers) + + nw.closed = true + + // wait for it to signal its done (by closing errors) + return <-nw.errors +} diff --git a/pkg/nar/writer_test.go b/pkg/nar/writer_test.go new file mode 100644 index 0000000..fa7eaa8 --- /dev/null +++ b/pkg/nar/writer_test.go @@ -0,0 +1,333 @@ +package nar_test + +import ( + "bytes" + "io" + "os" + "testing" + + "github.com/numtide/nar-serve/pkg/nar" + "github.com/stretchr/testify/assert" +) + +func TestWriterEmpty(t *testing.T) { + var buf bytes.Buffer + nw, err := nar.NewWriter(&buf) + assert.NoError(t, err) + + // calling close on an empty NAR is an error, as it'd be invalid. + assert.Error(t, nw.Close()) + + assert.NotPanics(t, func() { + nw.Close() + }, "closing a second time, after the first one failed shouldn't panic") +} + +func TestWriterEmptyDirectory(t *testing.T) { + var buf bytes.Buffer + nw, err := nar.NewWriter(&buf) + assert.NoError(t, err) + + hdr := &nar.Header{ + Path: "/", + Type: nar.TypeDirectory, + } + + err = nw.WriteHeader(hdr) + assert.NoError(t, err) + + err = nw.Close() + assert.NoError(t, err) + + assert.Equal(t, genEmptyDirectoryNar(), buf.Bytes()) + + assert.NotPanics(t, func() { + nw.Close() + }, "closing a second time shouldn't panic") +} + +// TestWriterOneByteRegular writes a NAR only containing a single file at the root. +func TestWriterOneByteRegular(t *testing.T) { + var buf bytes.Buffer + nw, err := nar.NewWriter(&buf) + assert.NoError(t, err) + + hdr := nar.Header{ + Path: "/", + Type: nar.TypeRegular, + Size: 1, + Executable: false, + } + + err = nw.WriteHeader(&hdr) + assert.NoError(t, err) + + num, err := nw.Write([]byte{1}) + assert.Equal(t, num, 1) + assert.NoError(t, err) + + err = nw.Close() + assert.NoError(t, err) + + assert.Equal(t, genOneByteRegularNar(), buf.Bytes()) +} + +// TestWriterSymlink writes a NAR only containing a symlink. +func TestWriterSymlink(t *testing.T) { + var buf bytes.Buffer + nw, err := nar.NewWriter(&buf) + assert.NoError(t, err) + + hdr := nar.Header{ + Path: "/", + Type: nar.TypeSymlink, + LinkTarget: "/nix/store/somewhereelse", + Size: 0, + Executable: false, + } + + err = nw.WriteHeader(&hdr) + assert.NoError(t, err) + + err = nw.Close() + assert.NoError(t, err) + + assert.Equal(t, genSymlinkNar(), buf.Bytes()) +} + +// TestWriterSmoketest reads in our example nar, feeds it to the NAR reader, +// and collects all headers and contents returned +// It'll then use this to drive the NAR writer, and will compare the output +// to be the same as originally read in. 
+func TestWriterSmoketest(t *testing.T) { + f, err := os.Open("../../test/testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar") + if !assert.NoError(t, err) { + return + } + + // read in the NAR contents once + narContents, err := io.ReadAll(f) + assert.NoError(t, err) + + // pass them into a NAR reader + nr, err := nar.NewReader(bytes.NewReader(narContents)) + assert.NoError(t, err) + + headers := []*nar.Header{} + contents := [][]byte{} + + for { + hdr, err := nr.Next() + if err != nil { + if err == io.EOF { + break + } + + panic("unexpected error while reading in file") + } + + headers = append(headers, hdr) + + fileContents, err := io.ReadAll(nr) + assert.NoError(t, err) + + contents = append(contents, fileContents) + } + + assert.True(t, len(headers) == len(contents), "headers and contents should have the same size") + + // drive the nar writer + var buf bytes.Buffer + nw, err := nar.NewWriter(&buf) + assert.NoError(t, err) + + // Loop over all headers + for i, hdr := range headers { + // Write header + err := nw.WriteHeader(hdr) + assert.NoError(t, err) + + // Write contents. In the case of directories and symlinks, it should be fine to write empty bytes + n, err := io.Copy(nw, bytes.NewReader(contents[i])) + assert.NoError(t, err) + assert.Equal(t, int64(len(contents[i])), n) + } + + err = nw.Close() + assert.NoError(t, err) + // check the NAR writer produced the same contents than what we read in + assert.Equal(t, narContents, buf.Bytes()) +} + +func TestWriterErrorsTransitions(t *testing.T) { + t.Run("missing directory in between", func(t *testing.T) { + var buf bytes.Buffer + nw, err := nar.NewWriter(&buf) + assert.NoError(t, err) + + // write a directory node + err = nw.WriteHeader(&nar.Header{ + Path: "/", + Type: nar.TypeDirectory, + }) + assert.NoError(t, err) + + // write a symlink "a/foo", but missing the directory node "a" in between should error + err = nw.WriteHeader(&nar.Header{ + Path: "/a/foo", + Type: nar.TypeSymlink, + LinkTarget: "doesntmatter", + }) + assert.Error(t, err) + }) + + t.Run("missing directory at the beginning, writing another directory", func(t *testing.T) { + var buf bytes.Buffer + nw, err := nar.NewWriter(&buf) + assert.NoError(t, err) + + // write a directory node for "/a" without writing the one for "/" + err = nw.WriteHeader(&nar.Header{ + Path: "/a", + Type: nar.TypeDirectory, + }) + assert.Error(t, err) + }) + + t.Run("missing directory at the beginning, writing a symlink", func(t *testing.T) { + var buf bytes.Buffer + nw, err := nar.NewWriter(&buf) + assert.NoError(t, err) + + // write a symlink for "a" without writing the directory one for "" + err = nw.WriteHeader(&nar.Header{ + Path: "/a", + Type: nar.TypeSymlink, + LinkTarget: "foo", + }) + assert.Error(t, err) + }) + + t.Run("transition via a symlink, not directory", func(t *testing.T) { + var buf bytes.Buffer + nw, err := nar.NewWriter(&buf) + assert.NoError(t, err) + + // write a directory node + err = nw.WriteHeader(&nar.Header{ + Path: "/", + Type: nar.TypeDirectory, + }) + assert.NoError(t, err) + + // write a symlink node for "/a" + err = nw.WriteHeader(&nar.Header{ + Path: "/a", + Type: nar.TypeSymlink, + LinkTarget: "doesntmatter", + }) + assert.NoError(t, err) + + // write a symlink "/a/b", which should fail, as a was a symlink, not directory + err = nw.WriteHeader(&nar.Header{ + Path: "/a/b", + Type: nar.TypeSymlink, + LinkTarget: "doesntmatter", + }) + assert.Error(t, err) + }) + + t.Run("not lexicographically sorted", func(t *testing.T) { + var buf 
bytes.Buffer + nw, err := nar.NewWriter(&buf) + assert.NoError(t, err) + + // write a directory node + err = nw.WriteHeader(&nar.Header{ + Path: "/", + Type: nar.TypeDirectory, + }) + assert.NoError(t, err) + + // write a symlink for "/b" + err = nw.WriteHeader(&nar.Header{ + Path: "/b", + Type: nar.TypeSymlink, + LinkTarget: "foo", + }) + assert.NoError(t, err) + + // write a symlink for "/a" + err = nw.WriteHeader(&nar.Header{ + Path: "/a", + Type: nar.TypeSymlink, + LinkTarget: "foo", + }) + assert.Error(t, err) + }) + + t.Run("not lexicographically sorted, but the same", func(t *testing.T) { + var buf bytes.Buffer + nw, err := nar.NewWriter(&buf) + assert.NoError(t, err) + + // write a directory node + err = nw.WriteHeader(&nar.Header{ + Path: "/", + Type: nar.TypeDirectory, + }) + assert.NoError(t, err) + + // write a symlink for "/a" + err = nw.WriteHeader(&nar.Header{ + Path: "/a", + Type: nar.TypeSymlink, + LinkTarget: "foo", + }) + assert.NoError(t, err) + + // write a symlink for "/a" + err = nw.WriteHeader(&nar.Header{ + Path: "/a", + Type: nar.TypeSymlink, + LinkTarget: "foo", + }) + assert.Error(t, err) + }) + + t.Run("lexicographically sorted with nested directory and common prefix", func(t *testing.T) { + var buf bytes.Buffer + nw, err := nar.NewWriter(&buf) + assert.NoError(t, err) + + // write a directory node + err = nw.WriteHeader(&nar.Header{ + Path: "/", + Type: nar.TypeDirectory, + }) + assert.NoError(t, err) + + // write a directory node with name "/foo" + err = nw.WriteHeader(&nar.Header{ + Path: "/foo", + Type: nar.TypeDirectory, + }) + assert.NoError(t, err) + + // write a symlink for "/foo/b" + err = nw.WriteHeader(&nar.Header{ + Path: "/foo/b", + Type: nar.TypeSymlink, + LinkTarget: "foo", + }) + assert.NoError(t, err) + + // write a symlink for "/foo-a" + err = nw.WriteHeader(&nar.Header{ + Path: "/foo-a", + Type: nar.TypeSymlink, + LinkTarget: "foo", + }) + assert.NoError(t, err) + }) +} diff --git a/pkg/narinfo/check.go b/pkg/narinfo/check.go new file mode 100644 index 0000000..f577f7a --- /dev/null +++ b/pkg/narinfo/check.go @@ -0,0 +1,49 @@ +package narinfo + +import ( + "fmt" + + "github.com/google/go-cmp/cmp" + "github.com/numtide/nar-serve/pkg/nixpath" +) + +// Check does some sanity checking on a NarInfo struct, such as: +// +// - ensuring the paths in StorePath, References and Deriver are syntactically valid +// (references and deriver first need to be made absolute) +// - when no compression is present, ensuring File{Hash,Size} and +// Nar{Hash,Size} are equal +func (n *NarInfo) Check() error { + _, err := nixpath.FromString(n.StorePath) + if err != nil { + return fmt.Errorf("invalid NixPath at StorePath: %v", n.StorePath) + } + + for i, r := range n.References { + referenceAbsolute := nixpath.Absolute(r) + _, err = nixpath.FromString(referenceAbsolute) + + if err != nil { + return fmt.Errorf("invalid NixPath at Reference[%d]: %v", i, r) + } + } + + deriverAbsolute := nixpath.Absolute(n.Deriver) + + _, err = nixpath.FromString(deriverAbsolute) + if err != nil { + return fmt.Errorf("invalid NixPath at Deriver: %v", n.Deriver) + } + + if n.Compression == "none" { + if n.FileSize != n.NarSize { + return fmt.Errorf("compression is none, FileSize/NarSize differs: %d, %d", n.FileSize, n.NarSize) + } + + if !cmp.Equal(n.FileHash, n.NarHash) { + return fmt.Errorf("compression is none, FileHash/NarHash differs: %v, %v", n.FileHash, n.NarHash) + } + } + + return nil +} diff --git a/pkg/narinfo/narinfo_test.go b/pkg/narinfo/narinfo_test.go new file mode 100644 
index 0000000..e28b301 --- /dev/null +++ b/pkg/narinfo/narinfo_test.go @@ -0,0 +1,214 @@ +package narinfo_test + +import ( + "bytes" + "io" + "os" + "strings" + "testing" + + "github.com/numtide/nar-serve/pkg/narinfo" + "github.com/numtide/nar-serve/pkg/nixhash" + "github.com/stretchr/testify/assert" +) + +// nolint:gochecknoglobals +var ( + strNarinfoSample = ` +StorePath: /nix/store/00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432 +URL: nar/1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar.xz +Compression: xz +FileHash: sha256:1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d +FileSize: 114980 +NarHash: sha256:0lxjvvpr59c2mdram7ympy5ay741f180kv3349hvfc3f8nrmbqf6 +NarSize: 464152 +References: 7gx4kiv5m0i7d7qkixq2cwzbr10lvxwc-glibc-2.27 +Deriver: 10dx1q4ivjb115y3h90mipaaz533nr0d-net-tools-1.60_p20170221182432.drv +Sig: cache.nixos.org-1:sn5s/RrqEI+YG6/PjwdbPjcAC7rcta7sJU4mFOawGvJBLsWkyLtBrT2EuFt/LJjWkTZ+ZWOI9NTtjo/woMdvAg== +Sig: hydra.other.net-1:JXQ3Z/PXf0EZSFkFioa4FbyYpbbTbHlFBtZf4VqU0tuMTWzhMD7p9Q7acJjLn3jofOtilAAwRILKIfVuyrbjAA== +` + strNarinfoSampleWithoutFileFields = ` +StorePath: /nix/store/00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432 +URL: nar/1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar.xz +Compression: xz +NarHash: sha256:0lxjvvpr59c2mdram7ympy5ay741f180kv3349hvfc3f8nrmbqf6 +NarSize: 464152 +References: 7gx4kiv5m0i7d7qkixq2cwzbr10lvxwc-glibc-2.27 +Deriver: 10dx1q4ivjb115y3h90mipaaz533nr0d-net-tools-1.60_p20170221182432.drv +Sig: cache.nixos.org-1:sn5s/RrqEI+YG6/PjwdbPjcAC7rcta7sJU4mFOawGvJBLsWkyLtBrT2EuFt/LJjWkTZ+ZWOI9NTtjo/woMdvAg== +Sig: hydra.other.net-1:JXQ3Z/PXf0EZSFkFioa4FbyYpbbTbHlFBtZf4VqU0tuMTWzhMD7p9Q7acJjLn3jofOtilAAwRILKIfVuyrbjAA== +` + + strNarinfoSampleCachix = ` +StorePath: /nix/store/8nb4qdm1n2mpfcfr3hdaxw54fjdn4hqz-treefmt-docs-4b33ba3 +URL: nar/b136fa7b36b966d63a93f983ee03070e44bffe9ba9005cda59835e2a0f0f64b9.nar.zst +Compression: zstd +FileHash: sha256:b136fa7b36b966d63a93f983ee03070e44bffe9ba9005cda59835e2a0f0f64b9 +FileSize: 873969 +NarHash: sha256:1d3pp407iawzv79w453x5ff5fs0cscwzxm7572q85nijc56faxr8 +NarSize: 1794360 +References: +Deriver: 7nzyn0l9402ya02g6sac073c3733k0p7-treefmt-docs-4b33ba3.drv +Sig: numtide.cachix.org-1:YYcsiDnC0WR2utXGy1G6PqjDPH7TsvMrpaK4QJV1MHLks4N5XPA+Na0yzfOqBxqn9BB8NsTAqSu2B08SiIQmDA== +` + + _NarHash = nixhash.MustNewHashWithEncoding(nixhash.SHA256, []uint8{ + 0xc6, 0xe1, 0x55, 0xb3, 0x45, 0x6e, 0x30, 0xb7, 0x61, 0x22, 0x63, 0xec, 0x09, 0x50, 0x70, 0x81, + 0x1c, 0xaf, 0x8a, 0xbf, 0xd5, 0x9f, 0xaa, 0x72, 0xab, 0x82, 0xa5, 0x92, 0xef, 0xde, 0xb2, 0x53, + }, nixhash.NixBase32, true) + + _Signatures = []*narinfo.Signature{ + { + KeyName: "cache.nixos.org-1", + Digest: []byte{ + 0xb2, 0x7e, 0x6c, 0xfd, 0x1a, 0xea, 0x10, 0x8f, 0x98, 0x1b, 0xaf, 0xcf, 0x8f, 0x07, 0x5b, 0x3e, + 0x37, 0x00, 0x0b, 0xba, 0xdc, 0xb5, 0xae, 0xec, 0x25, 0x4e, 0x26, 0x14, 0xe6, 0xb0, 0x1a, 0xf2, + 0x41, 0x2e, 0xc5, 0xa4, 0xc8, 0xbb, 0x41, 0xad, 0x3d, 0x84, 0xb8, 0x5b, 0x7f, 0x2c, 0x98, 0xd6, + 0x91, 0x36, 0x7e, 0x65, 0x63, 0x88, 0xf4, 0xd4, 0xed, 0x8e, 0x8f, 0xf0, 0xa0, 0xc7, 0x6f, 0x02, + }, + }, + { + KeyName: "hydra.other.net-1", + Digest: []byte{ + 0x25, 0x74, 0x37, 0x67, 0xf3, 0xd7, 0x7f, 0x41, 0x19, 0x48, 0x59, 0x05, 0x8a, 0x86, 0xb8, 0x15, + 0xbc, 0x98, 0xa5, 0xb6, 0xd3, 0x6c, 0x79, 0x45, 0x06, 0xd6, 0x5f, 0xe1, 0x5a, 0x94, 0xd2, 0xdb, + 0x8c, 0x4d, 0x6c, 0xe1, 0x30, 0x3e, 0xe9, 0xf5, 0x0e, 0xda, 0x70, 0x98, 0xcb, 0x9f, 0x78, 0xe8, + 0x7c, 0xeb, 0x62, 0x94, 0x00, 0x30, 0x44, 0x82, 0xca, 0x21, 0xf5, 
0x6e, 0xca, 0xb6, 0xe3, 0x00, + }, + }, + } + + narinfoSample = &narinfo.NarInfo{ + StorePath: "/nix/store/00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432", + URL: "nar/1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar.xz", + Compression: "xz", + FileHash: nixhash.MustNewHashWithEncoding(nixhash.SHA256, []byte{ + 0xed, 0x34, 0xdc, 0x8f, 0x36, 0x04, 0x7d, 0x68, 0x6d, 0xc2, 0x96, 0xb7, 0xb2, 0xe3, 0xf4, 0x27, + 0x84, 0x88, 0xbe, 0x5b, 0x6a, 0x94, 0xa6, 0xf7, 0xa3, 0xdc, 0x92, 0x9f, 0xe0, 0xe5, 0x24, 0x81, + }, nixhash.NixBase32, true), + FileSize: 114980, + NarHash: _NarHash, + NarSize: 464152, + References: []string{"7gx4kiv5m0i7d7qkixq2cwzbr10lvxwc-glibc-2.27"}, + Deriver: "10dx1q4ivjb115y3h90mipaaz533nr0d-net-tools-1.60_p20170221182432.drv", + Signatures: _Signatures, + } + + narinfoSampleWithoutFileFields = &narinfo.NarInfo{ + StorePath: "/nix/store/00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432", + URL: "nar/1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar.xz", + Compression: "xz", + NarHash: _NarHash, + NarSize: 464152, + References: []string{"7gx4kiv5m0i7d7qkixq2cwzbr10lvxwc-glibc-2.27"}, + Deriver: "10dx1q4ivjb115y3h90mipaaz533nr0d-net-tools-1.60_p20170221182432.drv", + Signatures: _Signatures, + } + + narinfoSampleCachix = &narinfo.NarInfo{ + StorePath: "/nix/store/8nb4qdm1n2mpfcfr3hdaxw54fjdn4hqz-treefmt-docs-4b33ba3", + URL: "nar/b136fa7b36b966d63a93f983ee03070e44bffe9ba9005cda59835e2a0f0f64b9.nar.zst", + Compression: "zstd", + NarHash: nixhash.MustNewHashWithEncoding(nixhash.SHA256, []byte{ + 0x28, 0x77, 0xe5, 0x4c, 0x61, 0x32, 0xda, 0x82, 0xb0, 0x38, 0xe5, 0xd4, 0xfe, 0x39, 0xd3, 0x0c, + 0x68, 0x57, 0x9c, 0x2b, 0x7d, 0x14, 0xc2, 0xd3, 0xd9, 0x9f, 0xab, 0x78, 0x00, 0xb9, 0x77, 0xb4, + }, nixhash.NixBase32, true), + NarSize: 1794360, + FileHash: nixhash.MustNewHashWithEncoding(nixhash.SHA256, []byte{ + 0xb1, 0x36, 0xfa, 0x7b, 0x36, 0xb9, 0x66, 0xd6, 0x3a, 0x93, 0xf9, 0x83, 0xee, 0x03, 0x07, 0x0e, + 0x44, 0xbf, 0xfe, 0x9b, 0xa9, 0x00, 0x5c, 0xda, 0x59, 0x83, 0x5e, 0x2a, 0x0f, 0x0f, 0x64, 0xb9, + }, nixhash.Base16, true), + FileSize: 873969, + Deriver: "7nzyn0l9402ya02g6sac073c3733k0p7-treefmt-docs-4b33ba3.drv", + Signatures: []*narinfo.Signature{ + { + KeyName: "numtide.cachix.org-1", + Digest: []byte{ + 0x61, 0x87, 0x2c, 0x88, 0x39, 0xc2, 0xd1, 0x64, 0x76, 0xba, 0xd5, 0xc6, 0xcb, 0x51, 0xba, 0x3e, + 0xa8, 0xc3, 0x3c, 0x7e, 0xd3, 0xb2, 0xf3, 0x2b, 0xa5, 0xa2, 0xb8, 0x40, 0x95, 0x75, 0x30, 0x72, + 0xe4, 0xb3, 0x83, 0x79, 0x5c, 0xf0, 0x3e, 0x35, 0xad, 0x32, 0xcd, 0xf3, 0xaa, 0x07, 0x1a, 0xa7, + 0xf4, 0x10, 0x7c, 0x36, 0xc4, 0xc0, 0xa9, 0x2b, 0xb6, 0x07, 0x4f, 0x12, 0x88, 0x84, 0x26, 0x0c, + }, + }, + }, + } +) + +func TestNarInfo(t *testing.T) { + ni, err := narinfo.Parse(strings.NewReader(strNarinfoSample)) + assert.NoError(t, err) + + // Test the parsing happy path + assert.Equal(t, narinfoSample, ni) + assert.NoError(t, ni.Check()) + + // Test to string + assert.Equal(t, strNarinfoSample, "\n"+ni.String()) +} + +func TestNarInfoWithoutFileFields(t *testing.T) { + ni, err := narinfo.Parse(strings.NewReader(strNarinfoSampleWithoutFileFields)) + assert.NoError(t, err) + + // Test the parsing happy path + assert.Equal(t, narinfoSampleWithoutFileFields, ni) + assert.NoError(t, ni.Check()) + + // Test to string + assert.Equal(t, strNarinfoSampleWithoutFileFields, "\n"+ni.String()) +} + +func TestNarInfoCachix(t *testing.T) { + ni, err := narinfo.Parse(strings.NewReader(strNarinfoSampleCachix)) + assert.NoError(t, err) + + // Test the 
parsing happy path + assert.Equal(t, narinfoSampleCachix, ni) + assert.NoError(t, ni.Check()) + + // Test to string + assert.Equal(t, strNarinfoSampleCachix, "\n"+ni.String()) +} + +func TestBigNarinfo(t *testing.T) { + f, err := os.Open("../../../test/testdata/big.narinfo") + if err != nil { + panic(err) + } + defer f.Close() + + _, err = narinfo.Parse(f) + assert.NoError(t, err, "Parsing big .narinfo files shouldn't fail") +} + +func BenchmarkNarInfo(b *testing.B) { + b.Run("Regular", func(b *testing.B) { + for i := 0; i < b.N; i++ { + _, err := narinfo.Parse(strings.NewReader(strNarinfoSample)) + assert.NoError(b, err) + } + }) + + { + f, err := os.Open("../../../test/testdata/big.narinfo") + if err != nil { + panic(err) + } + defer f.Close() + + var buf bytes.Buffer + _, err = io.ReadAll(&buf) + if err != nil { + panic(err) + } + + big := buf.Bytes() + + b.Run("Big", func(b *testing.B) { + for i := 0; i < b.N; i++ { + _, err := narinfo.Parse(bytes.NewReader(big)) + assert.NoError(b, err) + } + }) + } +} diff --git a/pkg/narinfo/parser.go b/pkg/narinfo/parser.go new file mode 100644 index 0000000..9f458c4 --- /dev/null +++ b/pkg/narinfo/parser.go @@ -0,0 +1,119 @@ +package narinfo + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" + + "github.com/numtide/nar-serve/pkg/nixhash" +) + +// Parse reads a .narinfo file content +// and returns a NarInfo struct with the parsed data. +func Parse(r io.Reader) (*NarInfo, error) { + narInfo := &NarInfo{} + scanner := bufio.NewScanner(r) + + // Increase the buffer size. + // Some .narinfo files have a lot of entries in References, + // and bufio.Scanner will error bufio.ErrTooLong otherwise. + const maxCapacity = 1048576 + buf := make([]byte, maxCapacity) + scanner.Buffer(buf, maxCapacity) + + for scanner.Scan() { + var err error + + line := scanner.Text() + // skip empty lines (like, an empty line at EOF) + if line == "" { + continue + } + + k, v, err := splitOnce(line, ": ") + if err != nil { + return nil, err + } + + switch k { + case "StorePath": + narInfo.StorePath = v + case "URL": + narInfo.URL = v + case "Compression": + narInfo.Compression = v + case "FileHash": + narInfo.FileHash, err = nixhash.ParseAny(v, nil) + if err != nil { + return nil, err + } + case "FileSize": + narInfo.FileSize, err = strconv.ParseUint(v, 10, 0) + if err != nil { + return nil, err + } + case "NarHash": + narInfo.NarHash, err = nixhash.ParseAny(v, nil) + if err != nil { + return nil, err + } + case "NarSize": + narInfo.NarSize, err = strconv.ParseUint(v, 10, 0) + if err != nil { + return nil, err + } + case "References": + if v == "" { + continue + } + + narInfo.References = append(narInfo.References, strings.Split(v, " ")...) + case "Deriver": + narInfo.Deriver = v + case "System": + narInfo.System = v + case "Sig": + signature, e := ParseSignatureLine(v) + if e != nil { + return nil, fmt.Errorf("unable to parse signature line %v: %v", v, err) + } + + narInfo.Signatures = append(narInfo.Signatures, signature) + case "CA": + narInfo.CA = v + default: + return nil, fmt.Errorf("unknown key %v", k) + } + + if err != nil { + return nil, fmt.Errorf("unable to parse line %v", line) + } + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + // An empty/non-existrent compression field is considered to mean bzip2 + if narInfo.Compression == "" { + narInfo.Compression = "bzip2" + } + + return narInfo, nil +} + +// splitOnce - Split a string and make sure it's only splittable once. 
+func splitOnce(s string, sep string) (string, string, error) { + idx := strings.Index(s, sep) + if idx == -1 { + return "", "", fmt.Errorf("unable to find separator '%s' in %v", sep, s) + } + + if strings.Contains(s[:idx], sep) { + return "", "", fmt.Errorf("found separator '%s' twice or more in %v", sep, s) + } + + return s[0:idx], s[idx+len(sep):], nil +} diff --git a/pkg/narinfo/signature.go b/pkg/narinfo/signature.go new file mode 100644 index 0000000..7791144 --- /dev/null +++ b/pkg/narinfo/signature.go @@ -0,0 +1,53 @@ +package narinfo + +import ( + "crypto/ed25519" + "encoding/base64" + "fmt" +) + +// Signature is used to sign a NarInfo (parts of it, to be precise). +type Signature struct { + KeyName string // An identifier for the key that's used for the signature + + Digest []byte // The digest itself, in bytes +} + +// ParseSignatureLine parses a signature line and returns a Signature struct, or error. +func ParseSignatureLine(signatureLine string) (*Signature, error) { + field0, field1, err := splitOnce(signatureLine, ":") + if err != nil { + return nil, err + } + + var sig [ed25519.SignatureSize]byte + + n, err := base64.StdEncoding.Decode(sig[:], []byte(field1)) + if err != nil { + return nil, fmt.Errorf("unable to decode base64: %v", field1) + } + + if n != len(sig) { + return nil, fmt.Errorf("invalid signature size: %d", n) + } + + return &Signature{ + KeyName: field0, + Digest: sig[:], + }, nil +} + +// MustParseSignatureLine parses a signature line and returns a Signature struct, or panics on error. +func MustParseSignatureLine(signatureLine string) *Signature { + s, err := ParseSignatureLine(signatureLine) + if err != nil { + panic(err) + } + + return s +} + +// String returns the string representation of a signature, which is `KeyName:base`. 
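+// That is, the key name, a colon, and the base64 encoding of the digest,
+// matching the format of a Sig: line in a .narinfo file.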
+func (s *Signature) String() string { + return s.KeyName + ":" + base64.StdEncoding.EncodeToString(s.Digest) +} diff --git a/pkg/narinfo/signature_test.go b/pkg/narinfo/signature_test.go new file mode 100644 index 0000000..93eda22 --- /dev/null +++ b/pkg/narinfo/signature_test.go @@ -0,0 +1,48 @@ +package narinfo_test + +import ( + "testing" + + "github.com/numtide/nar-serve/pkg/narinfo" + "github.com/stretchr/testify/assert" +) + +const ( + dummySigLine = "cache.nixos.org-1" + + ":" + "rH4wxlNRbTbViQon40C15og5zlcFEphwoF26IQGHi2QCwVYyaLj6LOag+MeWcZ65SWzy6PnOlXjriLNcxE0hAQ==" + expectedKeyName = "cache.nixos.org-1" +) + +// nolint:gochecknoglobals +var ( + expectedDigest = []byte{ + 0xac, 0x7e, 0x30, 0xc6, 0x53, 0x51, 0x6d, 0x36, 0xd5, 0x89, 0x0a, 0x27, 0xe3, 0x40, 0xb5, 0xe6, + 0x88, 0x39, 0xce, 0x57, 0x05, 0x12, 0x98, 0x70, 0xa0, 0x5d, 0xba, 0x21, 0x01, 0x87, 0x8b, 0x64, + 0x02, 0xc1, 0x56, 0x32, 0x68, 0xb8, 0xfa, 0x2c, 0xe6, 0xa0, 0xf8, 0xc7, 0x96, 0x71, 0x9e, 0xb9, + 0x49, 0x6c, 0xf2, 0xe8, 0xf9, 0xce, 0x95, 0x78, 0xeb, 0x88, 0xb3, 0x5c, 0xc4, 0x4d, 0x21, 0x01, + } +) + +func TestParseSignatureLine(t *testing.T) { + signature, err := narinfo.ParseSignatureLine(dummySigLine) + if assert.NoError(t, err) { + assert.Equal(t, expectedKeyName, signature.KeyName) + assert.Equal(t, expectedDigest, signature.Digest) + } +} + +func TestMustParseSignatureLine(t *testing.T) { + signature := narinfo.MustParseSignatureLine(dummySigLine) + assert.Equal(t, expectedKeyName, signature.KeyName) + assert.Equal(t, expectedDigest, signature.Digest) + + assert.Panics(t, func() { + _ = narinfo.MustParseSignatureLine(expectedKeyName) + }) +} + +func BenchmarkParseSignatureLine(b *testing.B) { + for i := 0; i < b.N; i++ { + narinfo.MustParseSignatureLine(dummySigLine) + } +} diff --git a/pkg/narinfo/types.go b/pkg/narinfo/types.go new file mode 100644 index 0000000..4f09626 --- /dev/null +++ b/pkg/narinfo/types.go @@ -0,0 +1,94 @@ +package narinfo + +import ( + "bytes" + "fmt" + + "github.com/numtide/nar-serve/pkg/nixhash" +) + +// NarInfo represents a parsed .narinfo file. +type NarInfo struct { + StorePath string // The full nix store path (/nix/store/…-pname-version) + + URL string // The relative location to the .nar[.xz,…] file. Usually nar/$fileHash.nar[.xz] + Compression string // The compression method file at URL is compressed with (none,xz,…) + + FileHash *nixhash.HashWithEncoding // The hash of the file at URL + FileSize uint64 // The size of the file at URL, in bytes + + // The hash of the .nar file, after possible decompression + // Identical to FileHash if no compression is used. + NarHash *nixhash.HashWithEncoding + // The size of the .nar file, after possible decompression, in bytes. + // Identical to FileSize if no compression is used. + NarSize uint64 + + // References to other store paths, contained in the .nar file + References []string + + // Path of the .drv for this store path + Deriver string + + // This doesn't seem to be used at all? + System string + + // Signatures, if any. 
+ Signatures []*Signature + + // TODO: Figure out the meaning of this + CA string +} + +func (n *NarInfo) String() string { + var buf bytes.Buffer + + fmt.Fprintf(&buf, "StorePath: %v\n", n.StorePath) + fmt.Fprintf(&buf, "URL: %v\n", n.URL) + fmt.Fprintf(&buf, "Compression: %v\n", n.Compression) + + if n.FileHash != nil && n.FileSize != 0 { + fmt.Fprintf(&buf, "FileHash: %s\n", n.FileHash.String()) + fmt.Fprintf(&buf, "FileSize: %d\n", n.FileSize) + } + + fmt.Fprintf(&buf, "NarHash: %s\n", n.NarHash.String()) + + fmt.Fprintf(&buf, "NarSize: %d\n", n.NarSize) + + buf.WriteString("References:") + + if len(n.References) == 0 { + buf.WriteByte(' ') + } else { + for _, r := range n.References { + buf.WriteByte(' ') + buf.WriteString(r) + } + } + + buf.WriteByte('\n') + + if n.Deriver != "" { + fmt.Fprintf(&buf, "Deriver: %v\n", n.Deriver) + } + + if n.System != "" { + fmt.Fprintf(&buf, "System: %v\n", n.System) + } + + for _, s := range n.Signatures { + fmt.Fprintf(&buf, "Sig: %v\n", s) + } + + if n.CA != "" { + fmt.Fprintf(&buf, "CA: %v\n", n.CA) + } + + return buf.String() +} + +// ContentType returns the mime content type of the object. +func (n NarInfo) ContentType() string { + return "text/x-nix-narinfo" +} diff --git a/pkg/nixbase32/doc.go b/pkg/nixbase32/doc.go new file mode 100644 index 0000000..1eb8bb4 --- /dev/null +++ b/pkg/nixbase32/doc.go @@ -0,0 +1,10 @@ +// Package nixbase32 implements the slightly odd "base32" encoding that's used +// in Nix. + +// Nix uses a custom alphabet. Contrary to other implementations (RFC4648), +// encoding to "nix base32" also reads in characters in reverse order (and +// doesn't use any padding), which makes adopting encoding/base32 hard. +// This package provides some of the functions defined in +// encoding/base32.Encoding. + +package nixbase32 diff --git a/pkg/nixbase32/nixbase32.go b/pkg/nixbase32/nixbase32.go new file mode 100644 index 0000000..4ae990b --- /dev/null +++ b/pkg/nixbase32/nixbase32.go @@ -0,0 +1,123 @@ +package nixbase32 + +import ( + "fmt" + "strings" +) + +// Alphabet contains the list of valid characters for the Nix base32 alphabet. +const Alphabet = "0123456789abcdfghijklmnpqrsvwxyz" + +func decodeString(s string, dst []byte) error { + var dstLen int + if dst != nil { + dstLen = len(dst) + } else { + dstLen = DecodedLen(len(s)) + } + + for n := 0; n < len(s); n++ { + c := s[len(s)-n-1] + + digit := strings.IndexByte(Alphabet, c) + if digit == -1 { + return fmt.Errorf("character %v not in alphabet", c) + } + + b := uint(n * 5) + i := b / 8 + j := b % 8 + + // OR the main pattern + if dst != nil { + dst[i] |= byte(digit) << j + } + + // calculate the "carry pattern" + carry := byte(digit) >> (8 - j) + + // if we're at the end of dst… + if i == uint(dstLen-1) { + // but have a nonzero carry, the encoding is invalid. + if carry != 0 { + return fmt.Errorf("invalid encoding") + } + } else if dst != nil { + dst[i+1] |= carry + } + } + + return nil +} + +// ValidateBytes validates if a byte slice is valid nixbase32. +func ValidateBytes(b []byte) error { + return ValidateString(string(b)) +} + +// ValidateString validates if a string is valid nixbase32. +func ValidateString(s string) error { + return decodeString(s, nil) +} + +// EncodedLen returns the length in bytes of the base32 encoding of an input +// buffer of length n. +func EncodedLen(n int) int { + if n == 0 { + return 0 + } + + return (n*8-1)/5 + 1 +} + +// DecodedLen returns the length in bytes of the decoded data +// corresponding to n bytes of base32-encoded data. 
+// If we have bits that don't fit into here, they are padding and must +// be 0. +func DecodedLen(n int) int { + return (n * 5) / 8 +} + +// EncodeToString returns the nixbase32 encoding of src. +func EncodeToString(src []byte) string { + l := EncodedLen(len(src)) + + var dst strings.Builder + + dst.Grow(l) + + for n := l - 1; n >= 0; n-- { + b := uint(n * 5) + i := b / 8 + j := b % 8 + + c := src[i] >> j + + if i+1 < uint(len(src)) { + c |= src[i+1] << (8 - j) + } + + dst.WriteByte(Alphabet[c&0x1f]) + } + + return dst.String() +} + +// DecodeString returns the bytes represented by the nixbase32 string s or +// returns an error. +func DecodeString(s string) ([]byte, error) { + dst := make([]byte, DecodedLen(len(s))) + + return dst, decodeString(s, dst) +} + +// MustDecodeString returns the bytes represented by the nixbase32 string s or +// panics on error. +func MustDecodeString(s string) []byte { + b, err := DecodeString(s) + if err != nil { + panic(err) + } + + return b +} diff --git a/pkg/nixbase32/nixbase32_test.go b/pkg/nixbase32/nixbase32_test.go new file mode 100644 index 0000000..8d37d51 --- /dev/null +++ b/pkg/nixbase32/nixbase32_test.go @@ -0,0 +1,115 @@ +package nixbase32_test + +import ( + "math/rand" + "strconv" + "testing" + + "github.com/numtide/nar-serve/pkg/nixbase32" + "github.com/stretchr/testify/assert" +) + +// nolint:gochecknoglobals +var tt = []struct { + dec []byte + enc string +}{ + {[]byte{}, ""}, + {[]byte{0x1f}, "0z"}, + { + []byte{ + 0xd8, 0x6b, 0x33, 0x92, 0xc1, 0x20, 0x2e, 0x8f, + 0xf5, 0xa4, 0x23, 0xb3, 0x02, 0xe6, 0x28, 0x4d, + 0xb7, 0xf8, 0xf4, 0x35, 0xea, 0x9f, 0x39, 0xb5, + 0xb1, 0xb2, 0x0f, 0xd3, 0xac, 0x36, 0xdf, 0xcb, + }, + "1jyz6snd63xjn6skk7za6psgidsd53k05cr3lksqybi0q6936syq", + }, +} + +func TestEncode(t *testing.T) { + for i := range tt { + assert.Equal(t, tt[i].enc, nixbase32.EncodeToString(tt[i].dec)) + } +} + +func TestDecode(t *testing.T) { + for i := range tt { + b, err := nixbase32.DecodeString(tt[i].enc) + + if assert.NoError(t, err) { + assert.Equal(t, tt[i].dec, b) + } + } +} + +func TestValidate(t *testing.T) { + for i := range tt { + err := nixbase32.ValidateString(tt[i].enc) + + assert.NoError(t, err) + } +} + +func TestMustDecodeString(t *testing.T) { + for i := range tt { + b := nixbase32.MustDecodeString(tt[i].enc) + assert.Equal(t, tt[i].dec, b) + } +} + +func TestDecodeInvalid(t *testing.T) { + invalidEncodings := []string{ + // this is invalid encoding, because it encodes 10 1-bytes, so the carry + // would be 2 1-bytes + "zz", + // this is an even more specific example - it'd decode as 00000000 11 + "c0", + } + + for _, c := range invalidEncodings { + _, err := nixbase32.DecodeString(c) + assert.Error(t, err) + + err = nixbase32.ValidateString(c) + assert.Error(t, err) + + assert.Panics(t, func() { + _ = nixbase32.MustDecodeString(c) + }) + } +} + +func BenchmarkEncode(b *testing.B) { + sizes := []int{32, 64, 128} + + for _, s := range sizes { + bytes := make([]byte, s) + rand.Read(bytes) // nolint:gosec + + b.Run(strconv.Itoa(s), func(b *testing.B) { + for i := 0; i < b.N; i++ { + nixbase32.EncodeToString(bytes) + } + }) + } +} + +func BenchmarkDecode(b *testing.B) { + sizes := []int{32, 64, 128} + + for _, s := range sizes { + bytes := make([]byte, s) + rand.Read(bytes) // nolint:gosec + input := nixbase32.EncodeToString(bytes) + + b.Run(strconv.Itoa(s), func(b *testing.B) { + for i := 0; i < b.N; i++ { + _, err := nixbase32.DecodeString(input) + if err != nil { + b.Fatal("error: %w", err) + } + } + }) + } +} diff --git 
a/pkg/nixhash/algo.go b/pkg/nixhash/algo.go new file mode 100644 index 0000000..cde6405 --- /dev/null +++ b/pkg/nixhash/algo.go @@ -0,0 +1,67 @@ +package nixhash + +import ( + "crypto" + "fmt" +) + +// Algorithm represent the hashing algorithm used to digest the data. +type Algorithm uint8 + +const ( + _ = iota + + // All the algorithms that Nix understands + MD5 = Algorithm(iota) + SHA1 = Algorithm(iota) + SHA256 = Algorithm(iota) + SHA512 = Algorithm(iota) +) + +func ParseAlgorithm(s string) (Algorithm, error) { + switch s { + case "md5": + return MD5, nil + case "sha1": + return SHA1, nil + case "sha256": + return SHA256, nil + case "sha512": + return SHA512, nil + default: + return 0, fmt.Errorf("unknown algorithm: %s", s) + } +} + +func (a Algorithm) String() string { + switch a { + case MD5: + return "md5" + case SHA1: + return "sha1" + case SHA256: + return "sha256" + case SHA512: + return "sha512" + default: + panic(fmt.Sprintf("bug: unknown algorithm %d", a)) + } +} + +// Func returns the cryptographic hash function for the Algorithm (implementing crypto.Hash) +// It panics when encountering an invalid Algorithm, as these can only occur by +// manually filling the struct. +func (a Algorithm) Func() crypto.Hash { + switch a { + case MD5: + return crypto.MD5 + case SHA1: + return crypto.SHA1 + case SHA256: + return crypto.SHA256 + case SHA512: + return crypto.SHA512 + default: + panic(fmt.Sprintf("Invalid hash type: %v", a)) + } +} diff --git a/pkg/nixhash/algo_test.go b/pkg/nixhash/algo_test.go new file mode 100644 index 0000000..56684c1 --- /dev/null +++ b/pkg/nixhash/algo_test.go @@ -0,0 +1,60 @@ +package nixhash_test + +import ( + "testing" + + "github.com/numtide/nar-serve/pkg/nixhash" + "github.com/stretchr/testify/assert" +) + +func TestAlgo(t *testing.T) { + cases := []struct { + Title string + Str string + Algo nixhash.Algorithm + }{ + { + "valid md5", + "md5", + nixhash.MD5, + }, + { + "valid sha1", + "sha1", + nixhash.SHA1, + }, + { + "valid sha256", + "sha256", + nixhash.SHA256, + }, + { + "valid sha512", + "sha512", + nixhash.SHA512, + }, + } + + t.Run("ParseAlgorithm", func(t *testing.T) { + for _, c := range cases { + t.Run(c.Title, func(t *testing.T) { + algo, err := nixhash.ParseAlgorithm(c.Str) + assert.NoError(t, err) + assert.Equal(t, c.Algo, algo) + assert.Equal(t, c.Str, algo.String()) + }) + } + }) + + t.Run("ParseInvalidAlgo", func(t *testing.T) { + _, err := nixhash.ParseAlgorithm("woot") + assert.Error(t, err) + }) + + t.Run("PrintInalidAlgo", func(t *testing.T) { + assert.Panics(t, func() { + _ = nixhash.Algorithm(0).String() + }) + }) + +} diff --git a/pkg/nixhash/encoding.go b/pkg/nixhash/encoding.go new file mode 100644 index 0000000..216f1fb --- /dev/null +++ b/pkg/nixhash/encoding.go @@ -0,0 +1,21 @@ +package nixhash + +import ( + "encoding/base64" +) + +// Encoding is the string representation of the hashed data. +type Encoding uint8 + +const ( + _ = iota // ignore zero value + + // All the encodings that Nix understands + Base16 = Encoding(iota) // Lowercase hexadecimal encoding. + Base64 = Encoding(iota) // [IETF RFC 4648, section 4](https://datatracker.ietf.org/doc/html/rfc4648#section-4). + NixBase32 = Encoding(iota) // Nix-specific base-32 encoding. + SRI = Encoding(iota) // W3C recommendation [Subresource Intergrity](https://www.w3.org/TR/SRI/) +) + +// b64 is the specific base64 encoding that we are using. 
+var b64 = base64.StdEncoding diff --git a/pkg/nixhash/hash.go b/pkg/nixhash/hash.go new file mode 100644 index 0000000..6807a7e --- /dev/null +++ b/pkg/nixhash/hash.go @@ -0,0 +1,66 @@ +// Package nixhash provides methods to serialize and deserialize some of the +// hashes used in nix code and .narinfo files. +// +// Nix uses different representation of hashes depending on the context +// and history of the project. This package provides the utilities to handle them. +package nixhash + +import ( + "encoding/hex" + "fmt" + + "github.com/numtide/nar-serve/pkg/nixbase32" +) + +type Hash struct { + algo Algorithm + digest []byte +} + +func NewHash(algo Algorithm, digest []byte) (*Hash, error) { + if algo.Func().Size() != len(digest) { + return nil, fmt.Errorf("algo length doesn't match digest size") + } + + return &Hash{algo, digest}, nil +} + +func MustNewHash(algo Algorithm, digest []byte) *Hash { + h, err := NewHash(algo, digest) + if err != nil { + panic(err) + } + return h +} + +func (h Hash) Algo() Algorithm { + return h.algo +} + +func (h Hash) Digest() []byte { + return h.digest +} + +// Format converts the hash to a string of the given encoding. +func (h Hash) Format(e Encoding, includeAlgo bool) string { + var s string + if e == SRI || includeAlgo { + s += h.algo.String() + if e == SRI { + s += "-" + } else { + s += ":" + } + } + switch e { + case Base16: + s += hex.EncodeToString(h.digest) + case NixBase32: + s += nixbase32.EncodeToString(h.digest) + case Base64, SRI: + s += b64.EncodeToString(h.digest) + default: + panic(fmt.Sprintf("bug: unknown encoding: %v", e)) + } + return s +} diff --git a/pkg/nixhash/hash_test.go b/pkg/nixhash/hash_test.go new file mode 100644 index 0000000..44e7c1d --- /dev/null +++ b/pkg/nixhash/hash_test.go @@ -0,0 +1,120 @@ +package nixhash_test + +import ( + "testing" + + "github.com/numtide/nar-serve/pkg/nixhash" + "github.com/stretchr/testify/assert" +) + +func TestDigest(t *testing.T) { + cases := []struct { + Title string + EncodedHash string + Algo nixhash.Algorithm + Encoding nixhash.Encoding + IncludePrefix bool + Digest []byte + }{ + { + "valid sha256", + "sha256:1rjs6c23nyf8zkmf7yxglz2q2m7v5kp51nc2m0lk4h998d0qiixs", + nixhash.SHA256, + nixhash.NixBase32, + true, + []byte{ + 0xba, 0xc7, 0x88, 0x41, 0x43, 0x29, 0x41, 0x32, + 0x29, 0xa8, 0x82, 0xd9, 0x50, 0xee, 0x2c, 0xfb, + 0x54, 0x81, 0xc5, 0xa7, 0xaf, 0xfb, 0xe3, 0xea, + 0xfc, 0xc8, 0x79, 0x3b, 0x04, 0x33, 0x5a, 0xe6, + }, + }, + { + "valid sha512", + "sha512:37iwwa5iw4m6pkd6qs2c5lw13q7y16hw2rv4i1cx6jax6yibhn6fgajbwc8p4j1fc6iicpy5r1vi7hpfq3n6z1ikhm5kcyz2b1frk80", + nixhash.SHA512, + nixhash.NixBase32, + true, + []byte{ + 0x00, 0xcd, 0xec, 0xc2, 0x12, 0xdf, 0xb3, 0x59, + 0x2a, 0x9c, 0x31, 0x7c, 0x63, 0x07, 0x76, 0x17, + 0x9e, 0xb8, 0x43, 0x2e, 0xfe, 0xb2, 0x18, 0x0d, + 0x73, 0x41, 0x92, 0x8b, 0x18, 0x5f, 0x52, 0x3d, + 0x67, 0x2c, 0x5c, 0xd1, 0x9b, 0xae, 0xa4, 0xe9, + 0x2c, 0x44, 0xb2, 0xb3, 0xe0, 0xd0, 0x04, 0x7f, + 0xf0, 0x08, 0x9c, 0x16, 0x26, 0x34, 0x36, 0x6d, + 0x5e, 0x53, 0x09, 0x8f, 0x45, 0x71, 0x1e, 0xcf, + }, + }, + { + "invalid base32", + "sha256:1rjs6c2tnyf8zkmf7yxglz2q2m7v5kp51nc2m0lk4h998d0qiixs", + nixhash.SHA256, + nixhash.NixBase32, + true, + nil, // means no result + }, + { + "invalid digest length", + "", // means this should panic + nixhash.SHA256, + nixhash.NixBase32, + true, + []byte{ + 0xba, 0xc7, 0x88, 0x41, 0x43, 0x29, 0x41, 0x32, + 0x29, 0xa8, 0x82, 0xd9, 0x50, 0xee, 0x2c, 0xfb, + 0x54, 0x81, 0xc5, 0xa7, 0xaf, 0xfb, 0xe3, 0xea, + 0xfc, 0xc8, 0x79, 0x3b, 0x04, 0x33, 0x5a, 
+ }, + }, + { + "invalid encoded digest length", + "sha256:37iwwa5iw4m6pkd6qs2c5lw13q7y16hw2rv4i1cx6jax6yibhn6fgajbwc8p4j1fc6iicpy5r1vi7hpfq3n6z1ikhm5kcyz2b1frk80", + nixhash.SHA256, + nixhash.Base64, + true, + nil, + }, + } + + t.Run("ParseAny", func(t *testing.T) { + for _, c := range cases { + t.Run(c.Title, func(t *testing.T) { + if c.EncodedHash == "" { + return // there is no valid string representation to parse + } + + hash, err := nixhash.ParseAny(c.EncodedHash, &c.Algo) + + if c.Digest != nil { + if assert.NoError(t, err, "shouldn't error") { + h, err := nixhash.NewHashWithEncoding(c.Algo, c.Digest, c.Encoding, c.IncludePrefix) + assert.NoError(t, err) + assert.Equal(t, h, hash) + } + } else { + assert.Error(t, err, "should error") + } + }) + } + }) + + t.Run("Format", func(t *testing.T) { + for _, c := range cases { + t.Run(c.Title, func(t *testing.T) { + if c.Digest == nil { + return // there is no valid parsed representation to stringify + } + + hash, err := nixhash.NewHashWithEncoding(c.Algo, c.Digest, c.Encoding, c.IncludePrefix) + + if c.EncodedHash == "" { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, c.EncodedHash, hash.String()) + } + }) + } + }) +} diff --git a/pkg/nixhash/hash_with_encoding.go b/pkg/nixhash/hash_with_encoding.go new file mode 100644 index 0000000..bd45226 --- /dev/null +++ b/pkg/nixhash/hash_with_encoding.go @@ -0,0 +1,34 @@ +package nixhash + +// HashWithEncoding stores the original encoding so the user can get error messages with the same encoding. +type HashWithEncoding struct { + Hash + encoding Encoding + includeAlgo bool +} + +func NewHashWithEncoding(algo Algorithm, digest []byte, encoding Encoding, includeAlgo bool) (*HashWithEncoding, error) { + h, err := NewHash(algo, digest) + if err != nil { + return nil, err + } + return &HashWithEncoding{ + Hash: *h, + encoding: encoding, + includeAlgo: includeAlgo, + }, nil +} + +func MustNewHashWithEncoding(algo Algorithm, digest []byte, encoding Encoding, includeAlgo bool) *HashWithEncoding { + h := MustNewHash(algo, digest) + return &HashWithEncoding{ + Hash: *h, + encoding: encoding, + includeAlgo: includeAlgo, + } +} + +// String return the previous representation of a given hash. +func (h HashWithEncoding) String() string { + return h.Format(h.encoding, h.includeAlgo) +} diff --git a/pkg/nixhash/parse.go b/pkg/nixhash/parse.go new file mode 100644 index 0000000..ebd38dd --- /dev/null +++ b/pkg/nixhash/parse.go @@ -0,0 +1,102 @@ +package nixhash + +import ( + "encoding/hex" + "fmt" + "strings" + + "github.com/numtide/nar-serve/pkg/nixbase32" +) + +// Parse the hash from a string representation in the format +// "[:]" or "-" (a +// Subresource Integrity hash expression). If the 'optAlgo' argument +// is not present, then the hash algorithm must be specified in the +// string. 
+func ParseAny(s string, optAlgo *Algorithm) (*HashWithEncoding, error) { + var ( + isSRI = false + err error + ) + h := &HashWithEncoding{} + + // Look for prefix + i := strings.IndexByte(s, ':') + if i <= 0 { + i = strings.IndexByte(s, '-') + if i > 0 { + isSRI = true + } + } + + // If has prefix, get the algo + if i > 0 { + h.includeAlgo = true + h.algo, err = ParseAlgorithm(s[:i]) + if err != nil { + return nil, err + } + if optAlgo != nil && h.algo != *optAlgo { + return nil, fmt.Errorf("algo doesn't match expected algo: %v, %v", h.algo, optAlgo) + } + // keep the remainder for the encoding + s = s[i+1:] + } else if optAlgo != nil { + h.algo = *optAlgo + } else { + return nil, fmt.Errorf("unable to find separator in %v", s) + } + + // Decode the string. Because we know the algo, and each encoding has a different size, we + // can find out which of the encoding was used to represent the hash. + digestLenBytes := h.algo.Func().Size() + switch len(s) { + case hex.EncodedLen(digestLenBytes): + h.encoding = Base16 + h.digest, err = hex.DecodeString(s) + case nixbase32.EncodedLen(digestLenBytes): + h.encoding = NixBase32 + h.digest, err = nixbase32.DecodeString(s) + case b64.EncodedLen(digestLenBytes): + h.encoding = Base64 + h.digest, err = b64.DecodeString(s) + default: + return h, fmt.Errorf("unknown encoding for %v", s) + } + if err != nil { + return h, err + } + + // Post-processing for SRI + if isSRI { + if h.encoding == Base64 { + h.encoding = SRI + } else { + return h, fmt.Errorf("invalid encoding for SRI: %v", h.encoding) + } + } + + return h, nil +} + +// ParseNixBase32 returns a new Hash struct, by parsing a hashtype:nixbase32 string, or an error. +func ParseNixBase32(s string) (*Hash, error) { + h, err := ParseAny(s, nil) + if err != nil { + return nil, err + } + if h.encoding != NixBase32 { + return nil, fmt.Errorf("expected NixBase32 encoding but got %v", h.encoding) + } + return &h.Hash, nil +} + +// MustParseNixBase32 returns a new Hash struct, by parsing a hashtype:nixbase32 string, or panics on error. +func MustParseNixBase32(s string) *Hash { + h, err := ParseNixBase32(s) + if err != nil { + panic(err) + } + + return h +} diff --git a/pkg/nixhash/util.go b/pkg/nixhash/util.go new file mode 100644 index 0000000..4a62c19 --- /dev/null +++ b/pkg/nixhash/util.go @@ -0,0 +1,15 @@ +package nixhash + +// CompressHash takes an arbitrary long sequence of bytes (usually a hash digest), +// and returns a sequence of bytes of length newSize. +// It's calculated by rotating through the bytes in the output buffer (zero-initialized), +// and XOR'ing with each byte in the passed input +// It consumes 1 byte at a time, and XOR's it with the current value in the output buffer. +func CompressHash(input []byte, outputSize int) []byte { + buf := make([]byte, outputSize) + for i := 0; i < len(input); i++ { + buf[i%outputSize] ^= input[i] + } + + return buf +} diff --git a/pkg/nixpath/nixpath.go b/pkg/nixpath/nixpath.go new file mode 100644 index 0000000..641bc85 --- /dev/null +++ b/pkg/nixpath/nixpath.go @@ -0,0 +1,116 @@ +// Package nixpath parses and renders Nix store paths. 
+package nixpath + +import ( + "fmt" + "path" + "regexp" + + "github.com/numtide/nar-serve/pkg/nixbase32" +) + +const ( + StoreDir = "/nix/store" + PathHashSize = 20 +) + +// nolint:gochecknoglobals +var ( + NameRe = regexp.MustCompile(`[a-zA-Z0-9+\-_?=][.a-zA-Z0-9+\-_?=]*`) + PathRe = regexp.MustCompile(fmt.Sprintf( + `^%v/([%v]{%d})-(%v)$`, + regexp.QuoteMeta(StoreDir), + nixbase32.Alphabet, + nixbase32.EncodedLen(PathHashSize), + NameRe, + )) + + // Length of the hash portion of the store path in base32. + encodedPathHashSize = nixbase32.EncodedLen(PathHashSize) + + // Offset in path string to name. + nameOffset = len(StoreDir) + 1 + encodedPathHashSize + 1 + // Offset in path string to hash. + hashOffset = len(StoreDir) + 1 +) + +// NixPath represents a bare nix store path, without any paths underneath `/nix/store/…-…`. +type NixPath struct { + Name string + Digest []byte +} + +func (n *NixPath) String() string { + return Absolute(nixbase32.EncodeToString(n.Digest) + "-" + n.Name) +} + +func (n *NixPath) Validate() error { + return Validate(n.String()) +} + +// FromString parses a path string into a nix path, +// verifying it's syntactically valid +// It returns an error if it fails to parse. +func FromString(s string) (*NixPath, error) { + if err := Validate(s); err != nil { + return nil, err + } + + digest, err := nixbase32.DecodeString(s[hashOffset : hashOffset+encodedPathHashSize]) + if err != nil { + return nil, fmt.Errorf("unable to decode hash: %v", err) + } + + return &NixPath{ + Name: s[nameOffset:], + Digest: digest, + }, nil +} + +// Absolute prefixes a nixpath name with StoreDir and a '/', and cleans the path. +// It does not prevent from leaving StoreDir, so check if it still starts with StoreDir +// if you accept untrusted input. +// This should be used when assembling store paths in hashing contexts. +// Even if this code is running on windows, we want to use forward +// slashes to construct them. +func Absolute(name string) string { + return path.Join(StoreDir, name) +} + +// Validate validates a path string, verifying it's syntactically valid. 
+func Validate(s string) error { + if len(s) < nameOffset+1 { + return fmt.Errorf("unable to parse path: invalid path length %d for path %v", len(s), s) + } + + if s[:len(StoreDir)] != StoreDir { + return fmt.Errorf("unable to parse path: mismatching store path prefix for path %v", s) + } + + if err := nixbase32.ValidateString(s[hashOffset : hashOffset+encodedPathHashSize]); err != nil { + return fmt.Errorf("unable to parse path: error validating path nixbase32 %v: %v", err, s) + } + + for _, c := range s[nameOffset:] { + if (c < 'a' || c > 'z') && (c < 'A' || c > 'Z') && (c < '0' || c > '9') { + switch c { + case '-': + continue + case '_': + continue + case '.': + continue + case '+': + continue + case '?': + continue + case '=': + continue + } + + return fmt.Errorf("unable to parse path: invalid character in path: %v", s) + } + } + + return nil +} diff --git a/pkg/nixpath/nixpath_test.go b/pkg/nixpath/nixpath_test.go new file mode 100644 index 0000000..075628f --- /dev/null +++ b/pkg/nixpath/nixpath_test.go @@ -0,0 +1,120 @@ +package nixpath_test + +import ( + "path" + "strings" + "testing" + + "github.com/numtide/nar-serve/pkg/nixpath" + "github.com/stretchr/testify/assert" +) + +func TestNixPath(t *testing.T) { + t.Run("happy path", func(t *testing.T) { + exampleNixPathStr := "/nix/store/00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432" + nixpath, err := nixpath.FromString(exampleNixPathStr) + + if assert.NoError(t, err) { + assert.Equal(t, "net-tools-1.60_p20170221182432", nixpath.Name) + assert.Equal(t, []byte{ + 0x8a, 0x12, 0x32, 0x15, 0x22, 0xfd, 0x91, 0xef, 0xbd, 0x60, 0xeb, 0xb2, 0x48, 0x1a, 0xf8, 0x85, + 0x80, 0xf6, 0x16, 0x00, + }, nixpath.Digest) + } + + // Test to string + assert.Equal(t, exampleNixPathStr, nixpath.String()) + }) + + t.Run("invalid hash length", func(t *testing.T) { + s := "/nix/store/00bgd045z0d4icpbc2yy-net-tools-1.60_p20170221182432" + + _, err := nixpath.FromString(s) + assert.Error(t, err) + + err = nixpath.Validate(s) + assert.Error(t, err) + }) + + t.Run("invalid encoding in hash", func(t *testing.T) { + s := "/nix/store/00bgd045z0d4icpbc2yyz4gx48aku4la-net-tools-1.60_p20170221182432" + + _, err := nixpath.FromString(s) + assert.Error(t, err) + + err = nixpath.Validate(s) + assert.Error(t, err) + }) + + t.Run("more than just the bare nix store path", func(t *testing.T) { + s := "/nix/store/00bgd045z0d4icpbc2yyz4gx48aku4la-net-tools-1.60_p20170221182432/bin/arp" + + _, err := nixpath.FromString(s) + assert.Error(t, err) + + err = nixpath.Validate(s) + assert.Error(t, err) + }) +} + +func TestNixPathAbsolute(t *testing.T) { + t.Run("simple (foo)", func(t *testing.T) { + s := nixpath.Absolute("foo") + assert.Equal(t, nixpath.StoreDir+"/"+"foo", s) + }) + t.Run("subdir (foo/bar)", func(t *testing.T) { + s := nixpath.Absolute("foo/bar") + assert.Equal(t, nixpath.StoreDir+"/"+"foo/bar", s) + }) + t.Run("with ../ getting cleaned (foo/bar/.. -> foo)", func(t *testing.T) { + s := nixpath.Absolute("foo/bar/..") + assert.Equal(t, nixpath.StoreDir+"/"+"foo", s) + }) + // test you can use this to exit nixpath.StoreDir + // Note path.Join does a path.Clean already, this is only + // written for additional clarity. 
+ t.Run("leave storeDir", func(t *testing.T) { + s := nixpath.Absolute("..") + assert.Equal(t, path.Clean(path.Join(nixpath.StoreDir, "..")), s) + assert.False(t, strings.HasPrefix(s, nixpath.StoreDir), + "path shouldn't have the full storedir as prefix anymore (/nix)") + }) +} + +func BenchmarkNixPath(b *testing.B) { + path := "/nix/store/00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432" + + b.Run("FromString", func(b *testing.B) { + for i := 0; i < b.N; i++ { + _, err := nixpath.FromString(path) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("Validate", func(b *testing.B) { + for i := 0; i < b.N; i++ { + err := nixpath.Validate(path) + if err != nil { + b.Fatal(err) + } + } + }) + + { + p, err := nixpath.FromString(path) + if err != nil { + b.Fatal(err) + } + + b.Run("ValidateStruct", func(b *testing.B) { + for i := 0; i < b.N; i++ { + err := p.Validate() + if err != nil { + b.Fatal(err) + } + } + }) + } +} diff --git a/pkg/nixpath/references/refs.go b/pkg/nixpath/references/refs.go new file mode 100644 index 0000000..ed75693 --- /dev/null +++ b/pkg/nixpath/references/refs.go @@ -0,0 +1,105 @@ +package references + +import ( + "fmt" + "sort" + "strings" + + "github.com/numtide/nar-serve/pkg/nixbase32" + "github.com/numtide/nar-serve/pkg/nixpath" +) + +const ( + storePrefixLength = len(nixpath.StoreDir) + 1 + refLength = len(nixbase32.Alphabet) // Store path hash prefix length +) + +// nolint:gochecknoglobals +// This creates an array to check if a given byte is in the Nix base32 alphabet. +var isNixBase32 = func() (arr [256]bool) { + for _, c := range nixbase32.Alphabet { + arr[c] = true + } + + return +}() + +// ReferenceScanner scans a stream of data for references to store paths to extract run time dependencies. +type ReferenceScanner struct { + // Map of store path hashes to full store paths. + hashes map[string]string + + // Set of hits. + hits map[string]struct{} + + // Buffer for current partial hit. + buf [refLength]byte + + // How far into buf is currently written. 
+ n int +} + +func NewReferenceScanner(storePathCandidates []string) (*ReferenceScanner, error) { + var buf [refLength]byte + + hashes := make(map[string]string) + + for _, storePath := range storePathCandidates { + if !strings.HasPrefix(storePath, nixpath.StoreDir) { + return nil, fmt.Errorf("missing store path prefix: %s", storePath) + } + + // Check length is a valid store path length including dashes + if len(storePath) < len(nixpath.StoreDir)+refLength+3 { + return nil, fmt.Errorf("invalid store path length: %d for store path '%s'", len(storePath), storePath) + } + + hashes[storePath[storePrefixLength:storePrefixLength+refLength]] = storePath + } + + return &ReferenceScanner{ + hits: make(map[string]struct{}), + hashes: hashes, + buf: buf, + n: 0, + }, nil +} + +func (r *ReferenceScanner) References() []string { + paths := make([]string, len(r.hits)) + + i := 0 + + for hash := range r.hits { + paths[i] = r.hashes[hash] + i++ + } + + sort.Strings(paths) + + return paths +} + +func (r *ReferenceScanner) Write(s []byte) (int, error) { + for _, c := range s { + if !isNixBase32[c] { + r.n = 0 + + continue + } + + r.buf[r.n] = c + r.n++ + + if r.n == refLength { + hash := string(r.buf[:]) + if _, ok := r.hashes[hash]; ok { + r.hits[hash] = struct{}{} + } + + r.n = 0 + } + } + + return len(s), nil +} diff --git a/pkg/nixpath/references/refs_test.go b/pkg/nixpath/references/refs_test.go new file mode 100644 index 0000000..60267ca --- /dev/null +++ b/pkg/nixpath/references/refs_test.go @@ -0,0 +1,96 @@ +package references_test + +import ( + "testing" + + "github.com/numtide/nar-serve/pkg/nixpath/references" + "github.com/stretchr/testify/assert" +) + +//nolint:gochecknoglobals +var cases = []struct { + Title string + Chunks []string + Expected []string +}{ + { + Title: "Basic", + Chunks: []string{ + "/nix/store/knn6wc1a89c47yb70qwv56rmxylia6wx-hello-2.12/bin/hello", + }, + Expected: []string{ + "/nix/store/knn6wc1a89c47yb70qwv56rmxylia6wx-hello-2.12", + }, + }, + { + Title: "PartialWrites", + Chunks: []string{ + "/nix/store/knn6wc1a89c47yb70", + "qwv56rmxylia6wx-hello-2.12/bin/hello", + }, + Expected: []string{ + "/nix/store/knn6wc1a89c47yb70qwv56rmxylia6wx-hello-2.12", + }, + }, + { + Title: "IgnoredPaths", + Chunks: []string{ + "/nix/store/knn6wc1a89c47yb70qwv56rmxylia6wx-hello-2.12/bin/hello", + "/nix/store/c4pcgriqgiwz8vxrjxg7p38q3y7w3ni3-go-1.18.2/bin/go", + }, + Expected: []string{ + "/nix/store/knn6wc1a89c47yb70qwv56rmxylia6wx-hello-2.12", + }, + }, +} + +func TestReferences(t *testing.T) { + t.Run("ScanReferences", func(t *testing.T) { + for _, c := range cases { + t.Run(c.Title, func(t *testing.T) { + refScanner, err := references.NewReferenceScanner(c.Expected) + if err != nil { + panic(err) + } + + for _, line := range c.Chunks { + _, err = refScanner.Write([]byte(line)) + if err != nil { + panic(err) + } + } + + assert.Equal(t, c.Expected, refScanner.References()) + }) + } + }) +} + +func BenchmarkReferences(b *testing.B) { + for _, c := range cases { + c := c + + refScanner, err := references.NewReferenceScanner(c.Expected) + if err != nil { + panic(err) + } + + chunks := make([][]byte, len(c.Chunks)) + for i, c := range c.Chunks { + chunks[i] = []byte(c) + } + + b.Run(c.Title, func(b *testing.B) { + for i := 0; i < b.N; i++ { + for _, chunk := range chunks { + _, err = refScanner.Write(chunk) + if err != nil { + panic(err) + } + } + } + + assert.Equal(b, c.Expected, refScanner.References()) + }) + } +} diff --git a/pkg/wire/bytes_reader.go b/pkg/wire/bytes_reader.go new file mode 
100644 index 0000000..6a27f29 --- /dev/null +++ b/pkg/wire/bytes_reader.go @@ -0,0 +1,59 @@ +package wire + +import ( + "io" +) + +// BytesReader implements io.ReadCloser. +var _ io.ReadCloser = &BytesReader{} + +// BytesReader implements reading from bytes fields. +// It'll return a limited reader to the actual contents. +// Closing the reader will seek to the end of the packet (including padding). +// It's fine to not close, in case you don't want to seek to the end. +type BytesReader struct { + contentLength uint64 // the total length of the field + lr io.Reader // a reader limited to the actual contents of the field + r io.Reader // the underlying real reader, used when seeking over the padding. +} + +// NewBytesReader constructs a Reader of a bytes packet. +// Closing the reader will skip over any padding. +func NewBytesReader(r io.Reader, contentLength uint64) *BytesReader { + return &BytesReader{ + contentLength: contentLength, + lr: io.LimitReader(r, int64(contentLength)), + r: r, + } +} + +// Read will read into b until all bytes from the field have been read +// Keep in mind there might be some padding at the end still, +// which can be seek'ed over by closing the reader. +func (br *BytesReader) Read(b []byte) (int, error) { + n, err := br.lr.Read(b) + + return n, err +} + +// Close will skip to the end and consume any remaining padding. +// It'll return an error if the padding contains something else than null +// bytes. +// It's fine to not close, in case you don't want to seek to the end. +func (br *BytesReader) Close() error { + // seek to the end of the limited reader + for { + buf := make([]byte, 1024) + + _, err := br.lr.Read(buf) + if err != nil { + if err == io.EOF { + break + } + + return err + } + } + // skip over padding + return readPadding(br.r, br.contentLength) +} diff --git a/pkg/wire/bytes_writer.go b/pkg/wire/bytes_writer.go new file mode 100644 index 0000000..16fe0f5 --- /dev/null +++ b/pkg/wire/bytes_writer.go @@ -0,0 +1,72 @@ +package wire + +import ( + "fmt" + "io" +) + +var _ io.WriteCloser = &BytesWriter{} + +// BytesWriter implements writing bytes fields. +// It'll return a io.WriteCloser that can be written to. +// On Write(), it'll verify we don't write more than was initially specified. +// On Close(), it'll verify exactly the previously specified number of bytes were written, +// then write any necessary padding. +type BytesWriter struct { + w io.Writer + bytesWritten uint64 // the number of bytes written so far + totalLength uint64 // the expected length of the contents, without padding + paddingWritten bool +} + +func NewBytesWriter(w io.Writer, contentLength uint64) (*BytesWriter, error) { + // write the size field + n := contentLength + if err := WriteUint64(w, n); err != nil { + return nil, err + } + + bytesWriter := &BytesWriter{ + w: w, + bytesWritten: 0, + totalLength: contentLength, + paddingWritten: false, + } + + return bytesWriter, nil +} + +func (bw *BytesWriter) Write(p []byte) (n int, err error) { + l := len(p) + + if bw.bytesWritten+uint64(l) > bw.totalLength { + return 0, fmt.Errorf("maximum number of bytes exceeded") + } + + bytesWritten, err := bw.w.Write(p) + bw.bytesWritten += uint64(bytesWritten) + + return bytesWritten, err +} + +// Close ensures the previously specified number of bytes were written, then writes padding. 
+func (bw *BytesWriter) Close() error { + // if we already closed once, don't close again + if bw.paddingWritten { + return nil + } + + if bw.bytesWritten != bw.totalLength { + return fmt.Errorf("wrote %v bytes in total, but expected %v", bw.bytesWritten, bw.totalLength) + } + + // write padding + err := writePadding(bw.w, bw.totalLength) + if err != nil { + return err + } + + bw.paddingWritten = true + + return nil +} diff --git a/pkg/wire/read.go b/pkg/wire/read.go new file mode 100644 index 0000000..73b9997 --- /dev/null +++ b/pkg/wire/read.go @@ -0,0 +1,103 @@ +package wire + +import ( + "fmt" + "io" +) + +// ReadUint64 consumes exactly 8 bytes and returns a uint64. +func ReadUint64(r io.Reader) (n uint64, err error) { + buf := bufPool.Get().(*[8]byte) + defer bufPool.Put(buf) + + if _, err := io.ReadFull(r, buf[:]); err != nil { + return 0, err + } + + return byteOrder.Uint64(buf[:]), nil +} + +// ReadBool consumes a boolean in nix wire format. +func ReadBool(r io.Reader) (v bool, err error) { + n, err := ReadUint64(r) + if err != nil { + return false, err + } + + if n != 0 && n != 1 { + return false, fmt.Errorf("invalid value for boolean: %v", n) + } + + return n == 1, nil +} + +// readPadding consumes the remaining padding, if any, and errors out if it's not null bytes. +// In nix archive format, byte packets are padded to 8 byte blocks each. +func readPadding(r io.Reader, contentLength uint64) error { + // n marks the position inside the last block + n := contentLength % 8 + if n == 0 { + return nil + } + + buf := bufPool.Get().(*[8]byte) + defer bufPool.Put(buf) + + // we read the padding contents into the tail of the buf slice + if _, err := io.ReadFull(r, buf[n:]); err != nil { + return err + } + // … and check if it's only null bytes + for _, b := range buf[n:] { + if b != 0 { + return fmt.Errorf("invalid padding, should be null bytes, found %v", buf[n:]) + } + } + + return nil +} + +// ReadBytes parses the size field, and returns a ReadCloser to its contents. +// That reader is limited to the actual contents of the bytes field. +// Closing the reader will skip to the end of the last byte packet, including the padding. +func ReadBytes(r io.Reader) (uint64, io.ReadCloser, error) { + // read content length + contentLength, err := ReadUint64(r) + if err != nil { + return 0, nil, err + } + + return contentLength, NewBytesReader(r, contentLength), nil +} + +// ReadBytesFull reads a byte packet, and will return its content, or an error. +// A maximum number of bytes can be specified in max. +// In the case of a packet exceeding the maximum number of bytes, +// the reader won't seek to the end of the packet. +func ReadBytesFull(r io.Reader, max uint64) ([]byte, error) { + contentLength, rd, err := ReadBytes(r) + if err != nil { + return []byte{}, err + } + + if contentLength > max { + return nil, fmt.Errorf("content length of %v bytes exceeds maximum of %v bytes", contentLength, max) + } + + defer rd.Close() + + // consume content + buf := make([]byte, contentLength) + if _, err := io.ReadFull(rd, buf); err != nil { + return nil, err + } + + return buf, nil +} + +// ReadString reads a bytes packet and converts it to string. 
+func ReadString(r io.Reader, max uint64) (string, error) { + buf, err := ReadBytesFull(r, max) + + return string(buf), err +} diff --git a/pkg/wire/read_test.go b/pkg/wire/read_test.go new file mode 100644 index 0000000..0419a74 --- /dev/null +++ b/pkg/wire/read_test.go @@ -0,0 +1,157 @@ +package wire_test + +import ( + "bytes" + "io" + "testing" + + "github.com/numtide/nar-serve/pkg/wire" + "github.com/stretchr/testify/assert" +) + +// nolint:gochecknoglobals +var ( + wireBytesFalse = []byte{0, 0, 0, 0, 0, 0, 0, 0} + wireBytesTrue = []byte{1, 0, 0, 0, 0, 0, 0, 0} + wireBytesInvalidBool = []byte{2, 0, 0, 0, 0, 0, 0, 0} + + contents8Bytes = []byte{ + 42, 23, 42, 23, 42, 23, 42, 23, // the actual data + } + wire8Bytes = []byte{ + 8, 0, 0, 0, 0, 0, 0, 0, // length field - 8 bytes + 42, 23, 42, 23, 42, 23, 42, 23, // the actual data + } + + contents10Bytes = []byte{ + 42, 23, 42, 23, 42, 23, 42, 23, // the actual data + 42, 23, + } + wire10Bytes = []byte{ + 10, 0, 0, 0, 0, 0, 0, 0, // length field - 8 bytes + 42, 23, 42, 23, 42, 23, 42, 23, // the actual data + 42, 23, 0, 0, 0, 0, 0, 0, // more actual data (2 bytes), then padding + } + + wireStringFoo = []byte{ + 3, 0, 0, 0, 0, 0, 0, 0, // length field - 3 bytes + 0x46, 0x6F, 0x6F, 0, 0, 0, 0, 0, // contents, Foo, then 5 bytes padding + } +) + +// hesitantReader implements an io.Reader. +type hesitantReader struct { + data [][]byte +} + +// Read returns the topmost []byte in data, or io.EOF if empty. +func (r *hesitantReader) Read(p []byte) (n int, err error) { + if len(r.data) == 0 { + return 0, io.EOF + } + + copy(p, r.data[0]) + lenRead := len(r.data[0]) + + // pop first element in r.data + r.data = r.data[1:] + + return lenRead, nil +} + +// TestReadUint64 tests a reading a single uint64 field. +func TestReadUint64(t *testing.T) { + bs := []byte{13, 0, 0, 0, 0, 0, 0, 0} + r := bytes.NewReader(bs) + + num, err := wire.ReadUint64(r) + + assert.NoError(t, err) + assert.Equal(t, num, uint64(13)) +} + +// TestReadLongLongPartial tests reading a single uint64 field, but through a +// reader not returning everything at once. +func TestReadUint64Slow(t *testing.T) { + r := &hesitantReader{data: [][]byte{ + {13}, + {}, + {0, 0, 0, 0, 0, 0, 0}, + }} + + num, err := wire.ReadUint64(r) + assert.NoError(t, err) + assert.Equal(t, num, uint64(13)) +} + +// TestReadBool tests reading boolean values works. +func TestReadBool(t *testing.T) { + rdBytesFalse := bytes.NewReader(wireBytesFalse) + rdBytesTrue := bytes.NewReader(wireBytesTrue) + rdBytesInvalidBool := bytes.NewReader(wireBytesInvalidBool) + + v, err := wire.ReadBool(rdBytesFalse) + if assert.NoError(t, err) { + assert.Equal(t, v, false) + } + + v, err = wire.ReadBool(rdBytesTrue) + if assert.NoError(t, err) { + assert.Equal(t, v, true) + } + + _, err = wire.ReadBool(rdBytesInvalidBool) + assert.Error(t, err) +} + +func TestReadBytes(t *testing.T) { + buf, err := wire.ReadBytesFull(bytes.NewReader(wire8Bytes), 1024) + if assert.NoError(t, err) { + assert.Equal(t, 8, len(buf)) + assert.Equal(t, buf, contents8Bytes) + } + + buf, err = wire.ReadBytesFull(bytes.NewReader(wire10Bytes), 1024) + if assert.NoError(t, err) { + assert.Equal(t, 10, len(buf)) + assert.Equal(t, buf, contents10Bytes) + } + + // concatenate the 10 bytes, then 8 bytes dummy data together, + // and see if we can get out both bytes. This will test we properly skip over the padding. + payloadCombined := []byte{} + payloadCombined = append(payloadCombined, wire10Bytes...) + payloadCombined = append(payloadCombined, wire8Bytes...) 
+ + rd := bytes.NewReader(payloadCombined) + + buf, err = wire.ReadBytesFull(rd, 1024) + if assert.NoError(t, err) { + assert.Equal(t, 10, len(buf)) + assert.Equal(t, buf, contents10Bytes) + } + + buf, err = wire.ReadBytesFull(rd, 1024) + if assert.NoError(t, err) { + assert.Equal(t, 8, len(buf)) + assert.Equal(t, buf, contents8Bytes) + } +} + +func TestReadString(t *testing.T) { + s, err := wire.ReadString(bytes.NewReader(wireStringFoo), 1024) + if assert.NoError(t, err) { + assert.Equal(t, s, "Foo") + } + + // exceeding max should error + rd := bytes.NewReader(wireStringFoo) + _, err = wire.ReadString(rd, 2) + assert.Error(t, err) + + // the reader should not have seeked to the end of the packet + buf, err := io.ReadAll(rd) + if assert.NoError(t, err, "reading the rest shouldn't error") { + assert.Equal(t, wireStringFoo[8:], buf, "the reader should not have seeked to the end of the packet") + } +} diff --git a/pkg/wire/wire.go b/pkg/wire/wire.go new file mode 100644 index 0000000..3956b0d --- /dev/null +++ b/pkg/wire/wire.go @@ -0,0 +1,12 @@ +// Package wire provides methods to parse and produce fields used in the +// low-level Nix wire protocol, operating on io.Reader and io.Writer +// When reading fields with arbitrary lengths, a maximum number of bytes needs +// to be specified. +package wire + +import ( + "encoding/binary" +) + +// nolint:gochecknoglobals +var byteOrder = binary.LittleEndian diff --git a/pkg/wire/write.go b/pkg/wire/write.go new file mode 100644 index 0000000..7b43396 --- /dev/null +++ b/pkg/wire/write.go @@ -0,0 +1,76 @@ +package wire + +import ( + "io" + "sync" +) + +// nolint:gochecknoglobals +var ( + padding [8]byte + + bufPool = sync.Pool{ + New: func() interface{} { + return new([8]byte) + }, + } +) + +// WriteUint64 writes an uint64 in Nix wire format. +func WriteUint64(w io.Writer, n uint64) error { + buf := bufPool.Get().(*[8]byte) + defer bufPool.Put(buf) + + byteOrder.PutUint64(buf[:], n) + _, err := w.Write(buf[:]) + + return err +} + +// WriteBool writes a boolean in Nix wire format. +func WriteBool(w io.Writer, v bool) error { + if v { + return WriteUint64(w, 1) + } + + return WriteUint64(w, 0) +} + +// WriteBytes writes a bytes packet. See ReadBytes for its structure. +func WriteBytes(w io.Writer, buf []byte) error { + n := uint64(len(buf)) + if err := WriteUint64(w, n); err != nil { + return err + } + + if _, err := w.Write(buf); err != nil { + return err + } + + return writePadding(w, n) +} + +// WriteString writes a bytes packet. +func WriteString(w io.Writer, s string) error { + n := uint64(len(s)) + if err := WriteUint64(w, n); err != nil { + return err + } + + if _, err := io.WriteString(w, s); err != nil { + return err + } + + return writePadding(w, n) +} + +// writePadding writes the appropriate amount of padding. 
+func writePadding(w io.Writer, contentLength uint64) error { + if m := contentLength % 8; m != 0 { + _, err := w.Write(padding[m:]) + + return err + } + + return nil +} diff --git a/pkg/wire/write_test.go b/pkg/wire/write_test.go new file mode 100644 index 0000000..d909113 --- /dev/null +++ b/pkg/wire/write_test.go @@ -0,0 +1,121 @@ +package wire_test + +import ( + "bytes" + "testing" + + "github.com/numtide/nar-serve/pkg/wire" + "github.com/stretchr/testify/assert" +) + +func TestWriteUint64(t *testing.T) { + var buf bytes.Buffer + + err := wire.WriteUint64(&buf, 1) + assert.NoError(t, err) + assert.Equal(t, wireBytesTrue, buf.Bytes()) +} + +func TestWriteBool(t *testing.T) { + var buf bytes.Buffer + + err := wire.WriteBool(&buf, true) + assert.NoError(t, err) + assert.Equal(t, wireBytesTrue, buf.Bytes()) + + buf.Reset() + err = wire.WriteBool(&buf, false) + assert.NoError(t, err) + assert.Equal(t, wireBytesFalse, buf.Bytes()) +} + +func TestWriteBytes(t *testing.T) { + var buf bytes.Buffer + + err := wire.WriteBytes(&buf, contents8Bytes) + assert.NoError(t, err) + assert.Equal(t, wire8Bytes, buf.Bytes()) + + buf.Reset() + + err = wire.WriteBytes(&buf, contents10Bytes) + assert.NoError(t, err) + assert.Equal(t, wire10Bytes, buf.Bytes()) +} + +func TestWriteString(t *testing.T) { + var buf bytes.Buffer + + err := wire.WriteString(&buf, "Foo") + assert.NoError(t, err) + assert.Equal(t, wireStringFoo, buf.Bytes()) +} + +func TestBytesWriter8Bytes(t *testing.T) { + var buf bytes.Buffer + + bw, err := wire.NewBytesWriter(&buf, uint64(len(contents8Bytes))) + assert.NoError(t, err) + + n, err := bw.Write(contents8Bytes[:4]) + assert.NoError(t, err) + assert.Equal(t, 4, n) + n, err = bw.Write(contents8Bytes[4:]) + assert.NoError(t, err) + assert.Equal(t, 4, n) + + err = bw.Close() + assert.NoError(t, err) + + assert.Equal(t, wire8Bytes, buf.Bytes()) +} + +func TestBytesWriter10Bytes(t *testing.T) { + var buf bytes.Buffer + + bw, err := wire.NewBytesWriter(&buf, uint64(len(contents10Bytes))) + assert.NoError(t, err) + + n, err := bw.Write(contents10Bytes[:4]) + assert.NoError(t, err) + assert.Equal(t, 4, n) + n, err = bw.Write(contents10Bytes[4:]) + assert.NoError(t, err) + assert.Equal(t, 6, n) + + err = bw.Close() + assert.NoError(t, err) + + // closing again shouldn't panic + assert.NotPanics(t, func() { + bw.Close() + }) + + assert.Equal(t, wire10Bytes, buf.Bytes()) +} + +func TestBytesWriterError(t *testing.T) { + var buf bytes.Buffer + + // initialize a bytes writer with a len of 9 + bw, err := wire.NewBytesWriter(&buf, 9) + assert.NoError(t, err) + + // try to write 10 bytes into it + _, err = bw.Write(contents10Bytes) + assert.Error(t, err) + + buf.Reset() + + // initialize a bytes writer with a len of 11 + bw, err = wire.NewBytesWriter(&buf, 11) + assert.NoError(t, err) + + // write 10 bytes into it + n, err := bw.Write(contents10Bytes) + assert.NoError(t, err) + assert.Equal(t, 10, n) + + err = bw.Close() + assert.Error(t, err, "closing should fail, as one byte is still missing") +} diff --git a/tests/integration_test.go b/tests/integration_test.go index 6d1ed7b..c8e0dd9 100644 --- a/tests/integration_test.go +++ b/tests/integration_test.go @@ -9,7 +9,7 @@ import ( "path/filepath" "testing" - "github.com/numtide/nar-serve/libstore" + "github.com/numtide/nar-serve/pkg/libstore" "github.com/stretchr/testify/assert" ) From c13ee5782fd7c976032fba143239b8e90ef6d92d Mon Sep 17 00:00:00 2001 From: zimbatm Date: Sat, 27 Jul 2024 23:01:17 +0200 Subject: [PATCH 2/3] chore: clean flake --- 
default.nix | 9 ++++++--- flake.lock | 37 +++++++++++-------------------------- flake.nix | 38 +++++++++++++++++++++++--------------- overlay.nix | 4 +--- shell.nix | 5 +++-- 5 files changed, 44 insertions(+), 49 deletions(-) diff --git a/default.nix b/default.nix index 4c3bcc2..85e7613 100644 --- a/default.nix +++ b/default.nix @@ -1,7 +1,8 @@ -{ system ? builtins.currentSystem -, nixpkgs ? import { inherit system; } -}: { + system ? builtins.currentSystem, + nixpkgs ? import { inherit system; }, +}: +rec { nar-serve = nixpkgs.buildGoModule { pname = "nar-serve"; version = "latest"; @@ -10,5 +11,7 @@ doCheck = false; }; + default = nar-serve; + devShell = import ./shell.nix { inherit nixpkgs; }; } diff --git a/flake.lock b/flake.lock index 09dba7b..8223d5d 100644 --- a/flake.lock +++ b/flake.lock @@ -1,40 +1,25 @@ { "nodes": { - "flake-utils": { - "inputs": { - "systems": "systems" - }, + "nixpkgs": { "locked": { - "lastModified": 1709126324, - "narHash": "sha256-q6EQdSeUZOG26WelxqkmR7kArjgWCdw5sfJVHPH/7j8=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "d465f4819400de7c8d874d50b982301f28a84605", + "lastModified": 1722062969, + "narHash": "sha256-QOS0ykELUmPbrrUGmegAUlpmUFznDQeR4q7rFhl8eQg=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "b73c2221a46c13557b1b3be9c2070cc42cf01eb3", "type": "github" }, "original": { - "owner": "numtide", - "repo": "flake-utils", + "owner": "NixOS", + "ref": "nixos-unstable", + "repo": "nixpkgs", "type": "github" } }, - "nixpkgs": { - "locked": { - "lastModified": 1707689078, - "narHash": "sha256-UUGmRa84ZJHpGZ1WZEBEUOzaPOWG8LZ0yPg1pdDF/yM=", - "path": "/nix/store/8zgn73r04imfsn9sjynvp0nkkwppxngf-source", - "rev": "f9d39fb9aff0efee4a3d5f4a6d7c17701d38a1d8", - "type": "path" - }, - "original": { - "id": "nixpkgs", - "type": "indirect" - } - }, "root": { "inputs": { - "flake-utils": "flake-utils", - "nixpkgs": "nixpkgs" + "nixpkgs": "nixpkgs", + "systems": "systems" } }, "systems": { diff --git a/flake.nix b/flake.nix index 41b0370..0644289 100644 --- a/flake.nix +++ b/flake.nix @@ -1,21 +1,29 @@ { description = "NAR serve"; - inputs.flake-utils.url = "github:numtide/flake-utils"; + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable"; + systems.url = "github:nix-systems/default"; + }; - outputs = { self, nixpkgs, flake-utils }: + outputs = { - overlay = import ./overlay.nix; - } - // - flake-utils.lib.eachDefaultSystem (system: - let - pkgs = nixpkgs.legacyPackages.${system}; - in - rec { - packages = import ./. { nixpkgs = pkgs; }; - defaultPackage = packages.nar-serve; - devShell = packages.devShell; - } - ); + self, + nixpkgs, + systems, + }: + let + eachSystem = f: nixpkgs.lib.genAttrs (import systems) (system: f nixpkgs.legacyPackages.${system}); + in + { + overlays.default = import ./overlay.nix; + + packages = eachSystem (pkgs: import ./. { nixpkgs = pkgs; }); + + formatter = eachSystem (pkgs: pkgs.nixfmt-rfc-style); + + devShells = eachSystem (pkgs: { + default = self.packages.${pkgs.system}.devShell; + }); + }; } diff --git a/overlay.nix b/overlay.nix index dc59aff..d54df73 100644 --- a/overlay.nix +++ b/overlay.nix @@ -1,3 +1 @@ -final: prev: { - nar-serve = import ./. { nixpkgs = final; }; -} +final: prev: { nar-serve = import ./. { nixpkgs = final; }; } diff --git a/shell.nix b/shell.nix index 005ac1d..9a92443 100644 --- a/shell.nix +++ b/shell.nix @@ -1,5 +1,6 @@ -{ system ? builtins.currentSystem -, nixpkgs ? import { inherit system; } +{ + system ? builtins.currentSystem, + nixpkgs ? 
import { inherit system; }, }: nixpkgs.mkShell { buildInputs = with nixpkgs; [ From 882ab89e24b5d321e5b8d5000a5e1f3480596237 Mon Sep 17 00:00:00 2001 From: zimbatm Date: Sat, 27 Jul 2024 23:07:59 +0200 Subject: [PATCH 3/3] chore(flake): add checks --- flake.nix | 2 ++ 1 file changed, 2 insertions(+) diff --git a/flake.nix b/flake.nix index 0644289..2a6adfa 100644 --- a/flake.nix +++ b/flake.nix @@ -25,5 +25,7 @@ devShells = eachSystem (pkgs: { default = self.packages.${pkgs.system}.devShell; }); + + checks = self.packages; }; }
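A few self-contained usage sketches of the vendored packages follow, written against the pkg/ import paths added in the first patch. They are illustrative only, not part of the patches. First, the narinfo types: ParseSignatureLine splits a Sig: line into key name and raw ed25519 digest, and NarInfo.String() re-serializes the metadata. The signature line and NarHash string are the test vectors from the files above; the store path, URL and NarSize are made-up example values.

package main

import (
	"fmt"

	"github.com/numtide/nar-serve/pkg/narinfo"
	"github.com/numtide/nar-serve/pkg/nixhash"
)

func main() {
	// A "Sig:" line as emitted by cache.nixos.org: key name, a colon, then the
	// base64-encoded ed25519 signature.
	sig, err := narinfo.ParseSignatureLine("cache.nixos.org-1:rH4wxlNRbTbViQon40C15og5zlcFEphwoF26IQGHi2QCwVYyaLj6LOag+MeWcZ65SWzy6PnOlXjriLNcxE0hAQ==")
	if err != nil {
		panic(err)
	}
	fmt.Println(sig.KeyName, len(sig.Digest)) // cache.nixos.org-1 64

	// NarHash remembers the encoding it was parsed from, so it renders back
	// exactly as it appeared in the .narinfo file.
	narHash, err := nixhash.ParseAny("sha256:1rjs6c23nyf8zkmf7yxglz2q2m7v5kp51nc2m0lk4h998d0qiixs", nil)
	if err != nil {
		panic(err)
	}

	ni := &narinfo.NarInfo{
		StorePath:   "/nix/store/00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432",
		URL:         "nar/1rjs6c23nyf8zkmf7yxglz2q2m7v5kp51nc2m0lk4h998d0qiixs.nar.xz",
		Compression: "xz",
		NarHash:     narHash,
		NarSize:     1024, // made-up size, for illustration only
		Signatures:  []*narinfo.Signature{sig},
	}

	// String() re-serializes the struct in .narinfo key/value format.
	fmt.Print(ni.String())
}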
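The nixbase32 encoding is what the store path hashes and digests use: the custom alphabet, reversed read order and absence of padding are all visible in the round trip below. A minimal sketch; the byte values and the rejected "zz" string come from the test vectors in nixbase32_test.go.

package main

import (
	"fmt"

	"github.com/numtide/nar-serve/pkg/nixbase32"
)

func main() {
	enc := nixbase32.EncodeToString([]byte{0x1f})
	fmt.Println(enc) // 0z

	dec, err := nixbase32.DecodeString(enc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", dec) // 1f

	// "zz" would carry non-zero bits past the last decoded byte, so it is
	// rejected as an invalid encoding.
	fmt.Println(nixbase32.ValidateString("zz") != nil) // true
}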
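nixhash.ParseAny infers the encoding from the digest length alone, which is why one entry point can accept base16, nixbase32, base64 and SRI forms. A sketch of parsing and re-encoding the same digest, assuming the sha256 test vector from hash_test.go; the Format calls only re-render the parsed bytes.

package main

import (
	"fmt"

	"github.com/numtide/nar-serve/pkg/nixhash"
)

func main() {
	// A hash as it appears in a .narinfo file: algo prefix plus nixbase32 digest.
	h, err := nixhash.ParseAny("sha256:1rjs6c23nyf8zkmf7yxglz2q2m7v5kp51nc2m0lk4h998d0qiixs", nil)
	if err != nil {
		panic(err)
	}

	// The original encoding is kept, so String() round-trips…
	fmt.Println(h.String())

	// …while Format can re-render the same digest in other encodings,
	// e.g. as an SRI expression or as bare hex.
	fmt.Println(h.Format(nixhash.SRI, true))
	fmt.Println(h.Format(nixhash.Base16, false))
}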
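CompressHash is the XOR-fold Nix uses to shrink a full digest down to the 20-byte store path hash. A tiny sketch with arbitrary input bytes, just to show the folding rule.

package main

import (
	"fmt"

	"github.com/numtide/nar-serve/pkg/nixhash"
)

func main() {
	// Fold 4 input bytes down to 3: byte i of the input is XOR'ed into
	// position i % 3 of the zero-initialized output.
	out := nixhash.CompressHash([]byte{0x01, 0x02, 0x03, 0x04}, 3)
	fmt.Printf("%x\n", out) // 050203 (0x01^0x04, 0x02, 0x03)
}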
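nixpath only accepts bare store paths, so anything below the store path itself is rejected by Validate. A sketch using the net-tools path from nixpath_test.go.

package main

import (
	"fmt"

	"github.com/numtide/nar-serve/pkg/nixpath"
)

func main() {
	p, err := nixpath.FromString("/nix/store/00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432")
	if err != nil {
		panic(err)
	}

	fmt.Println(p.Name)          // net-tools-1.60_p20170221182432
	fmt.Printf("%x\n", p.Digest) // the 20-byte store path hash
	fmt.Println(p.String())      // reassembles the original path

	// Paths inside the store object are not bare store paths and fail validation.
	err = nixpath.Validate("/nix/store/00bgd045z0d4icpbc2yyz4gx48ak44la-net-tools-1.60_p20170221182432/bin/arp")
	fmt.Println(err != nil) // true
}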
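The ReferenceScanner is a plain io.Writer, so run-time references can be collected by streaming data into it chunk by chunk; partial hashes are buffered across Write calls. The payload string below is only a stand-in for an uncompressed NAR stream; the candidate path is the hello-2.12 fixture from refs_test.go.

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/numtide/nar-serve/pkg/nixpath/references"
)

func main() {
	// Candidate paths are the potential run-time dependencies; the scanner
	// looks for their 32-character hash parts anywhere in the written data.
	scanner, err := references.NewReferenceScanner([]string{
		"/nix/store/knn6wc1a89c47yb70qwv56rmxylia6wx-hello-2.12",
	})
	if err != nil {
		panic(err)
	}

	data := strings.NewReader("...ELF.../nix/store/knn6wc1a89c47yb70qwv56rmxylia6wx-hello-2.12/bin/hello...")
	if _, err := io.Copy(scanner, data); err != nil {
		panic(err)
	}

	fmt.Println(scanner.References()) // [/nix/store/knn6wc1a89c47yb70qwv56rmxylia6wx-hello-2.12]
}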
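Finally, the wire package implements the length-prefixed, 8-byte-padded framing of the Nix wire format. A round-trip sketch matching the "Foo" fixture in read_test.go and write_test.go.

package main

import (
	"bytes"
	"fmt"

	"github.com/numtide/nar-serve/pkg/wire"
)

func main() {
	var buf bytes.Buffer

	// A string field is a 64-bit little-endian length, the raw bytes,
	// then zero padding up to the next 8-byte boundary.
	if err := wire.WriteString(&buf, "Foo"); err != nil {
		panic(err)
	}
	fmt.Println(buf.Len()) // 16: 8-byte length + 3 bytes content + 5 bytes padding

	// ReadString caps the accepted content length and skips the padding.
	s, err := wire.ReadString(&buf, 1024)
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // Foo
}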