diff --git a/go.mod b/go.mod index ddf494f578..270a3ddf5b 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/containers/storage v1.45.6 github.com/coreos/go-systemd/v22 v22.5.0 github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3 - github.com/cyphar/filepath-securejoin v0.2.3 + github.com/cyphar/filepath-securejoin v0.2.4 github.com/digitalocean/go-qemu v0.0.0-20210326154740-ac9e0b687001 github.com/docker/docker v20.10.23+incompatible github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 @@ -44,7 +44,7 @@ require ( github.com/onsi/gomega v1.26.0 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.0-rc2 - github.com/opencontainers/runc v1.1.4 + github.com/opencontainers/runc v1.1.12 github.com/opencontainers/runtime-spec v1.1.0-rc.1 github.com/opencontainers/runtime-tools v0.9.1-0.20221014010322-58c91d646d86 github.com/opencontainers/selinux v1.11.0 @@ -78,6 +78,7 @@ require ( github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect github.com/chzyer/readline v1.5.1 // indirect + github.com/cilium/ebpf v0.7.0 // indirect github.com/containerd/cgroups v1.0.4 // indirect github.com/containerd/containerd v1.6.15 // indirect github.com/containerd/stargz-snapshotter/estargz v0.14.1 // indirect @@ -175,5 +176,3 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) - -replace github.com/opencontainers/runc => github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc diff --git a/go.sum b/go.sum index 7f17751ebc..c570eec751 100644 --- a/go.sum +++ b/go.sum @@ -143,6 +143,8 @@ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/checkpoint-restore/checkpointctl v0.0.0-20220321135231-33f4a66335f0 h1:txB5jvhzUCSiiQmqmMWpo5CEB7Gj/Hq5Xqi7eaPl8ko= github.com/checkpoint-restore/checkpointctl v0.0.0-20220321135231-33f4a66335f0/go.mod h1:67kWC1PXQLR3lM/mmNnu3Kzn7K4TSWZAGUuQP1JSngk= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/checkpoint-restore/go-criu/v5 v5.2.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= github.com/checkpoint-restore/go-criu/v6 v6.3.0 h1:mIdrSO2cPNWQY1truPg6uHLXyKHk3Z5Odx4wjKOASzA= @@ -160,7 +162,9 @@ github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmE github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.9.0/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.7.0 h1:1k/q3ATgxSXRdrmPfH8d7YK0GfqVsEKZAX9dQZvs56k= +github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod 
h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -319,8 +323,10 @@ github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 h1:vU+EP9ZuFUCYE0NYLwTSob+3LNEJATzNfP/DC7SWGWI= github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= -github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= @@ -391,8 +397,8 @@ github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= @@ -505,7 +511,7 @@ github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblf github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.1-0.20221029134443-4b691ce883d5 h1:boOtwyhKoC3Aadiw5zbhU54YyCkm9EpZCSN6mOx0KLc= github.com/godbus/dbus/v5 v5.1.1-0.20221029134443-4b691ce883d5/go.mod h1:fXoNnqaUvdKqjJmMGeiBgmRphUg+kO0MT4AhPOP6+Qg= github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= @@ -794,6 +800,7 @@ github.com/moby/sys/mount v0.3.3 h1:fX1SVkXFJ47XWDoeFW4Sq7PdQJnV2QIDZAqjNqgEjUs= github.com/moby/sys/mount v0.3.3/go.mod h1:PBaEorSNTLG5t/+4EgukEQVlAvVEc6ZjTySwKdqp5K0= github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.4.1/go.mod 
h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= @@ -860,8 +867,16 @@ github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034= github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= -github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc h1:qjkUzmFsOFbQyjObybk40mRida83j5IHRaKzLGdBbEU= -github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc/go.mod h1:wUOQGsiKae6VzA/UvlCK3cO+pHk8F2VQHlIoITEfMM8= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runc v1.1.4/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= +github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss= +github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -879,6 +894,7 @@ github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3 github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opencontainers/selinux v1.8.5/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo= github.com/opencontainers/selinux v1.9.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= @@ -963,6 +979,8 @@ github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdh github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sebdah/goldie/v2 v2.5.3 
h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY= github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= @@ -1065,7 +1083,6 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.9/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME= github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= github.com/vbauerster/mpb/v7 v7.5.3 h1:BkGfmb6nMrrBQDFECR/Q7RkKCw7ylMetCb4079CGs4w= @@ -1392,6 +1409,8 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go index 5740d70664..9819c83bae 100644 --- a/libpod/oci_conmon_linux.go +++ b/libpod/oci_conmon_linux.go @@ -267,49 +267,25 @@ func GetLimits(resource *spec.LinuxResources) (runcconfig.Resources, error) { if resource.BlockIO != nil { if len(resource.BlockIO.ThrottleReadBpsDevice) > 0 { for _, entry := range resource.BlockIO.ThrottleReadBpsDevice { - throttle := &runcconfig.ThrottleDevice{} - dev := &runcconfig.BlockIODevice{ - Major: entry.Major, - Minor: entry.Minor, - } - throttle.BlockIODevice = *dev - throttle.Rate = entry.Rate + throttle := runcconfig.NewThrottleDevice(entry.Major, entry.Minor, entry.Rate) final.BlkioThrottleReadBpsDevice = append(final.BlkioThrottleReadBpsDevice, throttle) } } if len(resource.BlockIO.ThrottleWriteBpsDevice) > 0 { for _, entry := range resource.BlockIO.ThrottleWriteBpsDevice { - throttle := &runcconfig.ThrottleDevice{} - dev := &runcconfig.BlockIODevice{ - Major: entry.Major, - Minor: entry.Minor, - } - throttle.BlockIODevice = *dev - throttle.Rate = entry.Rate + throttle := runcconfig.NewThrottleDevice(entry.Major, entry.Minor, entry.Rate) final.BlkioThrottleWriteBpsDevice = append(final.BlkioThrottleWriteBpsDevice, throttle) } } if len(resource.BlockIO.ThrottleReadIOPSDevice) > 0 { for _, entry := range 
resource.BlockIO.ThrottleReadIOPSDevice {
-				throttle := &runcconfig.ThrottleDevice{}
-				dev := &runcconfig.BlockIODevice{
-					Major: entry.Major,
-					Minor: entry.Minor,
-				}
-				throttle.BlockIODevice = *dev
-				throttle.Rate = entry.Rate
+				throttle := runcconfig.NewThrottleDevice(entry.Major, entry.Minor, entry.Rate)
 				final.BlkioThrottleReadIOPSDevice = append(final.BlkioThrottleReadIOPSDevice, throttle)
 			}
 		}
 		if len(resource.BlockIO.ThrottleWriteIOPSDevice) > 0 {
 			for _, entry := range resource.BlockIO.ThrottleWriteIOPSDevice {
-				throttle := &runcconfig.ThrottleDevice{}
-				dev := &runcconfig.BlockIODevice{
-					Major: entry.Major,
-					Minor: entry.Minor,
-				}
-				throttle.BlockIODevice = *dev
-				throttle.Rate = entry.Rate
+				throttle := runcconfig.NewThrottleDevice(entry.Major, entry.Minor, entry.Rate)
 				final.BlkioThrottleWriteIOPSDevice = append(final.BlkioThrottleWriteIOPSDevice, throttle)
 			}
 		}
@@ -321,18 +297,14 @@ func GetLimits(resource *spec.LinuxResources) (runcconfig.Resources, error) {
 		}
 		if len(resource.BlockIO.WeightDevice) > 0 {
 			for _, entry := range resource.BlockIO.WeightDevice {
-				weight := &runcconfig.WeightDevice{}
-				dev := &runcconfig.BlockIODevice{
-					Major: entry.Major,
-					Minor: entry.Minor,
-				}
+				var w, lw uint16
 				if entry.Weight != nil {
-					weight.Weight = *entry.Weight
+					w = *entry.Weight
 				}
 				if entry.LeafWeight != nil {
-					weight.LeafWeight = *entry.LeafWeight
+					lw = *entry.LeafWeight
 				}
-				weight.BlockIODevice = *dev
+				weight := runcconfig.NewWeightDevice(entry.Major, entry.Minor, w, lw)
 				final.BlkioWeightDevice = append(final.BlkioWeightDevice, weight)
 			}
 		}
diff --git a/vendor/github.com/cilium/ebpf/.clang-format b/vendor/github.com/cilium/ebpf/.clang-format
new file mode 100644
index 0000000000..4eb94b1baa
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/.clang-format
@@ -0,0 +1,17 @@
+---
+Language: Cpp
+BasedOnStyle: LLVM
+AlignAfterOpenBracket: DontAlign
+AlignConsecutiveAssignments: true
+AlignEscapedNewlines: DontAlign
+AlwaysBreakBeforeMultilineStrings: true
+AlwaysBreakTemplateDeclarations: false
+AllowAllParametersOfDeclarationOnNextLine: false
+AllowShortFunctionsOnASingleLine: false
+BreakBeforeBraces: Attach
+IndentWidth: 4
+KeepEmptyLinesAtTheStartOfBlocks: false
+TabWidth: 4
+UseTab: ForContinuationAndIndentation
+ColumnLimit: 1000
+...
diff --git a/vendor/github.com/cilium/ebpf/.gitignore b/vendor/github.com/cilium/ebpf/.gitignore
new file mode 100644
index 0000000000..b46162b8ec
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/.gitignore
@@ -0,0 +1,14 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+*.o
+!*_bpf*.o
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
diff --git a/vendor/github.com/cilium/ebpf/.golangci.yaml b/vendor/github.com/cilium/ebpf/.golangci.yaml
new file mode 100644
index 0000000000..dc62dd6d0f
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/.golangci.yaml
@@ -0,0 +1,28 @@
+---
+issues:
+  exclude-rules:
+    # syscall param structs will have unused fields in Go code.
+    - path: syscall.*.go
+      linters:
+        - structcheck
+
+linters:
+  disable-all: true
+  enable:
+    - deadcode
+    - errcheck
+    - goimports
+    - gosimple
+    - govet
+    - ineffassign
+    - misspell
+    - staticcheck
+    - structcheck
+    - typecheck
+    - unused
+    - varcheck
+
+  # Could be enabled later:
+  # - gocyclo
+  # - maligned
+  # - gosec
diff --git a/vendor/github.com/cilium/ebpf/ARCHITECTURE.md b/vendor/github.com/cilium/ebpf/ARCHITECTURE.md
new file mode 100644
index 0000000000..6cbb31b648
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/ARCHITECTURE.md
@@ -0,0 +1,80 @@
+Architecture of the library
+===
+
+    ELF -> Specifications -> Objects -> Links
+
+ELF
+---
+
+BPF is usually produced by using Clang to compile a subset of C. Clang outputs
+an ELF file which contains program byte code (aka BPF), but also metadata for
+maps used by the program. The metadata follows the conventions set by libbpf
+shipped with the kernel. Certain ELF sections have special meaning
+and contain structures defined by libbpf. Newer versions of clang emit
+additional metadata in BPF Type Format (aka BTF).
+
+The library aims to be compatible with libbpf so that moving from a C toolchain
+to a Go one creates little friction. To that end, the [ELF reader](elf_reader.go)
+is tested against the Linux selftests and avoids introducing custom behaviour
+if possible.
+
+The output of the ELF reader is a `CollectionSpec` which encodes
+all of the information contained in the ELF in a form that is easy to work with
+in Go.
+
+### BTF
+
+The BPF Type Format describes more than just the types used by a BPF program. It
+includes debug aids like which source line corresponds to which instructions and
+what global variables are used.
+
+[BTF parsing](internal/btf/) lives in a separate internal package since exposing
+it would mean an additional maintenance burden, and because the API still
+has sharp corners. The most important concept is the `btf.Type` interface, which
+also describes things that aren't really types like `.rodata` or `.bss` sections.
+`btf.Type`s can form cyclical graphs, which can easily lead to infinite loops if
+one is not careful. Hopefully a safe pattern to work with `btf.Type` emerges as
+we write more code that deals with it.
+
+Specifications
+---
+
+`CollectionSpec`, `ProgramSpec` and `MapSpec` are blueprints for in-kernel
+objects and contain everything necessary to execute the relevant `bpf(2)`
+syscalls. Since the ELF reader outputs a `CollectionSpec` it's possible to
+modify clang-compiled BPF code, for example to rewrite constants. At the same
+time the [asm](asm/) package provides an assembler that can be used to generate
+`ProgramSpec` on the fly.
+
+Creating a spec should never require any privileges or be restricted in any way,
+for example by only allowing programs in native endianness. This ensures that
+the library stays flexible.
+
+Objects
+---
+
+`Program` and `Map` are the result of loading specs into the kernel. Sometimes
+loading a spec will fail because the kernel is too old, or a feature is not
+enabled. There are multiple ways the library deals with that:
+
+* Fallback: older kernels don't allow naming programs and maps. The library
+  automatically detects support for names, and omits them during load if
+  necessary. This works since name is primarily a debug aid.
+
+* Sentinel error: sometimes it's possible to detect that a feature isn't available.
+  In that case the library will return an error wrapping `ErrNotSupported`.
+  This is also useful to skip tests that can't run on the current kernel.
+
+Once program and map objects are loaded they expose the kernel's low-level API,
+e.g. `NextKey`. Often this API is awkward to use in Go, so there are safer
+wrappers on top of the low-level API, like `MapIterator`. The low-level API is
+useful when our higher-level API doesn't support a particular use case.
+
+Links
+---
+
+BPF can be attached to many different points in the kernel and newer BPF hooks
+tend to use bpf_link to do so. Older hooks unfortunately use a combination of
+syscalls, netlink messages, etc. Adding support for a new link type should not
+pull in large dependencies like netlink, so XDP programs or tracepoints are
+out of scope.
diff --git a/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md b/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..8e42838c5a
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md
@@ -0,0 +1,46 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at nathanjsweet at gmail dot com or i at lmb dot io. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/cilium/ebpf/CONTRIBUTING.md b/vendor/github.com/cilium/ebpf/CONTRIBUTING.md
new file mode 100644
index 0000000000..0d29eae81e
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/CONTRIBUTING.md
@@ -0,0 +1,40 @@
+# How to contribute
+
+Development is on [GitHub](https://github.com/cilium/ebpf) and contributions in
+the form of pull requests and issues reporting bugs or suggesting new features
+are welcome. Please take a look at [the architecture](ARCHITECTURE.md) to get
+a better understanding for the high-level goals.
+
+New features must be accompanied by tests. Before starting work on any large
+feature, please [join](https://ebpf.io/slack) the
+[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel on Slack to
+discuss the design first.
+
+When submitting pull requests, consider writing details about what problem you
+are solving and why the proposed approach solves that problem in commit messages
+and/or pull request description to help future library users and maintainers to
+reason about the proposed changes.
+
+## Running the tests
+
+Many of the tests require privileges to set resource limits and load eBPF code.
+The easiest way to obtain these is to run the tests with `sudo`.
+
+To test the current package with your local kernel you can simply run:
+```
+go test -exec sudo ./...
+```
+
+To test the current package with a different kernel version you can use the [run-tests.sh](run-tests.sh) script.
+It requires [virtme](https://github.com/amluto/virtme) and qemu to be installed.
+
+Examples:
+
+```bash
+# Run all tests on a 5.4 kernel
+./run-tests.sh 5.4
+
+# Run a subset of tests:
+./run-tests.sh 5.4 go test ./link
+```
+
diff --git a/vendor/github.com/cilium/ebpf/LICENSE b/vendor/github.com/cilium/ebpf/LICENSE
new file mode 100644
index 0000000000..c637ae99c2
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/LICENSE
@@ -0,0 +1,23 @@
+MIT License
+
+Copyright (c) 2017 Nathan Sweet
+Copyright (c) 2018, 2019 Cloudflare
+Copyright (c) 2019 Authors of Cilium
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/cilium/ebpf/Makefile b/vendor/github.com/cilium/ebpf/Makefile
new file mode 100644
index 0000000000..0bc15c0810
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/Makefile
@@ -0,0 +1,73 @@
+# The development version of clang is distributed as the 'clang' binary,
+# while stable/released versions have a version number attached.
+# Pin the default clang to a stable version.
+CLANG ?= clang-12
+CFLAGS := -target bpf -O2 -g -Wall -Werror $(CFLAGS)
+
+# Obtain an absolute path to the directory of the Makefile.
+# Assume the Makefile is in the root of the repository.
+REPODIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
+UIDGID := $(shell stat -c '%u:%g' ${REPODIR})
+
+IMAGE := $(shell cat ${REPODIR}/testdata/docker/IMAGE)
+VERSION := $(shell cat ${REPODIR}/testdata/docker/VERSION)
+
+# clang <8 doesn't tag relocs properly (STT_NOTYPE)
+# clang 9 is the first version emitting BTF
+TARGETS := \
+	testdata/loader-clang-7 \
+	testdata/loader-clang-9 \
+	testdata/loader-$(CLANG) \
+	testdata/btf_map_init \
+	testdata/invalid_map \
+	testdata/raw_tracepoint \
+	testdata/invalid_map_static \
+	testdata/invalid_btf_map_init \
+	testdata/strings \
+	testdata/freplace \
+	testdata/iproute2_map_compat \
+	internal/btf/testdata/relocs
+
+.PHONY: all clean docker-all docker-shell
+
+.DEFAULT_TARGET = docker-all
+
+# Build all ELF binaries using a Dockerized LLVM toolchain.
+docker-all:
+	docker run --rm --user "${UIDGID}" \
+		-v "${REPODIR}":/ebpf -w /ebpf --env MAKEFLAGS \
+		--env CFLAGS="-fdebug-prefix-map=/ebpf=." \
+		"${IMAGE}:${VERSION}" \
+		make all
+
+# (debug) Drop the user into a shell inside the Docker container as root.
+docker-shell:
+	docker run --rm -ti \
+		-v "${REPODIR}":/ebpf -w /ebpf \
+		"${IMAGE}:${VERSION}"
+
+clean:
+	-$(RM) testdata/*.elf
+	-$(RM) internal/btf/testdata/*.elf
+
+all: $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS))
+	ln -srf testdata/loader-$(CLANG)-el.elf testdata/loader-el.elf
+	ln -srf testdata/loader-$(CLANG)-eb.elf testdata/loader-eb.elf
+
+testdata/loader-%-el.elf: testdata/loader.c
+	$* $(CFLAGS) -mlittle-endian -c $< -o $@
+
+testdata/loader-%-eb.elf: testdata/loader.c
+	$* $(CFLAGS) -mbig-endian -c $< -o $@
+
+%-el.elf: %.c
+	$(CLANG) $(CFLAGS) -mlittle-endian -c $< -o $@
+
+%-eb.elf : %.c
+	$(CLANG) $(CFLAGS) -mbig-endian -c $< -o $@
+
+# Usage: make VMLINUX=/path/to/vmlinux vmlinux-btf
+.PHONY: vmlinux-btf
+vmlinux-btf: internal/btf/testdata/vmlinux-btf.gz
+internal/btf/testdata/vmlinux-btf.gz: $(VMLINUX)
+	objcopy --dump-section .BTF=/dev/stdout "$<" /dev/null | gzip > "$@"
diff --git a/vendor/github.com/cilium/ebpf/README.md b/vendor/github.com/cilium/ebpf/README.md
new file mode 100644
index 0000000000..01e2fff92b
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/README.md
@@ -0,0 +1,70 @@
+# eBPF
+
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/cilium/ebpf)](https://pkg.go.dev/github.com/cilium/ebpf)
+
+![HoneyGopher](.github/images/cilium-ebpf.png)
+
+eBPF is a pure Go library that provides utilities for loading, compiling, and
+debugging eBPF programs. It has minimal external dependencies and is intended to
+be used in long running processes.
+
+The library is maintained by [Cloudflare](https://www.cloudflare.com) and
+[Cilium](https://www.cilium.io).
+
+See [ebpf.io](https://ebpf.io) for other projects from the eBPF ecosystem.
+
+## Getting Started
+
+A small collection of Go and eBPF programs that serve as examples for building
+your own tools can be found under [examples/](examples/).
+
+Contributions are highly encouraged, as they highlight certain use cases of
+eBPF and the library, and help shape the future of the project.
+
+## Getting Help
+
+Please
+[join](https://ebpf.io/slack) the
+[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel on Slack if you
+have questions regarding the library.
+
+## Packages
+
+This library includes the following packages:
+
+* [asm](https://pkg.go.dev/github.com/cilium/ebpf/asm) contains a basic
+  assembler, allowing you to write eBPF assembly instructions directly
+  within your Go code. (You don't need to use this if you prefer to write your eBPF program in C.)
+* [cmd/bpf2go](https://pkg.go.dev/github.com/cilium/ebpf/cmd/bpf2go) allows
+  compiling and embedding eBPF programs written in C within Go code. As well as
+  compiling the C code, it auto-generates Go code for loading and manipulating
+  the eBPF program and map objects.
+* [link](https://pkg.go.dev/github.com/cilium/ebpf/link) allows attaching eBPF
+  to various hooks
+* [perf](https://pkg.go.dev/github.com/cilium/ebpf/perf) allows reading from a
+  `PERF_EVENT_ARRAY`
+* [ringbuf](https://pkg.go.dev/github.com/cilium/ebpf/ringbuf) allows reading from a
+  `BPF_MAP_TYPE_RINGBUF` map
+
+
+## Requirements
+
+* A version of Go that is [supported by
+  upstream](https://golang.org/doc/devel/release.html#policy)
+* Linux >= 4.9. CI is run against LTS releases.
+
+## Regenerating Testdata
+
+Run `make` in the root of this repository to rebuild testdata in all
+subpackages. This requires Docker, as it relies on a standardized build
+environment to keep the build output stable.
+
+The toolchain image build files are kept in [testdata/docker/](testdata/docker/).
+
+## License
+
+MIT
+
+### eBPF Gopher
+
+The eBPF honeygopher is based on the Go gopher designed by Renee French.
diff --git a/vendor/github.com/cilium/ebpf/asm/alu.go b/vendor/github.com/cilium/ebpf/asm/alu.go
new file mode 100644
index 0000000000..70ccc4d151
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/alu.go
@@ -0,0 +1,149 @@
+package asm
+
+//go:generate stringer -output alu_string.go -type=Source,Endianness,ALUOp
+
+// Source of ALU / ALU64 / Branch operations
+//
+//	msb      lsb
+//	+----+-+---+
+//	|op  |S|cls|
+//	+----+-+---+
+type Source uint8
+
+const sourceMask OpCode = 0x08
+
+// Source bitmask
+const (
+	// InvalidSource is returned by getters when invoked
+	// on non ALU / branch OpCodes.
+	InvalidSource Source = 0xff
+	// ImmSource src is from constant
+	ImmSource Source = 0x00
+	// RegSource src is from register
+	RegSource Source = 0x08
+)
+
+// The Endianness of a byte swap instruction.
+type Endianness uint8
+
+const endianMask = sourceMask
+
+// Endian flags
+const (
+	InvalidEndian Endianness = 0xff
+	// Convert to little endian
+	LE Endianness = 0x00
+	// Convert to big endian
+	BE Endianness = 0x08
+)
+
+// ALUOp are ALU / ALU64 operations
+//
+//	msb      lsb
+//	+----+-+---+
+//	|OP  |s|cls|
+//	+----+-+---+
type ALUOp uint8
+
+const aluMask OpCode = 0xf0
+
+const (
+	// InvalidALUOp is returned by getters when invoked
+	// on non ALU OpCodes
+	InvalidALUOp ALUOp = 0xff
+	// Add - addition
+	Add ALUOp = 0x00
+	// Sub - subtraction
+	Sub ALUOp = 0x10
+	// Mul - multiplication
+	Mul ALUOp = 0x20
+	// Div - division
+	Div ALUOp = 0x30
+	// Or - bitwise or
+	Or ALUOp = 0x40
+	// And - bitwise and
+	And ALUOp = 0x50
+	// LSh - bitwise shift left
+	LSh ALUOp = 0x60
+	// RSh - bitwise shift right
+	RSh ALUOp = 0x70
+	// Neg - sign/unsign signing bit
+	Neg ALUOp = 0x80
+	// Mod - modulo
+	Mod ALUOp = 0x90
+	// Xor - bitwise xor
+	Xor ALUOp = 0xa0
+	// Mov - move value from one place to another
+	Mov ALUOp = 0xb0
+	// ArSh - arithmetic shift
+	ArSh ALUOp = 0xc0
+	// Swap - endian conversions
+	Swap ALUOp = 0xd0
+)
+
+// HostTo converts from host to another endianness.
+func HostTo(endian Endianness, dst Register, size Size) Instruction {
+	var imm int64
+	switch size {
+	case Half:
+		imm = 16
+	case Word:
+		imm = 32
+	case DWord:
+		imm = 64
+	default:
+		return Instruction{OpCode: InvalidOpCode}
+	}
+
+	return Instruction{
+		OpCode:   OpCode(ALUClass).SetALUOp(Swap).SetSource(Source(endian)),
+		Dst:      dst,
+		Constant: imm,
+	}
+}
+
+// Op returns the OpCode for an ALU operation with a given source.
+func (op ALUOp) Op(source Source) OpCode {
+	return OpCode(ALU64Class).SetALUOp(op).SetSource(source)
+}
+
+// Reg emits `dst (op) src`.
+func (op ALUOp) Reg(dst, src Register) Instruction {
+	return Instruction{
+		OpCode: op.Op(RegSource),
+		Dst:    dst,
+		Src:    src,
+	}
+}
+
+// Imm emits `dst (op) value`.
+func (op ALUOp) Imm(dst Register, value int32) Instruction {
+	return Instruction{
+		OpCode:   op.Op(ImmSource),
+		Dst:      dst,
+		Constant: int64(value),
+	}
+}
+
+// Op32 returns the OpCode for a 32-bit ALU operation with a given source.
+func (op ALUOp) Op32(source Source) OpCode {
+	return OpCode(ALUClass).SetALUOp(op).SetSource(source)
+}
+
+// Reg32 emits `dst (op) src`, zeroing the upper 32 bit of dst.
+func (op ALUOp) Reg32(dst, src Register) Instruction {
+	return Instruction{
+		OpCode: op.Op32(RegSource),
+		Dst:    dst,
+		Src:    src,
+	}
+}
+
+// Imm32 emits `dst (op) value`, zeroing the upper 32 bit of dst.
+func (op ALUOp) Imm32(dst Register, value int32) Instruction {
+	return Instruction{
+		OpCode:   op.Op32(ImmSource),
+		Dst:      dst,
+		Constant: int64(value),
+	}
+}
diff --git a/vendor/github.com/cilium/ebpf/asm/alu_string.go b/vendor/github.com/cilium/ebpf/asm/alu_string.go
new file mode 100644
index 0000000000..72d3fe6292
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/alu_string.go
@@ -0,0 +1,107 @@
+// Code generated by "stringer -output alu_string.go -type=Source,Endianness,ALUOp"; DO NOT EDIT.
+
+package asm
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[InvalidSource-255]
+	_ = x[ImmSource-0]
+	_ = x[RegSource-8]
+}
+
+const (
+	_Source_name_0 = "ImmSource"
+	_Source_name_1 = "RegSource"
+	_Source_name_2 = "InvalidSource"
+)
+
+func (i Source) String() string {
+	switch {
+	case i == 0:
+		return _Source_name_0
+	case i == 8:
+		return _Source_name_1
+	case i == 255:
+		return _Source_name_2
+	default:
+		return "Source(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+}
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[InvalidEndian-255]
+	_ = x[LE-0]
+	_ = x[BE-8]
+}
+
+const (
+	_Endianness_name_0 = "LE"
+	_Endianness_name_1 = "BE"
+	_Endianness_name_2 = "InvalidEndian"
+)
+
+func (i Endianness) String() string {
+	switch {
+	case i == 0:
+		return _Endianness_name_0
+	case i == 8:
+		return _Endianness_name_1
+	case i == 255:
+		return _Endianness_name_2
+	default:
+		return "Endianness(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+}
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[InvalidALUOp-255]
+	_ = x[Add-0]
+	_ = x[Sub-16]
+	_ = x[Mul-32]
+	_ = x[Div-48]
+	_ = x[Or-64]
+	_ = x[And-80]
+	_ = x[LSh-96]
+	_ = x[RSh-112]
+	_ = x[Neg-128]
+	_ = x[Mod-144]
+	_ = x[Xor-160]
+	_ = x[Mov-176]
+	_ = x[ArSh-192]
+	_ = x[Swap-208]
+}
+
+const _ALUOp_name = "AddSubMulDivOrAndLShRShNegModXorMovArShSwapInvalidALUOp"
+
+var _ALUOp_map = map[ALUOp]string{
+	0:   _ALUOp_name[0:3],
+	16:  _ALUOp_name[3:6],
+	32:  _ALUOp_name[6:9],
+	48:  _ALUOp_name[9:12],
+	64:  _ALUOp_name[12:14],
+	80:  _ALUOp_name[14:17],
+	96:  _ALUOp_name[17:20],
+	112: _ALUOp_name[20:23],
+	128: _ALUOp_name[23:26],
+	144: _ALUOp_name[26:29],
+	160: _ALUOp_name[29:32],
+	176: _ALUOp_name[32:35],
+	192: _ALUOp_name[35:39],
+	208: _ALUOp_name[39:43],
+	255: _ALUOp_name[43:55],
+}
+
+func (i ALUOp) String() string {
+	if str, ok := _ALUOp_map[i]; ok {
+		return str
+	}
+	return "ALUOp(" + strconv.FormatInt(int64(i), 10) + ")"
+}
diff --git a/vendor/github.com/cilium/ebpf/asm/doc.go b/vendor/github.com/cilium/ebpf/asm/doc.go
new file mode 100644
index 0000000000..7031bdc276
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/doc.go
@@ -0,0 +1,2 @@
+// Package asm is an assembler for eBPF bytecode.
+package asm diff --git a/vendor/github.com/cilium/ebpf/asm/func.go b/vendor/github.com/cilium/ebpf/asm/func.go new file mode 100644 index 0000000000..bfa5d59c97 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/func.go @@ -0,0 +1,201 @@ +package asm + +//go:generate stringer -output func_string.go -type=BuiltinFunc + +// BuiltinFunc is a built-in eBPF function. +type BuiltinFunc int32 + +// eBPF built-in functions +// +// You can regenerate this list using the following gawk script: +// +// /FN\(.+\),/ { +// match($1, /\((.+)\)/, r) +// split(r[1], p, "_") +// printf "Fn" +// for (i in p) { +// printf "%s%s", toupper(substr(p[i], 1, 1)), substr(p[i], 2) +// } +// print "" +// } +// +// The script expects include/uapi/linux/bpf.h as it's input. +const ( + FnUnspec BuiltinFunc = iota + FnMapLookupElem + FnMapUpdateElem + FnMapDeleteElem + FnProbeRead + FnKtimeGetNs + FnTracePrintk + FnGetPrandomU32 + FnGetSmpProcessorId + FnSkbStoreBytes + FnL3CsumReplace + FnL4CsumReplace + FnTailCall + FnCloneRedirect + FnGetCurrentPidTgid + FnGetCurrentUidGid + FnGetCurrentComm + FnGetCgroupClassid + FnSkbVlanPush + FnSkbVlanPop + FnSkbGetTunnelKey + FnSkbSetTunnelKey + FnPerfEventRead + FnRedirect + FnGetRouteRealm + FnPerfEventOutput + FnSkbLoadBytes + FnGetStackid + FnCsumDiff + FnSkbGetTunnelOpt + FnSkbSetTunnelOpt + FnSkbChangeProto + FnSkbChangeType + FnSkbUnderCgroup + FnGetHashRecalc + FnGetCurrentTask + FnProbeWriteUser + FnCurrentTaskUnderCgroup + FnSkbChangeTail + FnSkbPullData + FnCsumUpdate + FnSetHashInvalid + FnGetNumaNodeId + FnSkbChangeHead + FnXdpAdjustHead + FnProbeReadStr + FnGetSocketCookie + FnGetSocketUid + FnSetHash + FnSetsockopt + FnSkbAdjustRoom + FnRedirectMap + FnSkRedirectMap + FnSockMapUpdate + FnXdpAdjustMeta + FnPerfEventReadValue + FnPerfProgReadValue + FnGetsockopt + FnOverrideReturn + FnSockOpsCbFlagsSet + FnMsgRedirectMap + FnMsgApplyBytes + FnMsgCorkBytes + FnMsgPullData + FnBind + FnXdpAdjustTail + FnSkbGetXfrmState + FnGetStack + FnSkbLoadBytesRelative + FnFibLookup + FnSockHashUpdate + FnMsgRedirectHash + FnSkRedirectHash + FnLwtPushEncap + FnLwtSeg6StoreBytes + FnLwtSeg6AdjustSrh + FnLwtSeg6Action + FnRcRepeat + FnRcKeydown + FnSkbCgroupId + FnGetCurrentCgroupId + FnGetLocalStorage + FnSkSelectReuseport + FnSkbAncestorCgroupId + FnSkLookupTcp + FnSkLookupUdp + FnSkRelease + FnMapPushElem + FnMapPopElem + FnMapPeekElem + FnMsgPushData + FnMsgPopData + FnRcPointerRel + FnSpinLock + FnSpinUnlock + FnSkFullsock + FnTcpSock + FnSkbEcnSetCe + FnGetListenerSock + FnSkcLookupTcp + FnTcpCheckSyncookie + FnSysctlGetName + FnSysctlGetCurrentValue + FnSysctlGetNewValue + FnSysctlSetNewValue + FnStrtol + FnStrtoul + FnSkStorageGet + FnSkStorageDelete + FnSendSignal + FnTcpGenSyncookie + FnSkbOutput + FnProbeReadUser + FnProbeReadKernel + FnProbeReadUserStr + FnProbeReadKernelStr + FnTcpSendAck + FnSendSignalThread + FnJiffies64 + FnReadBranchRecords + FnGetNsCurrentPidTgid + FnXdpOutput + FnGetNetnsCookie + FnGetCurrentAncestorCgroupId + FnSkAssign + FnKtimeGetBootNs + FnSeqPrintf + FnSeqWrite + FnSkCgroupId + FnSkAncestorCgroupId + FnRingbufOutput + FnRingbufReserve + FnRingbufSubmit + FnRingbufDiscard + FnRingbufQuery + FnCsumLevel + FnSkcToTcp6Sock + FnSkcToTcpSock + FnSkcToTcpTimewaitSock + FnSkcToTcpRequestSock + FnSkcToUdp6Sock + FnGetTaskStack + FnLoadHdrOpt + FnStoreHdrOpt + FnReserveHdrOpt + FnInodeStorageGet + FnInodeStorageDelete + FnDPath + FnCopyFromUser + FnSnprintfBtf + FnSeqPrintfBtf + FnSkbCgroupClassid + FnRedirectNeigh + FnPerCpuPtr + FnThisCpuPtr + 
FnRedirectPeer + FnTaskStorageGet + FnTaskStorageDelete + FnGetCurrentTaskBtf + FnBprmOptsSet + FnKtimeGetCoarseNs + FnImaInodeHash + FnSockFromFile + FnCheckMtu + FnForEachMapElem + FnSnprintf + FnSysBpf + FnBtfFindByNameKind + FnSysClose +) + +// Call emits a function call. +func (fn BuiltinFunc) Call() Instruction { + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(Call), + Constant: int64(fn), + } +} diff --git a/vendor/github.com/cilium/ebpf/asm/func_string.go b/vendor/github.com/cilium/ebpf/asm/func_string.go new file mode 100644 index 0000000000..5a0e333639 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/func_string.go @@ -0,0 +1,191 @@ +// Code generated by "stringer -output func_string.go -type=BuiltinFunc"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[FnUnspec-0] + _ = x[FnMapLookupElem-1] + _ = x[FnMapUpdateElem-2] + _ = x[FnMapDeleteElem-3] + _ = x[FnProbeRead-4] + _ = x[FnKtimeGetNs-5] + _ = x[FnTracePrintk-6] + _ = x[FnGetPrandomU32-7] + _ = x[FnGetSmpProcessorId-8] + _ = x[FnSkbStoreBytes-9] + _ = x[FnL3CsumReplace-10] + _ = x[FnL4CsumReplace-11] + _ = x[FnTailCall-12] + _ = x[FnCloneRedirect-13] + _ = x[FnGetCurrentPidTgid-14] + _ = x[FnGetCurrentUidGid-15] + _ = x[FnGetCurrentComm-16] + _ = x[FnGetCgroupClassid-17] + _ = x[FnSkbVlanPush-18] + _ = x[FnSkbVlanPop-19] + _ = x[FnSkbGetTunnelKey-20] + _ = x[FnSkbSetTunnelKey-21] + _ = x[FnPerfEventRead-22] + _ = x[FnRedirect-23] + _ = x[FnGetRouteRealm-24] + _ = x[FnPerfEventOutput-25] + _ = x[FnSkbLoadBytes-26] + _ = x[FnGetStackid-27] + _ = x[FnCsumDiff-28] + _ = x[FnSkbGetTunnelOpt-29] + _ = x[FnSkbSetTunnelOpt-30] + _ = x[FnSkbChangeProto-31] + _ = x[FnSkbChangeType-32] + _ = x[FnSkbUnderCgroup-33] + _ = x[FnGetHashRecalc-34] + _ = x[FnGetCurrentTask-35] + _ = x[FnProbeWriteUser-36] + _ = x[FnCurrentTaskUnderCgroup-37] + _ = x[FnSkbChangeTail-38] + _ = x[FnSkbPullData-39] + _ = x[FnCsumUpdate-40] + _ = x[FnSetHashInvalid-41] + _ = x[FnGetNumaNodeId-42] + _ = x[FnSkbChangeHead-43] + _ = x[FnXdpAdjustHead-44] + _ = x[FnProbeReadStr-45] + _ = x[FnGetSocketCookie-46] + _ = x[FnGetSocketUid-47] + _ = x[FnSetHash-48] + _ = x[FnSetsockopt-49] + _ = x[FnSkbAdjustRoom-50] + _ = x[FnRedirectMap-51] + _ = x[FnSkRedirectMap-52] + _ = x[FnSockMapUpdate-53] + _ = x[FnXdpAdjustMeta-54] + _ = x[FnPerfEventReadValue-55] + _ = x[FnPerfProgReadValue-56] + _ = x[FnGetsockopt-57] + _ = x[FnOverrideReturn-58] + _ = x[FnSockOpsCbFlagsSet-59] + _ = x[FnMsgRedirectMap-60] + _ = x[FnMsgApplyBytes-61] + _ = x[FnMsgCorkBytes-62] + _ = x[FnMsgPullData-63] + _ = x[FnBind-64] + _ = x[FnXdpAdjustTail-65] + _ = x[FnSkbGetXfrmState-66] + _ = x[FnGetStack-67] + _ = x[FnSkbLoadBytesRelative-68] + _ = x[FnFibLookup-69] + _ = x[FnSockHashUpdate-70] + _ = x[FnMsgRedirectHash-71] + _ = x[FnSkRedirectHash-72] + _ = x[FnLwtPushEncap-73] + _ = x[FnLwtSeg6StoreBytes-74] + _ = x[FnLwtSeg6AdjustSrh-75] + _ = x[FnLwtSeg6Action-76] + _ = x[FnRcRepeat-77] + _ = x[FnRcKeydown-78] + _ = x[FnSkbCgroupId-79] + _ = x[FnGetCurrentCgroupId-80] + _ = x[FnGetLocalStorage-81] + _ = x[FnSkSelectReuseport-82] + _ = x[FnSkbAncestorCgroupId-83] + _ = x[FnSkLookupTcp-84] + _ = x[FnSkLookupUdp-85] + _ = x[FnSkRelease-86] + _ = x[FnMapPushElem-87] + _ = x[FnMapPopElem-88] + _ = x[FnMapPeekElem-89] + _ = x[FnMsgPushData-90] + _ = x[FnMsgPopData-91] + _ = x[FnRcPointerRel-92] + _ = 
x[FnSpinLock-93] + _ = x[FnSpinUnlock-94] + _ = x[FnSkFullsock-95] + _ = x[FnTcpSock-96] + _ = x[FnSkbEcnSetCe-97] + _ = x[FnGetListenerSock-98] + _ = x[FnSkcLookupTcp-99] + _ = x[FnTcpCheckSyncookie-100] + _ = x[FnSysctlGetName-101] + _ = x[FnSysctlGetCurrentValue-102] + _ = x[FnSysctlGetNewValue-103] + _ = x[FnSysctlSetNewValue-104] + _ = x[FnStrtol-105] + _ = x[FnStrtoul-106] + _ = x[FnSkStorageGet-107] + _ = x[FnSkStorageDelete-108] + _ = x[FnSendSignal-109] + _ = x[FnTcpGenSyncookie-110] + _ = x[FnSkbOutput-111] + _ = x[FnProbeReadUser-112] + _ = x[FnProbeReadKernel-113] + _ = x[FnProbeReadUserStr-114] + _ = x[FnProbeReadKernelStr-115] + _ = x[FnTcpSendAck-116] + _ = x[FnSendSignalThread-117] + _ = x[FnJiffies64-118] + _ = x[FnReadBranchRecords-119] + _ = x[FnGetNsCurrentPidTgid-120] + _ = x[FnXdpOutput-121] + _ = x[FnGetNetnsCookie-122] + _ = x[FnGetCurrentAncestorCgroupId-123] + _ = x[FnSkAssign-124] + _ = x[FnKtimeGetBootNs-125] + _ = x[FnSeqPrintf-126] + _ = x[FnSeqWrite-127] + _ = x[FnSkCgroupId-128] + _ = x[FnSkAncestorCgroupId-129] + _ = x[FnRingbufOutput-130] + _ = x[FnRingbufReserve-131] + _ = x[FnRingbufSubmit-132] + _ = x[FnRingbufDiscard-133] + _ = x[FnRingbufQuery-134] + _ = x[FnCsumLevel-135] + _ = x[FnSkcToTcp6Sock-136] + _ = x[FnSkcToTcpSock-137] + _ = x[FnSkcToTcpTimewaitSock-138] + _ = x[FnSkcToTcpRequestSock-139] + _ = x[FnSkcToUdp6Sock-140] + _ = x[FnGetTaskStack-141] + _ = x[FnLoadHdrOpt-142] + _ = x[FnStoreHdrOpt-143] + _ = x[FnReserveHdrOpt-144] + _ = x[FnInodeStorageGet-145] + _ = x[FnInodeStorageDelete-146] + _ = x[FnDPath-147] + _ = x[FnCopyFromUser-148] + _ = x[FnSnprintfBtf-149] + _ = x[FnSeqPrintfBtf-150] + _ = x[FnSkbCgroupClassid-151] + _ = x[FnRedirectNeigh-152] + _ = x[FnPerCpuPtr-153] + _ = x[FnThisCpuPtr-154] + _ = x[FnRedirectPeer-155] + _ = x[FnTaskStorageGet-156] + _ = x[FnTaskStorageDelete-157] + _ = x[FnGetCurrentTaskBtf-158] + _ = x[FnBprmOptsSet-159] + _ = x[FnKtimeGetCoarseNs-160] + _ = x[FnImaInodeHash-161] + _ = x[FnSockFromFile-162] + _ = x[FnCheckMtu-163] + _ = x[FnForEachMapElem-164] + _ = x[FnSnprintf-165] + _ = x[FnSysBpf-166] + _ = x[FnBtfFindByNameKind-167] + _ = x[FnSysClose-168] +} + +const _BuiltinFunc_name = 
"FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFileFnCheckMtuFnForEachMapElemFnSnprintfFnSysBpfFnBtfFindByNameKindFnSysClose" + +var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424, 2434, 2450, 2460, 2468, 2487, 2497} + +func (i BuiltinFunc) String() string { + if i < 0 || i >= 
BuiltinFunc(len(_BuiltinFunc_index)-1) { + return "BuiltinFunc(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _BuiltinFunc_name[_BuiltinFunc_index[i]:_BuiltinFunc_index[i+1]] +} diff --git a/vendor/github.com/cilium/ebpf/asm/instruction.go b/vendor/github.com/cilium/ebpf/asm/instruction.go new file mode 100644 index 0000000000..64d717d156 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/instruction.go @@ -0,0 +1,511 @@ +package asm + +import ( + "crypto/sha1" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "io" + "math" + "strings" + + "github.com/cilium/ebpf/internal/unix" +) + +// InstructionSize is the size of a BPF instruction in bytes +const InstructionSize = 8 + +// RawInstructionOffset is an offset in units of raw BPF instructions. +type RawInstructionOffset uint64 + +// Bytes returns the offset of an instruction in bytes. +func (rio RawInstructionOffset) Bytes() uint64 { + return uint64(rio) * InstructionSize +} + +// Instruction is a single eBPF instruction. +type Instruction struct { + OpCode OpCode + Dst Register + Src Register + Offset int16 + Constant int64 + Reference string + Symbol string +} + +// Sym creates a symbol. +func (ins Instruction) Sym(name string) Instruction { + ins.Symbol = name + return ins +} + +// Unmarshal decodes a BPF instruction. +func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, error) { + var bi bpfInstruction + err := binary.Read(r, bo, &bi) + if err != nil { + return 0, err + } + + ins.OpCode = bi.OpCode + ins.Offset = bi.Offset + ins.Constant = int64(bi.Constant) + ins.Dst, ins.Src, err = bi.Registers.Unmarshal(bo) + if err != nil { + return 0, fmt.Errorf("can't unmarshal registers: %s", err) + } + + if !bi.OpCode.IsDWordLoad() { + return InstructionSize, nil + } + + var bi2 bpfInstruction + if err := binary.Read(r, bo, &bi2); err != nil { + // No Wrap, to avoid io.EOF clash + return 0, errors.New("64bit immediate is missing second half") + } + if bi2.OpCode != 0 || bi2.Offset != 0 || bi2.Registers != 0 { + return 0, errors.New("64bit immediate has non-zero fields") + } + ins.Constant = int64(uint64(uint32(bi2.Constant))<<32 | uint64(uint32(bi.Constant))) + + return 2 * InstructionSize, nil +} + +// Marshal encodes a BPF instruction. +func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) { + if ins.OpCode == InvalidOpCode { + return 0, errors.New("invalid opcode") + } + + isDWordLoad := ins.OpCode.IsDWordLoad() + + cons := int32(ins.Constant) + if isDWordLoad { + // Encode least significant 32bit first for 64bit operations. + cons = int32(uint32(ins.Constant)) + } + + regs, err := newBPFRegisters(ins.Dst, ins.Src, bo) + if err != nil { + return 0, fmt.Errorf("can't marshal registers: %s", err) + } + + bpfi := bpfInstruction{ + ins.OpCode, + regs, + ins.Offset, + cons, + } + + if err := binary.Write(w, bo, &bpfi); err != nil { + return 0, err + } + + if !isDWordLoad { + return InstructionSize, nil + } + + bpfi = bpfInstruction{ + Constant: int32(ins.Constant >> 32), + } + + if err := binary.Write(w, bo, &bpfi); err != nil { + return 0, err + } + + return 2 * InstructionSize, nil +} + +// RewriteMapPtr changes an instruction to use a new map fd. +// +// Returns an error if the instruction doesn't load a map. 
+func (ins *Instruction) RewriteMapPtr(fd int) error {
+	if !ins.OpCode.IsDWordLoad() {
+		return fmt.Errorf("%s is not a 64 bit load", ins.OpCode)
+	}
+
+	if ins.Src != PseudoMapFD && ins.Src != PseudoMapValue {
+		return errors.New("not a load from a map")
+	}
+
+	// Preserve the offset value for direct map loads.
+	offset := uint64(ins.Constant) & (math.MaxUint32 << 32)
+	rawFd := uint64(uint32(fd))
+	ins.Constant = int64(offset | rawFd)
+	return nil
+}
+
+// MapPtr returns the map fd for this instruction.
+//
+// The result is undefined if the instruction is not a load from a map,
+// see IsLoadFromMap.
+func (ins *Instruction) MapPtr() int {
+	return int(int32(uint64(ins.Constant) & math.MaxUint32))
+}
+
+// RewriteMapOffset changes the offset of a direct load from a map.
+//
+// Returns an error if the instruction is not a direct load.
+func (ins *Instruction) RewriteMapOffset(offset uint32) error {
+	if !ins.OpCode.IsDWordLoad() {
+		return fmt.Errorf("%s is not a 64 bit load", ins.OpCode)
+	}
+
+	if ins.Src != PseudoMapValue {
+		return errors.New("not a direct load from a map")
+	}
+
+	fd := uint64(ins.Constant) & math.MaxUint32
+	ins.Constant = int64(uint64(offset)<<32 | fd)
+	return nil
+}
+
+func (ins *Instruction) mapOffset() uint32 {
+	return uint32(uint64(ins.Constant) >> 32)
+}
+
+// IsLoadFromMap returns true if the instruction loads from a map.
+//
+// This covers both loading the map pointer and direct map value loads.
+func (ins *Instruction) IsLoadFromMap() bool {
+	return ins.OpCode == LoadImmOp(DWord) && (ins.Src == PseudoMapFD || ins.Src == PseudoMapValue)
+}
+
+// IsFunctionCall returns true if the instruction calls another BPF function.
+//
+// This is not the same thing as a BPF helper call.
+func (ins *Instruction) IsFunctionCall() bool {
+	return ins.OpCode.JumpOp() == Call && ins.Src == PseudoCall
+}
+
+// IsBuiltinCall returns true if the instruction is a built-in call, i.e. BPF helper call.
+func (ins *Instruction) IsBuiltinCall() bool {
+	return ins.OpCode.JumpOp() == Call && ins.Src == R0 && ins.Dst == R0
+}
+
+// IsConstantLoad returns true if the instruction loads a constant of the
+// given size.
+func (ins *Instruction) IsConstantLoad(size Size) bool {
+	return ins.OpCode == LoadImmOp(size) && ins.Src == R0 && ins.Offset == 0
+}
+
+// Format implements fmt.Formatter.
+func (ins Instruction) Format(f fmt.State, c rune) { + if c != 'v' { + fmt.Fprintf(f, "{UNRECOGNIZED: %c}", c) + return + } + + op := ins.OpCode + + if op == InvalidOpCode { + fmt.Fprint(f, "INVALID") + return + } + + // Omit trailing space for Exit + if op.JumpOp() == Exit { + fmt.Fprint(f, op) + return + } + + if ins.IsLoadFromMap() { + fd := ins.MapPtr() + switch ins.Src { + case PseudoMapFD: + fmt.Fprintf(f, "LoadMapPtr dst: %s fd: %d", ins.Dst, fd) + + case PseudoMapValue: + fmt.Fprintf(f, "LoadMapValue dst: %s, fd: %d off: %d", ins.Dst, fd, ins.mapOffset()) + } + + goto ref + } + + fmt.Fprintf(f, "%v ", op) + switch cls := op.Class(); cls { + case LdClass, LdXClass, StClass, StXClass: + switch op.Mode() { + case ImmMode: + fmt.Fprintf(f, "dst: %s imm: %d", ins.Dst, ins.Constant) + case AbsMode: + fmt.Fprintf(f, "imm: %d", ins.Constant) + case IndMode: + fmt.Fprintf(f, "dst: %s src: %s imm: %d", ins.Dst, ins.Src, ins.Constant) + case MemMode: + fmt.Fprintf(f, "dst: %s src: %s off: %d imm: %d", ins.Dst, ins.Src, ins.Offset, ins.Constant) + case XAddMode: + fmt.Fprintf(f, "dst: %s src: %s", ins.Dst, ins.Src) + } + + case ALU64Class, ALUClass: + fmt.Fprintf(f, "dst: %s ", ins.Dst) + if op.ALUOp() == Swap || op.Source() == ImmSource { + fmt.Fprintf(f, "imm: %d", ins.Constant) + } else { + fmt.Fprintf(f, "src: %s", ins.Src) + } + + case JumpClass: + switch jop := op.JumpOp(); jop { + case Call: + if ins.Src == PseudoCall { + // bpf-to-bpf call + fmt.Fprint(f, ins.Constant) + } else { + fmt.Fprint(f, BuiltinFunc(ins.Constant)) + } + + default: + fmt.Fprintf(f, "dst: %s off: %d ", ins.Dst, ins.Offset) + if op.Source() == ImmSource { + fmt.Fprintf(f, "imm: %d", ins.Constant) + } else { + fmt.Fprintf(f, "src: %s", ins.Src) + } + } + } + +ref: + if ins.Reference != "" { + fmt.Fprintf(f, " <%s>", ins.Reference) + } +} + +// Instructions is an eBPF program. +type Instructions []Instruction + +func (insns Instructions) String() string { + return fmt.Sprint(insns) +} + +// RewriteMapPtr rewrites all loads of a specific map pointer to a new fd. +// +// Returns an error if the symbol isn't used, see IsUnreferencedSymbol. +func (insns Instructions) RewriteMapPtr(symbol string, fd int) error { + if symbol == "" { + return errors.New("empty symbol") + } + + found := false + for i := range insns { + ins := &insns[i] + if ins.Reference != symbol { + continue + } + + if err := ins.RewriteMapPtr(fd); err != nil { + return err + } + + found = true + } + + if !found { + return &unreferencedSymbolError{symbol} + } + + return nil +} + +// SymbolOffsets returns the set of symbols and their offset in +// the instructions. +func (insns Instructions) SymbolOffsets() (map[string]int, error) { + offsets := make(map[string]int) + + for i, ins := range insns { + if ins.Symbol == "" { + continue + } + + if _, ok := offsets[ins.Symbol]; ok { + return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol) + } + + offsets[ins.Symbol] = i + } + + return offsets, nil +} + +// ReferenceOffsets returns the set of references and their offset in +// the instructions. +func (insns Instructions) ReferenceOffsets() map[string][]int { + offsets := make(map[string][]int) + + for i, ins := range insns { + if ins.Reference == "" { + continue + } + + offsets[ins.Reference] = append(offsets[ins.Reference], i) + } + + return offsets +} + +// Format implements fmt.Formatter. +// +// You can control indentation of symbols by +// specifying a width. Setting a precision controls the indentation of +// instructions. 
+// The default character is a tab, which can be overridden by specifying +// the ' ' space flag. +func (insns Instructions) Format(f fmt.State, c rune) { + if c != 's' && c != 'v' { + fmt.Fprintf(f, "{UNKNOWN FORMAT '%c'}", c) + return + } + + // Precision is better in this case, because it allows + // specifying 0 padding easily. + padding, ok := f.Precision() + if !ok { + padding = 1 + } + + indent := strings.Repeat("\t", padding) + if f.Flag(' ') { + indent = strings.Repeat(" ", padding) + } + + symPadding, ok := f.Width() + if !ok { + symPadding = padding - 1 + } + if symPadding < 0 { + symPadding = 0 + } + + symIndent := strings.Repeat("\t", symPadding) + if f.Flag(' ') { + symIndent = strings.Repeat(" ", symPadding) + } + + // Guess how many digits we need at most, by assuming that all instructions + // are double wide. + highestOffset := len(insns) * 2 + offsetWidth := int(math.Ceil(math.Log10(float64(highestOffset)))) + + iter := insns.Iterate() + for iter.Next() { + if iter.Ins.Symbol != "" { + fmt.Fprintf(f, "%s%s:\n", symIndent, iter.Ins.Symbol) + } + fmt.Fprintf(f, "%s%*d: %v\n", indent, offsetWidth, iter.Offset, iter.Ins) + } +} + +// Marshal encodes a BPF program into the kernel format. +func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error { + for i, ins := range insns { + _, err := ins.Marshal(w, bo) + if err != nil { + return fmt.Errorf("instruction %d: %w", i, err) + } + } + return nil +} + +// Tag calculates the kernel tag for a series of instructions. +// +// It mirrors bpf_prog_calc_tag in the kernel and so can be compared +// to ProgramInfo.Tag to figure out whether a loaded program matches +// certain instructions. +func (insns Instructions) Tag(bo binary.ByteOrder) (string, error) { + h := sha1.New() + for i, ins := range insns { + if ins.IsLoadFromMap() { + ins.Constant = 0 + } + _, err := ins.Marshal(h, bo) + if err != nil { + return "", fmt.Errorf("instruction %d: %w", i, err) + } + } + return hex.EncodeToString(h.Sum(nil)[:unix.BPF_TAG_SIZE]), nil +} + +// Iterate allows iterating a BPF program while keeping track of +// various offsets. +// +// Modifying the instruction slice will lead to undefined behaviour. +func (insns Instructions) Iterate() *InstructionIterator { + return &InstructionIterator{insns: insns} +} + +// InstructionIterator iterates over a BPF program. +type InstructionIterator struct { + insns Instructions + // The instruction in question. + Ins *Instruction + // The index of the instruction in the original instruction slice. + Index int + // The offset of the instruction in raw BPF instructions. This accounts + // for double-wide instructions. + Offset RawInstructionOffset +} + +// Next returns true as long as there are any instructions remaining. 
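+// +// A typical loop over a program then looks like this (sketch): +// +//	iter := insns.Iterate() +//	for iter.Next() { +//		fmt.Println(iter.Offset, iter.Index, iter.Ins) +//	}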
+func (iter *InstructionIterator) Next() bool { + if len(iter.insns) == 0 { + return false + } + + if iter.Ins != nil { + iter.Index++ + iter.Offset += RawInstructionOffset(iter.Ins.OpCode.rawInstructions()) + } + iter.Ins = &iter.insns[0] + iter.insns = iter.insns[1:] + return true +} + +type bpfInstruction struct { + OpCode OpCode + Registers bpfRegisters + Offset int16 + Constant int32 +} + +type bpfRegisters uint8 + +func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, error) { + switch bo { + case binary.LittleEndian: + return bpfRegisters((src << 4) | (dst & 0xF)), nil + case binary.BigEndian: + return bpfRegisters((dst << 4) | (src & 0xF)), nil + default: + return 0, fmt.Errorf("unrecognized ByteOrder %T", bo) + } +} + +func (r bpfRegisters) Unmarshal(bo binary.ByteOrder) (dst, src Register, err error) { + switch bo { + case binary.LittleEndian: + return Register(r & 0xF), Register(r >> 4), nil + case binary.BigEndian: + return Register(r >> 4), Register(r & 0xF), nil + default: + return 0, 0, fmt.Errorf("unrecognized ByteOrder %T", bo) + } +} + +type unreferencedSymbolError struct { + symbol string +} + +func (use *unreferencedSymbolError) Error() string { + return fmt.Sprintf("unreferenced symbol %s", use.symbol) +} + +// IsUnreferencedSymbol returns true if err was caused by +// an unreferenced symbol. +func IsUnreferencedSymbol(err error) bool { + _, ok := err.(*unreferencedSymbolError) + return ok +} diff --git a/vendor/github.com/cilium/ebpf/asm/jump.go b/vendor/github.com/cilium/ebpf/asm/jump.go new file mode 100644 index 0000000000..7757179de6 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/jump.go @@ -0,0 +1,109 @@ +package asm + +//go:generate stringer -output jump_string.go -type=JumpOp + +// JumpOp affects control flow. +// +// msb lsb +// +----+-+---+ +// |OP |s|cls| +// +----+-+---+ +type JumpOp uint8 + +const jumpMask OpCode = aluMask + +const ( + // InvalidJumpOp is returned by getters when invoked + // on non-branch OpCodes + InvalidJumpOp JumpOp = 0xff + // Ja jumps by offset unconditionally + Ja JumpOp = 0x00 + // JEq jumps by offset if r == imm + JEq JumpOp = 0x10 + // JGT jumps by offset if r > imm + JGT JumpOp = 0x20 + // JGE jumps by offset if r >= imm + JGE JumpOp = 0x30 + // JSet jumps by offset if r & imm + JSet JumpOp = 0x40 + // JNE jumps by offset if r != imm + JNE JumpOp = 0x50 + // JSGT jumps by offset if signed r > signed imm + JSGT JumpOp = 0x60 + // JSGE jumps by offset if signed r >= signed imm + JSGE JumpOp = 0x70 + // Call builtin or user-defined function from imm + Call JumpOp = 0x80 + // Exit ends execution, with value in r0 + Exit JumpOp = 0x90 + // JLT jumps by offset if r < imm + JLT JumpOp = 0xa0 + // JLE jumps by offset if r <= imm + JLE JumpOp = 0xb0 + // JSLT jumps by offset if signed r < signed imm + JSLT JumpOp = 0xc0 + // JSLE jumps by offset if signed r <= signed imm + JSLE JumpOp = 0xd0 +) + +// Return emits an exit instruction. +// +// Requires a return value in R0. +func Return() Instruction { + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(Exit), + } +} + +// Op returns the OpCode for a given jump source. +func (op JumpOp) Op(source Source) OpCode { + return OpCode(JumpClass).SetJumpOp(op).SetSource(source) +}
 + +// Imm compares dst to value, and adjusts PC by offset if the condition is fulfilled.
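+// +// For example, "jump to the exit label if r0 == 0" can be written as (sketch): +// +//	ins := asm.JEq.Imm(asm.R0, 0, "exit")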
+func (op JumpOp) Imm(dst Register, value int32, label string) Instruction { + if op == Exit || op == Call || op == Ja { + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(op).SetSource(ImmSource), + Dst: dst, + Offset: -1, + Constant: int64(value), + Reference: label, + } +} + +// Reg compares dst to src, and adjusts PC by offset if the condition is fulfilled. +func (op JumpOp) Reg(dst, src Register, label string) Instruction { + if op == Exit || op == Call || op == Ja { + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(op).SetSource(RegSource), + Dst: dst, + Src: src, + Offset: -1, + Reference: label, + } +} + +// Label adjusts PC to the address of the label. +func (op JumpOp) Label(label string) Instruction { + if op == Call { + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(Call), + Src: PseudoCall, + Constant: -1, + Reference: label, + } + } + + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(op), + Offset: -1, + Reference: label, + } +} diff --git a/vendor/github.com/cilium/ebpf/asm/jump_string.go b/vendor/github.com/cilium/ebpf/asm/jump_string.go new file mode 100644 index 0000000000..85a4aaffa5 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/jump_string.go @@ -0,0 +1,53 @@ +// Code generated by "stringer -output jump_string.go -type=JumpOp"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[InvalidJumpOp-255] + _ = x[Ja-0] + _ = x[JEq-16] + _ = x[JGT-32] + _ = x[JGE-48] + _ = x[JSet-64] + _ = x[JNE-80] + _ = x[JSGT-96] + _ = x[JSGE-112] + _ = x[Call-128] + _ = x[Exit-144] + _ = x[JLT-160] + _ = x[JLE-176] + _ = x[JSLT-192] + _ = x[JSLE-208] +} + +const _JumpOp_name = "JaJEqJGTJGEJSetJNEJSGTJSGECallExitJLTJLEJSLTJSLEInvalidJumpOp" + +var _JumpOp_map = map[JumpOp]string{ + 0: _JumpOp_name[0:2], + 16: _JumpOp_name[2:5], + 32: _JumpOp_name[5:8], + 48: _JumpOp_name[8:11], + 64: _JumpOp_name[11:15], + 80: _JumpOp_name[15:18], + 96: _JumpOp_name[18:22], + 112: _JumpOp_name[22:26], + 128: _JumpOp_name[26:30], + 144: _JumpOp_name[30:34], + 160: _JumpOp_name[34:37], + 176: _JumpOp_name[37:40], + 192: _JumpOp_name[40:44], + 208: _JumpOp_name[44:48], + 255: _JumpOp_name[48:61], +} + +func (i JumpOp) String() string { + if str, ok := _JumpOp_map[i]; ok { + return str + } + return "JumpOp(" + strconv.FormatInt(int64(i), 10) + ")" +} diff --git a/vendor/github.com/cilium/ebpf/asm/load_store.go b/vendor/github.com/cilium/ebpf/asm/load_store.go new file mode 100644 index 0000000000..85ed286b02 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/load_store.go @@ -0,0 +1,204 @@ +package asm + +//go:generate stringer -output load_store_string.go -type=Mode,Size + +// Mode for load and store operations +// +// msb lsb +// +---+--+---+ +// |MDE|sz|cls| +// +---+--+---+ +type Mode uint8 + +const modeMask OpCode = 0xe0 + +const ( + // InvalidMode is returned by getters when invoked + // on non load / store OpCodes + InvalidMode Mode = 0xff + // ImmMode - immediate value + ImmMode Mode = 0x00 + // AbsMode - immediate value + offset + AbsMode Mode = 0x20 + // IndMode - indirect (imm+src) + IndMode Mode = 0x40 + // MemMode - load from memory + MemMode Mode = 0x60 + // XAddMode - add atomically across processors. 
+ XAddMode Mode = 0xc0 +) + +// Size of load and store operations +// +// msb lsb +// +---+--+---+ +// |mde|SZ|cls| +// +---+--+---+ +type Size uint8 + +const sizeMask OpCode = 0x18 + +const ( + // InvalidSize is returned by getters when invoked + // on non load / store OpCodes + InvalidSize Size = 0xff + // DWord - double word; 64 bits + DWord Size = 0x18 + // Word - word; 32 bits + Word Size = 0x00 + // Half - half-word; 16 bits + Half Size = 0x08 + // Byte - byte; 8 bits + Byte Size = 0x10 +) + +// Sizeof returns the size in bytes. +func (s Size) Sizeof() int { + switch s { + case DWord: + return 8 + case Word: + return 4 + case Half: + return 2 + case Byte: + return 1 + default: + return -1 + } +} + +// LoadMemOp returns the OpCode to load a value of given size from memory. +func LoadMemOp(size Size) OpCode { + return OpCode(LdXClass).SetMode(MemMode).SetSize(size) +} + +// LoadMem emits `dst = *(size *)(src + offset)`. +func LoadMem(dst, src Register, offset int16, size Size) Instruction { + return Instruction{ + OpCode: LoadMemOp(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + +// LoadImmOp returns the OpCode to load an immediate of given size. +// +// As of kernel 4.20, only DWord size is accepted. +func LoadImmOp(size Size) OpCode { + return OpCode(LdClass).SetMode(ImmMode).SetSize(size) +} + +// LoadImm emits `dst = (size)value`. +// +// As of kernel 4.20, only DWord size is accepted. +func LoadImm(dst Register, value int64, size Size) Instruction { + return Instruction{ + OpCode: LoadImmOp(size), + Dst: dst, + Constant: value, + } +} + +// LoadMapPtr stores a pointer to a map in dst. +func LoadMapPtr(dst Register, fd int) Instruction { + if fd < 0 { + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: LoadImmOp(DWord), + Dst: dst, + Src: PseudoMapFD, + Constant: int64(uint32(fd)), + } +} + +// LoadMapValue stores a pointer to the value at a certain offset of a map. +func LoadMapValue(dst Register, fd int, offset uint32) Instruction { + if fd < 0 { + return Instruction{OpCode: InvalidOpCode} + } + + fdAndOffset := (uint64(offset) << 32) | uint64(uint32(fd)) + return Instruction{ + OpCode: LoadImmOp(DWord), + Dst: dst, + Src: PseudoMapValue, + Constant: int64(fdAndOffset), + } +} + +// LoadIndOp returns the OpCode for loading a value of given size from an sk_buff. +func LoadIndOp(size Size) OpCode { + return OpCode(LdClass).SetMode(IndMode).SetSize(size) +} + +// LoadInd emits `dst = ntoh(*(size *)(((sk_buff *)R6)->data + src + offset))`. +func LoadInd(dst, src Register, offset int32, size Size) Instruction { + return Instruction{ + OpCode: LoadIndOp(size), + Dst: dst, + Src: src, + Constant: int64(offset), + } +} + +// LoadAbsOp returns the OpCode for loading a value of given size from an sk_buff. +func LoadAbsOp(size Size) OpCode { + return OpCode(LdClass).SetMode(AbsMode).SetSize(size) +} + +// LoadAbs emits `r0 = ntoh(*(size *)(((sk_buff *)R6)->data + offset))`. +func LoadAbs(offset int32, size Size) Instruction { + return Instruction{ + OpCode: LoadAbsOp(size), + Dst: R0, + Constant: int64(offset), + } +} + +// StoreMemOp returns the OpCode for storing a register of given size in memory. 
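+// +// StoreMemOp and StoreMem mirror LoadMemOp and LoadMem; for example, spilling +// r1 to the stack, *(u64 *)(r10 - 8) = r1, can be written as (sketch): +// +//	ins := asm.StoreMem(asm.RFP, -8, asm.R1, asm.DWord)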
+func StoreMemOp(size Size) OpCode { + return OpCode(StXClass).SetMode(MemMode).SetSize(size) +} + +// StoreMem emits `*(size *)(dst + offset) = src` +func StoreMem(dst Register, offset int16, src Register, size Size) Instruction { + return Instruction{ + OpCode: StoreMemOp(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + +// StoreImmOp returns the OpCode for storing an immediate of given size in memory. +func StoreImmOp(size Size) OpCode { + return OpCode(StClass).SetMode(MemMode).SetSize(size) +} + +// StoreImm emits `*(size *)(dst + offset) = value`. +func StoreImm(dst Register, offset int16, value int64, size Size) Instruction { + return Instruction{ + OpCode: StoreImmOp(size), + Dst: dst, + Offset: offset, + Constant: value, + } +} + +// StoreXAddOp returns the OpCode to atomically add a register to a value in memory. +func StoreXAddOp(size Size) OpCode { + return OpCode(StXClass).SetMode(XAddMode).SetSize(size) +} + +// StoreXAdd atomically adds src to *dst. +func StoreXAdd(dst, src Register, size Size) Instruction { + return Instruction{ + OpCode: StoreXAddOp(size), + Dst: dst, + Src: src, + } +} diff --git a/vendor/github.com/cilium/ebpf/asm/load_store_string.go b/vendor/github.com/cilium/ebpf/asm/load_store_string.go new file mode 100644 index 0000000000..76d29a0756 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/load_store_string.go @@ -0,0 +1,80 @@ +// Code generated by "stringer -output load_store_string.go -type=Mode,Size"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[InvalidMode-255] + _ = x[ImmMode-0] + _ = x[AbsMode-32] + _ = x[IndMode-64] + _ = x[MemMode-96] + _ = x[XAddMode-192] +} + +const ( + _Mode_name_0 = "ImmMode" + _Mode_name_1 = "AbsMode" + _Mode_name_2 = "IndMode" + _Mode_name_3 = "MemMode" + _Mode_name_4 = "XAddMode" + _Mode_name_5 = "InvalidMode" +) + +func (i Mode) String() string { + switch { + case i == 0: + return _Mode_name_0 + case i == 32: + return _Mode_name_1 + case i == 64: + return _Mode_name_2 + case i == 96: + return _Mode_name_3 + case i == 192: + return _Mode_name_4 + case i == 255: + return _Mode_name_5 + default: + return "Mode(" + strconv.FormatInt(int64(i), 10) + ")" + } +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[InvalidSize-255] + _ = x[DWord-24] + _ = x[Word-0] + _ = x[Half-8] + _ = x[Byte-16] +} + +const ( + _Size_name_0 = "Word" + _Size_name_1 = "Half" + _Size_name_2 = "Byte" + _Size_name_3 = "DWord" + _Size_name_4 = "InvalidSize" +) + +func (i Size) String() string { + switch { + case i == 0: + return _Size_name_0 + case i == 8: + return _Size_name_1 + case i == 16: + return _Size_name_2 + case i == 24: + return _Size_name_3 + case i == 255: + return _Size_name_4 + default: + return "Size(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/cilium/ebpf/asm/opcode.go b/vendor/github.com/cilium/ebpf/asm/opcode.go new file mode 100644 index 0000000000..6edc3cf591 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/opcode.go @@ -0,0 +1,237 @@ +package asm + +import ( + "fmt" + "strings" +) + +//go:generate stringer -output opcode_string.go -type=Class + +type encoding int + +const ( + unknownEncoding encoding = iota + loadOrStore + jumpOrALU +) + +// Class of operations +// +// msb lsb +// +---+--+---+ +// | ?? |CLS| +// +---+--+---+ +type Class uint8 + +const classMask OpCode = 0x07 + +const ( + // LdClass load memory + LdClass Class = 0x00 + // LdXClass load memory from constant + LdXClass Class = 0x01 + // StClass load register from memory + StClass Class = 0x02 + // StXClass load register from constant + StXClass Class = 0x03 + // ALUClass arithmetic operators + ALUClass Class = 0x04 + // JumpClass jump operators + JumpClass Class = 0x05 + // ALU64Class arithmetic in 64 bit mode + ALU64Class Class = 0x07 +) + +func (cls Class) encoding() encoding { + switch cls { + case LdClass, LdXClass, StClass, StXClass: + return loadOrStore + case ALU64Class, ALUClass, JumpClass: + return jumpOrALU + default: + return unknownEncoding + } +} + +// OpCode is a packed eBPF opcode. +// +// Its encoding is defined by a Class value: +// +// msb lsb +// +----+-+---+ +// | ???? |CLS| +// +----+-+---+ +type OpCode uint8 + +// InvalidOpCode is returned by setters on OpCode +const InvalidOpCode OpCode = 0xff + +// rawInstructions returns the number of BPF instructions required +// to encode this opcode. +func (op OpCode) rawInstructions() int { + if op.IsDWordLoad() { + return 2 + } + return 1 +} + +func (op OpCode) IsDWordLoad() bool { + return op == LoadImmOp(DWord) +} + +// Class returns the class of operation. +func (op OpCode) Class() Class { + return Class(op & classMask) +} + +// Mode returns the mode for load and store operations. +func (op OpCode) Mode() Mode { + if op.Class().encoding() != loadOrStore { + return InvalidMode + } + return Mode(op & modeMask) +} + +// Size returns the size for load and store operations. +func (op OpCode) Size() Size { + if op.Class().encoding() != loadOrStore { + return InvalidSize + } + return Size(op & sizeMask) +} + +// Source returns the source for branch and ALU operations. +func (op OpCode) Source() Source { + if op.Class().encoding() != jumpOrALU || op.ALUOp() == Swap { + return InvalidSource + } + return Source(op & sourceMask) +} + +// ALUOp returns the ALUOp. +func (op OpCode) ALUOp() ALUOp { + if op.Class().encoding() != jumpOrALU { + return InvalidALUOp + } + return ALUOp(op & aluMask) +} + +// Endianness returns the Endianness for a byte swap instruction. +func (op OpCode) Endianness() Endianness { + if op.ALUOp() != Swap { + return InvalidEndian + } + return Endianness(op & endianMask) +} + +// JumpOp returns the JumpOp. 
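+// +// Returns InvalidJumpOp if op does not belong to the jump or ALU classes.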
+func (op OpCode) JumpOp() JumpOp { + if op.Class().encoding() != jumpOrALU { + return InvalidJumpOp + } + return JumpOp(op & jumpMask) +} + +// SetMode sets the mode on load and store operations. +// +// Returns InvalidOpCode if op is of the wrong class. +func (op OpCode) SetMode(mode Mode) OpCode { + if op.Class().encoding() != loadOrStore || !valid(OpCode(mode), modeMask) { + return InvalidOpCode + } + return (op & ^modeMask) | OpCode(mode) +} + +// SetSize sets the size on load and store operations. +// +// Returns InvalidOpCode if op is of the wrong class. +func (op OpCode) SetSize(size Size) OpCode { + if op.Class().encoding() != loadOrStore || !valid(OpCode(size), sizeMask) { + return InvalidOpCode + } + return (op & ^sizeMask) | OpCode(size) +} + +// SetSource sets the source on jump and ALU operations. +// +// Returns InvalidOpCode if op is of the wrong class. +func (op OpCode) SetSource(source Source) OpCode { + if op.Class().encoding() != jumpOrALU || !valid(OpCode(source), sourceMask) { + return InvalidOpCode + } + return (op & ^sourceMask) | OpCode(source) +} + +// SetALUOp sets the ALUOp on ALU operations. +// +// Returns InvalidOpCode if op is of the wrong class. +func (op OpCode) SetALUOp(alu ALUOp) OpCode { + class := op.Class() + if (class != ALUClass && class != ALU64Class) || !valid(OpCode(alu), aluMask) { + return InvalidOpCode + } + return (op & ^aluMask) | OpCode(alu) +} + +// SetJumpOp sets the JumpOp on jump operations. +// +// Returns InvalidOpCode if op is of the wrong class. +func (op OpCode) SetJumpOp(jump JumpOp) OpCode { + if op.Class() != JumpClass || !valid(OpCode(jump), jumpMask) { + return InvalidOpCode + } + return (op & ^jumpMask) | OpCode(jump) +} + +func (op OpCode) String() string { + var f strings.Builder + + switch class := op.Class(); class { + case LdClass, LdXClass, StClass, StXClass: + f.WriteString(strings.TrimSuffix(class.String(), "Class")) + + mode := op.Mode() + f.WriteString(strings.TrimSuffix(mode.String(), "Mode")) + + switch op.Size() { + case DWord: + f.WriteString("DW") + case Word: + f.WriteString("W") + case Half: + f.WriteString("H") + case Byte: + f.WriteString("B") + } + + case ALU64Class, ALUClass: + f.WriteString(op.ALUOp().String()) + + if op.ALUOp() == Swap { + // Width for Endian is controlled by Constant + f.WriteString(op.Endianness().String()) + } else { + if class == ALUClass { + f.WriteString("32") + } + + f.WriteString(strings.TrimSuffix(op.Source().String(), "Source")) + } + + case JumpClass: + f.WriteString(op.JumpOp().String()) + if jop := op.JumpOp(); jop != Exit && jop != Call { + f.WriteString(strings.TrimSuffix(op.Source().String(), "Source")) + } + + default: + fmt.Fprintf(&f, "OpCode(%#x)", uint8(op)) + } + + return f.String() +} + +// valid returns true if all bits in value are covered by mask. +func valid(value, mask OpCode) bool { + return value & ^mask == 0 +} diff --git a/vendor/github.com/cilium/ebpf/asm/opcode_string.go b/vendor/github.com/cilium/ebpf/asm/opcode_string.go new file mode 100644 index 0000000000..079ce1db0b --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/opcode_string.go @@ -0,0 +1,38 @@ +// Code generated by "stringer -output opcode_string.go -type=Class"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[LdClass-0] + _ = x[LdXClass-1] + _ = x[StClass-2] + _ = x[StXClass-3] + _ = x[ALUClass-4] + _ = x[JumpClass-5] + _ = x[ALU64Class-7] +} + +const ( + _Class_name_0 = "LdClassLdXClassStClassStXClassALUClassJumpClass" + _Class_name_1 = "ALU64Class" +) + +var ( + _Class_index_0 = [...]uint8{0, 7, 15, 22, 30, 38, 47} +) + +func (i Class) String() string { + switch { + case 0 <= i && i <= 5: + return _Class_name_0[_Class_index_0[i]:_Class_index_0[i+1]] + case i == 7: + return _Class_name_1 + default: + return "Class(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/cilium/ebpf/asm/register.go b/vendor/github.com/cilium/ebpf/asm/register.go new file mode 100644 index 0000000000..76cb44bffc --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/register.go @@ -0,0 +1,49 @@ +package asm + +import ( + "fmt" +) + +// Register is the source or destination of most operations. +type Register uint8 + +// R0 contains return values. +const R0 Register = 0 + +// Registers for function arguments. +const ( + R1 Register = R0 + 1 + iota + R2 + R3 + R4 + R5 +) + +// Callee saved registers preserved by function calls. +const ( + R6 Register = R5 + 1 + iota + R7 + R8 + R9 +) + +// Read-only frame pointer to access stack. +const ( + R10 Register = R9 + 1 + RFP = R10 +) + +// Pseudo registers used by 64bit loads and jumps +const ( + PseudoMapFD = R1 // BPF_PSEUDO_MAP_FD + PseudoMapValue = R2 // BPF_PSEUDO_MAP_VALUE + PseudoCall = R1 // BPF_PSEUDO_CALL +) + +func (r Register) String() string { + v := uint8(r) + if v == 10 { + return "rfp" + } + return fmt.Sprintf("r%d", v) +} diff --git a/vendor/github.com/cilium/ebpf/attachtype_string.go b/vendor/github.com/cilium/ebpf/attachtype_string.go new file mode 100644 index 0000000000..de355ed909 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/attachtype_string.go @@ -0,0 +1,65 @@ +// Code generated by "stringer -type AttachType -trimprefix Attach"; DO NOT EDIT. + +package ebpf + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[AttachNone-0] + _ = x[AttachCGroupInetIngress-0] + _ = x[AttachCGroupInetEgress-1] + _ = x[AttachCGroupInetSockCreate-2] + _ = x[AttachCGroupSockOps-3] + _ = x[AttachSkSKBStreamParser-4] + _ = x[AttachSkSKBStreamVerdict-5] + _ = x[AttachCGroupDevice-6] + _ = x[AttachSkMsgVerdict-7] + _ = x[AttachCGroupInet4Bind-8] + _ = x[AttachCGroupInet6Bind-9] + _ = x[AttachCGroupInet4Connect-10] + _ = x[AttachCGroupInet6Connect-11] + _ = x[AttachCGroupInet4PostBind-12] + _ = x[AttachCGroupInet6PostBind-13] + _ = x[AttachCGroupUDP4Sendmsg-14] + _ = x[AttachCGroupUDP6Sendmsg-15] + _ = x[AttachLircMode2-16] + _ = x[AttachFlowDissector-17] + _ = x[AttachCGroupSysctl-18] + _ = x[AttachCGroupUDP4Recvmsg-19] + _ = x[AttachCGroupUDP6Recvmsg-20] + _ = x[AttachCGroupGetsockopt-21] + _ = x[AttachCGroupSetsockopt-22] + _ = x[AttachTraceRawTp-23] + _ = x[AttachTraceFEntry-24] + _ = x[AttachTraceFExit-25] + _ = x[AttachModifyReturn-26] + _ = x[AttachLSMMac-27] + _ = x[AttachTraceIter-28] + _ = x[AttachCgroupInet4GetPeername-29] + _ = x[AttachCgroupInet6GetPeername-30] + _ = x[AttachCgroupInet4GetSockname-31] + _ = x[AttachCgroupInet6GetSockname-32] + _ = x[AttachXDPDevMap-33] + _ = x[AttachCgroupInetSockRelease-34] + _ = x[AttachXDPCPUMap-35] + _ = x[AttachSkLookup-36] + _ = x[AttachXDP-37] + _ = x[AttachSkSKBVerdict-38] + _ = x[AttachSkReuseportSelect-39] + _ = x[AttachSkReuseportSelectOrMigrate-40] + _ = x[AttachPerfEvent-41] +} + +const _AttachType_name = "NoneCGroupInetEgressCGroupInetSockCreateCGroupSockOpsSkSKBStreamParserSkSKBStreamVerdictCGroupDeviceSkMsgVerdictCGroupInet4BindCGroupInet6BindCGroupInet4ConnectCGroupInet6ConnectCGroupInet4PostBindCGroupInet6PostBindCGroupUDP4SendmsgCGroupUDP6SendmsgLircMode2FlowDissectorCGroupSysctlCGroupUDP4RecvmsgCGroupUDP6RecvmsgCGroupGetsockoptCGroupSetsockoptTraceRawTpTraceFEntryTraceFExitModifyReturnLSMMacTraceIterCgroupInet4GetPeernameCgroupInet6GetPeernameCgroupInet4GetSocknameCgroupInet6GetSocknameXDPDevMapCgroupInetSockReleaseXDPCPUMapSkLookupXDPSkSKBVerdictSkReuseportSelectSkReuseportSelectOrMigratePerfEvent" + +var _AttachType_index = [...]uint16{0, 4, 20, 40, 53, 70, 88, 100, 112, 127, 142, 160, 178, 197, 216, 233, 250, 259, 272, 284, 301, 318, 334, 350, 360, 371, 381, 393, 399, 408, 430, 452, 474, 496, 505, 526, 535, 543, 546, 558, 575, 601, 610} + +func (i AttachType) String() string { + if i >= AttachType(len(_AttachType_index)-1) { + return "AttachType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _AttachType_name[_AttachType_index[i]:_AttachType_index[i+1]] +} diff --git a/vendor/github.com/cilium/ebpf/collection.go b/vendor/github.com/cilium/ebpf/collection.go new file mode 100644 index 0000000000..2ededc87a0 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/collection.go @@ -0,0 +1,668 @@ +package ebpf + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "reflect" + "strings" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/btf" +) + +// CollectionOptions control loading a collection into the kernel. +// +// Maps and Programs are passed to NewMapWithOptions and NewProgramsWithOptions. +type CollectionOptions struct { + Maps MapOptions + Programs ProgramOptions +} + +// CollectionSpec describes a collection. +type CollectionSpec struct { + Maps map[string]*MapSpec + Programs map[string]*ProgramSpec + + // ByteOrder specifies whether the ELF was compiled for + // big-endian or little-endian architectures. 
+ ByteOrder binary.ByteOrder +} + +// Copy returns a recursive copy of the spec. +func (cs *CollectionSpec) Copy() *CollectionSpec { + if cs == nil { + return nil + } + + cpy := CollectionSpec{ + Maps: make(map[string]*MapSpec, len(cs.Maps)), + Programs: make(map[string]*ProgramSpec, len(cs.Programs)), + ByteOrder: cs.ByteOrder, + } + + for name, spec := range cs.Maps { + cpy.Maps[name] = spec.Copy() + } + + for name, spec := range cs.Programs { + cpy.Programs[name] = spec.Copy() + } + + return &cpy +} + +// RewriteMaps replaces all references to specific maps. +// +// Use this function to use pre-existing maps instead of creating new ones +// when calling NewCollection. Any named maps are removed from CollectionSpec.Maps. +// +// Returns an error if a named map isn't used in at least one program. +func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error { + for symbol, m := range maps { + // have we seen a program that uses this symbol / map + seen := false + fd := m.FD() + for progName, progSpec := range cs.Programs { + err := progSpec.Instructions.RewriteMapPtr(symbol, fd) + + switch { + case err == nil: + seen = true + + case asm.IsUnreferencedSymbol(err): + // Not all programs need to use the map + + default: + return fmt.Errorf("program %s: %w", progName, err) + } + } + + if !seen { + return fmt.Errorf("map %s not referenced by any programs", symbol) + } + + // Prevent NewCollection from creating rewritten maps + delete(cs.Maps, symbol) + } + + return nil +} + +// RewriteConstants replaces the value of multiple constants. +// +// The constant must be defined like so in the C program: +// +// volatile const type foobar; +// volatile const type foobar = default; +// +// Replacement values must be of the same length as the C sizeof(type). +// If necessary, they are marshalled according to the same rules as +// map values. +// +// From Linux 5.5 the verifier will use constants to eliminate dead code. +// +// Returns an error if a constant doesn't exist. +func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error { + rodata := cs.Maps[".rodata"] + if rodata == nil { + return errors.New("missing .rodata section") + } + + if rodata.BTF == nil { + return errors.New(".rodata section has no BTF") + } + + if n := len(rodata.Contents); n != 1 { + return fmt.Errorf("expected one key in .rodata, found %d", n) + } + + kv := rodata.Contents[0] + value, ok := kv.Value.([]byte) + if !ok { + return fmt.Errorf("first value in .rodata is %T not []byte", kv.Value) + } + + buf := make([]byte, len(value)) + copy(buf, value) + + err := patchValue(buf, rodata.BTF.Value, consts) + if err != nil { + return err + } + + rodata.Contents[0] = MapKV{kv.Key, buf} + return nil +} + +// Assign the contents of a CollectionSpec to a struct. +// +// This function is a shortcut to manually checking the presence +// of maps and programs in a CollectionSpec. Consider using bpf2go +// if this sounds useful. +// +// 'to' must be a pointer to a struct. A field of the +// struct is updated with values from Programs or Maps if it +// has an `ebpf` tag and its type is *ProgramSpec or *MapSpec. +// The tag's value specifies the name of the program or map as +// found in the CollectionSpec. +// +// struct { +// Foo *ebpf.ProgramSpec `ebpf:"xdp_foo"` +// Bar *ebpf.MapSpec `ebpf:"bar_map"` +// Ignored int +// } +// +// Returns an error if any of the eBPF objects can't be found, or +// if the same MapSpec or ProgramSpec is assigned multiple times. 
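+// +// A call site then looks like this (a sketch reusing the tags above; spec is +// a previously loaded *CollectionSpec): +// +//	var objs struct { +//		Foo *ebpf.ProgramSpec `ebpf:"xdp_foo"` +//		Bar *ebpf.MapSpec `ebpf:"bar_map"` +//	} +//	err := spec.Assign(&objs)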
+func (cs *CollectionSpec) Assign(to interface{}) error { + // Assign() only supports assigning ProgramSpecs and MapSpecs, + // so doesn't load any resources into the kernel. + getValue := func(typ reflect.Type, name string) (interface{}, error) { + switch typ { + + case reflect.TypeOf((*ProgramSpec)(nil)): + if p := cs.Programs[name]; p != nil { + return p, nil + } + return nil, fmt.Errorf("missing program %q", name) + + case reflect.TypeOf((*MapSpec)(nil)): + if m := cs.Maps[name]; m != nil { + return m, nil + } + return nil, fmt.Errorf("missing map %q", name) + + default: + return nil, fmt.Errorf("unsupported type %s", typ) + } + } + + return assignValues(to, getValue) +} + +// LoadAndAssign loads Maps and Programs into the kernel and assigns them +// to a struct. +// +// This function is a shortcut to manually checking the presence +// of maps and programs in a CollectionSpec. Consider using bpf2go +// if this sounds useful. +// +// 'to' must be a pointer to a struct. A field of the struct is updated with +// a Program or Map if it has an `ebpf` tag and its type is *Program or *Map. +// The tag's value specifies the name of the program or map as found in the +// CollectionSpec. Before updating the struct, the requested objects and their +// dependent resources are loaded into the kernel and populated with values if +// specified. +// +// struct { +// Foo *ebpf.Program `ebpf:"xdp_foo"` +// Bar *ebpf.Map `ebpf:"bar_map"` +// Ignored int +// } +// +// opts may be nil. +// +// Returns an error if any of the fields can't be found, or +// if the same Map or Program is assigned multiple times. +func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) error { + loader := newCollectionLoader(cs, opts) + defer loader.cleanup() + + // Support assigning Programs and Maps, lazy-loading the required objects. + assignedMaps := make(map[string]bool) + getValue := func(typ reflect.Type, name string) (interface{}, error) { + switch typ { + + case reflect.TypeOf((*Program)(nil)): + return loader.loadProgram(name) + + case reflect.TypeOf((*Map)(nil)): + assignedMaps[name] = true + return loader.loadMap(name) + + default: + return nil, fmt.Errorf("unsupported type %s", typ) + } + } + + // Load the Maps and Programs requested by the annotated struct. + if err := assignValues(to, getValue); err != nil { + return err + } + + // Populate the requested maps. Has a chance of lazy-loading other dependent maps. + if err := loader.populateMaps(); err != nil { + return err + } + + // Evaluate the loader's objects after all (lazy)loading has taken place. + for n, m := range loader.maps { + switch m.typ { + case ProgramArray: + // Require all lazy-loaded ProgramArrays to be assigned to the given object. + // Without any references, they will be closed on the first GC and all tail + // calls into them will miss. + if !assignedMaps[n] { + return fmt.Errorf("ProgramArray %s must be assigned to prevent missed tail calls", n) + } + } + } + + loader.finalize() + + return nil +} + +// Collection is a collection of Programs and Maps associated +// with their symbols +type Collection struct { + Programs map[string]*Program + Maps map[string]*Map +} + +// NewCollection creates a Collection from a specification. +func NewCollection(spec *CollectionSpec) (*Collection, error) { + return NewCollectionWithOptions(spec, CollectionOptions{}) +} + +// NewCollectionWithOptions creates a Collection from a specification. 
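+// +// The MapOptions and ProgramOptions in opts are applied to every map and +// program created from the spec.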
+func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Collection, error) { + loader := newCollectionLoader(spec, &opts) + defer loader.cleanup() + + // Create maps first, as their fds need to be linked into programs. + for mapName := range spec.Maps { + if _, err := loader.loadMap(mapName); err != nil { + return nil, err + } + } + + for progName := range spec.Programs { + if _, err := loader.loadProgram(progName); err != nil { + return nil, err + } + } + + // Maps can contain Program and Map stubs, so populate them after + // all Maps and Programs have been successfully loaded. + if err := loader.populateMaps(); err != nil { + return nil, err + } + + maps, progs := loader.maps, loader.programs + + loader.finalize() + + return &Collection{ + progs, + maps, + }, nil +} + +type handleCache struct { + btfHandles map[*btf.Spec]*btf.Handle + btfSpecs map[io.ReaderAt]*btf.Spec +} + +func newHandleCache() *handleCache { + return &handleCache{ + btfHandles: make(map[*btf.Spec]*btf.Handle), + btfSpecs: make(map[io.ReaderAt]*btf.Spec), + } +} + +func (hc handleCache) btfHandle(spec *btf.Spec) (*btf.Handle, error) { + if hc.btfHandles[spec] != nil { + return hc.btfHandles[spec], nil + } + + handle, err := btf.NewHandle(spec) + if err != nil { + return nil, err + } + + hc.btfHandles[spec] = handle + return handle, nil +} + +func (hc handleCache) btfSpec(rd io.ReaderAt) (*btf.Spec, error) { + if hc.btfSpecs[rd] != nil { + return hc.btfSpecs[rd], nil + } + + spec, err := btf.LoadSpecFromReader(rd) + if err != nil { + return nil, err + } + + hc.btfSpecs[rd] = spec + return spec, nil +} + +func (hc handleCache) close() { + for _, handle := range hc.btfHandles { + handle.Close() + } +} + +type collectionLoader struct { + coll *CollectionSpec + opts *CollectionOptions + maps map[string]*Map + programs map[string]*Program + handles *handleCache +} + +func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) *collectionLoader { + if opts == nil { + opts = &CollectionOptions{} + } + + return &collectionLoader{ + coll, + opts, + make(map[string]*Map), + make(map[string]*Program), + newHandleCache(), + } +} + +// finalize should be called when all the collectionLoader's resources +// have been successfully loaded into the kernel and populated with values. +func (cl *collectionLoader) finalize() { + cl.maps, cl.programs = nil, nil +} + +// cleanup cleans up all resources left over in the collectionLoader. +// Call finalize() when Map and Program creation/population is successful +// to prevent them from getting closed. +func (cl *collectionLoader) cleanup() { + cl.handles.close() + for _, m := range cl.maps { + m.Close() + } + for _, p := range cl.programs { + p.Close() + } +} + +func (cl *collectionLoader) loadMap(mapName string) (*Map, error) { + if m := cl.maps[mapName]; m != nil { + return m, nil + } + + mapSpec := cl.coll.Maps[mapName] + if mapSpec == nil { + return nil, fmt.Errorf("missing map %s", mapName) + } + + m, err := newMapWithOptions(mapSpec, cl.opts.Maps, cl.handles) + if err != nil { + return nil, fmt.Errorf("map %s: %w", mapName, err) + } + + cl.maps[mapName] = m + return m, nil +} + +func (cl *collectionLoader) loadProgram(progName string) (*Program, error) { + if prog := cl.programs[progName]; prog != nil { + return prog, nil + } + + progSpec := cl.coll.Programs[progName] + if progSpec == nil { + return nil, fmt.Errorf("unknown program %s", progName) + } + + progSpec = progSpec.Copy() + + // Rewrite any reference to a valid map. 
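+	// Only instructions still carrying the -1 placeholder written during +	// ELF loading (all ones in the lower 32 bits) are patched here; anything +	// else was already rewritten by the caller and is left alone.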
+ for i := range progSpec.Instructions { + ins := &progSpec.Instructions[i] + + if !ins.IsLoadFromMap() || ins.Reference == "" { + continue + } + + if uint32(ins.Constant) != math.MaxUint32 { + // Don't overwrite maps already rewritten, users can + // rewrite programs in the spec themselves + continue + } + + m, err := cl.loadMap(ins.Reference) + if err != nil { + return nil, fmt.Errorf("program %s: %w", progName, err) + } + + fd := m.FD() + if fd < 0 { + return nil, fmt.Errorf("map %s: %w", ins.Reference, internal.ErrClosedFd) + } + if err := ins.RewriteMapPtr(m.FD()); err != nil { + return nil, fmt.Errorf("program %s: map %s: %w", progName, ins.Reference, err) + } + } + + prog, err := newProgramWithOptions(progSpec, cl.opts.Programs, cl.handles) + if err != nil { + return nil, fmt.Errorf("program %s: %w", progName, err) + } + + cl.programs[progName] = prog + return prog, nil +} + +func (cl *collectionLoader) populateMaps() error { + for mapName, m := range cl.maps { + mapSpec, ok := cl.coll.Maps[mapName] + if !ok { + return fmt.Errorf("missing map spec %s", mapName) + } + + mapSpec = mapSpec.Copy() + + // Replace any object stubs with loaded objects. + for i, kv := range mapSpec.Contents { + switch v := kv.Value.(type) { + case programStub: + // loadProgram is idempotent and could return an existing Program. + prog, err := cl.loadProgram(string(v)) + if err != nil { + return fmt.Errorf("loading program %s, for map %s: %w", v, mapName, err) + } + mapSpec.Contents[i] = MapKV{kv.Key, prog} + + case mapStub: + // loadMap is idempotent and could return an existing Map. + innerMap, err := cl.loadMap(string(v)) + if err != nil { + return fmt.Errorf("loading inner map %s, for map %s: %w", v, mapName, err) + } + mapSpec.Contents[i] = MapKV{kv.Key, innerMap} + } + } + + // Populate and freeze the map if specified. + if err := m.finalize(mapSpec); err != nil { + return fmt.Errorf("populating map %s: %w", mapName, err) + } + } + + return nil +} + +// LoadCollection parses an object file and converts it to a collection. +func LoadCollection(file string) (*Collection, error) { + spec, err := LoadCollectionSpec(file) + if err != nil { + return nil, err + } + return NewCollection(spec) +} + +// Close frees all maps and programs associated with the collection. +// +// The collection mustn't be used afterwards. +func (coll *Collection) Close() { + for _, prog := range coll.Programs { + prog.Close() + } + for _, m := range coll.Maps { + m.Close() + } +} + +// DetachMap removes the named map from the Collection. +// +// This means that a later call to Close() will not affect this map. +// +// Returns nil if no map of that name exists. +func (coll *Collection) DetachMap(name string) *Map { + m := coll.Maps[name] + delete(coll.Maps, name) + return m +} + +// DetachProgram removes the named program from the Collection. +// +// This means that a later call to Close() will not affect this program. +// +// Returns nil if no program of that name exists. +func (coll *Collection) DetachProgram(name string) *Program { + p := coll.Programs[name] + delete(coll.Programs, name) + return p +} + +// structField represents a struct field containing the ebpf struct tag. +type structField struct { + reflect.StructField + value reflect.Value +} + +// ebpfFields extracts field names tagged with 'ebpf' from a struct type. +// Keep track of visited types to avoid infinite recursion. 
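+// +// visited may be nil on the first call; it is allocated on demand and +// passed down to recursive calls.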
+func ebpfFields(structVal reflect.Value, visited map[reflect.Type]bool) ([]structField, error) { + if visited == nil { + visited = make(map[reflect.Type]bool) + } + + structType := structVal.Type() + if structType.Kind() != reflect.Struct { + return nil, fmt.Errorf("%s is not a struct", structType) + } + + if visited[structType] { + return nil, fmt.Errorf("recursion on type %s", structType) + } + + fields := make([]structField, 0, structType.NumField()) + for i := 0; i < structType.NumField(); i++ { + field := structField{structType.Field(i), structVal.Field(i)} + + // If the field is tagged, gather it and move on. + name := field.Tag.Get("ebpf") + if name != "" { + fields = append(fields, field) + continue + } + + // If the field does not have an ebpf tag, but is a struct or a pointer + // to a struct, attempt to gather its fields as well. + var v reflect.Value + switch field.Type.Kind() { + case reflect.Ptr: + if field.Type.Elem().Kind() != reflect.Struct { + continue + } + + if field.value.IsNil() { + return nil, fmt.Errorf("nil pointer to %s", structType) + } + + // Obtain the destination type of the pointer. + v = field.value.Elem() + + case reflect.Struct: + // Reference the value's type directly. + v = field.value + + default: + continue + } + + inner, err := ebpfFields(v, visited) + if err != nil { + return nil, fmt.Errorf("field %s: %w", field.Name, err) + } + + fields = append(fields, inner...) + } + + return fields, nil +} + +// assignValues attempts to populate all fields of 'to' tagged with 'ebpf'. +// +// getValue is called for every tagged field of 'to' and must return the value +// to be assigned to the field with the given typ and name. +func assignValues(to interface{}, + getValue func(typ reflect.Type, name string) (interface{}, error)) error { + + toValue := reflect.ValueOf(to) + if toValue.Type().Kind() != reflect.Ptr { + return fmt.Errorf("%T is not a pointer to struct", to) + } + + if toValue.IsNil() { + return fmt.Errorf("nil pointer to %T", to) + } + + fields, err := ebpfFields(toValue.Elem(), nil) + if err != nil { + return err + } + + type elem struct { + // Either *Map or *Program + typ reflect.Type + name string + } + + assigned := make(map[elem]string) + for _, field := range fields { + // Get string value the field is tagged with. + tag := field.Tag.Get("ebpf") + if strings.Contains(tag, ",") { + return fmt.Errorf("field %s: ebpf tag contains a comma", field.Name) + } + + // Check if the eBPF object with the requested + // type and tag was already assigned elsewhere. + e := elem{field.Type, tag} + if af := assigned[e]; af != "" { + return fmt.Errorf("field %s: object %q was already assigned to %s", field.Name, tag, af) + } + + // Get the eBPF object referred to by the tag. + value, err := getValue(field.Type, tag) + if err != nil { + return fmt.Errorf("field %s: %w", field.Name, err) + } + + if !field.value.CanSet() { + return fmt.Errorf("field %s: can't set value", field.Name) + } + field.value.Set(reflect.ValueOf(value)) + + assigned[e] = field.Name + } + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/doc.go b/vendor/github.com/cilium/ebpf/doc.go new file mode 100644 index 0000000000..f7f34da8f4 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/doc.go @@ -0,0 +1,16 @@ +// Package ebpf is a toolkit for working with eBPF programs. +// +// eBPF programs are small snippets of code which are executed directly +// in a VM in the Linux kernel, which makes them very fast and flexible. +// Many Linux subsystems now accept eBPF programs. 
This makes it possible +// to implement highly application specific logic inside the kernel, +// without having to modify the actual kernel itself. +// +// This package is designed for long-running processes which +// want to use eBPF to implement part of their application logic. It has no +// run-time dependencies outside of the library and the Linux kernel itself. +// eBPF code should be compiled ahead of time using clang, and shipped with +// your application as any other resource. +// +// Use the link subpackage to attach a loaded program to a hook in the kernel. +package ebpf diff --git a/vendor/github.com/cilium/ebpf/elf_reader.go b/vendor/github.com/cilium/ebpf/elf_reader.go new file mode 100644 index 0000000000..42010f43e5 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/elf_reader.go @@ -0,0 +1,1077 @@ +package ebpf + +import ( + "bufio" + "bytes" + "debug/elf" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "os" + "strings" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/btf" + "github.com/cilium/ebpf/internal/unix" +) + +// elfCode is a convenience to reduce the amount of arguments that have to +// be passed around explicitly. You should treat its contents as immutable. +type elfCode struct { + *internal.SafeELFFile + sections map[elf.SectionIndex]*elfSection + license string + version uint32 + btf *btf.Spec +} + +// LoadCollectionSpec parses an ELF file into a CollectionSpec. +func LoadCollectionSpec(file string) (*CollectionSpec, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + spec, err := LoadCollectionSpecFromReader(f) + if err != nil { + return nil, fmt.Errorf("file %s: %w", file, err) + } + return spec, nil +} + +// LoadCollectionSpecFromReader parses an ELF file into a CollectionSpec. +func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { + f, err := internal.NewSafeELFFile(rd) + if err != nil { + return nil, err + } + defer f.Close() + + var ( + licenseSection *elf.Section + versionSection *elf.Section + sections = make(map[elf.SectionIndex]*elfSection) + relSections = make(map[elf.SectionIndex]*elf.Section) + ) + + // This is the target of relocations generated by inline assembly. + sections[elf.SHN_UNDEF] = newElfSection(new(elf.Section), undefSection) + + // Collect all the sections we're interested in. This includes relocations + // which we parse later. 
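+	// Sections are recognized either by name ("license", "version", "maps", +	// ".maps", ".bss", ".data", ".rodata") or, for program and relocation +	// sections, by their ELF type and flags.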
+ for i, sec := range f.Sections { + idx := elf.SectionIndex(i) + + switch { + case strings.HasPrefix(sec.Name, "license"): + licenseSection = sec + case strings.HasPrefix(sec.Name, "version"): + versionSection = sec + case strings.HasPrefix(sec.Name, "maps"): + sections[idx] = newElfSection(sec, mapSection) + case sec.Name == ".maps": + sections[idx] = newElfSection(sec, btfMapSection) + case sec.Name == ".bss" || sec.Name == ".data" || strings.HasPrefix(sec.Name, ".rodata"): + sections[idx] = newElfSection(sec, dataSection) + case sec.Type == elf.SHT_REL: + // Store relocations under the section index of the target + relSections[elf.SectionIndex(sec.Info)] = sec + case sec.Type == elf.SHT_PROGBITS && (sec.Flags&elf.SHF_EXECINSTR) != 0 && sec.Size > 0: + sections[idx] = newElfSection(sec, programSection) + } + } + + license, err := loadLicense(licenseSection) + if err != nil { + return nil, fmt.Errorf("load license: %w", err) + } + + version, err := loadVersion(versionSection, f.ByteOrder) + if err != nil { + return nil, fmt.Errorf("load version: %w", err) + } + + btfSpec, err := btf.LoadSpecFromReader(rd) + if err != nil && !errors.Is(err, btf.ErrNotFound) { + return nil, fmt.Errorf("load BTF: %w", err) + } + + // Assign symbols to all the sections we're interested in. + symbols, err := f.Symbols() + if err != nil { + return nil, fmt.Errorf("load symbols: %v", err) + } + + for _, symbol := range symbols { + idx := symbol.Section + symType := elf.ST_TYPE(symbol.Info) + + section := sections[idx] + if section == nil { + continue + } + + // Older versions of LLVM don't tag symbols correctly, so keep + // all NOTYPE ones. + keep := symType == elf.STT_NOTYPE + switch section.kind { + case mapSection, btfMapSection, dataSection: + keep = keep || symType == elf.STT_OBJECT + case programSection: + keep = keep || symType == elf.STT_FUNC + } + if !keep || symbol.Name == "" { + continue + } + + section.symbols[symbol.Value] = symbol + } + + ec := &elfCode{ + SafeELFFile: f, + sections: sections, + license: license, + version: version, + btf: btfSpec, + } + + // Go through relocation sections, and parse the ones for sections we're + // interested in. Make sure that relocations point at valid sections. + for idx, relSection := range relSections { + section := sections[idx] + if section == nil { + continue + } + + rels, err := ec.loadRelocations(relSection, symbols) + if err != nil { + return nil, fmt.Errorf("relocation for section %q: %w", section.Name, err) + } + + for _, rel := range rels { + target := sections[rel.Section] + if target == nil { + return nil, fmt.Errorf("section %q: reference to %q in section %s: %w", section.Name, rel.Name, rel.Section, ErrNotSupported) + } + + if target.Flags&elf.SHF_STRINGS > 0 { + return nil, fmt.Errorf("section %q: string is not stack allocated: %w", section.Name, ErrNotSupported) + } + + target.references++ + } + + section.relocations = rels + } + + // Collect all the various ways to define maps. + maps := make(map[string]*MapSpec) + if err := ec.loadMaps(maps); err != nil { + return nil, fmt.Errorf("load maps: %w", err) + } + + if err := ec.loadBTFMaps(maps); err != nil { + return nil, fmt.Errorf("load BTF maps: %w", err) + } + + if err := ec.loadDataSections(maps); err != nil { + return nil, fmt.Errorf("load data sections: %w", err) + } + + // Finally, collect programs and link them. 
+ progs, err := ec.loadPrograms() + if err != nil { + return nil, fmt.Errorf("load programs: %w", err) + } + + return &CollectionSpec{maps, progs, ec.ByteOrder}, nil +} + +func loadLicense(sec *elf.Section) (string, error) { + if sec == nil { + return "", nil + } + + data, err := sec.Data() + if err != nil { + return "", fmt.Errorf("section %s: %v", sec.Name, err) + } + return string(bytes.TrimRight(data, "\000")), nil +} + +func loadVersion(sec *elf.Section, bo binary.ByteOrder) (uint32, error) { + if sec == nil { + return 0, nil + } + + var version uint32 + if err := binary.Read(sec.Open(), bo, &version); err != nil { + return 0, fmt.Errorf("section %s: %v", sec.Name, err) + } + return version, nil +} + +type elfSectionKind int + +const ( + undefSection elfSectionKind = iota + mapSection + btfMapSection + programSection + dataSection +) + +type elfSection struct { + *elf.Section + kind elfSectionKind + // Offset from the start of the section to a symbol + symbols map[uint64]elf.Symbol + // Offset from the start of the section to a relocation, which points at + // a symbol in another section. + relocations map[uint64]elf.Symbol + // The number of relocations pointing at this section. + references int +} + +func newElfSection(section *elf.Section, kind elfSectionKind) *elfSection { + return &elfSection{ + section, + kind, + make(map[uint64]elf.Symbol), + make(map[uint64]elf.Symbol), + 0, + } +} + +func (ec *elfCode) loadPrograms() (map[string]*ProgramSpec, error) { + var ( + progs []*ProgramSpec + libs []*ProgramSpec + ) + + for _, sec := range ec.sections { + if sec.kind != programSection { + continue + } + + if len(sec.symbols) == 0 { + return nil, fmt.Errorf("section %v: missing symbols", sec.Name) + } + + funcSym, ok := sec.symbols[0] + if !ok { + return nil, fmt.Errorf("section %v: no label at start", sec.Name) + } + + insns, length, err := ec.loadInstructions(sec) + if err != nil { + return nil, fmt.Errorf("program %s: %w", funcSym.Name, err) + } + + progType, attachType, progFlags, attachTo := getProgType(sec.Name) + + spec := &ProgramSpec{ + Name: funcSym.Name, + Type: progType, + Flags: progFlags, + AttachType: attachType, + AttachTo: attachTo, + License: ec.license, + KernelVersion: ec.version, + Instructions: insns, + ByteOrder: ec.ByteOrder, + } + + if ec.btf != nil { + spec.BTF, err = ec.btf.Program(sec.Name, length) + if err != nil && !errors.Is(err, btf.ErrNoExtendedInfo) { + return nil, fmt.Errorf("program %s: %w", funcSym.Name, err) + } + } + + if spec.Type == UnspecifiedProgram { + // There is no single name we can use for "library" sections, + // since they may contain multiple functions. We'll decode the + // labels they contain later on, and then link sections that way. 
+ libs = append(libs, spec) + } else { + progs = append(progs, spec) + } + } + + res := make(map[string]*ProgramSpec, len(progs)) + for _, prog := range progs { + err := link(prog, libs) + if err != nil { + return nil, fmt.Errorf("program %s: %w", prog.Name, err) + } + res[prog.Name] = prog + } + + return res, nil +} + +func (ec *elfCode) loadInstructions(section *elfSection) (asm.Instructions, uint64, error) { + var ( + r = bufio.NewReader(section.Open()) + insns asm.Instructions + offset uint64 + ) + for { + var ins asm.Instruction + n, err := ins.Unmarshal(r, ec.ByteOrder) + if err == io.EOF { + return insns, offset, nil + } + if err != nil { + return nil, 0, fmt.Errorf("offset %d: %w", offset, err) + } + + ins.Symbol = section.symbols[offset].Name + + if rel, ok := section.relocations[offset]; ok { + if err = ec.relocateInstruction(&ins, rel); err != nil { + return nil, 0, fmt.Errorf("offset %d: relocate instruction: %w", offset, err) + } + } + + insns = append(insns, ins) + offset += n + } +} + +func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) error { + var ( + typ = elf.ST_TYPE(rel.Info) + bind = elf.ST_BIND(rel.Info) + name = rel.Name + ) + + target := ec.sections[rel.Section] + + switch target.kind { + case mapSection, btfMapSection: + if bind != elf.STB_GLOBAL { + return fmt.Errorf("possible erroneous static qualifier on map definition: found reference to %q", name) + } + + if typ != elf.STT_OBJECT && typ != elf.STT_NOTYPE { + // STT_NOTYPE is generated on clang < 8 which doesn't tag + // relocations appropriately. + return fmt.Errorf("map load: incorrect relocation type %v", typ) + } + + ins.Src = asm.PseudoMapFD + + // Mark the instruction as needing an update when creating the + // collection. + if err := ins.RewriteMapPtr(-1); err != nil { + return err + } + + case dataSection: + var offset uint32 + switch typ { + case elf.STT_SECTION: + if bind != elf.STB_LOCAL { + return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind) + } + + // This is really a reference to a static symbol, which clang doesn't + // emit a symbol table entry for. Instead it encodes the offset in + // the instruction itself. + offset = uint32(uint64(ins.Constant)) + + case elf.STT_OBJECT: + if bind != elf.STB_GLOBAL { + return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind) + } + + offset = uint32(rel.Value) + + default: + return fmt.Errorf("incorrect relocation type %v for direct map load", typ) + } + + // We rely on using the name of the data section as the reference. It + // would be nicer to keep the real name in case of an STT_OBJECT, but + // it's not clear how to encode that into Instruction. + name = target.Name + + // The kernel expects the offset in the second basic BPF instruction. + ins.Constant = int64(uint64(offset) << 32) + ins.Src = asm.PseudoMapValue + + // Mark the instruction as needing an update when creating the + // collection. 
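+		// To illustrate the encoding above: a direct load from .rodata at
+		// byte offset 16 carries Constant = 16 << 32 at this point. The
+		// placeholder fd written by RewriteMapPtr below occupies the lower
+		// 32 bits until the map is created.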
+ if err := ins.RewriteMapPtr(-1); err != nil { + return err + } + + case programSection: + if ins.OpCode.JumpOp() != asm.Call { + return fmt.Errorf("not a call instruction: %s", ins) + } + + if ins.Src != asm.PseudoCall { + return fmt.Errorf("call: %s: incorrect source register", name) + } + + switch typ { + case elf.STT_NOTYPE, elf.STT_FUNC: + if bind != elf.STB_GLOBAL { + return fmt.Errorf("call: %s: unsupported binding: %s", name, bind) + } + + case elf.STT_SECTION: + if bind != elf.STB_LOCAL { + return fmt.Errorf("call: %s: unsupported binding: %s", name, bind) + } + + // The function we want to call is in the indicated section, + // at the offset encoded in the instruction itself. Reverse + // the calculation to find the real function we're looking for. + // A value of -1 references the first instruction in the section. + offset := int64(int32(ins.Constant)+1) * asm.InstructionSize + if offset < 0 { + return fmt.Errorf("call: %s: invalid offset %d", name, offset) + } + + sym, ok := target.symbols[uint64(offset)] + if !ok { + return fmt.Errorf("call: %s: no symbol at offset %d", name, offset) + } + + ins.Constant = -1 + name = sym.Name + + default: + return fmt.Errorf("call: %s: invalid symbol type %s", name, typ) + } + + case undefSection: + if bind != elf.STB_GLOBAL { + return fmt.Errorf("asm relocation: %s: unsupported binding: %s", name, bind) + } + + if typ != elf.STT_NOTYPE { + return fmt.Errorf("asm relocation: %s: unsupported type %s", name, typ) + } + + // There is nothing to do here but set ins.Reference. + + default: + return fmt.Errorf("relocation to %q: %w", target.Name, ErrNotSupported) + } + + ins.Reference = name + return nil +} + +func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error { + for _, sec := range ec.sections { + if sec.kind != mapSection { + continue + } + + nSym := len(sec.symbols) + if nSym == 0 { + return fmt.Errorf("section %v: no symbols", sec.Name) + } + + if sec.Size%uint64(nSym) != 0 { + return fmt.Errorf("section %v: map descriptors are not of equal size", sec.Name) + } + + var ( + r = bufio.NewReader(sec.Open()) + size = sec.Size / uint64(nSym) + ) + for i, offset := 0, uint64(0); i < nSym; i, offset = i+1, offset+size { + mapSym, ok := sec.symbols[offset] + if !ok { + return fmt.Errorf("section %s: missing symbol for map at offset %d", sec.Name, offset) + } + + mapName := mapSym.Name + if maps[mapName] != nil { + return fmt.Errorf("section %v: map %v already exists", sec.Name, mapSym) + } + + lr := io.LimitReader(r, int64(size)) + + spec := MapSpec{ + Name: SanitizeName(mapName, -1), + } + switch { + case binary.Read(lr, ec.ByteOrder, &spec.Type) != nil: + return fmt.Errorf("map %s: missing type", mapName) + case binary.Read(lr, ec.ByteOrder, &spec.KeySize) != nil: + return fmt.Errorf("map %s: missing key size", mapName) + case binary.Read(lr, ec.ByteOrder, &spec.ValueSize) != nil: + return fmt.Errorf("map %s: missing value size", mapName) + case binary.Read(lr, ec.ByteOrder, &spec.MaxEntries) != nil: + return fmt.Errorf("map %s: missing max entries", mapName) + case binary.Read(lr, ec.ByteOrder, &spec.Flags) != nil: + return fmt.Errorf("map %s: missing flags", mapName) + } + + extra, err := io.ReadAll(lr) + if err != nil { + return fmt.Errorf("map %s: reading map tail: %w", mapName, err) + } + if len(extra) > 0 { + spec.Extra = *bytes.NewReader(extra) + } + + if err := spec.clampPerfEventArraySize(); err != nil { + return fmt.Errorf("map %s: %w", mapName, err) + } + + maps[mapName] = &spec + } + } + + return nil +} + +// loadBTFMaps 
iterates over all ELF sections marked as BTF map sections
+// (like .maps) and parses them into MapSpecs. Dump the .maps section and
+// any relocations with `readelf -x .maps -r <elf_file>`.
+func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error {
+	for _, sec := range ec.sections {
+		if sec.kind != btfMapSection {
+			continue
+		}
+
+		if ec.btf == nil {
+			return fmt.Errorf("missing BTF")
+		}
+
+		// Each section must appear as a DataSec in the ELF's BTF blob.
+		var ds *btf.Datasec
+		if err := ec.btf.FindType(sec.Name, &ds); err != nil {
+			return fmt.Errorf("cannot find section '%s' in BTF: %w", sec.Name, err)
+		}
+
+		// Open a Reader to the ELF's raw section bytes so we can assert that all
+		// of them are zero on a per-map (per-Var) basis. For now, the section's
+		// sole purpose is to receive relocations, so all must be zero.
+		rs := sec.Open()
+
+		for _, vs := range ds.Vars {
+			// BPF maps are declared as and assigned to global variables,
+			// so iterate over each Var in the DataSec and validate their types.
+			v, ok := vs.Type.(*btf.Var)
+			if !ok {
+				return fmt.Errorf("section %v: unexpected type %s", sec.Name, vs.Type)
+			}
+			name := string(v.Name)
+
+			// The BTF metadata for each Var contains the full length of the map
+			// declaration, so read the corresponding amount of bytes from the ELF.
+			// This way, we can pinpoint which map declaration contains unexpected
+			// (and therefore unsupported) data.
+			_, err := io.Copy(internal.DiscardZeroes{}, io.LimitReader(rs, int64(vs.Size)))
+			if err != nil {
+				return fmt.Errorf("section %v: map %s: initializing BTF map definitions: %w", sec.Name, name, internal.ErrNotSupported)
+			}
+
+			if maps[name] != nil {
+				return fmt.Errorf("section %v: map %s already exists", sec.Name, name)
+			}
+
+			// Each Var representing a BTF map definition contains a Struct.
+			mapStruct, ok := v.Type.(*btf.Struct)
+			if !ok {
+				return fmt.Errorf("expected struct, got %s", v.Type)
+			}
+
+			mapSpec, err := mapSpecFromBTF(sec, &vs, mapStruct, ec.btf, name, false)
+			if err != nil {
+				return fmt.Errorf("map %v: %w", name, err)
+			}
+
+			if err := mapSpec.clampPerfEventArraySize(); err != nil {
+				return fmt.Errorf("map %v: %w", name, err)
+			}
+
+			maps[name] = mapSpec
+		}
+
+		// Drain the ELF section reader to make sure all bytes are accounted for
+		// with BTF metadata.
+		i, err := io.Copy(io.Discard, rs)
+		if err != nil {
+			return fmt.Errorf("section %v: unexpected error reading remainder of ELF section: %w", sec.Name, err)
+		}
+		if i > 0 {
+			return fmt.Errorf("section %v: %d unexpected remaining bytes in ELF section, invalid BTF?", sec.Name, i)
+		}
+	}
+
+	return nil
+}
+
+// A programStub is a placeholder for a Program to be inserted at a certain map key.
+// It needs to be resolved into a Program later on in the loader process.
+type programStub string
+
+// A mapStub is a placeholder for a Map to be inserted at a certain map key.
+// It needs to be resolved into a Map later on in the loader process.
+type mapStub string
+
+// mapSpecFromBTF produces a MapSpec based on a btf.Struct def representing
+// a BTF map definition. The name and spec arguments will be copied to the
+// resulting MapSpec, and inner must be true on any recursive invocations.
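+//
+// For reference, a definition this function can parse looks roughly like
+// the following libbpf-style C (illustrative only, not code in this file):
+//
+//	struct {
+//		__uint(type, BPF_MAP_TYPE_HASH);
+//		__uint(max_entries, 1024);
+//		__type(key, __u32);
+//		__type(value, __u64);
+//	} kv SEC(".maps");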
+func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *btf.Spec, name string, inner bool) (*MapSpec, error) { + var ( + key, value btf.Type + keySize, valueSize uint32 + mapType MapType + flags, maxEntries uint32 + pinType PinType + innerMapSpec *MapSpec + contents []MapKV + err error + ) + + for i, member := range def.Members { + switch member.Name { + case "type": + mt, err := uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get type: %w", err) + } + mapType = MapType(mt) + + case "map_flags": + flags, err = uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get BTF map flags: %w", err) + } + + case "max_entries": + maxEntries, err = uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get BTF map max entries: %w", err) + } + + case "key": + if keySize != 0 { + return nil, errors.New("both key and key_size given") + } + + pk, ok := member.Type.(*btf.Pointer) + if !ok { + return nil, fmt.Errorf("key type is not a pointer: %T", member.Type) + } + + key = pk.Target + + size, err := btf.Sizeof(pk.Target) + if err != nil { + return nil, fmt.Errorf("can't get size of BTF key: %w", err) + } + + keySize = uint32(size) + + case "value": + if valueSize != 0 { + return nil, errors.New("both value and value_size given") + } + + vk, ok := member.Type.(*btf.Pointer) + if !ok { + return nil, fmt.Errorf("value type is not a pointer: %T", member.Type) + } + + value = vk.Target + + size, err := btf.Sizeof(vk.Target) + if err != nil { + return nil, fmt.Errorf("can't get size of BTF value: %w", err) + } + + valueSize = uint32(size) + + case "key_size": + // Key needs to be nil and keySize needs to be 0 for key_size to be + // considered a valid member. + if key != nil || keySize != 0 { + return nil, errors.New("both key and key_size given") + } + + keySize, err = uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get BTF key size: %w", err) + } + + case "value_size": + // Value needs to be nil and valueSize needs to be 0 for value_size to be + // considered a valid member. + if value != nil || valueSize != 0 { + return nil, errors.New("both value and value_size given") + } + + valueSize, err = uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get BTF value size: %w", err) + } + + case "pinning": + if inner { + return nil, errors.New("inner maps can't be pinned") + } + + pinning, err := uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get pinning: %w", err) + } + + pinType = PinType(pinning) + + case "values": + // The 'values' field in BTF map definitions is used for declaring map + // value types that are references to other BPF objects, like other maps + // or programs. It is always expected to be an array of pointers. + if i != len(def.Members)-1 { + return nil, errors.New("'values' must be the last member in a BTF map definition") + } + + if valueSize != 0 && valueSize != 4 { + return nil, errors.New("value_size must be 0 or 4") + } + valueSize = 4 + + valueType, err := resolveBTFArrayMacro(member.Type) + if err != nil { + return nil, fmt.Errorf("can't resolve type of member 'values': %w", err) + } + + switch t := valueType.(type) { + case *btf.Struct: + // The values member pointing to an array of structs means we're expecting + // a map-in-map declaration. 
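+				// For illustration, such a declaration uses libbpf's
+				// __array macro in C (the names here are made up):
+				//
+				//	struct {
+				//		__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+				//		__uint(max_entries, 8);
+				//		__array(values, struct inner_map);
+				//	} outer SEC(".maps");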
+				if mapType != ArrayOfMaps && mapType != HashOfMaps {
+					return nil, errors.New("outer map needs to be an array or a hash of maps")
+				}
+				if inner {
+					return nil, fmt.Errorf("nested inner maps are not supported")
+				}
+
+				// This inner map spec is used as a map template, but it needs to be
+				// created as a traditional map before it can be used to do so.
+				// libbpf names the inner map template '<outer_name>.inner', but we
+				// opted for _inner to simplify validation logic. (dots only supported
+				// on kernels 5.2 and up)
+				// Pass the BTF spec from the parent object, since both parent and
+				// child must be created from the same BTF blob (on kernels that support BTF).
+				innerMapSpec, err = mapSpecFromBTF(es, vs, t, spec, name+"_inner", true)
+				if err != nil {
+					return nil, fmt.Errorf("can't parse BTF map definition of inner map: %w", err)
+				}
+
+			case *btf.FuncProto:
+				// The values member contains an array of function pointers, meaning an
+				// autopopulated PROG_ARRAY.
+				if mapType != ProgramArray {
+					return nil, errors.New("map needs to be a program array")
+				}
+
+			default:
+				return nil, fmt.Errorf("unsupported value type %q in 'values' field", t)
+			}
+
+			contents, err = resolveBTFValuesContents(es, vs, member)
+			if err != nil {
+				return nil, fmt.Errorf("resolving values contents: %w", err)
+			}
+
+		default:
+			return nil, fmt.Errorf("unrecognized field %s in BTF map definition", member.Name)
+		}
+	}
+
+	if key == nil {
+		key = &btf.Void{}
+	}
+	if value == nil {
+		value = &btf.Void{}
+	}
+
+	return &MapSpec{
+		Name:       SanitizeName(name, -1),
+		Type:       MapType(mapType),
+		KeySize:    keySize,
+		ValueSize:  valueSize,
+		MaxEntries: maxEntries,
+		Flags:      flags,
+		BTF:        &btf.Map{Spec: spec, Key: key, Value: value},
+		Pinning:    pinType,
+		InnerMap:   innerMapSpec,
+		Contents:   contents,
+	}, nil
+}
+
+// uintFromBTF resolves the __uint macro, which is a pointer to a sized
+// array, e.g. for int (*foo)[10], this function will return 10.
+func uintFromBTF(typ btf.Type) (uint32, error) {
+	ptr, ok := typ.(*btf.Pointer)
+	if !ok {
+		return 0, fmt.Errorf("not a pointer: %v", typ)
+	}
+
+	arr, ok := ptr.Target.(*btf.Array)
+	if !ok {
+		return 0, fmt.Errorf("not a pointer to array: %v", typ)
+	}
+
+	return arr.Nelems, nil
+}
+
+// resolveBTFArrayMacro resolves the __array macro, which declares an array
+// of pointers to a given type. This function returns the target Type of
+// the pointers in the array.
+func resolveBTFArrayMacro(typ btf.Type) (btf.Type, error) {
+	arr, ok := typ.(*btf.Array)
+	if !ok {
+		return nil, fmt.Errorf("not an array: %v", typ)
+	}
+
+	ptr, ok := arr.Type.(*btf.Pointer)
+	if !ok {
+		return nil, fmt.Errorf("not an array of pointers: %v", typ)
+	}
+
+	return ptr.Target, nil
+}
+
+// resolveBTFValuesContents resolves relocations into ELF sections belonging
+// to btf.VarSecinfo's. This can be used on the 'values' member in BTF map
+// definitions to extract static declarations of map contents.
+func resolveBTFValuesContents(es *elfSection, vs *btf.VarSecinfo, member btf.Member) ([]MapKV, error) {
+	// The elements of a .values pointer array are not encoded in BTF.
+	// Instead, relocations are generated into each array index.
+	// However, it's possible to leave certain array indices empty, so all
+	// indices' offsets need to be checked for emitted relocations.
+
+	// The offset of the 'values' member within the _struct_ (in bits)
+	// is the starting point of the array. Convert to bytes. Add VarSecinfo
+	// offset to get the absolute position in the ELF blob.
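+	// For example, OffsetBits = 256 and vs.Offset = 0x20 place the first
+	// array element at byte 0x40 of the section (256/8 + 0x20).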
+ start := (member.OffsetBits / 8) + vs.Offset + // 'values' is encoded in BTF as a zero (variable) length struct + // member, and its contents run until the end of the VarSecinfo. + // Add VarSecinfo offset to get the absolute position in the ELF blob. + end := vs.Size + vs.Offset + // The size of an address in this section. This determines the width of + // an index in the array. + align := uint32(es.SectionHeader.Addralign) + + // Check if variable-length section is aligned. + if (end-start)%align != 0 { + return nil, errors.New("unaligned static values section") + } + elems := (end - start) / align + + if elems == 0 { + return nil, nil + } + + contents := make([]MapKV, 0, elems) + + // k is the array index, off is its corresponding ELF section offset. + for k, off := uint32(0), start; k < elems; k, off = k+1, off+align { + r, ok := es.relocations[uint64(off)] + if !ok { + continue + } + + // Relocation exists for the current offset in the ELF section. + // Emit a value stub based on the type of relocation to be replaced by + // a real fd later in the pipeline before populating the map. + // Map keys are encoded in MapKV entries, so empty array indices are + // skipped here. + switch t := elf.ST_TYPE(r.Info); t { + case elf.STT_FUNC: + contents = append(contents, MapKV{uint32(k), programStub(r.Name)}) + case elf.STT_OBJECT: + contents = append(contents, MapKV{uint32(k), mapStub(r.Name)}) + default: + return nil, fmt.Errorf("unknown relocation type %v", t) + } + } + + return contents, nil +} + +func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error { + for _, sec := range ec.sections { + if sec.kind != dataSection { + continue + } + + if sec.references == 0 { + // Prune data sections which are not referenced by any + // instructions. 
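+			// (A constant the compiler managed to fold into the code, for
+			// instance, leaves its section without relocations, so no
+			// backing array map needs to be created for it.)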
+ continue + } + + if ec.btf == nil { + return errors.New("data sections require BTF, make sure all consts are marked as static") + } + + var datasec *btf.Datasec + if err := ec.btf.FindType(sec.Name, &datasec); err != nil { + return fmt.Errorf("data section %s: can't get BTF: %w", sec.Name, err) + } + + data, err := sec.Data() + if err != nil { + return fmt.Errorf("data section %s: can't get contents: %w", sec.Name, err) + } + + if uint64(len(data)) > math.MaxUint32 { + return fmt.Errorf("data section %s: contents exceed maximum size", sec.Name) + } + + mapSpec := &MapSpec{ + Name: SanitizeName(sec.Name, -1), + Type: Array, + KeySize: 4, + ValueSize: uint32(len(data)), + MaxEntries: 1, + Contents: []MapKV{{uint32(0), data}}, + BTF: &btf.Map{Spec: ec.btf, Key: &btf.Void{}, Value: datasec}, + } + + switch sec.Name { + case ".rodata": + mapSpec.Flags = unix.BPF_F_RDONLY_PROG + mapSpec.Freeze = true + case ".bss": + // The kernel already zero-initializes the map + mapSpec.Contents = nil + } + + maps[sec.Name] = mapSpec + } + return nil +} + +func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) { + types := map[string]struct { + progType ProgramType + attachType AttachType + progFlags uint32 + }{ + // From https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/bpf/libbpf.c + "socket": {SocketFilter, AttachNone, 0}, + "sk_reuseport/migrate": {SkReuseport, AttachSkReuseportSelectOrMigrate, 0}, + "sk_reuseport": {SkReuseport, AttachSkReuseportSelect, 0}, + "seccomp": {SocketFilter, AttachNone, 0}, + "kprobe/": {Kprobe, AttachNone, 0}, + "uprobe/": {Kprobe, AttachNone, 0}, + "kretprobe/": {Kprobe, AttachNone, 0}, + "uretprobe/": {Kprobe, AttachNone, 0}, + "tracepoint/": {TracePoint, AttachNone, 0}, + "raw_tracepoint/": {RawTracepoint, AttachNone, 0}, + "raw_tp/": {RawTracepoint, AttachNone, 0}, + "tp_btf/": {Tracing, AttachTraceRawTp, 0}, + "xdp": {XDP, AttachNone, 0}, + "perf_event": {PerfEvent, AttachNone, 0}, + "lwt_in": {LWTIn, AttachNone, 0}, + "lwt_out": {LWTOut, AttachNone, 0}, + "lwt_xmit": {LWTXmit, AttachNone, 0}, + "lwt_seg6local": {LWTSeg6Local, AttachNone, 0}, + "sockops": {SockOps, AttachCGroupSockOps, 0}, + "sk_skb/stream_parser": {SkSKB, AttachSkSKBStreamParser, 0}, + "sk_skb/stream_verdict": {SkSKB, AttachSkSKBStreamParser, 0}, + "sk_msg": {SkMsg, AttachSkSKBStreamVerdict, 0}, + "lirc_mode2": {LircMode2, AttachLircMode2, 0}, + "flow_dissector": {FlowDissector, AttachFlowDissector, 0}, + "iter/": {Tracing, AttachTraceIter, 0}, + "fentry/": {Tracing, AttachTraceFEntry, 0}, + "fmod_ret/": {Tracing, AttachModifyReturn, 0}, + "fexit/": {Tracing, AttachTraceFExit, 0}, + "fentry.s/": {Tracing, AttachTraceFEntry, unix.BPF_F_SLEEPABLE}, + "fmod_ret.s/": {Tracing, AttachModifyReturn, unix.BPF_F_SLEEPABLE}, + "fexit.s/": {Tracing, AttachTraceFExit, unix.BPF_F_SLEEPABLE}, + "sk_lookup/": {SkLookup, AttachSkLookup, 0}, + "freplace/": {Extension, AttachNone, 0}, + "lsm/": {LSM, AttachLSMMac, 0}, + "lsm.s/": {LSM, AttachLSMMac, unix.BPF_F_SLEEPABLE}, + + "cgroup_skb/ingress": {CGroupSKB, AttachCGroupInetIngress, 0}, + "cgroup_skb/egress": {CGroupSKB, AttachCGroupInetEgress, 0}, + "cgroup/dev": {CGroupDevice, AttachCGroupDevice, 0}, + "cgroup/skb": {CGroupSKB, AttachNone, 0}, + "cgroup/sock": {CGroupSock, AttachCGroupInetSockCreate, 0}, + "cgroup/post_bind4": {CGroupSock, AttachCGroupInet4PostBind, 0}, + "cgroup/post_bind6": {CGroupSock, AttachCGroupInet6PostBind, 0}, + "cgroup/bind4": {CGroupSockAddr, AttachCGroupInet4Bind, 0}, + 
"cgroup/bind6": {CGroupSockAddr, AttachCGroupInet6Bind, 0}, + "cgroup/connect4": {CGroupSockAddr, AttachCGroupInet4Connect, 0}, + "cgroup/connect6": {CGroupSockAddr, AttachCGroupInet6Connect, 0}, + "cgroup/sendmsg4": {CGroupSockAddr, AttachCGroupUDP4Sendmsg, 0}, + "cgroup/sendmsg6": {CGroupSockAddr, AttachCGroupUDP6Sendmsg, 0}, + "cgroup/recvmsg4": {CGroupSockAddr, AttachCGroupUDP4Recvmsg, 0}, + "cgroup/recvmsg6": {CGroupSockAddr, AttachCGroupUDP6Recvmsg, 0}, + "cgroup/sysctl": {CGroupSysctl, AttachCGroupSysctl, 0}, + "cgroup/getsockopt": {CGroupSockopt, AttachCGroupGetsockopt, 0}, + "cgroup/setsockopt": {CGroupSockopt, AttachCGroupSetsockopt, 0}, + "classifier": {SchedCLS, AttachNone, 0}, + "action": {SchedACT, AttachNone, 0}, + + "cgroup/getsockname4": {CGroupSockAddr, AttachCgroupInet4GetSockname, 0}, + "cgroup/getsockname6": {CGroupSockAddr, AttachCgroupInet6GetSockname, 0}, + "cgroup/getpeername4": {CGroupSockAddr, AttachCgroupInet4GetPeername, 0}, + "cgroup/getpeername6": {CGroupSockAddr, AttachCgroupInet6GetPeername, 0}, + } + + for prefix, t := range types { + if !strings.HasPrefix(sectionName, prefix) { + continue + } + + if !strings.HasSuffix(prefix, "/") { + return t.progType, t.attachType, t.progFlags, "" + } + + return t.progType, t.attachType, t.progFlags, sectionName[len(prefix):] + } + + return UnspecifiedProgram, AttachNone, 0, "" +} + +func (ec *elfCode) loadRelocations(sec *elf.Section, symbols []elf.Symbol) (map[uint64]elf.Symbol, error) { + rels := make(map[uint64]elf.Symbol) + + if sec.Entsize < 16 { + return nil, fmt.Errorf("section %s: relocations are less than 16 bytes", sec.Name) + } + + r := bufio.NewReader(sec.Open()) + for off := uint64(0); off < sec.Size; off += sec.Entsize { + ent := io.LimitReader(r, int64(sec.Entsize)) + + var rel elf.Rel64 + if binary.Read(ent, ec.ByteOrder, &rel) != nil { + return nil, fmt.Errorf("can't parse relocation at offset %v", off) + } + + symNo := int(elf.R_SYM64(rel.Info) - 1) + if symNo >= len(symbols) { + return nil, fmt.Errorf("offset %d: symbol %d doesn't exist", off, symNo) + } + + symbol := symbols[symNo] + rels[rel.Off] = symbol + } + + return rels, nil +} diff --git a/vendor/github.com/cilium/ebpf/elf_reader_fuzz.go b/vendor/github.com/cilium/ebpf/elf_reader_fuzz.go new file mode 100644 index 0000000000..5f4e0a0ad0 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/elf_reader_fuzz.go @@ -0,0 +1,22 @@ +//go:build gofuzz +// +build gofuzz + +// Use with https://github.com/dvyukov/go-fuzz + +package ebpf + +import "bytes" + +func FuzzLoadCollectionSpec(data []byte) int { + spec, err := LoadCollectionSpecFromReader(bytes.NewReader(data)) + if err != nil { + if spec != nil { + panic("spec is not nil") + } + return 0 + } + if spec == nil { + panic("spec is nil") + } + return 1 +} diff --git a/vendor/github.com/cilium/ebpf/info.go b/vendor/github.com/cilium/ebpf/info.go new file mode 100644 index 0000000000..65fa4d7d85 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/info.go @@ -0,0 +1,273 @@ +package ebpf + +import ( + "bufio" + "encoding/hex" + "errors" + "fmt" + "io" + "os" + "strings" + "syscall" + "time" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/btf" +) + +// MapInfo describes a map. +type MapInfo struct { + Type MapType + id MapID + KeySize uint32 + ValueSize uint32 + MaxEntries uint32 + Flags uint32 + // Name as supplied by user space at load time. 
+	Name string
+}
+
+func newMapInfoFromFd(fd *internal.FD) (*MapInfo, error) {
+	info, err := bpfGetMapInfoByFD(fd)
+	if errors.Is(err, syscall.EINVAL) {
+		return newMapInfoFromProc(fd)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	return &MapInfo{
+		MapType(info.map_type),
+		MapID(info.id),
+		info.key_size,
+		info.value_size,
+		info.max_entries,
+		info.map_flags,
+		// name is available from 4.15.
+		internal.CString(info.name[:]),
+	}, nil
+}
+
+func newMapInfoFromProc(fd *internal.FD) (*MapInfo, error) {
+	var mi MapInfo
+	err := scanFdInfo(fd, map[string]interface{}{
+		"map_type":    &mi.Type,
+		"key_size":    &mi.KeySize,
+		"value_size":  &mi.ValueSize,
+		"max_entries": &mi.MaxEntries,
+		"map_flags":   &mi.Flags,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return &mi, nil
+}
+
+// ID returns the map ID.
+//
+// Available from 4.13.
+//
+// The bool return value indicates whether this optional field is available.
+func (mi *MapInfo) ID() (MapID, bool) {
+	return mi.id, mi.id > 0
+}
+
+// programStats holds statistics of a program.
+type programStats struct {
+	// Total accumulated runtime of the program in ns.
+	runtime time.Duration
+	// Total number of times the program was called.
+	runCount uint64
+}
+
+// ProgramInfo describes a program.
+type ProgramInfo struct {
+	Type ProgramType
+	id   ProgramID
+	// Truncated hash of the BPF bytecode.
+	Tag string
+	// Name as supplied by user space at load time.
+	Name string
+	// BTF for the program.
+	btf btf.ID
+	// IDs of maps related to the program.
+	ids []MapID
+
+	stats *programStats
+}
+
+func newProgramInfoFromFd(fd *internal.FD) (*ProgramInfo, error) {
+	info, err := bpfGetProgInfoByFD(fd, nil)
+	if errors.Is(err, syscall.EINVAL) {
+		return newProgramInfoFromProc(fd)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	var mapIDs []MapID
+	if info.nr_map_ids > 0 {
+		mapIDs = make([]MapID, info.nr_map_ids)
+		info, err = bpfGetProgInfoByFD(fd, mapIDs)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &ProgramInfo{
+		Type: ProgramType(info.prog_type),
+		id:   ProgramID(info.id),
+		// tag is available if the kernel supports BPF_PROG_GET_INFO_BY_FD.
+		Tag: hex.EncodeToString(info.tag[:]),
+		// name is available from 4.15.
+		Name: internal.CString(info.name[:]),
+		btf:  btf.ID(info.btf_id),
+		ids:  mapIDs,
+		stats: &programStats{
+			runtime:  time.Duration(info.run_time_ns),
+			runCount: info.run_cnt,
+		},
+	}, nil
+}
+
+func newProgramInfoFromProc(fd *internal.FD) (*ProgramInfo, error) {
+	var info ProgramInfo
+	err := scanFdInfo(fd, map[string]interface{}{
+		"prog_type": &info.Type,
+		"prog_tag":  &info.Tag,
+	})
+	if errors.Is(err, errMissingFields) {
+		return nil, &internal.UnsupportedFeatureError{
+			Name:           "reading program info from /proc/self/fdinfo",
+			MinimumVersion: internal.Version{4, 10, 0},
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	return &info, nil
+}
+
+// ID returns the program ID.
+//
+// Available from 4.13.
+//
+// The bool return value indicates whether this optional field is available.
+func (pi *ProgramInfo) ID() (ProgramID, bool) {
+	return pi.id, pi.id > 0
+}
+
+// BTFID returns the BTF ID associated with the program.
+//
+// Available from 5.0.
+//
+// The bool return value indicates whether this optional field is available and
+// populated. (The field may be available but not populated if the kernel
+// supports the field but the program was loaded without BTF information.)
+func (pi *ProgramInfo) BTFID() (btf.ID, bool) { + return pi.btf, pi.btf > 0 +} + +// RunCount returns the total number of times the program was called. +// +// Can return 0 if the collection of statistics is not enabled. See EnableStats(). +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) RunCount() (uint64, bool) { + if pi.stats != nil { + return pi.stats.runCount, true + } + return 0, false +} + +// Runtime returns the total accumulated runtime of the program. +// +// Can return 0 if the collection of statistics is not enabled. See EnableStats(). +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) Runtime() (time.Duration, bool) { + if pi.stats != nil { + return pi.stats.runtime, true + } + return time.Duration(0), false +} + +// MapIDs returns the maps related to the program. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) MapIDs() ([]MapID, bool) { + return pi.ids, pi.ids != nil +} + +func scanFdInfo(fd *internal.FD, fields map[string]interface{}) error { + raw, err := fd.Value() + if err != nil { + return err + } + + fh, err := os.Open(fmt.Sprintf("/proc/self/fdinfo/%d", raw)) + if err != nil { + return err + } + defer fh.Close() + + if err := scanFdInfoReader(fh, fields); err != nil { + return fmt.Errorf("%s: %w", fh.Name(), err) + } + return nil +} + +var errMissingFields = errors.New("missing fields") + +func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error { + var ( + scanner = bufio.NewScanner(r) + scanned int + ) + + for scanner.Scan() { + parts := strings.SplitN(scanner.Text(), "\t", 2) + if len(parts) != 2 { + continue + } + + name := strings.TrimSuffix(parts[0], ":") + field, ok := fields[string(name)] + if !ok { + continue + } + + if n, err := fmt.Sscanln(parts[1], field); err != nil || n != 1 { + return fmt.Errorf("can't parse field %s: %v", name, err) + } + + scanned++ + } + + if err := scanner.Err(); err != nil { + return err + } + + if scanned != len(fields) { + return errMissingFields + } + + return nil +} + +// EnableStats starts the measuring of the runtime +// and run counts of eBPF programs. +// +// Collecting statistics can have an impact on the performance. +// +// Requires at least 5.8. +func EnableStats(which uint32) (io.Closer, error) { + attr := internal.BPFEnableStatsAttr{ + StatsType: which, + } + + fd, err := internal.BPFEnableStats(&attr) + if err != nil { + return nil, err + } + return fd, nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/align.go b/vendor/github.com/cilium/ebpf/internal/align.go new file mode 100644 index 0000000000..8b4f2658ea --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/align.go @@ -0,0 +1,6 @@ +package internal + +// Align returns 'n' updated to 'alignment' boundary. +func Align(n, alignment int) int { + return (int(n) + alignment - 1) / alignment * alignment +} diff --git a/vendor/github.com/cilium/ebpf/internal/btf/btf.go b/vendor/github.com/cilium/ebpf/internal/btf/btf.go new file mode 100644 index 0000000000..2b5f6d226a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/btf/btf.go @@ -0,0 +1,798 @@ +package btf + +import ( + "bytes" + "debug/elf" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "os" + "reflect" + "sync" + "unsafe" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/unix" +) + +const btfMagic = 0xeB9F + +// Errors returned by BTF functions. 
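+// Callers are expected to test for these with errors.Is, for example:
+//
+//	if errors.Is(err, btf.ErrNotFound) {
+//		// no BTF available, fall back
+//	}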
+var ( + ErrNotSupported = internal.ErrNotSupported + ErrNotFound = errors.New("not found") + ErrNoExtendedInfo = errors.New("no extended info") +) + +// ID represents the unique ID of a BTF object. +type ID uint32 + +// Spec represents decoded BTF. +type Spec struct { + rawTypes []rawType + strings stringTable + types []Type + namedTypes map[string][]NamedType + funcInfos map[string]extInfo + lineInfos map[string]extInfo + coreRelos map[string]coreRelos + byteOrder binary.ByteOrder +} + +type btfHeader struct { + Magic uint16 + Version uint8 + Flags uint8 + HdrLen uint32 + + TypeOff uint32 + TypeLen uint32 + StringOff uint32 + StringLen uint32 +} + +// LoadSpecFromReader reads BTF sections from an ELF. +// +// Returns ErrNotFound if the reader contains no BTF. +func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) { + file, err := internal.NewSafeELFFile(rd) + if err != nil { + return nil, err + } + defer file.Close() + + symbols, err := file.Symbols() + if err != nil { + return nil, fmt.Errorf("can't read symbols: %v", err) + } + + variableOffsets := make(map[variable]uint32) + for _, symbol := range symbols { + if idx := symbol.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE { + // Ignore things like SHN_ABS + continue + } + + if int(symbol.Section) >= len(file.Sections) { + return nil, fmt.Errorf("symbol %s: invalid section %d", symbol.Name, symbol.Section) + } + + secName := file.Sections[symbol.Section].Name + if symbol.Value > math.MaxUint32 { + return nil, fmt.Errorf("section %s: symbol %s: size exceeds maximum", secName, symbol.Name) + } + + variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value) + } + + return loadSpecFromELF(file, variableOffsets) +} + +func loadSpecFromELF(file *internal.SafeELFFile, variableOffsets map[variable]uint32) (*Spec, error) { + var ( + btfSection *elf.Section + btfExtSection *elf.Section + sectionSizes = make(map[string]uint32) + ) + + for _, sec := range file.Sections { + switch sec.Name { + case ".BTF": + btfSection = sec + case ".BTF.ext": + btfExtSection = sec + default: + if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS { + break + } + + if sec.Size > math.MaxUint32 { + return nil, fmt.Errorf("section %s exceeds maximum size", sec.Name) + } + + sectionSizes[sec.Name] = uint32(sec.Size) + } + } + + if btfSection == nil { + return nil, fmt.Errorf("btf: %w", ErrNotFound) + } + + spec, err := loadRawSpec(btfSection.Open(), file.ByteOrder, sectionSizes, variableOffsets) + if err != nil { + return nil, err + } + + if btfExtSection == nil { + return spec, nil + } + + spec.funcInfos, spec.lineInfos, spec.coreRelos, err = parseExtInfos(btfExtSection.Open(), file.ByteOrder, spec.strings) + if err != nil { + return nil, fmt.Errorf("can't read ext info: %w", err) + } + + return spec, nil +} + +// LoadRawSpec reads a blob of BTF data that isn't wrapped in an ELF file. +// +// Prefer using LoadSpecFromReader, since this function only supports a subset +// of BTF. +func LoadRawSpec(btf io.Reader, bo binary.ByteOrder) (*Spec, error) { + // This will return an error if we encounter a Datasec, since we can't fix + // it up. 
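+	// (Fixing up a Datasec needs the section sizes and variable offsets
+	// that only an enclosing ELF provides, hence the nil maps below.)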
+ return loadRawSpec(btf, bo, nil, nil) +} + +func loadRawSpec(btf io.Reader, bo binary.ByteOrder, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) (*Spec, error) { + rawTypes, rawStrings, err := parseBTF(btf, bo) + if err != nil { + return nil, err + } + + err = fixupDatasec(rawTypes, rawStrings, sectionSizes, variableOffsets) + if err != nil { + return nil, err + } + + types, typesByName, err := inflateRawTypes(rawTypes, rawStrings) + if err != nil { + return nil, err + } + + return &Spec{ + rawTypes: rawTypes, + namedTypes: typesByName, + types: types, + strings: rawStrings, + byteOrder: bo, + }, nil +} + +var kernelBTF struct { + sync.Mutex + *Spec +} + +// LoadKernelSpec returns the current kernel's BTF information. +// +// Requires a >= 5.5 kernel with CONFIG_DEBUG_INFO_BTF enabled. Returns +// ErrNotSupported if BTF is not enabled. +func LoadKernelSpec() (*Spec, error) { + kernelBTF.Lock() + defer kernelBTF.Unlock() + + if kernelBTF.Spec != nil { + return kernelBTF.Spec, nil + } + + var err error + kernelBTF.Spec, err = loadKernelSpec() + return kernelBTF.Spec, err +} + +func loadKernelSpec() (*Spec, error) { + release, err := unix.KernelRelease() + if err != nil { + return nil, fmt.Errorf("can't read kernel release number: %w", err) + } + + fh, err := os.Open("/sys/kernel/btf/vmlinux") + if err == nil { + defer fh.Close() + + return LoadRawSpec(fh, internal.NativeEndian) + } + + // use same list of locations as libbpf + // https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122 + locations := []string{ + "/boot/vmlinux-%s", + "/lib/modules/%s/vmlinux-%[1]s", + "/lib/modules/%s/build/vmlinux", + "/usr/lib/modules/%s/kernel/vmlinux", + "/usr/lib/debug/boot/vmlinux-%s", + "/usr/lib/debug/boot/vmlinux-%s.debug", + "/usr/lib/debug/lib/modules/%s/vmlinux", + } + + for _, loc := range locations { + path := fmt.Sprintf(loc, release) + + fh, err := os.Open(path) + if err != nil { + continue + } + defer fh.Close() + + file, err := internal.NewSafeELFFile(fh) + if err != nil { + return nil, err + } + defer file.Close() + + return loadSpecFromELF(file, nil) + } + + return nil, fmt.Errorf("no BTF for kernel version %s: %w", release, internal.ErrNotSupported) +} + +func parseBTF(btf io.Reader, bo binary.ByteOrder) ([]rawType, stringTable, error) { + rawBTF, err := io.ReadAll(btf) + if err != nil { + return nil, nil, fmt.Errorf("can't read BTF: %v", err) + } + + rd := bytes.NewReader(rawBTF) + + var header btfHeader + if err := binary.Read(rd, bo, &header); err != nil { + return nil, nil, fmt.Errorf("can't read header: %v", err) + } + + if header.Magic != btfMagic { + return nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic) + } + + if header.Version != 1 { + return nil, nil, fmt.Errorf("unexpected version %v", header.Version) + } + + if header.Flags != 0 { + return nil, nil, fmt.Errorf("unsupported flags %v", header.Flags) + } + + remainder := int64(header.HdrLen) - int64(binary.Size(&header)) + if remainder < 0 { + return nil, nil, errors.New("header is too short") + } + + if _, err := io.CopyN(internal.DiscardZeroes{}, rd, remainder); err != nil { + return nil, nil, fmt.Errorf("header padding: %v", err) + } + + if _, err := rd.Seek(int64(header.HdrLen+header.StringOff), io.SeekStart); err != nil { + return nil, nil, fmt.Errorf("can't seek to start of string section: %v", err) + } + + rawStrings, err := readStringTable(io.LimitReader(rd, int64(header.StringLen))) + if err != nil { + return nil, nil, fmt.Errorf("can't 
read type names: %w", err) + } + + if _, err := rd.Seek(int64(header.HdrLen+header.TypeOff), io.SeekStart); err != nil { + return nil, nil, fmt.Errorf("can't seek to start of type section: %v", err) + } + + rawTypes, err := readTypes(io.LimitReader(rd, int64(header.TypeLen)), bo) + if err != nil { + return nil, nil, fmt.Errorf("can't read types: %w", err) + } + + return rawTypes, rawStrings, nil +} + +type variable struct { + section string + name string +} + +func fixupDatasec(rawTypes []rawType, rawStrings stringTable, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) error { + for i, rawType := range rawTypes { + if rawType.Kind() != kindDatasec { + continue + } + + name, err := rawStrings.Lookup(rawType.NameOff) + if err != nil { + return err + } + + if name == ".kconfig" || name == ".ksyms" { + return fmt.Errorf("reference to %s: %w", name, ErrNotSupported) + } + + if rawTypes[i].SizeType != 0 { + continue + } + + size, ok := sectionSizes[name] + if !ok { + return fmt.Errorf("data section %s: missing size", name) + } + + rawTypes[i].SizeType = size + + secinfos := rawType.data.([]btfVarSecinfo) + for j, secInfo := range secinfos { + id := int(secInfo.Type - 1) + if id >= len(rawTypes) { + return fmt.Errorf("data section %s: invalid type id %d for variable %d", name, id, j) + } + + varName, err := rawStrings.Lookup(rawTypes[id].NameOff) + if err != nil { + return fmt.Errorf("data section %s: can't get name for type %d: %w", name, id, err) + } + + offset, ok := variableOffsets[variable{name, varName}] + if !ok { + return fmt.Errorf("data section %s: missing offset for variable %s", name, varName) + } + + secinfos[j].Offset = offset + } + } + + return nil +} + +// Copy creates a copy of Spec. +func (s *Spec) Copy() *Spec { + types, _ := copyTypes(s.types, nil) + namedTypes := make(map[string][]NamedType) + for _, typ := range types { + if named, ok := typ.(NamedType); ok { + name := essentialName(named.TypeName()) + namedTypes[name] = append(namedTypes[name], named) + } + } + + // NB: Other parts of spec are not copied since they are immutable. + return &Spec{ + s.rawTypes, + s.strings, + types, + namedTypes, + s.funcInfos, + s.lineInfos, + s.coreRelos, + s.byteOrder, + } +} + +type marshalOpts struct { + ByteOrder binary.ByteOrder + StripFuncLinkage bool +} + +func (s *Spec) marshal(opts marshalOpts) ([]byte, error) { + var ( + buf bytes.Buffer + header = new(btfHeader) + headerLen = binary.Size(header) + ) + + // Reserve space for the header. We have to write it last since + // we don't know the size of the type section yet. + _, _ = buf.Write(make([]byte, headerLen)) + + // Write type section, just after the header. + for _, raw := range s.rawTypes { + switch { + case opts.StripFuncLinkage && raw.Kind() == kindFunc: + raw.SetLinkage(StaticFunc) + } + + if err := raw.Marshal(&buf, opts.ByteOrder); err != nil { + return nil, fmt.Errorf("can't marshal BTF: %w", err) + } + } + + typeLen := uint32(buf.Len() - headerLen) + + // Write string section after type section. + _, _ = buf.Write(s.strings) + + // Fill out the header, and write it out. 
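+	// The finished blob is laid out as
+	//
+	//	btfHeader | type section | string section
+	//
+	// with TypeOff and StringOff measured from the end of the header,
+	// which is why TypeOff below is zero.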
+	header = &btfHeader{
+		Magic:     btfMagic,
+		Version:   1,
+		Flags:     0,
+		HdrLen:    uint32(headerLen),
+		TypeOff:   0,
+		TypeLen:   typeLen,
+		StringOff: typeLen,
+		StringLen: uint32(len(s.strings)),
+	}
+
+	raw := buf.Bytes()
+	err := binary.Write(sliceWriter(raw[:headerLen]), opts.ByteOrder, header)
+	if err != nil {
+		return nil, fmt.Errorf("can't write header: %v", err)
+	}
+
+	return raw, nil
+}
+
+type sliceWriter []byte
+
+func (sw sliceWriter) Write(p []byte) (int, error) {
+	if len(p) != len(sw) {
+		return 0, errors.New("size doesn't match")
+	}
+
+	return copy(sw, p), nil
+}
+
+// Program finds the BTF for a specific section.
+//
+// Length is the number of bytes in the raw BPF instruction stream.
+//
+// Returns an error which may wrap ErrNoExtendedInfo if the Spec doesn't
+// contain extended BTF info.
+func (s *Spec) Program(name string, length uint64) (*Program, error) {
+	if length == 0 {
+		return nil, errors.New("length mustn't be zero")
+	}
+
+	if s.funcInfos == nil && s.lineInfos == nil && s.coreRelos == nil {
+		return nil, fmt.Errorf("BTF for section %s: %w", name, ErrNoExtendedInfo)
+	}
+
+	funcInfos, funcOK := s.funcInfos[name]
+	lineInfos, lineOK := s.lineInfos[name]
+	relos, coreOK := s.coreRelos[name]
+
+	if !funcOK && !lineOK && !coreOK {
+		return nil, fmt.Errorf("no extended BTF info for section %s", name)
+	}
+
+	return &Program{s, length, funcInfos, lineInfos, relos}, nil
+}
+
+// FindType searches for a type with a specific name.
+//
+// Given T, a type that satisfies Type, typ must be a non-nil **T.
+// On success, the address of the found type will be copied into typ.
+//
+// Returns an error wrapping ErrNotFound if no matching
+// type exists in spec.
+func (s *Spec) FindType(name string, typ interface{}) error {
+	typValue := reflect.ValueOf(typ)
+	if typValue.Kind() != reflect.Ptr {
+		return fmt.Errorf("%T is not a pointer", typ)
+	}
+
+	typPtr := typValue.Elem()
+	if !typPtr.CanSet() {
+		return fmt.Errorf("%T cannot be set", typ)
+	}
+
+	wanted := typPtr.Type()
+	if !wanted.AssignableTo(reflect.TypeOf((*Type)(nil)).Elem()) {
+		return fmt.Errorf("%T does not satisfy Type interface", typ)
+	}
+
+	var candidate Type
+	for _, typ := range s.namedTypes[essentialName(name)] {
+		if reflect.TypeOf(typ) != wanted {
+			continue
+		}
+
+		// Match against the full name, not just the essential one.
+		if typ.TypeName() != name {
+			continue
+		}
+
+		if candidate != nil {
+			return fmt.Errorf("type %s: multiple candidates for %T", name, typ)
+		}
+
+		candidate = typ
+	}
+
+	if candidate == nil {
+		return fmt.Errorf("type %s: %w", name, ErrNotFound)
+	}
+
+	typPtr.Set(reflect.ValueOf(candidate))
+
+	return nil
+}
+
+// Handle is a reference to BTF loaded into the kernel.
+type Handle struct {
+	spec *Spec
+	fd   *internal.FD
+}
+
+// NewHandle loads BTF into the kernel.
+//
+// Returns ErrNotSupported if BTF is not supported.
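+//
+// An illustrative call site (error handling elided):
+//
+//	spec, _ := btf.LoadSpecFromReader(r)
+//	handle, err := btf.NewHandle(spec)
+//	if err == nil {
+//		defer handle.Close()
+//	}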
+func NewHandle(spec *Spec) (*Handle, error) { + if err := haveBTF(); err != nil { + return nil, err + } + + if spec.byteOrder != internal.NativeEndian { + return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian) + } + + btf, err := spec.marshal(marshalOpts{ + ByteOrder: internal.NativeEndian, + StripFuncLinkage: haveFuncLinkage() != nil, + }) + if err != nil { + return nil, fmt.Errorf("can't marshal BTF: %w", err) + } + + if uint64(len(btf)) > math.MaxUint32 { + return nil, errors.New("BTF exceeds the maximum size") + } + + attr := &bpfLoadBTFAttr{ + btf: internal.NewSlicePointer(btf), + btfSize: uint32(len(btf)), + } + + fd, err := bpfLoadBTF(attr) + if err != nil { + logBuf := make([]byte, 64*1024) + attr.logBuf = internal.NewSlicePointer(logBuf) + attr.btfLogSize = uint32(len(logBuf)) + attr.btfLogLevel = 1 + _, logErr := bpfLoadBTF(attr) + return nil, internal.ErrorWithLog(err, logBuf, logErr) + } + + return &Handle{spec.Copy(), fd}, nil +} + +// NewHandleFromID returns the BTF handle for a given id. +// +// Returns ErrNotExist, if there is no BTF with the given id. +// +// Requires CAP_SYS_ADMIN. +func NewHandleFromID(id ID) (*Handle, error) { + fd, err := internal.BPFObjGetFDByID(internal.BPF_BTF_GET_FD_BY_ID, uint32(id)) + if err != nil { + return nil, fmt.Errorf("get BTF by id: %w", err) + } + + info, err := newInfoFromFd(fd) + if err != nil { + _ = fd.Close() + return nil, fmt.Errorf("get BTF spec for handle: %w", err) + } + + return &Handle{info.BTF, fd}, nil +} + +// Spec returns the Spec that defined the BTF loaded into the kernel. +func (h *Handle) Spec() *Spec { + return h.spec +} + +// Close destroys the handle. +// +// Subsequent calls to FD will return an invalid value. +func (h *Handle) Close() error { + return h.fd.Close() +} + +// FD returns the file descriptor for the handle. +func (h *Handle) FD() int { + value, err := h.fd.Value() + if err != nil { + return -1 + } + + return int(value) +} + +// Map is the BTF for a map. +type Map struct { + Spec *Spec + Key, Value Type +} + +// Program is the BTF information for a stream of instructions. +type Program struct { + spec *Spec + length uint64 + funcInfos, lineInfos extInfo + coreRelos coreRelos +} + +// Spec returns the BTF spec of this program. +func (p *Program) Spec() *Spec { + return p.spec +} + +// Append the information from other to the Program. +func (p *Program) Append(other *Program) error { + if other.spec != p.spec { + return fmt.Errorf("can't append program with different BTF specs") + } + + funcInfos, err := p.funcInfos.append(other.funcInfos, p.length) + if err != nil { + return fmt.Errorf("func infos: %w", err) + } + + lineInfos, err := p.lineInfos.append(other.lineInfos, p.length) + if err != nil { + return fmt.Errorf("line infos: %w", err) + } + + p.funcInfos = funcInfos + p.lineInfos = lineInfos + p.coreRelos = p.coreRelos.append(other.coreRelos, p.length) + p.length += other.length + return nil +} + +// FuncInfos returns the binary form of BTF function infos. +func (p *Program) FuncInfos() (recordSize uint32, bytes []byte, err error) { + bytes, err = p.funcInfos.MarshalBinary() + if err != nil { + return 0, nil, fmt.Errorf("func infos: %w", err) + } + + return p.funcInfos.recordSize, bytes, nil +} + +// LineInfos returns the binary form of BTF line infos. 
+func (p *Program) LineInfos() (recordSize uint32, bytes []byte, err error) { + bytes, err = p.lineInfos.MarshalBinary() + if err != nil { + return 0, nil, fmt.Errorf("line infos: %w", err) + } + + return p.lineInfos.recordSize, bytes, nil +} + +// Fixups returns the changes required to adjust the program to the target. +// +// Passing a nil target will relocate against the running kernel. +func (p *Program) Fixups(target *Spec) (COREFixups, error) { + if len(p.coreRelos) == 0 { + return nil, nil + } + + if target == nil { + var err error + target, err = LoadKernelSpec() + if err != nil { + return nil, err + } + } + + return coreRelocate(p.spec, target, p.coreRelos) +} + +type bpfLoadBTFAttr struct { + btf internal.Pointer + logBuf internal.Pointer + btfSize uint32 + btfLogSize uint32 + btfLogLevel uint32 +} + +func bpfLoadBTF(attr *bpfLoadBTFAttr) (*internal.FD, error) { + fd, err := internal.BPF(internal.BPF_BTF_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + + return internal.NewFD(uint32(fd)), nil +} + +func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte { + const minHeaderLength = 24 + + typesLen := uint32(binary.Size(types)) + header := btfHeader{ + Magic: btfMagic, + Version: 1, + HdrLen: minHeaderLength, + TypeOff: 0, + TypeLen: typesLen, + StringOff: typesLen, + StringLen: uint32(len(strings)), + } + + buf := new(bytes.Buffer) + _ = binary.Write(buf, bo, &header) + _ = binary.Write(buf, bo, types) + buf.Write(strings) + + return buf.Bytes() +} + +var haveBTF = internal.FeatureTest("BTF", "5.1", func() error { + var ( + types struct { + Integer btfType + Var btfType + btfVar struct{ Linkage uint32 } + } + strings = []byte{0, 'a', 0} + ) + + // We use a BTF_KIND_VAR here, to make sure that + // the kernel understands BTF at least as well as we + // do. BTF_KIND_VAR was introduced ~5.1. + types.Integer.SetKind(kindPointer) + types.Var.NameOff = 1 + types.Var.SetKind(kindVar) + types.Var.SizeType = 1 + + btf := marshalBTF(&types, strings, internal.NativeEndian) + + fd, err := bpfLoadBTF(&bpfLoadBTFAttr{ + btf: internal.NewSlicePointer(btf), + btfSize: uint32(len(btf)), + }) + if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) { + // Treat both EINVAL and EPERM as not supported: loading the program + // might still succeed without BTF. 
+ return internal.ErrNotSupported + } + if err != nil { + return err + } + + fd.Close() + return nil +}) + +var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() error { + if err := haveBTF(); err != nil { + return err + } + + var ( + types struct { + FuncProto btfType + Func btfType + } + strings = []byte{0, 'a', 0} + ) + + types.FuncProto.SetKind(kindFuncProto) + types.Func.SetKind(kindFunc) + types.Func.SizeType = 1 // aka FuncProto + types.Func.NameOff = 1 + types.Func.SetLinkage(GlobalFunc) + + btf := marshalBTF(&types, strings, internal.NativeEndian) + + fd, err := bpfLoadBTF(&bpfLoadBTFAttr{ + btf: internal.NewSlicePointer(btf), + btfSize: uint32(len(btf)), + }) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + if err != nil { + return err + } + + fd.Close() + return nil +}) diff --git a/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go b/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go new file mode 100644 index 0000000000..d98c73ca59 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go @@ -0,0 +1,287 @@ +package btf + +import ( + "encoding/binary" + "fmt" + "io" +) + +//go:generate stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage + +// btfKind describes a Type. +type btfKind uint8 + +// Equivalents of the BTF_KIND_* constants. +const ( + kindUnknown btfKind = iota + kindInt + kindPointer + kindArray + kindStruct + kindUnion + kindEnum + kindForward + kindTypedef + kindVolatile + kindConst + kindRestrict + // Added ~4.20 + kindFunc + kindFuncProto + // Added ~5.1 + kindVar + kindDatasec + // Added ~5.13 + kindFloat +) + +// FuncLinkage describes BTF function linkage metadata. +type FuncLinkage int + +// Equivalent of enum btf_func_linkage. +const ( + StaticFunc FuncLinkage = iota // static + GlobalFunc // global + ExternFunc // extern +) + +// VarLinkage describes BTF variable linkage metadata. +type VarLinkage int + +const ( + StaticVar VarLinkage = iota // static + GlobalVar // global + ExternVar // extern +) + +const ( + btfTypeKindShift = 24 + btfTypeKindLen = 5 + btfTypeVlenShift = 0 + btfTypeVlenMask = 16 + btfTypeKindFlagShift = 31 + btfTypeKindFlagMask = 1 +) + +// btfType is equivalent to struct btf_type in Documentation/bpf/btf.rst. +type btfType struct { + NameOff uint32 + /* "info" bits arrangement + * bits 0-15: vlen (e.g. # of struct's members), linkage + * bits 16-23: unused + * bits 24-28: kind (e.g. int, ptr, array...etc) + * bits 29-30: unused + * bit 31: kind_flag, currently used by + * struct, union and fwd + */ + Info uint32 + /* "size" is used by INT, ENUM, STRUCT and UNION. + * "size" tells the size of the type it is describing. + * + * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, + * FUNC and FUNC_PROTO. + * "type" is a type_id referring to another type. 
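+	 *
+	 * Example: Info = 0x0d000002 decodes to kind 13 (FUNC_PROTO)
+	 * with vlen 2, i.e. a function prototype with two parameters.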
+ */ + SizeType uint32 +} + +func (k btfKind) String() string { + switch k { + case kindUnknown: + return "Unknown" + case kindInt: + return "Integer" + case kindPointer: + return "Pointer" + case kindArray: + return "Array" + case kindStruct: + return "Struct" + case kindUnion: + return "Union" + case kindEnum: + return "Enumeration" + case kindForward: + return "Forward" + case kindTypedef: + return "Typedef" + case kindVolatile: + return "Volatile" + case kindConst: + return "Const" + case kindRestrict: + return "Restrict" + case kindFunc: + return "Function" + case kindFuncProto: + return "Function Proto" + case kindVar: + return "Variable" + case kindDatasec: + return "Section" + case kindFloat: + return "Float" + default: + return fmt.Sprintf("Unknown (%d)", k) + } +} + +func mask(len uint32) uint32 { + return (1 << len) - 1 +} + +func (bt *btfType) info(len, shift uint32) uint32 { + return (bt.Info >> shift) & mask(len) +} + +func (bt *btfType) setInfo(value, len, shift uint32) { + bt.Info &^= mask(len) << shift + bt.Info |= (value & mask(len)) << shift +} + +func (bt *btfType) Kind() btfKind { + return btfKind(bt.info(btfTypeKindLen, btfTypeKindShift)) +} + +func (bt *btfType) SetKind(kind btfKind) { + bt.setInfo(uint32(kind), btfTypeKindLen, btfTypeKindShift) +} + +func (bt *btfType) Vlen() int { + return int(bt.info(btfTypeVlenMask, btfTypeVlenShift)) +} + +func (bt *btfType) SetVlen(vlen int) { + bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift) +} + +func (bt *btfType) KindFlag() bool { + return bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift) == 1 +} + +func (bt *btfType) Linkage() FuncLinkage { + return FuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift)) +} + +func (bt *btfType) SetLinkage(linkage FuncLinkage) { + bt.setInfo(uint32(linkage), btfTypeVlenMask, btfTypeVlenShift) +} + +func (bt *btfType) Type() TypeID { + // TODO: Panic here if wrong kind? + return TypeID(bt.SizeType) +} + +func (bt *btfType) Size() uint32 { + // TODO: Panic here if wrong kind? 
+ return bt.SizeType +} + +type rawType struct { + btfType + data interface{} +} + +func (rt *rawType) Marshal(w io.Writer, bo binary.ByteOrder) error { + if err := binary.Write(w, bo, &rt.btfType); err != nil { + return err + } + + if rt.data == nil { + return nil + } + + return binary.Write(w, bo, rt.data) +} + +type btfArray struct { + Type TypeID + IndexType TypeID + Nelems uint32 +} + +type btfMember struct { + NameOff uint32 + Type TypeID + Offset uint32 +} + +type btfVarSecinfo struct { + Type TypeID + Offset uint32 + Size uint32 +} + +type btfVariable struct { + Linkage uint32 +} + +type btfEnum struct { + NameOff uint32 + Val int32 +} + +type btfParam struct { + NameOff uint32 + Type TypeID +} + +func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) { + var ( + header btfType + types []rawType + ) + + for id := TypeID(1); ; id++ { + if err := binary.Read(r, bo, &header); err == io.EOF { + return types, nil + } else if err != nil { + return nil, fmt.Errorf("can't read type info for id %v: %v", id, err) + } + + var data interface{} + switch header.Kind() { + case kindInt: + data = new(uint32) + case kindPointer: + case kindArray: + data = new(btfArray) + case kindStruct: + fallthrough + case kindUnion: + data = make([]btfMember, header.Vlen()) + case kindEnum: + data = make([]btfEnum, header.Vlen()) + case kindForward: + case kindTypedef: + case kindVolatile: + case kindConst: + case kindRestrict: + case kindFunc: + case kindFuncProto: + data = make([]btfParam, header.Vlen()) + case kindVar: + data = new(btfVariable) + case kindDatasec: + data = make([]btfVarSecinfo, header.Vlen()) + case kindFloat: + default: + return nil, fmt.Errorf("type id %v: unknown kind: %v", id, header.Kind()) + } + + if data == nil { + types = append(types, rawType{header, nil}) + continue + } + + if err := binary.Read(r, bo, data); err != nil { + return nil, fmt.Errorf("type id %d: kind %v: can't read %T: %v", id, header.Kind(), data, err) + } + + types = append(types, rawType{header, data}) + } +} + +func intEncoding(raw uint32) (IntEncoding, uint32, byte) { + return IntEncoding((raw & 0x0f000000) >> 24), (raw & 0x00ff0000) >> 16, byte(raw & 0x000000ff) +} diff --git a/vendor/github.com/cilium/ebpf/internal/btf/btf_types_string.go b/vendor/github.com/cilium/ebpf/internal/btf/btf_types_string.go new file mode 100644 index 0000000000..0e0c17d68b --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/btf/btf_types_string.go @@ -0,0 +1,44 @@ +// Code generated by "stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage"; DO NOT EDIT. + +package btf + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[StaticFunc-0] + _ = x[GlobalFunc-1] + _ = x[ExternFunc-2] +} + +const _FuncLinkage_name = "staticglobalextern" + +var _FuncLinkage_index = [...]uint8{0, 6, 12, 18} + +func (i FuncLinkage) String() string { + if i < 0 || i >= FuncLinkage(len(_FuncLinkage_index)-1) { + return "FuncLinkage(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _FuncLinkage_name[_FuncLinkage_index[i]:_FuncLinkage_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[StaticVar-0] + _ = x[GlobalVar-1] + _ = x[ExternVar-2] +} + +const _VarLinkage_name = "staticglobalextern" + +var _VarLinkage_index = [...]uint8{0, 6, 12, 18} + +func (i VarLinkage) String() string { + if i < 0 || i >= VarLinkage(len(_VarLinkage_index)-1) { + return "VarLinkage(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _VarLinkage_name[_VarLinkage_index[i]:_VarLinkage_index[i+1]] +} diff --git a/vendor/github.com/cilium/ebpf/internal/btf/core.go b/vendor/github.com/cilium/ebpf/internal/btf/core.go new file mode 100644 index 0000000000..d02df9d50b --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/btf/core.go @@ -0,0 +1,888 @@ +package btf + +import ( + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/cilium/ebpf/asm" +) + +// Code in this file is derived from libbpf, which is available under a BSD +// 2-Clause license. + +// COREFixup is the result of computing a CO-RE relocation for a target. +type COREFixup struct { + Kind COREKind + Local uint32 + Target uint32 + Poison bool +} + +func (f COREFixup) equal(other COREFixup) bool { + return f.Local == other.Local && f.Target == other.Target +} + +func (f COREFixup) String() string { + if f.Poison { + return fmt.Sprintf("%s=poison", f.Kind) + } + return fmt.Sprintf("%s=%d->%d", f.Kind, f.Local, f.Target) +} + +func (f COREFixup) apply(ins *asm.Instruction) error { + if f.Poison { + return errors.New("can't poison individual instruction") + } + + switch class := ins.OpCode.Class(); class { + case asm.LdXClass, asm.StClass, asm.StXClass: + if want := int16(f.Local); want != ins.Offset { + return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, want) + } + + if f.Target > math.MaxInt16 { + return fmt.Errorf("offset %d exceeds MaxInt16", f.Target) + } + + ins.Offset = int16(f.Target) + + case asm.LdClass: + if !ins.IsConstantLoad(asm.DWord) { + return fmt.Errorf("not a dword-sized immediate load") + } + + if want := int64(f.Local); want != ins.Constant { + return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want) + } + + ins.Constant = int64(f.Target) + + case asm.ALUClass: + if ins.OpCode.ALUOp() == asm.Swap { + return fmt.Errorf("relocation against swap") + } + + fallthrough + + case asm.ALU64Class: + if src := ins.OpCode.Source(); src != asm.ImmSource { + return fmt.Errorf("invalid source %s", src) + } + + if want := int64(f.Local); want != ins.Constant { + return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want) + } + + if f.Target > math.MaxInt32 { + return fmt.Errorf("immediate %d exceeds MaxInt32", f.Target) + } + + ins.Constant = int64(f.Target) + + default: + return fmt.Errorf("invalid class %s", class) + } + + return nil +} + +func (f COREFixup) isNonExistant() bool { + return f.Kind.checksForExistence() && f.Target == 0 +} + +type COREFixups map[uint64]COREFixup + +// Apply a set of CO-RE relocations to a BPF program. 
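+//
+// The returned instruction stream is a copy; the input is left unmodified.
+// A minimal usage sketch (fixups and insns are hypothetical values, the
+// latter produced by an ELF loader, and are not defined in this file):
+//
+//	fixed, err := fixups.Apply(insns)
+//	if err != nil {
+//		return err
+//	}
+//	// load `fixed` instead of the original instructions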
+func (fs COREFixups) Apply(insns asm.Instructions) (asm.Instructions, error) {
+	if len(fs) == 0 {
+		cpy := make(asm.Instructions, len(insns))
+		copy(cpy, insns)
+		return cpy, nil
+	}
+
+	cpy := make(asm.Instructions, 0, len(insns))
+	iter := insns.Iterate()
+	for iter.Next() {
+		fixup, ok := fs[iter.Offset.Bytes()]
+		if !ok {
+			cpy = append(cpy, *iter.Ins)
+			continue
+		}
+
+		ins := *iter.Ins
+		if fixup.Poison {
+			const badRelo = asm.BuiltinFunc(0xbad2310)
+
+			cpy = append(cpy, badRelo.Call())
+			if ins.OpCode.IsDWordLoad() {
+				// 64 bit constant loads occupy two raw bpf instructions, so
+				// we need to add another instruction as padding.
+				cpy = append(cpy, badRelo.Call())
+			}
+
+			continue
+		}
+
+		if err := fixup.apply(&ins); err != nil {
+			return nil, fmt.Errorf("instruction %d, offset %d: %s: %w", iter.Index, iter.Offset.Bytes(), fixup.Kind, err)
+		}
+
+		cpy = append(cpy, ins)
+	}
+
+	return cpy, nil
+}
+
+// COREKind is the type of CO-RE relocation.
+type COREKind uint32
+
+const (
+	reloFieldByteOffset COREKind = iota /* field byte offset */
+	reloFieldByteSize                   /* field size in bytes */
+	reloFieldExists                     /* field existence in target kernel */
+	reloFieldSigned                     /* field signedness (0 - unsigned, 1 - signed) */
+	reloFieldLShiftU64                  /* bitfield-specific left bitshift */
+	reloFieldRShiftU64                  /* bitfield-specific right bitshift */
+	reloTypeIDLocal                     /* type ID in local BPF object */
+	reloTypeIDTarget                    /* type ID in target kernel */
+	reloTypeExists                      /* type existence in target kernel */
+	reloTypeSize                        /* type size in bytes */
+	reloEnumvalExists                   /* enum value existence in target kernel */
+	reloEnumvalValue                    /* enum value integer value */
+)
+
+func (k COREKind) String() string {
+	switch k {
+	case reloFieldByteOffset:
+		return "byte_off"
+	case reloFieldByteSize:
+		return "byte_sz"
+	case reloFieldExists:
+		return "field_exists"
+	case reloFieldSigned:
+		return "signed"
+	case reloFieldLShiftU64:
+		return "lshift_u64"
+	case reloFieldRShiftU64:
+		return "rshift_u64"
+	case reloTypeIDLocal:
+		return "local_type_id"
+	case reloTypeIDTarget:
+		return "target_type_id"
+	case reloTypeExists:
+		return "type_exists"
+	case reloTypeSize:
+		return "type_size"
+	case reloEnumvalExists:
+		return "enumval_exists"
+	case reloEnumvalValue:
+		return "enumval_value"
+	default:
+		return "unknown"
+	}
+}
+
+func (k COREKind) checksForExistence() bool {
+	return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists
+}
+
+func coreRelocate(local, target *Spec, relos coreRelos) (COREFixups, error) {
+	if local.byteOrder != target.byteOrder {
+		return nil, fmt.Errorf("can't relocate %s against %s", local.byteOrder, target.byteOrder)
+	}
+
+	var ids []TypeID
+	relosByID := make(map[TypeID]coreRelos)
+	result := make(COREFixups, len(relos))
+	for _, relo := range relos {
+		if relo.kind == reloTypeIDLocal {
+			// Filtering out reloTypeIDLocal here makes our lives a lot easier
+			// down the line, since it doesn't have a target at all.
+			if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
+				return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
+			}
+
+			result[uint64(relo.insnOff)] = COREFixup{
+				relo.kind,
+				uint32(relo.typeID),
+				uint32(relo.typeID),
+				false,
+			}
+			continue
+		}
+
+		relos, ok := relosByID[relo.typeID]
+		if !ok {
+			ids = append(ids, relo.typeID)
+		}
+		relosByID[relo.typeID] = append(relos, relo)
+	}
+
+	// Ensure we work on relocations in a deterministic order.
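+	// Go's map iteration order is randomized, so sorting the collected type
+	// IDs keeps fixups and error messages reproducible across runs.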
+ sort.Slice(ids, func(i, j int) bool { + return ids[i] < ids[j] + }) + + for _, id := range ids { + if int(id) >= len(local.types) { + return nil, fmt.Errorf("invalid type id %d", id) + } + + localType := local.types[id] + named, ok := localType.(NamedType) + if !ok || named.TypeName() == "" { + return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported) + } + + relos := relosByID[id] + targets := target.namedTypes[essentialName(named.TypeName())] + fixups, err := coreCalculateFixups(localType, targets, relos) + if err != nil { + return nil, fmt.Errorf("relocate %s: %w", localType, err) + } + + for i, relo := range relos { + result[uint64(relo.insnOff)] = fixups[i] + } + } + + return result, nil +} + +var errAmbiguousRelocation = errors.New("ambiguous relocation") +var errImpossibleRelocation = errors.New("impossible relocation") + +// coreCalculateFixups calculates the fixups for the given relocations using +// the "best" target. +// +// The best target is determined by scoring: the less poisoning we have to do +// the better the target is. +func coreCalculateFixups(local Type, targets []NamedType, relos coreRelos) ([]COREFixup, error) { + localID := local.ID() + local, err := copyType(local, skipQualifierAndTypedef) + if err != nil { + return nil, err + } + + bestScore := len(relos) + var bestFixups []COREFixup + for i := range targets { + targetID := targets[i].ID() + target, err := copyType(targets[i], skipQualifierAndTypedef) + if err != nil { + return nil, err + } + + score := 0 // lower is better + fixups := make([]COREFixup, 0, len(relos)) + for _, relo := range relos { + fixup, err := coreCalculateFixup(local, localID, target, targetID, relo) + if err != nil { + return nil, fmt.Errorf("target %s: %w", target, err) + } + if fixup.Poison || fixup.isNonExistant() { + score++ + } + fixups = append(fixups, fixup) + } + + if score > bestScore { + // We have a better target already, ignore this one. + continue + } + + if score < bestScore { + // This is the best target yet, use it. + bestScore = score + bestFixups = fixups + continue + } + + // Some other target has the same score as the current one. Make sure + // the fixups agree with each other. + for i, fixup := range bestFixups { + if !fixup.equal(fixups[i]) { + return nil, fmt.Errorf("%s: multiple types match: %w", fixup.Kind, errAmbiguousRelocation) + } + } + } + + if bestFixups == nil { + // Nothing at all matched, probably because there are no suitable + // targets at all. Poison everything! + bestFixups = make([]COREFixup, len(relos)) + for i, relo := range relos { + bestFixups[i] = COREFixup{Kind: relo.kind, Poison: true} + } + } + + return bestFixups, nil +} + +// coreCalculateFixup calculates the fixup for a single local type, target type +// and relocation. 
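+//
+// Existence relocations (reloTypeExists, reloFieldExists, reloEnumvalExists)
+// report a missing target through a 1/0 fixup value, while all other kinds
+// are poisoned when the target can't satisfy them.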
+func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID, relo coreRelo) (COREFixup, error) { + fixup := func(local, target uint32) (COREFixup, error) { + return COREFixup{relo.kind, local, target, false}, nil + } + poison := func() (COREFixup, error) { + if relo.kind.checksForExistence() { + return fixup(1, 0) + } + return COREFixup{relo.kind, 0, 0, true}, nil + } + zero := COREFixup{} + + switch relo.kind { + case reloTypeIDTarget, reloTypeSize, reloTypeExists: + if len(relo.accessor) > 1 || relo.accessor[0] != 0 { + return zero, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor) + } + + err := coreAreTypesCompatible(local, target) + if errors.Is(err, errImpossibleRelocation) { + return poison() + } + if err != nil { + return zero, fmt.Errorf("relocation %s: %w", relo.kind, err) + } + + switch relo.kind { + case reloTypeExists: + return fixup(1, 1) + + case reloTypeIDTarget: + return fixup(uint32(localID), uint32(targetID)) + + case reloTypeSize: + localSize, err := Sizeof(local) + if err != nil { + return zero, err + } + + targetSize, err := Sizeof(target) + if err != nil { + return zero, err + } + + return fixup(uint32(localSize), uint32(targetSize)) + } + + case reloEnumvalValue, reloEnumvalExists: + localValue, targetValue, err := coreFindEnumValue(local, relo.accessor, target) + if errors.Is(err, errImpossibleRelocation) { + return poison() + } + if err != nil { + return zero, fmt.Errorf("relocation %s: %w", relo.kind, err) + } + + switch relo.kind { + case reloEnumvalExists: + return fixup(1, 1) + + case reloEnumvalValue: + return fixup(uint32(localValue.Value), uint32(targetValue.Value)) + } + + case reloFieldByteOffset, reloFieldByteSize, reloFieldExists: + if _, ok := target.(*Fwd); ok { + // We can't relocate fields using a forward declaration, so + // skip it. If a non-forward declaration is present in the BTF + // we'll find it in one of the other iterations. + return poison() + } + + localField, targetField, err := coreFindField(local, relo.accessor, target) + if errors.Is(err, errImpossibleRelocation) { + return poison() + } + if err != nil { + return zero, fmt.Errorf("target %s: %w", target, err) + } + + switch relo.kind { + case reloFieldExists: + return fixup(1, 1) + + case reloFieldByteOffset: + return fixup(localField.offset/8, targetField.offset/8) + + case reloFieldByteSize: + localSize, err := Sizeof(localField.Type) + if err != nil { + return zero, err + } + + targetSize, err := Sizeof(targetField.Type) + if err != nil { + return zero, err + } + + return fixup(uint32(localSize), uint32(targetSize)) + + } + } + + return zero, fmt.Errorf("relocation %s: %w", relo.kind, ErrNotSupported) +} + +/* coreAccessor contains a path through a struct. It contains at least one index. + * + * The interpretation depends on the kind of the relocation. The following is + * taken from struct bpf_core_relo in libbpf_internal.h: + * + * - for field-based relocations, string encodes an accessed field using + * a sequence of field and array indices, separated by colon (:). It's + * conceptually very close to LLVM's getelementptr ([0]) instruction's + * arguments for identifying offset to a field. + * - for type-based relocations, strings is expected to be just "0"; + * - for enum value-based relocations, string contains an index of enum + * value within its enum type; + * + * Example to provide a better feel. 
+ *
+ *	struct sample {
+ *	    int a;
+ *	    struct {
+ *	        int b[10];
+ *	    };
+ *	};
+ *
+ *	struct sample *s = ...;
+ *	int x = &s->a;     // encoded as "0:0" (a is field #0)
+ *	int y = &s->b[5];  // encoded as "0:1:0:5" (anon struct is field #1,
+ *	                   // b is field #0 inside anon struct, accessing elem #5)
+ *	int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
+ */
+type coreAccessor []int
+
+func parseCoreAccessor(accessor string) (coreAccessor, error) {
+	if accessor == "" {
+		return nil, fmt.Errorf("empty accessor")
+	}
+
+	parts := strings.Split(accessor, ":")
+	result := make(coreAccessor, 0, len(parts))
+	for _, part := range parts {
+		// 31 bits to avoid overflowing int on 32 bit platforms.
+		index, err := strconv.ParseUint(part, 10, 31)
+		if err != nil {
+			return nil, fmt.Errorf("accessor index %q: %s", part, err)
+		}
+
+		result = append(result, int(index))
+	}
+
+	return result, nil
+}
+
+func (ca coreAccessor) String() string {
+	strs := make([]string, 0, len(ca))
+	for _, i := range ca {
+		strs = append(strs, strconv.Itoa(i))
+	}
+	return strings.Join(strs, ":")
+}
+
+func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
+	e, ok := t.(*Enum)
+	if !ok {
+		return nil, fmt.Errorf("not an enum: %s", t)
+	}
+
+	if len(ca) > 1 {
+		return nil, fmt.Errorf("invalid accessor %s for enum", ca)
+	}
+
+	i := ca[0]
+	if i >= len(e.Values) {
+		return nil, fmt.Errorf("invalid index %d for %s", i, e)
+	}
+
+	return &e.Values[i], nil
+}
+
+type coreField struct {
+	Type   Type
+	offset uint32
+}
+
+func adjustOffset(base uint32, t Type, n int) (uint32, error) {
+	size, err := Sizeof(t)
+	if err != nil {
+		return 0, err
+	}
+
+	return base + (uint32(n) * uint32(size) * 8), nil
+}
+
+// coreFindField descends into the local type using the accessor and tries to
+// find an equivalent field in target at each step.
+//
+// Returns the field and the offset of the field from the start of
+// target in bits.
+func coreFindField(local Type, localAcc coreAccessor, target Type) (coreField, coreField, error) {
+	// The first index is used to offset a pointer of the base type like
+	// when accessing an array.
+	localOffset, err := adjustOffset(0, local, localAcc[0])
+	if err != nil {
+		return coreField{}, coreField{}, err
+	}
+
+	targetOffset, err := adjustOffset(0, target, localAcc[0])
+	if err != nil {
+		return coreField{}, coreField{}, err
+	}
+
+	if err := coreAreMembersCompatible(local, target); err != nil {
+		return coreField{}, coreField{}, fmt.Errorf("fields: %w", err)
+	}
+
+	var localMaybeFlex, targetMaybeFlex bool
+	for _, acc := range localAcc[1:] {
+		switch localType := local.(type) {
+		case composite:
+			// For composite types acc is used to find the field in the local type,
+			// and then we try to find a field in target with the same name.
+			localMembers := localType.members()
+			if acc >= len(localMembers) {
+				return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, local)
+			}
+
+			localMember := localMembers[acc]
+			if localMember.Name == "" {
+				_, ok := localMember.Type.(composite)
+				if !ok {
+					return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported)
+				}
+
+				// This is an anonymous struct or union, ignore it.
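+				// Descend into the anonymous member: only accumulate its
+				// offset here and resolve the next accessor index against
+				// its members. coreFindMember below handles anonymous
+				// members on the target side.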
+ local = localMember.Type + localOffset += localMember.OffsetBits + localMaybeFlex = false + continue + } + + targetType, ok := target.(composite) + if !ok { + return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation) + } + + targetMember, last, err := coreFindMember(targetType, localMember.Name) + if err != nil { + return coreField{}, coreField{}, err + } + + if targetMember.BitfieldSize > 0 { + return coreField{}, coreField{}, fmt.Errorf("field %q is a bitfield: %w", targetMember.Name, ErrNotSupported) + } + + local = localMember.Type + localMaybeFlex = acc == len(localMembers)-1 + localOffset += localMember.OffsetBits + target = targetMember.Type + targetMaybeFlex = last + targetOffset += targetMember.OffsetBits + + case *Array: + // For arrays, acc is the index in the target. + targetType, ok := target.(*Array) + if !ok { + return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation) + } + + if localType.Nelems == 0 && !localMaybeFlex { + return coreField{}, coreField{}, fmt.Errorf("local type has invalid flexible array") + } + if targetType.Nelems == 0 && !targetMaybeFlex { + return coreField{}, coreField{}, fmt.Errorf("target type has invalid flexible array") + } + + if localType.Nelems > 0 && acc >= int(localType.Nelems) { + return coreField{}, coreField{}, fmt.Errorf("invalid access of %s at index %d", localType, acc) + } + if targetType.Nelems > 0 && acc >= int(targetType.Nelems) { + return coreField{}, coreField{}, fmt.Errorf("out of bounds access of target: %w", errImpossibleRelocation) + } + + local = localType.Type + localMaybeFlex = false + localOffset, err = adjustOffset(localOffset, local, acc) + if err != nil { + return coreField{}, coreField{}, err + } + + target = targetType.Type + targetMaybeFlex = false + targetOffset, err = adjustOffset(targetOffset, target, acc) + if err != nil { + return coreField{}, coreField{}, err + } + + default: + return coreField{}, coreField{}, fmt.Errorf("relocate field of %T: %w", localType, ErrNotSupported) + } + + if err := coreAreMembersCompatible(local, target); err != nil { + return coreField{}, coreField{}, err + } + } + + return coreField{local, localOffset}, coreField{target, targetOffset}, nil +} + +// coreFindMember finds a member in a composite type while handling anonymous +// structs and unions. +func coreFindMember(typ composite, name string) (Member, bool, error) { + if name == "" { + return Member{}, false, errors.New("can't search for anonymous member") + } + + type offsetTarget struct { + composite + offset uint32 + } + + targets := []offsetTarget{{typ, 0}} + visited := make(map[composite]bool) + + for i := 0; i < len(targets); i++ { + target := targets[i] + + // Only visit targets once to prevent infinite recursion. + if visited[target] { + continue + } + if len(visited) >= maxTypeDepth { + // This check is different than libbpf, which restricts the entire + // path to BPF_CORE_SPEC_MAX_LEN items. + return Member{}, false, fmt.Errorf("type is nested too deep") + } + visited[target] = true + + members := target.members() + for j, member := range members { + if member.Name == name { + // NB: This is safe because member is a copy. + member.OffsetBits += target.offset + return member, j == len(members)-1, nil + } + + // The names don't match, but this member could be an anonymous struct + // or union. 
+ if member.Name != "" { + continue + } + + comp, ok := member.Type.(composite) + if !ok { + return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type) + } + + targets = append(targets, offsetTarget{comp, target.offset + member.OffsetBits}) + } + } + + return Member{}, false, fmt.Errorf("no matching member: %w", errImpossibleRelocation) +} + +// coreFindEnumValue follows localAcc to find the equivalent enum value in target. +func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localValue, targetValue *EnumValue, _ error) { + localValue, err := localAcc.enumValue(local) + if err != nil { + return nil, nil, err + } + + targetEnum, ok := target.(*Enum) + if !ok { + return nil, nil, errImpossibleRelocation + } + + localName := essentialName(localValue.Name) + for i, targetValue := range targetEnum.Values { + if essentialName(targetValue.Name) != localName { + continue + } + + return localValue, &targetEnum.Values[i], nil + } + + return nil, nil, errImpossibleRelocation +} + +/* The comment below is from bpf_core_types_are_compat in libbpf.c: + * + * Check local and target types for compatibility. This check is used for + * type-based CO-RE relocations and follow slightly different rules than + * field-based relocations. This function assumes that root types were already + * checked for name match. Beyond that initial root-level name check, names + * are completely ignored. Compatibility rules are as follows: + * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but + * kind should match for local and target types (i.e., STRUCT is not + * compatible with UNION); + * - for ENUMs, the size is ignored; + * - for INT, size and signedness are ignored; + * - for ARRAY, dimensionality is ignored, element types are checked for + * compatibility recursively; + * - CONST/VOLATILE/RESTRICT modifiers are ignored; + * - TYPEDEFs/PTRs are compatible if types they pointing to are compatible; + * - FUNC_PROTOs are compatible if they have compatible signature: same + * number of input args and compatible return and argument types. + * These rules are not set in stone and probably will be adjusted as we get + * more experience with using BPF CO-RE relocations. + * + * Returns errImpossibleRelocation if types are not compatible. 
+ */ +func coreAreTypesCompatible(localType Type, targetType Type) error { + var ( + localTs, targetTs typeDeque + l, t = &localType, &targetType + depth = 0 + ) + + for ; l != nil && t != nil; l, t = localTs.shift(), targetTs.shift() { + if depth >= maxTypeDepth { + return errors.New("types are nested too deep") + } + + localType = *l + targetType = *t + + if reflect.TypeOf(localType) != reflect.TypeOf(targetType) { + return fmt.Errorf("type mismatch: %w", errImpossibleRelocation) + } + + switch lv := (localType).(type) { + case *Void, *Struct, *Union, *Enum, *Fwd: + // Nothing to do here + + case *Int: + tv := targetType.(*Int) + if lv.isBitfield() || tv.isBitfield() { + return fmt.Errorf("bitfield: %w", errImpossibleRelocation) + } + + case *Pointer, *Array: + depth++ + localType.walk(&localTs) + targetType.walk(&targetTs) + + case *FuncProto: + tv := targetType.(*FuncProto) + if len(lv.Params) != len(tv.Params) { + return fmt.Errorf("function param mismatch: %w", errImpossibleRelocation) + } + + depth++ + localType.walk(&localTs) + targetType.walk(&targetTs) + + default: + return fmt.Errorf("unsupported type %T", localType) + } + } + + if l != nil { + return fmt.Errorf("dangling local type %T", *l) + } + + if t != nil { + return fmt.Errorf("dangling target type %T", *t) + } + + return nil +} + +/* coreAreMembersCompatible checks two types for field-based relocation compatibility. + * + * The comment below is from bpf_core_fields_are_compat in libbpf.c: + * + * Check two types for compatibility for the purpose of field access + * relocation. const/volatile/restrict and typedefs are skipped to ensure we + * are relocating semantically compatible entities: + * - any two STRUCTs/UNIONs are compatible and can be mixed; + * - any two FWDs are compatible, if their names match (modulo flavor suffix); + * - any two PTRs are always compatible; + * - for ENUMs, names should be the same (ignoring flavor suffix) or at + * least one of enums should be anonymous; + * - for ENUMs, check sizes, names are ignored; + * - for INT, size and signedness are ignored; + * - any two FLOATs are always compatible; + * - for ARRAY, dimensionality is ignored, element types are checked for + * compatibility recursively; + * [ NB: coreAreMembersCompatible doesn't recurse, this check is done + * by coreFindField. ] + * - everything else shouldn't be ever a target of relocation. + * These rules are not set in stone and probably will be adjusted as we get + * more experience with using BPF CO-RE relocations. + * + * Returns errImpossibleRelocation if the members are not compatible. 
+ */ +func coreAreMembersCompatible(localType Type, targetType Type) error { + doNamesMatch := func(a, b string) error { + if a == "" || b == "" { + // allow anonymous and named type to match + return nil + } + + if essentialName(a) == essentialName(b) { + return nil + } + + return fmt.Errorf("names don't match: %w", errImpossibleRelocation) + } + + _, lok := localType.(composite) + _, tok := targetType.(composite) + if lok && tok { + return nil + } + + if reflect.TypeOf(localType) != reflect.TypeOf(targetType) { + return fmt.Errorf("type mismatch: %w", errImpossibleRelocation) + } + + switch lv := localType.(type) { + case *Array, *Pointer, *Float: + return nil + + case *Enum: + tv := targetType.(*Enum) + return doNamesMatch(lv.Name, tv.Name) + + case *Fwd: + tv := targetType.(*Fwd) + return doNamesMatch(lv.Name, tv.Name) + + case *Int: + tv := targetType.(*Int) + if lv.isBitfield() || tv.isBitfield() { + return fmt.Errorf("bitfield: %w", errImpossibleRelocation) + } + return nil + + default: + return fmt.Errorf("type %s: %w", localType, ErrNotSupported) + } +} + +func skipQualifierAndTypedef(typ Type) (Type, error) { + result := typ + for depth := 0; depth <= maxTypeDepth; depth++ { + switch v := (result).(type) { + case qualifier: + result = v.qualify() + case *Typedef: + result = v.Type + default: + return result, nil + } + } + return nil, errors.New("exceeded type depth") +} diff --git a/vendor/github.com/cilium/ebpf/internal/btf/doc.go b/vendor/github.com/cilium/ebpf/internal/btf/doc.go new file mode 100644 index 0000000000..ad2576cb23 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/btf/doc.go @@ -0,0 +1,8 @@ +// Package btf handles data encoded according to the BPF Type Format. +// +// The canonical documentation lives in the Linux kernel repository and is +// available at https://www.kernel.org/doc/html/latest/bpf/btf.html +// +// The API is very much unstable. You should only use this via the main +// ebpf library. 
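+//
+// A Spec is the parsed representation of a BTF blob; core.go computes CO-RE
+// relocations by matching a local Spec against a target (typically kernel)
+// Spec.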
+package btf diff --git a/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go b/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go new file mode 100644 index 0000000000..cdae2ec408 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go @@ -0,0 +1,312 @@ +package btf + +import ( + "bufio" + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" +) + +type btfExtHeader struct { + Magic uint16 + Version uint8 + Flags uint8 + HdrLen uint32 + + FuncInfoOff uint32 + FuncInfoLen uint32 + LineInfoOff uint32 + LineInfoLen uint32 +} + +type btfExtCoreHeader struct { + CoreReloOff uint32 + CoreReloLen uint32 +} + +func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (funcInfo, lineInfo map[string]extInfo, relos map[string]coreRelos, err error) { + var header btfExtHeader + var coreHeader btfExtCoreHeader + if err := binary.Read(r, bo, &header); err != nil { + return nil, nil, nil, fmt.Errorf("can't read header: %v", err) + } + + if header.Magic != btfMagic { + return nil, nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic) + } + + if header.Version != 1 { + return nil, nil, nil, fmt.Errorf("unexpected version %v", header.Version) + } + + if header.Flags != 0 { + return nil, nil, nil, fmt.Errorf("unsupported flags %v", header.Flags) + } + + remainder := int64(header.HdrLen) - int64(binary.Size(&header)) + if remainder < 0 { + return nil, nil, nil, errors.New("header is too short") + } + + coreHdrSize := int64(binary.Size(&coreHeader)) + if remainder >= coreHdrSize { + if err := binary.Read(r, bo, &coreHeader); err != nil { + return nil, nil, nil, fmt.Errorf("can't read CO-RE relocation header: %v", err) + } + remainder -= coreHdrSize + } + + // Of course, the .BTF.ext header has different semantics than the + // .BTF ext header. We need to ignore non-null values. 
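+	// The remaining header bytes are therefore skipped without checking
+	// that they are zero.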
+ _, err = io.CopyN(io.Discard, r, remainder) + if err != nil { + return nil, nil, nil, fmt.Errorf("header padding: %v", err) + } + + if _, err := r.Seek(int64(header.HdrLen+header.FuncInfoOff), io.SeekStart); err != nil { + return nil, nil, nil, fmt.Errorf("can't seek to function info section: %v", err) + } + + buf := bufio.NewReader(io.LimitReader(r, int64(header.FuncInfoLen))) + funcInfo, err = parseExtInfo(buf, bo, strings) + if err != nil { + return nil, nil, nil, fmt.Errorf("function info: %w", err) + } + + if _, err := r.Seek(int64(header.HdrLen+header.LineInfoOff), io.SeekStart); err != nil { + return nil, nil, nil, fmt.Errorf("can't seek to line info section: %v", err) + } + + buf = bufio.NewReader(io.LimitReader(r, int64(header.LineInfoLen))) + lineInfo, err = parseExtInfo(buf, bo, strings) + if err != nil { + return nil, nil, nil, fmt.Errorf("line info: %w", err) + } + + if coreHeader.CoreReloOff > 0 && coreHeader.CoreReloLen > 0 { + if _, err := r.Seek(int64(header.HdrLen+coreHeader.CoreReloOff), io.SeekStart); err != nil { + return nil, nil, nil, fmt.Errorf("can't seek to CO-RE relocation section: %v", err) + } + + relos, err = parseExtInfoRelos(io.LimitReader(r, int64(coreHeader.CoreReloLen)), bo, strings) + if err != nil { + return nil, nil, nil, fmt.Errorf("CO-RE relocation info: %w", err) + } + } + + return funcInfo, lineInfo, relos, nil +} + +type btfExtInfoSec struct { + SecNameOff uint32 + NumInfo uint32 +} + +type extInfoRecord struct { + InsnOff uint64 + Opaque []byte +} + +type extInfo struct { + byteOrder binary.ByteOrder + recordSize uint32 + records []extInfoRecord +} + +func (ei extInfo) append(other extInfo, offset uint64) (extInfo, error) { + if other.byteOrder != ei.byteOrder { + return extInfo{}, fmt.Errorf("ext_info byte order mismatch, want %v (got %v)", ei.byteOrder, other.byteOrder) + } + + if other.recordSize != ei.recordSize { + return extInfo{}, fmt.Errorf("ext_info record size mismatch, want %d (got %d)", ei.recordSize, other.recordSize) + } + + records := make([]extInfoRecord, 0, len(ei.records)+len(other.records)) + records = append(records, ei.records...) + for _, info := range other.records { + records = append(records, extInfoRecord{ + InsnOff: info.InsnOff + offset, + Opaque: info.Opaque, + }) + } + return extInfo{ei.byteOrder, ei.recordSize, records}, nil +} + +func (ei extInfo) MarshalBinary() ([]byte, error) { + if ei.byteOrder != internal.NativeEndian { + return nil, fmt.Errorf("%s is not the native byte order", ei.byteOrder) + } + + if len(ei.records) == 0 { + return nil, nil + } + + buf := bytes.NewBuffer(make([]byte, 0, int(ei.recordSize)*len(ei.records))) + for _, info := range ei.records { + // The kernel expects offsets in number of raw bpf instructions, + // while the ELF tracks it in bytes. 
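+		// Each bpf instruction is asm.InstructionSize (8) bytes wide, so a
+		// record at byte offset 16, for example, describes instruction 2.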
+		insnOff := uint32(info.InsnOff / asm.InstructionSize)
+		if err := binary.Write(buf, internal.NativeEndian, insnOff); err != nil {
+			return nil, fmt.Errorf("can't write instruction offset: %v", err)
+		}
+
+		buf.Write(info.Opaque)
+	}
+
+	return buf.Bytes(), nil
+}
+
+func parseExtInfo(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]extInfo, error) {
+	const maxRecordSize = 256
+
+	var recordSize uint32
+	if err := binary.Read(r, bo, &recordSize); err != nil {
+		return nil, fmt.Errorf("can't read record size: %v", err)
+	}
+
+	if recordSize < 4 {
+		// Need at least insnOff
+		return nil, errors.New("record size too short")
+	}
+	if recordSize > maxRecordSize {
+		return nil, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize)
+	}
+
+	result := make(map[string]extInfo)
+	for {
+		secName, infoHeader, err := parseExtInfoHeader(r, bo, strings)
+		if errors.Is(err, io.EOF) {
+			return result, nil
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		var records []extInfoRecord
+		for i := uint32(0); i < infoHeader.NumInfo; i++ {
+			var byteOff uint32
+			if err := binary.Read(r, bo, &byteOff); err != nil {
+				return nil, fmt.Errorf("section %v: can't read extended info offset: %v", secName, err)
+			}
+
+			buf := make([]byte, int(recordSize-4))
+			if _, err := io.ReadFull(r, buf); err != nil {
+				return nil, fmt.Errorf("section %v: can't read record: %v", secName, err)
+			}
+
+			if byteOff%asm.InstructionSize != 0 {
+				return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, byteOff)
+			}
+
+			records = append(records, extInfoRecord{uint64(byteOff), buf})
+		}
+
+		result[secName] = extInfo{
+			bo,
+			recordSize,
+			records,
+		}
+	}
+}
+
+// bpfCoreRelo matches `struct bpf_core_relo` from the kernel
+type bpfCoreRelo struct {
+	InsnOff      uint32
+	TypeID       TypeID
+	AccessStrOff uint32
+	Kind         COREKind
+}
+
+type coreRelo struct {
+	insnOff  uint32
+	typeID   TypeID
+	accessor coreAccessor
+	kind     COREKind
+}
+
+type coreRelos []coreRelo
+
+// append two slices of coreRelo to each other. The insnOff of other is
+// adjusted by offset.
+func (r coreRelos) append(other coreRelos, offset uint64) coreRelos {
+	result := make([]coreRelo, 0, len(r)+len(other))
+	result = append(result, r...)
+	for _, relo := range other {
+		relo.insnOff += uint32(offset)
+		result = append(result, relo)
+	}
+	return result
+}
+
+var extInfoReloSize = binary.Size(bpfCoreRelo{})
+
+func parseExtInfoRelos(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]coreRelos, error) {
+	var recordSize uint32
+	if err := binary.Read(r, bo, &recordSize); err != nil {
+		return nil, fmt.Errorf("read record size: %v", err)
+	}
+
+	if recordSize != uint32(extInfoReloSize) {
+		return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize)
+	}
+
+	result := make(map[string]coreRelos)
+	for {
+		secName, infoHeader, err := parseExtInfoHeader(r, bo, strings)
+		if errors.Is(err, io.EOF) {
+			return result, nil
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		var relos coreRelos
+		for i := uint32(0); i < infoHeader.NumInfo; i++ {
+			var relo bpfCoreRelo
+			if err := binary.Read(r, bo, &relo); err != nil {
+				return nil, fmt.Errorf("section %v: read record: %v", secName, err)
+			}
+
+			if relo.InsnOff%asm.InstructionSize != 0 {
+				return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, relo.InsnOff)
+			}
+
+			accessorStr, err := strings.Lookup(relo.AccessStrOff)
+			if err != nil {
+				return nil, err
+			}
+
+			accessor, err := parseCoreAccessor(accessorStr)
+			if err != nil {
+				return nil, fmt.Errorf("accessor %q: %s", accessorStr, err)
+			}
+
+			relos = append(relos, coreRelo{
+				relo.InsnOff,
+				relo.TypeID,
+				accessor,
+				relo.Kind,
+			})
+		}
+
+		result[secName] = relos
+	}
+}
+
+func parseExtInfoHeader(r io.Reader, bo binary.ByteOrder, strings stringTable) (string, *btfExtInfoSec, error) {
+	var infoHeader btfExtInfoSec
+	if err := binary.Read(r, bo, &infoHeader); err != nil {
+		return "", nil, fmt.Errorf("read ext info header: %w", err)
+	}
+
+	secName, err := strings.Lookup(infoHeader.SecNameOff)
+	if err != nil {
+		return "", nil, fmt.Errorf("get section name: %w", err)
+	}
+
+	if infoHeader.NumInfo == 0 {
+		return "", nil, fmt.Errorf("section %s has zero records", secName)
+	}
+
+	return secName, &infoHeader, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go b/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go
new file mode 100644
index 0000000000..220b285afe
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go
@@ -0,0 +1,50 @@
+//go:build gofuzz
+// +build gofuzz
+
+// Use with https://github.com/dvyukov/go-fuzz
+
+package btf
+
+import (
+	"bytes"
+	"encoding/binary"
+
+	"github.com/cilium/ebpf/internal"
+)
+
+func FuzzSpec(data []byte) int {
+	if len(data) < binary.Size(btfHeader{}) {
+		return -1
+	}
+
+	spec, err := loadNakedSpec(bytes.NewReader(data), internal.NativeEndian, nil, nil)
+	if err != nil {
+		if spec != nil {
+			panic("spec is not nil")
+		}
+		return 0
+	}
+	if spec == nil {
+		panic("spec is nil")
+	}
+	return 1
+}
+
+func FuzzExtInfo(data []byte) int {
+	if len(data) < binary.Size(btfExtHeader{}) {
+		return -1
+	}
+
+	table := stringTable("\x00foo\x00barfoo\x00")
+	info, err := parseExtInfo(bytes.NewReader(data), internal.NativeEndian, table)
+	if err != nil {
+		if info != nil {
+			panic("info is not nil")
+		}
+		return 0
+	}
+	if info == nil {
+		panic("info is nil")
+	}
+	return 1
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/info.go b/vendor/github.com/cilium/ebpf/internal/btf/info.go
new file mode 100644
index 0000000000..6a9b5d2e0b
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/btf/info.go
@@ -0,0 +1,48 @@
+package btf
+
+import (
+	"bytes"
+
+	"github.com/cilium/ebpf/internal"
+)
+
+// info describes a BTF object.
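+//
+// It is the Go view of the object information the kernel returns for a BTF
+// fd via bpfGetBTFInfoByFD.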
+type info struct {
+	BTF *Spec
+	ID  ID
+	// Name is an identifying name for the BTF, currently only used by the
+	// kernel.
+	Name string
+	// KernelBTF is true if the BTF originated with the kernel and not
+	// userspace.
+	KernelBTF bool
+}
+
+func newInfoFromFd(fd *internal.FD) (*info, error) {
+	// We invoke the syscall once with empty BTF and name buffers to get size
+	// information to allocate buffers. Then we invoke it a second time with
+	// buffers to receive the data.
+	bpfInfo, err := bpfGetBTFInfoByFD(fd, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	btfBuffer := make([]byte, bpfInfo.btfSize)
+	nameBuffer := make([]byte, bpfInfo.nameLen)
+	bpfInfo, err = bpfGetBTFInfoByFD(fd, btfBuffer, nameBuffer)
+	if err != nil {
+		return nil, err
+	}
+
+	spec, err := loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return &info{
+		BTF:       spec,
+		ID:        ID(bpfInfo.id),
+		Name:      internal.CString(nameBuffer),
+		KernelBTF: bpfInfo.kernelBTF != 0,
+	}, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/strings.go b/vendor/github.com/cilium/ebpf/internal/btf/strings.go
new file mode 100644
index 0000000000..9876aa227c
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/btf/strings.go
@@ -0,0 +1,54 @@
+package btf
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+)
+
+type stringTable []byte
+
+func readStringTable(r io.Reader) (stringTable, error) {
+	contents, err := io.ReadAll(r)
+	if err != nil {
+		return nil, fmt.Errorf("can't read string table: %v", err)
+	}
+
+	if len(contents) < 1 {
+		return nil, errors.New("string table is empty")
+	}
+
+	if contents[0] != '\x00' {
+		return nil, errors.New("first item in string table is non-empty")
+	}
+
+	if contents[len(contents)-1] != '\x00' {
+		return nil, errors.New("string table isn't null terminated")
+	}
+
+	return stringTable(contents), nil
+}
+
+func (st stringTable) Lookup(offset uint32) (string, error) {
+	if int64(offset) > int64(^uint(0)>>1) {
+		return "", fmt.Errorf("offset %d overflows int", offset)
+	}
+
+	pos := int(offset)
+	if pos >= len(st) {
+		return "", fmt.Errorf("offset %d is out of bounds", offset)
+	}
+
+	if pos > 0 && st[pos-1] != '\x00' {
+		return "", fmt.Errorf("offset %d isn't start of a string", offset)
+	}
+
+	str := st[pos:]
+	end := bytes.IndexByte(str, '\x00')
+	if end == -1 {
+		return "", fmt.Errorf("offset %d isn't null terminated", offset)
+	}
+
+	return string(str[:end]), nil
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/syscalls.go b/vendor/github.com/cilium/ebpf/internal/btf/syscalls.go
new file mode 100644
index 0000000000..a4f80abd01
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/btf/syscalls.go
@@ -0,0 +1,31 @@
+package btf
+
+import (
+	"fmt"
+	"unsafe"
+
+	"github.com/cilium/ebpf/internal"
+)
+
+type bpfBTFInfo struct {
+	btf       internal.Pointer
+	btfSize   uint32
+	id        uint32
+	name      internal.Pointer
+	nameLen   uint32
+	kernelBTF uint32
+}
+
+func bpfGetBTFInfoByFD(fd *internal.FD, btf, name []byte) (*bpfBTFInfo, error) {
+	info := bpfBTFInfo{
+		btf:     internal.NewSlicePointer(btf),
+		btfSize: uint32(len(btf)),
+		name:    internal.NewSlicePointer(name),
+		nameLen: uint32(len(name)),
+	}
+	if err := internal.BPFObjGetInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)); err != nil {
+		return nil, fmt.Errorf("can't get BTF info: %w", err)
+	}
+
+	return &info, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/types.go b/vendor/github.com/cilium/ebpf/internal/btf/types.go
new file mode
100644 index 0000000000..5c8e7c6e59 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/btf/types.go @@ -0,0 +1,957 @@ +package btf + +import ( + "fmt" + "math" + "strings" +) + +const maxTypeDepth = 32 + +// TypeID identifies a type in a BTF section. +type TypeID uint32 + +// ID implements part of the Type interface. +func (tid TypeID) ID() TypeID { + return tid +} + +// Type represents a type described by BTF. +type Type interface { + ID() TypeID + + String() string + + // Make a copy of the type, without copying Type members. + copy() Type + + // Enumerate all nested Types. Repeated calls must visit nested + // types in the same order. + walk(*typeDeque) +} + +// NamedType is a type with a name. +type NamedType interface { + Type + + // Name of the type, empty for anonymous types. + TypeName() string +} + +var ( + _ NamedType = (*Int)(nil) + _ NamedType = (*Struct)(nil) + _ NamedType = (*Union)(nil) + _ NamedType = (*Enum)(nil) + _ NamedType = (*Fwd)(nil) + _ NamedType = (*Func)(nil) + _ NamedType = (*Typedef)(nil) + _ NamedType = (*Var)(nil) + _ NamedType = (*Datasec)(nil) + _ NamedType = (*Float)(nil) +) + +// Void is the unit type of BTF. +type Void struct{} + +func (v *Void) ID() TypeID { return 0 } +func (v *Void) String() string { return "void#0" } +func (v *Void) size() uint32 { return 0 } +func (v *Void) copy() Type { return (*Void)(nil) } +func (v *Void) walk(*typeDeque) {} + +type IntEncoding byte + +const ( + Signed IntEncoding = 1 << iota + Char + Bool +) + +// Int is an integer of a given length. +type Int struct { + TypeID + Name string + + // The size of the integer in bytes. + Size uint32 + Encoding IntEncoding + // OffsetBits is the starting bit offset. Currently always 0. + // See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int + OffsetBits uint32 + Bits byte +} + +func (i *Int) String() string { + var s strings.Builder + + switch { + case i.Encoding&Char != 0: + s.WriteString("char") + case i.Encoding&Bool != 0: + s.WriteString("bool") + default: + if i.Encoding&Signed == 0 { + s.WriteRune('u') + } + s.WriteString("int") + fmt.Fprintf(&s, "%d", i.Size*8) + } + + fmt.Fprintf(&s, "#%d", i.TypeID) + + if i.Bits > 0 { + fmt.Fprintf(&s, "[bits=%d]", i.Bits) + } + + return s.String() +} + +func (i *Int) TypeName() string { return i.Name } +func (i *Int) size() uint32 { return i.Size } +func (i *Int) walk(*typeDeque) {} +func (i *Int) copy() Type { + cpy := *i + return &cpy +} + +func (i *Int) isBitfield() bool { + return i.OffsetBits > 0 +} + +// Pointer is a pointer to another type. +type Pointer struct { + TypeID + Target Type +} + +func (p *Pointer) String() string { + return fmt.Sprintf("pointer#%d[target=#%d]", p.TypeID, p.Target.ID()) +} + +func (p *Pointer) size() uint32 { return 8 } +func (p *Pointer) walk(tdq *typeDeque) { tdq.push(&p.Target) } +func (p *Pointer) copy() Type { + cpy := *p + return &cpy +} + +// Array is an array with a fixed number of elements. +type Array struct { + TypeID + Type Type + Nelems uint32 +} + +func (arr *Array) String() string { + return fmt.Sprintf("array#%d[type=#%d n=%d]", arr.TypeID, arr.Type.ID(), arr.Nelems) +} + +func (arr *Array) walk(tdq *typeDeque) { tdq.push(&arr.Type) } +func (arr *Array) copy() Type { + cpy := *arr + return &cpy +} + +// Struct is a compound type of consecutive members. 
+type Struct struct {
+	TypeID
+	Name string
+	// The size of the struct including padding, in bytes.
+	Size    uint32
+	Members []Member
+}
+
+func (s *Struct) String() string {
+	return fmt.Sprintf("struct#%d[%q]", s.TypeID, s.Name)
+}
+
+func (s *Struct) TypeName() string { return s.Name }
+
+func (s *Struct) size() uint32 { return s.Size }
+
+func (s *Struct) walk(tdq *typeDeque) {
+	for i := range s.Members {
+		tdq.push(&s.Members[i].Type)
+	}
+}
+
+func (s *Struct) copy() Type {
+	cpy := *s
+	cpy.Members = copyMembers(s.Members)
+	return &cpy
+}
+
+func (s *Struct) members() []Member {
+	return s.Members
+}
+
+// Union is a compound type where members occupy the same memory.
+type Union struct {
+	TypeID
+	Name string
+	// The size of the union including padding, in bytes.
+	Size    uint32
+	Members []Member
+}
+
+func (u *Union) String() string {
+	return fmt.Sprintf("union#%d[%q]", u.TypeID, u.Name)
+}
+
+func (u *Union) TypeName() string { return u.Name }
+
+func (u *Union) size() uint32 { return u.Size }
+
+func (u *Union) walk(tdq *typeDeque) {
+	for i := range u.Members {
+		tdq.push(&u.Members[i].Type)
+	}
+}
+
+func (u *Union) copy() Type {
+	cpy := *u
+	cpy.Members = copyMembers(u.Members)
+	return &cpy
+}
+
+func (u *Union) members() []Member {
+	return u.Members
+}
+
+func copyMembers(orig []Member) []Member {
+	cpy := make([]Member, len(orig))
+	copy(cpy, orig)
+	return cpy
+}
+
+type composite interface {
+	members() []Member
+}
+
+var (
+	_ composite = (*Struct)(nil)
+	_ composite = (*Union)(nil)
+)
+
+// Member is part of a Struct or Union.
+//
+// It is not a valid Type.
+type Member struct {
+	Name string
+	Type Type
+	// OffsetBits is the bit offset of this member.
+	OffsetBits   uint32
+	BitfieldSize uint32
+}
+
+// Enum lists possible values.
+type Enum struct {
+	TypeID
+	Name   string
+	Values []EnumValue
+}
+
+func (e *Enum) String() string {
+	return fmt.Sprintf("enum#%d[%q]", e.TypeID, e.Name)
+}
+
+func (e *Enum) TypeName() string { return e.Name }
+
+// EnumValue is part of an Enum.
+//
+// It is not a valid Type.
+type EnumValue struct {
+	Name  string
+	Value int32
+}
+
+func (e *Enum) size() uint32    { return 4 }
+func (e *Enum) walk(*typeDeque) {}
+func (e *Enum) copy() Type {
+	cpy := *e
+	cpy.Values = make([]EnumValue, len(e.Values))
+	copy(cpy.Values, e.Values)
+	return &cpy
+}
+
+// FwdKind is the type of forward declaration.
+type FwdKind int
+
+// Valid types of forward declaration.
+const (
+	FwdStruct FwdKind = iota
+	FwdUnion
+)
+
+func (fk FwdKind) String() string {
+	switch fk {
+	case FwdStruct:
+		return "struct"
+	case FwdUnion:
+		return "union"
+	default:
+		return fmt.Sprintf("%T(%d)", fk, int(fk))
+	}
+}
+
+// Fwd is a forward declaration of a Type.
+type Fwd struct {
+	TypeID
+	Name string
+	Kind FwdKind
+}
+
+func (f *Fwd) String() string {
+	return fmt.Sprintf("fwd#%d[%s %q]", f.TypeID, f.Kind, f.Name)
+}
+
+func (f *Fwd) TypeName() string { return f.Name }
+
+func (f *Fwd) walk(*typeDeque) {}
+func (f *Fwd) copy() Type {
+	cpy := *f
+	return &cpy
+}
+
+// Typedef is an alias of a Type.
+type Typedef struct {
+	TypeID
+	Name string
+	Type Type
+}
+
+func (td *Typedef) String() string {
+	return fmt.Sprintf("typedef#%d[%q #%d]", td.TypeID, td.Name, td.Type.ID())
+}
+
+func (td *Typedef) TypeName() string { return td.Name }
+
+func (td *Typedef) walk(tdq *typeDeque) { tdq.push(&td.Type) }
+func (td *Typedef) copy() Type {
+	cpy := *td
+	return &cpy
+}
+
+// Volatile is a qualifier.
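+//
+// Qualifiers are transparent to Sizeof and to CO-RE relocation, both of
+// which unwrap them via the qualify method (see skipQualifierAndTypedef).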
+type Volatile struct {
+	TypeID
+	Type Type
+}
+
+func (v *Volatile) String() string {
+	return fmt.Sprintf("volatile#%d[#%d]", v.TypeID, v.Type.ID())
+}
+
+func (v *Volatile) qualify() Type       { return v.Type }
+func (v *Volatile) walk(tdq *typeDeque) { tdq.push(&v.Type) }
+func (v *Volatile) copy() Type {
+	cpy := *v
+	return &cpy
+}
+
+// Const is a qualifier.
+type Const struct {
+	TypeID
+	Type Type
+}
+
+func (c *Const) String() string {
+	return fmt.Sprintf("const#%d[#%d]", c.TypeID, c.Type.ID())
+}
+
+func (c *Const) qualify() Type       { return c.Type }
+func (c *Const) walk(tdq *typeDeque) { tdq.push(&c.Type) }
+func (c *Const) copy() Type {
+	cpy := *c
+	return &cpy
+}
+
+// Restrict is a qualifier.
+type Restrict struct {
+	TypeID
+	Type Type
+}
+
+func (r *Restrict) String() string {
+	return fmt.Sprintf("restrict#%d[#%d]", r.TypeID, r.Type.ID())
+}
+
+func (r *Restrict) qualify() Type       { return r.Type }
+func (r *Restrict) walk(tdq *typeDeque) { tdq.push(&r.Type) }
+func (r *Restrict) copy() Type {
+	cpy := *r
+	return &cpy
+}
+
+// Func is a function definition.
+type Func struct {
+	TypeID
+	Name    string
+	Type    Type
+	Linkage FuncLinkage
+}
+
+func (f *Func) String() string {
+	return fmt.Sprintf("func#%d[%s %q proto=#%d]", f.TypeID, f.Linkage, f.Name, f.Type.ID())
+}
+
+func (f *Func) TypeName() string { return f.Name }
+
+func (f *Func) walk(tdq *typeDeque) { tdq.push(&f.Type) }
+func (f *Func) copy() Type {
+	cpy := *f
+	return &cpy
+}
+
+// FuncProto is a function declaration.
+type FuncProto struct {
+	TypeID
+	Return Type
+	Params []FuncParam
+}
+
+func (fp *FuncProto) String() string {
+	var s strings.Builder
+	fmt.Fprintf(&s, "proto#%d[", fp.TypeID)
+	for _, param := range fp.Params {
+		fmt.Fprintf(&s, "%q=#%d, ", param.Name, param.Type.ID())
+	}
+	fmt.Fprintf(&s, "return=#%d]", fp.Return.ID())
+	return s.String()
+}
+
+func (fp *FuncProto) walk(tdq *typeDeque) {
+	tdq.push(&fp.Return)
+	for i := range fp.Params {
+		tdq.push(&fp.Params[i].Type)
+	}
+}
+
+func (fp *FuncProto) copy() Type {
+	cpy := *fp
+	cpy.Params = make([]FuncParam, len(fp.Params))
+	copy(cpy.Params, fp.Params)
+	return &cpy
+}
+
+type FuncParam struct {
+	Name string
+	Type Type
+}
+
+// Var is a global variable.
+type Var struct {
+	TypeID
+	Name    string
+	Type    Type
+	Linkage VarLinkage
+}
+
+func (v *Var) String() string {
+	return fmt.Sprintf("var#%d[%s %q]", v.TypeID, v.Linkage, v.Name)
+}
+
+func (v *Var) TypeName() string { return v.Name }
+
+func (v *Var) walk(tdq *typeDeque) { tdq.push(&v.Type) }
+func (v *Var) copy() Type {
+	cpy := *v
+	return &cpy
+}
+
+// Datasec is a global program section containing data.
+type Datasec struct {
+	TypeID
+	Name string
+	Size uint32
+	Vars []VarSecinfo
+}
+
+func (ds *Datasec) String() string {
+	return fmt.Sprintf("section#%d[%q]", ds.TypeID, ds.Name)
+}
+
+func (ds *Datasec) TypeName() string { return ds.Name }
+
+func (ds *Datasec) size() uint32 { return ds.Size }
+
+func (ds *Datasec) walk(tdq *typeDeque) {
+	for i := range ds.Vars {
+		tdq.push(&ds.Vars[i].Type)
+	}
+}
+
+func (ds *Datasec) copy() Type {
+	cpy := *ds
+	cpy.Vars = make([]VarSecinfo, len(ds.Vars))
+	copy(cpy.Vars, ds.Vars)
+	return &cpy
+}
+
+// VarSecinfo describes a variable in a Datasec.
+//
+// It is not a valid Type.
+type VarSecinfo struct {
+	Type   Type
+	Offset uint32
+	Size   uint32
+}
+
+// Float is a float of a given length.
+type Float struct {
+	TypeID
+	Name string
+
+	// The size of the float in bytes.
+ Size uint32 +} + +func (f *Float) String() string { + return fmt.Sprintf("float%d#%d[%q]", f.Size*8, f.TypeID, f.Name) +} + +func (f *Float) TypeName() string { return f.Name } +func (f *Float) size() uint32 { return f.Size } +func (f *Float) walk(*typeDeque) {} +func (f *Float) copy() Type { + cpy := *f + return &cpy +} + +type sizer interface { + size() uint32 +} + +var ( + _ sizer = (*Int)(nil) + _ sizer = (*Pointer)(nil) + _ sizer = (*Struct)(nil) + _ sizer = (*Union)(nil) + _ sizer = (*Enum)(nil) + _ sizer = (*Datasec)(nil) +) + +type qualifier interface { + qualify() Type +} + +var ( + _ qualifier = (*Const)(nil) + _ qualifier = (*Restrict)(nil) + _ qualifier = (*Volatile)(nil) +) + +// Sizeof returns the size of a type in bytes. +// +// Returns an error if the size can't be computed. +func Sizeof(typ Type) (int, error) { + var ( + n = int64(1) + elem int64 + ) + + for i := 0; i < maxTypeDepth; i++ { + switch v := typ.(type) { + case *Array: + if n > 0 && int64(v.Nelems) > math.MaxInt64/n { + return 0, fmt.Errorf("type %s: overflow", typ) + } + + // Arrays may be of zero length, which allows + // n to be zero as well. + n *= int64(v.Nelems) + typ = v.Type + continue + + case sizer: + elem = int64(v.size()) + + case *Typedef: + typ = v.Type + continue + + case qualifier: + typ = v.qualify() + continue + + default: + return 0, fmt.Errorf("unsized type %T", typ) + } + + if n > 0 && elem > math.MaxInt64/n { + return 0, fmt.Errorf("type %s: overflow", typ) + } + + size := n * elem + if int64(int(size)) != size { + return 0, fmt.Errorf("type %s: overflow", typ) + } + + return int(size), nil + } + + return 0, fmt.Errorf("type %s: exceeded type depth", typ) +} + +// copy a Type recursively. +// +// typ may form a cycle. +// +// Returns any errors from transform verbatim. +func copyType(typ Type, transform func(Type) (Type, error)) (Type, error) { + copies := make(copier) + return typ, copies.copy(&typ, transform) +} + +// copy a slice of Types recursively. +// +// Types may form a cycle. +// +// Returns any errors from transform verbatim. +func copyTypes(types []Type, transform func(Type) (Type, error)) ([]Type, error) { + result := make([]Type, len(types)) + copy(result, types) + + copies := make(copier) + for i := range result { + if err := copies.copy(&result[i], transform); err != nil { + return nil, err + } + } + + return result, nil +} + +type copier map[Type]Type + +func (c copier) copy(typ *Type, transform func(Type) (Type, error)) error { + var work typeDeque + for t := typ; t != nil; t = work.pop() { + // *t is the identity of the type. + if cpy := c[*t]; cpy != nil { + *t = cpy + continue + } + + var cpy Type + if transform != nil { + tf, err := transform(*t) + if err != nil { + return fmt.Errorf("copy %s: %w", *t, err) + } + cpy = tf.copy() + } else { + cpy = (*t).copy() + } + + c[*t] = cpy + *t = cpy + + // Mark any nested types for copying. + cpy.walk(&work) + } + + return nil +} + +// typeDeque keeps track of pointers to types which still +// need to be visited. +type typeDeque struct { + types []*Type + read, write uint64 + mask uint64 +} + +func (dq *typeDeque) empty() bool { + return dq.read == dq.write +} + +// push adds a type to the stack. 
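+//
+// The backing slice is a power-of-two ring buffer: read and write only ever
+// grow, and mask wraps them onto valid indices, which is why push reslices
+// and rotates the buffer when it runs out of space.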
+func (dq *typeDeque) push(t *Type) { + if dq.write-dq.read < uint64(len(dq.types)) { + dq.types[dq.write&dq.mask] = t + dq.write++ + return + } + + new := len(dq.types) * 2 + if new == 0 { + new = 8 + } + + types := make([]*Type, new) + pivot := dq.read & dq.mask + n := copy(types, dq.types[pivot:]) + n += copy(types[n:], dq.types[:pivot]) + types[n] = t + + dq.types = types + dq.mask = uint64(new) - 1 + dq.read, dq.write = 0, uint64(n+1) +} + +// shift returns the first element or null. +func (dq *typeDeque) shift() *Type { + if dq.empty() { + return nil + } + + index := dq.read & dq.mask + t := dq.types[index] + dq.types[index] = nil + dq.read++ + return t +} + +// pop returns the last element or null. +func (dq *typeDeque) pop() *Type { + if dq.empty() { + return nil + } + + dq.write-- + index := dq.write & dq.mask + t := dq.types[index] + dq.types[index] = nil + return t +} + +// all returns all elements. +// +// The deque is empty after calling this method. +func (dq *typeDeque) all() []*Type { + length := dq.write - dq.read + types := make([]*Type, 0, length) + for t := dq.shift(); t != nil; t = dq.shift() { + types = append(types, t) + } + return types +} + +// inflateRawTypes takes a list of raw btf types linked via type IDs, and turns +// it into a graph of Types connected via pointers. +// +// Returns a map of named types (so, where NameOff is non-zero) and a slice of types +// indexed by TypeID. Since BTF ignores compilation units, multiple types may share +// the same name. A Type may form a cyclic graph by pointing at itself. +func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, namedTypes map[string][]NamedType, err error) { + type fixupDef struct { + id TypeID + expectedKind btfKind + typ *Type + } + + var fixups []fixupDef + fixup := func(id TypeID, expectedKind btfKind, typ *Type) { + fixups = append(fixups, fixupDef{id, expectedKind, typ}) + } + + convertMembers := func(raw []btfMember, kindFlag bool) ([]Member, error) { + // NB: The fixup below relies on pre-allocating this array to + // work, since otherwise append might re-allocate members. + members := make([]Member, 0, len(raw)) + for i, btfMember := range raw { + name, err := rawStrings.Lookup(btfMember.NameOff) + if err != nil { + return nil, fmt.Errorf("can't get name for member %d: %w", i, err) + } + m := Member{ + Name: name, + OffsetBits: btfMember.Offset, + } + if kindFlag { + m.BitfieldSize = btfMember.Offset >> 24 + m.OffsetBits &= 0xffffff + } + members = append(members, m) + } + for i := range members { + fixup(raw[i].Type, kindUnknown, &members[i].Type) + } + return members, nil + } + + types = make([]Type, 0, len(rawTypes)) + types = append(types, (*Void)(nil)) + namedTypes = make(map[string][]NamedType) + + for i, raw := range rawTypes { + var ( + // Void is defined to always be type ID 0, and is thus + // omitted from BTF. + id = TypeID(i + 1) + typ Type + ) + + name, err := rawStrings.Lookup(raw.NameOff) + if err != nil { + return nil, nil, fmt.Errorf("get name for type id %d: %w", id, err) + } + + switch raw.Kind() { + case kindInt: + encoding, offset, bits := intEncoding(*raw.data.(*uint32)) + typ = &Int{id, name, raw.Size(), encoding, offset, bits} + + case kindPointer: + ptr := &Pointer{id, nil} + fixup(raw.Type(), kindUnknown, &ptr.Target) + typ = ptr + + case kindArray: + btfArr := raw.data.(*btfArray) + + // IndexType is unused according to btf.rst. + // Don't make it available right now. 
+ arr := &Array{id, nil, btfArr.Nelems} + fixup(btfArr.Type, kindUnknown, &arr.Type) + typ = arr + + case kindStruct: + members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag()) + if err != nil { + return nil, nil, fmt.Errorf("struct %s (id %d): %w", name, id, err) + } + typ = &Struct{id, name, raw.Size(), members} + + case kindUnion: + members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag()) + if err != nil { + return nil, nil, fmt.Errorf("union %s (id %d): %w", name, id, err) + } + typ = &Union{id, name, raw.Size(), members} + + case kindEnum: + rawvals := raw.data.([]btfEnum) + vals := make([]EnumValue, 0, len(rawvals)) + for i, btfVal := range rawvals { + name, err := rawStrings.Lookup(btfVal.NameOff) + if err != nil { + return nil, nil, fmt.Errorf("get name for enum value %d: %s", i, err) + } + vals = append(vals, EnumValue{ + Name: name, + Value: btfVal.Val, + }) + } + typ = &Enum{id, name, vals} + + case kindForward: + if raw.KindFlag() { + typ = &Fwd{id, name, FwdUnion} + } else { + typ = &Fwd{id, name, FwdStruct} + } + + case kindTypedef: + typedef := &Typedef{id, name, nil} + fixup(raw.Type(), kindUnknown, &typedef.Type) + typ = typedef + + case kindVolatile: + volatile := &Volatile{id, nil} + fixup(raw.Type(), kindUnknown, &volatile.Type) + typ = volatile + + case kindConst: + cnst := &Const{id, nil} + fixup(raw.Type(), kindUnknown, &cnst.Type) + typ = cnst + + case kindRestrict: + restrict := &Restrict{id, nil} + fixup(raw.Type(), kindUnknown, &restrict.Type) + typ = restrict + + case kindFunc: + fn := &Func{id, name, nil, raw.Linkage()} + fixup(raw.Type(), kindFuncProto, &fn.Type) + typ = fn + + case kindFuncProto: + rawparams := raw.data.([]btfParam) + params := make([]FuncParam, 0, len(rawparams)) + for i, param := range rawparams { + name, err := rawStrings.Lookup(param.NameOff) + if err != nil { + return nil, nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err) + } + params = append(params, FuncParam{ + Name: name, + }) + } + for i := range params { + fixup(rawparams[i].Type, kindUnknown, ¶ms[i].Type) + } + + fp := &FuncProto{id, nil, params} + fixup(raw.Type(), kindUnknown, &fp.Return) + typ = fp + + case kindVar: + variable := raw.data.(*btfVariable) + v := &Var{id, name, nil, VarLinkage(variable.Linkage)} + fixup(raw.Type(), kindUnknown, &v.Type) + typ = v + + case kindDatasec: + btfVars := raw.data.([]btfVarSecinfo) + vars := make([]VarSecinfo, 0, len(btfVars)) + for _, btfVar := range btfVars { + vars = append(vars, VarSecinfo{ + Offset: btfVar.Offset, + Size: btfVar.Size, + }) + } + for i := range vars { + fixup(btfVars[i].Type, kindVar, &vars[i].Type) + } + typ = &Datasec{id, name, raw.SizeType, vars} + + case kindFloat: + typ = &Float{id, name, raw.Size()} + + default: + return nil, nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind()) + } + + types = append(types, typ) + + if named, ok := typ.(NamedType); ok { + if name := essentialName(named.TypeName()); name != "" { + namedTypes[name] = append(namedTypes[name], named) + } + } + } + + for _, fixup := range fixups { + i := int(fixup.id) + if i >= len(types) { + return nil, nil, fmt.Errorf("reference to invalid type id: %d", fixup.id) + } + + // Default void (id 0) to unknown + rawKind := kindUnknown + if i > 0 { + rawKind = rawTypes[i-1].Kind() + } + + if expected := fixup.expectedKind; expected != kindUnknown && rawKind != expected { + return nil, nil, fmt.Errorf("expected type id %d to have kind %s, found %s", fixup.id, expected, rawKind) + } + + *fixup.typ = 
types[i] + } + + return types, namedTypes, nil +} + +// essentialName returns name without a ___ suffix. +func essentialName(name string) string { + lastIdx := strings.LastIndex(name, "___") + if lastIdx > 0 { + return name[:lastIdx] + } + return name +} diff --git a/vendor/github.com/cilium/ebpf/internal/cpu.go b/vendor/github.com/cilium/ebpf/internal/cpu.go new file mode 100644 index 0000000000..3affa1efb9 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/cpu.go @@ -0,0 +1,62 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "sync" +) + +var sysCPU struct { + once sync.Once + err error + num int +} + +// PossibleCPUs returns the max number of CPUs a system may possibly have +// Logical CPU numbers must be of the form 0-n +func PossibleCPUs() (int, error) { + sysCPU.once.Do(func() { + sysCPU.num, sysCPU.err = parseCPUsFromFile("/sys/devices/system/cpu/possible") + }) + + return sysCPU.num, sysCPU.err +} + +func parseCPUsFromFile(path string) (int, error) { + spec, err := os.ReadFile(path) + if err != nil { + return 0, err + } + + n, err := parseCPUs(string(spec)) + if err != nil { + return 0, fmt.Errorf("can't parse %s: %v", path, err) + } + + return n, nil +} + +// parseCPUs parses the number of cpus from a string produced +// by bitmap_list_string() in the Linux kernel. +// Multiple ranges are rejected, since they can't be unified +// into a single number. +// This is the format of /sys/devices/system/cpu/possible, it +// is not suitable for /sys/devices/system/cpu/online, etc. +func parseCPUs(spec string) (int, error) { + if strings.Trim(spec, "\n") == "0" { + return 1, nil + } + + var low, high int + n, err := fmt.Sscanf(spec, "%d-%d\n", &low, &high) + if n != 2 || err != nil { + return 0, fmt.Errorf("invalid format: %s", spec) + } + if low != 0 { + return 0, fmt.Errorf("CPU spec doesn't start at zero: %s", spec) + } + + // cpus is 0 indexed + return high + 1, nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/elf.go b/vendor/github.com/cilium/ebpf/internal/elf.go new file mode 100644 index 0000000000..54a4313130 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/elf.go @@ -0,0 +1,68 @@ +package internal + +import ( + "debug/elf" + "fmt" + "io" +) + +type SafeELFFile struct { + *elf.File +} + +// NewSafeELFFile reads an ELF safely. +// +// Any panic during parsing is turned into an error. This is necessary since +// there are a bunch of unfixed bugs in debug/elf. +// +// https://github.com/golang/go/issues?q=is%3Aissue+is%3Aopen+debug%2Felf+in%3Atitle +func NewSafeELFFile(r io.ReaderAt) (safe *SafeELFFile, err error) { + defer func() { + r := recover() + if r == nil { + return + } + + safe = nil + err = fmt.Errorf("reading ELF file panicked: %s", r) + }() + + file, err := elf.NewFile(r) + if err != nil { + return nil, err + } + + return &SafeELFFile{file}, nil +} + +// Symbols is the safe version of elf.File.Symbols. +func (se *SafeELFFile) Symbols() (syms []elf.Symbol, err error) { + defer func() { + r := recover() + if r == nil { + return + } + + syms = nil + err = fmt.Errorf("reading ELF symbols panicked: %s", r) + }() + + syms, err = se.File.Symbols() + return +} + +// DynamicSymbols is the safe version of elf.File.DynamicSymbols. 
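Symbols, above, and DynamicSymbols, below, share one defensive idiom: a deferred recover combined with a named return value turns the known panics inside debug/elf into ordinary errors. A self-contained sketch of that pattern (a stand-in, not the vendored code):

    package main

    import "fmt"

    func safeParse() (err error) {
        defer func() {
            if r := recover(); r != nil {
                err = fmt.Errorf("parsing panicked: %s", r) // the named return lets the deferred func set err
            }
        }()
        panic("unexpected EOF") // stands in for a panic inside debug/elf
    }

    func main() {
        fmt.Println(safeParse()) // parsing panicked: unexpected EOF
    }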
+func (se *SafeELFFile) DynamicSymbols() (syms []elf.Symbol, err error) { + defer func() { + r := recover() + if r == nil { + return + } + + syms = nil + err = fmt.Errorf("reading ELF dynamic symbols panicked: %s", r) + }() + + syms, err = se.File.DynamicSymbols() + return +} diff --git a/vendor/github.com/cilium/ebpf/internal/endian.go b/vendor/github.com/cilium/ebpf/internal/endian.go new file mode 100644 index 0000000000..6ae99fcd5f --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/endian.go @@ -0,0 +1,29 @@ +package internal + +import ( + "encoding/binary" + "unsafe" +) + +// NativeEndian is set to either binary.BigEndian or binary.LittleEndian, +// depending on the host's endianness. +var NativeEndian binary.ByteOrder + +// Clang is set to either "el" or "eb" depending on the host's endianness. +var ClangEndian string + +func init() { + if isBigEndian() { + NativeEndian = binary.BigEndian + ClangEndian = "eb" + } else { + NativeEndian = binary.LittleEndian + ClangEndian = "el" + } +} + +func isBigEndian() (ret bool) { + i := int(0x1) + bs := (*[int(unsafe.Sizeof(i))]byte)(unsafe.Pointer(&i)) + return bs[0] == 0 +} diff --git a/vendor/github.com/cilium/ebpf/internal/errors.go b/vendor/github.com/cilium/ebpf/internal/errors.go new file mode 100644 index 0000000000..877bd72ee2 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/errors.go @@ -0,0 +1,51 @@ +package internal + +import ( + "bytes" + "errors" + "fmt" + "strings" + + "github.com/cilium/ebpf/internal/unix" +) + +// ErrorWithLog returns an error that includes logs from the +// kernel verifier. +// +// logErr should be the error returned by the syscall that generated +// the log. It is used to check for truncation of the output. +func ErrorWithLog(err error, log []byte, logErr error) error { + logStr := strings.Trim(CString(log), "\t\r\n ") + if errors.Is(logErr, unix.ENOSPC) { + logStr += " (truncated...)" + } + + return &VerifierError{err, logStr} +} + +// VerifierError includes information from the eBPF verifier. +type VerifierError struct { + cause error + log string +} + +func (le *VerifierError) Unwrap() error { + return le.cause +} + +func (le *VerifierError) Error() string { + if le.log == "" { + return le.cause.Error() + } + + return fmt.Sprintf("%s: %s", le.cause, le.log) +} + +// CString turns a NUL / zero terminated byte buffer into a string. 
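CString, below, is deliberately strict: everything from the first NUL onward is dropped, and a buffer with no terminator at all yields the empty string rather than trailing garbage, which suits the fixed-size, zero-padded buffers the kernel hands back. A behaviour check using a stand-alone copy of the same logic:

    package main

    import (
        "bytes"
        "fmt"
    )

    func cstring(in []byte) string { // mirrors internal.CString below
        if i := bytes.IndexByte(in, 0); i != -1 {
            return string(in[:i])
        }
        return ""
    }

    func main() {
        fmt.Printf("%q\n", cstring([]byte("bpf\x00junk")))   // "bpf"
        fmt.Printf("%q\n", cstring([]byte("no terminator"))) // ""
    }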
+func CString(in []byte) string { + inLen := bytes.IndexByte(in, 0) + if inLen == -1 { + return "" + } + return string(in[:inLen]) +} diff --git a/vendor/github.com/cilium/ebpf/internal/fd.go b/vendor/github.com/cilium/ebpf/internal/fd.go new file mode 100644 index 0000000000..af04955bd5 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/fd.go @@ -0,0 +1,69 @@ +package internal + +import ( + "errors" + "fmt" + "os" + "runtime" + "strconv" + + "github.com/cilium/ebpf/internal/unix" +) + +var ErrClosedFd = errors.New("use of closed file descriptor") + +type FD struct { + raw int64 +} + +func NewFD(value uint32) *FD { + fd := &FD{int64(value)} + runtime.SetFinalizer(fd, (*FD).Close) + return fd +} + +func (fd *FD) String() string { + return strconv.FormatInt(fd.raw, 10) +} + +func (fd *FD) Value() (uint32, error) { + if fd.raw < 0 { + return 0, ErrClosedFd + } + + return uint32(fd.raw), nil +} + +func (fd *FD) Close() error { + if fd.raw < 0 { + return nil + } + + value := int(fd.raw) + fd.raw = -1 + + fd.Forget() + return unix.Close(value) +} + +func (fd *FD) Forget() { + runtime.SetFinalizer(fd, nil) +} + +func (fd *FD) Dup() (*FD, error) { + if fd.raw < 0 { + return nil, ErrClosedFd + } + + dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 0) + if err != nil { + return nil, fmt.Errorf("can't dup fd: %v", err) + } + + return NewFD(uint32(dup)), nil +} + +func (fd *FD) File(name string) *os.File { + fd.Forget() + return os.NewFile(uintptr(fd.raw), name) +} diff --git a/vendor/github.com/cilium/ebpf/internal/feature.go b/vendor/github.com/cilium/ebpf/internal/feature.go new file mode 100644 index 0000000000..c94a2e1ee0 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/feature.go @@ -0,0 +1,100 @@ +package internal + +import ( + "errors" + "fmt" + "sync" +) + +// ErrNotSupported indicates that a feature is not supported by the current kernel. +var ErrNotSupported = errors.New("not supported") + +// UnsupportedFeatureError is returned by FeatureTest() functions. +type UnsupportedFeatureError struct { + // The minimum Linux mainline version required for this feature. + // Used for the error string, and for sanity checking during testing. + MinimumVersion Version + + // The name of the feature that isn't supported. + Name string +} + +func (ufe *UnsupportedFeatureError) Error() string { + if ufe.MinimumVersion.Unspecified() { + return fmt.Sprintf("%s not supported", ufe.Name) + } + return fmt.Sprintf("%s not supported (requires >= %s)", ufe.Name, ufe.MinimumVersion) +} + +// Is indicates that UnsupportedFeatureError is ErrNotSupported. +func (ufe *UnsupportedFeatureError) Is(target error) bool { + return target == ErrNotSupported +} + +type featureTest struct { + sync.RWMutex + successful bool + result error +} + +// FeatureTestFn is used to determine whether the kernel supports +// a certain feature. +// +// The return values have the following semantics: +// +// err == ErrNotSupported: the feature is not available +// err == nil: the feature is available +// err != nil: the test couldn't be executed +type FeatureTestFn func() error + +// FeatureTest wraps a function so that it is run at most once. +// +// name should identify the tested feature, while version must be in the +// form Major.Minor[.Patch]. +// +// Returns an error wrapping ErrNotSupported if the feature is not supported. 
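A typical caller wires FeatureTest, below, into a package-level probe and branches on errors.Is. A hedged sketch: the feature name, version and probe body here are invented, only the API shape comes from this file (assumes the errors and internal packages are imported):

    var haveRingBuf = internal.FeatureTest("ringbuf map", "5.8", func() error {
        // hypothetical probe: attempt the operation, map "kernel said no"
        // to internal.ErrNotSupported, and return anything unexpected as-is
        return internal.ErrNotSupported
    })

    func pickMapType() {
        if err := haveRingBuf(); errors.Is(err, internal.ErrNotSupported) {
            // fall back to a perf event array on older kernels
        }
    }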
+func FeatureTest(name, version string, fn FeatureTestFn) func() error { + v, err := NewVersion(version) + if err != nil { + return func() error { return err } + } + + ft := new(featureTest) + return func() error { + ft.RLock() + if ft.successful { + defer ft.RUnlock() + return ft.result + } + ft.RUnlock() + ft.Lock() + defer ft.Unlock() + // check one more time on the off + // chance that two go routines + // were able to call into the write + // lock + if ft.successful { + return ft.result + } + err := fn() + switch { + case errors.Is(err, ErrNotSupported): + ft.result = &UnsupportedFeatureError{ + MinimumVersion: v, + Name: name, + } + fallthrough + + case err == nil: + ft.successful = true + + default: + // We couldn't execute the feature test to a point + // where it could make a determination. + // Don't cache the result, just return it. + return fmt.Errorf("detect support for %s: %w", name, err) + } + + return ft.result + } +} diff --git a/vendor/github.com/cilium/ebpf/internal/io.go b/vendor/github.com/cilium/ebpf/internal/io.go new file mode 100644 index 0000000000..fa7402782d --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/io.go @@ -0,0 +1,16 @@ +package internal + +import "errors" + +// DiscardZeroes makes sure that all written bytes are zero +// before discarding them. +type DiscardZeroes struct{} + +func (DiscardZeroes) Write(p []byte) (int, error) { + for _, b := range p { + if b != 0 { + return 0, errors.New("encountered non-zero byte") + } + } + return len(p), nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/pinning.go b/vendor/github.com/cilium/ebpf/internal/pinning.go new file mode 100644 index 0000000000..5329b432d7 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/pinning.go @@ -0,0 +1,44 @@ +package internal + +import ( + "errors" + "fmt" + "os" + + "github.com/cilium/ebpf/internal/unix" +) + +func Pin(currentPath, newPath string, fd *FD) error { + if newPath == "" { + return errors.New("given pinning path cannot be empty") + } + if currentPath == newPath { + return nil + } + if currentPath == "" { + return BPFObjPin(newPath, fd) + } + var err error + // Renameat2 is used instead of os.Rename to disallow the new path replacing + // an existing path. + if err = unix.Renameat2(unix.AT_FDCWD, currentPath, unix.AT_FDCWD, newPath, unix.RENAME_NOREPLACE); err == nil { + // Object is now moved to the new pinning path. + return nil + } + if !os.IsNotExist(err) { + return fmt.Errorf("unable to move pinned object to new path %v: %w", newPath, err) + } + // Internal state not in sync with the file system so let's fix it. + return BPFObjPin(newPath, fd) +} + +func Unpin(pinnedPath string) error { + if pinnedPath == "" { + return nil + } + err := os.Remove(pinnedPath) + if err == nil || os.IsNotExist(err) { + return nil + } + return err +} diff --git a/vendor/github.com/cilium/ebpf/internal/ptr.go b/vendor/github.com/cilium/ebpf/internal/ptr.go new file mode 100644 index 0000000000..f295de72cf --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/ptr.go @@ -0,0 +1,31 @@ +package internal + +import ( + "unsafe" + + "github.com/cilium/ebpf/internal/unix" +) + +// NewPointer creates a 64-bit pointer from an unsafe Pointer. +func NewPointer(ptr unsafe.Pointer) Pointer { + return Pointer{ptr: ptr} +} + +// NewSlicePointer creates a 64-bit pointer from a byte slice. 
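NewSlicePointer, below, has one subtlety worth noting: an empty slice produces the zero Pointer, which the kernel sees as NULL, rather than a pointer to a zero-length backing array. A short usage sketch:

    buf := make([]byte, 64)
    p := internal.NewSlicePointer(buf) // the kernel writes through p into buf

    var none []byte
    null := internal.NewSlicePointer(none) // zero Pointer: passed to the kernel as NULL
    _, _ = p, null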
+func NewSlicePointer(buf []byte) Pointer { + if len(buf) == 0 { + return Pointer{} + } + + return Pointer{ptr: unsafe.Pointer(&buf[0])} +} + +// NewStringPointer creates a 64-bit pointer from a string. +func NewStringPointer(str string) Pointer { + p, err := unix.BytePtrFromString(str) + if err != nil { + return Pointer{} + } + + return Pointer{ptr: unsafe.Pointer(p)} +} diff --git a/vendor/github.com/cilium/ebpf/internal/ptr_32_be.go b/vendor/github.com/cilium/ebpf/internal/ptr_32_be.go new file mode 100644 index 0000000000..8c114ddf47 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/ptr_32_be.go @@ -0,0 +1,15 @@ +//go:build armbe || mips || mips64p32 +// +build armbe mips mips64p32 + +package internal + +import ( + "unsafe" +) + +// Pointer wraps an unsafe.Pointer to be 64bit to +// conform to the syscall specification. +type Pointer struct { + pad uint32 + ptr unsafe.Pointer +} diff --git a/vendor/github.com/cilium/ebpf/internal/ptr_32_le.go b/vendor/github.com/cilium/ebpf/internal/ptr_32_le.go new file mode 100644 index 0000000000..e65a61e45d --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/ptr_32_le.go @@ -0,0 +1,15 @@ +//go:build 386 || amd64p32 || arm || mipsle || mips64p32le +// +build 386 amd64p32 arm mipsle mips64p32le + +package internal + +import ( + "unsafe" +) + +// Pointer wraps an unsafe.Pointer to be 64bit to +// conform to the syscall specification. +type Pointer struct { + ptr unsafe.Pointer + pad uint32 +} diff --git a/vendor/github.com/cilium/ebpf/internal/ptr_64.go b/vendor/github.com/cilium/ebpf/internal/ptr_64.go new file mode 100644 index 0000000000..71a3afe307 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/ptr_64.go @@ -0,0 +1,14 @@ +//go:build !386 && !amd64p32 && !arm && !mipsle && !mips64p32le && !armbe && !mips && !mips64p32 +// +build !386,!amd64p32,!arm,!mipsle,!mips64p32le,!armbe,!mips,!mips64p32 + +package internal + +import ( + "unsafe" +) + +// Pointer wraps an unsafe.Pointer to be 64bit to +// conform to the syscall specification. +type Pointer struct { + ptr unsafe.Pointer +} diff --git a/vendor/github.com/cilium/ebpf/internal/syscall.go b/vendor/github.com/cilium/ebpf/internal/syscall.go new file mode 100644 index 0000000000..b75037bb9d --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/syscall.go @@ -0,0 +1,304 @@ +package internal + +import ( + "errors" + "fmt" + "path/filepath" + "runtime" + "syscall" + "unsafe" + + "github.com/cilium/ebpf/internal/unix" +) + +//go:generate stringer -output syscall_string.go -type=BPFCmd + +// BPFCmd identifies a subcommand of the bpf syscall. +type BPFCmd int + +// Well known BPF commands. +const ( + BPF_MAP_CREATE BPFCmd = iota + BPF_MAP_LOOKUP_ELEM + BPF_MAP_UPDATE_ELEM + BPF_MAP_DELETE_ELEM + BPF_MAP_GET_NEXT_KEY + BPF_PROG_LOAD + BPF_OBJ_PIN + BPF_OBJ_GET + BPF_PROG_ATTACH + BPF_PROG_DETACH + BPF_PROG_TEST_RUN + BPF_PROG_GET_NEXT_ID + BPF_MAP_GET_NEXT_ID + BPF_PROG_GET_FD_BY_ID + BPF_MAP_GET_FD_BY_ID + BPF_OBJ_GET_INFO_BY_FD + BPF_PROG_QUERY + BPF_RAW_TRACEPOINT_OPEN + BPF_BTF_LOAD + BPF_BTF_GET_FD_BY_ID + BPF_TASK_FD_QUERY + BPF_MAP_LOOKUP_AND_DELETE_ELEM + BPF_MAP_FREEZE + BPF_BTF_GET_NEXT_ID + BPF_MAP_LOOKUP_BATCH + BPF_MAP_LOOKUP_AND_DELETE_BATCH + BPF_MAP_UPDATE_BATCH + BPF_MAP_DELETE_BATCH + BPF_LINK_CREATE + BPF_LINK_UPDATE + BPF_LINK_GET_FD_BY_ID + BPF_LINK_GET_NEXT_ID + BPF_ENABLE_STATS + BPF_ITER_CREATE +) + +// BPF wraps SYS_BPF. +// +// Any pointers contained in attr must use the Pointer type from this package. 
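The three build-tagged layouts above enforce a single invariant: Pointer always fills the 64-bit slot that bpf(2) attribute structs reserve, with the explicit pad placed so the pointer bits land in the half of the slot the kernel reads on 32-bit big- and little-endian targets. A quick check of the underlying sizes:

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        // ptr_64.go relies on this printing 8 on 64-bit targets; on 32-bit
        // targets it prints 4, which is why the other variants add a pad.
        var p unsafe.Pointer
        fmt.Println(unsafe.Sizeof(p))
    }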
+func BPF(cmd BPFCmd, attr unsafe.Pointer, size uintptr) (uintptr, error) { + r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size) + runtime.KeepAlive(attr) + + var err error + if errNo != 0 { + err = wrappedErrno{errNo} + } + + return r1, err +} + +type BPFProgLoadAttr struct { + ProgType uint32 + InsCount uint32 + Instructions Pointer + License Pointer + LogLevel uint32 + LogSize uint32 + LogBuf Pointer + KernelVersion uint32 // since 4.1 2541517c32be + ProgFlags uint32 // since 4.11 e07b98d9bffe + ProgName BPFObjName // since 4.15 067cae47771c + ProgIfIndex uint32 // since 4.15 1f6f4cb7ba21 + ExpectedAttachType uint32 // since 4.17 5e43f899b03a + ProgBTFFd uint32 + FuncInfoRecSize uint32 + FuncInfo Pointer + FuncInfoCnt uint32 + LineInfoRecSize uint32 + LineInfo Pointer + LineInfoCnt uint32 + AttachBTFID uint32 + AttachProgFd uint32 +} + +// BPFProgLoad wraps BPF_PROG_LOAD. +func BPFProgLoad(attr *BPFProgLoadAttr) (*FD, error) { + for { + fd, err := BPF(BPF_PROG_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + // As of ~4.20 the verifier can be interrupted by a signal, + // and returns EAGAIN in that case. + if errors.Is(err, unix.EAGAIN) { + continue + } + + if err != nil { + return nil, err + } + + return NewFD(uint32(fd)), nil + } +} + +type BPFProgAttachAttr struct { + TargetFd uint32 + AttachBpfFd uint32 + AttachType uint32 + AttachFlags uint32 + ReplaceBpfFd uint32 +} + +func BPFProgAttach(attr *BPFProgAttachAttr) error { + _, err := BPF(BPF_PROG_ATTACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type BPFProgDetachAttr struct { + TargetFd uint32 + AttachBpfFd uint32 + AttachType uint32 +} + +func BPFProgDetach(attr *BPFProgDetachAttr) error { + _, err := BPF(BPF_PROG_DETACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type BPFEnableStatsAttr struct { + StatsType uint32 +} + +func BPFEnableStats(attr *BPFEnableStatsAttr) (*FD, error) { + ptr, err := BPF(BPF_ENABLE_STATS, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, fmt.Errorf("enable stats: %w", err) + } + return NewFD(uint32(ptr)), nil + +} + +type bpfObjAttr struct { + fileName Pointer + fd uint32 + fileFlags uint32 +} + +const bpfFSType = 0xcafe4a11 + +// BPFObjPin wraps BPF_OBJ_PIN. +func BPFObjPin(fileName string, fd *FD) error { + dirName := filepath.Dir(fileName) + var statfs unix.Statfs_t + if err := unix.Statfs(dirName, &statfs); err != nil { + return err + } + if uint64(statfs.Type) != bpfFSType { + return fmt.Errorf("%s is not on a bpf filesystem", fileName) + } + + value, err := fd.Value() + if err != nil { + return err + } + + attr := bpfObjAttr{ + fileName: NewStringPointer(fileName), + fd: value, + } + _, err = BPF(BPF_OBJ_PIN, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) + if err != nil { + return fmt.Errorf("pin object %s: %w", fileName, err) + } + return nil +} + +// BPFObjGet wraps BPF_OBJ_GET. +func BPFObjGet(fileName string, flags uint32) (*FD, error) { + attr := bpfObjAttr{ + fileName: NewStringPointer(fileName), + fileFlags: flags, + } + ptr, err := BPF(BPF_OBJ_GET, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) + if err != nil { + return nil, fmt.Errorf("get object %s: %w", fileName, err) + } + return NewFD(uint32(ptr)), nil +} + +type bpfObjGetInfoByFDAttr struct { + fd uint32 + infoLen uint32 + info Pointer +} + +// BPFObjGetInfoByFD wraps BPF_OBJ_GET_INFO_BY_FD. +// +// Available from 4.13. 
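BPFObjPin, above, guards against a common operator error: pinning only works on a BPF filesystem, detected by comparing statfs(2)'s f_type against the bpffs magic 0xcafe4a11. bpffs is conventionally mounted at /sys/fs/bpf (mount -t bpf bpf /sys/fs/bpf). A stand-alone probe:

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func main() {
        var st unix.Statfs_t
        if err := unix.Statfs("/sys/fs/bpf", &st); err != nil {
            fmt.Println("statfs:", err)
            return
        }
        fmt.Println(uint64(st.Type) == 0xcafe4a11) // true iff bpffs is mounted here
    }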
+func BPFObjGetInfoByFD(fd *FD, info unsafe.Pointer, size uintptr) error { + value, err := fd.Value() + if err != nil { + return err + } + + attr := bpfObjGetInfoByFDAttr{ + fd: value, + infoLen: uint32(size), + info: NewPointer(info), + } + _, err = BPF(BPF_OBJ_GET_INFO_BY_FD, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) + if err != nil { + return fmt.Errorf("fd %v: %w", fd, err) + } + return nil +} + +type bpfGetFDByIDAttr struct { + id uint32 + next uint32 +} + +// BPFObjGetInfoByFD wraps BPF_*_GET_FD_BY_ID. +// +// Available from 4.13. +func BPFObjGetFDByID(cmd BPFCmd, id uint32) (*FD, error) { + attr := bpfGetFDByIDAttr{ + id: id, + } + ptr, err := BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) + return NewFD(uint32(ptr)), err +} + +// BPFObjName is a null-terminated string made up of +// 'A-Za-z0-9_' characters. +type BPFObjName [unix.BPF_OBJ_NAME_LEN]byte + +// NewBPFObjName truncates the result if it is too long. +func NewBPFObjName(name string) BPFObjName { + var result BPFObjName + copy(result[:unix.BPF_OBJ_NAME_LEN-1], name) + return result +} + +type BPFMapCreateAttr struct { + MapType uint32 + KeySize uint32 + ValueSize uint32 + MaxEntries uint32 + Flags uint32 + InnerMapFd uint32 // since 4.12 56f668dfe00d + NumaNode uint32 // since 4.14 96eabe7a40aa + MapName BPFObjName // since 4.15 ad5b177bd73f + MapIfIndex uint32 + BTFFd uint32 + BTFKeyTypeID uint32 + BTFValueTypeID uint32 +} + +func BPFMapCreate(attr *BPFMapCreateAttr) (*FD, error) { + fd, err := BPF(BPF_MAP_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + + return NewFD(uint32(fd)), nil +} + +// wrappedErrno wraps syscall.Errno to prevent direct comparisons with +// syscall.E* or unix.E* constants. +// +// You should never export an error of this type. +type wrappedErrno struct { + syscall.Errno +} + +func (we wrappedErrno) Unwrap() error { + return we.Errno +} + +type syscallError struct { + error + errno syscall.Errno +} + +func SyscallError(err error, errno syscall.Errno) error { + return &syscallError{err, errno} +} + +func (se *syscallError) Is(target error) bool { + return target == se.error +} + +func (se *syscallError) Unwrap() error { + return se.errno +} diff --git a/vendor/github.com/cilium/ebpf/internal/syscall_string.go b/vendor/github.com/cilium/ebpf/internal/syscall_string.go new file mode 100644 index 0000000000..85df047797 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/syscall_string.go @@ -0,0 +1,56 @@ +// Code generated by "stringer -output syscall_string.go -type=BPFCmd"; DO NOT EDIT. + +package internal + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
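The func _() that follows is stringer's drift guard: each _ = x[CONST-value] line only compiles while the constant still equals the value baked into the generated file, so adding or reordering a BPFCmd without re-running stringer breaks the build instead of silently mislabelling syscalls. String() then slices one concatenated name string through a parallel offset table; for example:

    fmt.Println(internal.BPF_PROG_LOAD) // "BPF_PROG_LOAD", i.e. _BPFCmd_name[91:104]
    fmt.Println(internal.BPFCmd(99))    // "BPFCmd(99)", the out-of-range fallback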
+ var x [1]struct{} + _ = x[BPF_MAP_CREATE-0] + _ = x[BPF_MAP_LOOKUP_ELEM-1] + _ = x[BPF_MAP_UPDATE_ELEM-2] + _ = x[BPF_MAP_DELETE_ELEM-3] + _ = x[BPF_MAP_GET_NEXT_KEY-4] + _ = x[BPF_PROG_LOAD-5] + _ = x[BPF_OBJ_PIN-6] + _ = x[BPF_OBJ_GET-7] + _ = x[BPF_PROG_ATTACH-8] + _ = x[BPF_PROG_DETACH-9] + _ = x[BPF_PROG_TEST_RUN-10] + _ = x[BPF_PROG_GET_NEXT_ID-11] + _ = x[BPF_MAP_GET_NEXT_ID-12] + _ = x[BPF_PROG_GET_FD_BY_ID-13] + _ = x[BPF_MAP_GET_FD_BY_ID-14] + _ = x[BPF_OBJ_GET_INFO_BY_FD-15] + _ = x[BPF_PROG_QUERY-16] + _ = x[BPF_RAW_TRACEPOINT_OPEN-17] + _ = x[BPF_BTF_LOAD-18] + _ = x[BPF_BTF_GET_FD_BY_ID-19] + _ = x[BPF_TASK_FD_QUERY-20] + _ = x[BPF_MAP_LOOKUP_AND_DELETE_ELEM-21] + _ = x[BPF_MAP_FREEZE-22] + _ = x[BPF_BTF_GET_NEXT_ID-23] + _ = x[BPF_MAP_LOOKUP_BATCH-24] + _ = x[BPF_MAP_LOOKUP_AND_DELETE_BATCH-25] + _ = x[BPF_MAP_UPDATE_BATCH-26] + _ = x[BPF_MAP_DELETE_BATCH-27] + _ = x[BPF_LINK_CREATE-28] + _ = x[BPF_LINK_UPDATE-29] + _ = x[BPF_LINK_GET_FD_BY_ID-30] + _ = x[BPF_LINK_GET_NEXT_ID-31] + _ = x[BPF_ENABLE_STATS-32] + _ = x[BPF_ITER_CREATE-33] +} + +const _BPFCmd_name = "BPF_MAP_CREATEBPF_MAP_LOOKUP_ELEMBPF_MAP_UPDATE_ELEMBPF_MAP_DELETE_ELEMBPF_MAP_GET_NEXT_KEYBPF_PROG_LOADBPF_OBJ_PINBPF_OBJ_GETBPF_PROG_ATTACHBPF_PROG_DETACHBPF_PROG_TEST_RUNBPF_PROG_GET_NEXT_IDBPF_MAP_GET_NEXT_IDBPF_PROG_GET_FD_BY_IDBPF_MAP_GET_FD_BY_IDBPF_OBJ_GET_INFO_BY_FDBPF_PROG_QUERYBPF_RAW_TRACEPOINT_OPENBPF_BTF_LOADBPF_BTF_GET_FD_BY_IDBPF_TASK_FD_QUERYBPF_MAP_LOOKUP_AND_DELETE_ELEMBPF_MAP_FREEZEBPF_BTF_GET_NEXT_IDBPF_MAP_LOOKUP_BATCHBPF_MAP_LOOKUP_AND_DELETE_BATCHBPF_MAP_UPDATE_BATCHBPF_MAP_DELETE_BATCHBPF_LINK_CREATEBPF_LINK_UPDATEBPF_LINK_GET_FD_BY_IDBPF_LINK_GET_NEXT_IDBPF_ENABLE_STATSBPF_ITER_CREATE" + +var _BPFCmd_index = [...]uint16{0, 14, 33, 52, 71, 91, 104, 115, 126, 141, 156, 173, 193, 212, 233, 253, 275, 289, 312, 324, 344, 361, 391, 405, 424, 444, 475, 495, 515, 530, 545, 566, 586, 602, 617} + +func (i BPFCmd) String() string { + if i < 0 || i >= BPFCmd(len(_BPFCmd_index)-1) { + return "BPFCmd(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _BPFCmd_name[_BPFCmd_index[i]:_BPFCmd_index[i+1]] +} diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go new file mode 100644 index 0000000000..9aa70fa78c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go @@ -0,0 +1,208 @@ +//go:build linux +// +build linux + +package unix + +import ( + "bytes" + "syscall" + + linux "golang.org/x/sys/unix" +) + +const ( + ENOENT = linux.ENOENT + EEXIST = linux.EEXIST + EAGAIN = linux.EAGAIN + ENOSPC = linux.ENOSPC + EINVAL = linux.EINVAL + EPOLLIN = linux.EPOLLIN + EINTR = linux.EINTR + EPERM = linux.EPERM + ESRCH = linux.ESRCH + ENODEV = linux.ENODEV + EBADF = linux.EBADF + E2BIG = linux.E2BIG + // ENOTSUPP is not the same as ENOTSUP or EOPNOTSUP + ENOTSUPP = syscall.Errno(0x20c) + + BPF_F_NO_PREALLOC = linux.BPF_F_NO_PREALLOC + BPF_F_NUMA_NODE = linux.BPF_F_NUMA_NODE + BPF_F_RDONLY = linux.BPF_F_RDONLY + BPF_F_WRONLY = linux.BPF_F_WRONLY + BPF_F_RDONLY_PROG = linux.BPF_F_RDONLY_PROG + BPF_F_WRONLY_PROG = linux.BPF_F_WRONLY_PROG + BPF_F_SLEEPABLE = linux.BPF_F_SLEEPABLE + BPF_F_MMAPABLE = linux.BPF_F_MMAPABLE + BPF_F_INNER_MAP = linux.BPF_F_INNER_MAP + BPF_OBJ_NAME_LEN = linux.BPF_OBJ_NAME_LEN + BPF_TAG_SIZE = linux.BPF_TAG_SIZE + BPF_RINGBUF_BUSY_BIT = linux.BPF_RINGBUF_BUSY_BIT + BPF_RINGBUF_DISCARD_BIT = linux.BPF_RINGBUF_DISCARD_BIT + BPF_RINGBUF_HDR_SZ = linux.BPF_RINGBUF_HDR_SZ + SYS_BPF = linux.SYS_BPF 
+ F_DUPFD_CLOEXEC = linux.F_DUPFD_CLOEXEC + EPOLL_CTL_ADD = linux.EPOLL_CTL_ADD + EPOLL_CLOEXEC = linux.EPOLL_CLOEXEC + O_CLOEXEC = linux.O_CLOEXEC + O_NONBLOCK = linux.O_NONBLOCK + PROT_READ = linux.PROT_READ + PROT_WRITE = linux.PROT_WRITE + MAP_SHARED = linux.MAP_SHARED + PERF_ATTR_SIZE_VER1 = linux.PERF_ATTR_SIZE_VER1 + PERF_TYPE_SOFTWARE = linux.PERF_TYPE_SOFTWARE + PERF_TYPE_TRACEPOINT = linux.PERF_TYPE_TRACEPOINT + PERF_COUNT_SW_BPF_OUTPUT = linux.PERF_COUNT_SW_BPF_OUTPUT + PERF_EVENT_IOC_DISABLE = linux.PERF_EVENT_IOC_DISABLE + PERF_EVENT_IOC_ENABLE = linux.PERF_EVENT_IOC_ENABLE + PERF_EVENT_IOC_SET_BPF = linux.PERF_EVENT_IOC_SET_BPF + PerfBitWatermark = linux.PerfBitWatermark + PERF_SAMPLE_RAW = linux.PERF_SAMPLE_RAW + PERF_FLAG_FD_CLOEXEC = linux.PERF_FLAG_FD_CLOEXEC + RLIM_INFINITY = linux.RLIM_INFINITY + RLIMIT_MEMLOCK = linux.RLIMIT_MEMLOCK + BPF_STATS_RUN_TIME = linux.BPF_STATS_RUN_TIME + PERF_RECORD_LOST = linux.PERF_RECORD_LOST + PERF_RECORD_SAMPLE = linux.PERF_RECORD_SAMPLE + AT_FDCWD = linux.AT_FDCWD + RENAME_NOREPLACE = linux.RENAME_NOREPLACE +) + +// Statfs_t is a wrapper +type Statfs_t = linux.Statfs_t + +// Rlimit is a wrapper +type Rlimit = linux.Rlimit + +// Syscall is a wrapper +func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { + return linux.Syscall(trap, a1, a2, a3) +} + +// FcntlInt is a wrapper +func FcntlInt(fd uintptr, cmd, arg int) (int, error) { + return linux.FcntlInt(fd, cmd, arg) +} + +// IoctlSetInt is a wrapper +func IoctlSetInt(fd int, req uint, value int) error { + return linux.IoctlSetInt(fd, req, value) +} + +// Statfs is a wrapper +func Statfs(path string, buf *Statfs_t) (err error) { + return linux.Statfs(path, buf) +} + +// Close is a wrapper +func Close(fd int) (err error) { + return linux.Close(fd) +} + +// EpollEvent is a wrapper +type EpollEvent = linux.EpollEvent + +// EpollWait is a wrapper +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + return linux.EpollWait(epfd, events, msec) +} + +// EpollCtl is a wrapper +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + return linux.EpollCtl(epfd, op, fd, event) +} + +// Eventfd is a wrapper +func Eventfd(initval uint, flags int) (fd int, err error) { + return linux.Eventfd(initval, flags) +} + +// Write is a wrapper +func Write(fd int, p []byte) (n int, err error) { + return linux.Write(fd, p) +} + +// EpollCreate1 is a wrapper +func EpollCreate1(flag int) (fd int, err error) { + return linux.EpollCreate1(flag) +} + +// PerfEventMmapPage is a wrapper +type PerfEventMmapPage linux.PerfEventMmapPage + +// SetNonblock is a wrapper +func SetNonblock(fd int, nonblocking bool) (err error) { + return linux.SetNonblock(fd, nonblocking) +} + +// Mmap is a wrapper +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return linux.Mmap(fd, offset, length, prot, flags) +} + +// Munmap is a wrapper +func Munmap(b []byte) (err error) { + return linux.Munmap(b) +} + +// PerfEventAttr is a wrapper +type PerfEventAttr = linux.PerfEventAttr + +// PerfEventOpen is a wrapper +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + return linux.PerfEventOpen(attr, pid, cpu, groupFd, flags) +} + +// Utsname is a wrapper +type Utsname = linux.Utsname + +// Uname is a wrapper +func Uname(buf *Utsname) (err error) { + return linux.Uname(buf) +} + +// Getpid is a wrapper +func Getpid() int { + return linux.Getpid() +} + +// Gettid is a wrapper +func 
Gettid() int { + return linux.Gettid() +} + +// Tgkill is a wrapper +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + return linux.Tgkill(tgid, tid, sig) +} + +// BytePtrFromString is a wrapper +func BytePtrFromString(s string) (*byte, error) { + return linux.BytePtrFromString(s) +} + +// ByteSliceToString is a wrapper +func ByteSliceToString(s []byte) string { + return linux.ByteSliceToString(s) +} + +// Renameat2 is a wrapper +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error { + return linux.Renameat2(olddirfd, oldpath, newdirfd, newpath, flags) +} + +func KernelRelease() (string, error) { + var uname Utsname + err := Uname(&uname) + if err != nil { + return "", err + } + + end := bytes.IndexByte(uname.Release[:], 0) + release := string(uname.Release[:end]) + return release, nil +} + +func Prlimit(pid, resource int, new, old *Rlimit) error { + return linux.Prlimit(pid, resource, new, old) +} diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_other.go b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go new file mode 100644 index 0000000000..4f50d896eb --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go @@ -0,0 +1,267 @@ +//go:build !linux +// +build !linux + +package unix + +import ( + "fmt" + "runtime" + "syscall" +) + +var errNonLinux = fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH) + +const ( + ENOENT = syscall.ENOENT + EEXIST = syscall.EEXIST + EAGAIN = syscall.EAGAIN + ENOSPC = syscall.ENOSPC + EINVAL = syscall.EINVAL + EINTR = syscall.EINTR + EPERM = syscall.EPERM + ESRCH = syscall.ESRCH + ENODEV = syscall.ENODEV + EBADF = syscall.Errno(0) + E2BIG = syscall.Errno(0) + // ENOTSUPP is not the same as ENOTSUP or EOPNOTSUP + ENOTSUPP = syscall.Errno(0x20c) + + BPF_F_NO_PREALLOC = 0 + BPF_F_NUMA_NODE = 0 + BPF_F_RDONLY = 0 + BPF_F_WRONLY = 0 + BPF_F_RDONLY_PROG = 0 + BPF_F_WRONLY_PROG = 0 + BPF_F_SLEEPABLE = 0 + BPF_F_MMAPABLE = 0 + BPF_F_INNER_MAP = 0 + BPF_OBJ_NAME_LEN = 0x10 + BPF_TAG_SIZE = 0x8 + BPF_RINGBUF_BUSY_BIT = 0 + BPF_RINGBUF_DISCARD_BIT = 0 + BPF_RINGBUF_HDR_SZ = 0 + SYS_BPF = 321 + F_DUPFD_CLOEXEC = 0x406 + EPOLLIN = 0x1 + EPOLL_CTL_ADD = 0x1 + EPOLL_CLOEXEC = 0x80000 + O_CLOEXEC = 0x80000 + O_NONBLOCK = 0x800 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + MAP_SHARED = 0x1 + PERF_ATTR_SIZE_VER1 = 0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0 + PERF_COUNT_SW_BPF_OUTPUT = 0xa + PERF_EVENT_IOC_DISABLE = 0 + PERF_EVENT_IOC_ENABLE = 0 + PERF_EVENT_IOC_SET_BPF = 0 + PerfBitWatermark = 0x4000 + PERF_SAMPLE_RAW = 0x400 + PERF_FLAG_FD_CLOEXEC = 0x8 + RLIM_INFINITY = 0x7fffffffffffffff + RLIMIT_MEMLOCK = 8 + BPF_STATS_RUN_TIME = 0 + PERF_RECORD_LOST = 2 + PERF_RECORD_SAMPLE = 9 + AT_FDCWD = -0x2 + RENAME_NOREPLACE = 0x1 +) + +// Statfs_t is a wrapper +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid [2]int32 + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +// Rlimit is a wrapper +type Rlimit struct { + Cur uint64 + Max uint64 +} + +// Syscall is a wrapper +func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { + return 0, 0, syscall.Errno(1) +} + +// FcntlInt is a wrapper +func FcntlInt(fd uintptr, cmd, arg int) (int, error) { + return -1, errNonLinux +} + +// IoctlSetInt is a wrapper +func IoctlSetInt(fd int, req uint, value int) error { + return errNonLinux +} + +// Statfs is a wrapper +func Statfs(path string, buf 
*Statfs_t) error { + return errNonLinux +} + +// Close is a wrapper +func Close(fd int) (err error) { + return errNonLinux +} + +// EpollEvent is a wrapper +type EpollEvent struct { + Events uint32 + Fd int32 + Pad int32 +} + +// EpollWait is a wrapper +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + return 0, errNonLinux +} + +// EpollCtl is a wrapper +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + return errNonLinux +} + +// Eventfd is a wrapper +func Eventfd(initval uint, flags int) (fd int, err error) { + return 0, errNonLinux +} + +// Write is a wrapper +func Write(fd int, p []byte) (n int, err error) { + return 0, errNonLinux +} + +// EpollCreate1 is a wrapper +func EpollCreate1(flag int) (fd int, err error) { + return 0, errNonLinux +} + +// PerfEventMmapPage is a wrapper +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +// SetNonblock is a wrapper +func SetNonblock(fd int, nonblocking bool) (err error) { + return errNonLinux +} + +// Mmap is a wrapper +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return []byte{}, errNonLinux +} + +// Munmap is a wrapper +func Munmap(b []byte) (err error) { + return errNonLinux +} + +// PerfEventAttr is a wrapper +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + Sample_max_stack uint16 +} + +// PerfEventOpen is a wrapper +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + return 0, errNonLinux +} + +// Utsname is a wrapper +type Utsname struct { + Release [65]byte + Version [65]byte +} + +// Uname is a wrapper +func Uname(buf *Utsname) (err error) { + return errNonLinux +} + +// Getpid is a wrapper +func Getpid() int { + return -1 +} + +// Gettid is a wrapper +func Gettid() int { + return -1 +} + +// Tgkill is a wrapper +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + return errNonLinux +} + +// BytePtrFromString is a wrapper +func BytePtrFromString(s string) (*byte, error) { + return nil, errNonLinux +} + +// ByteSliceToString is a wrapper +func ByteSliceToString(s []byte) string { + return "" +} + +// Renameat2 is a wrapper +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error { + return errNonLinux +} + +func KernelRelease() (string, error) { + return "", errNonLinux +} + +func Prlimit(pid, resource int, new, old *Rlimit) error { + return errNonLinux +} diff --git a/vendor/github.com/cilium/ebpf/internal/version.go b/vendor/github.com/cilium/ebpf/internal/version.go new file mode 100644 index 0000000000..4915e58376 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/version.go @@ -0,0 +1,163 @@ +package internal + +import ( + "fmt" + "os" + "regexp" + "sync" + + "github.com/cilium/ebpf/internal/unix" +) + +const ( + // 
Version constant used in ELF binaries indicating that the loader needs to + // substitute the eBPF program's version with the value of the kernel's + // KERNEL_VERSION compile-time macro. Used for compatibility with BCC, gobpf + // and RedSift. + MagicKernelVersion = 0xFFFFFFFE +) + +var ( + // Match between one and three decimals separated by dots, with the last + // segment (patch level) being optional on some kernels. + // The x.y.z string must appear at the start of a string or right after + // whitespace to prevent sequences like 'x.y.z-a.b.c' from matching 'a.b.c'. + rgxKernelVersion = regexp.MustCompile(`(?:\A|\s)\d{1,3}\.\d{1,3}(?:\.\d{1,3})?`) + + kernelVersion = struct { + once sync.Once + version Version + err error + }{} +) + +// A Version in the form Major.Minor.Patch. +type Version [3]uint16 + +// NewVersion creates a version from a string like "Major.Minor.Patch". +// +// Patch is optional. +func NewVersion(ver string) (Version, error) { + var major, minor, patch uint16 + n, _ := fmt.Sscanf(ver, "%d.%d.%d", &major, &minor, &patch) + if n < 2 { + return Version{}, fmt.Errorf("invalid version: %s", ver) + } + return Version{major, minor, patch}, nil +} + +func (v Version) String() string { + if v[2] == 0 { + return fmt.Sprintf("v%d.%d", v[0], v[1]) + } + return fmt.Sprintf("v%d.%d.%d", v[0], v[1], v[2]) +} + +// Less returns true if the version is less than another version. +func (v Version) Less(other Version) bool { + for i, a := range v { + if a == other[i] { + continue + } + return a < other[i] + } + return false +} + +// Unspecified returns true if the version is all zero. +func (v Version) Unspecified() bool { + return v[0] == 0 && v[1] == 0 && v[2] == 0 +} + +// Kernel implements the kernel's KERNEL_VERSION macro from linux/version.h. +// It represents the kernel version and patch level as a single value. +func (v Version) Kernel() uint32 { + + // Kernels 4.4 and 4.9 have their SUBLEVEL clamped to 255 to avoid + // overflowing into PATCHLEVEL. + // See kernel commit 9b82f13e7ef3 ("kbuild: clamp SUBLEVEL to 255"). + s := v[2] + if s > 255 { + s = 255 + } + + // Truncate members to uint8 to prevent them from spilling over into + // each other when overflowing 8 bits. + return uint32(uint8(v[0]))<<16 | uint32(uint8(v[1]))<<8 | uint32(uint8(s)) +} + +// KernelVersion returns the version of the currently running kernel. +func KernelVersion() (Version, error) { + kernelVersion.once.Do(func() { + kernelVersion.version, kernelVersion.err = detectKernelVersion() + }) + + if kernelVersion.err != nil { + return Version{}, kernelVersion.err + } + return kernelVersion.version, nil +} + +// detectKernelVersion returns the version of the running kernel. It scans the +// following sources in order: /proc/version_signature, uname -v, uname -r. +// In each of those locations, the last-appearing x.y(.z) value is selected +// for parsing. The first location that yields a usable version number is +// returned. +func detectKernelVersion() (Version, error) { + + // Try reading /proc/version_signature for Ubuntu compatibility. + // Example format: Ubuntu 4.15.0-91.92-generic 4.15.18 + // This method exists in the kernel itself, see d18acd15c + // ("perf tools: Fix kernel version error in ubuntu"). + if pvs, err := os.ReadFile("/proc/version_signature"); err == nil { + // If /proc/version_signature exists, failing to parse it is an error. + // It only exists on Ubuntu, where the real patch level is not obtainable + // through any other method. 
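	// Kernel(), defined above, reproduces the kernel's KERNEL_VERSION(a,b,c)
	// macro, (a<<16)+(b<<8)+c, including the sublevel clamp that kbuild
	// applies to long-running stable series. Two worked values:
	//
	//	Version{4, 9, 302}.Kernel() // 0x0409ff: sublevel clamped to 255
	//	Version{5, 4, 0}.Kernel()   // 0x050400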
+ v, err := findKernelVersion(string(pvs)) + if err != nil { + return Version{}, err + } + return v, nil + } + + var uname unix.Utsname + if err := unix.Uname(&uname); err != nil { + return Version{}, fmt.Errorf("calling uname: %w", err) + } + + // Debian puts the version including the patch level in uname.Version. + // It is not an error if there's no version number in uname.Version, + // as most distributions don't use it. Parsing can continue on uname.Release. + // Example format: #1 SMP Debian 4.19.37-5+deb10u2 (2019-08-08) + if v, err := findKernelVersion(unix.ByteSliceToString(uname.Version[:])); err == nil { + return v, nil + } + + // Most other distributions have the full kernel version including patch + // level in uname.Release. + // Example format: 4.19.0-5-amd64, 5.5.10-arch1-1 + v, err := findKernelVersion(unix.ByteSliceToString(uname.Release[:])) + if err != nil { + return Version{}, err + } + + return v, nil +} + +// findKernelVersion matches s against rgxKernelVersion and parses the result +// into a Version. If s contains multiple matches, the last entry is selected. +func findKernelVersion(s string) (Version, error) { + m := rgxKernelVersion.FindAllString(s, -1) + if m == nil { + return Version{}, fmt.Errorf("no kernel version in string: %s", s) + } + // Pick the last match of the string in case there are multiple. + s = m[len(m)-1] + + v, err := NewVersion(s) + if err != nil { + return Version{}, fmt.Errorf("parsing version string %s: %w", s, err) + } + + return v, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/cgroup.go b/vendor/github.com/cilium/ebpf/link/cgroup.go new file mode 100644 index 0000000000..5540bb068c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/cgroup.go @@ -0,0 +1,171 @@ +package link + +import ( + "errors" + "fmt" + "os" + + "github.com/cilium/ebpf" +) + +type cgroupAttachFlags uint32 + +// cgroup attach flags +const ( + flagAllowOverride cgroupAttachFlags = 1 << iota + flagAllowMulti + flagReplace +) + +type CgroupOptions struct { + // Path to a cgroupv2 folder. + Path string + // One of the AttachCgroup* constants + Attach ebpf.AttachType + // Program must be of type CGroup*, and the attach type must match Attach. + Program *ebpf.Program +} + +// AttachCgroup links a BPF program to a cgroup. +func AttachCgroup(opts CgroupOptions) (Link, error) { + cgroup, err := os.Open(opts.Path) + if err != nil { + return nil, fmt.Errorf("can't open cgroup: %s", err) + } + + clone, err := opts.Program.Clone() + if err != nil { + cgroup.Close() + return nil, err + } + + var cg Link + cg, err = newLinkCgroup(cgroup, opts.Attach, clone) + if errors.Is(err, ErrNotSupported) { + cg, err = newProgAttachCgroup(cgroup, opts.Attach, clone, flagAllowMulti) + } + if errors.Is(err, ErrNotSupported) { + cg, err = newProgAttachCgroup(cgroup, opts.Attach, clone, flagAllowOverride) + } + if err != nil { + cgroup.Close() + clone.Close() + return nil, err + } + + return cg, nil +} + +// LoadPinnedCgroup loads a pinned cgroup from a bpffs. 
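AttachCgroup, above, degrades gracefully: it tries a kernel bpf_link first, then PROG_ATTACH in allow-multi mode, then the legacy override mode, so callers transparently get the strongest attachment the running kernel supports. A hedged usage sketch; the cgroup path and attach type are examples, not requirements:

    lnk, err := link.AttachCgroup(link.CgroupOptions{
        Path:    "/sys/fs/cgroup/my-service", // hypothetical cgroupv2 directory
        Attach:  ebpf.AttachCGroupInetIngress,
        Program: prog, // a loaded CGroupSKB program
    })
    if err != nil {
        // handle: missing cgroup, or a program/attach type mismatch
    }
    defer lnk.Close()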
+func LoadPinnedCgroup(fileName string, opts *ebpf.LoadPinOptions) (Link, error) { + link, err := LoadPinnedRawLink(fileName, CgroupType, opts) + if err != nil { + return nil, err + } + + return &linkCgroup{*link}, nil +} + +type progAttachCgroup struct { + cgroup *os.File + current *ebpf.Program + attachType ebpf.AttachType + flags cgroupAttachFlags +} + +var _ Link = (*progAttachCgroup)(nil) + +func (cg *progAttachCgroup) isLink() {} + +func newProgAttachCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program, flags cgroupAttachFlags) (*progAttachCgroup, error) { + if flags&flagAllowMulti > 0 { + if err := haveProgAttachReplace(); err != nil { + return nil, fmt.Errorf("can't support multiple programs: %w", err) + } + } + + err := RawAttachProgram(RawAttachProgramOptions{ + Target: int(cgroup.Fd()), + Program: prog, + Flags: uint32(flags), + Attach: attach, + }) + if err != nil { + return nil, fmt.Errorf("cgroup: %w", err) + } + + return &progAttachCgroup{cgroup, prog, attach, flags}, nil +} + +func (cg *progAttachCgroup) Close() error { + defer cg.cgroup.Close() + defer cg.current.Close() + + err := RawDetachProgram(RawDetachProgramOptions{ + Target: int(cg.cgroup.Fd()), + Program: cg.current, + Attach: cg.attachType, + }) + if err != nil { + return fmt.Errorf("close cgroup: %s", err) + } + return nil +} + +func (cg *progAttachCgroup) Update(prog *ebpf.Program) error { + new, err := prog.Clone() + if err != nil { + return err + } + + args := RawAttachProgramOptions{ + Target: int(cg.cgroup.Fd()), + Program: prog, + Attach: cg.attachType, + Flags: uint32(cg.flags), + } + + if cg.flags&flagAllowMulti > 0 { + // Atomically replacing multiple programs requires at least + // 5.5 (commit 7dd68b3279f17921 "bpf: Support replacing cgroup-bpf + // program in MULTI mode") + args.Flags |= uint32(flagReplace) + args.Replace = cg.current + } + + if err := RawAttachProgram(args); err != nil { + new.Close() + return fmt.Errorf("can't update cgroup: %s", err) + } + + cg.current.Close() + cg.current = new + return nil +} + +func (cg *progAttachCgroup) Pin(string) error { + return fmt.Errorf("can't pin cgroup: %w", ErrNotSupported) +} + +func (cg *progAttachCgroup) Unpin() error { + return fmt.Errorf("can't pin cgroup: %w", ErrNotSupported) +} + +type linkCgroup struct { + RawLink +} + +var _ Link = (*linkCgroup)(nil) + +func newLinkCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program) (*linkCgroup, error) { + link, err := AttachRawLink(RawLinkOptions{ + Target: int(cgroup.Fd()), + Program: prog, + Attach: attach, + }) + if err != nil { + return nil, err + } + + return &linkCgroup{*link}, err +} diff --git a/vendor/github.com/cilium/ebpf/link/doc.go b/vendor/github.com/cilium/ebpf/link/doc.go new file mode 100644 index 0000000000..2bde35ed7a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/doc.go @@ -0,0 +1,2 @@ +// Package link allows attaching eBPF programs to various kernel hooks. +package link diff --git a/vendor/github.com/cilium/ebpf/link/freplace.go b/vendor/github.com/cilium/ebpf/link/freplace.go new file mode 100644 index 0000000000..a698e1a9d3 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/freplace.go @@ -0,0 +1,88 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/btf" +) + +type FreplaceLink struct { + RawLink +} + +// AttachFreplace attaches the given eBPF program to the function it replaces. +// +// The program and name can either be provided at link time, or can be provided +// at program load time. 
If they were provided at load time, they should be nil +// and empty respectively here, as they will be ignored by the kernel. +// Examples: +// +// AttachFreplace(dispatcher, "function", replacement) +// AttachFreplace(nil, "", replacement) +func AttachFreplace(targetProg *ebpf.Program, name string, prog *ebpf.Program) (*FreplaceLink, error) { + if (name == "") != (targetProg == nil) { + return nil, fmt.Errorf("must provide both or neither of name and targetProg: %w", errInvalidInput) + } + if prog == nil { + return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) + } + if prog.Type() != ebpf.Extension { + return nil, fmt.Errorf("eBPF program type %s is not an Extension: %w", prog.Type(), errInvalidInput) + } + + var ( + target int + typeID btf.TypeID + ) + if targetProg != nil { + info, err := targetProg.Info() + if err != nil { + return nil, err + } + btfID, ok := info.BTFID() + if !ok { + return nil, fmt.Errorf("could not get BTF ID for program %s: %w", info.Name, errInvalidInput) + } + btfHandle, err := btf.NewHandleFromID(btfID) + if err != nil { + return nil, err + } + defer btfHandle.Close() + + var function *btf.Func + if err := btfHandle.Spec().FindType(name, &function); err != nil { + return nil, err + } + + target = targetProg.FD() + typeID = function.ID() + } + + link, err := AttachRawLink(RawLinkOptions{ + Target: target, + Program: prog, + Attach: ebpf.AttachNone, + BTF: typeID, + }) + if err != nil { + return nil, err + } + + return &FreplaceLink{*link}, nil +} + +// Update implements the Link interface. +func (f *FreplaceLink) Update(new *ebpf.Program) error { + return fmt.Errorf("freplace update: %w", ErrNotSupported) +} + +// LoadPinnedFreplace loads a pinned iterator from a bpffs. +func LoadPinnedFreplace(fileName string, opts *ebpf.LoadPinOptions) (*FreplaceLink, error) { + link, err := LoadPinnedRawLink(fileName, TracingType, opts) + if err != nil { + return nil, err + } + + return &FreplaceLink{*link}, err +} diff --git a/vendor/github.com/cilium/ebpf/link/iter.go b/vendor/github.com/cilium/ebpf/link/iter.go new file mode 100644 index 0000000000..654d34ef84 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/iter.go @@ -0,0 +1,100 @@ +package link + +import ( + "fmt" + "io" + "unsafe" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal" +) + +type IterOptions struct { + // Program must be of type Tracing with attach type + // AttachTraceIter. The kind of iterator to attach to is + // determined at load time via the AttachTo field. + // + // AttachTo requires the kernel to include BTF of itself, + // and it to be compiled with a recent pahole (>= 1.16). + Program *ebpf.Program + + // Map specifies the target map for bpf_map_elem and sockmap iterators. + // It may be nil. + Map *ebpf.Map +} + +// AttachIter attaches a BPF seq_file iterator. 
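An attached iterator is inert until a reader exists: AttachIter, below, only creates the link, and each Iter.Open() spawns a fresh seq_file whose reads drive the program. A sketch, assuming prog is a loaded Tracing program with attach type AttachTraceIter:

    it, err := link.AttachIter(link.IterOptions{Program: prog})
    if err != nil {
        // handle
    }
    defer it.Close()

    rd, err := it.Open()
    if err != nil {
        // handle
    }
    defer rd.Close()
    io.Copy(os.Stdout, rd) // every Read runs the BPF iterator program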
+func AttachIter(opts IterOptions) (*Iter, error) { + if err := haveBPFLink(); err != nil { + return nil, err + } + + progFd := opts.Program.FD() + if progFd < 0 { + return nil, fmt.Errorf("invalid program: %s", internal.ErrClosedFd) + } + + var info bpfIterLinkInfoMap + if opts.Map != nil { + mapFd := opts.Map.FD() + if mapFd < 0 { + return nil, fmt.Errorf("invalid map: %w", internal.ErrClosedFd) + } + info.map_fd = uint32(mapFd) + } + + attr := bpfLinkCreateIterAttr{ + prog_fd: uint32(progFd), + attach_type: ebpf.AttachTraceIter, + iter_info: internal.NewPointer(unsafe.Pointer(&info)), + iter_info_len: uint32(unsafe.Sizeof(info)), + } + + fd, err := bpfLinkCreateIter(&attr) + if err != nil { + return nil, fmt.Errorf("can't link iterator: %w", err) + } + + return &Iter{RawLink{fd, ""}}, err +} + +// LoadPinnedIter loads a pinned iterator from a bpffs. +func LoadPinnedIter(fileName string, opts *ebpf.LoadPinOptions) (*Iter, error) { + link, err := LoadPinnedRawLink(fileName, IterType, opts) + if err != nil { + return nil, err + } + + return &Iter{*link}, err +} + +// Iter represents an attached bpf_iter. +type Iter struct { + RawLink +} + +// Open creates a new instance of the iterator. +// +// Reading from the returned reader triggers the BPF program. +func (it *Iter) Open() (io.ReadCloser, error) { + linkFd, err := it.fd.Value() + if err != nil { + return nil, err + } + + attr := &bpfIterCreateAttr{ + linkFd: linkFd, + } + + fd, err := bpfIterCreate(attr) + if err != nil { + return nil, fmt.Errorf("can't create iterator: %w", err) + } + + return fd.File("bpf_iter"), nil +} + +// union bpf_iter_link_info.map +type bpfIterLinkInfoMap struct { + map_fd uint32 +} diff --git a/vendor/github.com/cilium/ebpf/link/kprobe.go b/vendor/github.com/cilium/ebpf/link/kprobe.go new file mode 100644 index 0000000000..b6577b5a99 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/kprobe.go @@ -0,0 +1,444 @@ +package link + +import ( + "bytes" + "crypto/rand" + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "sync" + "unsafe" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/unix" +) + +var ( + kprobeEventsPath = filepath.Join(tracefsPath, "kprobe_events") + + kprobeRetprobeBit = struct { + once sync.Once + value uint64 + err error + }{} +) + +type probeType uint8 + +const ( + kprobeType probeType = iota + uprobeType +) + +func (pt probeType) String() string { + if pt == kprobeType { + return "kprobe" + } + return "uprobe" +} + +func (pt probeType) EventsPath() string { + if pt == kprobeType { + return kprobeEventsPath + } + return uprobeEventsPath +} + +func (pt probeType) PerfEventType(ret bool) perfEventType { + if pt == kprobeType { + if ret { + return kretprobeEvent + } + return kprobeEvent + } + if ret { + return uretprobeEvent + } + return uprobeEvent +} + +func (pt probeType) RetprobeBit() (uint64, error) { + if pt == kprobeType { + return kretprobeBit() + } + return uretprobeBit() +} + +// Kprobe attaches the given eBPF program to a perf event that fires when the +// given kernel symbol starts executing. See /proc/kallsyms for available +// symbols. For example, printk(): +// +// kp, err := Kprobe("printk", prog) +// +// Losing the reference to the resulting Link (kp) will close the Kprobe +// and prevent further execution of prog. The Link must be Closed during +// program shutdown to avoid leaking system resources. 
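The lifetime warning in the doc comment above is backed by a mechanism from internal/fd.go earlier in this diff: the perf event file descriptor carries a runtime finalizer, so a Link that becomes unreachable can be closed, and the probe detached, at an arbitrary GC point. Keep a reference for as long as the probe should fire and close it deliberately:

    kp, err := link.Kprobe("printk", prog) // the symbol from the example above
    if err != nil {
        // handle
    }
    defer kp.Close() // detach at shutdown; don't just drop kp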
+func Kprobe(symbol string, prog *ebpf.Program) (Link, error) { + k, err := kprobe(symbol, prog, false) + if err != nil { + return nil, err + } + + err = k.attach(prog) + if err != nil { + k.Close() + return nil, err + } + + return k, nil +} + +// Kretprobe attaches the given eBPF program to a perf event that fires right +// before the given kernel symbol exits, with the function stack left intact. +// See /proc/kallsyms for available symbols. For example, printk(): +// +// kp, err := Kretprobe("printk", prog) +// +// Losing the reference to the resulting Link (kp) will close the Kretprobe +// and prevent further execution of prog. The Link must be Closed during +// program shutdown to avoid leaking system resources. +func Kretprobe(symbol string, prog *ebpf.Program) (Link, error) { + k, err := kprobe(symbol, prog, true) + if err != nil { + return nil, err + } + + err = k.attach(prog) + if err != nil { + k.Close() + return nil, err + } + + return k, nil +} + +// kprobe opens a perf event on the given symbol and attaches prog to it. +// If ret is true, create a kretprobe. +func kprobe(symbol string, prog *ebpf.Program, ret bool) (*perfEvent, error) { + if symbol == "" { + return nil, fmt.Errorf("symbol name cannot be empty: %w", errInvalidInput) + } + if prog == nil { + return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) + } + if !rgxTraceEvent.MatchString(symbol) { + return nil, fmt.Errorf("symbol '%s' must be alphanumeric or underscore: %w", symbol, errInvalidInput) + } + if prog.Type() != ebpf.Kprobe { + return nil, fmt.Errorf("eBPF program type %s is not a Kprobe: %w", prog.Type(), errInvalidInput) + } + + // Use kprobe PMU if the kernel has it available. + tp, err := pmuKprobe(platformPrefix(symbol), ret) + if errors.Is(err, os.ErrNotExist) { + tp, err = pmuKprobe(symbol, ret) + } + if err == nil { + return tp, nil + } + if err != nil && !errors.Is(err, ErrNotSupported) { + return nil, fmt.Errorf("creating perf_kprobe PMU: %w", err) + } + + // Use tracefs if kprobe PMU is missing. + tp, err = tracefsKprobe(platformPrefix(symbol), ret) + if errors.Is(err, os.ErrNotExist) { + tp, err = tracefsKprobe(symbol, ret) + } + if err != nil { + return nil, fmt.Errorf("creating trace event '%s' in tracefs: %w", symbol, err) + } + + return tp, nil +} + +// pmuKprobe opens a perf event based on the kprobe PMU. +// Returns os.ErrNotExist if the given symbol does not exist in the kernel. +func pmuKprobe(symbol string, ret bool) (*perfEvent, error) { + return pmuProbe(kprobeType, symbol, "", 0, perfAllThreads, ret) +} + +// pmuProbe opens a perf event based on a Performance Monitoring Unit. +// +// Requires at least a 4.17 kernel. +// e12f03d7031a "perf/core: Implement the 'perf_kprobe' PMU" +// 33ea4b24277b "perf/core: Implement the 'perf_uprobe' PMU" +// +// Returns ErrNotSupported if the kernel doesn't support perf_[k,u]probe PMU +func pmuProbe(typ probeType, symbol, path string, offset uint64, pid int, ret bool) (*perfEvent, error) { + // Getting the PMU type will fail if the kernel doesn't support + // the perf_[k,u]probe PMU. + et, err := getPMUEventType(typ) + if err != nil { + return nil, err + } + + var config uint64 + if ret { + bit, err := typ.RetprobeBit() + if err != nil { + return nil, err + } + config |= 1 << bit + } + + var ( + attr unix.PerfEventAttr + sp unsafe.Pointer + ) + switch typ { + case kprobeType: + // Create a pointer to a NUL-terminated string for the kernel. 
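	// In the perf_event_attr built below, the generic Ext1/Ext2 fields are
	// the kernel's config1/config2. For the perf_kprobe PMU, config1 carries
	// the kprobe_func string pointer; for perf_uprobe, config1 is the
	// uprobe_path pointer and config2 the probe offset, which is also why
	// only the uprobe case needs to declare PERF_ATTR_SIZE_VER1. The dynamic
	// PMU type id itself comes from sysfs, e.g. (the value varies by kernel):
	//
	//	$ cat /sys/bus/event_source/devices/kprobe/type
	//	6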
+ sp, err = unsafeStringPtr(symbol) + if err != nil { + return nil, err + } + + attr = unix.PerfEventAttr{ + Type: uint32(et), // PMU event type read from sysfs + Ext1: uint64(uintptr(sp)), // Kernel symbol to trace + Config: config, // Retprobe flag + } + case uprobeType: + sp, err = unsafeStringPtr(path) + if err != nil { + return nil, err + } + + attr = unix.PerfEventAttr{ + // The minimum size required for PMU uprobes is PERF_ATTR_SIZE_VER1, + // since it added the config2 (Ext2) field. The Size field controls the + // size of the internal buffer the kernel allocates for reading the + // perf_event_attr argument from userspace. + Size: unix.PERF_ATTR_SIZE_VER1, + Type: uint32(et), // PMU event type read from sysfs + Ext1: uint64(uintptr(sp)), // Uprobe path + Ext2: offset, // Uprobe offset + Config: config, // Retprobe flag + } + } + + fd, err := unix.PerfEventOpen(&attr, pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC) + + // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL + // when trying to create a kretprobe for a missing symbol. Make sure ENOENT + // is returned to the caller. + if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) { + return nil, fmt.Errorf("symbol '%s' not found: %w", symbol, os.ErrNotExist) + } + // Since at least commit cb9a19fe4aa51, ENOTSUPP is returned + // when attempting to set a uprobe on a trap instruction. + if errors.Is(err, unix.ENOTSUPP) { + return nil, fmt.Errorf("failed setting uprobe on offset %#x (possible trap insn): %w", offset, err) + } + if err != nil { + return nil, fmt.Errorf("opening perf event: %w", err) + } + + // Ensure the string pointer is not collected before PerfEventOpen returns. + runtime.KeepAlive(sp) + + // Kernel has perf_[k,u]probe PMU available, initialize perf event. + return &perfEvent{ + fd: internal.NewFD(uint32(fd)), + pmuID: et, + name: symbol, + typ: typ.PerfEventType(ret), + }, nil +} + +// tracefsKprobe creates a Kprobe tracefs entry. +func tracefsKprobe(symbol string, ret bool) (*perfEvent, error) { + return tracefsProbe(kprobeType, symbol, "", 0, perfAllThreads, ret) +} + +// tracefsProbe creates a trace event by writing an entry to /[k,u]probe_events. +// A new trace event group name is generated on every call to support creating +// multiple trace events for the same kernel or userspace symbol. +// Path and offset are only set in the case of uprobe(s) and are used to set +// the executable/library path on the filesystem and the offset where the probe is inserted. +// A perf event is then opened on the newly-created trace event and returned to the caller. +func tracefsProbe(typ probeType, symbol, path string, offset uint64, pid int, ret bool) (*perfEvent, error) { + // Generate a random string for each trace event we attempt to create. + // This value is used as the 'group' token in tracefs to allow creating + // multiple kprobe trace events with the same name. + group, err := randomGroup("ebpf") + if err != nil { + return nil, fmt.Errorf("randomizing group name: %w", err) + } + + // Before attempting to create a trace event through tracefs, + // check if an event with the same group and name already exists. + // Kernels 4.x and earlier don't return os.ErrExist on writing a duplicate + // entry, so we need to rely on reads for detecting uniqueness. 
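+	// A successful ID lookup below therefore means a probe with this
+	// group/symbol pair already exists, and we bail out rather than
+	// appending a duplicate entry.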
+ _, err = getTraceEventID(group, symbol) + if err == nil { + return nil, fmt.Errorf("trace event already exists: %s/%s", group, symbol) + } + if err != nil && !errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("checking trace event %s/%s: %w", group, symbol, err) + } + + // Create the [k,u]probe trace event using tracefs. + if err := createTraceFSProbeEvent(typ, group, symbol, path, offset, ret); err != nil { + return nil, fmt.Errorf("creating probe entry on tracefs: %w", err) + } + + // Get the newly-created trace event's id. + tid, err := getTraceEventID(group, symbol) + if err != nil { + return nil, fmt.Errorf("getting trace event id: %w", err) + } + + // Kprobes are ephemeral tracepoints and share the same perf event type. + fd, err := openTracepointPerfEvent(tid, pid) + if err != nil { + return nil, err + } + + return &perfEvent{ + fd: fd, + group: group, + name: symbol, + tracefsID: tid, + typ: typ.PerfEventType(ret), + }, nil +} + +// createTraceFSProbeEvent creates a new ephemeral trace event by writing to +// /[k,u]probe_events. Returns os.ErrNotExist if symbol is not a valid +// kernel symbol, or if it is not traceable with kprobes. Returns os.ErrExist +// if a probe with the same group and symbol already exists. +func createTraceFSProbeEvent(typ probeType, group, symbol, path string, offset uint64, ret bool) error { + // Open the kprobe_events file in tracefs. + f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666) + if err != nil { + return fmt.Errorf("error opening '%s': %w", typ.EventsPath(), err) + } + defer f.Close() + + var pe string + switch typ { + case kprobeType: + // The kprobe_events syntax is as follows (see Documentation/trace/kprobetrace.txt): + // p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe + // r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe + // -:[GRP/]EVENT : Clear a probe + // + // Some examples: + // r:ebpf_1234/r_my_kretprobe nf_conntrack_destroy + // p:ebpf_5678/p_my_kprobe __x64_sys_execve + // + // Leaving the kretprobe's MAXACTIVE set to 0 (or absent) will make the + // kernel default to NR_CPUS. This is desired in most eBPF cases since + // subsampling or rate limiting logic can be more accurately implemented in + // the eBPF program itself. + // See Documentation/kprobes.txt for more details. + pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(ret), group, symbol, symbol) + case uprobeType: + // The uprobe_events syntax is as follows: + // p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a probe + // r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a return probe + // -:[GRP/]EVENT : Clear a probe + // + // Some examples: + // r:ebpf_1234/readline /bin/bash:0x12345 + // p:ebpf_5678/main_mySymbol /bin/mybin:0x12345 + // + // See Documentation/trace/uprobetracer.txt for more details. + pathOffset := uprobePathOffset(path, offset) + pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(ret), group, symbol, pathOffset) + } + _, err = f.WriteString(pe) + // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL + // when trying to create a kretprobe for a missing symbol. Make sure ENOENT + // is returned to the caller. 
+ if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) { + return fmt.Errorf("symbol %s not found: %w", symbol, os.ErrNotExist) + } + if err != nil { + return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err) + } + + return nil +} + +// closeTraceFSProbeEvent removes the [k,u]probe with the given type, group and symbol +// from /[k,u]probe_events. +func closeTraceFSProbeEvent(typ probeType, group, symbol string) error { + f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666) + if err != nil { + return fmt.Errorf("error opening %s: %w", typ.EventsPath(), err) + } + defer f.Close() + + // See [k,u]probe_events syntax above. The probe type does not need to be specified + // for removals. + pe := fmt.Sprintf("-:%s/%s", group, symbol) + if _, err = f.WriteString(pe); err != nil { + return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err) + } + + return nil +} + +// randomGroup generates a pseudorandom string for use as a tracefs group name. +// Returns an error when the output string would exceed 63 characters (kernel +// limitation), when rand.Read() fails or when prefix contains characters not +// allowed by rgxTraceEvent. +func randomGroup(prefix string) (string, error) { + if !rgxTraceEvent.MatchString(prefix) { + return "", fmt.Errorf("prefix '%s' must be alphanumeric or underscore: %w", prefix, errInvalidInput) + } + + b := make([]byte, 8) + if _, err := rand.Read(b); err != nil { + return "", fmt.Errorf("reading random bytes: %w", err) + } + + group := fmt.Sprintf("%s_%x", prefix, b) + if len(group) > 63 { + return "", fmt.Errorf("group name '%s' cannot be longer than 63 characters: %w", group, errInvalidInput) + } + + return group, nil +} + +func probePrefix(ret bool) string { + if ret { + return "r" + } + return "p" +} + +// determineRetprobeBit reads a Performance Monitoring Unit's retprobe bit +// from /sys/bus/event_source/devices//format/retprobe. +func determineRetprobeBit(typ probeType) (uint64, error) { + p := filepath.Join("/sys/bus/event_source/devices/", typ.String(), "/format/retprobe") + + data, err := os.ReadFile(p) + if err != nil { + return 0, err + } + + var rp uint64 + n, err := fmt.Sscanf(string(bytes.TrimSpace(data)), "config:%d", &rp) + if err != nil { + return 0, fmt.Errorf("parse retprobe bit: %w", err) + } + if n != 1 { + return 0, fmt.Errorf("parse retprobe bit: expected 1 item, got %d", n) + } + + return rp, nil +} + +func kretprobeBit() (uint64, error) { + kprobeRetprobeBit.once.Do(func() { + kprobeRetprobeBit.value, kprobeRetprobeBit.err = determineRetprobeBit(kprobeType) + }) + return kprobeRetprobeBit.value, kprobeRetprobeBit.err +} diff --git a/vendor/github.com/cilium/ebpf/link/link.go b/vendor/github.com/cilium/ebpf/link/link.go new file mode 100644 index 0000000000..4926584696 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/link.go @@ -0,0 +1,233 @@ +package link + +import ( + "fmt" + "unsafe" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/btf" +) + +var ErrNotSupported = internal.ErrNotSupported + +// Link represents a Program attached to a BPF hook. +type Link interface { + // Replace the current program with a new program. + // + // Passing a nil program is an error. May return an error wrapping ErrNotSupported. + Update(*ebpf.Program) error + + // Persist a link by pinning it into a bpffs. + // + // May return an error wrapping ErrNotSupported. + Pin(string) error + + // Undo a previous call to Pin. 
+ // + // May return an error wrapping ErrNotSupported. + Unpin() error + + // Close frees resources. + // + // The link will be broken unless it has been successfully pinned. + // A link may continue past the lifetime of the process if Close is + // not called. + Close() error + + // Prevent external users from implementing this interface. + isLink() +} + +// ID uniquely identifies a BPF link. +type ID uint32 + +// RawLinkOptions control the creation of a raw link. +type RawLinkOptions struct { + // File descriptor to attach to. This differs for each attach type. + Target int + // Program to attach. + Program *ebpf.Program + // Attach must match the attach type of Program. + Attach ebpf.AttachType + // BTF is the BTF of the attachment target. + BTF btf.TypeID +} + +// RawLinkInfo contains metadata on a link. +type RawLinkInfo struct { + Type Type + ID ID + Program ebpf.ProgramID +} + +// RawLink is the low-level API to bpf_link. +// +// You should consider using the higher level interfaces in this +// package instead. +type RawLink struct { + fd *internal.FD + pinnedPath string +} + +// AttachRawLink creates a raw link. +func AttachRawLink(opts RawLinkOptions) (*RawLink, error) { + if err := haveBPFLink(); err != nil { + return nil, err + } + + if opts.Target < 0 { + return nil, fmt.Errorf("invalid target: %s", internal.ErrClosedFd) + } + + progFd := opts.Program.FD() + if progFd < 0 { + return nil, fmt.Errorf("invalid program: %s", internal.ErrClosedFd) + } + + attr := bpfLinkCreateAttr{ + targetFd: uint32(opts.Target), + progFd: uint32(progFd), + attachType: opts.Attach, + targetBTFID: uint32(opts.BTF), + } + fd, err := bpfLinkCreate(&attr) + if err != nil { + return nil, fmt.Errorf("can't create link: %s", err) + } + + return &RawLink{fd, ""}, nil +} + +// LoadPinnedRawLink loads a persisted link from a bpffs. +// +// Returns an error if the pinned link type doesn't match linkType. Pass +// UnspecifiedType to disable this behaviour. +func LoadPinnedRawLink(fileName string, linkType Type, opts *ebpf.LoadPinOptions) (*RawLink, error) { + fd, err := internal.BPFObjGet(fileName, opts.Marshal()) + if err != nil { + return nil, fmt.Errorf("load pinned link: %w", err) + } + + link := &RawLink{fd, fileName} + if linkType == UnspecifiedType { + return link, nil + } + + info, err := link.Info() + if err != nil { + link.Close() + return nil, fmt.Errorf("get pinned link info: %s", err) + } + + if info.Type != linkType { + link.Close() + return nil, fmt.Errorf("link type %v doesn't match %v", info.Type, linkType) + } + + return link, nil +} + +func (l *RawLink) isLink() {} + +// FD returns the raw file descriptor. +func (l *RawLink) FD() int { + fd, err := l.fd.Value() + if err != nil { + return -1 + } + return int(fd) +} + +// Close breaks the link. +// +// Use Pin if you want to make the link persistent. +func (l *RawLink) Close() error { + return l.fd.Close() +} + +// Pin persists a link past the lifetime of the process. +// +// Calling Close on a pinned Link will not break the link +// until the pin is removed. +func (l *RawLink) Pin(fileName string) error { + if err := internal.Pin(l.pinnedPath, fileName, l.fd); err != nil { + return err + } + l.pinnedPath = fileName + return nil +} + +// Unpin implements the Link interface. +func (l *RawLink) Unpin() error { + if err := internal.Unpin(l.pinnedPath); err != nil { + return err + } + l.pinnedPath = "" + return nil +} + +// Update implements the Link interface. 
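+//
+// A hedged usage sketch (newProg is assumed to be a loaded program whose
+// type matches the original attachment):
+//
+//	if err := link.Update(newProg); err != nil {
+//		// handle the error; the previously attached program stays in place
+//	}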
+func (l *RawLink) Update(new *ebpf.Program) error { + return l.UpdateArgs(RawLinkUpdateOptions{ + New: new, + }) +} + +// RawLinkUpdateOptions control the behaviour of RawLink.UpdateArgs. +type RawLinkUpdateOptions struct { + New *ebpf.Program + Old *ebpf.Program + Flags uint32 +} + +// UpdateArgs updates a link based on args. +func (l *RawLink) UpdateArgs(opts RawLinkUpdateOptions) error { + newFd := opts.New.FD() + if newFd < 0 { + return fmt.Errorf("invalid program: %s", internal.ErrClosedFd) + } + + var oldFd int + if opts.Old != nil { + oldFd = opts.Old.FD() + if oldFd < 0 { + return fmt.Errorf("invalid replacement program: %s", internal.ErrClosedFd) + } + } + + linkFd, err := l.fd.Value() + if err != nil { + return fmt.Errorf("can't update link: %s", err) + } + + attr := bpfLinkUpdateAttr{ + linkFd: linkFd, + newProgFd: uint32(newFd), + oldProgFd: uint32(oldFd), + flags: opts.Flags, + } + return bpfLinkUpdate(&attr) +} + +// struct bpf_link_info +type bpfLinkInfo struct { + typ uint32 + id uint32 + prog_id uint32 +} + +// Info returns metadata about the link. +func (l *RawLink) Info() (*RawLinkInfo, error) { + var info bpfLinkInfo + err := internal.BPFObjGetInfoByFD(l.fd, unsafe.Pointer(&info), unsafe.Sizeof(info)) + if err != nil { + return nil, fmt.Errorf("link info: %s", err) + } + + return &RawLinkInfo{ + Type(info.typ), + ID(info.id), + ebpf.ProgramID(info.prog_id), + }, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/netns.go b/vendor/github.com/cilium/ebpf/link/netns.go new file mode 100644 index 0000000000..37e5b84c4d --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/netns.go @@ -0,0 +1,60 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" +) + +// NetNsInfo contains metadata about a network namespace link. +type NetNsInfo struct { + RawLinkInfo +} + +// NetNsLink is a program attached to a network namespace. +type NetNsLink struct { + *RawLink +} + +// AttachNetNs attaches a program to a network namespace. +func AttachNetNs(ns int, prog *ebpf.Program) (*NetNsLink, error) { + var attach ebpf.AttachType + switch t := prog.Type(); t { + case ebpf.FlowDissector: + attach = ebpf.AttachFlowDissector + case ebpf.SkLookup: + attach = ebpf.AttachSkLookup + default: + return nil, fmt.Errorf("can't attach %v to network namespace", t) + } + + link, err := AttachRawLink(RawLinkOptions{ + Target: ns, + Program: prog, + Attach: attach, + }) + if err != nil { + return nil, err + } + + return &NetNsLink{link}, nil +} + +// LoadPinnedNetNs loads a network namespace link from bpffs. +func LoadPinnedNetNs(fileName string, opts *ebpf.LoadPinOptions) (*NetNsLink, error) { + link, err := LoadPinnedRawLink(fileName, NetNsType, opts) + if err != nil { + return nil, err + } + + return &NetNsLink{link}, nil +} + +// Info returns information about the link. +func (nns *NetNsLink) Info() (*NetNsInfo, error) { + info, err := nns.RawLink.Info() + if err != nil { + return nil, err + } + return &NetNsInfo{*info}, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/perf_event.go b/vendor/github.com/cilium/ebpf/link/perf_event.go new file mode 100644 index 0000000000..7e0443a75c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/perf_event.go @@ -0,0 +1,272 @@ +package link + +import ( + "bytes" + "errors" + "fmt" + "os" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + "unsafe" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/unix" +) + +// Getting the terminology right is usually the hardest part. 
For posterity and
+// for staying sane during implementation:
+//
+//  - trace event: Representation of a kernel runtime hook. Filesystem entries
+//    under <tracefs>/events. Can be tracepoints (static), kprobes or uprobes.
+//    Can be instantiated into perf events (see below).
+//  - tracepoint: A predetermined hook point in the kernel. Exposed as trace
+//    events in (sub)directories under <tracefs>/events. Cannot be closed or
+//    removed, they are static.
+//  - k(ret)probe: Ephemeral trace events based on entry or exit points of
+//    exported kernel symbols. kprobe-based (tracefs) trace events can be
+//    created system-wide by writing to the <tracefs>/kprobe_events file, or
+//    they can be scoped to the current process by creating PMU perf events.
+//  - u(ret)probe: Ephemeral trace events based on user-provided ELF binaries
+//    and offsets. uprobe-based (tracefs) trace events can be
+//    created system-wide by writing to the <tracefs>/uprobe_events file, or
+//    they can be scoped to the current process by creating PMU perf events.
+//  - perf event: An object instantiated based on an existing trace event or
+//    kernel symbol. Referred to by fd in userspace.
+//    Exactly one eBPF program can be attached to a perf event. Multiple perf
+//    events can be created from a single trace event. Closing a perf event
+//    stops any further invocations of the attached eBPF program.
+
+var (
+	tracefsPath = "/sys/kernel/debug/tracing"
+
+	// Trace event groups, names and kernel symbols must adhere to this set
+	// of characters. Non-empty, first character must not be a number, all
+	// characters must be alphanumeric or underscore.
+	rgxTraceEvent = regexp.MustCompile("^[a-zA-Z_][0-9a-zA-Z_]*$")
+
+	errInvalidInput = errors.New("invalid input")
+)
+
+const (
+	perfAllThreads = -1
+)
+
+type perfEventType uint8
+
+const (
+	tracepointEvent perfEventType = iota
+	kprobeEvent
+	kretprobeEvent
+	uprobeEvent
+	uretprobeEvent
+)
+
+// A perfEvent represents a perf event kernel object. Exactly one eBPF program
+// can be attached to it. It is created based on a tracefs trace event or a
+// Performance Monitoring Unit (PMU).
+type perfEvent struct {
+
+	// Group and name of the tracepoint/kprobe/uprobe.
+	group string
+	name  string
+
+	// PMU event ID read from sysfs. Valid IDs are non-zero.
+	pmuID uint64
+	// ID of the trace event read from tracefs. Valid IDs are non-zero.
+	tracefsID uint64
+
+	// The event type determines the types of programs that can be attached.
+	typ perfEventType
+
+	fd *internal.FD
+}
+
+func (pe *perfEvent) isLink() {}
+
+func (pe *perfEvent) Pin(string) error {
+	return fmt.Errorf("pin perf event: %w", ErrNotSupported)
+}
+
+func (pe *perfEvent) Unpin() error {
+	return fmt.Errorf("unpin perf event: %w", ErrNotSupported)
+}
+
+// Since 4.15 (e87c6bc3852b "bpf: permit multiple bpf attachments for a single perf event"),
+// calling PERF_EVENT_IOC_SET_BPF appends the given program to a prog_array
+// owned by the perf event, which means multiple programs can be attached
+// simultaneously.
+//
+// Before 4.15, calling PERF_EVENT_IOC_SET_BPF more than once on a perf event
+// returns EEXIST.
+//
+// Detaching a program from a perf event is currently not possible, so a
+// program replacement mechanism cannot be implemented for perf events.
+func (pe *perfEvent) Update(prog *ebpf.Program) error { + return fmt.Errorf("can't replace eBPF program in perf event: %w", ErrNotSupported) +} + +func (pe *perfEvent) Close() error { + if pe.fd == nil { + return nil + } + + pfd, err := pe.fd.Value() + if err != nil { + return fmt.Errorf("getting perf event fd: %w", err) + } + + err = unix.IoctlSetInt(int(pfd), unix.PERF_EVENT_IOC_DISABLE, 0) + if err != nil { + return fmt.Errorf("disabling perf event: %w", err) + } + + err = pe.fd.Close() + if err != nil { + return fmt.Errorf("closing perf event fd: %w", err) + } + + switch pe.typ { + case kprobeEvent, kretprobeEvent: + // Clean up kprobe tracefs entry. + if pe.tracefsID != 0 { + return closeTraceFSProbeEvent(kprobeType, pe.group, pe.name) + } + case uprobeEvent, uretprobeEvent: + // Clean up uprobe tracefs entry. + if pe.tracefsID != 0 { + return closeTraceFSProbeEvent(uprobeType, pe.group, pe.name) + } + case tracepointEvent: + // Tracepoint trace events don't hold any extra resources. + return nil + } + + return nil +} + +// attach the given eBPF prog to the perf event stored in pe. +// pe must contain a valid perf event fd. +// prog's type must match the program type stored in pe. +func (pe *perfEvent) attach(prog *ebpf.Program) error { + if prog == nil { + return errors.New("cannot attach a nil program") + } + if pe.fd == nil { + return errors.New("cannot attach to nil perf event") + } + if prog.FD() < 0 { + return fmt.Errorf("invalid program: %w", internal.ErrClosedFd) + } + switch pe.typ { + case kprobeEvent, kretprobeEvent, uprobeEvent, uretprobeEvent: + if t := prog.Type(); t != ebpf.Kprobe { + return fmt.Errorf("invalid program type (expected %s): %s", ebpf.Kprobe, t) + } + case tracepointEvent: + if t := prog.Type(); t != ebpf.TracePoint { + return fmt.Errorf("invalid program type (expected %s): %s", ebpf.TracePoint, t) + } + default: + return fmt.Errorf("unknown perf event type: %d", pe.typ) + } + + // The ioctl below will fail when the fd is invalid. + kfd, _ := pe.fd.Value() + + // Assign the eBPF program to the perf event. + err := unix.IoctlSetInt(int(kfd), unix.PERF_EVENT_IOC_SET_BPF, prog.FD()) + if err != nil { + return fmt.Errorf("setting perf event bpf program: %w", err) + } + + // PERF_EVENT_IOC_ENABLE and _DISABLE ignore their given values. + if err := unix.IoctlSetInt(int(kfd), unix.PERF_EVENT_IOC_ENABLE, 0); err != nil { + return fmt.Errorf("enable perf event: %s", err) + } + + // Close the perf event when its reference is lost to avoid leaking system resources. + runtime.SetFinalizer(pe, (*perfEvent).Close) + return nil +} + +// unsafeStringPtr returns an unsafe.Pointer to a NUL-terminated copy of str. +func unsafeStringPtr(str string) (unsafe.Pointer, error) { + p, err := unix.BytePtrFromString(str) + if err != nil { + return nil, err + } + return unsafe.Pointer(p), nil +} + +// getTraceEventID reads a trace event's ID from tracefs given its group and name. +// group and name must be alphanumeric or underscore, as required by the kernel. +func getTraceEventID(group, name string) (uint64, error) { + tid, err := uint64FromFile(tracefsPath, "events", group, name, "id") + if errors.Is(err, os.ErrNotExist) { + return 0, fmt.Errorf("trace event %s/%s: %w", group, name, os.ErrNotExist) + } + if err != nil { + return 0, fmt.Errorf("reading trace event ID of %s/%s: %w", group, name, err) + } + + return tid, nil +} + +// getPMUEventType reads a Performance Monitoring Unit's type (numeric identifier) +// from /sys/bus/event_source/devices//type. 
+// +// Returns ErrNotSupported if the pmu type is not supported. +func getPMUEventType(typ probeType) (uint64, error) { + et, err := uint64FromFile("/sys/bus/event_source/devices", typ.String(), "type") + if errors.Is(err, os.ErrNotExist) { + return 0, fmt.Errorf("pmu type %s: %w", typ, ErrNotSupported) + } + if err != nil { + return 0, fmt.Errorf("reading pmu type %s: %w", typ, err) + } + + return et, nil +} + +// openTracepointPerfEvent opens a tracepoint-type perf event. System-wide +// [k,u]probes created by writing to /[k,u]probe_events are tracepoints +// behind the scenes, and can be attached to using these perf events. +func openTracepointPerfEvent(tid uint64, pid int) (*internal.FD, error) { + attr := unix.PerfEventAttr{ + Type: unix.PERF_TYPE_TRACEPOINT, + Config: tid, + Sample_type: unix.PERF_SAMPLE_RAW, + Sample: 1, + Wakeup: 1, + } + + fd, err := unix.PerfEventOpen(&attr, pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC) + if err != nil { + return nil, fmt.Errorf("opening tracepoint perf event: %w", err) + } + + return internal.NewFD(uint32(fd)), nil +} + +// uint64FromFile reads a uint64 from a file. All elements of path are sanitized +// and joined onto base. Returns error if base no longer prefixes the path after +// joining all components. +func uint64FromFile(base string, path ...string) (uint64, error) { + l := filepath.Join(path...) + p := filepath.Join(base, l) + if !strings.HasPrefix(p, base) { + return 0, fmt.Errorf("path '%s' attempts to escape base path '%s': %w", l, base, errInvalidInput) + } + + data, err := os.ReadFile(p) + if err != nil { + return 0, fmt.Errorf("reading file %s: %w", p, err) + } + + et := bytes.TrimSpace(data) + return strconv.ParseUint(string(et), 10, 64) +} diff --git a/vendor/github.com/cilium/ebpf/link/platform.go b/vendor/github.com/cilium/ebpf/link/platform.go new file mode 100644 index 0000000000..eb6f7b7a37 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/platform.go @@ -0,0 +1,25 @@ +package link + +import ( + "fmt" + "runtime" +) + +func platformPrefix(symbol string) string { + + prefix := runtime.GOARCH + + // per https://github.com/golang/go/blob/master/src/go/build/syslist.go + switch prefix { + case "386": + prefix = "ia32" + case "amd64", "amd64p32": + prefix = "x64" + case "arm64", "arm64be": + prefix = "arm64" + default: + return symbol + } + + return fmt.Sprintf("__%s_%s", prefix, symbol) +} diff --git a/vendor/github.com/cilium/ebpf/link/program.go b/vendor/github.com/cilium/ebpf/link/program.go new file mode 100644 index 0000000000..b90c457467 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/program.go @@ -0,0 +1,76 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal" +) + +type RawAttachProgramOptions struct { + // File descriptor to attach to. This differs for each attach type. + Target int + // Program to attach. + Program *ebpf.Program + // Program to replace (cgroups). + Replace *ebpf.Program + // Attach must match the attach type of Program (and Replace). + Attach ebpf.AttachType + // Flags control the attach behaviour. This differs for each attach type. + Flags uint32 +} + +// RawAttachProgram is a low level wrapper around BPF_PROG_ATTACH. +// +// You should use one of the higher level abstractions available in this +// package if possible. 
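+//
+// A hedged sketch of attaching a CGroupSKB program to a cgroup directory
+// file descriptor (cgroupFd and prog are assumed to exist):
+//
+//	err := RawAttachProgram(RawAttachProgramOptions{
+//		Target:  cgroupFd,
+//		Program: prog,
+//		Attach:  ebpf.AttachCGroupInetIngress,
+//	})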
+func RawAttachProgram(opts RawAttachProgramOptions) error { + if err := haveProgAttach(); err != nil { + return err + } + + var replaceFd uint32 + if opts.Replace != nil { + replaceFd = uint32(opts.Replace.FD()) + } + + attr := internal.BPFProgAttachAttr{ + TargetFd: uint32(opts.Target), + AttachBpfFd: uint32(opts.Program.FD()), + ReplaceBpfFd: replaceFd, + AttachType: uint32(opts.Attach), + AttachFlags: uint32(opts.Flags), + } + + if err := internal.BPFProgAttach(&attr); err != nil { + return fmt.Errorf("can't attach program: %w", err) + } + return nil +} + +type RawDetachProgramOptions struct { + Target int + Program *ebpf.Program + Attach ebpf.AttachType +} + +// RawDetachProgram is a low level wrapper around BPF_PROG_DETACH. +// +// You should use one of the higher level abstractions available in this +// package if possible. +func RawDetachProgram(opts RawDetachProgramOptions) error { + if err := haveProgAttach(); err != nil { + return err + } + + attr := internal.BPFProgDetachAttr{ + TargetFd: uint32(opts.Target), + AttachBpfFd: uint32(opts.Program.FD()), + AttachType: uint32(opts.Attach), + } + if err := internal.BPFProgDetach(&attr); err != nil { + return fmt.Errorf("can't detach program: %w", err) + } + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go b/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go new file mode 100644 index 0000000000..f4beb1e078 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go @@ -0,0 +1,61 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal" +) + +type RawTracepointOptions struct { + // Tracepoint name. + Name string + // Program must be of type RawTracepoint* + Program *ebpf.Program +} + +// AttachRawTracepoint links a BPF program to a raw_tracepoint. +// +// Requires at least Linux 4.17. 
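+//
+// A minimal sketch (the tracepoint name is illustrative; prog must be of
+// type RawTracepoint*):
+//
+//	rt, err := AttachRawTracepoint(RawTracepointOptions{
+//		Name:    "sched_switch",
+//		Program: prog,
+//	})
+//	defer rt.Close()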
+func AttachRawTracepoint(opts RawTracepointOptions) (Link, error) { + if t := opts.Program.Type(); t != ebpf.RawTracepoint && t != ebpf.RawTracepointWritable { + return nil, fmt.Errorf("invalid program type %s, expected RawTracepoint(Writable)", t) + } + if opts.Program.FD() < 0 { + return nil, fmt.Errorf("invalid program: %w", internal.ErrClosedFd) + } + + fd, err := bpfRawTracepointOpen(&bpfRawTracepointOpenAttr{ + name: internal.NewStringPointer(opts.Name), + fd: uint32(opts.Program.FD()), + }) + if err != nil { + return nil, err + } + + return &progAttachRawTracepoint{fd: fd}, nil +} + +type progAttachRawTracepoint struct { + fd *internal.FD +} + +var _ Link = (*progAttachRawTracepoint)(nil) + +func (rt *progAttachRawTracepoint) isLink() {} + +func (rt *progAttachRawTracepoint) Close() error { + return rt.fd.Close() +} + +func (rt *progAttachRawTracepoint) Update(_ *ebpf.Program) error { + return fmt.Errorf("can't update raw_tracepoint: %w", ErrNotSupported) +} + +func (rt *progAttachRawTracepoint) Pin(_ string) error { + return fmt.Errorf("can't pin raw_tracepoint: %w", ErrNotSupported) +} + +func (rt *progAttachRawTracepoint) Unpin() error { + return fmt.Errorf("unpin raw_tracepoint: %w", ErrNotSupported) +} diff --git a/vendor/github.com/cilium/ebpf/link/syscalls.go b/vendor/github.com/cilium/ebpf/link/syscalls.go new file mode 100644 index 0000000000..a61499438b --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/syscalls.go @@ -0,0 +1,191 @@ +package link + +import ( + "errors" + "unsafe" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/unix" +) + +// Type is the kind of link. +type Type uint32 + +// Valid link types. +// +// Equivalent to enum bpf_link_type. +const ( + UnspecifiedType Type = iota + RawTracepointType + TracingType + CgroupType + IterType + NetNsType + XDPType +) + +var haveProgAttach = internal.FeatureTest("BPF_PROG_ATTACH", "4.10", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Type: ebpf.CGroupSKB, + AttachType: ebpf.AttachCGroupInetIngress, + License: "MIT", + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + }) + if err != nil { + return internal.ErrNotSupported + } + + // BPF_PROG_ATTACH was introduced at the same time as CGgroupSKB, + // so being able to load the program is enough to infer that we + // have the syscall. + prog.Close() + return nil +}) + +var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replacement", "5.5", func() error { + if err := haveProgAttach(); err != nil { + return err + } + + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Type: ebpf.CGroupSKB, + AttachType: ebpf.AttachCGroupInetIngress, + License: "MIT", + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + }) + if err != nil { + return internal.ErrNotSupported + } + defer prog.Close() + + // We know that we have BPF_PROG_ATTACH since we can load CGroupSKB programs. + // If passing BPF_F_REPLACE gives us EINVAL we know that the feature isn't + // present. + attr := internal.BPFProgAttachAttr{ + // We rely on this being checked after attachFlags. 
+ TargetFd: ^uint32(0), + AttachBpfFd: uint32(prog.FD()), + AttachType: uint32(ebpf.AttachCGroupInetIngress), + AttachFlags: uint32(flagReplace), + } + + err = internal.BPFProgAttach(&attr) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + if errors.Is(err, unix.EBADF) { + return nil + } + return err +}) + +type bpfLinkCreateAttr struct { + progFd uint32 + targetFd uint32 + attachType ebpf.AttachType + flags uint32 + targetBTFID uint32 +} + +func bpfLinkCreate(attr *bpfLinkCreateAttr) (*internal.FD, error) { + ptr, err := internal.BPF(internal.BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return internal.NewFD(uint32(ptr)), nil +} + +type bpfLinkCreateIterAttr struct { + prog_fd uint32 + target_fd uint32 + attach_type ebpf.AttachType + flags uint32 + iter_info internal.Pointer + iter_info_len uint32 +} + +func bpfLinkCreateIter(attr *bpfLinkCreateIterAttr) (*internal.FD, error) { + ptr, err := internal.BPF(internal.BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return internal.NewFD(uint32(ptr)), nil +} + +type bpfLinkUpdateAttr struct { + linkFd uint32 + newProgFd uint32 + flags uint32 + oldProgFd uint32 +} + +func bpfLinkUpdate(attr *bpfLinkUpdateAttr) error { + _, err := internal.BPF(internal.BPF_LINK_UPDATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +var haveBPFLink = internal.FeatureTest("bpf_link", "5.7", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Type: ebpf.CGroupSKB, + AttachType: ebpf.AttachCGroupInetIngress, + License: "MIT", + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + }) + if err != nil { + return internal.ErrNotSupported + } + defer prog.Close() + + attr := bpfLinkCreateAttr{ + // This is a hopefully invalid file descriptor, which triggers EBADF. + targetFd: ^uint32(0), + progFd: uint32(prog.FD()), + attachType: ebpf.AttachCGroupInetIngress, + } + _, err = bpfLinkCreate(&attr) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + if errors.Is(err, unix.EBADF) { + return nil + } + return err +}) + +type bpfIterCreateAttr struct { + linkFd uint32 + flags uint32 +} + +func bpfIterCreate(attr *bpfIterCreateAttr) (*internal.FD, error) { + ptr, err := internal.BPF(internal.BPF_ITER_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err == nil { + return internal.NewFD(uint32(ptr)), nil + } + return nil, err +} + +type bpfRawTracepointOpenAttr struct { + name internal.Pointer + fd uint32 + _ uint32 +} + +func bpfRawTracepointOpen(attr *bpfRawTracepointOpenAttr) (*internal.FD, error) { + ptr, err := internal.BPF(internal.BPF_RAW_TRACEPOINT_OPEN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err == nil { + return internal.NewFD(uint32(ptr)), nil + } + return nil, err +} diff --git a/vendor/github.com/cilium/ebpf/link/tracepoint.go b/vendor/github.com/cilium/ebpf/link/tracepoint.go new file mode 100644 index 0000000000..7423df86b1 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/tracepoint.go @@ -0,0 +1,60 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" +) + +// Tracepoint attaches the given eBPF program to the tracepoint with the given +// group and name. See /sys/kernel/debug/tracing/events to find available +// tracepoints. The top-level directory is the group, the event's subdirectory +// is the name. 
Example: +// +// tp, err := Tracepoint("syscalls", "sys_enter_fork", prog) +// +// Losing the reference to the resulting Link (tp) will close the Tracepoint +// and prevent further execution of prog. The Link must be Closed during +// program shutdown to avoid leaking system resources. +// +// Note that attaching eBPF programs to syscalls (sys_enter_*/sys_exit_*) is +// only possible as of kernel 4.14 (commit cf5f5ce). +func Tracepoint(group, name string, prog *ebpf.Program) (Link, error) { + if group == "" || name == "" { + return nil, fmt.Errorf("group and name cannot be empty: %w", errInvalidInput) + } + if prog == nil { + return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) + } + if !rgxTraceEvent.MatchString(group) || !rgxTraceEvent.MatchString(name) { + return nil, fmt.Errorf("group and name '%s/%s' must be alphanumeric or underscore: %w", group, name, errInvalidInput) + } + if prog.Type() != ebpf.TracePoint { + return nil, fmt.Errorf("eBPF program type %s is not a Tracepoint: %w", prog.Type(), errInvalidInput) + } + + tid, err := getTraceEventID(group, name) + if err != nil { + return nil, err + } + + fd, err := openTracepointPerfEvent(tid, perfAllThreads) + if err != nil { + return nil, err + } + + pe := &perfEvent{ + fd: fd, + tracefsID: tid, + group: group, + name: name, + typ: tracepointEvent, + } + + if err := pe.attach(prog); err != nil { + pe.Close() + return nil, err + } + + return pe, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/uprobe.go b/vendor/github.com/cilium/ebpf/link/uprobe.go new file mode 100644 index 0000000000..59170ce046 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/uprobe.go @@ -0,0 +1,288 @@ +package link + +import ( + "debug/elf" + "errors" + "fmt" + "os" + "path/filepath" + "regexp" + "sync" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal" +) + +var ( + uprobeEventsPath = filepath.Join(tracefsPath, "uprobe_events") + + // rgxUprobeSymbol is used to strip invalid characters from the uprobe symbol + // as they are not allowed to be used as the EVENT token in tracefs. + rgxUprobeSymbol = regexp.MustCompile("[^a-zA-Z0-9]+") + + uprobeRetprobeBit = struct { + once sync.Once + value uint64 + err error + }{} + + // ErrNoSymbol indicates that the given symbol was not found + // in the ELF symbols table. + ErrNoSymbol = errors.New("not found") +) + +// Executable defines an executable program on the filesystem. +type Executable struct { + // Path of the executable on the filesystem. + path string + // Parsed ELF symbols and dynamic symbols offsets. + offsets map[string]uint64 +} + +// UprobeOptions defines additional parameters that will be used +// when loading Uprobes. +type UprobeOptions struct { + // Symbol offset. Must be provided in case of external symbols (shared libs). + // If set, overrides the offset eventually parsed from the executable. + Offset uint64 + // Only set the uprobe on the given process ID. Useful when tracing + // shared library calls or programs that have many running instances. + PID int +} + +// To open a new Executable, use: +// +// OpenExecutable("/bin/bash") +// +// The returned value can then be used to open Uprobe(s). 
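+//
+// Note that the ELF symbol tables are parsed once, when the Executable is
+// opened; subsequent Uprobe calls resolve offsets from the cached table.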
+func OpenExecutable(path string) (*Executable, error) {
+	if path == "" {
+		return nil, fmt.Errorf("path cannot be empty")
+	}
+
+	f, err := os.Open(path)
+	if err != nil {
+		return nil, fmt.Errorf("open file '%s': %w", path, err)
+	}
+	defer f.Close()
+
+	se, err := internal.NewSafeELFFile(f)
+	if err != nil {
+		return nil, fmt.Errorf("parse ELF file: %w", err)
+	}
+
+	if se.Type != elf.ET_EXEC && se.Type != elf.ET_DYN {
+		// ELF is not an executable or a shared object.
+		return nil, errors.New("the given file is not an executable or a shared object")
+	}
+
+	ex := Executable{
+		path:    path,
+		offsets: make(map[string]uint64),
+	}
+
+	if err := ex.load(se); err != nil {
+		return nil, err
+	}
+
+	return &ex, nil
+}
+
+func (ex *Executable) load(f *internal.SafeELFFile) error {
+	syms, err := f.Symbols()
+	if err != nil && !errors.Is(err, elf.ErrNoSymbols) {
+		return err
+	}
+
+	dynsyms, err := f.DynamicSymbols()
+	if err != nil && !errors.Is(err, elf.ErrNoSymbols) {
+		return err
+	}
+
+	syms = append(syms, dynsyms...)
+
+	for _, s := range syms {
+		if elf.ST_TYPE(s.Info) != elf.STT_FUNC {
+			// Symbol not associated with a function or other executable code.
+			continue
+		}
+
+		off := s.Value
+
+		// Loop over ELF segments.
+		for _, prog := range f.Progs {
+			// Skip uninteresting segments.
+			if prog.Type != elf.PT_LOAD || (prog.Flags&elf.PF_X) == 0 {
+				continue
+			}
+
+			if prog.Vaddr <= s.Value && s.Value < (prog.Vaddr+prog.Memsz) {
+				// If the symbol value is contained in the segment, calculate
+				// the symbol offset.
+				//
+				// fn symbol offset = fn symbol VA - .text VA + .text offset
+				//
+				// stackoverflow.com/a/40249502
+				off = s.Value - prog.Vaddr + prog.Off
+				break
+			}
+		}
+
+		ex.offsets[s.Name] = off
+	}
+
+	return nil
+}
+
+func (ex *Executable) offset(symbol string) (uint64, error) {
+	if off, ok := ex.offsets[symbol]; ok {
+		// Symbols with location 0 from section undef are shared library calls and
+		// are relocated before the binary is executed. Dynamic linking is not
+		// implemented by the library, so mark this as unsupported for now.
+		//
+		// Since only offset values are stored and not elf.Symbol, if the value is 0,
+		// assume it's an external symbol.
+		if off == 0 {
+			return 0, fmt.Errorf("cannot resolve %s library call '%s', "+
+				"consider providing the offset via options: %w", ex.path, symbol, ErrNotSupported)
+		}
+		return off, nil
+	}
+	return 0, fmt.Errorf("symbol %s: %w", symbol, ErrNoSymbol)
+}
+
+// Uprobe attaches the given eBPF program to a perf event that fires when the
+// given symbol starts executing in the given Executable.
+// For example, /bin/bash::main():
+//
+//	ex, _ = OpenExecutable("/bin/bash")
+//	ex.Uprobe("main", prog, nil)
+//
+// When using symbols which belong to shared libraries,
+// an offset must be provided via options:
+//
+//	up, err := ex.Uprobe("main", prog, &UprobeOptions{Offset: 0x123})
+//
+// Losing the reference to the resulting Link (up) will close the Uprobe
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+//
+// Functions provided by shared libraries can currently not be traced and
+// will result in an ErrNotSupported.
+func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) {
+	u, err := ex.uprobe(symbol, prog, opts, false)
+	if err != nil {
+		return nil, err
+	}
+
+	err = u.attach(prog)
+	if err != nil {
+		u.Close()
+		return nil, err
+	}
+
+	return u, nil
+}
+
+// Uretprobe attaches the given eBPF program to a perf event that fires right
+// before the given symbol exits. For example, /bin/bash::main():
+//
+//	ex, _ = OpenExecutable("/bin/bash")
+//	ex.Uretprobe("main", prog, nil)
+//
+// When using symbols which belong to shared libraries,
+// an offset must be provided via options:
+//
+//	up, err := ex.Uretprobe("main", prog, &UprobeOptions{Offset: 0x123})
+//
+// Losing the reference to the resulting Link (up) will close the Uprobe
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+//
+// Functions provided by shared libraries can currently not be traced and
+// will result in an ErrNotSupported.
+func (ex *Executable) Uretprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) {
+	u, err := ex.uprobe(symbol, prog, opts, true)
+	if err != nil {
+		return nil, err
+	}
+
+	err = u.attach(prog)
+	if err != nil {
+		u.Close()
+		return nil, err
+	}
+
+	return u, nil
+}
+
+// uprobe opens a perf event for the given binary/symbol and attaches prog to it.
+// If ret is true, create a uretprobe.
+func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions, ret bool) (*perfEvent, error) {
+	if prog == nil {
+		return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
+	}
+	if prog.Type() != ebpf.Kprobe {
+		return nil, fmt.Errorf("eBPF program type %s is not Kprobe: %w", prog.Type(), errInvalidInput)
+	}
+
+	var offset uint64
+	if opts != nil && opts.Offset != 0 {
+		offset = opts.Offset
+	} else {
+		off, err := ex.offset(symbol)
+		if err != nil {
+			return nil, err
+		}
+		offset = off
+	}
+
+	pid := perfAllThreads
+	if opts != nil && opts.PID != 0 {
+		pid = opts.PID
+	}
+
+	// Use uprobe PMU if the kernel has it available.
+	tp, err := pmuUprobe(symbol, ex.path, offset, pid, ret)
+	if err == nil {
+		return tp, nil
+	}
+	if err != nil && !errors.Is(err, ErrNotSupported) {
+		return nil, fmt.Errorf("creating perf_uprobe PMU: %w", err)
+	}
+
+	// Use tracefs if uprobe PMU is missing.
+	tp, err = tracefsUprobe(uprobeSanitizedSymbol(symbol), ex.path, offset, pid, ret)
+	if err != nil {
+		return nil, fmt.Errorf("creating trace event '%s:%s' in tracefs: %w", ex.path, symbol, err)
+	}
+
+	return tp, nil
+}
+
+// pmuUprobe opens a perf event based on the uprobe PMU.
+func pmuUprobe(symbol, path string, offset uint64, pid int, ret bool) (*perfEvent, error) {
+	return pmuProbe(uprobeType, symbol, path, offset, pid, ret)
+}
+
+// tracefsUprobe creates a Uprobe tracefs entry.
+func tracefsUprobe(symbol, path string, offset uint64, pid int, ret bool) (*perfEvent, error) {
+	return tracefsProbe(uprobeType, symbol, path, offset, pid, ret)
+}
+
+// uprobeSanitizedSymbol replaces every invalid character for the tracefs api with an underscore.
+func uprobeSanitizedSymbol(symbol string) string {
+	return rgxUprobeSymbol.ReplaceAllString(symbol, "_")
+}
+
+// uprobePathOffset creates the PATH:OFFSET token for the tracefs api.
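+// For example, uprobePathOffset("/bin/bash", 0x1234) yields "/bin/bash:0x1234".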
+func uprobePathOffset(path string, offset uint64) string {
+	return fmt.Sprintf("%s:%#x", path, offset)
+}
+
+func uretprobeBit() (uint64, error) {
+	uprobeRetprobeBit.once.Do(func() {
+		uprobeRetprobeBit.value, uprobeRetprobeBit.err = determineRetprobeBit(uprobeType)
+	})
+	return uprobeRetprobeBit.value, uprobeRetprobeBit.err
+}
diff --git a/vendor/github.com/cilium/ebpf/linker.go b/vendor/github.com/cilium/ebpf/linker.go
new file mode 100644
index 0000000000..f3b1629e70
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/linker.go
@@ -0,0 +1,159 @@
+package ebpf
+
+import (
+	"fmt"
+
+	"github.com/cilium/ebpf/asm"
+)
+
+// link resolves bpf-to-bpf calls.
+//
+// Each library may contain multiple functions / labels, and is only linked
+// if prog references one of these functions.
+//
+// Libraries are also linked transitively: instructions pulled in from one
+// library may themselves reference functions in another.
+func link(prog *ProgramSpec, libs []*ProgramSpec) error {
+	var (
+		linked  = make(map[*ProgramSpec]bool)
+		pending = []asm.Instructions{prog.Instructions}
+		insns   asm.Instructions
+	)
+	for len(pending) > 0 {
+		insns, pending = pending[0], pending[1:]
+		for _, lib := range libs {
+			if linked[lib] {
+				continue
+			}
+
+			needed, err := needSection(insns, lib.Instructions)
+			if err != nil {
+				return fmt.Errorf("linking %s: %w", lib.Name, err)
+			}
+
+			if !needed {
+				continue
+			}
+
+			linked[lib] = true
+			prog.Instructions = append(prog.Instructions, lib.Instructions...)
+			pending = append(pending, lib.Instructions)
+
+			if prog.BTF != nil && lib.BTF != nil {
+				if err := prog.BTF.Append(lib.BTF); err != nil {
+					return fmt.Errorf("linking BTF of %s: %w", lib.Name, err)
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+func needSection(insns, section asm.Instructions) (bool, error) {
+	// A map of symbols to the libraries which contain them.
+	symbols, err := section.SymbolOffsets()
+	if err != nil {
+		return false, err
+	}
+
+	for _, ins := range insns {
+		if ins.Reference == "" {
+			continue
+		}
+
+		if ins.OpCode.JumpOp() != asm.Call || ins.Src != asm.PseudoCall {
+			continue
+		}
+
+		if ins.Constant != -1 {
+			// This is already a valid call, no need to link again.
+			continue
+		}
+
+		if _, ok := symbols[ins.Reference]; !ok {
+			// Symbol isn't available in this section
+			continue
+		}
+
+		// At this point we know that at least one function in the
+		// library is called from insns, so we have to link it.
+		return true, nil
+	}
+
+	// None of the functions in the section are called.
+ return false, nil +} + +func fixupJumpsAndCalls(insns asm.Instructions) error { + symbolOffsets := make(map[string]asm.RawInstructionOffset) + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + + if ins.Symbol == "" { + continue + } + + if _, ok := symbolOffsets[ins.Symbol]; ok { + return fmt.Errorf("duplicate symbol %s", ins.Symbol) + } + + symbolOffsets[ins.Symbol] = iter.Offset + } + + iter = insns.Iterate() + for iter.Next() { + i := iter.Index + offset := iter.Offset + ins := iter.Ins + + if ins.Reference == "" { + continue + } + + switch { + case ins.IsFunctionCall() && ins.Constant == -1: + // Rewrite bpf to bpf call + callOffset, ok := symbolOffsets[ins.Reference] + if !ok { + return fmt.Errorf("call at %d: reference to missing symbol %q", i, ins.Reference) + } + + ins.Constant = int64(callOffset - offset - 1) + + case ins.OpCode.Class() == asm.JumpClass && ins.Offset == -1: + // Rewrite jump to label + jumpOffset, ok := symbolOffsets[ins.Reference] + if !ok { + return fmt.Errorf("jump at %d: reference to missing symbol %q", i, ins.Reference) + } + + ins.Offset = int16(jumpOffset - offset - 1) + + case ins.IsLoadFromMap() && ins.MapPtr() == -1: + return fmt.Errorf("map %s: %w", ins.Reference, errUnsatisfiedReference) + } + } + + // fixupBPFCalls replaces bpf_probe_read_{kernel,user}[_str] with bpf_probe_read[_str] on older kernels + // https://github.com/libbpf/libbpf/blob/master/src/libbpf.c#L6009 + iter = insns.Iterate() + for iter.Next() { + ins := iter.Ins + if !ins.IsBuiltinCall() { + continue + } + switch asm.BuiltinFunc(ins.Constant) { + case asm.FnProbeReadKernel, asm.FnProbeReadUser: + if err := haveProbeReadKernel(); err != nil { + ins.Constant = int64(asm.FnProbeRead) + } + case asm.FnProbeReadKernelStr, asm.FnProbeReadUserStr: + if err := haveProbeReadKernel(); err != nil { + ins.Constant = int64(asm.FnProbeReadStr) + } + } + } + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/map.go b/vendor/github.com/cilium/ebpf/map.go new file mode 100644 index 0000000000..cca387ead0 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/map.go @@ -0,0 +1,1267 @@ +package ebpf + +import ( + "bytes" + "errors" + "fmt" + "io" + "path/filepath" + "reflect" + "strings" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/btf" + "github.com/cilium/ebpf/internal/unix" +) + +// Errors returned by Map and MapIterator methods. +var ( + ErrKeyNotExist = errors.New("key does not exist") + ErrKeyExist = errors.New("key already exists") + ErrIterationAborted = errors.New("iteration aborted") + ErrMapIncompatible = errors.New("map's spec is incompatible with pinned map") +) + +// MapOptions control loading a map into the kernel. +type MapOptions struct { + // The base path to pin maps in if requested via PinByName. + // Existing maps will be re-used if they are compatible, otherwise an + // error is returned. + PinPath string + LoadPinOptions LoadPinOptions +} + +// MapID represents the unique ID of an eBPF map +type MapID uint32 + +// MapSpec defines a Map. +type MapSpec struct { + // Name is passed to the kernel as a debug aid. Must only contain + // alpha numeric and '_' characters. + Name string + Type MapType + KeySize uint32 + ValueSize uint32 + MaxEntries uint32 + + // Flags is passed to the kernel and specifies additional map + // creation attributes. + Flags uint32 + + // Automatically pin and load a map from MapOptions.PinPath. + // Generates an error if an existing pinned map is incompatible with the MapSpec. 
+ Pinning PinType + + // Specify numa node during map creation + // (effective only if unix.BPF_F_NUMA_NODE flag is set, + // which can be imported from golang.org/x/sys/unix) + NumaNode uint32 + + // The initial contents of the map. May be nil. + Contents []MapKV + + // Whether to freeze a map after setting its initial contents. + Freeze bool + + // InnerMap is used as a template for ArrayOfMaps and HashOfMaps + InnerMap *MapSpec + + // Extra trailing bytes found in the ELF map definition when using structs + // larger than libbpf's bpf_map_def. Must be empty before instantiating + // the MapSpec into a Map. + Extra bytes.Reader + + // The BTF associated with this map. + BTF *btf.Map +} + +func (ms *MapSpec) String() string { + return fmt.Sprintf("%s(keySize=%d, valueSize=%d, maxEntries=%d, flags=%d)", ms.Type, ms.KeySize, ms.ValueSize, ms.MaxEntries, ms.Flags) +} + +// Copy returns a copy of the spec. +// +// MapSpec.Contents is a shallow copy. +func (ms *MapSpec) Copy() *MapSpec { + if ms == nil { + return nil + } + + cpy := *ms + + cpy.Contents = make([]MapKV, len(ms.Contents)) + copy(cpy.Contents, ms.Contents) + + cpy.InnerMap = ms.InnerMap.Copy() + + return &cpy +} + +func (ms *MapSpec) clampPerfEventArraySize() error { + if ms.Type != PerfEventArray { + return nil + } + + n, err := internal.PossibleCPUs() + if err != nil { + return fmt.Errorf("perf event array: %w", err) + } + + if n := uint32(n); ms.MaxEntries > n { + ms.MaxEntries = n + } + + return nil +} + +// MapKV is used to initialize the contents of a Map. +type MapKV struct { + Key interface{} + Value interface{} +} + +func (ms *MapSpec) checkCompatibility(m *Map) error { + switch { + case m.typ != ms.Type: + return fmt.Errorf("expected type %v, got %v: %w", ms.Type, m.typ, ErrMapIncompatible) + + case m.keySize != ms.KeySize: + return fmt.Errorf("expected key size %v, got %v: %w", ms.KeySize, m.keySize, ErrMapIncompatible) + + case m.valueSize != ms.ValueSize: + return fmt.Errorf("expected value size %v, got %v: %w", ms.ValueSize, m.valueSize, ErrMapIncompatible) + + case m.maxEntries != ms.MaxEntries: + return fmt.Errorf("expected max entries %v, got %v: %w", ms.MaxEntries, m.maxEntries, ErrMapIncompatible) + + case m.flags != ms.Flags: + return fmt.Errorf("expected flags %v, got %v: %w", ms.Flags, m.flags, ErrMapIncompatible) + } + return nil +} + +// Map represents a Map file descriptor. +// +// It is not safe to close a map which is used by other goroutines. +// +// Methods which take interface{} arguments by default encode +// them using binary.Read/Write in the machine's native endianness. +// +// Implement encoding.BinaryMarshaler or encoding.BinaryUnmarshaler +// if you require custom encoding. +type Map struct { + name string + fd *internal.FD + typ MapType + keySize uint32 + valueSize uint32 + maxEntries uint32 + flags uint32 + pinnedPath string + // Per CPU maps return values larger than the size in the spec + fullValueSize int +} + +// NewMapFromFD creates a map from a raw fd. +// +// You should not use fd after calling this function. +func NewMapFromFD(fd int) (*Map, error) { + if fd < 0 { + return nil, errors.New("invalid fd") + } + + return newMapFromFD(internal.NewFD(uint32(fd))) +} + +func newMapFromFD(fd *internal.FD) (*Map, error) { + info, err := newMapInfoFromFd(fd) + if err != nil { + fd.Close() + return nil, fmt.Errorf("get map info: %s", err) + } + + return newMap(fd, info.Name, info.Type, info.KeySize, info.ValueSize, info.MaxEntries, info.Flags) +} + +// NewMap creates a new Map. 
+// +// It's equivalent to calling NewMapWithOptions with default options. +func NewMap(spec *MapSpec) (*Map, error) { + return NewMapWithOptions(spec, MapOptions{}) +} + +// NewMapWithOptions creates a new Map. +// +// Creating a map for the first time will perform feature detection +// by creating small, temporary maps. +// +// The caller is responsible for ensuring the process' rlimit is set +// sufficiently high for locking memory during map creation. This can be done +// by calling rlimit.RemoveMemlock() prior to calling NewMapWithOptions. +// +// May return an error wrapping ErrMapIncompatible. +func NewMapWithOptions(spec *MapSpec, opts MapOptions) (*Map, error) { + handles := newHandleCache() + defer handles.close() + + m, err := newMapWithOptions(spec, opts, handles) + if err != nil { + return nil, fmt.Errorf("creating map: %w", err) + } + + err = m.finalize(spec) + if err != nil { + return nil, fmt.Errorf("populating map: %w", err) + } + + return m, nil +} + +func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ *Map, err error) { + closeOnError := func(c io.Closer) { + if err != nil { + c.Close() + } + } + + switch spec.Pinning { + case PinByName: + if spec.Name == "" { + return nil, fmt.Errorf("pin by name: missing Name") + } + + if opts.PinPath == "" { + return nil, fmt.Errorf("pin by name: missing MapOptions.PinPath") + } + + path := filepath.Join(opts.PinPath, spec.Name) + m, err := LoadPinnedMap(path, &opts.LoadPinOptions) + if errors.Is(err, unix.ENOENT) { + break + } + if err != nil { + return nil, fmt.Errorf("load pinned map: %w", err) + } + defer closeOnError(m) + + if err := spec.checkCompatibility(m); err != nil { + return nil, fmt.Errorf("use pinned map %s: %w", spec.Name, err) + } + + return m, nil + + case PinNone: + // Nothing to do here + + default: + return nil, fmt.Errorf("pin type %d: %w", int(spec.Pinning), ErrNotSupported) + } + + var innerFd *internal.FD + if spec.Type == ArrayOfMaps || spec.Type == HashOfMaps { + if spec.InnerMap == nil { + return nil, fmt.Errorf("%s requires InnerMap", spec.Type) + } + + if spec.InnerMap.Pinning != PinNone { + return nil, errors.New("inner maps cannot be pinned") + } + + template, err := spec.InnerMap.createMap(nil, opts, handles) + if err != nil { + return nil, fmt.Errorf("inner map: %w", err) + } + defer template.Close() + + // Intentionally skip populating and freezing (finalizing) + // the inner map template since it will be removed shortly. + + innerFd = template.fd + } + + m, err := spec.createMap(innerFd, opts, handles) + if err != nil { + return nil, err + } + defer closeOnError(m) + + if spec.Pinning == PinByName { + path := filepath.Join(opts.PinPath, spec.Name) + if err := m.Pin(path); err != nil { + return nil, fmt.Errorf("pin map: %s", err) + } + } + + return m, nil +} + +// createMap validates the spec's properties and creates the map in the kernel +// using the given opts. It does not populate or freeze the map. +func (spec *MapSpec) createMap(inner *internal.FD, opts MapOptions, handles *handleCache) (_ *Map, err error) { + closeOnError := func(closer io.Closer) { + if err != nil { + closer.Close() + } + } + + spec = spec.Copy() + + // Kernels 4.13 through 5.4 used a struct bpf_map_def that contained + // additional 'inner_map_idx' and later 'numa_node' fields. + // In order to support loading these definitions, tolerate the presence of + // extra bytes, but require them to be zeroes. 
+	if _, err := io.Copy(internal.DiscardZeroes{}, &spec.Extra); err != nil {
+		return nil, errors.New("extra contains unhandled non-zero bytes, drain before creating map")
+	}
+
+	switch spec.Type {
+	case ArrayOfMaps, HashOfMaps:
+		if err := haveNestedMaps(); err != nil {
+			return nil, err
+		}
+
+		if spec.ValueSize != 0 && spec.ValueSize != 4 {
+			return nil, errors.New("ValueSize must be zero or four for map of map")
+		}
+		spec.ValueSize = 4
+
+	case PerfEventArray:
+		if spec.KeySize != 0 && spec.KeySize != 4 {
+			return nil, errors.New("KeySize must be zero or four for perf event array")
+		}
+		spec.KeySize = 4
+
+		if spec.ValueSize != 0 && spec.ValueSize != 4 {
+			return nil, errors.New("ValueSize must be zero or four for perf event array")
+		}
+		spec.ValueSize = 4
+
+		if spec.MaxEntries == 0 {
+			n, err := internal.PossibleCPUs()
+			if err != nil {
+				return nil, fmt.Errorf("perf event array: %w", err)
+			}
+			spec.MaxEntries = uint32(n)
+		}
+	}
+
+	if spec.Flags&(unix.BPF_F_RDONLY_PROG|unix.BPF_F_WRONLY_PROG) > 0 || spec.Freeze {
+		if err := haveMapMutabilityModifiers(); err != nil {
+			return nil, fmt.Errorf("map create: %w", err)
+		}
+	}
+	if spec.Flags&unix.BPF_F_MMAPABLE > 0 {
+		if err := haveMmapableMaps(); err != nil {
+			return nil, fmt.Errorf("map create: %w", err)
+		}
+	}
+	if spec.Flags&unix.BPF_F_INNER_MAP > 0 {
+		if err := haveInnerMaps(); err != nil {
+			return nil, fmt.Errorf("map create: %w", err)
+		}
+	}
+
+	attr := internal.BPFMapCreateAttr{
+		MapType:    uint32(spec.Type),
+		KeySize:    spec.KeySize,
+		ValueSize:  spec.ValueSize,
+		MaxEntries: spec.MaxEntries,
+		Flags:      spec.Flags,
+		NumaNode:   spec.NumaNode,
+	}
+
+	if inner != nil {
+		var err error
+		attr.InnerMapFd, err = inner.Value()
+		if err != nil {
+			return nil, fmt.Errorf("map create: %w", err)
+		}
+	}
+
+	if haveObjName() == nil {
+		attr.MapName = internal.NewBPFObjName(spec.Name)
+	}
+
+	var btfDisabled bool
+	if spec.BTF != nil {
+		handle, err := handles.btfHandle(spec.BTF.Spec)
+		btfDisabled = errors.Is(err, btf.ErrNotSupported)
+		if err != nil && !btfDisabled {
+			return nil, fmt.Errorf("load BTF: %w", err)
+		}
+
+		if handle != nil {
+			attr.BTFFd = uint32(handle.FD())
+			attr.BTFKeyTypeID = uint32(spec.BTF.Key.ID())
+			attr.BTFValueTypeID = uint32(spec.BTF.Value.ID())
+		}
+	}
+
+	fd, err := internal.BPFMapCreate(&attr)
+	if err != nil {
+		if errors.Is(err, unix.EPERM) {
+			return nil, fmt.Errorf("map create: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err)
+		}
+		if btfDisabled {
+			return nil, fmt.Errorf("map create without BTF: %w", err)
+		}
+		return nil, fmt.Errorf("map create: %w", err)
+	}
+	defer closeOnError(fd)
+
+	m, err := newMap(fd, spec.Name, spec.Type, spec.KeySize, spec.ValueSize, spec.MaxEntries, spec.Flags)
+	if err != nil {
+		return nil, fmt.Errorf("map create: %w", err)
+	}
+
+	return m, nil
+}
+
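For orientation, here is a minimal sketch of what a caller of this vendored API does once the map-creation path above is in place. It assumes the v0.7.0 surface added by this diff; `demo_hash` and the chosen sizes are illustrative only:

```go
package main

import (
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	// Key/value sizes must match whatever BPF program will use the map.
	demoMap, err := ebpf.NewMap(&ebpf.MapSpec{
		Name:       "demo_hash",
		Type:       ebpf.Hash,
		KeySize:    4, // uint32 keys
		ValueSize:  8, // uint64 values
		MaxEntries: 64,
	})
	if err != nil {
		log.Fatalf("creating map: %v", err)
	}
	defer demoMap.Close()
}
```

Note that createMap copies the spec before normalizing it, so the caller's MapSpec is never mutated.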
+// newMap allocates and returns a new Map structure.
+// Sets the fullValueSize on per-CPU maps.
+func newMap(fd *internal.FD, name string, typ MapType, keySize, valueSize, maxEntries, flags uint32) (*Map, error) {
+	m := &Map{
+		name,
+		fd,
+		typ,
+		keySize,
+		valueSize,
+		maxEntries,
+		flags,
+		"",
+		int(valueSize),
+	}
+
+	if !typ.hasPerCPUValue() {
+		return m, nil
+	}
+
+	possibleCPUs, err := internal.PossibleCPUs()
+	if err != nil {
+		return nil, err
+	}
+
+	m.fullValueSize = internal.Align(int(valueSize), 8) * possibleCPUs
+	return m, nil
+}
+
+func (m *Map) String() string {
+	if m.name != "" {
+		return fmt.Sprintf("%s(%s)#%v", m.typ, m.name, m.fd)
+	}
+	return fmt.Sprintf("%s#%v", m.typ, m.fd)
+}
+
+// Type returns the underlying type of the map.
+func (m *Map) Type() MapType {
+	return m.typ
+}
+
+// KeySize returns the size of the map key in bytes.
+func (m *Map) KeySize() uint32 {
+	return m.keySize
+}
+
+// ValueSize returns the size of the map value in bytes.
+func (m *Map) ValueSize() uint32 {
+	return m.valueSize
+}
+
+// MaxEntries returns the maximum number of elements the map can hold.
+func (m *Map) MaxEntries() uint32 {
+	return m.maxEntries
+}
+
+// Flags returns the flags of the map.
+func (m *Map) Flags() uint32 {
+	return m.flags
+}
+
+// Info returns metadata about the map.
+func (m *Map) Info() (*MapInfo, error) {
+	return newMapInfoFromFd(m.fd)
+}
+
+// Lookup retrieves a value from a Map.
+//
+// Calls Close() on valueOut if it is of type **Map or **Program,
+// and *valueOut is not nil.
+//
+// Returns an error if the key doesn't exist, see ErrKeyNotExist.
+func (m *Map) Lookup(key, valueOut interface{}) error {
+	valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize)
+	if err := m.lookup(key, valuePtr); err != nil {
+		return err
+	}
+
+	return m.unmarshalValue(valueOut, valueBytes)
+}
+
+// LookupAndDelete retrieves and deletes a value from a Map.
+//
+// Returns ErrKeyNotExist if the key doesn't exist.
+func (m *Map) LookupAndDelete(key, valueOut interface{}) error {
+	valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize)
+
+	keyPtr, err := m.marshalKey(key)
+	if err != nil {
+		return fmt.Errorf("can't marshal key: %w", err)
+	}
+
+	if err := bpfMapLookupAndDelete(m.fd, keyPtr, valuePtr); err != nil {
+		return fmt.Errorf("lookup and delete failed: %w", err)
+	}
+
+	return m.unmarshalValue(valueOut, valueBytes)
+}
+
+// LookupBytes gets a value from a Map.
+//
+// Returns a nil value if a key doesn't exist.
+func (m *Map) LookupBytes(key interface{}) ([]byte, error) {
+	valueBytes := make([]byte, m.fullValueSize)
+	valuePtr := internal.NewSlicePointer(valueBytes)
+
+	err := m.lookup(key, valuePtr)
+	if errors.Is(err, ErrKeyNotExist) {
+		return nil, nil
+	}
+
+	return valueBytes, err
+}
+
+func (m *Map) lookup(key interface{}, valueOut internal.Pointer) error {
+	keyPtr, err := m.marshalKey(key)
+	if err != nil {
+		return fmt.Errorf("can't marshal key: %w", err)
+	}
+
+	if err = bpfMapLookupElem(m.fd, keyPtr, valueOut); err != nil {
+		return fmt.Errorf("lookup failed: %w", err)
+	}
+	return nil
+}
+
+// MapUpdateFlags controls the behaviour of the Map.Update call.
+//
+// The exact semantics depend on the specific MapType.
+type MapUpdateFlags uint64
+
+const (
+	// UpdateAny creates a new element or updates an existing one.
+	UpdateAny MapUpdateFlags = iota
+	// UpdateNoExist creates a new element.
+	UpdateNoExist MapUpdateFlags = 1 << (iota - 1)
+	// UpdateExist updates an existing element.
+	UpdateExist
+)
+
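These flags map directly onto the kernel's BPF_ANY/BPF_NOEXIST/BPF_EXIST update semantics. A hedged sketch of the resulting round trip, continuing with the hypothetical `demoMap` from earlier (ebpf.ErrKeyExist comes from this library's error set, defined outside this hunk):

```go
// Create-or-replace, then read the value back.
if err := demoMap.Put(uint32(1), uint64(100)); err != nil {
	log.Fatal(err)
}

var v uint64
if err := demoMap.Lookup(uint32(1), &v); err != nil {
	log.Fatal(err)
}

// UpdateNoExist refuses to overwrite an existing key.
err := demoMap.Update(uint32(1), uint64(200), ebpf.UpdateNoExist)
if errors.Is(err, ebpf.ErrKeyExist) {
	log.Println("key 1 already present, as expected")
}
```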
+// Put replaces or creates a value in a map.
+//
+// It is equivalent to calling Update with UpdateAny.
+func (m *Map) Put(key, value interface{}) error {
+	return m.Update(key, value, UpdateAny)
+}
+
+// Update changes the value of a key.
+func (m *Map) Update(key, value interface{}, flags MapUpdateFlags) error {
+	keyPtr, err := m.marshalKey(key)
+	if err != nil {
+		return fmt.Errorf("can't marshal key: %w", err)
+	}
+
+	valuePtr, err := m.marshalValue(value)
+	if err != nil {
+		return fmt.Errorf("can't marshal value: %w", err)
+	}
+
+	if err = bpfMapUpdateElem(m.fd, keyPtr, valuePtr, uint64(flags)); err != nil {
+		return fmt.Errorf("update failed: %w", err)
+	}
+
+	return nil
+}
+
+// Delete removes a value.
+//
+// Returns ErrKeyNotExist if the key does not exist.
+func (m *Map) Delete(key interface{}) error {
+	keyPtr, err := m.marshalKey(key)
+	if err != nil {
+		return fmt.Errorf("can't marshal key: %w", err)
+	}
+
+	if err = bpfMapDeleteElem(m.fd, keyPtr); err != nil {
+		return fmt.Errorf("delete failed: %w", err)
+	}
+	return nil
+}
+
+// NextKey finds the key following an initial key.
+//
+// See NextKeyBytes for details.
+//
+// Returns ErrKeyNotExist if there is no next key.
+func (m *Map) NextKey(key, nextKeyOut interface{}) error {
+	nextKeyPtr, nextKeyBytes := makeBuffer(nextKeyOut, int(m.keySize))
+
+	if err := m.nextKey(key, nextKeyPtr); err != nil {
+		return err
+	}
+
+	if err := m.unmarshalKey(nextKeyOut, nextKeyBytes); err != nil {
+		return fmt.Errorf("can't unmarshal next key: %w", err)
+	}
+	return nil
+}
+
+// NextKeyBytes returns the key following an initial key as a byte slice.
+//
+// Passing nil will return the first key.
+//
+// Use Iterate if you want to traverse all entries in the map.
+//
+// Returns nil if there are no more keys.
+func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) {
+	nextKey := make([]byte, m.keySize)
+	nextKeyPtr := internal.NewSlicePointer(nextKey)
+
+	err := m.nextKey(key, nextKeyPtr)
+	if errors.Is(err, ErrKeyNotExist) {
+		return nil, nil
+	}
+
+	return nextKey, err
+}
+
+func (m *Map) nextKey(key interface{}, nextKeyOut internal.Pointer) error {
+	var (
+		keyPtr internal.Pointer
+		err    error
+	)
+
+	if key != nil {
+		keyPtr, err = m.marshalKey(key)
+		if err != nil {
+			return fmt.Errorf("can't marshal key: %w", err)
+		}
+	}
+
+	if err = bpfMapGetNextKey(m.fd, keyPtr, nextKeyOut); err != nil {
+		return fmt.Errorf("next key failed: %w", err)
+	}
+	return nil
+}
+
+// BatchLookup looks up many elements in a map at once.
+//
+// "keysOut" and "valuesOut" must be of type slice; a pointer
+// to a slice or buffer will not work.
+// "prevKey" is the key to start the batch lookup from; it will
+// *not* be included in the results. Use nil to start at the first key.
+//
+// ErrKeyNotExist is returned when the batch lookup has reached
+// the end of all possible results, even when partial results
+// are returned. It should be used to evaluate when lookup is "done".
+func (m *Map) BatchLookup(prevKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
+	return m.batchLookup(internal.BPF_MAP_LOOKUP_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts)
+}
+
+// BatchLookupAndDelete looks up many elements in a map at once.
+//
+// It then deletes all those elements.
+// "keysOut" and "valuesOut" must be of type slice; a pointer
+// to a slice or buffer will not work.
+// "prevKey" is the key to start the batch lookup from; it will
+// *not* be included in the results. Use nil to start at the first key.
+//
+// ErrKeyNotExist is returned when the batch lookup has reached
+// the end of all possible results, even when partial results
+// are returned. It should be used to evaluate when lookup is "done".
+func (m *Map) BatchLookupAndDelete(prevKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
+	return m.batchLookup(internal.BPF_MAP_LOOKUP_AND_DELETE_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts)
+}
+
+func (m *Map) batchLookup(cmd internal.BPFCmd, startKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
+	if err := haveBatchAPI(); err != nil {
+		return 0, err
+	}
+	if m.typ.hasPerCPUValue() {
+		return 0, ErrNotSupported
+	}
+	keysValue := reflect.ValueOf(keysOut)
+	if keysValue.Kind() != reflect.Slice {
+		return 0, fmt.Errorf("keys must be a slice")
+	}
+	valuesValue := reflect.ValueOf(valuesOut)
+	if valuesValue.Kind() != reflect.Slice {
+		return 0, fmt.Errorf("valuesOut must be a slice")
+	}
+	count := keysValue.Len()
+	if count != valuesValue.Len() {
+		return 0, fmt.Errorf("keysOut and valuesOut must be the same length")
+	}
+	keyBuf := make([]byte, count*int(m.keySize))
+	keyPtr := internal.NewSlicePointer(keyBuf)
+	valueBuf := make([]byte, count*int(m.fullValueSize))
+	valuePtr := internal.NewSlicePointer(valueBuf)
+
+	var (
+		startPtr internal.Pointer
+		err      error
+		retErr   error
+	)
+	if startKey != nil {
+		startPtr, err = marshalPtr(startKey, int(m.keySize))
+		if err != nil {
+			return 0, err
+		}
+	}
+	nextPtr, nextBuf := makeBuffer(nextKeyOut, int(m.keySize))
+
+	ct, err := bpfMapBatch(cmd, m.fd, startPtr, nextPtr, keyPtr, valuePtr, uint32(count), opts)
+	if err != nil {
+		if !errors.Is(err, ErrKeyNotExist) {
+			return 0, err
+		}
+		retErr = ErrKeyNotExist
+	}
+
+	err = m.unmarshalKey(nextKeyOut, nextBuf)
+	if err != nil {
+		return 0, err
+	}
+	err = unmarshalBytes(keysOut, keyBuf)
+	if err != nil {
+		return 0, err
+	}
+	err = unmarshalBytes(valuesOut, valueBuf)
+	if err != nil {
+		retErr = err
+	}
+	return int(ct), retErr
+}
+
+// BatchUpdate updates the map with multiple keys and values
+// simultaneously.
+//
+// "keys" and "values" must be of type slice; a pointer
+// to a slice or buffer will not work.
+func (m *Map) BatchUpdate(keys, values interface{}, opts *BatchOptions) (int, error) {
+	if err := haveBatchAPI(); err != nil {
+		return 0, err
+	}
+	if m.typ.hasPerCPUValue() {
+		return 0, ErrNotSupported
+	}
+	keysValue := reflect.ValueOf(keys)
+	if keysValue.Kind() != reflect.Slice {
+		return 0, fmt.Errorf("keys must be a slice")
+	}
+	valuesValue := reflect.ValueOf(values)
+	if valuesValue.Kind() != reflect.Slice {
+		return 0, fmt.Errorf("values must be a slice")
+	}
+	var (
+		count    = keysValue.Len()
+		valuePtr internal.Pointer
+		err      error
+	)
+	if count != valuesValue.Len() {
+		return 0, fmt.Errorf("keys and values must be the same length")
+	}
+	keyPtr, err := marshalPtr(keys, count*int(m.keySize))
+	if err != nil {
+		return 0, err
+	}
+	valuePtr, err = marshalPtr(values, count*int(m.valueSize))
+	if err != nil {
+		return 0, err
+	}
+	var nilPtr internal.Pointer
+	ct, err := bpfMapBatch(internal.BPF_MAP_UPDATE_BATCH, m.fd, nilPtr, nilPtr, keyPtr, valuePtr, uint32(count), opts)
+	return int(ct), err
+}
+
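Taken together, the batch calls trade one syscall per element for a single syscall over whole slices. A sketch under the same assumptions as the earlier examples (kernel 5.6+ for the batch API; on older kernels haveBatchAPI surfaces ErrNotSupported):

```go
keys := []uint32{1, 2, 3}
vals := []uint64{10, 20, 30}
if _, err := demoMap.BatchUpdate(keys, vals, nil); err != nil {
	log.Fatal(err)
}

var cursor uint32 // receives the key to resume from
outKeys := make([]uint32, 3)
outVals := make([]uint64, 3)
n, err := demoMap.BatchLookup(nil, &cursor, outKeys, outVals, nil)
if err != nil && !errors.Is(err, ebpf.ErrKeyNotExist) {
	log.Fatal(err) // ErrKeyNotExist only signals the end of the map
}
log.Printf("read %d entries in one syscall", n)
```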
+// BatchDelete batch deletes entries in the map by keys.
+//
+// "keys" must be of type slice; a pointer to a slice or buffer will not work.
+func (m *Map) BatchDelete(keys interface{}, opts *BatchOptions) (int, error) {
+	if err := haveBatchAPI(); err != nil {
+		return 0, err
+	}
+	if m.typ.hasPerCPUValue() {
+		return 0, ErrNotSupported
+	}
+	keysValue := reflect.ValueOf(keys)
+	if keysValue.Kind() != reflect.Slice {
+		return 0, fmt.Errorf("keys must be a slice")
+	}
+	count := keysValue.Len()
+	keyPtr, err := marshalPtr(keys, count*int(m.keySize))
+	if err != nil {
+		return 0, fmt.Errorf("cannot marshal keys: %v", err)
+	}
+	var nilPtr internal.Pointer
+	ct, err := bpfMapBatch(internal.BPF_MAP_DELETE_BATCH, m.fd, nilPtr, nilPtr, keyPtr, nilPtr, uint32(count), opts)
+	return int(ct), err
+}
+
+// Iterate traverses a map.
+//
+// It's safe to create multiple iterators at the same time.
+//
+// It's not possible to guarantee that all keys in a map will be
+// returned if there are concurrent modifications to the map.
+func (m *Map) Iterate() *MapIterator {
+	return newMapIterator(m)
+}
+
+// Close removes a Map.
+func (m *Map) Close() error {
+	if m == nil {
+		// This makes it easier to clean up when iterating maps
+		// of maps / programs.
+		return nil
+	}
+
+	return m.fd.Close()
+}
+
+// FD gets the file descriptor of the Map.
+//
+// Calling this function is invalid after Close has been called.
+func (m *Map) FD() int {
+	fd, err := m.fd.Value()
+	if err != nil {
+		// Best effort: -1 is the number most likely to be an
+		// invalid file descriptor.
+		return -1
+	}
+
+	return int(fd)
+}
+
+// Clone creates a duplicate of the Map.
+//
+// Closing the duplicate does not affect the original, and vice versa.
+// However, changes made to the map are reflected in both instances.
+// If the original map was pinned, the cloned map will not be pinned by default.
+//
+// Cloning a nil Map returns nil.
+func (m *Map) Clone() (*Map, error) {
+	if m == nil {
+		return nil, nil
+	}
+
+	dup, err := m.fd.Dup()
+	if err != nil {
+		return nil, fmt.Errorf("can't clone map: %w", err)
+	}
+
+	return &Map{
+		m.name,
+		dup,
+		m.typ,
+		m.keySize,
+		m.valueSize,
+		m.maxEntries,
+		m.flags,
+		"",
+		m.fullValueSize,
+	}, nil
+}
+
+// Pin persists the map on the BPF virtual file system past the lifetime of
+// the process that created it.
+//
+// Calling Pin on a previously pinned map will overwrite the path, except when
+// the new path already exists. Re-pinning across filesystems is not supported.
+// You can Clone a map to pin it to a different path.
+//
+// This requires bpffs to be mounted above fileName. See https://docs.cilium.io/en/k8s-doc/admin/#admin-mount-bpffs
+func (m *Map) Pin(fileName string) error {
+	if err := internal.Pin(m.pinnedPath, fileName, m.fd); err != nil {
+		return err
+	}
+	m.pinnedPath = fileName
+	return nil
+}
+
+// Unpin removes the persisted state for the map from the BPF virtual filesystem.
+//
+// Failed calls to Unpin will not alter the state returned by IsPinned.
+//
+// Unpinning an unpinned Map returns nil.
+func (m *Map) Unpin() error {
+	if err := internal.Unpin(m.pinnedPath); err != nil {
+		return err
+	}
+	m.pinnedPath = ""
+	return nil
+}
+
+// IsPinned returns true if the map has a non-empty pinned path.
+func (m *Map) IsPinned() bool {
+	return m.pinnedPath != ""
+}
+
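Pin and Unpin above only move an already-created object in and out of bpffs; they do not copy any data. A sketch of the usual persist-and-reload cycle (the path is illustrative; bpffs is conventionally mounted at /sys/fs/bpf):

```go
const pinPath = "/sys/fs/bpf/demo_hash" // hypothetical bpffs location

if err := demoMap.Pin(pinPath); err != nil {
	log.Fatal(err)
}

// Later, possibly from a different process:
reloaded, err := ebpf.LoadPinnedMap(pinPath, nil) // nil LoadPinOptions = defaults
if err != nil {
	log.Fatal(err)
}
defer reloaded.Close()
```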
+// Freeze prevents a map from being modified from user space.
+//
+// It makes no changes to kernel-side restrictions.
+func (m *Map) Freeze() error {
+	if err := haveMapMutabilityModifiers(); err != nil {
+		return fmt.Errorf("can't freeze map: %w", err)
+	}
+
+	if err := bpfMapFreeze(m.fd); err != nil {
+		return fmt.Errorf("can't freeze map: %w", err)
+	}
+	return nil
+}
+
+// finalize populates the Map according to the Contents specified
+// in spec and freezes the Map if requested by spec.
+func (m *Map) finalize(spec *MapSpec) error {
+	for _, kv := range spec.Contents {
+		if err := m.Put(kv.Key, kv.Value); err != nil {
+			return fmt.Errorf("putting value: key %v: %w", kv.Key, err)
+		}
+	}
+
+	if spec.Freeze {
+		if err := m.Freeze(); err != nil {
+			return fmt.Errorf("freezing map: %w", err)
+		}
+	}
+
+	return nil
+}
+
+func (m *Map) marshalKey(data interface{}) (internal.Pointer, error) {
+	if data == nil {
+		if m.keySize == 0 {
+			// Queues have a key length of zero, so passing nil here is valid.
+			return internal.NewPointer(nil), nil
+		}
+		return internal.Pointer{}, errors.New("can't use nil as key of map")
+	}
+
+	return marshalPtr(data, int(m.keySize))
+}
+
+func (m *Map) unmarshalKey(data interface{}, buf []byte) error {
+	if buf == nil {
+		// This is from a makeBuffer call, nothing to do here.
+		return nil
+	}
+
+	return unmarshalBytes(data, buf)
+}
+
+func (m *Map) marshalValue(data interface{}) (internal.Pointer, error) {
+	if m.typ.hasPerCPUValue() {
+		return marshalPerCPUValue(data, int(m.valueSize))
+	}
+
+	var (
+		buf []byte
+		err error
+	)
+
+	switch value := data.(type) {
+	case *Map:
+		if !m.typ.canStoreMap() {
+			return internal.Pointer{}, fmt.Errorf("can't store map in %s", m.typ)
+		}
+		buf, err = marshalMap(value, int(m.valueSize))
+
+	case *Program:
+		if !m.typ.canStoreProgram() {
+			return internal.Pointer{}, fmt.Errorf("can't store program in %s", m.typ)
+		}
+		buf, err = marshalProgram(value, int(m.valueSize))
+
+	default:
+		return marshalPtr(data, int(m.valueSize))
+	}
+
+	if err != nil {
+		return internal.Pointer{}, err
+	}
+
+	return internal.NewSlicePointer(buf), nil
+}
+
+func (m *Map) unmarshalValue(value interface{}, buf []byte) error {
+	if buf == nil {
+		// This is from a makeBuffer call, nothing to do here.
+		return nil
+	}
+
+	if m.typ.hasPerCPUValue() {
+		return unmarshalPerCPUValue(value, int(m.valueSize), buf)
+	}
+
+	switch value := value.(type) {
+	case **Map:
+		if !m.typ.canStoreMap() {
+			return fmt.Errorf("can't read a map from %s", m.typ)
+		}
+
+		other, err := unmarshalMap(buf)
+		if err != nil {
+			return err
+		}
+
+		// The caller might close the map externally, so ignore errors.
+		_ = (*value).Close()
+
+		*value = other
+		return nil
+
+	case *Map:
+		if !m.typ.canStoreMap() {
+			return fmt.Errorf("can't read a map from %s", m.typ)
+		}
+		return errors.New("require pointer to *Map")
+
+	case **Program:
+		if !m.typ.canStoreProgram() {
+			return fmt.Errorf("can't read a program from %s", m.typ)
+		}
+
+		other, err := unmarshalProgram(buf)
+		if err != nil {
+			return err
+		}
+
+		// The caller might close the program externally, so ignore errors.
+		_ = (*value).Close()
+
+		*value = other
+		return nil
+
+	case *Program:
+		if !m.typ.canStoreProgram() {
+			return fmt.Errorf("can't read a program from %s", m.typ)
+		}
+		return errors.New("require pointer to *Program")
+	}
+
+	return unmarshalBytes(value, buf)
+}
+
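marshalValue and unmarshalValue above are what let map-in-map and program-array types store handles: storing writes the object's fd, while a lookup yields an id that is re-opened as a fresh fd. A sketch, assuming a kernel with nested-map support (4.12+) and sufficient privileges; all names are illustrative:

```go
innerSpec := &ebpf.MapSpec{
	Type:       ebpf.Array,
	KeySize:    4,
	ValueSize:  4,
	MaxEntries: 1,
}
outer, err := ebpf.NewMap(&ebpf.MapSpec{
	Type:       ebpf.ArrayOfMaps,
	KeySize:    4,
	ValueSize:  4, // createMap forces 4 here in any case
	MaxEntries: 1,
	InnerMap:   innerSpec,
})
if err != nil {
	log.Fatal(err)
}
defer outer.Close()

child, err := ebpf.NewMap(innerSpec)
if err != nil {
	log.Fatal(err)
}
defer child.Close()

// Stores child's fd in slot 0.
if err := outer.Put(uint32(0), child); err != nil {
	log.Fatal(err)
}

// Lookup through **Map returns a new handle to the inner map.
var got *ebpf.Map
if err := outer.Lookup(uint32(0), &got); err != nil {
	log.Fatal(err)
}
defer got.Close()
```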
+// LoadPinnedMap loads a Map from a BPF file.
+func LoadPinnedMap(fileName string, opts *LoadPinOptions) (*Map, error) {
+	fd, err := internal.BPFObjGet(fileName, opts.Marshal())
+	if err != nil {
+		return nil, err
+	}
+
+	m, err := newMapFromFD(fd)
+	if err == nil {
+		m.pinnedPath = fileName
+	}
+
+	return m, err
+}
+
+// unmarshalMap creates a map from a map ID encoded in host endianness.
+func unmarshalMap(buf []byte) (*Map, error) {
+	if len(buf) != 4 {
+		return nil, errors.New("map id requires 4 byte value")
+	}
+
+	id := internal.NativeEndian.Uint32(buf)
+	return NewMapFromID(MapID(id))
+}
+
+// marshalMap marshals the fd of a map into a buffer in host endianness.
+func marshalMap(m *Map, length int) ([]byte, error) {
+	if length != 4 {
+		return nil, fmt.Errorf("can't marshal map to %d bytes", length)
+	}
+
+	fd, err := m.fd.Value()
+	if err != nil {
+		return nil, err
+	}
+
+	buf := make([]byte, 4)
+	internal.NativeEndian.PutUint32(buf, fd)
+	return buf, nil
+}
+
+func patchValue(value []byte, typ btf.Type, replacements map[string]interface{}) error {
+	replaced := make(map[string]bool)
+	replace := func(name string, offset, size int, replacement interface{}) error {
+		if offset+size > len(value) {
+			return fmt.Errorf("%s: offset %d(+%d) is out of bounds", name, offset, size)
+		}
+
+		buf, err := marshalBytes(replacement, size)
+		if err != nil {
+			return fmt.Errorf("marshal %s: %w", name, err)
+		}
+
+		copy(value[offset:offset+size], buf)
+		replaced[name] = true
+		return nil
+	}
+
+	switch parent := typ.(type) {
+	case *btf.Datasec:
+		for _, secinfo := range parent.Vars {
+			name := string(secinfo.Type.(*btf.Var).Name)
+			replacement, ok := replacements[name]
+			if !ok {
+				continue
+			}
+
+			err := replace(name, int(secinfo.Offset), int(secinfo.Size), replacement)
+			if err != nil {
+				return err
+			}
+		}
+
+	default:
+		return fmt.Errorf("patching %T is not supported", typ)
+	}
+
+	if len(replaced) == len(replacements) {
+		return nil
+	}
+
+	var missing []string
+	for name := range replacements {
+		if !replaced[name] {
+			missing = append(missing, name)
+		}
+	}
+
+	if len(missing) == 1 {
+		return fmt.Errorf("unknown field: %s", missing[0])
+	}
+
+	return fmt.Errorf("unknown fields: %s", strings.Join(missing, ","))
+}
+
+// MapIterator iterates a Map.
+//
+// See Map.Iterate.
+type MapIterator struct {
+	target            *Map
+	prevKey           interface{}
+	prevBytes         []byte
+	count, maxEntries uint32
+	done              bool
+	err               error
+}
+
+func newMapIterator(target *Map) *MapIterator {
+	return &MapIterator{
+		target:     target,
+		maxEntries: target.maxEntries,
+		prevBytes:  make([]byte, target.keySize),
+	}
+}
+
+// Next decodes the next key and value.
+//
+// Iterating a hash map from which keys are being deleted is not
+// safe. You may see the same key multiple times. Iteration may
+// also abort with an error, see ErrIterationAborted.
+//
+// Returns false if there are no more entries. You must check
+// the result of Err afterwards.
+//
+// See Map.Lookup for further caveats around valueOut.
+func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool {
+	if mi.err != nil || mi.done {
+		return false
+	}
+
+	// For array-like maps NextKeyBytes returns nil only after maxEntries
+	// iterations.
+	for mi.count <= mi.maxEntries {
+		var nextBytes []byte
+		nextBytes, mi.err = mi.target.NextKeyBytes(mi.prevKey)
+		if mi.err != nil {
+			return false
+		}
+
+		if nextBytes == nil {
+			mi.done = true
+			return false
+		}
+
+		// The user can get access to nextBytes since unmarshalBytes
+		// does not copy when unmarshaling into a []byte.
+		// Make a copy to prevent accidental corruption of
+		// iterator state.
+		copy(mi.prevBytes, nextBytes)
+		mi.prevKey = mi.prevBytes
+
+		mi.count++
+		mi.err = mi.target.Lookup(nextBytes, valueOut)
+		if errors.Is(mi.err, ErrKeyNotExist) {
+			// Even though the key should be valid, we couldn't look up
+			// its value. If we're iterating a hash map this is probably
+			// because a concurrent delete removed the value before we
+			// could get it. This means that the next call to NextKeyBytes
+			// is very likely to restart iteration.
+			// If we're iterating one of the fd maps like
+			// ProgramArray it means that a given slot doesn't have
+			// a valid fd associated. It's OK to continue to the next slot.
+			continue
+		}
+		if mi.err != nil {
+			return false
+		}
+
+		mi.err = mi.target.unmarshalKey(keyOut, nextBytes)
+		return mi.err == nil
+	}
+
+	mi.err = fmt.Errorf("%w", ErrIterationAborted)
+	return false
+}
+
+// Err returns any encountered error.
+//
+// The method must be called after Next returns false.
+//
+// Returns ErrIterationAborted if it wasn't possible to do a full iteration.
+func (mi *MapIterator) Err() error {
+	return mi.err
+}
+
+// MapGetNextID returns the ID of the next eBPF map.
+//
+// Returns ErrNotExist, if there is no next eBPF map.
+func MapGetNextID(startID MapID) (MapID, error) {
+	id, err := objGetNextID(internal.BPF_MAP_GET_NEXT_ID, uint32(startID))
+	return MapID(id), err
+}
+
+// NewMapFromID returns the map for a given id.
+//
+// Returns ErrNotExist, if there is no eBPF map with the given id.
+func NewMapFromID(id MapID) (*Map, error) {
+	fd, err := internal.BPFObjGetFDByID(internal.BPF_MAP_GET_FD_BY_ID, uint32(id))
+	if err != nil {
+		return nil, err
+	}
+
+	return newMapFromFD(fd)
+}
+
+// ID returns the systemwide unique ID of the map.
+//
+// Deprecated: use MapInfo.ID() instead.
+func (m *Map) ID() (MapID, error) {
+	info, err := bpfGetMapInfoByFD(m.fd)
+	if err != nil {
+		return MapID(0), err
+	}
+	return MapID(info.id), nil
+}
diff --git a/vendor/github.com/cilium/ebpf/marshalers.go b/vendor/github.com/cilium/ebpf/marshalers.go
new file mode 100644
index 0000000000..e461d673d7
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/marshalers.go
@@ -0,0 +1,253 @@
+package ebpf
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"reflect"
+	"runtime"
+	"sync"
+	"unsafe"
+
+	"github.com/cilium/ebpf/internal"
+)
+
+// marshalPtr converts an arbitrary value into a pointer suitable
+// to be passed to the kernel.
+//
+// As an optimization, it returns the original value if it is an
+// unsafe.Pointer.
+func marshalPtr(data interface{}, length int) (internal.Pointer, error) {
+	if ptr, ok := data.(unsafe.Pointer); ok {
+		return internal.NewPointer(ptr), nil
+	}
+
+	buf, err := marshalBytes(data, length)
+	if err != nil {
+		return internal.Pointer{}, err
+	}
+
+	return internal.NewSlicePointer(buf), nil
+}
+
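The MapIterator shown earlier gives up after more than maxEntries successful lookups, turning an endlessly restarting hash-map walk into ErrIterationAborted. Typical usage, continuing with the hypothetical `demoMap`:

```go
var (
	key uint32
	val uint64
)
iter := demoMap.Iterate()
for iter.Next(&key, &val) {
	fmt.Printf("key=%d val=%d\n", key, val)
}
if err := iter.Err(); err != nil {
	log.Fatal(err) // e.g. ErrIterationAborted under heavy concurrent churn
}
```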
+// marshalBytes converts an arbitrary value into a byte buffer.
+//
+// Prefer using Map.marshalKey and Map.marshalValue if possible, since
+// those have special cases that allow more types to be encoded.
+//
+// Returns an error if the given value isn't representable in exactly
+// length bytes.
+func marshalBytes(data interface{}, length int) (buf []byte, err error) {
+	if data == nil {
+		return nil, errors.New("can't marshal a nil value")
+	}
+
+	switch value := data.(type) {
+	case encoding.BinaryMarshaler:
+		buf, err = value.MarshalBinary()
+	case string:
+		buf = []byte(value)
+	case []byte:
+		buf = value
+	case unsafe.Pointer:
+		err = errors.New("can't marshal from unsafe.Pointer")
+	case Map, *Map, Program, *Program:
+		err = fmt.Errorf("can't marshal %T", value)
+	default:
+		var wr bytes.Buffer
+		err = binary.Write(&wr, internal.NativeEndian, value)
+		if err != nil {
+			err = fmt.Errorf("encoding %T: %v", value, err)
+		}
+		buf = wr.Bytes()
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	if len(buf) != length {
+		return nil, fmt.Errorf("%T doesn't marshal to %d bytes", data, length)
+	}
+	return buf, nil
+}
+
+func makeBuffer(dst interface{}, length int) (internal.Pointer, []byte) {
+	if ptr, ok := dst.(unsafe.Pointer); ok {
+		return internal.NewPointer(ptr), nil
+	}
+
+	buf := make([]byte, length)
+	return internal.NewSlicePointer(buf), buf
+}
+
+var bytesReaderPool = sync.Pool{
+	New: func() interface{} {
+		return new(bytes.Reader)
+	},
+}
+
+// unmarshalBytes converts a byte buffer into an arbitrary value.
+//
+// Prefer using Map.unmarshalKey and Map.unmarshalValue if possible, since
+// those have special cases that allow more types to be encoded.
+//
+// The common int32 and int64 types are directly handled to avoid
+// unnecessary heap allocations that would otherwise happen in the
+// default case.
+func unmarshalBytes(data interface{}, buf []byte) error {
+	switch value := data.(type) {
+	case unsafe.Pointer:
+		var dst []byte
+		// Use unsafe.Slice when we drop support for pre-1.17 Go
+		// (https://github.com/golang/go/issues/19367).
+		// We could opt for removing unsafe.Pointer support in the lib as well.
+		sh := (*reflect.SliceHeader)(unsafe.Pointer(&dst))
+		sh.Data = uintptr(value)
+		sh.Len = len(buf)
+		sh.Cap = len(buf)
+
+		copy(dst, buf)
+		runtime.KeepAlive(value)
+		return nil
+	case Map, *Map, Program, *Program:
+		return fmt.Errorf("can't unmarshal into %T", value)
+	case encoding.BinaryUnmarshaler:
+		return value.UnmarshalBinary(buf)
+	case *string:
+		*value = string(buf)
+		return nil
+	case *[]byte:
+		*value = buf
+		return nil
+	case *int32:
+		if len(buf) < 4 {
+			return errors.New("int32 requires 4 bytes")
+		}
+		*value = int32(internal.NativeEndian.Uint32(buf))
+		return nil
+	case *uint32:
+		if len(buf) < 4 {
+			return errors.New("uint32 requires 4 bytes")
+		}
+		*value = internal.NativeEndian.Uint32(buf)
+		return nil
+	case *int64:
+		if len(buf) < 8 {
+			return errors.New("int64 requires 8 bytes")
+		}
+		*value = int64(internal.NativeEndian.Uint64(buf))
+		return nil
+	case *uint64:
+		if len(buf) < 8 {
+			return errors.New("uint64 requires 8 bytes")
+		}
+		*value = internal.NativeEndian.Uint64(buf)
+		return nil
+	case string:
+		return errors.New("require pointer to string")
+	case []byte:
+		return errors.New("require pointer to []byte")
+	default:
+		rd := bytesReaderPool.Get().(*bytes.Reader)
+		rd.Reset(buf)
+		defer bytesReaderPool.Put(rd)
+		if err := binary.Read(rd, internal.NativeEndian, value); err != nil {
+			return fmt.Errorf("decoding %T: %v", value, err)
+		}
+		return nil
+	}
+}
+
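The per-CPU marshalers that follow pack one 8-byte-aligned element per possible CPU into a single buffer, so from the caller's side per-CPU values travel as slices. A sketch under the same assumptions as the earlier examples:

```go
percpu, err := ebpf.NewMap(&ebpf.MapSpec{
	Type:       ebpf.PerCPUArray,
	KeySize:    4,
	ValueSize:  8,
	MaxEntries: 1,
})
if err != nil {
	log.Fatal(err)
}
defer percpu.Close()

// Writing fewer elements than possible CPUs zero-fills the rest.
if err := percpu.Put(uint32(0), []uint64{42}); err != nil {
	log.Fatal(err)
}

// Lookup returns one element per possible CPU.
var perCPU []uint64
if err := percpu.Lookup(uint32(0), &perCPU); err != nil {
	log.Fatal(err)
}
fmt.Printf("CPU0 saw %d, %d possible CPUs\n", perCPU[0], len(perCPU))
```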
+// marshalPerCPUValue encodes a slice containing one value per
+// possible CPU into a buffer of bytes.
+//
+// Values are initialized to zero if the slice has fewer elements than CPUs.
+//
+// slice must have a type like []elementType.
+func marshalPerCPUValue(slice interface{}, elemLength int) (internal.Pointer, error) {
+	sliceType := reflect.TypeOf(slice)
+	if sliceType.Kind() != reflect.Slice {
+		return internal.Pointer{}, errors.New("per-CPU value requires slice")
+	}
+
+	possibleCPUs, err := internal.PossibleCPUs()
+	if err != nil {
+		return internal.Pointer{}, err
+	}
+
+	sliceValue := reflect.ValueOf(slice)
+	sliceLen := sliceValue.Len()
+	if sliceLen > possibleCPUs {
+		return internal.Pointer{}, fmt.Errorf("per-CPU value exceeds number of CPUs")
+	}
+
+	alignedElemLength := internal.Align(elemLength, 8)
+	buf := make([]byte, alignedElemLength*possibleCPUs)
+
+	for i := 0; i < sliceLen; i++ {
+		elem := sliceValue.Index(i).Interface()
+		elemBytes, err := marshalBytes(elem, elemLength)
+		if err != nil {
+			return internal.Pointer{}, err
+		}
+
+		offset := i * alignedElemLength
+		copy(buf[offset:offset+elemLength], elemBytes)
+	}
+
+	return internal.NewSlicePointer(buf), nil
+}
+
+// unmarshalPerCPUValue decodes a buffer into a slice containing one value per
+// possible CPU.
+//
+// slicePtr must have a type like *[]elementType.
+func unmarshalPerCPUValue(slicePtr interface{}, elemLength int, buf []byte) error {
+	slicePtrType := reflect.TypeOf(slicePtr)
+	if slicePtrType.Kind() != reflect.Ptr || slicePtrType.Elem().Kind() != reflect.Slice {
+		return fmt.Errorf("per-cpu value requires pointer to slice")
+	}
+
+	possibleCPUs, err := internal.PossibleCPUs()
+	if err != nil {
+		return err
+	}
+
+	sliceType := slicePtrType.Elem()
+	slice := reflect.MakeSlice(sliceType, possibleCPUs, possibleCPUs)
+
+	sliceElemType := sliceType.Elem()
+	sliceElemIsPointer := sliceElemType.Kind() == reflect.Ptr
+	if sliceElemIsPointer {
+		sliceElemType = sliceElemType.Elem()
+	}
+
+	step := len(buf) / possibleCPUs
+	if step < elemLength {
+		return fmt.Errorf("per-cpu element length is larger than available data")
+	}
+	for i := 0; i < possibleCPUs; i++ {
+		var elem interface{}
+		if sliceElemIsPointer {
+			newElem := reflect.New(sliceElemType)
+			slice.Index(i).Set(newElem)
+			elem = newElem.Interface()
+		} else {
+			elem = slice.Index(i).Addr().Interface()
+		}
+
+		// Make a copy, since unmarshal can hold on to elemBytes.
+		elemBytes := make([]byte, elemLength)
+		copy(elemBytes, buf[:elemLength])
+
+		err := unmarshalBytes(elem, elemBytes)
+		if err != nil {
+			return fmt.Errorf("cpu %d: %w", i, err)
+		}
+
+		buf = buf[step:]
+	}
+
+	reflect.ValueOf(slicePtr).Elem().Set(slice)
+	return nil
+}
diff --git a/vendor/github.com/cilium/ebpf/prog.go b/vendor/github.com/cilium/ebpf/prog.go
new file mode 100644
index 0000000000..3549a3fe3f
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/prog.go
@@ -0,0 +1,761 @@
+package ebpf
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/cilium/ebpf/asm"
+	"github.com/cilium/ebpf/internal"
+	"github.com/cilium/ebpf/internal/btf"
+	"github.com/cilium/ebpf/internal/unix"
+)
+
+// ErrNotSupported is returned whenever the kernel doesn't support a feature.
+var ErrNotSupported = internal.ErrNotSupported
+
+var errUnsatisfiedReference = errors.New("unsatisfied reference")
+
+// ProgramID represents the unique ID of an eBPF program.
+type ProgramID uint32
+
+const (
+	// Number of bytes to pad the output buffer for BPF_PROG_TEST_RUN.
+	// This is currently the maximum of spare space allocated for SKB
+	// and XDP programs, and equal to XDP_PACKET_HEADROOM + NET_IP_ALIGN.
+	outputPad = 256 + 2
+)
+
+// DefaultVerifierLogSize is the default number of bytes allocated for the
+// verifier log.
+const DefaultVerifierLogSize = 64 * 1024
+
+// ProgramOptions control loading a program into the kernel.
+type ProgramOptions struct {
+	// Controls the detail emitted by the kernel verifier. Set to non-zero
+	// to enable logging.
+	LogLevel uint32
+	// Controls the output buffer size for the verifier. Defaults to
+	// DefaultVerifierLogSize.
+	LogSize int
+	// An ELF containing the target BTF for this program. It is used both to
+	// find the correct function to trace and to apply CO-RE relocations.
+	// This is useful in environments where the kernel BTF is not available
+	// (containers) or where it is in a non-standard location. Defaults to
+	// use the kernel BTF from a well-known location.
+	TargetBTF io.ReaderAt
+}
+
+// ProgramSpec defines a Program.
+type ProgramSpec struct {
+	// Name is passed to the kernel as a debug aid. Must only contain
+	// alphanumeric and '_' characters.
+	Name string
+
+	// Type determines at which hook in the kernel a program will run.
+	Type       ProgramType
+	AttachType AttachType
+	// Name of a kernel data structure or function to attach to. Its
+	// interpretation depends on Type and AttachType.
+	AttachTo string
+	// The program to attach to. Must be provided manually.
+	AttachTarget *Program
+	Instructions asm.Instructions
+
+	// Flags is passed to the kernel and specifies additional program
+	// load attributes.
+	Flags uint32
+
+	// License of the program. Some helpers are only available if
+	// the license is deemed compatible with the GPL.
+	//
+	// See https://www.kernel.org/doc/html/latest/process/license-rules.html#id1
+	License string
+
+	// Version used by Kprobe programs.
+	//
+	// Deprecated on kernels 5.0 and later. Leave empty to let the library
+	// detect this value automatically.
+	KernelVersion uint32
+
+	// The BTF associated with this program. Changing Instructions
+	// will most likely invalidate the contained data, and may
+	// result in errors when attempting to load it into the kernel.
+	BTF *btf.Program
+
+	// The byte order this program was compiled for, may be nil.
+	ByteOrder binary.ByteOrder
+}
+
+// Copy returns a copy of the spec.
+func (ps *ProgramSpec) Copy() *ProgramSpec {
+	if ps == nil {
+		return nil
+	}
+
+	cpy := *ps
+	cpy.Instructions = make(asm.Instructions, len(ps.Instructions))
+	copy(cpy.Instructions, ps.Instructions)
+	return &cpy
+}
+
+// Tag calculates the kernel tag for a series of instructions.
+//
+// Use asm.Instructions.Tag if you need to calculate for non-native endianness.
+func (ps *ProgramSpec) Tag() (string, error) {
+	return ps.Instructions.Tag(internal.NativeEndian)
+}
+
+// Program represents a BPF program loaded into the kernel.
+//
+// It is not safe to close a Program which is used by other goroutines.
+type Program struct {
+	// Contains the output of the kernel verifier if enabled,
+	// otherwise it is empty.
+	VerifierLog string
+
+	fd         *internal.FD
+	name       string
+	pinnedPath string
+	typ        ProgramType
+}
+
+// NewProgram creates a new Program.
+//
+// Loading a program for the first time will perform
+// feature detection by loading small, temporary programs.
+func NewProgram(spec *ProgramSpec) (*Program, error) {
+	return NewProgramWithOptions(spec, ProgramOptions{})
+}
+
+// NewProgramWithOptions creates a new Program.
+//
+// Loading a program for the first time will perform
+// feature detection by loading small, temporary programs.
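The smallest spec these constructors will accept mirrors the feature probe used later in this file: a socket filter that just returns 0. A sketch with illustrative names, using only the instruction sequence already present in this diff:

```go
import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/asm"
)

func loadDemoFilter() *ebpf.Program {
	prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
		Name: "demo_filter",
		Type: ebpf.SocketFilter,
		Instructions: asm.Instructions{
			asm.LoadImm(asm.R0, 0, asm.DWord), // r0 = 0
			asm.Return(),
		},
		License: "MIT",
	})
	if err != nil {
		log.Fatal(err)
	}
	return prog
}
```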
+func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) { + handles := newHandleCache() + defer handles.close() + + prog, err := newProgramWithOptions(spec, opts, handles) + if errors.Is(err, errUnsatisfiedReference) { + return nil, fmt.Errorf("cannot load program without loading its whole collection: %w", err) + } + return prog, err +} + +func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *handleCache) (*Program, error) { + if len(spec.Instructions) == 0 { + return nil, errors.New("instructions cannot be empty") + } + + if spec.ByteOrder != nil && spec.ByteOrder != internal.NativeEndian { + return nil, fmt.Errorf("can't load %s program on %s", spec.ByteOrder, internal.NativeEndian) + } + + // Kernels before 5.0 (6c4fc209fcf9 "bpf: remove useless version check for prog load") + // require the version field to be set to the value of the KERNEL_VERSION + // macro for kprobe-type programs. + // Overwrite Kprobe program version if set to zero or the magic version constant. + kv := spec.KernelVersion + if spec.Type == Kprobe && (kv == 0 || kv == internal.MagicKernelVersion) { + v, err := internal.KernelVersion() + if err != nil { + return nil, fmt.Errorf("detecting kernel version: %w", err) + } + kv = v.Kernel() + } + + attr := &internal.BPFProgLoadAttr{ + ProgType: uint32(spec.Type), + ProgFlags: spec.Flags, + ExpectedAttachType: uint32(spec.AttachType), + License: internal.NewStringPointer(spec.License), + KernelVersion: kv, + } + + if haveObjName() == nil { + attr.ProgName = internal.NewBPFObjName(spec.Name) + } + + var err error + var targetBTF *btf.Spec + if opts.TargetBTF != nil { + targetBTF, err = handles.btfSpec(opts.TargetBTF) + if err != nil { + return nil, fmt.Errorf("load target BTF: %w", err) + } + } + + var btfDisabled bool + var core btf.COREFixups + if spec.BTF != nil { + core, err = spec.BTF.Fixups(targetBTF) + if err != nil { + return nil, fmt.Errorf("CO-RE relocations: %w", err) + } + + handle, err := handles.btfHandle(spec.BTF.Spec()) + btfDisabled = errors.Is(err, btf.ErrNotSupported) + if err != nil && !btfDisabled { + return nil, fmt.Errorf("load BTF: %w", err) + } + + if handle != nil { + attr.ProgBTFFd = uint32(handle.FD()) + + recSize, bytes, err := spec.BTF.LineInfos() + if err != nil { + return nil, fmt.Errorf("get BTF line infos: %w", err) + } + attr.LineInfoRecSize = recSize + attr.LineInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize)) + attr.LineInfo = internal.NewSlicePointer(bytes) + + recSize, bytes, err = spec.BTF.FuncInfos() + if err != nil { + return nil, fmt.Errorf("get BTF function infos: %w", err) + } + attr.FuncInfoRecSize = recSize + attr.FuncInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize)) + attr.FuncInfo = internal.NewSlicePointer(bytes) + } + } + + insns, err := core.Apply(spec.Instructions) + if err != nil { + return nil, fmt.Errorf("CO-RE fixup: %w", err) + } + + if err := fixupJumpsAndCalls(insns); err != nil { + return nil, err + } + + buf := bytes.NewBuffer(make([]byte, 0, len(spec.Instructions)*asm.InstructionSize)) + err = insns.Marshal(buf, internal.NativeEndian) + if err != nil { + return nil, err + } + + bytecode := buf.Bytes() + attr.Instructions = internal.NewSlicePointer(bytecode) + attr.InsCount = uint32(len(bytecode) / asm.InstructionSize) + + if spec.AttachTo != "" { + if spec.AttachTarget != nil { + info, err := spec.AttachTarget.Info() + if err != nil { + return nil, fmt.Errorf("load target BTF: %w", err) + } + + btfID, ok := info.BTFID() + if !ok { + return nil, 
fmt.Errorf("load target BTF: no BTF info available")
+			}
+			btfHandle, err := btf.NewHandleFromID(btfID)
+			if err != nil {
+				return nil, fmt.Errorf("load target BTF: %w", err)
+			}
+			defer btfHandle.Close()
+
+			targetBTF = btfHandle.Spec()
+			if err != nil {
+				return nil, fmt.Errorf("load target BTF: %w", err)
+			}
+		}
+
+		target, err := resolveBTFType(targetBTF, spec.AttachTo, spec.Type, spec.AttachType)
+		if err != nil {
+			return nil, err
+		}
+		if target != nil {
+			attr.AttachBTFID = uint32(target.ID())
+		}
+		if spec.AttachTarget != nil {
+			attr.AttachProgFd = uint32(spec.AttachTarget.FD())
+		}
+	}
+
+	logSize := DefaultVerifierLogSize
+	if opts.LogSize > 0 {
+		logSize = opts.LogSize
+	}
+
+	var logBuf []byte
+	if opts.LogLevel > 0 {
+		logBuf = make([]byte, logSize)
+		attr.LogLevel = opts.LogLevel
+		attr.LogSize = uint32(len(logBuf))
+		attr.LogBuf = internal.NewSlicePointer(logBuf)
+	}
+
+	fd, err := internal.BPFProgLoad(attr)
+	if err == nil {
+		return &Program{internal.CString(logBuf), fd, spec.Name, "", spec.Type}, nil
+	}
+
+	logErr := err
+	if opts.LogLevel == 0 && opts.LogSize >= 0 {
+		// Re-run with the verifier enabled to get better error messages.
+		logBuf = make([]byte, logSize)
+		attr.LogLevel = 1
+		attr.LogSize = uint32(len(logBuf))
+		attr.LogBuf = internal.NewSlicePointer(logBuf)
+
+		fd, logErr = internal.BPFProgLoad(attr)
+		if logErr == nil {
+			fd.Close()
+		}
+	}
+
+	if errors.Is(logErr, unix.EPERM) && logBuf[0] == 0 {
+		// EPERM due to RLIMIT_MEMLOCK happens before the verifier, so we can
+		// check that the log is empty to reduce false positives.
+		return nil, fmt.Errorf("load program: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", logErr)
+	}
+
+	err = internal.ErrorWithLog(err, logBuf, logErr)
+	if btfDisabled {
+		return nil, fmt.Errorf("load program without BTF: %w", err)
+	}
+	return nil, fmt.Errorf("load program: %w", err)
+}
+
+// NewProgramFromFD creates a program from a raw fd.
+//
+// You should not use fd after calling this function.
+//
+// Requires at least Linux 4.10.
+func NewProgramFromFD(fd int) (*Program, error) {
+	if fd < 0 {
+		return nil, errors.New("invalid fd")
+	}
+
+	return newProgramFromFD(internal.NewFD(uint32(fd)))
+}
+
+// NewProgramFromID returns the program for a given id.
+//
+// Returns ErrNotExist, if there is no eBPF program with the given id.
+func NewProgramFromID(id ProgramID) (*Program, error) {
+	fd, err := internal.BPFObjGetFDByID(internal.BPF_PROG_GET_FD_BY_ID, uint32(id))
+	if err != nil {
+		return nil, fmt.Errorf("get program by id: %w", err)
+	}
+
+	return newProgramFromFD(fd)
+}
+
+func newProgramFromFD(fd *internal.FD) (*Program, error) {
+	info, err := newProgramInfoFromFd(fd)
+	if err != nil {
+		fd.Close()
+		return nil, fmt.Errorf("discover program type: %w", err)
+	}
+
+	return &Program{"", fd, "", "", info.Type}, nil
+}
+
+func (p *Program) String() string {
+	if p.name != "" {
+		return fmt.Sprintf("%s(%s)#%v", p.typ, p.name, p.fd)
+	}
+	return fmt.Sprintf("%s(%v)", p.typ, p.fd)
+}
+
+// Type returns the underlying type of the program.
+func (p *Program) Type() ProgramType {
+	return p.typ
+}
+
+// Info returns metadata about the program.
+//
+// Requires at least 4.10.
+func (p *Program) Info() (*ProgramInfo, error) {
+	return newProgramInfoFromFd(p.fd)
+}
+
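Because the loader above only re-runs with logging enabled when the first load fails, callers who always want the verifier trace must ask for it explicitly. A sketch, with `spec` as in the earlier program example:

```go
prog, err := ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{
	LogLevel: 1, // request the verifier trace up front
	LogSize:  ebpf.DefaultVerifierLogSize,
})
if err != nil {
	log.Fatal(err) // on failure the error already embeds the log
}
defer prog.Close()
fmt.Println(prog.VerifierLog)
```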
+// FD gets the file descriptor of the Program.
+//
+// It is invalid to call this function after Close has been called.
+func (p *Program) FD() int {
+	fd, err := p.fd.Value()
+	if err != nil {
+		// Best effort: -1 is the number most likely to be an
+		// invalid file descriptor.
+		return -1
+	}
+
+	return int(fd)
+}
+
+// Clone creates a duplicate of the Program.
+//
+// Closing the duplicate does not affect the original, and vice versa.
+//
+// Cloning a nil Program returns nil.
+func (p *Program) Clone() (*Program, error) {
+	if p == nil {
+		return nil, nil
+	}
+
+	dup, err := p.fd.Dup()
+	if err != nil {
+		return nil, fmt.Errorf("can't clone program: %w", err)
+	}
+
+	return &Program{p.VerifierLog, dup, p.name, "", p.typ}, nil
+}
+
+// Pin persists the Program on the BPF virtual file system past the lifetime of
+// the process that created it.
+//
+// Calling Pin on a previously pinned program will overwrite the path, except when
+// the new path already exists. Re-pinning across filesystems is not supported.
+//
+// This requires bpffs to be mounted above fileName. See https://docs.cilium.io/en/k8s-doc/admin/#admin-mount-bpffs
+func (p *Program) Pin(fileName string) error {
+	if err := internal.Pin(p.pinnedPath, fileName, p.fd); err != nil {
+		return err
+	}
+	p.pinnedPath = fileName
+	return nil
+}
+
+// Unpin removes the persisted state for the Program from the BPF virtual filesystem.
+//
+// Failed calls to Unpin will not alter the state returned by IsPinned.
+//
+// Unpinning an unpinned Program returns nil.
+func (p *Program) Unpin() error {
+	if err := internal.Unpin(p.pinnedPath); err != nil {
+		return err
+	}
+	p.pinnedPath = ""
+	return nil
+}
+
+// IsPinned returns true if the Program has a non-empty pinned path.
+func (p *Program) IsPinned() bool {
+	return p.pinnedPath != ""
+}
+
+// Close unloads the program from the kernel.
+func (p *Program) Close() error {
+	if p == nil {
+		return nil
+	}
+
+	return p.fd.Close()
+}
+
+// Test runs the Program in the kernel with the given input and returns the
+// value returned by the eBPF program. outLen may be zero.
+//
+// Note: the kernel expects at least 14 bytes input for an Ethernet header for
+// XDP and SKB programs.
+//
+// This function requires at least Linux 4.12.
+func (p *Program) Test(in []byte) (uint32, []byte, error) {
+	ret, out, _, err := p.testRun(in, 1, nil)
+	if err != nil {
+		return ret, nil, fmt.Errorf("can't test program: %w", err)
+	}
+	return ret, out, nil
+}
+
+// Benchmark runs the Program with the given input a number of times
+// and returns the time taken per iteration.
+//
+// Returns the result of the last execution of the program and the time per
+// run or an error. reset is called whenever the benchmark syscall is
+// interrupted, and should be set to testing.B.ResetTimer or similar.
+//
+// Note: profiling a call to this function will skew its results, see
+// https://github.com/cilium/ebpf/issues/24
+//
+// This function requires at least Linux 4.12.
+func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.Duration, error) {
+	ret, _, total, err := p.testRun(in, repeat, reset)
+	if err != nil {
+		return ret, total, fmt.Errorf("can't benchmark program: %w", err)
+	}
+	return ret, total, nil
+}
+
+var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() error {
+	prog, err := NewProgram(&ProgramSpec{
+		Type: SocketFilter,
+		Instructions: asm.Instructions{
+			asm.LoadImm(asm.R0, 0, asm.DWord),
+			asm.Return(),
+		},
+		License: "MIT",
+	})
+	if err != nil {
+		// This may be because we lack sufficient permissions, etc.
+ return err + } + defer prog.Close() + + // Programs require at least 14 bytes input + in := make([]byte, 14) + attr := bpfProgTestRunAttr{ + fd: uint32(prog.FD()), + dataSizeIn: uint32(len(in)), + dataIn: internal.NewSlicePointer(in), + } + + err = bpfProgTestRun(&attr) + if errors.Is(err, unix.EINVAL) { + // Check for EINVAL specifically, rather than err != nil since we + // otherwise misdetect due to insufficient permissions. + return internal.ErrNotSupported + } + if errors.Is(err, unix.EINTR) { + // We know that PROG_TEST_RUN is supported if we get EINTR. + return nil + } + return err +}) + +func (p *Program) testRun(in []byte, repeat int, reset func()) (uint32, []byte, time.Duration, error) { + if uint(repeat) > math.MaxUint32 { + return 0, nil, 0, fmt.Errorf("repeat is too high") + } + + if len(in) == 0 { + return 0, nil, 0, fmt.Errorf("missing input") + } + + if uint(len(in)) > math.MaxUint32 { + return 0, nil, 0, fmt.Errorf("input is too long") + } + + if err := haveProgTestRun(); err != nil { + return 0, nil, 0, err + } + + // Older kernels ignore the dataSizeOut argument when copying to user space. + // Combined with things like bpf_xdp_adjust_head() we don't really know what the final + // size will be. Hence we allocate an output buffer which we hope will always be large + // enough, and panic if the kernel wrote past the end of the allocation. + // See https://patchwork.ozlabs.org/cover/1006822/ + out := make([]byte, len(in)+outputPad) + + fd, err := p.fd.Value() + if err != nil { + return 0, nil, 0, err + } + + attr := bpfProgTestRunAttr{ + fd: fd, + dataSizeIn: uint32(len(in)), + dataSizeOut: uint32(len(out)), + dataIn: internal.NewSlicePointer(in), + dataOut: internal.NewSlicePointer(out), + repeat: uint32(repeat), + } + + for { + err = bpfProgTestRun(&attr) + if err == nil { + break + } + + if errors.Is(err, unix.EINTR) { + if reset != nil { + reset() + } + continue + } + + return 0, nil, 0, fmt.Errorf("can't run test: %w", err) + } + + if int(attr.dataSizeOut) > cap(out) { + // Houston, we have a problem. The program created more data than we allocated, + // and the kernel wrote past the end of our buffer. + panic("kernel wrote past end of output buffer") + } + out = out[:int(attr.dataSizeOut)] + + total := time.Duration(attr.duration) * time.Nanosecond + return attr.retval, out, total, nil +} + +func unmarshalProgram(buf []byte) (*Program, error) { + if len(buf) != 4 { + return nil, errors.New("program id requires 4 byte value") + } + + // Looking up an entry in a nested map or prog array returns an id, + // not an fd. + id := internal.NativeEndian.Uint32(buf) + return NewProgramFromID(ProgramID(id)) +} + +func marshalProgram(p *Program, length int) ([]byte, error) { + if length != 4 { + return nil, fmt.Errorf("can't marshal program to %d bytes", length) + } + + value, err := p.fd.Value() + if err != nil { + return nil, err + } + + buf := make([]byte, 4) + internal.NativeEndian.PutUint32(buf, value) + return buf, nil +} + +// Attach a Program. +// +// Deprecated: use link.RawAttachProgram instead. +func (p *Program) Attach(fd int, typ AttachType, flags AttachFlags) error { + if fd < 0 { + return errors.New("invalid fd") + } + + pfd, err := p.fd.Value() + if err != nil { + return err + } + + attr := internal.BPFProgAttachAttr{ + TargetFd: uint32(fd), + AttachBpfFd: pfd, + AttachType: uint32(typ), + AttachFlags: uint32(flags), + } + + return internal.BPFProgAttach(&attr) +} + +// Detach a Program. +// +// Deprecated: use link.RawDetachProgram instead. 
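Test and Benchmark above both funnel through testRun and its BPF_PROG_TEST_RUN retry loop. A sketch of a dry run against the hypothetical demo filter from earlier (kernel 4.12+):

```go
in := make([]byte, 14) // at least one Ethernet header's worth of input
ret, out, err := prog.Test(in)
if err != nil {
	log.Fatal(err)
}
fmt.Printf("program returned %d, produced %d bytes of output\n", ret, len(out))
```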
+func (p *Program) Detach(fd int, typ AttachType, flags AttachFlags) error { + if fd < 0 { + return errors.New("invalid fd") + } + + if flags != 0 { + return errors.New("flags must be zero") + } + + pfd, err := p.fd.Value() + if err != nil { + return err + } + + attr := internal.BPFProgDetachAttr{ + TargetFd: uint32(fd), + AttachBpfFd: pfd, + AttachType: uint32(typ), + } + + return internal.BPFProgDetach(&attr) +} + +// LoadPinnedProgram loads a Program from a BPF file. +// +// Requires at least Linux 4.11. +func LoadPinnedProgram(fileName string, opts *LoadPinOptions) (*Program, error) { + fd, err := internal.BPFObjGet(fileName, opts.Marshal()) + if err != nil { + return nil, err + } + + info, err := newProgramInfoFromFd(fd) + if err != nil { + _ = fd.Close() + return nil, fmt.Errorf("info for %s: %w", fileName, err) + } + + return &Program{"", fd, filepath.Base(fileName), fileName, info.Type}, nil +} + +// SanitizeName replaces all invalid characters in name with replacement. +// Passing a negative value for replacement will delete characters instead +// of replacing them. Use this to automatically generate valid names for maps +// and programs at runtime. +// +// The set of allowed characters depends on the running kernel version. +// Dots are only allowed as of kernel 5.2. +func SanitizeName(name string, replacement rune) string { + return strings.Map(func(char rune) rune { + if invalidBPFObjNameChar(char) { + return replacement + } + return char + }, name) +} + +// ProgramGetNextID returns the ID of the next eBPF program. +// +// Returns ErrNotExist, if there is no next eBPF program. +func ProgramGetNextID(startID ProgramID) (ProgramID, error) { + id, err := objGetNextID(internal.BPF_PROG_GET_NEXT_ID, uint32(startID)) + return ProgramID(id), err +} + +// ID returns the systemwide unique ID of the program. +// +// Deprecated: use ProgramInfo.ID() instead. +func (p *Program) ID() (ProgramID, error) { + info, err := bpfGetProgInfoByFD(p.fd, nil) + if err != nil { + return ProgramID(0), err + } + return ProgramID(info.id), nil +} + +func resolveBTFType(spec *btf.Spec, name string, progType ProgramType, attachType AttachType) (btf.Type, error) { + type match struct { + p ProgramType + a AttachType + } + + var typeName, featureName string + switch (match{progType, attachType}) { + case match{LSM, AttachLSMMac}: + typeName = "bpf_lsm_" + name + featureName = name + " LSM hook" + case match{Tracing, AttachTraceIter}: + typeName = "bpf_iter_" + name + featureName = name + " iterator" + case match{Extension, AttachNone}: + typeName = name + featureName = fmt.Sprintf("freplace %s", name) + default: + return nil, nil + } + + if spec == nil { + var err error + spec, err = btf.LoadKernelSpec() + if err != nil { + return nil, fmt.Errorf("load kernel spec: %w", err) + } + } + + var target *btf.Func + err := spec.FindType(typeName, &target) + if errors.Is(err, btf.ErrNotFound) { + return nil, &internal.UnsupportedFeatureError{ + Name: featureName, + } + } + if err != nil { + return nil, fmt.Errorf("resolve BTF for %s: %w", featureName, err) + } + + return target, nil +} diff --git a/vendor/github.com/cilium/ebpf/run-tests.sh b/vendor/github.com/cilium/ebpf/run-tests.sh new file mode 100644 index 0000000000..a079edc7e1 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/run-tests.sh @@ -0,0 +1,125 @@ +#!/bin/bash +# Test the current package under a different kernel. +# Requires virtme and qemu to be installed. 
+# Examples: +# Run all tests on a 5.4 kernel +# $ ./run-tests.sh 5.4 +# Run a subset of tests: +# $ ./run-tests.sh 5.4 ./link + +set -euo pipefail + +script="$(realpath "$0")" +readonly script + +# This script is a bit like a Matryoshka doll since it keeps re-executing itself +# in various different contexts: +# +# 1. invoked by the user like run-tests.sh 5.4 +# 2. invoked by go test like run-tests.sh --exec-vm +# 3. invoked by init in the vm like run-tests.sh --exec-test +# +# This allows us to use all available CPU on the host machine to compile our +# code, and then only use the VM to execute the test. This is because the VM +# is usually slower at compiling than the host. +if [[ "${1:-}" = "--exec-vm" ]]; then + shift + + input="$1" + shift + + # Use sudo if /dev/kvm isn't accessible by the current user. + sudo="" + if [[ ! -r /dev/kvm || ! -w /dev/kvm ]]; then + sudo="sudo" + fi + readonly sudo + + testdir="$(dirname "$1")" + output="$(mktemp -d)" + printf -v cmd "%q " "$@" + + if [[ "$(stat -c '%t:%T' -L /proc/$$/fd/0)" == "1:3" ]]; then + # stdin is /dev/null, which doesn't play well with qemu. Use a fifo as a + # blocking substitute. + mkfifo "${output}/fake-stdin" + # Open for reading and writing to avoid blocking. + exec 0<> "${output}/fake-stdin" + rm "${output}/fake-stdin" + fi + + if ! $sudo virtme-run --kimg "${input}/bzImage" --memory 768M --pwd \ + --rwdir="${testdir}=${testdir}" \ + --rodir=/run/input="${input}" \ + --rwdir=/run/output="${output}" \ + --script-sh "PATH=\"$PATH\" \"$script\" --exec-test $cmd" \ + --kopt possible_cpus=2; then # need at least two CPUs for some tests + exit 23 + fi + + if [[ ! -e "${output}/success" ]]; then + exit 42 + fi + + $sudo rm -r "$output" + exit 0 +elif [[ "${1:-}" = "--exec-test" ]]; then + shift + + mount -t bpf bpf /sys/fs/bpf + mount -t tracefs tracefs /sys/kernel/debug/tracing + + if [[ -d "/run/input/bpf" ]]; then + export KERNEL_SELFTESTS="/run/input/bpf" + fi + + dmesg -C + if ! "$@"; then + dmesg + exit 1 # this return code is "swallowed" by qemu + fi + touch "/run/output/success" + exit 0 +fi + +readonly kernel_version="${1:-}" +if [[ -z "${kernel_version}" ]]; then + echo "Expecting kernel version as first argument" + exit 1 +fi +shift + +readonly kernel="linux-${kernel_version}.bz" +readonly selftests="linux-${kernel_version}-selftests-bpf.bz" +readonly input="$(mktemp -d)" +readonly tmp_dir="${TMPDIR:-/tmp}" +readonly branch="${BRANCH:-master}" + +fetch() { + echo Fetching "${1}" + wget -nv -N -P "${tmp_dir}" "https://github.com/cilium/ci-kernels/raw/${branch}/${1}" +} + +fetch "${kernel}" +cp "${tmp_dir}/${kernel}" "${input}/bzImage" + +if fetch "${selftests}"; then + mkdir "${input}/bpf" + tar --strip-components=4 -xjf "${tmp_dir}/${selftests}" -C "${input}/bpf" +else + echo "No selftests found, disabling" +fi + +args=(-short -coverpkg=./... -coverprofile=coverage.out -count 1 ./...) 
+if (( $# > 0 )); then
+  args=("$@")
+fi
+
+export GOFLAGS=-mod=readonly
+export CGO_ENABLED=0
+
+echo Testing on "${kernel_version}"
+go test -exec "$script --exec-vm $input" "${args[@]}"
+echo "Test successful on ${kernel_version}"
+
+rm -r "${input}"
diff --git a/vendor/github.com/cilium/ebpf/syscalls.go b/vendor/github.com/cilium/ebpf/syscalls.go
new file mode 100644
index 0000000000..f8cb5f0e0c
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/syscalls.go
@@ -0,0 +1,464 @@
+package ebpf
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"os"
+	"unsafe"
+
+	"github.com/cilium/ebpf/asm"
+	"github.com/cilium/ebpf/internal"
+	"github.com/cilium/ebpf/internal/unix"
+)
+
+// ErrNotExist is returned when loading a non-existing map or program.
+//
+// Deprecated: use os.ErrNotExist instead.
+var ErrNotExist = os.ErrNotExist
+
+// invalidBPFObjNameChar returns true if char may not appear in
+// a BPF object name.
+func invalidBPFObjNameChar(char rune) bool {
+	dotAllowed := objNameAllowsDot() == nil
+
+	switch {
+	case char >= 'A' && char <= 'Z':
+		return false
+	case char >= 'a' && char <= 'z':
+		return false
+	case char >= '0' && char <= '9':
+		return false
+	case dotAllowed && char == '.':
+		return false
+	case char == '_':
+		return false
+	default:
+		return true
+	}
+}
+
+type bpfMapOpAttr struct {
+	mapFd   uint32
+	padding uint32
+	key     internal.Pointer
+	value   internal.Pointer
+	flags   uint64
+}
+
+type bpfBatchMapOpAttr struct {
+	inBatch   internal.Pointer
+	outBatch  internal.Pointer
+	keys      internal.Pointer
+	values    internal.Pointer
+	count     uint32
+	mapFd     uint32
+	elemFlags uint64
+	flags     uint64
+}
+
+type bpfMapInfo struct {
+	map_type                  uint32 // since 4.12 1e2709769086
+	id                        uint32
+	key_size                  uint32
+	value_size                uint32
+	max_entries               uint32
+	map_flags                 uint32
+	name                      internal.BPFObjName // since 4.15 ad5b177bd73f
+	ifindex                   uint32              // since 4.16 52775b33bb50
+	btf_vmlinux_value_type_id uint32              // since 5.6  85d33df357b6
+	netns_dev                 uint64              // since 4.16 52775b33bb50
+	netns_ino                 uint64
+	btf_id                    uint32 // since 4.18 78958fca7ead
+	btf_key_type_id           uint32 // since 4.18 9b2cf328b2ec
+	btf_value_type_id         uint32
+}
+
+type bpfProgInfo struct {
+	prog_type                uint32
+	id                       uint32
+	tag                      [unix.BPF_TAG_SIZE]byte
+	jited_prog_len           uint32
+	xlated_prog_len          uint32
+	jited_prog_insns         internal.Pointer
+	xlated_prog_insns        internal.Pointer
+	load_time                uint64 // since 4.15 cb4d2b3f03d8
+	created_by_uid           uint32
+	nr_map_ids               uint32 // since 4.15 cb4d2b3f03d8
+	map_ids                  internal.Pointer
+	name                     internal.BPFObjName // since 4.15 067cae47771c
+	ifindex                  uint32
+	gpl_compatible           uint32
+	netns_dev                uint64
+	netns_ino                uint64
+	nr_jited_ksyms           uint32
+	nr_jited_func_lens       uint32
+	jited_ksyms              internal.Pointer
+	jited_func_lens          internal.Pointer
+	btf_id                   uint32
+	func_info_rec_size       uint32
+	func_info                internal.Pointer
+	nr_func_info             uint32
+	nr_line_info             uint32
+	line_info                internal.Pointer
+	jited_line_info          internal.Pointer
+	nr_jited_line_info       uint32
+	line_info_rec_size       uint32
+	jited_line_info_rec_size uint32
+	nr_prog_tags             uint32
+	prog_tags                internal.Pointer
+	run_time_ns              uint64
+	run_cnt                  uint64
+}
+
+type bpfProgTestRunAttr struct {
+	fd          uint32
+	retval      uint32
+	dataSizeIn  uint32
+	dataSizeOut uint32
+	dataIn      internal.Pointer
+	dataOut     internal.Pointer
+	repeat      uint32
+	duration    uint32
+}
+
+type bpfMapFreezeAttr struct {
+	mapFd uint32
+}
+
+type bpfObjGetNextIDAttr struct {
+	startID   uint32
+	nextID    uint32
+	openFlags uint32
+}
+
+func bpfProgTestRun(attr *bpfProgTestRunAttr) error {
+	_, err := internal.BPF(internal.BPF_PROG_TEST_RUN, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+	return err
+}
+
+var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() error {
+	_, err := internal.BPFMapCreate(&internal.BPFMapCreateAttr{
+		MapType:    uint32(ArrayOfMaps),
+		KeySize:    4,
+		ValueSize:  4,
+		MaxEntries: 1,
+		// Invalid file descriptor.
+		InnerMapFd: ^uint32(0),
+	})
+	if errors.Is(err, unix.EINVAL) {
+		return internal.ErrNotSupported
+	}
+	if errors.Is(err, unix.EBADF) {
+		return nil
+	}
+	return err
+})
+
+var haveMapMutabilityModifiers = internal.FeatureTest("read- and write-only maps", "5.2", func() error {
+	// This checks BPF_F_RDONLY_PROG and BPF_F_WRONLY_PROG. Since
+	// BPF_MAP_FREEZE appeared in 5.2 as well we don't do a separate check.
+	m, err := internal.BPFMapCreate(&internal.BPFMapCreateAttr{
+		MapType:    uint32(Array),
+		KeySize:    4,
+		ValueSize:  4,
+		MaxEntries: 1,
+		Flags:      unix.BPF_F_RDONLY_PROG,
+	})
+	if err != nil {
+		return internal.ErrNotSupported
+	}
+	_ = m.Close()
+	return nil
+})
+
+var haveMmapableMaps = internal.FeatureTest("mmapable maps", "5.5", func() error {
+	// This checks BPF_F_MMAPABLE, which appeared in 5.5 for array maps.
+	m, err := internal.BPFMapCreate(&internal.BPFMapCreateAttr{
+		MapType:    uint32(Array),
+		KeySize:    4,
+		ValueSize:  4,
+		MaxEntries: 1,
+		Flags:      unix.BPF_F_MMAPABLE,
+	})
+	if err != nil {
+		return internal.ErrNotSupported
+	}
+	_ = m.Close()
+	return nil
+})
+
+var haveInnerMaps = internal.FeatureTest("inner maps", "5.10", func() error {
+	// This checks BPF_F_INNER_MAP, which appeared in 5.10.
+	m, err := internal.BPFMapCreate(&internal.BPFMapCreateAttr{
+		MapType:    uint32(Array),
+		KeySize:    4,
+		ValueSize:  4,
+		MaxEntries: 1,
+		Flags:      unix.BPF_F_INNER_MAP,
+	})
+	if err != nil {
+		return internal.ErrNotSupported
+	}
+	_ = m.Close()
+	return nil
+})
+
+func bpfMapLookupElem(m *internal.FD, key, valueOut internal.Pointer) error {
+	fd, err := m.Value()
+	if err != nil {
+		return err
+	}
+
+	attr := bpfMapOpAttr{
+		mapFd: fd,
+		key:   key,
+		value: valueOut,
+	}
+	_, err = internal.BPF(internal.BPF_MAP_LOOKUP_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
+	return wrapMapError(err)
+}
+
+func bpfMapLookupAndDelete(m *internal.FD, key, valueOut internal.Pointer) error {
+	fd, err := m.Value()
+	if err != nil {
+		return err
+	}
+
+	attr := bpfMapOpAttr{
+		mapFd: fd,
+		key:   key,
+		value: valueOut,
+	}
+	_, err = internal.BPF(internal.BPF_MAP_LOOKUP_AND_DELETE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
+	return wrapMapError(err)
+}
+
+func bpfMapUpdateElem(m *internal.FD, key, valueOut internal.Pointer, flags uint64) error {
+	fd, err := m.Value()
+	if err != nil {
+		return err
+	}
+
+	attr := bpfMapOpAttr{
+		mapFd: fd,
+		key:   key,
+		value: valueOut,
+		flags: flags,
+	}
+	_, err = internal.BPF(internal.BPF_MAP_UPDATE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
+	return wrapMapError(err)
+}
+
+func bpfMapDeleteElem(m *internal.FD, key internal.Pointer) error {
+	fd, err := m.Value()
+	if err != nil {
+		return err
+	}
+
+	attr := bpfMapOpAttr{
+		mapFd: fd,
+		key:   key,
+	}
+	_, err = internal.BPF(internal.BPF_MAP_DELETE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
+	return wrapMapError(err)
+}
+
+func bpfMapGetNextKey(m *internal.FD, key, nextKeyOut internal.Pointer) error {
+	fd, err := m.Value()
+	if err != nil {
+		return err
+	}
+
+	attr := bpfMapOpAttr{
+		mapFd: fd,
+		key:   key,
+		value: nextKeyOut,
+	}
+	_, err = internal.BPF(internal.BPF_MAP_GET_NEXT_KEY, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
+	return wrapMapError(err)
+}
+
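+// objGetNextID wraps the BPF_*_GET_NEXT_ID family of commands, which iterate
+// over the IDs of all objects of a given kind loaded in the kernel.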
+func objGetNextID(cmd internal.BPFCmd, start uint32) (uint32, error) {
+	attr := bpfObjGetNextIDAttr{
+		startID: start,
+	}
+	_, err := internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
+	return attr.nextID, err
+}
+
+func bpfMapBatch(cmd internal.BPFCmd, m *internal.FD, inBatch, outBatch, keys, values internal.Pointer, count uint32, opts *BatchOptions) (uint32, error) {
+	fd, err := m.Value()
+	if err != nil {
+		return 0, err
+	}
+
+	attr := bpfBatchMapOpAttr{
+		inBatch:  inBatch,
+		outBatch: outBatch,
+		keys:     keys,
+		values:   values,
+		count:    count,
+		mapFd:    fd,
+	}
+	if opts != nil {
+		attr.elemFlags = opts.ElemFlags
+		attr.flags = opts.Flags
+	}
+	_, err = internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
+	// Always return the count, even on an error: operations like update may
+	// have been partially fulfilled.
+	return attr.count, wrapMapError(err)
+}
+
+func wrapMapError(err error) error {
+	if err == nil {
+		return nil
+	}
+
+	if errors.Is(err, unix.ENOENT) {
+		return internal.SyscallError(ErrKeyNotExist, unix.ENOENT)
+	}
+
+	if errors.Is(err, unix.EEXIST) {
+		return internal.SyscallError(ErrKeyExist, unix.EEXIST)
+	}
+
+	if errors.Is(err, unix.ENOTSUPP) {
+		return internal.SyscallError(ErrNotSupported, unix.ENOTSUPP)
+	}
+
+	if errors.Is(err, unix.E2BIG) {
+		return fmt.Errorf("key too big for map: %w", err)
+	}
+
+	return err
+}
+
+func bpfMapFreeze(m *internal.FD) error {
+	fd, err := m.Value()
+	if err != nil {
+		return err
+	}
+
+	attr := bpfMapFreezeAttr{
+		mapFd: fd,
+	}
+	_, err = internal.BPF(internal.BPF_MAP_FREEZE, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
+	return err
+}
+
+func bpfGetProgInfoByFD(fd *internal.FD, ids []MapID) (*bpfProgInfo, error) {
+	var info bpfProgInfo
+	if len(ids) > 0 {
+		info.nr_map_ids = uint32(len(ids))
+		info.map_ids = internal.NewPointer(unsafe.Pointer(&ids[0]))
+	}
+
+	if err := internal.BPFObjGetInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)); err != nil {
+		return nil, fmt.Errorf("can't get program info: %w", err)
+	}
+	return &info, nil
+}
+
+func bpfGetMapInfoByFD(fd *internal.FD) (*bpfMapInfo, error) {
+	var info bpfMapInfo
+	err := internal.BPFObjGetInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info))
+	if err != nil {
+		return nil, fmt.Errorf("can't get map info: %w", err)
+	}
+	return &info, nil
+}
+
+var haveObjName = internal.FeatureTest("object names", "4.15", func() error {
+	attr := internal.BPFMapCreateAttr{
+		MapType:    uint32(Array),
+		KeySize:    4,
+		ValueSize:  4,
+		MaxEntries: 1,
+		MapName:    internal.NewBPFObjName("feature_test"),
+	}
+
+	fd, err := internal.BPFMapCreate(&attr)
+	if err != nil {
+		return internal.ErrNotSupported
+	}
+
+	_ = fd.Close()
+	return nil
+})
+
+var objNameAllowsDot = internal.FeatureTest("dot in object names", "5.2", func() error {
+	if err := haveObjName(); err != nil {
+		return err
+	}
+
+	attr := internal.BPFMapCreateAttr{
+		MapType:    uint32(Array),
+		KeySize:    4,
+		ValueSize:  4,
+		MaxEntries: 1,
+		MapName:    internal.NewBPFObjName(".test"),
+	}
+
+	fd, err := internal.BPFMapCreate(&attr)
+	if err != nil {
+		return internal.ErrNotSupported
+	}
+
+	_ = fd.Close()
+	return nil
+})
+
+var haveBatchAPI = internal.FeatureTest("map batch api", "5.6", func() error {
+	var maxEntries uint32 = 2
+	attr := internal.BPFMapCreateAttr{
+		MapType:    uint32(Hash),
+		KeySize:    4,
+		ValueSize:  4,
+		MaxEntries: maxEntries,
+	}
+
+	fd, err := internal.BPFMapCreate(&attr)
+	if err != nil {
+		return internal.ErrNotSupported
+	}
+	defer fd.Close()
+	keys := []uint32{1, 2}
+	values := []uint32{3, 4}
+	kp, _ := marshalPtr(keys, 8)
+	vp, _ := marshalPtr(values, 8)
+	nilPtr := internal.NewPointer(nil)
+	_, err = bpfMapBatch(internal.BPF_MAP_UPDATE_BATCH, fd, nilPtr, nilPtr, kp, vp, maxEntries, nil)
+	if err != nil {
+		return internal.ErrNotSupported
+	}
+	return nil
+})
+
+var haveProbeReadKernel = internal.FeatureTest("bpf_probe_read_kernel", "5.5", func() error {
+	insns := asm.Instructions{
+		asm.Mov.Reg(asm.R1, asm.R10),
+		asm.Add.Imm(asm.R1, -8),
+		asm.Mov.Imm(asm.R2, 8),
+		asm.Mov.Imm(asm.R3, 0),
+		asm.FnProbeReadKernel.Call(),
+		asm.Return(),
+	}
+	buf := bytes.NewBuffer(make([]byte, 0, len(insns)*asm.InstructionSize))
+	if err := insns.Marshal(buf, internal.NativeEndian); err != nil {
+		return err
+	}
+	bytecode := buf.Bytes()
+
+	fd, err := internal.BPFProgLoad(&internal.BPFProgLoadAttr{
+		ProgType:     uint32(Kprobe),
+		License:      internal.NewStringPointer("GPL"),
+		Instructions: internal.NewSlicePointer(bytecode),
+		InsCount:     uint32(len(bytecode) / asm.InstructionSize),
+	})
+	if err != nil {
+		return internal.ErrNotSupported
+	}
+	_ = fd.Close()
+	return nil
+})
diff --git a/vendor/github.com/cilium/ebpf/types.go b/vendor/github.com/cilium/ebpf/types.go
new file mode 100644
index 0000000000..84b83f9f98
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/types.go
@@ -0,0 +1,278 @@
+package ebpf
+
+import (
+	"github.com/cilium/ebpf/internal/unix"
+)
+
+//go:generate stringer -output types_string.go -type=MapType,ProgramType,PinType
+
+// MapType indicates the type of map structure
+// that will be initialized in the kernel.
+type MapType uint32
+
+// Max returns the latest supported MapType.
+func (_ MapType) Max() MapType {
+	return maxMapType - 1
+}
+
+// All the various map types that can be created
+const (
+	UnspecifiedMap MapType = iota
+	// Hash is a hash map
+	Hash
+	// Array is an array map
+	Array
+	// ProgramArray - A program array map is a special kind of array map whose map
+	// values contain only file descriptors referring to other eBPF
+	// programs. Thus, both the key_size and value_size must be
+	// exactly four bytes. This map is used in conjunction with the
+	// TailCall helper.
+	ProgramArray
+	// PerfEventArray - A perf event array is used in conjunction with PerfEventRead
+	// and PerfEventOutput calls, to read the raw bpf_perf_data from the registers.
+	PerfEventArray
+	// PerCPUHash - This data structure is useful for people who have high performance
+	// network needs and can reconcile adds at the end of some cycle, so that
+	// hashes can be lock free without the use of XAdd, which can be costly.
+	PerCPUHash
+	// PerCPUArray - Like PerCPUHash, but for arrays: each CPU gets its own copy,
+	// so updates can stay lock free without the use of XAdd, which can be
+	// costly. The per-CPU contents can all be reconciled later.
+	PerCPUArray
+	// StackTrace - This holds whole user and kernel stack traces; it can be retrieved with
+	// GetStackID
+	StackTrace
+	// CGroupArray - This is a very niche structure used to help SKBInCGroup determine
+	// if an skb is from a socket belonging to a specific cgroup
+	CGroupArray
+	// LRUHash - This allows you to create a small hash structure that will purge the
+	// least recently used items rather than throw an error when you run out of memory
+	LRUHash
+	// LRUCPUHash - This is NOT like PerCPUHash, this structure is shared among the CPUs,
+	// it has more to do with including the CPU id with the LRU calculation so that if a
+	// particular CPU is using a value over-and-over again, then it will be saved, but if
+	// a value is being retrieved a lot but sparsely across CPUs it is not as important, basically
+	// giving weight to CPU locality over overall usage.
+	LRUCPUHash
+	// LPMTrie - This is an implementation of a Longest-Prefix-Match Trie structure. It is useful
+	// for storing things like IP addresses which can be bit masked allowing for keys of differing
+	// values to refer to the same reference based on their masks. See Wikipedia for more details.
+	LPMTrie
+	// ArrayOfMaps - Each item in the array is another map. The inner map mustn't be a map of maps
+	// itself.
+	ArrayOfMaps
+	// HashOfMaps - Each item in the hash map is another map. The inner map mustn't be a map of maps
+	// itself.
+	HashOfMaps
+	// DevMap - Specialized map to store references to network devices.
+	DevMap
+	// SockMap - Specialized map to store references to sockets.
+	SockMap
+	// CPUMap - Specialized map to store references to CPUs.
+	CPUMap
+	// XSKMap - Specialized map for XDP programs to store references to open sockets.
+	XSKMap
+	// SockHash - Specialized hash to store references to sockets.
+	SockHash
+	// CGroupStorage - Special map for CGroups.
+	CGroupStorage
+	// ReusePortSockArray - Specialized map to store references to sockets that can be reused.
+	ReusePortSockArray
+	// PerCPUCGroupStorage - Special per CPU map for CGroups.
+	PerCPUCGroupStorage
+	// Queue - FIFO storage for BPF programs.
+	Queue
+	// Stack - LIFO storage for BPF programs.
+	Stack
+	// SkStorage - Specialized map for local storage at SK for BPF programs.
+	SkStorage
+	// DevMapHash - Hash-based indexing scheme for references to network devices.
+	DevMapHash
+	// StructOpsMap - This map holds a kernel struct with its function pointer implemented in a BPF
+	// program.
+	StructOpsMap
+	// RingBuf - Similar to PerfEventArray, but shared across all CPUs.
+	RingBuf
+	// InodeStorage - Specialized local storage map for inodes.
+	InodeStorage
+	// TaskStorage - Specialized local storage map for task_struct.
+	TaskStorage
+	// maxMapType - Bound enum of MapTypes, has to be last in enum.
+	maxMapType
+)
+
+// Deprecated: StructOpts was a typo, use StructOpsMap instead.
+//
+// Declared as a variable to prevent stringer from picking it up
+// as an enum value.
+var StructOpts MapType = StructOpsMap
+
+// hasPerCPUValue returns true if the Map stores a value per CPU.
+func (mt MapType) hasPerCPUValue() bool {
+	return mt == PerCPUHash || mt == PerCPUArray || mt == LRUCPUHash || mt == PerCPUCGroupStorage
+}
+
+// canStoreMap returns true if the map type accepts a map fd
+// for update and returns a map id for lookup.
+func (mt MapType) canStoreMap() bool {
+	return mt == ArrayOfMaps || mt == HashOfMaps
+}
+
+// canStoreProgram returns true if the map type accepts a program fd
+// for update and returns a program id for lookup.
+func (mt MapType) canStoreProgram() bool {
+	return mt == ProgramArray
+}
+
+// ProgramType of the eBPF program
+type ProgramType uint32
+
+// Max returns the latest supported ProgramType.
+func (_ ProgramType) Max() ProgramType {
+	return maxProgramType - 1
+}
+
+// eBPF program types
+const (
+	UnspecifiedProgram ProgramType = iota
+	SocketFilter
+	Kprobe
+	SchedCLS
+	SchedACT
+	TracePoint
+	XDP
+	PerfEvent
+	CGroupSKB
+	CGroupSock
+	LWTIn
+	LWTOut
+	LWTXmit
+	SockOps
+	SkSKB
+	CGroupDevice
+	SkMsg
+	RawTracepoint
+	CGroupSockAddr
+	LWTSeg6Local
+	LircMode2
+	SkReuseport
+	FlowDissector
+	CGroupSysctl
+	RawTracepointWritable
+	CGroupSockopt
+	Tracing
+	StructOps
+	Extension
+	LSM
+	SkLookup
+	maxProgramType
+)
+
+// AttachType of the eBPF program, needed to differentiate allowed context accesses in
+// some newer program types like CGroupSockAddr. Should be set to AttachNone if not required.
+// Will cause invalid argument (EINVAL) at program load time if set incorrectly.
+type AttachType uint32
+
+//go:generate stringer -type AttachType -trimprefix Attach
+
+// AttachNone is an alias for AttachCGroupInetIngress for readability reasons.
+const AttachNone AttachType = 0
+
+const (
+	AttachCGroupInetIngress AttachType = iota
+	AttachCGroupInetEgress
+	AttachCGroupInetSockCreate
+	AttachCGroupSockOps
+	AttachSkSKBStreamParser
+	AttachSkSKBStreamVerdict
+	AttachCGroupDevice
+	AttachSkMsgVerdict
+	AttachCGroupInet4Bind
+	AttachCGroupInet6Bind
+	AttachCGroupInet4Connect
+	AttachCGroupInet6Connect
+	AttachCGroupInet4PostBind
+	AttachCGroupInet6PostBind
+	AttachCGroupUDP4Sendmsg
+	AttachCGroupUDP6Sendmsg
+	AttachLircMode2
+	AttachFlowDissector
+	AttachCGroupSysctl
+	AttachCGroupUDP4Recvmsg
+	AttachCGroupUDP6Recvmsg
+	AttachCGroupGetsockopt
+	AttachCGroupSetsockopt
+	AttachTraceRawTp
+	AttachTraceFEntry
+	AttachTraceFExit
+	AttachModifyReturn
+	AttachLSMMac
+	AttachTraceIter
+	AttachCgroupInet4GetPeername
+	AttachCgroupInet6GetPeername
+	AttachCgroupInet4GetSockname
+	AttachCgroupInet6GetSockname
+	AttachXDPDevMap
+	AttachCgroupInetSockRelease
+	AttachXDPCPUMap
+	AttachSkLookup
+	AttachXDP
+	AttachSkSKBVerdict
+	AttachSkReuseportSelect
+	AttachSkReuseportSelectOrMigrate
+	AttachPerfEvent
+)
+
+// AttachFlags of the eBPF program used in BPF_PROG_ATTACH command
+type AttachFlags uint32
+
+// PinType determines whether a map is pinned into a BPFFS.
+type PinType int
+
+// Valid pin types.
+//
+// Mirrors enum libbpf_pin_type.
+const (
+	PinNone PinType = iota
+	// Pin an object by using its name as the filename.
+	PinByName
+)
+
+// LoadPinOptions control how a pinned object is loaded.
+type LoadPinOptions struct {
+	// Request a read-only or write-only object. The default is a read-write
+	// object. Only one of the flags may be set.
+	ReadOnly  bool
+	WriteOnly bool
+
+	// Raw flags for the syscall. Other fields of this struct take precedence.
+	Flags uint32
+}
+
+// Marshal returns a value suitable for BPF_OBJ_GET syscall file_flags parameter.
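+//
+// For example, (&LoadPinOptions{ReadOnly: true}).Marshal() returns
+// unix.BPF_F_RDONLY, which can be passed as file_flags to BPF_OBJ_GET.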
+func (lpo *LoadPinOptions) Marshal() uint32 {
+	if lpo == nil {
+		return 0
+	}
+
+	flags := lpo.Flags
+	if lpo.ReadOnly {
+		flags |= unix.BPF_F_RDONLY
+	}
+	if lpo.WriteOnly {
+		flags |= unix.BPF_F_WRONLY
+	}
+	return flags
+}
+
+// BatchOptions holds options for batch map operations.
+//
+// Mirrors libbpf struct bpf_map_batch_opts. Currently BPF_F_LOCK is the only
+// supported flag (for ElemFlags).
+type BatchOptions struct {
+	ElemFlags uint64
+	Flags     uint64
+}
diff --git a/vendor/github.com/cilium/ebpf/types_string.go b/vendor/github.com/cilium/ebpf/types_string.go
new file mode 100644
index 0000000000..81cbc9efde
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/types_string.go
@@ -0,0 +1,119 @@
+// Code generated by "stringer -output types_string.go -type=MapType,ProgramType,PinType"; DO NOT EDIT.
+
+package ebpf
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[UnspecifiedMap-0]
+	_ = x[Hash-1]
+	_ = x[Array-2]
+	_ = x[ProgramArray-3]
+	_ = x[PerfEventArray-4]
+	_ = x[PerCPUHash-5]
+	_ = x[PerCPUArray-6]
+	_ = x[StackTrace-7]
+	_ = x[CGroupArray-8]
+	_ = x[LRUHash-9]
+	_ = x[LRUCPUHash-10]
+	_ = x[LPMTrie-11]
+	_ = x[ArrayOfMaps-12]
+	_ = x[HashOfMaps-13]
+	_ = x[DevMap-14]
+	_ = x[SockMap-15]
+	_ = x[CPUMap-16]
+	_ = x[XSKMap-17]
+	_ = x[SockHash-18]
+	_ = x[CGroupStorage-19]
+	_ = x[ReusePortSockArray-20]
+	_ = x[PerCPUCGroupStorage-21]
+	_ = x[Queue-22]
+	_ = x[Stack-23]
+	_ = x[SkStorage-24]
+	_ = x[DevMapHash-25]
+	_ = x[StructOpsMap-26]
+	_ = x[RingBuf-27]
+	_ = x[InodeStorage-28]
+	_ = x[TaskStorage-29]
+	_ = x[maxMapType-30]
+}
+
+const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHashStructOpsMapRingBufInodeStorageTaskStoragemaxMapType"
+
+var _MapType_index = [...]uint16{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248, 260, 267, 279, 290, 300}
+
+func (i MapType) String() string {
+	if i >= MapType(len(_MapType_index)-1) {
+		return "MapType(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _MapType_name[_MapType_index[i]:_MapType_index[i+1]]
+}
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[UnspecifiedProgram-0]
+	_ = x[SocketFilter-1]
+	_ = x[Kprobe-2]
+	_ = x[SchedCLS-3]
+	_ = x[SchedACT-4]
+	_ = x[TracePoint-5]
+	_ = x[XDP-6]
+	_ = x[PerfEvent-7]
+	_ = x[CGroupSKB-8]
+	_ = x[CGroupSock-9]
+	_ = x[LWTIn-10]
+	_ = x[LWTOut-11]
+	_ = x[LWTXmit-12]
+	_ = x[SockOps-13]
+	_ = x[SkSKB-14]
+	_ = x[CGroupDevice-15]
+	_ = x[SkMsg-16]
+	_ = x[RawTracepoint-17]
+	_ = x[CGroupSockAddr-18]
+	_ = x[LWTSeg6Local-19]
+	_ = x[LircMode2-20]
+	_ = x[SkReuseport-21]
+	_ = x[FlowDissector-22]
+	_ = x[CGroupSysctl-23]
+	_ = x[RawTracepointWritable-24]
+	_ = x[CGroupSockopt-25]
+	_ = x[Tracing-26]
+	_ = x[StructOps-27]
+	_ = x[Extension-28]
+	_ = x[LSM-29]
+	_ = x[SkLookup-30]
+	_ = x[maxProgramType-31]
+}
+
+const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupmaxProgramType"
+
+var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 308}
+
+func (i ProgramType) String() string {
+	if i >= ProgramType(len(_ProgramType_index)-1) {
+		return "ProgramType(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _ProgramType_name[_ProgramType_index[i]:_ProgramType_index[i+1]]
+}
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[PinNone-0]
+	_ = x[PinByName-1]
+}
+
+const _PinType_name = "PinNonePinByName"
+
+var _PinType_index = [...]uint8{0, 7, 16}
+
+func (i PinType) String() string {
+	if i < 0 || i >= PinType(len(_PinType_index)-1) {
+		return "PinType(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _PinType_name[_PinType_index[i]:_PinType_index[i+1]]
+}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/.travis.yml b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml
deleted file mode 100644
index b94ff8cf92..0000000000
--- a/vendor/github.com/cyphar/filepath-securejoin/.travis.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright (C) 2017 SUSE LLC. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-language: go
-go:
-  - 1.13.x
-  - 1.16.x
-  - tip
-arch:
-  - AMD64
-  - ppc64le
-os:
-  - linux
-  - osx
-
-script:
-  - go test -cover -v ./...
-
-notifications:
-  email: false
diff --git a/vendor/github.com/cyphar/filepath-securejoin/README.md b/vendor/github.com/cyphar/filepath-securejoin/README.md
index 3624617c89..4eca0f2355 100644
--- a/vendor/github.com/cyphar/filepath-securejoin/README.md
+++ b/vendor/github.com/cyphar/filepath-securejoin/README.md
@@ -1,6 +1,6 @@
 ## `filepath-securejoin` ##
 
-[![Build Status](https://travis-ci.org/cyphar/filepath-securejoin.svg?branch=master)](https://travis-ci.org/cyphar/filepath-securejoin)
+[![Build Status](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml/badge.svg)](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml)
 
 An implementation of `SecureJoin`, a [candidate for inclusion in the Go
 standard library][go#20126]. The purpose of this function is to be a "secure"
diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION
index 7179039691..abd410582d 100644
--- a/vendor/github.com/cyphar/filepath-securejoin/VERSION
+++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION
@@ -1 +1 @@
-0.2.3
+0.2.4
diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go
index 7dd08dbbdf..aa32b85fb8 100644
--- a/vendor/github.com/cyphar/filepath-securejoin/join.go
+++ b/vendor/github.com/cyphar/filepath-securejoin/join.go
@@ -39,17 +39,27 @@ func IsNotExist(err error) bool {
 // components in the returned string are not modified (in other words are not
 // replaced with symlinks on the filesystem) after this function has returned.
 // Such a symlink race is necessarily out-of-scope of SecureJoin.
+//
+// Volume names in unsafePath are always discarded, regardless of whether they
+// are provided via direct input or encountered when evaluating symlinks.
+// Therefore:
+//
+//	"C:\Temp" + "D:\path\to\file.txt" results in "C:\Temp\path\to\file.txt"
 func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) {
 	// Use the os.* VFS implementation if none was specified.
 	if vfs == nil {
 		vfs = osVFS{}
 	}
 
+	unsafePath = filepath.FromSlash(unsafePath)
 	var path bytes.Buffer
 	n := 0
 	for unsafePath != "" {
 		if n > 255 {
-			return "", &os.PathError{Op: "SecureJoin", Path: root + "/" + unsafePath, Err: syscall.ELOOP}
+			return "", &os.PathError{Op: "SecureJoin", Path: root + string(filepath.Separator) + unsafePath, Err: syscall.ELOOP}
+		}
+
+		if v := filepath.VolumeName(unsafePath); v != "" {
+			unsafePath = unsafePath[len(v):]
 		}
 
 		// Next path component, p.
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go
index b9ba889b7a..ba2b2266c9 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go
@@ -1,24 +1,9 @@
 package cgroups
 
 import (
-	"errors"
-
 	"github.com/opencontainers/runc/libcontainer/configs"
 )
 
-var (
-	// ErrDevicesUnsupported is an error returned when a cgroup manager
-	// is not configured to set device rules.
-	ErrDevicesUnsupported = errors.New("cgroup manager is not configured to set device rules")
-
-	// DevicesSetV1 and DevicesSetV2 are functions to set devices for
-	// cgroup v1 and v2, respectively. Unless libcontainer/cgroups/devices
-	// package is imported, it is set to nil, so cgroup managers can't
-	// manage devices.
-	DevicesSetV1 func(path string, r *configs.Resources) error
-	DevicesSetV2 func(path string, r *configs.Resources) error
-)
-
 type Manager interface {
 	// Apply creates a cgroup, if not yet created, and adds a process
 	// with the specified pid into that cgroup. A special value of -1
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/devices/devices_emulator.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/devices/devices_emulator.go
new file mode 100644
index 0000000000..6c61ee4c03
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/devices/devices_emulator.go
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: Apache-2.0
+/*
+ * Copyright (C) 2020 Aleksa Sarai
+ * Copyright (C) 2020 SUSE LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package devices
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/opencontainers/runc/libcontainer/devices"
+)
+
+// deviceMeta is a Rule without the Allow or Permissions fields, and no
+// wildcard-type support. It's effectively the "match" portion of a metadata
+// rule, for the purposes of our emulation.
+type deviceMeta struct {
+	node  devices.Type
+	major int64
+	minor int64
+}
+
+// deviceRule is effectively the tuple (deviceMeta, Permissions).
+type deviceRule struct {
+	meta  deviceMeta
+	perms devices.Permissions
+}
+
+// deviceRules is a mapping of device metadata rules to the associated
+// permissions in the ruleset.
+type deviceRules map[deviceMeta]devices.Permissions
+
+func (r deviceRules) orderedEntries() []deviceRule {
+	var rules []deviceRule
+	for meta, perms := range r {
+		rules = append(rules, deviceRule{meta: meta, perms: perms})
+	}
+	sort.Slice(rules, func(i, j int) bool {
+		// Sort by (major, minor, type).
+		a, b := rules[i].meta, rules[j].meta
+		return a.major < b.major ||
+			(a.major == b.major && a.minor < b.minor) ||
+			(a.major == b.major && a.minor == b.minor && a.node < b.node)
+	})
+	return rules
+}
+
+type Emulator struct {
+	defaultAllow bool
+	rules        deviceRules
+}
+
+func (e *Emulator) IsBlacklist() bool {
+	return e.defaultAllow
+}
+
+func (e *Emulator) IsAllowAll() bool {
+	return e.IsBlacklist() && len(e.rules) == 0
+}
+
+func parseLine(line string) (*deviceRule, error) {
+	// Input: node major:minor perms.
+	fields := strings.FieldsFunc(line, func(r rune) bool {
+		return r == ' ' || r == ':'
+	})
+	if len(fields) != 4 {
+		return nil, fmt.Errorf("malformed devices.list rule %s", line)
+	}
+
+	var (
+		rule  deviceRule
+		node  = fields[0]
+		major = fields[1]
+		minor = fields[2]
+		perms = fields[3]
+	)
+
+	// Parse the node type.
+	switch node {
+	case "a":
+		// Super-special case -- "a" always means every device with every
+		// access mode. In fact, for devices.list this actually indicates that
+		// the cgroup is in black-list mode.
+		// TODO: Double-check that the entire file is "a *:* rwm".
+		return nil, nil
+	case "b":
+		rule.meta.node = devices.BlockDevice
+	case "c":
+		rule.meta.node = devices.CharDevice
+	default:
+		return nil, fmt.Errorf("unknown device type %q", node)
+	}
+
+	// Parse the major number.
+	if major == "*" {
+		rule.meta.major = devices.Wildcard
+	} else {
+		val, err := strconv.ParseUint(major, 10, 32)
+		if err != nil {
+			return nil, fmt.Errorf("invalid major number: %w", err)
+		}
+		rule.meta.major = int64(val)
+	}
+
+	// Parse the minor number.
+	if minor == "*" {
+		rule.meta.minor = devices.Wildcard
+	} else {
+		val, err := strconv.ParseUint(minor, 10, 32)
+		if err != nil {
+			return nil, fmt.Errorf("invalid minor number: %w", err)
+		}
+		rule.meta.minor = int64(val)
+	}
+
+	// Parse the access permissions.
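+	// (A valid mode is any non-empty combination of "r", "w" and "m", as
+	// validated just below.)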
+	rule.perms = devices.Permissions(perms)
+	if !rule.perms.IsValid() || rule.perms.IsEmpty() {
+		return nil, fmt.Errorf("parse access mode: contained unknown modes or is empty: %q", perms)
+	}
+	return &rule, nil
+}
+
+func (e *Emulator) addRule(rule deviceRule) error { //nolint:unparam
+	if e.rules == nil {
+		e.rules = make(map[deviceMeta]devices.Permissions)
+	}
+
+	// Merge with any pre-existing permissions.
+	oldPerms := e.rules[rule.meta]
+	newPerms := rule.perms.Union(oldPerms)
+	e.rules[rule.meta] = newPerms
+	return nil
+}
+
+func (e *Emulator) rmRule(rule deviceRule) error {
+	// Give an error if any of the permissions requested to be removed are
+	// present in a partially-matching wildcard rule, because such rules will
+	// be ignored by cgroupv1.
+	//
+	// This is a divergence from cgroupv1, but is necessary to avoid leading
+	// users into a false sense of security. cgroupv1 will silently(!) ignore
+	// requests to remove partial exceptions, but we really shouldn't do that.
+	//
+	// It may seem like we could just "split" wildcard rules which hit this
+	// issue, but unfortunately there are 2^32 possible major and minor
+	// numbers, which would exhaust kernel memory quickly if we did this. Not
+	// to mention it'd be really slow (the kernel side is implemented as a
+	// linked-list of exceptions).
+	for _, partialMeta := range []deviceMeta{
+		{node: rule.meta.node, major: devices.Wildcard, minor: rule.meta.minor},
+		{node: rule.meta.node, major: rule.meta.major, minor: devices.Wildcard},
+		{node: rule.meta.node, major: devices.Wildcard, minor: devices.Wildcard},
+	} {
+		// This wildcard rule is equivalent to the requested rule, so skip it.
+		if rule.meta == partialMeta {
+			continue
+		}
+		// Only give an error if the set of permissions overlap.
+		partialPerms := e.rules[partialMeta]
+		if !partialPerms.Intersection(rule.perms).IsEmpty() {
+			return fmt.Errorf("requested rule [%v %v] not supported by devices cgroupv1 (cannot punch hole in existing wildcard rule [%v %v])", rule.meta, rule.perms, partialMeta, partialPerms)
+		}
+	}
+
+	// Subtract all of the permissions listed from the full match rule. If the
+	// rule didn't exist, all of this is a no-op.
+	newPerms := e.rules[rule.meta].Difference(rule.perms)
+	if newPerms.IsEmpty() {
+		delete(e.rules, rule.meta)
+	} else {
+		e.rules[rule.meta] = newPerms
+	}
+	// TODO: The actual cgroup code doesn't care if an exception didn't exist
+	//       during removal, so not erroring out here is /accurate/ but quite
+	//       worrying. Maybe we should do additional validation, but again we
+	//       have to worry about backwards-compatibility.
+	return nil
+}
+
+func (e *Emulator) allow(rule *deviceRule) error {
+	// This cgroup is configured as a black-list. Reset the entire emulator,
+	// and put it into black-list mode.
+	if rule == nil || rule.meta.node == devices.WildcardDevice {
+		*e = Emulator{
+			defaultAllow: true,
+			rules:        nil,
+		}
+		return nil
+	}
+
+	var err error
+	if e.defaultAllow {
+		err = wrapErr(e.rmRule(*rule), "unable to remove 'deny' exception")
+	} else {
+		err = wrapErr(e.addRule(*rule), "unable to add 'allow' exception")
+	}
+	return err
+}
+
+func (e *Emulator) deny(rule *deviceRule) error {
+	// This cgroup is configured as a white-list. Reset the entire emulator,
+	// and put it into white-list mode.
+	if rule == nil || rule.meta.node == devices.WildcardDevice {
+		*e = Emulator{
+			defaultAllow: false,
+			rules:        nil,
+		}
+		return nil
+	}
+
+	var err error
+	if e.defaultAllow {
+		err = wrapErr(e.addRule(*rule), "unable to add 'deny' exception")
+	} else {
+		err = wrapErr(e.rmRule(*rule), "unable to remove 'allow' exception")
+	}
+	return err
+}
+
+func (e *Emulator) Apply(rule devices.Rule) error {
+	if !rule.Type.CanCgroup() {
+		return fmt.Errorf("cannot add rule [%#v] with non-cgroup type %q", rule, rule.Type)
+	}
+
+	innerRule := &deviceRule{
+		meta: deviceMeta{
+			node:  rule.Type,
+			major: rule.Major,
+			minor: rule.Minor,
+		},
+		perms: rule.Permissions,
+	}
+	if innerRule.meta.node == devices.WildcardDevice {
+		innerRule = nil
+	}
+
+	if rule.Allow {
+		return e.allow(innerRule)
+	}
+
+	return e.deny(innerRule)
+}
+
+// EmulatorFromList takes a reader to a "devices.list"-like source, and returns
+// a new Emulator that represents the state of the devices cgroup. Note that
+// black-list devices cgroups cannot be fully reconstructed, due to limitations
+// in the devices cgroup API. Instead, such cgroups are always treated as
+// "allow all" cgroups.
+func EmulatorFromList(list io.Reader) (*Emulator, error) {
+	// Normally cgroups are in black-list mode by default, but the way we
+	// figure out the current mode is whether or not devices.list has an
+	// allow-all rule. So we default to a white-list, and the existence of an
+	// "a *:* rwm" entry will tell us otherwise.
+	e := &Emulator{
+		defaultAllow: false,
+	}
+
+	// Parse the "devices.list".
+	s := bufio.NewScanner(list)
+	for s.Scan() {
+		line := s.Text()
+		deviceRule, err := parseLine(line)
+		if err != nil {
+			return nil, fmt.Errorf("error parsing line %q: %w", line, err)
+		}
+		// "devices.list" is an allow list. Note that this means that in
+		// black-list mode, we have no idea what rules are in play. As a
+		// result, we need to be very careful in Transition().
+		if err := e.allow(deviceRule); err != nil {
+			return nil, fmt.Errorf("error adding devices.list rule: %w", err)
+		}
+	}
+	if err := s.Err(); err != nil {
+		return nil, fmt.Errorf("error reading devices.list lines: %w", err)
+	}
+	return e, nil
+}
+
+// Transition calculates the minimally-disruptive set of rules that needs to
+// be applied to a devices cgroup in order to transition to the given target.
+// This means that any already-existing rules will not be applied, and
+// disruptive rules (like denying all device access) will only be applied if
+// necessary.
+//
+// This function is the sole reason for all of Emulator -- to allow us
+// to figure out how to update a container's cgroups without causing spurious
+// device errors (if possible).
+func (source *Emulator) Transition(target *Emulator) ([]*devices.Rule, error) {
+	var transitionRules []*devices.Rule
+	oldRules := source.rules
+
+	// If the default policy doesn't match, we need to include a "disruptive"
+	// rule (either allow-all or deny-all) in order to switch the cgroup to the
+	// correct default policy.
+	//
+	// However, due to a limitation in "devices.list" we cannot be sure what
+	// deny rules are in place in a black-list cgroup. Thus if the source is a
+	// black-list we also have to include a disruptive rule.
+	if source.IsBlacklist() || source.defaultAllow != target.defaultAllow {
+		transitionRules = append(transitionRules, &devices.Rule{
+			Type:        'a',
+			Major:       -1,
+			Minor:       -1,
+			Permissions: devices.Permissions("rwm"),
+			Allow:       target.defaultAllow,
+		})
+		// The old rules are only relevant if we aren't starting out with a
+		// disruptive rule.
+		oldRules = nil
+	}
+
+	// NOTE: We traverse through the rules in a sorted order so we always write
+	// the same set of rules (this is to aid testing).
+
+	// First, we create inverse rules for any old rules not in the new set.
+	// This includes partial-inverse rules for specific permissions. This is a
+	// no-op if we added a disruptive rule, since oldRules will be empty.
+	for _, rule := range oldRules.orderedEntries() {
+		meta, oldPerms := rule.meta, rule.perms
+		newPerms := target.rules[meta]
+		droppedPerms := oldPerms.Difference(newPerms)
+		if !droppedPerms.IsEmpty() {
+			transitionRules = append(transitionRules, &devices.Rule{
+				Type:        meta.node,
+				Major:       meta.major,
+				Minor:       meta.minor,
+				Permissions: droppedPerms,
+				Allow:       target.defaultAllow,
+			})
+		}
+	}
+
+	// Add any additional rules which weren't in the old set. We happen to
+	// filter out rules which are present in both sets, though this isn't
+	// strictly necessary.
+	for _, rule := range target.rules.orderedEntries() {
+		meta, newPerms := rule.meta, rule.perms
+		oldPerms := oldRules[meta]
+		gainedPerms := newPerms.Difference(oldPerms)
+		if !gainedPerms.IsEmpty() {
+			transitionRules = append(transitionRules, &devices.Rule{
+				Type:        meta.node,
+				Major:       meta.major,
+				Minor:       meta.minor,
+				Permissions: gainedPerms,
+				Allow:       !target.defaultAllow,
+			})
+		}
+	}
+	return transitionRules, nil
+}
+
+// Rules returns the minimum set of rules necessary to convert a *deny-all*
+// cgroup to the emulated filter state (note that this is not the same as a
+// default cgroupv1 cgroup -- which is allow-all). This is effectively just a
+// wrapper around Transition() with the source emulator being an empty cgroup.
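+//
+// For example, a white-list emulator holding only "c 1:3 rwm" yields a single
+// allow rule for that device, since the implied source cgroup denies all.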
+func (e *Emulator) Rules() ([]*devices.Rule, error) {
+	defaultCgroup := &Emulator{defaultAllow: false}
+	return defaultCgroup.Transition(e)
+}
+
+func wrapErr(err error, text string) error {
+	if err == nil {
+		return nil
+	}
+	return fmt.Errorf(text+": %w", err)
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/devicefilter/devicefilter.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/devicefilter/devicefilter.go
new file mode 100644
index 0000000000..4e69b35bcd
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/devicefilter/devicefilter.go
@@ -0,0 +1,208 @@
+// Package devicefilter contains an eBPF device filter program.
+//
+// The implementation is based on https://github.com/containers/crun/blob/0.10.2/src/libcrun/ebpf.c
+//
+// Although ebpf.c is originally licensed under LGPL-3.0-or-later, the author (Giuseppe Scrivano)
+// agreed to relicense the file in Apache License 2.0: https://github.com/opencontainers/runc/issues/2144#issuecomment-543116397
+package devicefilter
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"strconv"
+
+	"github.com/cilium/ebpf/asm"
+	devicesemulator "github.com/opencontainers/runc/libcontainer/cgroups/devices"
+	"github.com/opencontainers/runc/libcontainer/devices"
+	"golang.org/x/sys/unix"
+)
+
+const (
+	// license string format is the same as the kernel MODULE_LICENSE macro
+	license = "Apache"
+)
+
+// DeviceFilter returns an eBPF device filter program and its license string.
+func DeviceFilter(rules []*devices.Rule) (asm.Instructions, string, error) {
+	// Generate the minimum ruleset for the device rules we are given. While we
+	// don't care about minimum transitions in cgroupv2, using the emulator
+	// gives us a guarantee that the behaviour of devices filtering is the same
+	// as cgroupv1, including security hardenings to avoid misconfiguration
+	// (such as punching holes in wildcard rules).
+	emu := new(devicesemulator.Emulator)
+	for _, rule := range rules {
+		if err := emu.Apply(*rule); err != nil {
+			return nil, "", err
+		}
+	}
+	cleanRules, err := emu.Rules()
+	if err != nil {
+		return nil, "", err
+	}
+
+	p := &program{
+		defaultAllow: emu.IsBlacklist(),
+	}
+	p.init()
+
+	for idx, rule := range cleanRules {
+		if rule.Type == devices.WildcardDevice {
+			// We can safely skip over wildcard entries because there should
+			// only be one (at most) at the very start to instruct cgroupv1 to
+			// go into allow-list mode. However we do double-check this here.
+			if idx != 0 || rule.Allow != emu.IsBlacklist() {
+				return nil, "", fmt.Errorf("[internal error] emulated cgroupv2 devices ruleset had bad wildcard at idx %v (%s)", idx, rule.CgroupString())
+			}
+			continue
+		}
+		if rule.Allow == p.defaultAllow {
+			// There should be no rules which have an action equal to the
+			// default action, the emulator removes those.
+ return nil, "", fmt.Errorf("[internal error] emulated cgroupv2 devices ruleset had no-op rule at idx %v (%s)", idx, rule.CgroupString()) + } + if err := p.appendRule(rule); err != nil { + return nil, "", err + } + } + return p.finalize(), license, nil +} + +type program struct { + insts asm.Instructions + defaultAllow bool + blockID int +} + +func (p *program) init() { + // struct bpf_cgroup_dev_ctx: https://elixir.bootlin.com/linux/v5.3.6/source/include/uapi/linux/bpf.h#L3423 + /* + u32 access_type + u32 major + u32 minor + */ + // R2 <- type (lower 16 bit of u32 access_type at R1[0]) + p.insts = append(p.insts, + asm.LoadMem(asm.R2, asm.R1, 0, asm.Word), + asm.And.Imm32(asm.R2, 0xFFFF)) + + // R3 <- access (upper 16 bit of u32 access_type at R1[0]) + p.insts = append(p.insts, + asm.LoadMem(asm.R3, asm.R1, 0, asm.Word), + // RSh: bitwise shift right + asm.RSh.Imm32(asm.R3, 16)) + + // R4 <- major (u32 major at R1[4]) + p.insts = append(p.insts, + asm.LoadMem(asm.R4, asm.R1, 4, asm.Word)) + + // R5 <- minor (u32 minor at R1[8]) + p.insts = append(p.insts, + asm.LoadMem(asm.R5, asm.R1, 8, asm.Word)) +} + +// appendRule rule converts an OCI rule to the relevant eBPF block and adds it +// to the in-progress filter program. In order to operate properly, it must be +// called with a "clean" rule list (generated by devices.Emulator.Rules() -- +// with any "a" rules removed). +func (p *program) appendRule(rule *devices.Rule) error { + if p.blockID < 0 { + return errors.New("the program is finalized") + } + + var bpfType int32 + switch rule.Type { + case devices.CharDevice: + bpfType = int32(unix.BPF_DEVCG_DEV_CHAR) + case devices.BlockDevice: + bpfType = int32(unix.BPF_DEVCG_DEV_BLOCK) + default: + // We do not permit 'a', nor any other types we don't know about. + return fmt.Errorf("invalid type %q", string(rule.Type)) + } + if rule.Major > math.MaxUint32 { + return fmt.Errorf("invalid major %d", rule.Major) + } + if rule.Minor > math.MaxUint32 { + return fmt.Errorf("invalid minor %d", rule.Major) + } + hasMajor := rule.Major >= 0 // if not specified in OCI json, major is set to -1 + hasMinor := rule.Minor >= 0 + bpfAccess := int32(0) + for _, r := range rule.Permissions { + switch r { + case 'r': + bpfAccess |= unix.BPF_DEVCG_ACC_READ + case 'w': + bpfAccess |= unix.BPF_DEVCG_ACC_WRITE + case 'm': + bpfAccess |= unix.BPF_DEVCG_ACC_MKNOD + default: + return fmt.Errorf("unknown device access %v", r) + } + } + // If the access is rwm, skip the check. + hasAccess := bpfAccess != (unix.BPF_DEVCG_ACC_READ | unix.BPF_DEVCG_ACC_WRITE | unix.BPF_DEVCG_ACC_MKNOD) + + var ( + blockSym = "block-" + strconv.Itoa(p.blockID) + nextBlockSym = "block-" + strconv.Itoa(p.blockID+1) + prevBlockLastIdx = len(p.insts) - 1 + ) + p.insts = append(p.insts, + // if (R2 != bpfType) goto next + asm.JNE.Imm(asm.R2, bpfType, nextBlockSym), + ) + if hasAccess { + p.insts = append(p.insts, + // if (R3 & bpfAccess != R3 /* use R1 as a temp var */) goto next + asm.Mov.Reg32(asm.R1, asm.R3), + asm.And.Imm32(asm.R1, bpfAccess), + asm.JNE.Reg(asm.R1, asm.R3, nextBlockSym), + ) + } + if hasMajor { + p.insts = append(p.insts, + // if (R4 != major) goto next + asm.JNE.Imm(asm.R4, int32(rule.Major), nextBlockSym), + ) + } + if hasMinor { + p.insts = append(p.insts, + // if (R5 != minor) goto next + asm.JNE.Imm(asm.R5, int32(rule.Minor), nextBlockSym), + ) + } + p.insts = append(p.insts, acceptBlock(rule.Allow)...) 
+	// set blockSym to the first instruction we added in this iteration
+	p.insts[prevBlockLastIdx+1] = p.insts[prevBlockLastIdx+1].Sym(blockSym)
+	p.blockID++
+	return nil
+}
+
+func (p *program) finalize() asm.Instructions {
+	var v int32
+	if p.defaultAllow {
+		v = 1
+	}
+	blockSym := "block-" + strconv.Itoa(p.blockID)
+	p.insts = append(p.insts,
+		// R0 <- v
+		asm.Mov.Imm32(asm.R0, v).Sym(blockSym),
+		asm.Return(),
+	)
+	p.blockID = -1
+	return p.insts
+}
+
+func acceptBlock(accept bool) asm.Instructions {
+	var v int32
+	if accept {
+		v = 1
+	}
+	return []asm.Instruction{
+		// R0 <- v
+		asm.Mov.Imm32(asm.R0, v),
+		asm.Return(),
+	}
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/ebpf_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/ebpf_linux.go
new file mode 100644
index 0000000000..35b00aaf05
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/ebpf_linux.go
@@ -0,0 +1,253 @@
+package ebpf
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"runtime"
+	"sync"
+	"unsafe"
+
+	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/asm"
+	"github.com/cilium/ebpf/link"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
+)
+
+func nilCloser() error {
+	return nil
+}
+
+func findAttachedCgroupDeviceFilters(dirFd int) ([]*ebpf.Program, error) {
+	type bpfAttrQuery struct {
+		TargetFd    uint32
+		AttachType  uint32
+		QueryType   uint32
+		AttachFlags uint32
+		ProgIds     uint64 // __aligned_u64
+		ProgCnt     uint32
+	}
+
+	// Currently you can only have 64 eBPF programs attached to a cgroup.
+	size := 64
+	retries := 0
+	for retries < 10 {
+		progIds := make([]uint32, size)
+		query := bpfAttrQuery{
+			TargetFd:   uint32(dirFd),
+			AttachType: uint32(unix.BPF_CGROUP_DEVICE),
+			ProgIds:    uint64(uintptr(unsafe.Pointer(&progIds[0]))),
+			ProgCnt:    uint32(len(progIds)),
+		}
+
+		// Fetch the list of program ids.
+		_, _, errno := unix.Syscall(unix.SYS_BPF,
+			uintptr(unix.BPF_PROG_QUERY),
+			uintptr(unsafe.Pointer(&query)),
+			unsafe.Sizeof(query))
+		size = int(query.ProgCnt)
+		runtime.KeepAlive(query)
+		if errno != 0 {
+			// On ENOSPC we get the correct number of programs.
+			if errno == unix.ENOSPC {
+				retries++
+				continue
+			}
+			return nil, fmt.Errorf("bpf_prog_query(BPF_CGROUP_DEVICE) failed: %w", errno)
+		}
+
+		// Convert the ids to program handles.
+		progIds = progIds[:size]
+		programs := make([]*ebpf.Program, 0, len(progIds))
+		for _, progId := range progIds {
+			program, err := ebpf.NewProgramFromID(ebpf.ProgramID(progId))
+			if err != nil {
+				// We skip over programs that give us -EACCES or -EPERM. This
+				// is necessary because there may be BPF programs that have
+				// been attached (such as with --systemd-cgroup) which have an
+				// LSM label that blocks us from interacting with the program.
+				//
+				// Because additional BPF_CGROUP_DEVICE programs can only add
+				// restrictions, there's no real issue with just ignoring these
+				// programs (and doing so stops runc from breaking on
+				// distributions with very strict SELinux policies).
+				if errors.Is(err, os.ErrPermission) {
+					logrus.Debugf("ignoring existing CGROUP_DEVICE program (prog_id=%v) which cannot be accessed by runc -- likely due to LSM policy: %v", progId, err)
+					continue
+				}
+				return nil, fmt.Errorf("cannot fetch program from id: %w", err)
+			}
+			programs = append(programs, program)
+		}
+		runtime.KeepAlive(progIds)
+		return programs, nil
+	}
+
+	return nil, errors.New("could not get complete list of CGROUP_DEVICE programs")
+}
+
+var (
+	haveBpfProgReplaceBool bool
+	haveBpfProgReplaceOnce sync.Once
+)
+
+// Loosely based on the BPF_F_REPLACE support check in
+// https://github.com/cilium/ebpf/blob/v0.6.0/link/syscalls.go.
+//
+// TODO: move this logic to cilium/ebpf
+func haveBpfProgReplace() bool {
+	haveBpfProgReplaceOnce.Do(func() {
+		prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
+			Type:    ebpf.CGroupDevice,
+			License: "MIT",
+			Instructions: asm.Instructions{
+				asm.Mov.Imm(asm.R0, 0),
+				asm.Return(),
+			},
+		})
+		if err != nil {
+			logrus.Debugf("checking for BPF_F_REPLACE support: ebpf.NewProgram failed: %v", err)
+			return
+		}
+		defer prog.Close()
+
+		devnull, err := os.Open("/dev/null")
+		if err != nil {
+			logrus.Debugf("checking for BPF_F_REPLACE support: open dummy target fd: %v", err)
+			return
+		}
+		defer devnull.Close()
+
+		// We know that we have BPF_PROG_ATTACH since we can load
+		// BPF_CGROUP_DEVICE programs. If passing BPF_F_REPLACE gives us EINVAL
+		// we know that the feature isn't present.
+		err = link.RawAttachProgram(link.RawAttachProgramOptions{
+			// We rely on this fd being checked after attachFlags.
+			Target: int(devnull.Fd()),
+			// Attempt to "replace" bad fds with this program.
+			Program: prog,
+			Attach:  ebpf.AttachCGroupDevice,
+			Flags:   unix.BPF_F_ALLOW_MULTI | unix.BPF_F_REPLACE,
+		})
+		if errors.Is(err, unix.EINVAL) {
+			// not supported
+			return
+		}
+		// attach_flags test succeeded.
+		if !errors.Is(err, unix.EBADF) {
+			logrus.Debugf("checking for BPF_F_REPLACE: got unexpected (not EBADF or EINVAL) error: %v", err)
+		}
+		haveBpfProgReplaceBool = true
+	})
+	return haveBpfProgReplaceBool
+}
+
+// LoadAttachCgroupDeviceFilter installs an eBPF device filter program into the
+// given cgroup directory under /sys/fs/cgroup/.
+//
+// Requires the system to be running in cgroup2 unified-mode with kernel >= 4.15.
+//
+// https://github.com/torvalds/linux/commit/ebc614f687369f9df99828572b1d85a7c2de3d92
+func LoadAttachCgroupDeviceFilter(insts asm.Instructions, license string, dirFd int) (func() error, error) {
+	// Increase `ulimit -l` limit to avoid BPF_PROG_LOAD error (#2167).
+	// This limit is not inherited into the container.
+	memlockLimit := &unix.Rlimit{
+		Cur: unix.RLIM_INFINITY,
+		Max: unix.RLIM_INFINITY,
+	}
+	_ = unix.Setrlimit(unix.RLIMIT_MEMLOCK, memlockLimit)
+
+	// Get the list of existing programs.
+	oldProgs, err := findAttachedCgroupDeviceFilters(dirFd)
+	if err != nil {
+		return nilCloser, err
+	}
+	useReplaceProg := haveBpfProgReplace() && len(oldProgs) == 1
+
+	// Generate new program.
+	spec := &ebpf.ProgramSpec{
+		Type:         ebpf.CGroupDevice,
+		Instructions: insts,
+		License:      license,
+	}
+	prog, err := ebpf.NewProgram(spec)
+	if err != nil {
+		return nilCloser, err
+	}
+
+	// If there is only one old program, we can just replace it directly.
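+	// (BPF_F_REPLACE swaps the program atomically, so the cgroup is never
+	// left without a device filter during the update.)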
+	var (
+		replaceProg *ebpf.Program
+		attachFlags uint32 = unix.BPF_F_ALLOW_MULTI
+	)
+	if useReplaceProg {
+		replaceProg = oldProgs[0]
+		attachFlags |= unix.BPF_F_REPLACE
+	}
+	err = link.RawAttachProgram(link.RawAttachProgramOptions{
+		Target:  dirFd,
+		Program: prog,
+		Replace: replaceProg,
+		Attach:  ebpf.AttachCGroupDevice,
+		Flags:   attachFlags,
+	})
+	if err != nil {
+		return nilCloser, fmt.Errorf("failed to call BPF_PROG_ATTACH (BPF_CGROUP_DEVICE, BPF_F_ALLOW_MULTI): %w", err)
+	}
+	closer := func() error {
+		err = link.RawDetachProgram(link.RawDetachProgramOptions{
+			Target:  dirFd,
+			Program: prog,
+			Attach:  ebpf.AttachCGroupDevice,
+		})
+		if err != nil {
+			return fmt.Errorf("failed to call BPF_PROG_DETACH (BPF_CGROUP_DEVICE): %w", err)
+		}
+		// TODO: Should we attach the old filters back in this case? Otherwise
+		//       we fail-open on a security feature, which is a bit scary.
+		return nil
+	}
+	if !useReplaceProg {
+		logLevel := logrus.DebugLevel
+		// If there was more than one old program, give a warning (since this
+		// really shouldn't happen with runc-managed cgroups) and then detach
+		// all the old programs.
+		if len(oldProgs) > 1 {
+			// NOTE: Ideally this should be a warning but it turns out that
+			//       systemd-managed cgroups trigger this warning (apparently
+			//       systemd doesn't delete old non-systemd programs when
+			//       setting properties).
+			logrus.Infof("found more than one filter (%d) attached to a cgroup -- removing extra filters!", len(oldProgs))
+			logLevel = logrus.InfoLevel
+		}
+		for idx, oldProg := range oldProgs {
+			// Output some extra debug info.
+			if info, err := oldProg.Info(); err == nil {
+				fields := logrus.Fields{
+					"type": info.Type.String(),
+					"tag":  info.Tag,
+					"name": info.Name,
+				}
+				if id, ok := info.ID(); ok {
+					fields["id"] = id
+				}
+				if runCount, ok := info.RunCount(); ok {
+					fields["run_count"] = runCount
+				}
+				if runtime, ok := info.Runtime(); ok {
+					fields["runtime"] = runtime.String()
+				}
+				logrus.WithFields(fields).Logf(logLevel, "removing old filter %d from cgroup", idx)
+			}
+			err = link.RawDetachProgram(link.RawDetachProgramOptions{
+				Target:  dirFd,
+				Program: oldProg,
+				Attach:  ebpf.AttachCGroupDevice,
+			})
+			if err != nil {
+				return closer, fmt.Errorf("failed to call BPF_PROG_DETACH (BPF_CGROUP_DEVICE) on old filter program: %w", err)
+			}
+		}
+	}
+	return closer, nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/file.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/file.go
index 0cdaf74784..f6e1b73bd9 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/file.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/file.go
@@ -10,6 +10,7 @@ import (
 	"strings"
 	"sync"
 
+	"github.com/opencontainers/runc/libcontainer/utils"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/unix"
 )
@@ -76,16 +77,16 @@ var (
 	// TestMode is set to true by unit tests that need "fake" cgroupfs.
TestMode bool - cgroupFd int = -1 - prepOnce sync.Once - prepErr error - resolveFlags uint64 + cgroupRootHandle *os.File + prepOnce sync.Once + prepErr error + resolveFlags uint64 ) func prepareOpenat2() error { prepOnce.Do(func() { fd, err := unix.Openat2(-1, cgroupfsDir, &unix.OpenHow{ - Flags: unix.O_DIRECTORY | unix.O_PATH, + Flags: unix.O_DIRECTORY | unix.O_PATH | unix.O_CLOEXEC, }) if err != nil { prepErr = &os.PathError{Op: "openat2", Path: cgroupfsDir, Err: err} @@ -96,15 +97,16 @@ func prepareOpenat2() error { } return } + file := os.NewFile(uintptr(fd), cgroupfsDir) + var st unix.Statfs_t - if err = unix.Fstatfs(fd, &st); err != nil { + if err := unix.Fstatfs(int(file.Fd()), &st); err != nil { prepErr = &os.PathError{Op: "statfs", Path: cgroupfsDir, Err: err} logrus.Warnf("falling back to securejoin: %s", prepErr) return } - cgroupFd = fd - + cgroupRootHandle = file resolveFlags = unix.RESOLVE_BENEATH | unix.RESOLVE_NO_MAGICLINKS if st.Type == unix.CGROUP2_SUPER_MAGIC { // cgroupv2 has a single mountpoint and no "cpu,cpuacct" symlinks @@ -122,7 +124,7 @@ func openFile(dir, file string, flags int) (*os.File, error) { flags |= os.O_TRUNC | os.O_CREATE mode = 0o600 } - path := path.Join(dir, file) + path := path.Join(dir, utils.CleanPath(file)) if prepareOpenat2() != nil { return openFallback(path, flags, mode) } @@ -131,7 +133,7 @@ func openFile(dir, file string, flags int) (*os.File, error) { return openFallback(path, flags, mode) } - fd, err := unix.Openat2(cgroupFd, relPath, + fd, err := unix.Openat2(int(cgroupRootHandle.Fd()), relPath, &unix.OpenHow{ Resolve: resolveFlags, Flags: uint64(flags) | unix.O_CLOEXEC, @@ -139,20 +141,20 @@ func openFile(dir, file string, flags int) (*os.File, error) { }) if err != nil { err = &os.PathError{Op: "openat2", Path: path, Err: err} - // Check if cgroupFd is still opened to cgroupfsDir + // Check if cgroupRootHandle is still opened to cgroupfsDir // (happens when this package is incorrectly used // across the chroot/pivot_root/mntns boundary, or // when /sys/fs/cgroup is remounted). // // TODO: if such usage will ever be common, amend this - // to reopen cgroupFd and retry openat2. - fdStr := strconv.Itoa(cgroupFd) + // to reopen cgroupRootHandle and retry openat2. + fdStr := strconv.Itoa(int(cgroupRootHandle.Fd())) fdDest, _ := os.Readlink("/proc/self/fd/" + fdStr) if fdDest != cgroupfsDir { - // Wrap the error so it is clear that cgroupFd + // Wrap the error so it is clear that cgroupRootHandle // is opened to an unexpected/wrong directory. 
- err = fmt.Errorf("cgroupFd %s unexpectedly opened to %s != %s: %w", - fdStr, fdDest, cgroupfsDir, err) + err = fmt.Errorf("cgroupRootHandle %d unexpectedly opened to %s != %s: %w", + cgroupRootHandle.Fd(), fdDest, cgroupfsDir, err) } return nil, err } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go index 0bf3d9debb..4527a70ebf 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go @@ -1,11 +1,20 @@ package fs import ( + "bytes" + "errors" + "reflect" + "github.com/opencontainers/runc/libcontainer/cgroups" + cgroupdevices "github.com/opencontainers/runc/libcontainer/cgroups/devices" "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/devices" + "github.com/opencontainers/runc/libcontainer/userns" ) -type DevicesGroup struct{} +type DevicesGroup struct { + TestingSkipFinalCheck bool +} func (s *DevicesGroup) Name() string { return "devices" @@ -24,14 +33,75 @@ func (s *DevicesGroup) Apply(path string, r *configs.Resources, pid int) error { return apply(path, pid) } +func loadEmulator(path string) (*cgroupdevices.Emulator, error) { + list, err := cgroups.ReadFile(path, "devices.list") + if err != nil { + return nil, err + } + return cgroupdevices.EmulatorFromList(bytes.NewBufferString(list)) +} + +func buildEmulator(rules []*devices.Rule) (*cgroupdevices.Emulator, error) { + // This defaults to a white-list -- which is what we want! + emu := &cgroupdevices.Emulator{} + for _, rule := range rules { + if err := emu.Apply(*rule); err != nil { + return nil, err + } + } + return emu, nil +} + func (s *DevicesGroup) Set(path string, r *configs.Resources) error { - if cgroups.DevicesSetV1 == nil { - if len(r.Devices) == 0 { - return nil + if userns.RunningInUserNS() || r.SkipDevices { + return nil + } + + // Generate two emulators, one for the current state of the cgroup and one + // for the requested state by the user. + current, err := loadEmulator(path) + if err != nil { + return err + } + target, err := buildEmulator(r.Devices) + if err != nil { + return err + } + + // Compute the minimal set of transition rules needed to achieve the + // requested state. + transitionRules, err := current.Transition(target) + if err != nil { + return err + } + for _, rule := range transitionRules { + file := "devices.deny" + if rule.Allow { + file = "devices.allow" + } + if err := cgroups.WriteFile(path, file, rule.CgroupString()); err != nil { + return err } - return cgroups.ErrDevicesUnsupported } - return cgroups.DevicesSetV1(path, r) + + // Final safety check -- ensure that the resulting state is what was + // requested. This is only really correct for white-lists, but for + // black-lists we can at least check that the cgroup is in the right mode. + // + // This safety-check is skipped for the unit tests because we cannot + // currently mock devices.list correctly. 
+ if !s.TestingSkipFinalCheck { + currentAfter, err := loadEmulator(path) + if err != nil { + return err + } + if !target.IsBlacklist() && !reflect.DeepEqual(currentAfter, target) { + return errors.New("resulting devices cgroup doesn't precisely match target") + } else if target.IsBlacklist() != currentAfter.IsBlacklist() { + return errors.New("resulting devices cgroup doesn't match target mode") + } + } + return nil } func (s *DevicesGroup) GetStats(path string, stats *cgroups.Stats) error { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs.go index be4dcc341b..9e2f0ec04c 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs.go @@ -28,6 +28,7 @@ var subsystems = []subsystem{ &FreezerGroup{}, &RdmaGroup{}, &NameGroup{GroupName: "name=systemd", Join: true}, + &NameGroup{GroupName: "misc", Join: true}, } var errSubsystemDoesNotExist = errors.New("cgroup: subsystem does not exist") @@ -182,7 +183,7 @@ func (m *manager) Set(r *configs.Resources) error { if err := sys.Set(path, r); err != nil { // When rootless is true, errors from the device subsystem // are ignored, as it is really not expected to work. - if m.cgroups.Rootless && sys.Name() == "devices" && !errors.Is(err, cgroups.ErrDevicesUnsupported) { + if m.cgroups.Rootless && sys.Name() == "devices" { continue } // However, errors from other subsystems are not ignored. diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go index 8ddd6fdd83..50f8f30cd3 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go @@ -1,6 +1,8 @@ package fs import ( + "errors" + "os" "strconv" "github.com/opencontainers/runc/libcontainer/cgroups" @@ -19,8 +21,23 @@ func (s *HugetlbGroup) Apply(path string, _ *configs.Resources, pid int) error { } func (s *HugetlbGroup) Set(path string, r *configs.Resources) error { + const suffix = ".limit_in_bytes" + skipRsvd := false + for _, hugetlb := range r.HugetlbLimit { - if err := cgroups.WriteFile(path, "hugetlb."+hugetlb.Pagesize+".limit_in_bytes", strconv.FormatUint(hugetlb.Limit, 10)); err != nil { + prefix := "hugetlb." + hugetlb.Pagesize + val := strconv.FormatUint(hugetlb.Limit, 10) + if err := cgroups.WriteFile(path, prefix+suffix, val); err != nil { + return err + } + if skipRsvd { + continue + } + if err := cgroups.WriteFile(path, prefix+".rsvd"+suffix, val); err != nil { + if errors.Is(err, os.ErrNotExist) { + skipRsvd = true + continue + } return err } } @@ -32,24 +49,29 @@ func (s *HugetlbGroup) GetStats(path string, stats *cgroups.Stats) error { if !cgroups.PathExists(path) { return nil } + rsvd := ".rsvd" hugetlbStats := cgroups.HugetlbStats{} for _, pageSize := range cgroups.HugePageSizes() { - usage := "hugetlb." + pageSize + ".usage_in_bytes" - value, err := fscommon.GetCgroupParamUint(path, usage) + again: + prefix := "hugetlb." + pageSize + rsvd + + value, err := fscommon.GetCgroupParamUint(path, prefix+".usage_in_bytes") if err != nil { + if rsvd != "" && errors.Is(err, os.ErrNotExist) { + rsvd = "" + goto again + } return err } hugetlbStats.Usage = value - maxUsage := "hugetlb." 
+ pageSize + ".max_usage_in_bytes"
-	value, err = fscommon.GetCgroupParamUint(path, maxUsage)
+	value, err = fscommon.GetCgroupParamUint(path, prefix+".max_usage_in_bytes")
 	if err != nil {
 		return err
 	}
 	hugetlbStats.MaxUsage = value
-	failcnt := "hugetlb." + pageSize + ".failcnt"
-	value, err = fscommon.GetCgroupParamUint(path, failcnt)
+	value, err = fscommon.GetCgroupParamUint(path, prefix+".failcnt")
 	if err != nil {
 		return err
 	}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go
index b7c75f9413..783566d68f 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go
@@ -170,6 +170,10 @@ func (s *MemoryGroup) GetStats(path string, stats *cgroups.Stats) error {
 		return err
 	}
 	stats.MemoryStats.SwapUsage = swapUsage
+	stats.MemoryStats.SwapOnlyUsage = cgroups.MemoryData{
+		Usage:   swapUsage.Usage - memoryUsage.Usage,
+		Failcnt: swapUsage.Failcnt - memoryUsage.Failcnt,
+	}
 	kernelUsage, err := getMemoryData(path, "kmem")
 	if err != nil {
 		return err
@@ -234,6 +238,12 @@ func getMemoryData(path, name string) (cgroups.MemoryData, error) {
 	memoryData.Failcnt = value
 	value, err = fscommon.GetCgroupParamUint(path, limit)
 	if err != nil {
+		if name == "kmem" && os.IsNotExist(err) {
+			// Ignore ENOENT as kmem.limit_in_bytes has
+			// been removed in newer kernels.
+			return memoryData, nil
+		}
+
 		return cgroups.MemoryData{}, err
 	}
 	memoryData.Limit = value
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/paths.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/paths.go
index 1092331b25..2cb970a3d5 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/paths.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/paths.go
@@ -83,6 +83,7 @@ func tryDefaultCgroupRoot() string {
 	if err != nil {
 		return ""
 	}
+	defer dir.Close()
 	names, err := dir.Readdirnames(1)
 	if err != nil {
 		return ""
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/devices.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/devices.go
new file mode 100644
index 0000000000..0d23456072
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/devices.go
@@ -0,0 +1,75 @@
+package fs2
+
+import (
+	"fmt"
+
+	"golang.org/x/sys/unix"
+
+	"github.com/opencontainers/runc/libcontainer/cgroups/ebpf"
+	"github.com/opencontainers/runc/libcontainer/cgroups/ebpf/devicefilter"
+	"github.com/opencontainers/runc/libcontainer/configs"
+	"github.com/opencontainers/runc/libcontainer/devices"
+	"github.com/opencontainers/runc/libcontainer/userns"
+)
+
+func isRWM(perms devices.Permissions) bool {
+	var r, w, m bool
+	for _, perm := range perms {
+		switch perm {
+		case 'r':
+			r = true
+		case 'w':
+			w = true
+		case 'm':
+			m = true
+		}
+	}
+	return r && w && m
+}
+
+// This is similar to the logic applied in crun for handling errors from bpf(2).
+func canSkipEBPFError(r *configs.Resources) bool {
+	// If we're running in a user namespace we can ignore eBPF rules because we
+	// usually cannot use bpf(2), and rootless containers usually don't
+	// have the necessary privileges to mknod(2) device inodes or access
+	// host-level instances (though ideally we would be blocking device access
+	// for rootless containers anyway).
+	if userns.RunningInUserNS() {
+		return true
+	}
+
+	// We cannot ignore an eBPF load error if any rule is a block rule or it
+	// doesn't permit all access modes.
+	//
+	// NOTE: This will sometimes trigger in cases where access modes are split
+	// between different rules, but to handle this correctly would require
+	// using ".../libcontainer/cgroup/devices".Emulator.
+	for _, dev := range r.Devices {
+		if !dev.Allow || !isRWM(dev.Permissions) {
+			return false
+		}
+	}
+	return true
+}
+
+func setDevices(dirPath string, r *configs.Resources) error {
+	if r.SkipDevices {
+		return nil
+	}
+	insts, license, err := devicefilter.DeviceFilter(r.Devices)
+	if err != nil {
+		return err
+	}
+	dirFD, err := unix.Open(dirPath, unix.O_DIRECTORY|unix.O_RDONLY, 0o600)
+	if err != nil {
+		return fmt.Errorf("cannot get dir FD for %s", dirPath)
+	}
+	defer unix.Close(dirFD)
+	if _, err := ebpf.LoadAttachCgroupDeviceFilter(insts, license, dirFD); err != nil {
+		if !canSkipEBPFError(r) {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go
index d5208d7782..492778e310 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go
@@ -175,10 +175,8 @@ func (m *manager) Set(r *configs.Resources) error {
 	// When rootless is true, errors from the device subsystem are ignored because it is really not expected to work.
 	// However, errors from other subsystems are not ignored.
 	// see @test "runc create (rootless + limits + no cgrouppath + no permission) fails with informative error"
-	if err := setDevices(m.dirPath, r); err != nil {
-		if !m.config.Rootless || errors.Is(err, cgroups.ErrDevicesUnsupported) {
-			return err
-		}
+	if err := setDevices(m.dirPath, r); err != nil && !m.config.Rootless {
+		return err
 	}
 	// cpuset (since kernel 5.0)
 	if err := setCpuset(m.dirPath, r); err != nil {
@@ -203,16 +201,6 @@ func (m *manager) Set(r *configs.Resources) error {
 	return nil
 }
 
-func setDevices(dirPath string, r *configs.Resources) error {
-	if cgroups.DevicesSetV2 == nil {
-		if len(r.Devices) > 0 {
-			return cgroups.ErrDevicesUnsupported
-		}
-		return nil
-	}
-	return cgroups.DevicesSetV2(dirPath, r)
-}
-
 func (m *manager) setUnified(res map[string]string) error {
 	for k, v := range res {
 		if strings.Contains(k, "/") {
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go
index c92a7e64af..2ce2631e18 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go
@@ -1,6 +1,8 @@
 package fs2
 
 import (
+	"errors"
+	"os"
 	"strconv"
 
 	"github.com/opencontainers/runc/libcontainer/cgroups"
@@ -16,8 +18,22 @@ func setHugeTlb(dirPath string, r *configs.Resources) error {
 	if !isHugeTlbSet(r) {
 		return nil
 	}
+	const suffix = ".max"
+	skipRsvd := false
 	for _, hugetlb := range r.HugetlbLimit {
-		if err := cgroups.WriteFile(dirPath, "hugetlb."+hugetlb.Pagesize+".max", strconv.FormatUint(hugetlb.Limit, 10)); err != nil {
+		prefix := "hugetlb."
+ hugetlb.Pagesize + val := strconv.FormatUint(hugetlb.Limit, 10) + if err := cgroups.WriteFile(dirPath, prefix+suffix, val); err != nil { + return err + } + if skipRsvd { + continue + } + if err := cgroups.WriteFile(dirPath, prefix+".rsvd"+suffix, val); err != nil { + if errors.Is(err, os.ErrNotExist) { + skipRsvd = true + continue + } return err } } @@ -27,15 +43,21 @@ func setHugeTlb(dirPath string, r *configs.Resources) error { func statHugeTlb(dirPath string, stats *cgroups.Stats) error { hugetlbStats := cgroups.HugetlbStats{} + rsvd := ".rsvd" for _, pagesize := range cgroups.HugePageSizes() { - value, err := fscommon.GetCgroupParamUint(dirPath, "hugetlb."+pagesize+".current") + again: + prefix := "hugetlb." + pagesize + rsvd + value, err := fscommon.GetCgroupParamUint(dirPath, prefix+".current") if err != nil { + if rsvd != "" && errors.Is(err, os.ErrNotExist) { + rsvd = "" + goto again + } return err } hugetlbStats.Usage = value - fileName := "hugetlb." + pagesize + ".events" - value, err = fscommon.GetValueByKey(dirPath, fileName, "max") + value, err = fscommon.GetValueByKey(dirPath, prefix+".events", "max") if err != nil { return err } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/memory.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/memory.go index adbc4b2308..01fe7d8e12 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/memory.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/memory.go @@ -100,17 +100,20 @@ func statMemory(dirPath string, stats *cgroups.Stats) error { memoryUsage, err := getMemoryDataV2(dirPath, "") if err != nil { if errors.Is(err, unix.ENOENT) && dirPath == UnifiedMountpoint { - // The root cgroup does not have memory.{current,max} - // so emulate those using data from /proc/meminfo. - return statsFromMeminfo(stats) + // The root cgroup does not have memory.{current,max,peak} + // so emulate those using data from /proc/meminfo and + // /sys/fs/cgroup/memory.stat + return rootStatsFromMeminfo(stats) } return err } stats.MemoryStats.Usage = memoryUsage - swapUsage, err := getMemoryDataV2(dirPath, "swap") + swapOnlyUsage, err := getMemoryDataV2(dirPath, "swap") if err != nil { return err } + stats.MemoryStats.SwapOnlyUsage = swapOnlyUsage + swapUsage := swapOnlyUsage // As cgroup v1 reports SwapUsage values as mem+swap combined, // while in cgroup v2 swap values do not include memory, // report combined mem+swap for v1 compatibility. @@ -118,6 +121,9 @@ func statMemory(dirPath string, stats *cgroups.Stats) error { if swapUsage.Limit != math.MaxUint64 { swapUsage.Limit += memoryUsage.Limit } + // The `MaxUsage` of mem+swap cannot simply combine mem with + // swap. So set it to 0 for v1 compatibility. 
+ swapUsage.MaxUsage = 0 stats.MemoryStats.SwapUsage = swapUsage return nil @@ -132,6 +138,7 @@ func getMemoryDataV2(path, name string) (cgroups.MemoryData, error) { } usage := moduleName + ".current" limit := moduleName + ".max" + maxUsage := moduleName + ".peak" value, err := fscommon.GetCgroupParamUint(path, usage) if err != nil { @@ -151,10 +158,18 @@ func getMemoryDataV2(path, name string) (cgroups.MemoryData, error) { } memoryData.Limit = value + // `memory.peak` since kernel 5.19 + // `memory.swap.peak` since kernel 6.5 + value, err = fscommon.GetCgroupParamUint(path, maxUsage) + if err != nil && !os.IsNotExist(err) { + return cgroups.MemoryData{}, err + } + memoryData.MaxUsage = value + return memoryData, nil } -func statsFromMeminfo(stats *cgroups.Stats) error { +func rootStatsFromMeminfo(stats *cgroups.Stats) error { const file = "/proc/meminfo" f, err := os.Open(file) if err != nil { @@ -166,14 +181,10 @@ func statsFromMeminfo(stats *cgroups.Stats) error { var ( swap_free uint64 swap_total uint64 - main_total uint64 - main_free uint64 ) mem := map[string]*uint64{ "SwapFree": &swap_free, "SwapTotal": &swap_total, - "MemTotal": &main_total, - "MemFree": &main_free, } found := 0 @@ -206,11 +217,18 @@ func statsFromMeminfo(stats *cgroups.Stats) error { return &parseError{Path: "", File: file, Err: err} } + // cgroup v1 `usage_in_bytes` reports memory usage as the sum of + // - rss (NR_ANON_MAPPED) + // - cache (NR_FILE_PAGES) + // cgroup v1 reports SwapUsage values as mem+swap combined + // cgroup v2 reports rss and cache as anon and file. + // sum `anon` + `file` to report the same value as `usage_in_bytes` in v1. + // sum swap usage as combined mem+swap usage for consistency as well. + stats.MemoryStats.Usage.Usage = stats.MemoryStats.Stats["anon"] + stats.MemoryStats.Stats["file"] + stats.MemoryStats.Usage.Limit = math.MaxUint64 stats.MemoryStats.SwapUsage.Usage = (swap_total - swap_free) * 1024 stats.MemoryStats.SwapUsage.Limit = math.MaxUint64 - - stats.MemoryStats.Usage.Usage = (main_total - main_free) * 1024 - stats.MemoryStats.Usage.Limit = math.MaxUint64 + stats.MemoryStats.SwapUsage.Usage += stats.MemoryStats.Usage.Usage return nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go index 40a81dd5a8..0d8371b05f 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go @@ -78,6 +78,8 @@ type MemoryStats struct { Usage MemoryData `json:"usage,omitempty"` // usage of memory + swap SwapUsage MemoryData `json:"swap_usage,omitempty"` + // usage of swap only + SwapOnlyUsage MemoryData `json:"swap_only_usage,omitempty"` // usage of kernel memory KernelUsage MemoryData `json:"kernel_usage,omitempty"` // usage of kernel TCP memory diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go index b32af4ee53..fc4ae44a48 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go @@ -162,8 +162,10 @@ func readProcsFile(dir string) ([]int, error) { // ParseCgroupFile parses the given cgroup file, typically /proc/self/cgroup // or /proc//cgroup, into a map of subsystems to cgroup paths, e.g. 
-// "cpu": "/user.slice/user-1000.slice" -// "pids": "/user.slice/user-1000.slice" +// +// "cpu": "/user.slice/user-1000.slice" +// "pids": "/user.slice/user-1000.slice" +// // etc. // // Note that for cgroup v2 unified hierarchy, there are no per-controller diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go index 865344f99c..fa195bf90f 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go @@ -2,8 +2,8 @@ package configs import "fmt" -// BlockIODevice holds major:minor format supported in blkio cgroup. -type BlockIODevice struct { +// blockIODevice holds major:minor format supported in blkio cgroup +type blockIODevice struct { // Major is the device's major number Major int64 `json:"major"` // Minor is the device's minor number @@ -12,7 +12,7 @@ type BlockIODevice struct { // WeightDevice struct holds a `major:minor weight`|`major:minor leaf_weight` pair type WeightDevice struct { - BlockIODevice + blockIODevice // Weight is the bandwidth rate for the device, range is from 10 to 1000 Weight uint16 `json:"weight"` // LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only @@ -41,7 +41,7 @@ func (wd *WeightDevice) LeafWeightString() string { // ThrottleDevice struct holds a `major:minor rate_per_second` pair type ThrottleDevice struct { - BlockIODevice + blockIODevice // Rate is the IO rate limit per cgroup per device Rate uint64 `json:"rate"` } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go index 7cf2fb6575..6ebf5ec7b6 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go @@ -21,9 +21,9 @@ type Rlimit struct { // IDMap represents UID/GID Mappings for User Namespaces. type IDMap struct { - ContainerID int `json:"container_id"` - HostID int `json:"host_id"` - Size int `json:"size"` + ContainerID int64 `json:"container_id"` + HostID int64 `json:"host_id"` + Size int64 `json:"size"` } // Seccomp represents syscall restrictions @@ -83,6 +83,9 @@ type Syscall struct { Args []*Arg `json:"args"` } +// TODO Windows. Many of these fields should be factored out into those parts +// which are common across platforms, and those which are platform specific. + // Config defines configuration options for executing a process inside a contained environment. 
type Config struct { // NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go index 8c02848b70..51fe940748 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go @@ -1,6 +1,10 @@ package configs -import "errors" +import ( + "errors" + "fmt" + "math" +) var ( errNoUIDMap = errors.New("User namespaces enabled, but no uid mappings found.") @@ -16,11 +20,18 @@ func (c Config) HostUID(containerId int) (int, error) { if c.UidMappings == nil { return -1, errNoUIDMap } - id, found := c.hostIDFromMapping(containerId, c.UidMappings) + id, found := c.hostIDFromMapping(int64(containerId), c.UidMappings) if !found { return -1, errNoUserMap } - return id, nil + // If we are a 32-bit binary running on a 64-bit system, it's possible + // the mapped user is too large to store in an int, which means we + // cannot do the mapping. We can't just return an int64, because + // os.Setuid() takes an int. + if id > math.MaxInt { + return -1, fmt.Errorf("mapping for uid %d (host id %d) is larger than native integer size (%d)", containerId, id, math.MaxInt) + } + return int(id), nil } // Return unchanged id. return containerId, nil @@ -39,11 +50,18 @@ func (c Config) HostGID(containerId int) (int, error) { if c.GidMappings == nil { return -1, errNoGIDMap } - id, found := c.hostIDFromMapping(containerId, c.GidMappings) + id, found := c.hostIDFromMapping(int64(containerId), c.GidMappings) if !found { return -1, errNoGroupMap } - return id, nil + // If we are a 32-bit binary running on a 64-bit system, it's possible + // the mapped user is too large to store in an int, which means we + // cannot do the mapping. We can't just return an int64, because + // os.Setgid() takes an int. + if id > math.MaxInt { + return -1, fmt.Errorf("mapping for gid %d (host id %d) is larger than native integer size (%d)", containerId, id, math.MaxInt) + } + return int(id), nil } // Return unchanged id. return containerId, nil @@ -57,7 +75,7 @@ func (c Config) HostRootGID() (int, error) { // Utility function that gets a host ID for a container ID from user namespace map // if that ID is present in the map. -func (c Config) hostIDFromMapping(containerID int, uMap []IDMap) (int, bool) { +func (c Config) hostIDFromMapping(containerID int64, uMap []IDMap) (int64, bool) { for _, m := range uMap { if (containerID >= m.ContainerID) && (containerID <= (m.ContainerID + m.Size - 1)) { hostID := m.HostID + (containerID - m.ContainerID) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go index b4c616d553..784c618205 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go @@ -35,6 +35,12 @@ type Mount struct { // Extensions are additional flags that are specific to runc. Extensions int `json:"extensions"` + + // Optional Command to be run before Source is mounted. + PremountCmds []Command `json:"premount_cmds"` + + // Optional Command to be run after Source is mounted. 
+	PostmountCmds []Command `json:"postmount_cmds"`
 }
 
 func (m *Mount) IsBind() bool {
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go
index 2473c5eadd..984466d1ab 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go
@@ -201,7 +201,7 @@ func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) {
 		if err != nil {
 			// We should return no error if EOF is reached
 			// without a match.
-			if err == io.EOF { //nolint:errorlint // comparison with io.EOF is legit, https://github.com/polyfloyd/go-errorlint/pull/12
+			if err == io.EOF {
 				err = nil
 			}
 			return out, err
@@ -280,13 +280,13 @@ func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath
 // found in any entry in passwd and group respectively.
 //
 // Examples of valid user specifications are:
-// * ""
-// * "user"
-// * "uid"
-// * "user:group"
-// * "uid:gid
-// * "user:gid"
-// * "uid:group"
+// - ""
+// - "user"
+// - "uid"
+// - "user:group"
+// - "uid:gid"
+// - "user:gid"
+// - "uid:group"
 //
 // It should be noted that if you specify a numeric user or group id, they will
 // not be evaluated as usernames (only the metadata will be filled). So attempting
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_maps.c b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_maps.c
new file mode 100644
index 0000000000..84f2c6188c
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_maps.c
@@ -0,0 +1,79 @@
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <sched.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+/*
+ * All of the code here is run inside an async-signal-safe context, so we need
+ * to be careful to not call any functions that could cause issues. In theory,
+ * since we are a Go program, there are fewer restrictions; in practice, it's
+ * better to be safe than sorry.
+ *
+ * The only exception is exit, which we need to call to make sure we don't
+ * return into runc.
+ */
+
+void bail(int pipefd, const char *fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	vdprintf(pipefd, fmt, args);
+	va_end(args);
+
+	exit(1);
+}
+
+int spawn_userns_cat(char *userns_path, char *path, int outfd, int errfd)
+{
+	char buffer[4096] = { 0 };
+
+	pid_t child = fork();
+	if (child != 0)
+		return child;
+	/* in child */
+
+	/* Join the target userns. */
+	int nsfd = open(userns_path, O_RDONLY);
+	if (nsfd < 0)
+		bail(errfd, "open userns path %s failed: %m", userns_path);
+
+	int err = setns(nsfd, CLONE_NEWUSER);
+	if (err < 0)
+		bail(errfd, "setns %s failed: %m", userns_path);
+
+	close(nsfd);
+
+	/* Pipe the requested file contents. */
+	int fd = open(path, O_RDONLY);
+	if (fd < 0)
+		bail(errfd, "open %s in userns %s failed: %m", path, userns_path);
+
+	int nread, ntotal = 0;
+	while ((nread = read(fd, buffer, sizeof(buffer))) != 0) {
+		if (nread < 0)
+			bail(errfd, "read bytes from %s failed (after %d total bytes read): %m", path, ntotal);
+		ntotal += nread;
+
+		int nwritten = 0;
+		while (nwritten < nread) {
+			int n = write(outfd, buffer, nread - nwritten);
+			if (n < 0)
+				bail(errfd, "write %d bytes from %s failed (after %d bytes written): %m",
+				     nread - nwritten, path, nwritten);
+			nwritten += n;
+		}
+		if (nread != nwritten)
+			bail(errfd, "mismatch for bytes read and written: %d read != %d written", nread, nwritten);
+	}
+
+	close(fd);
+	close(outfd);
+	close(errfd);
+
+	/* We must exit here, otherwise we would return into a forked runc. */
+	exit(0);
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_maps_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_maps_linux.go
new file mode 100644
index 0000000000..7a8c2b023b
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_maps_linux.go
@@ -0,0 +1,186 @@
+//go:build linux
+
+package userns
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"unsafe"
+
+	"github.com/opencontainers/runc/libcontainer/configs"
+	"github.com/sirupsen/logrus"
+)
+
+/*
+#include <stdlib.h>
+extern int spawn_userns_cat(char *userns_path, char *path, int outfd, int errfd);
+*/
+import "C"
+
+func parseIdmapData(data []byte) (ms []configs.IDMap, err error) {
+	scanner := bufio.NewScanner(bytes.NewReader(data))
+	for scanner.Scan() {
+		var m configs.IDMap
+		line := scanner.Text()
+		if _, err := fmt.Sscanf(line, "%d %d %d", &m.ContainerID, &m.HostID, &m.Size); err != nil {
+			return nil, fmt.Errorf("parsing id map failed: invalid format in line %q: %w", line, err)
+		}
+		ms = append(ms, m)
+	}
+	if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("parsing id map failed: %w", err)
+	}
+	return ms, nil
+}
+
+// Do something equivalent to nsenter --user=<nsPath> cat <path>, but more
+// efficiently. Returns the contents of the requested file from within the user
+// namespace.
+func spawnUserNamespaceCat(nsPath string, path string) ([]byte, error) {
+	rdr, wtr, err := os.Pipe()
+	if err != nil {
+		return nil, fmt.Errorf("create pipe for userns spawn failed: %w", err)
+	}
+	defer rdr.Close()
+	defer wtr.Close()
+
+	errRdr, errWtr, err := os.Pipe()
+	if err != nil {
+		return nil, fmt.Errorf("create error pipe for userns spawn failed: %w", err)
+	}
+	defer errRdr.Close()
+	defer errWtr.Close()
+
+	cNsPath := C.CString(nsPath)
+	defer C.free(unsafe.Pointer(cNsPath))
+	cPath := C.CString(path)
+	defer C.free(unsafe.Pointer(cPath))
+
+	childPid := C.spawn_userns_cat(cNsPath, cPath, C.int(wtr.Fd()), C.int(errWtr.Fd()))
+
+	if childPid < 0 {
+		return nil, fmt.Errorf("failed to spawn fork for userns")
+	} else if childPid == 0 {
+		// this should never happen
+		panic("runc executing inside fork child -- unsafe state!")
+	}
+
+	// We are in the parent -- close the write end of the pipe before reading.
+	wtr.Close()
+	output, err := io.ReadAll(rdr)
+	rdr.Close()
+	if err != nil {
+		return nil, fmt.Errorf("reading from userns spawn failed: %w", err)
+	}
+
+	// Ditto for the error pipe.
+	errWtr.Close()
+	errOutput, err := io.ReadAll(errRdr)
+	errRdr.Close()
+	if err != nil {
+		return nil, fmt.Errorf("reading from userns spawn error pipe failed: %w", err)
+	}
+	errOutput = bytes.TrimSpace(errOutput)
+
+	// Clean up the child.
+	child, err := os.FindProcess(int(childPid))
+	if err != nil {
+		return nil, fmt.Errorf("could not find userns spawn process: %w", err)
+	}
+	state, err := child.Wait()
+	if err != nil {
+		return nil, fmt.Errorf("failed to wait for userns spawn process: %w", err)
+	}
+	if !state.Success() {
+		errStr := string(errOutput)
+		if errStr == "" {
+			errStr = fmt.Sprintf("unknown error (status code %d)", state.ExitCode())
+		}
+		return nil, fmt.Errorf("userns spawn: %s", errStr)
+	} else if len(errOutput) > 0 {
+		// We can just ignore weird output in the error pipe if the process
+		// didn't bail(), but for completeness we log it to help debugging.
+		logrus.Debugf("userns spawn succeeded but unexpected error message found: %s", string(errOutput))
+	}
+	// The subprocess succeeded, return whatever it wrote to the pipe.
+	return output, nil
+}
+
+func GetUserNamespaceMappings(nsPath string) (uidMap, gidMap []configs.IDMap, err error) {
+	var (
+		pid         int
+		extra       rune
+		tryFastPath bool
+	)
+
+	// nsPath is usually of the form /proc/<pid>/ns/user, which means that we
+	// already have a pid that is part of the user namespace and thus we can
+	// just use the pid to read from /proc/<pid>/*id_map.
+	//
+	// Note that Sscanf doesn't consume the whole input, so we check for any
+	// trailing data with %c. That way, we can be sure the pattern matched
+	// /proc/$pid/ns/user _exactly_ iff n == 1.
+	if n, _ := fmt.Sscanf(nsPath, "/proc/%d/ns/user%c", &pid, &extra); n == 1 {
+		tryFastPath = pid > 0
+	}
+
+	for _, mapType := range []struct {
+		name  string
+		idMap *[]configs.IDMap
+	}{
+		{"uid_map", &uidMap},
+		{"gid_map", &gidMap},
+	} {
+		var mapData []byte
+
+		if tryFastPath {
+			path := fmt.Sprintf("/proc/%d/%s", pid, mapType.name)
+			data, err := os.ReadFile(path)
+			if err != nil {
+				// Do not error out here -- we need to try the slow path if the
+				// fast path failed.
+				logrus.Debugf("failed to use fast path to read %s from userns %s (error: %s), falling back to slow userns-join path", mapType.name, nsPath, err)
+			} else {
+				mapData = data
+			}
+		} else {
+			logrus.Debugf("cannot use fast path to read %s from userns %s, falling back to slow userns-join path", mapType.name, nsPath)
+		}
+
+		if mapData == nil {
+			// We have to actually join the namespace if we cannot take the
+			// fast path. The path is resolved with respect to the child
+			// process, so just use /proc/self.
+			data, err := spawnUserNamespaceCat(nsPath, "/proc/self/"+mapType.name)
+			if err != nil {
+				return nil, nil, err
+			}
+			mapData = data
+		}
+		idMap, err := parseIdmapData(mapData)
+		if err != nil {
+			return nil, nil, fmt.Errorf("failed to parse %s of userns %s: %w", mapType.name, nsPath, err)
+		}
+		*mapType.idMap = idMap
+	}
+
+	return uidMap, gidMap, nil
+}
+
+// IsSameMapping returns whether or not the two id mappings are the same. Note
+// that if the order of the mappings is different, or a mapping has been split,
+// the mappings will be considered different.
+func IsSameMapping(a, b []configs.IDMap) bool { + if len(a) != len(b) { + return false + } + for idx := range a { + if a[idx] != b[idx] { + return false + } + } + return true +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go index dbd4353416..6b9fc34352 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go @@ -132,16 +132,19 @@ func WithProcfd(root, unsafePath string, fn func(procfd string) error) error { return fn(procfd) } -// SearchLabels searches through a list of key=value pairs for a given key, -// returning its value, and the binary flag telling whether the key exist. -func SearchLabels(labels []string, key string) (string, bool) { - key += "=" - for _, s := range labels { - if strings.HasPrefix(s, key) { - return s[len(key):], true +// SearchLabels searches a list of key-value pairs for the provided key and +// returns the corresponding value. The pairs must be separated with '='. +func SearchLabels(labels []string, query string) string { + for _, l := range labels { + parts := strings.SplitN(l, "=", 2) + if len(parts) < 2 { + continue + } + if parts[0] == query { + return parts[1] } } - return "", false + return "" } // Annotations returns the bundle path and user defined annotations from the diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go index 220d0b4393..bf3237a291 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go @@ -7,6 +7,7 @@ import ( "fmt" "os" "strconv" + _ "unsafe" // for go:linkname "golang.org/x/sys/unix" ) @@ -23,9 +24,11 @@ func EnsureProcHandle(fh *os.File) error { return nil } -// CloseExecFrom applies O_CLOEXEC to all file descriptors currently open for -// the process (except for those below the given fd value). -func CloseExecFrom(minFd int) error { +type fdFunc func(fd int) + +// fdRangeFrom calls the passed fdFunc for each file descriptor that is open in +// the current process. +func fdRangeFrom(minFd int, fn fdFunc) error { fdDir, err := os.Open("/proc/self/fd") if err != nil { return err @@ -50,15 +53,60 @@ func CloseExecFrom(minFd int) error { if fd < minFd { continue } - // Intentionally ignore errors from unix.CloseOnExec -- the cases where - // this might fail are basically file descriptors that have already - // been closed (including and especially the one that was created when - // os.ReadDir did the "opendir" syscall). - unix.CloseOnExec(fd) + // Ignore the file descriptor we used for readdir, as it will be closed + // when we return. + if uintptr(fd) == fdDir.Fd() { + continue + } + // Run the closure. + fn(fd) } return nil } +// CloseExecFrom sets the O_CLOEXEC flag on all file descriptors greater or +// equal to minFd in the current process. +func CloseExecFrom(minFd int) error { + return fdRangeFrom(minFd, unix.CloseOnExec) +} + +//go:linkname runtime_IsPollDescriptor internal/poll.IsPollDescriptor + +// In order to make sure we do not close the internal epoll descriptors the Go +// runtime uses, we need to ensure that we skip descriptors that match +// "internal/poll".IsPollDescriptor. 
Yes, this is a Go runtime internal thing,
+// unfortunately there's no other way to be sure we're only keeping the file
+// descriptors the Go runtime needs. Hopefully nothing blows up doing this...
+func runtime_IsPollDescriptor(fd uintptr) bool //nolint:revive
+
+// UnsafeCloseFrom closes all file descriptors greater or equal to minFd in the
+// current process, except for those critical to Go's runtime (such as the
+// netpoll management descriptors).
+//
+// NOTE: This function is incredibly dangerous to use in most Go code, as
+// closing file descriptors from underneath *os.File handles can lead to very
+// bad behaviour (the closed file descriptor can be re-used and then any
+// *os.File operations would apply to the wrong file). This function is only
+// intended to be called from the last stage of runc init.
+func UnsafeCloseFrom(minFd int) error {
+	// We must not close some file descriptors.
+	return fdRangeFrom(minFd, func(fd int) {
+		if runtime_IsPollDescriptor(uintptr(fd)) {
+			// These are the Go runtime's internal netpoll file descriptors.
+			// These file descriptors are operated on deep in the Go scheduler,
+			// and closing those files from underneath Go can result in panics.
+			// There is no issue with keeping them because they are not
+			// executable and are not useful to an attacker anyway. Also we
+			// don't have any choice.
+			return
+		}
+		// There's nothing we can do about errors from close(2), and the
+		// only likely error to be seen is EBADF which indicates the fd was
+		// already closed (in which case, we got what we wanted).
+		_ = unix.Close(fd)
+	})
+}
+
 // NewSockPair returns a new unix socket pair
 func NewSockPair(name string) (parent *os.File, child *os.File, err error) {
 	fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM|unix.SOCK_CLOEXEC, 0)
diff --git a/vendor/modules.txt b/vendor/modules.txt
index d9d0252ba3..6a77e49e6f 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -64,6 +64,14 @@ github.com/checkpoint-restore/go-criu/v6/stats
 # github.com/chzyer/readline v1.5.1
 ## explicit; go 1.15
 github.com/chzyer/readline
+# github.com/cilium/ebpf v0.7.0
+## explicit; go 1.16
+github.com/cilium/ebpf
+github.com/cilium/ebpf/asm
+github.com/cilium/ebpf/internal
+github.com/cilium/ebpf/internal/btf
+github.com/cilium/ebpf/internal/unix
+github.com/cilium/ebpf/link
 # github.com/container-orchestrated-devices/container-device-interface v0.5.3
 ## explicit; go 1.17
 github.com/container-orchestrated-devices/container-device-interface/internal/multierror
@@ -348,7 +356,7 @@ github.com/coreos/stream-metadata-go/stream/rhcos
 # github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7
 ## explicit
 github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer
-# github.com/cyphar/filepath-securejoin v0.2.3
+# github.com/cyphar/filepath-securejoin v0.2.4
 ## explicit; go 1.13
 github.com/cyphar/filepath-securejoin
 # github.com/davecgh/go-spew v1.1.1
@@ -706,10 +714,13 @@ github.com/opencontainers/go-digest
 ## explicit; go 1.17
 github.com/opencontainers/image-spec/specs-go
 github.com/opencontainers/image-spec/specs-go/v1
-# github.com/opencontainers/runc v1.1.4 => github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc
+# github.com/opencontainers/runc v1.1.12
## explicit; go 1.17
 github.com/opencontainers/runc/libcontainer/apparmor
 github.com/opencontainers/runc/libcontainer/cgroups
+github.com/opencontainers/runc/libcontainer/cgroups/devices
+github.com/opencontainers/runc/libcontainer/cgroups/ebpf
+github.com/opencontainers/runc/libcontainer/cgroups/ebpf/devicefilter github.com/opencontainers/runc/libcontainer/cgroups/fs github.com/opencontainers/runc/libcontainer/cgroups/fs2 github.com/opencontainers/runc/libcontainer/cgroups/fscommon @@ -1129,4 +1140,3 @@ gopkg.in/yaml.v3 # sigs.k8s.io/yaml v1.3.0 ## explicit; go 1.12 sigs.k8s.io/yaml -# github.com/opencontainers/runc => github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc
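
A note on the devices-cgroup rework vendored above: DevicesGroup.Set no longer rewrites the whole device list; it builds one Emulator from the kernel's current devices.list and one from the requested rules, then writes only the computed transition to devices.allow/devices.deny. The sketch below is a rough, hedged illustration of that vendored API -- the cgroup path and the sample rule are hypothetical, and errors are collapsed to panic for brevity:

package main

import (
	"bytes"
	"fmt"

	"github.com/opencontainers/runc/libcontainer/cgroups"
	cgroupdevices "github.com/opencontainers/runc/libcontainer/cgroups/devices"
	"github.com/opencontainers/runc/libcontainer/devices"
)

func main() {
	const path = "/sys/fs/cgroup/devices/mycontainer" // hypothetical v1 cgroup dir

	// Current kernel state, as reported by devices.list.
	list, err := cgroups.ReadFile(path, "devices.list")
	if err != nil {
		panic(err)
	}
	current, err := cgroupdevices.EmulatorFromList(bytes.NewBufferString(list))
	if err != nil {
		panic(err)
	}

	// Requested state: an allow-list permitting read/write on /dev/null only.
	target := &cgroupdevices.Emulator{}
	if err := target.Apply(devices.Rule{
		Type:        devices.CharDevice,
		Major:       1,
		Minor:       3,
		Permissions: "rw",
		Allow:       true,
	}); err != nil {
		panic(err)
	}

	// Only the delta between the two states needs to be written back.
	rules, err := current.Transition(target)
	if err != nil {
		panic(err)
	}
	for _, r := range rules {
		file := "devices.deny"
		if r.Allow {
			file = "devices.allow"
		}
		fmt.Printf("would write %q to %s/%s\n", r.CgroupString(), path, file)
	}
}

Writing only the delta matters because the cgroup is live while it is being reconfigured: the container never passes through an intermediate state that is more (or less) permissive than either endpoint.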
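
The new userns helpers can likewise be driven directly. A minimal usage sketch, assuming /proc/self/ns/user as the namespace path; for any /proc/<pid>/ns/user path the fast path reads /proc/<pid>/{uid,gid}_map, while other paths fall back to the forked C helper that joins the namespace via setns(2):

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/userns"
)

func main() {
	uidMap, gidMap, err := userns.GetUserNamespaceMappings("/proc/self/ns/user")
	if err != nil {
		panic(err)
	}
	fmt.Println("uid mappings:", uidMap)
	fmt.Println("gid mappings:", gidMap)

	// IsSameMapping compares entry-by-entry, so reordered or split (but
	// logically equivalent) mappings are reported as different.
	fmt.Println("same:", userns.IsSameMapping(uidMap, gidMap))
}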