diff --git a/.github/workflows/auto-upgrade-ci.yaml b/.github/workflows/auto-upgrade-ci.yaml index a0239d0691..552f997e7f 100644 --- a/.github/workflows/auto-upgrade-ci.yaml +++ b/.github/workflows/auto-upgrade-ci.yaml @@ -247,8 +247,13 @@ jobs: for ITEM in $TAR_FILES ; do IMAGE_NAME=${ITEM%*.tar} echo ${IMAGE_NAME} - cat test/.download/${ITEM} | docker import - ${IMAGE_NAME}:${{ needs.call_build_old_ci_image.outputs.imageTag }} + docker load -i test/.download/${ITEM} + echo "list docker images" && docker images + ITEM_IMAGE_ID=$(docker images | grep ${IMAGE_NAME%*-race}| grep ${{ needs.call_build_old_ci_image.outputs.imageTag }} | awk '{print $3}') + docker tag ${ITEM_IMAGE_ID} ${IMAGE_NAME}:${{ needs.call_build_old_ci_image.outputs.imageTag }} done + echo "list all docker images" + docker images - name: Prepare id: prepare @@ -326,8 +331,13 @@ jobs: for ITEM in $TAR_FILES ; do IMAGE_NAME=${ITEM%*.tar} echo ${IMAGE_NAME} - cat test/.download/${ITEM} | docker import - ${IMAGE_NAME}:${{ needs.call_build_new_ci_image.outputs.imageTag }} + docker load -i test/.download/${ITEM} + echo "list docker images" && docker images + ITEM_IMAGE_ID=$(docker images | grep ${IMAGE_NAME%*-race}| grep ${{ needs.call_build_new_ci_image.outputs.imageTag }} | awk '{print $3}') + docker tag ${ITEM_IMAGE_ID} ${IMAGE_NAME}:${{ needs.call_build_new_ci_image.outputs.imageTag }} done + echo "list all docker images" + docker images - name: Upgrade to version ${{ needs.get_ref.outputs.new_version }} id: upgrade diff --git a/.github/workflows/build-image-ci.yaml b/.github/workflows/build-image-ci.yaml index 6b0447bf2d..9ce89fde22 100644 --- a/.github/workflows/build-image-ci.yaml +++ b/.github/workflows/build-image-ci.yaml @@ -182,7 +182,7 @@ jobs: # docker cache after the workflow "Image CI Cache Cleaner" was terminated. 
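# NOTE: buildx "outputs: type=tar" exports a bare rootfs tarball, which only `docker import`
# can ingest and which drops image tags, layers, and metadata; "type=docker" below produces a
# `docker load`-compatible image archive, which is why the load steps in auto-upgrade-ci.yaml
# and e2e-init.yaml switch from `docker import` to `docker load` plus `docker tag`.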
push: ${{ env.push }} platforms: linux/amd64 - outputs: type=tar,dest=/tmp/${{ matrix.name }}-race.tar + outputs: type=docker,dest=/tmp/${{ matrix.name }}-race.tar github-token: ${{ secrets.WELAN_PAT }} tags: | ${{ env.ONLINE_REGISTER }}/${{ github.repository }}/${{ matrix.name }}-ci:${{ env.tag }}-race @@ -231,7 +231,7 @@ jobs: push: ${{ env.push }} platforms: linux/amd64 github-token: ${{ secrets.WELAN_PAT }} - outputs: type=tar,dest=/tmp/${{ matrix.name }}-race.tar + outputs: type=docker,dest=/tmp/${{ matrix.name }}-race.tar tags: | ${{ env.ONLINE_REGISTER }}/${{ github.repository }}/${{ matrix.name }}-ci:${{ env.tag }}-race build-args: | diff --git a/.github/workflows/e2e-init.yaml b/.github/workflows/e2e-init.yaml index 607e257766..9ed35ab8a1 100644 --- a/.github/workflows/e2e-init.yaml +++ b/.github/workflows/e2e-init.yaml @@ -128,8 +128,13 @@ jobs: for ITEM in $TAR_FILES ; do IMAGE_NAME=${ITEM%*.tar} echo ${IMAGE_NAME} - cat test/.download/${ITEM} | docker import - ${IMAGE_NAME}:${{ inputs.image_tag }} + docker load -i test/.download/${ITEM} + echo "list docker images" && docker images + ITEM_IMAGE_ID=$(docker images | grep ${IMAGE_NAME%*-race}| grep ${{ inputs.image_tag }} | awk '{print $3}') + docker tag ${ITEM_IMAGE_ID} ${IMAGE_NAME}:${{ inputs.image_tag }} done + echo "list all docker images" + docker images # test against commit version # https://github.com/kubernetes-sigs/kind/issues/2863 @@ -163,7 +168,33 @@ jobs: -e INSTALL_KDOCTOR=true \ -e INSTALL_OVS=${INSTALL_OVS_VALUE} \ -e INSTALL_RDMA=true \ - -e INSTALL_SRIOV=true + -e INSTALL_SRIOV=true || RESULT=1 + if ((RESULT==0)) ; then + echo "RUN_SETUP_KIND_CLUSTER_PASS=true" >> $GITHUB_ENV + else + echo "RUN_SETUP_KIND_CLUSTER_PASS=false" >> $GITHUB_ENV + fi + if [ -f "test/e2edebugLog.txt" ] ; then + echo "UPLOAD_SETUP_KIND_CLUSTER_LOG=true" >> $GITHUB_ENV + else + echo "UPLOAD_SETUP_KIND_CLUSTER_LOG=false" >> $GITHUB_ENV + fi + + - name: Upload Setup Kind Cluster log + if: ${{ env.RUN_SETUP_KIND_CLUSTER_PASS == 'false' && env.UPLOAD_SETUP_KIND_CLUSTER_LOG == 'true' }} + uses: actions/upload-artifact@v3.1.3 + with: + name: ${{ inputs.os }}-${{ inputs.ip_family }}-${{ matrix.e2e_test_mode }}-${{ inputs.k8s_version }}-setupkind.txt + path: test/e2edebugLog.txt + retention-days: 7 + + - name: Show Setup Kind Cluster Result + run: | + if ${{ env.RUN_SETUP_KIND_CLUSTER_PASS == 'true' }} ;then + exit 0 + else + exit 1 + fi - name: Run e2e Test id: run_e2e diff --git a/.github/workflows/trivy-scan-image.yaml b/.github/workflows/trivy-scan-image.yaml index 19e9bbdd7f..a2f3d9068b 100644 --- a/.github/workflows/trivy-scan-image.yaml +++ b/.github/workflows/trivy-scan-image.yaml @@ -35,24 +35,22 @@ jobs: name: image-tar-spiderpool-controller path: test/.download - - name: Load And Scan Images - run: | - TAR_FILES=` ls test/.download ` - echo $TAR_FILES - for ITEM in $TAR_FILES ; do - IMAGE_NAME=${ITEM%*.tar} - echo ${IMAGE_NAME} - cat test/.download/${ITEM} | docker import - ${IMAGE_NAME}:${{ inputs.image_tag }} - echo "---------trivy checkout image ${IMAGE_NAME}:${{ inputs.image_tag }} --------------------" - make lint_image_trivy -e IMAGE_NAME=${IMAGE_NAME}:${{ inputs.image_tag }} \ - || { echo "RUN_IMAGE_TRIVY_FAIL=true" >> $GITHUB_ENV ; echo "error, image ${IMAGE_NAME}:${{ inputs.image_tag }} is bad" ; } - done + - name: List downloaded files + run: ls -al test/.download - - name: Show Trivy Scan Report - run: | - if [ "${{ env.RUN_IMAGE_TRIVY_FAIL }}" == "true" ] ; then - echo "error, image is not secure, see detail on Step 'Load 
And Scan Images' " - exit 1 - else - exit 0 - fi + # https://github.com/aquasecurity/trivy-action/issues/389 + - name: load and scan spiderpool-agent image + uses: aquasecurity/trivy-action@0.28.0 + env: + TRIVY_DB_REPOSITORY: public.ecr.aws/aquasecurity/trivy-db,ghcr.io/aquasecurity/trivy-db + with: + input: test/.download/spiderpool-agent-race.tar + severity: 'CRITICAL,HIGH' + + - name: load and scan spiderpool-controller image + uses: aquasecurity/trivy-action@0.28.0 + env: + TRIVY_DB_REPOSITORY: public.ecr.aws/aquasecurity/trivy-db,ghcr.io/aquasecurity/trivy-db + with: + input: test/.download/spiderpool-controller-race.tar + severity: 'CRITICAL,HIGH' diff --git a/Makefile b/Makefile index 589301a99a..b7dee8351f 100644 --- a/Makefile +++ b/Makefile @@ -501,4 +501,3 @@ lint_chart_trivy: .PHONY: build-chart build-chart: @ cd charts ; make - diff --git a/api/v1/agent/models/coordinator_config.go b/api/v1/agent/models/coordinator_config.go index 9948de0bfb..3654a25c81 100644 --- a/api/v1/agent/models/coordinator_config.go +++ b/api/v1/agent/models/coordinator_config.go @@ -61,6 +61,9 @@ type CoordinatorConfig struct { // tx queue len TxQueueLen int64 `json:"txQueueLen,omitempty"` + + // veth link address + VethLinkAddress string `json:"vethLinkAddress,omitempty"` } // Validate validates this coordinator config diff --git a/api/v1/agent/openapi.yaml b/api/v1/agent/openapi.yaml index 8d7f3ca371..ea32dcd9da 100644 --- a/api/v1/agent/openapi.yaml +++ b/api/v1/agent/openapi.yaml @@ -344,6 +344,8 @@ definitions: type: boolean detectGateway: type: boolean + vethLinkAddress: + type: string required: - overlayPodCIDR - serviceCIDR diff --git a/api/v1/agent/server/embedded_spec.go b/api/v1/agent/server/embedded_spec.go index ef9cda858f..dd718cb184 100644 --- a/api/v1/agent/server/embedded_spec.go +++ b/api/v1/agent/server/embedded_spec.go @@ -335,6 +335,9 @@ func init() { }, "txQueueLen": { "type": "integer" + }, + "vethLinkAddress": { + "type": "string" } } }, @@ -887,6 +890,9 @@ func init() { }, "txQueueLen": { "type": "integer" + }, + "vethLinkAddress": { + "type": "string" } } }, diff --git a/charts/spiderpool/README.md b/charts/spiderpool/README.md index 58124ae3c7..1b06862891 100644 --- a/charts/spiderpool/README.md +++ b/charts/spiderpool/README.md @@ -163,6 +163,7 @@ helm install spiderpool spiderpool/spiderpool --wait --namespace kube-system \ | `coordinator.detectIPConflict` | detect IP address conflicts | `false` | | `coordinator.tunePodRoutes` | tune Pod routes | `true` | | `coordinator.hijackCIDR` | Additional subnets that need to be hijacked to the host forward, the default link-local range "169.254.0.0/16" is used for NodeLocal DNS | `["169.254.0.0/16"]` | +| `coordinator.vethLinkAddress` | configure a link-local address for the veth0 device. empty means disabled. default is empty.
Format is like 169.254.100.1 | `""` | ### rdma parameters @@ -212,14 +213,6 @@ helm install spiderpool spiderpool/spiderpool --wait --namespace kube-system \ | `multus.multusCNI.log.logLevel` | the multus-CNI daemonset pod log level | `debug` | | `multus.multusCNI.log.logFile` | the multus-CNI daemonset pod log file | `/var/log/multus.log` | -### dra parameters - -| Name | Description | Value | -| -------------------- | -------------------------- | -------------- | -| `dra.enabled` | to enable dra feature | `false` | -| `dra.cdiRootPath` | the dir of cdi root | `/var/run/cdi` | -| `dra.hostDevicePath` | the dir path of the device | `""` | - ### plugins parameters | Name | Description | Value | @@ -313,74 +306,77 @@ helm install spiderpool spiderpool/spiderpool --wait --namespace kube-system \ ### spiderpoolController parameters -| Name | Description | Value | -| ------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------- | -| `spiderpoolController.name` | the spiderpoolController name | `spiderpool-controller` | -| `spiderpoolController.replicas` | the replicas number of spiderpoolController pod | `1` | -| `spiderpoolController.binName` | the binName name of spiderpoolController | `/usr/bin/spiderpool-controller` | -| `spiderpoolController.hostnetwork` | enable hostnetwork mode of spiderpoolController pod. Notice, if no CNI available before spiderpool installation, must enable this | `true` | -| `spiderpoolController.image.registry` | the image registry of spiderpoolController | `ghcr.io` | -| `spiderpoolController.image.repository` | the image repository of spiderpoolController | `spidernet-io/spiderpool/spiderpool-controller` | -| `spiderpoolController.image.pullPolicy` | the image pullPolicy of spiderpoolController | `IfNotPresent` | -| `spiderpoolController.image.digest` | the image digest of spiderpoolController, which takes preference over tag | `""` | -| `spiderpoolController.image.tag` | the image tag of spiderpoolController, overrides the image tag whose default is the chart appVersion. 
| `""` | -| `spiderpoolController.image.imagePullSecrets` | the image imagePullSecrets of spiderpoolController | `[]` | -| `spiderpoolController.serviceAccount.create` | create the service account for the spiderpoolController | `true` | -| `spiderpoolController.serviceAccount.annotations` | the annotations of spiderpoolController service account | `{}` | -| `spiderpoolController.service.annotations` | the annotations for spiderpoolController service | `{}` | -| `spiderpoolController.service.type` | the type for spiderpoolController service | `ClusterIP` | -| `spiderpoolController.priorityClassName` | the priority Class Name for spiderpoolController | `system-node-critical` | -| `spiderpoolController.affinity` | the affinity of spiderpoolController | `{}` | -| `spiderpoolController.extraArgs` | the additional arguments of spiderpoolController container | `[]` | -| `spiderpoolController.extraEnv` | the additional environment variables of spiderpoolController container | `[]` | -| `spiderpoolController.extraVolumes` | the additional volumes of spiderpoolController container | `[]` | -| `spiderpoolController.extraVolumeMounts` | the additional hostPath mounts of spiderpoolController container | `[]` | -| `spiderpoolController.podAnnotations` | the additional annotations of spiderpoolController pod | `{}` | -| `spiderpoolController.podLabels` | the additional label of spiderpoolController pod | `{}` | -| `spiderpoolController.securityContext` | the security Context of spiderpoolController pod | `{}` | -| `spiderpoolController.resources.limits.cpu` | the cpu limit of spiderpoolController pod | `500m` | -| `spiderpoolController.resources.limits.memory` | the memory limit of spiderpoolController pod | `1024Mi` | -| `spiderpoolController.resources.requests.cpu` | the cpu requests of spiderpoolController pod | `100m` | -| `spiderpoolController.resources.requests.memory` | the memory requests of spiderpoolController pod | `128Mi` | -| `spiderpoolController.podDisruptionBudget.enabled` | enable podDisruptionBudget for spiderpoolController pod | `false` | -| `spiderpoolController.podDisruptionBudget.minAvailable` | minimum number/percentage of pods that should remain scheduled. 
| `1` | -| `spiderpoolController.httpPort` | the http Port for spiderpoolController, for health checking and http service | `5720` | -| `spiderpoolController.healthChecking.startupProbe.failureThreshold` | the failure threshold of startup probe for spiderpoolController health checking | `30` | -| `spiderpoolController.healthChecking.startupProbe.periodSeconds` | the period seconds of startup probe for spiderpoolController health checking | `2` | -| `spiderpoolController.healthChecking.livenessProbe.failureThreshold` | the failure threshold of startup probe for spiderpoolController health checking | `6` | -| `spiderpoolController.healthChecking.livenessProbe.periodSeconds` | the period seconds of startup probe for spiderpoolController health checking | `10` | -| `spiderpoolController.healthChecking.readinessProbe.failureThreshold` | the failure threshold of startup probe for spiderpoolController health checking | `3` | -| `spiderpoolController.healthChecking.readinessProbe.periodSeconds` | the period seconds of startup probe for spiderpoolController health checking | `10` | -| `spiderpoolController.webhookPort` | the http port for spiderpoolController webhook | `5722` | -| `spiderpoolController.prometheus.enabled` | enable spiderpool Controller to collect metrics | `false` | -| `spiderpoolController.prometheus.enabledDebugMetric` | enable spiderpool Controller to collect debug level metrics | `false` | -| `spiderpoolController.prometheus.port` | the metrics port of spiderpool Controller | `5721` | -| `spiderpoolController.prometheus.serviceMonitor.install` | install serviceMonitor for spiderpool agent. This requires the prometheus CRDs to be available | `false` | -| `spiderpoolController.prometheus.serviceMonitor.namespace` | the serviceMonitor namespace. Default to the namespace of helm instance | `""` | -| `spiderpoolController.prometheus.serviceMonitor.annotations` | the additional annotations of spiderpoolController serviceMonitor | `{}` | -| `spiderpoolController.prometheus.serviceMonitor.labels` | the additional label of spiderpoolController serviceMonitor | `{}` | -| `spiderpoolController.prometheus.serviceMonitor.interval` | represents the interval of spiderpoolController serviceMonitor's scraping action | `10s` | -| `spiderpoolController.prometheus.prometheusRule.install` | install prometheusRule for spiderpool agent. This requires the prometheus CRDs to be available | `false` | -| `spiderpoolController.prometheus.prometheusRule.namespace` | the prometheusRule namespace. Default to the namespace of helm instance | `""` | -| `spiderpoolController.prometheus.prometheusRule.annotations` | the additional annotations of spiderpoolController prometheusRule | `{}` | -| `spiderpoolController.prometheus.prometheusRule.labels` | the additional label of spiderpoolController prometheusRule | `{}` | -| `spiderpoolController.prometheus.prometheusRule.enableWarningIPGCFailureCounts` | the additional rule of spiderpoolController prometheusRule | `true` | -| `spiderpoolController.debug.logLevel` | the log level of spiderpool Controller [debug, info, warn, error, fatal, panic] | `info` | -| `spiderpoolController.debug.gopsPort` | the gops port of spiderpool Controller | `5724` | -| `spiderpoolController.tls.method` | the method for generating TLS certificates. 
[ provided , certmanager , auto] | `auto` | -| `spiderpoolController.tls.secretName` | the secret name for storing TLS certificates | `spiderpool-controller-server-certs` | -| `spiderpoolController.tls.certmanager.certValidityDuration` | generated certificates validity duration in days for 'certmanager' method | `36500` | -| `spiderpoolController.tls.certmanager.issuerName` | issuer name of cert manager 'certmanager'. If not specified, a CA issuer will be created. | `""` | -| `spiderpoolController.tls.certmanager.extraDnsNames` | extra DNS names added to certificate when it's auto generated | `[]` | -| `spiderpoolController.tls.certmanager.extraIPAddresses` | extra IP addresses added to certificate when it's auto generated | `[]` | -| `spiderpoolController.tls.provided.tlsCert` | encoded tls certificate for provided method | `""` | -| `spiderpoolController.tls.provided.tlsKey` | encoded tls key for provided method | `""` | -| `spiderpoolController.tls.provided.tlsCa` | encoded tls CA for provided method | `""` | -| `spiderpoolController.tls.auto.caExpiration` | ca expiration for auto method | `73000` | -| `spiderpoolController.tls.auto.certExpiration` | server cert expiration for auto method | `73000` | -| `spiderpoolController.tls.auto.extraIpAddresses` | extra IP addresses of server certificate for auto method | `[]` | -| `spiderpoolController.tls.auto.extraDnsNames` | extra DNS names of server cert for auto method | `[]` | -| `spiderpoolController.cleanup.enable` | clean up resources when helm uninstall | `true` | +| Name | Description | Value | +| ------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------- | +| `spiderpoolController.name` | the spiderpoolController name | `spiderpool-controller` | +| `spiderpoolController.replicas` | the replicas number of spiderpoolController pod | `1` | +| `spiderpoolController.binName` | the binName name of spiderpoolController | `/usr/bin/spiderpool-controller` | +| `spiderpoolController.hostnetwork` | enable hostnetwork mode of spiderpoolController pod. Notice, if no CNI available before spiderpool installation, must enable this | `true` | +| `spiderpoolController.image.registry` | the image registry of spiderpoolController | `ghcr.io` | +| `spiderpoolController.image.repository` | the image repository of spiderpoolController | `spidernet-io/spiderpool/spiderpool-controller` | +| `spiderpoolController.image.pullPolicy` | the image pullPolicy of spiderpoolController | `IfNotPresent` | +| `spiderpoolController.image.digest` | the image digest of spiderpoolController, which takes preference over tag | `""` | +| `spiderpoolController.image.tag` | the image tag of spiderpoolController, overrides the image tag whose default is the chart appVersion. 
| `""` | +| `spiderpoolController.image.imagePullSecrets` | the image imagePullSecrets of spiderpoolController | `[]` | +| `spiderpoolController.serviceAccount.create` | create the service account for the spiderpoolController | `true` | +| `spiderpoolController.serviceAccount.annotations` | the annotations of spiderpoolController service account | `{}` | +| `spiderpoolController.service.annotations` | the annotations for spiderpoolController service | `{}` | +| `spiderpoolController.service.type` | the type for spiderpoolController service | `ClusterIP` | +| `spiderpoolController.priorityClassName` | the priority Class Name for spiderpoolController | `system-node-critical` | +| `spiderpoolController.affinity` | the affinity of spiderpoolController | `{}` | +| `spiderpoolController.extraArgs` | the additional arguments of spiderpoolController container | `[]` | +| `spiderpoolController.extraEnv` | the additional environment variables of spiderpoolController container | `[]` | +| `spiderpoolController.extraVolumes` | the additional volumes of spiderpoolController container | `[]` | +| `spiderpoolController.extraVolumeMounts` | the additional hostPath mounts of spiderpoolController container | `[]` | +| `spiderpoolController.podAnnotations` | the additional annotations of spiderpoolController pod | `{}` | +| `spiderpoolController.podLabels` | the additional label of spiderpoolController pod | `{}` | +| `spiderpoolController.securityContext` | the security Context of spiderpoolController pod | `{}` | +| `spiderpoolController.resources.limits.cpu` | the cpu limit of spiderpoolController pod | `500m` | +| `spiderpoolController.resources.limits.memory` | the memory limit of spiderpoolController pod | `1024Mi` | +| `spiderpoolController.resources.requests.cpu` | the cpu requests of spiderpoolController pod | `100m` | +| `spiderpoolController.resources.requests.memory` | the memory requests of spiderpoolController pod | `128Mi` | +| `spiderpoolController.podDisruptionBudget.enabled` | enable podDisruptionBudget for spiderpoolController pod | `false` | +| `spiderpoolController.podDisruptionBudget.minAvailable` | minimum number/percentage of pods that should remain scheduled. 
| `1` | +| `spiderpoolController.httpPort` | the http Port for spiderpoolController, for health checking and http service | `5720` | +| `spiderpoolController.healthChecking.startupProbe.failureThreshold` | the failure threshold of startup probe for spiderpoolController health checking | `30` | +| `spiderpoolController.healthChecking.startupProbe.periodSeconds` | the period seconds of startup probe for spiderpoolController health checking | `2` | +| `spiderpoolController.healthChecking.livenessProbe.failureThreshold` | the failure threshold of startup probe for spiderpoolController health checking | `6` | +| `spiderpoolController.healthChecking.livenessProbe.periodSeconds` | the period seconds of startup probe for spiderpoolController health checking | `10` | +| `spiderpoolController.healthChecking.readinessProbe.failureThreshold` | the failure threshold of startup probe for spiderpoolController health checking | `3` | +| `spiderpoolController.healthChecking.readinessProbe.periodSeconds` | the period seconds of startup probe for spiderpoolController health checking | `10` | +| `spiderpoolController.webhookPort` | the http port for spiderpoolController webhook | `5722` | +| `spiderpoolController.podResourceInject.enabled` | enable pod resource inject | `false` | +| `spiderpoolController.podResourceInject.namespacesExclude` | exclude the namespaces of the pod resource inject | `["kube-system","spiderpool","metallb-system","istio-system"]` | +| `spiderpoolController.podResourceInject.namespacesInclude` | include the namespaces of the pod resource inject, empty means all namespaces but exclude the namespaces in namespacesExclude, not empty means only include the namespaces in namespacesInclude | `[]` | +| `spiderpoolController.prometheus.enabled` | enable spiderpool Controller to collect metrics | `false` | +| `spiderpoolController.prometheus.enabledDebugMetric` | enable spiderpool Controller to collect debug level metrics | `false` | +| `spiderpoolController.prometheus.port` | the metrics port of spiderpool Controller | `5721` | +| `spiderpoolController.prometheus.serviceMonitor.install` | install serviceMonitor for spiderpool agent. This requires the prometheus CRDs to be available | `false` | +| `spiderpoolController.prometheus.serviceMonitor.namespace` | the serviceMonitor namespace. Default to the namespace of helm instance | `""` | +| `spiderpoolController.prometheus.serviceMonitor.annotations` | the additional annotations of spiderpoolController serviceMonitor | `{}` | +| `spiderpoolController.prometheus.serviceMonitor.labels` | the additional label of spiderpoolController serviceMonitor | `{}` | +| `spiderpoolController.prometheus.serviceMonitor.interval` | represents the interval of spiderpoolController serviceMonitor's scraping action | `10s` | +| `spiderpoolController.prometheus.prometheusRule.install` | install prometheusRule for spiderpool agent. This requires the prometheus CRDs to be available | `false` | +| `spiderpoolController.prometheus.prometheusRule.namespace` | the prometheusRule namespace. 
Default to the namespace of helm instance | `""` | +| `spiderpoolController.prometheus.prometheusRule.annotations` | the additional annotations of spiderpoolController prometheusRule | `{}` | +| `spiderpoolController.prometheus.prometheusRule.labels` | the additional label of spiderpoolController prometheusRule | `{}` | +| `spiderpoolController.prometheus.prometheusRule.enableWarningIPGCFailureCounts` | the additional rule of spiderpoolController prometheusRule | `true` | +| `spiderpoolController.debug.logLevel` | the log level of spiderpool Controller [debug, info, warn, error, fatal, panic] | `info` | +| `spiderpoolController.debug.gopsPort` | the gops port of spiderpool Controller | `5724` | +| `spiderpoolController.tls.method` | the method for generating TLS certificates. [ provided , certmanager , auto] | `auto` | +| `spiderpoolController.tls.secretName` | the secret name for storing TLS certificates | `spiderpool-controller-server-certs` | +| `spiderpoolController.tls.certmanager.certValidityDuration` | generated certificates validity duration in days for 'certmanager' method | `36500` | +| `spiderpoolController.tls.certmanager.issuerName` | issuer name of cert manager 'certmanager'. If not specified, a CA issuer will be created. | `""` | +| `spiderpoolController.tls.certmanager.extraDnsNames` | extra DNS names added to certificate when it's auto generated | `[]` | +| `spiderpoolController.tls.certmanager.extraIPAddresses` | extra IP addresses added to certificate when it's auto generated | `[]` | +| `spiderpoolController.tls.provided.tlsCert` | encoded tls certificate for provided method | `""` | +| `spiderpoolController.tls.provided.tlsKey` | encoded tls key for provided method | `""` | +| `spiderpoolController.tls.provided.tlsCa` | encoded tls CA for provided method | `""` | +| `spiderpoolController.tls.auto.caExpiration` | ca expiration for auto method | `73000` | +| `spiderpoolController.tls.auto.certExpiration` | server cert expiration for auto method | `73000` | +| `spiderpoolController.tls.auto.extraIpAddresses` | extra IP addresses of server certificate for auto method | `[]` | +| `spiderpoolController.tls.auto.extraDnsNames` | extra DNS names of server cert for auto method | `[]` | +| `spiderpoolController.cleanup.enable` | clean up resources when helm uninstall | `true` | ### spiderpoolInit parameters diff --git a/charts/spiderpool/crds/spiderpool.spidernet.io_spidercoordinators.yaml b/charts/spiderpool/crds/spiderpool.spidernet.io_spidercoordinators.yaml index 72e1dacd9c..00dc8d9239 100644 --- a/charts/spiderpool/crds/spiderpool.spidernet.io_spidercoordinators.yaml +++ b/charts/spiderpool/crds/spiderpool.spidernet.io_spidercoordinators.yaml @@ -120,6 +120,11 @@ spec: is a positive integer the default value is 0, which means leaving it as it is. type: integer + vethLinkAddress: + description: VethLinkAddress configure a ipv4 link-local address for + veth0 device. empty means disable. default is empty. Format is like + 169.254.100.1 + type: string type: object status: description: CoordinationStatus defines the observed state of SpiderCoordinator. 
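For context, a minimal sketch of how the new field can be set on the cluster-wide SpiderCoordinator resource (the resource name `default` and the sample address are illustrative, following the reference docs further below):

```yaml
apiVersion: spiderpool.spidernet.io/v2beta1
kind: SpiderCoordinator
metadata:
  name: default
spec:
  # assign this IPv4 link-local address to the Pod-side veth0 device;
  # "" (the default) leaves veth0 without an address
  vethLinkAddress: "169.254.100.1"
```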
diff --git a/charts/spiderpool/crds/spiderpool.spidernet.io_spidermultusconfigs.yaml b/charts/spiderpool/crds/spiderpool.spidernet.io_spidermultusconfigs.yaml index 456de6e849..461d27a569 100644 --- a/charts/spiderpool/crds/spiderpool.spidernet.io_spidermultusconfigs.yaml +++ b/charts/spiderpool/crds/spiderpool.spidernet.io_spidermultusconfigs.yaml @@ -141,6 +141,11 @@ spec: is a positive integer the default value is 0, which means leaving it as it is. type: integer + vethLinkAddress: + description: VethLinkAddress configure a ipv4 link-local address + for veth0 device. empty means disable. default is empty. Format + is like 169.254.100.1 + type: string type: object customCNI: description: OtherCniTypeConfig only used for CniType custom, valid diff --git a/charts/spiderpool/templates/configmap.yaml b/charts/spiderpool/templates/configmap.yaml index 174588ee98..2202605c5c 100644 --- a/charts/spiderpool/templates/configmap.yaml +++ b/charts/spiderpool/templates/configmap.yaml @@ -26,11 +26,11 @@ data: {{- else}} clusterSubnetDefaultFlexibleIPNumber: 0 {{- end }} - dra: - enabled: {{ .Values.dra.enabled }} - cdiRootPath: {{ .Values.dra.cdiRootPath }} - hostDevicePath: {{ .Values.dra.hostDevicePath }} tuneSysctlConfig: {{ .Values.spiderpoolAgent.tuneSysctlConfig }} + podResourceInject: + enabled: {{ .Values.spiderpoolController.podResourceInject.enabled }} + namespacesExclude: {{ toJson .Values.spiderpoolController.podResourceInject.namespacesExclude }} + namespacesInclude: {{ toJson .Values.spiderpoolController.podResourceInject.namespacesInclude }} {{- if .Values.multus.multusCNI.install }} --- kind: ConfigMap diff --git a/charts/spiderpool/templates/daemonset.yaml b/charts/spiderpool/templates/daemonset.yaml index 77017d5377..af1d5f69c5 100644 --- a/charts/spiderpool/templates/daemonset.yaml +++ b/charts/spiderpool/templates/daemonset.yaml @@ -203,7 +203,7 @@ spec: {{- with .Values.spiderpoolAgent.extraEnv }} {{- toYaml . 
| nindent 12 }} {{- end }} - {{- if or .Values.dra.enabled .Values.spiderpoolAgent.tuneSysctlConfig .Values.spiderpoolAgent.securityContext }} + {{- if or .Values.spiderpoolAgent.tuneSysctlConfig .Values.spiderpoolAgent.securityContext }} securityContext: privileged: true {{- with .Values.spiderpoolAgent.securityContext }} @@ -222,19 +222,6 @@ spec: - name: cni mountPath: /host/etc/cni/net.d {{- end }} - {{- if .Values.dra.enabled }} - - name: plugins-registry - mountPath: /var/lib/kubelet/plugins_registry - - name: plugins - mountPath: /var/lib/kubelet/plugins - mountPropagation: Bidirectional - - name: cdi - mountPath: /var/run/cdi - {{- if .Values.dra.hostDevicePath }} - - name: library - mountPath: {{ .Values.dra.hostDevicePath }} - {{- end }} - {{- end }} {{- if .Values.spiderpoolAgent.extraVolumes }} {{- include "tplvalues.render" ( dict "value" .Values.spiderpoolAgent.extraVolumeMounts "context" $ ) | nindent 8 }} {{- end }} @@ -306,22 +293,6 @@ spec: - key: cni-conf.json path: 00-multus.conf {{- end }} - {{- if .Values.dra.enabled }} - - name: plugins-registry - hostPath: - path: /var/lib/kubelet/plugins_registry - - name: plugins - hostPath: - path: /var/lib/kubelet/plugins - - name: cdi - hostPath: - path: /var/run/cdi - {{- if .Values.dra.hostDevicePath }} - - name: library - hostPath: - path: {{ .Values.dra.hostDevicePath }} - {{- end }} - {{- end }} {{- if .Values.spiderpoolAgent.extraVolumeMounts }} {{- include "tplvalues.render" ( dict "value" .Values.spiderpoolAgent.extraVolumeMounts "context" $ ) | nindent 6 }} {{- end }} diff --git a/charts/spiderpool/templates/deployment.yaml b/charts/spiderpool/templates/deployment.yaml index d78ce1b080..45cbc4be28 100644 --- a/charts/spiderpool/templates/deployment.yaml +++ b/charts/spiderpool/templates/deployment.yaml @@ -187,6 +187,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: SPIDERPOOL_CONTROLLER_DEPLOYMENT_NAME + value: {{ .Values.spiderpoolController.name | quote }} {{- with .Values.spiderpoolController.extraEnv }} {{- toYaml . 
| nindent 8 }} {{- end }} diff --git a/charts/spiderpool/templates/pod.yaml b/charts/spiderpool/templates/pod.yaml index 53a4ae6e22..5e178ea694 100644 --- a/charts/spiderpool/templates/pod.yaml +++ b/charts/spiderpool/templates/pod.yaml @@ -47,6 +47,8 @@ spec: value: {{ .Values.coordinator.tunePodRoutes | quote }} - name: SPIDERPOOL_INIT_DEFAULT_COORDINATOR_HIJACK_CIDR value: {{ toJson .Values.coordinator.hijackCIDR | quote }} + - name: SPIDERPOOL_INIT_DEFAULT_COORDINATOR_VETH_LINK_ADDRESS + value: {{ .Values.coordinator.vethLinkAddress | quote }} {{- end }} {{- if and .Values.clusterDefaultPool.installIPv4IPPool .Values.ipam.enableIPv4 }} - name: SPIDERPOOL_INIT_DEFAULT_IPV4_IPPOOL_NAME diff --git a/charts/spiderpool/templates/resourceclass.yaml b/charts/spiderpool/templates/resourceclass.yaml deleted file mode 100644 index 865bccf0dc..0000000000 --- a/charts/spiderpool/templates/resourceclass.yaml +++ /dev/null @@ -1,7 +0,0 @@ -{{- if .Values.dra.enabled }} -apiVersion: resource.k8s.io/v1alpha2 -kind: ResourceClass -metadata: - name: netresources.spidernet.io -driverName: netresources.spidernet.io -{{- end }} \ No newline at end of file diff --git a/charts/spiderpool/templates/role.yaml b/charts/spiderpool/templates/role.yaml index ca4ecec499..031b9240a8 100644 --- a/charts/spiderpool/templates/role.yaml +++ b/charts/spiderpool/templates/role.yaml @@ -49,6 +49,7 @@ rules: - delete - get - list + - update - watch - apiGroups: - apiextensions.k8s.io diff --git a/charts/spiderpool/templates/tls.yaml b/charts/spiderpool/templates/tls.yaml index 690142ad01..940d4274bd 100644 --- a/charts/spiderpool/templates/tls.yaml +++ b/charts/spiderpool/templates/tls.yaml @@ -289,35 +289,6 @@ webhooks: - spidermultusconfigs sideEffects: None {{- end }} -{{- if .Values.dra.enabled }} -- admissionReviewVersions: - - v1 - clientConfig: - service: - name: {{ .Values.spiderpoolController.name | trunc 63 | trimSuffix "-" }} - namespace: {{ .Release.Namespace }} - path: /validate-spiderpool-spidernet-io-v2beta1-spiderclaimparameter - port: {{ .Values.spiderpoolController.webhookPort }} - {{- if (eq .Values.spiderpoolController.tls.method "provided") }} - caBundle: {{ .Values.spiderpoolController.tls.provided.tlsCa | required "missing spiderpoolController.tls.provided.tlsCa" }} - {{- else if (eq .Values.spiderpoolController.tls.method "auto") }} - caBundle: {{ .ca.Cert | b64enc }} - {{- end }} - failurePolicy: Fail - name: spiderclaimparameter.spiderpool.spidernet.io - rules: - - apiGroups: - - spiderpool.spidernet.io - apiVersions: - - v2beta1 - operations: - - CREATE - - UPDATE - resources: - - spiderclaimparameters - sideEffects: None -{{- end }} - {{- if eq .Values.spiderpoolController.tls.method "certmanager" -}} --- apiVersion: cert-manager.io/v1 diff --git a/charts/spiderpool/values.yaml b/charts/spiderpool/values.yaml index 6eb2de239d..9cca7e60c3 100644 --- a/charts/spiderpool/values.yaml +++ b/charts/spiderpool/values.yaml @@ -124,6 +124,9 @@ coordinator: ## @param coordinator.hijackCIDR Additional subnets that need to be hijacked to the host forward, the default link-local range "169.254.0.0/16" is used for NodeLocal DNS hijackCIDR: ["169.254.0.0/16"] + ## @param coordinator.vethLinkAddress configure a link-local address for the veth0 device. empty means disabled. default is empty.
Format is like 169.254.100.1 + vethLinkAddress: "" + ## @section rdma parameters ## rdma: @@ -269,16 +272,6 @@ multus: ## @param multus.multusCNI.log.logFile the multus-CNI daemonset pod log file logFile: "/var/log/multus.log" -## @section dra parameters -## -dra: - ## @param dra.enabled to enable dra feature - enabled: false - ## @param dra.cdiRootPath the dir of cdi root - cdiRootPath: "/var/run/cdi" - ## @param dra.hostDevicePath the dir path of the device - hostDevicePath: "" - ## @section plugins parameters ## plugins: @@ -674,6 +667,20 @@ spiderpoolController: ## @param spiderpoolController.webhookPort the http port for spiderpoolController webhook webhookPort: 5722 + podResourceInject: + ## @param spiderpoolController.podResourceInject.enabled enable pod resource inject + enabled: false + + ## @param spiderpoolController.podResourceInject.namespacesExclude exclude the namespaces of the pod resource inject + namespacesExclude: + - kube-system + - spiderpool + - metallb-system + - istio-system + + ## @param spiderpoolController.podResourceInject.namespacesInclude include the namespaces of the pod resource inject, empty means all namespaces but exclude the namespaces in namespacesExclude, not empty means only include the namespaces in namespacesInclude + namespacesInclude: [] + prometheus: ## @param spiderpoolController.prometheus.enabled enable spiderpool Controller to collect metrics enabled: false diff --git a/cmd/coordinator/cmd/cni_types.go b/cmd/coordinator/cmd/cni_types.go index 6babf5936c..1f4630ba65 100644 --- a/cmd/coordinator/cmd/cni_types.go +++ b/cmd/coordinator/cmd/cni_types.go @@ -45,6 +45,7 @@ const ( type Config struct { types.NetConf DetectGateway *bool `json:"detectGateway,omitempty"` + VethLinkAddress string `json:"vethLinkAddress,omitempty"` MacPrefix string `json:"podMACPrefix,omitempty"` MultusNicPrefix string `json:"multusNicPrefix,omitempty"` PodDefaultCniNic string `json:"podDefaultCniNic,omitempty"` @@ -178,6 +179,9 @@ func ParseConfig(stdin []byte, coordinatorConfig *models.CoordinatorConfig) (*Co conf.PodDefaultRouteNIC = coordinatorConfig.PodDefaultRouteNIC } + if conf.VethLinkAddress == "" { + conf.VethLinkAddress = coordinatorConfig.VethLinkAddress + } return &conf, nil } diff --git a/cmd/coordinator/cmd/command_add.go b/cmd/coordinator/cmd/command_add.go index 980b48f954..94718b64bd 100644 --- a/cmd/coordinator/cmd/command_add.go +++ b/cmd/coordinator/cmd/command_add.go @@ -102,6 +102,7 @@ func CmdAdd(args *skel.CmdArgs) (err error) { ipFamily: ipFamily, currentInterface: args.IfName, tuneMode: conf.Mode, + vethLinkAddress: conf.VethLinkAddress, } c.HijackCIDR = append(c.HijackCIDR, conf.ServiceCIDR...) c.HijackCIDR = append(c.HijackCIDR, conf.HijackCIDR...) 
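To illustrate the precedence implemented by `ParseConfig` above: a `vethLinkAddress` set in the coordinator plugin's own CNI stdin config wins, and the agent-served CoordinatorConfig value is used only as a fallback when the field is empty. A hypothetical coordinator entry, shown as YAML for brevity (real CNI conflists are JSON, and the plugin `type` name is assumed, not taken from this diff):

```yaml
# field names follow the json tags of the Config struct above
type: coordinator
detectGateway: false
vethLinkAddress: "169.254.100.1"
```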
diff --git a/cmd/coordinator/cmd/utils.go b/cmd/coordinator/cmd/utils.go index a7209cc161..c7696ca15c 100644 --- a/cmd/coordinator/cmd/utils.go +++ b/cmd/coordinator/cmd/utils.go @@ -21,17 +21,17 @@ import ( ) type coordinator struct { - firstInvoke bool - ipFamily, currentRuleTable, hostRuleTable int - tuneMode Mode - hostVethName, podVethName, currentInterface string - v4HijackRouteGw, v6HijackRouteGw net.IP - HijackCIDR []string - netns, hostNs ns.NetNS - hostVethHwAddress, podVethHwAddress net.HardwareAddr - currentAddress []netlink.Addr - v4PodOverlayNicAddr, v6PodOverlayNicAddr *net.IPNet - hostIPRouteForPod []net.IP + firstInvoke bool + ipFamily, currentRuleTable, hostRuleTable int + tuneMode Mode + hostVethName, podVethName, vethLinkAddress, currentInterface string + v4HijackRouteGw, v6HijackRouteGw net.IP + HijackCIDR []string + netns, hostNs ns.NetNS + hostVethHwAddress, podVethHwAddress net.HardwareAddr + currentAddress []netlink.Addr + v4PodOverlayNicAddr, v6PodOverlayNicAddr *net.IPNet + hostIPRouteForPod []net.IP } func (c *coordinator) autoModeToSpecificMode(mode Mode, podFirstInterface string, vethExist bool) error { @@ -189,9 +189,13 @@ func (c *coordinator) setupVeth(logger *zap.Logger, containerID string) error { return nil } + if c.vethLinkAddress == "" { + return nil + } + if err = netlink.AddrAdd(link, &netlink.Addr{ IPNet: &net.IPNet{ - IP: net.ParseIP("169.254.200.1"), + IP: net.ParseIP(c.vethLinkAddress), Mask: net.CIDRMask(32, 32), }, }); err != nil { diff --git a/cmd/spiderpool-agent/cmd/config.go b/cmd/spiderpool-agent/cmd/config.go index f37610deba..6cab225820 100644 --- a/cmd/spiderpool-agent/cmd/config.go +++ b/cmd/spiderpool-agent/cmd/config.go @@ -31,7 +31,6 @@ import ( "github.com/spidernet-io/spiderpool/pkg/subnetmanager" spiderpooltypes "github.com/spidernet-io/spiderpool/pkg/types" "github.com/spidernet-io/spiderpool/pkg/workloadendpointmanager" - "k8s.io/dynamic-resource-allocation/kubeletplugin" ) var agentContext = new(AgentContext) @@ -125,9 +124,6 @@ type AgentContext struct { UnixServer *server.Server MetricsHttpServer *http.Server - // dra - DraPlugin kubeletplugin.DRAPlugin - // client unixClient *client.SpiderpoolAgentAPI diff --git a/cmd/spiderpool-agent/cmd/coordinator.go b/cmd/spiderpool-agent/cmd/coordinator.go index b84cadc9c5..1697d2a8ca 100644 --- a/cmd/spiderpool-agent/cmd/coordinator.go +++ b/cmd/spiderpool-agent/cmd/coordinator.go @@ -94,6 +94,11 @@ func (g *_unixGetCoordinatorConfig) Handle(params daemonset.GetCoordinatorConfig nic = *coord.Spec.PodDefaultRouteNIC } + var vethLinkAddress string + if coord.Spec.VethLinkAddress != nil { + vethLinkAddress = *coord.Spec.VethLinkAddress + } + defaultRouteNic, ok := pod.Annotations[constant.AnnoDefaultRouteInterface] if ok { nic = defaultRouteNic @@ -107,6 +112,7 @@ func (g *_unixGetCoordinatorConfig) Handle(params daemonset.GetCoordinatorConfig PodMACPrefix: prefix, TunePodRoutes: coord.Spec.TunePodRoutes, PodDefaultRouteNIC: nic, + VethLinkAddress: vethLinkAddress, HostRuleTable: int64(*coord.Spec.HostRuleTable), PodRPFilter: int64(*coord.Spec.PodRPFilter), TxQueueLen: int64(*coord.Spec.TxQueueLen), diff --git a/cmd/spiderpool-agent/cmd/daemon.go b/cmd/spiderpool-agent/cmd/daemon.go index f25c7d0a6a..dd892bffd2 100644 --- a/cmd/spiderpool-agent/cmd/daemon.go +++ b/cmd/spiderpool-agent/cmd/daemon.go @@ -24,7 +24,6 @@ import ( "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" - draplugin "github.com/spidernet-io/spiderpool/pkg/dra/dra-plugin" 
"github.com/spidernet-io/spiderpool/pkg/ipam" "github.com/spidernet-io/spiderpool/pkg/ippoolmanager" "github.com/spidernet-io/spiderpool/pkg/kubevirtmanager" @@ -74,7 +73,7 @@ func DaemonMain() { // Load spiderpool's global Comfigmap. if err := agentContext.LoadConfigmap(); err != nil { - logger.Sugar().Fatal("Failed to load Configmap spiderpool-conf: %v", err) + logger.Sugar().Fatalf("Failed to load Configmap spiderpool-conf: %v", err) } logger.Sugar().Infof("Spiderpool-agent config: %+v", agentContext.Cfg) @@ -248,16 +247,6 @@ func DaemonMain() { } agentContext.unixClient = spiderpoolAgentAPI - if agentContext.Cfg.DraEnabled { - logger.Info("Begin to start dra-plugin Server") - agentContext.DraPlugin, err = draplugin.StartDRAPlugin(logger, agentContext.Cfg.DraCdiRootPath, agentContext.Cfg.DraHostDevicePath) - if err != nil { - logger.Fatal("failed to start dra-plugin server", zap.Error(err)) - } - } else { - logger.Info("Dra feature is disable.") - } - logger.Info("Set spiderpool-agent startup probe ready") agentContext.IsStartupProbe.Store(true) @@ -290,12 +279,6 @@ func WatchSignal(sigCh chan os.Signal) { logger.Sugar().Errorf("Failed to shut down spiderpool-agent UNIX server: %v", err) } } - - if agentContext.DraPlugin != nil { - logger.Debug("Stopping the dra-plugin server") - agentContext.DraPlugin.Stop() - } - // others... } diff --git a/cmd/spiderpool-controller/cmd/config.go b/cmd/spiderpool-controller/cmd/config.go index 276c5cebe0..84f11d1cae 100644 --- a/cmd/spiderpool-controller/cmd/config.go +++ b/cmd/spiderpool-controller/cmd/config.go @@ -99,6 +99,7 @@ var envInfo = []envConf{ {"SPIDERPOOL_MULTUS_CONFIG_INFORMER_RESYNC_PERIOD", "60", false, nil, nil, &controllerContext.Cfg.MultusConfigInformerResyncPeriod}, {"SPIDERPOOL_CILIUM_CONFIGMAP_NAMESPACE_NAME", "kube-system/cilium-config", false, &controllerContext.Cfg.CiliumConfigName, nil, nil}, + {"SPIDERPOOL_CONTROLLER_DEPLOYMENT_NAME", "spiderpool-controller", true, &controllerContext.Cfg.ControllerDeploymentName, nil, nil}, {"SPIDERPOOL_IPPOOL_INFORMER_RESYNC_PERIOD", "300", false, nil, nil, &controllerContext.Cfg.IPPoolInformerResyncPeriod}, {"SPIDERPOOL_IPPOOL_INFORMER_WORKERS", "3", true, nil, nil, &controllerContext.Cfg.IPPoolInformerWorkers}, {"SPIDERPOOL_AUTO_IPPOOL_HANDLER_MAX_WORKQUEUE_LENGTH", "10000", true, nil, nil, &controllerContext.Cfg.IPPoolInformerMaxWorkQueueLength}, @@ -128,16 +129,18 @@ type Config struct { GopsListenPort string PyroscopeAddress string DefaultCniConfDir string - // CiliumConfigName is formatted by namespace and name,default is kube-system/cilium-config + // CiliumConfigName is formatted by namespace and name + // default is kube-system/cilium-config CiliumConfigName string - ControllerPodNamespace string - ControllerPodName string - DefaultCoordinatorName string - LeaseDuration int - LeaseRenewDeadline int - LeaseRetryPeriod int - LeaseRetryGap int + ControllerDeploymentName string + ControllerPodNamespace string + ControllerPodName string + DefaultCoordinatorName string + LeaseDuration int + LeaseRenewDeadline int + LeaseRetryPeriod int + LeaseRetryGap int IPPoolMaxAllocatedIPs int diff --git a/cmd/spiderpool-controller/cmd/crd_manager.go b/cmd/spiderpool-controller/cmd/crd_manager.go index c998ca4e7c..b50ed9ea6d 100644 --- a/cmd/spiderpool-controller/cmd/crd_manager.go +++ b/cmd/spiderpool-controller/cmd/crd_manager.go @@ -11,6 +11,7 @@ import ( "github.com/go-logr/logr" multusv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" calicov1 
"github.com/tigera/operator/pkg/apis/crd.projectcalico.org/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -35,6 +36,7 @@ func init() { utilruntime.Must(multusv1.AddToScheme(scheme)) utilruntime.Must(apiextensionsv1.AddToScheme(scheme)) utilruntime.Must(kubevirtv1.AddToScheme(scheme)) + utilruntime.Must(admissionregistrationv1.AddToScheme(scheme)) } func newCRDManager() (ctrl.Manager, error) { @@ -90,7 +92,7 @@ type _webhookHealthCheck struct{} // ServeHTTP only serves for SpiderIPPool webhook health check, it will return http status code 200 for GET request func (*_webhookHealthCheck) ServeHTTP(writer http.ResponseWriter, request *http.Request) { if request.Method == http.MethodGet { - //logger.Debug("SpiderIPPool webhook health check ready") writer.WriteHeader(http.StatusOK) + logger.Info("Webhook health check successful") } } diff --git a/cmd/spiderpool-controller/cmd/daemon.go b/cmd/spiderpool-controller/cmd/daemon.go index 2750208b3a..e534dbf06f 100644 --- a/cmd/spiderpool-controller/cmd/daemon.go +++ b/cmd/spiderpool-controller/cmd/daemon.go @@ -17,7 +17,6 @@ import ( "github.com/grafana/pyroscope-go" "go.uber.org/automaxprocs/maxprocs" "k8s.io/client-go/dynamic" - "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" ctrl "sigs.k8s.io/controller-runtime" @@ -25,7 +24,6 @@ import ( "github.com/spidernet-io/spiderpool/pkg/applicationcontroller/applicationinformers" "github.com/spidernet-io/spiderpool/pkg/constant" "github.com/spidernet-io/spiderpool/pkg/coordinatormanager" - dracontroller "github.com/spidernet-io/spiderpool/pkg/dra/dra-controller" "github.com/spidernet-io/spiderpool/pkg/election" "github.com/spidernet-io/spiderpool/pkg/event" "github.com/spidernet-io/spiderpool/pkg/gcmanager" @@ -33,7 +31,6 @@ import ( crdclientset "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned" "github.com/spidernet-io/spiderpool/pkg/kubevirtmanager" "github.com/spidernet-io/spiderpool/pkg/logutils" - "github.com/spidernet-io/spiderpool/pkg/manager/spidercliamparameter" "github.com/spidernet-io/spiderpool/pkg/multuscniconfig" "github.com/spidernet-io/spiderpool/pkg/namespacemanager" "github.com/spidernet-io/spiderpool/pkg/nodemanager" @@ -268,6 +265,22 @@ func initControllerServiceManagers(ctx context.Context) { } controllerContext.PodManager = podManager + if controllerContext.Cfg.PodResourceInjectConfig.Enabled { + logger.Debug("Begin to init Pod MutatingWebhook") + if err := podmanager.InitPodWebhook(controllerContext.ClientSet.AdmissionregistrationV1(), + controllerContext.CRDManager, controllerContext.Cfg.ControllerDeploymentName, + controllerContext.Cfg.PodResourceInjectConfig.NamespacesExclude, + controllerContext.Cfg.PodResourceInjectConfig.NamespacesInclude); err != nil { + logger.Fatal(err.Error()) + } + } else { + logger.Debug("InjectPodNetworkResource is disabled, try to remove the pod part in the MutatingWebhook") + if err := podmanager.RemovePodMutatingWebhook(controllerContext.ClientSet.AdmissionregistrationV1(), + controllerContext.Cfg.ControllerDeploymentName); err != nil { + logger.Error(err.Error()) + } + } + logger.Info("Begin to initialize StatefulSet manager") statefulSetManager, err := statefulsetmanager.NewStatefulSetManager( controllerContext.CRDManager.GetClient(), @@ -348,14 +361,6 @@ func initControllerServiceManagers(ctx context.Context) { } } - if 
controllerContext.Cfg.DraEnabled { - logger.Debug("Begin to setup SpiderClaimParameter webhook") - if err = spidercliamparameter.New(controllerContext.CRDManager.GetClient(), - controllerContext.CRDManager.GetAPIReader(), controllerContext.CRDManager); err != nil { - logger.Fatal(err.Error()) - } - } - if controllerContext.Cfg.EnableSpiderSubnet { logger.Debug("Begin to initialize Subnet manager") subnetManager, err := subnetmanager.NewSubnetManager( @@ -577,19 +582,6 @@ func setupInformers(k8sClient *kubernetes.Clientset) { logger.Fatal(err.Error()) } } - - if controllerContext.Cfg.DraEnabled { - logger.Info("Begin to start DRA-Controller") - informerFactory := informers.NewSharedInformerFactory(k8sClient, 0 /* resync period */) - if err = dracontroller.StartController(controllerContext.InnerCtx, - time.Duration(controllerContext.Cfg.LeaseRetryGap)*time.Second, - crdClient, k8sClient, informerFactory, - controllerContext.Leader); err != nil { - logger.Fatal(err.Error()) - } - } else { - logger.Info("the dra feature is disabled.") - } } func checkWebhookReady() { diff --git a/cmd/spiderpool-init/cmd/client.go b/cmd/spiderpool-init/cmd/client.go index f8ef23d3f3..2e57bcbe5b 100644 --- a/cmd/spiderpool-init/cmd/client.go +++ b/cmd/spiderpool-init/cmd/client.go @@ -91,21 +91,21 @@ func (c *CoreClient) WaitForIPPoolCreated(ctx context.Context, ipPool *spiderpoo logger := logutils.FromContext(ctx) for { - err := c.Create(ctx, ipPool) - if err == nil { - logger.Sugar().Infof("Succeed to create default IPv%d IPPool: %+v", *ipPool.Spec.IPVersion, *ipPool) - return nil - } - - if apierrors.IsAlreadyExists(err) { - logger.Sugar().Infof("Default IPv%d IPPool %s is already exists, ignore creating", *ipPool.Spec.IPVersion, ipPool.Name) - return nil - } - select { case <-ctx.Done(): return ctx.Err() default: + err := c.Create(ctx, ipPool) + if err == nil { + logger.Sugar().Infof("Succeed to create default IPv%d IPPool: %+v", *ipPool.Spec.IPVersion, *ipPool) + return nil + } + + if apierrors.IsAlreadyExists(err) { + logger.Sugar().Infof("Default IPv%d IPPool %s is already exists, ignore creating", *ipPool.Spec.IPVersion, ipPool.Name) + return nil + } + interval := retryIntervalSec * time.Second logger.Sugar().Infof("Failed to create default IPv%d IPPool %s, recreate in %s: %v", *ipPool.Spec.IPVersion, ipPool.Name, interval, err) time.Sleep(interval) diff --git a/cmd/spiderpool-init/cmd/config.go b/cmd/spiderpool-init/cmd/config.go index 2fabb12fb3..65fe70948c 100644 --- a/cmd/spiderpool-init/cmd/config.go +++ b/cmd/spiderpool-init/cmd/config.go @@ -44,13 +44,14 @@ const ( ENVDefaultIPv6IPRanges = "SPIDERPOOL_INIT_DEFAULT_IPV6_IPPOOL_IPRANGES" ENVDefaultIPv6Gateway = "SPIDERPOOL_INIT_DEFAULT_IPV6_IPPOOL_GATEWAY" - ENVEnableMultusConfig = "SPIDERPOOL_INIT_ENABLE_MULTUS_CONFIG" - ENVInstallMultusCNI = "SPIDERPOOL_INIT_INSTALL_MULTUS" - ENVDefaultCNIDir = "SPIDERPOOL_INIT_DEFAULT_CNI_DIR" - ENVDefaultCNIName = "SPIDERPOOL_INIT_DEFAULT_CNI_NAME" - ENVDefaultCNINamespace = "SPIDERPOOL_INIT_DEFAULT_CNI_NAMESPACE" - ENVDefaultMultusConfigMap = "SPIDERPOOL_INIT_MULTUS_CONFIGMAP" - ENVDefaultReadinessFile = "SPIDERPOOL_INIT_READINESS_FILE" + ENVEnableMultusConfig = "SPIDERPOOL_INIT_ENABLE_MULTUS_CONFIG" + ENVInstallMultusCNI = "SPIDERPOOL_INIT_INSTALL_MULTUS" + ENVDefaultCNIDir = "SPIDERPOOL_INIT_DEFAULT_CNI_DIR" + ENVDefaultCNIName = "SPIDERPOOL_INIT_DEFAULT_CNI_NAME" + ENVDefaultCNINamespace = "SPIDERPOOL_INIT_DEFAULT_CNI_NAMESPACE" + ENVDefaultMultusConfigMap = "SPIDERPOOL_INIT_MULTUS_CONFIGMAP" + 
ENVDefaultReadinessFile = "SPIDERPOOL_INIT_READINESS_FILE" + ENVDefaultCoordinatorVethLinkAddress = "SPIDERPOOL_INIT_DEFAULT_COORDINATOR_VETH_LINK_ADDRESS" ) var ( @@ -69,6 +70,7 @@ type InitDefaultConfig struct { CoordinatorPodCIDRType string CoordinatorPodDefaultRouteNic string CoordinatorPodMACPrefix string + CoordinatorVethLinkAddress string CoordinatorDetectGateway bool CoordinatorDetectIPConflict bool CoordinatorTunePodRoutes bool @@ -168,6 +170,8 @@ func parseENVAsDefault() InitDefaultConfig { } else { config.CoordinatorHijackCIDR = []string{} } + + config.CoordinatorVethLinkAddress = strings.ReplaceAll(os.Getenv(ENVDefaultCoordinatorVethLinkAddress), "\"", "") } else { logger.Info("Ignore creating default Coordinator") } diff --git a/cmd/spiderpool-init/cmd/root.go b/cmd/spiderpool-init/cmd/root.go index 49ff174ddd..151ac337b2 100644 --- a/cmd/spiderpool-init/cmd/root.go +++ b/cmd/spiderpool-init/cmd/root.go @@ -48,6 +48,7 @@ func Execute() { DetectGateway: &config.CoordinatorDetectGateway, PodDefaultRouteNIC: &config.CoordinatorPodDefaultRouteNic, PodMACPrefix: &config.CoordinatorPodMACPrefix, + VethLinkAddress: &config.CoordinatorVethLinkAddress, HijackCIDR: config.CoordinatorHijackCIDR, }, } diff --git a/docs/concepts/arch-zh_CN.md b/docs/concepts/arch-zh_CN.md index 33ef6b49c1..a3cbe1f646 100644 --- a/docs/concepts/arch-zh_CN.md +++ b/docs/concepts/arch-zh_CN.md @@ -46,7 +46,7 @@ Spiderpool 架构如上所示,包含了以下组件: [RDMA CNI](https://github.com/k8snetworkplumbingwg/rdma-cni): 实现 RDMA 网卡的网络命名空间隔离 [SR-IOV network operator](https://github.com/k8snetworkplumbingwg/sriov-network-operator): 便于安装和配置使用 sriov-cni - + ## 应用场景:Pod 接入一个 overlay CNI 和若干个 underlay CNI 网卡 ![arch_underlay](../images/spiderpool-overlay.jpg) diff --git a/docs/concepts/coordinator-zh_CN.md b/docs/concepts/coordinator-zh_CN.md index 9d548db81a..4a958314de 100644 --- a/docs/concepts/coordinator-zh_CN.md +++ b/docs/concepts/coordinator-zh_CN.md @@ -1,6 +1,6 @@ # Coordinator -**简体中文** | [**English**](coordinator.md) +**简体中文** | [**English**](coordinator.md) Spiderpool 内置一个叫 `coordinator` 的 CNI meta-plugin, 它在 Main CNI 被调用之后再工作,它主要提供以下几个主要功能: @@ -152,6 +152,27 @@ spec: txQueueLen: 2000 ``` +## 为 Pod 的 veth0 网卡配置本地链路地址,支持服务网格场景 + +默认情况下,Coordinator 不会为 veth0 网卡配置本地链路地址。但有些场景下(比如服务网格),经过 veth0 网卡流入的网格流量会随 istio 设置的 iptables 规则重定向,如果 veth0 没有 IP 地址,这会导致这部分流量被丢弃(见[#Issue3568](https://github.com/spidernet-io/spiderpool/issues/3568))。所以在这个场景下,我们需要为 veth0 配置一个本地链路地址。 + +```yaml +apiVersion: spiderpool.spidernet.io/v2beta1 +kind: SpiderMultusConfig +metadata: + name: istio-demo + namespace: default +spec: + cniType: macvlan + macvlan: + master: ["eth0"] + enableCoordinator: true + coordinator: + vethLinkAddress: "169.254.200.1" +``` + +> `vethLinkAddress` 默认为空,表示不配置。不为空则必须是一个合法的本地链路地址。 + ## 自动获取集群 Service 的 CIDR Kubernetes 1.29 开始支持以 ServiceCIDR 资源的方式配置集群 Service 的 CIDR,更多信息参考 [KEP 1880](https://github.com/kubernetes/enhancements/blob/master/keps/sig-network/1880-multiple-service-cidrs/README.md)。如果您的集群支持 ServiceCIDR,Spiderpool-controller 组件 自动监听 ServiceCIDR 资源的变化,将读取到的 Service 子网信息自动更新到 Spidercoordinator 的 Status 中。 diff --git a/docs/concepts/coordinator.md b/docs/concepts/coordinator.md index 963e0298d0..ffd053f5c8 100644 --- a/docs/concepts/coordinator.md +++ b/docs/concepts/coordinator.md @@ -10,7 +10,7 @@ Spiderpool incorporates a CNI meta-plugin called `coordinator` that works after - Check the reachability of Pod gateways - Support fixed Mac address prefixes for Pods -Note: If your OS(such as Fedora, CentOS, etc.) 
uses NetworkManager, highly recommend configuring following configuration file at `/etc/NetworkManager/conf.d/spidernet.conf` to +prevent interference from NetworkManager with veth interfaces created through `coordinator`: ```shell @@ -154,6 +154,27 @@ spec: txQueueLen: 2000 ``` +## Configure a link-local address for the Pod's veth0 interface to support service mesh scenarios + +By default, Coordinator does not configure a link-local address for the veth0 interface. However, in some scenarios (such as service mesh), mesh traffic flowing through the veth0 interface will be redirected according to iptables rules set by Istio. If veth0 does not have an IP address, this can cause that traffic to be dropped (see [#3568](https://github.com/spidernet-io/spiderpool/issues/3568)). Therefore, in this scenario, we need to configure a link-local address for veth0. + +```yaml +apiVersion: spiderpool.spidernet.io/v2beta1 +kind: SpiderMultusConfig +metadata: + name: istio-demo + namespace: default +spec: + cniType: macvlan + macvlan: + master: ["eth0"] + enableCoordinator: true + coordinator: + vethLinkAddress: "169.254.100.1" +``` + +> `vethLinkAddress` defaults to "", which means no address is configured for veth0. If it is not empty, it must be a valid link-local address. + ## Automatically get the CIDR of a clustered Service Kubernetes 1.29 starts to support configuring the CIDR of a clustered Service as a ServiceCIDR resource, for more information refer to [KEP 1880](https://github.com/kubernetes/enhancements/blob/master/keps/sig-network/1880-multiple-service-cidrs/README.md). If your cluster supports ServiceCIDR, the Spiderpool-controller component automatically listens for changes to the ServiceCIDR resource and automatically updates the Service subnet information it reads into the Status of the Spidercoordinator. diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 37d4d8dec3..1e004ba131 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -98,12 +98,12 @@ nav: - Node-based Topology: usage/network-topology.md - RDMA with RoCE: usage/rdma-roce.md - RDMA with Infiniband: usage/rdma-ib.md - - Dynamic-Resource-Allocation: usage/dra.md - Multi-Cluster Networking: usage/submariner.md - Access Service for Underlay CNI: usage/underlay_cni_service.md - Bandwidth Manage for IPVlan CNI: usage/ipvlan_bandwidth.md - Coexistence of multi CNIs: usage/multi_cni_coexist.md - Kubevirt: usage/kubevirt.md + - Istio: usage/istio.md - FAQ: usage/faq.md - Reference: - Annotations: reference/annotation.md diff --git a/docs/reference/crd-spidercoordinator.md b/docs/reference/crd-spidercoordinator.md index 297e6900a0..23ba528b46 100644 --- a/docs/reference/crd-spidercoordinator.md +++ b/docs/reference/crd-spidercoordinator.md @@ -14,6 +14,7 @@ metadata: spec: detectGateway: false detectIPConflict: false + vethLinkAddress: "" hostRPFilter: 0 podRPFilter: 0 hostRuleTable: 500 @@ -51,13 +52,14 @@ This is the Spidercoordinators spec for users to configure. | podCIDRType | The ways to fetch the CIDR of the cluster. auto(default), This means that it will automatically switch podCIDRType to cluster or calico or cilium. based on cluster CNI. calico: auto fetch the subnet of the pod from the ip pools of calico, This only works if the cluster CNI is calico; cilium: Auto fetch the pod's subnet from cilium's configMap or ip pools.
Supported IPAM modes: ["cluster-pool","kubernetes","multi-pool"]; cluster: auto fetch the subnet of the pod from the kubeadm-config configmap, This is useful if there is only a globally unique default pod's subnet; none: don't get the subnet of the pod, which is useful for some special cases. In this case,you can manually configure the hijackCIDR field | string | require | auto,cluster,calico,cilium,none | auto |
 | tunePodRoutes | tune pod's route while the pod is attached to multiple NICs | bool | optional | true,false | true |
 | podDefaultRouteNIC | The NIC where the pod's default route resides | string | optional | "",eth0,net1... | underlay: eth0,overlay: net1 |
-| detectGateway | enable detect gateway while launching pod, If the gateway is unreachable, pod will be failed to created; Note: We use ARP probes to detect if the gateway is reachable, and some gateway routers may warn about this | boolean | optional | true,false | false |
-| detectIPConflict | enable the pod's ip if is conflicting while launching pod. If an IP conflict of the pod is detected, pod will be failed to created | boolean | optional | true,false | false |
-| podMACPrefix | fix the pod's mac address with this prefix + 4 bytes IP | string | optional | a invalid mac address prefix | "" |
+| detectGateway | enable gateway detection while launching a Pod. If the gateway is unreachable, the Pod fails to be created. Note: ARP probes are used to detect whether the gateway is reachable, and some gateway routers may warn about this | boolean | optional | true,false | false |
+| detectIPConflict | enable IP conflict detection for the Pod's IP while launching a Pod. If an IP conflict is detected, the Pod fails to be created | boolean | optional | true,false | false |
+| vethLinkAddress | configure a link-local address for the veth0 device to support service mesh scenarios such as Istio | string | optional | "" or a valid link-local address | "" |
+| podMACPrefix | fix the Pod's MAC address with this prefix + 4 bytes of IP | string | optional | a valid MAC address prefix | "" |
 | podRPFilter | set rp_filter sysctl for the pod | int | required | 0,1,2;suggest to be 0 | 0 |
 | hostRPFilter | (deprecated) set rp_filter sysctl for the node | int | required | 0,1,2;suggest to be 0 | 0 |
 | hostRuleTable | The directly routing table of the host accessing the pod's underlay IP will be placed in this policy routing table | int | required | int | 500 |
-| txQueueLen | The Transmit Queue Length (txqueuelen) is a TCP/IP stack network interface value that sets the number of packets allowed per kernel transmit queue of a network interface device | int | optional | >= 0, default to 0, it's mean to don't set it |
+| txQueueLen | The Transmit Queue Length (txqueuelen) is a TCP/IP stack network interface value that sets the number of packets allowed per kernel transmit queue of a network interface device | int | optional | >= 0, default to 0, which means it is not set |

 ### Status (subresource)
diff --git a/docs/reference/spiderpool-controller.md b/docs/reference/spiderpool-controller.md
index 847ff3f572..12ecdf254d 100644
--- a/docs/reference/spiderpool-controller.md
+++ b/docs/reference/spiderpool-controller.md
@@ -32,6 +32,7 @@ Run the spiderpool controller daemon.
 | SPIDERPOOL_CNI_CONFIG_DIR | /etc/cni/net.d | The host path of the cni config directory. |
 | SPIDERPOOL_CILIUM_CONFIGMAP_NAMESPACE_NAME | kube-system/cilium-config. | The cilium's configMap, default is kube-system/cilium-config.
| | SPIDERPOOL_COORDINATOR_DEFAULT_NAME | default | the name of default spidercoordinator CR | +| SPIDERPOOL_CONTROLLER_DEPLOYMENT_NAME | spiderpool-controller | The deployment name of spiderpool-controller. | ## spiderpool-controller shutdown diff --git a/docs/usage/dra.md b/docs/usage/dra.md deleted file mode 100644 index 724ba5d32c..0000000000 --- a/docs/usage/dra.md +++ /dev/null @@ -1,220 +0,0 @@ -# Dynamic-Resource-Allocation - -## Introduction - -Dynamic-Resource-Allocation (DRA) is a new feature introduced by Kubernetes that puts resource scheduling in the hands of third-party developers. It provides an API more akin to a storage persistent volume, instead of the countable model (e.g., "nvidia.com/gpu: 2") that device-plugin used to request access to resources, with the main benefit being a more flexible and dynamic allocation of hardware resources, resulting in improved resource utilization. The main benefit is more flexible and dynamic allocation of hardware resources, which improves resource utilization and enhances resource scheduling, enabling Pods to schedule the best nodes. DRA is currently available as an alpha feature in Kubernetes 1.26 (December 2022 release), driven by Nvidia and Intel. -Spiderpool currently integrates with the DRA framework, which allows for the following, but not limited to: - -* Automatically scheduling to the appropriate node based on the NIC and subnet information reported by each node, combined with the SpiderMultusConfig configuration used by the Pod, so as to prevent the Pod from not being able to start up after scheduling to the node. -* Unify the resource usage of multiple device-plugins: [sriov-network-device-plugin](https://github.com/k8snetworkplumbingwg/sriov-network-device-plugin), [k8s-rdma-shared-dev-plugin](https://github.com/Mellanox/k8s-rdma-shared-dev-plugin) in the SpiderClaimParameter. -* Continuously updated, see for details. [RoadMap](../develop/roadmap.md) - -## Explanation of nouns - -* ResourceClaimTemplate: resourceclaim template for generating resourceclaim resources. One resourceClaimTemplate can generate multiple resourceclaims. -* ResourceClaim: ResourceClaim binds a specific set of node resources for use by the Pod. -* ResourceClass: A ResourceClass represents a resource (e.g., GPU), and a DRA plugin is responsible for driving the resource represented by a ResourceClass. - -## Environment Preparation - -* Prepare a Kubernetes cluster with a higher version than v1.29.0, and enable the dra feature-gate function of the cluster. -* Have Kubectl, [Helm] () installed. - -## Quick Start - -1. Currently DRA is not turned on by default as an alpha feature of Kubernetes. So we need to turn it on manualways, as following steps. - - Add the following to the kube-apiserver startup parameters. - - ```shell - --feature-gates=DynamicResourceAllocation=true - --runtime-config=resource.k8s.io/v1alpha2=true - ``` - - Add the following to the kube-controller-manager startup parameters. - - ```shell - --feature-gates=DynamicResourceAllocation=true - ``` - - Add the following to kube-scheduler's startup parameters: - - ```shell - --feature-gates=DynamicResourceAllocation=true - ``` - -2. DRA needs to rely on [CDI] (), so it needs container runtime support. In this article, we take containerd as an example, and we need to enable cdi function manually. - - Modify the containerd configuration file to configure CDI. - - ```shell - ~# vim /etc/containerd/config.toml - ... - [plugins. 
"io.containerd.grpc.v1.cri"] - enable_cdi = true - cdi_spec_dirs = ["/etc/cdi", "/var/run/cdi"] - ~# systemctl restart containerd - ``` - - > It is recommended that containerd be older than v1.7.0, as CDI is supported in later versions. The version supported by different runtimes is not the same, please check if it is supported first. - -3. Install Spiderpool, taking care to enable CDI. - - ```shell - helm repo add spiderpool https://spidernet-io.github.io/spiderpool - helm repo update spiderpool - helm install spiderpool spiderpool/spiderpool --namespace kube-system --set dra.enabled=true - -4. Verify the installation - - Check that the Spiderpool pod is running correctly, and check for the presence of the resourceclass resource: - - ```shell - ~# kubectl get po -n kube-system | grep spiderpool - spiderpool-agent-hqt2b 1/1 Running 0 20d - spiderpool-agent-nm9vl 1/1 Running 0 20d - spiderpool-controller-7d7f4f55d4-w2rv5 1/1 Running 0 20d - spiderpool-init 0/1 Completed 0 21d - ~# kubectl get resourceclass - NAME DRIVERNAME AGE - netresources.spidernet.io netresources.spidernet.io 20d - ``` - - > netresources.spidernet.io is Spiderpool's resourceclass, and Spiderpool will take care of creating and allocating resourceclaims belonging to this resourceclass. - -5. Create SpiderIPPool and SpiderMultusConfig instances. - - > Note: This step can be skipped if your cluster already has other CNIs installed or does not require an underlay CNI with Macvlan. - - ```shell - MACVLAN_MASTER_INTERFACE="eth0" - cat < SpiderMultusConfig will automatically create the Multus network-attachment-definetion instance - - ```shell - cat < Create a ResourceClaimTemplate, K8s will create its own unique Resourceclaim for each Pod based on this ResourceClaimTemplate. the declaration cycle of the Resourceclaim will be consistent with that of the Pod. The declaration cycle of the Resourceclaim is consistent with that of the Pod. - > - > The SpiderClaimParameter is used to extend the configuration parameters of the ResourceClaim, which will affect the scheduling of the ResourceClaim and the generation of its CDI file. In this example, setting rdmaAcc to true will affect whether or not the configured so file is mounted. - > - > A Pod's container affects the resources required by containerd by declaring the use of claims in Resources. The CDI file corresponding to the claim is translated into an OCI Spec configuration when the container is run, which determines the container's creation. - > - > If the Pod creation fails with "unresolvable CDI devices: xxxx", it is possible that the CDI version supported by the container at runtime is too low, which makes the container unable to parse the cdi file at runtime. Currently, the default CDI version of Spiderpool is the latest one. You can specify a lower version in the SpiderClaimParameter instance via annotation: "dra.spidernet.io/cdi-version", e.g.: dra.spidernet.io/cdi-version: 0.5.0 - -7. Validation - - After creating the Pod, view the generated resource files such as ResourceClaim. 
- - ```shell - ~# kubectl get resourceclaim - NAME RESOURCECLASSNAME ALLOCATIONMODE STATE AGE - demo-745fb4c498-72g7g-demo-7d458 netresources.spidernet.io WaitForFirstConsumer allocated,reserved 20d - ~# cat /var/run/cdi/k8s.netresources.spidernet.io-claim_1e15705a-62fe-4694-8535-93a5f0ccf996.yaml - --- - cdiVersion: 0.6.0 - containerEdits: {} - devices: - - containerEdits: - env: - - DRA_CLAIM_UID=1e15705a-62fe-4694-8535-93a5f0ccf996 - name: 1e15705a-62fe-4694-8535-93a5f0ccf996 - kind: k8s.netresources.spidernet.io/claim - ``` - - This shows that the ResourceClaim has been created, and STATE shows allocated and reserverd, indicating that it has been used by the pod. And spiderpool has generated a CDI file for the ResourceClaim, which describes the files and environment variables to be mounted. - - Check that the pod is Running and verify that the the environment variable (DRA_CLAIM_UID) is declared. - - ```shell - ~# kubectl get po - NAME READY STATUS RESTARTS AGE - nginx-745fb4c498-72g7g 1/1 Running 0 20m - nginx-745fb4c498-s92qr 1/1 Running 0 20m - ~# kubectl exec -it nginx-745fb4c498-72g7g sh - ~# printenv DRA_CLAIM_UID - 1e15705a-62fe-4694-8535-93a5f0ccf996 - ``` - - You can see that the Pod's containers have correctly declared environment variables, It shows the dra is works. - -## Welcome to try it out - -DRA is currently available as an alpha feature of Spiderpool, and we'll be expanding it with more capabilities in the future, so feel free to try it out. Please let us know if you have any further questions or requests. diff --git a/docs/usage/dra_zh_CN.md b/docs/usage/dra_zh_CN.md deleted file mode 100644 index 21f9007cc6..0000000000 --- a/docs/usage/dra_zh_CN.md +++ /dev/null @@ -1,225 +0,0 @@ -# Dynamic-Resource-Allocation - -## 介绍 - -动态资源分配(DRA)是 Kubernetes 推出的一项新 feature,它将资源调度交到第三方开发人员手中。它摒弃了之前 device-plugin 请求访问资源时的可计数的模式(例如 "nvidia.com/gpu: 2"),提供了更类似于存储持久卷的 API。它的主要好处是更加灵活、动态的分配硬件资源,提高了资源的利用率。并且增强资源调度、使 Pod 能够调度最佳节点。目前在 Nvidia 和 Intel 的推动下,DRA 已经作为 Kubernetes 1.26(2022 年 12 月发布)的 alpha 功能。 - -目前 Spiderpool 已经集成 DRA 框架,基于该功能可实现以下但不限于的能力: - -* 可根据每个节点上报的网卡和子网信息,并结合 Pod 使用的 SpiderMultusConfig 配置,自动调度到合适的节点,避免 Pod 调度到节点之后无法启动 -* 在 SpiderClaimParameter 中统一多个 device-plugin 如 [sriov-network-device-plugin](https://github.com/k8snetworkplumbingwg/sriov-network-device-plugin), [k8s-rdma-shared-dev-plugin](https://github.com/Mellanox/k8s-rdma-shared-dev-plugin) 的资源使用方式 -* 持续更新, 详见 [RoadMap](../develop/roadmap.md) - -## 名称解释 - -* ResourceClaimTemplate: resourceclaim 模版,用于生成 resourceclaim 资源。一份 resourceClaimTemplate 可以生成多个 resourceclaim. -* ResourceClaim: ResourceClaim 绑定一组特定的节点资源,供于 Pod 使用。 -* ResourceClass: 一种 ResourceClass 代表一种资源(比如 GPU), 一种 DRA 插件负责驱动一种 ResourceClass 所代表的资源。 - -## 环境准备 - -1. 准备一个高版本的 Kubernetes 集群, 推荐版本大于 v1.29.0, 并且开启集群的 dra feature-gate 功能 -2. 已安装 Kubectl、[Helm](https://helm.sh/docs/intro/install/) - -## 快速开始 - -1. 目前 DRA 作为 Kubernetes 的 Alpha 功能,默认不打开。所以我们需要以手动方式开启,步骤如下: - - 在 kube-apiserver 的启动参数中加入: - - ```shell - - --feature-gates=DynamicResourceAllocation=true - - --runtime-config=resource.k8s.io/v1alpha2=true - ``` - - 在 kube-controller-manager 的启动参数中加入: - - ```shell - - --feature-gates=DynamicResourceAllocation=true - ``` - - 在 kube-scheduler 的启动参数中加入: - - ```shell - - --feature-gates=DynamicResourceAllocation=true - ``` - -2. DRA 需要依赖 [CDI](https://github.com/cncf-tags/container-device-interface), 所以需要容器运行时支持。本文以 containerd 为例,需要手动开启 cdi 功能: - - 修改 containerd 的配置文件,配置 CDI: - - ```shell - ~# vim /etc/containerd/config.toml - ... 
- [plugins."io.containerd.grpc.v1.cri"] - enable_cdi = true - cdi_spec_dirs = ["/etc/cdi", "/var/run/cdi"] - - ~# systemctl restart containerd - ``` - - > 建议 containerd 版本大于 v1.7.0, 此后版本才支持 CDI 功能。不同运行时支持的版本不一致,请先检查是否支持。 - -3. 安装 Spiderpool, 注意开启 CDI 功能 - - ```shell - helm repo add spiderpool https://spidernet-io.github.io/spiderpool - - helm repo update spiderpool - - helm install spiderpool spiderpool/spiderpool --namespace kube-system --set dra.enabled=true - ``` - -4. 验证安装 - - 检查 Spiderpool pod 是否正常 running, 并检查是否存在 resourceclass 资源: - - ```shell - ~# kubectl get po -n kube-system | grep spiderpool - spiderpool-agent-hqt2b 1/1 Running 0 20d - spiderpool-agent-nm9vl 1/1 Running 0 20d - spiderpool-controller-7d7f4f55d4-w2rv5 1/1 Running 0 20d - spiderpool-init 0/1 Completed 0 21d - ~# kubectl get resourceclass - NAME DRIVERNAME AGE - netresources.spidernet.io netresources.spidernet.io 20d - ``` - - > netresources.spidernet.io 为 Spiderpool 的 resourceclass, Spiderpool 将会关注属于该 resourceclass 的 resourceclaim 的创建与分配 - -5. 创建 SpiderIPPool 和 SpiderMultusConfig 实例: - - > 注意: 如果您的集群已经安装了其他 CNI 或不需要使用 Macvlan 的 underlay CNI,这一步可以跳过。 - - ```shell - MACVLAN_MASTER_INTERFACE="eth0" - cat < SpiderMultusConfig 将会自动创建 Multus network-attachment-definetion 实例 - - ```shell - cat < 创建一个 ResourceClaimTemplate, K8s 将会根据这个 ResourceClaimTemplate 为每个 Pod 创建自己独有的 Resourceclaim。该 Resourceclaim 的声明周期与该 Pod保持一致。 - > - > SpiderClaimParameter 用于扩展 ResourceClaim 的配置参数,将会影响 ResourceClaim 的调度以及其 CDI 文件的生成。 - > - > Pod 的 container 通过在 Resources 中声明 claims 的使用,这将影响 containerd 所需要的资源。容器运行时会将该 claim 对应的 CDI 文件翻译为 OCI Spec配置,从而决定container的创建。 - > - > 如果创建 Pod 失败,提示 “unresolvable CDI devices: xxxx”, 这可能是容器运行时支持的 CDI 版本过低,导致容器运行时无法解析 cdi 文件。目前 Spiderpool 默认的 CDI 版本为最新。可以通过在 SpiderClaimParameter 实例中通过 annotation: "dra.spidernet.io/cdi-version" 指定较低版本,比如: dra.spidernet.io/cdi-version: 0.5.0 - -7. 
验证 - - 创建 Pod 之后, 查看生成的 ResourceClaim 等资源文件: - - ```shell - ~# kubectl get resourceclaim - NAME RESOURCECLASSNAME ALLOCATIONMODE STATE AGE - demo-745fb4c498-72g7g-demo-7d458 netresources.spidernet.io WaitForFirstConsumer allocated,reserved 20d - ~# cat /var/run/cdi/k8s.netresources.spidernet.io-claim_1e15705a-62fe-4694-8535-93a5f0ccf996.yaml - --- - cdiVersion: 0.6.0 - containerEdits: {} - devices: - - containerEdits: - env: - - DRA_CLAIM_UID=1e15705a-62fe-4694-8535-93a5f0ccf996 - name: 1e15705a-62fe-4694-8535-93a5f0ccf996 - kind: k8s.netresources.spidernet.io/claim - ``` - - 这里显示 ResourceClaim 已经被创建,并且 STATE 显示 allocated 和 reserverd,说明已经被 pod 使用。并且 spiderpool 已经为该 ResourceClaim 生成了对应的 CDI 文件。CDI 文件描述了需要挂载的文件和环境变量等。 - - 检查 Pod 是否 Running,并且验证 Pod 是否指定了环境变量 `DRA_CLAIM_UID`: - - ```shell - ~# kubectl get po - NAME READY STATUS RESTARTS AGE - nginx-745fb4c498-72g7g 1/1 Running 0 20m - nginx-745fb4c498-s92qr 1/1 Running 0 20m - ~# kubectl exec -it nginx-745fb4c498-72g7g sh - ~# printenv DRA_CLAIM_UID - 1e15705a-62fe-4694-8535-93a5f0ccf996 - ``` - - 可以看到 Pod 的容器已经正确写入环境变量,说明 DRA 工作正常。 - -## 欢迎试用 - -目前 DRA 作为 Spiderpool 的 Alpha 功能,在未来我们会扩展更多能力,欢迎试用。如果您有更多问题或需求,请告诉我们。 diff --git a/docs/usage/install/ai/get-started-macvlan-zh_CN.md b/docs/usage/install/ai/get-started-macvlan-zh_CN.md index 469775d27e..f3d4eabb65 100644 --- a/docs/usage/install/ai/get-started-macvlan-zh_CN.md +++ b/docs/usage/install/ai/get-started-macvlan-zh_CN.md @@ -53,8 +53,8 @@ 对于 Mellanox 网卡,可下载 [NVIDIA OFED 官方驱动](https://network.nvidia.com/products/infiniband-drivers/linux/mlnx_ofed/)进行主机安装,执行如下安装命令: ```shell - $ mount /root/MLNX_OFED_LINUX-24.01-0.3.3.1-ubuntu22.04-x86_64.iso /mnt - $ /mnt/mlnxofedinstall --all + mount /root/MLNX_OFED_LINUX-24.01-0.3.3.1-ubuntu22.04-x86_64.iso /mnt + /mnt/mlnxofedinstall --all ``` 对于 Mellanox 网卡,也可基于容器化安装驱动,实现对集群主机上所有 Mellanox 网卡批量安装驱动,运行如下命令,注意的是,该运行过程中需要访问因特网获取一些安装包。当所有的 ofed pod 进入 ready 状态,表示主机上已经完成了 OFED driver 安装。 @@ -131,7 +131,7 @@ gdrdrv 24576 0 ``` -4. 确认主机上的 RDMA 子系统为 shared 模式,这是 macvlan 场景下提供 RDMA 设备给容器的要求。 +4. 确认主机上的 RDMA 子系统为 shared 模式,这是 macvlan 场景下提供 RDMA 设备给容器的要求。 ``` # Check the current operating mode (the Linux RDMA subsystem operates in shared mode by default): @@ -144,10 +144,10 @@ 1. 使用 helm 安装 Spiderpool,并启用 rdmaSharedDevicePlugin 组件 ```shell - $ helm repo add spiderpool https://spidernet-io.github.io/spiderpool - $ helm repo update spiderpool - $ kubectl create namespace spiderpool - $ helm install spiderpool spiderpool/spiderpool -n spiderpool --set rdma.rdmaSharedDevicePlugin.install=true + helm repo add spiderpool https://spidernet-io.github.io/spiderpool + helm repo update spiderpool + kubectl create namespace spiderpool + helm install spiderpool spiderpool/spiderpool -n spiderpool --set rdma.rdmaSharedDevicePlugin.install=true ``` > 如果您是中国用户,可以指定参数 `--set global.imageRegistryOverride=ghcr.m.daocloud.io` 来使用国内的镜像源。 @@ -223,10 +223,10 @@ metadata: name: gpu1-net11 spec: - gateway: 172.16.11.254 - subnet: 172.16.11.0/16 - ips: - - 172.16.11.1-172.16.11.200 + gateway: 172.16.11.254 + subnet: 172.16.11.0/16 + ips: + - 172.16.11.1-172.16.11.200 --- apiVersion: spiderpool.spidernet.io/v2beta1 kind: SpiderMultusConfig @@ -234,11 +234,11 @@ name: gpu1-macvlan namespace: spiderpool spec: - cniType: macvlan - macvlan: - master: ["enp11s0f0np0"] - ippools: - ipv4: ["gpu1-net11"] + cniType: macvlan + macvlan: + master: ["enp11s0f0np0"] + ippools: + ipv4: ["gpu1-net11"] EOF ``` @@ -247,6 +247,8 @@ 1. 
在指定节点上创建一组 DaemonSet 应用 如下例子,通过 annotations `v1.multus-cni.io/default-network` 指定使用 calico 的缺省网卡,用于进行控制面通信,annotations `k8s.v1.cni.cncf.io/networks` 接入 8 个 GPU 亲和网卡的网卡,用于 RDMA 通信,并配置 8 种 RDMA resources 资源 + > 注:可自动为应用注入 RDMA 网络资源,参考 [基于 Webhook 自动注入 RDMA 资源](#基于-webhook-自动注入网络资源) + ```shell $ helm repo add spiderchart https://spidernet-io.github.io/charts $ helm repo update @@ -261,39 +263,39 @@ # just run daemonset in nodes 'worker1' and 'worker2' affinity: nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - worker1 - - worker2 + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - worker1 + - worker2 # macvlan interfaces extraAnnotations: k8s.v1.cni.cncf.io/networks: |- - [{"name":"gpu1-macvlan","namespace":"spiderpool"}, - {"name":"gpu2-macvlan","namespace":"spiderpool"}, - {"name":"gpu3-macvlan","namespace":"spiderpool"}, - {"name":"gpu4-macvlan","namespace":"spiderpool"}, - {"name":"gpu5-macvlan","namespace":"spiderpool"}, - {"name":"gpu6-macvlan","namespace":"spiderpool"}, - {"name":"gpu7-macvlan","namespace":"spiderpool"}, - {"name":"gpu8-macvlan","namespace":"spiderpool"}] + [{"name":"gpu1-macvlan","namespace":"spiderpool"}, + {"name":"gpu2-macvlan","namespace":"spiderpool"}, + {"name":"gpu3-macvlan","namespace":"spiderpool"}, + {"name":"gpu4-macvlan","namespace":"spiderpool"}, + {"name":"gpu5-macvlan","namespace":"spiderpool"}, + {"name":"gpu6-macvlan","namespace":"spiderpool"}, + {"name":"gpu7-macvlan","namespace":"spiderpool"}, + {"name":"gpu8-macvlan","namespace":"spiderpool"}] # macvlan resource resources: limits: - spidernet.io/shared_cx5_gpu1: 1 - spidernet.io/shared_cx5_gpu2: 1 - spidernet.io/shared_cx5_gpu3: 1 - spidernet.io/shared_cx5_gpu4: 1 - spidernet.io/shared_cx5_gpu5: 1 - spidernet.io/shared_cx5_gpu6: 1 - spidernet.io/shared_cx5_gpu7: 1 - spidernet.io/shared_cx5_gpu8: 1 - #nvidia.com/gpu: 1 + spidernet.io/shared_cx5_gpu1: 1 + spidernet.io/shared_cx5_gpu2: 1 + spidernet.io/shared_cx5_gpu3: 1 + spidernet.io/shared_cx5_gpu4: 1 + spidernet.io/shared_cx5_gpu5: 1 + spidernet.io/shared_cx5_gpu6: 1 + spidernet.io/shared_cx5_gpu7: 1 + spidernet.io/shared_cx5_gpu8: 1 + #nvidia.com/gpu: 1 EOF $ helm install rdma-tools spiderchart/rdma-tools -f ./values.yaml @@ -410,3 +412,106 @@ # Successfully access the RDMA service of the other Pod $ ib_read_lat 172.91.0.115 ``` + +## 基于 Webhook 自动注入网络资源 + +Spiderpool 为了简化 AI 应用配置多网卡的复杂度,支持通过 labels(`cni.spidernet.io/rdma-resource-inject`) 对一组网卡配置分类。用户只需要为 Pod 添加相同的注解。这样 Spiderpool 会通过 webhook 自动为 Pod 注入所有具有相同 label 的对应的网卡和网络资源。 + + > 该功能仅支持 [ macvlan,ipvlan,sriov,ib-sriov, ipoib ] 这几种 cniType 的网卡配置。 + +1. 安装 Spiderpool 时,指定开启 webhook 自动注入网络资源功能: + + ```shell + helm install spiderpool spiderchart/spiderpool --set spiderpoolController.podResourceInject.enabled=true + ``` + + > - 默认关闭 webhook 自动注入网络资源功能,需要用户手动开启。 + > - 您可以通过 `spiderpoolController.podResourceInject.namespacesExclude` 指定不注入的命名空间,通过 `spiderpoolController.podResourceInject.namespacesInclude` 指定注入的命名空间。 + > - 安装 Spiderpool 后,您可以通过更新 spiderpool-config configMap 中 podResourceInject 字段更新配置。 + +2. 创建 SpiderMultusConfig 时指定 labels,并配置 RDMA 相关配置: + + ```shell + $ cat < - `cni.spidernet.io/rdma-resource-inject: gpu-macvlan` 固定的 key,value 为用户自定义。具有相同 `Label` 和 `Value` 的一组网卡配置要求 `cniType` 必须一致。 + > - `enableRdma`, `rdmaResourceName` 和 `ippools` 必须配置,否则 Pod 无法成功注入网络资源。 + +3. 
创建应用时添加注解: `cni.spidernet.io/rdma-resource-inject: gpu-macvlan`,这样 Spiderpool 自动为 Pod 添加 8 个 GPU 亲和网卡的网卡,用于 RDMA 通信,并配置 8 种 RDMA resources 资源: + + > 注意:使用 webhook 自动注入网络资源功能时,不能为应用添加其他网络配置注解(如 `k8s.v1.cni.cncf.io/networks` 和 `ipam.spidernet.io/ippools`等),否则会影响资源自动注入功能。 + + ```shell + $ helm repo add spiderchart https://spidernet-io.github.io/charts + $ helm repo update + $ helm search repo rdma-tools + + # run daemonset on worker1 and worker2 + $ cat < values.yaml + # for china user , it could add these to use a domestic registry + #image: + # registry: ghcr.m.daocloud.io + + # just run daemonset in nodes 'worker1' and 'worker2' + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - worker1 + - worker2 + + # macvlan interfaces + extraAnnotations: + cni.spidernet.io/rdma-resource-inject: gpu-macvlan + EOF + + $ helm install rdma-tools spiderchart/rdma-tools -f ./values.yaml + ``` + + 当 Pod 成功 Running,检查 Pod 是否成功注入 8 个 RDMA 网卡的 annotations 和 8 种 RDMA 资源。 + + ```shell + # Pod multus annotations + k8s.v1.cni.cncf.io/networks: |- + [{"name":"gpu1-macvlan","namespace":"spiderpool"}, + {"name":"gpu2-macvlan","namespace":"spiderpool"}, + {"name":"gpu3-macvlan","namespace":"spiderpool"}, + {"name":"gpu4-macvlan","namespace":"spiderpool"}, + {"name":"gpu5-macvlan","namespace":"spiderpool"}, + {"name":"gpu6-macvlan","namespace":"spiderpool"}, + {"name":"gpu7-macvlan","namespace":"spiderpool"}, + {"name":"gpu8-macvlan","namespace":"spiderpool"}] + # macvlan resource + resources: + requests: + spidernet.io/shared_cx5_gpu1: 1 + spidernet.io/shared_cx5_gpu2: 1 + spidernet.io/shared_cx5_gpu3: 1 + spidernet.io/shared_cx5_gpu4: 1 + spidernet.io/shared_cx5_gpu5: 1 + spidernet.io/shared_cx5_gpu6: 1 + spidernet.io/shared_cx5_gpu7: 1 + spidernet.io/shared_cx5_gpu8: 1 + #nvidia.com/gpu: 1 + ``` diff --git a/docs/usage/install/ai/get-started-macvlan.md b/docs/usage/install/ai/get-started-macvlan.md index 1c86c32510..9bf9c008e9 100644 --- a/docs/usage/install/ai/get-started-macvlan.md +++ b/docs/usage/install/ai/get-started-macvlan.md @@ -53,8 +53,8 @@ The network planning for the cluster is as follows: For Mellanox network cards, you can download [the NVIDIA OFED official driver](https://network.nvidia.com/products/infiniband-drivers/linux/mlnx_ofed/) and install it on the host using the following installation command: ```shell - $ mount /root/MLNX_OFED_LINUX-24.01-0.3.3.1-ubuntu22.04-x86_64.iso /mnt - $ /mnt/mlnxofedinstall --all + mount /root/MLNX_OFED_LINUX-24.01-0.3.3.1-ubuntu22.04-x86_64.iso /mnt + /mnt/mlnxofedinstall --all ``` For Mellanox network cards, you can also perform a containerized installation to batch install drivers on all Mellanox network cards in the cluster hosts. Run the following command. Note that this process requires internet access to fetch some installation packages. When all the OFED pods enter the ready state, it indicates that the OFED driver installation on the hosts is complete: @@ -145,10 +145,10 @@ The network planning for the cluster is as follows: 1. 
Use Helm to install Spiderpool and enable the rdmaSharedDevicePlugin: ```shell - $ helm repo add spiderpool https://spidernet-io.github.io/spiderpool - $ helm repo update spiderpool - $ kubectl create namespace spiderpool - $ helm install spiderpool spiderpool/spiderpool -n spiderpool --set rdma.rdmaSharedDevicePlugin.install=true + helm repo add spiderpool https://spidernet-io.github.io/spiderpool + helm repo update spiderpool + kubectl create namespace spiderpool + helm install spiderpool spiderpool/spiderpool -n spiderpool --set rdma.rdmaSharedDevicePlugin.install=true ``` > If you are a user in China, you can specify the helm option `--set global.imageRegistryOverride=ghcr.m.daocloud.io` to use a domestic image source. @@ -224,10 +224,10 @@ The network planning for the cluster is as follows: metadata: name: gpu1-net11 spec: - gateway: 172.16.11.254 - subnet: 172.16.11.0/16 - ips: - - 172.16.11.1-172.16.11.200 + gateway: 172.16.11.254 + subnet: 172.16.11.0/16 + ips: + - 172.16.11.1-172.16.11.200 --- apiVersion: spiderpool.spidernet.io/v2beta1 kind: SpiderMultusConfig @@ -235,11 +235,11 @@ The network planning for the cluster is as follows: name: gpu1-macvlan namespace: spiderpool spec: - cniType: macvlan - macvlan: - master: ["enp11s0f0np0"] - ippools: - ipv4: ["gpu1-net11"] + cniType: macvlan + macvlan: + master: ["enp11s0f0np0"] + ippools: + ipv4: ["gpu1-net11"] EOF ``` @@ -248,6 +248,8 @@ The network planning for the cluster is as follows: 1. Create a DaemonSet application on specified nodes. In the following example, the annotation field `v1.multus-cni.io/default-network` specifies the use of the default Calico network card for control plane communication. The annotation field `k8s.v1.cni.cncf.io/networks` connects to the 8 network cards affinitized to the GPU for RDMA communication, and configures 8 types of RDMA resources. 
+ > NOTICE: It support auto inject RDMA resources for application, see [Auto inject RDMA Resources](#auto-inject-rdma-resources-base-on-webhook) + ```shell $ helm repo add spiderchart https://spidernet-io.github.io/charts $ helm repo update @@ -262,42 +264,38 @@ The network planning for the cluster is as follows: # just run daemonset in nodes 'worker1' and 'worker2' affinity: nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - worker1 - - worker2 + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - worker1 + - worker2 # interfaces extraAnnotations: - k8s.v1.cni.cncf.io/networks: |- - [{"name":"gpu1-macvlan","namespace":"spiderpool"}, - {"name":"gpu2-macvlan","namespace":"spiderpool"}, - {"name":"gpu3-macvlan","namespace":"spiderpool"}, - {"name":"gpu4-macvlan","namespace":"spiderpool"}, - {"name":"gpu5-macvlan","namespace":"spiderpool"}, - {"name":"gpu6-macvlan","namespace":"spiderpool"}, - {"name":"gpu7-macvlan","namespace":"spiderpool"}, - {"name":"gpu8-macvlan","namespace":"spiderpool"}] - - # resource + k8s.v1.cni.cncf.io/networks: |- + [{"name":"gpu1-macvlan","namespace":"spiderpool"}, + {"name":"gpu2-macvlan","namespace":"spiderpool"}, + {"name":"gpu3-macvlan","namespace":"spiderpool"}, + {"name":"gpu4-macvlan","namespace":"spiderpool"}, + {"name":"gpu5-macvlan","namespace":"spiderpool"}, + {"name":"gpu6-macvlan","namespace":"spiderpool"}, + {"name":"gpu7-macvlan","namespace":"spiderpool"}, + {"name":"gpu8-macvlan","namespace":"spiderpool"}] + # macvlan resource resources: - limits: - spidernet.io/shared_cx5_gpu1: 1 - spidernet.io/shared_cx5_gpu2: 1 - spidernet.io/shared_cx5_gpu3: 1 - spidernet.io/shared_cx5_gpu4: 1 - spidernet.io/shared_cx5_gpu5: 1 - spidernet.io/shared_cx5_gpu6: 1 - spidernet.io/shared_cx5_gpu7: 1 - spidernet.io/shared_cx5_gpu8: 1 - #nvidia.com/gpu: 1 - EOF - - $ helm install rdma-tools spiderchart/rdma-tools -f ./values.yaml + requests: + spidernet.io/shared_cx5_gpu1: 1 + spidernet.io/shared_cx5_gpu2: 1 + spidernet.io/shared_cx5_gpu3: 1 + spidernet.io/shared_cx5_gpu4: 1 + spidernet.io/shared_cx5_gpu5: 1 + spidernet.io/shared_cx5_gpu6: 1 + spidernet.io/shared_cx5_gpu7: 1 + spidernet.io/shared_cx5_gpu8: 1 + #nvidia.com/gpu: 1 ``` During the creation of the network namespace for the container, Spiderpool will perform connectivity tests on the gateway of the macvlan interface. @@ -412,3 +410,105 @@ The network planning for the cluster is as follows: # Successfully access the RDMA service of the other Pod $ ib_read_lat 172.91.0.115 ``` + +## Auto Inject RDMA Resources base on webhook + +To simplify the complexity of configuring multiple network cards for AI applications, Spiderpool supports categorizing a group of network card configurations through labels (cni.spidernet.io/rdma-resource-inject). Users only need to add the same annotation to the Pod. This way, Spiderpool will automatically inject all corresponding network cards and network resources with the same label into the Pod through a webhook. + + > This feature only supports network card configurations with cniType of [ macvlan,ipvlan,sriov,ib-sriov, ipoib ]. + +1. 
When installing Spiderpool, specify to enable the webhook automatic injection of network resources feature: + + ```shell + helm install spiderpool spiderchart/spiderpool --set spiderpoolController.podResourceInject.enabled=true + ``` + + > - By default, the webhook automatic injection of network resources feature is disabled and needs to be manually enabled by the user. + > - You can specify namespaces to exclude from injection using `spiderpoolController.podResourceInject.namespacesExclude`, and specify namespaces to include for injection using `spiderpoolController.podResourceInject.namespacesInclude`. + > - After installing Spiderpool, you can update the configuration by modifying the podResourceInject field in the spiderpool-config configMap. + +2. Create SpiderMultusConfig and specify labels, and configure RDMA-related settings: + + ```shell + $ cat < - `cni.spidernet.io/rdma-resource-inject: gpu-macvlan` is a fixed key, and the value is user-defined. A group of network card configurations with the same Label and Value must have the same cniType. + > - `enableRdma`, `rdmaResourceName` and `ippools` must be configured, otherwise the Pod will fail to inject network resources successfully. + +3. Add the annotation `cni.spidernet.io/rdma-resource-inject: gpu-macvlan` to the Pod, so that Spiderpool automatically adds 8 GPU-affinity network cards for RDMA communication and configures 8 types of RDMA resources: + + > Note: When using the webhook automatic injection of network resources feature, do not add other network configuration annotations (such as `k8s.v1.cni.cncf.io/networks` and `ipam.spidernet.io/ippools`) to the Pod, otherwise it will affect the automatic injection of resources. + + ```shell + $ helm repo add spiderchart https://spidernet-io.github.io/charts + $ helm repo update + $ helm search repo rdma-tools + + # run daemonset on worker1 and worker2 + $ cat < values.yaml + # for china user , it could add these to use a domestic registry + #image: + # registry: ghcr.m.daocloud.io + + # just run daemonset in nodes 'worker1' and 'worker2' + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - worker1 + - worker2 + # macvlan interfaces + extraAnnotations: + cni.spidernet.io/rdma-resource-inject: gpu-macvlan + EOF + + $ helm install rdma-tools spiderchart/rdma-tools -f ./values.yaml + ``` + + When the Pod is successfully Running, check if 8 RDMA network card annotations and 8 types of RDMA resources are successfully injected into the Pod. 
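+
+    One way to inspect the injected fields (the Pod name below is illustrative):
+
+    ```shell
+    # print the Multus networks annotation injected by the webhook
+    kubectl get pod rdma-tools-4qrh9 -o jsonpath='{.metadata.annotations.k8s\.v1\.cni\.cncf\.io/networks}'
+    # print the injected RDMA resource requests of the first container
+    kubectl get pod rdma-tools-4qrh9 -o jsonpath='{.spec.containers[0].resources.requests}'
+    ```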
+ + ```shell + # Pod multus annotations + k8s.v1.cni.cncf.io/networks: |- + [{"name":"gpu1-macvlan","namespace":"spiderpool"}, + {"name":"gpu2-macvlan","namespace":"spiderpool"}, + {"name":"gpu3-macvlan","namespace":"spiderpool"}, + {"name":"gpu4-macvlan","namespace":"spiderpool"}, + {"name":"gpu5-macvlan","namespace":"spiderpool"}, + {"name":"gpu6-macvlan","namespace":"spiderpool"}, + {"name":"gpu7-macvlan","namespace":"spiderpool"}, + {"name":"gpu8-macvlan","namespace":"spiderpool"}] + # macvlan resource + resources: + requests: + spidernet.io/shared_cx5_gpu1: 1 + spidernet.io/shared_cx5_gpu2: 1 + spidernet.io/shared_cx5_gpu3: 1 + spidernet.io/shared_cx5_gpu4: 1 + spidernet.io/shared_cx5_gpu5: 1 + spidernet.io/shared_cx5_gpu6: 1 + spidernet.io/shared_cx5_gpu7: 1 + spidernet.io/shared_cx5_gpu8: 1 + #nvidia.com/gpu: 1 + ``` diff --git a/docs/usage/install/ai/get-started-sriov-zh_CN.md b/docs/usage/install/ai/get-started-sriov-zh_CN.md index b149bc1606..d550198dad 100644 --- a/docs/usage/install/ai/get-started-sriov-zh_CN.md +++ b/docs/usage/install/ai/get-started-sriov-zh_CN.md @@ -14,7 +14,7 @@ Linux 的 RDMA 子系统,提供两种工作模式: 对于隔离 RDMA 网卡,必须至少满足以下条件之一: - (1) 基于 5.3.0 或更新版本的 Linux 内核,系统中加载的RDMA模块,rdma 核心包提供了在系统启动时自动加载相关模块的方法 + (1) 基于 5.3.0 或更新版本的 Linux 内核,系统中加载的RDMA模块,rdma 核心包提供了在系统启动时自动加载相关模块的方法 (2) 需要 Mellanox OFED 4.7 版或更新版本。在这种情况下,不需要使用基于 5.3.0 或更新版本的内核。 @@ -28,7 +28,7 @@ Linux 的 RDMA 子系统,提供两种工作模式: ## 方案 -本文将以如下典型的 AI 集群拓扑为例,介绍如何搭建 Spiderpool +本文将以如下典型的 AI 集群拓扑为例,介绍如何搭建 Spiderpool ![AI Cluster](../../../images/ai-cluster.png) 图1 AI 集群拓扑 @@ -66,8 +66,8 @@ Linux 的 RDMA 子系统,提供两种工作模式: 对于 Mellanox 网卡,可下载 [NVIDIA OFED 官方驱动](https://network.nvidia.com/products/infiniband-drivers/linux/mlnx_ofed/) 进行主机安装,执行如下安装命令 ``` - $ mount /root/MLNX_OFED_LINUX-24.01-0.3.3.1-ubuntu22.04-x86_64.iso /mnt - $ /mnt/mlnxofedinstall --all + mount /root/MLNX_OFED_LINUX-24.01-0.3.3.1-ubuntu22.04-x86_64.iso /mnt + /mnt/mlnxofedinstall --all ``` 对于 Mellanox 网卡,也可基于容器化安装,实现对集群主机上所有 Mellanox 网卡批量安装驱动,运行如下命令,注意的是,该运行过程中需要访问因特网获取一些安装包。当所有的 ofed pod 进入 ready 状态,表示主机上已经完成了 OFED driver 安装 @@ -96,7 +96,7 @@ Linux 的 RDMA 子系统,提供两种工作模式: ....... ``` - 确认网卡的工作模式,如下输出表示网卡工作在 Ethernet 模式下,可实现 RoCE 通信 + 确认网卡的工作模式,如下输出表示网卡工作在 Ethernet 模式下,可实现 RoCE 通信 ``` $ ibstat mlx5_0 | grep "Link layer" @@ -127,7 +127,7 @@ Linux 的 RDMA 子系统,提供两种工作模式: ``` 3. 开启 [GPUDirect RMDA](https://docs.nvidia.com/cuda/gpudirect-rdma/) 功能 - + 在安装或使用 [gpu-operator](https://github.com/NVIDIA/gpu-operator) 过程中 a. 开启 helm 安装选项: `--set driver.rdma.enabled=true --set driver.rdma.useHostMofed=true`,gpu-operator 会安装 [nvidia-peermem](https://network.nvidia.com/products/GPUDirect-RDMA/) 内核模块,启用 GPUDirect RMDA 功能,加速 GPU 和 RDMA 网卡之间的转发性能。可在主机上输入如下命令,确认安装成功的内核模块 @@ -167,10 +167,10 @@ Linux 的 RDMA 子系统,提供两种工作模式: 1. 使用 helm 安装 Spiderpool,并启用 SR-IOV 组件 ``` - $ helm repo add spiderpool https://spidernet-io.github.io/spiderpool - $ helm repo update spiderpool - $ kubectl create namespace spiderpool - $ helm install spiderpool spiderpool/spiderpool -n spiderpool --set sriov.install=true + helm repo add spiderpool https://spidernet-io.github.io/spiderpool + helm repo update spiderpool + kubectl create namespace spiderpool + helm install spiderpool spiderpool/spiderpool -n spiderpool --set sriov.install=true ``` > 如果您是中国用户,可以指定参数 `--set global.imageRegistryOverride=ghcr.m.daocloud.io` 来使用国内的镜像源。 @@ -192,14 +192,14 @@ Linux 的 RDMA 子系统,提供两种工作模式: 2. 
配置 SR-IOV operator, 在每个主机上创建出 VF 设备 使用如下命令,查询主机上网卡设备的 PCIE 信息。确认如下输出的设备号 [15b3:1017] 出现在 [sriov-network-operator 支持网卡型号范围](https://github.com/k8snetworkplumbingwg/sriov-network-operator/blob/master/deployment/sriov-network-operator-chart/templates/configmap.yaml) - + ``` $ lspci -nn | grep Mellanox 86:00.0 Infiniband controller [0207]: Mellanox Technologies MT27800 Family [ConnectX-5] [15b3:1017] 86:00.1 Infiniband controller [0207]: Mellanox Technologies MT27800 Family [ConnectX-5] [15b3:1017] .... ``` - + SRIOV VF 数量决定了一个网卡能同时为多少个 POD 提供网卡,不同型号的网卡的有不同的最大 VF 数量上限,Mellanox 的 ConnectX 网卡常见型号的最大 VF 上限是 127 。 如下示例,设置每个节点上的 GPU1 和 GPU2 的网卡,每个网卡配置出 12 个 VF 设备。请参考如下,为主机上每个亲和 GPU 的网卡配置 SriovNetworkNodePolicy,这样,将有 8 个 SRIOV resource 以供使用。 @@ -248,7 +248,7 @@ Linux 的 RDMA 子系统,提供两种工作模式: isRdma: true EOF ``` - + 创建 SriovNetworkNodePolicy 配置后,每个节点上将会启动 sriov-device-plugin ,负责上报 VF 设备资源 ``` @@ -309,17 +309,17 @@ Linux 的 RDMA 子系统,提供两种工作模式: (1) 对于 Infiniband 网络,请为所有的 GPU 亲和的 SR-IOV 网卡配置 [IB-SRIOV CNI](https://github.com/k8snetworkplumbingwg/ib-sriov-cni) 配置,并创建对应的 IP 地址池 。 如下例子,配置了 GPU1 亲和的网卡和 IP 地址池 - ``` + ```shell $ cat < 注:支持自动为应用注入 RDMA 网络资源,参考 [基于 Webhook 自动为应用注入 RDMA 网络资源](#基于-webhook-自动注入-rdma-网络资源) + ``` $ helm repo add spiderchart https://spidernet-io.github.io/charts $ helm repo update @@ -383,45 +386,45 @@ Linux 的 RDMA 子系统,提供两种工作模式: # just run daemonset in nodes 'worker1' and 'worker2' affinity: nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - worker1 - - worker2 + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - worker1 + - worker2 # sriov interfaces extraAnnotations: k8s.v1.cni.cncf.io/networks: |- - [{"name":"gpu1-sriov","namespace":"spiderpool"}, - {"name":"gpu2-sriov","namespace":"spiderpool"}, - {"name":"gpu3-sriov","namespace":"spiderpool"}, - {"name":"gpu4-sriov","namespace":"spiderpool"}, - {"name":"gpu5-sriov","namespace":"spiderpool"}, - {"name":"gpu6-sriov","namespace":"spiderpool"}, - {"name":"gpu7-sriov","namespace":"spiderpool"}, - {"name":"gpu8-sriov","namespace":"spiderpool"}] + [{"name":"gpu1-sriov","namespace":"spiderpool"}, + {"name":"gpu2-sriov","namespace":"spiderpool"}, + {"name":"gpu3-sriov","namespace":"spiderpool"}, + {"name":"gpu4-sriov","namespace":"spiderpool"}, + {"name":"gpu5-sriov","namespace":"spiderpool"}, + {"name":"gpu6-sriov","namespace":"spiderpool"}, + {"name":"gpu7-sriov","namespace":"spiderpool"}, + {"name":"gpu8-sriov","namespace":"spiderpool"}] # sriov resource resources: limits: - spidernet.io/gpu1sriov: 1 - spidernet.io/gpu2sriov: 1 - spidernet.io/gpu3sriov: 1 - spidernet.io/gpu4sriov: 1 - spidernet.io/gpu5sriov: 1 - spidernet.io/gpu6sriov: 1 - spidernet.io/gpu7sriov: 1 - spidernet.io/gpu8sriov: 1 - #nvidia.com/gpu: 1 + spidernet.io/gpu1sriov: 1 + spidernet.io/gpu2sriov: 1 + spidernet.io/gpu3sriov: 1 + spidernet.io/gpu4sriov: 1 + spidernet.io/gpu5sriov: 1 + spidernet.io/gpu6sriov: 1 + spidernet.io/gpu7sriov: 1 + spidernet.io/gpu8sriov: 1 + #nvidia.com/gpu: 1 EOF $ helm install rdma-tools spiderchart/rdma-tools -f ./values.yaml ``` - + 在容器的网络命名空间创建过程中,Spiderpool 会对 sriov 接口上的网关进行连通性测试,如果如上应用的所有 POD 都启动成功,说明了每个节点上的 VF 设备的连通性成功,可进行正常的 RDMA 通信。 2. 
查看容器的网络命名空间状态 @@ -534,7 +537,7 @@ Linux 的 RDMA 子系统,提供两种工作模式: $ ib_read_lat 172.91.0.115 ``` -## (可选)Infiniband 网络下对接 UFM +## (可选)Infiniband 网络下对接 UFM 对于使用了 Infiniband 网络的集群,如果网络中有 [UFM 管理平台](https://www.nvidia.com/en-us/networking/infiniband/ufm/),可使用 [ib-kubernetes](https://github.com/Mellanox/ib-kubernetes) 插件,它以 daemonset 形式运行,监控所有使用 SRIOV 网卡的容器,把 VF 设备的 Pkey 和 GUID 上报给 UFM 。 @@ -577,12 +580,12 @@ Linux 的 RDMA 子系统,提供两种工作模式: 3. 在 kubernetes 集群上安装 ib-kubernetes ``` - $ git clone https://github.com/Mellanox/ib-kubernetes.git && cd ib-kubernetes - $ $ kubectl create -f deployment/ib-kubernetes-configmap.yaml - $ kubectl create -f deployment/ib-kubernetes.yaml + git clone https://github.com/Mellanox/ib-kubernetes.git && cd ib-kubernetes + $ kubectl create -f deployment/ib-kubernetes-configmap.yaml + kubectl create -f deployment/ib-kubernetes.yaml ``` -4. 在 Infiniband 网络下,创建 Spiderpool 的 SpiderMultusConfig 时,可配置 pkey,使用该配置创建的 POD 将生效 pkey 配置,且被 ib-kubernetes 同步给 UFM +4. 在 Infiniband 网络下,创建 Spiderpool 的 SpiderMultusConfig 时,可配置 pkey,使用该配置创建的 POD 将生效 pkey 配置,且被 ib-kubernetes 同步给 UFM ``` $ cat < Note: Each node in an Infiniband Kubernetes deployment may be associated with up to 128 PKeys due to kernel limitation + +## 基于 Webhook 自动注入 RDMA 网络资源 + +Spiderpool 为了简化 AI 应用配置多网卡的复杂度,支持通过 labels(`cni.spidernet.io/rdma-resource-inject`) 对一组网卡配置分类。用户只需要为 Pod 添加相同的注解。这样 Spiderpool 会通过 webhook 自动为 Pod 注入所有具有相同 label 的对应的网卡和网络资源。 + + > 该功能仅支持 [ macvlan,ipvlan,sriov,ib-sriov, ipoib ] 这几种 cniType 的网卡配置。 + +1. 使用 webhook 自动注入 RDMA 网络资源,需要安装 Spiderpool 时指定开启 webhook 自动注入网络资源功能: + + ```shell + helm install spiderpool spiderchart/spiderpool --set spiderpoolController.podResourceInject.enabled=true + ``` + + > - 默认关闭 webhook 自动注入网络资源功能,需要用户手动开启。 + > - 您可以通过 `spiderpoolController.podResourceInject.namespacesExclude` 指定不注入的命名空间,通过 `spiderpoolController.podResourceInject.namespacesInclude` 指定注入的命名空间。 + > - 安装 Spiderpool 后,您可以通过更新 spiderpool-config configMap 中 podResourceInject 字段更新配置。 + +2. 创建 SpiderMultusConfig 时指定 labels,并配置 RDMA 相关配置: + + (1) 对于 Infiniband 网络,请为所有的 GPU 亲和的 SR-IOV 网卡配置 [IB-SRIOV CNI](https://github.com/k8snetworkplumbingwg/ib-sriov-cni) 配置,并创建对应的 IP 地址池 。 如下例子,配置了 GPU1 亲和的网卡和 IP 地址池 + + ```shell + $ cat < - `cni.spidernet.io/rdma-resource-inject: gpu-ibsriov` 固定的 key,value 为用户自定义。具有相同 Label 和 Value 的一组网卡配置要求 cniType 必须一致。 + > - `resourceName` 和 `ippools` 必须配置,否则 Pod 无法成功注入网络资源。 + + (2) 对于 Ethernet 网络,请为所有的 GPU 亲和的 SR-IOV 网卡配置 [SR-IOV CNI](https://github.com/k8snetworkplumbingwg/sriov-cni) 配置,并创建对应的 IP 地址池 。 如下例子,配置了 GPU1 亲和的网卡和 IP 地址池 + + ``` + $ cat < - `cni.spidernet.io/rdma-resource-inject: gpu-sriov` 固定的 key,value 为用户自定义。具有相同 Label 和 Value 的一组网卡配置要求 cniType 必须一致。 + > - `resourceName` 和 `ippools` 必须配置,否则 Pod 无法成功注入网络资源。 + +3. 
创建应用时,添加注解: `cni.spidernet.io/rdma-resource-inject: gpu-sriov`,这样 Spiderpool 自动为 Pod 添加 8 个 GPU 亲和网卡的网卡,用于 RDMA 通信,并配置 8 种 RDMA resources 资源: + + > 注意:使用 webhook 自动注入网络资源功能时,不能为应用添加其他网络配置注解(如 `k8s.v1.cni.cncf.io/networks` 和 `ipam.spidernet.io/ippools`等),否则会影响资源自动注入功能。 + + ```shell + $ helm repo add spiderchart https://spidernet-io.github.io/charts + $ helm repo update + $ helm search repo rdma-tools + + # run daemonset on worker1 and worker2 + $ cat < values.yaml + # for china user , it could add these to use a domestic registry + #image: + # registry: ghcr.m.daocloud.io + + # just run daemonset in nodes 'worker1' and 'worker2' + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - worker1 + - worker2 + + # macvlan interfaces + extraAnnotations: + cni.spidernet.io/rdma-resource-inject: gpu-sriov + EOF + + $ helm install rdma-tools spiderchart/rdma-tools -f ./values.yaml + ``` + + 当 Pod 成功 Running,检查 Pod 是否成功注入 8 个 RDMA 网卡的 annotations 和 8 种 RDMA 资源。 + + ```shell + # pod annotations + extraAnnotations: + k8s.v1.cni.cncf.io/networks: |- + [{"name":"gpu1-sriov","namespace":"spiderpool"}, + {"name":"gpu2-sriov","namespace":"spiderpool"}, + {"name":"gpu3-sriov","namespace":"spiderpool"}, + {"name":"gpu4-sriov","namespace":"spiderpool"}, + {"name":"gpu5-sriov","namespace":"spiderpool"}, + {"name":"gpu6-sriov","namespace":"spiderpool"}, + {"name":"gpu7-sriov","namespace":"spiderpool"}, + {"name":"gpu8-sriov","namespace":"spiderpool"}] + + # sriov resource + resources: + limits: + spidernet.io/gpu1sriov: 1 + spidernet.io/gpu2sriov: 1 + spidernet.io/gpu3sriov: 1 + spidernet.io/gpu4sriov: 1 + spidernet.io/gpu5sriov: 1 + spidernet.io/gpu6sriov: 1 + spidernet.io/gpu7sriov: 1 + spidernet.io/gpu8sriov: 1 + #nvidia.com/gpu: 1 + ``` diff --git a/docs/usage/install/ai/get-started-sriov.md b/docs/usage/install/ai/get-started-sriov.md index 0ed9942570..7cd917de91 100644 --- a/docs/usage/install/ai/get-started-sriov.md +++ b/docs/usage/install/ai/get-started-sriov.md @@ -64,8 +64,8 @@ The network planning for the cluster is as follows: For Mellanox network cards, you can download [the NVIDIA OFED official driver](https://network.nvidia.com/products/infiniband-drivers/linux/mlnx_ofed/) and install it on the host using the following installation command: ``` - $ mount /root/MLNX_OFED_LINUX-24.01-0.3.3.1-ubuntu22.04-x86_64.iso /mnt - $ /mnt/mlnxofedinstall --all + mount /root/MLNX_OFED_LINUX-24.01-0.3.3.1-ubuntu22.04-x86_64.iso /mnt + /mnt/mlnxofedinstall --all ``` For Mellanox network cards, you can also perform a containerized installation to batch install drivers on all Mellanox network cards in the cluster hosts. Run the following command. Note that this process requires internet access to fetch some installation packages. When all the OFED pods enter the ready state, it indicates that the OFED driver installation on the hosts is complete: @@ -126,9 +126,9 @@ The network planning for the cluster is as follows: 3. Enable [GPUDirect RDMA](https://docs.nvidia.com/cuda/gpudirect-rdma/) - The installation of the [gpu-operator](https://github.com/NVIDIA/gpu-operator): + The installation of the [gpu-operator](https://github.com/NVIDIA/gpu-operator): - a. Enable the Helm installation options: `--set driver.rdma.enabled=true --set driver.rdma.useHostMofed=true`. The gpu-operator will install [the nvidia-peermem](https://network.nvidia.com/products/GPUDirect-RDMA/) kernel module, + a. 
Enable the Helm installation options: `--set driver.rdma.enabled=true --set driver.rdma.useHostMofed=true`. The gpu-operator will install [the nvidia-peermem](https://network.nvidia.com/products/GPUDirect-RDMA/) kernel module, enabling GPUDirect RDMA functionality to accelerate data transfer performance between the GPU and RDMA network cards. Enter the following command on the host to confirm the successful installation of the kernel module: ``` @@ -166,10 +166,10 @@ The network planning for the cluster is as follows: 1. Use Helm to install Spiderpool and enable the SR-IOV component: ``` - $ helm repo add spiderpool https://spidernet-io.github.io/spiderpool - $ helm repo update spiderpool - $ kubectl create namespace spiderpool - $ helm install spiderpool spiderpool/spiderpool -n spiderpool --set sriov.install=true + helm repo add spiderpool https://spidernet-io.github.io/spiderpool + helm repo update spiderpool + kubectl create namespace spiderpool + helm install spiderpool spiderpool/spiderpool -n spiderpool --set sriov.install=true ``` > If you are a user in China, you can specify the helm option `--set global.imageRegistryOverride=ghcr.m.daocloud.io` to use a domestic image source. @@ -190,7 +190,7 @@ The network planning for the cluster is as follows: 2. Configure the SR-IOV Operator to Create VF Devices on Each Host - Use the following command to query the PCIe information of the network card devices on the host. Confirm that the device ID [15b3:1017] appears + Use the following command to query the PCIe information of the network card devices on the host. Confirm that the device ID [15b3:1017] appears in [the supported network card models list of the sriov-network-operator](https://github.com/k8snetworkplumbingwg/sriov-network-operator/blob/master/deployment/sriov-network-operator-chart/templates/configmap.yaml). ``` @@ -266,8 +266,8 @@ The network planning for the cluster is as follows: sriov-network-config-daemon-n629x 1/1 Running 0 1m ....... ``` - - Once the SriovNetworkNodePolicy configuration is created, the SR-IOV operator will sequentially evict PODs on each node, configure the + + Once the SriovNetworkNodePolicy configuration is created, the SR-IOV operator will sequentially evict PODs on each node, configure the VF settings in the network card driver, and then reboot the host. Consequently, you will observe the nodes in the cluster sequentially entering the SchedulingDisabled state and being rebooted. ``` @@ -277,7 +277,7 @@ The network planning for the cluster is as follows: ai-10-1-16-2 Ready,SchedulingDisabled worker 2d15h v1.28.9 ....... ``` - + It may take several minutes for all nodes to complete the VF configuration process. You can monitor the sriovnetworknodestates status to see if it has entered the Succeeded state, indicating that the configuration is complete. ``` @@ -318,10 +318,10 @@ The network planning for the cluster is as follows: metadata: name: gpu1-net11 spec: - gateway: 172.16.11.254 - subnet: 172.16.11.0/16 - ips: - - 172.16.11.1-172.16.11.200 + gateway: 172.16.11.254 + subnet: 172.16.11.0/16 + ips: + - 172.16.11.1-172.16.11.200 --- apiVersion: spiderpool.spidernet.io/v2beta1 kind: SpiderMultusConfig @@ -329,27 +329,27 @@ The network planning for the cluster is as follows: name: gpu1-sriov namespace: spiderpool spec: - cniType: ib-sriov - ibsriov: - resourceName: spidernet.io/gpu1sriov - ippools: - ipv4: ["gpu1-net91"] + cniType: ib-sriov + ibsriov: + resourceName: spidernet.io/gpu1sriov + ippools: + ipv4: ["gpu1-net91"] EOF ``` b. 
For Ethernet Networks, configure [the SR-IOV CNI](https://github.com/k8snetworkplumbingwg/sriov-cni) for all GPU-affinitized SR-IOV network cards and create the corresponding IP address pool. The following example configures the network card and IP address pool for GPU1 - ``` + ``` $ cat < NOTICE: It support auto inject RDMA resources for application, see [Auto inject RDMA Resources](#auto-inject-rdma-resources-base-on-webhook) + ```shell $ helm repo add spiderchart https://spidernet-io.github.io/charts $ helm repo update @@ -385,45 +387,45 @@ The network planning for the cluster is as follows: # just run daemonset in nodes 'worker1' and 'worker2' affinity: nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - worker1 - - worker2 + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - worker1 + - worker2 # sriov interfaces extraAnnotations: k8s.v1.cni.cncf.io/networks: |- - [{"name":"gpu1-sriov","namespace":"spiderpool"}, - {"name":"gpu2-sriov","namespace":"spiderpool"}, - {"name":"gpu3-sriov","namespace":"spiderpool"}, - {"name":"gpu4-sriov","namespace":"spiderpool"}, - {"name":"gpu5-sriov","namespace":"spiderpool"}, - {"name":"gpu6-sriov","namespace":"spiderpool"}, - {"name":"gpu7-sriov","namespace":"spiderpool"}, - {"name":"gpu8-sriov","namespace":"spiderpool"}] + [{"name":"gpu1-sriov","namespace":"spiderpool"}, + {"name":"gpu2-sriov","namespace":"spiderpool"}, + {"name":"gpu3-sriov","namespace":"spiderpool"}, + {"name":"gpu4-sriov","namespace":"spiderpool"}, + {"name":"gpu5-sriov","namespace":"spiderpool"}, + {"name":"gpu6-sriov","namespace":"spiderpool"}, + {"name":"gpu7-sriov","namespace":"spiderpool"}, + {"name":"gpu8-sriov","namespace":"spiderpool"}] # sriov resource resources: limits: - spidernet.io/gpu1sriov: 1 - spidernet.io/gpu2sriov: 1 - spidernet.io/gpu3sriov: 1 - spidernet.io/gpu4sriov: 1 - spidernet.io/gpu5sriov: 1 - spidernet.io/gpu6sriov: 1 - spidernet.io/gpu7sriov: 1 - spidernet.io/gpu8sriov: 1 - #nvidia.com/gpu: 1 + spidernet.io/gpu1sriov: 1 + spidernet.io/gpu2sriov: 1 + spidernet.io/gpu3sriov: 1 + spidernet.io/gpu4sriov: 1 + spidernet.io/gpu5sriov: 1 + spidernet.io/gpu6sriov: 1 + spidernet.io/gpu7sriov: 1 + spidernet.io/gpu8sriov: 1 + #nvidia.com/gpu: 1 EOF $ helm install rdma-tools spiderchart/rdma-tools -f ./values.yaml ``` - During the creation of the network namespace for the container, Spiderpool will perform connectivity tests on the gateway of the SR-IOV interface. + During the creation of the network namespace for the container, Spiderpool will perform connectivity tests on the gateway of the SR-IOV interface. If all PODs of the above application start successfully, it indicates successful connectivity of the VF devices on each node, allowing normal RDMA communication. 2. Check the network namespace status of the container. @@ -525,7 +527,7 @@ The network planning for the cluster is as follows: # Start an RDMA service $ ib_read_lat ``` - + Open another terminal, enter another Pod, and access the service: ``` @@ -579,9 +581,9 @@ For clusters using Infiniband networks, if there is a [UFM management platform]( 3. 
Install ib-kubernetes on the Kubernetes cluster ``` - $ git clone https://github.com/Mellanox/ib-kubernetes.git && cd ib-kubernetes - $ $ kubectl create -f deployment/ib-kubernetes-configmap.yaml - $ kubectl create -f deployment/ib-kubernetes.yaml + git clone https://github.com/Mellanox/ib-kubernetes.git && cd ib-kubernetes + $ kubectl create -f deployment/ib-kubernetes-configmap.yaml + kubectl create -f deployment/ib-kubernetes.yaml ``` 4. On Infiniband networks, when creating Spiderpool's SpiderMultusConfig, you can configure the Pkey. Pods created with this configuration will use the Pkey settings and be synchronized with UFM by ib-kubernetes @@ -594,11 +596,157 @@ For clusters using Infiniband networks, if there is a [UFM management platform]( name: ib-sriov namespace: spiderpool spec: - cniType: ib-sriov - ibsriov: - pkey: 1000 - ... + cniType: ib-sriov + ibsriov: + pkey: 1000 + ... EOF ``` - + > Note: Each node in an Infiniband Kubernetes deployment may be associated with up to 128 PKeys due to kernel limitation + +## Auto Inject RDMA Resources base on webhook + +To simplify the complexity of configuring multiple network cards for AI applications, Spiderpool supports categorizing a group of network card configurations through labels (cni.spidernet.io/rdma-resource-inject). Users only need to add the same annotation to the Pod. This way, Spiderpool will automatically inject all corresponding network cards and network resources with the same label into the Pod through a webhook. + + > This feature only supports network card configurations with cniType of [ macvlan,ipvlan,sriov,ib-sriov, ipoib ]. + +1. Install Spiderpool with webhook automatic injection of network resources feature enabled: + + ```shell + helm install spiderpool spiderchart/spiderpool --set spiderpoolController.podResourceInject.enabled=true + ``` + + > - By default, the webhook automatic injection of network resources feature is disabled and needs to be manually enabled by the user. + > - You can specify namespaces to exclude from injection using `spiderpoolController.podResourceInject.namespacesExclude`, and specify namespaces to include for injection using `spiderpoolController.podResourceInject.namespacesInclude`. + > - After installing Spiderpool, you can update the configuration by modifying the podResourceInject field in the spiderpool-config configMap. + +2. Create SpiderMultusConfig and specify labels, and configure RDMA-related settings: + + a. For Infiniband Networks, configure [the IB-SRIOV CNI](https://github.com/k8snetworkplumbingwg/ib-sriov-cni) for all GPU-affinitized SR-IOV network cards and create the corresponding IP address pool. The following example configures the network card and IP address pool for GPU1 + + ```shell + $ cat < - `cni.spidernet.io/rdma-resource-inject: gpu-ibsriov` is a fixed key, and the value is user-defined. A group of network card configurations with the same `Label` and `Value` must have the same `cniType`. + > - `resourceName` and `ippools` must be configured, otherwise the Pod will fail to inject network resources successfully. + + b. For Ethernet Networks, configure [the SR-IOV CNI](https://github.com/k8snetworkplumbingwg/sriov-cni) for all GPU-affinitized SR-IOV network cards and create the corresponding IP address pool. The following example configures the network card and IP address pool for GPU1 + + ```shell + $ cat < - `cni.spidernet.io/rdma-resource-inject: gpu-sriov` is a fixed key, and the value is user-defined. 
A group of network card configurations with the same `Label` and `Value` must have the same `cniType`. + > - `resourceName` and `ippools` must be configured, otherwise the Pod will fail to inject network resources successfully. + +3. Add the annotation `cni.spidernet.io/rdma-resource-inject: gpu-sriov` to the Pod, so that Spiderpool automatically adds 8 GPU-affinity network cards for RDMA communication and configures 8 types of RDMA resources: + + > Note: When using the webhook automatic injection of network resources feature, do not add other network configuration annotations (such as `k8s.v1.cni.cncf.io/networks` and `ipam.spidernet.io/ippools`) to the Pod, otherwise it will affect the automatic injection of resources. + + ```shell + $ helm repo add spiderchart https://spidernet-io.github.io/charts + $ helm repo update + $ helm search repo rdma-tools + + # run daemonset on worker1 and worker2 + $ cat < values.yaml + # for china user , it could add these to use a domestic registry + #image: + # registry: ghcr.m.daocloud.io + + # just run daemonset in nodes 'worker1' and 'worker2' + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - worker1 + - worker2 + # sriov interfaces + extraAnnotations: + cni.spidernet.io/rdma-resource-inject: gpu-sriov + EOF + + $ helm install rdma-tools spiderchart/rdma-tools -f ./values.yaml + ``` + + When the Pod is successfully Running, check if 8 RDMA network card annotations and 8 types of RDMA resources are successfully injected into the Pod. + + ```shell + # Pod multus annotations + k8s.v1.cni.cncf.io/networks: |- + [{"name":"gpu1-sriov","namespace":"spiderpool"}, + {"name":"gpu2-sriov","namespace":"spiderpool"}, + {"name":"gpu3-sriov","namespace":"spiderpool"}, + {"name":"gpu4-sriov","namespace":"spiderpool"}, + {"name":"gpu5-sriov","namespace":"spiderpool"}, + {"name":"gpu6-sriov","namespace":"spiderpool"}, + {"name":"gpu7-sriov","namespace":"spiderpool"}, + {"name":"gpu8-sriov","namespace":"spiderpool"}] + # sriov resource + resources: + requests: + spidernet.io/gpu1sriov: 1 + spidernet.io/gpu2sriov: 1 + spidernet.io/gpu3sriov: 1 + spidernet.io/gpu4sriov: 1 + spidernet.io/gpu5sriov: 1 + spidernet.io/gpu6sriov: 1 + spidernet.io/gpu7sriov: 1 + spidernet.io/gpu8sriov: 1 + #nvidia.com/gpu: 1 + ``` diff --git a/docs/usage/istio-zh_CN.md b/docs/usage/istio-zh_CN.md new file mode 100644 index 0000000000..f70dd52d6d --- /dev/null +++ b/docs/usage/istio-zh_CN.md @@ -0,0 +1,98 @@ +# Istio + +**简体中文** | [**English**](./istio.md) + +## 介绍 + +在 Istio 场景下,使用 Spiderpool 配置服务网格应用使用 Underlay 网络时,可能会出现流量无法被 istio 劫持的问题。这是因为: + +1. 访问服务网格 Pod 的流量通过其 veth0 网卡(由 Spiderpool 创建)转发。流量随后会通过 istio 设置的 iptables redirect 规则,被劫持到 sidecar 容器中。但由于 iptables redirect 规则必须要求接收流量的网卡必须配置 IP 地址,否则该数据包会被内核沉默的丢弃。 + +2. 在默认情况下,Spiderpool 不会为使用 Underlay 网络的 Pod 的 veth0 网卡配置 IP 地址, 所以这会导致访问服务网格的流量被丢弃。 + +参考 [#Issue 3568](https://github.com/spidernet-io/spiderpool/issues/3568)。为了解决这个问题, Spiderpool 提供一个配置: `vethLinkAddress`,用于为 veth0 网卡配置一个 link-local 地址。 + +## 如何配置 + +1. 
+1. 使用 Helm 安装 Spiderpool 时,可通过以下命令开启这个功能:
+
+   ```shell
+   helm repo add spiderpool https://spidernet-io.github.io/spiderpool
+   helm repo update spiderpool
+   kubectl create namespace spiderpool
+   helm install spiderpool spiderpool/spiderpool -n spiderpool --set coordinator.vethLinkAddress=169.254.100.1
+   ```
+
+   > - `vethLinkAddress` 必须是一个合法的 IP 地址。
+   > - 如果您是中国用户,可以指定参数 `--set global.imageRegistryOverride=ghcr.m.daocloud.io` 来使用国内的镜像源。
+
+2. 安装完成后,查看 Spidercoordinator 的配置,确认 `vethLinkAddress` 已配置正确:
+
+   ```shell
+   ~# kubectl get spidercoordinators.spiderpool.spidernet.io default -o yaml
+   apiVersion: spiderpool.spidernet.io/v2beta1
+   kind: SpiderCoordinator
+   metadata:
+     creationTimestamp: "2024-10-30T08:31:09Z"
+     finalizers:
+     - spiderpool.spidernet.io
+     generation: 7
+     name: default
+     resourceVersion: "195405"
+     uid: 8bdceced-15db-497b-be07-81cbcba7caac
+   spec:
+     detectGateway: false
+     detectIPConflict: false
+     hijackCIDR:
+     - 169.254.0.0/16
+     podRPFilter: 0
+     hostRPFilter: 0
+     hostRuleTable: 500
+     mode: auto
+     podCIDRType: calico
+     podDefaultRouteNIC: ""
+     vethLinkAddress: 169.254.100.1
+     podMACPrefix: ""
+     tunePodRoutes: true
+   status:
+     overlayPodCIDR:
+     - 10.222.64.0/18
+     - 10.223.64.0/18
+     phase: Synced
+     serviceCIDR:
+     - 10.233.0.0/18
+   ```
+
+3. 如果您已经安装了 Spiderpool,可以直接修改 Spidercoordinator 中 `vethLinkAddress` 的配置:
+
+   ```shell
+   kubectl patch spidercoordinators default --type='merge' -p '{"spec": {"vethLinkAddress": "169.254.100.1"}}'
+   ```
+
+4. 步骤 3 中的配置是集群默认设置。如果您不希望整个集群默认都配置 `vethLinkAddress`,可以只为单个网卡配置:
+
+   ```shell
+   MACVLAN_MASTER_INTERFACE="eth0"
+   cat < -n -- ip addr show veth0
+```
diff --git a/docs/usage/istio.md b/docs/usage/istio.md
new file mode 100644
index 0000000000..e8507c5843
--- /dev/null
+++ b/docs/usage/istio.md
@@ -0,0 +1,98 @@
+# Istio
+
+**English** | [**简体中文**](./istio-zh_CN.md)
+
+## Introduction
+
+When Spiderpool is used to configure an Underlay network for service mesh applications under Istio, traffic may fail to be intercepted by Istio. This is because:
+
+1. Traffic that accesses a service mesh Pod is forwarded through its veth0 network interface (created by Spiderpool) and is then intercepted into the sidecar container by the iptables redirect rules set by Istio. However, iptables redirect rules require the receiving network interface to have an IP address configured; otherwise, the kernel silently drops the packet.
+
+2. By default, Spiderpool does not configure an IP address for the veth0 network interface of Pods using an Underlay network, so traffic destined for the service mesh is dropped.
+
+See [Issue 3568](https://github.com/spidernet-io/spiderpool/issues/3568). To solve this problem, Spiderpool provides a configuration option, `vethLinkAddress`, which assigns a link-local address to the veth0 network interface.
+
+## How to Configure
+
+1. When installing Spiderpool using Helm, you can enable this feature with the following command:
+
+   ```shell
+   helm repo add spiderpool https://spidernet-io.github.io/spiderpool
+   helm repo update spiderpool
+   kubectl create namespace spiderpool
+   helm install spiderpool spiderpool/spiderpool -n spiderpool --set coordinator.vethLinkAddress=169.254.100.1
+   ```
+
+   > - `vethLinkAddress` must be a valid IP address.
+   > - If you are a user in China, you can specify the parameter `--set global.imageRegistryOverride=ghcr.m.daocloud.io` to use a domestic image source.
+
+2. 
After installation, check the configuration of the Spidercoordinator to ensure that `vethLinkAddress` is configured correctly: + + ```shell + ~# kubectl get spidercoordinators.spiderpool.spidernet.io default -o yaml + apiVersion: spiderpool.spidernet.io/v2beta1 + kind: SpiderCoordinator + metadata: + creationTimestamp: "2024-10-30T08:31:09Z" + finalizers: + - spiderpool.spidernet.io + generation: 7 + name: default + resourceVersion: "195405" + uid: 8bdceced-15db-497b-be07-81cbcba7caac + spec: + detectGateway: false + detectIPConflict: false + hijackCIDR: + - 169.254.0.0/16 + podRPFilter: 0 + hostRPFilter: 0 + hostRuleTable: 500 + mode: auto + podCIDRType: calico + podDefaultRouteNIC: "" + vethLinkAddress: 169.254.100.1 + podMACPrefix: "" + tunePodRoutes: true + status: + overlayPodCIDR: + - 10.222.64.0/18 + - 10.223.64.0/18 + phase: Synced + serviceCIDR: + - 10.233.0.0/18 + ``` + +3. If you have already installed Spiderpool, you can directly modify the configuration of `vethLinkAddress` in the Spidercoordinator: + + ```shell + kubectl patch spidercoordinators default --type='merge' -p '{"spec": {"vethLinkAddress": "169.254.100.1"}}' + ``` + +4. Step 3 is the default setting for the cluster. If you do not want the entire cluster to default to configuring `vethLinkAddress`, you can configure it for a single network interface: + + ```shell + MACVLAN_MASTER_INTERFACE="eth0" + cat < -n -- ip addr show veth0 +``` diff --git a/docs/usage/network-topology-zh_CN.md b/docs/usage/network-topology-zh_CN.md index af57ebd070..2e932b5394 100644 --- a/docs/usage/network-topology-zh_CN.md +++ b/docs/usage/network-topology-zh_CN.md @@ -166,6 +166,7 @@ spec: { "ipv4": ["test-ippool-6", "test-ippool-7"] } + v1.multus-cni.io/default-network: kube-system/macvlan-conf labels: app: test-app spec: diff --git a/docs/usage/network-topology.md b/docs/usage/network-topology.md index 3fa391ab58..fff9e33324 100644 --- a/docs/usage/network-topology.md +++ b/docs/usage/network-topology.md @@ -171,6 +171,7 @@ spec: { "ipv4": ["test-ippool-6", "test-ippool-7"] } + v1.multus-cni.io/default-network: kube-system/macvlan-conf labels: app: test-app spec: diff --git a/docs/usage/route-zh_CN.md b/docs/usage/route-zh_CN.md index 3a1806b6b8..291884c80b 100644 --- a/docs/usage/route-zh_CN.md +++ b/docs/usage/route-zh_CN.md @@ -26,7 +26,7 @@ spec: 我们也可为 SpiderIPPool 资源配置路由(`spec.routes`),创建 Pod 时会继承该路由: -> - 当 SpiderIPPool 资源配置了网关地址后,请勿为路由字段配置默认路由。 +> - 当 SpiderIPPool 资源配置了网关地址后,请勿为路由字段配置默认路由。 > - `dst` 和 `gw` 字段都为必填 ```yaml diff --git a/go.mod b/go.mod index 80e43195fc..c67f6119b2 100644 --- a/go.mod +++ b/go.mod @@ -57,7 +57,6 @@ require ( k8s.io/apimachinery v0.30.0-beta.0 k8s.io/client-go v0.29.4 k8s.io/code-generator v0.30.0-beta.0 - k8s.io/dynamic-resource-allocation v0.29.2 k8s.io/kubernetes v1.29.0 k8s.io/utils v0.0.0-20230726121419-3b25d923346b kubevirt.io/api v1.2.0 @@ -79,9 +78,6 @@ require ( github.com/hashicorp/go-multierror v1.1.1 go.uber.org/automaxprocs v1.5.3 k8s.io/kubectl v0.26.3 - k8s.io/kubelet v0.29.2 - tags.cncf.io/container-device-interface v0.6.2 - tags.cncf.io/container-device-interface/specs-go v0.6.0 ) require ( @@ -92,11 +88,13 @@ require ( github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cilium/proxy v0.0.0-20230623092907-8fddead4e52c // indirect + github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa // indirect github.com/coreos/go-iptables v0.7.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/davecgh/go-spew v1.1.1 
// indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/fatih/color v1.13.0 // indirect @@ -145,8 +143,6 @@ require ( github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/oklog/ulid v1.3.1 // indirect - github.com/opencontainers/runtime-spec v1.1.0 // indirect - github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626 // indirect github.com/openshift/api v0.0.0-20230503133300-8bbcb7ca7183 // indirect github.com/openshift/custom-resource-status v1.1.2 // indirect github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b // indirect @@ -165,12 +161,11 @@ require ( github.com/shirou/gopsutil/v3 v3.23.5 // indirect github.com/shopspring/decimal v1.2.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/afero v1.10.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/viper v1.16.0 // indirect github.com/subosito/gotenv v1.4.2 // indirect - github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect github.com/tigera/api v0.0.0-20230406222214-ca74195900cb // indirect github.com/tklauser/go-sysconf v0.3.11 // indirect github.com/tklauser/numcpus v0.6.0 // indirect @@ -188,8 +183,7 @@ require ( golang.org/x/time v0.3.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect - google.golang.org/grpc v1.62.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index cd69cf205e..9f9408d17f 100644 --- a/go.sum +++ b/go.sum @@ -290,7 +290,6 @@ github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwg github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -404,7 +403,6 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent 
v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -415,7 +413,6 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= @@ -445,14 +442,6 @@ github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAl github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= -github.com/opencontainers/runtime-spec v1.0.3-0.20220825212826-86290f6a00fb/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= -github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626 h1:DmNGcqH3WDbV5k8OJ+esPWbqUOX5rMLR2PMvziDMJi0= -github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626/go.mod h1:BRHJJd0E+cx42OybVYSgUvZmU0B8P9gZuRXlZUP7TKI= -github.com/opencontainers/selinux v1.9.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= -github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= -github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= github.com/openkruise/kruise-api v1.3.0 h1:yfEy64uXgSuX/5RwePLbwUK/uX8RRM8fHJkccel5ZIQ= github.com/openkruise/kruise-api v1.3.0/go.mod h1:9ZX+ycdHKNzcA5ezAf35xOa2Mwfa2BYagWr0lKgi5dU= github.com/openshift/api v0.0.0-20230503133300-8bbcb7ca7183 h1:t/CahSnpqY46sQR01SoS+Jt0jtjgmhgE6lFmRnO4q70= @@ -505,14 +494,13 @@ github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= 
-github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= -github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY= +github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -543,8 +531,6 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= -github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= -github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tigera/api v0.0.0-20230406222214-ca74195900cb h1:Y7r5Al3V235KaEoAzGBz9RYXEbwDu8CPaZoCq2PlD8w= github.com/tigera/api v0.0.0-20230406222214-ca74195900cb/go.mod h1:ZZghiX3CUsBAc0osBjRvV6y/eun2ObYdvSbjqXAoj/w= @@ -556,7 +542,6 @@ github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYm github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= github.com/toqueteos/webbrowser v1.2.0 h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ= github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM= -github.com/urfave/cli v1.19.1/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/vishvananda/netlink v1.2.1-beta.2.0.20230621221334-77712cff8739 h1:mi+RH1U/MmAQvz2Ys7r1/8OWlGJoBvF8iCXRKk2uym4= github.com/vishvananda/netlink v1.2.1-beta.2.0.20230621221334-77712cff8739/go.mod h1:0BeLktV/jHb2/Hmw1yLD7+yaIB8PDy11RCty0tCPWZg= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= @@ -567,12 +552,6 @@ github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23n github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -763,8 +742,6 @@ golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -981,7 +958,7 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= +google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac h1:ZL/Teoy/ZGnzyrqK/Optxxp2pmVh+fmJ97slxSRyzUg= google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= @@ -1004,8 +981,6 @@ google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA5 google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk= -google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1076,8 +1051,6 @@ k8s.io/code-generator v0.30.0-beta.0 h1:p+51J7CG4i6Cu/cyRrpXU7zT/XaHIHv7NK/mujr0 k8s.io/code-generator v0.30.0-beta.0/go.mod h1:kvx3eylE/Y/Z2dj8ncw3CR/zjQ37ou9lc3A0Pt8xX54= k8s.io/component-base v0.29.4 h1:xeKzuuHI/1tjleu5jycDAcYbhAxeGHCQBZUY2eRIkOo= k8s.io/component-base v0.29.4/go.mod h1:pYjt+oEZP9gtmwSikwAJgfSBikqKX2gOqRat0QjmQt0= -k8s.io/dynamic-resource-allocation v0.29.2 h1:8SnrmzMhwAA3p0ZcIT2iIciJpVqJEJlswMxpgubqQ4k= -k8s.io/dynamic-resource-allocation v0.29.2/go.mod h1:79ualICIQeRX6T5YbzRylt7wEH3lAiNkbELslyS1B6k= k8s.io/gengo 
v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo= @@ -1094,8 +1067,6 @@ k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7F k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/kubectl v0.26.3 h1:bZ5SgFyeEXw6XTc1Qji0iNdtqAC76lmeIIQULg2wNXM= k8s.io/kubectl v0.26.3/go.mod h1:02+gv7Qn4dupzN3fi/9OvqqdW+uG/4Zi56vc4Zmsp1g= -k8s.io/kubelet v0.29.2 h1:bQ2StqkUqPCFNLtGLsb3v3O2LKQHXNMju537zOGboRg= -k8s.io/kubelet v0.29.2/go.mod h1:i5orNPqW/fAMrqptbCXFW/vLBBP12TZZc41IrrvF7SY= k8s.io/kubernetes v1.29.0 h1:DOLN7g8+nnAYBi8JHoW0+/MCrZKDPIqAxzLCXDXd0cg= k8s.io/kubernetes v1.29.0/go.mod h1:9kztbUQf9stVDcIYXx+BX3nuGCsAQDsuClkGMpPs3pA= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -1126,7 +1097,3 @@ sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77Vzej sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= -tags.cncf.io/container-device-interface v0.6.2 h1:dThE6dtp/93ZDGhqaED2Pu374SOeUkBfuvkLuiTdwzg= -tags.cncf.io/container-device-interface v0.6.2/go.mod h1:Shusyhjs1A5Na/kqPVLL0KqnHQHuunol9LFeUNkuGVE= -tags.cncf.io/container-device-interface/specs-go v0.6.0 h1:V+tJJN6dqu8Vym6p+Ru+K5mJ49WL6Aoc5SJFSY0RLsQ= -tags.cncf.io/container-device-interface/specs-go v0.6.0/go.mod h1:hMAwAbMZyBLdmYqWgYcKH0F/yctNpV3P35f+/088A80= diff --git a/images/spiderpool-agent/Dockerfile b/images/spiderpool-agent/Dockerfile index 7e5b9511b9..eceb6950e8 100644 --- a/images/spiderpool-agent/Dockerfile +++ b/images/spiderpool-agent/Dockerfile @@ -5,7 +5,7 @@ ARG BASE_IMAGE=ghcr.io/spidernet-io/spiderpool/spiderpool-base:1f8330482d25b58d2 ARG GOLANG_IMAGE=docker.io/library/golang:1.22.0@sha256:03082deb6ae090a0caa4e4a8f666bc59715bc6fa67f5fd109f823a0c4e1efc2a #======= build bin ========== -FROM --platform=${BUILDPLATFORM} ${GOLANG_IMAGE} as builder +FROM --platform=${BUILDPLATFORM} ${GOLANG_IMAGE} AS builder ARG TARGETOS ARG TARGETARCH @@ -60,7 +60,7 @@ ARG VERSION ENV VERSION=${VERSION} RUN groupadd -f spidernet \ - && echo ". /etc/profile.d/bash_completion.sh" >> /etc/bash.bashrc + && echo ". 
/etc/profile.d/bash_completion.sh" >> /etc/bash.bashrc
 COPY --from=builder /tmp/install/${TARGETOS}/${TARGETARCH}/bin/* /usr/bin/
 COPY --from=builder /tmp/install/${TARGETOS}/${TARGETARCH}/bash-completion/* /etc/bash_completion.d/
diff --git a/images/spiderpool-base/Dockerfile b/images/spiderpool-base/Dockerfile
index 89a74935ed..8f10cf2de0 100644
--- a/images/spiderpool-base/Dockerfile
+++ b/images/spiderpool-base/Dockerfile
@@ -9,7 +9,7 @@ ARG UBUNTU_IMAGE=docker.io/library/ubuntu:20.04@sha256:bea6d19168bbfd6af8d77c2cc
 #========= build gops ===============
-FROM --platform=${BUILDPLATFORM} ${GOLANG_IMAGE} as gops-cni-builder
+FROM --platform=${BUILDPLATFORM} ${GOLANG_IMAGE} AS gops-cni-builder
 COPY /build-gops.sh /build-gops.sh
 #use alibaba debian source
@@ -25,7 +25,7 @@ RUN chmod +x /build-gops.sh && /build-gops.sh
 #========== root image ==============
-FROM ${UBUNTU_IMAGE} as rootfs
+FROM ${UBUNTU_IMAGE} AS rootfs
 COPY /install-others.sh /install-others.sh
 COPY /configure-iptables-wrapper.sh /configure-iptables-wrapper.sh
 COPY /iptables-wrapper /usr/sbin/iptables-wrapper
diff --git a/images/spiderpool-controller/Dockerfile b/images/spiderpool-controller/Dockerfile
index 225ded6659..e7dd89aada 100644
--- a/images/spiderpool-controller/Dockerfile
+++ b/images/spiderpool-controller/Dockerfile
@@ -5,7 +5,7 @@ ARG BASE_IMAGE=ghcr.io/spidernet-io/spiderpool/spiderpool-base:1f8330482d25b58d2
 ARG GOLANG_IMAGE=docker.io/library/golang:1.22.0@sha256:03082deb6ae090a0caa4e4a8f666bc59715bc6fa67f5fd109f823a0c4e1efc2a
 #======= build bin ==========
-FROM --platform=${BUILDPLATFORM} ${GOLANG_IMAGE} as builder
+FROM --platform=${BUILDPLATFORM} ${GOLANG_IMAGE} AS builder
 ARG TARGETOS
 ARG TARGETARCH
@@ -55,7 +55,7 @@ ARG VERSION
 ENV VERSION=${VERSION}
 RUN groupadd -f spidernet \
-    && echo ". /etc/profile.d/bash_completion.sh" >> /etc/bash.bashrc
+    && echo ". /etc/profile.d/bash_completion.sh" >> /etc/bash.bashrc
 COPY --from=builder /tmp/install/${TARGETOS}/${TARGETARCH}/bin/* /usr/bin/
 COPY --from=builder /tmp/install/${TARGETOS}/${TARGETARCH}/bash-completion/* /etc/bash_completion.d/
diff --git a/images/spiderpool-plugins/Dockerfile b/images/spiderpool-plugins/Dockerfile
index b4655edd55..7e26c2563a 100644
--- a/images/spiderpool-plugins/Dockerfile
+++ b/images/spiderpool-plugins/Dockerfile
@@ -4,7 +4,7 @@ ARG GOLANG_IMAGE=docker.io/library/golang:1.22.0@sha256:03082deb6ae090a0caa4e4a8f666bc59715bc6fa67f5fd109f823a0c4e1efc2a
 #======= build plugins ==========
-FROM --platform=${BUILDPLATFORM} ${GOLANG_IMAGE} as builder
+FROM --platform=${BUILDPLATFORM} ${GOLANG_IMAGE} AS builder
 ARG TARGETOS
 ARG TARGETARCH
@@ -44,7 +44,7 @@ RUN git clone https://github.com/Mellanox/ipoib-cni.git
 WORKDIR /src/rdma-cni
 RUN git checkout ${RDMA_VERSION} && make TARGET_ARCH=${TARGETARCH} \
-    TARGET_OS=${TARGETOS} build
+    TARGET_OS=${TARGETOS} build
 WORKDIR /src/ovs-cni
 RUN mkdir -p build && GOOS=${TARGETOS} GOARCH=${TARGETARCH} CGO_ENABLED=0 go build -v -ldflags="-s -w" -tags no_openssl -mod vendor -o build/ovs ./cmd/plugin
diff --git a/pkg/constant/k8s.go b/pkg/constant/k8s.go
index 1e06cc774c..9b88acde5e 100644
--- a/pkg/constant/k8s.go
+++ b/pkg/constant/k8s.go
@@ -51,7 +51,10 @@ const (
 )
 const (
-	AnnotationPre = "ipam.spidernet.io"
+	// DEPRECATED. Kept for backward compatibility; do not remove it,
+ // and all new annotations use spidernet.io + AnnotationPre = "ipam.spidernet.io" + CNIAnnotationPre = "cni.spidernet.io" AnnoPodIPPool = AnnotationPre + "/ippool" AnnoPodIPPools = AnnotationPre + "/ippools" @@ -100,6 +103,10 @@ const ( //dra DraAnnotationPre = "dra.spidernet.io" AnnoDraCdiVersion = AnnotationPre + "/cdi-version" + + // webhook + PodMutatingWebhookName = "pods.spiderpool.spidernet.io" + AnnoPodResourceInject = CNIAnnotationPre + "/rdma-resource-inject" ) const ( diff --git a/pkg/coordinatormanager/coordinator_mutate.go b/pkg/coordinatormanager/coordinator_mutate.go index 3e6f7edf7b..e26ca0e37b 100644 --- a/pkg/coordinatormanager/coordinator_mutate.go +++ b/pkg/coordinatormanager/coordinator_mutate.go @@ -49,6 +49,10 @@ func mutateCoordinator(ctx context.Context, coord *spiderpoolv2beta1.SpiderCoord coord.Spec.TxQueueLen = ptr.To(0) } + if coord.Spec.VethLinkAddress == nil { + coord.Spec.VethLinkAddress = ptr.To("") + } + if coord.DeletionTimestamp != nil { logger.Info("Terminating Coordinator, noting to mutate") return nil diff --git a/pkg/coordinatormanager/coordinator_validate.go b/pkg/coordinatormanager/coordinator_validate.go index 2990397267..4577a338e3 100644 --- a/pkg/coordinatormanager/coordinator_validate.go +++ b/pkg/coordinatormanager/coordinator_validate.go @@ -5,6 +5,7 @@ package coordinatormanager import ( "fmt" + "net/netip" "strconv" "strings" @@ -16,12 +17,13 @@ import ( ) var ( - podCIDRTypeField *field.Path = field.NewPath("spec").Child("podCIDRType") - extraCIDRField *field.Path = field.NewPath("spec").Child("extraCIDR") - podMACPrefixField *field.Path = field.NewPath("spec").Child("podMACPrefix") - hostRPFilterField *field.Path = field.NewPath("spec").Child("hostRPFilter") - podRPFilterField *field.Path = field.NewPath("spec").Child("podRPFilter") - txQueueLenField *field.Path = field.NewPath("spec").Child("txQueueLen") + podCIDRTypeField *field.Path = field.NewPath("spec").Child("podCIDRType") + extraCIDRField *field.Path = field.NewPath("spec").Child("extraCIDR") + podMACPrefixField *field.Path = field.NewPath("spec").Child("podMACPrefix") + hostRPFilterField *field.Path = field.NewPath("spec").Child("hostRPFilter") + podRPFilterField *field.Path = field.NewPath("spec").Child("podRPFilter") + txQueueLenField *field.Path = field.NewPath("spec").Child("txQueueLen") + vethLinkAddressField *field.Path = field.NewPath("spec").Child("vethLinkAddress") ) func validateCreateCoordinator(coord *spiderpoolv2beta1.SpiderCoordinator) field.ErrorList { @@ -51,7 +53,6 @@ func validateUpdateCoordinator(oldCoord, newCoord *spiderpoolv2beta1.SpiderCoord } func ValidateCoordinatorSpec(spec *spiderpoolv2beta1.CoordinatorSpec, requireOptionalType bool) *field.Error { - if requireOptionalType && spec.PodCIDRType == nil { return field.NotSupported( podCIDRTypeField, @@ -102,6 +103,13 @@ func ValidateCoordinatorSpec(spec *spiderpoolv2beta1.CoordinatorSpec, requireOpt } } + if spec.VethLinkAddress != nil && *spec.VethLinkAddress != "" { + _, err := netip.ParseAddr(*spec.VethLinkAddress) + if err != nil { + return field.Invalid(vethLinkAddressField, *spec.VethLinkAddress, "vethLinkAddress is an invalid IP address") + } + } + return nil } @@ -137,11 +145,7 @@ func validateCoordinatorExtraCIDR(cidrs []string) *field.Error { } func validateCoordinatorPodMACPrefix(prefix *string) *field.Error { - if prefix == nil { - return nil - } - - if *prefix == "" { + if prefix == nil || *prefix == "" { return nil } diff --git a/pkg/dra/dra-controller/controller.go 
b/pkg/dra/dra-controller/controller.go deleted file mode 100644 index cce59007e6..0000000000 --- a/pkg/dra/dra-controller/controller.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2024 Authors of spidernet-io -// SPDX-License-Identifier: Apache-2.0 - -package draController - -import ( - "context" - "time" - - "github.com/spidernet-io/spiderpool/pkg/constant" - "github.com/spidernet-io/spiderpool/pkg/election" - clientset "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - "k8s.io/dynamic-resource-allocation/controller" -) - -func StartController(ctx context.Context, - leaderRetryElectGap time.Duration, - spiderClientset clientset.Interface, - kubeClient kubernetes.Interface, - informerFactory informers.SharedInformerFactory, - leader election.SpiderLeaseElector) error { - - driver := NewDriver(spiderClientset) - controller := controller.New(ctx, constant.DRADriverName, driver, kubeClient, informerFactory) - - go func() { - for { - select { - case <-ctx.Done(): - return - default: - } - - if !leader.IsElected() { - time.Sleep(leaderRetryElectGap) - continue - } - - innerCtx, innerCancel := context.WithCancel(ctx) - go func() { - for { - select { - case <-innerCtx.Done(): - return - default: - } - - if !leader.IsElected() { - innerCancel() - return - } - time.Sleep(leaderRetryElectGap) - } - }() - - informerFactory.Start(innerCtx.Done()) - controller.Run(1) - } - }() - return nil -} diff --git a/pkg/dra/dra-controller/driver.go b/pkg/dra/dra-controller/driver.go deleted file mode 100644 index 21d68ee122..0000000000 --- a/pkg/dra/dra-controller/driver.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2024 Authors of spidernet-io -// SPDX-License-Identifier: Apache-2.0 - -package draController - -import ( - "context" - "fmt" - - "github.com/spidernet-io/spiderpool/pkg/constant" - "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" - clientset "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned" - v1 "k8s.io/api/core/v1" - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/dynamic-resource-allocation/controller" -) - -type driver struct { - spiderClientset clientset.Interface -} - -func NewDriver(spiderClientset clientset.Interface) *driver { - return &driver{spiderClientset: spiderClientset} -} - -func (d driver) GetClassParameters(ctx context.Context, class *resourcev1alpha2.ResourceClass) (interface{}, error) { - return nil, nil -} - -func (d driver) GetClaimParameters(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, class *resourcev1alpha2.ResourceClass, classParameters interface{}) (interface{}, error) { - if claim.Spec.ParametersRef == nil { - // TODO(@cyclinder): we can give it a default ClaimParameterSpec? 
- return &v2beta1.ClaimParameterSpec{}, nil - } - - if claim.Spec.ParametersRef.APIGroup != constant.SpiderpoolAPIGroup { - return nil, fmt.Errorf("incorrect API Group: %v", claim.Spec.ParametersRef.APIGroup) - } - - scp, err := d.spiderClientset.SpiderpoolV2beta1().SpiderClaimParameters(claim.Namespace).Get(ctx, claim.Spec.ParametersRef.Name, metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("failed to getting SpiderClaimParameters %s/%s: %w", claim.Namespace, claim.Spec.ParametersRef.Name, err) - } - - return scp, nil -} - -func (d driver) Allocate(ctx context.Context, cas []*controller.ClaimAllocation, selectedNode string) { - for _, ca := range cas { - ca.Allocation, ca.Error = d.allocate(ctx, ca.Claim, ca.ClaimParameters, ca.Class, ca.ClassParameters, selectedNode) - } -} - -func (d driver) allocate(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, claimParameters interface{}, class *resourcev1alpha2.ResourceClass, classParameters interface{}, selectedNode string) (*resourcev1alpha2.AllocationResult, error) { - if selectedNode == "" { - return nil, fmt.Errorf("TODO: immediate allocations not yet supported") - } - - // TODO(@cyclinder): do some checks - nodeSelector := &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchFields: []v1.NodeSelectorRequirement{ - { - Key: "metadata.name", - Operator: "In", - Values: []string{selectedNode}, - }, - }, - }, - }, - } - - return &resourcev1alpha2.AllocationResult{ - AvailableOnNodes: nodeSelector, - }, nil -} - -// Deallocate -func (d driver) Deallocate(ctx context.Context, claim *resourcev1alpha2.ResourceClaim) error { - // TODO(@cyclinder): maybe we need clean the NodeState resource. - return nil -} - -// UnsuitableNodes -func (d driver) UnsuitableNodes(ctx context.Context, pod *v1.Pod, claims []*controller.ClaimAllocation, potentialNodes []string) error { - // TODO(@cyclinder): we need a new CRD resource like NodeState, dra-plugin check the node's state and - // update it to the NodeState resource, dra-controller read the NodeState resource and check the node - // if is unsuitable. 
- return nil -} diff --git a/pkg/dra/dra-plugin/cdi.go b/pkg/dra/dra-plugin/cdi.go deleted file mode 100644 index c1d6af149c..0000000000 --- a/pkg/dra/dra-plugin/cdi.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright 2024 Authors of spidernet-io -// SPDX-License-Identifier: Apache-2.0 - -package draPlugin - -import ( - "fmt" - "os" - "path" - - "github.com/spidernet-io/spiderpool/pkg/constant" - v2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" - "go.uber.org/zap" - cdiapi "tags.cncf.io/container-device-interface/pkg/cdi" - cdiparser "tags.cncf.io/container-device-interface/pkg/parser" - cdispec "tags.cncf.io/container-device-interface/specs-go" -) - -type CDIHandler struct { - cdiRoot string - vendor string - class string - so string - - registry cdiapi.Registry - logger *zap.Logger -} - -type cdiOption func(*CDIHandler) - -func WithCDIRoot(cdiRoot string) cdiOption { - return func(c *CDIHandler) { - c.cdiRoot = cdiRoot - } -} - -func WithVendor(vendor string) cdiOption { - return func(c *CDIHandler) { - c.vendor = vendor - } -} - -func WithClass(class string) cdiOption { - return func(c *CDIHandler) { - c.class = class - } -} - -func WithSoPath(so string) cdiOption { - return func(c *CDIHandler) { - c.so = so - } -} - -func NewCDIHandler(logger *zap.Logger, opts ...cdiOption) (*CDIHandler, error) { - cdi := &CDIHandler{logger: logger} - for _, opt := range opts { - opt(cdi) - } - - registry := cdiapi.GetRegistry( - cdiapi.WithSpecDirs(cdi.cdiRoot), - ) - err := registry.Refresh() - if err != nil { - return nil, fmt.Errorf("unable to refresh the CDI registry: %w", err) - } - cdi.registry = registry - - return cdi, nil -} - -func (cdi *CDIHandler) GetDevice(device string) *cdiapi.Device { - return cdi.registry.DeviceDB().GetDevice(device) -} - -func (cdi *CDIHandler) GetClaimDevices(claimUID string) []string { - devices := []string{ - cdiparser.QualifiedName(cdi.vendor, cdi.class, claimUID), - } - - return devices -} - -// CreateClaimSpecFile create CDI file for the claim -func (cdi *CDIHandler) CreateClaimSpecFile(claimUID string, scp *v2beta1.SpiderClaimParameter) error { - cdiSpec := cdispec.Spec{ - Version: cdi.getCdiVersion(scp.Annotations), - Kind: cdi.cdiKind(), - Devices: []cdispec.Device{{ - Name: claimUID, - ContainerEdits: cdi.getContaineEdits(claimUID, scp.Spec.RdmaAcc), - }}, - } - - specName, err := cdiapi.GenerateNameForTransientSpec(&cdiSpec, claimUID) - if err != nil { - return fmt.Errorf("failed to generate CDI Spec name: %w", err) - } - - specFileName := fmt.Sprintf("%s.%s", specName, "yaml") - if err = cdi.registry.SpecDB().WriteSpec(&cdiSpec, specName+".yaml"); err != nil { - return fmt.Errorf("failed to write CDI spec for claim %s: %v", claimUID, err) - } - - if err := os.Chmod(path.Join(cdi.cdiRoot, specFileName), 0600); err != nil { - return fmt.Errorf("failed to set permissions on spec file: %w", err) - } - return nil -} - -func (cdi *CDIHandler) DeleteClaimSpecFile(claimUID string) error { - spec := &cdispec.Spec{ - Kind: cdi.cdiKind(), - } - - specName, err := cdiapi.GenerateNameForTransientSpec(spec, claimUID) - if err != nil { - return fmt.Errorf("failed to generate CDI Spec name: %w", err) - } - - return cdi.registry.SpecDB().RemoveSpec(specName + ".yaml") -} - -// nolint: all -func (cdi *CDIHandler) getContaineEdits(claim string, rdmaAcc bool) cdispec.ContainerEdits { - ce := cdispec.ContainerEdits{ - // why do we need this? 
- // a device MUST be have at lease a ContainerEdits, so if rdma is false: - // the device have empty ContainerEdits, which cause the container can't - // be started. - Env: []string{ - fmt.Sprintf("DRA_CLAIM_UID=%s", claim), - }, - } - - if rdmaAcc { - soName := path.Base(cdi.so) - ce.Env = append(ce.Env, fmt.Sprintf("LD_PRELOAD=%s", soName)) - ce.Mounts = []*cdispec.Mount{ - { - HostPath: cdi.so, - ContainerPath: fmt.Sprintf("/usr/lib/%s", soName), - Options: []string{"ro", "nosuid", "nodev", "bind"}, - }, - { - HostPath: cdi.so, - ContainerPath: fmt.Sprintf("/usr/lib64/%s", soName), - Options: []string{"ro", "nosuid", "nodev", "bind"}, - }, - } - } - - return ce -} - -func (cdi *CDIHandler) cdiKind() string { - return cdi.vendor + "/" + cdi.class -} - -// getCdiVersion return the cdi version, it can be configure by -// spiderclaimparameter's annotation: ipam.spidernet.io/cdi-version -func (cdi CDIHandler) getCdiVersion(annotations map[string]string) string { - version := cdiapi.CurrentVersion - if annotations == nil { - return version - } - - v, ok := annotations[constant.AnnoDraCdiVersion] - if ok { - return v - } - - return version -} diff --git a/pkg/dra/dra-plugin/device_state.go b/pkg/dra/dra-plugin/device_state.go deleted file mode 100644 index 0f2be0d86e..0000000000 --- a/pkg/dra/dra-plugin/device_state.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2024 Authors of spidernet-io -// SPDX-License-Identifier: Apache-2.0 - -package draPlugin - -import ( - "context" - "fmt" - "os" - - "github.com/spidernet-io/spiderpool/pkg/constant" - v2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" - "github.com/spidernet-io/spiderpool/pkg/lock" - "go.uber.org/zap" -) - -type NodeDeviceState struct { - lock.RWMutex - cdi *CDIHandler - preparedClaims map[string]struct{} -} - -func NewDeviceState(logger *zap.Logger, cdiRoot, so string) (*NodeDeviceState, error) { - _, err := os.Stat(so) - if err != nil { - return nil, fmt.Errorf("failed to stat draHostDevicePath %s: %v", so, err) - } - - cdi, err := NewCDIHandler(logger, - WithCDIRoot(cdiRoot), - WithClass(constant.DRACDIClass), - WithVendor(constant.DRACDIVendor), - WithSoPath(so), - ) - if err != nil { - return nil, err - } - - return &NodeDeviceState{ - cdi: cdi, - preparedClaims: make(map[string]struct{}), - }, nil -} - -func (nds *NodeDeviceState) Prepare(ctx context.Context, claimUID string, scp *v2beta1.SpiderClaimParameter) ([]string, error) { - nds.Lock() - defer nds.Unlock() - - _, preprared := nds.preparedClaims[claimUID] - if preprared { - return nds.cdi.GetClaimDevices(claimUID), nil - } - - if err := nds.cdi.CreateClaimSpecFile(claimUID, scp); err != nil { - return nil, fmt.Errorf("unable to create CDI spec file for claim: %w", err) - } - - nds.preparedClaims[claimUID] = struct{}{} - return nds.cdi.GetClaimDevices(claimUID), nil -} - -func (nds *NodeDeviceState) UnPrepare(ctx context.Context, claimUID string) error { - nds.Lock() - defer nds.Unlock() - - _, ok := nds.preparedClaims[claimUID] - if ok { - delete(nds.preparedClaims, claimUID) - } - - return nds.cdi.DeleteClaimSpecFile(claimUID) -} diff --git a/pkg/dra/dra-plugin/driver.go b/pkg/dra/dra-plugin/driver.go deleted file mode 100644 index b51e9bea8e..0000000000 --- a/pkg/dra/dra-plugin/driver.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2024 Authors of spidernet-io -// SPDX-License-Identifier: Apache-2.0 -package draPlugin - -import ( - "context" - "fmt" - - clientset 
"github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned" - "github.com/spidernet-io/spiderpool/pkg/lock" - "go.uber.org/zap" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/util/retry" - drapbv1 "k8s.io/kubelet/pkg/apis/dra/v1alpha3" - ctrl "sigs.k8s.io/controller-runtime" -) - -type driver struct { - lock.RWMutex - logger *zap.Logger - State *NodeDeviceState - K8sClientSet kubernetes.Interface - SpiderClientSet clientset.Interface -} - -func NewDriver(logger *zap.Logger, cdiRoot string, so string) (*driver, error) { - restConfig := ctrl.GetConfigOrDie() - state, err := NewDeviceState(logger, cdiRoot, so) - if err != nil { - return nil, err - } - - return &driver{ - logger: logger, - State: state, - K8sClientSet: kubernetes.NewForConfigOrDie(restConfig), - SpiderClientSet: clientset.NewForConfigOrDie(restConfig), - }, nil -} - -// NodePrepareResources prepares several ResourceClaims -// for use on the node. If an error is returned, the -// response is ignored. Failures for individual claims -// can be reported inside NodePrepareResourcesResponse. -func (d *driver) NodePrepareResources(ctx context.Context, req *drapbv1.NodePrepareResourcesRequest) (*drapbv1.NodePrepareResourcesResponse, error) { - d.logger.Info("NodePrepareResource is called") - preparedResources := &drapbv1.NodePrepareResourcesResponse{Claims: map[string]*drapbv1.NodePrepareResourceResponse{}} - for _, claim := range req.Claims { - preparedResources.Claims[claim.Uid] = d.nodePrepareResource(ctx, claim) - } - - d.logger.Info("NodePrepareResource returning newly prepared devices", zap.Any("response", preparedResources)) - return preparedResources, nil -} - -func (d *driver) nodePrepareResource(ctx context.Context, claim *drapbv1.Claim) *drapbv1.NodePrepareResourceResponse { - d.Lock() - defer d.Unlock() - - isPrepared, devices, err := d.isPrepared(ctx, claim.Uid) - if err != nil { - return &drapbv1.NodePrepareResourceResponse{ - Error: fmt.Sprintf("error checking if claim is already prepared: %v", err), - } - } - - if isPrepared { - d.logger.Info("[NodePrepareResource] Claim has already prepared, returning cached device resources", zap.String("claim", claim.Uid)) - return &drapbv1.NodePrepareResourceResponse{CDIDevices: devices} - } - - d.logger.Info("[NodePrepareResource] Preparing devices for claim", zap.String("claim", claim.Uid)) - devices, err = d.prepare(ctx, claim) - if err != nil { - d.logger.Error("error preparing devices for claim", zap.String("cliam", claim.Uid), zap.Error(err)) - return &drapbv1.NodePrepareResourceResponse{ - Error: fmt.Sprintf("error preparing devices for claim %v: %v", claim.Uid, err), - } - } - - return &drapbv1.NodePrepareResourceResponse{CDIDevices: devices} -} - -func (d *driver) prepare(ctx context.Context, claim *drapbv1.Claim) ([]string, error) { - var err error - var prepared []string - err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - resourceClaim, err := d.K8sClientSet.ResourceV1alpha2().ResourceClaims(claim.Namespace). - Get(ctx, claim.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - // TODO(@cyclinder): check if the claim.ParametersRef is SpiderClaimParameters. - scp, err := d.SpiderClientSet.SpiderpoolV2beta1().SpiderClaimParameters(claim.Namespace). 
- Get(ctx, resourceClaim.Spec.ParametersRef.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - // prepare CDI file for the claim - prepared, err = d.State.Prepare(ctx, claim.Uid, scp) - if err != nil { - return err - } - return nil - }) - - if err != nil { - return nil, err - } - - return prepared, nil -} - -func (d *driver) isPrepared(ctx context.Context, claimUID string) (bool, []string, error) { - // TODO(@cyclinder): should be check if the claim is prepared. - return false, nil, nil -} - -// NodeUnprepareResources is the opposite of NodePrepareResources. -// The same error handling rules apply -func (d *driver) NodeUnprepareResources(ctx context.Context, req *drapbv1.NodeUnprepareResourcesRequest) (*drapbv1.NodeUnprepareResourcesResponse, error) { - d.logger.Info("NodeUnprepareResources is called") - response := make(map[string]*drapbv1.NodeUnprepareResourceResponse, len(req.Claims)) - for _, claim := range req.Claims { - response[claim.Uid] = d.unPrepareResoruce(ctx, claim) - - } - return &drapbv1.NodeUnprepareResourcesResponse{Claims: response}, nil -} - -func (d *driver) unPrepareResoruce(ctx context.Context, claim *drapbv1.Claim) *drapbv1.NodeUnprepareResourceResponse { - d.logger.Info("UnPrepareResource for claim", zap.String("claim", claim.Uid)) - if err := d.State.UnPrepare(ctx, claim.Uid); err != nil { - d.logger.Error("error unprepare resource for claim", zap.String("claim", claim.Uid), zap.Error(err)) - return &drapbv1.NodeUnprepareResourceResponse{Error: err.Error()} - } - - return &drapbv1.NodeUnprepareResourceResponse{} -} diff --git a/pkg/dra/dra-plugin/plugin.go b/pkg/dra/dra-plugin/plugin.go deleted file mode 100644 index bd53a1ff16..0000000000 --- a/pkg/dra/dra-plugin/plugin.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2024 Authors of spidernet-io -// SPDX-License-Identifier: Apache-2.0 -package draPlugin - -import ( - "fmt" - "os" - - "github.com/spidernet-io/spiderpool/pkg/constant" - "go.uber.org/zap" - "k8s.io/dynamic-resource-allocation/kubeletplugin" -) - -func StartDRAPlugin(logger *zap.Logger, cdiRoot string, so string) (kubeletplugin.DRAPlugin, error) { - err := os.MkdirAll(constant.DRADriverPluginPath, 0750) - if err != nil { - return nil, err - } - - fileInfo, err := os.Stat(cdiRoot) - switch { - case err != nil && os.IsNotExist(err): - if err = os.MkdirAll(cdiRoot, 0750); err != nil { - return nil, err - } - case err != nil: - return nil, err - case !fileInfo.IsDir(): - return nil, fmt.Errorf("cdi path %s isn't a directory", cdiRoot) - } - - driver, err := NewDriver(logger.Named("DRA"), cdiRoot, so) - if err != nil { - return nil, err - } - - dp, err := kubeletplugin.Start(driver, - kubeletplugin.DriverName(constant.DRADriverName), - kubeletplugin.RegistrarSocketPath(constant.DRAPluginRegistrationPath), - kubeletplugin.PluginSocketPath(constant.DRADriverPluginSocketPath), - kubeletplugin.KubeletPluginSocketPath(constant.DRADriverPluginSocketPath), - ) - if err != nil { - return nil, err - } - - return dp, nil -} diff --git a/pkg/gcmanager/scanAll_IPPool.go b/pkg/gcmanager/scanAll_IPPool.go index cfddabcd49..82bab960e8 100644 --- a/pkg/gcmanager/scanAll_IPPool.go +++ b/pkg/gcmanager/scanAll_IPPool.go @@ -76,12 +76,11 @@ func (s *SpiderGC) monitorGCSignal(ctx context.Context) { // executeScanAll scans the whole pod and whole IPPoolList func (s *SpiderGC) executeScanAll(ctx context.Context) { poolList, err := s.ippoolMgr.ListIPPools(ctx, constant.UseCache) - if nil != err { + if err != nil { if apierrors.IsNotFound(err) { 
logger.Sugar().Warnf("scan all failed, ippoolList not found!") return } - logger.Sugar().Errorf("scan all failed: '%v'", err) return } @@ -137,7 +136,7 @@ func (s *SpiderGC) executeScanAll(ctx context.Context) { flagGCEndpoint = false goto GCIP } else { - scanAllLogger.Sugar().Errorf("pod %s/%s does not exist and failed to get endpoint %s/%s, ignore handle IP %s and endpoint, error: '%v'", podNS, podName, podNS, podName, poolIP, err) + scanAllLogger.Sugar().Errorf("pod %s/%s does not exist and failed to get endpoint %s/%s, ignore handle IP %s and endpoint, error: '%v'", podNS, podName, podNS, podName, poolIP, endpointErr) continue } } else { @@ -157,7 +156,7 @@ func (s *SpiderGC) executeScanAll(ctx context.Context) { } } } else { - scanAllLogger.Sugar().Errorf("failed to get pod from kubernetes, error '%v'", err) + scanAllLogger.Sugar().Errorf("failed to get pod from kubernetes, error '%v'", podErr) continue } } diff --git a/pkg/ip/cidr.go b/pkg/ip/cidr.go index cb050e284b..4eec16b094 100644 --- a/pkg/ip/cidr.go +++ b/pkg/ip/cidr.go @@ -17,8 +17,10 @@ func ParseCIDR(version types.IPVersion, subnet string) (*net.IPNet, error) { if err := IsCIDR(version, subnet); err != nil { return nil, err } - _, ipNet, _ := net.ParseCIDR(subnet) - + _, ipNet, err := net.ParseCIDR(subnet) + if err != nil { + return nil, fmt.Errorf("failed to parse CIDR '%s': %v", subnet, err) + } return ipNet, nil } diff --git a/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1/rbac.go b/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1/rbac.go index dffe4b4535..e086256096 100644 --- a/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1/rbac.go +++ b/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1/rbac.go @@ -15,7 +15,7 @@ // +kubebuilder:rbac:groups="",resources=namespaces;endpoints;pods;pods/status;configmaps,verbs=get;list;watch;update;patch;delete;deletecollection // +kubebuilder:rbac:groups=k8s.cni.cncf.io,resources=network-attachment-definitions,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachines;virtualmachineinstances,verbs=get;list -// +kubebuilder:rbac:groups=admissionregistration.k8s.io,resources=mutatingwebhookconfigurations;validatingwebhookconfigurations,verbs=get;list;watch;delete +// +kubebuilder:rbac:groups=admissionregistration.k8s.io,resources=mutatingwebhookconfigurations;validatingwebhookconfigurations,verbs=get;list;watch;delete;update // +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=apps.kruise.io,resources=clonesets;statefulsets,verbs=get;list;watch // +kubebuilder:rbac:groups=crd.projectcalico.org,resources=ippools,verbs=get;list;watch diff --git a/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1/spidercoordinator_types.go b/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1/spidercoordinator_types.go index 67e3b84931..4176b649ec 100644 --- a/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1/spidercoordinator_types.go +++ b/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1/spidercoordinator_types.go @@ -88,6 +88,11 @@ type CoordinatorSpec struct { // +kubebuilder:validation:Optional // +kubebuilder:default=false DetectGateway *bool `json:"detectGateway,omitempty"` + + // VethLinkAddress configure a ipv4 link-local address + // for veth0 device. empty means disable. default is empty. + // Format is like 169.254.100.1 + VethLinkAddress *string `json:"vethLinkAddress,omitempty"` } // CoordinationStatus defines the observed state of SpiderCoordinator. 
diff --git a/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1/zz_generated.deepcopy.go b/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1/zz_generated.deepcopy.go index 6c867d899c..61000ff277 100644 --- a/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1/zz_generated.deepcopy.go +++ b/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1/zz_generated.deepcopy.go @@ -116,6 +116,11 @@ func (in *CoordinatorSpec) DeepCopyInto(out *CoordinatorSpec) { *out = new(bool) **out = **in } + if in.VethLinkAddress != nil { + in, out := &in.VethLinkAddress, &out.VethLinkAddress + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoordinatorSpec. diff --git a/pkg/multuscniconfig/multusconfig_informer.go b/pkg/multuscniconfig/multusconfig_informer.go index 9d3c52372f..1abab30b15 100644 --- a/pkg/multuscniconfig/multusconfig_informer.go +++ b/pkg/multuscniconfig/multusconfig_informer.go @@ -727,6 +727,9 @@ func generateCoordinatorCNIConf(coordinatorSpec *spiderpoolv2beta1.CoordinatorSp if coordinatorSpec.DetectGateway != nil { coordinatorNetConf.DetectGateway = coordinatorSpec.DetectGateway } + if coordinatorSpec.VethLinkAddress != nil { + coordinatorNetConf.VethLinkAddress = *coordinatorSpec.VethLinkAddress + } if coordinatorSpec.TunePodRoutes != nil { coordinatorNetConf.TunePodRoutes = coordinatorSpec.TunePodRoutes } diff --git a/pkg/multuscniconfig/multusconfig_mutate.go b/pkg/multuscniconfig/multusconfig_mutate.go index df7d7b25c9..95a24661fe 100644 --- a/pkg/multuscniconfig/multusconfig_mutate.go +++ b/pkg/multuscniconfig/multusconfig_mutate.go @@ -192,6 +192,7 @@ func setCoordinatorDefaultConfig(coordinator *spiderpoolv2beta1.CoordinatorSpec) HijackCIDR: []string{}, DetectGateway: ptr.To(false), DetectIPConflict: ptr.To(false), + VethLinkAddress: ptr.To(""), PodMACPrefix: ptr.To(""), PodDefaultRouteNIC: ptr.To(""), HostRPFilter: ptr.To(0), diff --git a/pkg/multuscniconfig/multusconfig_validate.go b/pkg/multuscniconfig/multusconfig_validate.go index 58e21a3b88..9d19c565bf 100644 --- a/pkg/multuscniconfig/multusconfig_validate.go +++ b/pkg/multuscniconfig/multusconfig_validate.go @@ -84,6 +84,10 @@ func checkExistedConfig(spec *spiderpoolv2beta1.MultusCNIConfigSpec, exclude str func validateCNIConfig(multusConfig *spiderpoolv2beta1.SpiderMultusConfig) *field.Error { // with Kubernetes OpenAPI validation and Mutating Webhook, multusConfSpec.CniType must not be nil and default to "custom" + if multusConfig.Spec.CniType == nil { + return field.Invalid(cniTypeField, nil, "CniType must not be nil") + } + switch *multusConfig.Spec.CniType { case constant.MacvlanCNI: if multusConfig.Spec.MacvlanConfig == nil { diff --git a/pkg/multuscniconfig/utils.go b/pkg/multuscniconfig/utils.go index afa3ece680..ddc886eddf 100644 --- a/pkg/multuscniconfig/utils.go +++ b/pkg/multuscniconfig/utils.go @@ -30,6 +30,7 @@ import ( coordinatorcmd "github.com/spidernet-io/spiderpool/cmd/coordinator/cmd" spiderpoolcmd "github.com/spidernet-io/spiderpool/cmd/spiderpool/cmd" + "github.com/spidernet-io/spiderpool/pkg/constant" spiderpoolv2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" ) @@ -96,6 +97,7 @@ type CoordinatorConfig struct { TxQueueLen *int `json:"txQueueLen,omitempty"` IPConflict *bool `json:"detectIPConflict,omitempty"` DetectGateway *bool `json:"detectGateway,omitempty"` + VethLinkAddress string `json:"vethLinkAddress,omitempty"` TunePodRoutes *bool `json:"tunePodRoutes,omitempty"` MacPrefix string 
`json:"podMACPrefix,omitempty"` Mode coordinatorcmd.Mode `json:"mode,omitempty"` @@ -220,3 +222,28 @@ func ParsePodNetworkObjectName(podnetwork string) (string, string, string, error return netNsName, networkName, netIfName, nil } + +// resourceName returns the appropriate resource name based on the CNI type and configuration +// of the given SpiderMultusConfig. +func ResourceName(smc *spiderpoolv2beta1.SpiderMultusConfig) string { + switch *smc.Spec.CniType { + case constant.MacvlanCNI: + // For Macvlan CNI, return RDMA resource name if RDMA is enabled + if smc.Spec.MacvlanConfig != nil && smc.Spec.MacvlanConfig.EnableRdma { + return smc.Spec.MacvlanConfig.RdmaResourceName + } + case constant.IPVlanCNI: + if smc.Spec.IPVlanConfig != nil && smc.Spec.IPVlanConfig.EnableRdma { + return smc.Spec.IPVlanConfig.RdmaResourceName + } + case constant.SriovCNI: + if smc.Spec.SriovConfig != nil { + return smc.Spec.SriovConfig.ResourceName + } + case constant.IBSriovCNI: + if smc.Spec.IbSriovConfig != nil { + return smc.Spec.IbSriovConfig.ResourceName + } + } + return "" +} diff --git a/pkg/podmanager/pod_manager.go b/pkg/podmanager/pod_manager.go index 016e864b38..600454e125 100644 --- a/pkg/podmanager/pod_manager.go +++ b/pkg/podmanager/pod_manager.go @@ -7,6 +7,7 @@ import ( "context" "fmt" + crdclientset "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" @@ -27,8 +28,9 @@ type PodManager interface { } type podManager struct { - client client.Client - apiReader client.Reader + client client.Client + apiReader client.Reader + SpiderClient crdclientset.Interface } func NewPodManager(client client.Client, apiReader client.Reader) (PodManager, error) { diff --git a/pkg/podmanager/pod_webhook.go b/pkg/podmanager/pod_webhook.go new file mode 100644 index 0000000000..1a1221e51d --- /dev/null +++ b/pkg/podmanager/pod_webhook.go @@ -0,0 +1,124 @@ +// Copyright 2022 Authors of spidernet-io +// SPDX-License-Identifier: Apache-2.0 +package podmanager + +import ( + "context" + + "github.com/spidernet-io/spiderpool/pkg/constant" + crdclientset "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned" + "github.com/spidernet-io/spiderpool/pkg/logutils" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + admissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +var PodWebhookExcludeNamespaces = []string{ + metav1.NamespaceSystem, + metav1.NamespacePublic, + constant.Spiderpool, + "metallb-system", + "istio-system", + // more system namespaces to be added +} + +type PodWebhook interface { + admission.CustomDefaulter + admission.CustomValidator +} + +type podWebhook struct { + spiderClient crdclientset.Interface +} + +// InitPodWebhook initializes the pod webhook. +// It sets up the mutating webhook for pods and registers it with the manager. +// Parameters: +// - client: The Kubernetes client +// - mgr: The controller manager +// - mutatingWebhookName: The name of the mutating webhook +// +// Returns an error if initialization fails. 
+func InitPodWebhook( + admissionClient admissionregistrationv1.AdmissionregistrationV1Interface, + mgr ctrl.Manager, + mutatingWebhookName string, + webhookNamespaceExclude []string, + webhookNamespaceInclude []string) error { + spiderClient, err := crdclientset.NewForConfig(ctrl.GetConfigOrDie()) + if err != nil { + return err + } + + pw := &podWebhook{ + spiderClient: spiderClient, + } + + if len(webhookNamespaceExclude) != 0 { + PodWebhookExcludeNamespaces = webhookNamespaceExclude + } + + if err = AddPodMutatingWebhook(admissionClient, mutatingWebhookName, webhookNamespaceInclude); err != nil { + return err + } + + // setup mutating webhook for pods + if err = ctrl.NewWebhookManagedBy(mgr). + For(&corev1.Pod{}). + WithDefaulter(pw). + Complete(); err != nil { + return err + } + return nil +} + +// Default implements the defaulting webhook for pods. +// It injects network resources into the pod if it has the appropriate annotation. +// Parameters: +// - ctx: The context +// - obj: The runtime object (expected to be a Pod) +// +// Returns an error if defaulting fails. +func (pw *podWebhook) Default(ctx context.Context, obj runtime.Object) error { + logger := logutils.FromContext(ctx) + pod := obj.(*corev1.Pod) + mutateLogger := logger.Named("PodMutating").With( + zap.String("Pod", pod.GenerateName)) + mutateLogger.Sugar().Debugf("Request Pod: %+v", *pod) + + _, ok := pod.Annotations[constant.AnnoPodResourceInject] + if !ok { + return nil + } + + mutateLogger.Sugar().Debugf("Pod %s/%s is annotated with %s, start injecting network resources", pod.Namespace, pod.GenerateName, constant.AnnoPodResourceInject) + err := podNetworkMutatingWebhook(pw.spiderClient, pod) + if err != nil { + mutateLogger.Sugar().Errorf("Failed to inject network resources for pod %s/%s: %v", pod.Namespace, pod.GenerateName, err) + return err + } + mutateLogger.Sugar().Debugf("Pod %s/%s network resources injected, Pod: %v", pod.Namespace, pod.GenerateName, pod) + return nil +} + +// ValidateCreate implements the validation webhook for pod creation. +// Currently, it performs no validation and always returns nil. +func (pw *podWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + return nil, nil +} + +// ValidateUpdate implements the validation webhook for pod updates. +// Currently, it performs no validation and always returns nil. +func (pw *podWebhook) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { + return nil, nil +} + +// ValidateDelete implements the validation webhook for pod deletion. +// Currently, it performs no validation and always returns nil. 
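+// All three Validate* methods are deliberate no-ops: the webhook is registered
+// only as a defaulter, and they exist solely to satisfy the PodWebhook interface.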
+func (pw *podWebhook) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
+	return nil, nil
+}
diff --git a/pkg/podmanager/utils.go b/pkg/podmanager/utils.go
index eaf4712295..015ef20927 100644
--- a/pkg/podmanager/utils.go
+++ b/pkg/podmanager/utils.go
@@ -4,12 +4,23 @@
 package podmanager
 
 import (
+	"context"
+	"fmt"
+
+	crdclientset "github.com/spidernet-io/spiderpool/pkg/k8s/client/clientset/versioned"
+	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
+	k8s_resource "k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	admissionClientv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1"
+	"k8s.io/client-go/util/retry"
+	"k8s.io/utils/ptr"
 	kubevirtv1 "kubevirt.io/api/core/v1"
 
 	"github.com/spidernet-io/spiderpool/pkg/constant"
+	"github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1"
+	"github.com/spidernet-io/spiderpool/pkg/multuscniconfig"
 )
 
 func IsPodAlive(pod *corev1.Pod) bool {
@@ -53,3 +64,321 @@ func IsStaticIPPod(enableStatefulSet, enableKubevirtStaticIP bool, pod *corev1.P
 	return false
 }
+
+// podNetworkMutatingWebhook handles the mutating webhook for pod networks.
+// It checks whether the pod carries the resource-inject annotation, retrieves the
+// SpiderMultusConfigs labeled with the same value, and injects the network
+// configuration into the pod.
+//
+// Parameters:
+//   - spiderClient: a Spiderpool clientset used to list SpiderMultusConfigs
+//   - pod: a pointer to the corev1.Pod object to be mutated
+//
+// Returns:
+//   - An error if any step in the process fails, nil otherwise
+func podNetworkMutatingWebhook(spiderClient crdclientset.Interface, pod *corev1.Pod) error {
+	multusLabelValue, ok := pod.Annotations[constant.AnnoPodResourceInject]
+	if !ok {
+		return nil
+	}
+
+	labelSelector := metav1.LabelSelector{
+		MatchExpressions: []metav1.LabelSelectorRequirement{
+			{
+				Key:      constant.AnnoPodResourceInject,
+				Operator: metav1.LabelSelectorOpIn,
+				Values:   []string{multusLabelValue},
+			},
+		},
+	}
+
+	selector, err := metav1.LabelSelectorAsSelector(&labelSelector)
+	if err != nil {
+		return fmt.Errorf("failed to create label selector: %v", err)
+	}
+
+	multusConfigs, err := spiderClient.SpiderpoolV2beta1().SpiderMultusConfigs("").List(context.TODO(), metav1.ListOptions{
+		LabelSelector: selector.String(),
+	})
+	if err != nil {
+		return err
+	}
+
+	if len(multusConfigs.Items) == 0 {
+		return fmt.Errorf("no spidermultusconfig with label %v:%v found", constant.AnnoPodResourceInject, multusLabelValue)
+	}
+
+	return InjectPodNetwork(pod, *multusConfigs)
+}
+
+// InjectPodNetwork injects network configurations into the pod based on the provided SpiderMultusConfigs.
+// It checks for CNI type consistency, updates the pod's network attachment annotations,
+// and prepares a map of resources to be injected.
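+// Network attachments are appended to the pod's Multus annotation
+// (constant.MultusNetworkAttachmentAnnot) in list order.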
+//
+// Parameters:
+//   - pod: a pointer to the corev1.Pod object to be updated
+//   - multusConfigs: a list of SpiderMultusConfig objects to be applied to the pod
+//
+// Returns:
+//   - An error if there's an inconsistency in CNI types, nil otherwise
+func InjectPodNetwork(pod *corev1.Pod, multusConfigs v2beta1.SpiderMultusConfigList) error {
+	var cniType string
+	resourcesMap := make(map[string]bool, len(multusConfigs.Items))
+	for _, mc := range multusConfigs.Items {
+		// Check the consistency of the CNI type
+		if cniType != "" && cniType != *mc.Spec.CniType {
+			return fmt.Errorf("spidermultusconfig %s/%s cniType %s is not consistent with %s", mc.Namespace, mc.Name, *mc.Spec.CniType, cniType)
+		} else {
+			// If it's the first time setting, or consistent with the previous
+			// type, update cniType
+			cniType = *mc.Spec.CniType
+		}
+
+		if err := doValidateRdmaResourceAndIPPools(mc); err != nil {
+			return err
+		}
+
+		// Update the pod's network attachment
+		if networks, ok := pod.Annotations[constant.MultusNetworkAttachmentAnnot]; !ok {
+			pod.Annotations[constant.MultusNetworkAttachmentAnnot] = fmt.Sprintf("%s/%s", mc.Namespace, mc.Name)
+		} else {
+			pod.Annotations[constant.MultusNetworkAttachmentAnnot] = networks + "," + fmt.Sprintf("%s/%s", mc.Namespace, mc.Name)
+		}
+
+		resourceName := multuscniconfig.ResourceName(&mc)
+		if resourceName == "" {
+			continue
+		}
+
+		if _, ok := resourcesMap[resourceName]; !ok {
+			resourcesMap[resourceName] = false
+		}
+	}
+	InjectRdmaResourceToPod(resourcesMap, pod)
+	return nil
+}
+
+// InjectRdmaResourceToPod injects RDMA resources into the pod's containers.
+// It checks each container for existing resource requests/limits and updates
+// the resourceMap accordingly. If a resource is not found in any container,
+// it is injected into the first container's resource limits.
+//
+// Parameters:
+//   - resourceMap: a map of resource names to boolean values indicating if they've been found
+//   - pod: a pointer to the corev1.Pod object to be updated
+func InjectRdmaResourceToPod(resourceMap map[string]bool, pod *corev1.Pod) {
+	for _, c := range pod.Spec.Containers {
+		for resource := range resourceMap {
+			if resourceMap[resource] {
+				// the resource has already been found in the pod, skip it
+				continue
+			}
+
+			// try to find the resource in the container's resources.limits
+			if _, ok := c.Resources.Limits[corev1.ResourceName(resource)]; ok {
+				resourceMap[resource] = true
+			}
+		}
+	}
+
+	for resource, found := range resourceMap {
+		if found {
+			continue
+		}
+		if pod.Spec.Containers[0].Resources.Limits == nil {
+			pod.Spec.Containers[0].Resources.Limits = make(corev1.ResourceList)
+		}
+		pod.Spec.Containers[0].Resources.Limits[corev1.ResourceName(resource)] = k8s_resource.MustParse("1")
+	}
+}
+
+// InitPodMutatingWebhook initializes a mutating webhook for pods based on a template webhook.
+// It sets up the webhook configuration including name, admission review versions, failure policy,
+// namespace selector, client config, and rules for pod creation and update operations.
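+// The template webhook is expected to be an existing entry of the controller's
+// MutatingWebhookConfiguration, so the CA bundle and service reference stay
+// consistent with what the chart already deploys.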
+//
+// Parameters:
+//   - from: an admissionregistrationv1.MutatingWebhook object to use as a template
+//   - webhookNamespaceInclude: namespaces the webhook is explicitly limited to
+//
+// Returns:
+//   - A new admissionregistrationv1.MutatingWebhook object configured for pod mutation
+func InitPodMutatingWebhook(from admissionregistrationv1.MutatingWebhook, webhookNamespaceInclude []string) admissionregistrationv1.MutatingWebhook {
+	wb := admissionregistrationv1.MutatingWebhook{
+		Name:                    constant.PodMutatingWebhookName,
+		AdmissionReviewVersions: from.AdmissionReviewVersions,
+		FailurePolicy:           ptr.To(admissionregistrationv1.Fail),
+		NamespaceSelector:       &metav1.LabelSelector{},
+		ClientConfig: admissionregistrationv1.WebhookClientConfig{
+			CABundle: from.ClientConfig.CABundle,
+		},
+		Rules: []admissionregistrationv1.RuleWithOperations{
+			{
+				Operations: []admissionregistrationv1.OperationType{
+					admissionregistrationv1.Create,
+					admissionregistrationv1.Update,
+				},
+				Rule: admissionregistrationv1.Rule{
+					APIGroups:   []string{""},
+					APIVersions: []string{"v1"},
+					Resources:   []string{"pods"},
+				},
+			},
+		},
+		SideEffects: ptr.To(admissionregistrationv1.SideEffectClassNone),
+	}
+
+	if from.ClientConfig.Service != nil {
+		wb.ClientConfig.Service = &admissionregistrationv1.ServiceReference{
+			Name:      from.ClientConfig.Service.Name,
+			Namespace: from.ClientConfig.Service.Namespace,
+			Port:      from.ClientConfig.Service.Port,
+			// controller-runtime path format: /mutate-<group>-<version>-<kind>;
+			// the group is empty for core v1 Pods
+			Path: ptr.To("/mutate--v1-pod"),
+		}
+	}
+
+	if len(PodWebhookExcludeNamespaces) != 0 {
+		wb.NamespaceSelector.MatchExpressions = []metav1.LabelSelectorRequirement{
+			{
+				Key:      corev1.LabelMetadataName,
+				Operator: metav1.LabelSelectorOpNotIn,
+				Values:   PodWebhookExcludeNamespaces,
+			},
+		}
+	}
+
+	if len(webhookNamespaceInclude) != 0 {
+		wb.NamespaceSelector.MatchExpressions = append(wb.NamespaceSelector.MatchExpressions, metav1.LabelSelectorRequirement{
+			Key:      corev1.LabelMetadataName,
+			Operator: metav1.LabelSelectorOpIn,
+			Values:   webhookNamespaceInclude,
+		})
+	}
+	return wb
+}
+
+// AddPodMutatingWebhook updates the MutatingWebhookConfiguration for pods.
+// It retrieves the existing configuration, adds a new webhook for pods,
+// and updates the configuration in the Kubernetes API server.
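+// The read-modify-update cycle runs inside retry.RetryOnConflict, so concurrent
+// controller replicas patching the same configuration do not lose writes.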
+func AddPodMutatingWebhook(admissionClient admissionClientv1.AdmissionregistrationV1Interface, mutatingWebhookName string, webhookNamespaceInclude []string) error {
+	retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
+		mwc, err := admissionClient.MutatingWebhookConfigurations().Get(context.TODO(), mutatingWebhookName, metav1.GetOptions{})
+		if err != nil {
+			return fmt.Errorf("failed to get MutatingWebhookConfiguration: %v", err)
+		}
+
+		if len(mwc.Webhooks) == 0 {
+			return fmt.Errorf("no mutating webhooks found in MutatingWebhookConfiguration %s", mutatingWebhookName)
+		}
+
+		var newWebhooks []admissionregistrationv1.MutatingWebhook
+		for _, wb := range mwc.Webhooks {
+			// drop any existing pod webhook so it is re-created from the template below
+			if wb.Name == constant.PodMutatingWebhookName {
+				continue
+			}
+			newWebhooks = append(newWebhooks, wb)
+		}
+
+		podWebhook := InitPodMutatingWebhook(*mwc.Webhooks[0].DeepCopy(), webhookNamespaceInclude)
+		newWebhooks = append(newWebhooks, podWebhook)
+		mwc.Webhooks = newWebhooks
+
+		_, updateErr := admissionClient.MutatingWebhookConfigurations().Update(context.TODO(), mwc, metav1.UpdateOptions{})
+		return updateErr
+	})
+	if retryErr != nil {
+		return fmt.Errorf("failed to update MutatingWebhookConfiguration %s: %v", mutatingWebhookName, retryErr)
+	}
+
+	return nil
+}
+
+// RemovePodMutatingWebhook removes the mutating webhook for pods.
+// It retrieves the existing configuration, removes the webhook for pods,
+// and updates the configuration in the Kubernetes API server.
+func RemovePodMutatingWebhook(admissionClient admissionClientv1.AdmissionregistrationV1Interface, mutatingWebhookName string) error {
+	retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
+		mwc, err := admissionClient.MutatingWebhookConfigurations().Get(context.TODO(), mutatingWebhookName, metav1.GetOptions{})
+		if err != nil {
+			return err
+		}
+
+		var newWebhooks []admissionregistrationv1.MutatingWebhook
+		for _, wb := range mwc.Webhooks {
+			if wb.Name != constant.PodMutatingWebhookName {
+				newWebhooks = append(newWebhooks, wb)
+			}
+		}
+
+		if len(newWebhooks) == len(mwc.Webhooks) {
+			return nil
+		}
+
+		mwc.Webhooks = newWebhooks
+		_, err = admissionClient.MutatingWebhookConfigurations().Update(context.TODO(), mwc, metav1.UpdateOptions{})
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+	if retryErr != nil {
+		return fmt.Errorf("failed to remove the pod mutating webhook: %v", retryErr)
+	}
+	return nil
+}
+
+func doValidateRdmaResourceAndIPPools(mc v2beta1.SpiderMultusConfig) error {
+	doValidateIPPools := func(name, namespace string, ippools *v2beta1.SpiderpoolPools) error {
+		if ippools == nil {
+			return fmt.Errorf("no ippools configured for spidermultusconfig %s/%s", namespace, name)
+		}
+
+		if len(ippools.IPv4IPPool)+len(ippools.IPv6IPPool) == 0 {
+			return fmt.Errorf("no ippools configured for spidermultusconfig %s/%s", namespace, name)
+		}
+		return nil
+	}
+
+	spec := mc.Spec
+	switch *spec.CniType {
+	case constant.MacvlanCNI:
+		if !spec.MacvlanConfig.EnableRdma {
+			return fmt.Errorf("spidermultusconfig %s/%s does not enable RDMA", mc.Namespace, mc.Name)
+		}
+
+		if spec.MacvlanConfig.RdmaResourceName == "" {
+			return fmt.Errorf("rdmaResourceName cannot be empty for spidermultusconfig %s/%s", mc.Namespace, mc.Name)
+		}
+
+		return doValidateIPPools(mc.Name, mc.Namespace, spec.MacvlanConfig.SpiderpoolConfigPools)
+	case constant.IPVlanCNI:
+		if !spec.IPVlanConfig.EnableRdma {
+			return fmt.Errorf("spidermultusconfig %s/%s does not enable RDMA", mc.Namespace, mc.Name)
+		}
+
+		if spec.IPVlanConfig.RdmaResourceName == "" {
+			return fmt.Errorf("rdmaResourceName cannot be empty for spidermultusconfig %s/%s", mc.Namespace, mc.Name)
+		}
+
+		return doValidateIPPools(mc.Name, mc.Namespace, spec.IPVlanConfig.SpiderpoolConfigPools)
+	case constant.SriovCNI:
+		if !spec.SriovConfig.EnableRdma {
+			return fmt.Errorf("spidermultusconfig %s/%s does not enable RDMA", mc.Namespace, mc.Name)
+		}
+
+		if spec.SriovConfig.ResourceName == "" {
+			return fmt.Errorf("resourceName cannot be empty for spidermultusconfig %s/%s", mc.Namespace, mc.Name)
+		}
+
+		return doValidateIPPools(mc.Name, mc.Namespace, spec.SriovConfig.SpiderpoolConfigPools)
+	case constant.IBSriovCNI:
+		if spec.IbSriovConfig.ResourceName == "" {
+			return fmt.Errorf("resourceName cannot be empty for spidermultusconfig %s/%s", mc.Namespace, mc.Name)
+		}
+
+		return doValidateIPPools(mc.Name, mc.Namespace, spec.IbSriovConfig.SpiderpoolConfigPools)
+	case constant.IPoIBCNI:
+		return doValidateIPPools(mc.Name, mc.Namespace, spec.IpoibConfig.SpiderpoolConfigPools)
+	default:
+		return fmt.Errorf("RDMA resource injection does not support cniType: %s", *spec.CniType)
+	}
+}
diff --git a/pkg/podmanager/utils_test.go b/pkg/podmanager/utils_test.go
index b91c7f8c50..988d23c403 100644
--- a/pkg/podmanager/utils_test.go
+++ b/pkg/podmanager/utils_test.go
@@ -4,10 +4,17 @@
 package podmanager_test
 
 import (
+	"context"
+
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
+	"github.com/spidernet-io/spiderpool/pkg/constant"
+	"github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1"
+	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
 	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes/fake"
 	"k8s.io/utils/ptr"
 
 	"github.com/spidernet-io/spiderpool/pkg/podmanager"
@@ -75,4 +82,506 @@ var _ = Describe("PodManager utils", Label("pod_manager_utils_test"), func() {
 		Expect(isAlive).To(BeTrue())
+	})
+	})
+
+	Describe("Test InjectPodNetwork", Label("inject_pod_network_test"), func() {
+		var pod *corev1.Pod
+		var multusConfigs v2beta1.SpiderMultusConfigList
+
+		BeforeEach(func() {
+			pod = &corev1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:        "test-pod",
+					Namespace:   "default",
+					Annotations: make(map[string]string),
+				},
+				Spec: corev1.PodSpec{
+					Containers: []corev1.Container{
+						{
+							Name: "test-container",
+							Resources: corev1.ResourceRequirements{
+								Requests: corev1.ResourceList{},
+								Limits:   corev1.ResourceList{},
+							},
+						},
+					},
+				},
+			}
+		})
+
+		It("should successfully inject network configuration", func() {
+			multusConfigs = v2beta1.SpiderMultusConfigList{
+				Items: []v2beta1.SpiderMultusConfig{
+					{
+						ObjectMeta: metav1.ObjectMeta{
+							Name:      "config1",
+							Namespace: "default",
+						},
+						Spec: v2beta1.MultusCNIConfigSpec{
+							CniType: ptr.To("macvlan"),
+							MacvlanConfig: &v2beta1.SpiderMacvlanCniConfig{
+								EnableRdma:       true,
+								RdmaResourceName: "spidernet.io/rdma-resource1",
+								SpiderpoolConfigPools: &v2beta1.SpiderpoolPools{
+									IPv4IPPool: []string{"test1"},
+								},
+							},
+						},
+					},
+					{
+						ObjectMeta: metav1.ObjectMeta{
+							Name:      "config2",
+							Namespace: "default",
+						},
+						Spec: v2beta1.MultusCNIConfigSpec{
+							CniType: ptr.To("macvlan"),
+							MacvlanConfig: &v2beta1.SpiderMacvlanCniConfig{
+								EnableRdma:       true,
+								RdmaResourceName: "spidernet.io/rdma-resource2",
+								SpiderpoolConfigPools: &v2beta1.SpiderpoolPools{
+									IPv4IPPool: []string{"test1"},
+								},
+							},
+						},
+					},
+				},
+			}
+			err := podmanager.InjectPodNetwork(pod, multusConfigs)
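+			// Both configs share cniType macvlan, so injection should succeed and
+			// list the attachments in config order.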
+			Expect(err).NotTo(HaveOccurred())
+			Expect(pod.Annotations[constant.MultusNetworkAttachmentAnnot]).To(Equal("default/config1,default/config2"))
+
+			Expect(pod.Spec.Containers[0].Resources.Limits).To(HaveKey(corev1.ResourceName("spidernet.io/rdma-resource1")))
+			Expect(pod.Spec.Containers[0].Resources.Limits).To(HaveKey(corev1.ResourceName("spidernet.io/rdma-resource2")))
+		})
+
+		It("should return an error when no ippools are configured", func() {
+			multusConfigs = v2beta1.SpiderMultusConfigList{
+				Items: []v2beta1.SpiderMultusConfig{
+					{
+						ObjectMeta: metav1.ObjectMeta{
+							Name:      "config1",
+							Namespace: "default",
+						},
+						Spec: v2beta1.MultusCNIConfigSpec{
+							CniType: ptr.To("macvlan"),
+							MacvlanConfig: &v2beta1.SpiderMacvlanCniConfig{
+								EnableRdma:       true,
+								RdmaResourceName: "spidernet.io/rdma-resource1",
+							},
+						},
+					},
+					{
+						ObjectMeta: metav1.ObjectMeta{
+							Name:      "config2",
+							Namespace: "default",
+						},
+						Spec: v2beta1.MultusCNIConfigSpec{
+							CniType: ptr.To("macvlan"),
+							MacvlanConfig: &v2beta1.SpiderMacvlanCniConfig{
+								EnableRdma:       true,
+								RdmaResourceName: "spidernet.io/rdma-resource2",
+								SpiderpoolConfigPools: &v2beta1.SpiderpoolPools{
+									IPv4IPPool: []string{"test1"},
+								},
+							},
+						},
+					},
+				},
+			}
+			err := podmanager.InjectPodNetwork(pod, multusConfigs)
+			Expect(err).To(HaveOccurred())
+			Expect(err.Error()).To(ContainSubstring("no ippools configured"))
+		})
+
+		It("should return an error when RDMA is not enabled", func() {
+			multusConfigs = v2beta1.SpiderMultusConfigList{
+				Items: []v2beta1.SpiderMultusConfig{
+					{
+						ObjectMeta: metav1.ObjectMeta{
+							Name:      "config1",
+							Namespace: "default",
+						},
+						Spec: v2beta1.MultusCNIConfigSpec{
+							CniType: ptr.To("macvlan"),
+							MacvlanConfig: &v2beta1.SpiderMacvlanCniConfig{
+								EnableRdma:       false,
+								RdmaResourceName: "spidernet.io/rdma-resource1",
+								SpiderpoolConfigPools: &v2beta1.SpiderpoolPools{
+									IPv4IPPool: []string{"test1"},
+								},
+							},
+						},
+					},
+					{
+						ObjectMeta: metav1.ObjectMeta{
+							Name:      "config2",
+							Namespace: "default",
+						},
+						Spec: v2beta1.MultusCNIConfigSpec{
+							CniType: ptr.To("macvlan"),
+							MacvlanConfig: &v2beta1.SpiderMacvlanCniConfig{
+								EnableRdma:       true,
+								RdmaResourceName: "spidernet.io/rdma-resource2",
+								SpiderpoolConfigPools: &v2beta1.SpiderpoolPools{
+									IPv4IPPool: []string{"test1"},
+								},
+							},
+						},
+					},
+				},
+			}
+			err := podmanager.InjectPodNetwork(pod, multusConfigs)
+			Expect(err).To(HaveOccurred())
+			Expect(err.Error()).To(ContainSubstring("not enable RDMA"))
+		})
+
+		It("should preserve existing resources in the Pod", func() {
+			// Set some pre-existing resources
+			pod.Spec.Containers[0].Resources.Limits = corev1.ResourceList{
+				corev1.ResourceName("spidernet.io/rdma-resource1"): resource.MustParse("1"),
+				corev1.ResourceName("existing-resource"):           resource.MustParse("10"),
+			}
+
+			multusConfigs = v2beta1.SpiderMultusConfigList{
+				Items: []v2beta1.SpiderMultusConfig{
+					{
+						ObjectMeta: metav1.ObjectMeta{
+							Name:      "config1",
+							Namespace: "default",
+						},
+						Spec: v2beta1.MultusCNIConfigSpec{
+							CniType: ptr.To("macvlan"),
+							MacvlanConfig: &v2beta1.SpiderMacvlanCniConfig{
+								EnableRdma:       true,
+								RdmaResourceName: "spidernet.io/rdma-resource1",
+								SpiderpoolConfigPools: &v2beta1.SpiderpoolPools{
+									IPv4IPPool: []string{"test1"},
+								},
+							},
+						},
+					},
+					{
+						ObjectMeta: metav1.ObjectMeta{
+							Name:      "config2",
+							Namespace: "default",
+						},
+						Spec: v2beta1.MultusCNIConfigSpec{
+							CniType: ptr.To("macvlan"),
+							MacvlanConfig: &v2beta1.SpiderMacvlanCniConfig{
+								EnableRdma:       true,
+								RdmaResourceName: "spidernet.io/rdma-resource2",
+								SpiderpoolConfigPools: 
&v2beta1.SpiderpoolPools{ + IPv4IPPool: []string{"test1"}, + }, + }, + }, + }, + }, + } + + err := podmanager.InjectPodNetwork(pod, multusConfigs) + Expect(err).NotTo(HaveOccurred()) + Expect(pod.Annotations[constant.MultusNetworkAttachmentAnnot]).To(Equal("default/config1,default/config2")) + + // Verify that existing resources are preserved + Expect(pod.Spec.Containers[0].Resources.Limits).To(HaveKey(corev1.ResourceName("spidernet.io/rdma-resource1"))) + Expect(pod.Spec.Containers[0].Resources.Limits).To(HaveKey(corev1.ResourceName("spidernet.io/rdma-resource2"))) + Expect(pod.Spec.Containers[0].Resources.Limits).To(HaveKey(corev1.ResourceName("existing-resource"))) + Expect(pod.Spec.Containers[0].Resources.Limits[corev1.ResourceName("existing-resource")]).To(Equal(resource.MustParse("10"))) + }) + + It("should return an error when CNI types are inconsistent", func() { + multusConfigs = v2beta1.SpiderMultusConfigList{ + Items: []v2beta1.SpiderMultusConfig{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "config1", + Namespace: "default", + }, + Spec: v2beta1.MultusCNIConfigSpec{ + CniType: ptr.To("macvlan"), + MacvlanConfig: &v2beta1.SpiderMacvlanCniConfig{ + EnableRdma: true, + RdmaResourceName: "spidernet.io/rdma-resource1", + SpiderpoolConfigPools: &v2beta1.SpiderpoolPools{ + IPv4IPPool: []string{"test1"}, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "config2", + Namespace: "default", + }, + Spec: v2beta1.MultusCNIConfigSpec{ + CniType: ptr.To("ipvlan"), + IPVlanConfig: &v2beta1.SpiderIPvlanCniConfig{ + EnableRdma: true, + RdmaResourceName: "spidernet.io/rdma-resource2", + SpiderpoolConfigPools: &v2beta1.SpiderpoolPools{ + IPv4IPPool: []string{"test1"}, + }, + }, + }, + }, + }, + } + + err := podmanager.InjectPodNetwork(pod, multusConfigs) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("cniType ipvlan is not consistent with macvlan")) + }) + }) + + Describe("Utils", func() { + Context("initPodMutatingWebhook", func() { + It("should properly initialize pod mutating webhook with full configuration", func() { + // Prepare test data + testCABundle := []byte("test-ca-bundle") + fromWebhook := admissionregistrationv1.MutatingWebhook{ + AdmissionReviewVersions: []string{"v1", "v1beta1"}, + ClientConfig: admissionregistrationv1.WebhookClientConfig{ + CABundle: testCABundle, + Service: &admissionregistrationv1.ServiceReference{ + Name: "test-service", + Namespace: "test-namespace", + Port: ptr.To(int32(443)), + }, + }, + } + + // Call the function under test + podWebhookNamespaceInclude := []string{ + "test", + } + result := podmanager.InitPodMutatingWebhook(fromWebhook, podWebhookNamespaceInclude) + + // Verify results + Expect(result.Name).To(Equal(constant.PodMutatingWebhookName)) + Expect(result.AdmissionReviewVersions).To(Equal(fromWebhook.AdmissionReviewVersions)) + Expect(*result.FailurePolicy).To(Equal(admissionregistrationv1.Fail)) + + // Verify NamespaceSelector + Expect(result.NamespaceSelector).NotTo(BeNil()) + Expect(result.NamespaceSelector.MatchExpressions).To(HaveLen(2)) + Expect(result.NamespaceSelector.MatchExpressions[0].Key).To(Equal(corev1.LabelMetadataName)) + Expect(result.NamespaceSelector.MatchExpressions[0].Operator).To(Equal(metav1.LabelSelectorOpNotIn)) + Expect(result.NamespaceSelector.MatchExpressions[1].Key).To(Equal(corev1.LabelMetadataName)) + Expect(result.NamespaceSelector.MatchExpressions[1].Operator).To(Equal(metav1.LabelSelectorOpIn)) + + // Verify ClientConfig + 
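+				// (CABundle and Service come from the template webhook; only the
+				// request Path is overridden for the pod defaulter)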
+				Expect(result.ClientConfig.CABundle).To(Equal(testCABundle))
+				Expect(result.ClientConfig.Service).NotTo(BeNil())
+				Expect(result.ClientConfig.Service.Name).To(Equal("test-service"))
+				Expect(result.ClientConfig.Service.Namespace).To(Equal("test-namespace"))
+				Expect(*result.ClientConfig.Service.Port).To(Equal(int32(443)))
+				Expect(*result.ClientConfig.Service.Path).To(Equal("/mutate--v1-pod"))
+
+				// Verify Rules
+				Expect(result.Rules).To(HaveLen(1))
+				Expect(result.Rules[0].Operations).To(ConsistOf(
+					admissionregistrationv1.Create,
+					admissionregistrationv1.Update,
+				))
+				Expect(result.Rules[0].Rule.APIGroups).To(Equal([]string{""}))
+				Expect(result.Rules[0].Rule.APIVersions).To(Equal([]string{"v1"}))
+				Expect(result.Rules[0].Rule.Resources).To(Equal([]string{"pods"}))
+
+				// Verify SideEffects
+				Expect(*result.SideEffects).To(Equal(admissionregistrationv1.SideEffectClassNone))
+			})
+
+			It("should properly initialize webhook without Service configuration", func() {
+				// Prepare test data
+				fromWebhook := admissionregistrationv1.MutatingWebhook{
+					AdmissionReviewVersions: []string{"v1"},
+					ClientConfig: admissionregistrationv1.WebhookClientConfig{
+						CABundle: []byte("test-ca-bundle"),
+					},
+				}
+
+				// Call the function under test
+				result := podmanager.InitPodMutatingWebhook(fromWebhook, []string{})
+
+				// Verify results
+				Expect(result.ClientConfig.Service).To(BeNil())
+				Expect(result.Name).To(Equal(constant.PodMutatingWebhookName))
+			})
+		})
+	})
+
+	Describe("AddPodMutatingWebhook", func() {
+		var (
+			fakeClient                 *fake.Clientset
+			webhookName                string
+			existingConfig             *admissionregistrationv1.MutatingWebhookConfiguration
+			podWebhookNamespaceInclude []string
+		)
+
+		BeforeEach(func() {
+			// Initialize test variables
+			fakeClient = fake.NewSimpleClientset()
+			webhookName = "test-webhook-config"
+			podWebhookNamespaceInclude = []string{
+				"test",
+			}
+
+			// Create a basic webhook configuration
+			existingConfig = &admissionregistrationv1.MutatingWebhookConfiguration{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: webhookName,
+				},
+				Webhooks: []admissionregistrationv1.MutatingWebhook{
+					{
+						Name: "existing-webhook",
+						ClientConfig: admissionregistrationv1.WebhookClientConfig{
+							CABundle: []byte("test-ca-bundle"),
+							Service: &admissionregistrationv1.ServiceReference{
+								Name:      "webhook-service",
+								Namespace: "default",
+								Port:      ptr.To(int32(443)),
+							},
+						},
+						AdmissionReviewVersions: []string{"v1"},
+					},
+				},
+			}
+		})
+
+		Context("when adding pod mutating webhook", func() {
+			It("should successfully add webhook when it doesn't exist", func() {
+				// Create initial webhook configuration
+				_, err := fakeClient.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(
+					context.TODO(), existingConfig, metav1.CreateOptions{})
+				Expect(err).NotTo(HaveOccurred())
+
+				// Call the function under test
+				err = podmanager.AddPodMutatingWebhook(fakeClient.AdmissionregistrationV1(), webhookName, podWebhookNamespaceInclude)
+				Expect(err).NotTo(HaveOccurred())
+
+				// Verify the webhook was added
+				updatedConfig, err := fakeClient.AdmissionregistrationV1().MutatingWebhookConfigurations().Get(
+					context.TODO(), webhookName, metav1.GetOptions{})
+				Expect(err).NotTo(HaveOccurred())
+				Expect(updatedConfig.Webhooks).To(HaveLen(2))
+				Expect(updatedConfig.Webhooks[1].Name).To(Equal(constant.PodMutatingWebhookName))
+			})
+
+			It("should not add webhook when it already exists", func() {
+				// Add pod webhook to initial configuration
+				podWebhook := podmanager.InitPodMutatingWebhook(existingConfig.Webhooks[0], podWebhookNamespaceInclude)
+				existingConfig.Webhooks = append(existingConfig.Webhooks, podWebhook)
+
+				// Create webhook configuration with pod webhook
+				_, err := fakeClient.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(
+					context.TODO(), existingConfig, metav1.CreateOptions{})
+				Expect(err).NotTo(HaveOccurred())
+
+				// Call the function under test
+				err = podmanager.AddPodMutatingWebhook(fakeClient.AdmissionregistrationV1(), webhookName, podWebhookNamespaceInclude)
+				Expect(err).NotTo(HaveOccurred())
+
+				// Verify no additional webhook was added
+				updatedConfig, err := fakeClient.AdmissionregistrationV1().MutatingWebhookConfigurations().Get(
+					context.TODO(), webhookName, metav1.GetOptions{})
+				Expect(err).NotTo(HaveOccurred())
+				Expect(updatedConfig.Webhooks).To(HaveLen(2))
+			})
+
+			It("should return error when webhook configuration doesn't exist", func() {
+				// Call the function under test without creating webhook configuration
+				err := podmanager.AddPodMutatingWebhook(fakeClient.AdmissionregistrationV1(), webhookName, podWebhookNamespaceInclude)
+				Expect(err).To(HaveOccurred())
+				Expect(err.Error()).To(ContainSubstring("failed to get MutatingWebhookConfiguration"))
+			})
+
+			It("should return error when webhook configuration is empty", func() {
+				// Create empty webhook configuration
+				emptyConfig := &admissionregistrationv1.MutatingWebhookConfiguration{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: webhookName,
+					},
+				}
+				_, err := fakeClient.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(
+					context.TODO(), emptyConfig, metav1.CreateOptions{})
+				Expect(err).NotTo(HaveOccurred())
+
+				// Call the function under test
+				err = podmanager.AddPodMutatingWebhook(fakeClient.AdmissionregistrationV1(), webhookName, podWebhookNamespaceInclude)
+				Expect(err).To(HaveOccurred())
+				Expect(err.Error()).To(ContainSubstring("no mutating webhooks found"))
+			})
+		})
+	})
+
+	var _ = Describe("RemovePodMutatingWebhook", func() {
+		var (
+			// Mock admission client
+			fakeClient *fake.Clientset
+			// Test webhook name
+			webhookName string
+		)
+
+		BeforeEach(func() {
+			// Initialize test variables
+			fakeClient = fake.NewSimpleClientset()
+			webhookName = "test-webhook-config"
+		})
+
+		Context("when removing pod mutating webhook", func() {
+			It("should successfully remove the webhook if it exists", func() {
+				// Prepare existing webhook configuration
+				existingWebhooks := []admissionregistrationv1.MutatingWebhook{
+					{Name: constant.PodMutatingWebhookName},
+					{Name: "other-webhook"},
+				}
+
+				mwc := &admissionregistrationv1.MutatingWebhookConfiguration{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: webhookName,
+					},
+					Webhooks: existingWebhooks,
+				}
+
+				// Setup mock behavior
+				_, err := fakeClient.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(
+					context.TODO(), mwc, metav1.CreateOptions{})
+				Expect(err).NotTo(HaveOccurred())
+
+				// Execute test
+				err = podmanager.RemovePodMutatingWebhook(fakeClient.AdmissionregistrationV1(), webhookName)
+				Expect(err).NotTo(HaveOccurred())
+			})
+
+			It("should return nil if webhook doesn't exist", func() {
+				// Prepare existing webhook configuration
+				existingWebhooks := []admissionregistrationv1.MutatingWebhook{
+					{Name: "other-webhook"},
+				}
+
+				mwc := &admissionregistrationv1.MutatingWebhookConfiguration{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: webhookName,
+					},
+					Webhooks: existingWebhooks,
+				}
+
+				// Setup mock behavior
+				_, err := 
fakeClient.AdmissionregistrationV1().MutatingWebhookConfigurations().Create( + context.TODO(), mwc, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + // Execute test + err = podmanager.RemovePodMutatingWebhook(fakeClient.AdmissionregistrationV1(), webhookName) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should return error when getting webhook configuration fails", func() { + err := podmanager.RemovePodMutatingWebhook(fakeClient.AdmissionregistrationV1(), webhookName) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("not found")) + }) + }) + }) }) diff --git a/pkg/types/k8s.go b/pkg/types/k8s.go index c759d47ada..f034de56ba 100644 --- a/pkg/types/k8s.go +++ b/pkg/types/k8s.go @@ -109,20 +109,20 @@ type AutoPoolProperty struct { } type SpiderpoolConfigmapConfig struct { - IpamUnixSocketPath string `yaml:"ipamUnixSocketPath"` - EnableIPv4 bool `yaml:"enableIPv4"` - EnableIPv6 bool `yaml:"enableIPv6"` - TuneSysctlConfig bool `yaml:"tuneSysctlConfig"` - EnableStatefulSet bool `yaml:"enableStatefulSet"` - EnableKubevirtStaticIP bool `yaml:"enableKubevirtStaticIP"` - EnableSpiderSubnet bool `yaml:"enableSpiderSubnet"` - EnableAutoPoolForApplication bool `yaml:"enableAutoPoolForApplication"` - ClusterSubnetAutoPoolDefaultRedundantIPNumber int `yaml:"clusterSubnetAutoPoolDefaultRedundantIPNumber"` - DraConfig `yaml:"dra"` + IpamUnixSocketPath string `yaml:"ipamUnixSocketPath"` + EnableIPv4 bool `yaml:"enableIPv4"` + EnableIPv6 bool `yaml:"enableIPv6"` + TuneSysctlConfig bool `yaml:"tuneSysctlConfig"` + EnableStatefulSet bool `yaml:"enableStatefulSet"` + EnableKubevirtStaticIP bool `yaml:"enableKubevirtStaticIP"` + EnableSpiderSubnet bool `yaml:"enableSpiderSubnet"` + EnableAutoPoolForApplication bool `yaml:"enableAutoPoolForApplication"` + ClusterSubnetAutoPoolDefaultRedundantIPNumber int `yaml:"clusterSubnetAutoPoolDefaultRedundantIPNumber"` + PodResourceInjectConfig PodResourceInjectConfig `yaml:"podResourceInject"` } -type DraConfig struct { - DraEnabled bool `yaml:"enabled"` - DraCdiRootPath string `yaml:"cdiRootPath"` - DraHostDevicePath string `yaml:"hostDevicePath"` +type PodResourceInjectConfig struct { + Enabled bool `yaml:"enabled"` + NamespacesExclude []string `yaml:"namespacesExclude"` + NamespacesInclude []string `yaml:"namespacesInclude"` } diff --git a/test/Makefile b/test/Makefile index 357ca7e1eb..ea2c9af57f 100644 --- a/test/Makefile +++ b/test/Makefile @@ -227,7 +227,8 @@ setup_kurise: docker pull $${IMAGE} ; \ kind load docker-image $${IMAGE} --name $(E2E_CLUSTER_NAME); \ done; \ - helm install kruise openkruise/kruise --kubeconfig $(E2E_KUBECONFIG) --wait --debug --set manager.image.repository=$(E2E_OPENKRUISE_IMAGE) + helm upgrade --install kruise openkruise/kruise --wait --timeout 20m --debug --set manager.image.repository=$(E2E_OPENKRUISE_IMAGE) \ + --kubeconfig $(E2E_KUBECONFIG) || { KIND_CLUSTER_NAME=$(E2E_CLUSTER_NAME) ./scripts/debugEnv.sh $(E2E_KUBECONFIG) "detail" "$(E2E_LOG_FILE)" ; exit 1 ; } ; \ .PHONY: setup_spiderpool setup_spiderpool: @@ -254,11 +255,6 @@ setup_spiderpool: else \ HELM_OPTION+=" --set coordinator.enabled=false " ; \ fi ; \ - # dra is only enable in v1.28.0 - if [ $$(echo -e "$(E2E_KIND_IMAGE_TAG)\nv1.29.0" | sort -V | tail -n1) == "$(E2E_KIND_IMAGE_TAG)" ] && [ "${E2E_SPIDERPOOL_ENABLE_DRA}" == "true" ]; then \ - HELM_OPTION+=" --set dra.enabled=true " ; \ - HELM_OPTION+=" --set dra.hostDevicePath=$(E2E_SPIDERPOOL_DRA_SOLIBRARY_PATH) " ; \ - fi ; \ HELM_OPTION+=" --set global.tuneSysctlConfig=true " ; 
\ HELM_OPTION+=" --set multus.multusCNI.install=true " ; \ HELM_OPTION+=" --set multus.multusCNI.uninstall=true " ; \ @@ -345,6 +341,7 @@ setup_spiderpool: HELM_OPTION+=" --set clusterDefaultPool.ipv4IPRanges={$${ipv4_ip_range}} --set clusterDefaultPool.ipv6IPRanges={$${ipv6_ip_range}}" ; \ HELM_OPTION+=" --set ipam.enableIPv4=true --set ipam.enableIPv6=true" ; \ fi ; \ + HELM_OPTION+=" --set spiderpoolController.podResourceInject.enabled=true " ; \ HELM_OPTION+=" --set spiderpoolAgent.prometheus.enabled=true --set spiderpoolController.prometheus.enabled=true " ; \ HELM_OPTION+=" --set spiderpoolAgent.prometheus.enabledDebugMetric=true --set spiderpoolController.prometheus.enabledDebugMetric=true " ; \ if [ -n "$(PYROSCOPE_LOCAL_PORT)" ] ; then \ @@ -369,6 +366,7 @@ setup_spiderpool: --set spiderpoolController.image.registry="" \ --set spiderpoolController.image.repository=$(SPIDERPOOL_CONTROLLER_IMAGE_NAME) \ --set spiderpoolController.image.tag=$(E2E_SPIDERPOOL_TAG) \ + --set spiderpoolController.enablePodNetworkResourceInject=true \ --set spiderpoolInit.image.registry="" \ --set spiderpoolInit.image.repository=$(SPIDERPOOL_CONTROLLER_IMAGE_NAME) \ --set spiderpoolInit.image.tag=$(E2E_SPIDERPOOL_TAG) \ @@ -389,7 +387,7 @@ setup_spiderpool: -n $(RELEASE_NAMESPACE) \ $${HELM_OPTION} \ $(E2E_HELM_ADDITIONAL_OPTIONS) \ - --kubeconfig $(E2E_KUBECONFIG) || { KIND_CLUSTER_NAME=$(E2E_CLUSTER_NAME) ./scripts/debugEnv.sh $(E2E_KUBECONFIG) "detail" ; exit 1 ; } ; \ + --kubeconfig $(E2E_KUBECONFIG) || { KIND_CLUSTER_NAME=$(E2E_CLUSTER_NAME) ./scripts/debugEnv.sh $(E2E_KUBECONFIG) "detail" "$(E2E_LOG_FILE)" ; exit 1 ; } ; \ if [ "$(INSTALL_SRIOV)" == "true" ] ; then \ echo "label node for sriov operator " ; \ kubectl --kubeconfig $(E2E_KUBECONFIG) get node | sed '1d' | awk '{print $$1}' | xargs -n 1 -i kubectl --kubeconfig $(E2E_KUBECONFIG) label node {} node-role.kubernetes.io/worker="" ; \ @@ -434,6 +432,7 @@ upgrade_spiderpool: helm_upgrade_spiderpool: @echo -e "\033[35m [helm upgrade spiderpool] \033[0m" HELM_OPTION="";\ + kubectl get mutatingwebhookconfigurations spiderpool-controller -o yaml --kubeconfig $(E2E_KUBECONFIG) ;\ HELM_OPTION+=" --set spiderpoolController.replicas=1 " ; \ if [ "$(INSTALL_OVERLAY_CNI)" == "true" ]; then \ HELM_OPTION+=" --set multus.multusCNI.defaultCniCRName= " ; \ @@ -446,6 +445,7 @@ helm_upgrade_spiderpool: --set spiderpoolController.image.registry="" \ --set spiderpoolController.image.repository=$(SPIDERPOOL_CONTROLLER_IMAGE_NAME) \ --set spiderpoolController.image.tag=$(E2E_SPIDERPOOL_TAG) \ + --set spiderpoolController.podResourceInject.enabled=true \ --set spiderpoolInit.image.registry="" \ --set spiderpoolInit.image.repository=$(SPIDERPOOL_CONTROLLER_IMAGE_NAME) \ --set spiderpoolInit.image.tag=$(E2E_SPIDERPOOL_TAG) \ @@ -469,6 +469,7 @@ helm_upgrade_spiderpool: kubectl wait --for=condition=ready -l app.kubernetes.io/instance=spiderpool --timeout=300s pod -n $(RELEASE_NAMESPACE) --kubeconfig $(E2E_KUBECONFIG) || true; \ kubectl scale deploy -n $(RELEASE_NAMESPACE) -l app.kubernetes.io/component=spiderpool-controller --replicas=2 --kubeconfig $(E2E_KUBECONFIG); \ kubectl wait --for=condition=ready -l app.kubernetes.io/component=spiderpool-controller --timeout=300s pod -n $(RELEASE_NAMESPACE) --kubeconfig $(E2E_KUBECONFIG) || true; \ + kubectl get mutatingwebhookconfigurations spiderpool-controller -o yaml --kubeconfig $(E2E_KUBECONFIG) ;\ helm --kubeconfig $(E2E_KUBECONFIG) list -A ; \ .PHONY: clean diff --git a/test/doc/dra.md b/test/doc/dra.md deleted file 
mode 100644
index 7f0e112e14..0000000000
--- a/test/doc/dra.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# E2E Cases for DRA
-
-| Case ID | Title | Priority | Smoke | Status | Other |
-| ------- | --------------------------------------------------------------------------------- | -------- | ----- | ------ | ----- |
-| Q00001 | Creating a Pod to verify DRA if works while set rdmaAcc to true | p1 | true | done | |
-| Q00002 | Creating a Pod to verify DRA if works while set rdmaAcc to false | p1 | true | done | |
-| Q00003 | test dynamicNics with policy all | p3 | true | | |
-| Q00004 | create a pod with set defaultNic to nil, see the default cni what pod used is if the cluster default cni | p3 | true | | |
-| Q00005 | create a pod to testing the default route is from the secondaryNics | p3 | false | | |
-| Q00006 | create a pod to verify webhook auto inject the rdma resource to pod | p3 | false | | |
-| Q00007 | create a pod to verify host'nic schedule for secondaryNics | p3 | false | | |
diff --git a/test/doc/podwebhook.md b/test/doc/podwebhook.md
new file mode 100644
index 0000000000..e3c9ed63e9
--- /dev/null
+++ b/test/doc/podwebhook.md
@@ -0,0 +1,5 @@
+# E2E Cases for Pod Webhook
+
+| Case ID | Title | Priority | Smoke | Status | Other |
+| ------- | --------------------------------------------------------------------------------- | -------- | ----- | ------ | ----- |
+| H00001 | test that the pod webhook auto-injects network resources into the pod | p1 | true | done | |
diff --git a/test/doc/spidermultus.md b/test/doc/spidermultus.md
index 001d8c8fb7..e534742572 100644
--- a/test/doc/spidermultus.md
+++ b/test/doc/spidermultus.md
@@ -27,3 +27,4 @@
 | M00023 | set hostRPFilter and podRPFilter to a invalid value | p3 | | done | |
 | M00024 | verify the podMACPrefix filed | p3 | | done | |
 | M00025 | The custom net-attach-conf name from the annotation multus.spidernet.io/cr-name doesn't follow Kubernetes naming rules and can't be created. | p3 | | done | |
+| M00026 | check the coordinatorConfig: enableVethLinkLocalAddress works | p3 | | done | |
diff --git a/test/e2e/dra/dra_test.go b/test/e2e/dra/dra_test.go
deleted file mode 100644
index bf0be47707..0000000000
--- a/test/e2e/dra/dra_test.go
+++ /dev/null
@@ -1,298 +0,0 @@
-// Copyright 2024 Authors of spidernet-io
-// SPDX-License-Identifier: Apache-2.0
-package dra_test
-
-import (
-	"bytes"
-	"context"
-	"encoding/json"
-	"fmt"
-	"path"
-	"strings"
-	"time"
-
-	. "github.com/onsi/ginkgo/v2"
-	. 
"github.com/onsi/gomega" - "k8s.io/api/resource/v1alpha2" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/ptr" - - "github.com/spidernet-io/spiderpool/pkg/constant" - spiderpoolv2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" - "github.com/spidernet-io/spiderpool/pkg/types" - "github.com/spidernet-io/spiderpool/test/e2e/common" -) - -var _ = Describe("dra", Label("dra"), func() { - - Context("DRA Smoke test ", func() { - var v4PoolName, v6PoolName, namespace, depName, multusNadName, spiderClaimName string - - BeforeEach(func() { - // generate some test data - namespace = "ns-" + common.GenerateString(10, true) - depName = "dep-name-" + common.GenerateString(10, true) - multusNadName = "test-multus-" + common.GenerateString(10, true) - spiderClaimName = "spc-" + common.GenerateString(10, true) - - // create namespace and ippool - err := frame.CreateNamespaceUntilDefaultServiceAccountReady(namespace, common.ServiceAccountReadyTimeout) - Expect(err).NotTo(HaveOccurred()) - - Eventually(func() error { - var v4PoolObj, v6PoolObj *spiderpoolv2beta1.SpiderIPPool - if frame.Info.IpV4Enabled { - v4PoolName, v4PoolObj = common.GenerateExampleIpv4poolObject(1) - gateway := strings.Split(v4PoolObj.Spec.Subnet, "0/")[0] + "1" - v4PoolObj.Spec.Gateway = &gateway - err = common.CreateIppool(frame, v4PoolObj) - if err != nil { - GinkgoWriter.Printf("Failed to create v4 IPPool %v: %v \n", v4PoolName, err) - return err - } - } - if frame.Info.IpV6Enabled { - v6PoolName, v6PoolObj = common.GenerateExampleIpv6poolObject(1) - gateway := strings.Split(v6PoolObj.Spec.Subnet, "/")[0] + "1" - v6PoolObj.Spec.Gateway = &gateway - err = common.CreateIppool(frame, v6PoolObj) - if err != nil { - GinkgoWriter.Printf("Failed to create v6 IPPool %v: %v \n", v6PoolName, err) - return err - } - } - return nil - }).WithTimeout(time.Minute).WithPolling(time.Second * 3).Should(BeNil()) - - // Define multus cni NetworkAttachmentDefinition and create - nad := &spiderpoolv2beta1.SpiderMultusConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: multusNadName, - Namespace: namespace, - }, - Spec: spiderpoolv2beta1.MultusCNIConfigSpec{ - CniType: ptr.To(constant.MacvlanCNI), - MacvlanConfig: &spiderpoolv2beta1.SpiderMacvlanCniConfig{ - Master: []string{common.NIC1}, - VlanID: ptr.To(int32(100)), - }, - CoordinatorConfig: &spiderpoolv2beta1.CoordinatorSpec{ - PodDefaultRouteNIC: &common.NIC2, - }, - }, - } - Expect(frame.CreateSpiderMultusInstance(nad)).NotTo(HaveOccurred()) - - DeferCleanup(func() { - GinkgoWriter.Printf("delete spiderMultusConfig %v/%v. \n", namespace, multusNadName) - Expect(frame.DeleteSpiderMultusInstance(namespace, multusNadName)).NotTo(HaveOccurred()) - - GinkgoWriter.Printf("delete namespace %v. \n", namespace) - Expect(frame.DeleteNamespace(namespace)).NotTo(HaveOccurred()) - - if frame.Info.IpV4Enabled { - GinkgoWriter.Printf("delete v4 ippool %v. 
\n", v4PoolName) - Expect(common.DeleteIPPoolByName(frame, v6PoolName)).NotTo(HaveOccurred()) - } - - Expect( - common.DeleteSpiderClaimParameter(frame, spiderClaimName, namespace), - ).NotTo(HaveOccurred()) - }) - }) - - It("Creating a Pod to verify DRA if works while set rdmaAcc to true", Label("Q00001"), func() { - Expect(common.CreateSpiderClaimParameter(frame, &spiderpoolv2beta1.SpiderClaimParameter{ - ObjectMeta: metav1.ObjectMeta{ - Name: spiderClaimName, - Namespace: namespace, - // kind k8s v1.29.0 -> use containerd v1.7.1 -> use cdi version(v0.5.4) - // v0.5.4 don't support CDISpec version 0.6.0, so update the cdi version - // by the annotation - Annotations: map[string]string{ - constant.AnnoDraCdiVersion: "0.5.0", - }, - }, - Spec: spiderpoolv2beta1.ClaimParameterSpec{ - RdmaAcc: true, - }, - })).NotTo(HaveOccurred()) - - // create resourceclaimtemplate - Expect( - common.CreateResourceClaimTemplate(frame, &v1alpha2.ResourceClaimTemplate{ - ObjectMeta: metav1.ObjectMeta{ - Name: spiderClaimName, - Namespace: namespace, - }, - Spec: v1alpha2.ResourceClaimTemplateSpec{ - Spec: v1alpha2.ResourceClaimSpec{ - ResourceClassName: constant.DRADriverName, - ParametersRef: &v1alpha2.ResourceClaimParametersReference{ - APIGroup: constant.SpiderpoolAPIGroup, - Kind: constant.KindSpiderClaimParameter, - Name: spiderClaimName, - }, - }, - }, - })).NotTo(HaveOccurred()) - - podIppoolsAnno := types.AnnoPodIPPoolsValue{ - types.AnnoIPPoolItem{ - NIC: common.NIC1, - }, - types.AnnoIPPoolItem{ - NIC: common.NIC2, - }, - } - if frame.Info.IpV4Enabled { - podIppoolsAnno[0].IPv4Pools = []string{common.SpiderPoolIPv4PoolDefault} - podIppoolsAnno[1].IPv4Pools = []string{v4PoolName} - } - if frame.Info.IpV6Enabled { - podIppoolsAnno[0].IPv6Pools = []string{common.SpiderPoolIPv6PoolDefault} - podIppoolsAnno[1].IPv6Pools = []string{v6PoolName} - } - podAnnoMarshal, err := json.Marshal(podIppoolsAnno) - Expect(err).NotTo(HaveOccurred()) - var annotations = make(map[string]string) - annotations[common.MultusNetworks] = fmt.Sprintf("%s/%s", namespace, multusNadName) - annotations[constant.AnnoPodIPPools] = string(podAnnoMarshal) - deployObject := common.GenerateDraDeploymentYaml(depName, spiderClaimName, namespace, int32(1)) - deployObject.Spec.Template.Annotations = annotations - Expect(frame.CreateDeployment(deployObject)).NotTo(HaveOccurred()) - - ctx, cancel := context.WithTimeout(context.Background(), common.PodStartTimeout) - defer cancel() - depObject, err := frame.WaitDeploymentReady(depName, namespace, ctx) - Expect(err).NotTo(HaveOccurred(), "waiting for deploy ready failed: %v ", err) - podList, err := frame.GetPodListByLabel(depObject.Spec.Template.Labels) - Expect(err).NotTo(HaveOccurred(), "failed to get podList: %v ", err) - - cm, err := frame.GetConfigmap(common.SpiderPoolConfigmapName, common.SpiderPoolConfigmapNameSpace) - Expect(err).NotTo(HaveOccurred(), "failed to get spiderpool-conf configMap: %v") - - list := strings.Split(cm.Data["conf.yml"], "\n") - Expect(list).NotTo(BeEmpty()) - - GinkgoWriter.Printf("Got list: %v\n", list) - var draHostDevicePath string - for _, l := range list { - if strings.Contains(l, "hostDevicePath") { - GinkgoWriter.Printf("Got : %v\n", l) - res := strings.Split(l, " ") - Expect(len(res)).To(Equal(4)) - draHostDevicePath = res[3] - break - } - } - - Expect(draHostDevicePath).NotTo(BeEmpty()) - GinkgoWriter.Printf("Got draHostDevicePath: %v\n", draHostDevicePath) - var executeCommandResult []byte - soBaseName := path.Base(draHostDevicePath) - for _, pod := 
range podList.Items { - // check so if exist - checkSoCommand := "ls " + draHostDevicePath - _, err := frame.ExecCommandInPod(pod.Name, pod.Namespace, checkSoCommand, ctx) - Expect(err).NotTo(HaveOccurred(), "failed to check the dra so if ok to mount: %v", err) - - checkEnvComand := "printenv LD_PRELOAD" - executeCommandResult, err = frame.ExecCommandInPod(pod.Name, pod.Namespace, checkEnvComand, ctx) - Expect(err).NotTo(HaveOccurred(), "failed to check the dra so if ok to mount: %v", err) - - executeCommandResult = bytes.TrimSuffix(executeCommandResult, []byte("\n")) - Expect(string(executeCommandResult)).To(Equal(soBaseName), "unexpected result: %s", executeCommandResult) - } - }) - - It("Creating a Pod to verify DRA if works while set rdmaAcc to false", Label("Q00002"), func() { - Expect(common.CreateSpiderClaimParameter(frame, &spiderpoolv2beta1.SpiderClaimParameter{ - ObjectMeta: metav1.ObjectMeta{ - Name: spiderClaimName, - Namespace: namespace, - // kind k8s v1.29.0 -> use containerd v1.7.1 -> use cdi version(v0.5.4) - // v0.5.4 don't support CDISpec version 0.6.0, so update the cdi version - // by the annotation - Annotations: map[string]string{ - constant.AnnoDraCdiVersion: "0.5.0", - }, - }, - Spec: spiderpoolv2beta1.ClaimParameterSpec{ - RdmaAcc: false, - }, - })).NotTo(HaveOccurred()) - - // create resourceclaimtemplate - Expect( - common.CreateResourceClaimTemplate(frame, &v1alpha2.ResourceClaimTemplate{ - ObjectMeta: metav1.ObjectMeta{ - Name: spiderClaimName, - Namespace: namespace, - }, - Spec: v1alpha2.ResourceClaimTemplateSpec{ - Spec: v1alpha2.ResourceClaimSpec{ - ResourceClassName: constant.DRADriverName, - ParametersRef: &v1alpha2.ResourceClaimParametersReference{ - APIGroup: constant.SpiderpoolAPIGroup, - Kind: constant.KindSpiderClaimParameter, - Name: spiderClaimName, - }, - }, - }, - })).NotTo(HaveOccurred()) - - podIppoolsAnno := types.AnnoPodIPPoolsValue{ - types.AnnoIPPoolItem{ - NIC: common.NIC1, - }, - types.AnnoIPPoolItem{ - NIC: common.NIC2, - }, - } - if frame.Info.IpV4Enabled { - podIppoolsAnno[0].IPv4Pools = []string{common.SpiderPoolIPv4PoolDefault} - podIppoolsAnno[1].IPv4Pools = []string{v4PoolName} - } - if frame.Info.IpV6Enabled { - podIppoolsAnno[0].IPv6Pools = []string{common.SpiderPoolIPv6PoolDefault} - podIppoolsAnno[1].IPv6Pools = []string{v6PoolName} - } - podAnnoMarshal, err := json.Marshal(podIppoolsAnno) - Expect(err).NotTo(HaveOccurred()) - var annotations = make(map[string]string) - annotations[common.MultusNetworks] = fmt.Sprintf("%s/%s", namespace, multusNadName) - annotations[constant.AnnoPodIPPools] = string(podAnnoMarshal) - deployObject := common.GenerateDraDeploymentYaml(depName, spiderClaimName, namespace, int32(1)) - deployObject.Spec.Template.Annotations = annotations - Expect(frame.CreateDeployment(deployObject)).NotTo(HaveOccurred()) - - ctx, cancel := context.WithTimeout(context.Background(), common.PodStartTimeout) - defer cancel() - depObject, err := frame.WaitDeploymentReady(depName, namespace, ctx) - Expect(err).NotTo(HaveOccurred(), "waiting for deploy ready failed: %v ", err) - podList, err := frame.GetPodListByLabel(depObject.Spec.Template.Labels) - Expect(err).NotTo(HaveOccurred(), "failed to get podList: %v ", err) - - rcList, err := common.ListResourceClaim(frame) - Expect(err).NotTo(HaveOccurred(), "failed to get resourceclaim list: %v ", err) - - resourceClaimUidMap := make(map[string]struct{}, len(rcList.Items)) - for _, rc := range rcList.Items { - resourceClaimUidMap[string(rc.ObjectMeta.UID)] = struct{}{} - } - 
- GinkgoWriter.Printf("resourceClaimUidMap: %v\n", resourceClaimUidMap) - var executeCommandResult []byte - for _, pod := range podList.Items { - checkEnvComand := "printenv DRA_CLAIM_UID" - executeCommandResult, err = frame.ExecCommandInPod(pod.Name, pod.Namespace, checkEnvComand, ctx) - Expect(err).NotTo(HaveOccurred(), "failed to check the value of env DRA_CLAIM_UID: %v", err) - - executeCommandResult = bytes.TrimSuffix(executeCommandResult, []byte("\n")) - _, ok := resourceClaimUidMap[string(executeCommandResult)] - Expect(ok).To(BeTrue(), "the value of DRA_CLAIM_UID is not match any resourceclaim'uid.") - } - }) - }) -}) diff --git a/test/e2e/dra/dra_suite_test.go b/test/e2e/podwebhook/podwebhook_suite_test.go similarity index 62% rename from test/e2e/dra/dra_suite_test.go rename to test/e2e/podwebhook/podwebhook_suite_test.go index 0239629038..bcae769fc7 100644 --- a/test/e2e/dra/dra_suite_test.go +++ b/test/e2e/podwebhook/podwebhook_suite_test.go @@ -1,34 +1,29 @@ // Copyright 2024 Authors of spidernet-io // SPDX-License-Identifier: Apache-2.0 -package dra_test + +package podwebhook_test import ( "testing" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + e2e "github.com/spidernet-io/e2eframework/framework" spiderpool "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" - "github.com/spidernet-io/spiderpool/test/e2e/common" - "k8s.io/api/resource/v1alpha2" "k8s.io/apimachinery/pkg/runtime" ) -func TestDra(t *testing.T) { +func TestPodwebhook(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "dra Suite") + RunSpecs(t, "Podwebhook Suite") } var frame *e2e.Framework var _ = BeforeSuite(func() { - if !common.IsDRAEnabled() { - GinkgoWriter.Println("DRA feature is disabled. Skip") - Skip("DRA feature is disabled. Skip") - } - defer GinkgoRecover() var e error - frame, e = e2e.NewFramework(GinkgoT(), []func(*runtime.Scheme) error{spiderpool.AddToScheme, v1alpha2.AddToScheme}) + frame, e = e2e.NewFramework(GinkgoT(), []func(*runtime.Scheme) error{spiderpool.AddToScheme}) Expect(e).NotTo(HaveOccurred()) }) diff --git a/test/e2e/podwebhook/podwebhook_test.go b/test/e2e/podwebhook/podwebhook_test.go new file mode 100644 index 0000000000..3a999dfdc4 --- /dev/null +++ b/test/e2e/podwebhook/podwebhook_test.go @@ -0,0 +1,112 @@ +// Copyright 2024 Authors of spidernet-io +// SPDX-License-Identifier: Apache-2.0 + +package podwebhook_test + +import ( + "fmt" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/spidernet-io/spiderpool/pkg/constant" + "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" + "github.com/spidernet-io/spiderpool/test/e2e/common" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" +) + +var _ = Describe("Podwebhook", func() { + var namespace string + + BeforeEach(func() { + // create namespace + namespace = "ns-" + common.GenerateString(10, true) + err := frame.CreateNamespaceUntilDefaultServiceAccountReady(namespace, common.ServiceAccountReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + DeferCleanup(func() { + if CurrentSpecReport().Failed() { + GinkgoWriter.Println("If the use case fails, the cleanup step will be skipped") + return + } + + err := frame.DeleteNamespace(namespace) + Expect(err).NotTo(HaveOccurred(), "Failed to delete namespace %v") + }) + }) + + Context("Test inject pod network resources", func() { + It("Test inject pod network resources", Label("H00001"), func() { + // Define multus cni NetworkAttachmentDefinition and create + createNad := func(name string) *v2beta1.SpiderMultusConfig { + return &v2beta1.SpiderMultusConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{ + constant.AnnoPodResourceInject: "macvlan-rdma", + }, + }, + Spec: v2beta1.MultusCNIConfigSpec{ + CniType: ptr.To(constant.MacvlanCNI), + MacvlanConfig: &v2beta1.SpiderMacvlanCniConfig{ + Master: []string{common.NIC1}, + EnableRdma: true, + RdmaResourceName: "spidernet.io/rdma_resource" + "_" + name, + SpiderpoolConfigPools: &v2beta1.SpiderpoolPools{ + IPv4IPPool: []string{"test-ipv4-pool"}, + }, + }, + }, + } + } + + By("Create spiderMultusConfig: nad1 for testing") + Expect(frame.CreateSpiderMultusInstance(createNad("nad1"))).NotTo(HaveOccurred()) + By("Create spiderMultusConfig: nad2 for testing") + Expect(frame.CreateSpiderMultusInstance(createNad("nad2"))).NotTo(HaveOccurred()) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: namespace, + Annotations: map[string]string{ + constant.AnnoPodResourceInject: "macvlan-rdma", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "samplepod", + Image: "alpine", + ImagePullPolicy: "IfNotPresent", + Command: []string{"/bin/ash", "-c", "while true; do echo 'HTTP/1.1 200 OK Hello, World!' 
| nc -l -p 80; done"},
+							Ports: []corev1.ContainerPort{
+								{
+									Name:          "samplepod",
+									ContainerPort: 80,
+								},
+							},
+						},
+					},
+				},
+			}
+
+			By("Create Pod for testing network resources inject")
+			err := frame.CreatePod(pod)
+			Expect(err).NotTo(HaveOccurred())
+
+			By("Check pod network annotations and resources")
+			p, err := frame.GetPod(pod.Name, pod.Namespace)
+			Expect(err).NotTo(HaveOccurred(), "failed to get pod: %v", err)
+
+			GinkgoWriter.Printf("Pod annotations: %v\n", p.Annotations)
+			GinkgoWriter.Printf("Pod resources: %v\n", p.Spec.Containers[0].Resources.Limits)
+			Expect(p.Annotations[constant.MultusNetworkAttachmentAnnot]).To(Equal(fmt.Sprintf("%s/%s,%s/%s", namespace, "nad1", namespace, "nad2")))
+			Expect(p.Spec.Containers[0].Resources.Requests).To(HaveKey(corev1.ResourceName("spidernet.io/rdma_resource_nad1")))
+			Expect(p.Spec.Containers[0].Resources.Requests).To(HaveKey(corev1.ResourceName("spidernet.io/rdma_resource_nad2")))
+		})
+	})
+})
diff --git a/test/e2e/reliability/reliability_test.go b/test/e2e/reliability/reliability_test.go
index 58e5608630..0ff199a090 100644
--- a/test/e2e/reliability/reliability_test.go
+++ b/test/e2e/reliability/reliability_test.go
@@ -62,7 +62,7 @@ var _ = Describe("test reliability", Label("reliability"), Serial, func() {
 		// Define a set of daemonSets with Pods on each node to verify that the components on each node can provide services for the Pods.
 		dsName := "ds" + tools.RandomName()
-		dsYaml := common.GenerateExampleDaemonSetYaml(dsName, namespace)
+		dsYaml := common.GenerateExampleDaemonSetYaml(dsName, "kube-public")
 		podIppoolAnnoStr := common.GeneratePodIPPoolAnnotations(frame, common.NIC1, globalDefaultV4IppoolList, globalDefaultV6IppoolList)
 		dsYaml.Spec.Template.Annotations = map[string]string{constant.AnnoPodIPPool: podIppoolAnnoStr}
@@ -126,7 +126,7 @@ var _ = Describe("test reliability", Label("reliability"), Serial, func() {
 			return err
 		}
-		if err := frame.DeleteDaemonSet(dsName, namespace); err != nil {
+		if err := frame.DeleteDaemonSet(dsName, "kube-public"); err != nil {
 			return err
 		}
diff --git a/test/e2e/spidermultus/spidermultus_test.go b/test/e2e/spidermultus/spidermultus_test.go
index d4b727ce19..de5750817b 100644
--- a/test/e2e/spidermultus/spidermultus_test.go
+++ b/test/e2e/spidermultus/spidermultus_test.go
@@ -918,6 +918,31 @@ var _ = Describe("test spidermultus", Label("SpiderMultusConfig"), func() {
 		Expect(string(data)).To(Equal("4096\n"), "net.core.somaxconn: %s", data)
 	})
+	It("check the enableVethLinkLocalAddress works", Label("M00026"), func() {
+		// create a pod
+		name := "veth-address-test"
+		var annotations = make(map[string]string)
+		annotations[common.MultusDefaultNetwork] = fmt.Sprintf("%s/%s", common.MultusNs, common.MacvlanUnderlayVlan0)
+		deployObject := common.GenerateExampleDeploymentYaml(name, namespace, int32(1))
+		deployObject.Spec.Template.Annotations = annotations
+		Expect(frame.CreateDeployment(deployObject)).NotTo(HaveOccurred())
+
+		ctx, cancel := context.WithTimeout(context.Background(), common.PodStartTimeout)
+		defer cancel()
+
+		depObject, err := frame.WaitDeploymentReady(name, namespace, ctx)
+		Expect(err).NotTo(HaveOccurred(), "waiting for deploy ready failed: %v ", err)
+		podList, err := frame.GetPodListByLabel(depObject.Spec.Template.Labels)
+		Expect(err).NotTo(HaveOccurred(), "failed to get podList: %v ", err)
+
+		commandString := "ip a show veth0 | grep 169.254.200.1 &> /dev/null"
+		ctx, cancel = context.WithTimeout(context.Background(), common.ExecCommandTimeout)
+		defer cancel()
+ + _, err = frame.ExecCommandInPod(podList.Items[0].Name, podList.Items[0].Namespace, commandString, ctx) + Expect(err).NotTo(HaveOccurred(), "failed to execute command, err: %v ", err) + }) + It("verify the podMACPrefix field", Label("M00024"), func() { + smcName := "test-multus-" + common.GenerateString(10, true) + smc := &spiderpoolv2beta1.SpiderMultusConfig{ diff --git a/test/scripts/install-default-cni.sh b/test/scripts/install-default-cni.sh index 35f7ad0e18..c776853dc2 100755 --- a/test/scripts/install-default-cni.sh +++ b/test/scripts/install-default-cni.sh @@ -134,7 +134,8 @@ function install_calico() { echo "the value of E2E_IP_FAMILY: ipv4 or ipv6 or dual" exit 1 esac - kubectl patch felixconfigurations.crd.projectcalico.org default --type='merge' -p '{"spec":{"chainInsertMode":"Append"}}' || { echo "failed to patch calico chainInsertMode"; exit 1; } + # there is no default felixconfigurations.crd.projectcalico.org in the latest calico version (https://github.com/projectcalico/calico/releases/tag/v3.29.0) + kubectl patch felixconfigurations.crd.projectcalico.org default --type='merge' -p '{"spec":{"chainInsertMode":"Append"}}' || true # restart calico pod kubectl -n kube-system delete pod -l k8s-app=calico-node --force --grace-period=0 && sleep 3 diff --git a/test/scripts/install-multus.sh b/test/scripts/install-multus.sh index 6eebc0e9e2..092be8a1d1 100755 --- a/test/scripts/install-multus.sh +++ b/test/scripts/install-multus.sh @@ -68,6 +68,7 @@ spec: ipv6: [<>] coordinator: mode: "<>" + vethLinkAddress: <> ' OVS_CR_TEMPLATE=' @@ -150,6 +151,7 @@ spec: | sed 's?<>?0?g' \ | sed 's?<>?'""${DEFAULT_IPV4_IPPOOLS}""'?g' \ | sed 's?<>?'""${DEFAULT_IPV6_IPPOOLS}""'?g' \ + | sed 's?<>?169.254.100.1?g' \ | kubectl apply --kubeconfig ${E2E_KUBECONFIG} -f - echo "${MACVLAN_CR_TEMPLATE}" \ @@ -161,6 +163,7 @@ spec: | sed 's?<>?100?g' \ | sed 's?<>?'""${VLAN100_IPV4_IPPOOLS}""'?g' \ | sed 's?<>?'""${VLAN100_IPV6_IPPOOLS}""'?g' \ + | sed 's?<>?""?g' \ | kubectl apply --kubeconfig ${E2E_KUBECONFIG} -f - echo "${MACVLAN_CR_TEMPLATE}" \ @@ -172,6 +175,7 @@ spec: | sed 's?<>?200?g' \ | sed 's?<>?'""${VLAN200_IPV4_IPPOOLS}""'?g' \ | sed 's?<>?'""${VLAN200_IPV6_IPPOOLS}""'?g' \ + | sed 's?<>?""?g' \ | kubectl apply --kubeconfig ${E2E_KUBECONFIG} -f - if [ "${INSTALL_OVS}" == "true" ] ; then @@ -378,6 +382,7 @@ EOF kubectl wait --for=condition=ready -l app.kubernetes.io/component=spiderpool-agent --timeout=100s pod -n kube-system --kubeconfig ${E2E_KUBECONFIG} || \ ( kubectl get pod -n kube-system --kubeconfig ${E2E_KUBECONFIG} ; \ kubectl logs -n kube-system -l app.kubernetes.io/component=spiderpool-agent --kubeconfig ${E2E_KUBECONFIG} ; \ + kubectl describe pod -n kube-system -l app.kubernetes.io/component=spiderpool-agent --kubeconfig ${E2E_KUBECONFIG} ; \ kubectl logs -n kube-system -l job-name=spiderpool-init --kubeconfig ${E2E_KUBECONFIG} ; exit 1 ) Install::MultusCR diff --git a/test/scripts/install-ovs.sh b/test/scripts/install-ovs.sh index c24bffaf58..5be9e212a4 100644 --- a/test/scripts/install-ovs.sh +++ b/test/scripts/install-ovs.sh @@ -66,21 +66,24 @@ for NODE in $KIND_NODES; do install_openvswitch() { for attempt in {1..5}; do echo "Attempt $attempt to install openvswitch on ${NODE}..." - docker exec ${NODE} apt-get update > /dev/null - docker exec ${NODE} apt-get install -y apt-utils > /dev/null - docker exec ${NODE} apt-get install -y openvswitch-switch > /dev/null
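+ # apt-get inside the kind nodes can fail intermittently in CI, so each step is + # retried with a backoff instead of aborting the script on the first error.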
+ if ! docker exec ${NODE} apt-get update > /dev/null; then + echo "Failed to update package list on ${NODE}, retrying in 10s..." + sleep 10 + continue + fi - if [[ $? -eq 0 ]]; then - echo "Openvswitch installed successfully on ${NODE}" - return 0 + if ! docker exec ${NODE} apt-get install -y openvswitch-switch > /dev/null; then + echo "Failed to install openvswitch on ${NODE}, retrying in 10s..." + sleep 10 + continue fi - - echo "Failed to install openvswitch on ${NODE}, retrying in 10s..." - sleep 10 + + echo "Successfully installed openvswitch on ${NODE}" + return 0 done - - echo "Error: Failed to install openvswitch on ${NODE} after 5 attempts" - exit 1 + + echo "Error: Failed to install openvswitch on ${NODE} after 5 attempts." >&2 + return 1 } echo "=========install openvswitch" diff --git a/vendor/github.com/opencontainers/runtime-spec/LICENSE b/vendor/github.com/opencontainers/runtime-spec/LICENSE deleted file mode 100644 index bdc403653e..0000000000 --- a/vendor/github.com/opencontainers/runtime-spec/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 The Linux Foundation. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go deleted file mode 100644 index 4e7717d53f..0000000000 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go +++ /dev/null @@ -1,879 +0,0 @@ -package specs - -import "os" - -// Spec is the base configuration for the container. -type Spec struct { - // Version of the Open Container Initiative Runtime Specification with which the bundle complies. - Version string `json:"ociVersion"` - // Process configures the container process. - Process *Process `json:"process,omitempty"` - // Root configures the container's root filesystem. - Root *Root `json:"root,omitempty"` - // Hostname configures the container's hostname. - Hostname string `json:"hostname,omitempty"` - // Domainname configures the container's domainname. - Domainname string `json:"domainname,omitempty"` - // Mounts configures additional mounts (on top of Root). - Mounts []Mount `json:"mounts,omitempty"` - // Hooks configures callbacks for container lifecycle events. - Hooks *Hooks `json:"hooks,omitempty" platform:"linux,solaris,zos"` - // Annotations contains arbitrary metadata for the container. - Annotations map[string]string `json:"annotations,omitempty"` - - // Linux is platform-specific configuration for Linux based containers. - Linux *Linux `json:"linux,omitempty" platform:"linux"` - // Solaris is platform-specific configuration for Solaris based containers. 
- Solaris *Solaris `json:"solaris,omitempty" platform:"solaris"` - // Windows is platform-specific configuration for Windows based containers. - Windows *Windows `json:"windows,omitempty" platform:"windows"` - // VM specifies configuration for virtual-machine-based containers. - VM *VM `json:"vm,omitempty" platform:"vm"` - // ZOS is platform-specific configuration for z/OS based containers. - ZOS *ZOS `json:"zos,omitempty" platform:"zos"` -} - -// Scheduler represents the scheduling attributes for a process. It is based on -// the Linux sched_setattr(2) syscall. -type Scheduler struct { - // Policy represents the scheduling policy (e.g., SCHED_FIFO, SCHED_RR, SCHED_OTHER). - Policy LinuxSchedulerPolicy `json:"policy"` - - // Nice is the nice value for the process, which affects its priority. - Nice int32 `json:"nice,omitempty"` - - // Priority represents the static priority of the process. - Priority int32 `json:"priority,omitempty"` - - // Flags is an array of scheduling flags. - Flags []LinuxSchedulerFlag `json:"flags,omitempty"` - - // The following ones are used by the DEADLINE scheduler. - - // Runtime is the amount of time in nanoseconds during which the process - // is allowed to run in a given period. - Runtime uint64 `json:"runtime,omitempty"` - - // Deadline is the absolute deadline for the process to complete its execution. - Deadline uint64 `json:"deadline,omitempty"` - - // Period is the length of the period in nanoseconds used for determining the process runtime. - Period uint64 `json:"period,omitempty"` -} - -// Process contains information to start a specific application inside the container. -type Process struct { - // Terminal creates an interactive terminal for the container. - Terminal bool `json:"terminal,omitempty"` - // ConsoleSize specifies the size of the console. - ConsoleSize *Box `json:"consoleSize,omitempty"` - // User specifies user information for the process. - User User `json:"user"` - // Args specifies the binary and arguments for the application to execute. - Args []string `json:"args,omitempty"` - // CommandLine specifies the full command line for the application to execute on Windows. - CommandLine string `json:"commandLine,omitempty" platform:"windows"` - // Env populates the process environment for the process. - Env []string `json:"env,omitempty"` - // Cwd is the current working directory for the process and must be - // relative to the container's root. - Cwd string `json:"cwd"` - // Capabilities are Linux capabilities that are kept for the process. - Capabilities *LinuxCapabilities `json:"capabilities,omitempty" platform:"linux"` - // Rlimits specifies rlimit options to apply to the process. - Rlimits []POSIXRlimit `json:"rlimits,omitempty" platform:"linux,solaris,zos"` - // NoNewPrivileges controls whether additional privileges could be gained by processes in the container. - NoNewPrivileges bool `json:"noNewPrivileges,omitempty" platform:"linux"` - // ApparmorProfile specifies the apparmor profile for the container. - ApparmorProfile string `json:"apparmorProfile,omitempty" platform:"linux"` - // Specify an oom_score_adj for the container. - OOMScoreAdj *int `json:"oomScoreAdj,omitempty" platform:"linux"` - // Scheduler specifies the scheduling attributes for a process - Scheduler *Scheduler `json:"scheduler,omitempty" platform:"linux"` - // SelinuxLabel specifies the selinux context that the container process is run as. 
- SelinuxLabel string `json:"selinuxLabel,omitempty" platform:"linux"` - // IOPriority contains the I/O priority settings for the cgroup. - IOPriority *LinuxIOPriority `json:"ioPriority,omitempty" platform:"linux"` -} - -// LinuxCapabilities specifies the list of allowed capabilities that are kept for a process. -// http://man7.org/linux/man-pages/man7/capabilities.7.html -type LinuxCapabilities struct { - // Bounding is the set of capabilities checked by the kernel. - Bounding []string `json:"bounding,omitempty" platform:"linux"` - // Effective is the set of capabilities checked by the kernel. - Effective []string `json:"effective,omitempty" platform:"linux"` - // Inheritable is the capabilities preserved across execve. - Inheritable []string `json:"inheritable,omitempty" platform:"linux"` - // Permitted is the limiting superset for effective capabilities. - Permitted []string `json:"permitted,omitempty" platform:"linux"` - // Ambient is the ambient set of capabilities that are kept. - Ambient []string `json:"ambient,omitempty" platform:"linux"` -} - -// IOPriority represents I/O priority settings for the container's processes within the process group. -type LinuxIOPriority struct { - Class IOPriorityClass `json:"class"` - Priority int `json:"priority"` -} - -// IOPriorityClass represents an I/O scheduling class. -type IOPriorityClass string - -// Possible values for IOPriorityClass. -const ( - IOPRIO_CLASS_RT IOPriorityClass = "IOPRIO_CLASS_RT" - IOPRIO_CLASS_BE IOPriorityClass = "IOPRIO_CLASS_BE" - IOPRIO_CLASS_IDLE IOPriorityClass = "IOPRIO_CLASS_IDLE" -) - -// Box specifies dimensions of a rectangle. Used for specifying the size of a console. -type Box struct { - // Height is the vertical dimension of a box. - Height uint `json:"height"` - // Width is the horizontal dimension of a box. - Width uint `json:"width"` -} - -// User specifies specific user (and group) information for the container process. -type User struct { - // UID is the user id. - UID uint32 `json:"uid" platform:"linux,solaris,zos"` - // GID is the group id. - GID uint32 `json:"gid" platform:"linux,solaris,zos"` - // Umask is the umask for the init process. - Umask *uint32 `json:"umask,omitempty" platform:"linux,solaris,zos"` - // AdditionalGids are additional group ids set for the container's process. - AdditionalGids []uint32 `json:"additionalGids,omitempty" platform:"linux,solaris"` - // Username is the user name. - Username string `json:"username,omitempty" platform:"windows"` -} - -// Root contains information about the container's root filesystem on the host. -type Root struct { - // Path is the absolute path to the container's root filesystem. - Path string `json:"path"` - // Readonly makes the root filesystem for the container readonly before the process is executed. - Readonly bool `json:"readonly,omitempty"` -} - -// Mount specifies a mount for a container. -type Mount struct { - // Destination is the absolute path where the mount will be placed in the container. - Destination string `json:"destination"` - // Type specifies the mount kind. - Type string `json:"type,omitempty" platform:"linux,solaris,zos"` - // Source specifies the source path of the mount. - Source string `json:"source,omitempty"` - // Options are fstab style mount options. - Options []string `json:"options,omitempty"` - - // UID/GID mappings used for changing file owners w/o calling chown, fs should support it. - // Every mount point could have its own mapping. 
- UIDMappings []LinuxIDMapping `json:"uidMappings,omitempty" platform:"linux"` - GIDMappings []LinuxIDMapping `json:"gidMappings,omitempty" platform:"linux"` -} - -// Hook specifies a command that is run at a particular event in the lifecycle of a container -type Hook struct { - Path string `json:"path"` - Args []string `json:"args,omitempty"` - Env []string `json:"env,omitempty"` - Timeout *int `json:"timeout,omitempty"` -} - -// Hooks specifies a command that is run in the container at a particular event in the lifecycle of a container -// Hooks for container setup and teardown -type Hooks struct { - // Prestart is Deprecated. Prestart is a list of hooks to be run before the container process is executed. - // It is called in the Runtime Namespace - Prestart []Hook `json:"prestart,omitempty"` - // CreateRuntime is a list of hooks to be run after the container has been created but before pivot_root or any equivalent operation has been called - // It is called in the Runtime Namespace - CreateRuntime []Hook `json:"createRuntime,omitempty"` - // CreateContainer is a list of hooks to be run after the container has been created but before pivot_root or any equivalent operation has been called - // It is called in the Container Namespace - CreateContainer []Hook `json:"createContainer,omitempty"` - // StartContainer is a list of hooks to be run after the start operation is called but before the container process is started - // It is called in the Container Namespace - StartContainer []Hook `json:"startContainer,omitempty"` - // Poststart is a list of hooks to be run after the container process is started. - // It is called in the Runtime Namespace - Poststart []Hook `json:"poststart,omitempty"` - // Poststop is a list of hooks to be run after the container process exits. - // It is called in the Runtime Namespace - Poststop []Hook `json:"poststop,omitempty"` -} - -// Linux contains platform-specific configuration for Linux based containers. -type Linux struct { - // UIDMapping specifies user mappings for supporting user namespaces. - UIDMappings []LinuxIDMapping `json:"uidMappings,omitempty"` - // GIDMapping specifies group mappings for supporting user namespaces. - GIDMappings []LinuxIDMapping `json:"gidMappings,omitempty"` - // Sysctl are a set of key value pairs that are set for the container on start - Sysctl map[string]string `json:"sysctl,omitempty"` - // Resources contain cgroup information for handling resource constraints - // for the container - Resources *LinuxResources `json:"resources,omitempty"` - // CgroupsPath specifies the path to cgroups that are created and/or joined by the container. - // The path is expected to be relative to the cgroups mountpoint. - // If resources are specified, the cgroups at CgroupsPath will be updated based on resources. - CgroupsPath string `json:"cgroupsPath,omitempty"` - // Namespaces contains the namespaces that are created and/or joined by the container - Namespaces []LinuxNamespace `json:"namespaces,omitempty"` - // Devices are a list of device nodes that are created for the container - Devices []LinuxDevice `json:"devices,omitempty"` - // Seccomp specifies the seccomp security settings for the container. - Seccomp *LinuxSeccomp `json:"seccomp,omitempty"` - // RootfsPropagation is the rootfs mount propagation mode for the container. - RootfsPropagation string `json:"rootfsPropagation,omitempty"` - // MaskedPaths masks over the provided paths inside the container. 
- MaskedPaths []string `json:"maskedPaths,omitempty"` - // ReadonlyPaths sets the provided paths as RO inside the container. - ReadonlyPaths []string `json:"readonlyPaths,omitempty"` - // MountLabel specifies the selinux context for the mounts in the container. - MountLabel string `json:"mountLabel,omitempty"` - // IntelRdt contains Intel Resource Director Technology (RDT) information for - // handling resource constraints and monitoring metrics (e.g., L3 cache, memory bandwidth) for the container - IntelRdt *LinuxIntelRdt `json:"intelRdt,omitempty"` - // Personality contains configuration for the Linux personality syscall - Personality *LinuxPersonality `json:"personality,omitempty"` - // TimeOffsets specifies the offset for supporting time namespaces. - TimeOffsets map[string]LinuxTimeOffset `json:"timeOffsets,omitempty"` -} - -// LinuxNamespace is the configuration for a Linux namespace -type LinuxNamespace struct { - // Type is the type of namespace - Type LinuxNamespaceType `json:"type"` - // Path is a path to an existing namespace persisted on disk that can be joined - // and is of the same type - Path string `json:"path,omitempty"` -} - -// LinuxNamespaceType is one of the Linux namespaces -type LinuxNamespaceType string - -const ( - // PIDNamespace for isolating process IDs - PIDNamespace LinuxNamespaceType = "pid" - // NetworkNamespace for isolating network devices, stacks, ports, etc - NetworkNamespace LinuxNamespaceType = "network" - // MountNamespace for isolating mount points - MountNamespace LinuxNamespaceType = "mount" - // IPCNamespace for isolating System V IPC, POSIX message queues - IPCNamespace LinuxNamespaceType = "ipc" - // UTSNamespace for isolating hostname and NIS domain name - UTSNamespace LinuxNamespaceType = "uts" - // UserNamespace for isolating user and group IDs - UserNamespace LinuxNamespaceType = "user" - // CgroupNamespace for isolating cgroup hierarchies - CgroupNamespace LinuxNamespaceType = "cgroup" - // TimeNamespace for isolating the clocks - TimeNamespace LinuxNamespaceType = "time" -) - -// LinuxIDMapping specifies UID/GID mappings -type LinuxIDMapping struct { - // ContainerID is the starting UID/GID in the container - ContainerID uint32 `json:"containerID"` - // HostID is the starting UID/GID on the host to be mapped to 'ContainerID' - HostID uint32 `json:"hostID"` - // Size is the number of IDs to be mapped - Size uint32 `json:"size"` -} - -// LinuxTimeOffset specifies the offset for Time Namespace -type LinuxTimeOffset struct { - // Secs is the offset of clock (in secs) in the container - Secs int64 `json:"secs,omitempty"` - // Nanosecs is the additional offset for Secs (in nanosecs) - Nanosecs uint32 `json:"nanosecs,omitempty"` -} - -// POSIXRlimit type and restrictions -type POSIXRlimit struct { - // Type of the rlimit to set - Type string `json:"type"` - // Hard is the hard limit for the specified type - Hard uint64 `json:"hard"` - // Soft is the soft limit for the specified type - Soft uint64 `json:"soft"` -} - -// LinuxHugepageLimit structure corresponds to limiting kernel hugepages. -// Default to reservation limits if supported. Otherwise fallback to page fault limits. -type LinuxHugepageLimit struct { - // Pagesize is the hugepage size. - // Format: "B' (e.g. 64KB, 2MB, 1GB, etc.). - Pagesize string `json:"pageSize"` - // Limit is the limit of "hugepagesize" hugetlb reservations (if supported) or usage. 
- Limit uint64 `json:"limit"` -} - -// LinuxInterfacePriority for network interfaces -type LinuxInterfacePriority struct { - // Name is the name of the network interface - Name string `json:"name"` - // Priority for the interface - Priority uint32 `json:"priority"` -} - -// LinuxBlockIODevice holds major:minor format supported in blkio cgroup -type LinuxBlockIODevice struct { - // Major is the device's major number. - Major int64 `json:"major"` - // Minor is the device's minor number. - Minor int64 `json:"minor"` -} - -// LinuxWeightDevice struct holds a `major:minor weight` pair for weightDevice -type LinuxWeightDevice struct { - LinuxBlockIODevice - // Weight is the bandwidth rate for the device. - Weight *uint16 `json:"weight,omitempty"` - // LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, CFQ scheduler only - LeafWeight *uint16 `json:"leafWeight,omitempty"` -} - -// LinuxThrottleDevice struct holds a `major:minor rate_per_second` pair -type LinuxThrottleDevice struct { - LinuxBlockIODevice - // Rate is the IO rate limit per cgroup per device - Rate uint64 `json:"rate"` -} - -// LinuxBlockIO for Linux cgroup 'blkio' resource management -type LinuxBlockIO struct { - // Specifies per cgroup weight - Weight *uint16 `json:"weight,omitempty"` - // Specifies tasks' weight in the given cgroup while competing with the cgroup's child cgroups, CFQ scheduler only - LeafWeight *uint16 `json:"leafWeight,omitempty"` - // Weight per cgroup per device, can override BlkioWeight - WeightDevice []LinuxWeightDevice `json:"weightDevice,omitempty"` - // IO read rate limit per cgroup per device, bytes per second - ThrottleReadBpsDevice []LinuxThrottleDevice `json:"throttleReadBpsDevice,omitempty"` - // IO write rate limit per cgroup per device, bytes per second - ThrottleWriteBpsDevice []LinuxThrottleDevice `json:"throttleWriteBpsDevice,omitempty"` - // IO read rate limit per cgroup per device, IO per second - ThrottleReadIOPSDevice []LinuxThrottleDevice `json:"throttleReadIOPSDevice,omitempty"` - // IO write rate limit per cgroup per device, IO per second - ThrottleWriteIOPSDevice []LinuxThrottleDevice `json:"throttleWriteIOPSDevice,omitempty"` -} - -// LinuxMemory for Linux cgroup 'memory' resource management -type LinuxMemory struct { - // Memory limit (in bytes). - Limit *int64 `json:"limit,omitempty"` - // Memory reservation or soft_limit (in bytes). - Reservation *int64 `json:"reservation,omitempty"` - // Total memory limit (memory + swap). - Swap *int64 `json:"swap,omitempty"` - // Kernel memory limit (in bytes). - Kernel *int64 `json:"kernel,omitempty"` - // Kernel memory limit for tcp (in bytes) - KernelTCP *int64 `json:"kernelTCP,omitempty"` - // How aggressive the kernel will swap memory pages. - Swappiness *uint64 `json:"swappiness,omitempty"` - // DisableOOMKiller disables the OOM killer for out of memory conditions - DisableOOMKiller *bool `json:"disableOOMKiller,omitempty"` - // Enables hierarchical memory accounting - UseHierarchy *bool `json:"useHierarchy,omitempty"` - // CheckBeforeUpdate enables checking if a new memory limit is lower - // than the current usage during update, and if so, rejecting the new - // limit. - CheckBeforeUpdate *bool `json:"checkBeforeUpdate,omitempty"` -} - -// LinuxCPU for Linux cgroup 'cpu' resource management -type LinuxCPU struct { - // CPU shares (relative weight (ratio) vs. other cgroups with cpu shares). - Shares *uint64 `json:"shares,omitempty"` - // CPU hardcap limit (in usecs). 
Allowed cpu time in a given period. - Quota *int64 `json:"quota,omitempty"` - // CPU hardcap burst limit (in usecs). Allowed accumulated cpu time additionally for burst in a - // given period. - Burst *uint64 `json:"burst,omitempty"` - // CPU period to be used for hardcapping (in usecs). - Period *uint64 `json:"period,omitempty"` - // How much time realtime scheduling may use (in usecs). - RealtimeRuntime *int64 `json:"realtimeRuntime,omitempty"` - // CPU period to be used for realtime scheduling (in usecs). - RealtimePeriod *uint64 `json:"realtimePeriod,omitempty"` - // CPUs to use within the cpuset. Default is to use any CPU available. - Cpus string `json:"cpus,omitempty"` - // List of memory nodes in the cpuset. Default is to use any available memory node. - Mems string `json:"mems,omitempty"` - // cgroups are configured with minimum weight, 0: default behavior, 1: SCHED_IDLE. - Idle *int64 `json:"idle,omitempty"` -} - -// LinuxPids for Linux cgroup 'pids' resource management (Linux 4.3) -type LinuxPids struct { - // Maximum number of PIDs. Default is "no limit". - Limit int64 `json:"limit"` -} - -// LinuxNetwork identification and priority configuration -type LinuxNetwork struct { - // Set class identifier for container's network packets - ClassID *uint32 `json:"classID,omitempty"` - // Set priority of network traffic for container - Priorities []LinuxInterfacePriority `json:"priorities,omitempty"` -} - -// LinuxRdma for Linux cgroup 'rdma' resource management (Linux 4.11) -type LinuxRdma struct { - // Maximum number of HCA handles that can be opened. Default is "no limit". - HcaHandles *uint32 `json:"hcaHandles,omitempty"` - // Maximum number of HCA objects that can be created. Default is "no limit". - HcaObjects *uint32 `json:"hcaObjects,omitempty"` -} - -// LinuxResources has container runtime resource constraints -type LinuxResources struct { - // Devices configures the device allowlist. - Devices []LinuxDeviceCgroup `json:"devices,omitempty"` - // Memory restriction configuration - Memory *LinuxMemory `json:"memory,omitempty"` - // CPU resource restriction configuration - CPU *LinuxCPU `json:"cpu,omitempty"` - // Task resource restriction configuration. - Pids *LinuxPids `json:"pids,omitempty"` - // BlockIO restriction configuration - BlockIO *LinuxBlockIO `json:"blockIO,omitempty"` - // Hugetlb limits (in bytes). Default to reservation limits if supported. - HugepageLimits []LinuxHugepageLimit `json:"hugepageLimits,omitempty"` - // Network restriction configuration - Network *LinuxNetwork `json:"network,omitempty"` - // Rdma resource restriction configuration. - // Limits are a set of key value pairs that define RDMA resource limits, - // where the key is device name and value is resource limits. - Rdma map[string]LinuxRdma `json:"rdma,omitempty"` - // Unified resources. - Unified map[string]string `json:"unified,omitempty"` -} - -// LinuxDevice represents the mknod information for a Linux special device file -type LinuxDevice struct { - // Path to the device. - Path string `json:"path"` - // Device type, block, char, etc. - Type string `json:"type"` - // Major is the device's major number. - Major int64 `json:"major"` - // Minor is the device's minor number. - Minor int64 `json:"minor"` - // FileMode permission bits for the device. - FileMode *os.FileMode `json:"fileMode,omitempty"` - // UID of the device. - UID *uint32 `json:"uid,omitempty"` - // Gid of the device. 
- GID *uint32 `json:"gid,omitempty"` -} - -// LinuxDeviceCgroup represents a device rule for the devices specified to -// the device controller -type LinuxDeviceCgroup struct { - // Allow or deny - Allow bool `json:"allow"` - // Device type, block, char, etc. - Type string `json:"type,omitempty"` - // Major is the device's major number. - Major *int64 `json:"major,omitempty"` - // Minor is the device's minor number. - Minor *int64 `json:"minor,omitempty"` - // Cgroup access permissions format, rwm. - Access string `json:"access,omitempty"` -} - -// LinuxPersonalityDomain refers to a personality domain. -type LinuxPersonalityDomain string - -// LinuxPersonalityFlag refers to an additional personality flag. None are currently defined. -type LinuxPersonalityFlag string - -// Define domain and flags for Personality -const ( - // PerLinux is the standard Linux personality - PerLinux LinuxPersonalityDomain = "LINUX" - // PerLinux32 sets personality to 32 bit - PerLinux32 LinuxPersonalityDomain = "LINUX32" -) - -// LinuxPersonality represents the Linux personality syscall input -type LinuxPersonality struct { - // Domain for the personality - Domain LinuxPersonalityDomain `json:"domain"` - // Additional flags - Flags []LinuxPersonalityFlag `json:"flags,omitempty"` -} - -// Solaris contains platform-specific configuration for Solaris application containers. -type Solaris struct { - // SMF FMRI which should go "online" before we start the container process. - Milestone string `json:"milestone,omitempty"` - // Maximum set of privileges any process in this container can obtain. - LimitPriv string `json:"limitpriv,omitempty"` - // The maximum amount of shared memory allowed for this container. - MaxShmMemory string `json:"maxShmMemory,omitempty"` - // Specification for automatic creation of network resources for this container. - Anet []SolarisAnet `json:"anet,omitempty"` - // Set limit on the amount of CPU time that can be used by container. - CappedCPU *SolarisCappedCPU `json:"cappedCPU,omitempty"` - // The physical and swap caps on the memory that can be used by this container. - CappedMemory *SolarisCappedMemory `json:"cappedMemory,omitempty"` -} - -// SolarisCappedCPU allows users to set limit on the amount of CPU time that can be used by container. -type SolarisCappedCPU struct { - Ncpus string `json:"ncpus,omitempty"` -} - -// SolarisCappedMemory allows users to set the physical and swap caps on the memory that can be used by this container. -type SolarisCappedMemory struct { - Physical string `json:"physical,omitempty"` - Swap string `json:"swap,omitempty"` -} - -// SolarisAnet provides the specification for automatic creation of network resources for this container. -type SolarisAnet struct { - // Specify a name for the automatically created VNIC datalink. - Linkname string `json:"linkname,omitempty"` - // Specify the link over which the VNIC will be created. - Lowerlink string `json:"lowerLink,omitempty"` - // The set of IP addresses that the container can use. - Allowedaddr string `json:"allowedAddress,omitempty"` - // Specifies whether allowedAddress limitation is to be applied to the VNIC. - Configallowedaddr string `json:"configureAllowedAddress,omitempty"` - // The value of the optional default router. - Defrouter string `json:"defrouter,omitempty"` - // Enable one or more types of link protection. 
- Linkprotection string `json:"linkProtection,omitempty"` - // Set the VNIC's macAddress - Macaddress string `json:"macAddress,omitempty"` -} - -// Windows defines the runtime configuration for Windows based containers, including Hyper-V containers. -type Windows struct { - // LayerFolders contains a list of absolute paths to directories containing image layers. - LayerFolders []string `json:"layerFolders"` - // Devices are the list of devices to be mapped into the container. - Devices []WindowsDevice `json:"devices,omitempty"` - // Resources contains information for handling resource constraints for the container. - Resources *WindowsResources `json:"resources,omitempty"` - // CredentialSpec contains a JSON object describing a group Managed Service Account (gMSA) specification. - CredentialSpec interface{} `json:"credentialSpec,omitempty"` - // Servicing indicates if the container is being started in a mode to apply a Windows Update servicing operation. - Servicing bool `json:"servicing,omitempty"` - // IgnoreFlushesDuringBoot indicates if the container is being started in a mode where disk writes are not flushed during its boot process. - IgnoreFlushesDuringBoot bool `json:"ignoreFlushesDuringBoot,omitempty"` - // HyperV contains information for running a container with Hyper-V isolation. - HyperV *WindowsHyperV `json:"hyperv,omitempty"` - // Network restriction configuration. - Network *WindowsNetwork `json:"network,omitempty"` -} - -// WindowsDevice represents information about a host device to be mapped into the container. -type WindowsDevice struct { - // Device identifier: interface class GUID, etc. - ID string `json:"id"` - // Device identifier type: "class", etc. - IDType string `json:"idType"` -} - -// WindowsResources has container runtime resource constraints for containers running on Windows. -type WindowsResources struct { - // Memory restriction configuration. - Memory *WindowsMemoryResources `json:"memory,omitempty"` - // CPU resource restriction configuration. - CPU *WindowsCPUResources `json:"cpu,omitempty"` - // Storage restriction configuration. - Storage *WindowsStorageResources `json:"storage,omitempty"` -} - -// WindowsMemoryResources contains memory resource management settings. -type WindowsMemoryResources struct { - // Memory limit in bytes. - Limit *uint64 `json:"limit,omitempty"` -} - -// WindowsCPUResources contains CPU resource management settings. -type WindowsCPUResources struct { - // Count is the number of CPUs available to the container. It represents the - // fraction of the configured processor `count` in a container in relation - // to the processors available in the host. The fraction ultimately - // determines the portion of processor cycles that the threads in a - // container can use during each scheduling interval, as the number of - // cycles per 10,000 cycles. - Count *uint64 `json:"count,omitempty"` - // Shares limits the share of processor time given to the container relative - // to other workloads on the processor. The processor `shares` (`weight` at - // the platform level) is a value between 0 and 10000. - Shares *uint16 `json:"shares,omitempty"` - // Maximum determines the portion of processor cycles that the threads in a - // container can use during each scheduling interval, as the number of - // cycles per 10,000 cycles. Set processor `maximum` to a percentage times - // 100. - Maximum *uint16 `json:"maximum,omitempty"` -} - -// WindowsStorageResources contains storage resource management settings. 
-type WindowsStorageResources struct { - // Specifies maximum Iops for the system drive. - Iops *uint64 `json:"iops,omitempty"` - // Specifies maximum bytes per second for the system drive. - Bps *uint64 `json:"bps,omitempty"` - // Sandbox size specifies the minimum size of the system drive in bytes. - SandboxSize *uint64 `json:"sandboxSize,omitempty"` -} - -// WindowsNetwork contains network settings for Windows containers. -type WindowsNetwork struct { - // List of HNS endpoints that the container should connect to. - EndpointList []string `json:"endpointList,omitempty"` - // Specifies if unqualified DNS name resolution is allowed. - AllowUnqualifiedDNSQuery bool `json:"allowUnqualifiedDNSQuery,omitempty"` - // Comma separated list of DNS suffixes to use for name resolution. - DNSSearchList []string `json:"DNSSearchList,omitempty"` - // Name (ID) of the container that we will share with the network stack. - NetworkSharedContainerName string `json:"networkSharedContainerName,omitempty"` - // name (ID) of the network namespace that will be used for the container. - NetworkNamespace string `json:"networkNamespace,omitempty"` -} - -// WindowsHyperV contains information for configuring a container to run with Hyper-V isolation. -type WindowsHyperV struct { - // UtilityVMPath is an optional path to the image used for the Utility VM. - UtilityVMPath string `json:"utilityVMPath,omitempty"` -} - -// VM contains information for virtual-machine-based containers. -type VM struct { - // Hypervisor specifies hypervisor-related configuration for virtual-machine-based containers. - Hypervisor VMHypervisor `json:"hypervisor,omitempty"` - // Kernel specifies kernel-related configuration for virtual-machine-based containers. - Kernel VMKernel `json:"kernel"` - // Image specifies guest image related configuration for virtual-machine-based containers. - Image VMImage `json:"image,omitempty"` -} - -// VMHypervisor contains information about the hypervisor to use for a virtual machine. -type VMHypervisor struct { - // Path is the host path to the hypervisor used to manage the virtual machine. - Path string `json:"path"` - // Parameters specifies parameters to pass to the hypervisor. - Parameters []string `json:"parameters,omitempty"` -} - -// VMKernel contains information about the kernel to use for a virtual machine. -type VMKernel struct { - // Path is the host path to the kernel used to boot the virtual machine. - Path string `json:"path"` - // Parameters specifies parameters to pass to the kernel. - Parameters []string `json:"parameters,omitempty"` - // InitRD is the host path to an initial ramdisk to be used by the kernel. - InitRD string `json:"initrd,omitempty"` -} - -// VMImage contains information about the virtual machine root image. -type VMImage struct { - // Path is the host path to the root image that the VM kernel would boot into. - Path string `json:"path"` - // Format is the root image format type (e.g. "qcow2", "raw", "vhd", etc). 
- Format string `json:"format"` -} - -// LinuxSeccomp represents syscall restrictions -type LinuxSeccomp struct { - DefaultAction LinuxSeccompAction `json:"defaultAction"` - DefaultErrnoRet *uint `json:"defaultErrnoRet,omitempty"` - Architectures []Arch `json:"architectures,omitempty"` - Flags []LinuxSeccompFlag `json:"flags,omitempty"` - ListenerPath string `json:"listenerPath,omitempty"` - ListenerMetadata string `json:"listenerMetadata,omitempty"` - Syscalls []LinuxSyscall `json:"syscalls,omitempty"` -} - -// Arch used for additional architectures -type Arch string - -// LinuxSeccompFlag is a flag to pass to seccomp(2). -type LinuxSeccompFlag string - -const ( - // LinuxSeccompFlagLog is a seccomp flag to request all returned - // actions except SECCOMP_RET_ALLOW to be logged. An administrator may - // override this filter flag by preventing specific actions from being - // logged via the /proc/sys/kernel/seccomp/actions_logged file. (since - // Linux 4.14) - LinuxSeccompFlagLog LinuxSeccompFlag = "SECCOMP_FILTER_FLAG_LOG" - - // LinuxSeccompFlagSpecAllow can be used to disable Speculative Store - // Bypass mitigation. (since Linux 4.17) - LinuxSeccompFlagSpecAllow LinuxSeccompFlag = "SECCOMP_FILTER_FLAG_SPEC_ALLOW" - - // LinuxSeccompFlagWaitKillableRecv can be used to switch to the wait - // killable semantics. (since Linux 5.19) - LinuxSeccompFlagWaitKillableRecv LinuxSeccompFlag = "SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV" -) - -// Additional architectures permitted to be used for system calls -// By default only the native architecture of the kernel is permitted -const ( - ArchX86 Arch = "SCMP_ARCH_X86" - ArchX86_64 Arch = "SCMP_ARCH_X86_64" - ArchX32 Arch = "SCMP_ARCH_X32" - ArchARM Arch = "SCMP_ARCH_ARM" - ArchAARCH64 Arch = "SCMP_ARCH_AARCH64" - ArchMIPS Arch = "SCMP_ARCH_MIPS" - ArchMIPS64 Arch = "SCMP_ARCH_MIPS64" - ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32" - ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL" - ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64" - ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32" - ArchPPC Arch = "SCMP_ARCH_PPC" - ArchPPC64 Arch = "SCMP_ARCH_PPC64" - ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE" - ArchS390 Arch = "SCMP_ARCH_S390" - ArchS390X Arch = "SCMP_ARCH_S390X" - ArchPARISC Arch = "SCMP_ARCH_PARISC" - ArchPARISC64 Arch = "SCMP_ARCH_PARISC64" - ArchRISCV64 Arch = "SCMP_ARCH_RISCV64" -) - -// LinuxSeccompAction taken upon Seccomp rule match -type LinuxSeccompAction string - -// Define actions for Seccomp rules -const ( - ActKill LinuxSeccompAction = "SCMP_ACT_KILL" - ActKillProcess LinuxSeccompAction = "SCMP_ACT_KILL_PROCESS" - ActKillThread LinuxSeccompAction = "SCMP_ACT_KILL_THREAD" - ActTrap LinuxSeccompAction = "SCMP_ACT_TRAP" - ActErrno LinuxSeccompAction = "SCMP_ACT_ERRNO" - ActTrace LinuxSeccompAction = "SCMP_ACT_TRACE" - ActAllow LinuxSeccompAction = "SCMP_ACT_ALLOW" - ActLog LinuxSeccompAction = "SCMP_ACT_LOG" - ActNotify LinuxSeccompAction = "SCMP_ACT_NOTIFY" -) - -// LinuxSeccompOperator used to match syscall arguments in Seccomp -type LinuxSeccompOperator string - -// Define operators for syscall arguments in Seccomp -const ( - OpNotEqual LinuxSeccompOperator = "SCMP_CMP_NE" - OpLessThan LinuxSeccompOperator = "SCMP_CMP_LT" - OpLessEqual LinuxSeccompOperator = "SCMP_CMP_LE" - OpEqualTo LinuxSeccompOperator = "SCMP_CMP_EQ" - OpGreaterEqual LinuxSeccompOperator = "SCMP_CMP_GE" - OpGreaterThan LinuxSeccompOperator = "SCMP_CMP_GT" - OpMaskedEqual LinuxSeccompOperator = "SCMP_CMP_MASKED_EQ" -) - -// LinuxSeccompArg used for matching specific syscall arguments in 
Seccomp -type LinuxSeccompArg struct { - Index uint `json:"index"` - Value uint64 `json:"value"` - ValueTwo uint64 `json:"valueTwo,omitempty"` - Op LinuxSeccompOperator `json:"op"` -} - -// LinuxSyscall is used to match a syscall in Seccomp -type LinuxSyscall struct { - Names []string `json:"names"` - Action LinuxSeccompAction `json:"action"` - ErrnoRet *uint `json:"errnoRet,omitempty"` - Args []LinuxSeccompArg `json:"args,omitempty"` -} - -// LinuxIntelRdt has container runtime resource constraints for Intel RDT CAT and MBA -// features and flags enabling Intel RDT CMT and MBM features. -// Intel RDT features are available in Linux 4.14 and newer kernel versions. -type LinuxIntelRdt struct { - // The identity for RDT Class of Service - ClosID string `json:"closID,omitempty"` - // The schema for L3 cache id and capacity bitmask (CBM) - // Format: "L3:=;=;..." - L3CacheSchema string `json:"l3CacheSchema,omitempty"` - - // The schema of memory bandwidth per L3 cache id - // Format: "MB:=bandwidth0;=bandwidth1;..." - // The unit of memory bandwidth is specified in "percentages" by - // default, and in "MBps" if MBA Software Controller is enabled. - MemBwSchema string `json:"memBwSchema,omitempty"` - - // EnableCMT is the flag to indicate if the Intel RDT CMT is enabled. CMT (Cache Monitoring Technology) supports monitoring of - // the last-level cache (LLC) occupancy for the container. - EnableCMT bool `json:"enableCMT,omitempty"` - - // EnableMBM is the flag to indicate if the Intel RDT MBM is enabled. MBM (Memory Bandwidth Monitoring) supports monitoring of - // total and local memory bandwidth for the container. - EnableMBM bool `json:"enableMBM,omitempty"` -} - -// ZOS contains platform-specific configuration for z/OS based containers. -type ZOS struct { - // Devices are a list of device nodes that are created for the container - Devices []ZOSDevice `json:"devices,omitempty"` -} - -// ZOSDevice represents the mknod information for a z/OS special device file -type ZOSDevice struct { - // Path to the device. - Path string `json:"path"` - // Device type, block, char, etc. - Type string `json:"type"` - // Major is the device's major number. - Major int64 `json:"major"` - // Minor is the device's minor number. - Minor int64 `json:"minor"` - // FileMode permission bits for the device. - FileMode *os.FileMode `json:"fileMode,omitempty"` - // UID of the device. - UID *uint32 `json:"uid,omitempty"` - // Gid of the device. - GID *uint32 `json:"gid,omitempty"` -} - -// LinuxSchedulerPolicy represents different scheduling policies used with the Linux Scheduler -type LinuxSchedulerPolicy string - -const ( - // SchedOther is the default scheduling policy - SchedOther LinuxSchedulerPolicy = "SCHED_OTHER" - // SchedFIFO is the First-In-First-Out scheduling policy - SchedFIFO LinuxSchedulerPolicy = "SCHED_FIFO" - // SchedRR is the Round-Robin scheduling policy - SchedRR LinuxSchedulerPolicy = "SCHED_RR" - // SchedBatch is the Batch scheduling policy - SchedBatch LinuxSchedulerPolicy = "SCHED_BATCH" - // SchedISO is the Isolation scheduling policy - SchedISO LinuxSchedulerPolicy = "SCHED_ISO" - // SchedIdle is the Idle scheduling policy - SchedIdle LinuxSchedulerPolicy = "SCHED_IDLE" - // SchedDeadline is the Deadline scheduling policy - SchedDeadline LinuxSchedulerPolicy = "SCHED_DEADLINE" -) - -// LinuxSchedulerFlag represents the flags used by the Linux Scheduler. 
-type LinuxSchedulerFlag string - -const ( - // SchedFlagResetOnFork represents the reset on fork scheduling flag - SchedFlagResetOnFork LinuxSchedulerFlag = "SCHED_FLAG_RESET_ON_FORK" - // SchedFlagReclaim represents the reclaim scheduling flag - SchedFlagReclaim LinuxSchedulerFlag = "SCHED_FLAG_RECLAIM" - // SchedFlagDLOverrun represents the deadline overrun scheduling flag - SchedFlagDLOverrun LinuxSchedulerFlag = "SCHED_FLAG_DL_OVERRUN" - // SchedFlagKeepPolicy represents the keep policy scheduling flag - SchedFlagKeepPolicy LinuxSchedulerFlag = "SCHED_FLAG_KEEP_POLICY" - // SchedFlagKeepParams represents the keep parameters scheduling flag - SchedFlagKeepParams LinuxSchedulerFlag = "SCHED_FLAG_KEEP_PARAMS" - // SchedFlagUtilClampMin represents the utilization clamp minimum scheduling flag - SchedFlagUtilClampMin LinuxSchedulerFlag = "SCHED_FLAG_UTIL_CLAMP_MIN" - // SchedFlagUtilClampMin represents the utilization clamp maximum scheduling flag - SchedFlagUtilClampMax LinuxSchedulerFlag = "SCHED_FLAG_UTIL_CLAMP_MAX" -) diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go deleted file mode 100644 index 7c010d4fe7..0000000000 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go +++ /dev/null @@ -1,56 +0,0 @@ -package specs - -// ContainerState represents the state of a container. -type ContainerState string - -const ( - // StateCreating indicates that the container is being created - StateCreating ContainerState = "creating" - - // StateCreated indicates that the runtime has finished the create operation - StateCreated ContainerState = "created" - - // StateRunning indicates that the container process has executed the - // user-specified program but has not exited - StateRunning ContainerState = "running" - - // StateStopped indicates that the container process has exited - StateStopped ContainerState = "stopped" -) - -// State holds information about the runtime state of the container. -type State struct { - // Version is the version of the specification that is supported. - Version string `json:"ociVersion"` - // ID is the container ID - ID string `json:"id"` - // Status is the runtime status of the container. - Status ContainerState `json:"status"` - // Pid is the process ID for the container process. - Pid int `json:"pid,omitempty"` - // Bundle is the path to the container's bundle directory. - Bundle string `json:"bundle"` - // Annotations are key values associated with the container. - Annotations map[string]string `json:"annotations,omitempty"` -} - -const ( - // SeccompFdName is the name of the seccomp notify file descriptor. - SeccompFdName string = "seccompFd" -) - -// ContainerProcessState holds information about the state of a container process. -type ContainerProcessState struct { - // Version is the version of the specification that is supported. - Version string `json:"ociVersion"` - // Fds is a string array containing the names of the file descriptors passed. - // The index of the name in this array corresponds to index of the file - // descriptor in the `SCM_RIGHTS` array. - Fds []string `json:"fds"` - // Pid is the process ID as seen by the runtime. - Pid int `json:"pid"` - // Opaque metadata. - Metadata string `json:"metadata,omitempty"` - // State of the container. 
- State State `json:"state"` -} diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go deleted file mode 100644 index b3fca349cb..0000000000 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go +++ /dev/null @@ -1,18 +0,0 @@ -package specs - -import "fmt" - -const ( - // VersionMajor is for an API incompatible changes - VersionMajor = 1 - // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 1 - // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 - - // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "" -) - -// Version is the specification version that the package types support. -var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev) diff --git a/vendor/github.com/opencontainers/runtime-tools/LICENSE b/vendor/github.com/opencontainers/runtime-tools/LICENSE deleted file mode 100644 index bdc403653e..0000000000 --- a/vendor/github.com/opencontainers/runtime-tools/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 The Linux Foundation. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
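Reviewer note: the remaining hunks delete the vendored opencontainers/runtime-tools generate package (and its license). As a minimal sketch of the kind of caller code that depended on this package, the example below uses only APIs visible in the deleted sources that follow (generate.New, AddProcessEnv, SetProcessArgs, SaveToFile, ExportOptions); the output file name and the error handling are illustrative assumptions, not part of this change:

package main

import (
	"log"

	"github.com/opencontainers/runtime-tools/generate"
)

func main() {
	// New builds a default spec for the target OS (see New() in the deleted generate.go).
	g, err := generate.New("linux")
	if err != nil {
		log.Fatal(err)
	}
	// AddProcessEnv replaces an existing NAME=value entry or appends a new one.
	g.AddProcessEnv("PATH", "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin")
	g.SetProcessArgs([]string{"sh", "-c", "echo hello"})
	// An empty ExportOptions writes the full config rather than only the seccomp section.
	if err := g.SaveToFile("config.json", generate.ExportOptions{}); err != nil {
		log.Fatal(err)
	}
}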
diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/config.go b/vendor/github.com/opencontainers/runtime-tools/generate/config.go deleted file mode 100644 index 48f281d286..0000000000 --- a/vendor/github.com/opencontainers/runtime-tools/generate/config.go +++ /dev/null @@ -1,194 +0,0 @@ -package generate - -import ( - rspec "github.com/opencontainers/runtime-spec/specs-go" -) - -func (g *Generator) initConfig() { - if g.Config == nil { - g.Config = &rspec.Spec{} - } -} - -func (g *Generator) initConfigProcess() { - g.initConfig() - if g.Config.Process == nil { - g.Config.Process = &rspec.Process{} - } -} - -func (g *Generator) initConfigProcessConsoleSize() { - g.initConfigProcess() - if g.Config.Process.ConsoleSize == nil { - g.Config.Process.ConsoleSize = &rspec.Box{} - } -} - -func (g *Generator) initConfigProcessCapabilities() { - g.initConfigProcess() - if g.Config.Process.Capabilities == nil { - g.Config.Process.Capabilities = &rspec.LinuxCapabilities{} - } -} - -func (g *Generator) initConfigRoot() { - g.initConfig() - if g.Config.Root == nil { - g.Config.Root = &rspec.Root{} - } -} - -func (g *Generator) initConfigAnnotations() { - g.initConfig() - if g.Config.Annotations == nil { - g.Config.Annotations = make(map[string]string) - } -} - -func (g *Generator) initConfigHooks() { - g.initConfig() - if g.Config.Hooks == nil { - g.Config.Hooks = &rspec.Hooks{} - } -} - -func (g *Generator) initConfigLinux() { - g.initConfig() - if g.Config.Linux == nil { - g.Config.Linux = &rspec.Linux{} - } -} - -func (g *Generator) initConfigLinuxIntelRdt() { - g.initConfigLinux() - if g.Config.Linux.IntelRdt == nil { - g.Config.Linux.IntelRdt = &rspec.LinuxIntelRdt{} - } -} - -func (g *Generator) initConfigLinuxSysctl() { - g.initConfigLinux() - if g.Config.Linux.Sysctl == nil { - g.Config.Linux.Sysctl = make(map[string]string) - } -} - -func (g *Generator) initConfigLinuxSeccomp() { - g.initConfigLinux() - if g.Config.Linux.Seccomp == nil { - g.Config.Linux.Seccomp = &rspec.LinuxSeccomp{} - } -} - -func (g *Generator) initConfigLinuxResources() { - g.initConfigLinux() - if g.Config.Linux.Resources == nil { - g.Config.Linux.Resources = &rspec.LinuxResources{} - } -} - -func (g *Generator) initConfigLinuxResourcesBlockIO() { - g.initConfigLinuxResources() - if g.Config.Linux.Resources.BlockIO == nil { - g.Config.Linux.Resources.BlockIO = &rspec.LinuxBlockIO{} - } -} - -// InitConfigLinuxResourcesCPU initializes CPU of Linux resources -func (g *Generator) InitConfigLinuxResourcesCPU() { - g.initConfigLinuxResources() - if g.Config.Linux.Resources.CPU == nil { - g.Config.Linux.Resources.CPU = &rspec.LinuxCPU{} - } -} - -func (g *Generator) initConfigLinuxResourcesMemory() { - g.initConfigLinuxResources() - if g.Config.Linux.Resources.Memory == nil { - g.Config.Linux.Resources.Memory = &rspec.LinuxMemory{} - } -} - -func (g *Generator) initConfigLinuxResourcesNetwork() { - g.initConfigLinuxResources() - if g.Config.Linux.Resources.Network == nil { - g.Config.Linux.Resources.Network = &rspec.LinuxNetwork{} - } -} - -func (g *Generator) initConfigLinuxResourcesPids() { - g.initConfigLinuxResources() - if g.Config.Linux.Resources.Pids == nil { - g.Config.Linux.Resources.Pids = &rspec.LinuxPids{} - } -} - -func (g *Generator) initConfigLinuxResourcesUnified() { - g.initConfigLinuxResources() - if g.Config.Linux.Resources.Unified == nil { - g.Config.Linux.Resources.Unified = map[string]string{} - } -} - -func (g *Generator) initConfigSolaris() { - g.initConfig() - if g.Config.Solaris == nil 
{ - g.Config.Solaris = &rspec.Solaris{} - } -} - -func (g *Generator) initConfigSolarisCappedCPU() { - g.initConfigSolaris() - if g.Config.Solaris.CappedCPU == nil { - g.Config.Solaris.CappedCPU = &rspec.SolarisCappedCPU{} - } -} - -func (g *Generator) initConfigSolarisCappedMemory() { - g.initConfigSolaris() - if g.Config.Solaris.CappedMemory == nil { - g.Config.Solaris.CappedMemory = &rspec.SolarisCappedMemory{} - } -} - -func (g *Generator) initConfigWindows() { - g.initConfig() - if g.Config.Windows == nil { - g.Config.Windows = &rspec.Windows{} - } -} - -func (g *Generator) initConfigWindowsNetwork() { - g.initConfigWindows() - if g.Config.Windows.Network == nil { - g.Config.Windows.Network = &rspec.WindowsNetwork{} - } -} - -func (g *Generator) initConfigWindowsHyperV() { - g.initConfigWindows() - if g.Config.Windows.HyperV == nil { - g.Config.Windows.HyperV = &rspec.WindowsHyperV{} - } -} - -func (g *Generator) initConfigWindowsResources() { - g.initConfigWindows() - if g.Config.Windows.Resources == nil { - g.Config.Windows.Resources = &rspec.WindowsResources{} - } -} - -func (g *Generator) initConfigWindowsResourcesMemory() { - g.initConfigWindowsResources() - if g.Config.Windows.Resources.Memory == nil { - g.Config.Windows.Resources.Memory = &rspec.WindowsMemoryResources{} - } -} - -func (g *Generator) initConfigVM() { - g.initConfig() - if g.Config.VM == nil { - g.Config.VM = &rspec.VM{} - } -} diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/generate.go b/vendor/github.com/opencontainers/runtime-tools/generate/generate.go deleted file mode 100644 index 4d66b320dc..0000000000 --- a/vendor/github.com/opencontainers/runtime-tools/generate/generate.go +++ /dev/null @@ -1,1874 +0,0 @@ -// Package generate implements functions generating container config files. -package generate - -import ( - "encoding/json" - "fmt" - "io" - "os" - "strings" - - rspec "github.com/opencontainers/runtime-spec/specs-go" - "github.com/opencontainers/runtime-tools/generate/seccomp" - capsCheck "github.com/opencontainers/runtime-tools/validate/capabilities" - "github.com/syndtr/gocapability/capability" -) - -var ( - // Namespaces include the names of supported namespaces. - Namespaces = []string{"network", "pid", "mount", "ipc", "uts", "user", "cgroup"} - - // we don't care about order...and this is way faster... - removeFunc = func(s []string, i int) []string { - s[i] = s[len(s)-1] - return s[:len(s)-1] - } -) - -// Generator represents a generator for a container config. -type Generator struct { - Config *rspec.Spec - HostSpecific bool - // This is used to keep a cache of the ENVs added to improve - // performance when adding a huge number of ENV variables - envMap map[string]int -} - -// ExportOptions have toggles for exporting only certain parts of the specification -type ExportOptions struct { - Seccomp bool // seccomp toggles if only seccomp should be exported -} - -// New creates a configuration Generator with the default -// configuration for the target operating system. 
-func New(os string) (generator Generator, err error) { - if os != "linux" && os != "solaris" && os != "windows" && os != "freebsd" { - return generator, fmt.Errorf("no defaults configured for %s", os) - } - - config := rspec.Spec{ - Version: rspec.Version, - Hostname: "mrsdalloway", - } - - if os == "windows" { - config.Process = &rspec.Process{ - Args: []string{ - "cmd", - }, - Cwd: `C:\`, - } - config.Windows = &rspec.Windows{} - } else { - config.Root = &rspec.Root{ - Path: "rootfs", - Readonly: false, - } - config.Process = &rspec.Process{ - Terminal: false, - Args: []string{ - "sh", - }, - } - } - - if os == "linux" || os == "solaris" || os == "freebsd" { - config.Process.User = rspec.User{} - config.Process.Env = []string{ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - "TERM=xterm", - } - config.Process.Cwd = "/" - config.Process.Rlimits = []rspec.POSIXRlimit{ - { - Type: "RLIMIT_NOFILE", - Hard: uint64(1024), - Soft: uint64(1024), - }, - } - } - - if os == "linux" { - config.Process.Capabilities = &rspec.LinuxCapabilities{ - Bounding: []string{ - "CAP_CHOWN", - "CAP_DAC_OVERRIDE", - "CAP_FSETID", - "CAP_FOWNER", - "CAP_MKNOD", - "CAP_NET_RAW", - "CAP_SETGID", - "CAP_SETUID", - "CAP_SETFCAP", - "CAP_SETPCAP", - "CAP_NET_BIND_SERVICE", - "CAP_SYS_CHROOT", - "CAP_KILL", - "CAP_AUDIT_WRITE", - }, - Permitted: []string{ - "CAP_CHOWN", - "CAP_DAC_OVERRIDE", - "CAP_FSETID", - "CAP_FOWNER", - "CAP_MKNOD", - "CAP_NET_RAW", - "CAP_SETGID", - "CAP_SETUID", - "CAP_SETFCAP", - "CAP_SETPCAP", - "CAP_NET_BIND_SERVICE", - "CAP_SYS_CHROOT", - "CAP_KILL", - "CAP_AUDIT_WRITE", - }, - Inheritable: []string{ - "CAP_CHOWN", - "CAP_DAC_OVERRIDE", - "CAP_FSETID", - "CAP_FOWNER", - "CAP_MKNOD", - "CAP_NET_RAW", - "CAP_SETGID", - "CAP_SETUID", - "CAP_SETFCAP", - "CAP_SETPCAP", - "CAP_NET_BIND_SERVICE", - "CAP_SYS_CHROOT", - "CAP_KILL", - "CAP_AUDIT_WRITE", - }, - Effective: []string{ - "CAP_CHOWN", - "CAP_DAC_OVERRIDE", - "CAP_FSETID", - "CAP_FOWNER", - "CAP_MKNOD", - "CAP_NET_RAW", - "CAP_SETGID", - "CAP_SETUID", - "CAP_SETFCAP", - "CAP_SETPCAP", - "CAP_NET_BIND_SERVICE", - "CAP_SYS_CHROOT", - "CAP_KILL", - "CAP_AUDIT_WRITE", - }, - Ambient: []string{ - "CAP_CHOWN", - "CAP_DAC_OVERRIDE", - "CAP_FSETID", - "CAP_FOWNER", - "CAP_MKNOD", - "CAP_NET_RAW", - "CAP_SETGID", - "CAP_SETUID", - "CAP_SETFCAP", - "CAP_SETPCAP", - "CAP_NET_BIND_SERVICE", - "CAP_SYS_CHROOT", - "CAP_KILL", - "CAP_AUDIT_WRITE", - }, - } - config.Mounts = []rspec.Mount{ - { - Destination: "/proc", - Type: "proc", - Source: "proc", - Options: []string{"nosuid", "noexec", "nodev"}, - }, - { - Destination: "/dev", - Type: "tmpfs", - Source: "tmpfs", - Options: []string{"nosuid", "noexec", "strictatime", "mode=755", "size=65536k"}, - }, - { - Destination: "/dev/pts", - Type: "devpts", - Source: "devpts", - Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, - }, - { - Destination: "/dev/shm", - Type: "tmpfs", - Source: "shm", - Options: []string{"nosuid", "noexec", "nodev", "mode=1777", "size=65536k"}, - }, - { - Destination: "/dev/mqueue", - Type: "mqueue", - Source: "mqueue", - Options: []string{"nosuid", "noexec", "nodev"}, - }, - { - Destination: "/sys", - Type: "sysfs", - Source: "sysfs", - Options: []string{"nosuid", "noexec", "nodev", "ro"}, - }, - } - config.Linux = &rspec.Linux{ - Resources: &rspec.LinuxResources{ - Devices: []rspec.LinuxDeviceCgroup{ - { - Allow: false, - Access: "rwm", - }, - }, - }, - Namespaces: []rspec.LinuxNamespace{ - { - Type: "pid", - }, - { - 
Type: "network", - }, - { - Type: "ipc", - }, - { - Type: "uts", - }, - { - Type: "mount", - }, - }, - Seccomp: seccomp.DefaultProfile(&config), - } - } else if os == "freebsd" { - config.Mounts = []rspec.Mount{ - { - Destination: "/dev", - Type: "devfs", - Source: "devfs", - Options: []string{"ruleset=4"}, - }, - { - Destination: "/dev/fd", - Type: "fdescfs", - Source: "fdesc", - Options: []string{}, - }, - } - } - - envCache := map[string]int{} - if config.Process != nil { - envCache = createEnvCacheMap(config.Process.Env) - } - - return Generator{Config: &config, envMap: envCache}, nil -} - -// NewFromSpec creates a configuration Generator from a given -// configuration. -func NewFromSpec(config *rspec.Spec) Generator { - envCache := map[string]int{} - if config != nil && config.Process != nil { - envCache = createEnvCacheMap(config.Process.Env) - } - - return Generator{ - Config: config, - envMap: envCache, - } -} - -// NewFromFile loads the template specified in a file into a -// configuration Generator. -func NewFromFile(path string) (Generator, error) { - cf, err := os.Open(path) - if err != nil { - if os.IsNotExist(err) { - return Generator{}, fmt.Errorf("template configuration at %s not found", path) - } - return Generator{}, err - } - defer cf.Close() - - return NewFromTemplate(cf) -} - -// NewFromTemplate loads the template from io.Reader into a -// configuration Generator. -func NewFromTemplate(r io.Reader) (Generator, error) { - var config rspec.Spec - if err := json.NewDecoder(r).Decode(&config); err != nil { - return Generator{}, err - } - - envCache := map[string]int{} - if config.Process != nil { - envCache = createEnvCacheMap(config.Process.Env) - } - - return Generator{ - Config: &config, - envMap: envCache, - }, nil -} - -// createEnvCacheMap creates a hash map with the ENV variables given by the config -func createEnvCacheMap(env []string) map[string]int { - envMap := make(map[string]int, len(env)) - for i, val := range env { - envMap[val] = i - } - return envMap -} - -// SetSpec sets the configuration in the Generator g. -// -// Deprecated: Replace with: -// -// Use generator.Config = config -func (g *Generator) SetSpec(config *rspec.Spec) { - g.Config = config -} - -// Spec gets the configuration from the Generator g. -// -// Deprecated: Replace with generator.Config. -func (g *Generator) Spec() *rspec.Spec { - return g.Config -} - -// Save writes the configuration into w. -func (g *Generator) Save(w io.Writer, exportOpts ExportOptions) (err error) { - var data []byte - - if g.Config.Linux != nil { - buf, err := json.Marshal(g.Config.Linux) - if err != nil { - return err - } - if string(buf) == "{}" { - g.Config.Linux = nil - } - } - - if exportOpts.Seccomp { - data, err = json.MarshalIndent(g.Config.Linux.Seccomp, "", "\t") - } else { - data, err = json.MarshalIndent(g.Config, "", "\t") - } - if err != nil { - return err - } - - _, err = w.Write(data) - if err != nil { - return err - } - - return nil -} - -// SaveToFile writes the configuration into a file. -func (g *Generator) SaveToFile(path string, exportOpts ExportOptions) error { - f, err := os.Create(path) - if err != nil { - return err - } - defer f.Close() - return g.Save(f, exportOpts) -} - -// SetVersion sets g.Config.Version. -func (g *Generator) SetVersion(version string) { - g.initConfig() - g.Config.Version = version -} - -// SetRootPath sets g.Config.Root.Path. 
-func (g *Generator) SetRootPath(path string) { - g.initConfigRoot() - g.Config.Root.Path = path -} - -// SetRootReadonly sets g.Config.Root.Readonly. -func (g *Generator) SetRootReadonly(b bool) { - g.initConfigRoot() - g.Config.Root.Readonly = b -} - -// SetHostname sets g.Config.Hostname. -func (g *Generator) SetHostname(s string) { - g.initConfig() - g.Config.Hostname = s -} - -// SetOCIVersion sets g.Config.Version. -func (g *Generator) SetOCIVersion(s string) { - g.initConfig() - g.Config.Version = s -} - -// ClearAnnotations clears g.Config.Annotations. -func (g *Generator) ClearAnnotations() { - if g.Config == nil { - return - } - g.Config.Annotations = make(map[string]string) -} - -// AddAnnotation adds an annotation into g.Config.Annotations. -func (g *Generator) AddAnnotation(key, value string) { - g.initConfigAnnotations() - g.Config.Annotations[key] = value -} - -// RemoveAnnotation remove an annotation from g.Config.Annotations. -func (g *Generator) RemoveAnnotation(key string) { - if g.Config == nil || g.Config.Annotations == nil { - return - } - delete(g.Config.Annotations, key) -} - -// RemoveHostname removes g.Config.Hostname, setting it to an empty string. -func (g *Generator) RemoveHostname() { - if g.Config == nil { - return - } - g.Config.Hostname = "" -} - -// SetProcessConsoleSize sets g.Config.Process.ConsoleSize. -func (g *Generator) SetProcessConsoleSize(width, height uint) { - g.initConfigProcessConsoleSize() - g.Config.Process.ConsoleSize.Width = width - g.Config.Process.ConsoleSize.Height = height -} - -// SetProcessUID sets g.Config.Process.User.UID. -func (g *Generator) SetProcessUID(uid uint32) { - g.initConfigProcess() - g.Config.Process.User.UID = uid -} - -// SetProcessUsername sets g.Config.Process.User.Username. -func (g *Generator) SetProcessUsername(username string) { - g.initConfigProcess() - g.Config.Process.User.Username = username -} - -// SetProcessUmask sets g.Config.Process.User.Umask. -func (g *Generator) SetProcessUmask(umask uint32) { - g.initConfigProcess() - u := umask - g.Config.Process.User.Umask = &u -} - -// SetProcessGID sets g.Config.Process.User.GID. -func (g *Generator) SetProcessGID(gid uint32) { - g.initConfigProcess() - g.Config.Process.User.GID = gid -} - -// SetProcessCwd sets g.Config.Process.Cwd. -func (g *Generator) SetProcessCwd(cwd string) { - g.initConfigProcess() - g.Config.Process.Cwd = cwd -} - -// SetProcessNoNewPrivileges sets g.Config.Process.NoNewPrivileges. -func (g *Generator) SetProcessNoNewPrivileges(b bool) { - g.initConfigProcess() - g.Config.Process.NoNewPrivileges = b -} - -// SetProcessTerminal sets g.Config.Process.Terminal. -func (g *Generator) SetProcessTerminal(b bool) { - g.initConfigProcess() - g.Config.Process.Terminal = b -} - -// SetProcessApparmorProfile sets g.Config.Process.ApparmorProfile. -func (g *Generator) SetProcessApparmorProfile(prof string) { - g.initConfigProcess() - g.Config.Process.ApparmorProfile = prof -} - -// SetProcessArgs sets g.Config.Process.Args. -func (g *Generator) SetProcessArgs(args []string) { - g.initConfigProcess() - g.Config.Process.Args = args -} - -// ClearProcessEnv clears g.Config.Process.Env. -func (g *Generator) ClearProcessEnv() { - if g.Config == nil || g.Config.Process == nil { - return - } - g.Config.Process.Env = []string{} - // Clear out the env cache map as well - g.envMap = map[string]int{} -} - -// AddProcessEnv adds name=value into g.Config.Process.Env, or replaces an -// existing entry with the given name. 
-func (g *Generator) AddProcessEnv(name, value string) {
-	if name == "" {
-		return
-	}
-
-	g.initConfigProcess()
-	g.addEnv(fmt.Sprintf("%s=%s", name, value), name)
-}
-
-// AddMultipleProcessEnv adds multiple name=value into g.Config.Process.Env, or replaces
-// existing entries with the given name.
-func (g *Generator) AddMultipleProcessEnv(envs []string) {
-	g.initConfigProcess()
-
-	for _, val := range envs {
-		split := strings.SplitN(val, "=", 2)
-		g.addEnv(val, split[0])
-	}
-}
-
-// addEnv adds the ENV entry to the Process and checks envMap for
-// any duplicates.
-// This is called by both AddMultipleProcessEnv and AddProcessEnv.
-func (g *Generator) addEnv(env, key string) {
-	if idx, ok := g.envMap[key]; ok {
-		// The ENV exists in the cache, so change its value in g.Config.Process.Env
-		g.Config.Process.Env[idx] = env
-	} else {
-		// else the env doesn't exist, so add it and add its index to g.envMap
-		g.Config.Process.Env = append(g.Config.Process.Env, env)
-		g.envMap[key] = len(g.Config.Process.Env) - 1
-	}
-}
-
-// AddProcessRlimits adds rlimit into g.Config.Process.Rlimits.
-func (g *Generator) AddProcessRlimits(rType string, rHard uint64, rSoft uint64) {
-	g.initConfigProcess()
-	for i, rlimit := range g.Config.Process.Rlimits {
-		if rlimit.Type == rType {
-			g.Config.Process.Rlimits[i].Hard = rHard
-			g.Config.Process.Rlimits[i].Soft = rSoft
-			return
-		}
-	}
-
-	newRlimit := rspec.POSIXRlimit{
-		Type: rType,
-		Hard: rHard,
-		Soft: rSoft,
-	}
-	g.Config.Process.Rlimits = append(g.Config.Process.Rlimits, newRlimit)
-}
-
-// RemoveProcessRlimits removes an rlimit from g.Config.Process.Rlimits.
-func (g *Generator) RemoveProcessRlimits(rType string) {
-	if g.Config == nil || g.Config.Process == nil {
-		return
-	}
-	for i, rlimit := range g.Config.Process.Rlimits {
-		if rlimit.Type == rType {
-			g.Config.Process.Rlimits = append(g.Config.Process.Rlimits[:i], g.Config.Process.Rlimits[i+1:]...)
-			return
-		}
-	}
-}
-
-// ClearProcessRlimits clears g.Config.Process.Rlimits.
-func (g *Generator) ClearProcessRlimits() {
-	if g.Config == nil || g.Config.Process == nil {
-		return
-	}
-	g.Config.Process.Rlimits = []rspec.POSIXRlimit{}
-}
-
-// ClearProcessAdditionalGids clears g.Config.Process.AdditionalGids.
-func (g *Generator) ClearProcessAdditionalGids() {
-	if g.Config == nil || g.Config.Process == nil {
-		return
-	}
-	g.Config.Process.User.AdditionalGids = []uint32{}
-}
-
-// AddProcessAdditionalGid adds an additional gid into g.Config.Process.AdditionalGids.
-func (g *Generator) AddProcessAdditionalGid(gid uint32) {
-	g.initConfigProcess()
-	for _, group := range g.Config.Process.User.AdditionalGids {
-		if group == gid {
-			return
-		}
-	}
-	g.Config.Process.User.AdditionalGids = append(g.Config.Process.User.AdditionalGids, gid)
-}
-
-// SetProcessSelinuxLabel sets g.Config.Process.SelinuxLabel.
-func (g *Generator) SetProcessSelinuxLabel(label string) {
-	g.initConfigProcess()
-	g.Config.Process.SelinuxLabel = label
-}
-
-// SetLinuxCgroupsPath sets g.Config.Linux.CgroupsPath.
-func (g *Generator) SetLinuxCgroupsPath(path string) {
-	g.initConfigLinux()
-	g.Config.Linux.CgroupsPath = path
-}
-
-// SetLinuxIntelRdtClosID sets g.Config.Linux.IntelRdt.ClosID
-func (g *Generator) SetLinuxIntelRdtClosID(clos string) {
-	g.initConfigLinuxIntelRdt()
-	g.Config.Linux.IntelRdt.ClosID = clos
-}
-
-// SetLinuxIntelRdtL3CacheSchema sets g.Config.Linux.IntelRdt.L3CacheSchema
-func (g *Generator) SetLinuxIntelRdtL3CacheSchema(schema string) {
-	g.initConfigLinuxIntelRdt()
-	g.Config.Linux.IntelRdt.L3CacheSchema = schema
-}
-
-// SetLinuxMountLabel sets g.Config.Linux.MountLabel.
-func (g *Generator) SetLinuxMountLabel(label string) {
-	g.initConfigLinux()
-	g.Config.Linux.MountLabel = label
-}
-
-// SetProcessOOMScoreAdj sets g.Config.Process.OOMScoreAdj.
-func (g *Generator) SetProcessOOMScoreAdj(adj int) {
-	g.initConfigProcess()
-	g.Config.Process.OOMScoreAdj = &adj
-}
-
-// SetLinuxResourcesBlockIOLeafWeight sets g.Config.Linux.Resources.BlockIO.LeafWeight.
-func (g *Generator) SetLinuxResourcesBlockIOLeafWeight(weight uint16) {
-	g.initConfigLinuxResourcesBlockIO()
-	g.Config.Linux.Resources.BlockIO.LeafWeight = &weight
-}
-
-// AddLinuxResourcesBlockIOLeafWeightDevice adds or sets g.Config.Linux.Resources.BlockIO.WeightDevice.LeafWeight.
-func (g *Generator) AddLinuxResourcesBlockIOLeafWeightDevice(major int64, minor int64, weight uint16) {
-	g.initConfigLinuxResourcesBlockIO()
-	for i, weightDevice := range g.Config.Linux.Resources.BlockIO.WeightDevice {
-		if weightDevice.Major == major && weightDevice.Minor == minor {
-			g.Config.Linux.Resources.BlockIO.WeightDevice[i].LeafWeight = &weight
-			return
-		}
-	}
-	weightDevice := new(rspec.LinuxWeightDevice)
-	weightDevice.Major = major
-	weightDevice.Minor = minor
-	weightDevice.LeafWeight = &weight
-	g.Config.Linux.Resources.BlockIO.WeightDevice = append(g.Config.Linux.Resources.BlockIO.WeightDevice, *weightDevice)
-}
-
-// DropLinuxResourcesBlockIOLeafWeightDevice drops an item from g.Config.Linux.Resources.BlockIO.WeightDevice.LeafWeight
-func (g *Generator) DropLinuxResourcesBlockIOLeafWeightDevice(major int64, minor int64) {
-	if g.Config == nil || g.Config.Linux == nil || g.Config.Linux.Resources == nil || g.Config.Linux.Resources.BlockIO == nil {
-		return
-	}
-
-	for i, weightDevice := range g.Config.Linux.Resources.BlockIO.WeightDevice {
-		if weightDevice.Major == major && weightDevice.Minor == minor {
-			if weightDevice.Weight != nil {
-				newWeightDevice := new(rspec.LinuxWeightDevice)
-				newWeightDevice.Major = major
-				newWeightDevice.Minor = minor
-				newWeightDevice.Weight = weightDevice.Weight
-				g.Config.Linux.Resources.BlockIO.WeightDevice[i] = *newWeightDevice
-			} else {
-				g.Config.Linux.Resources.BlockIO.WeightDevice = append(g.Config.Linux.Resources.BlockIO.WeightDevice[:i], g.Config.Linux.Resources.BlockIO.WeightDevice[i+1:]...)
-			}
-			return
-		}
-	}
-}
-
-// SetLinuxResourcesBlockIOWeight sets g.Config.Linux.Resources.BlockIO.Weight.
-func (g *Generator) SetLinuxResourcesBlockIOWeight(weight uint16) {
-	g.initConfigLinuxResourcesBlockIO()
-	g.Config.Linux.Resources.BlockIO.Weight = &weight
-}
-
-// AddLinuxResourcesBlockIOWeightDevice adds or sets g.Config.Linux.Resources.BlockIO.WeightDevice.Weight.
-func (g *Generator) AddLinuxResourcesBlockIOWeightDevice(major int64, minor int64, weight uint16) {
-	g.initConfigLinuxResourcesBlockIO()
-	for i, weightDevice := range g.Config.Linux.Resources.BlockIO.WeightDevice {
-		if weightDevice.Major == major && weightDevice.Minor == minor {
-			g.Config.Linux.Resources.BlockIO.WeightDevice[i].Weight = &weight
-			return
-		}
-	}
-	weightDevice := new(rspec.LinuxWeightDevice)
-	weightDevice.Major = major
-	weightDevice.Minor = minor
-	weightDevice.Weight = &weight
-	g.Config.Linux.Resources.BlockIO.WeightDevice = append(g.Config.Linux.Resources.BlockIO.WeightDevice, *weightDevice)
-}
-
-// DropLinuxResourcesBlockIOWeightDevice drops an item from g.Config.Linux.Resources.BlockIO.WeightDevice.Weight
-func (g *Generator) DropLinuxResourcesBlockIOWeightDevice(major int64, minor int64) {
-	if g.Config == nil || g.Config.Linux == nil || g.Config.Linux.Resources == nil || g.Config.Linux.Resources.BlockIO == nil {
-		return
-	}
-
-	for i, weightDevice := range g.Config.Linux.Resources.BlockIO.WeightDevice {
-		if weightDevice.Major == major && weightDevice.Minor == minor {
-			if weightDevice.LeafWeight != nil {
-				newWeightDevice := new(rspec.LinuxWeightDevice)
-				newWeightDevice.Major = major
-				newWeightDevice.Minor = minor
-				newWeightDevice.LeafWeight = weightDevice.LeafWeight
-				g.Config.Linux.Resources.BlockIO.WeightDevice[i] = *newWeightDevice
-			} else {
-				g.Config.Linux.Resources.BlockIO.WeightDevice = append(g.Config.Linux.Resources.BlockIO.WeightDevice[:i], g.Config.Linux.Resources.BlockIO.WeightDevice[i+1:]...)
-			}
-			return
-		}
-	}
-}
-
-// AddLinuxResourcesBlockIOThrottleReadBpsDevice adds or sets g.Config.Linux.Resources.BlockIO.ThrottleReadBpsDevice.
-func (g *Generator) AddLinuxResourcesBlockIOThrottleReadBpsDevice(major int64, minor int64, rate uint64) {
-	g.initConfigLinuxResourcesBlockIO()
-	throttleDevices := addOrReplaceBlockIOThrottleDevice(g.Config.Linux.Resources.BlockIO.ThrottleReadBpsDevice, major, minor, rate)
-	g.Config.Linux.Resources.BlockIO.ThrottleReadBpsDevice = throttleDevices
-}
-
-// DropLinuxResourcesBlockIOThrottleReadBpsDevice drops an item from g.Config.Linux.Resources.BlockIO.ThrottleReadBpsDevice.
-func (g *Generator) DropLinuxResourcesBlockIOThrottleReadBpsDevice(major int64, minor int64) {
-	if g.Config == nil || g.Config.Linux == nil || g.Config.Linux.Resources == nil || g.Config.Linux.Resources.BlockIO == nil {
-		return
-	}
-
-	throttleDevices := dropBlockIOThrottleDevice(g.Config.Linux.Resources.BlockIO.ThrottleReadBpsDevice, major, minor)
-	g.Config.Linux.Resources.BlockIO.ThrottleReadBpsDevice = throttleDevices
-}
-
-// AddLinuxResourcesBlockIOThrottleReadIOPSDevice adds or sets g.Config.Linux.Resources.BlockIO.ThrottleReadIOPSDevice.
-func (g *Generator) AddLinuxResourcesBlockIOThrottleReadIOPSDevice(major int64, minor int64, rate uint64) {
-	g.initConfigLinuxResourcesBlockIO()
-	throttleDevices := addOrReplaceBlockIOThrottleDevice(g.Config.Linux.Resources.BlockIO.ThrottleReadIOPSDevice, major, minor, rate)
-	g.Config.Linux.Resources.BlockIO.ThrottleReadIOPSDevice = throttleDevices
-}
-
-// DropLinuxResourcesBlockIOThrottleReadIOPSDevice drops an item from g.Config.Linux.Resources.BlockIO.ThrottleReadIOPSDevice.
-func (g *Generator) DropLinuxResourcesBlockIOThrottleReadIOPSDevice(major int64, minor int64) {
-	if g.Config == nil || g.Config.Linux == nil || g.Config.Linux.Resources == nil || g.Config.Linux.Resources.BlockIO == nil {
-		return
-	}
-
-	throttleDevices := dropBlockIOThrottleDevice(g.Config.Linux.Resources.BlockIO.ThrottleReadIOPSDevice, major, minor)
-	g.Config.Linux.Resources.BlockIO.ThrottleReadIOPSDevice = throttleDevices
-}
-
-// AddLinuxResourcesBlockIOThrottleWriteBpsDevice adds or sets g.Config.Linux.Resources.BlockIO.ThrottleWriteBpsDevice.
-func (g *Generator) AddLinuxResourcesBlockIOThrottleWriteBpsDevice(major int64, minor int64, rate uint64) {
-	g.initConfigLinuxResourcesBlockIO()
-	throttleDevices := addOrReplaceBlockIOThrottleDevice(g.Config.Linux.Resources.BlockIO.ThrottleWriteBpsDevice, major, minor, rate)
-	g.Config.Linux.Resources.BlockIO.ThrottleWriteBpsDevice = throttleDevices
-}
-
-// DropLinuxResourcesBlockIOThrottleWriteBpsDevice drops an item from g.Config.Linux.Resources.BlockIO.ThrottleWriteBpsDevice.
-func (g *Generator) DropLinuxResourcesBlockIOThrottleWriteBpsDevice(major int64, minor int64) {
-	if g.Config == nil || g.Config.Linux == nil || g.Config.Linux.Resources == nil || g.Config.Linux.Resources.BlockIO == nil {
-		return
-	}
-
-	throttleDevices := dropBlockIOThrottleDevice(g.Config.Linux.Resources.BlockIO.ThrottleWriteBpsDevice, major, minor)
-	g.Config.Linux.Resources.BlockIO.ThrottleWriteBpsDevice = throttleDevices
-}
-
-// AddLinuxResourcesBlockIOThrottleWriteIOPSDevice adds or sets g.Config.Linux.Resources.BlockIO.ThrottleWriteIOPSDevice.
-func (g *Generator) AddLinuxResourcesBlockIOThrottleWriteIOPSDevice(major int64, minor int64, rate uint64) {
-	g.initConfigLinuxResourcesBlockIO()
-	throttleDevices := addOrReplaceBlockIOThrottleDevice(g.Config.Linux.Resources.BlockIO.ThrottleWriteIOPSDevice, major, minor, rate)
-	g.Config.Linux.Resources.BlockIO.ThrottleWriteIOPSDevice = throttleDevices
-}
-
-// DropLinuxResourcesBlockIOThrottleWriteIOPSDevice drops an item from g.Config.Linux.Resources.BlockIO.ThrottleWriteIOPSDevice.
-func (g *Generator) DropLinuxResourcesBlockIOThrottleWriteIOPSDevice(major int64, minor int64) {
-	if g.Config == nil || g.Config.Linux == nil || g.Config.Linux.Resources == nil || g.Config.Linux.Resources.BlockIO == nil {
-		return
-	}
-
-	throttleDevices := dropBlockIOThrottleDevice(g.Config.Linux.Resources.BlockIO.ThrottleWriteIOPSDevice, major, minor)
-	g.Config.Linux.Resources.BlockIO.ThrottleWriteIOPSDevice = throttleDevices
-}
-
-// SetLinuxResourcesCPUShares sets g.Config.Linux.Resources.CPU.Shares.
-func (g *Generator) SetLinuxResourcesCPUShares(shares uint64) {
-	g.InitConfigLinuxResourcesCPU()
-	g.Config.Linux.Resources.CPU.Shares = &shares
-}
-
-// SetLinuxResourcesCPUQuota sets g.Config.Linux.Resources.CPU.Quota.
-func (g *Generator) SetLinuxResourcesCPUQuota(quota int64) {
-	g.InitConfigLinuxResourcesCPU()
-	g.Config.Linux.Resources.CPU.Quota = &quota
-}
-
-// SetLinuxResourcesCPUPeriod sets g.Config.Linux.Resources.CPU.Period.
-func (g *Generator) SetLinuxResourcesCPUPeriod(period uint64) {
-	g.InitConfigLinuxResourcesCPU()
-	g.Config.Linux.Resources.CPU.Period = &period
-}
-
-// SetLinuxResourcesCPURealtimeRuntime sets g.Config.Linux.Resources.CPU.RealtimeRuntime.
-func (g *Generator) SetLinuxResourcesCPURealtimeRuntime(time int64) {
-	g.InitConfigLinuxResourcesCPU()
-	g.Config.Linux.Resources.CPU.RealtimeRuntime = &time
-}
-
-// SetLinuxResourcesCPURealtimePeriod sets g.Config.Linux.Resources.CPU.RealtimePeriod.
-func (g *Generator) SetLinuxResourcesCPURealtimePeriod(period uint64) {
-	g.InitConfigLinuxResourcesCPU()
-	g.Config.Linux.Resources.CPU.RealtimePeriod = &period
-}
-
-// SetLinuxResourcesCPUCpus sets g.Config.Linux.Resources.CPU.Cpus.
-func (g *Generator) SetLinuxResourcesCPUCpus(cpus string) {
-	g.InitConfigLinuxResourcesCPU()
-	g.Config.Linux.Resources.CPU.Cpus = cpus
-}
-
-// SetLinuxResourcesCPUMems sets g.Config.Linux.Resources.CPU.Mems.
-func (g *Generator) SetLinuxResourcesCPUMems(mems string) {
-	g.InitConfigLinuxResourcesCPU()
-	g.Config.Linux.Resources.CPU.Mems = mems
-}
-
-// AddLinuxResourcesHugepageLimit adds or sets g.Config.Linux.Resources.HugepageLimits.
-func (g *Generator) AddLinuxResourcesHugepageLimit(pageSize string, limit uint64) {
-	hugepageLimit := rspec.LinuxHugepageLimit{
-		Pagesize: pageSize,
-		Limit:    limit,
-	}
-
-	g.initConfigLinuxResources()
-	for i, pageLimit := range g.Config.Linux.Resources.HugepageLimits {
-		if pageLimit.Pagesize == pageSize {
-			g.Config.Linux.Resources.HugepageLimits[i].Limit = limit
-			return
-		}
-	}
-	g.Config.Linux.Resources.HugepageLimits = append(g.Config.Linux.Resources.HugepageLimits, hugepageLimit)
-}
-
-// DropLinuxResourcesHugepageLimit drops a hugepage limit from g.Config.Linux.Resources.HugepageLimits.
-func (g *Generator) DropLinuxResourcesHugepageLimit(pageSize string) {
-	if g.Config == nil || g.Config.Linux == nil || g.Config.Linux.Resources == nil {
-		return
-	}
-
-	for i, pageLimit := range g.Config.Linux.Resources.HugepageLimits {
-		if pageLimit.Pagesize == pageSize {
-			g.Config.Linux.Resources.HugepageLimits = append(g.Config.Linux.Resources.HugepageLimits[:i], g.Config.Linux.Resources.HugepageLimits[i+1:]...)
-			return
-		}
-	}
-}
-
-// SetLinuxResourcesUnified sets the g.Config.Linux.Resources.Unified
-func (g *Generator) SetLinuxResourcesUnified(unified map[string]string) {
-	g.initConfigLinuxResourcesUnified()
-	for k, v := range unified {
-		g.Config.Linux.Resources.Unified[k] = v
-	}
-}
-
-// AddLinuxResourcesUnified adds or updates the key-value pair from g.Config.Linux.Resources.Unified
-func (g *Generator) AddLinuxResourcesUnified(key, val string) {
-	g.initConfigLinuxResourcesUnified()
-	g.Config.Linux.Resources.Unified[key] = val
-}
-
-// DropLinuxResourcesUnified drops a key-value pair from g.Config.Linux.Resources.Unified
-func (g *Generator) DropLinuxResourcesUnified(key string) {
-	if g.Config == nil || g.Config.Linux == nil || g.Config.Linux.Resources == nil || g.Config.Linux.Resources.Unified == nil {
-		return
-	}
-	delete(g.Config.Linux.Resources.Unified, key)
-}
-
-// SetLinuxResourcesMemoryLimit sets g.Config.Linux.Resources.Memory.Limit.
-func (g *Generator) SetLinuxResourcesMemoryLimit(limit int64) {
-	g.initConfigLinuxResourcesMemory()
-	g.Config.Linux.Resources.Memory.Limit = &limit
-}
-
-// SetLinuxResourcesMemoryReservation sets g.Config.Linux.Resources.Memory.Reservation.
-func (g *Generator) SetLinuxResourcesMemoryReservation(reservation int64) {
-	g.initConfigLinuxResourcesMemory()
-	g.Config.Linux.Resources.Memory.Reservation = &reservation
-}
-
-// SetLinuxResourcesMemorySwap sets g.Config.Linux.Resources.Memory.Swap.
-func (g *Generator) SetLinuxResourcesMemorySwap(swap int64) {
-	g.initConfigLinuxResourcesMemory()
-	g.Config.Linux.Resources.Memory.Swap = &swap
-}
-
-// SetLinuxResourcesMemoryKernel sets g.Config.Linux.Resources.Memory.Kernel.
-func (g *Generator) SetLinuxResourcesMemoryKernel(kernel int64) { - g.initConfigLinuxResourcesMemory() - g.Config.Linux.Resources.Memory.Kernel = &kernel -} - -// SetLinuxResourcesMemoryKernelTCP sets g.Config.Linux.Resources.Memory.KernelTCP. -func (g *Generator) SetLinuxResourcesMemoryKernelTCP(kernelTCP int64) { - g.initConfigLinuxResourcesMemory() - g.Config.Linux.Resources.Memory.KernelTCP = &kernelTCP -} - -// SetLinuxResourcesMemorySwappiness sets g.Config.Linux.Resources.Memory.Swappiness. -func (g *Generator) SetLinuxResourcesMemorySwappiness(swappiness uint64) { - g.initConfigLinuxResourcesMemory() - g.Config.Linux.Resources.Memory.Swappiness = &swappiness -} - -// SetLinuxResourcesMemoryDisableOOMKiller sets g.Config.Linux.Resources.Memory.DisableOOMKiller. -func (g *Generator) SetLinuxResourcesMemoryDisableOOMKiller(disable bool) { - g.initConfigLinuxResourcesMemory() - g.Config.Linux.Resources.Memory.DisableOOMKiller = &disable -} - -// SetLinuxResourcesNetworkClassID sets g.Config.Linux.Resources.Network.ClassID. -func (g *Generator) SetLinuxResourcesNetworkClassID(classid uint32) { - g.initConfigLinuxResourcesNetwork() - g.Config.Linux.Resources.Network.ClassID = &classid -} - -// AddLinuxResourcesNetworkPriorities adds or sets g.Config.Linux.Resources.Network.Priorities. -func (g *Generator) AddLinuxResourcesNetworkPriorities(name string, prio uint32) { - g.initConfigLinuxResourcesNetwork() - for i, netPriority := range g.Config.Linux.Resources.Network.Priorities { - if netPriority.Name == name { - g.Config.Linux.Resources.Network.Priorities[i].Priority = prio - return - } - } - interfacePrio := new(rspec.LinuxInterfacePriority) - interfacePrio.Name = name - interfacePrio.Priority = prio - g.Config.Linux.Resources.Network.Priorities = append(g.Config.Linux.Resources.Network.Priorities, *interfacePrio) -} - -// DropLinuxResourcesNetworkPriorities drops one item from g.Config.Linux.Resources.Network.Priorities. -func (g *Generator) DropLinuxResourcesNetworkPriorities(name string) { - if g.Config == nil || g.Config.Linux == nil || g.Config.Linux.Resources == nil || g.Config.Linux.Resources.Network == nil { - return - } - - for i, netPriority := range g.Config.Linux.Resources.Network.Priorities { - if netPriority.Name == name { - g.Config.Linux.Resources.Network.Priorities = append(g.Config.Linux.Resources.Network.Priorities[:i], g.Config.Linux.Resources.Network.Priorities[i+1:]...) - return - } - } -} - -// SetLinuxResourcesPidsLimit sets g.Config.Linux.Resources.Pids.Limit. -func (g *Generator) SetLinuxResourcesPidsLimit(limit int64) { - g.initConfigLinuxResourcesPids() - g.Config.Linux.Resources.Pids.Limit = limit -} - -// ClearLinuxSysctl clears g.Config.Linux.Sysctl. -func (g *Generator) ClearLinuxSysctl() { - if g.Config == nil || g.Config.Linux == nil { - return - } - g.Config.Linux.Sysctl = make(map[string]string) -} - -// AddLinuxSysctl adds a new sysctl config into g.Config.Linux.Sysctl. -func (g *Generator) AddLinuxSysctl(key, value string) { - g.initConfigLinuxSysctl() - g.Config.Linux.Sysctl[key] = value -} - -// RemoveLinuxSysctl removes a sysctl config from g.Config.Linux.Sysctl. -func (g *Generator) RemoveLinuxSysctl(key string) { - if g.Config == nil || g.Config.Linux == nil || g.Config.Linux.Sysctl == nil { - return - } - delete(g.Config.Linux.Sysctl, key) -} - -// ClearLinuxUIDMappings clear g.Config.Linux.UIDMappings. 
-func (g *Generator) ClearLinuxUIDMappings() { - if g.Config == nil || g.Config.Linux == nil { - return - } - g.Config.Linux.UIDMappings = []rspec.LinuxIDMapping{} -} - -// AddLinuxUIDMapping adds uidMap into g.Config.Linux.UIDMappings. -func (g *Generator) AddLinuxUIDMapping(hid, cid, size uint32) { - idMapping := rspec.LinuxIDMapping{ - HostID: hid, - ContainerID: cid, - Size: size, - } - - g.initConfigLinux() - g.Config.Linux.UIDMappings = append(g.Config.Linux.UIDMappings, idMapping) -} - -// ClearLinuxGIDMappings clear g.Config.Linux.GIDMappings. -func (g *Generator) ClearLinuxGIDMappings() { - if g.Config == nil || g.Config.Linux == nil { - return - } - g.Config.Linux.GIDMappings = []rspec.LinuxIDMapping{} -} - -// AddLinuxGIDMapping adds gidMap into g.Config.Linux.GIDMappings. -func (g *Generator) AddLinuxGIDMapping(hid, cid, size uint32) { - idMapping := rspec.LinuxIDMapping{ - HostID: hid, - ContainerID: cid, - Size: size, - } - - g.initConfigLinux() - g.Config.Linux.GIDMappings = append(g.Config.Linux.GIDMappings, idMapping) -} - -// SetLinuxRootPropagation sets g.Config.Linux.RootfsPropagation. -func (g *Generator) SetLinuxRootPropagation(rp string) error { - switch rp { - case "": - case "private": - case "rprivate": - case "slave": - case "rslave": - case "shared": - case "rshared": - case "unbindable": - case "runbindable": - default: - return fmt.Errorf("rootfs-propagation %q must be empty or one of (r)private|(r)slave|(r)shared|(r)unbindable", rp) - } - g.initConfigLinux() - g.Config.Linux.RootfsPropagation = rp - return nil -} - -// ClearPreStartHooks clear g.Config.Hooks.Prestart. -func (g *Generator) ClearPreStartHooks() { - if g.Config == nil || g.Config.Hooks == nil { - return - } - g.Config.Hooks.Prestart = []rspec.Hook{} -} - -// AddPreStartHook add a prestart hook into g.Config.Hooks.Prestart. -func (g *Generator) AddPreStartHook(preStartHook rspec.Hook) { - g.initConfigHooks() - g.Config.Hooks.Prestart = append(g.Config.Hooks.Prestart, preStartHook) -} - -// ClearPostStopHooks clear g.Config.Hooks.Poststop. -func (g *Generator) ClearPostStopHooks() { - if g.Config == nil || g.Config.Hooks == nil { - return - } - g.Config.Hooks.Poststop = []rspec.Hook{} -} - -// AddPostStopHook adds a poststop hook into g.Config.Hooks.Poststop. -func (g *Generator) AddPostStopHook(postStopHook rspec.Hook) { - g.initConfigHooks() - g.Config.Hooks.Poststop = append(g.Config.Hooks.Poststop, postStopHook) -} - -// ClearPostStartHooks clear g.Config.Hooks.Poststart. -func (g *Generator) ClearPostStartHooks() { - if g.Config == nil || g.Config.Hooks == nil { - return - } - g.Config.Hooks.Poststart = []rspec.Hook{} -} - -// AddPostStartHook adds a poststart hook into g.Config.Hooks.Poststart. -func (g *Generator) AddPostStartHook(postStartHook rspec.Hook) { - g.initConfigHooks() - g.Config.Hooks.Poststart = append(g.Config.Hooks.Poststart, postStartHook) -} - -// AddMount adds a mount into g.Config.Mounts. -func (g *Generator) AddMount(mnt rspec.Mount) { - g.initConfig() - - g.Config.Mounts = append(g.Config.Mounts, mnt) -} - -// RemoveMount removes a mount point on the dest directory -func (g *Generator) RemoveMount(dest string) { - g.initConfig() - - for index, mount := range g.Config.Mounts { - if mount.Destination == dest { - g.Config.Mounts = append(g.Config.Mounts[:index], g.Config.Mounts[index+1:]...) 
- return - } - } -} - -// Mounts returns the list of mounts -func (g *Generator) Mounts() []rspec.Mount { - g.initConfig() - - return g.Config.Mounts -} - -// ClearMounts clear g.Config.Mounts -func (g *Generator) ClearMounts() { - if g.Config == nil { - return - } - g.Config.Mounts = []rspec.Mount{} -} - -// SetupPrivileged sets up the privilege-related fields inside g.Config. -func (g *Generator) SetupPrivileged(privileged bool) { - if privileged { // Add all capabilities in privileged mode. - var finalCapList []string - for _, cap := range capability.List() { - if g.HostSpecific && cap > capsCheck.LastCap() { - continue - } - finalCapList = append(finalCapList, fmt.Sprintf("CAP_%s", strings.ToUpper(cap.String()))) - } - g.initConfigLinux() - g.initConfigProcessCapabilities() - g.ClearProcessCapabilities() - g.Config.Process.Capabilities.Bounding = append(g.Config.Process.Capabilities.Bounding, finalCapList...) - g.Config.Process.Capabilities.Effective = append(g.Config.Process.Capabilities.Effective, finalCapList...) - g.Config.Process.Capabilities.Inheritable = append(g.Config.Process.Capabilities.Inheritable, finalCapList...) - g.Config.Process.Capabilities.Permitted = append(g.Config.Process.Capabilities.Permitted, finalCapList...) - g.Config.Process.Capabilities.Ambient = append(g.Config.Process.Capabilities.Ambient, finalCapList...) - g.Config.Process.SelinuxLabel = "" - g.Config.Process.ApparmorProfile = "" - g.Config.Linux.Seccomp = nil - } -} - -// ClearProcessCapabilities clear g.Config.Process.Capabilities. -func (g *Generator) ClearProcessCapabilities() { - if g.Config == nil || g.Config.Process == nil || g.Config.Process.Capabilities == nil { - return - } - g.Config.Process.Capabilities.Bounding = []string{} - g.Config.Process.Capabilities.Effective = []string{} - g.Config.Process.Capabilities.Inheritable = []string{} - g.Config.Process.Capabilities.Permitted = []string{} - g.Config.Process.Capabilities.Ambient = []string{} -} - -// AddProcessCapability adds a process capability into all 5 capability sets. 
-func (g *Generator) AddProcessCapability(c string) error { - cp := strings.ToUpper(c) - if err := capsCheck.CapValid(cp, g.HostSpecific); err != nil { - return err - } - - g.initConfigProcessCapabilities() - - var foundAmbient, foundBounding, foundEffective, foundInheritable, foundPermitted bool - for _, cap := range g.Config.Process.Capabilities.Ambient { - if strings.ToUpper(cap) == cp { - foundAmbient = true - break - } - } - if !foundAmbient { - g.Config.Process.Capabilities.Ambient = append(g.Config.Process.Capabilities.Ambient, cp) - } - - for _, cap := range g.Config.Process.Capabilities.Bounding { - if strings.ToUpper(cap) == cp { - foundBounding = true - break - } - } - if !foundBounding { - g.Config.Process.Capabilities.Bounding = append(g.Config.Process.Capabilities.Bounding, cp) - } - - for _, cap := range g.Config.Process.Capabilities.Effective { - if strings.ToUpper(cap) == cp { - foundEffective = true - break - } - } - if !foundEffective { - g.Config.Process.Capabilities.Effective = append(g.Config.Process.Capabilities.Effective, cp) - } - - for _, cap := range g.Config.Process.Capabilities.Inheritable { - if strings.ToUpper(cap) == cp { - foundInheritable = true - break - } - } - if !foundInheritable { - g.Config.Process.Capabilities.Inheritable = append(g.Config.Process.Capabilities.Inheritable, cp) - } - - for _, cap := range g.Config.Process.Capabilities.Permitted { - if strings.ToUpper(cap) == cp { - foundPermitted = true - break - } - } - if !foundPermitted { - g.Config.Process.Capabilities.Permitted = append(g.Config.Process.Capabilities.Permitted, cp) - } - - return nil -} - -// AddProcessCapabilityAmbient adds a process capability into g.Config.Process.Capabilities.Ambient. -func (g *Generator) AddProcessCapabilityAmbient(c string) error { - cp := strings.ToUpper(c) - if err := capsCheck.CapValid(cp, g.HostSpecific); err != nil { - return err - } - - g.initConfigProcessCapabilities() - - var foundAmbient bool - for _, cap := range g.Config.Process.Capabilities.Ambient { - if strings.ToUpper(cap) == cp { - foundAmbient = true - break - } - } - - if !foundAmbient { - g.Config.Process.Capabilities.Ambient = append(g.Config.Process.Capabilities.Ambient, cp) - } - - return nil -} - -// AddProcessCapabilityBounding adds a process capability into g.Config.Process.Capabilities.Bounding. -func (g *Generator) AddProcessCapabilityBounding(c string) error { - cp := strings.ToUpper(c) - if err := capsCheck.CapValid(cp, g.HostSpecific); err != nil { - return err - } - - g.initConfigProcessCapabilities() - - var foundBounding bool - for _, cap := range g.Config.Process.Capabilities.Bounding { - if strings.ToUpper(cap) == cp { - foundBounding = true - break - } - } - if !foundBounding { - g.Config.Process.Capabilities.Bounding = append(g.Config.Process.Capabilities.Bounding, cp) - } - - return nil -} - -// AddProcessCapabilityEffective adds a process capability into g.Config.Process.Capabilities.Effective. 
-func (g *Generator) AddProcessCapabilityEffective(c string) error { - cp := strings.ToUpper(c) - if err := capsCheck.CapValid(cp, g.HostSpecific); err != nil { - return err - } - - g.initConfigProcessCapabilities() - - var foundEffective bool - for _, cap := range g.Config.Process.Capabilities.Effective { - if strings.ToUpper(cap) == cp { - foundEffective = true - break - } - } - if !foundEffective { - g.Config.Process.Capabilities.Effective = append(g.Config.Process.Capabilities.Effective, cp) - } - - return nil -} - -// AddProcessCapabilityInheritable adds a process capability into g.Config.Process.Capabilities.Inheritable. -func (g *Generator) AddProcessCapabilityInheritable(c string) error { - cp := strings.ToUpper(c) - if err := capsCheck.CapValid(cp, g.HostSpecific); err != nil { - return err - } - - g.initConfigProcessCapabilities() - - var foundInheritable bool - for _, cap := range g.Config.Process.Capabilities.Inheritable { - if strings.ToUpper(cap) == cp { - foundInheritable = true - break - } - } - if !foundInheritable { - g.Config.Process.Capabilities.Inheritable = append(g.Config.Process.Capabilities.Inheritable, cp) - } - - return nil -} - -// AddProcessCapabilityPermitted adds a process capability into g.Config.Process.Capabilities.Permitted. -func (g *Generator) AddProcessCapabilityPermitted(c string) error { - cp := strings.ToUpper(c) - if err := capsCheck.CapValid(cp, g.HostSpecific); err != nil { - return err - } - - g.initConfigProcessCapabilities() - - var foundPermitted bool - for _, cap := range g.Config.Process.Capabilities.Permitted { - if strings.ToUpper(cap) == cp { - foundPermitted = true - break - } - } - if !foundPermitted { - g.Config.Process.Capabilities.Permitted = append(g.Config.Process.Capabilities.Permitted, cp) - } - - return nil -} - -// DropProcessCapability drops a process capability from all 5 capability sets. -func (g *Generator) DropProcessCapability(c string) error { - if g.Config == nil || g.Config.Process == nil || g.Config.Process.Capabilities == nil { - return nil - } - - cp := strings.ToUpper(c) - for i, cap := range g.Config.Process.Capabilities.Ambient { - if strings.ToUpper(cap) == cp { - g.Config.Process.Capabilities.Ambient = removeFunc(g.Config.Process.Capabilities.Ambient, i) - } - } - for i, cap := range g.Config.Process.Capabilities.Bounding { - if strings.ToUpper(cap) == cp { - g.Config.Process.Capabilities.Bounding = removeFunc(g.Config.Process.Capabilities.Bounding, i) - } - } - for i, cap := range g.Config.Process.Capabilities.Effective { - if strings.ToUpper(cap) == cp { - g.Config.Process.Capabilities.Effective = removeFunc(g.Config.Process.Capabilities.Effective, i) - } - } - for i, cap := range g.Config.Process.Capabilities.Inheritable { - if strings.ToUpper(cap) == cp { - g.Config.Process.Capabilities.Inheritable = removeFunc(g.Config.Process.Capabilities.Inheritable, i) - } - } - for i, cap := range g.Config.Process.Capabilities.Permitted { - if strings.ToUpper(cap) == cp { - g.Config.Process.Capabilities.Permitted = removeFunc(g.Config.Process.Capabilities.Permitted, i) - } - } - - return capsCheck.CapValid(cp, false) -} - -// DropProcessCapabilityAmbient drops a process capability from g.Config.Process.Capabilities.Ambient. 
-func (g *Generator) DropProcessCapabilityAmbient(c string) error { - if g.Config == nil || g.Config.Process == nil || g.Config.Process.Capabilities == nil { - return nil - } - - cp := strings.ToUpper(c) - for i, cap := range g.Config.Process.Capabilities.Ambient { - if strings.ToUpper(cap) == cp { - g.Config.Process.Capabilities.Ambient = removeFunc(g.Config.Process.Capabilities.Ambient, i) - } - } - - return capsCheck.CapValid(cp, false) -} - -// DropProcessCapabilityBounding drops a process capability from g.Config.Process.Capabilities.Bounding. -func (g *Generator) DropProcessCapabilityBounding(c string) error { - if g.Config == nil || g.Config.Process == nil || g.Config.Process.Capabilities == nil { - return nil - } - - cp := strings.ToUpper(c) - for i, cap := range g.Config.Process.Capabilities.Bounding { - if strings.ToUpper(cap) == cp { - g.Config.Process.Capabilities.Bounding = removeFunc(g.Config.Process.Capabilities.Bounding, i) - } - } - - return capsCheck.CapValid(cp, false) -} - -// DropProcessCapabilityEffective drops a process capability from g.Config.Process.Capabilities.Effective. -func (g *Generator) DropProcessCapabilityEffective(c string) error { - if g.Config == nil || g.Config.Process == nil || g.Config.Process.Capabilities == nil { - return nil - } - - cp := strings.ToUpper(c) - for i, cap := range g.Config.Process.Capabilities.Effective { - if strings.ToUpper(cap) == cp { - g.Config.Process.Capabilities.Effective = removeFunc(g.Config.Process.Capabilities.Effective, i) - } - } - - return capsCheck.CapValid(cp, false) -} - -// DropProcessCapabilityInheritable drops a process capability from g.Config.Process.Capabilities.Inheritable. -func (g *Generator) DropProcessCapabilityInheritable(c string) error { - if g.Config == nil || g.Config.Process == nil || g.Config.Process.Capabilities == nil { - return nil - } - - cp := strings.ToUpper(c) - for i, cap := range g.Config.Process.Capabilities.Inheritable { - if strings.ToUpper(cap) == cp { - g.Config.Process.Capabilities.Inheritable = removeFunc(g.Config.Process.Capabilities.Inheritable, i) - } - } - - return capsCheck.CapValid(cp, false) -} - -// DropProcessCapabilityPermitted drops a process capability from g.Config.Process.Capabilities.Permitted. 
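Reviewer note on the helpers being dropped from the vendor tree here: they were the supported way to edit the five OCI capability sets through a Generator. A minimal usage sketch, assuming the generate.New(os) constructor shipped by this vendored revision (the constructor itself is outside this hunk):

```go
package main

import (
	"fmt"

	"github.com/opencontainers/runtime-tools/generate"
)

func main() {
	// New pre-fills a Generator with a default spec for the target OS.
	g, err := generate.New("linux")
	if err != nil {
		fmt.Println(err)
		return
	}
	// Upper-cases and validates the name, then appends it to every set
	// (ambient, bounding, effective, inheritable, permitted) that does
	// not already contain it.
	if err := g.AddProcessCapability("cap_net_admin"); err != nil {
		fmt.Println(err)
		return
	}
	// Removes it from all five sets; the trailing CapValid call means a
	// misspelled name is reported even when nothing matched.
	if err := g.DropProcessCapability("CAP_NET_ADMIN"); err != nil {
		fmt.Println(err)
	}
}
```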
-func (g *Generator) DropProcessCapabilityPermitted(c string) error { - if g.Config == nil || g.Config.Process == nil || g.Config.Process.Capabilities == nil { - return nil - } - - cp := strings.ToUpper(c) - for i, cap := range g.Config.Process.Capabilities.Permitted { - if strings.ToUpper(cap) == cp { - g.Config.Process.Capabilities.Permitted = removeFunc(g.Config.Process.Capabilities.Permitted, i) - } - } - - return capsCheck.CapValid(cp, false) -} - -func mapStrToNamespace(ns string, path string) (rspec.LinuxNamespace, error) { - switch ns { - case "network": - return rspec.LinuxNamespace{Type: rspec.NetworkNamespace, Path: path}, nil - case "pid": - return rspec.LinuxNamespace{Type: rspec.PIDNamespace, Path: path}, nil - case "mount": - return rspec.LinuxNamespace{Type: rspec.MountNamespace, Path: path}, nil - case "ipc": - return rspec.LinuxNamespace{Type: rspec.IPCNamespace, Path: path}, nil - case "uts": - return rspec.LinuxNamespace{Type: rspec.UTSNamespace, Path: path}, nil - case "user": - return rspec.LinuxNamespace{Type: rspec.UserNamespace, Path: path}, nil - case "cgroup": - return rspec.LinuxNamespace{Type: rspec.CgroupNamespace, Path: path}, nil - default: - return rspec.LinuxNamespace{}, fmt.Errorf("unrecognized namespace %q", ns) - } -} - -// ClearLinuxNamespaces clear g.Config.Linux.Namespaces. -func (g *Generator) ClearLinuxNamespaces() { - if g.Config == nil || g.Config.Linux == nil { - return - } - g.Config.Linux.Namespaces = []rspec.LinuxNamespace{} -} - -// AddOrReplaceLinuxNamespace adds or replaces a namespace inside -// g.Config.Linux.Namespaces. -func (g *Generator) AddOrReplaceLinuxNamespace(ns string, path string) error { - namespace, err := mapStrToNamespace(ns, path) - if err != nil { - return err - } - - g.initConfigLinux() - for i, ns := range g.Config.Linux.Namespaces { - if ns.Type == namespace.Type { - g.Config.Linux.Namespaces[i] = namespace - return nil - } - } - g.Config.Linux.Namespaces = append(g.Config.Linux.Namespaces, namespace) - return nil -} - -// RemoveLinuxNamespace removes a namespace from g.Config.Linux.Namespaces. -func (g *Generator) RemoveLinuxNamespace(ns string) error { - namespace, err := mapStrToNamespace(ns, "") - if err != nil { - return err - } - - if g.Config == nil || g.Config.Linux == nil { - return nil - } - for i, ns := range g.Config.Linux.Namespaces { - if ns.Type == namespace.Type { - g.Config.Linux.Namespaces = append(g.Config.Linux.Namespaces[:i], g.Config.Linux.Namespaces[i+1:]...) - return nil - } - } - return nil -} - -// AddDevice - add a device into g.Config.Linux.Devices -func (g *Generator) AddDevice(device rspec.LinuxDevice) { - g.initConfigLinux() - - for i, dev := range g.Config.Linux.Devices { - if dev.Path == device.Path { - g.Config.Linux.Devices[i] = device - return - } - } - - g.Config.Linux.Devices = append(g.Config.Linux.Devices, device) -} - -// RemoveDevice remove a device from g.Config.Linux.Devices -func (g *Generator) RemoveDevice(path string) { - if g.Config == nil || g.Config.Linux == nil || g.Config.Linux.Devices == nil { - return - } - - for i, device := range g.Config.Linux.Devices { - if device.Path == path { - g.Config.Linux.Devices = append(g.Config.Linux.Devices[:i], g.Config.Linux.Devices[i+1:]...) 
- return - } - } -} - -// ClearLinuxDevices clears g.Config.Linux.Devices -func (g *Generator) ClearLinuxDevices() { - if g.Config == nil || g.Config.Linux == nil || g.Config.Linux.Devices == nil { - return - } - - g.Config.Linux.Devices = []rspec.LinuxDevice{} -} - -// AddLinuxResourcesDevice - add a device into g.Config.Linux.Resources.Devices -func (g *Generator) AddLinuxResourcesDevice(allow bool, devType string, major, minor *int64, access string) { - g.initConfigLinuxResources() - - device := rspec.LinuxDeviceCgroup{ - Allow: allow, - Type: devType, - Access: access, - Major: major, - Minor: minor, - } - g.Config.Linux.Resources.Devices = append(g.Config.Linux.Resources.Devices, device) -} - -// RemoveLinuxResourcesDevice - remove a device from g.Config.Linux.Resources.Devices -func (g *Generator) RemoveLinuxResourcesDevice(allow bool, devType string, major, minor *int64, access string) { - if g.Config == nil || g.Config.Linux == nil || g.Config.Linux.Resources == nil { - return - } - for i, device := range g.Config.Linux.Resources.Devices { - if device.Allow == allow && - (devType == device.Type || (devType != "" && device.Type != "" && devType == device.Type)) && - (access == device.Access || (access != "" && device.Access != "" && access == device.Access)) && - (major == device.Major || (major != nil && device.Major != nil && *major == *device.Major)) && - (minor == device.Minor || (minor != nil && device.Minor != nil && *minor == *device.Minor)) { - - g.Config.Linux.Resources.Devices = append(g.Config.Linux.Resources.Devices[:i], g.Config.Linux.Resources.Devices[i+1:]...) - return - } - } -} - -// SetSyscallAction adds rules for syscalls with the specified action -func (g *Generator) SetSyscallAction(arguments seccomp.SyscallOpts) error { - g.initConfigLinuxSeccomp() - return seccomp.ParseSyscallFlag(arguments, g.Config.Linux.Seccomp) -} - -// SetDefaultSeccompAction sets the default action for all syscalls not defined -// and then removes any syscall rules with this action already specified. -func (g *Generator) SetDefaultSeccompAction(action string) error { - g.initConfigLinuxSeccomp() - return seccomp.ParseDefaultAction(action, g.Config.Linux.Seccomp) -} - -// SetDefaultSeccompActionForce only sets the default action for all syscalls not defined -func (g *Generator) SetDefaultSeccompActionForce(action string) error { - g.initConfigLinuxSeccomp() - return seccomp.ParseDefaultActionForce(action, g.Config.Linux.Seccomp) -} - -// SetDomainName sets g.Config.Domainname -func (g *Generator) SetDomainName(domain string) { - g.initConfig() - g.Config.Domainname = domain -} - -// SetSeccompArchitecture sets the supported seccomp architectures -func (g *Generator) SetSeccompArchitecture(architecture string) error { - g.initConfigLinuxSeccomp() - return seccomp.ParseArchitectureFlag(architecture, g.Config.Linux.Seccomp) -} - -// RemoveSeccompRule removes rules for any specified syscalls -func (g *Generator) RemoveSeccompRule(arguments string) error { - g.initConfigLinuxSeccomp() - return seccomp.RemoveAction(arguments, g.Config.Linux.Seccomp) -} - -// RemoveAllSeccompRules removes all syscall rules -func (g *Generator) RemoveAllSeccompRules() error { - g.initConfigLinuxSeccomp() - return seccomp.RemoveAllSeccompRules(g.Config.Linux.Seccomp) -} - -// AddLinuxMaskedPaths adds masked paths into g.Config.Linux.MaskedPaths. 
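The namespace and device helpers deleted above compose the same way. A sketch under the same assumptions (the generate package plus the runtime-spec rspec import used throughout the deleted file); configureSandbox is a hypothetical caller:

```go
// configureSandbox is a hypothetical caller of the deleted helpers.
func configureSandbox(g *generate.Generator) error {
	// Replaces an existing network-namespace entry or appends one;
	// unknown keys fail in mapStrToNamespace ("unrecognized namespace").
	if err := g.AddOrReplaceLinuxNamespace("network", "/var/run/netns/ns1"); err != nil {
		return err
	}
	// AddDevice keys on Path: same path overwrites, new path appends.
	g.AddDevice(rspec.LinuxDevice{Path: "/dev/fuse", Type: "c", Major: 10, Minor: 229})
	// Matching cgroup rule: allow read/write/mknod on char device 10:229.
	major, minor := int64(10), int64(229)
	g.AddLinuxResourcesDevice(true, "c", &major, &minor, "rwm")
	return nil
}
```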
-func (g *Generator) AddLinuxMaskedPaths(path string) { - g.initConfigLinux() - g.Config.Linux.MaskedPaths = append(g.Config.Linux.MaskedPaths, path) -} - -// AddLinuxReadonlyPaths adds readonly paths into g.Config.Linux.MaskedPaths. -func (g *Generator) AddLinuxReadonlyPaths(path string) { - g.initConfigLinux() - g.Config.Linux.ReadonlyPaths = append(g.Config.Linux.ReadonlyPaths, path) -} - -func addOrReplaceBlockIOThrottleDevice(tmpList []rspec.LinuxThrottleDevice, major int64, minor int64, rate uint64) []rspec.LinuxThrottleDevice { - throttleDevices := tmpList - for i, throttleDevice := range throttleDevices { - if throttleDevice.Major == major && throttleDevice.Minor == minor { - throttleDevices[i].Rate = rate - return throttleDevices - } - } - throttleDevice := new(rspec.LinuxThrottleDevice) - throttleDevice.Major = major - throttleDevice.Minor = minor - throttleDevice.Rate = rate - throttleDevices = append(throttleDevices, *throttleDevice) - - return throttleDevices -} - -func dropBlockIOThrottleDevice(tmpList []rspec.LinuxThrottleDevice, major int64, minor int64) []rspec.LinuxThrottleDevice { - throttleDevices := tmpList - for i, throttleDevice := range throttleDevices { - if throttleDevice.Major == major && throttleDevice.Minor == minor { - throttleDevices = append(throttleDevices[:i], throttleDevices[i+1:]...) - return throttleDevices - } - } - - return throttleDevices -} - -// AddSolarisAnet adds network into g.Config.Solaris.Anet -func (g *Generator) AddSolarisAnet(anet rspec.SolarisAnet) { - g.initConfigSolaris() - g.Config.Solaris.Anet = append(g.Config.Solaris.Anet, anet) -} - -// SetSolarisCappedCPUNcpus sets g.Config.Solaris.CappedCPU.Ncpus -func (g *Generator) SetSolarisCappedCPUNcpus(ncpus string) { - g.initConfigSolarisCappedCPU() - g.Config.Solaris.CappedCPU.Ncpus = ncpus -} - -// SetSolarisCappedMemoryPhysical sets g.Config.Solaris.CappedMemory.Physical -func (g *Generator) SetSolarisCappedMemoryPhysical(physical string) { - g.initConfigSolarisCappedMemory() - g.Config.Solaris.CappedMemory.Physical = physical -} - -// SetSolarisCappedMemorySwap sets g.Config.Solaris.CappedMemory.Swap -func (g *Generator) SetSolarisCappedMemorySwap(swap string) { - g.initConfigSolarisCappedMemory() - g.Config.Solaris.CappedMemory.Swap = swap -} - -// SetSolarisLimitPriv sets g.Config.Solaris.LimitPriv -func (g *Generator) SetSolarisLimitPriv(limitPriv string) { - g.initConfigSolaris() - g.Config.Solaris.LimitPriv = limitPriv -} - -// SetSolarisMaxShmMemory sets g.Config.Solaris.MaxShmMemory -func (g *Generator) SetSolarisMaxShmMemory(memory string) { - g.initConfigSolaris() - g.Config.Solaris.MaxShmMemory = memory -} - -// SetSolarisMilestone sets g.Config.Solaris.Milestone -func (g *Generator) SetSolarisMilestone(milestone string) { - g.initConfigSolaris() - g.Config.Solaris.Milestone = milestone -} - -// SetVMHypervisorPath sets g.Config.VM.Hypervisor.Path -func (g *Generator) SetVMHypervisorPath(path string) error { - if !strings.HasPrefix(path, "/") { - return fmt.Errorf("hypervisorPath %v is not an absolute path", path) - } - g.initConfigVM() - g.Config.VM.Hypervisor.Path = path - return nil -} - -// SetVMHypervisorParameters sets g.Config.VM.Hypervisor.Parameters -func (g *Generator) SetVMHypervisorParameters(parameters []string) { - g.initConfigVM() - g.Config.VM.Hypervisor.Parameters = parameters -} - -// SetVMKernelPath sets g.Config.VM.Kernel.Path -func (g *Generator) SetVMKernelPath(path string) error { - if !strings.HasPrefix(path, "/") { - return fmt.Errorf("kernelPath %v 
is not an absolute path", path) - } - g.initConfigVM() - g.Config.VM.Kernel.Path = path - return nil -} - -// SetVMKernelParameters sets g.Config.VM.Kernel.Parameters -func (g *Generator) SetVMKernelParameters(parameters []string) { - g.initConfigVM() - g.Config.VM.Kernel.Parameters = parameters -} - -// SetVMKernelInitRD sets g.Config.VM.Kernel.InitRD -func (g *Generator) SetVMKernelInitRD(initrd string) error { - if !strings.HasPrefix(initrd, "/") { - return fmt.Errorf("kernelInitrd %v is not an absolute path", initrd) - } - g.initConfigVM() - g.Config.VM.Kernel.InitRD = initrd - return nil -} - -// SetVMImagePath sets g.Config.VM.Image.Path -func (g *Generator) SetVMImagePath(path string) error { - if !strings.HasPrefix(path, "/") { - return fmt.Errorf("imagePath %v is not an absolute path", path) - } - g.initConfigVM() - g.Config.VM.Image.Path = path - return nil -} - -// SetVMImageFormat sets g.Config.VM.Image.Format -func (g *Generator) SetVMImageFormat(format string) error { - switch format { - case "raw": - case "qcow2": - case "vdi": - case "vmdk": - case "vhd": - default: - return fmt.Errorf("Commonly supported formats are: raw, qcow2, vdi, vmdk, vhd") - } - g.initConfigVM() - g.Config.VM.Image.Format = format - return nil -} - -// SetWindowsHypervUntilityVMPath sets g.Config.Windows.HyperV.UtilityVMPath. -func (g *Generator) SetWindowsHypervUntilityVMPath(path string) { - g.initConfigWindowsHyperV() - g.Config.Windows.HyperV.UtilityVMPath = path -} - -// SetWindowsIgnoreFlushesDuringBoot sets g.Config.Windows.IgnoreFlushesDuringBoot. -func (g *Generator) SetWindowsIgnoreFlushesDuringBoot(ignore bool) { - g.initConfigWindows() - g.Config.Windows.IgnoreFlushesDuringBoot = ignore -} - -// AddWindowsLayerFolders adds layer folders into g.Config.Windows.LayerFolders. -func (g *Generator) AddWindowsLayerFolders(folder string) { - g.initConfigWindows() - g.Config.Windows.LayerFolders = append(g.Config.Windows.LayerFolders, folder) -} - -// AddWindowsDevices adds or sets g.Config.Windwos.Devices -func (g *Generator) AddWindowsDevices(id, idType string) error { - if idType != "class" { - return fmt.Errorf("Invalid idType value: %s. Windows only supports a value of class", idType) - } - device := rspec.WindowsDevice{ - ID: id, - IDType: idType, - } - - g.initConfigWindows() - for i, device := range g.Config.Windows.Devices { - if device.ID == id { - g.Config.Windows.Devices[i].IDType = idType - return nil - } - } - g.Config.Windows.Devices = append(g.Config.Windows.Devices, device) - return nil -} - -// SetWindowsNetwork sets g.Config.Windows.Network. -func (g *Generator) SetWindowsNetwork(network rspec.WindowsNetwork) { - g.initConfigWindows() - g.Config.Windows.Network = &network -} - -// SetWindowsNetworkAllowUnqualifiedDNSQuery sets g.Config.Windows.Network.AllowUnqualifiedDNSQuery -func (g *Generator) SetWindowsNetworkAllowUnqualifiedDNSQuery(setting bool) { - g.initConfigWindowsNetwork() - g.Config.Windows.Network.AllowUnqualifiedDNSQuery = setting -} - -// SetWindowsNetworkNamespace sets g.Config.Windows.Network.NetworkNamespace -func (g *Generator) SetWindowsNetworkNamespace(path string) { - g.initConfigWindowsNetwork() - g.Config.Windows.Network.NetworkNamespace = path -} - -// SetWindowsResourcesCPU sets g.Config.Windows.Resources.CPU. -func (g *Generator) SetWindowsResourcesCPU(cpu rspec.WindowsCPUResources) { - g.initConfigWindowsResources() - g.Config.Windows.Resources.CPU = &cpu -} - -// SetWindowsResourcesMemoryLimit sets g.Config.Windows.Resources.Memory.Limit. 
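The VM setters in this stretch validate shape only, never existence: paths must be absolute, and SetVMImageFormat accepts exactly raw, qcow2, vdi, vmdk, or vhd. A sketch, same import assumptions as above, with a hypothetical sketchVMConfig wrapper:

```go
// sketchVMConfig shows the validation behaviour of the VM setters.
func sketchVMConfig() error {
	g, err := generate.New("linux")
	if err != nil {
		return err
	}
	// Relative paths are rejected before the config is touched.
	if err := g.SetVMHypervisorPath("/usr/bin/qemu-system-x86_64"); err != nil {
		return err
	}
	// Anything outside raw/qcow2/vdi/vmdk/vhd errors out here.
	return g.SetVMImageFormat("qcow2")
}
```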
-func (g *Generator) SetWindowsResourcesMemoryLimit(limit uint64) { - g.initConfigWindowsResourcesMemory() - g.Config.Windows.Resources.Memory.Limit = &limit -} - -// SetWindowsResourcesStorage sets g.Config.Windows.Resources.Storage. -func (g *Generator) SetWindowsResourcesStorage(storage rspec.WindowsStorageResources) { - g.initConfigWindowsResources() - g.Config.Windows.Resources.Storage = &storage -} - -// SetWindowsServicing sets g.Config.Windows.Servicing. -func (g *Generator) SetWindowsServicing(servicing bool) { - g.initConfigWindows() - g.Config.Windows.Servicing = servicing -} diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/consts.go b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/consts.go deleted file mode 100644 index f28d8f5875..0000000000 --- a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/consts.go +++ /dev/null @@ -1,7 +0,0 @@ -package seccomp - -const ( - seccompOverwrite = "overwrite" - seccompAppend = "append" - nothing = "nothing" -) diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_action.go b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_action.go deleted file mode 100644 index 25daf0752d..0000000000 --- a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_action.go +++ /dev/null @@ -1,135 +0,0 @@ -package seccomp - -import ( - "fmt" - "strconv" - "strings" - - rspec "github.com/opencontainers/runtime-spec/specs-go" -) - -// SyscallOpts contain options for parsing syscall rules -type SyscallOpts struct { - Action string - Syscall string - Index string - Value string - ValueTwo string - Operator string -} - -// ParseSyscallFlag takes a SyscallOpts struct and the seccomp configuration -// and sets the new syscall rule accordingly -func ParseSyscallFlag(args SyscallOpts, config *rspec.LinuxSeccomp) error { - var arguments []string - if args.Index != "" && args.Value != "" && args.ValueTwo != "" && args.Operator != "" { - arguments = []string{args.Action, args.Syscall, args.Index, args.Value, - args.ValueTwo, args.Operator} - } else { - arguments = []string{args.Action, args.Syscall} - } - - action, _ := parseAction(arguments[0]) - if action == config.DefaultAction && args.argsAreEmpty() { - // default already set, no need to make changes - return nil - } - - var newSyscall rspec.LinuxSyscall - numOfArgs := len(arguments) - if numOfArgs == 6 || numOfArgs == 2 { - argStruct, err := parseArguments(arguments[1:]) - if err != nil { - return err - } - newSyscall = newSyscallStruct(arguments[1], action, argStruct) - } else { - return fmt.Errorf("incorrect number of arguments to ParseSyscall: %d", numOfArgs) - } - - descison, err := decideCourseOfAction(&newSyscall, config.Syscalls) - if err != nil { - return err - } - delimDescison := strings.Split(descison, ":") - - if delimDescison[0] == seccompAppend { - config.Syscalls = append(config.Syscalls, newSyscall) - } - - if delimDescison[0] == seccompOverwrite { - indexForOverwrite, err := strconv.ParseInt(delimDescison[1], 10, 32) - if err != nil { - return err - } - config.Syscalls[indexForOverwrite] = newSyscall - } - - return nil -} - -var actions = map[string]rspec.LinuxSeccompAction{ - "allow": rspec.ActAllow, - "errno": rspec.ActErrno, - "kill": rspec.ActKill, - "trace": rspec.ActTrace, - "trap": rspec.ActTrap, -} - -// Take passed action, return the SCMP_ACT_ version of it -func parseAction(action string) (rspec.LinuxSeccompAction, error) { - a, ok := actions[action] - if !ok { - 
return "", fmt.Errorf("unrecognized action: %s", action) - } - return a, nil -} - -// ParseDefaultAction sets the default action of the seccomp configuration -// and then removes any rules that were already specified with this action -func ParseDefaultAction(action string, config *rspec.LinuxSeccomp) error { - if action == "" { - return nil - } - - defaultAction, err := parseAction(action) - if err != nil { - return err - } - config.DefaultAction = defaultAction - err = RemoveAllMatchingRules(config, defaultAction) - if err != nil { - return err - } - return nil -} - -// ParseDefaultActionForce simply sets the default action of the seccomp configuration -func ParseDefaultActionForce(action string, config *rspec.LinuxSeccomp) error { - if action == "" { - return nil - } - - defaultAction, err := parseAction(action) - if err != nil { - return err - } - config.DefaultAction = defaultAction - return nil -} - -func newSyscallStruct(name string, action rspec.LinuxSeccompAction, args []rspec.LinuxSeccompArg) rspec.LinuxSyscall { - syscallStruct := rspec.LinuxSyscall{ - Names: []string{name}, - Action: action, - Args: args, - } - return syscallStruct -} - -func (s SyscallOpts) argsAreEmpty() bool { - return (s.Index == "" && - s.Value == "" && - s.ValueTwo == "" && - s.Operator == "") -} diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_architecture.go b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_architecture.go deleted file mode 100644 index 9b2bdfd2fa..0000000000 --- a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_architecture.go +++ /dev/null @@ -1,55 +0,0 @@ -package seccomp - -import ( - "fmt" - - rspec "github.com/opencontainers/runtime-spec/specs-go" -) - -// ParseArchitectureFlag takes the raw string passed with the --arch flag, parses it -// and updates the Seccomp config accordingly -func ParseArchitectureFlag(architectureArg string, config *rspec.LinuxSeccomp) error { - correctedArch, err := parseArch(architectureArg) - if err != nil { - return err - } - - shouldAppend := true - for _, alreadySpecified := range config.Architectures { - if correctedArch == alreadySpecified { - shouldAppend = false - } - } - if shouldAppend { - config.Architectures = append(config.Architectures, correctedArch) - } - return nil -} - -func parseArch(arch string) (rspec.Arch, error) { - arches := map[string]rspec.Arch{ - "x86": rspec.ArchX86, - "amd64": rspec.ArchX86_64, - "x32": rspec.ArchX32, - "arm": rspec.ArchARM, - "arm64": rspec.ArchAARCH64, - "mips": rspec.ArchMIPS, - "mips64": rspec.ArchMIPS64, - "mips64n32": rspec.ArchMIPS64N32, - "mipsel": rspec.ArchMIPSEL, - "mipsel64": rspec.ArchMIPSEL64, - "mipsel64n32": rspec.ArchMIPSEL64N32, - "parisc": rspec.ArchPARISC, - "parisc64": rspec.ArchPARISC64, - "ppc": rspec.ArchPPC, - "ppc64": rspec.ArchPPC64, - "ppc64le": rspec.ArchPPC64LE, - "s390": rspec.ArchS390, - "s390x": rspec.ArchS390X, - } - a, ok := arches[arch] - if !ok { - return "", fmt.Errorf("unrecognized architecture: %s", arch) - } - return a, nil -} diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_arguments.go b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_arguments.go deleted file mode 100644 index 2b4c394e67..0000000000 --- a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_arguments.go +++ /dev/null @@ -1,73 +0,0 @@ -package seccomp - -import ( - "fmt" - "strconv" - - rspec "github.com/opencontainers/runtime-spec/specs-go" -) - -// 
parseArguments takes a list of arguments (delimArgs). It parses and fills out -// the argument information and returns a slice of arg structs -func parseArguments(delimArgs []string) ([]rspec.LinuxSeccompArg, error) { - nilArgSlice := []rspec.LinuxSeccompArg{} - numberOfArgs := len(delimArgs) - - // No parameters passed with syscall - if numberOfArgs == 1 { - return nilArgSlice, nil - } - - // Correct number of parameters passed with syscall - if numberOfArgs == 5 { - syscallIndex, err := strconv.ParseUint(delimArgs[1], 10, 0) - if err != nil { - return nilArgSlice, err - } - - syscallValue, err := strconv.ParseUint(delimArgs[2], 10, 64) - if err != nil { - return nilArgSlice, err - } - - syscallValueTwo, err := strconv.ParseUint(delimArgs[3], 10, 64) - if err != nil { - return nilArgSlice, err - } - - syscallOp, err := parseOperator(delimArgs[4]) - if err != nil { - return nilArgSlice, err - } - - argStruct := rspec.LinuxSeccompArg{ - Index: uint(syscallIndex), - Value: syscallValue, - ValueTwo: syscallValueTwo, - Op: syscallOp, - } - - argSlice := []rspec.LinuxSeccompArg{} - argSlice = append(argSlice, argStruct) - return argSlice, nil - } - - return nilArgSlice, fmt.Errorf("incorrect number of arguments passed with syscall: %d", numberOfArgs) -} - -func parseOperator(operator string) (rspec.LinuxSeccompOperator, error) { - operators := map[string]rspec.LinuxSeccompOperator{ - "NE": rspec.OpNotEqual, - "LT": rspec.OpLessThan, - "LE": rspec.OpLessEqual, - "EQ": rspec.OpEqualTo, - "GE": rspec.OpGreaterEqual, - "GT": rspec.OpGreaterThan, - "ME": rspec.OpMaskedEqual, - } - o, ok := operators[operator] - if !ok { - return "", fmt.Errorf("unrecognized operator: %s", operator) - } - return o, nil -} diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_remove.go b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_remove.go deleted file mode 100644 index 59537d49c4..0000000000 --- a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/parse_remove.go +++ /dev/null @@ -1,52 +0,0 @@ -package seccomp - -import ( - "fmt" - "reflect" - "strings" - - rspec "github.com/opencontainers/runtime-spec/specs-go" -) - -// RemoveAction takes the argument string that was passed with the --remove flag, -// parses it, and updates the Seccomp config accordingly -func RemoveAction(arguments string, config *rspec.LinuxSeccomp) error { - if config == nil { - return fmt.Errorf("Cannot remove action from nil Seccomp pointer") - } - - syscallsToRemove := strings.Split(arguments, ",") - - for counter, syscallStruct := range config.Syscalls { - if reflect.DeepEqual(syscallsToRemove, syscallStruct.Names) { - config.Syscalls = append(config.Syscalls[:counter], config.Syscalls[counter+1:]...) 
- } - } - - return nil -} - -// RemoveAllSeccompRules removes all seccomp syscall rules -func RemoveAllSeccompRules(config *rspec.LinuxSeccomp) error { - if config == nil { - return fmt.Errorf("Cannot remove action from nil Seccomp pointer") - } - newSyscallSlice := []rspec.LinuxSyscall{} - config.Syscalls = newSyscallSlice - return nil -} - -// RemoveAllMatchingRules will remove any syscall rules that match the specified action -func RemoveAllMatchingRules(config *rspec.LinuxSeccomp, seccompAction rspec.LinuxSeccompAction) error { - if config == nil { - return fmt.Errorf("Cannot remove action from nil Seccomp pointer") - } - - for _, syscall := range config.Syscalls { - if reflect.DeepEqual(syscall.Action, seccompAction) { - RemoveAction(strings.Join(syscall.Names, ","), config) - } - } - - return nil -} diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default.go b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default.go deleted file mode 100644 index 345a32a61d..0000000000 --- a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default.go +++ /dev/null @@ -1,606 +0,0 @@ -package seccomp - -import ( - "runtime" - - "github.com/opencontainers/runtime-spec/specs-go" - rspec "github.com/opencontainers/runtime-spec/specs-go" -) - -func arches() []rspec.Arch { - native := runtime.GOARCH - - switch native { - case "amd64": - return []rspec.Arch{rspec.ArchX86_64, rspec.ArchX86, rspec.ArchX32} - case "arm64": - return []rspec.Arch{rspec.ArchARM, rspec.ArchAARCH64} - case "mips64": - return []rspec.Arch{rspec.ArchMIPS, rspec.ArchMIPS64, rspec.ArchMIPS64N32} - case "mips64n32": - return []rspec.Arch{rspec.ArchMIPS, rspec.ArchMIPS64, rspec.ArchMIPS64N32} - case "mipsel64": - return []rspec.Arch{rspec.ArchMIPSEL, rspec.ArchMIPSEL64, rspec.ArchMIPSEL64N32} - case "mipsel64n32": - return []rspec.Arch{rspec.ArchMIPSEL, rspec.ArchMIPSEL64, rspec.ArchMIPSEL64N32} - case "s390x": - return []rspec.Arch{rspec.ArchS390, rspec.ArchS390X} - default: - return []rspec.Arch{} - } -} - -// DefaultProfile defines the whitelist for the default seccomp profile. 
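For context on the parse_action.go and parse_arguments.go files just removed: a SyscallOpts either carries all four of Index/Value/ValueTwo/Operator or none of them, and ParseSyscallFlag defers the append-vs-overwrite decision to decideCourseOfAction (deleted further down in syscall_compare.go). A sketch, assuming the seccomp and rspec imports these files use; addSeccompRule is a hypothetical wrapper:

```go
// addSeccompRule registers an unconditional errno rule for ptrace.
func addSeccompRule() error {
	cfg := &rspec.LinuxSeccomp{DefaultAction: rspec.ActAllow}
	// No Index/Value/ValueTwo/Operator: the rule matches every call.
	// Had Action equalled the default with empty args, this would be a
	// no-op; otherwise decideCourseOfAction picks append vs. overwrite.
	return seccomp.ParseSyscallFlag(seccomp.SyscallOpts{
		Action:  "errno",
		Syscall: "ptrace",
	}, cfg)
}
```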
-func DefaultProfile(rs *specs.Spec) *rspec.LinuxSeccomp { - - syscalls := []rspec.LinuxSyscall{ - { - Names: []string{ - "accept", - "accept4", - "access", - "alarm", - "bind", - "brk", - "capget", - "capset", - "chdir", - "chmod", - "chown", - "chown32", - "clock_getres", - "clock_gettime", - "clock_nanosleep", - "close", - "connect", - "copy_file_range", - "creat", - "dup", - "dup2", - "dup3", - "epoll_create", - "epoll_create1", - "epoll_ctl", - "epoll_ctl_old", - "epoll_pwait", - "epoll_wait", - "epoll_wait_old", - "eventfd", - "eventfd2", - "execve", - "execveat", - "exit", - "exit_group", - "faccessat", - "fadvise64", - "fadvise64_64", - "fallocate", - "fanotify_mark", - "fchdir", - "fchmod", - "fchmodat", - "fchown", - "fchown32", - "fchownat", - "fcntl", - "fcntl64", - "fdatasync", - "fgetxattr", - "flistxattr", - "flock", - "fork", - "fremovexattr", - "fsetxattr", - "fstat", - "fstat64", - "fstatat64", - "fstatfs", - "fstatfs64", - "fsync", - "ftruncate", - "ftruncate64", - "futex", - "futimesat", - "getcpu", - "getcwd", - "getdents", - "getdents64", - "getegid", - "getegid32", - "geteuid", - "geteuid32", - "getgid", - "getgid32", - "getgroups", - "getgroups32", - "getitimer", - "getpeername", - "getpgid", - "getpgrp", - "getpid", - "getppid", - "getpriority", - "getrandom", - "getresgid", - "getresgid32", - "getresuid", - "getresuid32", - "getrlimit", - "get_robust_list", - "getrusage", - "getsid", - "getsockname", - "getsockopt", - "get_thread_area", - "gettid", - "gettimeofday", - "getuid", - "getuid32", - "getxattr", - "inotify_add_watch", - "inotify_init", - "inotify_init1", - "inotify_rm_watch", - "io_cancel", - "ioctl", - "io_destroy", - "io_getevents", - "ioprio_get", - "ioprio_set", - "io_setup", - "io_submit", - "ipc", - "kill", - "landlock_add_rule", - "landlock_create_ruleset", - "landlock_restrict_self", - "lchown", - "lchown32", - "lgetxattr", - "link", - "linkat", - "listen", - "listxattr", - "llistxattr", - "_llseek", - "lremovexattr", - "lseek", - "lsetxattr", - "lstat", - "lstat64", - "madvise", - "memfd_create", - "mincore", - "mkdir", - "mkdirat", - "mknod", - "mknodat", - "mlock", - "mlock2", - "mlockall", - "mmap", - "mmap2", - "mprotect", - "mq_getsetattr", - "mq_notify", - "mq_open", - "mq_timedreceive", - "mq_timedsend", - "mq_unlink", - "mremap", - "msgctl", - "msgget", - "msgrcv", - "msgsnd", - "msync", - "munlock", - "munlockall", - "munmap", - "nanosleep", - "newfstatat", - "_newselect", - "open", - "openat", - "pause", - "pipe", - "pipe2", - "poll", - "ppoll", - "prctl", - "pread64", - "preadv", - "prlimit64", - "pselect6", - "pwrite64", - "pwritev", - "read", - "readahead", - "readlink", - "readlinkat", - "readv", - "recv", - "recvfrom", - "recvmmsg", - "recvmsg", - "remap_file_pages", - "removexattr", - "rename", - "renameat", - "renameat2", - "restart_syscall", - "rmdir", - "rt_sigaction", - "rt_sigpending", - "rt_sigprocmask", - "rt_sigqueueinfo", - "rt_sigreturn", - "rt_sigsuspend", - "rt_sigtimedwait", - "rt_tgsigqueueinfo", - "sched_getaffinity", - "sched_getattr", - "sched_getparam", - "sched_get_priority_max", - "sched_get_priority_min", - "sched_getscheduler", - "sched_rr_get_interval", - "sched_setaffinity", - "sched_setattr", - "sched_setparam", - "sched_setscheduler", - "sched_yield", - "seccomp", - "select", - "semctl", - "semget", - "semop", - "semtimedop", - "send", - "sendfile", - "sendfile64", - "sendmmsg", - "sendmsg", - "sendto", - "setfsgid", - "setfsgid32", - "setfsuid", - "setfsuid32", - "setgid", - "setgid32", - "setgroups", - 
"setgroups32", - "setitimer", - "setpgid", - "setpriority", - "setregid", - "setregid32", - "setresgid", - "setresgid32", - "setresuid", - "setresuid32", - "setreuid", - "setreuid32", - "setrlimit", - "set_robust_list", - "setsid", - "setsockopt", - "set_thread_area", - "set_tid_address", - "setuid", - "setuid32", - "setxattr", - "shmat", - "shmctl", - "shmdt", - "shmget", - "shutdown", - "sigaltstack", - "signalfd", - "signalfd4", - "sigreturn", - "socket", - "socketcall", - "socketpair", - "splice", - "stat", - "stat64", - "statfs", - "statfs64", - "statx", - "symlink", - "symlinkat", - "sync", - "sync_file_range", - "syncfs", - "sysinfo", - "syslog", - "tee", - "tgkill", - "time", - "timer_create", - "timer_delete", - "timerfd_create", - "timerfd_gettime", - "timerfd_settime", - "timer_getoverrun", - "timer_gettime", - "timer_settime", - "times", - "tkill", - "truncate", - "truncate64", - "ugetrlimit", - "umask", - "uname", - "unlink", - "unlinkat", - "utime", - "utimensat", - "utimes", - "vfork", - "vmsplice", - "wait4", - "waitid", - "waitpid", - "write", - "writev", - }, - Action: rspec.ActAllow, - Args: []rspec.LinuxSeccompArg{}, - }, - { - Names: []string{"personality"}, - Action: rspec.ActAllow, - Args: []rspec.LinuxSeccompArg{ - { - Index: 0, - Value: 0x0, - Op: rspec.OpEqualTo, - }, - }, - }, - { - Names: []string{"personality"}, - Action: rspec.ActAllow, - Args: []rspec.LinuxSeccompArg{ - { - Index: 0, - Value: 0x0008, - Op: rspec.OpEqualTo, - }, - }, - }, - { - Names: []string{"personality"}, - Action: rspec.ActAllow, - Args: []rspec.LinuxSeccompArg{ - { - Index: 0, - Value: 0xffffffff, - Op: rspec.OpEqualTo, - }, - }, - }, - } - var sysCloneFlagsIndex uint - - capSysAdmin := false - caps := make(map[string]bool) - - for _, cap := range rs.Process.Capabilities.Bounding { - caps[cap] = true - } - for _, cap := range rs.Process.Capabilities.Effective { - caps[cap] = true - } - for _, cap := range rs.Process.Capabilities.Inheritable { - caps[cap] = true - } - for _, cap := range rs.Process.Capabilities.Permitted { - caps[cap] = true - } - for _, cap := range rs.Process.Capabilities.Ambient { - caps[cap] = true - } - - for cap := range caps { - switch cap { - case "CAP_DAC_READ_SEARCH": - syscalls = append(syscalls, []rspec.LinuxSyscall{ - { - Names: []string{"open_by_handle_at"}, - Action: rspec.ActAllow, - Args: []rspec.LinuxSeccompArg{}, - }, - }...) - case "CAP_SYS_ADMIN": - capSysAdmin = true - syscalls = append(syscalls, []rspec.LinuxSyscall{ - { - Names: []string{ - "bpf", - "clone", - "fanotify_init", - "lookup_dcookie", - "mount", - "name_to_handle_at", - "perf_event_open", - "setdomainname", - "sethostname", - "setns", - "umount", - "umount2", - "unshare", - }, - Action: rspec.ActAllow, - Args: []rspec.LinuxSeccompArg{}, - }, - }...) - case "CAP_SYS_BOOT": - syscalls = append(syscalls, []rspec.LinuxSyscall{ - { - Names: []string{"reboot"}, - Action: rspec.ActAllow, - Args: []rspec.LinuxSeccompArg{}, - }, - }...) - case "CAP_SYS_CHROOT": - syscalls = append(syscalls, []rspec.LinuxSyscall{ - { - Names: []string{"chroot"}, - Action: rspec.ActAllow, - Args: []rspec.LinuxSeccompArg{}, - }, - }...) - case "CAP_SYS_MODULE": - syscalls = append(syscalls, []rspec.LinuxSyscall{ - { - Names: []string{ - "delete_module", - "init_module", - "finit_module", - "query_module", - }, - Action: rspec.ActAllow, - Args: []rspec.LinuxSeccompArg{}, - }, - }...) 
- case "CAP_SYS_PACCT": - syscalls = append(syscalls, []rspec.LinuxSyscall{ - { - Names: []string{"acct"}, - Action: rspec.ActAllow, - Args: []rspec.LinuxSeccompArg{}, - }, - }...) - case "CAP_SYS_PTRACE": - syscalls = append(syscalls, []rspec.LinuxSyscall{ - { - Names: []string{ - "kcmp", - "process_vm_readv", - "process_vm_writev", - "ptrace", - }, - Action: rspec.ActAllow, - Args: []rspec.LinuxSeccompArg{}, - }, - }...) - case "CAP_SYS_RAWIO": - syscalls = append(syscalls, []rspec.LinuxSyscall{ - { - Names: []string{ - "iopl", - "ioperm", - }, - Action: rspec.ActAllow, - Args: []rspec.LinuxSeccompArg{}, - }, - }...) - case "CAP_SYS_TIME": - syscalls = append(syscalls, []rspec.LinuxSyscall{ - { - Names: []string{ - "settimeofday", - "stime", - "adjtimex", - }, - Action: rspec.ActAllow, - Args: []rspec.LinuxSeccompArg{}, - }, - }...) - case "CAP_SYS_TTY_CONFIG": - syscalls = append(syscalls, []rspec.LinuxSyscall{ - { - Names: []string{"vhangup"}, - Action: rspec.ActAllow, - Args: []rspec.LinuxSeccompArg{}, - }, - }...) - } - } - - if !capSysAdmin { - syscalls = append(syscalls, []rspec.LinuxSyscall{ - { - Names: []string{"clone"}, - Action: rspec.ActAllow, - Args: []rspec.LinuxSeccompArg{ - { - Index: sysCloneFlagsIndex, - Value: CloneNewNS | CloneNewUTS | CloneNewIPC | CloneNewUser | CloneNewPID | CloneNewNet | CloneNewCgroup, - ValueTwo: 0, - Op: rspec.OpMaskedEqual, - }, - }, - }, - }...) - - } - - arch := runtime.GOARCH - switch arch { - case "arm", "arm64": - syscalls = append(syscalls, []rspec.LinuxSyscall{ - { - Names: []string{ - "breakpoint", - "cacheflush", - "set_tls", - }, - Action: rspec.ActAllow, - Args: []rspec.LinuxSeccompArg{}, - }, - }...) - case "amd64", "x32": - syscalls = append(syscalls, []rspec.LinuxSyscall{ - { - Names: []string{"arch_prctl"}, - Action: rspec.ActAllow, - Args: []rspec.LinuxSeccompArg{}, - }, - }...) - fallthrough - case "x86": - syscalls = append(syscalls, []rspec.LinuxSyscall{ - { - Names: []string{"modify_ldt"}, - Action: rspec.ActAllow, - Args: []rspec.LinuxSeccompArg{}, - }, - }...) - case "s390", "s390x": - syscalls = append(syscalls, []rspec.LinuxSyscall{ - { - Names: []string{ - "s390_pci_mmio_read", - "s390_pci_mmio_write", - "s390_runtime_instr", - }, - Action: rspec.ActAllow, - Args: []rspec.LinuxSeccompArg{}, - }, - }...) - /* Flags parameter of the clone syscall is the 2nd on s390 */ - syscalls = append(syscalls, []rspec.LinuxSyscall{ - { - Names: []string{"clone"}, - Action: rspec.ActAllow, - Args: []rspec.LinuxSeccompArg{ - { - Index: 1, - Value: 2080505856, - ValueTwo: 0, - Op: rspec.OpMaskedEqual, - }, - }, - }, - }...) 
- } - - return &rspec.LinuxSeccomp{ - DefaultAction: rspec.ActErrno, - Architectures: arches(), - Syscalls: syscalls, - } -} diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default_linux.go b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default_linux.go deleted file mode 100644 index 5ca9a6daee..0000000000 --- a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default_linux.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build linux -// +build linux - -package seccomp - -import "golang.org/x/sys/unix" - -// System values passed through on linux -const ( - CloneNewIPC = unix.CLONE_NEWIPC - CloneNewNet = unix.CLONE_NEWNET - CloneNewNS = unix.CLONE_NEWNS - CloneNewPID = unix.CLONE_NEWPID - CloneNewUser = unix.CLONE_NEWUSER - CloneNewUTS = unix.CLONE_NEWUTS - CloneNewCgroup = unix.CLONE_NEWCGROUP -) diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default_unsupported.go b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default_unsupported.go deleted file mode 100644 index b8c1bc26e2..0000000000 --- a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default_unsupported.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build !linux -// +build !linux - -package seccomp - -// These are copied from linux/amd64 syscall values, as a reference for other -// platforms to have access to -const ( - CloneNewIPC = 0x8000000 - CloneNewNet = 0x40000000 - CloneNewNS = 0x20000 - CloneNewPID = 0x20000000 - CloneNewUser = 0x10000000 - CloneNewUTS = 0x4000000 - CloneNewCgroup = 0x02000000 -) diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/syscall_compare.go b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/syscall_compare.go deleted file mode 100644 index 5e84653a94..0000000000 --- a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/syscall_compare.go +++ /dev/null @@ -1,124 +0,0 @@ -package seccomp - -import ( - "fmt" - "reflect" - "strconv" - "strings" - - rspec "github.com/opencontainers/runtime-spec/specs-go" -) - -// Determine if a new syscall rule should be appended, overwrite an existing rule -// or if no action should be taken at all -func decideCourseOfAction(newSyscall *rspec.LinuxSyscall, syscalls []rspec.LinuxSyscall) (string, error) { - ruleForSyscallAlreadyExists := false - - var sliceOfDeterminedActions []string - for i, syscall := range syscalls { - if sameName(&syscall, newSyscall) { - ruleForSyscallAlreadyExists = true - - if identical(newSyscall, &syscall) { - sliceOfDeterminedActions = append(sliceOfDeterminedActions, nothing) - } - - if sameAction(newSyscall, &syscall) { - if bothHaveArgs(newSyscall, &syscall) { - sliceOfDeterminedActions = append(sliceOfDeterminedActions, seccompAppend) - } - if onlyOneHasArgs(newSyscall, &syscall) { - if firstParamOnlyHasArgs(newSyscall, &syscall) { - sliceOfDeterminedActions = append(sliceOfDeterminedActions, "overwrite:"+strconv.Itoa(i)) - } else { - sliceOfDeterminedActions = append(sliceOfDeterminedActions, nothing) - } - } - } - - if !sameAction(newSyscall, &syscall) { - if bothHaveArgs(newSyscall, &syscall) { - if sameArgs(newSyscall, &syscall) { - sliceOfDeterminedActions = append(sliceOfDeterminedActions, "overwrite:"+strconv.Itoa(i)) - } - if !sameArgs(newSyscall, &syscall) { - sliceOfDeterminedActions = append(sliceOfDeterminedActions, seccompAppend) - } - } - if onlyOneHasArgs(newSyscall, &syscall) { - sliceOfDeterminedActions = 
append(sliceOfDeterminedActions, seccompAppend) - } - if neitherHasArgs(newSyscall, &syscall) { - sliceOfDeterminedActions = append(sliceOfDeterminedActions, "overwrite:"+strconv.Itoa(i)) - } - } - } - } - - if !ruleForSyscallAlreadyExists { - sliceOfDeterminedActions = append(sliceOfDeterminedActions, seccompAppend) - } - - // Nothing has highest priority - for _, determinedAction := range sliceOfDeterminedActions { - if determinedAction == nothing { - return determinedAction, nil - } - } - - // Overwrite has second highest priority - for _, determinedAction := range sliceOfDeterminedActions { - if strings.Contains(determinedAction, seccompOverwrite) { - return determinedAction, nil - } - } - - // Append has the lowest priority - for _, determinedAction := range sliceOfDeterminedActions { - if determinedAction == seccompAppend { - return determinedAction, nil - } - } - - return "", fmt.Errorf("Trouble determining action: %s", sliceOfDeterminedActions) -} - -func hasArguments(config *rspec.LinuxSyscall) bool { - nilSyscall := new(rspec.LinuxSyscall) - return !sameArgs(nilSyscall, config) -} - -func identical(config1, config2 *rspec.LinuxSyscall) bool { - return reflect.DeepEqual(config1, config2) -} - -func sameName(config1, config2 *rspec.LinuxSyscall) bool { - return reflect.DeepEqual(config1.Names, config2.Names) -} - -func sameAction(config1, config2 *rspec.LinuxSyscall) bool { - return config1.Action == config2.Action -} - -func sameArgs(config1, config2 *rspec.LinuxSyscall) bool { - return reflect.DeepEqual(config1.Args, config2.Args) -} - -func bothHaveArgs(config1, config2 *rspec.LinuxSyscall) bool { - return hasArguments(config1) && hasArguments(config2) -} - -func onlyOneHasArgs(config1, config2 *rspec.LinuxSyscall) bool { - conf1 := hasArguments(config1) - conf2 := hasArguments(config2) - - return (conf1 && !conf2) || (!conf1 && conf2) -} - -func neitherHasArgs(config1, config2 *rspec.LinuxSyscall) bool { - return !hasArguments(config1) && !hasArguments(config2) -} - -func firstParamOnlyHasArgs(config1, config2 *rspec.LinuxSyscall) bool { - return !hasArguments(config1) && hasArguments(config2) -} diff --git a/vendor/github.com/opencontainers/runtime-tools/validate/capabilities/validate.go b/vendor/github.com/opencontainers/runtime-tools/validate/capabilities/validate.go deleted file mode 100644 index 7fa47b77cc..0000000000 --- a/vendor/github.com/opencontainers/runtime-tools/validate/capabilities/validate.go +++ /dev/null @@ -1,31 +0,0 @@ -package capabilities - -import ( - "fmt" - "strings" - - "github.com/syndtr/gocapability/capability" -) - -// CapValid checks whether a capability is valid -func CapValid(c string, hostSpecific bool) error { - isValid := false - - if !strings.HasPrefix(c, "CAP_") { - return fmt.Errorf("capability %s must start with CAP_", c) - } - for _, cap := range capability.List() { - if c == fmt.Sprintf("CAP_%s", strings.ToUpper(cap.String())) { - if hostSpecific && cap > LastCap() { - return fmt.Errorf("%s is not supported on the current host", c) - } - isValid = true - break - } - } - - if !isValid { - return fmt.Errorf("invalid capability: %s", c) - } - return nil -} diff --git a/vendor/github.com/opencontainers/runtime-tools/validate/capabilities/validate_linux.go b/vendor/github.com/opencontainers/runtime-tools/validate/capabilities/validate_linux.go deleted file mode 100644 index f6cb0d550a..0000000000 --- a/vendor/github.com/opencontainers/runtime-tools/validate/capabilities/validate_linux.go +++ /dev/null @@ -1,16 +0,0 @@ -package capabilities - 
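CapValid, removed here, is a syntactic check plus a table lookup against gocapability's list; with hostSpecific=true it additionally rejects capabilities newer than the kernel's cap_last_cap via LastCap (next file). A sketch, assuming the validate/capabilities import path of the deleted file and a hypothetical validateCaps wrapper:

```go
func validateCaps() {
	// Valid: a known name carrying the CAP_ prefix.
	if err := capabilities.CapValid("CAP_NET_ADMIN", false); err != nil {
		fmt.Println(err) // not reached
	}
	// Invalid: the prefix check fires before the table lookup.
	if err := capabilities.CapValid("NET_ADMIN", false); err != nil {
		fmt.Println(err) // capability NET_ADMIN must start with CAP_
	}
}
```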
-import ( - "github.com/syndtr/gocapability/capability" -) - -// LastCap return last cap of system -func LastCap() capability.Cap { - last := capability.CAP_LAST_CAP - // hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap - if last == capability.Cap(63) { - last = capability.CAP_BLOCK_SUSPEND - } - - return last -} diff --git a/vendor/github.com/opencontainers/runtime-tools/validate/capabilities/validate_unsupported.go b/vendor/github.com/opencontainers/runtime-tools/validate/capabilities/validate_unsupported.go deleted file mode 100644 index e4aed632ce..0000000000 --- a/vendor/github.com/opencontainers/runtime-tools/validate/capabilities/validate_unsupported.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build !linux -// +build !linux - -package capabilities - -import ( - "github.com/syndtr/gocapability/capability" -) - -// LastCap return last cap of system -func LastCap() capability.Cap { - return capability.Cap(-1) -} diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go index eed0f225fd..30855de572 100644 --- a/vendor/github.com/spf13/afero/const_bsds.go +++ b/vendor/github.com/spf13/afero/const_bsds.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build aix || darwin || openbsd || freebsd || netbsd || dragonfly -// +build aix darwin openbsd freebsd netbsd dragonfly +//go:build aix || darwin || openbsd || freebsd || netbsd || dragonfly || zos +// +build aix darwin openbsd freebsd netbsd dragonfly zos package afero diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go index 004d57e2ff..12792d21e2 100644 --- a/vendor/github.com/spf13/afero/const_win_unix.go +++ b/vendor/github.com/spf13/afero/const_win_unix.go @@ -10,8 +10,8 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-//go:build !darwin && !openbsd && !freebsd && !dragonfly && !netbsd && !aix -// +build !darwin,!openbsd,!freebsd,!dragonfly,!netbsd,!aix +//go:build !darwin && !openbsd && !freebsd && !dragonfly && !netbsd && !aix && !zos +// +build !darwin,!openbsd,!freebsd,!dragonfly,!netbsd,!aix,!zos package afero diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go index e6b7d70b94..d6c744e8d5 100644 --- a/vendor/github.com/spf13/afero/memmap.go +++ b/vendor/github.com/spf13/afero/memmap.go @@ -16,9 +16,12 @@ package afero import ( "fmt" "io" + "log" "os" "path/filepath" + + "sort" "strings" "sync" "time" @@ -88,6 +91,24 @@ func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData { return pfile } +func (m *MemMapFs) findDescendants(name string) []*mem.FileData { + fData := m.getData() + descendants := make([]*mem.FileData, 0, len(fData)) + for p, dFile := range fData { + if strings.HasPrefix(p, name+FilePathSeparator) { + descendants = append(descendants, dFile) + } + } + + sort.Slice(descendants, func(i, j int) bool { + cur := len(strings.Split(descendants[i].Name(), FilePathSeparator)) + next := len(strings.Split(descendants[j].Name(), FilePathSeparator)) + return cur < next + }) + + return descendants +} + func (m *MemMapFs) registerWithParent(f *mem.FileData, perm os.FileMode) { if f == nil { return @@ -309,29 +330,51 @@ func (m *MemMapFs) Rename(oldname, newname string) error { if _, ok := m.getData()[oldname]; ok { m.mu.RUnlock() m.mu.Lock() - m.unRegisterWithParent(oldname) + err := m.unRegisterWithParent(oldname) + if err != nil { + return err + } + fileData := m.getData()[oldname] - delete(m.getData(), oldname) mem.ChangeFileName(fileData, newname) m.getData()[newname] = fileData + + err = m.renameDescendants(oldname, newname) + if err != nil { + return err + } + + delete(m.getData(), oldname) + m.registerWithParent(fileData, 0) m.mu.Unlock() m.mu.RLock() } else { return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound} } + return nil +} - for p, fileData := range m.getData() { - if strings.HasPrefix(p, oldname+FilePathSeparator) { - m.mu.RUnlock() - m.mu.Lock() - delete(m.getData(), p) - p := strings.Replace(p, oldname, newname, 1) - m.getData()[p] = fileData - m.mu.Unlock() - m.mu.RLock() +func (m *MemMapFs) renameDescendants(oldname, newname string) error { + descendants := m.findDescendants(oldname) + removes := make([]string, 0, len(descendants)) + for _, desc := range descendants { + descNewName := strings.Replace(desc.Name(), oldname, newname, 1) + err := m.unRegisterWithParent(desc.Name()) + if err != nil { + return err } + + removes = append(removes, desc.Name()) + mem.ChangeFileName(desc, descNewName) + m.getData()[descNewName] = desc + + m.registerWithParent(desc, 0) + } + for _, r := range removes { + delete(m.getData(), r) } + return nil } diff --git a/vendor/github.com/syndtr/gocapability/LICENSE b/vendor/github.com/syndtr/gocapability/LICENSE deleted file mode 100644 index 80dd96de77..0000000000 --- a/vendor/github.com/syndtr/gocapability/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright 2013 Suryandaru Triandana -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/syndtr/gocapability/capability/capability.go b/vendor/github.com/syndtr/gocapability/capability/capability.go deleted file mode 100644 index 61a90775e5..0000000000 --- a/vendor/github.com/syndtr/gocapability/capability/capability.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package capability provides utilities for manipulating POSIX capabilities. -package capability - -type Capabilities interface { - // Get check whether a capability present in the given - // capabilities set. The 'which' value should be one of EFFECTIVE, - // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. - Get(which CapType, what Cap) bool - - // Empty check whether all capability bits of the given capabilities - // set are zero. The 'which' value should be one of EFFECTIVE, - // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. - Empty(which CapType) bool - - // Full check whether all capability bits of the given capabilities - // set are one. The 'which' value should be one of EFFECTIVE, - // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. - Full(which CapType) bool - - // Set sets capabilities of the given capabilities sets. The - // 'which' value should be one or combination (OR'ed) of EFFECTIVE, - // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. - Set(which CapType, caps ...Cap) - - // Unset unsets capabilities of the given capabilities sets. The - // 'which' value should be one or combination (OR'ed) of EFFECTIVE, - // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. - Unset(which CapType, caps ...Cap) - - // Fill sets all bits of the given capabilities kind to one. The - // 'kind' value should be one or combination (OR'ed) of CAPS, - // BOUNDS or AMBS. - Fill(kind CapType) - - // Clear sets all bits of the given capabilities kind to zero. The - // 'kind' value should be one or combination (OR'ed) of CAPS, - // BOUNDS or AMBS. - Clear(kind CapType) - - // String return current capabilities state of the given capabilities - // set as string. The 'which' value should be one of EFFECTIVE, - // PERMITTED, INHERITABLE BOUNDING or AMBIENT - StringCap(which CapType) string - - // String return current capabilities state as string. - String() string - - // Load load actual capabilities value. This will overwrite all - // outstanding changes. - Load() error - - // Apply apply the capabilities settings, so all changes will take - // effect. 
- Apply(kind CapType) error -} - -// NewPid initializes a new Capabilities object for given pid when -// it is nonzero, or for the current process if pid is 0. -// -// Deprecated: Replace with NewPid2. For example, replace: -// -// c, err := NewPid(0) -// if err != nil { -// return err -// } -// -// with: -// -// c, err := NewPid2(0) -// if err != nil { -// return err -// } -// err = c.Load() -// if err != nil { -// return err -// } -func NewPid(pid int) (Capabilities, error) { - c, err := newPid(pid) - if err != nil { - return c, err - } - err = c.Load() - return c, err -} - -// NewPid2 initializes a new Capabilities object for given pid when -// it is nonzero, or for the current process if pid is 0. This -// does not load the process's current capabilities; to do that you -// must call Load explicitly. -func NewPid2(pid int) (Capabilities, error) { - return newPid(pid) -} - -// NewFile initializes a new Capabilities object for given file path. -// -// Deprecated: Replace with NewFile2. For example, replace: -// -// c, err := NewFile(path) -// if err != nil { -// return err -// } -// -// with: -// -// c, err := NewFile2(path) -// if err != nil { -// return err -// } -// err = c.Load() -// if err != nil { -// return err -// } -func NewFile(path string) (Capabilities, error) { - c, err := newFile(path) - if err != nil { - return c, err - } - err = c.Load() - return c, err -} - -// NewFile2 creates a new initialized Capabilities object for given -// file path. This does not load the process's current capabilities; -// to do that you must call Load explicitly. -func NewFile2(path string) (Capabilities, error) { - return newFile(path) -} diff --git a/vendor/github.com/syndtr/gocapability/capability/capability_linux.go b/vendor/github.com/syndtr/gocapability/capability/capability_linux.go deleted file mode 100644 index 1567dc8104..0000000000 --- a/vendor/github.com/syndtr/gocapability/capability/capability_linux.go +++ /dev/null @@ -1,642 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
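The NewPid to NewPid2 deprecation documented above splits construction from the fallible Load of current values. A sketch of the recommended pattern, using the gocapability API as described in the deleted comments; checkSysAdmin is a hypothetical wrapper:

```go
// checkSysAdmin reports whether the current process holds an
// effective CAP_SYS_ADMIN, using the non-deprecated two-step API.
func checkSysAdmin() (bool, error) {
	c, err := capability.NewPid2(0) // 0 selects the current process
	if err != nil {
		return false, err
	}
	if err := c.Load(); err != nil { // explicit, fallible snapshot
		return false, err
	}
	return c.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN), nil
}
```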
-
-package capability
-
-import (
-	"bufio"
-	"errors"
-	"fmt"
-	"io"
-	"os"
-	"strings"
-	"syscall"
-)
-
-var errUnknownVers = errors.New("unknown capability version")
-
-const (
-	linuxCapVer1 = 0x19980330
-	linuxCapVer2 = 0x20071026
-	linuxCapVer3 = 0x20080522
-)
-
-var (
-	capVers    uint32
-	capLastCap Cap
-)
-
-func init() {
-	var hdr capHeader
-	capget(&hdr, nil)
-	capVers = hdr.version
-
-	if initLastCap() == nil {
-		CAP_LAST_CAP = capLastCap
-		if capLastCap > 31 {
-			capUpperMask = (uint32(1) << (uint(capLastCap) - 31)) - 1
-		} else {
-			capUpperMask = 0
-		}
-	}
-}
-
-func initLastCap() error {
-	if capLastCap != 0 {
-		return nil
-	}
-
-	f, err := os.Open("/proc/sys/kernel/cap_last_cap")
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-
-	var b []byte = make([]byte, 11)
-	_, err = f.Read(b)
-	if err != nil {
-		return err
-	}
-
-	fmt.Sscanf(string(b), "%d", &capLastCap)
-
-	return nil
-}
-
-func mkStringCap(c Capabilities, which CapType) (ret string) {
-	for i, first := Cap(0), true; i <= CAP_LAST_CAP; i++ {
-		if !c.Get(which, i) {
-			continue
-		}
-		if first {
-			first = false
-		} else {
-			ret += ", "
-		}
-		ret += i.String()
-	}
-	return
-}
-
-func mkString(c Capabilities, max CapType) (ret string) {
-	ret = "{"
-	for i := CapType(1); i <= max; i <<= 1 {
-		ret += " " + i.String() + "=\""
-		if c.Empty(i) {
-			ret += "empty"
-		} else if c.Full(i) {
-			ret += "full"
-		} else {
-			ret += c.StringCap(i)
-		}
-		ret += "\""
-	}
-	ret += " }"
-	return
-}
-
-func newPid(pid int) (c Capabilities, err error) {
-	switch capVers {
-	case linuxCapVer1:
-		p := new(capsV1)
-		p.hdr.version = capVers
-		p.hdr.pid = int32(pid)
-		c = p
-	case linuxCapVer2, linuxCapVer3:
-		p := new(capsV3)
-		p.hdr.version = capVers
-		p.hdr.pid = int32(pid)
-		c = p
-	default:
-		err = errUnknownVers
-		return
-	}
-	return
-}
-
-type capsV1 struct {
-	hdr  capHeader
-	data capData
-}
-
-func (c *capsV1) Get(which CapType, what Cap) bool {
-	if what > 32 {
-		return false
-	}
-
-	switch which {
-	case EFFECTIVE:
-		return (1<<uint(what))&c.data.effective != 0
-	case PERMITTED:
-		return (1<<uint(what))&c.data.permitted != 0
-	case INHERITABLE:
-		return (1<<uint(what))&c.data.inheritable != 0
-	}
-
-	return false
-}
-
-func (c *capsV1) Set(which CapType, caps ...Cap) {
-	for _, what := range caps {
-		if what > 32 {
-			continue
-		}
-
-		if which&EFFECTIVE != 0 {
-			c.data.effective |= 1 << uint(what)
-		}
-		if which&PERMITTED != 0 {
-			c.data.permitted |= 1 << uint(what)
-		}
-		if which&INHERITABLE != 0 {
-			c.data.inheritable |= 1 << uint(what)
-		}
-	}
-}
-
-func (c *capsV1) Unset(which CapType, caps ...Cap) {
-	for _, what := range caps {
-		if what > 32 {
-			continue
-		}
-
-		if which&EFFECTIVE != 0 {
-			c.data.effective &= ^(1 << uint(what))
-		}
-		if which&PERMITTED != 0 {
-			c.data.permitted &= ^(1 << uint(what))
-		}
-		if which&INHERITABLE != 0 {
-			c.data.inheritable &= ^(1 << uint(what))
-		}
-	}
-}
-
-func (c *capsV1) Fill(kind CapType) {
-	if kind&CAPS == CAPS {
-		c.data.effective = 0x7fffffff
-		c.data.permitted = 0x7fffffff
-		c.data.inheritable = 0
-	}
-}
-
-func (c *capsV1) Clear(kind CapType) {
-	if kind&CAPS == CAPS {
-		c.data.effective = 0
-		c.data.permitted = 0
-		c.data.inheritable = 0
-	}
-}
-
-func (c *capsV1) StringCap(which CapType) (ret string) {
-	return mkStringCap(c, which)
-}
-
-func (c *capsV1) String() (ret string) {
-	return mkString(c, BOUNDING)
-}
-
-func (c *capsV1) Load() (err error) {
-	return capget(&c.hdr, &c.data)
-}
-
-func (c *capsV1) Apply(kind CapType) error {
-	if kind&CAPS == CAPS {
-		return capset(&c.hdr, &c.data)
-	}
-	return nil
-}
-
-type capsV3 struct {
-	hdr     capHeader
-	data    [2]capData
-	bounds  [2]uint32
-	ambient [2]uint32
-}
-
-func (c *capsV3) Get(which CapType, what Cap) bool {
-	var i uint
-	if what > 31 {
-		i = uint(what) >> 5
-		what %= 32
-	}
-
-	switch which {
-	case EFFECTIVE:
-		return (1<<uint(what))&c.data[i].effective != 0
-	case PERMITTED:
-		return (1<<uint(what))&c.data[i].permitted != 0
-	case INHERITABLE:
-		return (1<<uint(what))&c.data[i].inheritable != 0
-	case BOUNDING:
-		return (1<<uint(what))&c.bounds[i] != 0
-	case AMBIENT:
-		return (1<<uint(what))&c.ambient[i] != 0
-	}
-
-	return false
-}
-
-func (c *capsV3) Set(which CapType, caps ...Cap) {
-	for _, what := range caps {
-		var i uint
-		if what > 31 {
-			i = uint(what) >> 5
-			what %= 32
-		}
-
-		if which&EFFECTIVE != 0 {
-			c.data[i].effective |= 1 << uint(what)
-		}
-		if which&PERMITTED != 0 {
-			c.data[i].permitted |= 1 << uint(what)
-		}
-		if which&INHERITABLE != 0 {
-			c.data[i].inheritable |= 1 << uint(what)
-		}
-		if which&BOUNDING != 0 {
-			c.bounds[i] |= 1 << uint(what)
-		}
-		if which&AMBIENT != 0 {
-			c.ambient[i] |= 1 << uint(what)
-		}
-	}
-}
-
-func (c *capsV3) Unset(which CapType, caps ...Cap) {
-	for _, what := range caps {
-		var i uint
-		if what > 31 {
-			i = uint(what) >> 5
-			what %= 32
-		}
-
-		if which&EFFECTIVE != 0 {
-			c.data[i].effective &= ^(1 << uint(what))
-		}
-		if which&PERMITTED != 0 {
-			c.data[i].permitted &= ^(1 << uint(what))
-		}
-		if which&INHERITABLE != 0 {
-			c.data[i].inheritable &= ^(1 << uint(what))
-		}
-		if which&BOUNDING != 0 {
-			c.bounds[i] &= ^(1 << uint(what))
-		}
-		if which&AMBIENT != 0 {
-			c.ambient[i] &= ^(1 << uint(what))
-		}
-	}
-}
-
-func (c *capsV3) Fill(kind CapType) {
-	if kind&CAPS == CAPS {
-		c.data[0].effective = 0xffffffff
-		c.data[0].permitted = 0xffffffff
-		c.data[0].inheritable = 0
-		c.data[1].effective = 0xffffffff
-		c.data[1].permitted = 0xffffffff
-		c.data[1].inheritable = 0
-	}
-
-	if kind&BOUNDS == BOUNDS {
-		c.bounds[0] = 0xffffffff
-		c.bounds[1] = 0xffffffff
-	}
-	if kind&AMBS == AMBS {
-		c.ambient[0] = 0xffffffff
-		c.ambient[1] = 0xffffffff
-	}
-}
-
-func (c *capsV3) Clear(kind CapType) {
-	if kind&CAPS == CAPS {
-		c.data[0].effective = 0
-		c.data[0].permitted = 0
-		c.data[0].inheritable = 0
-		c.data[1].effective = 0
-		c.data[1].permitted = 0
-		c.data[1].inheritable = 0
-	}
-
-	if kind&BOUNDS == BOUNDS {
-		c.bounds[0] = 0
-		c.bounds[1] = 0
-	}
-	if kind&AMBS == AMBS {
-		c.ambient[0] = 0
-		c.ambient[1] = 0
-	}
-}
-
-func (c *capsV3) StringCap(which CapType) (ret string) {
-	return mkStringCap(c, which)
-}
-
-func (c *capsV3) String() (ret string) {
-	return mkString(c, BOUNDING)
-}
-
-func (c *capsV3) Load() (err error) {
-	err = capget(&c.hdr, &c.data[0])
-	if err != nil {
-		return
-	}
-
-	var status_path string
-
-	if c.hdr.pid == 0 {
-		status_path = fmt.Sprintf("/proc/self/status")
-	} else {
-		status_path = fmt.Sprintf("/proc/%d/status", c.hdr.pid)
-	}
-
-	f, err := os.Open(status_path)
-	if err != nil {
-		return
-	}
-	b := bufio.NewReader(f)
-	for {
-		line, e := b.ReadString('\n')
-		if e != nil {
-			if e != io.EOF {
-				err = e
-			}
-			break
-		}
-		if strings.HasPrefix(line, "CapB") {
-			fmt.Sscanf(line[4:], "nd: %08x%08x", &c.bounds[1], &c.bounds[0])
-			continue
-		}
-		if strings.HasPrefix(line, "CapA") {
-			fmt.Sscanf(line[4:], "mb: %08x%08x", &c.ambient[1], &c.ambient[0])
-			continue
-		}
-	}
-	f.Close()
-
-	return
-}
-
-func (c *capsV3) Apply(kind CapType) (err error) {
-	if kind&BOUNDS == BOUNDS {
-		var data [2]capData
-		err = capget(&c.hdr, &data[0])
-		if err != nil {
-			return
-		}
-		if (1<<uint(CAP_SETPCAP))&data[0].effective != 0 {
-			for i := Cap(0); i <= CAP_LAST_CAP; i++ {
-				if c.Get(BOUNDING, i) {
-					continue
-				}
-				err = prctl(syscall.PR_CAPBSET_DROP, uintptr(i), 0, 0, 0)
-				if err != nil {
-					// Ignore EINVAL since the capability may not be supported in this system.
-					if errno, ok := err.(syscall.Errno); ok && errno == syscall.EINVAL {
-						err = nil
-						continue
-					}
-					return
-				}
-			}
-		}
-	}
-
-	if kind&CAPS == CAPS {
-		err = capset(&c.hdr, &c.data[0])
-		if err != nil {
-			return
-		}
-	}
-
-	if kind&AMBS == AMBS {
-		for i := Cap(0); i <= CAP_LAST_CAP; i++ {
-			action := pr_CAP_AMBIENT_LOWER
-			if c.Get(AMBIENT, i) {
-				action = pr_CAP_AMBIENT_RAISE
-			}
-			err = prctl(pr_CAP_AMBIENT, action, uintptr(i), 0, 0)
-			// Ignore EINVAL as not supported on kernels before 4.3
-			if errno, ok := err.(syscall.Errno); ok && errno == syscall.EINVAL {
-				err = nil
-				continue
-			}
-		}
-	}
-
-	return
-}
-
-func newFile(path string) (c Capabilities, err error) {
-	cap := &capsFile{path: path}
-	return cap, nil
-}
-
-type capsFile struct {
-	path string
-	data vfscapData
-}
-
-func (c *capsFile) Get(which CapType, what Cap) bool {
-	var i uint
-	if what > 31 {
-		if c.data.version == 1 {
-			return false
-		}
-		i = uint(what) >> 5
-		what %= 32
-	}
-
-	switch which {
-	case EFFECTIVE:
-		return (1<<uint(what))&c.data.effective[i] != 0
-	case PERMITTED:
-		return (1<<uint(what))&c.data.data[i].permitted != 0
-	case INHERITABLE:
-		return (1<<uint(what))&c.data.data[i].inheritable != 0
-	}
-
-	return false
-}
-
-func (c *capsFile) Set(which CapType, caps ...Cap) {
-	for _, what := range caps {
-		var i uint
-		if what > 31 {
-			if c.data.version == 1 {
-				continue
-			}
-			i = uint(what) >> 5
-			what %= 32
-		}
-
-		if which&EFFECTIVE != 0 {
-			c.data.effective[i] |= 1 << uint(what)
-		}
-		if which&PERMITTED != 0 {
-			c.data.data[i].permitted |= 1 << uint(what)
-		}
-		if which&INHERITABLE != 0 {
-			c.data.data[i].inheritable |= 1 << uint(what)
-		}
-	}
-}
-
-func (c *capsFile) Unset(which CapType, caps ...Cap) {
-	for _, what := range caps {
-		var i uint
-		if what > 31 {
-			if c.data.version == 1 {
-				continue
-			}
-			i = uint(what) >> 5
-			what %= 32
-		}
-
-		if which&EFFECTIVE != 0
{ - c.data.effective[i] &= ^(1 << uint(what)) - } - if which&PERMITTED != 0 { - c.data.data[i].permitted &= ^(1 << uint(what)) - } - if which&INHERITABLE != 0 { - c.data.data[i].inheritable &= ^(1 << uint(what)) - } - } -} - -func (c *capsFile) Fill(kind CapType) { - if kind&CAPS == CAPS { - c.data.effective[0] = 0xffffffff - c.data.data[0].permitted = 0xffffffff - c.data.data[0].inheritable = 0 - if c.data.version == 2 { - c.data.effective[1] = 0xffffffff - c.data.data[1].permitted = 0xffffffff - c.data.data[1].inheritable = 0 - } - } -} - -func (c *capsFile) Clear(kind CapType) { - if kind&CAPS == CAPS { - c.data.effective[0] = 0 - c.data.data[0].permitted = 0 - c.data.data[0].inheritable = 0 - if c.data.version == 2 { - c.data.effective[1] = 0 - c.data.data[1].permitted = 0 - c.data.data[1].inheritable = 0 - } - } -} - -func (c *capsFile) StringCap(which CapType) (ret string) { - return mkStringCap(c, which) -} - -func (c *capsFile) String() (ret string) { - return mkString(c, INHERITABLE) -} - -func (c *capsFile) Load() (err error) { - return getVfsCap(c.path, &c.data) -} - -func (c *capsFile) Apply(kind CapType) (err error) { - if kind&CAPS == CAPS { - return setVfsCap(c.path, &c.data) - } - return -} diff --git a/vendor/github.com/syndtr/gocapability/capability/capability_noop.go b/vendor/github.com/syndtr/gocapability/capability/capability_noop.go deleted file mode 100644 index 9bb3070c5e..0000000000 --- a/vendor/github.com/syndtr/gocapability/capability/capability_noop.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// +build !linux - -package capability - -import "errors" - -func newPid(pid int) (Capabilities, error) { - return nil, errors.New("not supported") -} - -func newFile(path string) (Capabilities, error) { - return nil, errors.New("not supported") -} diff --git a/vendor/github.com/syndtr/gocapability/capability/enum.go b/vendor/github.com/syndtr/gocapability/capability/enum.go deleted file mode 100644 index ad10785314..0000000000 --- a/vendor/github.com/syndtr/gocapability/capability/enum.go +++ /dev/null @@ -1,309 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package capability - -type CapType uint - -func (c CapType) String() string { - switch c { - case EFFECTIVE: - return "effective" - case PERMITTED: - return "permitted" - case INHERITABLE: - return "inheritable" - case BOUNDING: - return "bounding" - case CAPS: - return "caps" - case AMBIENT: - return "ambient" - } - return "unknown" -} - -const ( - EFFECTIVE CapType = 1 << iota - PERMITTED - INHERITABLE - BOUNDING - AMBIENT - - CAPS = EFFECTIVE | PERMITTED | INHERITABLE - BOUNDS = BOUNDING - AMBS = AMBIENT -) - -//go:generate go run enumgen/gen.go -type Cap int - -// POSIX-draft defined capabilities and Linux extensions. -// -// Defined in https://github.com/torvalds/linux/blob/master/include/uapi/linux/capability.h -const ( - // In a system with the [_POSIX_CHOWN_RESTRICTED] option defined, this - // overrides the restriction of changing file ownership and group - // ownership. - CAP_CHOWN = Cap(0) - - // Override all DAC access, including ACL execute access if - // [_POSIX_ACL] is defined. Excluding DAC access covered by - // CAP_LINUX_IMMUTABLE. 
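Editor's aside before the CAP_* constant list continues: the v3 and file-capability implementations above spread one capability set over two 32-bit words, indexing the word with cap>>5 and the bit with cap%32. A minimal standalone sketch of that addressing (wordOf/maskOf are hypothetical names, not from the vendored code):

package main

import "fmt"

func wordOf(c uint) uint   { return c >> 5 }        // which uint32 word holds capability c
func maskOf(c uint) uint32 { return 1 << (c % 32) } // bit mask within that word

func main() {
	var set [2]uint32
	const capSyslog = 34 // e.g. CAP_SYSLOG from the enum below
	set[wordOf(capSyslog)] |= maskOf(capSyslog)                // Set
	fmt.Println(set[wordOf(capSyslog)]&maskOf(capSyslog) != 0) // Get -> true
	set[wordOf(capSyslog)] &^= maskOf(capSyslog)               // Unset
	fmt.Println(set[wordOf(capSyslog)]&maskOf(capSyslog) != 0) // -> false
}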
- CAP_DAC_OVERRIDE = Cap(1) - - // Overrides all DAC restrictions regarding read and search on files - // and directories, including ACL restrictions if [_POSIX_ACL] is - // defined. Excluding DAC access covered by CAP_LINUX_IMMUTABLE. - CAP_DAC_READ_SEARCH = Cap(2) - - // Overrides all restrictions about allowed operations on files, where - // file owner ID must be equal to the user ID, except where CAP_FSETID - // is applicable. It doesn't override MAC and DAC restrictions. - CAP_FOWNER = Cap(3) - - // Overrides the following restrictions that the effective user ID - // shall match the file owner ID when setting the S_ISUID and S_ISGID - // bits on that file; that the effective group ID (or one of the - // supplementary group IDs) shall match the file owner ID when setting - // the S_ISGID bit on that file; that the S_ISUID and S_ISGID bits are - // cleared on successful return from chown(2) (not implemented). - CAP_FSETID = Cap(4) - - // Overrides the restriction that the real or effective user ID of a - // process sending a signal must match the real or effective user ID - // of the process receiving the signal. - CAP_KILL = Cap(5) - - // Allows setgid(2) manipulation - // Allows setgroups(2) - // Allows forged gids on socket credentials passing. - CAP_SETGID = Cap(6) - - // Allows set*uid(2) manipulation (including fsuid). - // Allows forged pids on socket credentials passing. - CAP_SETUID = Cap(7) - - // Linux-specific capabilities - - // Without VFS support for capabilities: - // Transfer any capability in your permitted set to any pid, - // remove any capability in your permitted set from any pid - // With VFS support for capabilities (neither of above, but) - // Add any capability from current's capability bounding set - // to the current process' inheritable set - // Allow taking bits out of capability bounding set - // Allow modification of the securebits for a process - CAP_SETPCAP = Cap(8) - - // Allow modification of S_IMMUTABLE and S_APPEND file attributes - CAP_LINUX_IMMUTABLE = Cap(9) - - // Allows binding to TCP/UDP sockets below 1024 - // Allows binding to ATM VCIs below 32 - CAP_NET_BIND_SERVICE = Cap(10) - - // Allow broadcasting, listen to multicast - CAP_NET_BROADCAST = Cap(11) - - // Allow interface configuration - // Allow administration of IP firewall, masquerading and accounting - // Allow setting debug option on sockets - // Allow modification of routing tables - // Allow setting arbitrary process / process group ownership on - // sockets - // Allow binding to any address for transparent proxying (also via NET_RAW) - // Allow setting TOS (type of service) - // Allow setting promiscuous mode - // Allow clearing driver statistics - // Allow multicasting - // Allow read/write of device-specific registers - // Allow activation of ATM control sockets - CAP_NET_ADMIN = Cap(12) - - // Allow use of RAW sockets - // Allow use of PACKET sockets - // Allow binding to any address for transparent proxying (also via NET_ADMIN) - CAP_NET_RAW = Cap(13) - - // Allow locking of shared memory segments - // Allow mlock and mlockall (which doesn't really have anything to do - // with IPC) - CAP_IPC_LOCK = Cap(14) - - // Override IPC ownership checks - CAP_IPC_OWNER = Cap(15) - - // Insert and remove kernel modules - modify kernel without limit - CAP_SYS_MODULE = Cap(16) - - // Allow ioperm/iopl access - // Allow sending USB messages to any device via /proc/bus/usb - CAP_SYS_RAWIO = Cap(17) - - // Allow use of chroot() - CAP_SYS_CHROOT = Cap(18) - - // Allow ptrace() of any process - 
CAP_SYS_PTRACE = Cap(19) - - // Allow configuration of process accounting - CAP_SYS_PACCT = Cap(20) - - // Allow configuration of the secure attention key - // Allow administration of the random device - // Allow examination and configuration of disk quotas - // Allow setting the domainname - // Allow setting the hostname - // Allow calling bdflush() - // Allow mount() and umount(), setting up new smb connection - // Allow some autofs root ioctls - // Allow nfsservctl - // Allow VM86_REQUEST_IRQ - // Allow to read/write pci config on alpha - // Allow irix_prctl on mips (setstacksize) - // Allow flushing all cache on m68k (sys_cacheflush) - // Allow removing semaphores - // Used instead of CAP_CHOWN to "chown" IPC message queues, semaphores - // and shared memory - // Allow locking/unlocking of shared memory segment - // Allow turning swap on/off - // Allow forged pids on socket credentials passing - // Allow setting readahead and flushing buffers on block devices - // Allow setting geometry in floppy driver - // Allow turning DMA on/off in xd driver - // Allow administration of md devices (mostly the above, but some - // extra ioctls) - // Allow tuning the ide driver - // Allow access to the nvram device - // Allow administration of apm_bios, serial and bttv (TV) device - // Allow manufacturer commands in isdn CAPI support driver - // Allow reading non-standardized portions of pci configuration space - // Allow DDI debug ioctl on sbpcd driver - // Allow setting up serial ports - // Allow sending raw qic-117 commands - // Allow enabling/disabling tagged queuing on SCSI controllers and sending - // arbitrary SCSI commands - // Allow setting encryption key on loopback filesystem - // Allow setting zone reclaim policy - // Allow everything under CAP_BPF and CAP_PERFMON for backward compatibility - CAP_SYS_ADMIN = Cap(21) - - // Allow use of reboot() - CAP_SYS_BOOT = Cap(22) - - // Allow raising priority and setting priority on other (different - // UID) processes - // Allow use of FIFO and round-robin (realtime) scheduling on own - // processes and setting the scheduling algorithm used by another - // process. - // Allow setting cpu affinity on other processes - CAP_SYS_NICE = Cap(23) - - // Override resource limits. Set resource limits. - // Override quota limits. - // Override reserved space on ext2 filesystem - // Modify data journaling mode on ext3 filesystem (uses journaling - // resources) - // NOTE: ext2 honors fsuid when checking for resource overrides, so - // you can override using fsuid too - // Override size restrictions on IPC message queues - // Allow more than 64hz interrupts from the real-time clock - // Override max number of consoles on console allocation - // Override max number of keymaps - // Control memory reclaim behavior - CAP_SYS_RESOURCE = Cap(24) - - // Allow manipulation of system clock - // Allow irix_stime on mips - // Allow setting the real-time clock - CAP_SYS_TIME = Cap(25) - - // Allow configuration of tty devices - // Allow vhangup() of tty - CAP_SYS_TTY_CONFIG = Cap(26) - - // Allow the privileged aspects of mknod() - CAP_MKNOD = Cap(27) - - // Allow taking of leases on files - CAP_LEASE = Cap(28) - - CAP_AUDIT_WRITE = Cap(29) - CAP_AUDIT_CONTROL = Cap(30) - CAP_SETFCAP = Cap(31) - - // Override MAC access. - // The base kernel enforces no MAC policy. - // An LSM may enforce a MAC policy, and if it does and it chooses - // to implement capability based overrides of that policy, this is - // the capability it should use to do so. 
- CAP_MAC_OVERRIDE = Cap(32) - - // Allow MAC configuration or state changes. - // The base kernel requires no MAC configuration. - // An LSM may enforce a MAC policy, and if it does and it chooses - // to implement capability based checks on modifications to that - // policy or the data required to maintain it, this is the - // capability it should use to do so. - CAP_MAC_ADMIN = Cap(33) - - // Allow configuring the kernel's syslog (printk behaviour) - CAP_SYSLOG = Cap(34) - - // Allow triggering something that will wake the system - CAP_WAKE_ALARM = Cap(35) - - // Allow preventing system suspends - CAP_BLOCK_SUSPEND = Cap(36) - - // Allow reading the audit log via multicast netlink socket - CAP_AUDIT_READ = Cap(37) - - // Allow system performance and observability privileged operations - // using perf_events, i915_perf and other kernel subsystems - CAP_PERFMON = Cap(38) - - // CAP_BPF allows the following BPF operations: - // - Creating all types of BPF maps - // - Advanced verifier features - // - Indirect variable access - // - Bounded loops - // - BPF to BPF function calls - // - Scalar precision tracking - // - Larger complexity limits - // - Dead code elimination - // - And potentially other features - // - Loading BPF Type Format (BTF) data - // - Retrieve xlated and JITed code of BPF programs - // - Use bpf_spin_lock() helper - // - // CAP_PERFMON relaxes the verifier checks further: - // - BPF progs can use of pointer-to-integer conversions - // - speculation attack hardening measures are bypassed - // - bpf_probe_read to read arbitrary kernel memory is allowed - // - bpf_trace_printk to print kernel memory is allowed - // - // CAP_SYS_ADMIN is required to use bpf_probe_write_user. - // - // CAP_SYS_ADMIN is required to iterate system wide loaded - // programs, maps, links, BTFs and convert their IDs to file descriptors. - // - // CAP_PERFMON and CAP_BPF are required to load tracing programs. - // CAP_NET_ADMIN and CAP_BPF are required to load networking programs. - CAP_BPF = Cap(39) - - // Allow checkpoint/restore related operations. - // Introduced in kernel 5.9 - CAP_CHECKPOINT_RESTORE = Cap(40) -) - -var ( - // Highest valid capability of the running kernel. 
- CAP_LAST_CAP = Cap(63) - - capUpperMask = ^uint32(0) -) diff --git a/vendor/github.com/syndtr/gocapability/capability/enum_gen.go b/vendor/github.com/syndtr/gocapability/capability/enum_gen.go deleted file mode 100644 index 2ff9bf4d88..0000000000 --- a/vendor/github.com/syndtr/gocapability/capability/enum_gen.go +++ /dev/null @@ -1,138 +0,0 @@ -// generated file; DO NOT EDIT - use go generate in directory with source - -package capability - -func (c Cap) String() string { - switch c { - case CAP_CHOWN: - return "chown" - case CAP_DAC_OVERRIDE: - return "dac_override" - case CAP_DAC_READ_SEARCH: - return "dac_read_search" - case CAP_FOWNER: - return "fowner" - case CAP_FSETID: - return "fsetid" - case CAP_KILL: - return "kill" - case CAP_SETGID: - return "setgid" - case CAP_SETUID: - return "setuid" - case CAP_SETPCAP: - return "setpcap" - case CAP_LINUX_IMMUTABLE: - return "linux_immutable" - case CAP_NET_BIND_SERVICE: - return "net_bind_service" - case CAP_NET_BROADCAST: - return "net_broadcast" - case CAP_NET_ADMIN: - return "net_admin" - case CAP_NET_RAW: - return "net_raw" - case CAP_IPC_LOCK: - return "ipc_lock" - case CAP_IPC_OWNER: - return "ipc_owner" - case CAP_SYS_MODULE: - return "sys_module" - case CAP_SYS_RAWIO: - return "sys_rawio" - case CAP_SYS_CHROOT: - return "sys_chroot" - case CAP_SYS_PTRACE: - return "sys_ptrace" - case CAP_SYS_PACCT: - return "sys_pacct" - case CAP_SYS_ADMIN: - return "sys_admin" - case CAP_SYS_BOOT: - return "sys_boot" - case CAP_SYS_NICE: - return "sys_nice" - case CAP_SYS_RESOURCE: - return "sys_resource" - case CAP_SYS_TIME: - return "sys_time" - case CAP_SYS_TTY_CONFIG: - return "sys_tty_config" - case CAP_MKNOD: - return "mknod" - case CAP_LEASE: - return "lease" - case CAP_AUDIT_WRITE: - return "audit_write" - case CAP_AUDIT_CONTROL: - return "audit_control" - case CAP_SETFCAP: - return "setfcap" - case CAP_MAC_OVERRIDE: - return "mac_override" - case CAP_MAC_ADMIN: - return "mac_admin" - case CAP_SYSLOG: - return "syslog" - case CAP_WAKE_ALARM: - return "wake_alarm" - case CAP_BLOCK_SUSPEND: - return "block_suspend" - case CAP_AUDIT_READ: - return "audit_read" - case CAP_PERFMON: - return "perfmon" - case CAP_BPF: - return "bpf" - case CAP_CHECKPOINT_RESTORE: - return "checkpoint_restore" - } - return "unknown" -} - -// List returns list of all supported capabilities -func List() []Cap { - return []Cap{ - CAP_CHOWN, - CAP_DAC_OVERRIDE, - CAP_DAC_READ_SEARCH, - CAP_FOWNER, - CAP_FSETID, - CAP_KILL, - CAP_SETGID, - CAP_SETUID, - CAP_SETPCAP, - CAP_LINUX_IMMUTABLE, - CAP_NET_BIND_SERVICE, - CAP_NET_BROADCAST, - CAP_NET_ADMIN, - CAP_NET_RAW, - CAP_IPC_LOCK, - CAP_IPC_OWNER, - CAP_SYS_MODULE, - CAP_SYS_RAWIO, - CAP_SYS_CHROOT, - CAP_SYS_PTRACE, - CAP_SYS_PACCT, - CAP_SYS_ADMIN, - CAP_SYS_BOOT, - CAP_SYS_NICE, - CAP_SYS_RESOURCE, - CAP_SYS_TIME, - CAP_SYS_TTY_CONFIG, - CAP_MKNOD, - CAP_LEASE, - CAP_AUDIT_WRITE, - CAP_AUDIT_CONTROL, - CAP_SETFCAP, - CAP_MAC_OVERRIDE, - CAP_MAC_ADMIN, - CAP_SYSLOG, - CAP_WAKE_ALARM, - CAP_BLOCK_SUSPEND, - CAP_AUDIT_READ, - CAP_PERFMON, - CAP_BPF, - CAP_CHECKPOINT_RESTORE, - } -} diff --git a/vendor/github.com/syndtr/gocapability/capability/syscall_linux.go b/vendor/github.com/syndtr/gocapability/capability/syscall_linux.go deleted file mode 100644 index 3d2bf6927f..0000000000 --- a/vendor/github.com/syndtr/gocapability/capability/syscall_linux.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. 
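Aside before the syscall shim below: the String/List helpers in the enum_gen.go just removed were typically consumed like this. A sketch; the import path is the upstream home of this vendored copy, and CAP_LAST_CAP is the runtime-adjusted bound from enum.go above:

package main

import (
	"fmt"

	"github.com/syndtr/gocapability/capability"
)

func main() {
	for _, c := range capability.List() {
		if c > capability.CAP_LAST_CAP {
			continue // skip caps the running kernel does not know about
		}
		fmt.Println(c.String()) // "chown", "dac_override", ...
	}
}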
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package capability - -import ( - "syscall" - "unsafe" -) - -type capHeader struct { - version uint32 - pid int32 -} - -type capData struct { - effective uint32 - permitted uint32 - inheritable uint32 -} - -func capget(hdr *capHeader, data *capData) (err error) { - _, _, e1 := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = e1 - } - return -} - -func capset(hdr *capHeader, data *capData) (err error) { - _, _, e1 := syscall.Syscall(syscall.SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = e1 - } - return -} - -// not yet in syscall -const ( - pr_CAP_AMBIENT = 47 - pr_CAP_AMBIENT_IS_SET = uintptr(1) - pr_CAP_AMBIENT_RAISE = uintptr(2) - pr_CAP_AMBIENT_LOWER = uintptr(3) - pr_CAP_AMBIENT_CLEAR_ALL = uintptr(4) -) - -func prctl(option int, arg2, arg3, arg4, arg5 uintptr) (err error) { - _, _, e1 := syscall.Syscall6(syscall.SYS_PRCTL, uintptr(option), arg2, arg3, arg4, arg5, 0) - if e1 != 0 { - err = e1 - } - return -} - -const ( - vfsXattrName = "security.capability" - - vfsCapVerMask = 0xff000000 - vfsCapVer1 = 0x01000000 - vfsCapVer2 = 0x02000000 - - vfsCapFlagMask = ^vfsCapVerMask - vfsCapFlageffective = 0x000001 - - vfscapDataSizeV1 = 4 * (1 + 2*1) - vfscapDataSizeV2 = 4 * (1 + 2*2) -) - -type vfscapData struct { - magic uint32 - data [2]struct { - permitted uint32 - inheritable uint32 - } - effective [2]uint32 - version int8 -} - -var ( - _vfsXattrName *byte -) - -func init() { - _vfsXattrName, _ = syscall.BytePtrFromString(vfsXattrName) -} - -func getVfsCap(path string, dest *vfscapData) (err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(dest)), vfscapDataSizeV2, 0, 0) - if e1 != 0 { - if e1 == syscall.ENODATA { - dest.version = 2 - return - } - err = e1 - } - switch dest.magic & vfsCapVerMask { - case vfsCapVer1: - dest.version = 1 - if r0 != vfscapDataSizeV1 { - return syscall.EINVAL - } - dest.data[1].permitted = 0 - dest.data[1].inheritable = 0 - case vfsCapVer2: - dest.version = 2 - if r0 != vfscapDataSizeV2 { - return syscall.EINVAL - } - default: - return syscall.EINVAL - } - if dest.magic&vfsCapFlageffective != 0 { - dest.effective[0] = dest.data[0].permitted | dest.data[0].inheritable - dest.effective[1] = dest.data[1].permitted | dest.data[1].inheritable - } else { - dest.effective[0] = 0 - dest.effective[1] = 0 - } - return -} - -func setVfsCap(path string, data *vfscapData) (err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(path) - if err != nil { - return - } - var size uintptr - if data.version == 1 { - data.magic = vfsCapVer1 - size = vfscapDataSizeV1 - } else if data.version == 2 { - data.magic = vfsCapVer2 - if data.effective[0] != 0 || data.effective[1] != 0 { - data.magic |= vfsCapFlageffective - } - size = vfscapDataSizeV2 - } else { - return syscall.EINVAL - } - _, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(data)), size, 0, 0) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go deleted file mode 100644 
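Before the x/net listings begin, one aside on the xattr that getVfsCap/setVfsCap above wrap: the same "security.capability" attribute can be read with golang.org/x/sys/unix. That package is an assumption of this sketch (the vendored code issues raw SYS_GETXATTR syscalls instead), and /usr/bin/ping is only an example path that often carries file capabilities:

package main

import (
	"encoding/binary"
	"fmt"

	"golang.org/x/sys/unix" // assumed convenience wrapper; not used by the vendored code
)

func main() {
	buf := make([]byte, 20) // vfscapDataSizeV2 = 4 * (1 + 2*2) bytes
	n, err := unix.Getxattr("/usr/bin/ping", "security.capability", buf)
	if err != nil {
		fmt.Println("no file caps (or not readable):", err)
		return
	}
	magic := binary.LittleEndian.Uint32(buf[:4])
	// The top byte is the vfs cap version (vfsCapVer2 = 0x02000000 above);
	// the low bit is vfsCapFlageffective.
	fmt.Printf("%d bytes, magic %#x, version %d, effective: %v\n",
		n, magic, magic>>24, magic&0x000001 != 0)
}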
index dc5225b6d4..0000000000 --- a/vendor/golang.org/x/net/internal/timeseries/timeseries.go +++ /dev/null @@ -1,525 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package timeseries implements a time series structure for stats collection. -package timeseries // import "golang.org/x/net/internal/timeseries" - -import ( - "fmt" - "log" - "time" -) - -const ( - timeSeriesNumBuckets = 64 - minuteHourSeriesNumBuckets = 60 -) - -var timeSeriesResolutions = []time.Duration{ - 1 * time.Second, - 10 * time.Second, - 1 * time.Minute, - 10 * time.Minute, - 1 * time.Hour, - 6 * time.Hour, - 24 * time.Hour, // 1 day - 7 * 24 * time.Hour, // 1 week - 4 * 7 * 24 * time.Hour, // 4 weeks - 16 * 7 * 24 * time.Hour, // 16 weeks -} - -var minuteHourSeriesResolutions = []time.Duration{ - 1 * time.Second, - 1 * time.Minute, -} - -// An Observable is a kind of data that can be aggregated in a time series. -type Observable interface { - Multiply(ratio float64) // Multiplies the data in self by a given ratio - Add(other Observable) // Adds the data from a different observation to self - Clear() // Clears the observation so it can be reused. - CopyFrom(other Observable) // Copies the contents of a given observation to self -} - -// Float attaches the methods of Observable to a float64. -type Float float64 - -// NewFloat returns a Float. -func NewFloat() Observable { - f := Float(0) - return &f -} - -// String returns the float as a string. -func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } - -// Value returns the float's value. -func (f *Float) Value() float64 { return float64(*f) } - -func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } - -func (f *Float) Add(other Observable) { - o := other.(*Float) - *f += *o -} - -func (f *Float) Clear() { *f = 0 } - -func (f *Float) CopyFrom(other Observable) { - o := other.(*Float) - *f = *o -} - -// A Clock tells the current time. -type Clock interface { - Time() time.Time -} - -type defaultClock int - -var defaultClockInstance defaultClock - -func (defaultClock) Time() time.Time { return time.Now() } - -// Information kept per level. Each level consists of a circular list of -// observations. The start of the level may be derived from end and the -// len(buckets) * sizeInMillis. -type tsLevel struct { - oldest int // index to oldest bucketed Observable - newest int // index to newest bucketed Observable - end time.Time // end timestamp for this level - size time.Duration // duration of the bucketed Observable - buckets []Observable // collections of observations - provider func() Observable // used for creating new Observable -} - -func (l *tsLevel) Clear() { - l.oldest = 0 - l.newest = len(l.buckets) - 1 - l.end = time.Time{} - for i := range l.buckets { - if l.buckets[i] != nil { - l.buckets[i].Clear() - l.buckets[i] = nil - } - } -} - -func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { - l.size = size - l.provider = f - l.buckets = make([]Observable, numBuckets) -} - -// Keeps a sequence of levels. Each level is responsible for storing data at -// a given resolution. For example, the first level stores data at a one -// minute resolution while the second level stores data at a one hour -// resolution. - -// Each level is represented by a sequence of buckets. Each bucket spans an -// interval equal to the resolution of the level. 
New observations are added -// to the last bucket. -type timeSeries struct { - provider func() Observable // make more Observable - numBuckets int // number of buckets in each level - levels []*tsLevel // levels of bucketed Observable - lastAdd time.Time // time of last Observable tracked - total Observable // convenient aggregation of all Observable - clock Clock // Clock for getting current time - pending Observable // observations not yet bucketed - pendingTime time.Time // what time are we keeping in pending - dirty bool // if there are pending observations -} - -// init initializes a level according to the supplied criteria. -func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { - ts.provider = f - ts.numBuckets = numBuckets - ts.clock = clock - ts.levels = make([]*tsLevel, len(resolutions)) - - for i := range resolutions { - if i > 0 && resolutions[i-1] >= resolutions[i] { - log.Print("timeseries: resolutions must be monotonically increasing") - break - } - newLevel := new(tsLevel) - newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) - ts.levels[i] = newLevel - } - - ts.Clear() -} - -// Clear removes all observations from the time series. -func (ts *timeSeries) Clear() { - ts.lastAdd = time.Time{} - ts.total = ts.resetObservation(ts.total) - ts.pending = ts.resetObservation(ts.pending) - ts.pendingTime = time.Time{} - ts.dirty = false - - for i := range ts.levels { - ts.levels[i].Clear() - } -} - -// Add records an observation at the current time. -func (ts *timeSeries) Add(observation Observable) { - ts.AddWithTime(observation, ts.clock.Time()) -} - -// AddWithTime records an observation at the specified time. -func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { - - smallBucketDuration := ts.levels[0].size - - if t.After(ts.lastAdd) { - ts.lastAdd = t - } - - if t.After(ts.pendingTime) { - ts.advance(t) - ts.mergePendingUpdates() - ts.pendingTime = ts.levels[0].end - ts.pending.CopyFrom(observation) - ts.dirty = true - } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { - // The observation is close enough to go into the pending bucket. - // This compensates for clock skewing and small scheduling delays - // by letting the update stay in the fast path. - ts.pending.Add(observation) - ts.dirty = true - } else { - ts.mergeValue(observation, t) - } -} - -// mergeValue inserts the observation at the specified time in the past into all levels. -func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { - for _, level := range ts.levels { - index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) - if 0 <= index && index < ts.numBuckets { - bucketNumber := (level.oldest + index) % ts.numBuckets - if level.buckets[bucketNumber] == nil { - level.buckets[bucketNumber] = level.provider() - } - level.buckets[bucketNumber].Add(observation) - } - } - ts.total.Add(observation) -} - -// mergePendingUpdates applies the pending updates into all levels. -func (ts *timeSeries) mergePendingUpdates() { - if ts.dirty { - ts.mergeValue(ts.pending, ts.pendingTime) - ts.pending = ts.resetObservation(ts.pending) - ts.dirty = false - } -} - -// advance cycles the buckets at each level until the latest bucket in -// each level can hold the time specified. 
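(A quick illustration of the bucket-index arithmetic mergeValue above relies on, before advance's implementation below. Values are made up for the example: a level whose end is known places an observation t in slot (numBuckets-1) - (end-t)/size, counted from the oldest slot in the ring.)

package main

import (
	"fmt"
	"time"
)

func main() {
	const numBuckets = 64
	size := time.Second                                 // level resolution
	end := time.Date(2023, 1, 1, 0, 1, 4, 0, time.UTC) // level end timestamp
	t := end.Add(-3 * time.Second)                      // observation 3s in the past
	index := (numBuckets - 1) - int(end.Sub(t)/size)
	fmt.Println(index) // 60: three buckets before the newest (index 63)
}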
-func (ts *timeSeries) advance(t time.Time) { - if !t.After(ts.levels[0].end) { - return - } - for i := 0; i < len(ts.levels); i++ { - level := ts.levels[i] - if !level.end.Before(t) { - break - } - - // If the time is sufficiently far, just clear the level and advance - // directly. - if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { - for _, b := range level.buckets { - ts.resetObservation(b) - } - level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) - } - - for t.After(level.end) { - level.end = level.end.Add(level.size) - level.newest = level.oldest - level.oldest = (level.oldest + 1) % ts.numBuckets - ts.resetObservation(level.buckets[level.newest]) - } - - t = level.end - } -} - -// Latest returns the sum of the num latest buckets from the level. -func (ts *timeSeries) Latest(level, num int) Observable { - now := ts.clock.Time() - if ts.levels[0].end.Before(now) { - ts.advance(now) - } - - ts.mergePendingUpdates() - - result := ts.provider() - l := ts.levels[level] - index := l.newest - - for i := 0; i < num; i++ { - if l.buckets[index] != nil { - result.Add(l.buckets[index]) - } - if index == 0 { - index = ts.numBuckets - } - index-- - } - - return result -} - -// LatestBuckets returns a copy of the num latest buckets from level. -func (ts *timeSeries) LatestBuckets(level, num int) []Observable { - if level < 0 || level > len(ts.levels) { - log.Print("timeseries: bad level argument: ", level) - return nil - } - if num < 0 || num >= ts.numBuckets { - log.Print("timeseries: bad num argument: ", num) - return nil - } - - results := make([]Observable, num) - now := ts.clock.Time() - if ts.levels[0].end.Before(now) { - ts.advance(now) - } - - ts.mergePendingUpdates() - - l := ts.levels[level] - index := l.newest - - for i := 0; i < num; i++ { - result := ts.provider() - results[i] = result - if l.buckets[index] != nil { - result.CopyFrom(l.buckets[index]) - } - - if index == 0 { - index = ts.numBuckets - } - index -= 1 - } - return results -} - -// ScaleBy updates observations by scaling by factor. -func (ts *timeSeries) ScaleBy(factor float64) { - for _, l := range ts.levels { - for i := 0; i < ts.numBuckets; i++ { - l.buckets[i].Multiply(factor) - } - } - - ts.total.Multiply(factor) - ts.pending.Multiply(factor) -} - -// Range returns the sum of observations added over the specified time range. -// If start or finish times don't fall on bucket boundaries of the same -// level, then return values are approximate answers. -func (ts *timeSeries) Range(start, finish time.Time) Observable { - return ts.ComputeRange(start, finish, 1)[0] -} - -// Recent returns the sum of observations from the last delta. -func (ts *timeSeries) Recent(delta time.Duration) Observable { - now := ts.clock.Time() - return ts.Range(now.Add(-delta), now) -} - -// Total returns the total of all observations. -func (ts *timeSeries) Total() Observable { - ts.mergePendingUpdates() - return ts.total -} - -// ComputeRange computes a specified number of values into a slice using -// the observations recorded over the specified time period. The return -// values are approximate if the start or finish times don't fall on the -// bucket boundaries at the same level or if the number of buckets spanning -// the range is not an integral multiple of num. 
-func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { - if start.After(finish) { - log.Printf("timeseries: start > finish, %v>%v", start, finish) - return nil - } - - if num < 0 { - log.Printf("timeseries: num < 0, %v", num) - return nil - } - - results := make([]Observable, num) - - for _, l := range ts.levels { - if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { - ts.extract(l, start, finish, num, results) - return results - } - } - - // Failed to find a level that covers the desired range. So just - // extract from the last level, even if it doesn't cover the entire - // desired range. - ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) - - return results -} - -// RecentList returns the specified number of values in slice over the most -// recent time period of the specified range. -func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { - if delta < 0 { - return nil - } - now := ts.clock.Time() - return ts.ComputeRange(now.Add(-delta), now, num) -} - -// extract returns a slice of specified number of observations from a given -// level over a given range. -func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { - ts.mergePendingUpdates() - - srcInterval := l.size - dstInterval := finish.Sub(start) / time.Duration(num) - dstStart := start - srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) - - srcIndex := 0 - - // Where should scanning start? - if dstStart.After(srcStart) { - advance := int(dstStart.Sub(srcStart) / srcInterval) - srcIndex += advance - srcStart = srcStart.Add(time.Duration(advance) * srcInterval) - } - - // The i'th value is computed as show below. - // interval = (finish/start)/num - // i'th value = sum of observation in range - // [ start + i * interval, - // start + (i + 1) * interval ) - for i := 0; i < num; i++ { - results[i] = ts.resetObservation(results[i]) - dstEnd := dstStart.Add(dstInterval) - for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { - srcEnd := srcStart.Add(srcInterval) - if srcEnd.After(ts.lastAdd) { - srcEnd = ts.lastAdd - } - - if !srcEnd.Before(dstStart) { - srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] - if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { - // dst completely contains src. - if srcValue != nil { - results[i].Add(srcValue) - } - } else { - // dst partially overlaps src. - overlapStart := maxTime(srcStart, dstStart) - overlapEnd := minTime(srcEnd, dstEnd) - base := srcEnd.Sub(srcStart) - fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() - - used := ts.provider() - if srcValue != nil { - used.CopyFrom(srcValue) - } - used.Multiply(fraction) - results[i].Add(used) - } - - if srcEnd.After(dstEnd) { - break - } - } - srcIndex++ - srcStart = srcStart.Add(srcInterval) - } - dstStart = dstStart.Add(dstInterval) - } -} - -// resetObservation clears the content so the struct may be reused. -func (ts *timeSeries) resetObservation(observation Observable) Observable { - if observation == nil { - observation = ts.provider() - } else { - observation.Clear() - } - return observation -} - -// TimeSeries tracks data at granularities from 1 second to 16 weeks. -type TimeSeries struct { - timeSeries -} - -// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. 
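Sketch of how this API was consumed, using the constructors that follow below. Note the package is internal to golang.org/x/net, so code outside x/net cannot actually import it; this is illustration only:

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/internal/timeseries"
)

func main() {
	ts := timeseries.NewTimeSeries(timeseries.NewFloat)
	for i := 0; i < 5; i++ {
		obs := timeseries.Float(1)
		ts.Add(&obs) // bucketed at the clock's current time
	}
	fmt.Println(ts.Recent(time.Minute)) // 5: sum observed over the last minute
	fmt.Println(ts.Total())             // 5: all observations ever recorded
}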
-func NewTimeSeries(f func() Observable) *TimeSeries { - return NewTimeSeriesWithClock(f, defaultClockInstance) -} - -// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for -// assigning timestamps. -func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { - ts := new(TimeSeries) - ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) - return ts -} - -// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. -type MinuteHourSeries struct { - timeSeries -} - -// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. -func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { - return NewMinuteHourSeriesWithClock(f, defaultClockInstance) -} - -// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for -// assigning timestamps. -func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { - ts := new(MinuteHourSeries) - ts.timeSeries.init(minuteHourSeriesResolutions, f, - minuteHourSeriesNumBuckets, clock) - return ts -} - -func (ts *MinuteHourSeries) Minute() Observable { - return ts.timeSeries.Latest(0, 60) -} - -func (ts *MinuteHourSeries) Hour() Observable { - return ts.timeSeries.Latest(1, 60) -} - -func minTime(a, b time.Time) time.Time { - if a.Before(b) { - return a - } - return b -} - -func maxTime(a, b time.Time) time.Time { - if a.After(b) { - return a - } - return b -} diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go deleted file mode 100644 index c646a6952e..0000000000 --- a/vendor/golang.org/x/net/trace/events.go +++ /dev/null @@ -1,532 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package trace - -import ( - "bytes" - "fmt" - "html/template" - "io" - "log" - "net/http" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "text/tabwriter" - "time" -) - -const maxEventsPerLog = 100 - -type bucket struct { - MaxErrAge time.Duration - String string -} - -var buckets = []bucket{ - {0, "total"}, - {10 * time.Second, "errs<10s"}, - {1 * time.Minute, "errs<1m"}, - {10 * time.Minute, "errs<10m"}, - {1 * time.Hour, "errs<1h"}, - {10 * time.Hour, "errs<10h"}, - {24000 * time.Hour, "errors"}, -} - -// RenderEvents renders the HTML page typically served at /debug/events. -// It does not do any auth checking. The request may be nil. -// -// Most users will use the Events handler. -func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { - now := time.Now() - data := &struct { - Families []string // family names - Buckets []bucket - Counts [][]int // eventLog count per family/bucket - - // Set when a bucket has been selected. - Family string - Bucket int - EventLogs eventLogs - Expanded bool - }{ - Buckets: buckets, - } - - data.Families = make([]string, 0, len(families)) - famMu.RLock() - for name := range families { - data.Families = append(data.Families, name) - } - famMu.RUnlock() - sort.Strings(data.Families) - - // Count the number of eventLogs in each family for each error age. - data.Counts = make([][]int, len(data.Families)) - for i, name := range data.Families { - // TODO(sameer): move this loop under the family lock. 
- f := getEventFamily(name) - data.Counts[i] = make([]int, len(data.Buckets)) - for j, b := range data.Buckets { - data.Counts[i][j] = f.Count(now, b.MaxErrAge) - } - } - - if req != nil { - var ok bool - data.Family, data.Bucket, ok = parseEventsArgs(req) - if !ok { - // No-op - } else { - data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) - } - if data.EventLogs != nil { - defer data.EventLogs.Free() - sort.Sort(data.EventLogs) - } - if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { - data.Expanded = exp - } - } - - famMu.RLock() - defer famMu.RUnlock() - if err := eventsTmpl().Execute(w, data); err != nil { - log.Printf("net/trace: Failed executing template: %v", err) - } -} - -func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { - fam, bStr := req.FormValue("fam"), req.FormValue("b") - if fam == "" || bStr == "" { - return "", 0, false - } - b, err := strconv.Atoi(bStr) - if err != nil || b < 0 || b >= len(buckets) { - return "", 0, false - } - return fam, b, true -} - -// An EventLog provides a log of events associated with a specific object. -type EventLog interface { - // Printf formats its arguments with fmt.Sprintf and adds the - // result to the event log. - Printf(format string, a ...interface{}) - - // Errorf is like Printf, but it marks this event as an error. - Errorf(format string, a ...interface{}) - - // Finish declares that this event log is complete. - // The event log should not be used after calling this method. - Finish() -} - -// NewEventLog returns a new EventLog with the specified family name -// and title. -func NewEventLog(family, title string) EventLog { - el := newEventLog() - el.ref() - el.Family, el.Title = family, title - el.Start = time.Now() - el.events = make([]logEntry, 0, maxEventsPerLog) - el.stack = make([]uintptr, 32) - n := runtime.Callers(2, el.stack) - el.stack = el.stack[:n] - - getEventFamily(family).add(el) - return el -} - -func (el *eventLog) Finish() { - getEventFamily(el.Family).remove(el) - el.unref() // matches ref in New -} - -var ( - famMu sync.RWMutex - families = make(map[string]*eventFamily) // family name => family -) - -func getEventFamily(fam string) *eventFamily { - famMu.Lock() - defer famMu.Unlock() - f := families[fam] - if f == nil { - f = &eventFamily{} - families[fam] = f - } - return f -} - -type eventFamily struct { - mu sync.RWMutex - eventLogs eventLogs -} - -func (f *eventFamily) add(el *eventLog) { - f.mu.Lock() - f.eventLogs = append(f.eventLogs, el) - f.mu.Unlock() -} - -func (f *eventFamily) remove(el *eventLog) { - f.mu.Lock() - defer f.mu.Unlock() - for i, el0 := range f.eventLogs { - if el == el0 { - copy(f.eventLogs[i:], f.eventLogs[i+1:]) - f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] - return - } - } -} - -func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { - f.mu.RLock() - defer f.mu.RUnlock() - for _, el := range f.eventLogs { - if el.hasRecentError(now, maxErrAge) { - n++ - } - } - return -} - -func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { - f.mu.RLock() - defer f.mu.RUnlock() - els = make(eventLogs, 0, len(f.eventLogs)) - for _, el := range f.eventLogs { - if el.hasRecentError(now, maxErrAge) { - el.ref() - els = append(els, el) - } - } - return -} - -type eventLogs []*eventLog - -// Free calls unref on each element of the list. -func (els eventLogs) Free() { - for _, el := range els { - el.unref() - } -} - -// eventLogs may be sorted in reverse chronological order. 
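(An aside before the sort methods the preceding comment describes: typical client use of the EventLog interface above, with made-up family/title values and a stub doWork.)

package main

import "golang.org/x/net/trace"

func main() {
	el := trace.NewEventLog("example.Worker", "worker-1")
	defer el.Finish() // the log must not be used after Finish
	el.Printf("starting up")
	if err := doWork(); err != nil {
		el.Errorf("doWork failed: %v", err) // marks the entry as an error
	}
}

func doWork() error { return nil }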
-func (els eventLogs) Len() int { return len(els) } -func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } -func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } - -// A logEntry is a timestamped log entry in an event log. -type logEntry struct { - When time.Time - Elapsed time.Duration // since previous event in log - NewDay bool // whether this event is on a different day to the previous event - What string - IsErr bool -} - -// WhenString returns a string representation of the elapsed time of the event. -// It will include the date if midnight was crossed. -func (e logEntry) WhenString() string { - if e.NewDay { - return e.When.Format("2006/01/02 15:04:05.000000") - } - return e.When.Format("15:04:05.000000") -} - -// An eventLog represents an active event log. -type eventLog struct { - // Family is the top-level grouping of event logs to which this belongs. - Family string - - // Title is the title of this event log. - Title string - - // Timing information. - Start time.Time - - // Call stack where this event log was created. - stack []uintptr - - // Append-only sequence of events. - // - // TODO(sameer): change this to a ring buffer to avoid the array copy - // when we hit maxEventsPerLog. - mu sync.RWMutex - events []logEntry - LastErrorTime time.Time - discarded int - - refs int32 // how many buckets this is in -} - -func (el *eventLog) reset() { - // Clear all but the mutex. Mutexes may not be copied, even when unlocked. - el.Family = "" - el.Title = "" - el.Start = time.Time{} - el.stack = nil - el.events = nil - el.LastErrorTime = time.Time{} - el.discarded = 0 - el.refs = 0 -} - -func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { - if maxErrAge == 0 { - return true - } - el.mu.RLock() - defer el.mu.RUnlock() - return now.Sub(el.LastErrorTime) < maxErrAge -} - -// delta returns the elapsed time since the last event or the log start, -// and whether it spans midnight. -// L >= el.mu -func (el *eventLog) delta(t time.Time) (time.Duration, bool) { - if len(el.events) == 0 { - return t.Sub(el.Start), false - } - prev := el.events[len(el.events)-1].When - return t.Sub(prev), prev.Day() != t.Day() - -} - -func (el *eventLog) Printf(format string, a ...interface{}) { - el.printf(false, format, a...) -} - -func (el *eventLog) Errorf(format string, a ...interface{}) { - el.printf(true, format, a...) -} - -func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { - e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} - el.mu.Lock() - e.Elapsed, e.NewDay = el.delta(e.When) - if len(el.events) < maxEventsPerLog { - el.events = append(el.events, e) - } else { - // Discard the oldest event. - if el.discarded == 0 { - // el.discarded starts at two to count for the event it - // is replacing, plus the next one that we are about to - // drop. - el.discarded = 2 - } else { - el.discarded++ - } - // TODO(sameer): if this causes allocations on a critical path, - // change eventLog.What to be a fmt.Stringer, as in trace.go. - el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) - // The timestamp of the discarded meta-event should be - // the time of the last event it is representing. 
- el.events[0].When = el.events[1].When - copy(el.events[1:], el.events[2:]) - el.events[maxEventsPerLog-1] = e - } - if e.IsErr { - el.LastErrorTime = e.When - } - el.mu.Unlock() -} - -func (el *eventLog) ref() { - atomic.AddInt32(&el.refs, 1) -} - -func (el *eventLog) unref() { - if atomic.AddInt32(&el.refs, -1) == 0 { - freeEventLog(el) - } -} - -func (el *eventLog) When() string { - return el.Start.Format("2006/01/02 15:04:05.000000") -} - -func (el *eventLog) ElapsedTime() string { - elapsed := time.Since(el.Start) - return fmt.Sprintf("%.6f", elapsed.Seconds()) -} - -func (el *eventLog) Stack() string { - buf := new(bytes.Buffer) - tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) - printStackRecord(tw, el.stack) - tw.Flush() - return buf.String() -} - -// printStackRecord prints the function + source line information -// for a single stack trace. -// Adapted from runtime/pprof/pprof.go. -func printStackRecord(w io.Writer, stk []uintptr) { - for _, pc := range stk { - f := runtime.FuncForPC(pc) - if f == nil { - continue - } - file, line := f.FileLine(pc) - name := f.Name() - // Hide runtime.goexit and any runtime functions at the beginning. - if strings.HasPrefix(name, "runtime.") { - continue - } - fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) - } -} - -func (el *eventLog) Events() []logEntry { - el.mu.RLock() - defer el.mu.RUnlock() - return el.events -} - -// freeEventLogs is a freelist of *eventLog -var freeEventLogs = make(chan *eventLog, 1000) - -// newEventLog returns a event log ready to use. -func newEventLog() *eventLog { - select { - case el := <-freeEventLogs: - return el - default: - return new(eventLog) - } -} - -// freeEventLog adds el to freeEventLogs if there's room. -// This is non-blocking. -func freeEventLog(el *eventLog) { - el.reset() - select { - case freeEventLogs <- el: - default: - } -} - -var eventsTmplCache *template.Template -var eventsTmplOnce sync.Once - -func eventsTmpl() *template.Template { - eventsTmplOnce.Do(func() { - eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ - "elapsed": elapsed, - "trimSpace": strings.TrimSpace, - }).Parse(eventsHTML)) - }) - return eventsTmplCache -} - -const eventsHTML = ` - - - events - - - - -
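Before the eventsHTML template block below: the newEventLog/freeEventLog pair above is a buffered-channel freelist, a lock-free bounded pool. The same idiom in miniature (obj/get/put are hypothetical names):

package main

import "fmt"

type obj struct{ n int }

var free = make(chan *obj, 4) // pool capacity bounds retained garbage

func get() *obj {
	select {
	case o := <-free:
		return o // reuse a pooled object
	default:
		return new(obj) // pool empty: allocate
	}
}

func put(o *obj) {
	*o = obj{} // reset before reuse, like eventLog.reset above
	select {
	case free <- o:
	default: // pool full: drop for the GC
	}
}

func main() {
	a := get()
	a.n = 42
	put(a)
	fmt.Println(get().n) // 0: the reused object was reset
}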

[eventsHTML template body elided: the HTML/CSS markup of the /debug/events page was destroyed by tag-stripping during extraction. Recoverable structure: a "/debug/events" heading; a table iterating {{range $i, $fam := .Families}} with one row per family and one cell per error-age bucket rendered as "[{{$n}} {{$bucket.String}}]" (linked when non-zero); then, {{if $.EventLogs}}, a "Family: {{$.Family}}" section with [Summary]/[Expanded] toggles and a When/Elapsed table listing {{$el.When}}, {{$el.ElapsedTime}}, {{$el.Title}}, an optional {{$el.Stack|trimSpace}} block, and per-event rows of {{.WhenString}}, {{elapsed .Elapsed}}, an E/. error marker from {{if .IsErr}}, and {{.What}}.]
-{{end}} - - -` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go deleted file mode 100644 index d6c71101e4..0000000000 --- a/vendor/golang.org/x/net/trace/histogram.go +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package trace - -// This file implements histogramming for RPC statistics collection. - -import ( - "bytes" - "fmt" - "html/template" - "log" - "math" - "sync" - - "golang.org/x/net/internal/timeseries" -) - -const ( - bucketCount = 38 -) - -// histogram keeps counts of values in buckets that are spaced -// out in powers of 2: 0-1, 2-3, 4-7... -// histogram implements timeseries.Observable -type histogram struct { - sum int64 // running total of measurements - sumOfSquares float64 // square of running total - buckets []int64 // bucketed values for histogram - value int // holds a single value as an optimization - valueCount int64 // number of values recorded for single value -} - -// addMeasurement records a value measurement observation to the histogram. -func (h *histogram) addMeasurement(value int64) { - // TODO: assert invariant - h.sum += value - h.sumOfSquares += float64(value) * float64(value) - - bucketIndex := getBucket(value) - - if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { - h.value = bucketIndex - h.valueCount++ - } else { - h.allocateBuckets() - h.buckets[bucketIndex]++ - } -} - -func (h *histogram) allocateBuckets() { - if h.buckets == nil { - h.buckets = make([]int64, bucketCount) - h.buckets[h.value] = h.valueCount - h.value = 0 - h.valueCount = -1 - } -} - -func log2(i int64) int { - n := 0 - for ; i >= 0x100; i >>= 8 { - n += 8 - } - for ; i > 0; i >>= 1 { - n += 1 - } - return n -} - -func getBucket(i int64) (index int) { - index = log2(i) - 1 - if index < 0 { - index = 0 - } - if index >= bucketCount { - index = bucketCount - 1 - } - return -} - -// Total returns the number of recorded observations. -func (h *histogram) total() (total int64) { - if h.valueCount >= 0 { - total = h.valueCount - } - for _, val := range h.buckets { - total += int64(val) - } - return -} - -// Average returns the average value of recorded observations. -func (h *histogram) average() float64 { - t := h.total() - if t == 0 { - return 0 - } - return float64(h.sum) / float64(t) -} - -// Variance returns the variance of recorded observations. -func (h *histogram) variance() float64 { - t := float64(h.total()) - if t == 0 { - return 0 - } - s := float64(h.sum) / t - return h.sumOfSquares/t - s*s -} - -// StandardDeviation returns the standard deviation of recorded observations. -func (h *histogram) standardDeviation() float64 { - return math.Sqrt(h.variance()) -} - -// PercentileBoundary estimates the value that the given fraction of recorded -// observations are less than. -func (h *histogram) percentileBoundary(percentile float64) int64 { - total := h.total() - - // Corner cases (make sure result is strictly less than Total()) - if total == 0 { - return 0 - } else if total == 1 { - return int64(h.average()) - } - - percentOfTotal := round(float64(total) * percentile) - var runningTotal int64 - - for i := range h.buckets { - value := h.buckets[i] - runningTotal += value - if runningTotal == percentOfTotal { - // We hit an exact bucket boundary. If the next bucket has data, it is a - // good estimate of the value. 
If the bucket is empty, we interpolate the - // midpoint between the next bucket's boundary and the next non-zero - // bucket. If the remaining buckets are all empty, then we use the - // boundary for the next bucket as the estimate. - j := uint8(i + 1) - min := bucketBoundary(j) - if runningTotal < total { - for h.buckets[j] == 0 { - j++ - } - } - max := bucketBoundary(j) - return min + round(float64(max-min)/2) - } else if runningTotal > percentOfTotal { - // The value is in this bucket. Interpolate the value. - delta := runningTotal - percentOfTotal - percentBucket := float64(value-delta) / float64(value) - bucketMin := bucketBoundary(uint8(i)) - nextBucketMin := bucketBoundary(uint8(i + 1)) - bucketSize := nextBucketMin - bucketMin - return bucketMin + round(percentBucket*float64(bucketSize)) - } - } - return bucketBoundary(bucketCount - 1) -} - -// Median returns the estimated median of the observed values. -func (h *histogram) median() int64 { - return h.percentileBoundary(0.5) -} - -// Add adds other to h. -func (h *histogram) Add(other timeseries.Observable) { - o := other.(*histogram) - if o.valueCount == 0 { - // Other histogram is empty - } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { - // Both have a single bucketed value, aggregate them - h.valueCount += o.valueCount - } else { - // Two different values necessitate buckets in this histogram - h.allocateBuckets() - if o.valueCount >= 0 { - h.buckets[o.value] += o.valueCount - } else { - for i := range h.buckets { - h.buckets[i] += o.buckets[i] - } - } - } - h.sumOfSquares += o.sumOfSquares - h.sum += o.sum -} - -// Clear resets the histogram to an empty state, removing all observed values. -func (h *histogram) Clear() { - h.buckets = nil - h.value = 0 - h.valueCount = 0 - h.sum = 0 - h.sumOfSquares = 0 -} - -// CopyFrom copies from other, which must be a *histogram, into h. -func (h *histogram) CopyFrom(other timeseries.Observable) { - o := other.(*histogram) - if o.valueCount == -1 { - h.allocateBuckets() - copy(h.buckets, o.buckets) - } - h.sum = o.sum - h.sumOfSquares = o.sumOfSquares - h.value = o.value - h.valueCount = o.valueCount -} - -// Multiply scales the histogram by the specified ratio. -func (h *histogram) Multiply(ratio float64) { - if h.valueCount == -1 { - for i := range h.buckets { - h.buckets[i] = int64(float64(h.buckets[i]) * ratio) - } - } else { - h.valueCount = int64(float64(h.valueCount) * ratio) - } - h.sum = int64(float64(h.sum) * ratio) - h.sumOfSquares = h.sumOfSquares * ratio -} - -// New creates a new histogram. -func (h *histogram) New() timeseries.Observable { - r := new(histogram) - r.Clear() - return r -} - -func (h *histogram) String() string { - return fmt.Sprintf("%d, %f, %d, %d, %v", - h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) -} - -// round returns the closest int64 to the argument -func round(in float64) int64 { - return int64(math.Floor(in + 0.5)) -} - -// bucketBoundary returns the first value in the bucket. -func bucketBoundary(bucket uint8) int64 { - if bucket == 0 { - return 0 - } - return 1 << bucket -} - -// bucketData holds data about a specific bucket for use in distTmpl. -type bucketData struct { - Lower, Upper int64 - N int64 - Pct, CumulativePct float64 - GraphWidth int -} - -// data holds data about a Distribution for use in distTmpl. -type data struct { - Buckets []*bucketData - Count, Median int64 - Mean, StandardDeviation float64 -} - -// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
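(Before the rendering half of this file below, a self-contained check of the power-of-two bucketing defined above: log2 and getBucket are copied from the deleted file, with bucketCount inlined as 38. Buckets span 0-1, 2-3, 4-7, 8-15, ...)

package main

import "fmt"

func log2(i int64) int {
	n := 0
	for ; i >= 0x100; i >>= 8 {
		n += 8
	}
	for ; i > 0; i >>= 1 {
		n += 1
	}
	return n
}

func getBucket(i int64) (index int) {
	index = log2(i) - 1
	if index < 0 {
		index = 0
	}
	if index >= 38 { // bucketCount
		index = 38 - 1
	}
	return
}

func main() {
	for _, v := range []int64{0, 1, 2, 3, 4, 7, 8, 1024} {
		fmt.Printf("%d -> bucket %d\n", v, getBucket(v))
	}
	// 0,1 -> 0; 2,3 -> 1; 4..7 -> 2; 8 -> 3; 1024 -> 10
}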
-const maxHTMLBarWidth = 350.0 - -// newData returns data representing h for use in distTmpl. -func (h *histogram) newData() *data { - // Force the allocation of buckets to simplify the rendering implementation - h.allocateBuckets() - // We scale the bars on the right so that the largest bar is - // maxHTMLBarWidth pixels in width. - maxBucket := int64(0) - for _, n := range h.buckets { - if n > maxBucket { - maxBucket = n - } - } - total := h.total() - barsizeMult := maxHTMLBarWidth / float64(maxBucket) - var pctMult float64 - if total == 0 { - pctMult = 1.0 - } else { - pctMult = 100.0 / float64(total) - } - - buckets := make([]*bucketData, len(h.buckets)) - runningTotal := int64(0) - for i, n := range h.buckets { - if n == 0 { - continue - } - runningTotal += n - var upperBound int64 - if i < bucketCount-1 { - upperBound = bucketBoundary(uint8(i + 1)) - } else { - upperBound = math.MaxInt64 - } - buckets[i] = &bucketData{ - Lower: bucketBoundary(uint8(i)), - Upper: upperBound, - N: n, - Pct: float64(n) * pctMult, - CumulativePct: float64(runningTotal) * pctMult, - GraphWidth: int(float64(n) * barsizeMult), - } - } - return &data{ - Buckets: buckets, - Count: total, - Median: h.median(), - Mean: h.average(), - StandardDeviation: h.standardDeviation(), - } -} - -func (h *histogram) html() template.HTML { - buf := new(bytes.Buffer) - if err := distTmpl().Execute(buf, h.newData()); err != nil { - buf.Reset() - log.Printf("net/trace: couldn't execute template: %v", err) - } - return template.HTML(buf.String()) -} - -var distTmplCache *template.Template -var distTmplOnce sync.Once - -func distTmpl() *template.Template { - distTmplOnce.Do(func() { - // Input: data - distTmplCache = template.Must(template.New("distTmpl").Parse(` - - - - - - - -
Count: {{.Count}}Mean: {{printf "%.0f" .Mean}}StdDev: {{printf "%.0f" .StandardDeviation}}Median: {{.Median}}
-
- -{{range $b := .Buckets}} -{{if $b}} - - - - - - - - - -{{end}} -{{end}} -
[{{.Lower}},{{.Upper}}){{.N}}{{printf "%#.3f" .Pct}}%{{printf "%#.3f" .CumulativePct}}%
-`))
-	})
-	return distTmplCache
-}
diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go
deleted file mode 100644
index eae2a99f54..0000000000
--- a/vendor/golang.org/x/net/trace/trace.go
+++ /dev/null
@@ -1,1130 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package trace implements tracing of requests and long-lived objects.
-It exports HTTP interfaces on /debug/requests and /debug/events.
-
-A trace.Trace provides tracing for short-lived objects, usually requests.
-A request handler might be implemented like this:
-
-	func fooHandler(w http.ResponseWriter, req *http.Request) {
-		tr := trace.New("mypkg.Foo", req.URL.Path)
-		defer tr.Finish()
-		...
-		tr.LazyPrintf("some event %q happened", str)
-		...
-		if err := somethingImportant(); err != nil {
-			tr.LazyPrintf("somethingImportant failed: %v", err)
-			tr.SetError()
-		}
-	}
-
-The /debug/requests HTTP endpoint organizes the traces by family,
-errors, and duration. It also provides a histogram of request duration
-for each family.
-
-A trace.EventLog provides tracing for long-lived objects, such as RPC
-connections.
-
-	// A Fetcher fetches URL paths for a single domain.
-	type Fetcher struct {
-		domain string
-		events trace.EventLog
-	}
-
-	func NewFetcher(domain string) *Fetcher {
-		return &Fetcher{
-			domain,
-			trace.NewEventLog("mypkg.Fetcher", domain),
-		}
-	}
-
-	func (f *Fetcher) Fetch(path string) (string, error) {
-		resp, err := http.Get("http://" + f.domain + "/" + path)
-		if err != nil {
-			f.events.Errorf("Get(%q) = %v", path, err)
-			return "", err
-		}
-		f.events.Printf("Get(%q) = %s", path, resp.Status)
-		...
-	}
-
-	func (f *Fetcher) Close() error {
-		f.events.Finish()
-		return nil
-	}
-
-The /debug/events HTTP endpoint organizes the event logs by family and
-by time since the last error. The expanded view displays recent log
-entries and the log's call stack.
-*/
-package trace // import "golang.org/x/net/trace"
-
-import (
-	"bytes"
-	"context"
-	"fmt"
-	"html/template"
-	"io"
-	"log"
-	"net"
-	"net/http"
-	"net/url"
-	"runtime"
-	"sort"
-	"strconv"
-	"sync"
-	"sync/atomic"
-	"time"
-
-	"golang.org/x/net/internal/timeseries"
-)
-
-// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing.
-// FOR DEBUGGING ONLY. This will slow down the program.
-var DebugUseAfterFinish = false
-
-// HTTP ServeMux paths.
-const (
-	debugRequestsPath = "/debug/requests"
-	debugEventsPath   = "/debug/events"
-)
-
-// AuthRequest determines whether a specific request is permitted to load the
-// /debug/requests or /debug/events pages.
-//
-// It returns two bools; the first indicates whether the page may be viewed at all,
-// and the second indicates whether sensitive events will be shown.
-//
-// AuthRequest may be replaced by a program to customize its authorization requirements.
-//
-// The default AuthRequest function returns (true, true) if and only if the request
-// comes from localhost/127.0.0.1/[::1].
-var AuthRequest = func(req *http.Request) (any, sensitive bool) {
-	// RemoteAddr is commonly in the form "IP" or "IP:port".
-	// If it is in the form "IP:port", split off the port.
- host, _, err := net.SplitHostPort(req.RemoteAddr) - if err != nil { - host = req.RemoteAddr - } - switch host { - case "localhost", "127.0.0.1", "::1": - return true, true - default: - return false, false - } -} - -func init() { - _, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: debugRequestsPath}}) - if pat == debugRequestsPath { - panic("/debug/requests is already registered. You may have two independent copies of " + - "golang.org/x/net/trace in your binary, trying to maintain separate state. This may " + - "involve a vendored copy of golang.org/x/net/trace.") - } - - // TODO(jbd): Serve Traces from /debug/traces in the future? - // There is no requirement for a request to be present to have traces. - http.HandleFunc(debugRequestsPath, Traces) - http.HandleFunc(debugEventsPath, Events) -} - -// NewContext returns a copy of the parent context -// and associates it with a Trace. -func NewContext(ctx context.Context, tr Trace) context.Context { - return context.WithValue(ctx, contextKey, tr) -} - -// FromContext returns the Trace bound to the context, if any. -func FromContext(ctx context.Context) (tr Trace, ok bool) { - tr, ok = ctx.Value(contextKey).(Trace) - return -} - -// Traces responds with traces from the program. -// The package initialization registers it in http.DefaultServeMux -// at /debug/requests. -// -// It performs authorization by running AuthRequest. -func Traces(w http.ResponseWriter, req *http.Request) { - any, sensitive := AuthRequest(req) - if !any { - http.Error(w, "not allowed", http.StatusUnauthorized) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - Render(w, req, sensitive) -} - -// Events responds with a page of events collected by EventLogs. -// The package initialization registers it in http.DefaultServeMux -// at /debug/events. -// -// It performs authorization by running AuthRequest. -func Events(w http.ResponseWriter, req *http.Request) { - any, sensitive := AuthRequest(req) - if !any { - http.Error(w, "not allowed", http.StatusUnauthorized) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - RenderEvents(w, req, sensitive) -} - -// Render renders the HTML page typically served at /debug/requests. -// It does not do any auth checking. The request may be nil. -// -// Most users will use the Traces handler. -func Render(w io.Writer, req *http.Request, sensitive bool) { - data := &struct { - Families []string - ActiveTraceCount map[string]int - CompletedTraces map[string]*family - - // Set when a bucket has been selected. - Traces traceList - Family string - Bucket int - Expanded bool - Traced bool - Active bool - ShowSensitive bool // whether to show sensitive events - - Histogram template.HTML - HistogramWindow string // e.g. "last minute", "last hour", "all time" - - // If non-zero, the set of traces is a partial set, - // and this is the total number. - Total int - }{ - CompletedTraces: completedTraces, - } - - data.ShowSensitive = sensitive - if req != nil { - // Allow show_sensitive=0 to force hiding of sensitive data for testing. - // This only goes one way; you can't use show_sensitive=1 to see things. 
- if req.FormValue("show_sensitive") == "0" { - data.ShowSensitive = false - } - - if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { - data.Expanded = exp - } - if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { - data.Traced = exp - } - } - - completedMu.RLock() - data.Families = make([]string, 0, len(completedTraces)) - for fam := range completedTraces { - data.Families = append(data.Families, fam) - } - completedMu.RUnlock() - sort.Strings(data.Families) - - // We are careful here to minimize the time spent locking activeMu, - // since that lock is required every time an RPC starts and finishes. - data.ActiveTraceCount = make(map[string]int, len(data.Families)) - activeMu.RLock() - for fam, s := range activeTraces { - data.ActiveTraceCount[fam] = s.Len() - } - activeMu.RUnlock() - - var ok bool - data.Family, data.Bucket, ok = parseArgs(req) - switch { - case !ok: - // No-op - case data.Bucket == -1: - data.Active = true - n := data.ActiveTraceCount[data.Family] - data.Traces = getActiveTraces(data.Family) - if len(data.Traces) < n { - data.Total = n - } - case data.Bucket < bucketsPerFamily: - if b := lookupBucket(data.Family, data.Bucket); b != nil { - data.Traces = b.Copy(data.Traced) - } - default: - if f := getFamily(data.Family, false); f != nil { - var obs timeseries.Observable - f.LatencyMu.RLock() - switch o := data.Bucket - bucketsPerFamily; o { - case 0: - obs = f.Latency.Minute() - data.HistogramWindow = "last minute" - case 1: - obs = f.Latency.Hour() - data.HistogramWindow = "last hour" - case 2: - obs = f.Latency.Total() - data.HistogramWindow = "all time" - } - f.LatencyMu.RUnlock() - if obs != nil { - data.Histogram = obs.(*histogram).html() - } - } - } - - if data.Traces != nil { - defer data.Traces.Free() - sort.Sort(data.Traces) - } - - completedMu.RLock() - defer completedMu.RUnlock() - if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { - log.Printf("net/trace: Failed executing template: %v", err) - } -} - -func parseArgs(req *http.Request) (fam string, b int, ok bool) { - if req == nil { - return "", 0, false - } - fam, bStr := req.FormValue("fam"), req.FormValue("b") - if fam == "" || bStr == "" { - return "", 0, false - } - b, err := strconv.Atoi(bStr) - if err != nil || b < -1 { - return "", 0, false - } - - return fam, b, true -} - -func lookupBucket(fam string, b int) *traceBucket { - f := getFamily(fam, false) - if f == nil || b < 0 || b >= len(f.Buckets) { - return nil - } - return f.Buckets[b] -} - -type contextKeyT string - -var contextKey = contextKeyT("golang.org/x/net/trace.Trace") - -// Trace represents an active request. -type Trace interface { - // LazyLog adds x to the event log. It will be evaluated each time the - // /debug/requests page is rendered. Any memory referenced by x will be - // pinned until the trace is finished and later discarded. - LazyLog(x fmt.Stringer, sensitive bool) - - // LazyPrintf evaluates its arguments with fmt.Sprintf each time the - // /debug/requests page is rendered. Any memory referenced by a will be - // pinned until the trace is finished and later discarded. - LazyPrintf(format string, a ...interface{}) - - // SetError declares that this trace resulted in an error. - SetError() - - // SetRecycler sets a recycler for the trace. - // f will be called for each event passed to LazyLog at a time when - // it is no longer required, whether while the trace is still active - // and the event is discarded, or when a completed trace is discarded. 
- SetRecycler(f func(interface{})) - - // SetTraceInfo sets the trace info for the trace. - // This is currently unused. - SetTraceInfo(traceID, spanID uint64) - - // SetMaxEvents sets the maximum number of events that will be stored - // in the trace. This has no effect if any events have already been - // added to the trace. - SetMaxEvents(m int) - - // Finish declares that this trace is complete. - // The trace should not be used after calling this method. - Finish() -} - -type lazySprintf struct { - format string - a []interface{} -} - -func (l *lazySprintf) String() string { - return fmt.Sprintf(l.format, l.a...) -} - -// New returns a new Trace with the specified family and title. -func New(family, title string) Trace { - tr := newTrace() - tr.ref() - tr.Family, tr.Title = family, title - tr.Start = time.Now() - tr.maxEvents = maxEventsPerTrace - tr.events = tr.eventsBuf[:0] - - activeMu.RLock() - s := activeTraces[tr.Family] - activeMu.RUnlock() - if s == nil { - activeMu.Lock() - s = activeTraces[tr.Family] // check again - if s == nil { - s = new(traceSet) - activeTraces[tr.Family] = s - } - activeMu.Unlock() - } - s.Add(tr) - - // Trigger allocation of the completed trace structure for this family. - // This will cause the family to be present in the request page during - // the first trace of this family. We don't care about the return value, - // nor is there any need for this to run inline, so we execute it in its - // own goroutine, but only if the family isn't allocated yet. - completedMu.RLock() - if _, ok := completedTraces[tr.Family]; !ok { - go allocFamily(tr.Family) - } - completedMu.RUnlock() - - return tr -} - -func (tr *trace) Finish() { - elapsed := time.Since(tr.Start) - tr.mu.Lock() - tr.Elapsed = elapsed - tr.mu.Unlock() - - if DebugUseAfterFinish { - buf := make([]byte, 4<<10) // 4 KB should be enough - n := runtime.Stack(buf, false) - tr.finishStack = buf[:n] - } - - activeMu.RLock() - m := activeTraces[tr.Family] - activeMu.RUnlock() - m.Remove(tr) - - f := getFamily(tr.Family, true) - tr.mu.RLock() // protects tr fields in Cond.match calls - for _, b := range f.Buckets { - if b.Cond.match(tr) { - b.Add(tr) - } - } - tr.mu.RUnlock() - - // Add a sample of elapsed time as microseconds to the family's timeseries - h := new(histogram) - h.addMeasurement(elapsed.Nanoseconds() / 1e3) - f.LatencyMu.Lock() - f.Latency.Add(h) - f.LatencyMu.Unlock() - - tr.unref() // matches ref in New -} - -const ( - bucketsPerFamily = 9 - tracesPerBucket = 10 - maxActiveTraces = 20 // Maximum number of active traces to show. - maxEventsPerTrace = 10 - numHistogramBuckets = 38 -) - -var ( - // The active traces. - activeMu sync.RWMutex - activeTraces = make(map[string]*traceSet) // family -> traces - - // Families of completed traces. - completedMu sync.RWMutex - completedTraces = make(map[string]*family) // family -> traces -) - -type traceSet struct { - mu sync.RWMutex - m map[*trace]bool - - // We could avoid the entire map scan in FirstN by having a slice of all the traces - // ordered by start time, and an index into that from the trace struct, with a periodic - // repack of the slice after enough traces finish; we could also use a skip list or similar. - // However, that would shift some of the expense from /debug/requests time to RPC time, - // which is probably the wrong trade-off. 
-} - -func (ts *traceSet) Len() int { - ts.mu.RLock() - defer ts.mu.RUnlock() - return len(ts.m) -} - -func (ts *traceSet) Add(tr *trace) { - ts.mu.Lock() - if ts.m == nil { - ts.m = make(map[*trace]bool) - } - ts.m[tr] = true - ts.mu.Unlock() -} - -func (ts *traceSet) Remove(tr *trace) { - ts.mu.Lock() - delete(ts.m, tr) - ts.mu.Unlock() -} - -// FirstN returns the first n traces ordered by time. -func (ts *traceSet) FirstN(n int) traceList { - ts.mu.RLock() - defer ts.mu.RUnlock() - - if n > len(ts.m) { - n = len(ts.m) - } - trl := make(traceList, 0, n) - - // Fast path for when no selectivity is needed. - if n == len(ts.m) { - for tr := range ts.m { - tr.ref() - trl = append(trl, tr) - } - sort.Sort(trl) - return trl - } - - // Pick the oldest n traces. - // This is inefficient. See the comment in the traceSet struct. - for tr := range ts.m { - // Put the first n traces into trl in the order they occur. - // When we have n, sort trl, and thereafter maintain its order. - if len(trl) < n { - tr.ref() - trl = append(trl, tr) - if len(trl) == n { - // This is guaranteed to happen exactly once during this loop. - sort.Sort(trl) - } - continue - } - if tr.Start.After(trl[n-1].Start) { - continue - } - - // Find where to insert this one. - tr.ref() - i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) - trl[n-1].unref() - copy(trl[i+1:], trl[i:]) - trl[i] = tr - } - - return trl -} - -func getActiveTraces(fam string) traceList { - activeMu.RLock() - s := activeTraces[fam] - activeMu.RUnlock() - if s == nil { - return nil - } - return s.FirstN(maxActiveTraces) -} - -func getFamily(fam string, allocNew bool) *family { - completedMu.RLock() - f := completedTraces[fam] - completedMu.RUnlock() - if f == nil && allocNew { - f = allocFamily(fam) - } - return f -} - -func allocFamily(fam string) *family { - completedMu.Lock() - defer completedMu.Unlock() - f := completedTraces[fam] - if f == nil { - f = newFamily() - completedTraces[fam] = f - } - return f -} - -// family represents a set of trace buckets and associated latency information. -type family struct { - // traces may occur in multiple buckets. - Buckets [bucketsPerFamily]*traceBucket - - // latency time series - LatencyMu sync.RWMutex - Latency *timeseries.MinuteHourSeries -} - -func newFamily() *family { - return &family{ - Buckets: [bucketsPerFamily]*traceBucket{ - {Cond: minCond(0)}, - {Cond: minCond(50 * time.Millisecond)}, - {Cond: minCond(100 * time.Millisecond)}, - {Cond: minCond(200 * time.Millisecond)}, - {Cond: minCond(500 * time.Millisecond)}, - {Cond: minCond(1 * time.Second)}, - {Cond: minCond(10 * time.Second)}, - {Cond: minCond(100 * time.Second)}, - {Cond: errorCond{}}, - }, - Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), - } -} - -// traceBucket represents a size-capped bucket of historic traces, -// along with a condition for a trace to belong to the bucket. -type traceBucket struct { - Cond cond - - // Ring buffer implementation of a fixed-size FIFO queue. - mu sync.RWMutex - buf [tracesPerBucket]*trace - start int // < tracesPerBucket - length int // <= tracesPerBucket -} - -func (b *traceBucket) Add(tr *trace) { - b.mu.Lock() - defer b.mu.Unlock() - - i := b.start + b.length - if i >= tracesPerBucket { - i -= tracesPerBucket - } - if b.length == tracesPerBucket { - // "Remove" an element from the bucket. 
-		b.buf[i].unref()
-		b.start++
-		if b.start == tracesPerBucket {
-			b.start = 0
-		}
-	}
-	b.buf[i] = tr
-	if b.length < tracesPerBucket {
-		b.length++
-	}
-	tr.ref()
-}
-
-// Copy returns a copy of the traces in the bucket.
-// If tracedOnly is true, only the traces with trace information will be returned.
-// The logs will be ref'd before returning; the caller should call
-// the Free method when it is done with them.
-// TODO(dsymonds): keep track of traced requests in separate buckets.
-func (b *traceBucket) Copy(tracedOnly bool) traceList {
-	b.mu.RLock()
-	defer b.mu.RUnlock()
-
-	trl := make(traceList, 0, b.length)
-	for i, x := 0, b.start; i < b.length; i++ {
-		tr := b.buf[x]
-		if !tracedOnly || tr.spanID != 0 {
-			tr.ref()
-			trl = append(trl, tr)
-		}
-		x++
-		if x == b.length {
-			x = 0
-		}
-	}
-	return trl
-}
-
-func (b *traceBucket) Empty() bool {
-	b.mu.RLock()
-	defer b.mu.RUnlock()
-	return b.length == 0
-}
-
-// cond represents a condition on a trace.
-type cond interface {
-	match(t *trace) bool
-	String() string
-}
-
-type minCond time.Duration
-
-func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) }
-func (m minCond) String() string      { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) }
-
-type errorCond struct{}
-
-func (e errorCond) match(t *trace) bool { return t.IsError }
-func (e errorCond) String() string      { return "errors" }
-
-type traceList []*trace
-
-// Free calls unref on each element of the list.
-func (trl traceList) Free() {
-	for _, t := range trl {
-		t.unref()
-	}
-}
-
-// traceList may be sorted in reverse chronological order.
-func (trl traceList) Len() int           { return len(trl) }
-func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) }
-func (trl traceList) Swap(i, j int)      { trl[i], trl[j] = trl[j], trl[i] }
-
-// An event is a timestamped log entry in a trace.
-type event struct {
-	When       time.Time
-	Elapsed    time.Duration // since previous event in trace
-	NewDay     bool          // whether this event is on a different day to the previous event
-	Recyclable bool          // whether this event was passed via LazyLog
-	Sensitive  bool          // whether this event contains sensitive information
-	What       interface{}   // string or fmt.Stringer
-}
-
-// WhenString returns a string representation of the elapsed time of the event.
-// It will include the date if midnight was crossed.
-func (e event) WhenString() string {
-	if e.NewDay {
-		return e.When.Format("2006/01/02 15:04:05.000000")
-	}
-	return e.When.Format("15:04:05.000000")
-}
-
-// discarded represents a number of discarded events.
-// It is stored as *discarded to make it easier to update in-place.
-type discarded int
-
-func (d *discarded) String() string {
-	return fmt.Sprintf("(%d events discarded)", int(*d))
-}
-
-// trace represents an active or complete request,
-// either sent or received by this program.
-type trace struct {
-	// Family is the top-level grouping of traces to which this belongs.
-	Family string
-
-	// Title is the title of this trace.
-	Title string
-
-	// Start time of this trace.
-	Start time.Time
-
-	mu        sync.RWMutex
-	events    []event // Append-only sequence of events (modulo discards).
-	maxEvents int
-	recycler  func(interface{})
-	IsError   bool          // Whether this trace resulted in an error.
-	Elapsed   time.Duration // Elapsed time for this trace, zero while active.
-	traceID   uint64        // Trace information if non-zero.
- spanID uint64 - - refs int32 // how many buckets this is in - disc discarded // scratch space to avoid allocation - - finishStack []byte // where finish was called, if DebugUseAfterFinish is set - - eventsBuf [4]event // preallocated buffer in case we only log a few events -} - -func (tr *trace) reset() { - // Clear all but the mutex. Mutexes may not be copied, even when unlocked. - tr.Family = "" - tr.Title = "" - tr.Start = time.Time{} - - tr.mu.Lock() - tr.Elapsed = 0 - tr.traceID = 0 - tr.spanID = 0 - tr.IsError = false - tr.maxEvents = 0 - tr.events = nil - tr.recycler = nil - tr.mu.Unlock() - - tr.refs = 0 - tr.disc = 0 - tr.finishStack = nil - for i := range tr.eventsBuf { - tr.eventsBuf[i] = event{} - } -} - -// delta returns the elapsed time since the last event or the trace start, -// and whether it spans midnight. -// L >= tr.mu -func (tr *trace) delta(t time.Time) (time.Duration, bool) { - if len(tr.events) == 0 { - return t.Sub(tr.Start), false - } - prev := tr.events[len(tr.events)-1].When - return t.Sub(prev), prev.Day() != t.Day() -} - -func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { - if DebugUseAfterFinish && tr.finishStack != nil { - buf := make([]byte, 4<<10) // 4 KB should be enough - n := runtime.Stack(buf, false) - log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) - } - - /* - NOTE TO DEBUGGERS - - If you are here because your program panicked in this code, - it is almost definitely the fault of code using this package, - and very unlikely to be the fault of this code. - - The most likely scenario is that some code elsewhere is using - a trace.Trace after its Finish method is called. - You can temporarily set the DebugUseAfterFinish var - to help discover where that is; do not leave that var set, - since it makes this package much less efficient. - */ - - e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} - tr.mu.Lock() - e.Elapsed, e.NewDay = tr.delta(e.When) - if len(tr.events) < tr.maxEvents { - tr.events = append(tr.events, e) - } else { - // Discard the middle events. - di := int((tr.maxEvents - 1) / 2) - if d, ok := tr.events[di].What.(*discarded); ok { - (*d)++ - } else { - // disc starts at two to count for the event it is replacing, - // plus the next one that we are about to drop. - tr.disc = 2 - if tr.recycler != nil && tr.events[di].Recyclable { - go tr.recycler(tr.events[di].What) - } - tr.events[di].What = &tr.disc - } - // The timestamp of the discarded meta-event should be - // the time of the last event it is representing. - tr.events[di].When = tr.events[di+1].When - - if tr.recycler != nil && tr.events[di+1].Recyclable { - go tr.recycler(tr.events[di+1].What) - } - copy(tr.events[di+1:], tr.events[di+2:]) - tr.events[tr.maxEvents-1] = e - } - tr.mu.Unlock() -} - -func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { - tr.addEvent(x, true, sensitive) -} - -func (tr *trace) LazyPrintf(format string, a ...interface{}) { - tr.addEvent(&lazySprintf{format, a}, false, false) -} - -func (tr *trace) SetError() { - tr.mu.Lock() - tr.IsError = true - tr.mu.Unlock() -} - -func (tr *trace) SetRecycler(f func(interface{})) { - tr.mu.Lock() - tr.recycler = f - tr.mu.Unlock() -} - -func (tr *trace) SetTraceInfo(traceID, spanID uint64) { - tr.mu.Lock() - tr.traceID, tr.spanID = traceID, spanID - tr.mu.Unlock() -} - -func (tr *trace) SetMaxEvents(m int) { - tr.mu.Lock() - // Always keep at least three events: first, discarded count, last. 
- if len(tr.events) == 0 && m > 3 { - tr.maxEvents = m - } - tr.mu.Unlock() -} - -func (tr *trace) ref() { - atomic.AddInt32(&tr.refs, 1) -} - -func (tr *trace) unref() { - if atomic.AddInt32(&tr.refs, -1) == 0 { - tr.mu.RLock() - if tr.recycler != nil { - // freeTrace clears tr, so we hold tr.recycler and tr.events here. - go func(f func(interface{}), es []event) { - for _, e := range es { - if e.Recyclable { - f(e.What) - } - } - }(tr.recycler, tr.events) - } - tr.mu.RUnlock() - - freeTrace(tr) - } -} - -func (tr *trace) When() string { - return tr.Start.Format("2006/01/02 15:04:05.000000") -} - -func (tr *trace) ElapsedTime() string { - tr.mu.RLock() - t := tr.Elapsed - tr.mu.RUnlock() - - if t == 0 { - // Active trace. - t = time.Since(tr.Start) - } - return fmt.Sprintf("%.6f", t.Seconds()) -} - -func (tr *trace) Events() []event { - tr.mu.RLock() - defer tr.mu.RUnlock() - return tr.events -} - -var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? - -// newTrace returns a trace ready to use. -func newTrace() *trace { - select { - case tr := <-traceFreeList: - return tr - default: - return new(trace) - } -} - -// freeTrace adds tr to traceFreeList if there's room. -// This is non-blocking. -func freeTrace(tr *trace) { - if DebugUseAfterFinish { - return // never reuse - } - tr.reset() - select { - case traceFreeList <- tr: - default: - } -} - -func elapsed(d time.Duration) string { - b := []byte(fmt.Sprintf("%.6f", d.Seconds())) - - // For subsecond durations, blank all zeros before decimal point, - // and all zeros between the decimal point and the first non-zero digit. - if d < time.Second { - dot := bytes.IndexByte(b, '.') - for i := 0; i < dot; i++ { - b[i] = ' ' - } - for i := dot + 1; i < len(b); i++ { - if b[i] == '0' { - b[i] = ' ' - } else { - break - } - } - } - - return string(b) -} - -var pageTmplCache *template.Template -var pageTmplOnce sync.Once - -func pageTmpl() *template.Template { - pageTmplOnce.Do(func() { - pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ - "elapsed": elapsed, - "add": func(a, b int) int { return a + b }, - }).Parse(pageHTML)) - }) - return pageTmplCache -} - -const pageHTML = ` -{{template "Prolog" .}} -{{template "StatusTable" .}} -{{template "Epilog" .}} - -{{define "Prolog"}} - - - /debug/requests - - - - -

/debug/requests

-{{end}} {{/* end of Prolog */}} - -{{define "StatusTable"}} - - {{range $fam := .Families}} - - - - {{$n := index $.ActiveTraceCount $fam}} - - - {{$f := index $.CompletedTraces $fam}} - {{range $i, $b := $f.Buckets}} - {{$empty := $b.Empty}} - - {{end}} - - {{$nb := len $f.Buckets}} - - - - - - {{end}} -
{{$fam}} - {{if $n}}{{end}} - [{{$n}} active] - {{if $n}}{{end}} - - {{if not $empty}}{{end}} - [{{.Cond}}] - {{if not $empty}}{{end}} - - [minute] - - [hour] - - [total] -
-{{end}} {{/* end of StatusTable */}} - -{{define "Epilog"}} -{{if $.Traces}} -
-

Family: {{$.Family}}

- -{{if or $.Expanded $.Traced}} - [Normal/Summary] -{{else}} - [Normal/Summary] -{{end}} - -{{if or (not $.Expanded) $.Traced}} - [Normal/Expanded] -{{else}} - [Normal/Expanded] -{{end}} - -{{if not $.Active}} - {{if or $.Expanded (not $.Traced)}} - [Traced/Summary] - {{else}} - [Traced/Summary] - {{end}} - {{if or (not $.Expanded) (not $.Traced)}} - [Traced/Expanded] - {{else}} - [Traced/Expanded] - {{end}} -{{end}} - -{{if $.Total}} -

Showing {{len $.Traces}} of {{$.Total}} traces.

-{{end}} - - - - - {{range $tr := $.Traces}} - - - - - {{/* TODO: include traceID/spanID */}} - - {{if $.Expanded}} - {{range $tr.Events}} - - - - - - {{end}} - {{end}} - {{end}} -
- {{if $.Active}}Active{{else}}Completed{{end}} Requests -
WhenElapsed (s)
{{$tr.When}}{{$tr.ElapsedTime}}{{$tr.Title}}
{{.WhenString}}{{elapsed .Elapsed}}{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
-{{end}} {{/* if $.Traces */}} - -{{if $.Histogram}} -

Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

-{{$.Histogram}} -{{end}} {{/* if $.Histogram */}} - - - -{{end}} {{/* end of Epilog */}} -` diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE b/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go deleted file mode 100644 index a6b5081888..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.9 -// source: google/rpc/status.proto - -package status - -import ( - reflect "reflect" - sync "sync" - - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// The `Status` type defines a logical error model that is suitable for -// different programming environments, including REST APIs and RPC APIs. It is -// used by [gRPC](https://github.com/grpc). Each `Status` message contains -// three pieces of data: error code, error message, and error details. -// -// You can find out more about this error model and how to work with it in the -// [API Design Guide](https://cloud.google.com/apis/design/errors). -type Status struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The status code, which should be an enum value of - // [google.rpc.Code][google.rpc.Code]. - Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - // A developer-facing error message, which should be in English. Any - // user-facing error message should be localized and sent in the - // [google.rpc.Status.details][google.rpc.Status.details] field, or localized - // by the client. - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - // A list of messages that carry the error details. There is a common set of - // message types for APIs to use. 
- Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` -} - -func (x *Status) Reset() { - *x = Status{} - if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_status_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Status) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Status) ProtoMessage() {} - -func (x *Status) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_status_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Status.ProtoReflect.Descriptor instead. -func (*Status) Descriptor() ([]byte, []int) { - return file_google_rpc_status_proto_rawDescGZIP(), []int{0} -} - -func (x *Status) GetCode() int32 { - if x != nil { - return x.Code - } - return 0 -} - -func (x *Status) GetMessage() string { - if x != nil { - return x.Message - } - return "" -} - -func (x *Status) GetDetails() []*anypb.Any { - if x != nil { - return x.Details - } - return nil -} - -var File_google_rpc_status_proto protoreflect.FileDescriptor - -var file_google_rpc_status_proto_rawDesc = []byte{ - 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, - 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, - 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, - 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, - 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x61, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x0b, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3b, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var ( - file_google_rpc_status_proto_rawDescOnce sync.Once - file_google_rpc_status_proto_rawDescData = file_google_rpc_status_proto_rawDesc -) - -func file_google_rpc_status_proto_rawDescGZIP() []byte { - file_google_rpc_status_proto_rawDescOnce.Do(func() { - file_google_rpc_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_status_proto_rawDescData) - }) - return file_google_rpc_status_proto_rawDescData -} - -var file_google_rpc_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var 
file_google_rpc_status_proto_goTypes = []interface{}{
-	(*Status)(nil),    // 0: google.rpc.Status
-	(*anypb.Any)(nil), // 1: google.protobuf.Any
-}
-var file_google_rpc_status_proto_depIdxs = []int32{
-	1, // 0: google.rpc.Status.details:type_name -> google.protobuf.Any
-	1, // [1:1] is the sub-list for method output_type
-	1, // [1:1] is the sub-list for method input_type
-	1, // [1:1] is the sub-list for extension type_name
-	1, // [1:1] is the sub-list for extension extendee
-	0, // [0:1] is the sub-list for field type_name
-}
-
-func init() { file_google_rpc_status_proto_init() }
-func file_google_rpc_status_proto_init() {
-	if File_google_rpc_status_proto != nil {
-		return
-	}
-	if !protoimpl.UnsafeEnabled {
-		file_google_rpc_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Status); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
-	type x struct{}
-	out := protoimpl.TypeBuilder{
-		File: protoimpl.DescBuilder{
-			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_google_rpc_status_proto_rawDesc,
-			NumEnums:      0,
-			NumMessages:   1,
-			NumExtensions: 0,
-			NumServices:   0,
-		},
-		GoTypes:           file_google_rpc_status_proto_goTypes,
-		DependencyIndexes: file_google_rpc_status_proto_depIdxs,
-		MessageInfos:      file_google_rpc_status_proto_msgTypes,
-	}.Build()
-	File_google_rpc_status_proto = out.File
-	file_google_rpc_status_proto_rawDesc = nil
-	file_google_rpc_status_proto_goTypes = nil
-	file_google_rpc_status_proto_depIdxs = nil
-}
diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS
deleted file mode 100644
index e491a9e7f7..0000000000
--- a/vendor/google.golang.org/grpc/AUTHORS
+++ /dev/null
@@ -1 +0,0 @@
-Google Inc.
diff --git a/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md
deleted file mode 100644
index 9d4213ebca..0000000000
--- a/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Community Code of Conduct
-
-gRPC follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
deleted file mode 100644
index 608aa6e1ac..0000000000
--- a/vendor/google.golang.org/grpc/CONTRIBUTING.md
+++ /dev/null
@@ -1,73 +0,0 @@
-# How to contribute
-
-We definitely welcome your patches and contributions to gRPC! Please read the gRPC
-organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md)
-and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding.
-
-If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
-
-## Legal requirements
-
-In order to protect both you and ourselves, you will need to sign the
-[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf).
-
-## Guidelines for Pull Requests
-How to get your contributions merged smoothly and quickly.
-
-- Create **small PRs** that are narrowly focused on **addressing a single
-  concern**. We often receive PRs that try to fix several things at a time;
-  when only one fix is considered acceptable, nothing gets merged, and both
-  the author's and the reviewers' time is wasted. Create more PRs to address
-  different concerns and everyone will be happy.
-
-- If you are searching for features to work on, the issues labeled [Status: Help
-  Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22)
-  are a great place to start. These issues are well-documented and usually can be
-  resolved with a single pull request.
-
-- If you are adding a new file, make sure it has the copyright message template
-  at the top as a comment. You can copy over the message from an existing file
-  and update the year.
-
-- The grpc package should only depend on standard Go packages and a small number
-  of exceptions. If your contribution introduces new dependencies which are NOT
-  in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a
-  discussion with gRPC-Go authors and consultants.
-
-- For speculative changes, consider opening an issue and discussing it first. If
-  you are suggesting a behavioral or API change, consider starting with a [gRFC
-  proposal](https://github.com/grpc/proposal).
-
-- Provide a good **PR description** as a record of **what** change is being made
-  and **why** it was made. Link to a GitHub issue if it exists.
-
-- If you want to fix formatting or style, consider whether your changes are an
-  obvious improvement or might be considered a personal preference. If a style
-  change is based on preference, it likely will not be accepted. If it corrects
-  widely agreed-upon anti-patterns, then please do create a PR and explain the
-  benefits of the change.
-
-- Unless your PR is trivial, you should expect there will be reviewer comments
-  that you'll need to address before merging. We'll mark it as `Status: Requires
-  Reporter Clarification` if we expect you to respond to these comments in a
-  timely manner. If the PR remains inactive for 6 days, it will be marked as
-  `stale` and automatically close 7 days after that if we don't hear back from
-  you.
-
-- Maintain a **clean commit history** and use **meaningful commit messages**. PRs
-  with messy commit history are difficult to review and won't be merged. Use
-  `rebase -i upstream/master` to curate your commit history and/or to bring in
-  latest changes from master (but avoid rebasing in the middle of a code
-  review).
-
-- Keep your PR up to date with upstream/master (if there are merge conflicts, we
-  can't really merge your change).
-
-- **All tests need to be passing** before your change can be merged. We
-  recommend you **run tests locally** before creating your PR to catch breakages
-  early on.
-  - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors
-  - `go test -cpu 1,4 -timeout 7m ./...` to run the tests
-  - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode
-
-- Exceptions to the rules can be made if there's a compelling reason for doing so.
diff --git a/vendor/google.golang.org/grpc/GOVERNANCE.md b/vendor/google.golang.org/grpc/GOVERNANCE.md
deleted file mode 100644
index d6ff267471..0000000000
--- a/vendor/google.golang.org/grpc/GOVERNANCE.md
+++ /dev/null
@@ -1 +0,0 @@
-This repository is governed by the gRPC organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md).
diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE
deleted file mode 100644
index d645695673..0000000000
--- a/vendor/google.golang.org/grpc/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1.
Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
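The appendix above tells downstream users how to apply the boilerplate. For reference, a sketch of how that notice typically sits at the top of a Go source file once the bracketed fields are filled in (the year, owner, and package name below are placeholders):

```go
// Copyright 2014 Example Copyright Owner
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package example is a placeholder showing where the notice goes.
package example
```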
diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md deleted file mode 100644 index c6672c0a3e..0000000000 --- a/vendor/google.golang.org/grpc/MAINTAINERS.md +++ /dev/null @@ -1,28 +0,0 @@ -This page lists all active maintainers of this repository. If you were a -maintainer and would like to add your name to the Emeritus list, please send us a -PR. - -See [GOVERNANCE.md](https://github.com/grpc/grpc-community/blob/master/governance.md) -for governance guidelines and how to become a maintainer. -See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) -for general contribution guidelines. - -## Maintainers (in alphabetical order) - -- [cesarghali](https://github.com/cesarghali), Google LLC -- [dfawley](https://github.com/dfawley), Google LLC -- [easwars](https://github.com/easwars), Google LLC -- [menghanl](https://github.com/menghanl), Google LLC -- [srini100](https://github.com/srini100), Google LLC - -## Emeritus Maintainers (in alphabetical order) -- [adelez](https://github.com/adelez), Google LLC -- [canguler](https://github.com/canguler), Google LLC -- [iamqizhao](https://github.com/iamqizhao), Google LLC -- [jadekler](https://github.com/jadekler), Google LLC -- [jtattermusch](https://github.com/jtattermusch), Google LLC -- [lyuxuan](https://github.com/lyuxuan), Google LLC -- [makmukhi](https://github.com/makmukhi), Google LLC -- [matt-kwong](https://github.com/matt-kwong), Google LLC -- [nicolasnoble](https://github.com/nicolasnoble), Google LLC -- [yongni](https://github.com/yongni), Google LLC diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile deleted file mode 100644 index 1f8960922b..0000000000 --- a/vendor/google.golang.org/grpc/Makefile +++ /dev/null @@ -1,46 +0,0 @@ -all: vet test testrace - -build: - go build google.golang.org/grpc/... - -clean: - go clean -i google.golang.org/grpc/... - -deps: - GO111MODULE=on go get -d -v google.golang.org/grpc/... - -proto: - @ if ! which protoc > /dev/null; then \ - echo "error: protoc not installed" >&2; \ - exit 1; \ - fi - go generate google.golang.org/grpc/... - -test: - go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... - -testsubmodule: - cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... - cd security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/... - -testrace: - go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/... - -testdeps: - GO111MODULE=on go get -d -v -t google.golang.org/grpc/... - -vet: vetdeps - ./vet.sh - -vetdeps: - ./vet.sh -install - -.PHONY: \ - all \ - build \ - clean \ - proto \ - test \ - testrace \ - vet \ - vetdeps diff --git a/vendor/google.golang.org/grpc/NOTICE.txt b/vendor/google.golang.org/grpc/NOTICE.txt deleted file mode 100644 index 530197749e..0000000000 --- a/vendor/google.golang.org/grpc/NOTICE.txt +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2014 gRPC authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md deleted file mode 100644 index ab0fbb79b8..0000000000 --- a/vendor/google.golang.org/grpc/README.md +++ /dev/null @@ -1,107 +0,0 @@ -# gRPC-Go - -[![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] -[![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) -[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go) - -The [Go][] implementation of [gRPC][]: A high performance, open source, general -RPC framework that puts mobile and HTTP/2 first. For more information see the -[Go gRPC docs][], or jump directly into the [quick start][]. - -## Prerequisites - -- **[Go][]**: any one of the **three latest major** [releases][go-releases]. - -## Installation - -Simply add the following import to your code, and then `go [build|run|test]` -will automatically fetch the necessary dependencies: - - -```go -import "google.golang.org/grpc" -``` - -> **Note:** If you are trying to access `grpc-go` from **China**, see the -> [FAQ](#FAQ) below. - -## Learn more - -- [Go gRPC docs][], which include a [quick start][] and [API - reference][API] among other resources -- [Low-level technical docs](Documentation) from this repository -- [Performance benchmark][] -- [Examples](examples) - -## FAQ - -### I/O Timeout Errors - -The `golang.org` domain may be blocked from some countries. `go get` usually -produces an error like the following when this happens: - -```console -$ go get -u google.golang.org/grpc -package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout) -``` - -To build Go code, there are several options: - -- Set up a VPN and access google.golang.org through that. - -- With Go module support: it is possible to use the `replace` feature of `go - mod` to create aliases for golang.org packages. In your project's directory: - - ```sh - go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest - go mod tidy - go mod vendor - go build -mod=vendor - ``` - - Again, this will need to be done for all transitive dependencies hosted on - golang.org as well. For details, refer to [golang/go issue - #28652](https://github.com/golang/go/issues/28652). - -### Compiling error, undefined: grpc.SupportPackageIsVersion - -Please update to the latest version of gRPC-Go using -`go get google.golang.org/grpc`. - -### How to turn on logging - -The default logger is controlled by environment variables. Turn everything on -like this: - -```console -$ export GRPC_GO_LOG_VERBOSITY_LEVEL=99 -$ export GRPC_GO_LOG_SEVERITY_LEVEL=info -``` - -### The RPC failed with error `"code = Unavailable desc = transport is closing"` - -This error means the connection the RPC is using was closed, and there are many -possible reasons, including: - 1. mis-configured transport credentials, connection failed on handshaking - 1. bytes disrupted, possibly by a proxy in between - 1. server shutdown - 1. Keepalive parameters caused connection shutdown, for example if you have - configured your server to terminate connections regularly to [trigger DNS - lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). 
- If this is the case, you may want to increase your - [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), - to allow longer RPC calls to finish. - -It can be tricky to debug this because the error happens on the client side but -the root cause of the connection being closed is on the server side. Turn on -logging on __both client and server__, and see if there are any transport -errors. - -[API]: https://pkg.go.dev/google.golang.org/grpc -[Go]: https://golang.org -[Go module]: https://github.com/golang/go/wiki/Modules -[gRPC]: https://grpc.io -[Go gRPC docs]: https://grpc.io/docs/languages/go -[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5180705743044608 -[quick start]: https://grpc.io/docs/languages/go/quickstart -[go-releases]: https://golang.org/doc/devel/release.html diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md deleted file mode 100644 index be6e108705..0000000000 --- a/vendor/google.golang.org/grpc/SECURITY.md +++ /dev/null @@ -1,3 +0,0 @@ -# Security Policy - -For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go deleted file mode 100644 index 52d530d7ad..0000000000 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ /dev/null @@ -1,141 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package attributes defines a generic key/value store used in various gRPC -// components. -// -// # Experimental -// -// Notice: This package is EXPERIMENTAL and may be changed or removed in a -// later release. -package attributes - -import ( - "fmt" - "strings" -) - -// Attributes is an immutable struct for storing and retrieving generic -// key/value pairs. Keys must be hashable, and users should define their own -// types for keys. Values should not be modified after they are added to an -// Attributes or if they were received from one. If values implement 'Equal(o -// any) bool', it will be called by (*Attributes).Equal to determine whether -// two values with the same key should be considered equal. -type Attributes struct { - m map[any]any -} - -// New returns a new Attributes containing the key/value pair. -func New(key, value any) *Attributes { - return &Attributes{m: map[any]any{key: value}} -} - -// WithValue returns a new Attributes containing the previous keys and values -// and the new key/value pair. If the same key appears multiple times, the -// last value overwrites all previous values for that key. To remove an -// existing key, use a nil value. value should not be modified later.
-func (a *Attributes) WithValue(key, value any) *Attributes { - if a == nil { - return New(key, value) - } - n := &Attributes{m: make(map[any]any, len(a.m)+1)} - for k, v := range a.m { - n.m[k] = v - } - n.m[key] = value - return n -} - -// Value returns the value associated with these attributes for key, or nil if -// no value is associated with key. The returned value should not be modified. -func (a *Attributes) Value(key any) any { - if a == nil { - return nil - } - return a.m[key] -} - -// Equal returns whether a and o are equivalent. If 'Equal(o any) bool' is -// implemented for a value in the attributes, it is called to determine if the -// value matches the one stored in the other attributes. If Equal is not -// implemented, standard equality is used to determine if the two values are -// equal. Note that some types (e.g. maps) aren't comparable by default, so -// they must be wrapped in a struct, or in an alias type, with Equal defined. -func (a *Attributes) Equal(o *Attributes) bool { - if a == nil && o == nil { - return true - } - if a == nil || o == nil { - return false - } - if len(a.m) != len(o.m) { - return false - } - for k, v := range a.m { - ov, ok := o.m[k] - if !ok { - // o missing element of a - return false - } - if eq, ok := v.(interface{ Equal(o any) bool }); ok { - if !eq.Equal(ov) { - return false - } - } else if v != ov { - // Fall back to a standard equality check if Equal is unimplemented. - return false - } - } - return true -} - -// String prints the attribute map. If any keys or values throughout the map -// implement fmt.Stringer, it calls that method and appends. -func (a *Attributes) String() string { - var sb strings.Builder - sb.WriteString("{") - first := true - for k, v := range a.m { - if !first { - sb.WriteString(", ") - } - sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v))) - first = false - } - sb.WriteString("}") - return sb.String() -} - -func str(x any) (s string) { - if v, ok := x.(fmt.Stringer); ok { - return fmt.Sprint(v) - } else if v, ok := x.(string); ok { - return v - } - return fmt.Sprintf("<%p>", x) -} - -// MarshalJSON helps implement the json.Marshaler interface, thereby rendering -// the Attributes correctly when printing (via pretty.JSON) structs containing -// Attributes as fields. -// -// It is impossible to unmarshal attributes from a JSON representation, and this -// method is meant only for debugging purposes. -func (a *Attributes) MarshalJSON() ([]byte, error) { - return []byte(a.String()), nil -} diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go deleted file mode 100644 index 29475e31c9..0000000000 --- a/vendor/google.golang.org/grpc/backoff.go +++ /dev/null @@ -1,61 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// See internal/backoff package for the backoff implementation. This file is -// kept for the exported types and API backward compatibility.
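For orientation, a minimal sketch of how the attributes API above is used. The keyType definition follows the package's advice that users define their own key types; the key and value strings are illustrative only:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// keyType is a user-defined key type, as the Attributes docs recommend.
type keyType string

func main() {
	// New creates an Attributes holding one pair; WithValue returns a
	// copy with an extra pair, leaving the original untouched.
	a := attributes.New(keyType("region"), "us-east-1")
	b := a.WithValue(keyType("zone"), "b")

	fmt.Println(b.Value(keyType("region"))) // us-east-1
	fmt.Println(b.Value(keyType("zone")))   // b
	fmt.Println(a.Equal(b))                 // false: a lacks the "zone" key
}
```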
- -package grpc - -import ( - "time" - - "google.golang.org/grpc/backoff" -) - -// DefaultBackoffConfig uses values specified for backoff in -// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. -// -// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. -var DefaultBackoffConfig = BackoffConfig{ - MaxDelay: 120 * time.Second, -} - -// BackoffConfig defines the parameters for the default gRPC backoff strategy. -// -// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. -type BackoffConfig struct { - // MaxDelay is the upper bound of backoff delay. - MaxDelay time.Duration -} - -// ConnectParams defines the parameters for connecting and retrying. Users are -// encouraged to use this instead of the BackoffConfig type defined above. See -// here for more details: -// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type ConnectParams struct { - // Backoff specifies the configuration options for connection backoff. - Backoff backoff.Config - // MinConnectTimeout is the minimum amount of time we are willing to give a - // connection to complete. - MinConnectTimeout time.Duration -} diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go deleted file mode 100644 index 0787d0b50c..0000000000 --- a/vendor/google.golang.org/grpc/backoff/backoff.go +++ /dev/null @@ -1,52 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package backoff provides configuration options for backoff. -// -// More details can be found at: -// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. -// -// All APIs in this package are experimental. -package backoff - -import "time" - -// Config defines the configuration options for backoff. -type Config struct { - // BaseDelay is the amount of time to backoff after the first failure. - BaseDelay time.Duration - // Multiplier is the factor with which to multiply backoffs after a - // failed retry. Should ideally be greater than 1. - Multiplier float64 - // Jitter is the factor with which backoffs are randomized. - Jitter float64 - // MaxDelay is the upper bound of backoff delay. - MaxDelay time.Duration -} - -// DefaultConfig is a backoff configuration with the default values specified -// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. -// -// This should be useful for callers who want to configure backoff with -// non-default values only for a subset of the options.
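The backoff.Config above is consumed through grpc.ConnectParams rather than used directly. A minimal sketch of overriding MaxDelay on a dial; the target and durations are placeholder values:

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Start from DefaultConfig and override only the option of interest,
	// as the DefaultConfig doc comment suggests.
	bc := backoff.DefaultConfig
	bc.MaxDelay = 30 * time.Second // placeholder value

	conn, err := grpc.Dial("localhost:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           bc,
			MinConnectTimeout: 10 * time.Second, // placeholder value
		}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```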
-var DefaultConfig = Config{ - BaseDelay: 1.0 * time.Second, - Multiplier: 1.6, - Jitter: 0.2, - MaxDelay: 120 * time.Second, -} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go deleted file mode 100644 index d79560a2e2..0000000000 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ /dev/null @@ -1,442 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package balancer defines APIs for load balancing in gRPC. -// All APIs in this package are experimental. -package balancer - -import ( - "context" - "encoding/json" - "errors" - "net" - "strings" - - "google.golang.org/grpc/channelz" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" -) - -var ( - // m is a map from name to balancer builder. - m = make(map[string]Builder) - - logger = grpclog.Component("balancer") -) - -// Register registers the balancer builder to the balancer map. b.Name -// (lowercased) will be used as the name registered with this builder. If the -// Builder implements ConfigParser, ParseConfig will be called when new service -// configs are received by the resolver, and the result will be provided to the -// Balancer in UpdateClientConnState. -// -// NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple Balancers are -// registered with the same name, the one registered last will take effect. -func Register(b Builder) { - if strings.ToLower(b.Name()) != b.Name() { - // TODO: Skip the use of strings.ToLower() to index the map after v1.59 - // is released to switch to case sensitive balancer registry. Also, - // remove this warning and update the docstrings for Register and Get. - logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name()) - } - m[strings.ToLower(b.Name())] = b -} - -// unregisterForTesting deletes the balancer with the given name from the -// balancer map. -// -// This function is not thread-safe. -func unregisterForTesting(name string) { - delete(m, name) -} - -func init() { - internal.BalancerUnregister = unregisterForTesting -} - -// Get returns the balancer builder registered with the given name. -// Note that the compare is done in a case-insensitive fashion. -// If no builder is registered with the name, nil will be returned. -func Get(name string) Builder { - if strings.ToLower(name) != name { - // TODO: Skip the use of strings.ToLower() to index the map after v1.59 - // is released to switch to case sensitive balancer registry. Also, - // remove this warning and update the docstrings for Register and Get. - logger.Warningf("Balancer retrieved for name %q.
grpc-go will be switching to case sensitive balancer registries soon", name) - } - if b, ok := m[strings.ToLower(name)]; ok { - return b - } - return nil -} - -// A SubConn represents a single connection to a gRPC backend service. -// -// Each SubConn contains a list of addresses. -// -// All SubConns start in IDLE, and will not try to connect. To trigger -// connecting, Balancers must call Connect. If a connection re-enters IDLE, -// Balancers must call Connect again to trigger a new connection attempt. -// -// gRPC will try to connect to the addresses in sequence, and stop trying the -// remainder once the first connection is successful. If an attempt to connect -// to all addresses encounters an error, the SubConn will enter -// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE. -// -// Once established, if a connection is lost, the SubConn will transition -// directly to IDLE. -// -// This interface is to be implemented by gRPC. Users should not need their own -// implementation of this interface. For situations like testing, any -// implementations should embed this interface. This allows gRPC to add new -// methods to this interface. -type SubConn interface { - // UpdateAddresses updates the addresses used in this SubConn. - // gRPC checks if the currently-connected address is still in the new list. - // If it's in the list, the connection will be kept. - // If it's not in the list, the connection will be gracefully closed, and - // a new connection will be created. - // - // This will trigger a state transition for the SubConn. - // - // Deprecated: this method will be removed. Create new SubConns for new - // addresses instead. - UpdateAddresses([]resolver.Address) - // Connect starts the connecting for this SubConn. - Connect() - // GetOrBuildProducer returns a reference to the existing Producer for this - // ProducerBuilder in this SubConn, or, if one does not currently exist, - // creates a new one and returns it. Returns a close function which must - // be called when the Producer is no longer needed. - GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) - // Shutdown shuts down the SubConn gracefully. Any started RPCs will be - // allowed to complete. No future calls should be made on the SubConn. - // One final state update will be delivered to the StateListener (or - // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to - // indicate the shutdown operation. This may be delivered before - // in-progress RPCs are complete and the actual connection is closed. - Shutdown() -} - -// NewSubConnOptions contains options to create new SubConn. -type NewSubConnOptions struct { - // CredsBundle is the credentials bundle that will be used in the created - // SubConn. If it's nil, the original creds from grpc DialOptions will be - // used. - // - // Deprecated: Use the Attributes field in resolver.Address to pass - // arbitrary data to the credential handshaker. - CredsBundle credentials.Bundle - // HealthCheckEnabled indicates whether the health check service should be - // enabled on this SubConn. - HealthCheckEnabled bool - // StateListener is called when the state of the subconn changes. If nil, - // Balancer.UpdateSubConnState will be called instead. Will never be - // invoked until after Connect() is called on the SubConn created with - // these options. - StateListener func(SubConnState) -} - -// State contains the balancer's state relevant to the gRPC ClientConn.
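As a sketch of the registration flow described above, a skeleton Builder wired up in an init function. The policy name is hypothetical and the Build body is deliberately left unimplemented:

```go
package custombalancer

import "google.golang.org/grpc/balancer"

// Name is lowercase to match the case-insensitive registry noted above.
const Name = "custom_lb" // hypothetical policy name

type bb struct{}

func (bb) Name() string { return Name }

func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
	// A real policy would construct and return its Balancer here.
	panic("skeleton only: not implemented")
}

// Register must only run at init time; the registry is not thread-safe.
func init() { balancer.Register(bb{}) }
```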
-type State struct { - // State contains the connectivity state of the balancer, which is used to - // determine the state of the ClientConn. - ConnectivityState connectivity.State - // Picker is used to choose connections (SubConns) for RPCs. - Picker Picker -} - -// ClientConn represents a gRPC ClientConn. -// -// This interface is to be implemented by gRPC. Users should not need a -// brand new implementation of this interface. For situations like -// testing, the new implementation should embed this interface. This allows -// gRPC to add new methods to this interface. -type ClientConn interface { - // NewSubConn is called by the balancer to create a new SubConn. - // It doesn't block and wait for the connections to be established. - // Behaviors of the SubConn can be controlled by options. - // - // Deprecated: please be aware that in a future version, SubConns will only - // support one address per SubConn. - NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) - // RemoveSubConn removes the SubConn from ClientConn. - // The SubConn will be shut down. - // - // Deprecated: use SubConn.Shutdown instead. - RemoveSubConn(SubConn) - // UpdateAddresses updates the addresses used in the passed in SubConn. - // gRPC checks if the currently connected address is still in the new list. - // If so, the connection will be kept. Else, the connection will be - // gracefully closed, and a new connection will be created. - // - // This may trigger a state transition for the SubConn. - // - // Deprecated: this method will be removed. Create new SubConns for new - // addresses instead. - UpdateAddresses(SubConn, []resolver.Address) - - // UpdateState notifies gRPC that the balancer's internal state has - // changed. - // - // gRPC will update the connectivity state of the ClientConn, and will call - // Pick on the new Picker to pick new SubConns. - UpdateState(State) - - // ResolveNow is called by the balancer to notify gRPC to do name resolving. - ResolveNow(resolver.ResolveNowOptions) - - // Target returns the dial target for this ClientConn. - // - // Deprecated: Use the Target field in the BuildOptions instead. - Target() string -} - -// BuildOptions contains additional information for Build. -type BuildOptions struct { - // DialCreds is the transport credentials to use when communicating with a - // remote load balancer server. Balancer implementations which do not - // communicate with a remote load balancer server can ignore this field. - DialCreds credentials.TransportCredentials - // CredsBundle is the credentials bundle to use when communicating with a - // remote load balancer server. Balancer implementations which do not - // communicate with a remote load balancer server can ignore this field. - CredsBundle credentials.Bundle - // Dialer is the custom dialer to use when communicating with a remote load - // balancer server. Balancer implementations which do not communicate with a - // remote load balancer server can ignore this field. - Dialer func(context.Context, string) (net.Conn, error) - // Authority is the server name to use as part of the authentication - // handshake when communicating with a remote load balancer server. Balancer - // implementations which do not communicate with a remote load balancer - // server can ignore this field. - Authority string - // ChannelzParentID is the parent ClientConn's channelz ID. - ChannelzParentID *channelz.Identifier - // CustomUserAgent is the custom user agent set on the parent ClientConn.
- // The balancer should set the same custom user agent if it creates a - // ClientConn. - CustomUserAgent string - // Target contains the parsed address info of the dial target. It is the - // same resolver.Target as passed to the resolver. See the documentation for - // the resolver.Target type for details about what it contains. - Target resolver.Target -} - -// Builder creates a balancer. -type Builder interface { - // Build creates a new balancer with the ClientConn. - Build(cc ClientConn, opts BuildOptions) Balancer - // Name returns the name of balancers built by this builder. - // It will be used to pick balancers (for example in service config). - Name() string -} - -// ConfigParser parses load balancer configs. -type ConfigParser interface { - // ParseConfig parses the JSON load balancer config provided into an - // internal form or returns an error if the config is invalid. For future - // compatibility reasons, unknown fields in the config should be ignored. - ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error) -} - -// PickInfo contains additional information for the Pick operation. -type PickInfo struct { - // FullMethodName is the method name that NewClientStream() is called - // with. The canonical format is /service/Method. - FullMethodName string - // Ctx is the RPC's context, and may contain relevant RPC-level information - // like the outgoing header metadata. - Ctx context.Context -} - -// DoneInfo contains additional information passed to the Done callback. -type DoneInfo struct { - // Err is the error the RPC finished with. It could be nil. - Err error - // Trailer contains the metadata from the RPC's trailer, if present. - Trailer metadata.MD - // BytesSent indicates if any bytes have been sent to the server. - BytesSent bool - // BytesReceived indicates if any bytes have been received from the server. - BytesReceived bool - // ServerLoad is the load received from server. It's usually sent as part of - // trailing metadata. - // - // The only supported type now is *orca_v3.LoadReport. - ServerLoad any -} - -var ( - // ErrNoSubConnAvailable indicates no SubConn is available for pick(). - // gRPC will block the RPC until a new picker is available via UpdateState(). - ErrNoSubConnAvailable = errors.New("no SubConn is available") - // ErrTransientFailure indicates all SubConns are in TransientFailure. - // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. - // - // Deprecated: return an appropriate error based on the last resolution or - // connection attempt instead. The behavior is the same for any non-gRPC - // status error. - ErrTransientFailure = errors.New("all SubConns are in TransientFailure") -) - -// PickResult contains information related to a connection chosen for an RPC. -type PickResult struct { - // SubConn is the connection to use for this pick, if its state is Ready. - // If the state is not Ready, gRPC will block the RPC until a new Picker is - // provided by the balancer (using ClientConn.UpdateState). The SubConn - // must be one returned by ClientConn.NewSubConn. - SubConn SubConn - - // Done is called when the RPC is completed. If the SubConn is not ready, - // this will be called with a nil parameter. If the SubConn is not a valid - // type, Done may not be called. May be nil if the balancer does not wish - // to be notified when the RPC completes. - Done func(DoneInfo) - - // Metadata provides a way for LB policies to inject arbitrary per-call - // metadata.
Any metadata returned here will be merged with existing - metadata added by the client application. - // - // LB policies with child policies are responsible for propagating metadata - // injected by their children to the ClientConn, as part of Pick(). - Metadata metadata.MD -} - -// TransientFailureError returns e. It exists for backward compatibility and -// will be deleted soon. -// -// Deprecated: no longer necessary, picker errors are treated this way by -// default. -func TransientFailureError(e error) error { return e } - -// Picker is used by gRPC to pick a SubConn to send an RPC. -// Balancer is expected to generate a new picker from its snapshot every time its -// internal state has changed. -// -// The pickers used by gRPC can be updated by ClientConn.UpdateState(). -type Picker interface { - // Pick returns the connection to use for this RPC and related information. - // - // Pick should not block. If the balancer needs to do I/O or any blocking - // or time-consuming work to service this call, it should return - // ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when - // the Picker is updated (using ClientConn.UpdateState). - // - // If an error is returned: - // - // - If the error is ErrNoSubConnAvailable, gRPC will block until a new - // Picker is provided by the balancer (using ClientConn.UpdateState). - // - // - If the error is a status error (implemented by the grpc/status - // package), gRPC will terminate the RPC with the code and message - // provided. - // - // - For all other errors, wait-for-ready RPCs will wait, but - // non-wait-for-ready RPCs will be terminated with this error's Error() - // string and status code Unavailable. - Pick(info PickInfo) (PickResult, error) -} - -// Balancer takes input from gRPC, manages SubConns, and collects and aggregates -// the connectivity states. -// -// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. -// -// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are -// guaranteed to be called synchronously from the same goroutine. There is no -// such guarantee for picker.Pick; it may be called at any time. -type Balancer interface { - // UpdateClientConnState is called by gRPC when the state of the ClientConn - // changes. If the error returned is ErrBadResolverState, the ClientConn - // will begin calling ResolveNow on the active name resolver with - // exponential backoff until a subsequent call to UpdateClientConnState - // returns a nil error. Any other errors are currently ignored. - UpdateClientConnState(ClientConnState) error - // ResolverError is called by gRPC when the name resolver reports an error. - ResolverError(error) - // UpdateSubConnState is called by gRPC when the state of a SubConn - // changes. - // - // Deprecated: Use NewSubConnOptions.StateListener when creating the - // SubConn instead. - UpdateSubConnState(SubConn, SubConnState) - // Close closes the balancer. The balancer is not currently required to - // call SubConn.Shutdown for its existing SubConns; however, this will be - // required in a future release, so it is recommended. - Close() -} - -// ExitIdler is an optional interface for balancers to implement. If -// implemented, ExitIdle will be called when ClientConn.Connect is called, if -// the ClientConn is idle. If unimplemented, ClientConn.Connect will cause -// all SubConns to connect. -// -// Notice: it will be required for all balancers to implement this in a future -// release.
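A minimal picker honoring the Pick contract above. Holding a single SubConn is an assumption made for brevity; real pickers choose among all ready SubConns:

```go
package custombalancer

import "google.golang.org/grpc/balancer"

// singlePicker returns the one SubConn it holds. Pick never blocks, per
// the Picker contract above.
type singlePicker struct {
	sc balancer.SubConn // assumed ready; nil means none available
}

func (p *singlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	if p.sc == nil {
		// Tells gRPC to hold RPCs until a new picker arrives via
		// ClientConn.UpdateState.
		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
	}
	return balancer.PickResult{SubConn: p.sc}, nil
}
```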
-type ExitIdler interface { - // ExitIdle instructs the LB policy to reconnect to backends / exit the - // IDLE state, if appropriate and possible. Note that SubConns that enter - // the IDLE state will not reconnect until SubConn.Connect is called. - ExitIdle() -} - -// SubConnState describes the state of a SubConn. -type SubConnState struct { - // ConnectivityState is the connectivity state of the SubConn. - ConnectivityState connectivity.State - // ConnectionError is set if the ConnectivityState is TransientFailure, - // describing the reason the SubConn failed. Otherwise, it is nil. - ConnectionError error -} - -// ClientConnState describes the state of a ClientConn relevant to the -// balancer. -type ClientConnState struct { - ResolverState resolver.State - // The parsed load balancing configuration returned by the builder's - // ParseConfig method, if implemented. - BalancerConfig serviceconfig.LoadBalancingConfig -} - -// ErrBadResolverState may be returned by UpdateClientConnState to indicate a -// problem with the provided name resolver data. -var ErrBadResolverState = errors.New("bad resolver state") - -// A ProducerBuilder is a simple constructor for a Producer. It is used by the -// SubConn to create producers when needed. -type ProducerBuilder interface { - // Build creates a Producer. The first parameter is always a - // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the - // associated SubConn), but is declared as `any` to avoid a dependency - // cycle. Should also return a close function that will be called when all - // references to the Producer have been given up. - Build(grpcClientConnInterface any) (p Producer, close func()) -} - -// A Producer is a type shared among potentially many consumers. It is -// associated with a SubConn, and an implementation will typically contain -// other methods to provide additional functionality, e.g. configuration or -// subscription registration. -type Producer any diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go deleted file mode 100644 index a7f1eeec8e..0000000000 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ /dev/null @@ -1,264 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package base - -import ( - "errors" - "fmt" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/resolver" -) - -var logger = grpclog.Component("balancer") - -type baseBuilder struct { - name string - pickerBuilder PickerBuilder - config Config -} - -func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { - bal := &baseBalancer{ - cc: cc, - pickerBuilder: bb.pickerBuilder, - - subConns: resolver.NewAddressMap(), - scStates: make(map[balancer.SubConn]connectivity.State), - csEvltr: &balancer.ConnectivityStateEvaluator{}, - config: bb.config, - state: connectivity.Connecting, - } - // Initialize picker to a picker that always returns - // ErrNoSubConnAvailable, because when state of a SubConn changes, we - // may call UpdateState with this picker. - bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable) - return bal -} - -func (bb *baseBuilder) Name() string { - return bb.name -} - -type baseBalancer struct { - cc balancer.ClientConn - pickerBuilder PickerBuilder - - csEvltr *balancer.ConnectivityStateEvaluator - state connectivity.State - - subConns *resolver.AddressMap - scStates map[balancer.SubConn]connectivity.State - picker balancer.Picker - config Config - - resolverErr error // the last error reported by the resolver; cleared on successful resolution - connErr error // the last connection error; cleared upon leaving TransientFailure -} - -func (b *baseBalancer) ResolverError(err error) { - b.resolverErr = err - if b.subConns.Len() == 0 { - b.state = connectivity.TransientFailure - } - - if b.state != connectivity.TransientFailure { - // The picker will not change since the balancer does not currently - // report an error. - return - } - b.regeneratePicker() - b.cc.UpdateState(balancer.State{ - ConnectivityState: b.state, - Picker: b.picker, - }) -} - -func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { - // TODO: handle s.ResolverState.ServiceConfig? - if logger.V(2) { - logger.Info("base.baseBalancer: got new ClientConn state: ", s) - } - // Successful resolution; clear resolver error and ensure we return nil. - b.resolverErr = nil - // addrsSet is the set converted from addrs, it's used for quick lookup of an address. - addrsSet := resolver.NewAddressMap() - for _, a := range s.ResolverState.Addresses { - addrsSet.Set(a, nil) - if _, ok := b.subConns.Get(a); !ok { - // a is a new address (not existing in b.subConns). - var sc balancer.SubConn - opts := balancer.NewSubConnOptions{ - HealthCheckEnabled: b.config.HealthCheck, - StateListener: func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) }, - } - sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts) - if err != nil { - logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) - continue - } - b.subConns.Set(a, sc) - b.scStates[sc] = connectivity.Idle - b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle) - sc.Connect() - } - } - for _, a := range b.subConns.Keys() { - sci, _ := b.subConns.Get(a) - sc := sci.(balancer.SubConn) - // a was removed by resolver. - if _, ok := addrsSet.Get(a); !ok { - sc.Shutdown() - b.subConns.Delete(a) - // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. - // The entry will be deleted in updateSubConnState. - } - } - // If resolver state contains no addresses, return an error so ClientConn - // will trigger re-resolve. 
Also records this as a resolver error, so when - // the overall state turns transient failure, the error message will have - // the zero address information. - if len(s.ResolverState.Addresses) == 0 { - b.ResolverError(errors.New("produced zero addresses")) - return balancer.ErrBadResolverState - } - - b.regeneratePicker() - b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) - return nil -} - -// mergeErrors builds an error from the last connection error and the last -// resolver error. Must only be called if b.state is TransientFailure. -func (b *baseBalancer) mergeErrors() error { - // connErr must always be non-nil unless there are no SubConns, in which - // case resolverErr must be non-nil. - if b.connErr == nil { - return fmt.Errorf("last resolver error: %v", b.resolverErr) - } - if b.resolverErr == nil { - return fmt.Errorf("last connection error: %v", b.connErr) - } - return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) -} - -// regeneratePicker takes a snapshot of the balancer, and generates a picker -// from it. The picker is -// - errPicker if the balancer is in TransientFailure, -// - built by the pickerBuilder with all READY SubConns otherwise. -func (b *baseBalancer) regeneratePicker() { - if b.state == connectivity.TransientFailure { - b.picker = NewErrPicker(b.mergeErrors()) - return - } - readySCs := make(map[balancer.SubConn]SubConnInfo) - - // Filter out all ready SCs from the full subConn map. - for _, addr := range b.subConns.Keys() { - sci, _ := b.subConns.Get(addr) - sc := sci.(balancer.SubConn) - if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { - readySCs[sc] = SubConnInfo{Address: addr} - } - } - b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) -} - -// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn. -func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) -} - -func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - s := state.ConnectivityState - if logger.V(2) { - logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) - } - oldS, ok := b.scStates[sc] - if !ok { - if logger.V(2) { - logger.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) - } - return - } - if oldS == connectivity.TransientFailure && - (s == connectivity.Connecting || s == connectivity.Idle) { - // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or - // CONNECTING transitions to prevent the aggregated state from being - // always CONNECTING when many backends exist but are all down. - if s == connectivity.Idle { - sc.Connect() - } - return - } - b.scStates[sc] = s - switch s { - case connectivity.Idle: - sc.Connect() - case connectivity.Shutdown: - // When an address was removed by resolver, b called Shutdown but kept - // the sc's state in scStates. Remove state for this sc here. - delete(b.scStates, sc) - case connectivity.TransientFailure: - // Save error to be reported via picker.
- b.connErr = state.ConnectionError - } - - b.state = b.csEvltr.RecordTransition(oldS, s) - - // Regenerate picker when one of the following happens: - // - this sc entered or left ready - // - the aggregated state of balancer is TransientFailure - // (may need to update error message) - if (s == connectivity.Ready) != (oldS == connectivity.Ready) || - b.state == connectivity.TransientFailure { - b.regeneratePicker() - } - b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) -} - -// Close is a nop because base balancer doesn't have internal state to clean up, -// and it doesn't need to call Shutdown for the SubConns. -func (b *baseBalancer) Close() { -} - -// ExitIdle is a nop because the base balancer attempts to stay connected to -// all SubConns at all times. -func (b *baseBalancer) ExitIdle() { -} - -// NewErrPicker returns a Picker that always returns err on Pick(). -func NewErrPicker(err error) balancer.Picker { - return &errPicker{err: err} -} - -// NewErrPickerV2 is temporarily defined for backward compatibility reasons. -// -// Deprecated: use NewErrPicker instead. -var NewErrPickerV2 = NewErrPicker - -type errPicker struct { - err error // Pick() always returns this err. -} - -func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { - return balancer.PickResult{}, p.err -} diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go deleted file mode 100644 index e31d76e338..0000000000 --- a/vendor/google.golang.org/grpc/balancer/base/base.go +++ /dev/null @@ -1,71 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package base defines a balancer base that can be used to build balancers with -// different picking algorithms. -// -// The base balancer creates a new SubConn for each resolved address. The -// provided picker will only be notified about READY SubConns. -// -// This package is the base of round_robin balancer, its purpose is to be used -// to build round_robin like balancers with complex picking algorithms. -// Balancers with more complicated logic should try to implement a balancer -// builder from scratch. -// -// All APIs in this package are experimental. -package base - -import ( - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/resolver" -) - -// PickerBuilder creates balancer.Picker. -type PickerBuilder interface { - // Build returns a picker that will be used by gRPC to pick a SubConn. - Build(info PickerBuildInfo) balancer.Picker -} - -// PickerBuildInfo contains information needed by the picker builder to -// construct a picker. -type PickerBuildInfo struct { - // ReadySCs is a map from all ready SubConns to the Addresses used to - // create them. - ReadySCs map[balancer.SubConn]SubConnInfo -} - -// SubConnInfo contains information about a SubConn created by the base -// balancer. 
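Tying the base package together: a hedged sketch of a trivial policy built with NewBalancerBuilder and a PickerBuilder. The name and the "first ready SubConn" selection are illustrative only:

```go
package firstpick

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
)

const Name = "first_pick" // hypothetical policy name

type pickerBuilder struct{}

func (pickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
	if len(info.ReadySCs) == 0 {
		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
	}
	// Map iteration order is randomized; a real policy would sort or rotate.
	for sc := range info.ReadySCs {
		return &picker{sc: sc}
	}
	return nil // unreachable
}

type picker struct{ sc balancer.SubConn }

func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	return balancer.PickResult{SubConn: p.sc}, nil
}

func init() {
	balancer.Register(base.NewBalancerBuilder(Name, pickerBuilder{}, base.Config{HealthCheck: true}))
}
```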
-type SubConnInfo struct { - Address resolver.Address // the address used to create this SubConn -} - -// Config contains the config info about the base balancer builder. -type Config struct { - // HealthCheck indicates whether health checking should be enabled for this specific balancer. - HealthCheck bool -} - -// NewBalancerBuilder returns a base balancer builder configured by the provided config. -func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder { - return &baseBuilder{ - name: name, - pickerBuilder: pb, - config: config, - } -} diff --git a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go deleted file mode 100644 index c334135810..0000000000 --- a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go +++ /dev/null @@ -1,74 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package balancer - -import "google.golang.org/grpc/connectivity" - -// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns -// and returns one aggregated connectivity state. -// -// It's not thread safe. -type ConnectivityStateEvaluator struct { - numReady uint64 // Number of addrConns in ready state. - numConnecting uint64 // Number of addrConns in connecting state. - numTransientFailure uint64 // Number of addrConns in transient failure state. - numIdle uint64 // Number of addrConns in idle state. -} - -// RecordTransition records state change happening in subConn and based on that -// it evaluates what aggregated state should be. -// -// - If at least one SubConn in Ready, the aggregated state is Ready; -// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; -// - Else if at least one SubConn is Idle, the aggregated state is Idle; -// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure. -// -// Shutdown is not considered. -func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { - // Update counters. - for idx, state := range []connectivity.State{oldState, newState} { - updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. - switch state { - case connectivity.Ready: - cse.numReady += updateVal - case connectivity.Connecting: - cse.numConnecting += updateVal - case connectivity.TransientFailure: - cse.numTransientFailure += updateVal - case connectivity.Idle: - cse.numIdle += updateVal - } - } - return cse.CurrentState() -} - -// CurrentState returns the current aggregate conn state by evaluating the counters -func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State { - // Evaluate. 
- if cse.numReady > 0 { - return connectivity.Ready - } - if cse.numConnecting > 0 { - return connectivity.Connecting - } - if cse.numIdle > 0 { - return connectivity.Idle - } - return connectivity.TransientFailure -} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go deleted file mode 100644 index 4ecfa1c215..0000000000 --- a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go +++ /dev/null @@ -1,51 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package state declares grpclb types to be set by resolvers wishing to pass -// information to grpclb via resolver.State Attributes. -package state - -import ( - "google.golang.org/grpc/resolver" -) - -// keyType is the key to use for storing State in Attributes. -type keyType string - -const key = keyType("grpc.grpclb.state") - -// State contains gRPCLB-relevant data passed from the name resolver. -type State struct { - // BalancerAddresses contains the remote load balancer address(es). If - // set, overrides any resolver-provided addresses with Type of GRPCLB. - BalancerAddresses []resolver.Address -} - -// Set returns a copy of the provided state with attributes containing s. s's -// data should not be mutated after calling Set. -func Set(state resolver.State, s *State) resolver.State { - state.Attributes = state.Attributes.WithValue(key, s) - return state -} - -// Get returns the grpclb State in the resolver.State, or nil if not present. -// The returned data should not be mutated. -func Get(state resolver.State) *State { - s, _ := state.Attributes.Value(key).(*State) - return s -} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go deleted file mode 100644 index f7031ad225..0000000000 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ /dev/null @@ -1,81 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is -// installed as one of the default balancers in gRPC, users don't need to -// explicitly install this balancer. 
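The ConnectivityStateEvaluator deleted above reduces many SubConn states to one channel state with a fixed precedence: READY beats CONNECTING, which beats IDLE, with TRANSIENT_FAILURE as the fallback. Below is a minimal sketch of driving it directly, against the exported API exactly as it appears in the deleted file; passing connectivity.Shutdown as the "previous" state of a brand-new SubConn is an assumption that works only because, per the doc comment, Shutdown is not counted.

package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

func main() {
	cse := &balancer.ConnectivityStateEvaluator{}

	// Two new SubConns start in IDLE; Shutdown touches no counter, so it
	// serves here as a "no previous state" sentinel (an assumption).
	cse.RecordTransition(connectivity.Shutdown, connectivity.Idle)
	cse.RecordTransition(connectivity.Shutdown, connectivity.Idle)
	fmt.Println(cse.CurrentState()) // IDLE

	// One SubConn starts dialing: CONNECTING outranks the remaining IDLE.
	cse.RecordTransition(connectivity.Idle, connectivity.Connecting)
	fmt.Println(cse.CurrentState()) // CONNECTING

	// It connects: READY outranks everything else.
	cse.RecordTransition(connectivity.Connecting, connectivity.Ready)
	fmt.Println(cse.CurrentState()) // READY

	// Both SubConns fail: nothing READY/CONNECTING/IDLE remains, so the
	// aggregate falls back to TRANSIENT_FAILURE.
	cse.RecordTransition(connectivity.Ready, connectivity.TransientFailure)
	cse.RecordTransition(connectivity.Idle, connectivity.TransientFailure)
	fmt.Println(cse.CurrentState()) // TRANSIENT_FAILURE
}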
-package roundrobin - -import ( - "sync/atomic" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/base" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/grpcrand" -) - -// Name is the name of round_robin balancer. -const Name = "round_robin" - -var logger = grpclog.Component("roundrobin") - -// newBuilder creates a new roundrobin balancer builder. -func newBuilder() balancer.Builder { - return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) -} - -func init() { - balancer.Register(newBuilder()) -} - -type rrPickerBuilder struct{} - -func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { - logger.Infof("roundrobinPicker: Build called with info: %v", info) - if len(info.ReadySCs) == 0 { - return base.NewErrPicker(balancer.ErrNoSubConnAvailable) - } - scs := make([]balancer.SubConn, 0, len(info.ReadySCs)) - for sc := range info.ReadySCs { - scs = append(scs, sc) - } - return &rrPicker{ - subConns: scs, - // Start at a random index, as the same RR balancer rebuilds a new - // picker when SubConn states change, and we don't want to apply excess - // load to the first server in the list. - next: uint32(grpcrand.Intn(len(scs))), - } -} - -type rrPicker struct { - // subConns is the snapshot of the roundrobin balancer when this picker was - // created. The slice is immutable. Each Get() will do a round robin - // selection from it and return the selected SubConn. - subConns []balancer.SubConn - next uint32 -} - -func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - subConnsLen := uint32(len(p.subConns)) - nextIndex := atomic.AddUint32(&p.next, 1) - - sc := p.subConns[nextIndex%subConnsLen] - return balancer.PickResult{SubConn: sc}, nil -} diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go deleted file mode 100644 index b5e30cff02..0000000000 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ /dev/null @@ -1,380 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "fmt" - "strings" - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/balancer/gracefulswitch" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/resolver" -) - -// ccBalancerWrapper sits between the ClientConn and the Balancer. -// -// ccBalancerWrapper implements methods corresponding to the ones on the -// balancer.Balancer interface. The ClientConn is free to call these methods -// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn -// to the Balancer happen in order by performing them in the serializer, without -// any mutexes held. -// -// ccBalancerWrapper also implements the balancer.ClientConn interface and is -// passed to the Balancer implementations. 
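The rrPicker deleted above is the entire round-robin algorithm: an immutable snapshot of READY SubConns plus one atomic counter, seeded at a random index so that rebuilding the picker on every state change does not dogpile the first backend. A self-contained sketch of that selection logic, with strings (hypothetical backend addresses) standing in for balancer.SubConn:

package main

import (
	"fmt"
	"math/rand"
	"sync/atomic"
)

// rrPicker mirrors the deleted picker's selection logic.
type rrPicker struct {
	subConns []string // immutable snapshot taken at build time
	next     uint32
}

func newRRPicker(scs []string) *rrPicker {
	return &rrPicker{
		subConns: scs,
		// Random start: pickers are rebuilt on every SubConn state change,
		// and always starting at index 0 would overload the first address.
		next: uint32(rand.Intn(len(scs))),
	}
}

func (p *rrPicker) pick() string {
	// AddUint32 makes concurrent picks safe; the modulo wraps the counter
	// around the snapshot.
	n := atomic.AddUint32(&p.next, 1)
	return p.subConns[n%uint32(len(p.subConns))]
}

func main() {
	p := newRRPicker([]string{"10.0.0.1:50051", "10.0.0.2:50051", "10.0.0.3:50051"})
	for i := 0; i < 6; i++ {
		fmt.Println(p.pick()) // cycles through the three backends twice
	}
}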
It invokes unexported methods on the -// ClientConn to handle these calls from the Balancer. -// -// It uses the gracefulswitch.Balancer internally to ensure that balancer -// switches happen in a graceful manner. -type ccBalancerWrapper struct { - // The following fields are initialized when the wrapper is created and are - // read-only afterwards, and therefore can be accessed without a mutex. - cc *ClientConn - opts balancer.BuildOptions - serializer *grpcsync.CallbackSerializer - serializerCancel context.CancelFunc - - // The following fields are only accessed within the serializer or during - // initialization. - curBalancerName string - balancer *gracefulswitch.Balancer - - // The following field is protected by mu. Caller must take cc.mu before - // taking mu. - mu sync.Mutex - closed bool -} - -// newCCBalancerWrapper creates a new balancer wrapper in idle state. The -// underlying balancer is not created until the switchTo() method is invoked. -func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { - ctx, cancel := context.WithCancel(cc.ctx) - ccb := &ccBalancerWrapper{ - cc: cc, - opts: balancer.BuildOptions{ - DialCreds: cc.dopts.copts.TransportCredentials, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - Authority: cc.authority, - CustomUserAgent: cc.dopts.copts.UserAgent, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, - }, - serializer: grpcsync.NewCallbackSerializer(ctx), - serializerCancel: cancel, - } - ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) - return ccb -} - -// updateClientConnState is invoked by grpc to push a ClientConnState update to -// the underlying balancer. This is always executed from the serializer, so -// it is safe to call into the balancer here. -func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - errCh := make(chan error) - ok := ccb.serializer.Schedule(func(ctx context.Context) { - defer close(errCh) - if ctx.Err() != nil || ccb.balancer == nil { - return - } - err := ccb.balancer.UpdateClientConnState(*ccs) - if logger.V(2) && err != nil { - logger.Infof("error from balancer.UpdateClientConnState: %v", err) - } - errCh <- err - }) - if !ok { - return nil - } - return <-errCh -} - -// resolverError is invoked by grpc to push a resolver error to the underlying -// balancer. The call to the balancer is executed from the serializer. -func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.serializer.Schedule(func(ctx context.Context) { - if ctx.Err() != nil || ccb.balancer == nil { - return - } - ccb.balancer.ResolverError(err) - }) -} - -// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the -// LB policy identified by name. -// -// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the -// first good update from the name resolver, it determines the LB policy to use -// and invokes the switchTo() method. Upon receipt of every subsequent update -// from the name resolver, it invokes this method. -// -// the ccBalancerWrapper keeps track of the current LB policy name, and skips -// the graceful balancer switching process if the name does not change. -func (ccb *ccBalancerWrapper) switchTo(name string) { - ccb.serializer.Schedule(func(ctx context.Context) { - if ctx.Err() != nil || ccb.balancer == nil { - return - } - // TODO: Other languages use case-sensitive balancer registries. We should - // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. 
- if strings.EqualFold(ccb.curBalancerName, name) { - return - } - ccb.buildLoadBalancingPolicy(name) - }) -} - -// buildLoadBalancingPolicy performs the following: -// - retrieve a balancer builder for the given name. Use the default LB -// policy, pick_first, if no LB policy with name is found in the registry. -// - instruct the gracefulswitch balancer to switch to the above builder. This -// will actually build the new balancer. -// - update the `curBalancerName` field -// -// Must be called from a serializer callback. -func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { - builder := balancer.Get(name) - if builder == nil { - channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) - builder = newPickfirstBuilder() - } else { - channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) - } - - if err := ccb.balancer.SwitchTo(builder); err != nil { - channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) - return - } - ccb.curBalancerName = builder.Name() -} - -// close initiates async shutdown of the wrapper. cc.mu must be held when -// calling this function. To determine the wrapper has finished shutting down, -// the channel should block on ccb.serializer.Done() without cc.mu held. -func (ccb *ccBalancerWrapper) close() { - ccb.mu.Lock() - ccb.closed = true - ccb.mu.Unlock() - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") - ccb.serializer.Schedule(func(context.Context) { - if ccb.balancer == nil { - return - } - ccb.balancer.Close() - ccb.balancer = nil - }) - ccb.serializerCancel() -} - -// exitIdle invokes the balancer's exitIdle method in the serializer. -func (ccb *ccBalancerWrapper) exitIdle() { - ccb.serializer.Schedule(func(ctx context.Context) { - if ctx.Err() != nil || ccb.balancer == nil { - return - } - ccb.balancer.ExitIdle() - }) -} - -func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - ccb.cc.mu.Lock() - defer ccb.cc.mu.Unlock() - - ccb.mu.Lock() - if ccb.closed { - ccb.mu.Unlock() - return nil, fmt.Errorf("balancer is being closed; no new SubConns allowed") - } - ccb.mu.Unlock() - - if len(addrs) == 0 { - return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") - } - ac, err := ccb.cc.newAddrConnLocked(addrs, opts) - if err != nil { - channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) - return nil, err - } - acbw := &acBalancerWrapper{ - ccb: ccb, - ac: ac, - producers: make(map[balancer.ProducerBuilder]*refCountedProducer), - stateListener: opts.StateListener, - } - ac.acbw = acbw - return acbw, nil -} - -func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - // The graceful switch balancer will never call this. - logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc") -} - -func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { - acbw, ok := sc.(*acBalancerWrapper) - if !ok { - return - } - acbw.UpdateAddresses(addrs) -} - -func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { - ccb.cc.mu.Lock() - defer ccb.cc.mu.Unlock() - - ccb.mu.Lock() - if ccb.closed { - ccb.mu.Unlock() - return - } - ccb.mu.Unlock() - // Update picker before updating state. 
Even though the ordering here does - // not matter, it can lead to multiple calls of Pick in the common start-up - // case where we wait for ready and then perform an RPC. If the picker is - // updated later, we could call the "connecting" picker when the state is - // updated, and then call the "ready" picker after the picker gets updated. - - // Note that there is no need to check if the balancer wrapper was closed, - // as we know the graceful switch LB policy will not call cc if it has been - // closed. - ccb.cc.pickerWrapper.updatePicker(s.Picker) - ccb.cc.csMgr.updateState(s.ConnectivityState) -} - -func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { - ccb.cc.mu.RLock() - defer ccb.cc.mu.RUnlock() - - ccb.mu.Lock() - if ccb.closed { - ccb.mu.Unlock() - return - } - ccb.mu.Unlock() - ccb.cc.resolveNowLocked(o) -} - -func (ccb *ccBalancerWrapper) Target() string { - return ccb.cc.target -} - -// acBalancerWrapper is a wrapper on top of ac for balancers. -// It implements balancer.SubConn interface. -type acBalancerWrapper struct { - ac *addrConn // read-only - ccb *ccBalancerWrapper // read-only - stateListener func(balancer.SubConnState) - - mu sync.Mutex - producers map[balancer.ProducerBuilder]*refCountedProducer -} - -// updateState is invoked by grpc to push a subConn state update to the -// underlying balancer. -func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { - acbw.ccb.serializer.Schedule(func(ctx context.Context) { - if ctx.Err() != nil || acbw.ccb.balancer == nil { - return - } - // Even though it is optional for balancers, gracefulswitch ensures - // opts.StateListener is set, so this cannot ever be nil. - // TODO: delete this comment when UpdateSubConnState is removed. - acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) - }) -} - -func (acbw *acBalancerWrapper) String() string { - return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) -} - -func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { - acbw.ac.updateAddrs(addrs) -} - -func (acbw *acBalancerWrapper) Connect() { - go acbw.ac.connect() -} - -func (acbw *acBalancerWrapper) Shutdown() { - acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain) -} - -// NewStream begins a streaming RPC on the addrConn. If the addrConn is not -// ready, blocks until it is or ctx expires. Returns an error when the context -// expires or the addrConn is shut down. -func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - transport, err := acbw.ac.getTransport(ctx) - if err != nil { - return nil, err - } - return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) -} - -// Invoke performs a unary RPC. If the addrConn is not ready, returns -// errSubConnNotReady. -func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error { - cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) 
- if err != nil { - return err - } - if err := cs.SendMsg(args); err != nil { - return err - } - return cs.RecvMsg(reply) -} - -type refCountedProducer struct { - producer balancer.Producer - refs int // number of current refs to the producer - close func() // underlying producer's close function -} - -func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { - acbw.mu.Lock() - defer acbw.mu.Unlock() - - // Look up existing producer from this builder. - pData := acbw.producers[pb] - if pData == nil { - // Not found; create a new one and add it to the producers map. - p, close := pb.Build(acbw) - pData = &refCountedProducer{producer: p, close: close} - acbw.producers[pb] = pData - } - // Account for this new reference. - pData.refs++ - - // Return a cleanup function wrapped in a OnceFunc to remove this reference - // and delete the refCountedProducer from the map if the total reference - // count goes to zero. - unref := func() { - acbw.mu.Lock() - pData.refs-- - if pData.refs == 0 { - defer pData.close() // Run outside the acbw mutex - delete(acbw.producers, pb) - } - acbw.mu.Unlock() - } - return pData.producer, grpcsync.OnceFunc(unref) -} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go deleted file mode 100644 index 856c75dd4e..0000000000 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ /dev/null @@ -1,1183 +0,0 @@ -// Copyright 2018 The gRPC Authors -// All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// The canonical version of this proto can be found at -// https://github.com/grpc/grpc-proto/blob/master/grpc/binlog/v1/binarylog.proto - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 -// source: grpc/binlog/v1/binarylog.proto - -package grpc_binarylog_v1 - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Enumerates the type of event -// Note the terminology is different from the RPC semantics -// definition, but the same meaning is expressed here. 
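GetOrBuildProducer, deleted above, is a small ref-counting cache: build a producer the first time a builder asks for one, hand every caller an idempotent release function, and tear the producer down outside the lock once the last reference is gone. The following is a generic analogue of that pattern, not gRPC's implementation; sync.OnceFunc (Go 1.21 standard library) stands in for grpcsync.OnceFunc, and the key/value types are placeholders.

package main

import (
	"fmt"
	"sync"
)

type entry[V any] struct {
	val     V
	refs    int
	closeFn func()
}

// cache builds a value per key on first use and closes it when the last
// reference is released, mirroring the deleted refCountedProducer map.
type cache[K comparable, V any] struct {
	mu      sync.Mutex
	entries map[K]*entry[V]
}

func (c *cache[K, V]) getOrBuild(k K, build func() (V, func())) (V, func()) {
	c.mu.Lock()
	defer c.mu.Unlock()
	e := c.entries[k]
	if e == nil {
		v, closeFn := build()
		e = &entry[V]{val: v, closeFn: closeFn}
		c.entries[k] = e
	}
	e.refs++
	release := func() {
		c.mu.Lock()
		e.refs--
		if e.refs == 0 {
			defer e.closeFn() // deferred so it runs outside the mutex
			delete(c.entries, k)
		}
		c.mu.Unlock()
	}
	// OnceFunc keeps a double release from driving the count negative.
	return e.val, sync.OnceFunc(release)
}

func main() {
	c := &cache[string, string]{entries: map[string]*entry[string]{}}
	build := func() (string, func()) {
		fmt.Println("built")
		return "producer", func() { fmt.Println("closed") }
	}
	_, rel1 := c.getOrBuild("stats", build)
	_, rel2 := c.getOrBuild("stats", build) // reuses the cached value
	rel1()
	rel1() // no-op: already released
	rel2() // last reference: prints "closed"
}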
-type GrpcLogEntry_EventType int32 - -const ( - GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0 - // Header sent from client to server - GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1 - // Header sent from server to client - GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2 - // Message sent from client to server - GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3 - // Message sent from server to client - GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4 - // A signal that client is done sending - GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5 - // Trailer indicates the end of the RPC. - // On client side, this event means a trailer was either received - // from the network or the gRPC library locally generated a status - // to inform the application about a failure. - // On server side, this event means the server application requested - // to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after - // this due to races on server side. - GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6 - // A signal that the RPC is cancelled. On client side, this - // indicates the client application requests a cancellation. - // On server side, this indicates that cancellation was detected. - // Note: This marks the end of the RPC. Events may arrive after - // this due to races. For example, on client side a trailer - // may arrive even though the application requested to cancel the RPC. - GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7 -) - -// Enum value maps for GrpcLogEntry_EventType. -var ( - GrpcLogEntry_EventType_name = map[int32]string{ - 0: "EVENT_TYPE_UNKNOWN", - 1: "EVENT_TYPE_CLIENT_HEADER", - 2: "EVENT_TYPE_SERVER_HEADER", - 3: "EVENT_TYPE_CLIENT_MESSAGE", - 4: "EVENT_TYPE_SERVER_MESSAGE", - 5: "EVENT_TYPE_CLIENT_HALF_CLOSE", - 6: "EVENT_TYPE_SERVER_TRAILER", - 7: "EVENT_TYPE_CANCEL", - } - GrpcLogEntry_EventType_value = map[string]int32{ - "EVENT_TYPE_UNKNOWN": 0, - "EVENT_TYPE_CLIENT_HEADER": 1, - "EVENT_TYPE_SERVER_HEADER": 2, - "EVENT_TYPE_CLIENT_MESSAGE": 3, - "EVENT_TYPE_SERVER_MESSAGE": 4, - "EVENT_TYPE_CLIENT_HALF_CLOSE": 5, - "EVENT_TYPE_SERVER_TRAILER": 6, - "EVENT_TYPE_CANCEL": 7, - } -) - -func (x GrpcLogEntry_EventType) Enum() *GrpcLogEntry_EventType { - p := new(GrpcLogEntry_EventType) - *p = x - return p -} - -func (x GrpcLogEntry_EventType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (GrpcLogEntry_EventType) Descriptor() protoreflect.EnumDescriptor { - return file_grpc_binlog_v1_binarylog_proto_enumTypes[0].Descriptor() -} - -func (GrpcLogEntry_EventType) Type() protoreflect.EnumType { - return &file_grpc_binlog_v1_binarylog_proto_enumTypes[0] -} - -func (x GrpcLogEntry_EventType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use GrpcLogEntry_EventType.Descriptor instead. -func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) { - return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 0} -} - -// Enumerates the entity that generates the log entry -type GrpcLogEntry_Logger int32 - -const ( - GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0 - GrpcLogEntry_LOGGER_CLIENT GrpcLogEntry_Logger = 1 - GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2 -) - -// Enum value maps for GrpcLogEntry_Logger. 
-var ( - GrpcLogEntry_Logger_name = map[int32]string{ - 0: "LOGGER_UNKNOWN", - 1: "LOGGER_CLIENT", - 2: "LOGGER_SERVER", - } - GrpcLogEntry_Logger_value = map[string]int32{ - "LOGGER_UNKNOWN": 0, - "LOGGER_CLIENT": 1, - "LOGGER_SERVER": 2, - } -) - -func (x GrpcLogEntry_Logger) Enum() *GrpcLogEntry_Logger { - p := new(GrpcLogEntry_Logger) - *p = x - return p -} - -func (x GrpcLogEntry_Logger) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (GrpcLogEntry_Logger) Descriptor() protoreflect.EnumDescriptor { - return file_grpc_binlog_v1_binarylog_proto_enumTypes[1].Descriptor() -} - -func (GrpcLogEntry_Logger) Type() protoreflect.EnumType { - return &file_grpc_binlog_v1_binarylog_proto_enumTypes[1] -} - -func (x GrpcLogEntry_Logger) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use GrpcLogEntry_Logger.Descriptor instead. -func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) { - return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 1} -} - -type Address_Type int32 - -const ( - Address_TYPE_UNKNOWN Address_Type = 0 - // address is in 1.2.3.4 form - Address_TYPE_IPV4 Address_Type = 1 - // address is in IPv6 canonical form (RFC5952 section 4) - // The scope is NOT included in the address string. - Address_TYPE_IPV6 Address_Type = 2 - // address is UDS string - Address_TYPE_UNIX Address_Type = 3 -) - -// Enum value maps for Address_Type. -var ( - Address_Type_name = map[int32]string{ - 0: "TYPE_UNKNOWN", - 1: "TYPE_IPV4", - 2: "TYPE_IPV6", - 3: "TYPE_UNIX", - } - Address_Type_value = map[string]int32{ - "TYPE_UNKNOWN": 0, - "TYPE_IPV4": 1, - "TYPE_IPV6": 2, - "TYPE_UNIX": 3, - } -) - -func (x Address_Type) Enum() *Address_Type { - p := new(Address_Type) - *p = x - return p -} - -func (x Address_Type) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Address_Type) Descriptor() protoreflect.EnumDescriptor { - return file_grpc_binlog_v1_binarylog_proto_enumTypes[2].Descriptor() -} - -func (Address_Type) Type() protoreflect.EnumType { - return &file_grpc_binlog_v1_binarylog_proto_enumTypes[2] -} - -func (x Address_Type) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Address_Type.Descriptor instead. -func (Address_Type) EnumDescriptor() ([]byte, []int) { - return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7, 0} -} - -// Log entry we store in binary logs -type GrpcLogEntry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The timestamp of the binary log message - Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // Uniquely identifies a call. The value must not be 0 in order to disambiguate - // from an unset value. - // Each call may have several log entries, they will all have the same call_id. - // Nothing is guaranteed about their value other than they are unique across - // different RPCs in the same gRPC process. - CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"` - // The entry sequence id for this call. The first GrpcLogEntry has a - // value of 1, to disambiguate from an unset value. The purpose of - // this field is to detect missing entries in environments where - // durability or ordering is not guaranteed. 
- SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"` - Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"` - Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` // One of the above Logger enum - // The logger uses one of the following fields to record the payload, - // according to the type of the log entry. - // - // Types that are assignable to Payload: - // - // *GrpcLogEntry_ClientHeader - // *GrpcLogEntry_ServerHeader - // *GrpcLogEntry_Message - // *GrpcLogEntry_Trailer - Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"` - // true if payload does not represent the full message or metadata. - PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"` - // Peer address information, will only be recorded on the first - // incoming event. On client side, peer is logged on - // EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in - // the case of trailers-only. On server side, peer is always - // logged on EVENT_TYPE_CLIENT_HEADER. - Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` -} - -func (x *GrpcLogEntry) Reset() { - *x = GrpcLogEntry{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GrpcLogEntry) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GrpcLogEntry) ProtoMessage() {} - -func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GrpcLogEntry.ProtoReflect.Descriptor instead. 
-func (*GrpcLogEntry) Descriptor() ([]byte, []int) { - return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0} -} - -func (x *GrpcLogEntry) GetTimestamp() *timestamppb.Timestamp { - if x != nil { - return x.Timestamp - } - return nil -} - -func (x *GrpcLogEntry) GetCallId() uint64 { - if x != nil { - return x.CallId - } - return 0 -} - -func (x *GrpcLogEntry) GetSequenceIdWithinCall() uint64 { - if x != nil { - return x.SequenceIdWithinCall - } - return 0 -} - -func (x *GrpcLogEntry) GetType() GrpcLogEntry_EventType { - if x != nil { - return x.Type - } - return GrpcLogEntry_EVENT_TYPE_UNKNOWN -} - -func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger { - if x != nil { - return x.Logger - } - return GrpcLogEntry_LOGGER_UNKNOWN -} - -func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload { - if m != nil { - return m.Payload - } - return nil -} - -func (x *GrpcLogEntry) GetClientHeader() *ClientHeader { - if x, ok := x.GetPayload().(*GrpcLogEntry_ClientHeader); ok { - return x.ClientHeader - } - return nil -} - -func (x *GrpcLogEntry) GetServerHeader() *ServerHeader { - if x, ok := x.GetPayload().(*GrpcLogEntry_ServerHeader); ok { - return x.ServerHeader - } - return nil -} - -func (x *GrpcLogEntry) GetMessage() *Message { - if x, ok := x.GetPayload().(*GrpcLogEntry_Message); ok { - return x.Message - } - return nil -} - -func (x *GrpcLogEntry) GetTrailer() *Trailer { - if x, ok := x.GetPayload().(*GrpcLogEntry_Trailer); ok { - return x.Trailer - } - return nil -} - -func (x *GrpcLogEntry) GetPayloadTruncated() bool { - if x != nil { - return x.PayloadTruncated - } - return false -} - -func (x *GrpcLogEntry) GetPeer() *Address { - if x != nil { - return x.Peer - } - return nil -} - -type isGrpcLogEntry_Payload interface { - isGrpcLogEntry_Payload() -} - -type GrpcLogEntry_ClientHeader struct { - ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"` -} - -type GrpcLogEntry_ServerHeader struct { - ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"` -} - -type GrpcLogEntry_Message struct { - // Used by EVENT_TYPE_CLIENT_MESSAGE, EVENT_TYPE_SERVER_MESSAGE - Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"` -} - -type GrpcLogEntry_Trailer struct { - Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"` -} - -func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {} - -func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {} - -func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {} - -func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {} - -type ClientHeader struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // This contains only the metadata from the application. - Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` - // The name of the RPC method, which looks something like: - // // - // Note the leading "/" character. - MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"` - // A single process may be used to run multiple virtual - // servers with different identities. - // The authority is the name of such a server identity. - // It is typically a portion of the URI in the form of - // or : . 
- Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` - // the RPC timeout - Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` -} - -func (x *ClientHeader) Reset() { - *x = ClientHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ClientHeader) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ClientHeader) ProtoMessage() {} - -func (x *ClientHeader) ProtoReflect() protoreflect.Message { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ClientHeader.ProtoReflect.Descriptor instead. -func (*ClientHeader) Descriptor() ([]byte, []int) { - return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{1} -} - -func (x *ClientHeader) GetMetadata() *Metadata { - if x != nil { - return x.Metadata - } - return nil -} - -func (x *ClientHeader) GetMethodName() string { - if x != nil { - return x.MethodName - } - return "" -} - -func (x *ClientHeader) GetAuthority() string { - if x != nil { - return x.Authority - } - return "" -} - -func (x *ClientHeader) GetTimeout() *durationpb.Duration { - if x != nil { - return x.Timeout - } - return nil -} - -type ServerHeader struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // This contains only the metadata from the application. - Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` -} - -func (x *ServerHeader) Reset() { - *x = ServerHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServerHeader) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServerHeader) ProtoMessage() {} - -func (x *ServerHeader) ProtoReflect() protoreflect.Message { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServerHeader.ProtoReflect.Descriptor instead. -func (*ServerHeader) Descriptor() ([]byte, []int) { - return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{2} -} - -func (x *ServerHeader) GetMetadata() *Metadata { - if x != nil { - return x.Metadata - } - return nil -} - -type Trailer struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // This contains only the metadata from the application. - Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` - // The gRPC status code. - StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` - // An original status message before any transport specific - // encoding. - StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` - // The value of the 'grpc-status-details-bin' metadata key. 
If - // present, this is always an encoded 'google.rpc.Status' message. - StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` -} - -func (x *Trailer) Reset() { - *x = Trailer{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Trailer) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Trailer) ProtoMessage() {} - -func (x *Trailer) ProtoReflect() protoreflect.Message { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Trailer.ProtoReflect.Descriptor instead. -func (*Trailer) Descriptor() ([]byte, []int) { - return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{3} -} - -func (x *Trailer) GetMetadata() *Metadata { - if x != nil { - return x.Metadata - } - return nil -} - -func (x *Trailer) GetStatusCode() uint32 { - if x != nil { - return x.StatusCode - } - return 0 -} - -func (x *Trailer) GetStatusMessage() string { - if x != nil { - return x.StatusMessage - } - return "" -} - -func (x *Trailer) GetStatusDetails() []byte { - if x != nil { - return x.StatusDetails - } - return nil -} - -// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE -type Message struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Length of the message. It may not be the same as the length of the - // data field, as the logging payload can be truncated or omitted. - Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` - // May be truncated or omitted. - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` -} - -func (x *Message) Reset() { - *x = Message{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Message) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Message) ProtoMessage() {} - -func (x *Message) ProtoReflect() protoreflect.Message { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Message.ProtoReflect.Descriptor instead. -func (*Message) Descriptor() ([]byte, []int) { - return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{4} -} - -func (x *Message) GetLength() uint32 { - if x != nil { - return x.Length - } - return 0 -} - -func (x *Message) GetData() []byte { - if x != nil { - return x.Data - } - return nil -} - -// A list of metadata pairs, used in the payload of client header, -// server header, and server trailer. -// Implementations may omit some entries to honor the header limits -// of GRPC_BINARY_LOG_CONFIG. -// -// Header keys added by gRPC are omitted. 
To be more specific, -// implementations will not log the following entries, and this is -// not to be treated as a truncation: -// - entries handled by grpc that are not user visible, such as those -// that begin with 'grpc-' (with exception of grpc-trace-bin) -// or keys like 'lb-token' -// - transport specific entries, including but not limited to: -// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc -// - entries added for call credentials -// -// Implementations must always log grpc-trace-bin if it is present. -// Practically speaking it will only be visible on server side because -// grpc-trace-bin is managed by low level client side mechanisms -// inaccessible from the application level. On server side, the -// header is just a normal metadata key. -// The pair will not count towards the size limit. -type Metadata struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` -} - -func (x *Metadata) Reset() { - *x = Metadata{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Metadata) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Metadata) ProtoMessage() {} - -func (x *Metadata) ProtoReflect() protoreflect.Message { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Metadata.ProtoReflect.Descriptor instead. -func (*Metadata) Descriptor() ([]byte, []int) { - return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{5} -} - -func (x *Metadata) GetEntry() []*MetadataEntry { - if x != nil { - return x.Entry - } - return nil -} - -// A metadata key value pair -type MetadataEntry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *MetadataEntry) Reset() { - *x = MetadataEntry{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MetadataEntry) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MetadataEntry) ProtoMessage() {} - -func (x *MetadataEntry) ProtoReflect() protoreflect.Message { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MetadataEntry.ProtoReflect.Descriptor instead. 
-func (*MetadataEntry) Descriptor() ([]byte, []int) { - return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{6} -} - -func (x *MetadataEntry) GetKey() string { - if x != nil { - return x.Key - } - return "" -} - -func (x *MetadataEntry) GetValue() []byte { - if x != nil { - return x.Value - } - return nil -} - -// Address information -type Address struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` - Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` - // only for TYPE_IPV4 and TYPE_IPV6 - IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` -} - -func (x *Address) Reset() { - *x = Address{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Address) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Address) ProtoMessage() {} - -func (x *Address) ProtoReflect() protoreflect.Message { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Address.ProtoReflect.Descriptor instead. -func (*Address) Descriptor() ([]byte, []int) { - return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7} -} - -func (x *Address) GetType() Address_Type { - if x != nil { - return x.Type - } - return Address_TYPE_UNKNOWN -} - -func (x *Address) GetAddress() string { - if x != nil { - return x.Address - } - return "" -} - -func (x *Address) GetIpPort() uint32 { - if x != nil { - return x.IpPort - } - return 0 -} - -var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor - -var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{ - 0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, - 0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, - 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x07, 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, - 0x17, 0x0a, 0x07, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x65, 0x71, 0x75, - 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x77, 0x69, 0x74, 
0x68, 0x69, 0x6e, 0x5f, 0x63, - 0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x65, 0x71, 0x75, 0x65, - 0x6e, 0x63, 0x65, 0x49, 0x64, 0x57, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x43, 0x61, 0x6c, 0x6c, 0x12, - 0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, - 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e, - 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, - 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, - 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x46, - 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, - 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, - 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, - 0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x36, - 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, - 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, - 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, - 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x69, - 0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x2b, - 0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x70, - 0x65, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0xf5, 0x01, 0x0a, 0x09, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x56, 0x45, - 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 
0x12, - 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, - 0x52, 0x56, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a, - 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, - 0x4e, 0x54, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19, - 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, - 0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x45, - 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, - 0x5f, 0x48, 0x41, 0x4c, 0x46, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x05, 0x12, 0x1d, 0x0a, - 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, - 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, - 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, - 0x4c, 0x10, 0x07, 0x22, 0x42, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a, - 0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45, - 0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53, - 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x22, 0xbb, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, - 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b, - 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, - 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74, - 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x22, 0x47, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, - 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, - 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x54, 0x72, - 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, - 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 
0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, - 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, - 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x35, 0x0a, - 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, - 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, - 0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x36, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, - 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, - 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07, - 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, - 0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, - 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d, - 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a, - 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x42, 0x5c, 0x0a, 0x14, - 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, - 0x67, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4c, 0x6f, 0x67, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, - 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69, - 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 
0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once - file_grpc_binlog_v1_binarylog_proto_rawDescData = file_grpc_binlog_v1_binarylog_proto_rawDesc -) - -func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { - file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() { - file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_binlog_v1_binarylog_proto_rawDescData) - }) - return file_grpc_binlog_v1_binarylog_proto_rawDescData -} - -var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{ - (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType - (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger - (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type - (*GrpcLogEntry)(nil), // 3: grpc.binarylog.v1.GrpcLogEntry - (*ClientHeader)(nil), // 4: grpc.binarylog.v1.ClientHeader - (*ServerHeader)(nil), // 5: grpc.binarylog.v1.ServerHeader - (*Trailer)(nil), // 6: grpc.binarylog.v1.Trailer - (*Message)(nil), // 7: grpc.binarylog.v1.Message - (*Metadata)(nil), // 8: grpc.binarylog.v1.Metadata - (*MetadataEntry)(nil), // 9: grpc.binarylog.v1.MetadataEntry - (*Address)(nil), // 10: grpc.binarylog.v1.Address - (*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp - (*durationpb.Duration)(nil), // 12: google.protobuf.Duration -} -var file_grpc_binlog_v1_binarylog_proto_depIdxs = []int32{ - 11, // 0: grpc.binarylog.v1.GrpcLogEntry.timestamp:type_name -> google.protobuf.Timestamp - 0, // 1: grpc.binarylog.v1.GrpcLogEntry.type:type_name -> grpc.binarylog.v1.GrpcLogEntry.EventType - 1, // 2: grpc.binarylog.v1.GrpcLogEntry.logger:type_name -> grpc.binarylog.v1.GrpcLogEntry.Logger - 4, // 3: grpc.binarylog.v1.GrpcLogEntry.client_header:type_name -> grpc.binarylog.v1.ClientHeader - 5, // 4: grpc.binarylog.v1.GrpcLogEntry.server_header:type_name -> grpc.binarylog.v1.ServerHeader - 7, // 5: grpc.binarylog.v1.GrpcLogEntry.message:type_name -> grpc.binarylog.v1.Message - 6, // 6: grpc.binarylog.v1.GrpcLogEntry.trailer:type_name -> grpc.binarylog.v1.Trailer - 10, // 7: grpc.binarylog.v1.GrpcLogEntry.peer:type_name -> grpc.binarylog.v1.Address - 8, // 8: grpc.binarylog.v1.ClientHeader.metadata:type_name -> grpc.binarylog.v1.Metadata - 12, // 9: grpc.binarylog.v1.ClientHeader.timeout:type_name -> google.protobuf.Duration - 8, // 10: grpc.binarylog.v1.ServerHeader.metadata:type_name -> grpc.binarylog.v1.Metadata - 8, // 11: grpc.binarylog.v1.Trailer.metadata:type_name -> grpc.binarylog.v1.Metadata - 9, // 12: grpc.binarylog.v1.Metadata.entry:type_name -> grpc.binarylog.v1.MetadataEntry - 2, // 13: grpc.binarylog.v1.Address.type:type_name -> grpc.binarylog.v1.Address.Type - 14, // [14:14] is the sub-list for method output_type - 14, // [14:14] is the sub-list for method input_type - 14, // [14:14] is the sub-list for extension type_name - 14, // [14:14] is the sub-list for extension extendee - 0, // [0:14] is the sub-list for field type_name -} - -func init() { file_grpc_binlog_v1_binarylog_proto_init() } -func file_grpc_binlog_v1_binarylog_proto_init() { - if File_grpc_binlog_v1_binarylog_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*GrpcLogEntry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ClientHeader); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerHeader); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Trailer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Message); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Metadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MetadataEntry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Address); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*GrpcLogEntry_ClientHeader)(nil), - (*GrpcLogEntry_ServerHeader)(nil), - (*GrpcLogEntry_Message)(nil), - (*GrpcLogEntry_Trailer)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_binlog_v1_binarylog_proto_rawDesc, - NumEnums: 3, - NumMessages: 8, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_grpc_binlog_v1_binarylog_proto_goTypes, - DependencyIndexes: file_grpc_binlog_v1_binarylog_proto_depIdxs, - EnumInfos: file_grpc_binlog_v1_binarylog_proto_enumTypes, - MessageInfos: file_grpc_binlog_v1_binarylog_proto_msgTypes, - }.Build() - File_grpc_binlog_v1_binarylog_proto = out.File - file_grpc_binlog_v1_binarylog_proto_rawDesc = nil - file_grpc_binlog_v1_binarylog_proto_goTypes = nil - file_grpc_binlog_v1_binarylog_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go deleted file mode 100644 index 788c89c16f..0000000000 --- a/vendor/google.golang.org/grpc/call.go +++ /dev/null @@ -1,74 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" -) - -// Invoke sends the RPC request on the wire and returns after response is -// received. This is typically called by generated code. -// -// All errors returned by Invoke are compatible with the status package. -func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error { - // allow interceptor to see all applicable call options, which means those - // configured as defaults from dial option as well as per-call options - opts = combine(cc.dopts.callOptions, opts) - - if cc.dopts.unaryInt != nil { - return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...) - } - return invoke(ctx, method, args, reply, cc, opts...) -} - -func combine(o1 []CallOption, o2 []CallOption) []CallOption { - // we don't use append because o1 could have extra capacity whose - // elements would be overwritten, which could cause inadvertent - // sharing (and race conditions) between concurrent calls - if len(o1) == 0 { - return o2 - } else if len(o2) == 0 { - return o1 - } - ret := make([]CallOption, len(o1)+len(o2)) - copy(ret, o1) - copy(ret[len(o1):], o2) - return ret -} - -// Invoke sends the RPC request on the wire and returns after response is -// received. This is typically called by generated code. -// -// DEPRECATED: Use ClientConn.Invoke instead. -func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error { - return cc.Invoke(ctx, method, args, reply, opts...) -} - -var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} - -func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { - cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) - if err != nil { - return err - } - if err := cs.SendMsg(req); err != nil { - return err - } - return cs.RecvMsg(reply) -} diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go deleted file mode 100644 index 32b7fa5794..0000000000 --- a/vendor/google.golang.org/grpc/channelz/channelz.go +++ /dev/null @@ -1,36 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package channelz exports internals of the channelz implementation as required -// by other gRPC packages. -// -// The implementation of the channelz spec as defined in -// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by -// the `internal/channelz` package. 
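For orientation alongside the call.go deletion above: generated stubs funnel every unary RPC through ClientConn.Invoke. A minimal hand-rolled sketch of that path, assuming a placeholder target and using the stock grpc.health.v1 service as the example method:

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	// Placeholder target; any reachable gRPC server exposing the
	// standard health service would do.
	cc, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer cc.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	// This is exactly what a generated stub does for a unary RPC:
	// one Invoke with the full method name and request/reply messages.
	in, out := &healthpb.HealthCheckRequest{}, &healthpb.HealthCheckResponse{}
	if err := cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out); err != nil {
		log.Fatalf("invoke: %v", err)
	}
	log.Printf("serving status: %v", out.GetStatus())
}
```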
-// -// # Experimental -// -// Notice: All APIs in this package are experimental and may be removed in a -// later release. -package channelz - -import "google.golang.org/grpc/internal/channelz" - -// Identifier is an opaque identifier which uniquely identifies an entity in the -// channelz database. -type Identifier = channelz.Identifier diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go deleted file mode 100644 index f6e815e6bf..0000000000 --- a/vendor/google.golang.org/grpc/clientconn.go +++ /dev/null @@ -1,1876 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "errors" - "fmt" - "math" - "net/url" - "strings" - "sync" - "sync/atomic" - "time" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/base" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/idle" - "google.golang.org/grpc/internal/pretty" - iresolver "google.golang.org/grpc/internal/resolver" - "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" - "google.golang.org/grpc/status" - - _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. - _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. - _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver. - _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. -) - -const ( - // minimum time to give a connection to complete - minConnectTimeout = 20 * time.Second -) - -var ( - // ErrClientConnClosing indicates that the operation is illegal because - // the ClientConn is closing. - // - // Deprecated: this error should not be relied upon by users; use the status - // code of Canceled instead. - ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing") - // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs. - errConnDrain = errors.New("grpc: the connection is drained") - // errConnClosing indicates that the connection is closing. - errConnClosing = errors.New("grpc: the connection is closing") - // errConnIdling indicates that the connection is being closed as the channel - // is moving to an idle mode due to inactivity. - errConnIdling = errors.New("grpc: the connection is closing due to channel idleness") - // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default - // service config.
- invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" -) - -// The following errors are returned from Dial and DialContext -var ( - // errNoTransportSecurity indicates that there is no transport security - // being set for ClientConn. Users should either set one or explicitly - // call WithInsecure DialOption to disable security. - errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)") - // errTransportCredsAndBundle indicates that creds bundle is used together - // with other individual Transport Credentials. - errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") - // errNoTransportCredsInBundle indicates that the configured creds bundle - // returned transport credentials which were nil. - errNoTransportCredsInBundle = errors.New("grpc: credentials.Bundle must return non-nil transport credentials") - // errTransportCredentialsMissing indicates that users want to transmit - // security information (e.g., OAuth2 token) which requires a secure - // connection on an insecure connection. - errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") -) - -const ( - defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4 - defaultClientMaxSendMessageSize = math.MaxInt32 - // http2IOBufSize specifies the buffer size for sending frames. - defaultWriteBufSize = 32 * 1024 - defaultReadBufSize = 32 * 1024 -) - -// Dial creates a client connection to the given target. -func Dial(target string, opts ...DialOption) (*ClientConn, error) { - return DialContext(context.Background(), target, opts...) -} - -type defaultConfigSelector struct { - sc *ServiceConfig -} - -func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) { - return &iresolver.RPCConfig{ - Context: rpcInfo.Context, - MethodConfig: getMethodConfig(dcs.sc, rpcInfo.Method), - }, nil -} - -// newClient returns a new client in idle mode. -func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) { - cc := &ClientConn{ - target: target, - conns: make(map[*addrConn]struct{}), - dopts: defaultDialOptions(), - czData: new(channelzData), - } - - cc.retryThrottler.Store((*retryThrottler)(nil)) - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) - cc.ctx, cc.cancel = context.WithCancel(context.Background()) - - // Apply dial options. - disableGlobalOpts := false - for _, opt := range opts { - if _, ok := opt.(*disableGlobalDialOptions); ok { - disableGlobalOpts = true - break - } - } - - if !disableGlobalOpts { - for _, opt := range globalDialOptions { - opt.apply(&cc.dopts) - } - } - - for _, opt := range opts { - opt.apply(&cc.dopts) - } - chainUnaryClientInterceptors(cc) - chainStreamClientInterceptors(cc) - - if err := cc.validateTransportCredentials(); err != nil { - return nil, err - } - - if cc.dopts.defaultServiceConfigRawJSON != nil { - scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) - if scpr.Err != nil { - return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err) - } - cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig) - } - cc.mkp = cc.dopts.copts.KeepaliveParams - - // Register ClientConn with channelz.
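The validateTransportCredentials call in the deleted newClient above is what rejects credential misconfiguration at dial time. A small sketch of the two common outcomes, assuming a placeholder target:

```go
// Dialing with no transport credentials at all fails the first check in
// validateTransportCredentials and surfaces errNoTransportSecurity.
if _, err := grpc.Dial("passthrough:///demo"); err != nil {
	log.Printf("expected failure: %v", err)
	// grpc: no transport security set (use grpc.WithTransportCredentials(...) ...)
}

// Opting out of TLS must be explicit via insecure credentials.
cc, err := grpc.Dial("passthrough:///demo",
	grpc.WithTransportCredentials(insecure.NewCredentials()))
if err != nil {
	log.Fatalf("dial: %v", err)
}
defer cc.Close()
```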
- cc.channelzRegistration(target) - - // TODO: Ideally it should be impossible to error from this function after - // channelz registration. This will require removing some channelz logs - // from the following functions that can error. Errors can be returned to - // the user, and successful logs can be emitted here, after the checks have - // passed and channelz is subsequently registered. - - // Determine the resolver to use. - if err := cc.parseTargetAndFindResolver(); err != nil { - channelz.RemoveEntry(cc.channelzID) - return nil, err - } - if err = cc.determineAuthority(); err != nil { - channelz.RemoveEntry(cc.channelzID) - return nil, err - } - - cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) - cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) - - cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. - cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) - return cc, nil -} - -// DialContext creates a client connection to the given target. By default, it's -// a non-blocking dial (the function won't wait for connections to be -// established, and connecting happens in the background). To make it a blocking -// dial, use WithBlock() dial option. -// -// In the non-blocking case, the ctx does not act against the connection. It -// only controls the setup steps. -// -// In the blocking case, ctx can be used to cancel or expire the pending -// connection. Once this function returns, the cancellation and expiration of -// ctx will be noop. Users should call ClientConn.Close to terminate all the -// pending operations after this function returns. -// -// The target name syntax is defined in -// https://github.com/grpc/grpc/blob/master/doc/naming.md. -// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. -func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { - cc, err := newClient(target, opts...) - if err != nil { - return nil, err - } - - // We start the channel off in idle mode, but kick it out of idle now, - // instead of waiting for the first RPC. Other gRPC implementations do wait - // for the first RPC to kick the channel out of idle. But doing so would be - // a major behavior change for our users who are used to seeing the channel - // active after Dial. - // - // Taking this approach of kicking it out of idle at the end of this method - // allows us to share the code between channel creation and exiting idle - // mode. This will also make it easy for us to switch to starting the - // channel off in idle, i.e. by making newClient exported. - - defer func() { - if err != nil { - cc.Close() - } - }() - - // This creates the name resolver, load balancer, etc. - if err := cc.idlenessMgr.ExitIdleMode(); err != nil { - return nil, err - } - - // Return now for non-blocking dials. - if !cc.dopts.block { - return cc, nil - } - - if cc.dopts.timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout) - defer cancel() - } - defer func() { - select { - case <-ctx.Done(): - switch { - case ctx.Err() == err: - conn = nil - case err == nil || !cc.dopts.returnLastError: - conn, err = nil, ctx.Err() - default: - conn, err = nil, fmt.Errorf("%v: %v", ctx.Err(), err) - } - default: - } - }() - - // A blocking dial blocks until the clientConn is ready. 
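A sketch of the two dial modes described in the DialContext comment above (placeholder target; most error handling elided):

```go
// Non-blocking (default): DialContext returns immediately and the channel
// connects in the background; ctx only bounds the setup steps.
cc, _ := grpc.DialContext(context.Background(), "dns:///demo:50051",
	grpc.WithTransportCredentials(insecure.NewCredentials()))
_ = cc

// Blocking: with WithBlock, ctx bounds the whole wait for a Ready channel,
// i.e. the for-loop that follows below.
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
ccBlocking, err := grpc.DialContext(ctx, "dns:///demo:50051",
	grpc.WithTransportCredentials(insecure.NewCredentials()),
	grpc.WithBlock())
if err != nil {
	log.Fatalf("dial: %v", err) // e.g. context deadline exceeded
}
defer ccBlocking.Close()
```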
- for { - s := cc.GetState() - if s == connectivity.Idle { - cc.Connect() - } - if s == connectivity.Ready { - return cc, nil - } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { - if err = cc.connectionError(); err != nil { - terr, ok := err.(interface { - Temporary() bool - }) - if ok && !terr.Temporary() { - return nil, err - } - } - } - if !cc.WaitForStateChange(ctx, s) { - // ctx got timeout or canceled. - if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { - return nil, err - } - return nil, ctx.Err() - } - } -} - -// addTraceEvent is a helper method to add a trace event on the channel. If the -// channel is a nested one, the same event is also added on the parent channel. -func (cc *ClientConn) addTraceEvent(msg string) { - ted := &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel %s", msg), - Severity: channelz.CtInfo, - } - if cc.dopts.channelzParentID != nil { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) -} - -type idler ClientConn - -func (i *idler) EnterIdleMode() { - (*ClientConn)(i).enterIdleMode() -} - -func (i *idler) ExitIdleMode() error { - return (*ClientConn)(i).exitIdleMode() -} - -// exitIdleMode moves the channel out of idle mode by recreating the name -// resolver and load balancer. This should never be called directly; use -// cc.idlenessMgr.ExitIdleMode instead. -func (cc *ClientConn) exitIdleMode() (err error) { - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return errConnClosing - } - cc.mu.Unlock() - - // This needs to be called without cc.mu because this builds a new resolver - // which might update state or report error inline, which would then need to - // acquire cc.mu. - if err := cc.resolverWrapper.start(); err != nil { - return err - } - - cc.addTraceEvent("exiting idle mode") - return nil -} - -// initIdleStateLocked initializes common state to how it should be while idle. -func (cc *ClientConn) initIdleStateLocked() { - cc.resolverWrapper = newCCResolverWrapper(cc) - cc.balancerWrapper = newCCBalancerWrapper(cc) - cc.firstResolveEvent = grpcsync.NewEvent() - // cc.conns == nil is a proxy for the ClientConn being closed. So, instead - // of setting it to nil here, we recreate the map. This also means that we - // don't have to do this when exiting idle mode. - cc.conns = make(map[*addrConn]struct{}) -} - -// enterIdleMode puts the channel in idle mode, and as part of it shuts down the -// name resolver, load balancer, and any subchannels. This should never be -// called directly; use cc.idlenessMgr.EnterIdleMode instead. -func (cc *ClientConn) enterIdleMode() { - cc.mu.Lock() - - if cc.conns == nil { - cc.mu.Unlock() - return - } - - conns := cc.conns - - rWrapper := cc.resolverWrapper - rWrapper.close() - cc.pickerWrapper.reset() - bWrapper := cc.balancerWrapper - bWrapper.close() - cc.csMgr.updateState(connectivity.Idle) - cc.addTraceEvent("entering idle mode") - - cc.initIdleStateLocked() - - cc.mu.Unlock() - - // Block until the name resolver and LB policy are closed. - <-rWrapper.serializer.Done() - <-bWrapper.serializer.Done() - - // Close all subchannels after the LB policy is closed. - for ac := range conns { - ac.tearDown(errConnIdling) - } -} - -// validateTransportCredentials performs a series of checks on the configured -// transport credentials. 
It returns a non-nil error if any of these conditions -// are met: -// - no transport creds and no creds bundle is configured -// - both transport creds and creds bundle are configured -// - creds bundle is configured, but it lacks transport credentials -// - insecure transport creds configured alongside call creds that require -// transport level security -// -// If none of the above conditions are met, the configured credentials are -// deemed valid and a nil error is returned. -func (cc *ClientConn) validateTransportCredentials() error { - if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { - return errNoTransportSecurity - } - if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { - return errTransportCredsAndBundle - } - if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { - return errNoTransportCredsInBundle - } - transportCreds := cc.dopts.copts.TransportCredentials - if transportCreds == nil { - transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() - } - if transportCreds.Info().SecurityProtocol == "insecure" { - for _, cd := range cc.dopts.copts.PerRPCCredentials { - if cd.RequireTransportSecurity() { - return errTransportCredentialsMissing - } - } - } - return nil -} - -// channelzRegistration registers the newly created ClientConn with channelz and -// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`. -// A channelz trace event is emitted for ClientConn creation. If the newly -// created ClientConn is a nested one, i.e. a valid parent ClientConn ID is -// specified via a dial option, the trace event is also added to the parent. -// -// Doesn't grab cc.mu as this method is expected to be called only at Dial time. -func (cc *ClientConn) channelzRegistration(target string) { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - cc.addTraceEvent("created") -} - -// chainUnaryClientInterceptors chains all unary client interceptors into one. -func chainUnaryClientInterceptors(cc *ClientConn) { - interceptors := cc.dopts.chainUnaryInts - // Prepend dopts.unaryInt to the chaining interceptors if it exists, since unaryInt will - // be executed before any other chained interceptors. - if cc.dopts.unaryInt != nil { - interceptors = append([]UnaryClientInterceptor{cc.dopts.unaryInt}, interceptors...) - } - var chainedInt UnaryClientInterceptor - if len(interceptors) == 0 { - chainedInt = nil - } else if len(interceptors) == 1 { - chainedInt = interceptors[0] - } else { - chainedInt = func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { - return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) - } - } - cc.dopts.unaryInt = chainedInt -} - -// getChainUnaryInvoker recursively generates the chained unary invoker. -func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, finalInvoker UnaryInvoker) UnaryInvoker { - if curr == len(interceptors)-1 { - return finalInvoker - } - return func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { - return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) - } -} - -// chainStreamClientInterceptors chains all stream client interceptors into one.
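To make the unary chaining above concrete, a hedged sketch: two interceptors registered with grpc.WithChainUnaryInterceptor land in cc.dopts.chainUnaryInts and run in registration order, each reaching the next through getChainUnaryInvoker (target is a placeholder):

```go
logging := func(ctx context.Context, method string, req, reply any,
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	log.Printf("-> %s", method)
	err := invoker(ctx, method, req, reply, cc, opts...) // next in the chain
	log.Printf("<- %s err=%v", method, err)
	return err
}
timing := func(ctx context.Context, method string, req, reply any,
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	defer func() { log.Printf("%s took %v", method, time.Since(start)) }()
	return invoker(ctx, method, req, reply, cc, opts...)
}

// logging runs first, then timing, then the real invoker sends the RPC.
cc, err := grpc.Dial("localhost:50051",
	grpc.WithTransportCredentials(insecure.NewCredentials()),
	grpc.WithChainUnaryInterceptor(logging, timing))
```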
-func chainStreamClientInterceptors(cc *ClientConn) { - interceptors := cc.dopts.chainStreamInts - // Prepend dopts.streamInt to the chaining interceptors if it exists, since streamInt will - // be executed before any other chained interceptors. - if cc.dopts.streamInt != nil { - interceptors = append([]StreamClientInterceptor{cc.dopts.streamInt}, interceptors...) - } - var chainedInt StreamClientInterceptor - if len(interceptors) == 0 { - chainedInt = nil - } else if len(interceptors) == 1 { - chainedInt = interceptors[0] - } else { - chainedInt = func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) { - return interceptors[0](ctx, desc, cc, method, getChainStreamer(interceptors, 0, streamer), opts...) - } - } - cc.dopts.streamInt = chainedInt -} - -// getChainStreamer recursively generates the chained client stream constructor. -func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStreamer Streamer) Streamer { - if curr == len(interceptors)-1 { - return finalStreamer - } - return func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { - return interceptors[curr+1](ctx, desc, cc, method, getChainStreamer(interceptors, curr+1, finalStreamer), opts...) - } -} - -// newConnectivityStateManager creates a connectivityStateManager with -// the specified id. -func newConnectivityStateManager(ctx context.Context, id *channelz.Identifier) *connectivityStateManager { - return &connectivityStateManager{ - channelzID: id, - pubSub: grpcsync.NewPubSub(ctx), - } -} - -// connectivityStateManager keeps the connectivity.State of ClientConn. -// This struct will eventually be exported so the balancers can access it. -// -// TODO: If possible, get rid of the `connectivityStateManager` type, and -// provide this functionality using the `PubSub`, to avoid keeping track of -// the connectivity state at two places. -type connectivityStateManager struct { - mu sync.Mutex - state connectivity.State - notifyChan chan struct{} - channelzID *channelz.Identifier - pubSub *grpcsync.PubSub -} - -// updateState updates the connectivity.State of ClientConn. -// If there's a change, it notifies goroutines waiting on state change to -// happen. -func (csm *connectivityStateManager) updateState(state connectivity.State) { - csm.mu.Lock() - defer csm.mu.Unlock() - if csm.state == connectivity.Shutdown { - return - } - if csm.state == state { - return - } - csm.state = state - csm.pubSub.Publish(state) - - channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) - if csm.notifyChan != nil { - // There are other goroutines waiting on this channel. - close(csm.notifyChan) - csm.notifyChan = nil - } -} - -func (csm *connectivityStateManager) getState() connectivity.State { - csm.mu.Lock() - defer csm.mu.Unlock() - return csm.state -} - -func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { - csm.mu.Lock() - defer csm.mu.Unlock() - if csm.notifyChan == nil { - csm.notifyChan = make(chan struct{}) - } - return csm.notifyChan -} - -// ClientConnInterface defines the functions clients need to perform unary and -// streaming RPCs. It is implemented by *ClientConn, and is only intended to -// be referenced by generated code. -type ClientConnInterface interface { - // Invoke performs a unary RPC and returns after the response is received - // into reply.
- Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error - // NewStream begins a streaming RPC. - NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) -} - -// Assert *ClientConn implements ClientConnInterface. -var _ ClientConnInterface = (*ClientConn)(nil) - -// ClientConn represents a virtual connection to a conceptual endpoint, to -// perform RPCs. -// -// A ClientConn is free to have zero or more actual connections to the endpoint -// based on configuration, load, etc. It is also free to determine which actual -// endpoints to use and may change it every RPC, permitting client-side load -// balancing. -// -// A ClientConn encapsulates a range of functionality including name -// resolution, TCP connection establishment (with retries and backoff) and TLS -// handshakes. It also handles errors on established connections by -// re-resolving the name and reconnecting. -type ClientConn struct { - ctx context.Context // Initialized using the background context at dial time. - cancel context.CancelFunc // Cancelled on close. - - // The following are initialized at dial time, and are read-only after that. - target string // User's dial target. - parsedTarget resolver.Target // See parseTargetAndFindResolver(). - authority string // See determineAuthority(). - dopts dialOptions // Default and user specified dial options. - channelzID *channelz.Identifier // Channelz identifier for the channel. - resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). - idlenessMgr *idle.Manager - - // The following provide their own synchronization, and therefore don't - // require cc.mu to be held to access them. - csMgr *connectivityStateManager - pickerWrapper *pickerWrapper - safeConfigSelector iresolver.SafeConfigSelector - czData *channelzData - retryThrottler atomic.Value // Updated from service config. - - // mu protects the following fields. - // TODO: split mu so the same mutex isn't used for everything. - mu sync.RWMutex - resolverWrapper *ccResolverWrapper // Always recreated whenever entering idle to simplify Close. - balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close. - sc *ServiceConfig // Latest service config received from the resolver. - conns map[*addrConn]struct{} // Set to nil on close. - mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. - // firstResolveEvent is used to track whether the name resolver sent us at - // least one update. RPCs block on this event. May be accessed without mu - // if we know we cannot be asked to enter idle mode while accessing it (e.g. - // when the idle manager has already been closed, or if we are already - // entering idle mode). - firstResolveEvent *grpcsync.Event - - lceMu sync.Mutex // protects lastConnectionError - lastConnectionError error -} - -// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or -// ctx expires. A true value is returned in the former case and false in the latter. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { - ch := cc.csMgr.getNotifyChan() - if cc.csMgr.getState() != sourceState { - return true - } - select { - case <-ctx.Done(): - return false - case <-ch: - return true - } -} - -// GetState returns the connectivity.State of ClientConn.
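The WaitForStateChange/GetState pair above supports a simple readiness watch. A sketch assuming cc is an existing *grpc.ClientConn, ctx bounds the wait, and "google.golang.org/grpc/connectivity" is imported:

```go
// Drive the channel to Ready or give up when ctx expires; this mirrors the
// blocking-dial loop in DialContext.
for {
	s := cc.GetState()
	if s == connectivity.Ready {
		break
	}
	if s == connectivity.Idle {
		cc.Connect() // kick idle subchannels into connecting
	}
	if !cc.WaitForStateChange(ctx, s) {
		log.Fatalf("context expired while channel was %v", s)
	}
}
```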
-// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a later -// release. -func (cc *ClientConn) GetState() connectivity.State { - return cc.csMgr.getState() -} - -// Connect causes all subchannels in the ClientConn to attempt to connect if -// the channel is idle. Does not wait for the connection attempts to begin -// before returning. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a later -// release. -func (cc *ClientConn) Connect() { - if err := cc.idlenessMgr.ExitIdleMode(); err != nil { - cc.addTraceEvent(err.Error()) - return - } - // If the ClientConn was not in idle mode, we need to call ExitIdle on the - // LB policy so that connections can be created. - cc.mu.Lock() - cc.balancerWrapper.exitIdle() - cc.mu.Unlock() -} - -// waitForResolvedAddrs blocks until the resolver has provided addresses or the -// context expires. Returns nil unless the context expires first; otherwise -// returns a status error based on the context. -func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { - // This is on the RPC path, so we use a fast path to avoid the - // more-expensive "select" below after the resolver has returned once. - if cc.firstResolveEvent.HasFired() { - return nil - } - select { - case <-cc.firstResolveEvent.Done(): - return nil - case <-ctx.Done(): - return status.FromContextError(ctx.Err()).Err() - case <-cc.ctx.Done(): - return ErrClientConnClosing - } -} - -var emptyServiceConfig *ServiceConfig - -func init() { - cfg := parseServiceConfig("{}") - if cfg.Err != nil { - panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) - } - emptyServiceConfig = cfg.Config.(*ServiceConfig) - - internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { - return cc.csMgr.pubSub.Subscribe(s) - } - internal.EnterIdleModeForTesting = func(cc *ClientConn) { - cc.idlenessMgr.EnterIdleModeForTesting() - } - internal.ExitIdleModeForTesting = func(cc *ClientConn) error { - return cc.idlenessMgr.ExitIdleMode() - } -} - -func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { - if cc.sc != nil { - cc.applyServiceConfigAndBalancer(cc.sc, nil, addrs) - return - } - if cc.dopts.defaultServiceConfig != nil { - cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}, addrs) - } else { - cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}, addrs) - } -} - -func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) error { - defer cc.firstResolveEvent.Fire() - // Check if the ClientConn is already closed. Some fields (e.g. - // balancerWrapper) are set to nil when closing the ClientConn, and could - // cause nil pointer panic if we don't have this check. - if cc.conns == nil { - cc.mu.Unlock() - return nil - } - - if err != nil { - // May need to apply the initial service config in case the resolver - // doesn't support service configs, or doesn't provide a service config - // with the new addresses. - cc.maybeApplyDefaultServiceConfig(nil) - - cc.balancerWrapper.resolverError(err) - - // No addresses are valid with err set; return early. 
- cc.mu.Unlock() - return balancer.ErrBadResolverState - } - - var ret error - if cc.dopts.disableServiceConfig { - channelz.Infof(logger, cc.channelzID, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig) - cc.maybeApplyDefaultServiceConfig(s.Addresses) - } else if s.ServiceConfig == nil { - cc.maybeApplyDefaultServiceConfig(s.Addresses) - // TODO: do we need to apply a failing LB policy if there is no - // default, per the error handling design? - } else { - if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok { - configSelector := iresolver.GetConfigSelector(s) - if configSelector != nil { - if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 { - channelz.Infof(logger, cc.channelzID, "method configs in service config will be ignored due to presence of config selector") - } - } else { - configSelector = &defaultConfigSelector{sc} - } - cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) - } else { - ret = balancer.ErrBadResolverState - if cc.sc == nil { - // Apply the failing LB only if we haven't received valid service config - // from the name resolver in the past. - cc.applyFailingLBLocked(s.ServiceConfig) - cc.mu.Unlock() - return ret - } - } - } - - var balCfg serviceconfig.LoadBalancingConfig - if cc.sc != nil && cc.sc.lbConfig != nil { - balCfg = cc.sc.lbConfig.cfg - } - bw := cc.balancerWrapper - cc.mu.Unlock() - - uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) - if ret == nil { - ret = uccsErr // prefer ErrBadResolver state since any other error is - // currently meaningless to the caller. - } - return ret -} - -// applyFailingLBLocked is akin to configuring an LB policy on the channel which -// always fails RPCs. Here, an actual LB policy is not configured, but an always -// erroring picker is configured, which returns errors with information about -// what was invalid in the received service config. A config selector with no -// service config is configured, and the connectivity state of the channel is -// set to TransientFailure. -func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) { - var err error - if sc.Err != nil { - err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) - } else { - err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) - } - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) - cc.pickerWrapper.updatePicker(base.NewErrPicker(err)) - cc.csMgr.updateState(connectivity.TransientFailure) -} - -// Makes a copy of the input addresses slice and clears out the balancer -// attributes field. Addresses are passed during subconn creation and address -// update operations. In both cases, we will clear the balancer attributes by -// calling this function, and therefore we will be able to use the Equal method -// provided by the resolver.Address type for comparison. -func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address { - out := make([]resolver.Address, len(in)) - for i := range in { - out[i] = in[i] - out[i].BalancerAttributes = nil - } - return out -} - -// newAddrConnLocked creates an addrConn for addrs and adds it to cc.conns. -// -// Caller needs to make sure len(addrs) > 0. 
-func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { - if cc.conns == nil { - return nil, ErrClientConnClosing - } - - ac := &addrConn{ - state: connectivity.Idle, - cc: cc, - addrs: copyAddressesWithoutBalancerAttributes(addrs), - scopts: opts, - dopts: cc.dopts, - czData: new(channelzData), - resetBackoff: make(chan struct{}), - stateChan: make(chan struct{}), - } - ac.ctx, ac.cancel = context.WithCancel(cc.ctx) - - var err error - ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") - if err != nil { - return nil, err - } - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()), - Severity: channelz.CtInfo, - }, - }) - - // Track ac in cc. This needs to be done before any getTransport(...) is called. - cc.conns[ac] = struct{}{} - return ac, nil -} - -// removeAddrConn removes the addrConn in the subConn from clientConn. -// It also tears down the ac with the given error. -func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return - } - delete(cc.conns, ac) - cc.mu.Unlock() - ac.tearDown(err) -} - -func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { - return &channelz.ChannelInternalMetric{ - State: cc.GetState(), - Target: cc.target, - CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted), - CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded), - CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed), - LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)), - } -} - -// Target returns the target string of the ClientConn. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func (cc *ClientConn) Target() string { - return cc.target -} - -func (cc *ClientConn) incrCallsStarted() { - atomic.AddInt64(&cc.czData.callsStarted, 1) - atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano()) -} - -func (cc *ClientConn) incrCallsSucceeded() { - atomic.AddInt64(&cc.czData.callsSucceeded, 1) -} - -func (cc *ClientConn) incrCallsFailed() { - atomic.AddInt64(&cc.czData.callsFailed, 1) -} - -// connect starts creating a transport. -// It does nothing if the ac is not IDLE. -// TODO(bar) Move this to the addrConn section. -func (ac *addrConn) connect() error { - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - if logger.V(2) { - logger.Infof("connect called on shutdown addrConn; ignoring.") - } - ac.mu.Unlock() - return errConnClosing - } - if ac.state != connectivity.Idle { - if logger.V(2) { - logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state) - } - ac.mu.Unlock() - return nil - } - ac.mu.Unlock() - - ac.resetTransport() - return nil -} - -func equalAddresses(a, b []resolver.Address) bool { - if len(a) != len(b) { - return false - } - for i, v := range a { - if !v.Equal(b[i]) { - return false - } - } - return true -} - -// updateAddrs updates ac.addrs with the new addresses list and handles active -// connections or connection attempts. 
-func (ac *addrConn) updateAddrs(addrs []resolver.Address) { - ac.mu.Lock() - channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs)) - - addrs = copyAddressesWithoutBalancerAttributes(addrs) - if equalAddresses(ac.addrs, addrs) { - ac.mu.Unlock() - return - } - - ac.addrs = addrs - - if ac.state == connectivity.Shutdown || - ac.state == connectivity.TransientFailure || - ac.state == connectivity.Idle { - // We were not connecting, so do nothing but update the addresses. - ac.mu.Unlock() - return - } - - if ac.state == connectivity.Ready { - // Try to find the connected address. - for _, a := range addrs { - a.ServerName = ac.cc.getServerName(a) - if a.Equal(ac.curAddr) { - // We are connected to a valid address, so do nothing but - // update the addresses. - ac.mu.Unlock() - return - } - } - } - - // We are either connected to the wrong address or currently connecting. - // Stop the current iteration and restart. - - ac.cancel() - ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) - - // We have to defer here because GracefulClose => onClose, which requires - // locking ac.mu. - if ac.transport != nil { - defer ac.transport.GracefulClose() - ac.transport = nil - } - - if len(addrs) == 0 { - ac.updateConnectivityState(connectivity.Idle, nil) - } - - ac.mu.Unlock() - - // Since we were connecting/connected, we should start a new connection - // attempt. - go ac.resetTransport() -} - -// getServerName determines the serverName to be used in the connection -// handshake. The default value for the serverName is the authority on the -// ClientConn, which either comes from the user's dial target or through an -// authority override specified using the WithAuthority dial option. Name -// resolvers can specify a per-address override for the serverName through the -// resolver.Address.ServerName field which is used only if the WithAuthority -// dial option was not used. The rationale is that per-address authority -// overrides specified by the name resolver can represent a security risk, while -// an override specified by the user is more dependable since they probably know -// what they are doing. -func (cc *ClientConn) getServerName(addr resolver.Address) string { - if cc.dopts.authority != "" { - return cc.dopts.authority - } - if addr.ServerName != "" { - return addr.ServerName - } - return cc.authority -} - -func getMethodConfig(sc *ServiceConfig, method string) MethodConfig { - if sc == nil { - return MethodConfig{} - } - if m, ok := sc.Methods[method]; ok { - return m - } - i := strings.LastIndex(method, "/") - if m, ok := sc.Methods[method[:i+1]]; ok { - return m - } - return sc.Methods[""] -} - -// GetMethodConfig gets the method config of the input method. -// If there's an exact match for input method (i.e. /service/method), we return -// the corresponding MethodConfig. -// If there isn't an exact match for the input method, we look for the service's default -// config under the service (i.e /service/) and then for the default for all services (empty string). -// -// If there is a default MethodConfig for the service, we return it. -// Otherwise, we return an empty MethodConfig. -func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { - // TODO: Avoid the locking here. 
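A sketch of the lookup order GetMethodConfig implements, expressed as the service-config JSON that populates sc.Methods (service, method names, and durations are illustrative):

```go
sc := `{
  "methodConfig": [
    {"name": [{"service": "echo.Echo", "method": "Ping"}], "waitForReady": true},
    {"name": [{"service": "echo.Echo"}], "timeout": "2s"},
    {"name": [{}], "timeout": "5s"}
  ]
}`
// A lookup for "/echo.Echo/Ping" consults the exact key "/echo.Echo/Ping"
// first, then the service default "/echo.Echo/", then the global default "".
cc, err := grpc.Dial("localhost:50051", // placeholder target
	grpc.WithTransportCredentials(insecure.NewCredentials()),
	grpc.WithDefaultServiceConfig(sc))
```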
- cc.mu.RLock() - defer cc.mu.RUnlock() - return getMethodConfig(cc.sc, method) -} - -func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { - cc.mu.RLock() - defer cc.mu.RUnlock() - if cc.sc == nil { - return nil - } - return cc.sc.healthCheckConfig -} - -func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { - return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{ - Ctx: ctx, - FullMethodName: method, - }) -} - -func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) { - if sc == nil { - // should never reach here. - return - } - cc.sc = sc - if configSelector != nil { - cc.safeConfigSelector.UpdateConfigSelector(configSelector) - } - - if cc.sc.retryThrottling != nil { - newThrottler := &retryThrottler{ - tokens: cc.sc.retryThrottling.MaxTokens, - max: cc.sc.retryThrottling.MaxTokens, - thresh: cc.sc.retryThrottling.MaxTokens / 2, - ratio: cc.sc.retryThrottling.TokenRatio, - } - cc.retryThrottler.Store(newThrottler) - } else { - cc.retryThrottler.Store((*retryThrottler)(nil)) - } - - var newBalancerName string - if cc.sc == nil || (cc.sc.lbConfig == nil && cc.sc.LB == nil) { - // No service config or no LB policy specified in config. - newBalancerName = PickFirstBalancerName - } else if cc.sc.lbConfig != nil { - newBalancerName = cc.sc.lbConfig.name - } else { // cc.sc.LB != nil - newBalancerName = *cc.sc.LB - } - cc.balancerWrapper.switchTo(newBalancerName) -} - -func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { - cc.mu.RLock() - cc.resolverWrapper.resolveNow(o) - cc.mu.RUnlock() -} - -func (cc *ClientConn) resolveNowLocked(o resolver.ResolveNowOptions) { - cc.resolverWrapper.resolveNow(o) -} - -// ResetConnectBackoff wakes up all subchannels in transient failure and causes -// them to attempt another connection immediately. It also resets the backoff -// times used for subsequent attempts regardless of the current state. -// -// In general, this function should not be used. Typical service or network -// outages result in a reasonable client reconnection strategy by default. -// However, if a previously unavailable network becomes available, this may be -// used to trigger an immediate reconnect. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func (cc *ClientConn) ResetConnectBackoff() { - cc.mu.Lock() - conns := cc.conns - cc.mu.Unlock() - for ac := range conns { - ac.resetConnectBackoff() - } -} - -// Close tears down the ClientConn and all underlying connections. -func (cc *ClientConn) Close() error { - defer func() { - cc.cancel() - <-cc.csMgr.pubSub.Done() - }() - - // Prevent calls to enter/exit idle immediately, and ensure we are not - // currently entering/exiting idle mode. - cc.idlenessMgr.Close() - - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return ErrClientConnClosing - } - - conns := cc.conns - cc.conns = nil - cc.csMgr.updateState(connectivity.Shutdown) - - // We can safely unlock and continue to access all fields now as - // cc.conns==nil, preventing any further operations on cc. - cc.mu.Unlock() - - cc.resolverWrapper.close() - // The order of closing matters here since the balancer wrapper assumes the - // picker is closed before it is closed. 
- cc.pickerWrapper.close() - cc.balancerWrapper.close() - - <-cc.resolverWrapper.serializer.Done() - <-cc.balancerWrapper.serializer.Done() - - for ac := range conns { - ac.tearDown(ErrClientConnClosing) - } - cc.addTraceEvent("deleted") - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add - // trace reference to the entity being deleted, and thus prevent it from being - // deleted right away. - channelz.RemoveEntry(cc.channelzID) - - return nil -} - -// addrConn is a network connection to a given address. -type addrConn struct { - ctx context.Context - cancel context.CancelFunc - - cc *ClientConn - dopts dialOptions - acbw *acBalancerWrapper - scopts balancer.NewSubConnOptions - - // transport is set when there's a viable transport (note: ac state may not be READY as LB channel - // health checking may require server to report healthy to set ac to READY), and is reset - // to nil when the current transport should no longer be used to create a stream (e.g. after GoAway - // is received, transport is closed, ac has been torn down). - transport transport.ClientTransport // The current transport. - - mu sync.Mutex - curAddr resolver.Address // The current address. - addrs []resolver.Address // All addresses that the resolver resolved to. - - // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State - stateChan chan struct{} // closed and recreated on every state change. - - backoffIdx int // Needs to be stateful for resetConnectBackoff. - resetBackoff chan struct{} - - channelzID *channelz.Identifier - czData *channelzData -} - -// Note: this requires a lock on ac.mu. -func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) { - if ac.state == s { - return - } - // When changing states, reset the state change channel. - close(ac.stateChan) - ac.stateChan = make(chan struct{}) - ac.state = s - if lastErr == nil { - channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) - } else { - channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) - } - ac.acbw.updateState(s, lastErr) -} - -// adjustParams updates parameters used to create transports upon -// receiving a GoAway. -func (ac *addrConn) adjustParams(r transport.GoAwayReason) { - switch r { - case transport.GoAwayTooManyPings: - v := 2 * ac.dopts.copts.KeepaliveParams.Time - ac.cc.mu.Lock() - if v > ac.cc.mkp.Time { - ac.cc.mkp.Time = v - } - ac.cc.mu.Unlock() - } -} - -func (ac *addrConn) resetTransport() { - ac.mu.Lock() - acCtx := ac.ctx - if acCtx.Err() != nil { - ac.mu.Unlock() - return - } - - addrs := ac.addrs - backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) - // This will be the duration that dial gets to finish. - dialDuration := minConnectTimeout - if ac.dopts.minConnectTimeout != nil { - dialDuration = ac.dopts.minConnectTimeout() - } - - if dialDuration < backoffFor { - // Give dial more time as we keep failing to connect. - dialDuration = backoffFor - } - // We can potentially spend all the time trying the first address, and - // if the server accepts the connection and then hangs, the following - // addresses will never be tried. - // - // The spec doesn't mention what should be done for multiple addresses. 
- // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm - connectDeadline := time.Now().Add(dialDuration) - - ac.updateConnectivityState(connectivity.Connecting, nil) - ac.mu.Unlock() - - if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { - ac.cc.resolveNow(resolver.ResolveNowOptions{}) - ac.mu.Lock() - if acCtx.Err() != nil { - // addrConn was torn down. - ac.mu.Unlock() - return - } - // After exhausting all addresses, the addrConn enters - // TRANSIENT_FAILURE. - ac.updateConnectivityState(connectivity.TransientFailure, err) - - // Backoff. - b := ac.resetBackoff - ac.mu.Unlock() - - timer := time.NewTimer(backoffFor) - select { - case <-timer.C: - ac.mu.Lock() - ac.backoffIdx++ - ac.mu.Unlock() - case <-b: - timer.Stop() - case <-acCtx.Done(): - timer.Stop() - return - } - - ac.mu.Lock() - if acCtx.Err() == nil { - ac.updateConnectivityState(connectivity.Idle, err) - } - ac.mu.Unlock() - return - } - // Success; reset backoff. - ac.mu.Lock() - ac.backoffIdx = 0 - ac.mu.Unlock() -} - -// tryAllAddrs tries to create a connection to the addresses, and stops at -// the first successful one. It returns an error if no address was successfully -// connected, or updates ac appropriately with the new transport. -func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { - var firstConnErr error - for _, addr := range addrs { - if ctx.Err() != nil { - return errConnClosing - } - ac.mu.Lock() - - ac.cc.mu.RLock() - ac.dopts.copts.KeepaliveParams = ac.cc.mkp - ac.cc.mu.RUnlock() - - copts := ac.dopts.copts - if ac.scopts.CredsBundle != nil { - copts.CredsBundle = ac.scopts.CredsBundle - } - ac.mu.Unlock() - - channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) - - err := ac.createTransport(ctx, addr, copts, connectDeadline) - if err == nil { - return nil - } - if firstConnErr == nil { - firstConnErr = err - } - ac.cc.updateConnectionError(err) - } - - // Couldn't connect to any address. - return firstConnErr -} - -// createTransport creates a connection to addr. It returns an error if the -// address was not successfully connected, or updates ac appropriately with the -// new transport. -func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { - addr.ServerName = ac.cc.getServerName(addr) - hctx, hcancel := context.WithCancel(ctx) - - onClose := func(r transport.GoAwayReason) { - ac.mu.Lock() - defer ac.mu.Unlock() - // adjust params based on GoAwayReason - ac.adjustParams(r) - if ctx.Err() != nil { - // Already shut down or connection attempt canceled. tearDown() or - // updateAddrs() already cleared the transport and canceled hctx - // via ac.ctx, and we expected this connection to be closed, so do - // nothing here. - return - } - hcancel() - if ac.transport == nil { - // We're still connecting to this address, which could error. Do - // not update the connectivity state or resolve; these will happen - // at the end of the tryAllAddrs connection loop in the event of an - // error. - return - } - ac.transport = nil - // Refresh the name resolver on any connection loss. - ac.cc.resolveNow(resolver.ResolveNowOptions{}) - // Always go idle and wait for the LB policy to initiate a new - // connection attempt.
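The deadline arithmetic in resetTransport above (per-attempt time = max(minConnectTimeout, current backoff)) is tunable at dial time. A sketch spelling out the knobs; the values shown are grpc-go's defaults and the target is a placeholder:

```go
import "google.golang.org/grpc/backoff"

cc, err := grpc.Dial("localhost:50051",
	grpc.WithTransportCredentials(insecure.NewCredentials()),
	grpc.WithConnectParams(grpc.ConnectParams{
		MinConnectTimeout: 20 * time.Second, // floor for each dial attempt
		Backoff: backoff.Config{
			BaseDelay:  1 * time.Second,
			Multiplier: 1.6,
			Jitter:     0.2,
			MaxDelay:   120 * time.Second,
		},
	}))
```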
- ac.updateConnectivityState(connectivity.Idle, nil) - } - - connectCtx, cancel := context.WithDeadline(ctx, connectDeadline) - defer cancel() - copts.ChannelzParentID = ac.channelzID - - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) - if err != nil { - if logger.V(2) { - logger.Infof("Creating new client transport to %q: %v", addr, err) - } - // newTr is either nil, or closed. - hcancel() - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) - return err - } - - ac.mu.Lock() - defer ac.mu.Unlock() - if ctx.Err() != nil { - // This can happen if the subConn was removed while in `Connecting` - // state. tearDown() would have set the state to `Shutdown`, but - // would not have closed the transport since ac.transport would not - // have been set at that point. - // - // We run this in a goroutine because newTr.Close() calls onClose() - // inline, which requires locking ac.mu. - // - // The error we pass to Close() is immaterial since there are no open - // streams at this point, so no trailers with error details will be sent - // out. We just need to pass a non-nil error. - // - // This can also happen when updateAddrs is called during a connection - // attempt. - go newTr.Close(transport.ErrConnClosing) - return nil - } - if hctx.Err() != nil { - // onClose was already called for this connection, but the connection - // was successfully established first. Consider it a success and set - // the new state to Idle. - ac.updateConnectivityState(connectivity.Idle, nil) - return nil - } - ac.curAddr = addr - ac.transport = newTr - ac.startHealthCheck(hctx) // Will set state to READY if appropriate. - return nil -} - -// startHealthCheck starts the health checking stream (RPC) to watch the health -// stats of this connection if health checking is requested and configured. -// -// LB channel health checking is enabled when all requirements below are met: -// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption -// 2. internal.HealthCheckFunc is set by importing the grpc/health package -// 3. a service config with non-empty healthCheckConfig field is provided -// 4. the load balancer requests it -// -// It sets addrConn to READY if the health checking stream is not started. -// -// Caller must hold ac.mu. -func (ac *addrConn) startHealthCheck(ctx context.Context) { - var healthcheckManagingState bool - defer func() { - if !healthcheckManagingState { - ac.updateConnectivityState(connectivity.Ready, nil) - } - }() - - if ac.cc.dopts.disableHealthCheck { - return - } - healthCheckConfig := ac.cc.healthCheckConfig() - if healthCheckConfig == nil { - return - } - if !ac.scopts.HealthCheckEnabled { - return - } - healthCheckFunc := ac.cc.dopts.healthCheckFunc - if healthCheckFunc == nil { - // The health package is not imported to set health check function. - // - // TODO: add a link to the health check doc in the error message. - channelz.Error(logger, ac.channelzID, "Health check is requested but health check function is not set.") - return - } - - healthcheckManagingState = true - - // Set up the health check helper functions. 
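Putting the four requirements listed above together, a hedged sketch of enabling client-side health checking; "demo.Service" and the target are placeholders:

```go
import (
	_ "google.golang.org/grpc/health" // requirement 2: sets internal.HealthCheckFunc
)

cc, err := grpc.Dial("localhost:50051",
	grpc.WithTransportCredentials(insecure.NewCredentials()),
	// Requirements 3 and 4: a non-empty healthCheckConfig plus an LB policy
	// (round_robin) that enables health checking on its subchannels.
	grpc.WithDefaultServiceConfig(`{
	  "loadBalancingConfig": [{"round_robin":{}}],
	  "healthCheckConfig": {"serviceName": "demo.Service"}
	}`))
```

Requirement 1 holds by default, since WithDisableHealthCheck is opt-in.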
- currentTr := ac.transport - newStream := func(method string) (any, error) { - ac.mu.Lock() - if ac.transport != currentTr { - ac.mu.Unlock() - return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use") - } - ac.mu.Unlock() - return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac) - } - setConnectivityState := func(s connectivity.State, lastErr error) { - ac.mu.Lock() - defer ac.mu.Unlock() - if ac.transport != currentTr { - return - } - ac.updateConnectivityState(s, lastErr) - } - // Start the health checking stream. - go func() { - err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) - if err != nil { - if status.Code(err) == codes.Unimplemented { - channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") - } else { - channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err) - } - } - }() -} - -func (ac *addrConn) resetConnectBackoff() { - ac.mu.Lock() - close(ac.resetBackoff) - ac.backoffIdx = 0 - ac.resetBackoff = make(chan struct{}) - ac.mu.Unlock() -} - -// getReadyTransport returns the transport if ac's state is READY or nil if not. -func (ac *addrConn) getReadyTransport() transport.ClientTransport { - ac.mu.Lock() - defer ac.mu.Unlock() - if ac.state == connectivity.Ready { - return ac.transport - } - return nil -} - -// getTransport waits until the addrconn is ready and returns the transport. -// If the context expires first, returns an appropriate status. If the -// addrConn is stopped first, returns an Unavailable status error. -func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { - for ctx.Err() == nil { - ac.mu.Lock() - t, state, sc := ac.transport, ac.state, ac.stateChan - ac.mu.Unlock() - if state == connectivity.Ready { - return t, nil - } - if state == connectivity.Shutdown { - return nil, status.Errorf(codes.Unavailable, "SubConn shutting down") - } - - select { - case <-ctx.Done(): - case <-sc: - } - } - return nil, status.FromContextError(ctx.Err()).Err() -} - -// tearDown starts to tear down the addrConn. -// -// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct -// will leak. In most cases, call cc.removeAddrConn() instead. -func (ac *addrConn) tearDown(err error) { - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return - } - curTr := ac.transport - ac.transport = nil - // We have to set the state to Shutdown before anything else to prevent races - // between setting the state and logic that waits on context cancellation / etc. - ac.updateConnectivityState(connectivity.Shutdown, nil) - ac.cancel() - ac.curAddr = resolver.Address{} - - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel deleted", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()), - Severity: channelz.CtInfo, - }, - }) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add - // trace reference to the entity being deleted, and thus prevent it from - // being deleted right away. - channelz.RemoveEntry(ac.channelzID) - ac.mu.Unlock() - - // We have to release the lock before the call to GracefulClose/Close here - // because both of them call onClose(), which requires locking ac.mu. 
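resetConnectBackoff above is a close-and-replace broadcast: closing the channel wakes every goroutine currently selecting on it (the backoff timer earlier in this file), and a fresh channel is armed for future waiters. The pattern in isolation, as a small sketch:

package broadcast

import "sync"

// resetter signals all current waiters by closing a channel, then re-arms
// with a fresh one; the same shape as addrConn.resetBackoff.
type resetter struct {
	mu sync.Mutex
	ch chan struct{}
}

func newResetter() *resetter { return &resetter{ch: make(chan struct{})} }

// wait returns the channel to select on; it is closed by the next Reset.
func (r *resetter) wait() <-chan struct{} {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.ch
}

// Reset wakes every goroutine blocked on wait() and re-arms the signal.
func (r *resetter) Reset() {
	r.mu.Lock()
	defer r.mu.Unlock()
	close(r.ch)
	r.ch = make(chan struct{})
}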
- if curTr != nil { - if err == errConnDrain { - // Close the transport gracefully when the subConn is being shutdown. - // - // GracefulClose() may be executed multiple times if: - // - multiple GoAway frames are received from the server - // - there are concurrent name resolver or balancer triggered - // address removal and GoAway - curTr.GracefulClose() - } else { - // Hard close the transport when the channel is entering idle or is - // being shutdown. In the case where the channel is being shutdown, - // closing of transports is also taken care of by cancelation of cc.ctx. - // But in the case where the channel is entering idle, we need to - // explicitly close the transports here. Instead of distinguishing - // between these two cases, it is simpler to close the transport - // unconditionally here. - curTr.Close(err) - } - } -} - -func (ac *addrConn) getState() connectivity.State { - ac.mu.Lock() - defer ac.mu.Unlock() - return ac.state -} - -func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric { - ac.mu.Lock() - addr := ac.curAddr.Addr - ac.mu.Unlock() - return &channelz.ChannelInternalMetric{ - State: ac.getState(), - Target: addr, - CallsStarted: atomic.LoadInt64(&ac.czData.callsStarted), - CallsSucceeded: atomic.LoadInt64(&ac.czData.callsSucceeded), - CallsFailed: atomic.LoadInt64(&ac.czData.callsFailed), - LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)), - } -} - -func (ac *addrConn) incrCallsStarted() { - atomic.AddInt64(&ac.czData.callsStarted, 1) - atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano()) -} - -func (ac *addrConn) incrCallsSucceeded() { - atomic.AddInt64(&ac.czData.callsSucceeded, 1) -} - -func (ac *addrConn) incrCallsFailed() { - atomic.AddInt64(&ac.czData.callsFailed, 1) -} - -type retryThrottler struct { - max float64 - thresh float64 - ratio float64 - - mu sync.Mutex - tokens float64 // TODO(dfawley): replace with atomic and remove lock. -} - -// throttle subtracts a retry token from the pool and returns whether a retry -// should be throttled (disallowed) based upon the retry throttling policy in -// the service config. -func (rt *retryThrottler) throttle() bool { - if rt == nil { - return false - } - rt.mu.Lock() - defer rt.mu.Unlock() - rt.tokens-- - if rt.tokens < 0 { - rt.tokens = 0 - } - return rt.tokens <= rt.thresh -} - -func (rt *retryThrottler) successfulRPC() { - if rt == nil { - return - } - rt.mu.Lock() - defer rt.mu.Unlock() - rt.tokens += rt.ratio - if rt.tokens > rt.max { - rt.tokens = rt.max - } -} - -type channelzChannel struct { - cc *ClientConn -} - -func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { - return c.cc.channelzMetric() -} - -// ErrClientConnTimeout indicates that the ClientConn cannot establish the -// underlying connections within the specified timeout. -// -// Deprecated: This error is never returned by grpc and should not be -// referenced by users. -var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") - -// getResolver finds the scheme in the cc's resolvers or the global registry. -// scheme should always be lowercase (typically by virtue of url.Parse() -// performing proper RFC3986 behavior). 
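getResolver, whose body follows, prefers the per-channel builders installed via grpc.WithResolvers and only then falls back to the global registry. A sketch of how a builder lands in that registry; the scheme name and the static address are hypothetical:

package exampleresolver

import "google.golang.org/grpc/resolver"

const scheme = "example" // hypothetical scheme

type builder struct{}

func (*builder) Scheme() string { return scheme }

func (*builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
	r := &staticResolver{cc: cc}
	r.ResolveNow(resolver.ResolveNowOptions{})
	return r, nil
}

// staticResolver reports one fixed address; a real resolver would do I/O.
type staticResolver struct{ cc resolver.ClientConn }

func (r *staticResolver) ResolveNow(resolver.ResolveNowOptions) {
	r.cc.UpdateState(resolver.State{
		Addresses: []resolver.Address{{Addr: "127.0.0.1:50051"}},
	})
}

func (*staticResolver) Close() {}

func init() { resolver.Register(&builder{}) }

Dialing a target such as example:///anything would then select this builder through the registry lookup.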
-func (cc *ClientConn) getResolver(scheme string) resolver.Builder { - for _, rb := range cc.dopts.resolvers { - if scheme == rb.Scheme() { - return rb - } - } - return resolver.Get(scheme) -} - -func (cc *ClientConn) updateConnectionError(err error) { - cc.lceMu.Lock() - cc.lastConnectionError = err - cc.lceMu.Unlock() -} - -func (cc *ClientConn) connectionError() error { - cc.lceMu.Lock() - defer cc.lceMu.Unlock() - return cc.lastConnectionError -} - -// parseTargetAndFindResolver parses the user's dial target and stores the -// parsed target in `cc.parsedTarget`. -// -// The resolver to use is determined based on the scheme in the parsed target -// and the same is stored in `cc.resolverBuilder`. -// -// Doesn't grab cc.mu as this method is expected to be called only at Dial time. -func (cc *ClientConn) parseTargetAndFindResolver() error { - channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) - - var rb resolver.Builder - parsedTarget, err := parseTarget(cc.target) - if err != nil { - channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) - } else { - channelz.Infof(logger, cc.channelzID, "parsed dial target is: %#v", parsedTarget) - rb = cc.getResolver(parsedTarget.URL.Scheme) - if rb != nil { - cc.parsedTarget = parsedTarget - cc.resolverBuilder = rb - return nil - } - } - - // We are here because the user's dial target did not contain a scheme or - // specified an unregistered scheme. We should fallback to the default - // scheme, except when a custom dialer is specified in which case, we should - // always use passthrough scheme. - defScheme := resolver.GetDefaultScheme() - channelz.Infof(logger, cc.channelzID, "fallback to scheme %q", defScheme) - canonicalTarget := defScheme + ":///" + cc.target - - parsedTarget, err = parseTarget(canonicalTarget) - if err != nil { - channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) - return err - } - channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) - rb = cc.getResolver(parsedTarget.URL.Scheme) - if rb == nil { - return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) - } - cc.parsedTarget = parsedTarget - cc.resolverBuilder = rb - return nil -} - -// parseTarget uses RFC 3986 semantics to parse the given target into a -// resolver.Target struct containing url. Query params are stripped from the -// endpoint. -func parseTarget(target string) (resolver.Target, error) { - u, err := url.Parse(target) - if err != nil { - return resolver.Target{}, err - } - - return resolver.Target{URL: *u}, nil -} - -func encodeAuthority(authority string) string { - const upperhex = "0123456789ABCDEF" - - // Return for characters that must be escaped as per - // Valid chars are mentioned here: - // https://datatracker.ietf.org/doc/html/rfc3986#section-3.2 - shouldEscape := func(c byte) bool { - // Alphanum are always allowed. - if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { - return false - } - switch c { - case '-', '_', '.', '~': // Unreserved characters - return false - case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters - return false - case ':', '[', ']', '@': // Authority related delimeters - return false - } - // Everything else must be escaped. 
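parseTarget above defers entirely to url.Parse, so dial targets behave like RFC 3986 URIs. What the two paths in parseTargetAndFindResolver see, assuming stock net/url behavior:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Explicit scheme: the builder registered for "dns" is looked up.
	u, _ := url.Parse("dns:///example.com:443")
	fmt.Println(u.Scheme, u.Path) // dns /example.com:443

	// The fallback path synthesizes defaultScheme + ":///" + target when
	// the original target has no scheme or an unregistered one.
	u, _ = url.Parse("passthrough:///localhost:50051")
	fmt.Println(u.Scheme, u.Path) // passthrough /localhost:50051
}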
- return true - } - - hexCount := 0 - for i := 0; i < len(authority); i++ { - c := authority[i] - if shouldEscape(c) { - hexCount++ - } - } - - if hexCount == 0 { - return authority - } - - required := len(authority) + 2*hexCount - t := make([]byte, required) - - j := 0 - // This logic is a barebones version of escape in the go net/url library. - for i := 0; i < len(authority); i++ { - switch c := authority[i]; { - case shouldEscape(c): - t[j] = '%' - t[j+1] = upperhex[c>>4] - t[j+2] = upperhex[c&15] - j += 3 - default: - t[j] = authority[i] - j++ - } - } - return string(t) -} - -// Determine channel authority. The order of precedence is as follows: -// - user specified authority override using `WithAuthority` dial option -// - creds' notion of server name for the authentication handshake -// - endpoint from dial target of the form "scheme://[authority]/endpoint" -// -// Stores the determined authority in `cc.authority`. -// -// Returns a non-nil error if the authority returned by the transport -// credentials do not match the authority configured through the dial option. -// -// Doesn't grab cc.mu as this method is expected to be called only at Dial time. -func (cc *ClientConn) determineAuthority() error { - dopts := cc.dopts - // Historically, we had two options for users to specify the serverName or - // authority for a channel. One was through the transport credentials - // (either in its constructor, or through the OverrideServerName() method). - // The other option (for cases where WithInsecure() dial option was used) - // was to use the WithAuthority() dial option. - // - // A few things have changed since: - // - `insecure` package with an implementation of the `TransportCredentials` - // interface for the insecure case - // - WithAuthority() dial option support for secure credentials - authorityFromCreds := "" - if creds := dopts.copts.TransportCredentials; creds != nil && creds.Info().ServerName != "" { - authorityFromCreds = creds.Info().ServerName - } - authorityFromDialOption := dopts.authority - if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption { - return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) - } - - endpoint := cc.parsedTarget.Endpoint() - if authorityFromDialOption != "" { - cc.authority = authorityFromDialOption - } else if authorityFromCreds != "" { - cc.authority = authorityFromCreds - } else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok { - cc.authority = auth.OverrideAuthority(cc.parsedTarget) - } else if strings.HasPrefix(endpoint, ":") { - cc.authority = "localhost" + endpoint - } else { - cc.authority = encodeAuthority(endpoint) - } - channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) - return nil -} diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go deleted file mode 100644 index 411e3dfd47..0000000000 --- a/vendor/google.golang.org/grpc/codec.go +++ /dev/null @@ -1,50 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "google.golang.org/grpc/encoding" - _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" -) - -// baseCodec contains the functionality of both Codec and encoding.Codec, but -// omits the name/string, which vary between the two and are not needed for -// anything besides the registry in the encoding package. -type baseCodec interface { - Marshal(v any) ([]byte, error) - Unmarshal(data []byte, v any) error -} - -var _ baseCodec = Codec(nil) -var _ baseCodec = encoding.Codec(nil) - -// Codec defines the interface gRPC uses to encode and decode messages. -// Note that implementations of this interface must be thread safe; -// a Codec's methods can be called from concurrent goroutines. -// -// Deprecated: use encoding.Codec instead. -type Codec interface { - // Marshal returns the wire format of v. - Marshal(v any) ([]byte, error) - // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v any) error - // String returns the name of the Codec implementation. This is unused by - // gRPC. - String() string -} diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh deleted file mode 100644 index 4cdc6ba7c0..0000000000 --- a/vendor/google.golang.org/grpc/codegen.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash - -# This script serves as an example to demonstrate how to generate the gRPC-Go -# interface and the related messages from .proto file. -# -# It assumes the installation of i) Google proto buffer compiler at -# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen -# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have -# not, please install them first. -# -# We recommend running this script at $GOPATH/src. -# -# If this is not what you need, feel free to make your own scripts. Again, this -# script is for demonstration purpose. -# -proto=$1 -protoc --go_out=plugins=grpc:. $proto diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go deleted file mode 100644 index 934fac2b09..0000000000 --- a/vendor/google.golang.org/grpc/codes/code_string.go +++ /dev/null @@ -1,111 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
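The deprecated Codec in the deleted codec.go was superseded by encoding.Codec, which adds a Name used for content-subtype negotiation. A hypothetical JSON codec registered through the encoding package, purely as an illustration:

package jsoncodec

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
)

// jsonCodec marshals messages with encoding/json instead of protobuf.
type jsonCodec struct{}

func (jsonCodec) Marshal(v any) ([]byte, error)      { return json.Marshal(v) }
func (jsonCodec) Unmarshal(data []byte, v any) error { return json.Unmarshal(data, v) }
func (jsonCodec) Name() string                       { return "json" }

// Registering makes the codec selectable by name.
func init() { encoding.RegisterCodec(jsonCodec{}) }

A client could then opt in per call with grpc.CallContentSubtype("json"), assuming the server registers the same codec.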
- * - */ - -package codes - -import ( - "strconv" - - "google.golang.org/grpc/internal" -) - -func init() { - internal.CanonicalString = canonicalString -} - -func (c Code) String() string { - switch c { - case OK: - return "OK" - case Canceled: - return "Canceled" - case Unknown: - return "Unknown" - case InvalidArgument: - return "InvalidArgument" - case DeadlineExceeded: - return "DeadlineExceeded" - case NotFound: - return "NotFound" - case AlreadyExists: - return "AlreadyExists" - case PermissionDenied: - return "PermissionDenied" - case ResourceExhausted: - return "ResourceExhausted" - case FailedPrecondition: - return "FailedPrecondition" - case Aborted: - return "Aborted" - case OutOfRange: - return "OutOfRange" - case Unimplemented: - return "Unimplemented" - case Internal: - return "Internal" - case Unavailable: - return "Unavailable" - case DataLoss: - return "DataLoss" - case Unauthenticated: - return "Unauthenticated" - default: - return "Code(" + strconv.FormatInt(int64(c), 10) + ")" - } -} - -func canonicalString(c Code) string { - switch c { - case OK: - return "OK" - case Canceled: - return "CANCELLED" - case Unknown: - return "UNKNOWN" - case InvalidArgument: - return "INVALID_ARGUMENT" - case DeadlineExceeded: - return "DEADLINE_EXCEEDED" - case NotFound: - return "NOT_FOUND" - case AlreadyExists: - return "ALREADY_EXISTS" - case PermissionDenied: - return "PERMISSION_DENIED" - case ResourceExhausted: - return "RESOURCE_EXHAUSTED" - case FailedPrecondition: - return "FAILED_PRECONDITION" - case Aborted: - return "ABORTED" - case OutOfRange: - return "OUT_OF_RANGE" - case Unimplemented: - return "UNIMPLEMENTED" - case Internal: - return "INTERNAL" - case Unavailable: - return "UNAVAILABLE" - case DataLoss: - return "DATA_LOSS" - case Unauthenticated: - return "UNAUTHENTICATED" - default: - return "CODE(" + strconv.FormatInt(int64(c), 10) + ")" - } -} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go deleted file mode 100644 index 08476ad1fe..0000000000 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ /dev/null @@ -1,250 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package codes defines the canonical error codes used by gRPC. It is -// consistent across various languages. -package codes // import "google.golang.org/grpc/codes" - -import ( - "fmt" - "strconv" -) - -// A Code is a status code defined according to the [gRPC documentation]. -// -// Only the codes defined as consts in this package are valid codes. Do not use -// other code values. Behavior of other codes is implementation-specific and -// interoperability between implementations is not guaranteed. -// -// [gRPC documentation]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md -type Code uint32 - -const ( - // OK is returned on success. - OK Code = 0 - - // Canceled indicates the operation was canceled (typically by the caller). 
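String above is what fmt and log output print; canonicalString produces the upper-case snake form used in JSON service configs. A typical round trip through the status package:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	err := status.Error(codes.NotFound, "user 42 not found")

	fmt.Println(status.Code(err)) // NotFound (via Code.String)

	st, _ := status.FromError(err)
	fmt.Println(st.Code(), st.Message()) // NotFound user 42 not found
}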
- // - // The gRPC framework will generate this error code when cancellation - // is requested. - Canceled Code = 1 - - // Unknown error. An example of where this error may be returned is - // if a Status value received from another address space belongs to - // an error-space that is not known in this address space. Also - // errors raised by APIs that do not return enough error information - // may be converted to this error. - // - // The gRPC framework will generate this error code in the above two - // mentioned cases. - Unknown Code = 2 - - // InvalidArgument indicates client specified an invalid argument. - // Note that this differs from FailedPrecondition. It indicates arguments - // that are problematic regardless of the state of the system - // (e.g., a malformed file name). - // - // This error code will not be generated by the gRPC framework. - InvalidArgument Code = 3 - - // DeadlineExceeded means operation expired before completion. - // For operations that change the state of the system, this error may be - // returned even if the operation has completed successfully. For - // example, a successful response from a server could have been delayed - // long enough for the deadline to expire. - // - // The gRPC framework will generate this error code when the deadline is - // exceeded. - DeadlineExceeded Code = 4 - - // NotFound means some requested entity (e.g., file or directory) was - // not found. - // - // This error code will not be generated by the gRPC framework. - NotFound Code = 5 - - // AlreadyExists means an attempt to create an entity failed because one - // already exists. - // - // This error code will not be generated by the gRPC framework. - AlreadyExists Code = 6 - - // PermissionDenied indicates the caller does not have permission to - // execute the specified operation. It must not be used for rejections - // caused by exhausting some resource (use ResourceExhausted - // instead for those errors). It must not be - // used if the caller cannot be identified (use Unauthenticated - // instead for those errors). - // - // This error code will not be generated by the gRPC core framework, - // but expect authentication middleware to use it. - PermissionDenied Code = 7 - - // ResourceExhausted indicates some resource has been exhausted, perhaps - // a per-user quota, or perhaps the entire file system is out of space. - // - // This error code will be generated by the gRPC framework in - // out-of-memory and server overload situations, or when a message is - // larger than the configured maximum size. - ResourceExhausted Code = 8 - - // FailedPrecondition indicates operation was rejected because the - // system is not in a state required for the operation's execution. - // For example, directory to be deleted may be non-empty, an rmdir - // operation is applied to a non-directory, etc. - // - // A litmus test that may help a service implementor in deciding - // between FailedPrecondition, Aborted, and Unavailable: - // (a) Use Unavailable if the client can retry just the failing call. - // (b) Use Aborted if the client should retry at a higher-level - // (e.g., restarting a read-modify-write sequence). - // (c) Use FailedPrecondition if the client should not retry until - // the system state has been explicitly fixed. E.g., if an "rmdir" - // fails because the directory is non-empty, FailedPrecondition - // should be returned since the client should not retry unless - // they have first fixed up the directory by deleting files from it. 
- // (d) Use FailedPrecondition if the client performs conditional - // REST Get/Update/Delete on a resource and the resource on the - // server does not match the condition. E.g., conflicting - // read-modify-write on the same resource. - // - // This error code will not be generated by the gRPC framework. - FailedPrecondition Code = 9 - - // Aborted indicates the operation was aborted, typically due to a - // concurrency issue like sequencer check failures, transaction aborts, - // etc. - // - // See litmus test above for deciding between FailedPrecondition, - // Aborted, and Unavailable. - // - // This error code will not be generated by the gRPC framework. - Aborted Code = 10 - - // OutOfRange means operation was attempted past the valid range. - // E.g., seeking or reading past end of file. - // - // Unlike InvalidArgument, this error indicates a problem that may - // be fixed if the system state changes. For example, a 32-bit file - // system will generate InvalidArgument if asked to read at an - // offset that is not in the range [0,2^32-1], but it will generate - // OutOfRange if asked to read from an offset past the current - // file size. - // - // There is a fair bit of overlap between FailedPrecondition and - // OutOfRange. We recommend using OutOfRange (the more specific - // error) when it applies so that callers who are iterating through - // a space can easily look for an OutOfRange error to detect when - // they are done. - // - // This error code will not be generated by the gRPC framework. - OutOfRange Code = 11 - - // Unimplemented indicates operation is not implemented or not - // supported/enabled in this service. - // - // This error code will be generated by the gRPC framework. Most - // commonly, you will see this error code when a method implementation - // is missing on the server. It can also be generated for unknown - // compression algorithms or a disagreement as to whether an RPC should - // be streaming. - Unimplemented Code = 12 - - // Internal errors. Means some invariants expected by underlying - // system has been broken. If you see one of these errors, - // something is very broken. - // - // This error code will be generated by the gRPC framework in several - // internal error conditions. - Internal Code = 13 - - // Unavailable indicates the service is currently unavailable. - // This is a most likely a transient condition and may be corrected - // by retrying with a backoff. Note that it is not always safe to retry - // non-idempotent operations. - // - // See litmus test above for deciding between FailedPrecondition, - // Aborted, and Unavailable. - // - // This error code will be generated by the gRPC framework during - // abrupt shutdown of a server process or network connection. - Unavailable Code = 14 - - // DataLoss indicates unrecoverable data loss or corruption. - // - // This error code will not be generated by the gRPC framework. - DataLoss Code = 15 - - // Unauthenticated indicates the request does not have valid - // authentication credentials for the operation. - // - // The gRPC framework will generate this error code when the - // authentication metadata is invalid or a Credentials callback fails, - // but also expect authentication middleware to generate it. 
- Unauthenticated Code = 16 - - _maxCode = 17 -) - -var strToCode = map[string]Code{ - `"OK"`: OK, - `"CANCELLED"`:/* [sic] */ Canceled, - `"UNKNOWN"`: Unknown, - `"INVALID_ARGUMENT"`: InvalidArgument, - `"DEADLINE_EXCEEDED"`: DeadlineExceeded, - `"NOT_FOUND"`: NotFound, - `"ALREADY_EXISTS"`: AlreadyExists, - `"PERMISSION_DENIED"`: PermissionDenied, - `"RESOURCE_EXHAUSTED"`: ResourceExhausted, - `"FAILED_PRECONDITION"`: FailedPrecondition, - `"ABORTED"`: Aborted, - `"OUT_OF_RANGE"`: OutOfRange, - `"UNIMPLEMENTED"`: Unimplemented, - `"INTERNAL"`: Internal, - `"UNAVAILABLE"`: Unavailable, - `"DATA_LOSS"`: DataLoss, - `"UNAUTHENTICATED"`: Unauthenticated, -} - -// UnmarshalJSON unmarshals b into the Code. -func (c *Code) UnmarshalJSON(b []byte) error { - // From json.Unmarshaler: By convention, to approximate the behavior of - // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as - // a no-op. - if string(b) == "null" { - return nil - } - if c == nil { - return fmt.Errorf("nil receiver passed to UnmarshalJSON") - } - - if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { - if ci >= _maxCode { - return fmt.Errorf("invalid code: %q", ci) - } - - *c = Code(ci) - return nil - } - - if jc, ok := strToCode[string(b)]; ok { - *c = jc - return nil - } - return fmt.Errorf("invalid code: %q", string(b)) -} diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go deleted file mode 100644 index 4a89926422..0000000000 --- a/vendor/google.golang.org/grpc/connectivity/connectivity.go +++ /dev/null @@ -1,94 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package connectivity defines connectivity semantics. -// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. -package connectivity - -import ( - "google.golang.org/grpc/grpclog" -) - -var logger = grpclog.Component("core") - -// State indicates the state of connectivity. -// It can be the state of a ClientConn or SubConn. -type State int - -func (s State) String() string { - switch s { - case Idle: - return "IDLE" - case Connecting: - return "CONNECTING" - case Ready: - return "READY" - case TransientFailure: - return "TRANSIENT_FAILURE" - case Shutdown: - return "SHUTDOWN" - default: - logger.Errorf("unknown connectivity state: %d", s) - return "INVALID_STATE" - } -} - -const ( - // Idle indicates the ClientConn is idle. - Idle State = iota - // Connecting indicates the ClientConn is connecting. - Connecting - // Ready indicates the ClientConn is ready for work. - Ready - // TransientFailure indicates the ClientConn has seen a failure but expects to recover. - TransientFailure - // Shutdown indicates the ClientConn has started shutting down. - Shutdown -) - -// ServingMode indicates the current mode of operation of the server. -// -// Only xDS enabled gRPC servers currently report their serving mode. 
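Code.UnmarshalJSON above accepts both the quoted canonical names and bare numbers, which is why service-config JSON can spell status codes either way. For instance:

package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/grpc/codes"
)

func main() {
	var c codes.Code

	_ = json.Unmarshal([]byte(`"UNAVAILABLE"`), &c)
	fmt.Println(c) // Unavailable

	_ = json.Unmarshal([]byte(`14`), &c) // the numeric form is accepted too
	fmt.Println(c) // Unavailable
}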
-type ServingMode int - -const ( - // ServingModeStarting indicates that the server is starting up. - ServingModeStarting ServingMode = iota - // ServingModeServing indicates that the server contains all required - // configuration and is serving RPCs. - ServingModeServing - // ServingModeNotServing indicates that the server is not accepting new - // connections. Existing connections will be closed gracefully, allowing - // in-progress RPCs to complete. A server enters this mode when it does not - // contain the required configuration to serve RPCs. - ServingModeNotServing -) - -func (s ServingMode) String() string { - switch s { - case ServingModeStarting: - return "STARTING" - case ServingModeServing: - return "SERVING" - case ServingModeNotServing: - return "NOT_SERVING" - default: - logger.Errorf("unknown serving mode: %d", s) - return "INVALID_MODE" - } -} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go deleted file mode 100644 index 5feac3aa0e..0000000000 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ /dev/null @@ -1,291 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package credentials implements various credentials supported by gRPC library, -// which encapsulate all the state needed by a client to authenticate with a -// server and make various assertions, e.g., about the client's identity, role, -// or whether it is authorized to make a particular call. -package credentials // import "google.golang.org/grpc/credentials" - -import ( - "context" - "errors" - "fmt" - "net" - - "github.com/golang/protobuf/proto" - "google.golang.org/grpc/attributes" - icredentials "google.golang.org/grpc/internal/credentials" -) - -// PerRPCCredentials defines the common interface for the credentials which need to -// attach security information to every RPC (e.g., oauth2). -type PerRPCCredentials interface { - // GetRequestMetadata gets the current request metadata, refreshing tokens - // if required. This should be called by the transport layer on each - // request, and the data should be populated in headers or other - // context. If a status code is returned, it will be used as the status for - // the RPC (restricted to an allowable set of codes as defined by gRFC - // A54). uri is the URI of the entry point for the request. When supported - // by the underlying implementation, ctx can be used for timeout and - // cancellation. Additionally, RequestInfo data will be available via ctx - // to this call. TODO(zhaoq): Define the set of the qualified keys instead - // of leaving it as an arbitrary string. - GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) - // RequireTransportSecurity indicates whether the credentials requires - // transport security. - RequireTransportSecurity() bool -} - -// SecurityLevel defines the protection level on an established connection. 
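PerRPCCredentials above is the hook implemented by the oauth helpers. A hypothetical static-token implementation; the header name and token handling are illustrative only:

package tokenauth

import "context"

// tokenAuth attaches a static bearer token to every RPC. A production
// implementation would refresh tokens and honor ctx deadlines.
type tokenAuth struct{ token string }

func (t tokenAuth) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + t.token}, nil
}

// RequireTransportSecurity makes gRPC refuse to send the token over an
// insecure connection.
func (t tokenAuth) RequireTransportSecurity() bool { return true }

It would be wired in with grpc.WithPerRPCCredentials(tokenAuth{token: "..."}).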
-// -// This API is experimental. -type SecurityLevel int - -const ( - // InvalidSecurityLevel indicates an invalid security level. - // The zero SecurityLevel value is invalid for backward compatibility. - InvalidSecurityLevel SecurityLevel = iota - // NoSecurity indicates a connection is insecure. - NoSecurity - // IntegrityOnly indicates a connection only provides integrity protection. - IntegrityOnly - // PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection. - PrivacyAndIntegrity -) - -// String returns SecurityLevel in a string format. -func (s SecurityLevel) String() string { - switch s { - case NoSecurity: - return "NoSecurity" - case IntegrityOnly: - return "IntegrityOnly" - case PrivacyAndIntegrity: - return "PrivacyAndIntegrity" - } - return fmt.Sprintf("invalid SecurityLevel: %v", int(s)) -} - -// CommonAuthInfo contains authenticated information common to AuthInfo implementations. -// It should be embedded in a struct implementing AuthInfo to provide additional information -// about the credentials. -// -// This API is experimental. -type CommonAuthInfo struct { - SecurityLevel SecurityLevel -} - -// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct. -func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo { - return c -} - -// ProtocolInfo provides information regarding the gRPC wire protocol version, -// security protocol, security protocol version in use, server name, etc. -type ProtocolInfo struct { - // ProtocolVersion is the gRPC wire protocol version. - ProtocolVersion string - // SecurityProtocol is the security protocol in use. - SecurityProtocol string - // SecurityVersion is the security protocol version. It is a static version string from the - // credentials, not a value that reflects per-connection protocol negotiation. To retrieve - // details about the credentials used for a connection, use the Peer's AuthInfo field instead. - // - // Deprecated: please use Peer.AuthInfo. - SecurityVersion string - // ServerName is the user-configured server name. - ServerName string -} - -// AuthInfo defines the common interface for the auth information the users are interested in. -// A struct that implements AuthInfo should embed CommonAuthInfo by including additional -// information about the credentials in it. -type AuthInfo interface { - AuthType() string -} - -// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC -// and the caller should not close rawConn. -var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") - -// TransportCredentials defines the common interface for all the live gRPC wire -// protocols and supported transport security protocols (e.g., TLS, SSL). -type TransportCredentials interface { - // ClientHandshake does the authentication handshake specified by the - // corresponding authentication protocol on rawConn for clients. It returns - // the authenticated connection and the corresponding auth information - // about the connection. The auth information should embed CommonAuthInfo - // to return additional information about the credentials. Implementations - // must use the provided context to implement timely cancellation. gRPC - // will try to reconnect if the error returned is a temporary error - // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). If the - // returned error is a wrapper error, implementations should make sure that - // the error implements Temporary() to have the correct retry behaviors. 
- // Additionally, ClientHandshakeInfo data will be available via the context - // passed to this call. - // - // The second argument to this method is the `:authority` header value used - // while creating new streams on this connection after authentication - // succeeds. Implementations must use this as the server name during the - // authentication handshake. - // - // If the returned net.Conn is closed, it MUST close the net.Conn provided. - ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) - // ServerHandshake does the authentication handshake for servers. It returns - // the authenticated connection and the corresponding auth information about - // the connection. The auth information should embed CommonAuthInfo to return additional information - // about the credentials. - // - // If the returned net.Conn is closed, it MUST close the net.Conn provided. - ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) - // Info provides the ProtocolInfo of this TransportCredentials. - Info() ProtocolInfo - // Clone makes a copy of this TransportCredentials. - Clone() TransportCredentials - // OverrideServerName specifies the value used for the following: - // - verifying the hostname on the returned certificates - // - as SNI in the client's handshake to support virtual hosting - // - as the value for `:authority` header at stream creation time - // - // Deprecated: use grpc.WithAuthority instead. Will be supported - // throughout 1.x. - OverrideServerName(string) error -} - -// Bundle is a combination of TransportCredentials and PerRPCCredentials. -// -// It also contains a mode switching method, so it can be used as a combination -// of different credential policies. -// -// Bundle cannot be used together with individual TransportCredentials. -// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials. -// -// This API is experimental. -type Bundle interface { - // TransportCredentials returns the transport credentials from the Bundle. - // - // Implementations must return non-nil transport credentials. If transport - // security is not needed by the Bundle, implementations may choose to - // return insecure.NewCredentials(). - TransportCredentials() TransportCredentials - - // PerRPCCredentials returns the per-RPC credentials from the Bundle. - // - // May be nil if per-RPC credentials are not needed. - PerRPCCredentials() PerRPCCredentials - - // NewWithMode should make a copy of Bundle, and switch mode. Modifying the - // existing Bundle may cause races. - // - // NewWithMode returns nil if the requested mode is not supported. - NewWithMode(mode string) (Bundle, error) -} - -// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls. -// -// This API is experimental. -type RequestInfo struct { - // The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method") - Method string - // AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake) - AuthInfo AuthInfo -} - -// RequestInfoFromContext extracts the RequestInfo from the context if it exists. -// -// This API is experimental. -func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { - ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo) - return ri, ok -} - -// ClientHandshakeInfo holds data to be passed to ClientHandshake. 
This makes -// it possible to pass arbitrary data to the handshaker from gRPC, resolver, -// balancer etc. Individual credential implementations control the actual -// format of the data that they are willing to receive. -// -// This API is experimental. -type ClientHandshakeInfo struct { - // Attributes contains the attributes for the address. It could be provided - // by the gRPC, resolver, balancer etc. - Attributes *attributes.Attributes -} - -// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored -// in ctx. -// -// This API is experimental. -func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo { - chi, _ := icredentials.ClientHandshakeInfoFromContext(ctx).(ClientHandshakeInfo) - return chi -} - -// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one. -// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method -// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility. -// -// This API is experimental. -func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error { - type internalInfo interface { - GetCommonAuthInfo() CommonAuthInfo - } - if ai == nil { - return errors.New("AuthInfo is nil") - } - if ci, ok := ai.(internalInfo); ok { - // CommonAuthInfo.SecurityLevel has an invalid value. - if ci.GetCommonAuthInfo().SecurityLevel == InvalidSecurityLevel { - return nil - } - if ci.GetCommonAuthInfo().SecurityLevel < level { - return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel) - } - } - // The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method. - return nil -} - -// ChannelzSecurityInfo defines the interface that security protocols should implement -// in order to provide security info to channelz. -// -// This API is experimental. -type ChannelzSecurityInfo interface { - GetSecurityValue() ChannelzSecurityValue -} - -// ChannelzSecurityValue defines the interface that GetSecurityValue() return value -// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue -// and *OtherChannelzSecurityValue. -// -// This API is experimental. -type ChannelzSecurityValue interface { - isChannelzSecurityValue() -} - -// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return -// from GetSecurityValue(), which contains protocol specific security info. Note -// the Value field will be sent to users of channelz requesting channel info, and -// thus sensitive info should better be avoided. -// -// This API is experimental. -type OtherChannelzSecurityValue struct { - ChannelzSecurityValue - Name string - Value proto.Message -} diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go deleted file mode 100644 index 82bee1443b..0000000000 --- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go +++ /dev/null @@ -1,98 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
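CheckSecurityLevel above pairs naturally with RequestInfoFromContext inside a PerRPCCredentials implementation, refusing to attach secrets on connections below a required level. A sketch with hypothetical names:

package securetoken

import (
	"context"

	"google.golang.org/grpc/credentials"
)

// guardedCreds only releases its token when the connection provides both
// privacy and integrity protection.
type guardedCreds struct{ token string }

func (g guardedCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	if ri, ok := credentials.RequestInfoFromContext(ctx); ok {
		if err := credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil {
			return nil, err
		}
	}
	return map[string]string{"authorization": "Bearer " + g.token}, nil
}

func (g guardedCreds) RequireTransportSecurity() bool { return true }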
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package insecure provides an implementation of the -// credentials.TransportCredentials interface which disables transport security. -package insecure - -import ( - "context" - "net" - - "google.golang.org/grpc/credentials" -) - -// NewCredentials returns a credentials which disables transport security. -// -// Note that using this credentials with per-RPC credentials which require -// transport security is incompatible and will cause grpc.Dial() to fail. -func NewCredentials() credentials.TransportCredentials { - return insecureTC{} -} - -// insecureTC implements the insecure transport credentials. The handshake -// methods simply return the passed in net.Conn and set the security level to -// NoSecurity. -type insecureTC struct{} - -func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { - return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil -} - -func (insecureTC) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) { - return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil -} - -func (insecureTC) Info() credentials.ProtocolInfo { - return credentials.ProtocolInfo{SecurityProtocol: "insecure"} -} - -func (insecureTC) Clone() credentials.TransportCredentials { - return insecureTC{} -} - -func (insecureTC) OverrideServerName(string) error { - return nil -} - -// info contains the auth information for an insecure connection. -// It implements the AuthInfo interface. -type info struct { - credentials.CommonAuthInfo -} - -// AuthType returns the type of info as a string. -func (info) AuthType() string { - return "insecure" -} - -// insecureBundle implements an insecure bundle. -// An insecure bundle provides a thin wrapper around insecureTC to support -// the credentials.Bundle interface. -type insecureBundle struct{} - -// NewBundle returns a bundle with disabled transport security and no per rpc credential. -func NewBundle() credentials.Bundle { - return insecureBundle{} -} - -// NewWithMode returns a new insecure Bundle. The mode is ignored. -func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) { - return insecureBundle{}, nil -} - -// PerRPCCredentials returns an nil implementation as insecure -// bundle does not support a per rpc credential. -func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials { - return nil -} - -// TransportCredentials returns the underlying insecure transport credential. -func (insecureBundle) TransportCredentials() credentials.TransportCredentials { - return NewCredentials() -} diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go deleted file mode 100644 index 5dafd34edf..0000000000 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ /dev/null @@ -1,251 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
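The insecure package deleted above is the supported replacement for the deprecated grpc.WithInsecure(). A minimal plaintext dial, with a placeholder address:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// No TLS handshake occurs; the connection reports SecurityLevel
	// NoSecurity, so per-RPC credentials that require transport security
	// will cause Dial to fail, per the package comment above.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}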
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package credentials - -import ( - "context" - "crypto/tls" - "crypto/x509" - "fmt" - "net" - "net/url" - "os" - - credinternal "google.golang.org/grpc/internal/credentials" -) - -// TLSInfo contains the auth information for a TLS authenticated connection. -// It implements the AuthInfo interface. -type TLSInfo struct { - State tls.ConnectionState - CommonAuthInfo - // This API is experimental. - SPIFFEID *url.URL -} - -// AuthType returns the type of TLSInfo as a string. -func (t TLSInfo) AuthType() string { - return "tls" -} - -// cipherSuiteLookup returns the string version of a TLS cipher suite ID. -func cipherSuiteLookup(cipherSuiteID uint16) string { - for _, s := range tls.CipherSuites() { - if s.ID == cipherSuiteID { - return s.Name - } - } - for _, s := range tls.InsecureCipherSuites() { - if s.ID == cipherSuiteID { - return s.Name - } - } - return fmt.Sprintf("unknown ID: %v", cipherSuiteID) -} - -// GetSecurityValue returns security info requested by channelz. -func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { - v := &TLSChannelzSecurityValue{ - StandardName: cipherSuiteLookup(t.State.CipherSuite), - } - // Currently there's no way to get LocalCertificate info from tls package. - if len(t.State.PeerCertificates) > 0 { - v.RemoteCertificate = t.State.PeerCertificates[0].Raw - } - return v -} - -// tlsCreds is the credentials required for authenticating a connection using TLS. -type tlsCreds struct { - // TLS configuration - config *tls.Config -} - -func (c tlsCreds) Info() ProtocolInfo { - return ProtocolInfo{ - SecurityProtocol: "tls", - SecurityVersion: "1.2", - ServerName: c.config.ServerName, - } -} - -func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { - // use local cfg to avoid clobbering ServerName if using multiple endpoints - cfg := credinternal.CloneTLSConfig(c.config) - if cfg.ServerName == "" { - serverName, _, err := net.SplitHostPort(authority) - if err != nil { - // If the authority had no host port or if the authority cannot be parsed, use it as-is. 
- serverName = authority - } - cfg.ServerName = serverName - } - conn := tls.Client(rawConn, cfg) - errChannel := make(chan error, 1) - go func() { - errChannel <- conn.Handshake() - close(errChannel) - }() - select { - case err := <-errChannel: - if err != nil { - conn.Close() - return nil, nil, err - } - case <-ctx.Done(): - conn.Close() - return nil, nil, ctx.Err() - } - tlsInfo := TLSInfo{ - State: conn.ConnectionState(), - CommonAuthInfo: CommonAuthInfo{ - SecurityLevel: PrivacyAndIntegrity, - }, - } - id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) - if id != nil { - tlsInfo.SPIFFEID = id - } - return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil -} - -func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { - conn := tls.Server(rawConn, c.config) - if err := conn.Handshake(); err != nil { - conn.Close() - return nil, nil, err - } - tlsInfo := TLSInfo{ - State: conn.ConnectionState(), - CommonAuthInfo: CommonAuthInfo{ - SecurityLevel: PrivacyAndIntegrity, - }, - } - id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) - if id != nil { - tlsInfo.SPIFFEID = id - } - return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil -} - -func (c *tlsCreds) Clone() TransportCredentials { - return NewTLS(c.config) -} - -func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { - c.config.ServerName = serverNameOverride - return nil -} - -// The following cipher suites are forbidden for use with HTTP/2 by -// https://datatracker.ietf.org/doc/html/rfc7540#appendix-A -var tls12ForbiddenCipherSuites = map[uint16]struct{}{ - tls.TLS_RSA_WITH_AES_128_CBC_SHA: {}, - tls.TLS_RSA_WITH_AES_256_CBC_SHA: {}, - tls.TLS_RSA_WITH_AES_128_GCM_SHA256: {}, - tls.TLS_RSA_WITH_AES_256_GCM_SHA384: {}, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {}, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: {}, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: {}, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: {}, -} - -// NewTLS uses c to construct a TransportCredentials based on TLS. -func NewTLS(c *tls.Config) TransportCredentials { - tc := &tlsCreds{credinternal.CloneTLSConfig(c)} - tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) - // If the user did not configure a MinVersion and did not configure a - // MaxVersion < 1.2, use MinVersion=1.2, which is required by - // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 - if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { - tc.config.MinVersion = tls.VersionTLS12 - } - // If the user did not configure CipherSuites, use all "secure" cipher - // suites reported by the TLS package, but remove some explicitly forbidden - // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A - if tc.config.CipherSuites == nil { - for _, cs := range tls.CipherSuites() { - if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { - tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) - } - } - } - return tc -} - -// NewClientTLSFromCert constructs TLS credentials from the provided root -// certificate authority certificate(s) to validate server connections. If -// certificates to establish the identity of the client need to be included in -// the credentials (eg: for mTLS), use NewTLS instead, where a complete -// tls.Config can be specified. -// serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header -// field) in requests. 
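Both handshake paths above return a TLSInfo carrying the tls.ConnectionState. On the server side, a handler can recover it through the peer package; a sketch, with the CommonName lookup purely illustrative:

package handlers

import (
	"context"

	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/peer"
)

// authSummary shows what a handler sees after ServerHandshake: the peer's
// AuthInfo is a credentials.TLSInfo when tlsCreds performed the handshake.
func authSummary(ctx context.Context) string {
	p, ok := peer.FromContext(ctx)
	if !ok || p.AuthInfo == nil {
		return "no auth info"
	}
	if ti, ok := p.AuthInfo.(credentials.TLSInfo); ok && len(ti.State.PeerCertificates) > 0 {
		return "tls client: " + ti.State.PeerCertificates[0].Subject.CommonName
	}
	return p.AuthInfo.AuthType()
}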
-func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { - return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) -} - -// NewClientTLSFromFile constructs TLS credentials from the provided root -// certificate authority certificate file(s) to validate server connections. If -// certificates to establish the identity of the client need to be included in -// the credentials (eg: for mTLS), use NewTLS instead, where a complete -// tls.Config can be specified. -// serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header -// field) in requests. -func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { - b, err := os.ReadFile(certFile) - if err != nil { - return nil, err - } - cp := x509.NewCertPool() - if !cp.AppendCertsFromPEM(b) { - return nil, fmt.Errorf("credentials: failed to append certificates") - } - return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil -} - -// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. -func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { - return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) -} - -// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key -// file for server. -func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { - cert, err := tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return nil, err - } - return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil -} - -// TLSChannelzSecurityValue defines the struct that TLS protocol should return -// from GetSecurityValue(), containing security info like cipher and certificate used. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type TLSChannelzSecurityValue struct { - ChannelzSecurityValue - StandardName string - LocalCertificate []byte - RemoteCertificate []byte -} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go deleted file mode 100644 index ba24261804..0000000000 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ /dev/null @@ -1,718 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
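The file-based constructors above cover the common one-way TLS case. A typical client wiring, where ca.pem is a placeholder for the CA bundle that signed the server certificate and the empty override keeps the dialed authority:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
	if err != nil {
		log.Fatal(err)
	}
	conn, err := grpc.Dial("greeter.example.com:443",
		grpc.WithTransportCredentials(creds),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}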
- * - */ - -package grpc - -import ( - "context" - "net" - "time" - - "google.golang.org/grpc/backoff" - "google.golang.org/grpc/channelz" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/internal" - internalbackoff "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/binarylog" - "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/stats" -) - -func init() { - internal.AddGlobalDialOptions = func(opt ...DialOption) { - globalDialOptions = append(globalDialOptions, opt...) - } - internal.ClearGlobalDialOptions = func() { - globalDialOptions = nil - } - internal.WithBinaryLogger = withBinaryLogger - internal.JoinDialOptions = newJoinDialOption - internal.DisableGlobalDialOptions = newDisableGlobalDialOptions - internal.WithRecvBufferPool = withRecvBufferPool -} - -// dialOptions configure a Dial call. dialOptions are set by the DialOption -// values passed to Dial. -type dialOptions struct { - unaryInt UnaryClientInterceptor - streamInt StreamClientInterceptor - - chainUnaryInts []UnaryClientInterceptor - chainStreamInts []StreamClientInterceptor - - cp Compressor - dc Decompressor - bs internalbackoff.Strategy - block bool - returnLastError bool - timeout time.Duration - authority string - binaryLogger binarylog.Logger - copts transport.ConnectOptions - callOptions []CallOption - channelzParentID *channelz.Identifier - disableServiceConfig bool - disableRetry bool - disableHealthCheck bool - healthCheckFunc internal.HealthChecker - minConnectTimeout func() time.Duration - defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. - defaultServiceConfigRawJSON *string - resolvers []resolver.Builder - idleTimeout time.Duration - recvBufferPool SharedBufferPool -} - -// DialOption configures how we set up the connection. -type DialOption interface { - apply(*dialOptions) -} - -var globalDialOptions []DialOption - -// EmptyDialOption does not alter the dial configuration. It can be embedded in -// another structure to build custom dial options. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type EmptyDialOption struct{} - -func (EmptyDialOption) apply(*dialOptions) {} - -type disableGlobalDialOptions struct{} - -func (disableGlobalDialOptions) apply(*dialOptions) {} - -// newDisableGlobalDialOptions returns a DialOption that prevents the ClientConn -// from applying the global DialOptions (set via AddGlobalDialOptions). -func newDisableGlobalDialOptions() DialOption { - return &disableGlobalDialOptions{} -} - -// funcDialOption wraps a function that modifies dialOptions into an -// implementation of the DialOption interface. -type funcDialOption struct { - f func(*dialOptions) -} - -func (fdo *funcDialOption) apply(do *dialOptions) { - fdo.f(do) -} - -func newFuncDialOption(f func(*dialOptions)) *funcDialOption { - return &funcDialOption{ - f: f, - } -} - -type joinDialOption struct { - opts []DialOption -} - -func (jdo *joinDialOption) apply(do *dialOptions) { - for _, opt := range jdo.opts { - opt.apply(do) - } -} - -func newJoinDialOption(opts ...DialOption) DialOption { - return &joinDialOption{opts: opts} -} - -// WithSharedWriteBuffer allows reusing per-connection transport write buffer. 
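// A sketch of the EmptyDialOption embedding pattern described above: a
// wrapping library defines an option type that satisfies grpc.DialOption
// (its apply is the promoted no-op) while carrying extra data the library
// itself interprets. traceDialOption and WithTraceSampling are hypothetical.
type traceDialOption struct {
	grpc.EmptyDialOption // promoted no-op apply(*dialOptions)
	sampleRate float64
}

// grpc.Dial ignores this option; the wrapping library can type-assert it
// back out of the option list to recover sampleRate.
func WithTraceSampling(rate float64) grpc.DialOption {
	return traceDialOption{sampleRate: rate}
}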
-// If this option is set to true every connection will release the buffer after -// flushing the data on the wire. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func WithSharedWriteBuffer(val bool) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.SharedWriteBuffer = val - }) -} - -// WithWriteBufferSize determines how much data can be batched before doing a -// write on the wire. The corresponding memory allocation for this buffer will -// be twice the size to keep syscalls low. The default value for this buffer is -// 32KB. -// -// Zero or negative values will disable the write buffer such that each write -// will be on underlying connection. Note: A Send call may not directly -// translate to a write. -func WithWriteBufferSize(s int) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.WriteBufferSize = s - }) -} - -// WithReadBufferSize lets you set the size of read buffer, this determines how -// much data can be read at most for each read syscall. -// -// The default value for this buffer is 32KB. Zero or negative values will -// disable read buffer for a connection so data framer can access the -// underlying conn directly. -func WithReadBufferSize(s int) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.ReadBufferSize = s - }) -} - -// WithInitialWindowSize returns a DialOption which sets the value for initial -// window size on a stream. The lower bound for window size is 64K and any value -// smaller than that will be ignored. -func WithInitialWindowSize(s int32) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.InitialWindowSize = s - }) -} - -// WithInitialConnWindowSize returns a DialOption which sets the value for -// initial window size on a connection. The lower bound for window size is 64K -// and any value smaller than that will be ignored. -func WithInitialConnWindowSize(s int32) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.InitialConnWindowSize = s - }) -} - -// WithMaxMsgSize returns a DialOption which sets the maximum message size the -// client can receive. -// -// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. Will -// be supported throughout 1.x. -func WithMaxMsgSize(s int) DialOption { - return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) -} - -// WithDefaultCallOptions returns a DialOption which sets the default -// CallOptions for calls over the connection. -func WithDefaultCallOptions(cos ...CallOption) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.callOptions = append(o.callOptions, cos...) - }) -} - -// WithCodec returns a DialOption which sets a codec for message marshaling and -// unmarshaling. -// -// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead. Will be -// supported throughout 1.x. -func WithCodec(c Codec) DialOption { - return WithDefaultCallOptions(CallCustomCodec(c)) -} - -// WithCompressor returns a DialOption which sets a Compressor to use for -// message compression. It has lower priority than the compressor set by the -// UseCompressor CallOption. -// -// Deprecated: use UseCompressor instead. Will be supported throughout 1.x. -func WithCompressor(cp Compressor) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.cp = cp - }) -} - -// WithDecompressor returns a DialOption which sets a Decompressor to use for -// incoming message decompression. 
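// Taken together, the buffer and window-size options above tune throughput
// for large messages. A sketch of a dial call combining them; the target and
// sizes are illustrative, and insecure is
// "google.golang.org/grpc/credentials/insecure".
conn, err := grpc.Dial("localhost:50051",
	grpc.WithTransportCredentials(insecure.NewCredentials()),
	grpc.WithWriteBufferSize(64*1024), // batch up to 64KB before each write syscall
	grpc.WithReadBufferSize(64*1024),
	grpc.WithInitialWindowSize(1<<20), // per-stream flow-control window; values below 64K are ignored
	grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(16*1024*1024)),
)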
If incoming response messages are encoded -// using the decompressor's Type(), it will be used. Otherwise, the message -// encoding will be used to look up the compressor registered via -// encoding.RegisterCompressor, which will then be used to decompress the -// message. If no compressor is registered for the encoding, an Unimplemented -// status error will be returned. -// -// Deprecated: use encoding.RegisterCompressor instead. Will be supported -// throughout 1.x. -func WithDecompressor(dc Decompressor) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.dc = dc - }) -} - -// WithConnectParams configures the ClientConn to use the provided ConnectParams -// for creating and maintaining connections to servers. -// -// The backoff configuration specified as part of the ConnectParams overrides -// all defaults specified in -// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider -// using the backoff.DefaultConfig as a base, in cases where you want to -// override only a subset of the backoff configuration. -func WithConnectParams(p ConnectParams) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.bs = internalbackoff.Exponential{Config: p.Backoff} - o.minConnectTimeout = func() time.Duration { - return p.MinConnectTimeout - } - }) -} - -// WithBackoffMaxDelay configures the dialer to use the provided maximum delay -// when backing off after failed connection attempts. -// -// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. -func WithBackoffMaxDelay(md time.Duration) DialOption { - return WithBackoffConfig(BackoffConfig{MaxDelay: md}) -} - -// WithBackoffConfig configures the dialer to use the provided backoff -// parameters after connection failures. -// -// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. -func WithBackoffConfig(b BackoffConfig) DialOption { - bc := backoff.DefaultConfig - bc.MaxDelay = b.MaxDelay - return withBackoff(internalbackoff.Exponential{Config: bc}) -} - -// withBackoff sets the backoff strategy used for connectRetryNum after a failed -// connection attempt. -// -// This can be exported if arbitrary backoff strategies are allowed by gRPC. -func withBackoff(bs internalbackoff.Strategy) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.bs = bs - }) -} - -// WithBlock returns a DialOption which makes callers of Dial block until the -// underlying connection is up. Without this, Dial returns immediately and -// connecting the server happens in background. -// -// Use of this feature is not recommended. For more information, please see: -// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md -func WithBlock() DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.block = true - }) -} - -// WithReturnConnectionError returns a DialOption which makes the client connection -// return a string containing both the last connection error that occurred and -// the context.DeadlineExceeded error. -// Implies WithBlock() -// -// Use of this feature is not recommended. For more information, please see: -// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. 
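// As the WithConnectParams comment above suggests, starting from
// backoff.DefaultConfig ("google.golang.org/grpc/backoff") and overriding a
// subset is the intended pattern. A sketch; target and values are illustrative.
cfg := backoff.DefaultConfig
cfg.MaxDelay = 30 * time.Second
conn, err := grpc.Dial(target, grpc.WithConnectParams(grpc.ConnectParams{
	Backoff:           cfg,
	MinConnectTimeout: 10 * time.Second,
}))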
-func WithReturnConnectionError() DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.block = true - o.returnLastError = true - }) -} - -// WithInsecure returns a DialOption which disables transport security for this -// ClientConn. Under the hood, it uses insecure.NewCredentials(). -// -// Note that using this DialOption with per-RPC credentials (through -// WithCredentialsBundle or WithPerRPCCredentials) which require transport -// security is incompatible and will cause grpc.Dial() to fail. -// -// Deprecated: use WithTransportCredentials and insecure.NewCredentials() -// instead. Will be supported throughout 1.x. -func WithInsecure() DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.TransportCredentials = insecure.NewCredentials() - }) -} - -// WithNoProxy returns a DialOption which disables the use of proxies for this -// ClientConn. This is ignored if WithDialer or WithContextDialer are used. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func WithNoProxy() DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.UseProxy = false - }) -} - -// WithTransportCredentials returns a DialOption which configures a connection -// level security credentials (e.g., TLS/SSL). This should not be used together -// with WithCredentialsBundle. -func WithTransportCredentials(creds credentials.TransportCredentials) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.TransportCredentials = creds - }) -} - -// WithPerRPCCredentials returns a DialOption which sets credentials and places -// auth state on each outbound RPC. -func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) - }) -} - -// WithCredentialsBundle returns a DialOption to set a credentials bundle for -// the ClientConn.WithCreds. This should not be used together with -// WithTransportCredentials. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func WithCredentialsBundle(b credentials.Bundle) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.CredsBundle = b - }) -} - -// WithTimeout returns a DialOption that configures a timeout for dialing a -// ClientConn initially. This is valid if and only if WithBlock() is present. -// -// Deprecated: use DialContext instead of Dial and context.WithTimeout -// instead. Will be supported throughout 1.x. -func WithTimeout(d time.Duration) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.timeout = d - }) -} - -// WithContextDialer returns a DialOption that sets a dialer to create -// connections. If FailOnNonTempDialError() is set to true, and an error is -// returned by f, gRPC checks the error's Temporary() method to decide if it -// should try to reconnect to the network address. -// -// Note: All supported releases of Go (as of December 2023) override the OS -// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive -// with OS defaults for keepalive time and interval, use a net.Dialer that sets -// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket -// option to true from the Control field. For a concrete example of how to do -// this, see internal.NetDialerWithTCPKeepalive(). -// -// For more information, please see [issue 23459] in the Go github repo. 
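// A sketch of the WithContextDialer option documented above, routing
// connections through a custom net.Dialer. Per the note above, a negative
// KeepAlive disables Go's 15s TCP keepalive default; target is assumed.
d := &net.Dialer{Timeout: 5 * time.Second, KeepAlive: -1}
conn, err := grpc.Dial(target,
	grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
		return d.DialContext(ctx, "tcp", addr)
	}),
	grpc.WithTransportCredentials(insecure.NewCredentials()),
)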
-// -// [issue 23459]: https://github.com/golang/go/issues/23459 -func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.Dialer = f - }) -} - -func init() { - internal.WithHealthCheckFunc = withHealthCheckFunc -} - -// WithDialer returns a DialOption that specifies a function to use for dialing -// network addresses. If FailOnNonTempDialError() is set to true, and an error -// is returned by f, gRPC checks the error's Temporary() method to decide if it -// should try to reconnect to the network address. -// -// Deprecated: use WithContextDialer instead. Will be supported throughout -// 1.x. -func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { - return WithContextDialer( - func(ctx context.Context, addr string) (net.Conn, error) { - if deadline, ok := ctx.Deadline(); ok { - return f(addr, time.Until(deadline)) - } - return f(addr, 0) - }) -} - -// WithStatsHandler returns a DialOption that specifies the stats handler for -// all the RPCs and underlying network connections in this ClientConn. -func WithStatsHandler(h stats.Handler) DialOption { - return newFuncDialOption(func(o *dialOptions) { - if h == nil { - logger.Error("ignoring nil parameter in grpc.WithStatsHandler ClientOption") - // Do not allow a nil stats handler, which would otherwise cause - // panics. - return - } - o.copts.StatsHandlers = append(o.copts.StatsHandlers, h) - }) -} - -// withBinaryLogger returns a DialOption that specifies the binary logger for -// this ClientConn. -func withBinaryLogger(bl binarylog.Logger) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.binaryLogger = bl - }) -} - -// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on -// non-temporary dial errors. If f is true, and dialer returns a non-temporary -// error, gRPC will fail the connection to the network address and won't try to -// reconnect. The default value of FailOnNonTempDialError is false. -// -// FailOnNonTempDialError only affects the initial dial, and does not do -// anything useful unless you are also using WithBlock(). -// -// Use of this feature is not recommended. For more information, please see: -// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func FailOnNonTempDialError(f bool) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.FailOnNonTempDialError = f - }) -} - -// WithUserAgent returns a DialOption that specifies a user agent string for all -// the RPCs. -func WithUserAgent(s string) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.UserAgent = s + " " + grpcUA - }) -} - -// WithKeepaliveParams returns a DialOption that specifies keepalive parameters -// for the client transport. -func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { - if kp.Time < internal.KeepaliveMinPingTime { - logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) - kp.Time = internal.KeepaliveMinPingTime - } - return newFuncDialOption(func(o *dialOptions) { - o.copts.KeepaliveParams = kp - }) -} - -// WithUnaryInterceptor returns a DialOption that specifies the interceptor for -// unary RPCs. 
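// A sketch of WithKeepaliveParams as documented above, using
// "google.golang.org/grpc/keepalive"; per the guard in the deleted code, a
// Time below the enforced minimum is silently raised. Values are illustrative.
kp := keepalive.ClientParameters{
	Time:                30 * time.Second, // ping after 30s without activity
	Timeout:             10 * time.Second, // wait 10s for the ping ack
	PermitWithoutStream: true,             // ping even with no active RPCs
}
conn, err := grpc.Dial(target, grpc.WithKeepaliveParams(kp))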
-func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.unaryInt = f - }) -} - -// WithChainUnaryInterceptor returns a DialOption that specifies the chained -// interceptor for unary RPCs. The first interceptor will be the outer most, -// while the last interceptor will be the inner most wrapper around the real call. -// All interceptors added by this method will be chained, and the interceptor -// defined by WithUnaryInterceptor will always be prepended to the chain. -func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) - }) -} - -// WithStreamInterceptor returns a DialOption that specifies the interceptor for -// streaming RPCs. -func WithStreamInterceptor(f StreamClientInterceptor) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.streamInt = f - }) -} - -// WithChainStreamInterceptor returns a DialOption that specifies the chained -// interceptor for streaming RPCs. The first interceptor will be the outer most, -// while the last interceptor will be the inner most wrapper around the real call. -// All interceptors added by this method will be chained, and the interceptor -// defined by WithStreamInterceptor will always be prepended to the chain. -func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.chainStreamInts = append(o.chainStreamInts, interceptors...) - }) -} - -// WithAuthority returns a DialOption that specifies the value to be used as the -// :authority pseudo-header and as the server name in authentication handshake. -func WithAuthority(a string) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.authority = a - }) -} - -// WithChannelzParentID returns a DialOption that specifies the channelz ID of -// current ClientConn's parent. This function is used in nested channel creation -// (e.g. grpclb dial). -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func WithChannelzParentID(id *channelz.Identifier) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.channelzParentID = id - }) -} - -// WithDisableServiceConfig returns a DialOption that causes gRPC to ignore any -// service config provided by the resolver and provides a hint to the resolver -// to not fetch service configs. -// -// Note that this dial option only disables service config from resolver. If -// default service config is provided, gRPC will use the default service config. -func WithDisableServiceConfig() DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.disableServiceConfig = true - }) -} - -// WithDefaultServiceConfig returns a DialOption that configures the default -// service config, which will be used in cases where: -// -// 1. WithDisableServiceConfig is also used, or -// -// 2. The name resolver does not provide a service config or provides an -// invalid service config. -// -// The parameter s is the JSON representation of the default service config. 
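// A sketch of an interceptor suitable for the WithChainUnaryInterceptor
// option above; the logging body is illustrative, and target is assumed.
func logUnary(ctx context.Context, method string, req, reply any,
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	err := invoker(ctx, method, req, reply, cc, opts...) // the interceptor must call invoker
	log.Printf("rpc %s took %v err=%v", method, time.Since(start), err)
	return err
}

// Installed at dial time; chained interceptors run outermost-first.
conn, err := grpc.Dial(target, grpc.WithChainUnaryInterceptor(logUnary))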
-// For more information about service configs, see:
-// https://github.com/grpc/grpc/blob/master/doc/service_config.md
-// For a simple example of usage, see:
-// examples/features/load_balancing/client/main.go
-func WithDefaultServiceConfig(s string) DialOption {
-	return newFuncDialOption(func(o *dialOptions) {
-		o.defaultServiceConfigRawJSON = &s
-	})
-}
-
-// WithDisableRetry returns a DialOption that disables retries, even if the
-// service config enables them. This does not impact transparent retries, which
-// will happen automatically if no data is written to the wire or if the RPC is
-// unprocessed by the remote server.
-func WithDisableRetry() DialOption {
-	return newFuncDialOption(func(o *dialOptions) {
-		o.disableRetry = true
-	})
-}
-
-// WithMaxHeaderListSize returns a DialOption that specifies the maximum
-// (uncompressed) size of header list that the client is prepared to accept.
-func WithMaxHeaderListSize(s uint32) DialOption {
-	return newFuncDialOption(func(o *dialOptions) {
-		o.copts.MaxHeaderListSize = &s
-	})
-}
-
-// WithDisableHealthCheck disables the LB channel health checking for all
-// SubConns of this ClientConn.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func WithDisableHealthCheck() DialOption {
-	return newFuncDialOption(func(o *dialOptions) {
-		o.disableHealthCheck = true
-	})
-}
-
-// withHealthCheckFunc replaces the default health check function with the
-// provided one. It makes it easier for tests to change the health check
-// function.
-//
-// For testing purposes only.
-func withHealthCheckFunc(f internal.HealthChecker) DialOption {
-	return newFuncDialOption(func(o *dialOptions) {
-		o.healthCheckFunc = f
-	})
-}
-
-func defaultDialOptions() dialOptions {
-	return dialOptions{
-		copts: transport.ConnectOptions{
-			ReadBufferSize:  defaultReadBufSize,
-			WriteBufferSize: defaultWriteBufSize,
-			UseProxy:        true,
-			UserAgent:       grpcUA,
-		},
-		bs:              internalbackoff.DefaultExponential,
-		healthCheckFunc: internal.HealthCheckFunc,
-		idleTimeout:     30 * time.Minute,
-		recvBufferPool:  nopBufferPool{},
-	}
-}
-
-// withMinConnectDeadline specifies the function that clientconn uses to
-// get minConnectDeadline. This can be used to make connection attempts happen
-// faster/slower.
-//
-// For testing purposes only.
-func withMinConnectDeadline(f func() time.Duration) DialOption {
-	return newFuncDialOption(func(o *dialOptions) {
-		o.minConnectTimeout = f
-	})
-}
-
-// WithResolvers allows a list of resolver implementations to be registered
-// locally with the ClientConn without needing to be globally registered via
-// resolver.Register. They will be matched against the scheme used for the
-// current Dial only, and will take precedence over the global registry.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func WithResolvers(rs ...resolver.Builder) DialOption {
-	return newFuncDialOption(func(o *dialOptions) {
-		o.resolvers = append(o.resolvers, rs...)
-	})
-}
-
-// WithIdleTimeout returns a DialOption that configures an idle timeout for the
-// channel. If the channel is idle for the configured timeout, i.e. there are no
-// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode
-// and as a result the name resolver and load balancer will be shut down. The
-// channel will exit idle mode when the Connect() method is called or when an
-// RPC is initiated.
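// A sketch of the JSON accepted by WithDefaultServiceConfig above; the
// service name echo.Echo is hypothetical, and the schema is the one in
// service_config.md linked above.
const svcCfg = `{
  "loadBalancingConfig": [{"round_robin": {}}],
  "methodConfig": [{
    "name": [{"service": "echo.Echo"}],
    "retryPolicy": {
      "maxAttempts": 3,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`

conn, err := grpc.Dial(target, grpc.WithDefaultServiceConfig(svcCfg))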
-// -// A default timeout of 30 minutes will be used if this dial option is not set -// at dial time and idleness can be disabled by passing a timeout of zero. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func WithIdleTimeout(d time.Duration) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.idleTimeout = d - }) -} - -// WithRecvBufferPool returns a DialOption that configures the ClientConn -// to use the provided shared buffer pool for parsing incoming messages. Depending -// on the application's workload, this could result in reduced memory allocation. -// -// If you are unsure about how to implement a memory pool but want to utilize one, -// begin with grpc.NewSharedBufferPool. -// -// Note: The shared buffer pool feature will not be active if any of the following -// options are used: WithStatsHandler, EnableTracing, or binary logging. In such -// cases, the shared buffer pool will be ignored. -// -// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in -// v1.60.0 or later. -func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { - return withRecvBufferPool(bufferPool) -} - -func withRecvBufferPool(bufferPool SharedBufferPool) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.recvBufferPool = bufferPool - }) -} diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go deleted file mode 100644 index 0022859ad7..0000000000 --- a/vendor/google.golang.org/grpc/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -//go:generate ./regenerate.sh - -/* -Package grpc implements an RPC system called gRPC. - -See grpc.io for more information about gRPC. -*/ -package grpc // import "google.golang.org/grpc" diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go deleted file mode 100644 index 5ebf88d714..0000000000 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ /dev/null @@ -1,130 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package encoding defines the interface for the compressor and codec, and -// functions to register and retrieve compressors and codecs. -// -// # Experimental -// -// Notice: This package is EXPERIMENTAL and may be changed or removed in a -// later release. 
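// A sketch combining the two options above; WithRecvBufferPool is deprecated
// as noted, and grpc.NewSharedBufferPool is the stock pool implementation.
conn, err := grpc.Dial(target,
	grpc.WithIdleTimeout(0), // zero disables idle mode entirely
	grpc.WithRecvBufferPool(grpc.NewSharedBufferPool()),
)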
-package encoding - -import ( - "io" - "strings" - - "google.golang.org/grpc/internal/grpcutil" -) - -// Identity specifies the optional encoding for uncompressed streams. -// It is intended for grpc internal use only. -const Identity = "identity" - -// Compressor is used for compressing and decompressing when sending or -// receiving messages. -// -// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`, -// gRPC will invoke it to determine the size of the buffer allocated for the -// result of decompression. A return value of -1 indicates unknown size. -type Compressor interface { - // Compress writes the data written to wc to w after compressing it. If an - // error occurs while initializing the compressor, that error is returned - // instead. - Compress(w io.Writer) (io.WriteCloser, error) - // Decompress reads data from r, decompresses it, and provides the - // uncompressed data via the returned io.Reader. If an error occurs while - // initializing the decompressor, that error is returned instead. - Decompress(r io.Reader) (io.Reader, error) - // Name is the name of the compression codec and is used to set the content - // coding header. The result must be static; the result cannot change - // between calls. - Name() string -} - -var registeredCompressor = make(map[string]Compressor) - -// RegisterCompressor registers the compressor with gRPC by its name. It can -// be activated when sending an RPC via grpc.UseCompressor(). It will be -// automatically accessed when receiving a message based on the content coding -// header. Servers also use it to send a response with the same encoding as -// the request. -// -// NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple Compressors are -// registered with the same name, the one registered last will take effect. -func RegisterCompressor(c Compressor) { - registeredCompressor[c.Name()] = c - if !grpcutil.IsCompressorNameRegistered(c.Name()) { - grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) - } -} - -// GetCompressor returns Compressor for the given compressor name. -func GetCompressor(name string) Compressor { - return registeredCompressor[name] -} - -// Codec defines the interface gRPC uses to encode and decode messages. Note -// that implementations of this interface must be thread safe; a Codec's -// methods can be called from concurrent goroutines. -type Codec interface { - // Marshal returns the wire format of v. - Marshal(v any) ([]byte, error) - // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v any) error - // Name returns the name of the Codec implementation. The returned string - // will be used as part of content type in transmission. The result must be - // static; the result cannot change between calls. - Name() string -} - -var registeredCodecs = make(map[string]Codec) - -// RegisterCodec registers the provided Codec for use with all gRPC clients and -// servers. -// -// The Codec will be stored and looked up by result of its Name() method, which -// should match the content-subtype of the encoding handled by the Codec. This -// is case-insensitive, and is stored and looked up as lowercase. If the -// result of calling Name() is an empty string, RegisterCodec will panic. See -// Content-Type on -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. 
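// Rather than implementing Compressor by hand, the stock gzip implementation
// registers itself through the init-time mechanism described above. A sketch;
// client and SomeMethod are hypothetical.
import (
	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" Compressor in init()
)

// Per-call opt-in; per RegisterCompressor above, servers reply with the same
// encoding automatically.
resp, err := client.SomeMethod(ctx, req, grpc.UseCompressor("gzip"))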
-// -// NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple Codecs are -// registered with the same name, the one registered last will take effect. -func RegisterCodec(codec Codec) { - if codec == nil { - panic("cannot register a nil Codec") - } - if codec.Name() == "" { - panic("cannot register Codec with empty string result for Name()") - } - contentSubtype := strings.ToLower(codec.Name()) - registeredCodecs[contentSubtype] = codec -} - -// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is -// registered for the content-subtype. -// -// The content-subtype is expected to be lowercase. -func GetCodec(contentSubtype string) Codec { - return registeredCodecs[contentSubtype] -} diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go deleted file mode 100644 index 66d5cdf03e..0000000000 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ /dev/null @@ -1,72 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package proto defines the protobuf codec. Importing this package will -// register the codec. -package proto - -import ( - "fmt" - - "google.golang.org/grpc/encoding" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/protoadapt" -) - -// Name is the name registered for the proto compressor. -const Name = "proto" - -func init() { - encoding.RegisterCodec(codec{}) -} - -// codec is a Codec implementation with protobuf. It is the default codec for gRPC. -type codec struct{} - -func (codec) Marshal(v any) ([]byte, error) { - vv := messageV2Of(v) - if vv == nil { - return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) - } - - return proto.Marshal(vv) -} - -func (codec) Unmarshal(data []byte, v any) error { - vv := messageV2Of(v) - if vv == nil { - return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) - } - - return proto.Unmarshal(data, vv) -} - -func messageV2Of(v any) proto.Message { - switch v := v.(type) { - case protoadapt.MessageV1: - return protoadapt.MessageV2Of(v) - case protoadapt.MessageV2: - return v - } - - return nil -} - -func (codec) Name() string { - return Name -} diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go deleted file mode 100644 index ac73c9ced2..0000000000 --- a/vendor/google.golang.org/grpc/grpclog/component.go +++ /dev/null @@ -1,117 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
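// A sketch of the RegisterCodec contract above with a hypothetical JSON
// codec; Name() is lowercased and matched against the request content-subtype.
// Assumes "encoding/json" and "google.golang.org/grpc/encoding".
type jsonCodec struct{}

func (jsonCodec) Marshal(v any) ([]byte, error)      { return json.Marshal(v) }
func (jsonCodec) Unmarshal(data []byte, v any) error { return json.Unmarshal(data, v) }
func (jsonCodec) Name() string                       { return "json" }

func init() { encoding.RegisterCodec(jsonCodec{}) } // must run at init time

// Clients then select it per call with grpc.CallContentSubtype("json").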
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpclog - -import ( - "fmt" - - "google.golang.org/grpc/internal/grpclog" -) - -// componentData records the settings for a component. -type componentData struct { - name string -} - -var cache = map[string]*componentData{} - -func (c *componentData) InfoDepth(depth int, args ...any) { - args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.InfoDepth(depth+1, args...) -} - -func (c *componentData) WarningDepth(depth int, args ...any) { - args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.WarningDepth(depth+1, args...) -} - -func (c *componentData) ErrorDepth(depth int, args ...any) { - args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.ErrorDepth(depth+1, args...) -} - -func (c *componentData) FatalDepth(depth int, args ...any) { - args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.FatalDepth(depth+1, args...) -} - -func (c *componentData) Info(args ...any) { - c.InfoDepth(1, args...) -} - -func (c *componentData) Warning(args ...any) { - c.WarningDepth(1, args...) -} - -func (c *componentData) Error(args ...any) { - c.ErrorDepth(1, args...) -} - -func (c *componentData) Fatal(args ...any) { - c.FatalDepth(1, args...) -} - -func (c *componentData) Infof(format string, args ...any) { - c.InfoDepth(1, fmt.Sprintf(format, args...)) -} - -func (c *componentData) Warningf(format string, args ...any) { - c.WarningDepth(1, fmt.Sprintf(format, args...)) -} - -func (c *componentData) Errorf(format string, args ...any) { - c.ErrorDepth(1, fmt.Sprintf(format, args...)) -} - -func (c *componentData) Fatalf(format string, args ...any) { - c.FatalDepth(1, fmt.Sprintf(format, args...)) -} - -func (c *componentData) Infoln(args ...any) { - c.InfoDepth(1, args...) -} - -func (c *componentData) Warningln(args ...any) { - c.WarningDepth(1, args...) -} - -func (c *componentData) Errorln(args ...any) { - c.ErrorDepth(1, args...) -} - -func (c *componentData) Fatalln(args ...any) { - c.FatalDepth(1, args...) -} - -func (c *componentData) V(l int) bool { - return V(l) -} - -// Component creates a new component and returns it for logging. If a component -// with the name already exists, nothing will be created and it will be -// returned. SetLoggerV2 will panic if it is called with a logger created by -// Component. -func Component(componentName string) DepthLoggerV2 { - if cData, ok := cache[componentName]; ok { - return cData - } - c := &componentData{componentName} - cache[componentName] = c - return c -} diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go deleted file mode 100644 index 16928c9cb9..0000000000 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ /dev/null @@ -1,132 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
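// A sketch of the Component helper above; repeated calls with the same name
// return the cached instance, and output is prefixed with the component name.
// The component name and message are illustrative.
var logger = grpclog.Component("channelz")

func debugDump(n int) {
	if logger.V(2) { // gate on verbosity before formatting
		logger.Infof("dumping %d channels", n) // emitted as "[channelz] ..."
	}
}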
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package grpclog defines logging for grpc.
-//
-// All logs in transport and grpclb packages only go to verbose level 2.
-// All logs in other packages in grpc are logged in spite of the verbosity level.
-//
-// In the default logger,
-// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL,
-// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL.
-package grpclog // import "google.golang.org/grpc/grpclog"
-
-import (
-	"os"
-
-	"google.golang.org/grpc/internal/grpclog"
-)
-
-func init() {
-	SetLoggerV2(newLoggerV2())
-}
-
-// V reports whether verbosity level l is at least the requested verbose level.
-func V(l int) bool {
-	return grpclog.Logger.V(l)
-}
-
-// Info logs to the INFO log.
-func Info(args ...any) {
-	grpclog.Logger.Info(args...)
-}
-
-// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
-func Infof(format string, args ...any) {
-	grpclog.Logger.Infof(format, args...)
-}
-
-// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
-func Infoln(args ...any) {
-	grpclog.Logger.Infoln(args...)
-}
-
-// Warning logs to the WARNING log.
-func Warning(args ...any) {
-	grpclog.Logger.Warning(args...)
-}
-
-// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
-func Warningf(format string, args ...any) {
-	grpclog.Logger.Warningf(format, args...)
-}
-
-// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
-func Warningln(args ...any) {
-	grpclog.Logger.Warningln(args...)
-}
-
-// Error logs to the ERROR log.
-func Error(args ...any) {
-	grpclog.Logger.Error(args...)
-}
-
-// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
-func Errorf(format string, args ...any) {
-	grpclog.Logger.Errorf(format, args...)
-}
-
-// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
-func Errorln(args ...any) {
-	grpclog.Logger.Errorln(args...)
-}
-
-// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
-// It calls os.Exit() with exit code 1.
-func Fatal(args ...any) {
-	grpclog.Logger.Fatal(args...)
-	// Make sure fatal logs will exit.
-	os.Exit(1)
-}
-
-// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
-// It calls os.Exit() with exit code 1.
-func Fatalf(format string, args ...any) {
-	grpclog.Logger.Fatalf(format, args...)
-	// Make sure fatal logs will exit.
-	os.Exit(1)
-}
-
-// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
-// It calls os.Exit() with exit code 1.
-func Fatalln(args ...any) {
-	grpclog.Logger.Fatalln(args...)
-	// Make sure fatal logs will exit.
-	os.Exit(1)
-}
-
-// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
-//
-// Deprecated: use Info.
-func Print(args ...any) {
-	grpclog.Logger.Info(args...)
-}
-
-// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
-//
-// Deprecated: use Infof.
-func Printf(format string, args ...any) {
-	grpclog.Logger.Infof(format, args...)
-} - -// Println prints to the logger. Arguments are handled in the manner of fmt.Println. -// -// Deprecated: use Infoln. -func Println(args ...any) { - grpclog.Logger.Infoln(args...) -} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go deleted file mode 100644 index b1674d8267..0000000000 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ /dev/null @@ -1,87 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpclog - -import "google.golang.org/grpc/internal/grpclog" - -// Logger mimics golang's standard Logger as an interface. -// -// Deprecated: use LoggerV2. -type Logger interface { - Fatal(args ...any) - Fatalf(format string, args ...any) - Fatalln(args ...any) - Print(args ...any) - Printf(format string, args ...any) - Println(args ...any) -} - -// SetLogger sets the logger that is used in grpc. Call only from -// init() functions. -// -// Deprecated: use SetLoggerV2. -func SetLogger(l Logger) { - grpclog.Logger = &loggerWrapper{Logger: l} -} - -// loggerWrapper wraps Logger into a LoggerV2. -type loggerWrapper struct { - Logger -} - -func (g *loggerWrapper) Info(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Infoln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Infof(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Warning(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Warningln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Warningf(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Error(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Errorln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Errorf(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) V(l int) bool { - // Returns true for all verbose level. - return true -} diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go deleted file mode 100644 index ecfd36d713..0000000000 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ /dev/null @@ -1,258 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
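// The deprecated Logger interface documented below is satisfied by the
// standard library's *log.Logger, which is how SetLogger was typically used.
// A sketch, assuming "log" and "os"; grpclog wraps it into a LoggerV2.
grpclog.SetLogger(log.New(os.Stderr, "grpc: ", log.LstdFlags))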
- * - */ - -package grpclog - -import ( - "encoding/json" - "fmt" - "io" - "log" - "os" - "strconv" - "strings" - - "google.golang.org/grpc/internal/grpclog" -) - -// LoggerV2 does underlying logging work for grpclog. -type LoggerV2 interface { - // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...any) - // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...any) - // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...any) - // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...any) - // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...any) - // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...any) - // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...any) - // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...any) - // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...any) - // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...any) - // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...any) - // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...any) - // V reports whether verbosity level l is at least the requested verbose level. - V(l int) bool -} - -// SetLoggerV2 sets logger that is used in grpc to a V2 logger. -// Not mutex-protected, should be called before any gRPC functions. -func SetLoggerV2(l LoggerV2) { - if _, ok := l.(*componentData); ok { - panic("cannot use component logger as grpclog logger") - } - grpclog.Logger = l - grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) -} - -const ( - // infoLog indicates Info severity. - infoLog int = iota - // warningLog indicates Warning severity. - warningLog - // errorLog indicates Error severity. - errorLog - // fatalLog indicates Fatal severity. - fatalLog -) - -// severityName contains the string representation of each severity. -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// loggerT is the default logger used by grpclog. -type loggerT struct { - m []*log.Logger - v int - jsonFormat bool -} - -// NewLoggerV2 creates a loggerV2 with the provided writers. -// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1). -// Error logs will be written to errorW, warningW and infoW. -// Warning logs will be written to warningW and infoW. -// Info logs will be written to infoW. -func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) -} - -// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and -// verbosity level. 
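// A sketch of the V2 constructors above; as noted, SetLoggerV2 is not
// mutex-protected, so this belongs before any other gRPC call.
// Sends INFO and above to stderr with verbosity 2.
grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 2))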
-func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) -} - -type loggerV2Config struct { - verbose int - jsonFormat bool -} - -func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { - var m []*log.Logger - flag := log.LstdFlags - if c.jsonFormat { - flag = 0 - } - m = append(m, log.New(infoW, "", flag)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) - ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, "", flag)) - m = append(m, log.New(ew, "", flag)) - return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} -} - -// newLoggerV2 creates a loggerV2 to be used as default logger. -// All logs are written to stderr. -func newLoggerV2() LoggerV2 { - errorW := io.Discard - warningW := io.Discard - infoW := io.Discard - - logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") - switch logLevel { - case "", "ERROR", "error": // If env is unset, set level to ERROR. - errorW = os.Stderr - case "WARNING", "warning": - warningW = os.Stderr - case "INFO", "info": - infoW = os.Stderr - } - - var v int - vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL") - if vl, err := strconv.Atoi(vLevel); err == nil { - v = vl - } - - jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") - - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ - verbose: v, - jsonFormat: jsonFormat, - }) -} - -func (g *loggerT) output(severity int, s string) { - sevStr := severityName[severity] - if !g.jsonFormat { - g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) - return - } - // TODO: we can also include the logging component, but that needs more - // (API) changes. - b, _ := json.Marshal(map[string]string{ - "severity": sevStr, - "message": s, - }) - g.m[severity].Output(2, string(b)) -} - -func (g *loggerT) Info(args ...any) { - g.output(infoLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Infoln(args ...any) { - g.output(infoLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Infof(format string, args ...any) { - g.output(infoLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Warning(args ...any) { - g.output(warningLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Warningln(args ...any) { - g.output(warningLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Warningf(format string, args ...any) { - g.output(warningLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Error(args ...any) { - g.output(errorLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Errorln(args ...any) { - g.output(errorLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Errorf(format string, args ...any) { - g.output(errorLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Fatal(args ...any) { - g.output(fatalLog, fmt.Sprint(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalln(args ...any) { - g.output(fatalLog, fmt.Sprintln(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalf(format string, args ...any) { - g.output(fatalLog, fmt.Sprintf(format, args...)) - os.Exit(1) -} - -func (g *loggerT) V(l int) bool { - return l <= g.v -} - -// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements -// DepthLoggerV2, the below functions will be called with the appropriate stack -// depth set for trivial functions the logger may ignore. 
-// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type DepthLoggerV2 interface { - LoggerV2 - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...any) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...any) - // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...any) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...any) -} diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go deleted file mode 100644 index 877d78fc3d..0000000000 --- a/vendor/google.golang.org/grpc/interceptor.go +++ /dev/null @@ -1,104 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" -) - -// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. -type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error - -// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. -// Unary interceptors can be specified as a DialOption, using -// WithUnaryInterceptor() or WithChainUnaryInterceptor(), when creating a -// ClientConn. When a unary interceptor(s) is set on a ClientConn, gRPC -// delegates all unary RPC invocations to the interceptor, and it is the -// responsibility of the interceptor to call invoker to complete the processing -// of the RPC. -// -// method is the RPC name. req and reply are the corresponding request and -// response messages. cc is the ClientConn on which the RPC was invoked. invoker -// is the handler to complete the RPC and it is the responsibility of the -// interceptor to call it. opts contain all applicable call options, including -// defaults from the ClientConn as well as per-call options. -// -// The returned error must be compatible with the status package. -type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error - -// Streamer is called by StreamClientInterceptor to create a ClientStream. -type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) - -// StreamClientInterceptor intercepts the creation of a ClientStream. Stream -// interceptors can be specified as a DialOption, using WithStreamInterceptor() -// or WithChainStreamInterceptor(), when creating a ClientConn. When a stream -// interceptor(s) is set on the ClientConn, gRPC delegates all stream creations -// to the interceptor, and it is the responsibility of the interceptor to call -// streamer. 
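// A sketch of a StreamClientInterceptor honoring the contract above: it must
// call streamer to actually open the ClientStream. The logging is illustrative.
func logStream(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn,
	method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
	log.Printf("opening stream %s (clientStreams=%v serverStreams=%v)",
		method, desc.ClientStreams, desc.ServerStreams)
	return streamer(ctx, desc, cc, method, opts...)
}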
-// -// desc contains a description of the stream. cc is the ClientConn on which the -// RPC was invoked. streamer is the handler to create a ClientStream and it is -// the responsibility of the interceptor to call it. opts contain all applicable -// call options, including defaults from the ClientConn as well as per-call -// options. -// -// StreamClientInterceptor may return a custom ClientStream to intercept all I/O -// operations. The returned error must be compatible with the status package. -type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) - -// UnaryServerInfo consists of various information about a unary RPC on -// server side. All per-rpc information may be mutated by the interceptor. -type UnaryServerInfo struct { - // Server is the service implementation the user provides. This is read-only. - Server any - // FullMethod is the full RPC method string, i.e., /package.service/method. - FullMethod string -} - -// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal -// execution of a unary RPC. -// -// If a UnaryHandler returns an error, it should either be produced by the -// status package, or be one of the context errors. Otherwise, gRPC will use -// codes.Unknown as the status code and err.Error() as the status message of the -// RPC. -type UnaryHandler func(ctx context.Context, req any) (any, error) - -// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info -// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper -// of the service method implementation. It is the responsibility of the interceptor to invoke handler -// to complete the RPC. -type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error) - -// StreamServerInfo consists of various information about a streaming RPC on -// server side. All per-rpc information may be mutated by the interceptor. -type StreamServerInfo struct { - // FullMethod is the full RPC method string, i.e., /package.service/method. - FullMethod string - // IsClientStream indicates whether the RPC is a client streaming RPC. - IsClientStream bool - // IsServerStream indicates whether the RPC is a server streaming RPC. - IsServerStream bool -} - -// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server. -// info contains all the information of this RPC the interceptor can operate on. And handler is the -// service method implementation. It is the responsibility of the interceptor to invoke handler to -// complete the RPC. -type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go deleted file mode 100644 index fed1c011a3..0000000000 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ /dev/null @@ -1,109 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
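// A sketch of a UnaryServerInterceptor matching the contract above; per the
// UnaryHandler comment, errors should come from the status package
// ("google.golang.org/grpc/status" and "google.golang.org/grpc/codes").
// authorized is a hypothetical check.
func authUnary(ctx context.Context, req any, info *grpc.UnaryServerInfo,
	handler grpc.UnaryHandler) (any, error) {
	if !authorized(ctx) {
		return nil, status.Error(codes.PermissionDenied, "not authorized")
	}
	return handler(ctx, req) // the interceptor must invoke handler to complete the RPC
}

s := grpc.NewServer(grpc.UnaryInterceptor(authUnary))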
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package backoff implement the backoff strategy for gRPC. -// -// This is kept in internal until the gRPC project decides whether or not to -// allow alternative backoff strategies. -package backoff - -import ( - "context" - "errors" - "time" - - grpcbackoff "google.golang.org/grpc/backoff" - "google.golang.org/grpc/internal/grpcrand" -) - -// Strategy defines the methodology for backing off after a grpc connection -// failure. -type Strategy interface { - // Backoff returns the amount of time to wait before the next retry given - // the number of consecutive failures. - Backoff(retries int) time.Duration -} - -// DefaultExponential is an exponential backoff implementation using the -// default values for all the configurable knobs defined in -// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. -var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig} - -// Exponential implements exponential backoff algorithm as defined in -// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. -type Exponential struct { - // Config contains all options to configure the backoff algorithm. - Config grpcbackoff.Config -} - -// Backoff returns the amount of time to wait before the next retry given the -// number of retries. -func (bc Exponential) Backoff(retries int) time.Duration { - if retries == 0 { - return bc.Config.BaseDelay - } - backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay) - for backoff < max && retries > 0 { - backoff *= bc.Config.Multiplier - retries-- - } - if backoff > max { - backoff = max - } - // Randomize backoff delays so that if a cluster of requests start at - // the same time, they won't operate in lockstep. - backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1) - if backoff < 0 { - return 0 - } - return time.Duration(backoff) -} - -// ErrResetBackoff is the error to be returned by the function executed by RunF, -// to instruct the latter to reset its backoff state. -var ErrResetBackoff = errors.New("reset backoff state") - -// RunF provides a convenient way to run a function f repeatedly until the -// context expires or f returns a non-nil error that is not ErrResetBackoff. -// When f returns ErrResetBackoff, RunF continues to run f, but resets its -// backoff state before doing so. backoff accepts an integer representing the -// number of retries, and returns the amount of time to backoff. 
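The deleted Backoff method is plain arithmetic, so a standalone sketch may help; it uses the spec's default knobs (base 1s, multiplier 1.6, jitter ±20%, cap 120s) and does not depend on the internal package:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoff mirrors the deleted Exponential.Backoff with default config.
func backoff(retries int) time.Duration {
	base, max := float64(time.Second), float64(120*time.Second)
	if retries == 0 {
		return time.Duration(base)
	}
	b := base
	for b < max && retries > 0 {
		b *= 1.6
		retries--
	}
	if b > max {
		b = max
	}
	b *= 1 + 0.2*(rand.Float64()*2-1) // jitter so retries don't run in lockstep
	if b < 0 {
		return 0
	}
	return time.Duration(b)
}

func main() {
	for i := 0; i < 5; i++ {
		fmt.Println(i, backoff(i))
	}
}
```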
-func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) { - attempt := 0 - timer := time.NewTimer(0) - for ctx.Err() == nil { - select { - case <-timer.C: - case <-ctx.Done(): - timer.Stop() - return - } - - err := f() - if errors.Is(err, ErrResetBackoff) { - timer.Reset(0) - attempt = 0 - continue - } - if err != nil { - return - } - timer.Reset(backoff(attempt)) - attempt++ - } -} diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go deleted file mode 100644 index 3c594e6e4e..0000000000 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +++ /dev/null @@ -1,385 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package gracefulswitch implements a graceful switch load balancer. -package gracefulswitch - -import ( - "errors" - "fmt" - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/base" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/resolver" -) - -var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed") -var _ balancer.Balancer = (*Balancer)(nil) - -// NewBalancer returns a graceful switch Balancer. -func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer { - return &Balancer{ - cc: cc, - bOpts: opts, - } -} - -// Balancer is a utility to gracefully switch from one balancer to -// a new balancer. It implements the balancer.Balancer interface. -type Balancer struct { - bOpts balancer.BuildOptions - cc balancer.ClientConn - - // mu protects the following fields and all fields within balancerCurrent - // and balancerPending. mu does not need to be held when calling into the - // child balancers, as all calls into these children happen only as a direct - // result of a call into the gracefulSwitchBalancer, which are also - // guaranteed to be synchronous. There is one exception: an UpdateState call - // from a child balancer when current and pending are populated can lead to - // calling Close() on the current. To prevent that racing with an - // UpdateSubConnState from the channel, we hold currentMu during Close and - // UpdateSubConnState calls. - mu sync.Mutex - balancerCurrent *balancerWrapper - balancerPending *balancerWrapper - closed bool // set to true when this balancer is closed - - // currentMu must be locked before mu. This mutex guards against this - // sequence of events: UpdateSubConnState() called, finds the - // balancerCurrent, gives up lock, updateState comes in, causes Close() on - // balancerCurrent before the UpdateSubConnState is called on the - // balancerCurrent. - currentMu sync.Mutex -} - -// swap swaps out the current lb with the pending lb and updates the ClientConn. -// The caller must hold gsb.mu. 
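RunF above lives in an internal package, so here is a self-contained rendering of the same loop plus a toy caller. The polling function steers it: nil keeps waiting with growing delays, the reset sentinel restarts the schedule, and any other error stops the loop.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errResetBackoff = errors.New("reset backoff state")

// runF mirrors the deleted RunF: it calls f until ctx expires or f returns
// a non-nil error other than errResetBackoff, which instead restarts the
// delay schedule from attempt zero.
func runF(ctx context.Context, f func() error, backoff func(int) time.Duration) {
	attempt := 0
	timer := time.NewTimer(0)
	for ctx.Err() == nil {
		select {
		case <-timer.C:
		case <-ctx.Done():
			timer.Stop()
			return
		}
		err := f()
		if errors.Is(err, errResetBackoff) {
			timer.Reset(0)
			attempt = 0
			continue
		}
		if err != nil {
			return
		}
		timer.Reset(backoff(attempt))
		attempt++
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	n := 0
	runF(ctx, func() error {
		n++
		fmt.Println("poll", n)
		if n == 3 {
			return errResetBackoff // progress made: restart the schedule
		}
		return nil // nothing yet: wait out the next backoff delay
	}, func(retries int) time.Duration {
		return time.Duration(retries+1) * 100 * time.Millisecond // toy backoff
	})
}
```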
-func (gsb *Balancer) swap() { - gsb.cc.UpdateState(gsb.balancerPending.lastState) - cur := gsb.balancerCurrent - gsb.balancerCurrent = gsb.balancerPending - gsb.balancerPending = nil - go func() { - gsb.currentMu.Lock() - defer gsb.currentMu.Unlock() - cur.Close() - }() -} - -// Helper function that checks if the balancer passed in is current or pending. -// The caller must hold gsb.mu. -func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { - return bw == gsb.balancerCurrent || bw == gsb.balancerPending -} - -// SwitchTo initializes the graceful switch process, which completes based on -// connectivity state changes on the current/pending balancer. Thus, the switch -// process is not complete when this method returns. This method must be called -// synchronously alongside the rest of the balancer.Balancer methods this -// Graceful Switch Balancer implements. -func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { - gsb.mu.Lock() - if gsb.closed { - gsb.mu.Unlock() - return errBalancerClosed - } - bw := &balancerWrapper{ - gsb: gsb, - lastState: balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), - }, - subconns: make(map[balancer.SubConn]bool), - } - balToClose := gsb.balancerPending // nil if there is no pending balancer - if gsb.balancerCurrent == nil { - gsb.balancerCurrent = bw - } else { - gsb.balancerPending = bw - } - gsb.mu.Unlock() - balToClose.Close() - // This function takes a builder instead of a balancer because builder.Build - // can call back inline, and this utility needs to handle the callbacks. - newBalancer := builder.Build(bw, gsb.bOpts) - if newBalancer == nil { - // This is illegal and should never happen; we clear the balancerWrapper - // we were constructing if it happens to avoid a potential panic. - gsb.mu.Lock() - if gsb.balancerPending != nil { - gsb.balancerPending = nil - } else { - gsb.balancerCurrent = nil - } - gsb.mu.Unlock() - return balancer.ErrBadResolverState - } - - // This write doesn't need to take gsb.mu because this field never gets read - // or written to on any calls from the current or pending. Calls from grpc - // to this balancer are guaranteed to be called synchronously, so this - // bw.Balancer field will never be forwarded to until this SwitchTo() - // function returns. - bw.Balancer = newBalancer - return nil -} - -// Returns nil if the graceful switch balancer is closed. -func (gsb *Balancer) latestBalancer() *balancerWrapper { - gsb.mu.Lock() - defer gsb.mu.Unlock() - if gsb.balancerPending != nil { - return gsb.balancerPending - } - return gsb.balancerCurrent -} - -// UpdateClientConnState forwards the update to the latest balancer created. -func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { - // The resolver data is only relevant to the most recent LB Policy. - balToUpdate := gsb.latestBalancer() - if balToUpdate == nil { - return errBalancerClosed - } - // Perform this call without gsb.mu to prevent deadlocks if the child calls - // back into the channel. The latest balancer can never be closed during a - // call from the channel, even without gsb.mu held. - return balToUpdate.UpdateClientConnState(state) -} - -// ResolverError forwards the error to the latest balancer created. -func (gsb *Balancer) ResolverError(err error) { - // The resolver data is only relevant to the most recent LB Policy. 
- balToUpdate := gsb.latestBalancer() - if balToUpdate == nil { - return - } - // Perform this call without gsb.mu to prevent deadlocks if the child calls - // back into the channel. The latest balancer can never be closed during a - // call from the channel, even without gsb.mu held. - balToUpdate.ResolverError(err) -} - -// ExitIdle forwards the call to the latest balancer created. -// -// If the latest balancer does not support ExitIdle, the subConns are -// re-connected to manually. -func (gsb *Balancer) ExitIdle() { - balToUpdate := gsb.latestBalancer() - if balToUpdate == nil { - return - } - // There is no need to protect this read with a mutex, as the write to the - // Balancer field happens in SwitchTo, which completes before this can be - // called. - if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { - ei.ExitIdle() - return - } - gsb.mu.Lock() - defer gsb.mu.Unlock() - for sc := range balToUpdate.subconns { - sc.Connect() - } -} - -// updateSubConnState forwards the update to the appropriate child. -func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) { - gsb.currentMu.Lock() - defer gsb.currentMu.Unlock() - gsb.mu.Lock() - // Forward update to the appropriate child. Even if there is a pending - // balancer, the current balancer should continue to get SubConn updates to - // maintain the proper state while the pending is still connecting. - var balToUpdate *balancerWrapper - if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] { - balToUpdate = gsb.balancerCurrent - } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { - balToUpdate = gsb.balancerPending - } - if balToUpdate == nil { - // SubConn belonged to a stale lb policy that has not yet fully closed, - // or the balancer was already closed. - gsb.mu.Unlock() - return - } - if state.ConnectivityState == connectivity.Shutdown { - delete(balToUpdate.subconns, sc) - } - gsb.mu.Unlock() - if cb != nil { - cb(state) - } else { - balToUpdate.UpdateSubConnState(sc, state) - } -} - -// UpdateSubConnState forwards the update to the appropriate child. -func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - gsb.updateSubConnState(sc, state, nil) -} - -// Close closes any active child balancers. -func (gsb *Balancer) Close() { - gsb.mu.Lock() - gsb.closed = true - currentBalancerToClose := gsb.balancerCurrent - gsb.balancerCurrent = nil - pendingBalancerToClose := gsb.balancerPending - gsb.balancerPending = nil - gsb.mu.Unlock() - - currentBalancerToClose.Close() - pendingBalancerToClose.Close() -} - -// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer -// methods to help cleanup SubConns created by the wrapped balancer. -// -// It implements the balancer.ClientConn interface and is passed down in that -// capacity to the wrapped balancer. It maintains a set of subConns created by -// the wrapped balancer and calls from the latter to create/update/shutdown -// SubConns update this set before being forwarded to the parent ClientConn. -// State updates from the wrapped balancer can result in invocation of the -// graceful switch logic. -type balancerWrapper struct { - balancer.Balancer - gsb *Balancer - - lastState balancer.State - subconns map[balancer.SubConn]bool // subconns created by this balancer -} - -// Close closes the underlying LB policy and shuts down the subconns it -// created. 
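How grpc drives the switch above, as a hedged fragment: cc and state are assumed values, and the package is internal, so this shows the call pattern rather than importable user code.

```go
// cc is an assumed balancer.ClientConn; state is assumed resolver state.
gsb := gracefulswitch.NewBalancer(cc, balancer.BuildOptions{})

// The new policy is built as "pending" and promoted to "current" once it
// leaves Connecting (or the current policy leaves Ready); the displaced
// policy is then closed in the background.
if err := gsb.SwitchTo(balancer.Get("round_robin")); err != nil {
	return err
}

// Resolver updates and errors always flow to the most recent policy.
_ = gsb.UpdateClientConnState(balancer.ClientConnState{ResolverState: state})
```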
bw must not be referenced via balancerCurrent or balancerPending in -// gsb when called. gsb.mu must not be held. Does not panic with a nil -// receiver. -func (bw *balancerWrapper) Close() { - // before Close is called. - if bw == nil { - return - } - // There is no need to protect this read with a mutex, as Close() is - // impossible to be called concurrently with the write in SwitchTo(). The - // callsites of Close() for this balancer in Graceful Switch Balancer will - // never be called until SwitchTo() returns. - bw.Balancer.Close() - bw.gsb.mu.Lock() - for sc := range bw.subconns { - sc.Shutdown() - } - bw.gsb.mu.Unlock() -} - -func (bw *balancerWrapper) UpdateState(state balancer.State) { - // Hold the mutex for this entire call to ensure it cannot occur - // concurrently with other updateState() calls. This causes updates to - // lastState and calls to cc.UpdateState to happen atomically. - bw.gsb.mu.Lock() - defer bw.gsb.mu.Unlock() - bw.lastState = state - - if !bw.gsb.balancerCurrentOrPending(bw) { - return - } - - if bw == bw.gsb.balancerCurrent { - // In the case that the current balancer exits READY, and there is a pending - // balancer, you can forward the pending balancer's cached State up to - // ClientConn and swap the pending into the current. This is because there - // is no reason to gracefully switch from and keep using the old policy as - // the ClientConn is not connected to any backends. - if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil { - bw.gsb.swap() - return - } - // Even if there is a pending balancer waiting to be gracefully switched to, - // continue to forward current balancer updates to the Client Conn. Ignoring - // state + picker from the current would cause undefined behavior/cause the - // system to behave incorrectly from the current LB policies perspective. - // Also, the current LB is still being used by grpc to choose SubConns per - // RPC, and thus should use the most updated form of the current balancer. - bw.gsb.cc.UpdateState(state) - return - } - // This method is now dealing with a state update from the pending balancer. - // If the current balancer is currently in a state other than READY, the new - // policy can be swapped into place immediately. This is because there is no - // reason to gracefully switch from and keep using the old policy as the - // ClientConn is not connected to any backends. 
- if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready { - bw.gsb.swap() - } -} - -func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - bw.gsb.mu.Lock() - if !bw.gsb.balancerCurrentOrPending(bw) { - bw.gsb.mu.Unlock() - return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) - } - bw.gsb.mu.Unlock() - - var sc balancer.SubConn - oldListener := opts.StateListener - opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) } - sc, err := bw.gsb.cc.NewSubConn(addrs, opts) - if err != nil { - return nil, err - } - bw.gsb.mu.Lock() - if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call - sc.Shutdown() - bw.gsb.mu.Unlock() - return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) - } - bw.subconns[sc] = true - bw.gsb.mu.Unlock() - return sc, nil -} - -func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { - // Ignore ResolveNow requests from anything other than the most recent - // balancer, because older balancers were already removed from the config. - if bw != bw.gsb.latestBalancer() { - return - } - bw.gsb.cc.ResolveNow(opts) -} - -func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { - // Note: existing third party balancers may call this, so it must remain - // until RemoveSubConn is fully removed. - sc.Shutdown() -} - -func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { - bw.gsb.mu.Lock() - if !bw.gsb.balancerCurrentOrPending(bw) { - bw.gsb.mu.Unlock() - return - } - bw.gsb.mu.Unlock() - bw.gsb.cc.UpdateAddresses(sc, addrs) -} - -func (bw *balancerWrapper) Target() string { - return bw.gsb.cc.Target() -} diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go deleted file mode 100644 index 94a08d6875..0000000000 --- a/vendor/google.golang.org/grpc/internal/balancerload/load.go +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package balancerload defines APIs to parse server loads in trailers. The -// parsed loads are sent to balancers in DoneInfo. -package balancerload - -import ( - "google.golang.org/grpc/metadata" -) - -// Parser converts loads from metadata into a concrete type. -type Parser interface { - // Parse parses loads from metadata. - Parse(md metadata.MD) any -} - -var parser Parser - -// SetParser sets the load parser. -// -// Not mutex-protected, should be called before any gRPC functions. -func SetParser(lr Parser) { - parser = lr -} - -// Parse calls parser.Read(). 
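A sketch of installing a Parser via the hook above. The trailer key and parser type are hypothetical, and the import is internal to grpc, so this only compiles from inside the grpc module:

```go
package main

import (
	"google.golang.org/grpc/internal/balancerload"
	"google.golang.org/grpc/metadata"
)

// trailerParser is a hypothetical Parser: it lifts a made-up trailer key
// into DoneInfo for balancers. A real parser would decode a proto payload.
type trailerParser struct{}

func (trailerParser) Parse(md metadata.MD) any {
	if vals := md.Get("x-endpoint-load"); len(vals) > 0 { // assumed key
		return vals[0]
	}
	return nil
}

// SetParser is not synchronized, so it must run before any RPC activity;
// package init time is the safe spot.
func init() { balancerload.SetParser(trailerParser{}) }

func main() {}
```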
-func Parse(md metadata.MD) any { - if parser == nil { - return nil - } - return parser.Parse(md) -} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go deleted file mode 100644 index 755fdebc1b..0000000000 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ /dev/null @@ -1,192 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package binarylog implementation binary logging as defined in -// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md. -package binarylog - -import ( - "fmt" - "os" - - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/grpcutil" -) - -var grpclogLogger = grpclog.Component("binarylog") - -// Logger specifies MethodLoggers for method names with a Log call that -// takes a context. -// -// This is used in the 1.0 release of gcp/observability, and thus must not be -// deleted or changed. -type Logger interface { - GetMethodLogger(methodName string) MethodLogger -} - -// binLogger is the global binary logger for the binary. One of this should be -// built at init time from the configuration (environment variable or flags). -// -// It is used to get a MethodLogger for each individual method. -var binLogger Logger - -// SetLogger sets the binary logger. -// -// Only call this at init time. -func SetLogger(l Logger) { - binLogger = l -} - -// GetLogger gets the binary logger. -// -// Only call this at init time. -func GetLogger() Logger { - return binLogger -} - -// GetMethodLogger returns the MethodLogger for the given methodName. -// -// methodName should be in the format of "/service/method". -// -// Each MethodLogger returned by this method is a new instance. This is to -// generate sequence id within the call. -func GetMethodLogger(methodName string) MethodLogger { - if binLogger == nil { - return nil - } - return binLogger.GetMethodLogger(methodName) -} - -func init() { - const envStr = "GRPC_BINARY_LOG_FILTER" - configStr := os.Getenv(envStr) - binLogger = NewLoggerFromConfigString(configStr) -} - -// MethodLoggerConfig contains the setting for logging behavior of a method -// logger. Currently, it contains the max length of header and message. -type MethodLoggerConfig struct { - // Max length of header and message. - Header, Message uint64 -} - -// LoggerConfig contains the config for loggers to create method loggers. -type LoggerConfig struct { - All *MethodLoggerConfig - Services map[string]*MethodLoggerConfig - Methods map[string]*MethodLoggerConfig - - Blacklist map[string]struct{} -} - -type logger struct { - config LoggerConfig -} - -// NewLoggerFromConfig builds a logger with the given LoggerConfig. -func NewLoggerFromConfig(config LoggerConfig) Logger { - return &logger{config: config} -} - -// newEmptyLogger creates an empty logger. The map fields need to be filled in -// using the set* functions. 
-func newEmptyLogger() *logger { - return &logger{} -} - -// Set method logger for "*". -func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error { - if l.config.All != nil { - return fmt.Errorf("conflicting global rules found") - } - l.config.All = ml - return nil -} - -// Set method logger for "service/*". -// -// New MethodLogger with same service overrides the old one. -func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error { - if _, ok := l.config.Services[service]; ok { - return fmt.Errorf("conflicting service rules for service %v found", service) - } - if l.config.Services == nil { - l.config.Services = make(map[string]*MethodLoggerConfig) - } - l.config.Services[service] = ml - return nil -} - -// Set method logger for "service/method". -// -// New MethodLogger with same method overrides the old one. -func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error { - if _, ok := l.config.Blacklist[method]; ok { - return fmt.Errorf("conflicting blacklist rules for method %v found", method) - } - if _, ok := l.config.Methods[method]; ok { - return fmt.Errorf("conflicting method rules for method %v found", method) - } - if l.config.Methods == nil { - l.config.Methods = make(map[string]*MethodLoggerConfig) - } - l.config.Methods[method] = ml - return nil -} - -// Set blacklist method for "-service/method". -func (l *logger) setBlacklist(method string) error { - if _, ok := l.config.Blacklist[method]; ok { - return fmt.Errorf("conflicting blacklist rules for method %v found", method) - } - if _, ok := l.config.Methods[method]; ok { - return fmt.Errorf("conflicting method rules for method %v found", method) - } - if l.config.Blacklist == nil { - l.config.Blacklist = make(map[string]struct{}) - } - l.config.Blacklist[method] = struct{}{} - return nil -} - -// getMethodLogger returns the MethodLogger for the given methodName. -// -// methodName should be in the format of "/service/method". -// -// Each MethodLogger returned by this method is a new instance. This is to -// generate sequence id within the call. -func (l *logger) GetMethodLogger(methodName string) MethodLogger { - s, m, err := grpcutil.ParseMethod(methodName) - if err != nil { - grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) - return nil - } - if ml, ok := l.config.Methods[s+"/"+m]; ok { - return NewTruncatingMethodLogger(ml.Header, ml.Message) - } - if _, ok := l.config.Blacklist[s+"/"+m]; ok { - return nil - } - if ml, ok := l.config.Services[s]; ok { - return NewTruncatingMethodLogger(ml.Header, ml.Message) - } - if l.config.All == nil { - return nil - } - return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message) -} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go deleted file mode 100644 index 1ee00a39ac..0000000000 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go +++ /dev/null @@ -1,42 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// This file contains exported variables/functions that are exported for testing -// only. -// -// An ideal way for this would be to put those in a *_test.go but in binarylog -// package. But this doesn't work with staticcheck with go module. Error was: -// "MdToMetadataProto not declared by package binarylog". This could be caused -// by the way staticcheck looks for files for a certain package, which doesn't -// support *_test.go files. -// -// Move those to binary_test.go when staticcheck is fixed. - -package binarylog - -var ( - // AllLogger is a logger that logs all headers/messages for all RPCs. It's - // for testing only. - AllLogger = NewLoggerFromConfigString("*") - // MdToMetadataProto converts metadata to a binary logging proto message. - // It's for testing only. - MdToMetadataProto = mdToMetadataProto - // AddrToProto converts an address to a binary logging proto message. It's - // for testing only. - AddrToProto = addrToProto -) diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go deleted file mode 100644 index f9e80e27ab..0000000000 --- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ /dev/null @@ -1,208 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package binarylog - -import ( - "errors" - "fmt" - "regexp" - "strconv" - "strings" -) - -// NewLoggerFromConfigString reads the string and build a logger. It can be used -// to build a new logger and assign it to binarylog.Logger. -// -// Example filter config strings: -// - "" Nothing will be logged -// - "*" All headers and messages will be fully logged. -// - "*{h}" Only headers will be logged. -// - "*{m:256}" Only the first 256 bytes of each message will be logged. -// - "Foo/*" Logs every method in service Foo -// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar -// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method -// /Foo/Bar, logs all headers and messages in every other method in service -// Foo. -// -// If two configs exist for one certain method or service, the one specified -// later overrides the previous config. 
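The lookup precedence GetMethodLogger applies to these configs is: exact method rule first, then the blacklist, then the per-service rule, then the "*" default. A self-contained sketch of that precedence with simplified stand-in types:

```go
package main

import "fmt"

type cfg struct{ header, message uint64 }

type loggerConfig struct {
	all       *cfg
	services  map[string]*cfg
	methods   map[string]*cfg
	blacklist map[string]struct{}
}

// lookup mirrors the deleted GetMethodLogger: method rule, blacklist,
// service rule, then the "*" default (nil means logging is disabled).
func (c *loggerConfig) lookup(service, method string) *cfg {
	full := service + "/" + method
	if ml, ok := c.methods[full]; ok {
		return ml
	}
	if _, ok := c.blacklist[full]; ok {
		return nil // explicitly excluded
	}
	if ml, ok := c.services[service]; ok {
		return ml
	}
	return c.all
}

func main() {
	// Equivalent of the config string "Foo/*,-Foo/Bar".
	c := &loggerConfig{
		services:  map[string]*cfg{"Foo": {256, 256}},
		blacklist: map[string]struct{}{"Foo/Bar": {}},
	}
	fmt.Println(c.lookup("Foo", "Baz")) // &{256 256}: service rule applies
	fmt.Println(c.lookup("Foo", "Bar")) // <nil>: blacklisted
}
```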
-func NewLoggerFromConfigString(s string) Logger { - if s == "" { - return nil - } - l := newEmptyLogger() - methods := strings.Split(s, ",") - for _, method := range methods { - if err := l.fillMethodLoggerWithConfigString(method); err != nil { - grpclogLogger.Warningf("failed to parse binary log config: %v", err) - return nil - } - } - return l -} - -// fillMethodLoggerWithConfigString parses config, creates TruncatingMethodLogger and adds -// it to the right map in the logger. -func (l *logger) fillMethodLoggerWithConfigString(config string) error { - // "" is invalid. - if config == "" { - return errors.New("empty string is not a valid method binary logging config") - } - - // "-service/method", blacklist, no * or {} allowed. - if config[0] == '-' { - s, m, suffix, err := parseMethodConfigAndSuffix(config[1:]) - if err != nil { - return fmt.Errorf("invalid config: %q, %v", config, err) - } - if m == "*" { - return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config") - } - if suffix != "" { - return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config") - } - if err := l.setBlacklist(s + "/" + m); err != nil { - return fmt.Errorf("invalid config: %v", err) - } - return nil - } - - // "*{h:256;m:256}" - if config[0] == '*' { - hdr, msg, err := parseHeaderMessageLengthConfig(config[1:]) - if err != nil { - return fmt.Errorf("invalid config: %q, %v", config, err) - } - if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { - return fmt.Errorf("invalid config: %v", err) - } - return nil - } - - s, m, suffix, err := parseMethodConfigAndSuffix(config) - if err != nil { - return fmt.Errorf("invalid config: %q, %v", config, err) - } - hdr, msg, err := parseHeaderMessageLengthConfig(suffix) - if err != nil { - return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) - } - if m == "*" { - if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { - return fmt.Errorf("invalid config: %v", err) - } - } else { - if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { - return fmt.Errorf("invalid config: %v", err) - } - } - return nil -} - -const ( - // TODO: this const is only used by env_config now. But could be useful for - // other config. Move to binarylog.go if necessary. - maxUInt = ^uint64(0) - - // For "p.s/m" plus any suffix. Suffix will be parsed again. See test for - // expected output. - longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$` - - // For suffix from above, "{h:123,m:123}". See test for expected output. - optionalLengthRegexpStr = `(?::(\d+))?` // Optional ":123". - headerConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `}$` - messageConfigRegexpStr = `^{m` + optionalLengthRegexpStr + `}$` - headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$` -) - -var ( - longMethodConfigRegexp = regexp.MustCompile(longMethodConfigRegexpStr) - headerConfigRegexp = regexp.MustCompile(headerConfigRegexpStr) - messageConfigRegexp = regexp.MustCompile(messageConfigRegexpStr) - headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr) -) - -// Turn "service/method{h;m}" into "service", "method", "{h;m}". 
-func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) { - // Regexp result: - // - // in: "p.s/m{h:123,m:123}", - // out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"}, - match := longMethodConfigRegexp.FindStringSubmatch(c) - if match == nil { - return "", "", "", fmt.Errorf("%q contains invalid substring", c) - } - service = match[1] - method = match[2] - suffix = match[3] - return -} - -// Turn "{h:123;m:345}" into 123, 345. -// -// Return maxUInt if length is unspecified. -func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err error) { - if c == "" { - return maxUInt, maxUInt, nil - } - // Header config only. - if match := headerConfigRegexp.FindStringSubmatch(c); match != nil { - if s := match[1]; s != "" { - hdrLenStr, err = strconv.ParseUint(s, 10, 64) - if err != nil { - return 0, 0, fmt.Errorf("failed to convert %q to uint", s) - } - return hdrLenStr, 0, nil - } - return maxUInt, 0, nil - } - - // Message config only. - if match := messageConfigRegexp.FindStringSubmatch(c); match != nil { - if s := match[1]; s != "" { - msgLenStr, err = strconv.ParseUint(s, 10, 64) - if err != nil { - return 0, 0, fmt.Errorf("failed to convert %q to uint", s) - } - return 0, msgLenStr, nil - } - return 0, maxUInt, nil - } - - // Header and message config both. - if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil { - // Both hdr and msg are specified, but one or two of them might be empty. - hdrLenStr = maxUInt - msgLenStr = maxUInt - if s := match[1]; s != "" { - hdrLenStr, err = strconv.ParseUint(s, 10, 64) - if err != nil { - return 0, 0, fmt.Errorf("failed to convert %q to uint", s) - } - } - if s := match[2]; s != "" { - msgLenStr, err = strconv.ParseUint(s, 10, 64) - if err != nil { - return 0, 0, fmt.Errorf("failed to convert %q to uint", s) - } - } - return hdrLenStr, msgLenStr, nil - } - return 0, 0, fmt.Errorf("%q contains invalid substring", c) -} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go deleted file mode 100644 index e8456a77c2..0000000000 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ /dev/null @@ -1,446 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package binarylog - -import ( - "context" - "net" - "strings" - "sync/atomic" - "time" - - binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/durationpb" - "google.golang.org/protobuf/types/known/timestamppb" -) - -type callIDGenerator struct { - id uint64 -} - -func (g *callIDGenerator) next() uint64 { - id := atomic.AddUint64(&g.id, 1) - return id -} - -// reset is for testing only, and doesn't need to be thread safe. 
-func (g *callIDGenerator) reset() { - g.id = 0 -} - -var idGen callIDGenerator - -// MethodLogger is the sub-logger for each method. -// -// This is used in the 1.0 release of gcp/observability, and thus must not be -// deleted or changed. -type MethodLogger interface { - Log(context.Context, LogEntryConfig) -} - -// TruncatingMethodLogger is a method logger that truncates headers and messages -// based on configured fields. -type TruncatingMethodLogger struct { - headerMaxLen, messageMaxLen uint64 - - callID uint64 - idWithinCallGen *callIDGenerator - - sink Sink // TODO(blog): make this plugable. -} - -// NewTruncatingMethodLogger returns a new truncating method logger. -// -// This is used in the 1.0 release of gcp/observability, and thus must not be -// deleted or changed. -func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { - return &TruncatingMethodLogger{ - headerMaxLen: h, - messageMaxLen: m, - - callID: idGen.next(), - idWithinCallGen: &callIDGenerator{}, - - sink: DefaultSink, // TODO(blog): make it plugable. - } -} - -// Build is an internal only method for building the proto message out of the -// input event. It's made public to enable other library to reuse as much logic -// in TruncatingMethodLogger as possible. -func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry { - m := c.toProto() - timestamp := timestamppb.Now() - m.Timestamp = timestamp - m.CallId = ml.callID - m.SequenceIdWithinCall = ml.idWithinCallGen.next() - - switch pay := m.Payload.(type) { - case *binlogpb.GrpcLogEntry_ClientHeader: - m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata()) - case *binlogpb.GrpcLogEntry_ServerHeader: - m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata()) - case *binlogpb.GrpcLogEntry_Message: - m.PayloadTruncated = ml.truncateMessage(pay.Message) - } - return m -} - -// Log creates a proto binary log entry, and logs it to the sink. -func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) { - ml.sink.Write(ml.Build(c)) -} - -func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) { - if ml.headerMaxLen == maxUInt { - return false - } - var ( - bytesLimit = ml.headerMaxLen - index int - ) - // At the end of the loop, index will be the first entry where the total - // size is greater than the limit: - // - // len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr. - for ; index < len(mdPb.Entry); index++ { - entry := mdPb.Entry[index] - if entry.Key == "grpc-trace-bin" { - // "grpc-trace-bin" is a special key. It's kept in the log entry, - // but not counted towards the size limit. - continue - } - currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue())) - if currentEntryLen > bytesLimit { - break - } - bytesLimit -= currentEntryLen - } - truncated = index < len(mdPb.Entry) - mdPb.Entry = mdPb.Entry[:index] - return truncated -} - -func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) { - if ml.messageMaxLen == maxUInt { - return false - } - if ml.messageMaxLen >= uint64(len(msgPb.Data)) { - return false - } - msgPb.Data = msgPb.Data[:ml.messageMaxLen] - return true -} - -// LogEntryConfig represents the configuration for binary log entry. -// -// This is used in the 1.0 release of gcp/observability, and thus must not be -// deleted or changed. 
-type LogEntryConfig interface { - toProto() *binlogpb.GrpcLogEntry -} - -// ClientHeader configs the binary log entry to be a ClientHeader entry. -type ClientHeader struct { - OnClientSide bool - Header metadata.MD - MethodName string - Authority string - Timeout time.Duration - // PeerAddr is required only when it's on server side. - PeerAddr net.Addr -} - -func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry { - // This function doesn't need to set all the fields (e.g. seq ID). The Log - // function will set the fields when necessary. - clientHeader := &binlogpb.ClientHeader{ - Metadata: mdToMetadataProto(c.Header), - MethodName: c.MethodName, - Authority: c.Authority, - } - if c.Timeout > 0 { - clientHeader.Timeout = durationpb.New(c.Timeout) - } - ret := &binlogpb.GrpcLogEntry{ - Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, - Payload: &binlogpb.GrpcLogEntry_ClientHeader{ - ClientHeader: clientHeader, - }, - } - if c.OnClientSide { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT - } else { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER - } - if c.PeerAddr != nil { - ret.Peer = addrToProto(c.PeerAddr) - } - return ret -} - -// ServerHeader configs the binary log entry to be a ServerHeader entry. -type ServerHeader struct { - OnClientSide bool - Header metadata.MD - // PeerAddr is required only when it's on client side. - PeerAddr net.Addr -} - -func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry { - ret := &binlogpb.GrpcLogEntry{ - Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, - Payload: &binlogpb.GrpcLogEntry_ServerHeader{ - ServerHeader: &binlogpb.ServerHeader{ - Metadata: mdToMetadataProto(c.Header), - }, - }, - } - if c.OnClientSide { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT - } else { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER - } - if c.PeerAddr != nil { - ret.Peer = addrToProto(c.PeerAddr) - } - return ret -} - -// ClientMessage configs the binary log entry to be a ClientMessage entry. -type ClientMessage struct { - OnClientSide bool - // Message can be a proto.Message or []byte. Other messages formats are not - // supported. - Message any -} - -func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { - var ( - data []byte - err error - ) - if m, ok := c.Message.(proto.Message); ok { - data, err = proto.Marshal(m) - if err != nil { - grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err) - } - } else if b, ok := c.Message.([]byte); ok { - data = b - } else { - grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") - } - ret := &binlogpb.GrpcLogEntry{ - Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, - Payload: &binlogpb.GrpcLogEntry_Message{ - Message: &binlogpb.Message{ - Length: uint32(len(data)), - Data: data, - }, - }, - } - if c.OnClientSide { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT - } else { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER - } - return ret -} - -// ServerMessage configs the binary log entry to be a ServerMessage entry. -type ServerMessage struct { - OnClientSide bool - // Message can be a proto.Message or []byte. Other messages formats are not - // supported. 
- Message any -} - -func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { - var ( - data []byte - err error - ) - if m, ok := c.Message.(proto.Message); ok { - data, err = proto.Marshal(m) - if err != nil { - grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err) - } - } else if b, ok := c.Message.([]byte); ok { - data = b - } else { - grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") - } - ret := &binlogpb.GrpcLogEntry{ - Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, - Payload: &binlogpb.GrpcLogEntry_Message{ - Message: &binlogpb.Message{ - Length: uint32(len(data)), - Data: data, - }, - }, - } - if c.OnClientSide { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT - } else { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER - } - return ret -} - -// ClientHalfClose configs the binary log entry to be a ClientHalfClose entry. -type ClientHalfClose struct { - OnClientSide bool -} - -func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry { - ret := &binlogpb.GrpcLogEntry{ - Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, - Payload: nil, // No payload here. - } - if c.OnClientSide { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT - } else { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER - } - return ret -} - -// ServerTrailer configs the binary log entry to be a ServerTrailer entry. -type ServerTrailer struct { - OnClientSide bool - Trailer metadata.MD - // Err is the status error. - Err error - // PeerAddr is required only when it's on client side and the RPC is trailer - // only. - PeerAddr net.Addr -} - -func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry { - st, ok := status.FromError(c.Err) - if !ok { - grpclogLogger.Info("binarylogging: error in trailer is not a status error") - } - var ( - detailsBytes []byte - err error - ) - stProto := st.Proto() - if stProto != nil && len(stProto.Details) != 0 { - detailsBytes, err = proto.Marshal(stProto) - if err != nil { - grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err) - } - } - ret := &binlogpb.GrpcLogEntry{ - Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, - Payload: &binlogpb.GrpcLogEntry_Trailer{ - Trailer: &binlogpb.Trailer{ - Metadata: mdToMetadataProto(c.Trailer), - StatusCode: uint32(st.Code()), - StatusMessage: st.Message(), - StatusDetails: detailsBytes, - }, - }, - } - if c.OnClientSide { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT - } else { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER - } - if c.PeerAddr != nil { - ret.Peer = addrToProto(c.PeerAddr) - } - return ret -} - -// Cancel configs the binary log entry to be a Cancel entry. -type Cancel struct { - OnClientSide bool -} - -func (c *Cancel) toProto() *binlogpb.GrpcLogEntry { - ret := &binlogpb.GrpcLogEntry{ - Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL, - Payload: nil, - } - if c.OnClientSide { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT - } else { - ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER - } - return ret -} - -// metadataKeyOmit returns whether the metadata entry with this key should be -// omitted. -func metadataKeyOmit(key string) bool { - switch key { - case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te": - return true - case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users. 
- return false - } - return strings.HasPrefix(key, "grpc-") -} - -func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata { - ret := &binlogpb.Metadata{} - for k, vv := range md { - if metadataKeyOmit(k) { - continue - } - for _, v := range vv { - ret.Entry = append(ret.Entry, - &binlogpb.MetadataEntry{ - Key: k, - Value: []byte(v), - }, - ) - } - } - return ret -} - -func addrToProto(addr net.Addr) *binlogpb.Address { - ret := &binlogpb.Address{} - switch a := addr.(type) { - case *net.TCPAddr: - if a.IP.To4() != nil { - ret.Type = binlogpb.Address_TYPE_IPV4 - } else if a.IP.To16() != nil { - ret.Type = binlogpb.Address_TYPE_IPV6 - } else { - ret.Type = binlogpb.Address_TYPE_UNKNOWN - // Do not set address and port fields. - break - } - ret.Address = a.IP.String() - ret.IpPort = uint32(a.Port) - case *net.UnixAddr: - ret.Type = binlogpb.Address_TYPE_UNIX - ret.Address = a.String() - default: - ret.Type = binlogpb.Address_TYPE_UNKNOWN - } - return ret -} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go deleted file mode 100644 index 9ea598b14c..0000000000 --- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go +++ /dev/null @@ -1,170 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package binarylog - -import ( - "bufio" - "encoding/binary" - "io" - "sync" - "time" - - binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" - "google.golang.org/protobuf/proto" -) - -var ( - // DefaultSink is the sink where the logs will be written to. It's exported - // for the binarylog package to update. - DefaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp). -) - -// Sink writes log entry into the binary log sink. -// -// sink is a copy of the exported binarylog.Sink, to avoid circular dependency. -type Sink interface { - // Write will be called to write the log entry into the sink. - // - // It should be thread-safe so it can be called in parallel. - Write(*binlogpb.GrpcLogEntry) error - // Close will be called when the Sink is replaced by a new Sink. - Close() error -} - -type noopSink struct{} - -func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil } -func (ns *noopSink) Close() error { return nil } - -// newWriterSink creates a binary log sink with the given writer. -// -// Write() marshals the proto message and writes it to the given writer. Each -// message is prefixed with a 4 byte big endian unsigned integer as the length. -// -// No buffer is done, Close() doesn't try to close the writer. 
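That frame format is easy to consume: each entry is a 4-byte big-endian length followed by a serialized GrpcLogEntry proto. A sketch that reads entries back from a dump file; the filename is a placeholder:

```go
package main

import (
	"encoding/binary"
	"io"
	"log"
	"os"

	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
	"google.golang.org/protobuf/proto"
)

func main() {
	f, err := os.Open("grpc_binarylog.bin") // placeholder dump file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	hdr := make([]byte, 4)
	for {
		if _, err := io.ReadFull(f, hdr); err != nil {
			break // io.EOF at a frame boundary ends the stream
		}
		buf := make([]byte, binary.BigEndian.Uint32(hdr))
		if _, err := io.ReadFull(f, buf); err != nil {
			log.Fatal(err)
		}
		entry := new(binlogpb.GrpcLogEntry)
		if err := proto.Unmarshal(buf, entry); err != nil {
			log.Fatal(err)
		}
		log.Printf("call %d seq %d type %v", entry.GetCallId(),
			entry.GetSequenceIdWithinCall(), entry.GetType())
	}
}
```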
-func newWriterSink(w io.Writer) Sink { - return &writerSink{out: w} -} - -type writerSink struct { - out io.Writer -} - -func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error { - b, err := proto.Marshal(e) - if err != nil { - grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err) - return err - } - hdr := make([]byte, 4) - binary.BigEndian.PutUint32(hdr, uint32(len(b))) - if _, err := ws.out.Write(hdr); err != nil { - return err - } - if _, err := ws.out.Write(b); err != nil { - return err - } - return nil -} - -func (ws *writerSink) Close() error { return nil } - -type bufferedSink struct { - mu sync.Mutex - closer io.Closer - out Sink // out is built on buf. - buf *bufio.Writer // buf is kept for flush. - flusherStarted bool - - writeTicker *time.Ticker - done chan struct{} -} - -func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error { - fs.mu.Lock() - defer fs.mu.Unlock() - if !fs.flusherStarted { - // Start the write loop when Write is called. - fs.startFlushGoroutine() - fs.flusherStarted = true - } - if err := fs.out.Write(e); err != nil { - return err - } - return nil -} - -const ( - bufFlushDuration = 60 * time.Second -) - -func (fs *bufferedSink) startFlushGoroutine() { - fs.writeTicker = time.NewTicker(bufFlushDuration) - go func() { - for { - select { - case <-fs.done: - return - case <-fs.writeTicker.C: - } - fs.mu.Lock() - if err := fs.buf.Flush(); err != nil { - grpclogLogger.Warningf("failed to flush to Sink: %v", err) - } - fs.mu.Unlock() - } - }() -} - -func (fs *bufferedSink) Close() error { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.writeTicker != nil { - fs.writeTicker.Stop() - } - close(fs.done) - if err := fs.buf.Flush(); err != nil { - grpclogLogger.Warningf("failed to flush to Sink: %v", err) - } - if err := fs.closer.Close(); err != nil { - grpclogLogger.Warningf("failed to close the underlying WriterCloser: %v", err) - } - if err := fs.out.Close(); err != nil { - grpclogLogger.Warningf("failed to close the Sink: %v", err) - } - return nil -} - -// NewBufferedSink creates a binary log sink with the given WriteCloser. -// -// Write() marshals the proto message and writes it to the given writer. Each -// message is prefixed with a 4 byte big endian unsigned integer as the length. -// -// Content is kept in a buffer, and is flushed every 60 seconds. -// -// Close closes the WriteCloser. -func NewBufferedSink(o io.WriteCloser) Sink { - bufW := bufio.NewWriter(o) - return &bufferedSink{ - closer: o, - out: newWriterSink(bufW), - buf: bufW, - done: make(chan struct{}), - } -} diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go deleted file mode 100644 index 11f91668ac..0000000000 --- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package buffer provides an implementation of an unbounded buffer. 
-package buffer - -import ( - "errors" - "sync" -) - -// Unbounded is an implementation of an unbounded buffer which does not use -// extra goroutines. This is typically used for passing updates from one entity -// to another within gRPC. -// -// All methods on this type are thread-safe and don't block on anything except -// the underlying mutex used for synchronization. -// -// Unbounded supports values of any type to be stored in it by using a channel -// of `any`. This means that a call to Put() incurs an extra memory allocation, -// and also that users need a type assertion while reading. For performance -// critical code paths, using Unbounded is strongly discouraged and defining a -// new type specific implementation of this buffer is preferred. See -// internal/transport/transport.go for an example of this. -type Unbounded struct { - c chan any - closed bool - closing bool - mu sync.Mutex - backlog []any -} - -// NewUnbounded returns a new instance of Unbounded. -func NewUnbounded() *Unbounded { - return &Unbounded{c: make(chan any, 1)} -} - -var errBufferClosed = errors.New("Put called on closed buffer.Unbounded") - -// Put adds t to the unbounded buffer. -func (b *Unbounded) Put(t any) error { - b.mu.Lock() - defer b.mu.Unlock() - if b.closing { - return errBufferClosed - } - if len(b.backlog) == 0 { - select { - case b.c <- t: - return nil - default: - } - } - b.backlog = append(b.backlog, t) - return nil -} - -// Load sends the earliest buffered data, if any, onto the read channel returned -// by Get(). Users are expected to call this every time they successfully read a -// value from the read channel. -func (b *Unbounded) Load() { - b.mu.Lock() - defer b.mu.Unlock() - if len(b.backlog) > 0 { - select { - case b.c <- b.backlog[0]: - b.backlog[0] = nil - b.backlog = b.backlog[1:] - default: - } - } else if b.closing && !b.closed { - close(b.c) - } -} - -// Get returns a read channel on which values added to the buffer, via Put(), -// are sent on. -// -// Upon reading a value from this channel, users are expected to call Load() to -// send the next buffered value onto the channel if there is any. -// -// If the unbounded buffer is closed, the read channel returned by this method -// is closed after all data is drained. -func (b *Unbounded) Get() <-chan any { - return b.c -} - -// Close closes the unbounded buffer. No subsequent data may be Put(), and the -// channel returned from Get() will be closed after all the data is read and -// Load() is called for the final time. -func (b *Unbounded) Close() { - b.mu.Lock() - defer b.mu.Unlock() - if b.closing { - return - } - b.closing = true - if len(b.backlog) == 0 { - b.closed = true - close(b.c) - } -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go deleted file mode 100644 index fc094f3441..0000000000 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ /dev/null @@ -1,763 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
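The Put/Get/Load contract of the buffer above in one self-contained loop; the import is internal to grpc, so outside the module treat this as the intended call pattern only:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/internal/buffer" // internal: compiles only inside the grpc module
)

func main() {
	b := buffer.NewUnbounded()
	go func() {
		for i := 0; i < 3; i++ {
			_ = b.Put(i) // Put only errors once Close has been called
		}
		b.Close()
	}()
	// Each read must be followed by Load() so the next backlogged value is
	// moved onto the channel; after Close and a full drain, Load closes
	// the channel and the range ends.
	for v := range b.Get() {
		b.Load()
		fmt.Println(v.(int))
	}
}
```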
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package channelz defines APIs for enabling channelz service, entry -// registration/deletion, and accessing channelz data. It also defines channelz -// metric struct formats. -// -// All APIs in this package are experimental. -package channelz - -import ( - "errors" - "sort" - "sync" - "sync/atomic" - "time" - - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" -) - -const ( - defaultMaxTraceEntry int32 = 30 -) - -var ( - // IDGen is the global channelz entity ID generator. It should not be used - // outside this package except by tests. - IDGen IDGenerator - - db dbWrapper - // EntryPerPage defines the number of channelz entries to be shown on a web page. - EntryPerPage = int64(50) - curState int32 - maxTraceEntry = defaultMaxTraceEntry -) - -// TurnOn turns on channelz data collection. -func TurnOn() { - if !IsOn() { - db.set(newChannelMap()) - IDGen.Reset() - atomic.StoreInt32(&curState, 1) - } -} - -func init() { - internal.ChannelzTurnOffForTesting = func() { - atomic.StoreInt32(&curState, 0) - } -} - -// IsOn returns whether channelz data collection is on. -func IsOn() bool { - return atomic.LoadInt32(&curState) == 1 -} - -// SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). -// Setting it to 0 will disable channel tracing. -func SetMaxTraceEntry(i int32) { - atomic.StoreInt32(&maxTraceEntry, i) -} - -// ResetMaxTraceEntryToDefault resets the maximum number of trace entry per entity to default. -func ResetMaxTraceEntryToDefault() { - atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry) -} - -func getMaxTraceEntry() int { - i := atomic.LoadInt32(&maxTraceEntry) - return int(i) -} - -// dbWarpper wraps around a reference to internal channelz data storage, and -// provide synchronized functionality to set and get the reference. -type dbWrapper struct { - mu sync.RWMutex - DB *channelMap -} - -func (d *dbWrapper) set(db *channelMap) { - d.mu.Lock() - d.DB = db - d.mu.Unlock() -} - -func (d *dbWrapper) get() *channelMap { - d.mu.RLock() - defer d.mu.RUnlock() - return d.DB -} - -// GetTopChannels returns a slice of top channel's ChannelMetric, along with a -// boolean indicating whether there's more top channels to be queried for. -// -// The arg id specifies that only top channel with id at or above it will be included -// in the result. The returned slice is up to a length of the arg maxResults or -// EntryPerPage if maxResults is zero, and is sorted in ascending id order. -func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { - return db.get().GetTopChannels(id, maxResults) -} - -// GetServers returns a slice of server's ServerMetric, along with a -// boolean indicating whether there's more servers to be queried for. -// -// The arg id specifies that only server with id at or above it will be included -// in the result. The returned slice is up to a length of the arg maxResults or -// EntryPerPage if maxResults is zero, and is sorted in ascending id order. -func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) { - return db.get().GetServers(id, maxResults) -} - -// GetServerSockets returns a slice of server's (identified by id) normal socket's -// SocketMetric, along with a boolean indicating whether there's more sockets to -// be queried for. -// -// The arg startID specifies that only sockets with id at or above it will be -// included in the result. 
The returned slice is up to a length of the arg maxResults -// or EntryPerPage if maxResults is zero, and is sorted in ascending id order. -func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { - return db.get().GetServerSockets(id, startID, maxResults) -} - -// GetChannel returns the ChannelMetric for the channel (identified by id). -func GetChannel(id int64) *ChannelMetric { - return db.get().GetChannel(id) -} - -// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id). -func GetSubChannel(id int64) *SubChannelMetric { - return db.get().GetSubChannel(id) -} - -// GetSocket returns the SocketInternalMetric for the socket (identified by id). -func GetSocket(id int64) *SocketMetric { - return db.get().GetSocket(id) -} - -// GetServer returns the ServerMetric for the server (identified by id). -func GetServer(id int64) *ServerMetric { - return db.get().GetServer(id) -} - -// RegisterChannel registers the given channel c in the channelz database with -// ref as its reference name, and adds it to the child list of its parent -// (identified by pid). pid == nil means no parent. -// -// Returns a unique channelz identifier assigned to this channel. -// -// If channelz is not turned ON, the channelz database is not mutated. -func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { - id := IDGen.genID() - var parent int64 - isTopChannel := true - if pid != nil { - isTopChannel = false - parent = pid.Int() - } - - if !IsOn() { - return newIdentifer(RefChannel, id, pid) - } - - cn := &channel{ - refName: ref, - c: c, - subChans: make(map[int64]string), - nestedChans: make(map[int64]string), - id: id, - pid: parent, - trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, - } - db.get().addChannel(id, cn, isTopChannel, parent) - return newIdentifer(RefChannel, id, pid) -} - -// RegisterSubChannel registers the given subChannel c in the channelz database -// with ref as its reference name, and adds it to the child list of its parent -// (identified by pid). -// -// Returns a unique channelz identifier assigned to this subChannel. -// -// If channelz is not turned ON, the channelz database is not mutated. -func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) { - if pid == nil { - return nil, errors.New("a SubChannel's parent id cannot be nil") - } - id := IDGen.genID() - if !IsOn() { - return newIdentifer(RefSubChannel, id, pid), nil - } - - sc := &subChannel{ - refName: ref, - c: c, - sockets: make(map[int64]string), - id: id, - pid: pid.Int(), - trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, - } - db.get().addSubChannel(id, sc, pid.Int()) - return newIdentifer(RefSubChannel, id, pid), nil -} - -// RegisterServer registers the given server s in channelz database. It returns -// the unique channelz tracking id assigned to this server. -// -// If channelz is not turned ON, the channelz database is not mutated. 
-func RegisterServer(s Server, ref string) *Identifier { - id := IDGen.genID() - if !IsOn() { - return newIdentifer(RefServer, id, nil) - } - - svr := &server{ - refName: ref, - s: s, - sockets: make(map[int64]string), - listenSockets: make(map[int64]string), - id: id, - } - db.get().addServer(id, svr) - return newIdentifer(RefServer, id, nil) -} - -// RegisterListenSocket registers the given listen socket s in channelz database -// with ref as its reference name, and adds it to the child list of its parent -// (identified by pid). It returns the unique channelz tracking id assigned to -// this listen socket. -// -// If channelz is not turned ON, the channelz database is not mutated. -func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { - if pid == nil { - return nil, errors.New("a ListenSocket's parent id cannot be 0") - } - id := IDGen.genID() - if !IsOn() { - return newIdentifer(RefListenSocket, id, pid), nil - } - - ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()} - db.get().addListenSocket(id, ls, pid.Int()) - return newIdentifer(RefListenSocket, id, pid), nil -} - -// RegisterNormalSocket registers the given normal socket s in channelz database -// with ref as its reference name, and adds it to the child list of its parent -// (identified by pid). It returns the unique channelz tracking id assigned to -// this normal socket. -// -// If channelz is not turned ON, the channelz database is not mutated. -func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { - if pid == nil { - return nil, errors.New("a NormalSocket's parent id cannot be 0") - } - id := IDGen.genID() - if !IsOn() { - return newIdentifer(RefNormalSocket, id, pid), nil - } - - ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()} - db.get().addNormalSocket(id, ns, pid.Int()) - return newIdentifer(RefNormalSocket, id, pid), nil -} - -// RemoveEntry removes the entry with the given unique channelz tracking id from -// the channelz database. -// -// If channelz is not turned ON, this function is a no-op. -func RemoveEntry(id *Identifier) { - if !IsOn() { - return - } - db.get().removeEntry(id.Int()) -} - -// TraceEventDesc is what the caller of AddTraceEvent should provide to describe -// the event to be added to the channel trace. -// -// The Parent field is optional. It is used for an event that will be recorded -// in the entity's parent trace. -type TraceEventDesc struct { - Desc string - Severity Severity - Parent *TraceEventDesc -} - -// AddTraceEvent adds a trace event for the entity with the specified id, using the -// provided TraceEventDesc. -// -// If channelz is not turned ON, this will simply log the event descriptions. -func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) { - // Log only the trace description associated with the bottommost entity. - switch desc.Severity { - case CtUnknown, CtInfo: - l.InfoDepth(depth+1, withParens(id)+desc.Desc) - case CtWarning: - l.WarningDepth(depth+1, withParens(id)+desc.Desc) - case CtError: - l.ErrorDepth(depth+1, withParens(id)+desc.Desc) - } - - if getMaxTraceEntry() == 0 { - return - } - if IsOn() { - db.get().traceEvent(id.Int(), desc) - } -} - -// channelMap is the storage data structure for channelz. -// Methods of channelMap can be divided into two categories with respect to locking. -// 1. Methods that acquire the global lock. -// 2. Methods that can only be called when the global lock is held.
-// A method of the second type must always be called from inside a method of the first type. -type channelMap struct { - mu sync.RWMutex - topLevelChannels map[int64]struct{} - servers map[int64]*server - channels map[int64]*channel - subChannels map[int64]*subChannel - listenSockets map[int64]*listenSocket - normalSockets map[int64]*normalSocket -} - -func newChannelMap() *channelMap { - return &channelMap{ - topLevelChannels: make(map[int64]struct{}), - channels: make(map[int64]*channel), - listenSockets: make(map[int64]*listenSocket), - normalSockets: make(map[int64]*normalSocket), - servers: make(map[int64]*server), - subChannels: make(map[int64]*subChannel), - } -} - -func (c *channelMap) addServer(id int64, s *server) { - c.mu.Lock() - s.cm = c - c.servers[id] = s - c.mu.Unlock() -} - -func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) { - c.mu.Lock() - cn.cm = c - cn.trace.cm = c - c.channels[id] = cn - if isTopChannel { - c.topLevelChannels[id] = struct{}{} - } else { - c.findEntry(pid).addChild(id, cn) - } - c.mu.Unlock() -} - -func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) { - c.mu.Lock() - sc.cm = c - sc.trace.cm = c - c.subChannels[id] = sc - c.findEntry(pid).addChild(id, sc) - c.mu.Unlock() -} - -func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) { - c.mu.Lock() - ls.cm = c - c.listenSockets[id] = ls - c.findEntry(pid).addChild(id, ls) - c.mu.Unlock() -} - -func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) { - c.mu.Lock() - ns.cm = c - c.normalSockets[id] = ns - c.findEntry(pid).addChild(id, ns) - c.mu.Unlock() -} - -// removeEntry triggers the removal of an entry, which may not actually delete the entry if it has to -// wait on the deletion of its children, or until no other entity's channel trace references it. -// It may lead to a chain of entry deletions. For example, deleting the last socket of a gracefully -// shutting down server will lead to the server being deleted as well. -func (c *channelMap) removeEntry(id int64) { - c.mu.Lock() - c.findEntry(id).triggerDelete() - c.mu.Unlock() -} - -// c.mu must be held by the caller -func (c *channelMap) decrTraceRefCount(id int64) { - e := c.findEntry(id) - if v, ok := e.(tracedChannel); ok { - v.decrTraceRefCount() - e.deleteSelfIfReady() - } -} - -// c.mu must be held by the caller. -func (c *channelMap) findEntry(id int64) entry { - var v entry - var ok bool - if v, ok = c.channels[id]; ok { - return v - } - if v, ok = c.subChannels[id]; ok { - return v - } - if v, ok = c.servers[id]; ok { - return v - } - if v, ok = c.listenSockets[id]; ok { - return v - } - if v, ok = c.normalSockets[id]; ok { - return v - } - return &dummyEntry{idNotFound: id} -} - -// c.mu must be held by the caller -// deleteEntry simply deletes an entry from the channelMap. Before calling this -// method, the caller must check that this entry is ready to be deleted, i.e. removeEntry() -// has been called on it, and no children still exist. -// Conditionals are ordered by the expected frequency of deletion of each entity -// type, in order to optimize performance.
-func (c *channelMap) deleteEntry(id int64) { - var ok bool - if _, ok = c.normalSockets[id]; ok { - delete(c.normalSockets, id) - return - } - if _, ok = c.subChannels[id]; ok { - delete(c.subChannels, id) - return - } - if _, ok = c.channels[id]; ok { - delete(c.channels, id) - delete(c.topLevelChannels, id) - return - } - if _, ok = c.listenSockets[id]; ok { - delete(c.listenSockets, id) - return - } - if _, ok = c.servers[id]; ok { - delete(c.servers, id) - return - } -} - -func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) { - c.mu.Lock() - child := c.findEntry(id) - childTC, ok := child.(tracedChannel) - if !ok { - c.mu.Unlock() - return - } - childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) - if desc.Parent != nil { - parent := c.findEntry(child.getParentID()) - var chanType RefChannelType - switch child.(type) { - case *channel: - chanType = RefChannel - case *subChannel: - chanType = RefSubChannel - } - if parentTC, ok := parent.(tracedChannel); ok { - parentTC.getChannelTrace().append(&TraceEvent{ - Desc: desc.Parent.Desc, - Severity: desc.Parent.Severity, - Timestamp: time.Now(), - RefID: id, - RefName: childTC.getRefName(), - RefType: chanType, - }) - childTC.incrTraceRefCount() - } - } - c.mu.Unlock() -} - -type int64Slice []int64 - -func (s int64Slice) Len() int { return len(s) } -func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } - -func copyMap(m map[int64]string) map[int64]string { - n := make(map[int64]string) - for k, v := range m { - n[k] = v - } - return n -} - -func min(a, b int64) int64 { - if a < b { - return a - } - return b -} - -func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { - if maxResults <= 0 { - maxResults = EntryPerPage - } - c.mu.RLock() - l := int64(len(c.topLevelChannels)) - ids := make([]int64, 0, l) - cns := make([]*channel, 0, min(l, maxResults)) - - for k := range c.topLevelChannels { - ids = append(ids, k) - } - sort.Sort(int64Slice(ids)) - idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) - count := int64(0) - var end bool - var t []*ChannelMetric - for i, v := range ids[idx:] { - if count == maxResults { - break - } - if cn, ok := c.channels[v]; ok { - cns = append(cns, cn) - t = append(t, &ChannelMetric{ - NestedChans: copyMap(cn.nestedChans), - SubChans: copyMap(cn.subChans), - }) - count++ - } - if i == len(ids[idx:])-1 { - end = true - break - } - } - c.mu.RUnlock() - if count == 0 { - end = true - } - - for i, cn := range cns { - t[i].ChannelData = cn.c.ChannelzMetric() - t[i].ID = cn.id - t[i].RefName = cn.refName - t[i].Trace = cn.trace.dumpData() - } - return t, end -} - -func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) { - if maxResults <= 0 { - maxResults = EntryPerPage - } - c.mu.RLock() - l := int64(len(c.servers)) - ids := make([]int64, 0, l) - ss := make([]*server, 0, min(l, maxResults)) - for k := range c.servers { - ids = append(ids, k) - } - sort.Sort(int64Slice(ids)) - idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) - count := int64(0) - var end bool - var s []*ServerMetric - for i, v := range ids[idx:] { - if count == maxResults { - break - } - if svr, ok := c.servers[v]; ok { - ss = append(ss, svr) - s = append(s, &ServerMetric{ - ListenSockets: copyMap(svr.listenSockets), - }) - count++ - } - if i == len(ids[idx:])-1 { - end = true - break - } - } - c.mu.RUnlock() 
- if count == 0 { - end = true - } - - for i, svr := range ss { - s[i].ServerData = svr.s.ChannelzMetric() - s[i].ID = svr.id - s[i].RefName = svr.refName - } - return s, end -} - -func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { - if maxResults <= 0 { - maxResults = EntryPerPage - } - var svr *server - var ok bool - c.mu.RLock() - if svr, ok = c.servers[id]; !ok { - // server with id doesn't exist. - c.mu.RUnlock() - return nil, true - } - svrskts := svr.sockets - l := int64(len(svrskts)) - ids := make([]int64, 0, l) - sks := make([]*normalSocket, 0, min(l, maxResults)) - for k := range svrskts { - ids = append(ids, k) - } - sort.Sort(int64Slice(ids)) - idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID }) - count := int64(0) - var end bool - for i, v := range ids[idx:] { - if count == maxResults { - break - } - if ns, ok := c.normalSockets[v]; ok { - sks = append(sks, ns) - count++ - } - if i == len(ids[idx:])-1 { - end = true - break - } - } - c.mu.RUnlock() - if count == 0 { - end = true - } - s := make([]*SocketMetric, 0, len(sks)) - for _, ns := range sks { - sm := &SocketMetric{} - sm.SocketData = ns.s.ChannelzMetric() - sm.ID = ns.id - sm.RefName = ns.refName - s = append(s, sm) - } - return s, end -} - -func (c *channelMap) GetChannel(id int64) *ChannelMetric { - cm := &ChannelMetric{} - var cn *channel - var ok bool - c.mu.RLock() - if cn, ok = c.channels[id]; !ok { - // channel with id doesn't exist. - c.mu.RUnlock() - return nil - } - cm.NestedChans = copyMap(cn.nestedChans) - cm.SubChans = copyMap(cn.subChans) - // cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when - // holding the lock to prevent potential data race. - chanCopy := cn.c - c.mu.RUnlock() - cm.ChannelData = chanCopy.ChannelzMetric() - cm.ID = cn.id - cm.RefName = cn.refName - cm.Trace = cn.trace.dumpData() - return cm -} - -func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric { - cm := &SubChannelMetric{} - var sc *subChannel - var ok bool - c.mu.RLock() - if sc, ok = c.subChannels[id]; !ok { - // subchannel with id doesn't exist. - c.mu.RUnlock() - return nil - } - cm.Sockets = copyMap(sc.sockets) - // sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when - // holding the lock to prevent potential data race. - chanCopy := sc.c - c.mu.RUnlock() - cm.ChannelData = chanCopy.ChannelzMetric() - cm.ID = sc.id - cm.RefName = sc.refName - cm.Trace = sc.trace.dumpData() - return cm -} - -func (c *channelMap) GetSocket(id int64) *SocketMetric { - sm := &SocketMetric{} - c.mu.RLock() - if ls, ok := c.listenSockets[id]; ok { - c.mu.RUnlock() - sm.SocketData = ls.s.ChannelzMetric() - sm.ID = ls.id - sm.RefName = ls.refName - return sm - } - if ns, ok := c.normalSockets[id]; ok { - c.mu.RUnlock() - sm.SocketData = ns.s.ChannelzMetric() - sm.ID = ns.id - sm.RefName = ns.refName - return sm - } - c.mu.RUnlock() - return nil -} - -func (c *channelMap) GetServer(id int64) *ServerMetric { - sm := &ServerMetric{} - var svr *server - var ok bool - c.mu.RLock() - if svr, ok = c.servers[id]; !ok { - c.mu.RUnlock() - return nil - } - sm.ListenSockets = copyMap(svr.listenSockets) - c.mu.RUnlock() - sm.ID = svr.id - sm.RefName = svr.refName - sm.ServerData = svr.s.ChannelzMetric() - return sm -} - -// IDGenerator is an incrementing atomic that tracks IDs for channelz entities. -type IDGenerator struct { - id int64 -} - -// Reset resets the generated ID back to zero. 
Should only be used at -// initialization or by tests sensitive to the ID number. -func (i *IDGenerator) Reset() { - atomic.StoreInt64(&i.id, 0) -} - -func (i *IDGenerator) genID() int64 { - return atomic.AddInt64(&i.id, 1) -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/id.go b/vendor/google.golang.org/grpc/internal/channelz/id.go deleted file mode 100644 index c9a27acd37..0000000000 --- a/vendor/google.golang.org/grpc/internal/channelz/id.go +++ /dev/null @@ -1,75 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -import "fmt" - -// Identifier is an opaque identifier which uniquely identifies an entity in the -// channelz database. -type Identifier struct { - typ RefChannelType - id int64 - str string - pid *Identifier -} - -// Type returns the entity type corresponding to id. -func (id *Identifier) Type() RefChannelType { - return id.typ -} - -// Int returns the integer identifier corresponding to id. -func (id *Identifier) Int() int64 { - return id.id -} - -// String returns a string representation of the entity corresponding to id. -// -// This includes some information about the parent as well. Examples: -// Top-level channel: [Channel #channel-number] -// Nested channel: [Channel #parent-channel-number Channel #channel-number] -// Sub channel: [Channel #parent-channel SubChannel #subchannel-number] -func (id *Identifier) String() string { - return id.str -} - -// Equal returns true if other is the same as id. -func (id *Identifier) Equal(other *Identifier) bool { - if (id != nil) != (other != nil) { - return false - } - if id == nil && other == nil { - return true - } - return id.typ == other.typ && id.id == other.id && id.pid == other.pid -} - -// NewIdentifierForTesting returns a new opaque identifier to be used only for -// testing purposes. -func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier { - return newIdentifer(typ, id, pid) -} - -func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier { - str := fmt.Sprintf("%s #%d", typ, id) - if pid != nil { - str = fmt.Sprintf("%s %s", pid, str) - } - return &Identifier{typ: typ, id: id, str: str, pid: pid} -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go deleted file mode 100644 index f89e6f77bb..0000000000 --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ /dev/null @@ -1,79 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
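For reference, the string format produced by newIdentifer above can be observed through NewIdentifierForTesting; a small sketch (channelz is an internal package, so this compiles only from within the grpc module; the ids are illustrative):

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/channelz"
)

func main() {
	parent := channelz.NewIdentifierForTesting(channelz.RefChannel, 1, nil)
	child := channelz.NewIdentifierForTesting(channelz.RefSubChannel, 2, parent)
	fmt.Println(parent) // Channel #1
	fmt.Println(child)  // Channel #1 SubChannel #2
}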
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -import ( - "fmt" - - "google.golang.org/grpc/grpclog" -) - -var logger = grpclog.Component("channelz") - -func withParens(id *Identifier) string { - return "[" + id.String() + "] " -} - -// Info logs and adds a trace event if channelz is on. -func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtInfo, - }) -} - -// Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprintf(format, args...), - Severity: CtInfo, - }) -} - -// Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtWarning, - }) -} - -// Warningf logs and adds a trace event if channelz is on. -func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprintf(format, args...), - Severity: CtWarning, - }) -} - -// Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtError, - }) -} - -// Errorf logs and adds a trace event if channelz is on. -func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprintf(format, args...), - Severity: CtError, - }) -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go deleted file mode 100644 index 1d4020f537..0000000000 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ /dev/null @@ -1,727 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -import ( - "net" - "sync" - "sync/atomic" - "time" - - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" -) - -// entry represents a node in the channelz database. -type entry interface { - // addChild adds a child e, whose channelz id is id, to the child list - addChild(id int64, e entry) - // deleteChild deletes the child with channelz id id from the child list - deleteChild(id int64) - // triggerDelete tries to delete self from channelz database. However, if child - // list is not empty, then deletion from the database is on hold until the last - // child is deleted from database. - triggerDelete() - // deleteSelfIfReady checks whether triggerDelete() has been called before, and whether child - // list is now empty. If both conditions are met, then delete self from database.
- deleteSelfIfReady() - // getParentID returns the parent ID of the entry. A zero parent ID means no parent. - getParentID() int64 -} - -// dummyEntry is a fake entry to handle the entry-not-found case. -type dummyEntry struct { - idNotFound int64 -} - -func (d *dummyEntry) addChild(id int64, e entry) { - // Note: it is possible for a normal program to reach here under a race condition. - // For example, there could be a race between ClientConn.Close() info being propagated - // to addrConn and http2Client. ClientConn.Close() cancels the context, causing - // http2Client to error out. The error info is then caught by the transport monitor - // before addrConn.tearDown() is called inside ClientConn.Close(). Therefore, - // the addrConn will create a new transport. And when registering the new transport in - // channelz, its parent addrConn could have already been torn down and deleted - // from channelz tracking, and thus the code reaches here. - logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) -} - -func (d *dummyEntry) deleteChild(id int64) { - // It is possible for a normal program to reach here under a race condition. - // Refer to the example described in addChild(). - logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) -} - -func (d *dummyEntry) triggerDelete() { - logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) -} - -func (*dummyEntry) deleteSelfIfReady() { - // code should not reach here. deleteSelfIfReady is always called on an existing entry. -} - -func (*dummyEntry) getParentID() int64 { - return 0 -} - -// ChannelMetric defines the info channelz provides for a specific Channel, which -// includes ChannelInternalMetric and channelz-specific data, such as channelz id, -// child list, etc. -type ChannelMetric struct { - // ID is the channelz id of this channel. - ID int64 - // RefName is the human readable reference string of this channel. - RefName string - // ChannelData contains channel internal metric reported by the channel through - // ChannelzMetric(). - ChannelData *ChannelInternalMetric - // NestedChans tracks the nested channel type children of this channel in the format of - // a map from nested channel channelz id to corresponding reference string. - NestedChans map[int64]string - // SubChans tracks the subchannel type children of this channel in the format of a - // map from subchannel channelz id to corresponding reference string. - SubChans map[int64]string - // Sockets tracks the socket type children of this channel in the format of a map - // from socket channelz id to corresponding reference string. - // Note: the current grpc implementation doesn't allow a channel to have sockets directly; - // therefore, this field is unused. - Sockets map[int64]string - // Trace contains the most recent traced events. - Trace *ChannelTrace -} - -// SubChannelMetric defines the info channelz provides for a specific SubChannel, -// which includes ChannelInternalMetric and channelz-specific data, such as -// channelz id, child list, etc. -type SubChannelMetric struct { - // ID is the channelz id of this subchannel. - ID int64 - // RefName is the human readable reference string of this subchannel. - RefName string - // ChannelData contains subchannel internal metric reported by the subchannel - // through ChannelzMetric().
- ChannelData *ChannelInternalMetric - // NestedChans tracks the nested channel type children of this subchannel in the format of - // a map from nested channel channelz id to corresponding reference string. - // Note: the current grpc implementation doesn't allow a subchannel to have nested channels - // as children; therefore, this field is unused. - NestedChans map[int64]string - // SubChans tracks the subchannel type children of this subchannel in the format of a - // map from subchannel channelz id to corresponding reference string. - // Note: the current grpc implementation doesn't allow a subchannel to have subchannels - // as children; therefore, this field is unused. - SubChans map[int64]string - // Sockets tracks the socket type children of this subchannel in the format of a map - // from socket channelz id to corresponding reference string. - Sockets map[int64]string - // Trace contains the most recent traced events. - Trace *ChannelTrace -} - -// ChannelInternalMetric defines the struct that the implementor of the Channel interface -// should return from ChannelzMetric(). -type ChannelInternalMetric struct { - // current connectivity state of the channel. - State connectivity.State - // The target this channel originally tried to connect to. May be absent. - Target string - // The number of calls started on the channel. - CallsStarted int64 - // The number of calls that have completed with an OK status. - CallsSucceeded int64 - // The number of calls that have completed with a non-OK status. - CallsFailed int64 - // The last time a call was started on the channel. - LastCallStartedTimestamp time.Time -} - -// ChannelTrace stores traced events on a channel/subchannel and related info. -type ChannelTrace struct { - // EventNum is the number of events that ever got traced (i.e. including those that have been deleted) - EventNum int64 - // CreationTime is the creation time of the trace. - CreationTime time.Time - // Events stores the most recent trace events (up to $maxTraceEntry; newer events overwrite the - // oldest one) - Events []*TraceEvent -} - -// TraceEvent represents a single trace event. -type TraceEvent struct { - // Desc is a simple description of the trace event. - Desc string - // Severity states the severity of this trace event. - Severity Severity - // Timestamp is the event time. - Timestamp time.Time - // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is - // involved in this event. - // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside []) - RefID int64 - // RefName is the reference name for the entity that gets referenced in the event. - RefName string - // RefType indicates the referenced entity type, i.e. Channel or SubChannel. - RefType RefChannelType -} - -// Channel is the interface that should be satisfied in order to be tracked by -// channelz as Channel or SubChannel. -type Channel interface { - ChannelzMetric() *ChannelInternalMetric -} - -type dummyChannel struct{} - -func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric { - return &ChannelInternalMetric{} -} - -type channel struct { - refName string - c Channel - closeCalled bool - nestedChans map[int64]string - subChans map[int64]string - id int64 - pid int64 - cm *channelMap - trace *channelTrace - // traceRefCount is the number of trace events that reference this channel. - // Non-zero traceRefCount means the trace of this channel cannot be deleted.
- traceRefCount int32 -} - -func (c *channel) addChild(id int64, e entry) { - switch v := e.(type) { - case *subChannel: - c.subChans[id] = v.refName - case *channel: - c.nestedChans[id] = v.refName - default: - logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) - } -} - -func (c *channel) deleteChild(id int64) { - delete(c.subChans, id) - delete(c.nestedChans, id) - c.deleteSelfIfReady() -} - -func (c *channel) triggerDelete() { - c.closeCalled = true - c.deleteSelfIfReady() -} - -func (c *channel) getParentID() int64 { - return c.pid -} - -// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means -// deleting the channel reference from its parent's child list. -// -// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the -// corresponding grpc object has been invoked, and the channel does not have any children left. -// -// The returned boolean value indicates whether the channel has been successfully deleted from tree. -func (c *channel) deleteSelfFromTree() (deleted bool) { - if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { - return false - } - // not top channel - if c.pid != 0 { - c.cm.findEntry(c.pid).deleteChild(c.id) - } - return true -} - -// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means -// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the -// channel, and its memory will be garbage collected. -// -// The trace reference count of the channel must be 0 in order to be deleted from the map. This is -// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, -// the trace of the referenced entity must not be deleted. In order to release the resource allocated -// by grpc, the reference to the grpc object is reset to a dummy object. -// -// deleteSelfFromMap must be called after deleteSelfFromTree returns true. -// -// It returns a bool to indicate whether the channel can be safely deleted from map. -func (c *channel) deleteSelfFromMap() (delete bool) { - if c.getTraceRefCount() != 0 { - c.c = &dummyChannel{} - return false - } - return true -} - -// deleteSelfIfReady tries to delete the channel itself from the channelz database. -// The delete process includes two steps: -// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its -// parent's child list. -// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id -// will return entry not found error. 
-func (c *channel) deleteSelfIfReady() { - if !c.deleteSelfFromTree() { - return - } - if !c.deleteSelfFromMap() { - return - } - c.cm.deleteEntry(c.id) - c.trace.clear() -} - -func (c *channel) getChannelTrace() *channelTrace { - return c.trace -} - -func (c *channel) incrTraceRefCount() { - atomic.AddInt32(&c.traceRefCount, 1) -} - -func (c *channel) decrTraceRefCount() { - atomic.AddInt32(&c.traceRefCount, -1) -} - -func (c *channel) getTraceRefCount() int { - i := atomic.LoadInt32(&c.traceRefCount) - return int(i) -} - -func (c *channel) getRefName() string { - return c.refName -} - -type subChannel struct { - refName string - c Channel - closeCalled bool - sockets map[int64]string - id int64 - pid int64 - cm *channelMap - trace *channelTrace - traceRefCount int32 -} - -func (sc *subChannel) addChild(id int64, e entry) { - if v, ok := e.(*normalSocket); ok { - sc.sockets[id] = v.refName - } else { - logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) - } -} - -func (sc *subChannel) deleteChild(id int64) { - delete(sc.sockets, id) - sc.deleteSelfIfReady() -} - -func (sc *subChannel) triggerDelete() { - sc.closeCalled = true - sc.deleteSelfIfReady() -} - -func (sc *subChannel) getParentID() int64 { - return sc.pid -} - -// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which -// means deleting the subchannel reference from its parent's child list. -// -// In order for a subchannel to be deleted from the tree, it must meet the criteria that removal of -// the corresponding grpc object has been invoked, and the subchannel does not have any children left. -// -// The returned boolean value indicates whether the subchannel has been successfully deleted from the tree. -func (sc *subChannel) deleteSelfFromTree() (deleted bool) { - if !sc.closeCalled || len(sc.sockets) != 0 { - return false - } - sc.cm.findEntry(sc.pid).deleteChild(sc.id) - return true -} - -// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means -// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query -// the subchannel, and its memory will be garbage collected. -// -// The trace reference count of the subchannel must be 0 in order to be deleted from the map. The -// channel tracing gRFC specifies that as long as some other trace has a reference to an entity, -// the trace of the referenced entity must not be deleted. In order to release the resource allocated -// by grpc, the reference to the grpc object is reset to a dummy object. -// -// deleteSelfFromMap must be called after deleteSelfFromTree returns true. -// -// It returns a bool to indicate whether the subchannel can be safely deleted from the map. -func (sc *subChannel) deleteSelfFromMap() (delete bool) { - if sc.getTraceRefCount() != 0 { - // free the grpc struct (i.e. addrConn) - sc.c = &dummyChannel{} - return false - } - return true -} - -// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. -// The delete process includes two steps: -// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from -// its parent's child list. -// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup -// by id will return an entry-not-found error.
-func (sc *subChannel) deleteSelfIfReady() { - if !sc.deleteSelfFromTree() { - return - } - if !sc.deleteSelfFromMap() { - return - } - sc.cm.deleteEntry(sc.id) - sc.trace.clear() -} - -func (sc *subChannel) getChannelTrace() *channelTrace { - return sc.trace -} - -func (sc *subChannel) incrTraceRefCount() { - atomic.AddInt32(&sc.traceRefCount, 1) -} - -func (sc *subChannel) decrTraceRefCount() { - atomic.AddInt32(&sc.traceRefCount, -1) -} - -func (sc *subChannel) getTraceRefCount() int { - i := atomic.LoadInt32(&sc.traceRefCount) - return int(i) -} - -func (sc *subChannel) getRefName() string { - return sc.refName -} - -// SocketMetric defines the info channelz provides for a specific Socket, which -// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc. -type SocketMetric struct { - // ID is the channelz id of this socket. - ID int64 - // RefName is the human readable reference string of this socket. - RefName string - // SocketData contains socket internal metric reported by the socket through - // ChannelzMetric(). - SocketData *SocketInternalMetric -} - -// SocketInternalMetric defines the struct that the implementor of Socket interface -// should return from ChannelzMetric(). -type SocketInternalMetric struct { - // The number of streams that have been started. - StreamsStarted int64 - // The number of streams that have ended successfully: - // On client side, receiving frame with eos bit set. - // On server side, sending frame with eos bit set. - StreamsSucceeded int64 - // The number of streams that have ended unsuccessfully: - // On client side, termination without receiving frame with eos bit set. - // On server side, termination without sending frame with eos bit set. - StreamsFailed int64 - // The number of messages successfully sent on this socket. - MessagesSent int64 - MessagesReceived int64 - // The number of keep alives sent. This is typically implemented with HTTP/2 - // ping messages. - KeepAlivesSent int64 - // The last time a stream was created by this endpoint. Usually unset for - // servers. - LastLocalStreamCreatedTimestamp time.Time - // The last time a stream was created by the remote endpoint. Usually unset - // for clients. - LastRemoteStreamCreatedTimestamp time.Time - // The last time a message was sent by this endpoint. - LastMessageSentTimestamp time.Time - // The last time a message was received by this endpoint. - LastMessageReceivedTimestamp time.Time - // The amount of window, granted to the local endpoint by the remote endpoint. - // This may be slightly out of date due to network latency. This does NOT - // include stream level or TCP level flow control info. - LocalFlowControlWindow int64 - // The amount of window, granted to the remote endpoint by the local endpoint. - // This may be slightly out of date due to network latency. This does NOT - // include stream level or TCP level flow control info. - RemoteFlowControlWindow int64 - // The locally bound address. - LocalAddr net.Addr - // The remote bound address. May be absent. - RemoteAddr net.Addr - // Optional, represents the name of the remote endpoint, if different than - // the original target name. - RemoteName string - SocketOptions *SocketOptionData - Security credentials.ChannelzSecurityValue -} - -// Socket is the interface that should be satisfied in order to be tracked by -// channelz as Socket. 
-type Socket interface { - ChannelzMetric() *SocketInternalMetric -} - -type listenSocket struct { - refName string - s Socket - id int64 - pid int64 - cm *channelMap -} - -func (ls *listenSocket) addChild(id int64, e entry) { - logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) -} - -func (ls *listenSocket) deleteChild(id int64) { - logger.Errorf("cannot delete a child (id = %d) from a listen socket", id) -} - -func (ls *listenSocket) triggerDelete() { - ls.cm.deleteEntry(ls.id) - ls.cm.findEntry(ls.pid).deleteChild(ls.id) -} - -func (ls *listenSocket) deleteSelfIfReady() { - logger.Errorf("cannot call deleteSelfIfReady on a listen socket") -} - -func (ls *listenSocket) getParentID() int64 { - return ls.pid -} - -type normalSocket struct { - refName string - s Socket - id int64 - pid int64 - cm *channelMap -} - -func (ns *normalSocket) addChild(id int64, e entry) { - logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) -} - -func (ns *normalSocket) deleteChild(id int64) { - logger.Errorf("cannot delete a child (id = %d) from a normal socket", id) -} - -func (ns *normalSocket) triggerDelete() { - ns.cm.deleteEntry(ns.id) - ns.cm.findEntry(ns.pid).deleteChild(ns.id) -} - -func (ns *normalSocket) deleteSelfIfReady() { - logger.Errorf("cannot call deleteSelfIfReady on a normal socket") -} - -func (ns *normalSocket) getParentID() int64 { - return ns.pid -} - -// ServerMetric defines the info channelz provides for a specific Server, which -// includes ServerInternalMetric and channelz-specific data, such as channelz id, -// child list, etc. -type ServerMetric struct { - // ID is the channelz id of this server. - ID int64 - // RefName is the human readable reference string of this server. - RefName string - // ServerData contains server internal metric reported by the server through - // ChannelzMetric(). - ServerData *ServerInternalMetric - // ListenSockets tracks the listener socket type children of this server in the - // format of a map from socket channelz id to corresponding reference string. - ListenSockets map[int64]string -} - -// ServerInternalMetric defines the struct that the implementor of the Server interface -// should return from ChannelzMetric(). -type ServerInternalMetric struct { - // The number of incoming calls started on the server. - CallsStarted int64 - // The number of incoming calls that have completed with an OK status. - CallsSucceeded int64 - // The number of incoming calls that have completed with a non-OK status. - CallsFailed int64 - // The last time a call was started on the server. - LastCallStartedTimestamp time.Time -} - -// Server is the interface to be satisfied in order to be tracked by channelz as -// Server.
-type Server interface { - ChannelzMetric() *ServerInternalMetric -} - -type server struct { - refName string - s Server - closeCalled bool - sockets map[int64]string - listenSockets map[int64]string - id int64 - cm *channelMap -} - -func (s *server) addChild(id int64, e entry) { - switch v := e.(type) { - case *normalSocket: - s.sockets[id] = v.refName - case *listenSocket: - s.listenSockets[id] = v.refName - default: - logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) - } -} - -func (s *server) deleteChild(id int64) { - delete(s.sockets, id) - delete(s.listenSockets, id) - s.deleteSelfIfReady() -} - -func (s *server) triggerDelete() { - s.closeCalled = true - s.deleteSelfIfReady() -} - -func (s *server) deleteSelfIfReady() { - if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { - return - } - s.cm.deleteEntry(s.id) -} - -func (s *server) getParentID() int64 { - return 0 -} - -type tracedChannel interface { - getChannelTrace() *channelTrace - incrTraceRefCount() - decrTraceRefCount() - getRefName() string -} - -type channelTrace struct { - cm *channelMap - clearCalled bool - createdTime time.Time - eventCount int64 - mu sync.Mutex - events []*TraceEvent -} - -func (c *channelTrace) append(e *TraceEvent) { - c.mu.Lock() - if len(c.events) == getMaxTraceEntry() { - del := c.events[0] - c.events = c.events[1:] - if del.RefID != 0 { - // start recursive cleanup in a goroutine to not block the call originated from grpc. - go func() { - // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func. - c.cm.mu.Lock() - c.cm.decrTraceRefCount(del.RefID) - c.cm.mu.Unlock() - }() - } - } - e.Timestamp = time.Now() - c.events = append(c.events, e) - c.eventCount++ - c.mu.Unlock() -} - -func (c *channelTrace) clear() { - if c.clearCalled { - return - } - c.clearCalled = true - c.mu.Lock() - for _, e := range c.events { - if e.RefID != 0 { - // caller should have already held the c.cm.mu lock. - c.cm.decrTraceRefCount(e.RefID) - } - } - c.mu.Unlock() -} - -// Severity is the severity level of a trace event. -// The canonical enumeration of all valid values is here: -// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126. -type Severity int - -const ( - // CtUnknown indicates unknown severity of a trace event. - CtUnknown Severity = iota - // CtInfo indicates info level severity of a trace event. - CtInfo - // CtWarning indicates warning level severity of a trace event. - CtWarning - // CtError indicates error level severity of a trace event. - CtError -) - -// RefChannelType is the type of the entity being referenced in a trace event. -type RefChannelType int - -const ( - // RefUnknown indicates an unknown entity type, the zero value for this type. - RefUnknown RefChannelType = iota - // RefChannel indicates the referenced entity is a Channel. - RefChannel - // RefSubChannel indicates the referenced entity is a SubChannel. - RefSubChannel - // RefServer indicates the referenced entity is a Server. - RefServer - // RefListenSocket indicates the referenced entity is a ListenSocket. - RefListenSocket - // RefNormalSocket indicates the referenced entity is a NormalSocket. 
- RefNormalSocket -) - -var refChannelTypeToString = map[RefChannelType]string{ - RefUnknown: "Unknown", - RefChannel: "Channel", - RefSubChannel: "SubChannel", - RefServer: "Server", - RefListenSocket: "ListenSocket", - RefNormalSocket: "NormalSocket", -} - -func (r RefChannelType) String() string { - return refChannelTypeToString[r] -} - -func (c *channelTrace) dumpData() *ChannelTrace { - c.mu.Lock() - ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} - ct.Events = c.events[:len(c.events)] - c.mu.Unlock() - return ct -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go deleted file mode 100644 index 1b1c4cce34..0000000000 --- a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go +++ /dev/null @@ -1,51 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -import ( - "syscall" - - "golang.org/x/sys/unix" -) - -// SocketOptionData defines the struct to hold socket option data, and related -// getter function to obtain info from fd. -type SocketOptionData struct { - Linger *unix.Linger - RecvTimeout *unix.Timeval - SendTimeout *unix.Timeval - TCPInfo *unix.TCPInfo -} - -// Getsockopt defines the function to get socket options requested by channelz. -// It is to be passed to syscall.RawConn.Control(). -func (s *SocketOptionData) Getsockopt(fd uintptr) { - if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil { - s.Linger = v - } - if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil { - s.RecvTimeout = v - } - if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil { - s.SendTimeout = v - } - if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil { - s.TCPInfo = v - } -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go deleted file mode 100644 index 8b06eed1ab..0000000000 --- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go +++ /dev/null @@ -1,43 +0,0 @@ -//go:build !linux -// +build !linux - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package channelz - -import ( - "sync" -) - -var once sync.Once - -// SocketOptionData defines the struct to hold socket option data, and related -// getter function to obtain info from fd. -// Windows OS doesn't support Socket Option -type SocketOptionData struct { -} - -// Getsockopt defines the function to get socket options requested by channelz. -// It is to be passed to syscall.RawConn.Control(). -// Windows OS doesn't support Socket Option -func (s *SocketOptionData) Getsockopt(fd uintptr) { - once.Do(func() { - logger.Warning("Channelz: socket options are not supported on non-linux environments") - }) -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go deleted file mode 100644 index 98288c3f86..0000000000 --- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +++ /dev/null @@ -1,37 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -import ( - "syscall" -) - -// GetSocketOption gets the socket option info of the conn. -func GetSocketOption(socket any) *SocketOptionData { - c, ok := socket.(syscall.Conn) - if !ok { - return nil - } - data := &SocketOptionData{} - if rawConn, err := c.SyscallConn(); err == nil { - rawConn.Control(data.Getsockopt) - return data - } - return nil -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go deleted file mode 100644 index b5568b22e2..0000000000 --- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +++ /dev/null @@ -1,27 +0,0 @@ -//go:build !linux -// +build !linux - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -// GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c any) *SocketOptionData { - return nil -} diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go deleted file mode 100644 index 9deee7f651..0000000000 --- a/vendor/google.golang.org/grpc/internal/credentials/credentials.go +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
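A usage sketch for the GetSocketOption helper above. The address is hypothetical; *net.TCPConn satisfies syscall.Conn, so on Linux this fills the struct, while on other platforms the call simply returns nil (as shown in the non-Linux variant):

package main

import (
	"fmt"
	"log"
	"net"

	"google.golang.org/grpc/internal/channelz"
)

func main() {
	conn, err := net.Dial("tcp", "localhost:50051") // illustrative address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// A nil field inside the result means the corresponding getsockopt call failed.
	if data := channelz.GetSocketOption(conn); data != nil {
		fmt.Printf("SO_LINGER: %+v, TCP_INFO present: %v\n", data.Linger, data.TCPInfo != nil)
	}
}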
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import ( - "context" -) - -// requestInfoKey is a struct to be used as the key to store RequestInfo in a -// context. -type requestInfoKey struct{} - -// NewRequestInfoContext creates a context with ri. -func NewRequestInfoContext(ctx context.Context, ri any) context.Context { - return context.WithValue(ctx, requestInfoKey{}, ri) -} - -// RequestInfoFromContext extracts the RequestInfo from ctx. -func RequestInfoFromContext(ctx context.Context) any { - return ctx.Value(requestInfoKey{}) -} - -// clientHandshakeInfoKey is a struct used as the key to store -// ClientHandshakeInfo in a context. -type clientHandshakeInfoKey struct{} - -// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. -func ClientHandshakeInfoFromContext(ctx context.Context) any { - return ctx.Value(clientHandshakeInfoKey{}) -} - -// NewClientHandshakeInfoContext creates a context with chi. -func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context { - return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) -} diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go deleted file mode 100644 index 25ade62305..0000000000 --- a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go +++ /dev/null @@ -1,75 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package credentials defines APIs for parsing SPIFFE ID. -// -// All APIs in this package are experimental. -package credentials - -import ( - "crypto/tls" - "crypto/x509" - "net/url" - - "google.golang.org/grpc/grpclog" -) - -var logger = grpclog.Component("credentials") - -// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format -// is invalid, return nil with warning. -func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { - if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 { - return nil - } - return SPIFFEIDFromCert(state.PeerCertificates[0]) -} - -// SPIFFEIDFromCert parses the SPIFFE ID from x509.Certificate. If the SPIFFE -// ID format is invalid, return nil with warning. -func SPIFFEIDFromCert(cert *x509.Certificate) *url.URL { - if cert == nil || cert.URIs == nil { - return nil - } - var spiffeID *url.URL - for _, uri := range cert.URIs { - if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") { - continue - } - // From this point, we assume the uri is intended for a SPIFFE ID. 
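// (Illustrative note on the validation around this point: a URI such as
// spiffe://example.org/workload reaches here, while spiffe://user@example.org/w
// (userinfo present) or an opaque form like spiffe:opaque was already skipped
// by the checks above; the length, host/path, and URI-SAN-count checks below
// reject the remaining malformed cases.)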
- if len(uri.String()) > 2048 { - logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes") - return nil - } - if len(uri.Host) == 0 || len(uri.Path) == 0 { - logger.Warning("invalid SPIFFE ID: domain or workload ID is empty") - return nil - } - if len(uri.Host) > 255 { - logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters") - return nil - } - // A valid SPIFFE certificate can only have exactly one URI SAN field. - if len(cert.URIs) > 1 { - logger.Warning("invalid SPIFFE ID: multiple URI SANs") - return nil - } - spiffeID = uri - } - return spiffeID -} diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go deleted file mode 100644 index 2919632d65..0000000000 --- a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go +++ /dev/null @@ -1,58 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package credentials - -import ( - "net" - "syscall" -) - -type sysConn = syscall.Conn - -// syscallConn keeps reference of rawConn to support syscall.Conn for channelz. -// SyscallConn() (the method in interface syscall.Conn) is explicitly -// implemented on this type, -// -// Interface syscall.Conn is implemented by most net.Conn implementations (e.g. -// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns -// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn -// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't -// help here). -type syscallConn struct { - net.Conn - // sysConn is a type alias of syscall.Conn. It's necessary because the name - // `Conn` collides with `net.Conn`. - sysConn -} - -// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that -// implements syscall.Conn. rawConn will be used to support syscall, and newConn -// will be used for read/write. -// -// This function returns newConn if rawConn doesn't implement syscall.Conn. -func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { - sysConn, ok := rawConn.(syscall.Conn) - if !ok { - return newConn - } - return &syscallConn{ - Conn: newConn, - sysConn: sysConn, - } -} diff --git a/vendor/google.golang.org/grpc/internal/credentials/util.go b/vendor/google.golang.org/grpc/internal/credentials/util.go deleted file mode 100644 index f792fd22ca..0000000000 --- a/vendor/google.golang.org/grpc/internal/credentials/util.go +++ /dev/null @@ -1,52 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
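A minimal sketch of the WrapSyscallConn use case above: a tls.Conn does not implement syscall.Conn, so it is paired with its raw TCP conn, letting channelz still read socket options. The address and the icredentials import alias are illustrative, and the package is internal to the grpc module:

package main

import (
	"crypto/tls"
	"log"
	"net"
	"syscall"

	icredentials "google.golang.org/grpc/internal/credentials"
)

func main() {
	rawConn, err := net.Dial("tcp", "example.com:443") // illustrative address
	if err != nil {
		log.Fatal(err)
	}
	tlsConn := tls.Client(rawConn, &tls.Config{ServerName: "example.com"})
	// Reads and writes go through tlsConn; syscalls go through rawConn.
	conn := icredentials.WrapSyscallConn(rawConn, tlsConn)
	_, ok := conn.(syscall.Conn)
	log.Printf("wrapped conn implements syscall.Conn: %v", ok) // true
}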
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package credentials - -import ( - "crypto/tls" -) - -const alpnProtoStrH2 = "h2" - -// AppendH2ToNextProtos appends h2 to next protos. -func AppendH2ToNextProtos(ps []string) []string { - for _, p := range ps { - if p == alpnProtoStrH2 { - return ps - } - } - ret := make([]string, 0, len(ps)+1) - ret = append(ret, ps...) - return append(ret, alpnProtoStrH2) -} - -// CloneTLSConfig returns a shallow clone of the exported -// fields of cfg, ignoring the unexported sync.Once, which -// contains a mutex and must not be copied. -// -// If cfg is nil, a new zero tls.Config is returned. -// -// TODO: inline this function if possible. -func CloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - - return cfg.Clone() -} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go deleted file mode 100644 index 685a3cb41b..0000000000 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package envconfig contains grpc settings configured by environment variables. -package envconfig - -import ( - "os" - "strconv" - "strings" -) - -var ( - // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). - TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) - // AdvertiseCompressors is set if registered compressor should be advertised - // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false"). - AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true) - // RingHashCap indicates the maximum ring size which defaults to 4096 - // entries but may be overridden by setting the environment variable - // "GRPC_RING_HASH_CAP". This does not override the default bounds - // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). - RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) - // LeastRequestLB is set if we should support the least_request_experimental - // LB policy, which can be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". - LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false) - // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS - // handshakes that can be performed. - ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) -) - -func boolFromEnv(envVar string, def bool) bool { - if def { - // The default is true; return true unless the variable is "false". - return !strings.EqualFold(os.Getenv(envVar), "false") - } - // The default is false; return false unless the variable is "true". 
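// A hypothetical illustration of both branches (the variable names come from
// the declarations above; the values are examples only):
//
//	boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)               // set to "false" -> false
//	boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)               // unset or "no"  -> true
//	boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false) // set to "true"  -> true
//	boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false) // unset or "yes" -> false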
- return strings.EqualFold(os.Getenv(envVar), "true") -} - -func uint64FromEnv(envVar string, def, min, max uint64) uint64 { - v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64) - if err != nil { - return def - } - if v < min { - return min - } - if v > max { - return max - } - return v -} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/vendor/google.golang.org/grpc/internal/envconfig/observability.go deleted file mode 100644 index dd314cfb18..0000000000 --- a/vendor/google.golang.org/grpc/internal/envconfig/observability.go +++ /dev/null @@ -1,42 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package envconfig - -import "os" - -const ( - envObservabilityConfig = "GRPC_GCP_OBSERVABILITY_CONFIG" - envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE" -) - -var ( - // ObservabilityConfig is the json configuration for the gcp/observability - // package specified directly in the envObservabilityConfig env var. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. - ObservabilityConfig = os.Getenv(envObservabilityConfig) - // ObservabilityConfigFile is the json configuration for the - // gcp/observability specified in a file with the location specified in - // envObservabilityConfigFile env var. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. - ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) -) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go deleted file mode 100644 index 29f234acb1..0000000000 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ /dev/null @@ -1,56 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package envconfig - -import ( - "os" -) - -const ( - // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name. - // Do not use this and read from env directly. Its value is read and kept in - // variable XDSBootstrapFileName. - // - // When both bootstrap FileName and FileContent are set, FileName is used. - XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" - // XDSBootstrapFileContentEnv is the env variable to set bootstrap file - // content. Do not use this and read from env directly. Its value is read - // and kept in variable XDSBootstrapFileContent. 
- // - // When both bootstrap FileName and FileContent are set, FileName is used. - XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" -) - -var ( - // XDSBootstrapFileName holds the name of the file which contains xDS - // bootstrap configuration. Users can specify the location of the bootstrap - // file by setting the environment variable "GRPC_XDS_BOOTSTRAP". - // - // When both bootstrap FileName and FileContent are set, FileName is used. - XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv) - // XDSBootstrapFileContent holds the content of the xDS bootstrap - // configuration. Users can specify the bootstrap config by setting the - // environment variable "GRPC_XDS_BOOTSTRAP_CONFIG". - // - // When both bootstrap FileName and FileContent are set, FileName is used. - XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) - - // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. - C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") -) diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go deleted file mode 100644 index 7f7044e173..0000000000 --- a/vendor/google.golang.org/grpc/internal/experimental.go +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package internal - -var ( - // WithRecvBufferPool is implemented by the grpc package and returns a dial - // option to configure a shared buffer pool for a grpc.ClientConn. - WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption - - // RecvBufferPool is implemented by the grpc package and returns a server - // option to configure a shared buffer pool for a grpc.Server. - RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption -) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go deleted file mode 100644 index bfc45102ab..0000000000 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ /dev/null @@ -1,126 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpclog (internal) defines depth logging for grpc. -package grpclog - -import ( - "os" -) - -// Logger is the logger used for the non-depth log functions. -var Logger LoggerV2 - -// DepthLogger is the logger used for the depth log functions. 
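// When DepthLogger is nil, the depth-aware helpers below fall back to the
// plain Logger, and the caller-supplied depth is ignored.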
-var DepthLogger DepthLoggerV2 - -// InfoDepth logs to the INFO log at the specified depth. -func InfoDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.InfoDepth(depth, args...) - } else { - Logger.Infoln(args...) - } -} - -// WarningDepth logs to the WARNING log at the specified depth. -func WarningDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.WarningDepth(depth, args...) - } else { - Logger.Warningln(args...) - } -} - -// ErrorDepth logs to the ERROR log at the specified depth. -func ErrorDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.ErrorDepth(depth, args...) - } else { - Logger.Errorln(args...) - } -} - -// FatalDepth logs to the FATAL log at the specified depth. -func FatalDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.FatalDepth(depth, args...) - } else { - Logger.Fatalln(args...) - } - os.Exit(1) -} - -// LoggerV2 does underlying logging work for grpclog. -// This is a copy of the LoggerV2 defined in the external grpclog package. It -// is defined here to avoid a circular dependency. -type LoggerV2 interface { - // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...any) - // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...any) - // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...any) - // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...any) - // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...any) - // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...any) - // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...any) - // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...any) - // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...any) - // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...any) - // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...any) - // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...any) - // V reports whether verbosity level l is at least the requested verbose level. - V(l int) bool -} - -// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements -// DepthLoggerV2, the below functions will be called with the appropriate stack -// depth set for trivial functions the logger may ignore. -// This is a copy of the DepthLoggerV2 defined in the external grpclog package. -// It is defined here to avoid a circular dependency. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type DepthLoggerV2 interface { - // InfoDepth logs to INFO log at the specified depth. 
Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...any) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...any) - // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...any) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...any) -} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go deleted file mode 100644 index faa998de76..0000000000 --- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ /dev/null @@ -1,93 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpclog - -import ( - "fmt" -) - -// PrefixLogger does logging with a prefix. -// -// Logging method on a nil logs without any prefix. -type PrefixLogger struct { - logger DepthLoggerV2 - prefix string -} - -// Infof does info logging. -func (pl *PrefixLogger) Infof(format string, args ...any) { - if pl != nil { - // Handle nil, so the tests can pass in a nil logger. - format = pl.prefix + format - pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) - return - } - InfoDepth(1, fmt.Sprintf(format, args...)) -} - -// Warningf does warning logging. -func (pl *PrefixLogger) Warningf(format string, args ...any) { - if pl != nil { - format = pl.prefix + format - pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) - return - } - WarningDepth(1, fmt.Sprintf(format, args...)) -} - -// Errorf does error logging. -func (pl *PrefixLogger) Errorf(format string, args ...any) { - if pl != nil { - format = pl.prefix + format - pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) - return - } - ErrorDepth(1, fmt.Sprintf(format, args...)) -} - -// Debugf does info logging at verbose level 2. -func (pl *PrefixLogger) Debugf(format string, args ...any) { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - if !Logger.V(2) { - return - } - if pl != nil { - // Handle nil, so the tests can pass in a nil logger. - format = pl.prefix + format - pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) - return - } - InfoDepth(1, fmt.Sprintf(format, args...)) - -} - -// V reports whether verbosity level l is at least the requested verbose level. -func (pl *PrefixLogger) V(l int) bool { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - return Logger.V(l) -} - -// NewPrefixLogger creates a prefix logger with the given prefix. 
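// A hypothetical usage sketch, assuming depthLogger (a DepthLoggerV2) and
// addr exist in the caller's scope:
//
//	pl := grpclog.NewPrefixLogger(depthLogger, "[transport] ")
//	pl.Infof("connection established to %s", addr) // logs "[transport] connection established to ..."
//
// Each method logs at depth 1, so the reported call site is the caller of
// PrefixLogger rather than PrefixLogger itself.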
-func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger { - return &PrefixLogger{logger: logger, prefix: prefix} -} diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go deleted file mode 100644 index 0126d6b510..0000000000 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ /dev/null @@ -1,100 +0,0 @@ -//go:build !go1.21 - -// TODO: when this file is deleted (after Go 1.20 support is dropped), delete -// all of grpcrand and call the rand package directly. - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcrand implements math/rand functions in a concurrent-safe way -// with a global random source, independent of math/rand's global source. -package grpcrand - -import ( - "math/rand" - "sync" - "time" -) - -var ( - r = rand.New(rand.NewSource(time.Now().UnixNano())) - mu sync.Mutex -) - -// Int implements rand.Int on the grpcrand global source. -func Int() int { - mu.Lock() - defer mu.Unlock() - return r.Int() -} - -// Int63n implements rand.Int63n on the grpcrand global source. -func Int63n(n int64) int64 { - mu.Lock() - defer mu.Unlock() - return r.Int63n(n) -} - -// Intn implements rand.Intn on the grpcrand global source. -func Intn(n int) int { - mu.Lock() - defer mu.Unlock() - return r.Intn(n) -} - -// Int31n implements rand.Int31n on the grpcrand global source. -func Int31n(n int32) int32 { - mu.Lock() - defer mu.Unlock() - return r.Int31n(n) -} - -// Float64 implements rand.Float64 on the grpcrand global source. -func Float64() float64 { - mu.Lock() - defer mu.Unlock() - return r.Float64() -} - -// Uint64 implements rand.Uint64 on the grpcrand global source. -func Uint64() uint64 { - mu.Lock() - defer mu.Unlock() - return r.Uint64() -} - -// Uint32 implements rand.Uint32 on the grpcrand global source. -func Uint32() uint32 { - mu.Lock() - defer mu.Unlock() - return r.Uint32() -} - -// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. -func ExpFloat64() float64 { - mu.Lock() - defer mu.Unlock() - return r.ExpFloat64() -} - -// Shuffle implements rand.Shuffle on the grpcrand global source. -var Shuffle = func(n int, f func(int, int)) { - mu.Lock() - defer mu.Unlock() - r.Shuffle(n, f) -} diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go deleted file mode 100644 index c37299af1e..0000000000 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go +++ /dev/null @@ -1,73 +0,0 @@ -//go:build go1.21 - -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcrand implements math/rand functions in a concurrent-safe way -// with a global random source, independent of math/rand's global source. -package grpcrand - -import "math/rand" - -// This implementation will be used for Go version 1.21 or newer. -// For older versions, the original implementation with mutex will be used. - -// Int implements rand.Int on the grpcrand global source. -func Int() int { - return rand.Int() -} - -// Int63n implements rand.Int63n on the grpcrand global source. -func Int63n(n int64) int64 { - return rand.Int63n(n) -} - -// Intn implements rand.Intn on the grpcrand global source. -func Intn(n int) int { - return rand.Intn(n) -} - -// Int31n implements rand.Int31n on the grpcrand global source. -func Int31n(n int32) int32 { - return rand.Int31n(n) -} - -// Float64 implements rand.Float64 on the grpcrand global source. -func Float64() float64 { - return rand.Float64() -} - -// Uint64 implements rand.Uint64 on the grpcrand global source. -func Uint64() uint64 { - return rand.Uint64() -} - -// Uint32 implements rand.Uint32 on the grpcrand global source. -func Uint32() uint32 { - return rand.Uint32() -} - -// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. -func ExpFloat64() float64 { - return rand.ExpFloat64() -} - -// Shuffle implements rand.Shuffle on the grpcrand global source. -var Shuffle = func(n int, f func(int, int)) { - rand.Shuffle(n, f) -} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go deleted file mode 100644 index f7f40a16ac..0000000000 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ /dev/null @@ -1,100 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpcsync - -import ( - "context" - - "google.golang.org/grpc/internal/buffer" -) - -// CallbackSerializer provides a mechanism to schedule callbacks in a -// synchronized manner. It provides a FIFO guarantee on the order of execution -// of scheduled callbacks. New callbacks can be scheduled by invoking the -// Schedule() method. -// -// This type is safe for concurrent access. -type CallbackSerializer struct { - // done is closed once the serializer is shut down completely, i.e all - // scheduled callbacks are executed and the serializer has deallocated all - // its resources. - done chan struct{} - - callbacks *buffer.Unbounded -} - -// NewCallbackSerializer returns a new CallbackSerializer instance. 
The provided -// context will be passed to the scheduled callbacks. Users should cancel the -// provided context to shutdown the CallbackSerializer. It is guaranteed that no -// callbacks will be added once this context is canceled, and any pending un-run -// callbacks will be executed before the serializer is shut down. -func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { - cs := &CallbackSerializer{ - done: make(chan struct{}), - callbacks: buffer.NewUnbounded(), - } - go cs.run(ctx) - return cs -} - -// Schedule adds a callback to be scheduled after existing callbacks are run. -// -// Callbacks are expected to honor the context when performing any blocking -// operations, and should return early when the context is canceled. -// -// Return value indicates if the callback was successfully added to the list of -// callbacks to be executed by the serializer. It is not possible to add -// callbacks once the context passed to NewCallbackSerializer is cancelled. -func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { - return cs.callbacks.Put(f) == nil -} - -func (cs *CallbackSerializer) run(ctx context.Context) { - defer close(cs.done) - - // TODO: when Go 1.21 is the oldest supported version, this loop and Close - // can be replaced with: - // - // context.AfterFunc(ctx, cs.callbacks.Close) - for ctx.Err() == nil { - select { - case <-ctx.Done(): - // Do nothing here. Next iteration of the for loop will not happen, - // since ctx.Err() would be non-nil. - case cb := <-cs.callbacks.Get(): - cs.callbacks.Load() - cb.(func(context.Context))(ctx) - } - } - - // Close the buffer to prevent new callbacks from being added. - cs.callbacks.Close() - - // Run all pending callbacks. - for cb := range cs.callbacks.Get() { - cs.callbacks.Load() - cb.(func(context.Context))(ctx) - } -} - -// Done returns a channel that is closed after the context passed to -// NewCallbackSerializer is canceled and all callbacks have been executed. -func (cs *CallbackSerializer) Done() <-chan struct{} { - return cs.done -} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go deleted file mode 100644 index fbe697c376..0000000000 --- a/vendor/google.golang.org/grpc/internal/grpcsync/event.go +++ /dev/null @@ -1,61 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcsync implements additional synchronization primitives built upon -// the sync package. -package grpcsync - -import ( - "sync" - "sync/atomic" -) - -// Event represents a one-time event that may occur in the future. -type Event struct { - fired int32 - c chan struct{} - o sync.Once -} - -// Fire causes e to complete. It is safe to call multiple times, and -// concurrently. It returns true iff this call to Fire caused the signaling -// channel returned by Done to close. 
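// A hypothetical sketch of the one-shot semantics, with cleanup standing in
// for caller-defined work:
//
//	e := grpcsync.NewEvent()
//	go func() { <-e.Done(); cleanup() }() // unblocks once Fire is called
//	first := e.Fire() // true: this call closed the channel
//	again := e.Fire() // false: already fired, a no-op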
-func (e *Event) Fire() bool { - ret := false - e.o.Do(func() { - atomic.StoreInt32(&e.fired, 1) - close(e.c) - ret = true - }) - return ret -} - -// Done returns a channel that will be closed when Fire is called. -func (e *Event) Done() <-chan struct{} { - return e.c -} - -// HasFired returns true if Fire has been called. -func (e *Event) HasFired() bool { - return atomic.LoadInt32(&e.fired) == 1 -} - -// NewEvent returns a new, ready-to-use Event. -func NewEvent() *Event { - return &Event{c: make(chan struct{})} -} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go deleted file mode 100644 index 6635f7bca9..0000000000 --- a/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go +++ /dev/null @@ -1,32 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpcsync - -import ( - "sync" -) - -// OnceFunc returns a function wrapping f which ensures f is only executed -// once even if the returned function is executed multiple times. -func OnceFunc(f func()) func() { - var once sync.Once - return func() { - once.Do(f) - } -} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go deleted file mode 100644 index aef8cec1ab..0000000000 --- a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go +++ /dev/null @@ -1,121 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpcsync - -import ( - "context" - "sync" -) - -// Subscriber represents an entity that is subscribed to messages published on -// a PubSub. It wraps the callback to be invoked by the PubSub when a new -// message is published. -type Subscriber interface { - // OnMessage is invoked when a new message is published. Implementations - // must not block in this method. - OnMessage(msg any) -} - -// PubSub is a simple one-to-many publish-subscribe system that supports -// messages of arbitrary type. It guarantees that messages are delivered in -// the same order in which they were published. -// -// Publisher invokes the Publish() method to publish new messages, while -// subscribers interested in receiving these messages register a callback -// via the Subscribe() method. -// -// Once a PubSub is stopped, no more messages can be published, but any pending -// published messages will be delivered to the subscribers. 
Done may be used -// to determine when all published messages have been delivered. -type PubSub struct { - cs *CallbackSerializer - - // Access to the below fields are guarded by this mutex. - mu sync.Mutex - msg any - subscribers map[Subscriber]bool -} - -// NewPubSub returns a new PubSub instance. Users should cancel the -// provided context to shutdown the PubSub. -func NewPubSub(ctx context.Context) *PubSub { - return &PubSub{ - cs: NewCallbackSerializer(ctx), - subscribers: map[Subscriber]bool{}, - } -} - -// Subscribe registers the provided Subscriber to the PubSub. -// -// If the PubSub contains a previously published message, the Subscriber's -// OnMessage() callback will be invoked asynchronously with the existing -// message to begin with, and subsequently for every newly published message. -// -// The caller is responsible for invoking the returned cancel function to -// unsubscribe itself from the PubSub. -func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { - ps.mu.Lock() - defer ps.mu.Unlock() - - ps.subscribers[sub] = true - - if ps.msg != nil { - msg := ps.msg - ps.cs.Schedule(func(context.Context) { - ps.mu.Lock() - defer ps.mu.Unlock() - if !ps.subscribers[sub] { - return - } - sub.OnMessage(msg) - }) - } - - return func() { - ps.mu.Lock() - defer ps.mu.Unlock() - delete(ps.subscribers, sub) - } -} - -// Publish publishes the provided message to the PubSub, and invokes -// callbacks registered by subscribers asynchronously. -func (ps *PubSub) Publish(msg any) { - ps.mu.Lock() - defer ps.mu.Unlock() - - ps.msg = msg - for sub := range ps.subscribers { - s := sub - ps.cs.Schedule(func(context.Context) { - ps.mu.Lock() - defer ps.mu.Unlock() - if !ps.subscribers[s] { - return - } - s.OnMessage(msg) - }) - } -} - -// Done returns a channel that is closed after the context passed to NewPubSub -// is canceled and all updates have been sent to subscribers. -func (ps *PubSub) Done() <-chan struct{} { - return ps.cs.Done() -} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go deleted file mode 100644 index 9f40909679..0000000000 --- a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go +++ /dev/null @@ -1,47 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpcutil - -import ( - "strings" - - "google.golang.org/grpc/internal/envconfig" -) - -// RegisteredCompressorNames holds names of the registered compressors. -var RegisteredCompressorNames []string - -// IsCompressorNameRegistered returns true when name is available in registry. -func IsCompressorNameRegistered(name string) bool { - for _, compressor := range RegisteredCompressorNames { - if compressor == name { - return true - } - } - return false -} - -// RegisteredCompressors returns a string of registered compressor names -// separated by comma. 
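// For example (hypothetical registry contents): with gzip and zstd registered
// and GRPC_GO_ADVERTISE_COMPRESSORS at its default, this returns "gzip,zstd";
// with the variable set to "false" it returns the empty string and nothing is
// advertised.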
-func RegisteredCompressors() string { - if !envconfig.AdvertiseCompressors { - return "" - } - return strings.Join(RegisteredCompressorNames, ",") -} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go deleted file mode 100644 index b25b0baec3..0000000000 --- a/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go +++ /dev/null @@ -1,63 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpcutil - -import ( - "strconv" - "time" -) - -const maxTimeoutValue int64 = 100000000 - 1 - -// div does integer division and round-up the result. Note that this is -// equivalent to (d+r-1)/r but has less chance to overflow. -func div(d, r time.Duration) int64 { - if d%r > 0 { - return int64(d/r + 1) - } - return int64(d / r) -} - -// EncodeDuration encodes the duration to the format grpc-timeout header -// accepts. -// -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests -func EncodeDuration(t time.Duration) string { - // TODO: This is simplistic and not bandwidth efficient. Improve it. - if t <= 0 { - return "0n" - } - if d := div(t, time.Nanosecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "n" - } - if d := div(t, time.Microsecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "u" - } - if d := div(t, time.Millisecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "m" - } - if d := div(t, time.Second); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "S" - } - if d := div(t, time.Minute); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "M" - } - // Note that maxTimeoutValue * time.Hour > MaxInt64. - return strconv.FormatInt(div(t, time.Hour), 10) + "H" -} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go deleted file mode 100644 index e2f948e8f4..0000000000 --- a/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go +++ /dev/null @@ -1,20 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcutil provides utility functions used across the gRPC codebase. 
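// A few sample encodings from EncodeDuration above; it picks the finest unit
// whose count still fits within eight digits:
//
//	grpcutil.EncodeDuration(50 * time.Millisecond) // "50000000n"
//	grpcutil.EncodeDuration(10 * time.Second)      // "10000000u"
//	grpcutil.EncodeDuration(time.Hour)             // "3600000m"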
-package grpcutil diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go deleted file mode 100644 index 6f22bd8911..0000000000 --- a/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go +++ /dev/null @@ -1,40 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpcutil - -import ( - "context" - - "google.golang.org/grpc/metadata" -) - -type mdExtraKey struct{} - -// WithExtraMetadata creates a new context with incoming md attached. -func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context { - return context.WithValue(ctx, mdExtraKey{}, md) -} - -// ExtraMetadata returns the incoming metadata in ctx if it exists. The -// returned MD should not be modified. Writing to it may cause races. -// Modification should be made to copies of the returned MD. -func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) { - md, ok = ctx.Value(mdExtraKey{}).(metadata.MD) - return -} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go deleted file mode 100644 index ec62b4775e..0000000000 --- a/vendor/google.golang.org/grpc/internal/grpcutil/method.go +++ /dev/null @@ -1,88 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpcutil - -import ( - "errors" - "strings" -) - -// ParseMethod splits service and method from the input. It expects format -// "/service/method". -func ParseMethod(methodName string) (service, method string, _ error) { - if !strings.HasPrefix(methodName, "/") { - return "", "", errors.New("invalid method name: should start with /") - } - methodName = methodName[1:] - - pos := strings.LastIndex(methodName, "/") - if pos < 0 { - return "", "", errors.New("invalid method name: suffix /method is missing") - } - return methodName[:pos], methodName[pos+1:], nil -} - -// baseContentType is the base content-type for gRPC. This is a valid -// content-type on it's own, but can also include a content-subtype such as -// "proto" as a suffix after "+" or ";". See -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests -// for more details. -const baseContentType = "application/grpc" - -// ContentSubtype returns the content-subtype for the given content-type. 
The -// given content-type must be a valid content-type that starts with -// "application/grpc". A content-subtype will follow "application/grpc" after a -// "+" or ";". See -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. -// -// If contentType is not a valid content-type for gRPC, the boolean -// will be false, otherwise true. If content-type == "application/grpc", -// "application/grpc+", or "application/grpc;", the boolean will be true, -// but no content-subtype will be returned. -// -// contentType is assumed to be lowercase already. -func ContentSubtype(contentType string) (string, bool) { - if contentType == baseContentType { - return "", true - } - if !strings.HasPrefix(contentType, baseContentType) { - return "", false - } - // guaranteed since != baseContentType and has baseContentType prefix - switch contentType[len(baseContentType)] { - case '+', ';': - // this will return true for "application/grpc+" or "application/grpc;" - // which the previous validContentType function tested to be valid, so we - // just say that no content-subtype is specified in this case - return contentType[len(baseContentType)+1:], true - default: - return "", false - } -} - -// ContentType builds full content type with the given sub-type. -// -// contentSubtype is assumed to be lowercase -func ContentType(contentSubtype string) string { - if contentSubtype == "" { - return baseContentType - } - return baseContentType + "+" + contentSubtype -} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/regex.go b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go deleted file mode 100644 index 7a092b2b80..0000000000 --- a/vendor/google.golang.org/grpc/internal/grpcutil/regex.go +++ /dev/null @@ -1,31 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpcutil - -import "regexp" - -// FullMatchWithRegex returns whether the full text matches the regex provided. -func FullMatchWithRegex(re *regexp.Regexp, text string) bool { - if len(text) == 0 { - return re.MatchString(text) - } - re.Longest() - rem := re.FindString(text) - return len(rem) == len(text) -} diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go deleted file mode 100644 index fe49cb74c5..0000000000 --- a/vendor/google.golang.org/grpc/internal/idle/idle.go +++ /dev/null @@ -1,278 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package idle contains a component for managing idleness (entering and exiting) -// based on RPC activity. -package idle - -import ( - "fmt" - "math" - "sync" - "sync/atomic" - "time" -) - -// For overriding in unit tests. -var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { - return time.AfterFunc(d, f) -} - -// Enforcer is the functionality provided by grpc.ClientConn to enter -// and exit from idle mode. -type Enforcer interface { - ExitIdleMode() error - EnterIdleMode() -} - -// Manager implements idleness detection and calls the configured Enforcer to -// enter/exit idle mode when appropriate. Must be created by NewManager. -type Manager struct { - // State accessed atomically. - lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. - activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. - activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback. - closed int32 // Boolean; True when the manager is closed. - - // Can be accessed without atomics or mutex since these are set at creation - // time and read-only after that. - enforcer Enforcer // Functionality provided by grpc.ClientConn. - timeout time.Duration - - // idleMu is used to guarantee mutual exclusion in two scenarios: - // - Opposing intentions: - // - a: Idle timeout has fired and handleIdleTimeout() is trying to put - // the channel in idle mode because the channel has been inactive. - // - b: At the same time an RPC is made on the channel, and OnCallBegin() - // is trying to prevent the channel from going idle. - // - Competing intentions: - // - The channel is in idle mode and there are multiple RPCs starting at - // the same time, all trying to move the channel out of idle. Only one - // of them should succeed in doing so, while the other RPCs should - // piggyback on the first one and be successfully handled. - idleMu sync.RWMutex - actuallyIdle bool - timer *time.Timer -} - -// NewManager creates a new idleness manager implementation for the -// given idle timeout. It begins in idle mode. -func NewManager(enforcer Enforcer, timeout time.Duration) *Manager { - return &Manager{ - enforcer: enforcer, - timeout: timeout, - actuallyIdle: true, - activeCallsCount: -math.MaxInt32, - } -} - -// resetIdleTimerLocked resets the idle timer to the given duration. Called -// when exiting idle mode or when the timer fires and we need to reset it. -func (m *Manager) resetIdleTimerLocked(d time.Duration) { - if m.isClosed() || m.timeout == 0 || m.actuallyIdle { - return - } - - // It is safe to ignore the return value from Reset() because this method is - // only ever called from the timer callback or when exiting idle mode. - if m.timer != nil { - m.timer.Stop() - } - m.timer = timeAfterFunc(d, m.handleIdleTimeout) -} - -func (m *Manager) resetIdleTimer(d time.Duration) { - m.idleMu.Lock() - defer m.idleMu.Unlock() - m.resetIdleTimerLocked(d) -} - -// handleIdleTimeout is the timer callback that is invoked upon expiry of the -// configured idle timeout. The channel is considered inactive if there are no -// ongoing calls and no RPC activity since the last time the timer fired. 
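// A hypothetical wiring sketch for the Manager above; grpc.ClientConn is the
// real Enforcer, and noopEnforcer is a stand-in for illustration:
//
//	type noopEnforcer struct{}
//
//	func (noopEnforcer) ExitIdleMode() error { return nil }
//	func (noopEnforcer) EnterIdleMode()      {}
//
//	m := idle.NewManager(noopEnforcer{}, 30*time.Minute)
//	_ = m.OnCallBegin() // records activity and exits idle mode if needed
//	m.OnCallEnd()
//	m.Close()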
-func (m *Manager) handleIdleTimeout() { - if m.isClosed() { - return - } - - if atomic.LoadInt32(&m.activeCallsCount) > 0 { - m.resetIdleTimer(m.timeout) - return - } - - // There has been activity on the channel since we last got here. Reset the - // timer and return. - if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { - // Set the timer to fire after a duration of idle timeout, calculated - // from the time the most recent RPC completed. - atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0) - m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime)-time.Now().UnixNano()) + m.timeout) - return - } - - // Now that we've checked that there has been no activity, attempt to enter - // idle mode, which is very likely to succeed. - if m.tryEnterIdleMode() { - // Successfully entered idle mode. No timer needed until we exit idle. - return - } - - // Failed to enter idle mode due to a concurrent RPC that kept the channel - // active, or because of an error from the channel. Undo the attempt to - // enter idle, and reset the timer to try again later. - m.resetIdleTimer(m.timeout) -} - -// tryEnterIdleMode instructs the channel to enter idle mode. But before -// that, it performs a last minute check to ensure that no new RPC has come in, -// making the channel active. -// -// Return value indicates whether or not the channel moved to idle mode. -// -// Holds idleMu which ensures mutual exclusion with exitIdleMode. -func (m *Manager) tryEnterIdleMode() bool { - // Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() - // that the channel is either in idle mode or is trying to get there. - if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { - // This CAS operation can fail if an RPC started after we checked for - // activity in the timer handler, or one was ongoing from before the - // last time the timer fired, or if a test is attempting to enter idle - // mode without checking. In all cases, abort going into idle mode. - return false - } - // N.B. if we fail to enter idle mode after this, we must re-add - // math.MaxInt32 to m.activeCallsCount. - - m.idleMu.Lock() - defer m.idleMu.Unlock() - - if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 { - // We raced and lost to a new RPC. Very rare, but stop entering idle. - atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) - return false - } - if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { - // A very short RPC could have come in (and also finished) after we - // checked for calls count and activity in handleIdleTimeout(), but - // before the CAS operation. So, we need to check for activity again. - atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) - return false - } - - // No new RPCs have come in since we set the active calls count value to - // -math.MaxInt32. And since we have the lock, it is safe to enter idle mode - // unconditionally now. - m.enforcer.EnterIdleMode() - m.actuallyIdle = true - return true -} - -func (m *Manager) EnterIdleModeForTesting() { - m.tryEnterIdleMode() -} - -// OnCallBegin is invoked at the start of every RPC. -func (m *Manager) OnCallBegin() error { - if m.isClosed() { - return nil - } - - if atomic.AddInt32(&m.activeCallsCount, 1) > 0 { - // Channel is not idle now. Set the activity bit and allow the call. - atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) - return nil - } - - // Channel is either in idle mode or is in the process of moving to idle - // mode. Attempt to exit idle mode to allow this RPC. 
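// (activeCallsCount was parked at -math.MaxInt32 when the channel went idle,
// so the AddInt32 above produced a negative value; a successful ExitIdleMode
// below re-adds math.MaxInt32, restoring the true in-flight count, including
// this call.)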
- if err := m.ExitIdleMode(); err != nil { - // Undo the increment to calls count, and return an error causing the - // RPC to fail. - atomic.AddInt32(&m.activeCallsCount, -1) - return err - } - - atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) - return nil -} - -// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's -// internal state. -func (m *Manager) ExitIdleMode() error { - // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. - m.idleMu.Lock() - defer m.idleMu.Unlock() - - if m.isClosed() || !m.actuallyIdle { - // This can happen in three scenarios: - // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called - // tryEnterIdleMode(). But before the latter could grab the lock, an RPC - // came in and OnCallBegin() noticed that the calls count is negative. - // - Channel is in idle mode, and multiple new RPCs come in at the same - // time, all of them notice a negative calls count in OnCallBegin and get - // here. The first one to get the lock would got the channel to exit idle. - // - Channel is not in idle mode, and the user calls Connect which calls - // m.ExitIdleMode. - // - // In any case, there is nothing to do here. - return nil - } - - if err := m.enforcer.ExitIdleMode(); err != nil { - return fmt.Errorf("failed to exit idle mode: %w", err) - } - - // Undo the idle entry process. This also respects any new RPC attempts. - atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) - m.actuallyIdle = false - - // Start a new timer to fire after the configured idle timeout. - m.resetIdleTimerLocked(m.timeout) - return nil -} - -// OnCallEnd is invoked at the end of every RPC. -func (m *Manager) OnCallEnd() { - if m.isClosed() { - return - } - - // Record the time at which the most recent call finished. - atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano()) - - // Decrement the active calls count. This count can temporarily go negative - // when the timer callback is in the process of moving the channel to idle - // mode, but one or more RPCs come in and complete before the timer callback - // can get done with the process of moving to idle mode. - atomic.AddInt32(&m.activeCallsCount, -1) -} - -func (m *Manager) isClosed() bool { - return atomic.LoadInt32(&m.closed) == 1 -} - -func (m *Manager) Close() { - atomic.StoreInt32(&m.closed, 1) - - m.idleMu.Lock() - if m.timer != nil { - m.timer.Stop() - m.timer = nil - } - m.idleMu.Unlock() -} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go deleted file mode 100644 index 6c7ea6a533..0000000000 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ /dev/null @@ -1,226 +0,0 @@ -/* - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package internal contains gRPC-internal code, to avoid polluting -// the godoc of the top-level grpc package. It must not import any grpc -// symbols to avoid circular dependencies. 
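// Most of the vars below are typed any and hold functions assigned by other
// grpc packages at init time, then invoked through type assertions. A
// hypothetical sketch of the pattern, with canonicalString standing in for
// the registering implementation:
//
//	// registering side:
//	internal.CanonicalString = func(c codes.Code) string { return canonicalString(c) }
//
//	// consuming side:
//	s := internal.CanonicalString.(func(codes.Code) string)(codes.OK)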
-package internal - -import ( - "context" - "time" - - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/serviceconfig" -) - -var ( - // WithHealthCheckFunc is set by dialoptions.go - WithHealthCheckFunc any // func (HealthChecker) DialOption - // HealthCheckFunc is used to provide client-side LB channel health checking - HealthCheckFunc HealthChecker - // BalancerUnregister is exported by package balancer to unregister a balancer. - BalancerUnregister func(name string) - // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by - // default, but tests may wish to set it lower for convenience. - KeepaliveMinPingTime = 10 * time.Second - // KeepaliveMinServerPingTime is the minimum ping interval for servers. - // This must be 1s by default, but tests may wish to set it lower for - // convenience. - KeepaliveMinServerPingTime = time.Second - // ParseServiceConfig parses a JSON representation of the service config. - ParseServiceConfig any // func(string) *serviceconfig.ParseResult - // EqualServiceConfigForTesting is for testing service config generation and - // parsing. Both a and b should be returned by ParseServiceConfig. - // This function compares the config without rawJSON stripped, in case the - // there's difference in white space. - EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool - // GetCertificateProviderBuilder returns the registered builder for the - // given name. This is set by package certprovider for use from xDS - // bootstrap code while parsing certificate provider configs in the - // bootstrap file. - GetCertificateProviderBuilder any // func(string) certprovider.Builder - // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo - // stored in the passed in attributes. This is set by - // credentials/xds/xds.go. - GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *unsafe.Pointer - // GetServerCredentials returns the transport credentials configured on a - // gRPC server. An xDS-enabled server needs to know what type of credentials - // is configured on the underlying gRPC server. This is set by server.go. - GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials - // CanonicalString returns the canonical string of the code defined here: - // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. - CanonicalString any // func (codes.Code) string - // IsRegisteredMethod returns whether the passed in method is registered as - // a method on the server. - IsRegisteredMethod any // func(*grpc.Server, string) bool - // ServerFromContext returns the server from the context. - ServerFromContext any // func(context.Context) *grpc.Server - // AddGlobalServerOptions adds an array of ServerOption that will be - // effective globally for newly created servers. The priority will be: 1. - // user-provided; 2. this method; 3. default values. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. - AddGlobalServerOptions any // func(opt ...ServerOption) - // ClearGlobalServerOptions clears the array of extra ServerOption. This - // method is useful in testing and benchmarking. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. 
- ClearGlobalServerOptions func() - // AddGlobalDialOptions adds an array of DialOption that will be effective - // globally for newly created client channels. The priority will be: 1. - // user-provided; 2. this method; 3. default values. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. - AddGlobalDialOptions any // func(opt ...DialOption) - // DisableGlobalDialOptions returns a DialOption that prevents the - // ClientConn from applying the global DialOptions (set via - // AddGlobalDialOptions). - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. - DisableGlobalDialOptions any // func() grpc.DialOption - // ClearGlobalDialOptions clears the array of extra DialOption. This - // method is useful in testing and benchmarking. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. - ClearGlobalDialOptions func() - // JoinDialOptions combines the dial options passed as arguments into a - // single dial option. - JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption - // JoinServerOptions combines the server options passed as arguments into a - // single server option. - JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption - - // WithBinaryLogger returns a DialOption that specifies the binary logger - // for a ClientConn. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. - WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption - // BinaryLogger returns a ServerOption that can set the binary logger for a - // server. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. - BinaryLogger any // func(binarylog.Logger) grpc.ServerOption - - // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn - SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) - - // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using - // the provided xds bootstrap config instead of the global configuration from - // the supported environment variables. The resolver.Builder is meant to be - // used in conjunction with the grpc.WithResolvers DialOption. - // - // Testing Only - // - // This function should ONLY be used for testing and may not work with some - // other features, including the CSDS service. - NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) - - // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster - // Specifier Plugin for testing purposes, regardless of the XDSRLS environment - // variable. - // - // TODO: Remove this function once the RLS env var is removed. - RegisterRLSClusterSpecifierPluginForTesting func() - - // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster - // Specifier Plugin for testing purposes. This is needed because there is no way - // to unregister the RLS Cluster Specifier Plugin after registering it solely - // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). - // - // TODO: Remove this function once the RLS env var is removed. - UnregisterRLSClusterSpecifierPluginForTesting func() - - // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing - // purposes, regardless of the RBAC environment variable. 
- // - // TODO: Remove this function once the RBAC env var is removed. - RegisterRBACHTTPFilterForTesting func() - - // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for - // testing purposes. This is needed because there is no way to unregister the - // HTTP Filter after registering it solely for testing purposes using - // RegisterRBACHTTPFilterForTesting(). - // - // TODO: Remove this function once the RBAC env var is removed. - UnregisterRBACHTTPFilterForTesting func() - - // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. - ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions) - - // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra - // metadata to RPCs. - GRPCResolverSchemeExtraMetadata string = "xds" - - // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. - EnterIdleModeForTesting any // func(*grpc.ClientConn) - - // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. - ExitIdleModeForTesting any // func(*grpc.ClientConn) error - - ChannelzTurnOffForTesting func() - - // TriggerXDSResourceNameNotFoundForTesting triggers the resource-not-found - // error for a given resource type and name. This is usually triggered when - // the associated watch timer fires. For testing purposes, having this - // function makes events more predictable than relying on timer events. - TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error - - // TriggerXDSResourceNameNotFoundClient invokes the testing xDS Client singleton - // to trigger resource-not-found for a resource type name and resource name. - TriggerXDSResourceNameNotFoundClient any // func(string, string) error - - // FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD. - FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool) -) - -// HealthChecker defines the signature of the client-side LB channel health checking function. -// -// The implementation is expected to create a health checking RPC stream by -// calling newStream(), watch for the health status of serviceName, and report -// its health back by calling setConnectivityState(). -// -// The health checking protocol is defined at: -// https://github.com/grpc/grpc/blob/master/doc/health-checking.md -type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error - -const ( - // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. - CredsBundleModeFallback = "fallback" - // CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer - // mode. - CredsBundleModeBalancer = "balancer" - // CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode - // that supports backend returned by grpclb balancer. - CredsBundleModeBackendFromBalancer = "backend-from-balancer" -) - -// RLSLoadBalancingPolicyName is the name of the RLS LB policy. -// -// It currently has an experimental suffix which would be removed once -// end-to-end testing of the policy is completed. -const RLSLoadBalancingPolicyName = "rls_experimental" diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go deleted file mode 100644 index 900bfb7160..0000000000 --- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ /dev/null @@ -1,132 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors.
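The internal package deleted above breaks import cycles by exposing hooks as package-level vars, typed `any` and populated by the owning package at init time, then asserted back to their documented signatures by consumers. A minimal sketch of that wiring pattern, with hypothetical names (not the gRPC API):

package main

import "fmt"

// CanonicalString is a hook in the style of the vars above: declared as
// `any` here, with the intended signature recorded in a comment.
var CanonicalString any // func(code int) string

func init() {
	// The implementing side registers its function at init time.
	CanonicalString = func(code int) string { return fmt.Sprintf("CODE_%d", code) }
}

func main() {
	// A consumer asserts the hook back to the documented signature before use.
	if f, ok := CanonicalString.(func(int) string); ok {
		fmt.Println(f(5)) // CODE_5
	}
}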
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package metadata contains functions to set and get metadata from addresses. -// -// This package is experimental. -package metadata - -import ( - "fmt" - "strings" - - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/resolver" -) - -type mdKeyType string - -const mdKey = mdKeyType("grpc.internal.address.metadata") - -type mdValue metadata.MD - -func (m mdValue) Equal(o any) bool { - om, ok := o.(mdValue) - if !ok { - return false - } - if len(m) != len(om) { - return false - } - for k, v := range m { - ov := om[k] - if len(ov) != len(v) { - return false - } - for i, ve := range v { - if ov[i] != ve { - return false - } - } - } - return true } - -// Get returns the metadata of addr. -func Get(addr resolver.Address) metadata.MD { - attrs := addr.Attributes - if attrs == nil { - return nil - } - md, _ := attrs.Value(mdKey).(mdValue) - return metadata.MD(md) -} - -// Set sets (overrides) the metadata in addr. -// -// When a SubConn is created with this address, the RPCs sent on it will all -// have this metadata. -func Set(addr resolver.Address, md metadata.MD) resolver.Address { - addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md)) - return addr -} - -// Validate validates every pair in md with ValidatePair. -func Validate(md metadata.MD) error { - for k, vals := range md { - if err := ValidatePair(k, vals...); err != nil { - return err - } - } - return nil -} - -// hasNotPrintable returns true if msg contains any characters which are not in %x20-%x7E -func hasNotPrintable(msg string) bool { - // iterate by index, which avoids the conversion a for-range loop would perform - for i := 0; i < len(msg); i++ { - if msg[i] < 0x20 || msg[i] > 0x7E { - return true - } - } - return false -} - -// ValidatePair validates a key-value pair with the following rules (pseudo-headers are skipped): -// -// - key must contain one or more characters. - -// - the characters in the key must be contained in [0-9 a-z _ - .]. - -// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed. - -// - the characters in every value must be printable (in [%x20-%x7E]). -func ValidatePair(key string, vals ...string) error { - // key should not be empty - if key == "" { - return fmt.Errorf("there is an empty key in the header") - } - // pseudo-header will be ignored - if key[0] == ':' { - return nil - } - // check key; iterate by index, which avoids the conversion a for-range loop would perform - for i := 0; i < len(key); i++ { - r := key[i] - if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.'
&& r != '-' && r != '_' { - return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key) - } - } - if strings.HasSuffix(key, "-bin") { - return nil - } - // check value - for _, val := range vals { - if hasNotPrintable(val) { - return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key) - } - } - return nil -} diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go deleted file mode 100644 index 52cfab1b93..0000000000 --- a/vendor/google.golang.org/grpc/internal/pretty/pretty.go +++ /dev/null @@ -1,81 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package pretty defines helper functions to pretty-print structs for logging. -package pretty - -import ( - "bytes" - "encoding/json" - "fmt" - - protov1 "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/encoding/protojson" - protov2 "google.golang.org/protobuf/proto" -) - -const jsonIndent = " " - -// ToJSON marshals the input into a json string. -// -// If marshal fails, it falls back to fmt.Sprintf("%+v"). -func ToJSON(e any) string { - switch ee := e.(type) { - case protov1.Message: - mm := protojson.MarshalOptions{Indent: jsonIndent} - ret, err := mm.Marshal(protov1.MessageV2(ee)) - if err != nil { - // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 - // messages are not imported, and this will fail because the message - // is not found. - return fmt.Sprintf("%+v", ee) - } - return string(ret) - case protov2.Message: - mm := protojson.MarshalOptions{ - Multiline: true, - Indent: jsonIndent, - } - ret, err := mm.Marshal(ee) - if err != nil { - // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 - // messages are not imported, and this will fail because the message - // is not found. - return fmt.Sprintf("%+v", ee) - } - return string(ret) - default: - ret, err := json.MarshalIndent(ee, "", jsonIndent) - if err != nil { - return fmt.Sprintf("%+v", ee) - } - return string(ret) - } -} - -// FormatJSON formats the input json bytes with indentation. -// -// If Indent fails, it returns the unchanged input as string. -func FormatJSON(b []byte) string { - var out bytes.Buffer - err := json.Indent(&out, b, "", jsonIndent) - if err != nil { - return string(b) - } - return out.String() -} diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go deleted file mode 100644 index f0603871c9..0000000000 --- a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go +++ /dev/null @@ -1,167 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
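The key rules enforced by the deleted ValidatePair above are easy to exercise in isolation; a small self-contained sketch of just the key check (illustrative only, not the internal gRPC function):

package main

import "fmt"

// validKey reports whether a metadata key satisfies the rules enforced by the
// deleted ValidatePair: non-empty, and only [0-9 a-z _ - .] characters
// (pseudo-headers starting with ':' are skipped by the real code).
func validKey(key string) bool {
	if key == "" {
		return false
	}
	for i := 0; i < len(key); i++ {
		r := key[i]
		if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') &&
			r != '.' && r != '-' && r != '_' {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(validKey("content-type")) // true
	fmt.Println(validKey("Bad_Key"))      // false: uppercase not allowed
	fmt.Println(validKey(""))             // false: empty key
}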
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package resolver provides internal resolver-related functionality. -package resolver - -import ( - "context" - "sync" - - "google.golang.org/grpc/internal/serviceconfig" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/resolver" -) - -// ConfigSelector controls what configuration to use for every RPC. -type ConfigSelector interface { - // Selects the configuration for the RPC, or terminates it using the error. - // This error will be converted by the gRPC library to a status error with - // code UNKNOWN if it is not returned as a status error. - SelectConfig(RPCInfo) (*RPCConfig, error) -} - -// RPCInfo contains RPC information needed by a ConfigSelector. -type RPCInfo struct { - // Context is the user's context for the RPC and contains headers and - // application timeout. It is passed for interception purposes and for - // efficiency reasons. SelectConfig should not be blocking. - Context context.Context - Method string // i.e. "/Service/Method" -} - -// RPCConfig describes the configuration to use for each RPC. -type RPCConfig struct { - // The context to use for the remainder of the RPC; can pass info to LB - // policy or affect timeout or metadata. - Context context.Context - MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC - OnCommitted func() // Called when the RPC has been committed (retries no longer possible) - Interceptor ClientInterceptor -} - -// ClientStream is the same as grpc.ClientStream, but defined here for circular -// dependency reasons. -type ClientStream interface { - // Header returns the header metadata received from the server if there - // is any. It blocks if the metadata is not ready to read. - Header() (metadata.MD, error) - // Trailer returns the trailer metadata from the server, if there is any. - // It must only be called after stream.CloseAndRecv has returned, or - // stream.Recv has returned a non-nil error (including io.EOF). - Trailer() metadata.MD - // CloseSend closes the send direction of the stream. It closes the stream - // when non-nil error is met. It is also not safe to call CloseSend - // concurrently with SendMsg. - CloseSend() error - // Context returns the context for this stream. - // - // It should not be called until after Header or RecvMsg has returned. Once - // called, subsequent client-side retries are disabled. - Context() context.Context - // SendMsg is generally called by generated code. On error, SendMsg aborts - // the stream. If the error was generated by the client, the status is - // returned directly; otherwise, io.EOF is returned and the status of - // the stream may be discovered using RecvMsg. - // - // SendMsg blocks until: - // - There is sufficient flow control to schedule m with the transport, or - // - The stream is done, or - // - The stream breaks. - // - // SendMsg does not wait until the message is received by the server. An - // untimely stream closure may result in lost messages. To ensure delivery, - // users should ensure the RPC completed successfully using RecvMsg. 
- // - // It is safe to have a goroutine calling SendMsg and another goroutine - // calling RecvMsg on the same stream at the same time, but it is not safe - // to call SendMsg on the same stream in different goroutines. It is also - // not safe to call CloseSend concurrently with SendMsg. - SendMsg(m any) error - // RecvMsg blocks until it receives a message into m or the stream is - // done. It returns io.EOF when the stream completes successfully. On - // any other error, the stream is aborted and the error contains the RPC - // status. - // - // It is safe to have a goroutine calling SendMsg and another goroutine - // calling RecvMsg on the same stream at the same time, but it is not - // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m any) error -} - -// ClientInterceptor is an interceptor for gRPC client streams. -type ClientInterceptor interface { - // NewStream produces a ClientStream for an RPC which may optionally use - // the provided function to produce a stream for delegation. Note: - // RPCInfo.Context should not be used (will be nil). - // - // done is invoked when the RPC is finished using its connection, or could - // not be assigned a connection. RPC operations may still occur on - // ClientStream after done is called, since the interceptor is invoked by - // application-layer operations. done must never be nil when called. - NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error) -} - -// ServerInterceptor is an interceptor for incoming RPCs on the gRPC server side. -type ServerInterceptor interface { - // AllowRPC checks if an incoming RPC is allowed to proceed based on - // information about the connection the RPC was received on, and its HTTP - // headers. This information will be piped into the context. - AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting. -} - -type csKeyType string - -const csKey = csKeyType("grpc.internal.resolver.configSelector") - -// SetConfigSelector sets the config selector in state and returns the new -// state. -func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State { - state.Attributes = state.Attributes.WithValue(csKey, cs) - return state -} - -// GetConfigSelector retrieves the config selector from state, if present, and -// returns it or nil if absent. -func GetConfigSelector(state resolver.State) ConfigSelector { - cs, _ := state.Attributes.Value(csKey).(ConfigSelector) - return cs -} - -// SafeConfigSelector allows for safe switching of ConfigSelector -// implementations such that previous values are guaranteed to not be in use -// when UpdateConfigSelector returns. -type SafeConfigSelector struct { - mu sync.RWMutex - cs ConfigSelector -} - -// UpdateConfigSelector swaps to the provided ConfigSelector and blocks until -// all uses of the previous ConfigSelector have completed. -func (scs *SafeConfigSelector) UpdateConfigSelector(cs ConfigSelector) { - scs.mu.Lock() - defer scs.mu.Unlock() - scs.cs = cs -} - -// SelectConfig defers to the current ConfigSelector in scs.
-func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) (*RPCConfig, error) { - scs.mu.RLock() - defer scs.mu.RUnlock() - return scs.cs.SelectConfig(r) -} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go deleted file mode 100644 index b66dcb2132..0000000000 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ /dev/null @@ -1,441 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package dns implements a dns resolver to be installed as the default resolver -// in grpc. -package dns - -import ( - "context" - "encoding/json" - "fmt" - "net" - "os" - "strconv" - "strings" - "sync" - "time" - - grpclbstate "google.golang.org/grpc/balancer/grpclb/state" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/grpcrand" - "google.golang.org/grpc/internal/resolver/dns/internal" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" -) - -// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB -// addresses from SRV records. Must not be changed after init time. -var EnableSRVLookups = false - -var logger = grpclog.Component("dns") - -func init() { - resolver.Register(NewBuilder()) - internal.TimeAfterFunc = time.After - internal.NewNetResolver = newNetResolver - internal.AddressDialer = addressDialer -} - -const ( - defaultPort = "443" - defaultDNSSvrPort = "53" - golang = "GO" - // txtPrefix is the prefix string to be prepended to the host name for txt - // record lookup. - txtPrefix = "_grpc_config." - // In DNS, service config is encoded in a TXT record via the mechanism - // described in RFC-1464 using the attribute name grpc_config. - txtAttribute = "grpc_config=" -) - -var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) { - return func(ctx context.Context, network, _ string) (net.Conn, error) { - var dialer net.Dialer - return dialer.DialContext(ctx, network, address) - } -} - -var newNetResolver = func(authority string) (internal.NetResolver, error) { - if authority == "" { - return net.DefaultResolver, nil - } - - host, port, err := parseTarget(authority, defaultDNSSvrPort) - if err != nil { - return nil, err - } - - authorityWithPort := net.JoinHostPort(host, port) - - return &net.Resolver{ - PreferGo: true, - Dial: internal.AddressDialer(authorityWithPort), - }, nil -} - -// NewBuilder creates a dnsBuilder which is used to create DNS resolvers. -func NewBuilder() resolver.Builder { - return &dnsBuilder{} } - -type dnsBuilder struct{} - -// Build creates and starts a DNS resolver that watches the name resolution of -// the target.
-func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - host, port, err := parseTarget(target.Endpoint(), defaultPort) - if err != nil { - return nil, err - } - - // IP address. - if ipAddr, ok := formatIP(host); ok { - addr := []resolver.Address{{Addr: ipAddr + ":" + port}} - cc.UpdateState(resolver.State{Addresses: addr}) - return deadResolver{}, nil - } - - // DNS address (non-IP). - ctx, cancel := context.WithCancel(context.Background()) - d := &dnsResolver{ - host: host, - port: port, - ctx: ctx, - cancel: cancel, - cc: cc, - rn: make(chan struct{}, 1), - disableServiceConfig: opts.DisableServiceConfig, - } - - d.resolver, err = internal.NewNetResolver(target.URL.Host) - if err != nil { - return nil, err - } - - d.wg.Add(1) - go d.watcher() - return d, nil -} - -// Scheme returns the naming scheme of this resolver builder, which is "dns". -func (b *dnsBuilder) Scheme() string { - return "dns" -} - -// deadResolver is a resolver that does nothing. -type deadResolver struct{} - -func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {} - -func (deadResolver) Close() {} - -// dnsResolver watches for the name resolution update for a non-IP target. -type dnsResolver struct { - host string - port string - resolver internal.NetResolver - ctx context.Context - cancel context.CancelFunc - cc resolver.ClientConn - // rn channel is used by ResolveNow() to force an immediate resolution of the - // target. - rn chan struct{} - // wg is used to ensure that Close() returns only after the watcher() - // goroutine has finished. Otherwise, a data race is possible. [Race Example] in - // dns_resolver_test we replace the real lookup functions with mocked ones to - // facilitate testing. If Close() doesn't wait for the watcher() goroutine - // to finish, the race detector will sometimes warn that lookup (READ of the lookup - // function pointers) inside the watcher() goroutine has a data race with - // replaceNetFunc (WRITE of the lookup function pointers). - wg sync.WaitGroup - disableServiceConfig bool -} - -// ResolveNow invokes an immediate resolution of the target that this -// dnsResolver watches. -func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) { - select { - case d.rn <- struct{}{}: - default: - } -} - -// Close closes the dnsResolver. -func (d *dnsResolver) Close() { - d.cancel() - d.wg.Wait() -} - -func (d *dnsResolver) watcher() { - defer d.wg.Done() - backoffIndex := 1 - for { - state, err := d.lookup() - if err != nil { - // Report error to the underlying grpc.ClientConn. - d.cc.ReportError(err) - } else { - err = d.cc.UpdateState(*state) - } - - var waitTime time.Duration - if err == nil { - // Resolution succeeded; wait for the next ResolveNow. However, also wait at - // least 30 seconds to prevent constantly re-resolving. - backoffIndex = 1 - waitTime = internal.MinResolutionRate - select { - case <-d.ctx.Done(): - return - case <-d.rn: - } - } else { - // Poll on an error found in DNS Resolver or an error received from - // ClientConn.
- waitTime = backoff.DefaultExponential.Backoff(backoffIndex) - backoffIndex++ - } - select { - case <-d.ctx.Done(): - return - case <-internal.TimeAfterFunc(waitTime): - } - } -} - -func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { - if !EnableSRVLookups { - return nil, nil - } - var newAddrs []resolver.Address - _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host) - if err != nil { - err = handleDNSError(err, "SRV") // may become nil - return nil, err - } - for _, s := range srvs { - lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target) - if err != nil { - err = handleDNSError(err, "A") // may become nil - if err == nil { - // If there are other SRV records, look them up and ignore this - // one that does not exist. - continue - } - return nil, err - } - for _, a := range lbAddrs { - ip, ok := formatIP(a) - if !ok { - return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) - } - addr := ip + ":" + strconv.Itoa(int(s.Port)) - newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target}) - } - } - return newAddrs, nil -} - -func handleDNSError(err error, lookupType string) error { - dnsErr, ok := err.(*net.DNSError) - if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { - // Timeouts and temporary errors should be communicated to gRPC to - // attempt another DNS query (with backoff). Other errors should be - // suppressed (they may represent the absence of a TXT record). - return nil - } - if err != nil { - err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) - logger.Info(err) - } - return err -} - -func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { - ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host) - if err != nil { - if envconfig.TXTErrIgnore { - return nil - } - if err = handleDNSError(err, "TXT"); err != nil { - return &serviceconfig.ParseResult{Err: err} - } - return nil - } - var res string - for _, s := range ss { - res += s - } - - // TXT record must have "grpc_config=" attribute in order to be used as - // service config. - if !strings.HasPrefix(res, txtAttribute) { - logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) - // This is not an error; it is the equivalent of not having a service - // config. - return nil - } - sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) - return d.cc.ParseServiceConfig(sc) -} - -func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { - addrs, err := d.resolver.LookupHost(d.ctx, d.host) - if err != nil { - err = handleDNSError(err, "A") - return nil, err - } - newAddrs := make([]resolver.Address, 0, len(addrs)) - for _, a := range addrs { - ip, ok := formatIP(a) - if !ok { - return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) - } - addr := ip + ":" + d.port - newAddrs = append(newAddrs, resolver.Address{Addr: addr}) - } - return newAddrs, nil -} - -func (d *dnsResolver) lookup() (*resolver.State, error) { - srv, srvErr := d.lookupSRV() - addrs, hostErr := d.lookupHost() - if hostErr != nil && (srvErr != nil || len(srv) == 0) { - return nil, hostErr - } - - state := resolver.State{Addresses: addrs} - if len(srv) > 0 { - state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) - } - if !d.disableServiceConfig { - state.ServiceConfig = d.lookupTXT() - } - return &state, nil -} - -// formatIP returns ok = false if addr is not a valid textual representation of -// an IP address. If addr is an IPv4 address, return the addr and ok = true. 
-// If addr is an IPv6 address, return the addr enclosed in square brackets and -// ok = true. -func formatIP(addr string) (addrIP string, ok bool) { - ip := net.ParseIP(addr) - if ip == nil { - return "", false - } - if ip.To4() != nil { - return addr, true - } - return "[" + addr + "]", true -} - -// parseTarget takes the user input target string and default port, returns -// formatted host and port info. If target doesn't specify a port, set the port -// to be the defaultPort. If target is in IPv6 format and host-name is enclosed -// in square brackets, brackets are stripped when setting the host. -// examples: -// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" -// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" -// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443" -// target: ":80" defaultPort: "443" returns host: "localhost", port: "80" -func parseTarget(target, defaultPort string) (host, port string, err error) { - if target == "" { - return "", "", internal.ErrMissingAddr - } - if ip := net.ParseIP(target); ip != nil { - // target is an IPv4 or IPv6(without brackets) address - return target, defaultPort, nil - } - if host, port, err = net.SplitHostPort(target); err == nil { - if port == "" { - // If the port field is empty (target ends with colon), e.g. "[::1]:", - // this is an error. - return "", "", internal.ErrEndsWithColon - } - // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port - if host == "" { - // Keep consistent with net.Dial(): If the host is empty, as in ":80", - // the local system is assumed. - host = "localhost" - } - return host, port, nil - } - if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { - // target doesn't have port - return host, port, nil - } - return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err) -} - -type rawChoice struct { - ClientLanguage *[]string `json:"clientLanguage,omitempty"` - Percentage *int `json:"percentage,omitempty"` - ClientHostName *[]string `json:"clientHostName,omitempty"` - ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"` -} - -func containsString(a *[]string, b string) bool { - if a == nil { - return true - } - for _, c := range *a { - if c == b { - return true - } - } - return false -} - -func chosenByPercentage(a *int) bool { - if a == nil { - return true - } - return grpcrand.Intn(100)+1 <= *a -} - -func canaryingSC(js string) string { - if js == "" { - return "" - } - var rcs []rawChoice - err := json.Unmarshal([]byte(js), &rcs) - if err != nil { - logger.Warningf("dns: error parsing service config json: %v", err) - return "" - } - cliHostname, err := os.Hostname() - if err != nil { - logger.Warningf("dns: error getting client hostname: %v", err) - return "" - } - var sc string - for _, c := range rcs { - if !containsString(c.ClientLanguage, golang) || - !chosenByPercentage(c.Percentage) || - !containsString(c.ClientHostName, cliHostname) || - c.ServiceConfig == nil { - continue - } - sc = string(*c.ServiceConfig) - break - } - return sc -} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go deleted file mode 100644 index c7fc557d00..0000000000 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. 
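The target-parsing examples in the deleted parseTarget doc above boil down to net.SplitHostPort plus a default port. A simplified sketch of that behavior, without the resolver's error cases (hypothetical helper, not the gRPC function):

package main

import (
	"fmt"
	"net"
)

// splitTarget mirrors the deleted parseTarget: apply a default port when the
// target has none, treat bare IPs as host-only, and default an empty host to
// "localhost", as net.Dial does for ":80".
func splitTarget(target, defaultPort string) (string, string) {
	if ip := net.ParseIP(target); ip != nil {
		return target, defaultPort // bare IPv4/IPv6 address, no port
	}
	if host, port, err := net.SplitHostPort(target); err == nil {
		if host == "" {
			host = "localhost" // ":80" means the local system
		}
		return host, port
	}
	// No port present; retry with the default appended.
	host, port, _ := net.SplitHostPort(target + ":" + defaultPort)
	return host, port
}

func main() {
	fmt.Println(splitTarget("www.google.com", "443")) // www.google.com 443
	fmt.Println(splitTarget("ipv4-host:80", "443"))   // ipv4-host 80
	fmt.Println(splitTarget("[ipv6-host]", "443"))    // ipv6-host 443
	fmt.Println(splitTarget(":80", "443"))            // localhost 80
}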
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package internal contains functionality internal to the dns resolver package. -package internal - -import ( - "context" - "errors" - "net" - "time" -) - -// NetResolver groups the methods on net.Resolver that are used by the DNS -// resolver implementation. This allows the default net.Resolver instance to be -// overridden by tests. -type NetResolver interface { - LookupHost(ctx context.Context, host string) (addrs []string, err error) - LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) - LookupTXT(ctx context.Context, name string) (txts []string, err error) -} - -var ( - // ErrMissingAddr is the error returned when building a DNS resolver when - // the provided target name is empty. - ErrMissingAddr = errors.New("dns resolver: missing address") - - // ErrEndsWithColon is the error returned when building a DNS resolver when - // the provided target name ends with a colon that is supposed to be the - // separator between host and port. E.g. "::" is a valid address as it is - // an IPv6 address (host only) and "[::]:" is invalid as it ends with a - // colon as the host and port separator. - ErrEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") -) - -// The following vars are overridden by tests. -var ( - // MinResolutionRate is the minimum rate at which re-resolutions are - // allowed. This helps to prevent excessive re-resolution. - MinResolutionRate = 30 * time.Second - - // TimeAfterFunc is used by the DNS resolver to wait for the given duration - // to elapse. In non-test code, this is implemented by time.After. In test - // code, this can be used to control the amount of time the resolver is - // blocked waiting for the duration to elapse. - TimeAfterFunc func(time.Duration) <-chan time.Time - - // NewNetResolver returns the net.Resolver instance for the given target. - NewNetResolver func(string) (NetResolver, error) - - // AddressDialer is the dialer used to dial the DNS server. It accepts the - // Host portion of the URL corresponding to the user's dial target and - // returns a dial function. - AddressDialer func(address string) func(context.Context, string, string) (net.Conn, error) -) diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go deleted file mode 100644 index afac56572a..0000000000 --- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go +++ /dev/null @@ -1,64 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
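The vars in the deleted dns/internal package above exist so tests can swap timing and resolution dependencies without real sleeps or network traffic. A minimal sketch of that override pattern, with a hypothetical clock hook of the same shape:

package main

import (
	"fmt"
	"time"
)

// TimeAfterFunc is a seam like the one in the deleted dns/internal package:
// production code waits on it, tests replace it to avoid wall-clock waits.
var TimeAfterFunc = time.After

// waitOrTick blocks until the hook fires, then reports.
func waitOrTick(d time.Duration) {
	<-TimeAfterFunc(d)
	fmt.Println("tick")
}

func main() {
	// A test can install a channel it controls instead of a real timer.
	fired := make(chan time.Time, 1)
	TimeAfterFunc = func(time.Duration) <-chan time.Time { return fired }
	fired <- time.Time{} // fire immediately, no 30-second wait
	waitOrTick(30 * time.Second)
}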
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package passthrough implements a pass-through resolver. It sends the target -// name without scheme back to gRPC as resolved address. -package passthrough - -import ( - "errors" - - "google.golang.org/grpc/resolver" -) - -const scheme = "passthrough" - -type passthroughBuilder struct{} - -func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - if target.Endpoint() == "" && opts.Dialer == nil { - return nil, errors.New("passthrough: received empty target in Build()") - } - r := &passthroughResolver{ - target: target, - cc: cc, - } - r.start() - return r, nil -} - -func (*passthroughBuilder) Scheme() string { - return scheme -} - -type passthroughResolver struct { - target resolver.Target - cc resolver.ClientConn -} - -func (r *passthroughResolver) start() { - r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}}) -} - -func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} - -func (*passthroughResolver) Close() {} - -func init() { - resolver.Register(&passthroughBuilder{}) -} diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go deleted file mode 100644 index 27cd81af9e..0000000000 --- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +++ /dev/null @@ -1,78 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package unix implements a resolver for unix targets. -package unix - -import ( - "fmt" - - "google.golang.org/grpc/internal/transport/networktype" - "google.golang.org/grpc/resolver" -) - -const unixScheme = "unix" -const unixAbstractScheme = "unix-abstract" - -type builder struct { - scheme string -} - -func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { - if target.URL.Host != "" { - return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host) - } - - // gRPC was parsing the dial target manually before PR #4817, and we - // switched to using url.Parse() in that PR. To avoid breaking existing - // resolver implementations we ended up stripping the leading "/" from the - // endpoint. This obviously does not work for the "unix" scheme. Hence we - // end up using the parsed URL instead. 
- endpoint := target.URL.Path - if endpoint == "" { - endpoint = target.URL.Opaque - } - addr := resolver.Address{Addr: endpoint} - if b.scheme == unixAbstractScheme { - // We can not prepend \0 as c++ gRPC does, as in Golang '@' is used to signify we do - // not want trailing \0 in address. - addr.Addr = "@" + addr.Addr - } - cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}}) - return &nopResolver{}, nil -} - -func (b *builder) Scheme() string { - return b.scheme -} - -func (b *builder) OverrideAuthority(resolver.Target) string { - return "localhost" -} - -type nopResolver struct { -} - -func (*nopResolver) ResolveNow(resolver.ResolveNowOptions) {} - -func (*nopResolver) Close() {} - -func init() { - resolver.Register(&builder{scheme: unixScheme}) - resolver.Register(&builder{scheme: unixAbstractScheme}) -} diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go deleted file mode 100644 index 11d82afcc7..0000000000 --- a/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go +++ /dev/null @@ -1,130 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package serviceconfig - -import ( - "encoding/json" - "fmt" - "math" - "strconv" - "strings" - "time" -) - -// Duration defines JSON marshal and unmarshal methods to conform to the -// protobuf JSON spec defined [here]. -// -// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration -type Duration time.Duration - -func (d Duration) String() string { - return fmt.Sprint(time.Duration(d)) -} - -// MarshalJSON converts from d to a JSON string output. -func (d Duration) MarshalJSON() ([]byte, error) { - ns := time.Duration(d).Nanoseconds() - sec := ns / int64(time.Second) - ns = ns % int64(time.Second) - - var sign string - if sec < 0 || ns < 0 { - sign, sec, ns = "-", -1*sec, -1*ns - } - - // Generated output always contains 0, 3, 6, or 9 fractional digits, - // depending on required precision. - str := fmt.Sprintf("%s%d.%09d", sign, sec, ns) - str = strings.TrimSuffix(str, "000") - str = strings.TrimSuffix(str, "000") - str = strings.TrimSuffix(str, ".000") - return []byte(fmt.Sprintf("\"%ss\"", str)), nil -} - -// UnmarshalJSON unmarshals b as a duration JSON string into d. -func (d *Duration) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - if !strings.HasSuffix(s, "s") { - return fmt.Errorf("malformed duration %q: missing seconds unit", s) - } - neg := false - if s[0] == '-' { - neg = true - s = s[1:] - } - ss := strings.SplitN(s[:len(s)-1], ".", 3) - if len(ss) > 2 { - return fmt.Errorf("malformed duration %q: too many decimals", s) - } - // hasDigits is set if either the whole or fractional part of the number is - // present, since both are optional but one is required. 
- hasDigits := false - var sec, ns int64 - if len(ss[0]) > 0 { - var err error - if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil { - return fmt.Errorf("malformed duration %q: %v", s, err) - } - // Maximum seconds value per the durationpb spec. - const maxProtoSeconds = 315_576_000_000 - if sec > maxProtoSeconds { - return fmt.Errorf("out of range: %q", s) - } - hasDigits = true - } - if len(ss) == 2 && len(ss[1]) > 0 { - if len(ss[1]) > 9 { - return fmt.Errorf("malformed duration %q: too many digits after decimal", s) - } - var err error - if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil { - return fmt.Errorf("malformed duration %q: %v", s, err) - } - for i := 9; i > len(ss[1]); i-- { - ns *= 10 - } - hasDigits = true - } - if !hasDigits { - return fmt.Errorf("malformed duration %q: contains no numbers", s) - } - - if neg { - sec *= -1 - ns *= -1 - } - - // Maximum/minimum seconds/nanoseconds representable by Go's time.Duration. - const maxSeconds = math.MaxInt64 / int64(time.Second) - const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second) - const minSeconds = math.MinInt64 / int64(time.Second) - const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second) - - if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) { - *d = Duration(math.MaxInt64) - } else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) { - *d = Duration(math.MinInt64) - } else { - *d = Duration(sec*int64(time.Second) + ns) - } - return nil -} diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go deleted file mode 100644 index 51e733e495..0000000000 --- a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go +++ /dev/null @@ -1,180 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package serviceconfig contains utility functions to parse service config. -package serviceconfig - -import ( - "encoding/json" - "fmt" - "time" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - externalserviceconfig "google.golang.org/grpc/serviceconfig" -) - -var logger = grpclog.Component("core") - -// BalancerConfig wraps the name and config associated with one load balancing -// policy. It corresponds to a single entry of the loadBalancingConfig field -// from ServiceConfig. -// -// It implements the json.Unmarshaler interface. -// -// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247 -type BalancerConfig struct { - Name string - Config externalserviceconfig.LoadBalancingConfig -} - -type intermediateBalancerConfig []map[string]json.RawMessage - -// MarshalJSON implements the json.Marshaler interface. -// -// It marshals the balancer and config into a length-1 slice -// ([]map[string]config). 
-func (bc *BalancerConfig) MarshalJSON() ([]byte, error) { - if bc.Config == nil { - // If config is nil, return empty config `{}`. - return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil - } - c, err := json.Marshal(bc.Config) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -// -// ServiceConfig contains a list of loadBalancingConfigs, each with a name and -// config. This method iterates through that list in order, and stops at the -// first policy that is supported. -// - If the config for the first supported policy is invalid, the whole service -// config is invalid. -// - If the list doesn't contain any supported policy, the whole service config -// is invalid. -func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { - var ir intermediateBalancerConfig - err := json.Unmarshal(b, &ir) - if err != nil { - return err - } - - var names []string - for i, lbcfg := range ir { - if len(lbcfg) != 1 { - return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) - } - - var ( - name string - jsonCfg json.RawMessage - ) - // Get the key:value pair from the map. We have already made sure that - // the map contains a single entry. - for name, jsonCfg = range lbcfg { - } - - names = append(names, name) - builder := balancer.Get(name) - if builder == nil { - // If the balancer is not registered, move on to the next config. - // This is not an error. - continue - } - bc.Name = name - - parser, ok := builder.(balancer.ConfigParser) - if !ok { - if string(jsonCfg) != "{}" { - logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) - } - // Stop at this, though the builder doesn't support parsing config. - return nil - } - - cfg, err := parser.ParseConfig(jsonCfg) - if err != nil { - return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err) - } - bc.Config = cfg - return nil - } - // This is reached when the for loop iterates over all entries, but didn't - // return. This means we had a loadBalancingConfig slice but did not - // encounter a registered policy. The config is considered invalid in this - // case. - return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names) -} - -// MethodConfig defines the configuration recommended by the service providers for a -// particular method. -type MethodConfig struct { - // WaitForReady indicates whether RPCs sent to this method should wait until - // the connection is ready by default (!failfast). The value specified via the - // gRPC client API will override the value set here. - WaitForReady *bool - // Timeout is the default timeout for RPCs sent to this method. The actual - // deadline used will be the minimum of the value specified here and the value - // set by the application via the gRPC client API. If either one is not set, - // then the other will be used. If neither is set, then the RPC has no deadline. - Timeout *time.Duration - // MaxReqSize is the maximum allowed payload size for an individual request in a - // stream (client->server) in bytes. The size which is measured is the serialized - // payload after per-message compression (but before stream compression) in bytes. - // The actual value used is the minimum of the value specified here and the value set - // by the application via the gRPC client API. 
If either one is not set, then the other - // will be used. If neither is set, then the built-in default is used. - MaxReqSize *int - // MaxRespSize is the maximum allowed payload size for an individual response in a - // stream (server->client) in bytes. - MaxRespSize *int - // RetryPolicy configures retry options for the method. - RetryPolicy *RetryPolicy -} - -// RetryPolicy defines the go-native version of the retry policy defined by the -// service config here: -// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config -type RetryPolicy struct { - // MaxAttempts is the maximum number of attempts, including the original RPC. - // - // This field is required and must be two or greater. - MaxAttempts int - - // Exponential backoff parameters. The initial retry attempt will occur at - // random(0, initialBackoff). In general, the nth attempt will occur at - // random(0, - // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)). - // - // These fields are required and must be greater than zero. - InitialBackoff time.Duration - MaxBackoff time.Duration - BackoffMultiplier float64 - - // The set of status codes which may be retried. - // - // Status codes are specified as strings, e.g., "UNAVAILABLE". - // - // This field is required and must be non-empty. - // Note: a set is used to store this for easy lookup. - RetryableStatusCodes map[codes.Code]bool -} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go deleted file mode 100644 index c7dbc82059..0000000000 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ /dev/null @@ -1,205 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package status implements errors returned by gRPC. These errors are -// serialized and transmitted on the wire between server and client, and allow -// for additional data to be transmitted via the Details field in the status -// proto. gRPC service handlers should return an error created by this -// package, and gRPC clients should expect a corresponding error to be -// returned from the RPC call. -// -// This package upholds the invariants that a non-nil error may not -// contain an OK code, and an OK code must result in a nil error. -package status - -import ( - "errors" - "fmt" - - spb "google.golang.org/genproto/googleapis/rpc/status" - "google.golang.org/grpc/codes" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/protoadapt" - "google.golang.org/protobuf/types/known/anypb" -) - -// Status represents an RPC status code, message, and details. It is immutable -// and should be created with New, Newf, or FromProto. -type Status struct { - s *spb.Status -} - -// NewWithProto returns a new status including details from statusProto. This -// is meant to be used by the gRPC library only. 
-func NewWithProto(code codes.Code, message string, statusProto []string) *Status { - if len(statusProto) != 1 { - // No grpc-status-details bin header, or multiple; just ignore. - return &Status{s: &spb.Status{Code: int32(code), Message: message}} - } - st := &spb.Status{} - if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil { - // Probably not a google.rpc.Status proto; do not provide details. - return &Status{s: &spb.Status{Code: int32(code), Message: message}} - } - if st.Code == int32(code) { - // The codes match between the grpc-status header and the - // grpc-status-details-bin header; use the full details proto. - return &Status{s: st} - } - return &Status{ - s: &spb.Status{ - Code: int32(codes.Internal), - Message: fmt.Sprintf( - "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v", - code, message, st, - ), - }, - } -} - -// New returns a Status representing c and msg. -func New(c codes.Code, msg string) *Status { - return &Status{s: &spb.Status{Code: int32(c), Message: msg}} -} - -// Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...any) *Status { - return New(c, fmt.Sprintf(format, a...)) -} - -// FromProto returns a Status representing s. -func FromProto(s *spb.Status) *Status { - return &Status{s: proto.Clone(s).(*spb.Status)} -} - -// Err returns an error representing c and msg. If c is OK, returns nil. -func Err(c codes.Code, msg string) error { - return New(c, msg).Err() -} - -// Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...any) error { - return Err(c, fmt.Sprintf(format, a...)) -} - -// Code returns the status code contained in s. -func (s *Status) Code() codes.Code { - if s == nil || s.s == nil { - return codes.OK - } - return codes.Code(s.s.Code) -} - -// Message returns the message contained in s. -func (s *Status) Message() string { - if s == nil || s.s == nil { - return "" - } - return s.s.Message -} - -// Proto returns s's status as an spb.Status proto message. -func (s *Status) Proto() *spb.Status { - if s == nil { - return nil - } - return proto.Clone(s.s).(*spb.Status) -} - -// Err returns an immutable error representing s; returns nil if s.Code() is OK. -func (s *Status) Err() error { - if s.Code() == codes.OK { - return nil - } - return &Error{s: s} -} - -// WithDetails returns a new status with the provided details messages appended to the status. -// If any errors are encountered, it returns nil and the first error encountered. -func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) { - if s.Code() == codes.OK { - return nil, errors.New("no error details for status with code OK") - } - // s.Code() != OK implies that s.Proto() != nil. - p := s.Proto() - for _, detail := range details { - any, err := anypb.New(protoadapt.MessageV2Of(detail)) - if err != nil { - return nil, err - } - p.Details = append(p.Details, any) - } - return &Status{s: p}, nil -} - -// Details returns a slice of details messages attached to the status. -// If a detail cannot be decoded, the error is returned in place of the detail. 
-func (s *Status) Details() []any { - if s == nil || s.s == nil { - return nil - } - details := make([]any, 0, len(s.s.Details)) - for _, any := range s.s.Details { - detail, err := any.UnmarshalNew() - if err != nil { - details = append(details, err) - continue - } - details = append(details, detail) - } - return details -} - -func (s *Status) String() string { - return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message()) -} - -// Error wraps a pointer to a status proto. It implements error and Status, -// and a nil *Error should never be returned by this package. -type Error struct { - s *Status -} - -func (e *Error) Error() string { - return e.s.String() -} - -// GRPCStatus returns the Status represented by e. -func (e *Error) GRPCStatus() *Status { - return e.s -} - -// Is implements future error.Is functionality. -// An Error is equivalent if the code and message are identical. -func (e *Error) Is(target error) bool { - tse, ok := target.(*Error) - if !ok { - return false - } - return proto.Equal(e.s.s, tse.s.s) -} - -// IsRestrictedControlPlaneCode returns whether the status includes a code -// restricted for control plane usage as defined by gRFC A54. -func IsRestrictedControlPlaneCode(s *Status) bool { - switch s.Code() { - case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss: - return true - } - return false -} diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go deleted file mode 100644 index b3a72276de..0000000000 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go +++ /dev/null @@ -1,112 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package syscall provides functionalities that grpc uses to get low-level operating system -// stats/info. -package syscall - -import ( - "fmt" - "net" - "syscall" - "time" - - "golang.org/x/sys/unix" - "google.golang.org/grpc/grpclog" -) - -var logger = grpclog.Component("core") - -// GetCPUTime returns how much CPU time has passed since the start of this process. -func GetCPUTime() int64 { - var ts unix.Timespec - if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil { - logger.Fatal(err) - } - return ts.Nano() -} - -// Rusage is an alias for syscall.Rusage under a linux environment. -type Rusage = syscall.Rusage - -// GetRusage returns the resource usage of the current process. -func GetRusage() *Rusage { - rusage := new(Rusage) - syscall.Getrusage(syscall.RUSAGE_SELF, rusage) - return rusage -} - -// CPUTimeDiff returns the differences of user CPU time and system CPU time used -// between two Rusage structs.
-func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { - var ( - utimeDiffs = latest.Utime.Sec - first.Utime.Sec - utimeDiffus = latest.Utime.Usec - first.Utime.Usec - stimeDiffs = latest.Stime.Sec - first.Stime.Sec - stimeDiffus = latest.Stime.Usec - first.Stime.Usec - ) - - uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6 - sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6 - - return uTimeElapsed, sTimeElapsed -} - -// SetTCPUserTimeout sets the TCP user timeout on a connection's socket. -func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { - tcpconn, ok := conn.(*net.TCPConn) - if !ok { - // not a TCP connection. exit early - return nil - } - rawConn, err := tcpconn.SyscallConn() - if err != nil { - return fmt.Errorf("error getting raw connection: %v", err) - } - err = rawConn.Control(func(fd uintptr) { - err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond)) - }) - if err != nil { - return fmt.Errorf("error setting option on socket: %v", err) - } - - return nil -} - -// GetTCPUserTimeout gets the TCP user timeout on a connection's socket. -func GetTCPUserTimeout(conn net.Conn) (opt int, err error) { - tcpconn, ok := conn.(*net.TCPConn) - if !ok { - err = fmt.Errorf("conn is not *net.TCPConn. got %T", conn) - return - } - rawConn, err := tcpconn.SyscallConn() - if err != nil { - err = fmt.Errorf("error getting raw connection: %v", err) - return - } - err = rawConn.Control(func(fd uintptr) { - opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT) - }) - if err != nil { - err = fmt.Errorf("error getting option on socket: %v", err) - return - } - - return -} diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go deleted file mode 100644 index 999f52cd75..0000000000 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go +++ /dev/null @@ -1,77 +0,0 @@ -//go:build !linux -// +build !linux - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package syscall provides functionalities that grpc uses to get low-level -// operating system stats/info. -package syscall - -import ( - "net" - "sync" - "time" - - "google.golang.org/grpc/grpclog" -) - -var once sync.Once -var logger = grpclog.Component("core") - -func log() { - once.Do(func() { - logger.Info("CPU time info is unavailable on non-linux environments.") - }) -} - -// GetCPUTime returns how much CPU time has passed since the start of this -// process. It always returns 0 under non-linux environments. -func GetCPUTime() int64 { - log() - return 0 -} - -// Rusage is an empty struct under non-linux environments. -type Rusage struct{} - -// GetRusage is a no-op function under non-linux environments.
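// ---------------------------------------------------------------------------
// Editor's aside: a minimal usage sketch for the linux helpers above (the
// function name is invented). GetRusage snapshots resource usage before and
// after some work; CPUTimeDiff turns the two snapshots into user/system CPU
// seconds.
func exampleCPUAccounting(work func()) (user, sys float64) {
	before := GetRusage()
	work()
	after := GetRusage()
	return CPUTimeDiff(before, after)
}
// ---------------------------------------------------------------------------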
-func GetRusage() *Rusage { - log() - return nil -} - -// CPUTimeDiff returns the differences of user CPU time and system CPU time used -// between two Rusage structs. It is a no-op function for non-linux environments. -func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { - log() - return 0, 0 -} - -// SetTCPUserTimeout is a no-op function under non-linux environments. -func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { - log() - return nil -} - -// GetTCPUserTimeout is a no-op function under non-linux environments. -// A negative return value indicates the operation is not supported. -func GetTCPUserTimeout(conn net.Conn) (int, error) { - log() - return -1, nil -} diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go deleted file mode 100644 index 4f347edd42..0000000000 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go +++ /dev/null @@ -1,29 +0,0 @@ -//go:build !unix && !windows - -/* - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package internal - -import ( - "net" -) - -// NetDialerWithTCPKeepalive returns a vanilla net.Dialer on non-unix platforms. -func NetDialerWithTCPKeepalive() *net.Dialer { - return &net.Dialer{} -} diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go deleted file mode 100644 index 078137b7fd..0000000000 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go +++ /dev/null @@ -1,54 +0,0 @@ -//go:build unix - -/* - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package internal - -import ( - "net" - "syscall" - "time" - - "golang.org/x/sys/unix" -) - -// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on -// the underlying connection with OS default values for keepalive parameters. -// -// TODO: Once https://github.com/golang/go/issues/62254 lands, and the -// appropriate Go version becomes less than our least supported Go version, we -// should look into using the new API to make things more straightforward. -func NetDialerWithTCPKeepalive() *net.Dialer { - return &net.Dialer{ - // Setting a negative value here prevents the Go stdlib from overriding - // the values of TCP keepalive time and interval. It also prevents the - // Go stdlib from enabling TCP keepalives by default.
- KeepAlive: time.Duration(-1), - // This method is called after the underlying network socket is created, - // but before dialing the socket (or calling its connect() method). The - // combination of unconditionally enabling TCP keepalives here, and - // disabling the overriding of TCP keepalive parameters by setting the - // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keepalive interval and time parameters. - Control: func(_, _ string, c syscall.RawConn) error { - return c.Control(func(fd uintptr) { - unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) - }) - }, - } -} diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go deleted file mode 100644 index fd7d43a890..0000000000 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go +++ /dev/null @@ -1,54 +0,0 @@ -//go:build windows - -/* - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package internal - -import ( - "net" - "syscall" - "time" - - "golang.org/x/sys/windows" -) - -// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on -// the underlying connection with OS default values for keepalive parameters. -// -// TODO: Once https://github.com/golang/go/issues/62254 lands, and the -// appropriate Go version becomes less than our least supported Go version, we -// should look into using the new API to make things more straightforward. -func NetDialerWithTCPKeepalive() *net.Dialer { - return &net.Dialer{ - // Setting a negative value here prevents the Go stdlib from overriding - // the values of TCP keepalive time and interval. It also prevents the - // Go stdlib from enabling TCP keepalives by default. - KeepAlive: time.Duration(-1), - // This method is called after the underlying network socket is created, - // but before dialing the socket (or calling its connect() method). The - // combination of unconditionally enabling TCP keepalives here, and - // disabling the overriding of TCP keepalive parameters by setting the - // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keepalive interval and time parameters. - Control: func(_, _ string, c syscall.RawConn) error { - return c.Control(func(fd uintptr) { - windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1) - }) - }, - } -} diff --git a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go deleted file mode 100644 index 070680edba..0000000000 --- a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go +++ /dev/null @@ -1,141 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package transport - -import ( - "sync" - "time" -) - -const ( - // bdpLimit is the maximum value the flow control windows will be increased - // to. TCP typically limits this to 4MB, but some systems go up to 16MB. - // Since this is only a limit, it is safe to make it optimistic. - bdpLimit = (1 << 20) * 16 - // alpha is a constant factor used to keep a moving average - // of RTTs. - alpha = 0.9 - // If the current bdp sample is greater than or equal to - // our beta * our estimated bdp and the current bandwidth - // sample is the maximum bandwidth observed so far, we - // increase our bdp estimate by a factor of gamma. - beta = 0.66 - // To keep our bdp smaller than or equal to twice the real BDP, - // we should multiply our current sample by 4/3; however, to round things out - // we use 2 as the multiplication factor. - gamma = 2 -) - -// Adding arbitrary data to ping so that its ack can be identified. -// Easter-egg: what does the ping message say? -var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}} - -type bdpEstimator struct { - // sentAt is the time when the ping was sent. - sentAt time.Time - - mu sync.Mutex - // bdp is the current bdp estimate. - bdp uint32 - // sample is the number of bytes received in one measurement cycle. - sample uint32 - // bwMax is the maximum bandwidth noted so far (bytes/sec). - bwMax float64 - // bool to keep track of the beginning of a new measurement cycle. - isSent bool - // Callback to update the window sizes. - updateFlowControl func(n uint32) - // sampleCount is the number of samples taken so far. - sampleCount uint64 - // round trip time (seconds) - rtt float64 -} - -// timesnap registers the time bdp ping was sent out so that -// network rtt can be calculated when its ack is received. -// It is called (by controller) when the bdpPing is -// being written on the wire. -func (b *bdpEstimator) timesnap(d [8]byte) { - if bdpPing.data != d { - return - } - b.sentAt = time.Now() -} - -// add adds bytes to the current sample for calculating bdp. -// It returns true only if a ping must be sent. This can be used -// by the caller (handleData) to make a decision about batching -// a window update with it. -func (b *bdpEstimator) add(n uint32) bool { - b.mu.Lock() - defer b.mu.Unlock() - if b.bdp == bdpLimit { - return false - } - if !b.isSent { - b.isSent = true - b.sample = n - b.sentAt = time.Time{} - b.sampleCount++ - return true - } - b.sample += n - return false -} - -// calculate is called when an ack for a bdp ping is received. -// Here we calculate the current bdp and bandwidth sample and -// decide if the flow control windows should go up. -func (b *bdpEstimator) calculate(d [8]byte) { - // Check if the ping acked for was the bdp ping. - if bdpPing.data != d { - return - } - b.mu.Lock() - rttSample := time.Since(b.sentAt).Seconds() - if b.sampleCount < 10 { - // Bootstrap rtt with an average of first 10 rtt samples. - b.rtt += (rttSample - b.rtt) / float64(b.sampleCount) - } else { - // Weight the recent past more heavily.
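// ---------------------------------------------------------------------------
// Editor's aside: a worked example of the update rule in calculate() above,
// under assumed numbers. Suppose the ack for the bdp ping arrives after
// rtt = 40ms, with sample = 1,000,000 bytes received in the cycle. Then
// bwCurrent = 1e6 / (0.04 * 1.5) ≈ 16.7e6 bytes/sec. If sample >= beta*bdp
// (the sample is at least 2/3 of the current estimate) and bwCurrent is the
// highest bandwidth seen so far, the estimate grows to gamma*sample =
// 2,000,000 bytes, capped at bdpLimit (16MB), and updateFlowControl is then
// invoked with the new window size.
// ---------------------------------------------------------------------------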
- b.rtt += (rttSample - b.rtt) * float64(alpha) - } - b.isSent = false - // The number of bytes accumulated so far in the sample is smaller - // than or equal to 1.5 times the real BDP on a saturated connection. - bwCurrent := float64(b.sample) / (b.rtt * float64(1.5)) - if bwCurrent > b.bwMax { - b.bwMax = bwCurrent - } - // If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is - // greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we - // should update our perception of the network BDP. - if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit { - sampleFloat := float64(b.sample) - b.bdp = uint32(gamma * sampleFloat) - if b.bdp > bdpLimit { - b.bdp = bdpLimit - } - bdp := b.bdp - b.mu.Unlock() - b.updateFlowControl(bdp) - return - } - b.mu.Unlock() -} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go deleted file mode 100644 index 83c3829826..0000000000 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ /dev/null @@ -1,1006 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package transport - -import ( - "bytes" - "errors" - "fmt" - "net" - "runtime" - "strconv" - "sync" - "sync/atomic" - - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcutil" - "google.golang.org/grpc/status" -) - -var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { - e.SetMaxDynamicTableSizeLimit(v) -} - -type itemNode struct { - it any - next *itemNode -} - -type itemList struct { - head *itemNode - tail *itemNode -} - -func (il *itemList) enqueue(i any) { - n := &itemNode{it: i} - if il.tail == nil { - il.head, il.tail = n, n - return - } - il.tail.next = n - il.tail = n -} - -// peek returns the first item in the list without removing it from the -// list. -func (il *itemList) peek() any { - return il.head.it -} - -func (il *itemList) dequeue() any { - if il.head == nil { - return nil - } - i := il.head.it - il.head = il.head.next - if il.head == nil { - il.tail = nil - } - return i -} - -func (il *itemList) dequeueAll() *itemNode { - h := il.head - il.head, il.tail = nil, nil - return h -} - -func (il *itemList) isEmpty() bool { - return il.head == nil -} - -// The following defines various control items which could flow through -// the control buffer of transport. They represent different aspects of -// control tasks, e.g., flow control, settings, streaming resetting, etc. - -// maxQueuedTransportResponseFrames is the most queued "transport response" -// frames we will buffer before preventing new reads from occurring on the -// transport. These are control frames sent in response to client requests, -// such as RST_STREAM due to bad headers or settings acks. 
-const maxQueuedTransportResponseFrames = 50 - -type cbItem interface { - isTransportResponseFrame() bool -} - -// registerStream is used to register an incoming stream with loopy writer. -type registerStream struct { - streamID uint32 - wq *writeQuota -} - -func (*registerStream) isTransportResponseFrame() bool { return false } - -// headerFrame is also used to register stream on the client-side. -type headerFrame struct { - streamID uint32 - hf []hpack.HeaderField - endStream bool // Valid on server side. - initStream func(uint32) error // Used only on the client side. - onWrite func() - wq *writeQuota // write quota for the stream created. - cleanup *cleanupStream // Valid on the server side. - onOrphaned func(error) // Valid on client-side -} - -func (h *headerFrame) isTransportResponseFrame() bool { - return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM -} - -type cleanupStream struct { - streamID uint32 - rst bool - rstCode http2.ErrCode - onWrite func() -} - -func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM - -type earlyAbortStream struct { - httpStatus uint32 - streamID uint32 - contentSubtype string - status *status.Status - rst bool -} - -func (*earlyAbortStream) isTransportResponseFrame() bool { return false } - -type dataFrame struct { - streamID uint32 - endStream bool - h []byte - d []byte - // onEachWrite is called every time - // a part of d is written out. - onEachWrite func() -} - -func (*dataFrame) isTransportResponseFrame() bool { return false } - -type incomingWindowUpdate struct { - streamID uint32 - increment uint32 -} - -func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false } - -type outgoingWindowUpdate struct { - streamID uint32 - increment uint32 -} - -func (*outgoingWindowUpdate) isTransportResponseFrame() bool { - return false // window updates are throttled by thresholds -} - -type incomingSettings struct { - ss []http2.Setting -} - -func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK - -type outgoingSettings struct { - ss []http2.Setting -} - -func (*outgoingSettings) isTransportResponseFrame() bool { return false } - -type incomingGoAway struct { -} - -func (*incomingGoAway) isTransportResponseFrame() bool { return false } - -type goAway struct { - code http2.ErrCode - debugData []byte - headsUp bool - closeConn error // if set, loopyWriter will exit, resulting in conn closure -} - -func (*goAway) isTransportResponseFrame() bool { return false } - -type ping struct { - ack bool - data [8]byte -} - -func (*ping) isTransportResponseFrame() bool { return true } - -type outFlowControlSizeRequest struct { - resp chan uint32 -} - -func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } - -// closeConnection is an instruction to tell the loopy writer to flush the -// framer and exit, which will cause the transport's connection to be closed -// (by the client or server). The transport itself will close after the reader -// encounters the EOF caused by the connection closure. 
-type closeConnection struct{} - -func (closeConnection) isTransportResponseFrame() bool { return false } - -type outStreamState int - -const ( - active outStreamState = iota - empty - waitingOnStreamQuota -) - -type outStream struct { - id uint32 - state outStreamState - itl *itemList - bytesOutStanding int - wq *writeQuota - - next *outStream - prev *outStream -} - -func (s *outStream) deleteSelf() { - if s.prev != nil { - s.prev.next = s.next - } - if s.next != nil { - s.next.prev = s.prev - } - s.next, s.prev = nil, nil -} - -type outStreamList struct { - // Following are sentinel objects that mark the - // beginning and end of the list. They do not - // contain any item lists. All valid objects are - // inserted in between them. - // This is needed so that an outStream object can - // deleteSelf() in O(1) time without knowing which - // list it belongs to. - head *outStream - tail *outStream -} - -func newOutStreamList() *outStreamList { - head, tail := new(outStream), new(outStream) - head.next = tail - tail.prev = head - return &outStreamList{ - head: head, - tail: tail, - } -} - -func (l *outStreamList) enqueue(s *outStream) { - e := l.tail.prev - e.next = s - s.prev = e - s.next = l.tail - l.tail.prev = s -} - -// remove from the beginning of the list. -func (l *outStreamList) dequeue() *outStream { - b := l.head.next - if b == l.tail { - return nil - } - b.deleteSelf() - return b -} - -// controlBuffer is a way to pass information to loopy. -// Information is passed as specific struct types called control frames. -// A control frame not only represents data, messages or headers to be sent out -// but can also be used to instruct loopy to update its internal state. -// It shouldn't be confused with an HTTP2 frame, although some of the control frames -// like dataFrame and headerFrame do go out on wire as HTTP2 frames. -type controlBuffer struct { - ch chan struct{} - done <-chan struct{} - mu sync.Mutex - consumerWaiting bool - list *itemList - err error - - // transportResponseFrames counts the number of queued items that represent - // the response of an action initiated by the peer. trfChan is created - // when transportResponseFrames >= maxQueuedTransportResponseFrames and is - // closed and nilled when transportResponseFrames drops below the - // threshold. Both fields are protected by mu. - transportResponseFrames int - trfChan atomic.Value // chan struct{} -} - -func newControlBuffer(done <-chan struct{}) *controlBuffer { - return &controlBuffer{ - ch: make(chan struct{}, 1), - list: &itemList{}, - done: done, - } -} - -// throttle blocks if there are too many incomingSettings/cleanupStreams in the -// controlbuf. -func (c *controlBuffer) throttle() { - ch, _ := c.trfChan.Load().(chan struct{}) - if ch != nil { - select { - case <-ch: - case <-c.done: - } - } -} - -func (c *controlBuffer) put(it cbItem) error { - _, err := c.executeAndPut(nil, it) - return err -} - -func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) { - var wakeUp bool - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err - } - if f != nil { - if !f(it) { // f wasn't successful - c.mu.Unlock() - return false, nil - } - } - if c.consumerWaiting { - wakeUp = true - c.consumerWaiting = false - } - c.list.enqueue(it) - if it.isTransportResponseFrame() { - c.transportResponseFrames++ - if c.transportResponseFrames == maxQueuedTransportResponseFrames { - // We are adding the frame that puts us over the threshold; create - // a throttling channel. 
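// ---------------------------------------------------------------------------
// Editor's aside: the throttling scheme used by controlBuffer below, reduced
// to a standalone sketch (all names invented; assumes "sync" is imported).
// Writers count queued "response" items; hitting the threshold publishes a
// gate channel, and the reader blocks on the gate until it is closed again.
type gateThrottle struct {
	mu       sync.Mutex
	n, limit int
	gate     chan struct{} // non-nil while throttled
}

func (t *gateThrottle) add() { // called when a response item is queued
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.n++; t.n == t.limit {
		t.gate = make(chan struct{})
	}
}

func (t *gateThrottle) remove() { // called when a response item is dequeued
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.n == t.limit {
		close(t.gate)
		t.gate = nil
	}
	t.n--
}

func (t *gateThrottle) wait() { // called by the reader before reading more
	t.mu.Lock()
	gate := t.gate
	t.mu.Unlock()
	if gate != nil {
		<-gate
	}
}
// ---------------------------------------------------------------------------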
- c.trfChan.Store(make(chan struct{})) - } - } - c.mu.Unlock() - if wakeUp { - select { - case c.ch <- struct{}{}: - default: - } - } - return true, nil -} - -// Note: argument f should never be nil. -func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err - } - if !f(it) { // f wasn't successful - c.mu.Unlock() - return false, nil - } - c.mu.Unlock() - return true, nil -} - -func (c *controlBuffer) get(block bool) (any, error) { - for { - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return nil, c.err - } - if !c.list.isEmpty() { - h := c.list.dequeue().(cbItem) - if h.isTransportResponseFrame() { - if c.transportResponseFrames == maxQueuedTransportResponseFrames { - // We are removing the frame that put us over the - // threshold; close and clear the throttling channel. - ch := c.trfChan.Load().(chan struct{}) - close(ch) - c.trfChan.Store((chan struct{})(nil)) - } - c.transportResponseFrames-- - } - c.mu.Unlock() - return h, nil - } - if !block { - c.mu.Unlock() - return nil, nil - } - c.consumerWaiting = true - c.mu.Unlock() - select { - case <-c.ch: - case <-c.done: - return nil, errors.New("transport closed by client") - } - } -} - -func (c *controlBuffer) finish() { - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return - } - c.err = ErrConnClosing - // There may be headers for streams in the control buffer. - // These streams need to be cleaned out since the transport - // is still not aware of these yet. - for head := c.list.dequeueAll(); head != nil; head = head.next { - hdr, ok := head.it.(*headerFrame) - if !ok { - continue - } - if hdr.onOrphaned != nil { // It will be nil on the server-side. - hdr.onOrphaned(ErrConnClosing) - } - } - // In case throttle() is currently in flight, it needs to be unblocked. - // Otherwise, the transport may not close, since the transport is closed by - // the reader encountering the connection error. - ch, _ := c.trfChan.Load().(chan struct{}) - if ch != nil { - close(ch) - } - c.trfChan.Store((chan struct{})(nil)) - c.mu.Unlock() -} - -type side int - -const ( - clientSide side = iota - serverSide -) - -// Loopy receives frames from the control buffer. -// Each frame is handled individually; most of the work done by loopy goes -// into handling data frames. Loopy maintains a queue of active streams, and each -// stream maintains a queue of data frames; as loopy receives data frames, -// they are added to the queue of the relevant stream. -// Loopy goes over this list of active streams by processing one node every iteration, -// thereby closely resembling round-robin scheduling over all streams. While -// processing a stream, loopy writes out data bytes from this stream capped by the min -// of http2MaxFrameLen, connection-level flow control and stream-level flow control. -type loopyWriter struct { - side side - cbuf *controlBuffer - sendQuota uint32 - oiws uint32 // outbound initial window size. - // estdStreams is a map of all established streams that are not cleaned-up yet. - // On client-side, this is all streams whose headers were sent out. - // On server-side, this is all streams whose headers were received. - estdStreams map[uint32]*outStream // Established streams. - // activeStreams is a linked-list of all streams that have data to send and some - // stream-level flow control quota. - // Each of these streams internally has a list of data items (and perhaps trailers - // on the server-side) to be sent out.
- activeStreams *outStreamList - framer *framer - hBuf *bytes.Buffer // The buffer for HPACK encoding. - hEnc *hpack.Encoder // HPACK encoder. - bdpEst *bdpEstimator - draining bool - conn net.Conn - logger *grpclog.PrefixLogger - - // Side-specific handlers - ssGoAwayHandler func(*goAway) (bool, error) -} - -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter { - var buf bytes.Buffer - l := &loopyWriter{ - side: s, - cbuf: cbuf, - sendQuota: defaultWindowSize, - oiws: defaultWindowSize, - estdStreams: make(map[uint32]*outStream), - activeStreams: newOutStreamList(), - framer: fr, - hBuf: &buf, - hEnc: hpack.NewEncoder(&buf), - bdpEst: bdpEst, - conn: conn, - logger: logger, - } - return l -} - -const minBatchSize = 1000 - -// run should be run in a separate goroutine. -// It reads control frames from controlBuf and processes them by: -// 1. Updating loopy's internal state, and/or -// 2. Writing out HTTP2 frames on the wire. -// -// Loopy keeps all active streams with data to send in a linked-list. -// All streams in the activeStreams linked-list must have both: -// 1. Data to send, and -// 2. Stream level flow control quota available. -// -// In each iteration of the run loop, other than processing the incoming control -// frame, loopy calls processData, which processes one node from the -// activeStreams linked-list. This results in writing of HTTP2 frames into an -// underlying write buffer. When there are no more control frames to read from -// controlBuf, loopy flushes the write buffer. As an optimization, to increase -// the batch size for each flush, loopy yields the processor once if the batch -// size is too low to give stream goroutines a chance to fill it up. -// -// Upon exiting, if the error causing the exit is not an I/O error, run() -// flushes the underlying connection. The connection is always left open to -// allow different closing behavior on the client and server. -func (l *loopyWriter) run() (err error) { - defer func() { - if l.logger.V(logLevel) { - l.logger.Infof("loopyWriter exiting with error: %v", err) - } - if !isIOError(err) { - l.framer.writer.Flush() - } - l.cbuf.finish() - }() - for { - it, err := l.cbuf.get(true) - if err != nil { - return err - } - if err = l.handle(it); err != nil { - return err - } - if _, err = l.processData(); err != nil { - return err - } - gosched := true - hasdata: - for { - it, err := l.cbuf.get(false) - if err != nil { - return err - } - if it != nil { - if err = l.handle(it); err != nil { - return err - } - if _, err = l.processData(); err != nil { - return err - } - continue hasdata - } - isEmpty, err := l.processData() - if err != nil { - return err - } - if !isEmpty { - continue hasdata - } - if gosched { - gosched = false - if l.framer.writer.offset < minBatchSize { - runtime.Gosched() - continue hasdata - } - } - l.framer.writer.Flush() - break hasdata - } - } -} - -func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error { - return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) -} - -func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) { - // A stream ID of zero means the update is for the connection-level flow control; update the send quota. - if w.streamID == 0 { - l.sendQuota += w.increment - return - } - // Find the stream and update it.
- if str, ok := l.estdStreams[w.streamID]; ok { - str.bytesOutStanding -= int(w.increment) - if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { - str.state = active - l.activeStreams.enqueue(str) - return - } - } -} - -func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { - return l.framer.fr.WriteSettings(s.ss...) -} - -func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { - l.applySettings(s.ss) - return l.framer.fr.WriteSettingsAck() -} - -func (l *loopyWriter) registerStreamHandler(h *registerStream) { - str := &outStream{ - id: h.streamID, - state: empty, - itl: &itemList{}, - wq: h.wq, - } - l.estdStreams[h.streamID] = str -} - -func (l *loopyWriter) headerHandler(h *headerFrame) error { - if l.side == serverSide { - str, ok := l.estdStreams[h.streamID] - if !ok { - if l.logger.V(logLevel) { - l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID) - } - return nil - } - // Case 1.A: Server is responding with headers. - if !h.endStream { - return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite) - } - // else: Case 1.B: Server wants to close stream. - - if str.state != empty { // either active or waiting on stream quota. - // add it to str's list of items. - str.itl.enqueue(h) - return nil - } - if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil { - return err - } - return l.cleanupStreamHandler(h.cleanup) - } - // Case 2: Client wants to originate stream. - str := &outStream{ - id: h.streamID, - state: empty, - itl: &itemList{}, - wq: h.wq, - } - return l.originateStream(str, h) -} - -func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error { - // l.draining is set when handling GoAway, in which case we want to avoid - // creating new streams. - if l.draining { - // TODO: provide a better error with the reason we are in draining. - hdr.onOrphaned(errStreamDrain) - return nil - } - if err := hdr.initStream(str.id); err != nil { - return err - } - if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { - return err - } - l.estdStreams[str.id] = str - return nil -} - -func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error { - if onWrite != nil { - onWrite() - } - l.hBuf.Reset() - for _, f := range hf { - if err := l.hEnc.WriteField(f); err != nil { - if l.logger.V(logLevel) { - l.logger.Warningf("Encountered error while encoding headers: %v", err) - } - } - } - var ( - err error - endHeaders, first bool - ) - first = true - for !endHeaders { - size := l.hBuf.Len() - if size > http2MaxFrameLen { - size = http2MaxFrameLen - } else { - endHeaders = true - } - if first { - first = false - err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{ - StreamID: streamID, - BlockFragment: l.hBuf.Next(size), - EndStream: endStream, - EndHeaders: endHeaders, - }) - } else { - err = l.framer.fr.WriteContinuation( - streamID, - endHeaders, - l.hBuf.Next(size), - ) - } - if err != nil { - return err - } - } - return nil -} - -func (l *loopyWriter) preprocessData(df *dataFrame) { - str, ok := l.estdStreams[df.streamID] - if !ok { - return - } - // If we got data for a stream it means that the - // stream was originated and the headers were sent out.
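// ---------------------------------------------------------------------------
// Editor's aside: writeHeader above emits one HEADERS frame followed by
// CONTINUATION frames when the HPACK-encoded block exceeds the maximum frame
// size, with END_HEADERS set only on the last chunk. The chunking itself
// reduces to this standalone sketch (helper name invented; maxFrameLen is
// assumed to be positive):
func splitHeaderBlock(block []byte, maxFrameLen int) [][]byte {
	var chunks [][]byte
	for len(block) > maxFrameLen {
		chunks = append(chunks, block[:maxFrameLen])
		block = block[maxFrameLen:]
	}
	return append(chunks, block) // the final chunk carries END_HEADERS
}
// ---------------------------------------------------------------------------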
- str.itl.enqueue(df) - if str.state == empty { - str.state = active - l.activeStreams.enqueue(str) - } -} - -func (l *loopyWriter) pingHandler(p *ping) error { - if !p.ack { - l.bdpEst.timesnap(p.data) - } - return l.framer.fr.WritePing(p.ack, p.data) - -} - -func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) { - o.resp <- l.sendQuota -} - -func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { - c.onWrite() - if str, ok := l.estdStreams[c.streamID]; ok { - // On the server side it could be a trailers-only response or - // a RST_STREAM before stream initialization thus the stream might - // not be established yet. - delete(l.estdStreams, c.streamID) - str.deleteSelf() - } - if c.rst { // If RST_STREAM needs to be sent. - if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { - return err - } - } - if l.draining && len(l.estdStreams) == 0 { - // Flush and close the connection; we are done with it. - return errors.New("finished processing active streams while in draining mode") - } - return nil -} - -func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { - if l.side == clientSide { - return errors.New("earlyAbortStream not handled on client") - } - // In case the caller forgets to set the http status, default to 200. - if eas.httpStatus == 0 { - eas.httpStatus = 200 - } - headerFields := []hpack.HeaderField{ - {Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))}, - {Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)}, - {Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))}, - {Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())}, - } - - if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { - return err - } - if eas.rst { - if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil { - return err - } - } - return nil -} - -func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { - if l.side == clientSide { - l.draining = true - if len(l.estdStreams) == 0 { - // Flush and close the connection; we are done with it. - return errors.New("received GOAWAY with no active streams") - } - } - return nil -} - -func (l *loopyWriter) goAwayHandler(g *goAway) error { - // Handling of outgoing GoAway is very specific to side. - if l.ssGoAwayHandler != nil { - draining, err := l.ssGoAwayHandler(g) - if err != nil { - return err - } - l.draining = draining - } - return nil -} - -func (l *loopyWriter) handle(i any) error { - switch i := i.(type) { - case *incomingWindowUpdate: - l.incomingWindowUpdateHandler(i) - case *outgoingWindowUpdate: - return l.outgoingWindowUpdateHandler(i) - case *incomingSettings: - return l.incomingSettingsHandler(i) - case *outgoingSettings: - return l.outgoingSettingsHandler(i) - case *headerFrame: - return l.headerHandler(i) - case *registerStream: - l.registerStreamHandler(i) - case *cleanupStream: - return l.cleanupStreamHandler(i) - case *earlyAbortStream: - return l.earlyAbortStreamHandler(i) - case *incomingGoAway: - return l.incomingGoAwayHandler(i) - case *dataFrame: - l.preprocessData(i) - case *ping: - return l.pingHandler(i) - case *goAway: - return l.goAwayHandler(i) - case *outFlowControlSizeRequest: - l.outFlowControlSizeRequestHandler(i) - case closeConnection: - // Just return a non-I/O error and run() will flush and close the - // connection. 
- return ErrConnClosing - default: - return fmt.Errorf("transport: unknown control message type %T", i) - } - return nil -} - -func (l *loopyWriter) applySettings(ss []http2.Setting) { - for _, s := range ss { - switch s.ID { - case http2.SettingInitialWindowSize: - o := l.oiws - l.oiws = s.Val - if o < l.oiws { - // If the new limit is greater make all depleted streams active. - for _, stream := range l.estdStreams { - if stream.state == waitingOnStreamQuota { - stream.state = active - l.activeStreams.enqueue(stream) - } - } - } - case http2.SettingHeaderTableSize: - updateHeaderTblSize(l.hEnc, s.Val) - } - } -} - -// processData removes the first stream from active streams, writes out at most 16KB -// of its data and then puts it at the end of activeStreams if there's still more data -// to be sent and the stream has some stream-level flow control quota. -func (l *loopyWriter) processData() (bool, error) { - if l.sendQuota == 0 { - return true, nil - } - str := l.activeStreams.dequeue() // Remove the first stream. - if str == nil { - return true, nil - } - dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item of this stream. - // A data item is represented by a dataFrame, since it later translates into - // multiple HTTP2 data frames. - // Every dataFrame has two buffers: h, which keeps the grpc-message header, and d, which is the actual data. - // As an optimization to keep wire traffic low, data from d is copied into h to build a frame as big as the - // maximum possible HTTP2 frame size. - - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame - // Client sends out empty data frame with endStream = true - if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { - return false, err - } - str.itl.dequeue() // remove the empty data item from stream - if str.itl.isEmpty() { - str.state = empty - } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. - if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { - return false, err - } - if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { - return false, err - } - } else { - l.activeStreams.enqueue(str) - } - return false, nil - } - var ( - buf []byte - ) - // Figure out the maximum size we can send - maxSize := http2MaxFrameLen - if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. - str.state = waitingOnStreamQuota - return false, nil - } else if maxSize > strQuota { - maxSize = strQuota - } - if maxSize > int(l.sendQuota) { // connection-level flow control. - maxSize = int(l.sendQuota) - } - // Compute how much of the header and data we can send within quota and max frame length - hSize := min(maxSize, len(dataItem.h)) - dSize := min(maxSize-hSize, len(dataItem.d)) - if hSize != 0 { - if dSize == 0 { - buf = dataItem.h - } else { - // We can add some data to grpc message header to distribute bytes more equally across frames. - // Copy on the stack to avoid generating garbage - var localBuf [http2MaxFrameLen]byte - copy(localBuf[:hSize], dataItem.h) - copy(localBuf[hSize:], dataItem.d[:dSize]) - buf = localBuf[:hSize+dSize] - } - } else { - buf = dataItem.d - } - - size := hSize + dSize - - // Now that outgoing flow controls are checked, we can replenish str's write quota - str.wq.replenish(size) - var endStream bool - // If this is the last data message on this stream and all of it can be written in this iteration.
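// ---------------------------------------------------------------------------
// Editor's aside: the size written per iteration in processData above is the
// minimum of the HTTP/2 max frame length, the stream-level quota, and the
// connection-level quota. As a standalone sketch (helper name invented):
func sendBudget(frameLen, streamQuota, connQuota int) int {
	budget := frameLen
	if streamQuota < budget {
		budget = streamQuota
	}
	if connQuota < budget {
		budget = connQuota
	}
	return budget
}
// For example, sendBudget(16384, 9000, 20000) == 9000: the stream quota is
// the binding constraint, so at most 9000 bytes go out in this iteration.
// ---------------------------------------------------------------------------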
- if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size { - endStream = true - } - if dataItem.onEachWrite != nil { - dataItem.onEachWrite() - } - if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { - return false, err - } - str.bytesOutStanding += size - l.sendQuota -= uint32(size) - dataItem.h = dataItem.h[hSize:] - dataItem.d = dataItem.d[dSize:] - - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. - str.itl.dequeue() - } - if str.itl.isEmpty() { - str.state = empty - } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers. - if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { - return false, err - } - if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { - return false, err - } - } else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota. - str.state = waitingOnStreamQuota - } else { // Otherwise add it back to the list of active streams. - l.activeStreams.enqueue(str) - } - return false, nil -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go deleted file mode 100644 index bc8ee07474..0000000000 --- a/vendor/google.golang.org/grpc/internal/transport/defaults.go +++ /dev/null @@ -1,55 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package transport - -import ( - "math" - "time" -) - -const ( - // The default value of flow control window size in HTTP2 spec. - defaultWindowSize = 65535 - // The initial window size for flow control. - initialWindowSize = defaultWindowSize // for an RPC - infinity = time.Duration(math.MaxInt64) - defaultClientKeepaliveTime = infinity - defaultClientKeepaliveTimeout = 20 * time.Second - defaultMaxStreamsClient = 100 - defaultMaxConnectionIdle = infinity - defaultMaxConnectionAge = infinity - defaultMaxConnectionAgeGrace = infinity - defaultServerKeepaliveTime = 2 * time.Hour - defaultServerKeepaliveTimeout = 20 * time.Second - defaultKeepalivePolicyMinTime = 5 * time.Minute - // max window limit set by HTTP2 Specs. - maxWindowSize = math.MaxInt32 - // defaultWriteQuota is the default value for number of data - // bytes that each stream can schedule before some of it being - // flushed out. - defaultWriteQuota = 64 * 1024 - defaultClientMaxHeaderListSize = uint32(16 << 20) - defaultServerMaxHeaderListSize = uint32(16 << 20) -) - -// MaxStreamID is the upper bound for the stream ID before the current -// transport gracefully closes and new transport is created for subsequent RPCs. -// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit -// integer. It's exported so that tests can override it. 
-var MaxStreamID = uint32(math.MaxInt32 * 3 / 4) diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go deleted file mode 100644 index 97198c5158..0000000000 --- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go +++ /dev/null @@ -1,215 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package transport - -import ( - "fmt" - "math" - "sync" - "sync/atomic" -) - -// writeQuota is a soft limit on the amount of data a stream can -// schedule before some of it is written out. -type writeQuota struct { - quota int32 - // get waits on reads from ch when quota goes less than or equal to zero. - // replenish writes on it when quota goes positive again. - ch chan struct{} - // done is triggered in the error case. - done <-chan struct{} - // replenish is called by loopyWriter to give quota back. - // It is implemented as a field so that it can be updated - // by tests. - replenish func(n int) -} - -func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota { - w := &writeQuota{ - quota: sz, - ch: make(chan struct{}, 1), - done: done, - } - w.replenish = w.realReplenish - return w -} - -func (w *writeQuota) get(sz int32) error { - for { - if atomic.LoadInt32(&w.quota) > 0 { - atomic.AddInt32(&w.quota, -sz) - return nil - } - select { - case <-w.ch: - continue - case <-w.done: - return errStreamDone - } - } -} - -func (w *writeQuota) realReplenish(n int) { - sz := int32(n) - a := atomic.AddInt32(&w.quota, sz) - b := a - sz - if b <= 0 && a > 0 { - select { - case w.ch <- struct{}{}: - default: - } - } -} - -type trInFlow struct { - limit uint32 - unacked uint32 - effectiveWindowSize uint32 -} - -func (f *trInFlow) newLimit(n uint32) uint32 { - d := n - f.limit - f.limit = n - f.updateEffectiveWindowSize() - return d -} - -func (f *trInFlow) onData(n uint32) uint32 { - f.unacked += n - if f.unacked >= f.limit/4 { - w := f.unacked - f.unacked = 0 - f.updateEffectiveWindowSize() - return w - } - f.updateEffectiveWindowSize() - return 0 -} - -func (f *trInFlow) reset() uint32 { - w := f.unacked - f.unacked = 0 - f.updateEffectiveWindowSize() - return w -} - -func (f *trInFlow) updateEffectiveWindowSize() { - atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked) -} - -func (f *trInFlow) getSize() uint32 { - return atomic.LoadUint32(&f.effectiveWindowSize) -} - -// TODO(mmukhi): Simplify this code. -// inFlow deals with inbound flow control. -type inFlow struct { - mu sync.Mutex - // The inbound flow control limit for pending data. - limit uint32 - // pendingData is the overall data which has been received but not yet been - // consumed by the application. - pendingData uint32 - // The amount of data the application has consumed but grpc has not yet sent a - // window update for. Used to reduce window update frequency.
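// ---------------------------------------------------------------------------
// Editor's aside: trInFlow.onData above and inFlow.onRead below batch
// WINDOW_UPDATE frames with the same quarter-window rule: updates are
// deferred until the unacknowledged (or consumed-but-unannounced) byte count
// reaches limit/4. With the default 65535-byte window, bytes accumulate until
// the count reaches 16383, at which point a single WINDOW_UPDATE for the
// accumulated amount is sent, trading a slightly smaller effective window for
// far fewer control frames on the wire.
// ---------------------------------------------------------------------------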
- pendingUpdate uint32 - // delta is the extra window update given by the receiver when an application - // is reading data bigger in size than the inFlow limit. - delta uint32 -} - -// newLimit updates the inflow window to a new value n. -// It assumes that n is always greater than the old limit. -func (f *inFlow) newLimit(n uint32) { - f.mu.Lock() - f.limit = n - f.mu.Unlock() -} - -func (f *inFlow) maybeAdjust(n uint32) uint32 { - if n > uint32(math.MaxInt32) { - n = uint32(math.MaxInt32) - } - f.mu.Lock() - defer f.mu.Unlock() - // estSenderQuota is the receiver's view of the maximum number of bytes the sender - // can send without a window update. - estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate)) - // estUntransmittedData is the maximum number of bytes the sender might not have put - // on the wire yet. A value of 0 or less means that we have already received all or - // more bytes than the application is requesting to read. - estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative. - // This implies that unless we send a window update, the sender won't be able to send all the bytes - // for this message. Therefore we must send an update over the limit since there's an active read - // request from the application. - if estUntransmittedData > estSenderQuota { - // The sender's window shouldn't exceed 2^31 - 1 as specified in the HTTP spec. - if f.limit+n > maxWindowSize { - f.delta = maxWindowSize - f.limit - } else { - // Send a window update for the whole message and not just the difference between - // estUntransmittedData and estSenderQuota. This will be helpful in case the message - // is padded; we will fall back on the currently available window (at least 1/4th of the limit). - f.delta = n - } - return f.delta - } - return 0 -} - -// onData is invoked when some data frame is received. It updates pendingData. -func (f *inFlow) onData(n uint32) error { - f.mu.Lock() - f.pendingData += n - if f.pendingData+f.pendingUpdate > f.limit+f.delta { - limit := f.limit - rcvd := f.pendingData + f.pendingUpdate - f.mu.Unlock() - return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit) - } - f.mu.Unlock() - return nil -} - -// onRead is invoked when the application reads the data. It returns the window size -// to be sent to the peer. -func (f *inFlow) onRead(n uint32) uint32 { - f.mu.Lock() - if f.pendingData == 0 { - f.mu.Unlock() - return 0 - } - f.pendingData -= n - if n > f.delta { - n -= f.delta - f.delta = 0 - } else { - f.delta -= n - n = 0 - } - f.pendingUpdate += n - if f.pendingUpdate >= f.limit/4 { - wu := f.pendingUpdate - f.pendingUpdate = 0 - f.mu.Unlock() - return wu - } - f.mu.Unlock() - return 0 -} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go deleted file mode 100644 index bd39ff9a22..0000000000 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ /dev/null @@ -1,488 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// This file is the implementation of a gRPC server using HTTP/2 which -// uses the standard Go http2 Server implementation (via the -// http.Handler interface), rather than speaking low-level HTTP/2 -// frames itself. It is the implementation of *grpc.Server.ServeHTTP. - -package transport - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "net" - "net/http" - "strings" - "sync" - "time" - - "golang.org/x/net/http2" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcutil" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/stats" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" -) - -// NewServerHandlerTransport returns a ServerTransport handling gRPC from -// inside an http.Handler, or writes an HTTP error to w and returns an error. -// It requires that the http Server supports HTTP/2. -func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { - if r.ProtoMajor != 2 { - msg := "gRPC requires HTTP/2" - http.Error(w, msg, http.StatusBadRequest) - return nil, errors.New(msg) - } - if r.Method != "POST" { - msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) - http.Error(w, msg, http.StatusBadRequest) - return nil, errors.New(msg) - } - contentType := r.Header.Get("Content-Type") - // TODO: do we assume contentType is lowercase? 
we did before - contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) - if !validContentType { - msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType) - http.Error(w, msg, http.StatusUnsupportedMediaType) - return nil, errors.New(msg) - } - if _, ok := w.(http.Flusher); !ok { - msg := "gRPC requires a ResponseWriter supporting http.Flusher" - http.Error(w, msg, http.StatusInternalServerError) - return nil, errors.New(msg) - } - - var localAddr net.Addr - if la := r.Context().Value(http.LocalAddrContextKey); la != nil { - localAddr, _ = la.(net.Addr) - } - var authInfo credentials.AuthInfo - if r.TLS != nil { - authInfo = credentials.TLSInfo{State: *r.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} - } - p := peer.Peer{ - Addr: strAddr(r.RemoteAddr), - LocalAddr: localAddr, - AuthInfo: authInfo, - } - st := &serverHandlerTransport{ - rw: w, - req: r, - closedCh: make(chan struct{}), - writes: make(chan func()), - peer: p, - contentType: contentType, - contentSubtype: contentSubtype, - stats: stats, - } - st.logger = prefixLoggerForServerHandlerTransport(st) - - if v := r.Header.Get("grpc-timeout"); v != "" { - to, err := decodeTimeout(v) - if err != nil { - msg := fmt.Sprintf("malformed grpc-timeout: %v", err) - http.Error(w, msg, http.StatusBadRequest) - return nil, status.Error(codes.Internal, msg) - } - st.timeoutSet = true - st.timeout = to - } - - metakv := []string{"content-type", contentType} - if r.Host != "" { - metakv = append(metakv, ":authority", r.Host) - } - for k, vv := range r.Header { - k = strings.ToLower(k) - if isReservedHeader(k) && !isWhitelistedHeader(k) { - continue - } - for _, v := range vv { - v, err := decodeMetadataHeader(k, v) - if err != nil { - msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err) - http.Error(w, msg, http.StatusBadRequest) - return nil, status.Error(codes.Internal, msg) - } - metakv = append(metakv, k, v) - } - } - st.headerMD = metadata.Pairs(metakv...) - - return st, nil -} - -// serverHandlerTransport is an implementation of ServerTransport -// which replies to exactly one gRPC request (exactly one HTTP request), -// using the net/http.Handler interface. This http.Handler is guaranteed -// at this point to be speaking over HTTP/2, so it's able to speak valid -// gRPC. -type serverHandlerTransport struct { - rw http.ResponseWriter - req *http.Request - timeoutSet bool - timeout time.Duration - - headerMD metadata.MD - - peer peer.Peer - - closeOnce sync.Once - closedCh chan struct{} // closed on Close - - // writes is a channel of code to run serialized in the - // ServeHTTP (HandleStreams) goroutine. The channel is closed - // when WriteStatus is called. - writes chan func() - - // block concurrent WriteStatus calls - // e.g. 
grpc/(*serverStream).SendMsg/RecvMsg
- writeStatusMu sync.Mutex
-
- // we just mirror the request content-type
- contentType string
- // we store both contentType and contentSubtype so we don't keep recreating them
- // TODO make sure this is consistent across handler_server and http2_server
- contentSubtype string
-
- stats []stats.Handler
- logger *grpclog.PrefixLogger
-}
-
-func (ht *serverHandlerTransport) Close(err error) {
- ht.closeOnce.Do(func() {
- if ht.logger.V(logLevel) {
- ht.logger.Infof("Closing: %v", err)
- }
- close(ht.closedCh)
- })
-}
-
-func (ht *serverHandlerTransport) Peer() *peer.Peer {
- return &peer.Peer{
- Addr: ht.peer.Addr,
- LocalAddr: ht.peer.LocalAddr,
- AuthInfo: ht.peer.AuthInfo,
- }
-}
-
-// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
-// the empty string if unknown.
-type strAddr string
-
-func (a strAddr) Network() string {
- if a != "" {
- // Per the documentation on net/http.Request.RemoteAddr, if this is
- // set, it's set to the IP:port of the peer (hence, TCP):
- // https://golang.org/pkg/net/http/#Request
- //
- // If we want to support Unix sockets later, we can
- // add our own grpc-specific convention within the
- // grpc codebase to set RemoteAddr to a different
- // format, or probably better: we can attach it to the
- // context and use that from serverHandlerTransport.RemoteAddr.
- return "tcp"
- }
- return ""
-}
-
-func (a strAddr) String() string { return string(a) }
-
-// do runs fn in the ServeHTTP goroutine.
-func (ht *serverHandlerTransport) do(fn func()) error {
- select {
- case <-ht.closedCh:
- return ErrConnClosing
- case ht.writes <- fn:
- return nil
- }
-}
-
-func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error {
- ht.writeStatusMu.Lock()
- defer ht.writeStatusMu.Unlock()
-
- headersWritten := s.updateHeaderSent()
- err := ht.do(func() {
- if !headersWritten {
- ht.writePendingHeaders(s)
- }
-
- // And flush, in case no header or body has been sent yet.
- // This forces a separation of headers and trailers if this is the
- // first call (for example, in end2end tests' TestNoService).
- ht.rw.(http.Flusher).Flush()
-
- h := ht.rw.Header()
- h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code()))
- if m := st.Message(); m != "" {
- h.Set("Grpc-Message", encodeGrpcMessage(m))
- }
-
- s.hdrMu.Lock()
- if p := st.Proto(); p != nil && len(p.Details) > 0 {
- delete(s.trailer, grpcStatusDetailsBinHeader)
- stBytes, err := proto.Marshal(p)
- if err != nil {
- // TODO: return error instead, when callers are able to handle it.
- panic(err)
- }
-
- h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes))
- }
-
- if len(s.trailer) > 0 {
- for k, vv := range s.trailer {
- // Clients don't tolerate reading restricted headers after some non-restricted ones were sent.
- if isReservedHeader(k) {
- continue
- }
- for _, v := range vv {
- // http2 ResponseWriter mechanism to send undeclared Trailers after
- // the headers have possibly been written.
- h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v))
- }
- }
- }
- s.hdrMu.Unlock()
- })
-
- if err == nil { // transport has not been closed
- // Note: The trailer fields are compressed with hpack after this call returns.
- // No WireLength field is set here.
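// ---------------------------------------------------------------------------
// Aside (illustrative sketch, not part of the diff): the trailer mechanism
// WriteStatus uses above. net/http sends a header as a trailer either when it
// is declared in a "Trailer" header before the body, or when it is set later
// with the http.TrailerPrefix escape hatch. Handler name and listen address
// are made up for the example.
package main

import "net/http"

func trailerHandler(w http.ResponseWriter, r *http.Request) {
    // Declared trailer: announced before the body is written.
    w.Header().Set("Trailer", "Grpc-Status")
    w.WriteHeader(http.StatusOK)
    w.Write([]byte("body"))
    // Set after the body; sent as a trailer because it was declared above.
    w.Header().Set("Grpc-Status", "0")
    // Undeclared trailer: the same escape hatch used in WriteStatus above.
    w.Header().Set(http.TrailerPrefix+"Grpc-Message", "ok")
}

func main() {
    http.ListenAndServe("localhost:8080", http.HandlerFunc(trailerHandler))
}
// ---------------------------------------------------------------------------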
- for _, sh := range ht.stats { - sh.HandleRPC(s.Context(), &stats.OutTrailer{ - Trailer: s.trailer.Copy(), - }) - } - } - ht.Close(errors.New("finished writing status")) - return err -} - -// writePendingHeaders sets common and custom headers on the first -// write call (Write, WriteHeader, or WriteStatus) -func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) { - ht.writeCommonHeaders(s) - ht.writeCustomHeaders(s) -} - -// writeCommonHeaders sets common headers on the first write -// call (Write, WriteHeader, or WriteStatus). -func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { - h := ht.rw.Header() - h["Date"] = nil // suppress Date to make tests happy; TODO: restore - h.Set("Content-Type", ht.contentType) - - // Predeclare trailers we'll set later in WriteStatus (after the body). - // This is a SHOULD in the HTTP RFC, and the way you add (known) - // Trailers per the net/http.ResponseWriter contract. - // See https://golang.org/pkg/net/http/#ResponseWriter - // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers - h.Add("Trailer", "Grpc-Status") - h.Add("Trailer", "Grpc-Message") - h.Add("Trailer", "Grpc-Status-Details-Bin") - - if s.sendCompress != "" { - h.Set("Grpc-Encoding", s.sendCompress) - } -} - -// writeCustomHeaders sets custom headers set on the stream via SetHeader -// on the first write call (Write, WriteHeader, or WriteStatus) -func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { - h := ht.rw.Header() - - s.hdrMu.Lock() - for k, vv := range s.header { - if isReservedHeader(k) { - continue - } - for _, v := range vv { - h.Add(k, encodeMetadataHeader(k, v)) - } - } - - s.hdrMu.Unlock() -} - -func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { - headersWritten := s.updateHeaderSent() - return ht.do(func() { - if !headersWritten { - ht.writePendingHeaders(s) - } - ht.rw.Write(hdr) - ht.rw.Write(data) - ht.rw.(http.Flusher).Flush() - }) -} - -func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { - if err := s.SetHeader(md); err != nil { - return err - } - - headersWritten := s.updateHeaderSent() - err := ht.do(func() { - if !headersWritten { - ht.writePendingHeaders(s) - } - - ht.rw.WriteHeader(200) - ht.rw.(http.Flusher).Flush() - }) - - if err == nil { - for _, sh := range ht.stats { - // Note: The header fields are compressed with hpack after this call returns. - // No WireLength field is set here. - sh.HandleRPC(s.Context(), &stats.OutHeader{ - Header: md.Copy(), - Compression: s.sendCompress, - }) - } - } - return err -} - -func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) { - // With this transport type there will be exactly 1 stream: this HTTP request. - var cancel context.CancelFunc - if ht.timeoutSet { - ctx, cancel = context.WithTimeout(ctx, ht.timeout) - } else { - ctx, cancel = context.WithCancel(ctx) - } - - // requestOver is closed when the status has been written via WriteStatus. 
- requestOver := make(chan struct{})
- go func() {
- select {
- case <-requestOver:
- case <-ht.closedCh:
- case <-ht.req.Context().Done():
- }
- cancel()
- ht.Close(errors.New("request is done processing"))
- }()
-
- ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
- req := ht.req
- s := &Stream{
- id: 0, // irrelevant
- ctx: ctx,
- requestRead: func(int) {},
- cancel: cancel,
- buf: newRecvBuffer(),
- st: ht,
- method: req.URL.Path,
- recvCompress: req.Header.Get("grpc-encoding"),
- contentSubtype: ht.contentSubtype,
- headerWireLength: 0, // won't have access to header wire length until golang/go#18997.
- }
- s.trReader = &transportReader{
- reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
- windowHandler: func(int) {},
- }
-
- // readerDone is closed when the Body.Read-ing goroutine exits.
- readerDone := make(chan struct{})
- go func() {
- defer close(readerDone)
-
- // TODO: minimize garbage, optimize recvBuffer code/ownership
- const readSize = 8196
- for buf := make([]byte, readSize); ; {
- n, err := req.Body.Read(buf)
- if n > 0 {
- s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])})
- buf = buf[n:]
- }
- if err != nil {
- s.buf.put(recvMsg{err: mapRecvMsgError(err)})
- return
- }
- if len(buf) == 0 {
- buf = make([]byte, readSize)
- }
- }
- }()
-
- // startStream is provided by the *grpc.Server's serveStreams.
- // It starts a goroutine serving s and exits immediately.
- // The goroutine that is started is the one that then calls
- // into ht, calling WriteHeader, Write, WriteStatus, Close, etc.
- startStream(s)
-
- ht.runStream()
- close(requestOver)
-
- // Wait for reading goroutine to finish.
- req.Body.Close()
- <-readerDone
-}
-
-func (ht *serverHandlerTransport) runStream() {
- for {
- select {
- case fn := <-ht.writes:
- fn()
- case <-ht.closedCh:
- return
- }
- }
-}
-
-func (ht *serverHandlerTransport) IncrMsgSent() {}
-
-func (ht *serverHandlerTransport) IncrMsgRecv() {}
-
-func (ht *serverHandlerTransport) Drain(debugData string) {
- panic("Drain() is not implemented")
-}
-
-// mapRecvMsgError maps the non-nil err into the appropriate
-// error value as expected by callers of *grpc.parser.recvMsg.
-// In particular, it can only be:
-// - io.EOF
-// - io.ErrUnexpectedEOF
-// - of type transport.ConnectionError
-// - an error from the status package
-func mapRecvMsgError(err error) error {
- if err == io.EOF || err == io.ErrUnexpectedEOF {
- return err
- }
- if se, ok := err.(http2.StreamError); ok {
- if code, ok := http2ErrConvTab[se.Code]; ok {
- return status.Error(code, se.Error())
- }
- }
- if strings.Contains(err.Error(), "body closed by handler") {
- return status.Error(codes.Canceled, err.Error())
- }
- return connectionErrorf(true, err, err.Error())
-}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
deleted file mode 100644
index eff8799640..0000000000
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ /dev/null
@@ -1,1796 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package transport - -import ( - "context" - "fmt" - "io" - "math" - "net" - "net/http" - "path/filepath" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/channelz" - icredentials "google.golang.org/grpc/internal/credentials" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/grpcutil" - imetadata "google.golang.org/grpc/internal/metadata" - istatus "google.golang.org/grpc/internal/status" - isyscall "google.golang.org/grpc/internal/syscall" - "google.golang.org/grpc/internal/transport/networktype" - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/stats" - "google.golang.org/grpc/status" -) - -// clientConnectionCounter counts the number of connections a client has -// initiated (equal to the number of http2Clients created). Must be accessed -// atomically. -var clientConnectionCounter uint64 - -var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) - -// http2Client implements the ClientTransport interface with HTTP2. -type http2Client struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - ctx context.Context - cancel context.CancelFunc - ctxDone <-chan struct{} // Cache the ctx.Done() chan. - userAgent string - // address contains the resolver returned address for this transport. - // If the `ServerName` field is set, it takes precedence over `CallHdr.Host` - // passed to `NewStream`, when determining the :authority header. - address resolver.Address - md metadata.MD - conn net.Conn // underlying communication channel - loopy *loopyWriter - remoteAddr net.Addr - localAddr net.Addr - authInfo credentials.AuthInfo // auth info about the connection - - readerDone chan struct{} // sync point to enable testing. - writerDone chan struct{} // sync point to enable testing. - // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) - // that the server sent GoAway on this transport. - goAway chan struct{} - - framer *framer - // controlBuf delivers all the control related tasks (e.g., window - // updates, reset streams, and various settings) to the controller. - // Do not access controlBuf with mu held. - controlBuf *controlBuffer - fc *trInFlow - // The scheme used: https if TLS is on, http otherwise. 
- scheme string
-
- isSecure bool
-
- perRPCCreds []credentials.PerRPCCredentials
-
- kp keepalive.ClientParameters
- keepaliveEnabled bool
-
- statsHandlers []stats.Handler
-
- initialWindowSize int32
-
- // configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE
- maxSendHeaderListSize *uint32
-
- bdpEst *bdpEstimator
-
- maxConcurrentStreams uint32
- streamQuota int64
- streamsQuotaAvailable chan struct{}
- waitingStreams uint32
- nextID uint32
- registeredCompressors string
-
- // Do not access controlBuf with mu held.
- mu sync.Mutex // guard the following variables
- state transportState
- activeStreams map[uint32]*Stream
- // prevGoAwayID records the Last-Stream-ID in the previous GoAway frame.
- prevGoAwayID uint32
- // goAwayReason records the http2.ErrCode and debug data received with the
- // GoAway frame.
- goAwayReason GoAwayReason
- // goAwayDebugMessage contains a detailed human readable string about a
- // GoAway frame, useful for error messages.
- goAwayDebugMessage string
- // A condition variable used to signal when the keepalive goroutine should
- // go dormant. The condition for dormancy is based on the number of active
- // streams and the `PermitWithoutStream` keepalive client parameter. And
- // since the number of active streams is guarded by the above mutex, we use
- // the same for this condition variable as well.
- kpDormancyCond *sync.Cond
- // A boolean to track whether the keepalive goroutine is dormant or not.
- // This is checked before attempting to signal the above condition
- // variable.
- kpDormant bool
-
- // Fields below are for channelz metric collection.
- channelzID *channelz.Identifier
- czData *channelzData
-
- onClose func(GoAwayReason)
-
- bufferPool *bufferPool
-
- connectionID uint64
- logger *grpclog.PrefixLogger
-}
-
-func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) {
- address := addr.Addr
- networkType, ok := networktype.Get(addr)
- if fn != nil {
- // Special handling for unix scheme with custom dialer. Back in the day,
- // we did not have a unix resolver and therefore targets with a unix
- // scheme would end up using the passthrough resolver. So, users used a
- // custom dialer in this case and expected the original dial target to
- // be passed to the custom dialer. Now, we have a unix resolver. But if
- // a custom dialer is specified, we want to retain the old behavior in
- // terms of the address being passed to the custom dialer.
- if networkType == "unix" && !strings.HasPrefix(address, "\x00") {
- // Supported unix targets are either "unix://absolute-path" or
- // "unix:relative-path".
- if filepath.IsAbs(address) {
- return fn(ctx, "unix://"+address)
- }
- return fn(ctx, "unix:"+address)
- }
- return fn(ctx, address)
- }
- if !ok {
- networkType, address = parseDialTarget(address)
- }
- if networkType == "tcp" && useProxy {
- return proxyDial(ctx, address, grpcUA)
- }
- return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address)
-}
-
-func isTemporary(err error) bool {
- switch err := err.(type) {
- case interface {
- Temporary() bool
- }:
- return err.Temporary()
- case interface {
- Timeout() bool
- }:
- // Timeouts may be resolved upon retry, and are thus treated as
- // temporary.
- return err.Timeout()
- }
- return true
-}
-
-// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
-// and starts to receive messages on it. A non-nil error is returned if
-// construction fails.
-func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { - scheme := "http" - ctx, cancel := context.WithCancel(ctx) - defer func() { - if err != nil { - cancel() - } - }() - - // gRPC, resolver, balancer etc. can specify arbitrary data in the - // Attributes field of resolver.Address, which is shoved into connectCtx - // and passed to the dialer and credential handshaker. This makes it possible for - // address specific arbitrary data to reach custom dialers and credential handshakers. - connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) - - conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent) - if err != nil { - if opts.FailOnNonTempDialError { - return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) - } - return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err) - } - - // Any further errors will close the underlying connection - defer func(conn net.Conn) { - if err != nil { - conn.Close() - } - }(conn) - - // The following defer and goroutine monitor the connectCtx for cancelation - // and deadline. On context expiration, the connection is hard closed and - // this function will naturally fail as a result. Otherwise, the defer - // waits for the goroutine to exit to prevent the context from being - // monitored (and to prevent the connection from ever being closed) after - // returning from this function. - ctxMonitorDone := grpcsync.NewEvent() - newClientCtx, newClientDone := context.WithCancel(connectCtx) - defer func() { - newClientDone() // Awaken the goroutine below if connectCtx hasn't expired. - <-ctxMonitorDone.Done() // Wait for the goroutine below to exit. - }() - go func(conn net.Conn) { - defer ctxMonitorDone.Fire() // Signal this goroutine has exited. - <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. - if err := connectCtx.Err(); err != nil { - // connectCtx expired before exiting the function. Hard close the connection. - if logger.V(logLevel) { - logger.Infof("Aborting due to connect deadline expiring: %v", err) - } - conn.Close() - } - }(conn) - - kp := opts.KeepaliveParams - // Validate keepalive parameters. 
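// ---------------------------------------------------------------------------
// Aside (illustrative sketch, not part of the diff): the connect-context
// monitor used in newHTTP2Client above, in miniature. A goroutine hard-closes
// the connection if the connect context expires mid-handshake, and the defer
// guarantees the goroutine has exited before the function returns, so the
// connection can never be closed afterwards. All names are made up.
package main

import (
    "context"
    "net"
    "time"
)

func dialWithDeadline(ctx context.Context, addr string) (net.Conn, error) {
    conn, err := net.Dial("tcp", addr)
    if err != nil {
        return nil, err
    }
    monitorDone := make(chan struct{})
    monitorCtx, stopMonitor := context.WithCancel(ctx)
    defer func() {
        stopMonitor() // wake the goroutine if ctx has not expired
        <-monitorDone // wait for it, so conn cannot be closed later
    }()
    go func() {
        defer close(monitorDone)
        <-monitorCtx.Done()
        if ctx.Err() != nil { // the real deadline fired, not our cancel
            conn.Close()
        }
    }()
    // ... blocking handshake work would go here ...
    time.Sleep(10 * time.Millisecond)
    return conn, ctx.Err()
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()
    if conn, err := dialWithDeadline(ctx, "localhost:50051"); err == nil {
        conn.Close()
    }
}
// ---------------------------------------------------------------------------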
- if kp.Time == 0 { - kp.Time = defaultClientKeepaliveTime - } - if kp.Timeout == 0 { - kp.Timeout = defaultClientKeepaliveTimeout - } - keepaliveEnabled := false - if kp.Time != infinity { - if err = isyscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { - return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) - } - keepaliveEnabled = true - } - var ( - isSecure bool - authInfo credentials.AuthInfo - ) - transportCreds := opts.TransportCredentials - perRPCCreds := opts.PerRPCCredentials - - if b := opts.CredsBundle; b != nil { - if t := b.TransportCredentials(); t != nil { - transportCreds = t - } - if t := b.PerRPCCredentials(); t != nil { - perRPCCreds = append(perRPCCreds, t) - } - } - if transportCreds != nil { - conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) - if err != nil { - return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) - } - for _, cd := range perRPCCreds { - if cd.RequireTransportSecurity() { - if ci, ok := authInfo.(interface { - GetCommonAuthInfo() credentials.CommonAuthInfo - }); ok { - secLevel := ci.GetCommonAuthInfo().SecurityLevel - if secLevel != credentials.InvalidSecurityLevel && secLevel < credentials.PrivacyAndIntegrity { - return nil, connectionErrorf(true, nil, "transport: cannot send secure credentials on an insecure connection") - } - } - } - } - isSecure = true - if transportCreds.Info().SecurityProtocol == "tls" { - scheme = "https" - } - } - dynamicWindow := true - icwz := int32(initialWindowSize) - if opts.InitialConnWindowSize >= defaultWindowSize { - icwz = opts.InitialConnWindowSize - dynamicWindow = false - } - writeBufSize := opts.WriteBufferSize - readBufSize := opts.ReadBufferSize - maxHeaderListSize := defaultClientMaxHeaderListSize - if opts.MaxHeaderListSize != nil { - maxHeaderListSize = *opts.MaxHeaderListSize - } - t := &http2Client{ - ctx: ctx, - ctxDone: ctx.Done(), // Cache Done chan. - cancel: cancel, - userAgent: opts.UserAgent, - registeredCompressors: grpcutil.RegisteredCompressors(), - address: addr, - conn: conn, - remoteAddr: conn.RemoteAddr(), - localAddr: conn.LocalAddr(), - authInfo: authInfo, - readerDone: make(chan struct{}), - writerDone: make(chan struct{}), - goAway: make(chan struct{}), - framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), - fc: &trInFlow{limit: uint32(icwz)}, - scheme: scheme, - activeStreams: make(map[uint32]*Stream), - isSecure: isSecure, - perRPCCreds: perRPCCreds, - kp: kp, - statsHandlers: opts.StatsHandlers, - initialWindowSize: initialWindowSize, - nextID: 1, - maxConcurrentStreams: defaultMaxStreamsClient, - streamQuota: defaultMaxStreamsClient, - streamsQuotaAvailable: make(chan struct{}, 1), - czData: new(channelzData), - keepaliveEnabled: keepaliveEnabled, - bufferPool: newBufferPool(), - onClose: onClose, - } - t.logger = prefixLoggerForClientTransport(t) - // Add peer information to the http2client context. 
- t.ctx = peer.NewContext(t.ctx, t.getPeer()) - - if md, ok := addr.Metadata.(*metadata.MD); ok { - t.md = *md - } else if md := imetadata.Get(addr); md != nil { - t.md = md - } - t.controlBuf = newControlBuffer(t.ctxDone) - if opts.InitialWindowSize >= defaultWindowSize { - t.initialWindowSize = opts.InitialWindowSize - dynamicWindow = false - } - if dynamicWindow { - t.bdpEst = &bdpEstimator{ - bdp: initialWindowSize, - updateFlowControl: t.updateFlowControl, - } - } - for _, sh := range t.statsHandlers { - t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - }) - connBegin := &stats.ConnBegin{ - Client: true, - } - sh.HandleConn(t.ctx, connBegin) - } - t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) - if err != nil { - return nil, err - } - if t.keepaliveEnabled { - t.kpDormancyCond = sync.NewCond(&t.mu) - go t.keepalive() - } - - // Start the reader goroutine for incoming messages. Each transport has a - // dedicated goroutine which reads HTTP2 frames from the network. Then it - // dispatches the frame to the corresponding stream entity. When the - // server preface is received, readerErrCh is closed. If an error occurs - // first, an error is pushed to the channel. This must be checked before - // returning from this function. - readerErrCh := make(chan error, 1) - go t.reader(readerErrCh) - defer func() { - if err == nil { - err = <-readerErrCh - } - if err != nil { - t.Close(err) - } - }() - - // Send connection preface to server. - n, err := t.conn.Write(clientPreface) - if err != nil { - err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err) - return nil, err - } - if n != len(clientPreface) { - err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) - return nil, err - } - var ss []http2.Setting - - if t.initialWindowSize != defaultWindowSize { - ss = append(ss, http2.Setting{ - ID: http2.SettingInitialWindowSize, - Val: uint32(t.initialWindowSize), - }) - } - if opts.MaxHeaderListSize != nil { - ss = append(ss, http2.Setting{ - ID: http2.SettingMaxHeaderListSize, - Val: *opts.MaxHeaderListSize, - }) - } - err = t.framer.fr.WriteSettings(ss...) - if err != nil { - err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) - return nil, err - } - // Adjust the connection flow control window if needed. - if delta := uint32(icwz - defaultWindowSize); delta > 0 { - if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { - err = connectionErrorf(true, err, "transport: failed to write window update: %v", err) - return nil, err - } - } - - t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1) - - if err := t.framer.writer.Flush(); err != nil { - return nil, err - } - go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) - if err := t.loopy.run(); !isIOError(err) { - // Immediately close the connection, as the loopy writer returns - // when there are no more active streams and we were draining (the - // server sent a GOAWAY). For I/O errors, the reader will hit it - // after draining any remaining incoming data. - t.conn.Close() - } - close(t.writerDone) - }() - return t, nil -} - -func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { - // TODO(zhaoq): Handle uint32 overflow of Stream.id. 
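// ---------------------------------------------------------------------------
// Aside (illustrative sketch, not part of the diff): the wire-level setup the
// tail of newHTTP2Client performs — write the fixed client preface, then an
// initial SETTINGS frame — using golang.org/x/net/http2 directly. The window
// size and dial address are made up for the example.
package main

import (
    "io"
    "net"

    "golang.org/x/net/http2"
)

func sendPreface(conn net.Conn) error {
    // The preface bytes must be written before any HTTP/2 frame.
    if _, err := io.WriteString(conn, http2.ClientPreface); err != nil {
        return err
    }
    fr := http2.NewFramer(conn, conn) // writer, reader
    return fr.WriteSettings(http2.Setting{
        ID:  http2.SettingInitialWindowSize,
        Val: 1 << 20, // example: 1 MiB per-stream window
    })
}

func main() {
    conn, err := net.Dial("tcp", "localhost:50051")
    if err != nil {
        return
    }
    defer conn.Close()
    _ = sendPreface(conn)
}
// ---------------------------------------------------------------------------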
- s := &Stream{
- ct: t,
- done: make(chan struct{}),
- method: callHdr.Method,
- sendCompress: callHdr.SendCompress,
- buf: newRecvBuffer(),
- headerChan: make(chan struct{}),
- contentSubtype: callHdr.ContentSubtype,
- doneFunc: callHdr.DoneFunc,
- }
- s.wq = newWriteQuota(defaultWriteQuota, s.done)
- s.requestRead = func(n int) {
- t.adjustWindow(s, uint32(n))
- }
- // The client side stream context should have exactly the same life cycle as the user-provided context.
- // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done.
- // So we use the original context here instead of creating a copy.
- s.ctx = ctx
- s.trReader = &transportReader{
- reader: &recvBufferReader{
- ctx: s.ctx,
- ctxDone: s.ctx.Done(),
- recv: s.buf,
- closeStream: func(err error) {
- t.CloseStream(s, err)
- },
- freeBuffer: t.bufferPool.put,
- },
- windowHandler: func(n int) {
- t.updateWindow(s, uint32(n))
- },
- }
- return s
-}
-
-func (t *http2Client) getPeer() *peer.Peer {
- return &peer.Peer{
- Addr: t.remoteAddr,
- AuthInfo: t.authInfo, // Can be nil
- LocalAddr: t.localAddr,
- }
-}
-
-func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) {
- aud := t.createAudience(callHdr)
- ri := credentials.RequestInfo{
- Method: callHdr.Method,
- AuthInfo: t.authInfo,
- }
- ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri)
- authData, err := t.getTrAuthData(ctxWithRequestInfo, aud)
- if err != nil {
- return nil, err
- }
- callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr)
- if err != nil {
- return nil, err
- }
- // TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
- // first and create a slice of that exact size.
- // Make the slice of certain predictable size to reduce allocations made by append.
- hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te
- hfLen += len(authData) + len(callAuthData)
- headerFields := make([]hpack.HeaderField, 0, hfLen)
- headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"})
- headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
- headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
- headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
- headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)})
- headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
- headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
- if callHdr.PreviousAttempts > 0 {
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)})
- }
-
- registeredCompressors := t.registeredCompressors
- if callHdr.SendCompress != "" {
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
- // Include the outgoing compressor name when compressor is not registered
- // via encoding.RegisterCompressor. This is possible when client uses
- // WithCompressor dial option.
- if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) {
- if registeredCompressors != "" {
- registeredCompressors += ","
- }
- registeredCompressors += callHdr.SendCompress
- }
- }
-
- if registeredCompressors != "" {
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors})
- }
- if dl, ok := ctx.Deadline(); ok {
- // Send out timeout regardless of its value. The server can detect timeout context by itself.
- // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
- timeout := time.Until(dl)
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)})
- }
- for k, v := range authData {
- headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
- }
- for k, v := range callAuthData {
- headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
- }
- if b := stats.OutgoingTags(ctx); b != nil {
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)})
- }
- if b := stats.OutgoingTrace(ctx); b != nil {
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
- }
-
- if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
- var k string
- for k, vv := range md {
- // HTTP doesn't allow you to set pseudoheaders after non-pseudoheaders were set.
- if isReservedHeader(k) {
- continue
- }
- for _, v := range vv {
- headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
- }
- }
- for _, vv := range added {
- for i, v := range vv {
- if i%2 == 0 {
- k = strings.ToLower(v)
- continue
- }
- // HTTP doesn't allow you to set pseudoheaders after non-pseudoheaders were set.
- if isReservedHeader(k) {
- continue
- }
- headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
- }
- }
- }
- for k, vv := range t.md {
- if isReservedHeader(k) {
- continue
- }
- for _, v := range vv {
- headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
- }
- }
- return headerFields, nil
-}
-
-func (t *http2Client) createAudience(callHdr *CallHdr) string {
- // Create an audience string only if needed.
- if len(t.perRPCCreds) == 0 && callHdr.Creds == nil {
- return ""
- }
- // Construct URI required to get auth request metadata.
- // Omit port if it is the default one.
- host := strings.TrimSuffix(callHdr.Host, ":443")
- pos := strings.LastIndex(callHdr.Method, "/")
- if pos == -1 {
- pos = len(callHdr.Method)
- }
- return "https://" + host + callHdr.Method[:pos]
-}
-
-func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) {
- if len(t.perRPCCreds) == 0 {
- return nil, nil
- }
- authData := map[string]string{}
- for _, c := range t.perRPCCreds {
- data, err := c.GetRequestMetadata(ctx, audience)
- if err != nil {
- if st, ok := status.FromError(err); ok {
- // Restrict the code to the list allowed by gRFC A54.
- if istatus.IsRestrictedControlPlaneCode(st) {
- err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err)
- }
- return nil, err
- }
-
- return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err)
- }
- for k, v := range data {
- // Capital header names are illegal in HTTP/2.
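// ---------------------------------------------------------------------------
// Aside (illustrative sketch, not part of the diff): how "-bin" metadata such
// as grpc-tags-bin/grpc-trace-bin above goes on the wire. grpc-go's
// encodeBinHeader uses unpadded standard base64 (decoding also tolerates the
// padded form); this standalone sketch assumes the unpadded encoding.
package main

import (
    "encoding/base64"
    "fmt"
)

func encodeBin(v []byte) string {
    return base64.RawStdEncoding.EncodeToString(v)
}

func decodeBin(s string) ([]byte, error) {
    return base64.RawStdEncoding.DecodeString(s)
}

func main() {
    wire := encodeBin([]byte{0x0a, 0x03, 'f', 'o', 'o'})
    fmt.Println("grpc-trace-bin:", wire)
    raw, _ := decodeBin(wire)
    fmt.Printf("decoded: % x\n", raw)
}
// ---------------------------------------------------------------------------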
- k = strings.ToLower(k) - authData[k] = v - } - } - return authData, nil -} - -func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) { - var callAuthData map[string]string - // Check if credentials.PerRPCCredentials were provided via call options. - // Note: if these credentials are provided both via dial options and call - // options, then both sets of credentials will be applied. - if callCreds := callHdr.Creds; callCreds != nil { - if callCreds.RequireTransportSecurity() { - ri, _ := credentials.RequestInfoFromContext(ctx) - if !t.isSecure || credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity) != nil { - return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") - } - } - data, err := callCreds.GetRequestMetadata(ctx, audience) - if err != nil { - if st, ok := status.FromError(err); ok { - // Restrict the code to the list allowed by gRFC A54. - if istatus.IsRestrictedControlPlaneCode(st) { - err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) - } - return nil, err - } - return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err) - } - callAuthData = make(map[string]string, len(data)) - for k, v := range data { - // Capital header names are illegal in HTTP/2 - k = strings.ToLower(k) - callAuthData[k] = v - } - } - return callAuthData, nil -} - -// NewStreamError wraps an error and reports additional information. Typically -// NewStream errors result in transparent retry, as they mean nothing went onto -// the wire. However, there are two notable exceptions: -// -// 1. If the stream headers violate the max header list size allowed by the -// server. It's possible this could succeed on another transport, even if -// it's unlikely, but do not transparently retry. -// 2. If the credentials errored when requesting their headers. In this case, -// it's possible a retry can fix the problem, but indefinitely transparently -// retrying is not appropriate as it is likely the credentials, if they can -// eventually succeed, would need I/O to do so. -type NewStreamError struct { - Err error - - AllowTransparentRetry bool -} - -func (e NewStreamError) Error() string { - return e.Err.Error() -} - -// NewStream creates a stream and registers it into the transport as "active" -// streams. All non-nil errors returned will be *NewStreamError. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { - ctx = peer.NewContext(ctx, t.getPeer()) - - // ServerName field of the resolver returned address takes precedence over - // Host field of CallHdr to determine the :authority header. This is because, - // the ServerName field takes precedence for server authentication during - // TLS handshake, and the :authority header should match the value used - // for server authentication. - if t.address.ServerName != "" { - newCallHdr := *callHdr - newCallHdr.Host = t.address.ServerName - callHdr = &newCallHdr - } - - headerFields, err := t.createHeaderFields(ctx, callHdr) - if err != nil { - return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} - } - s := t.newStream(ctx, callHdr) - cleanup := func(err error) { - if s.swapState(streamDone) == streamDone { - // If it was already done, return. - return - } - // The stream was unprocessed by the server. 
- atomic.StoreUint32(&s.unprocessed, 1) - s.write(recvMsg{err: err}) - close(s.done) - // If headerChan isn't closed, then close it. - if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { - close(s.headerChan) - } - } - hdr := &headerFrame{ - hf: headerFields, - endStream: false, - initStream: func(id uint32) error { - t.mu.Lock() - // TODO: handle transport closure in loopy instead and remove this - // initStream is never called when transport is draining. - if t.state == closing { - t.mu.Unlock() - cleanup(ErrConnClosing) - return ErrConnClosing - } - if channelz.IsOn() { - atomic.AddInt64(&t.czData.streamsStarted, 1) - atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) - } - // If the keepalive goroutine has gone dormant, wake it up. - if t.kpDormant { - t.kpDormancyCond.Signal() - } - t.mu.Unlock() - return nil - }, - onOrphaned: cleanup, - wq: s.wq, - } - firstTry := true - var ch chan struct{} - transportDrainRequired := false - checkForStreamQuota := func(it any) bool { - if t.streamQuota <= 0 { // Can go negative if server decreases it. - if firstTry { - t.waitingStreams++ - } - ch = t.streamsQuotaAvailable - return false - } - if !firstTry { - t.waitingStreams-- - } - t.streamQuota-- - h := it.(*headerFrame) - h.streamID = t.nextID - t.nextID += 2 - - // Drain client transport if nextID > MaxStreamID which signals gRPC that - // the connection is closed and a new one must be created for subsequent RPCs. - transportDrainRequired = t.nextID > MaxStreamID - - s.id = h.streamID - s.fc = &inFlow{limit: uint32(t.initialWindowSize)} - t.mu.Lock() - if t.state == draining || t.activeStreams == nil { // Can be niled from Close(). - t.mu.Unlock() - return false // Don't create a stream if the transport is already closed. - } - t.activeStreams[s.id] = s - t.mu.Unlock() - if t.streamQuota > 0 && t.waitingStreams > 0 { - select { - case t.streamsQuotaAvailable <- struct{}{}: - default: - } - } - return true - } - var hdrListSizeErr error - checkForHeaderListSize := func(it any) bool { - if t.maxSendHeaderListSize == nil { - return true - } - hdrFrame := it.(*headerFrame) - var sz int64 - for _, f := range hdrFrame.hf { - if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { - hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize) - return false - } - } - return true - } - for { - success, err := t.controlBuf.executeAndPut(func(it any) bool { - return checkForHeaderListSize(it) && checkForStreamQuota(it) - }, hdr) - if err != nil { - // Connection closed. - return nil, &NewStreamError{Err: err, AllowTransparentRetry: true} - } - if success { - break - } - if hdrListSizeErr != nil { - return nil, &NewStreamError{Err: hdrListSizeErr} - } - firstTry = false - select { - case <-ch: - case <-ctx.Done(): - return nil, &NewStreamError{Err: ContextErr(ctx.Err())} - case <-t.goAway: - return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true} - case <-t.ctx.Done(): - return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} - } - } - if len(t.statsHandlers) != 0 { - header, ok := metadata.FromOutgoingContext(ctx) - if ok { - header.Set("user-agent", t.userAgent) - } else { - header = metadata.Pairs("user-agent", t.userAgent) - } - for _, sh := range t.statsHandlers { - // Note: The header fields are compressed with hpack after this call returns. - // No WireLength field is set here. 
- // Note: Creating a new stats object to prevent pollution. - outHeader := &stats.OutHeader{ - Client: true, - FullMethod: callHdr.Method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: callHdr.SendCompress, - Header: header, - } - sh.HandleRPC(s.ctx, outHeader) - } - } - if transportDrainRequired { - if t.logger.V(logLevel) { - t.logger.Infof("Draining transport: t.nextID > MaxStreamID") - } - t.GracefulClose() - } - return s, nil -} - -// CloseStream clears the footprint of a stream when the stream is not needed any more. -// This must not be executed in reader's goroutine. -func (t *http2Client) CloseStream(s *Stream, err error) { - var ( - rst bool - rstCode http2.ErrCode - ) - if err != nil { - rst = true - rstCode = http2.ErrCodeCancel - } - t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) -} - -func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { - // Set stream status to done. - if s.swapState(streamDone) == streamDone { - // If it was already done, return. If multiple closeStream calls - // happen simultaneously, wait for the first to finish. - <-s.done - return - } - // status and trailers can be updated here without any synchronization because the stream goroutine will - // only read it after it sees an io.EOF error from read or write and we'll write those errors - // only after updating this. - s.status = st - if len(mdata) > 0 { - s.trailer = mdata - } - if err != nil { - // This will unblock reads eventually. - s.write(recvMsg{err: err}) - } - // If headerChan isn't closed, then close it. - if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { - s.noHeaders = true - close(s.headerChan) - } - cleanup := &cleanupStream{ - streamID: s.id, - onWrite: func() { - t.mu.Lock() - if t.activeStreams != nil { - delete(t.activeStreams, s.id) - } - t.mu.Unlock() - if channelz.IsOn() { - if eosReceived { - atomic.AddInt64(&t.czData.streamsSucceeded, 1) - } else { - atomic.AddInt64(&t.czData.streamsFailed, 1) - } - } - }, - rst: rst, - rstCode: rstCode, - } - addBackStreamQuota := func(any) bool { - t.streamQuota++ - if t.streamQuota > 0 && t.waitingStreams > 0 { - select { - case t.streamsQuotaAvailable <- struct{}{}: - default: - } - } - return true - } - t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) - // This will unblock write. - close(s.done) - if s.doneFunc != nil { - s.doneFunc() - } -} - -// Close kicks off the shutdown process of the transport. This should be called -// only once on a transport. Once it is called, the transport should not be -// accessed any more. -func (t *http2Client) Close(err error) { - t.mu.Lock() - // Make sure we only close once. - if t.state == closing { - t.mu.Unlock() - return - } - if t.logger.V(logLevel) { - t.logger.Infof("Closing: %v", err) - } - // Call t.onClose ASAP to prevent the client from attempting to create new - // streams. - if t.state != draining { - t.onClose(GoAwayInvalid) - } - t.state = closing - streams := t.activeStreams - t.activeStreams = nil - if t.kpDormant { - // If the keepalive goroutine is blocked on this condition variable, we - // should unblock it so that the goroutine eventually exits. 
- t.kpDormancyCond.Signal() - } - t.mu.Unlock() - t.controlBuf.finish() - t.cancel() - t.conn.Close() - channelz.RemoveEntry(t.channelzID) - // Append info about previous goaways if there were any, since this may be important - // for understanding the root cause for this connection to be closed. - _, goAwayDebugMessage := t.GetGoAwayReason() - - var st *status.Status - if len(goAwayDebugMessage) > 0 { - st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) - err = st.Err() - } else { - st = status.New(codes.Unavailable, err.Error()) - } - - // Notify all active streams. - for _, s := range streams { - t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) - } - for _, sh := range t.statsHandlers { - connEnd := &stats.ConnEnd{ - Client: true, - } - sh.HandleConn(t.ctx, connEnd) - } -} - -// GracefulClose sets the state to draining, which prevents new streams from -// being created and causes the transport to be closed when the last active -// stream is closed. If there are no active streams, the transport is closed -// immediately. This does nothing if the transport is already draining or -// closing. -func (t *http2Client) GracefulClose() { - t.mu.Lock() - // Make sure we move to draining only from active. - if t.state == draining || t.state == closing { - t.mu.Unlock() - return - } - if t.logger.V(logLevel) { - t.logger.Infof("GracefulClose called") - } - t.onClose(GoAwayInvalid) - t.state = draining - active := len(t.activeStreams) - t.mu.Unlock() - if active == 0 { - t.Close(connectionErrorf(true, nil, "no active streams left to process while draining")) - return - } - t.controlBuf.put(&incomingGoAway{}) -} - -// Write formats the data into HTTP2 data frame(s) and sends it out. The caller -// should proceed only if Write returns nil. -func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { - if opts.Last { - // If it's the last message, update stream state. - if !s.compareAndSwapState(streamActive, streamWriteDone) { - return errStreamDone - } - } else if s.getState() != streamActive { - return errStreamDone - } - df := &dataFrame{ - streamID: s.id, - endStream: opts.Last, - h: hdr, - d: data, - } - if hdr != nil || data != nil { // If it's not an empty data frame, check quota. - if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { - return err - } - } - return t.controlBuf.put(df) -} - -func (t *http2Client) getStream(f http2.Frame) *Stream { - t.mu.Lock() - s := t.activeStreams[f.Header().StreamID] - t.mu.Unlock() - return s -} - -// adjustWindow sends out extra window update over the initial window size -// of stream if the application is requesting data larger in size than -// the window. -func (t *http2Client) adjustWindow(s *Stream, n uint32) { - if w := s.fc.maybeAdjust(n); w > 0 { - t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) - } -} - -// updateWindow adjusts the inbound quota for the stream. -// Window updates will be sent out when the cumulative quota -// exceeds the corresponding threshold. -func (t *http2Client) updateWindow(s *Stream, n uint32) { - if w := s.fc.onRead(n); w > 0 { - t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) - } -} - -// updateFlowControl updates the incoming flow control windows -// for the transport and the stream based on the current bdp -// estimation. 
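// ---------------------------------------------------------------------------
// Aside (illustrative sketch, not part of the diff): the batched refund logic
// behind updateWindow above. Consumed bytes accumulate as pending quota, and
// a WINDOW_UPDATE increment is emitted only once a quarter of the window is
// pending, to avoid flooding the peer with tiny updates. The struct and the
// threshold are simplified stand-ins, not grpc-go's actual inFlow type.
package main

import "fmt"

type inFlowSketch struct {
    limit   uint32 // window advertised to the peer
    pending uint32 // bytes read by the app but not yet refunded
}

// onRead records n consumed bytes; it returns an increment to send, or 0.
func (f *inFlowSketch) onRead(n uint32) uint32 {
    f.pending += n
    if f.pending >= f.limit/4 { // batch refunds into fewer frames
        w := f.pending
        f.pending = 0
        return w
    }
    return 0
}

func main() {
    f := &inFlowSketch{limit: 64 * 1024}
    for _, n := range []uint32{4096, 8192, 4096} {
        if w := f.onRead(n); w > 0 {
            fmt.Println("send WINDOW_UPDATE, increment:", w)
        }
    }
}
// ---------------------------------------------------------------------------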
-func (t *http2Client) updateFlowControl(n uint32) { - updateIWS := func(any) bool { - t.initialWindowSize = int32(n) - t.mu.Lock() - for _, s := range t.activeStreams { - s.fc.newLimit(n) - } - t.mu.Unlock() - return true - } - t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) - t.controlBuf.put(&outgoingSettings{ - ss: []http2.Setting{ - { - ID: http2.SettingInitialWindowSize, - Val: n, - }, - }, - }) -} - -func (t *http2Client) handleData(f *http2.DataFrame) { - size := f.Header().Length - var sendBDPPing bool - if t.bdpEst != nil { - sendBDPPing = t.bdpEst.add(size) - } - // Decouple connection's flow control from application's read. - // An update on connection's flow control should not depend on - // whether user application has read the data or not. Such a - // restriction is already imposed on the stream's flow control, - // and therefore the sender will be blocked anyways. - // Decoupling the connection flow control will prevent other - // active(fast) streams from starving in presence of slow or - // inactive streams. - // - if w := t.fc.onData(size); w > 0 { - t.controlBuf.put(&outgoingWindowUpdate{ - streamID: 0, - increment: w, - }) - } - if sendBDPPing { - // Avoid excessive ping detection (e.g. in an L7 proxy) - // by sending a window update prior to the BDP ping. - - if w := t.fc.reset(); w > 0 { - t.controlBuf.put(&outgoingWindowUpdate{ - streamID: 0, - increment: w, - }) - } - - t.controlBuf.put(bdpPing) - } - // Select the right stream to dispatch. - s := t.getStream(f) - if s == nil { - return - } - if size > 0 { - if err := s.fc.onData(size); err != nil { - t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false) - return - } - if f.Header().Flags.Has(http2.FlagDataPadded) { - if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { - t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) - } - } - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? - if len(f.Data()) > 0 { - buffer := t.bufferPool.get() - buffer.Reset() - buffer.Write(f.Data()) - s.write(recvMsg{buffer: buffer}) - } - } - // The server has closed the stream without sending trailers. Record that - // the read direction is closed, and set the status appropriately. - if f.StreamEnded() { - t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) - } -} - -func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { - s := t.getStream(f) - if s == nil { - return - } - if f.ErrCode == http2.ErrCodeRefusedStream { - // The stream was unprocessed by the server. - atomic.StoreUint32(&s.unprocessed, 1) - } - statusCode, ok := http2ErrConvTab[f.ErrCode] - if !ok { - if t.logger.V(logLevel) { - t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode) - } - statusCode = codes.Unknown - } - if statusCode == codes.Canceled { - if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { - // Our deadline was already exceeded, and that was likely the cause - // of this cancelation. Alter the status code accordingly. 
- statusCode = codes.DeadlineExceeded
- }
- }
- t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false)
-}
-
-func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
- if f.IsAck() {
- return
- }
- var maxStreams *uint32
- var ss []http2.Setting
- var updateFuncs []func()
- f.ForeachSetting(func(s http2.Setting) error {
- switch s.ID {
- case http2.SettingMaxConcurrentStreams:
- maxStreams = new(uint32)
- *maxStreams = s.Val
- case http2.SettingMaxHeaderListSize:
- updateFuncs = append(updateFuncs, func() {
- t.maxSendHeaderListSize = new(uint32)
- *t.maxSendHeaderListSize = s.Val
- })
- default:
- ss = append(ss, s)
- }
- return nil
- })
- if isFirst && maxStreams == nil {
- maxStreams = new(uint32)
- *maxStreams = math.MaxUint32
- }
- sf := &incomingSettings{
- ss: ss,
- }
- if maxStreams != nil {
- updateStreamQuota := func() {
- delta := int64(*maxStreams) - int64(t.maxConcurrentStreams)
- t.maxConcurrentStreams = *maxStreams
- t.streamQuota += delta
- if delta > 0 && t.waitingStreams > 0 {
- close(t.streamsQuotaAvailable) // wake all of them up.
- t.streamsQuotaAvailable = make(chan struct{}, 1)
- }
- }
- updateFuncs = append(updateFuncs, updateStreamQuota)
- }
- t.controlBuf.executeAndPut(func(any) bool {
- for _, f := range updateFuncs {
- f()
- }
- return true
- }, sf)
-}
-
-func (t *http2Client) handlePing(f *http2.PingFrame) {
- if f.IsAck() {
- // Maybe it's a BDP ping.
- if t.bdpEst != nil {
- t.bdpEst.calculate(f.Data)
- }
- return
- }
- pingAck := &ping{ack: true}
- copy(pingAck.data[:], f.Data[:])
- t.controlBuf.put(pingAck)
-}
-
-func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
- t.mu.Lock()
- if t.state == closing {
- t.mu.Unlock()
- return
- }
- if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" {
- // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug
- // data equal to ASCII "too_many_pings", it should log the occurrence at a log level that is
- // enabled by default and double the configured KEEPALIVE_TIME used for new connections
- // on that channel.
- logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".")
- }
- id := f.LastStreamID
- if id > 0 && id%2 == 0 {
- t.mu.Unlock()
- t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id))
- return
- }
- // A client can receive multiple GoAways from the server (see
- // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first
- // GoAway will be sent with an ID of MaxInt32 and the second GoAway will be
- // sent after an RTT delay with the ID of the last stream the server will
- // process.
- //
- // Therefore, when we get the first GoAway we don't necessarily close any
- // streams. While in case of second GoAway we close all streams created after
- // the GoAwayId. This way streams that were in-flight while the GoAway from
- // server was being sent don't get killed.
- select {
- case <-t.goAway: // t.goAway has been closed (i.e., multiple GoAways).
- // If there are multiple GoAways the first one should always have an ID greater than the following ones.
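// ---------------------------------------------------------------------------
// Aside (illustrative sketch, not part of the diff): the PING round-trip that
// handlePing above participates in — a PING with ack=false must be answered
// by echoing its eight opaque bytes with ack=true. The framer setup and dial
// address are made up; error handling is minimal.
package main

import (
    "net"

    "golang.org/x/net/http2"
)

func answerPings(conn net.Conn) error {
    fr := http2.NewFramer(conn, conn)
    for {
        f, err := fr.ReadFrame()
        if err != nil {
            return err
        }
        if pf, ok := f.(*http2.PingFrame); ok && !pf.IsAck() {
            // Ack with the same 8-byte payload, like pingAck above.
            if err := fr.WritePing(true, pf.Data); err != nil {
                return err
            }
        }
    }
}

func main() {
    conn, err := net.Dial("tcp", "localhost:50051")
    if err != nil {
        return
    }
    defer conn.Close()
    _ = answerPings(conn)
}
// ---------------------------------------------------------------------------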
- if id > t.prevGoAwayID { - t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)) - return - } - default: - t.setGoAwayReason(f) - close(t.goAway) - defer t.controlBuf.put(&incomingGoAway{}) // Defer as t.mu is currently held. - // Notify the clientconn about the GOAWAY before we set the state to - // draining, to allow the client to stop attempting to create streams - // before disallowing new streams on this connection. - if t.state != draining { - t.onClose(t.goAwayReason) - t.state = draining - } - } - // All streams with IDs greater than the GoAwayId - // and smaller than the previous GoAway ID should be killed. - upperLimit := t.prevGoAwayID - if upperLimit == 0 { // This is the first GoAway Frame. - upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. - } - - t.prevGoAwayID = id - if len(t.activeStreams) == 0 { - t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) - return - } - - streamsToClose := make([]*Stream, 0) - for streamID, stream := range t.activeStreams { - if streamID > id && streamID <= upperLimit { - // The stream was unprocessed by the server. - atomic.StoreUint32(&stream.unprocessed, 1) - streamsToClose = append(streamsToClose, stream) - } - } - t.mu.Unlock() - // Called outside t.mu because closeStream can take controlBuf's mu, which - // could induce deadlock and is not allowed. - for _, stream := range streamsToClose { - t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) - } -} - -// setGoAwayReason sets the value of t.goAwayReason based -// on the GoAway frame received. -// It expects a lock on transport's mutex to be held by -// the caller. -func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { - t.goAwayReason = GoAwayNoReason - switch f.ErrCode { - case http2.ErrCodeEnhanceYourCalm: - if string(f.DebugData()) == "too_many_pings" { - t.goAwayReason = GoAwayTooManyPings - } - } - if len(f.DebugData()) == 0 { - t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode) - } else { - t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData())) - } -} - -func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) { - t.mu.Lock() - defer t.mu.Unlock() - return t.goAwayReason, t.goAwayDebugMessage -} - -func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { - t.controlBuf.put(&incomingWindowUpdate{ - streamID: f.Header().StreamID, - increment: f.Increment, - }) -} - -// operateHeaders takes action on the decoded headers. -func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { - s := t.getStream(frame) - if s == nil { - return - } - endStream := frame.StreamEnded() - atomic.StoreUint32(&s.bytesReceived, 1) - initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0 - - if !initialHeader && !endStream { - // As specified by gRPC over HTTP2, a HEADERS frame (and associated CONTINUATION frames) can only appear at the start or end of a stream. Therefore, second HEADERS frame must have EOS bit set. - st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream") - t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false) - return - } - - // frame.Truncated is set to true when framer detects that the current header - // list size hits MaxHeaderListSize limit. 
- if frame.Truncated { - se := status.New(codes.Internal, "peer header list size exceeded limit") - t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream) - return - } - - var ( - // If a gRPC Response-Headers has already been received, then it means - // that the peer is speaking gRPC and we are in gRPC mode. - isGRPC = !initialHeader - mdata = make(map[string][]string) - contentTypeErr = "malformed header: missing HTTP content-type" - grpcMessage string - recvCompress string - httpStatusCode *int - httpStatusErr string - rawStatusCode = codes.Unknown - // headerError is set if an error is encountered while parsing the headers - headerError string - ) - - if initialHeader { - httpStatusErr = "malformed header: missing HTTP status" - } - - for _, hf := range frame.Fields { - switch hf.Name { - case "content-type": - if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType { - contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value) - break - } - contentTypeErr = "" - mdata[hf.Name] = append(mdata[hf.Name], hf.Value) - isGRPC = true - case "grpc-encoding": - recvCompress = hf.Value - case "grpc-status": - code, err := strconv.ParseInt(hf.Value, 10, 32) - if err != nil { - se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err)) - t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) - return - } - rawStatusCode = codes.Code(uint32(code)) - case "grpc-message": - grpcMessage = decodeGrpcMessage(hf.Value) - case ":status": - if hf.Value == "200" { - httpStatusErr = "" - statusCode := 200 - httpStatusCode = &statusCode - break - } - - c, err := strconv.ParseInt(hf.Value, 10, 32) - if err != nil { - se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err)) - t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) - return - } - statusCode := int(c) - httpStatusCode = &statusCode - - httpStatusErr = fmt.Sprintf( - "unexpected HTTP status code received from server: %d (%s)", - statusCode, - http.StatusText(statusCode), - ) - default: - if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { - break - } - v, err := decodeMetadataHeader(hf.Name, hf.Value) - if err != nil { - headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err) - logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) - break - } - mdata[hf.Name] = append(mdata[hf.Name], v) - } - } - - if !isGRPC || httpStatusErr != "" { - var code = codes.Internal // when header does not include HTTP status, return INTERNAL - - if httpStatusCode != nil { - var ok bool - code, ok = HTTPStatusConvTab[*httpStatusCode] - if !ok { - code = codes.Unknown - } - } - var errs []string - if httpStatusErr != "" { - errs = append(errs, httpStatusErr) - } - if contentTypeErr != "" { - errs = append(errs, contentTypeErr) - } - // Verify the HTTP response is a 200. - se := status.New(code, strings.Join(errs, "; ")) - t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) - return - } - - if headerError != "" { - se := status.New(codes.Internal, headerError) - t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) - return - } - - // For headers, set them in s.header and close headerChan. For trailers or - // trailers-only, closeStream will set the trailers and close headerChan as - // needed. 
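// ---------------------------------------------------------------------------
// Aside (illustrative sketch, not part of the diff): how the "grpc-status"
// value parsed in operateHeaders above becomes a codes.Code. The helper name
// is made up; the error path mirrors the malformed-status handling above.
package main

import (
    "fmt"
    "strconv"

    "google.golang.org/grpc/codes"
)

func parseGrpcStatus(v string) (codes.Code, error) {
    n, err := strconv.ParseInt(v, 10, 32)
    if err != nil {
        return codes.Unknown, fmt.Errorf("malformed grpc-status %q: %v", v, err)
    }
    return codes.Code(uint32(n)), nil
}

func main() {
    c, err := parseGrpcStatus("8")
    fmt.Println(c, err) // ResourceExhausted <nil>
}
// ---------------------------------------------------------------------------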
- if !endStream { - // If headerChan hasn't been closed yet (expected, given we checked it - // above, but something else could have potentially closed the whole - // stream). - if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { - s.headerValid = true - // These values can be set without any synchronization because - // stream goroutine will read it only after seeing a closed - // headerChan which we'll close after setting this. - s.recvCompress = recvCompress - if len(mdata) > 0 { - s.header = mdata - } - close(s.headerChan) - } - } - - for _, sh := range t.statsHandlers { - if !endStream { - inHeader := &stats.InHeader{ - Client: true, - WireLength: int(frame.Header().Length), - Header: metadata.MD(mdata).Copy(), - Compression: s.recvCompress, - } - sh.HandleRPC(s.ctx, inHeader) - } else { - inTrailer := &stats.InTrailer{ - Client: true, - WireLength: int(frame.Header().Length), - Trailer: metadata.MD(mdata).Copy(), - } - sh.HandleRPC(s.ctx, inTrailer) - } - } - - if !endStream { - return - } - - status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) - - // If client received END_STREAM from server while stream was still active, - // send RST_STREAM. - rstStream := s.getState() == streamActive - t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true) -} - -// readServerPreface reads and handles the initial settings frame from the -// server. -func (t *http2Client) readServerPreface() error { - frame, err := t.framer.fr.ReadFrame() - if err != nil { - return connectionErrorf(true, err, "error reading server preface: %v", err) - } - sf, ok := frame.(*http2.SettingsFrame) - if !ok { - return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame) - } - t.handleSettings(sf, true) - return nil -} - -// reader verifies the server preface and reads all subsequent data from -// network connection. If the server preface is not read successfully, an -// error is pushed to errCh; otherwise errCh is closed with no error. -func (t *http2Client) reader(errCh chan<- error) { - defer close(t.readerDone) - - if err := t.readServerPreface(); err != nil { - errCh <- err - return - } - close(errCh) - if t.keepaliveEnabled { - atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) - } - - // loop to keep reading incoming messages on this transport. - for { - t.controlBuf.throttle() - frame, err := t.framer.fr.ReadFrame() - if t.keepaliveEnabled { - atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) - } - if err != nil { - // Abort an active stream if the http2.Framer returns a - // http2.StreamError. This can happen only if the server's response - // is malformed http2. - if se, ok := err.(http2.StreamError); ok { - t.mu.Lock() - s := t.activeStreams[se.StreamID] - t.mu.Unlock() - if s != nil { - // use error detail to provide better err message - code := http2ErrConvTab[se.Code] - errorDetail := t.framer.fr.ErrorDetail() - var msg string - if errorDetail != nil { - msg = errorDetail.Error() - } else { - msg = "received invalid frame" - } - t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) - } - continue - } else { - // Transport error. 
- t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) - return - } - } - switch frame := frame.(type) { - case *http2.MetaHeadersFrame: - t.operateHeaders(frame) - case *http2.DataFrame: - t.handleData(frame) - case *http2.RSTStreamFrame: - t.handleRSTStream(frame) - case *http2.SettingsFrame: - t.handleSettings(frame, false) - case *http2.PingFrame: - t.handlePing(frame) - case *http2.GoAwayFrame: - t.handleGoAway(frame) - case *http2.WindowUpdateFrame: - t.handleWindowUpdate(frame) - default: - if logger.V(logLevel) { - logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame) - } - } - } -} - -func minTime(a, b time.Duration) time.Duration { - if a < b { - return a - } - return b -} - -// keepalive running in a separate goroutine makes sure the connection is alive by sending pings. -func (t *http2Client) keepalive() { - p := &ping{data: [8]byte{}} - // True iff a ping has been sent, and no data has been received since then. - outstandingPing := false - // Amount of time remaining before which we should receive an ACK for the - // last sent ping. - timeoutLeft := time.Duration(0) - // Records the last value of t.lastRead before we go block on the timer. - // This is required to check for read activity since then. - prevNano := time.Now().UnixNano() - timer := time.NewTimer(t.kp.Time) - for { - select { - case <-timer.C: - lastRead := atomic.LoadInt64(&t.lastRead) - if lastRead > prevNano { - // There has been read activity since the last time we were here. - outstandingPing = false - // Next timer should fire at kp.Time seconds from lastRead time. - timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) - prevNano = lastRead - continue - } - if outstandingPing && timeoutLeft <= 0 { - t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")) - return - } - t.mu.Lock() - if t.state == closing { - // If the transport is closing, we should exit from the - // keepalive goroutine here. If not, we could have a race - // between the call to Signal() from Close() and the call to - // Wait() here, whereby the keepalive goroutine ends up - // blocking on the condition variable which will never be - // signalled again. - t.mu.Unlock() - return - } - if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream { - // If a ping was sent out previously (because there were active - // streams at that point) which wasn't acked and its timeout - // hadn't fired, but we got here and are about to go dormant, - // we should make sure that we unconditionally send a ping once - // we awaken. - outstandingPing = false - t.kpDormant = true - t.kpDormancyCond.Wait() - } - t.kpDormant = false - t.mu.Unlock() - - // We get here either because we were dormant and a new stream was - // created which unblocked the Wait() call, or because the - // keepalive timer expired. In both cases, we need to send a ping. - if !outstandingPing { - if channelz.IsOn() { - atomic.AddInt64(&t.czData.kpCount, 1) - } - t.controlBuf.put(p) - timeoutLeft = t.kp.Timeout - outstandingPing = true - } - // The amount of time to sleep here is the minimum of kp.Time and - // timeoutLeft. This will ensure that we wait only for kp.Time - // before sending out the next ping (for cases where the ping is - // acked). 
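Aside: the timer arithmetic in this keepalive loop is subtle; the next deadline is computed from the last observed read, not from "now". A compact sketch of the two reset cases (standalone names, not the transport's fields):

    package main

    import (
        "fmt"
        "time"
    )

    // nextKeepaliveSleep mirrors the loop above. If there was read activity
    // since the previous wakeup, the timer is re-armed to fire kpTime after
    // that read; otherwise we sleep min(kpTime, timeoutLeft) so a pending
    // ping-ACK deadline is still honored.
    func nextKeepaliveSleep(lastReadNano, prevNano int64, kpTime, timeoutLeft time.Duration) time.Duration {
        if lastReadNano > prevNano {
            return time.Duration(lastReadNano) + kpTime - time.Duration(time.Now().UnixNano())
        }
        if timeoutLeft < kpTime {
            return timeoutLeft
        }
        return kpTime
    }

    func main() {
        read := time.Now().Add(-3 * time.Second).UnixNano()
        // Read activity 3s ago with a 10s keepalive interval: sleep ~7s.
        fmt.Println(nextKeepaliveSleep(read, read-1, 10*time.Second, 0))
    }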
- sleepDuration := minTime(t.kp.Time, timeoutLeft) - timeoutLeft -= sleepDuration - timer.Reset(sleepDuration) - case <-t.ctx.Done(): - if !timer.Stop() { - <-timer.C - } - return - } - } -} - -func (t *http2Client) Error() <-chan struct{} { - return t.ctx.Done() -} - -func (t *http2Client) GoAway() <-chan struct{} { - return t.goAway -} - -func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric { - s := channelz.SocketInternalMetric{ - StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), - StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), - StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), - MessagesSent: atomic.LoadInt64(&t.czData.msgSent), - MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), - KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), - LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), - LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), - LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), - LocalFlowControlWindow: int64(t.fc.getSize()), - SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.localAddr, - RemoteAddr: t.remoteAddr, - // RemoteName : - } - if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { - s.Security = au.GetSecurityValue() - } - s.RemoteFlowControlWindow = t.getOutFlowWindow() - return &s -} - -func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } - -func (t *http2Client) IncrMsgSent() { - atomic.AddInt64(&t.czData.msgSent, 1) - atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) -} - -func (t *http2Client) IncrMsgRecv() { - atomic.AddInt64(&t.czData.msgRecv, 1) - atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) -} - -func (t *http2Client) getOutFlowWindow() int64 { - resp := make(chan uint32, 1) - timer := time.NewTimer(time.Second) - defer timer.Stop() - t.controlBuf.put(&outFlowControlSizeRequest{resp}) - select { - case sz := <-resp: - return int64(sz) - case <-t.ctxDone: - return -1 - case <-timer.C: - return -2 - } -} - -func (t *http2Client) stateForTesting() transportState { - t.mu.Lock() - defer t.mu.Unlock() - return t.state -} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go deleted file mode 100644 index 3839c1ade2..0000000000 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ /dev/null @@ -1,1459 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package transport - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "math" - "net" - "net/http" - "strconv" - "sync" - "sync/atomic" - "time" - - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcutil" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/internal/syscall" - "google.golang.org/protobuf/proto" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/stats" - "google.golang.org/grpc/status" - "google.golang.org/grpc/tap" -) - -var ( - // ErrIllegalHeaderWrite indicates that setting header is illegal because of - // the stream's state. - ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times") - // ErrHeaderListSizeLimitViolation indicates that the header list size is larger - // than the limit set by peer. - ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer") -) - -// serverConnectionCounter counts the number of connections a server has seen -// (equal to the number of http2Servers created). Must be accessed atomically. -var serverConnectionCounter uint64 - -// http2Server implements the ServerTransport interface with HTTP2. -type http2Server struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - done chan struct{} - conn net.Conn - loopy *loopyWriter - readerDone chan struct{} // sync point to enable testing. - loopyWriterDone chan struct{} - peer peer.Peer - inTapHandle tap.ServerInHandle - framer *framer - // The max number of concurrent streams. - maxStreams uint32 - // controlBuf delivers all the control related tasks (e.g., window - // updates, reset streams, and various settings) to the controller. - controlBuf *controlBuffer - fc *trInFlow - stats []stats.Handler - // Keepalive and max-age parameters for the server. - kp keepalive.ServerParameters - // Keepalive enforcement policy. - kep keepalive.EnforcementPolicy - // The time instance last ping was received. - lastPingAt time.Time - // Number of times the client has violated keepalive ping policy so far. - pingStrikes uint8 - // Flag to signify that number of ping strikes should be reset to 0. - // This is set whenever data or header frames are sent. - // 1 means yes. - resetPingStrikes uint32 // Accessed atomically. - initialWindowSize int32 - bdpEst *bdpEstimator - maxSendHeaderListSize *uint32 - - mu sync.Mutex // guard the following - - // drainEvent is initialized when Drain() is called the first time. After - // which the server writes out the first GoAway(with ID 2^31-1) frame. Then - // an independent goroutine will be launched to later send the second - // GoAway. During this time we don't want to write another first GoAway(with - // ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is - // already initialized since draining is already underway. - drainEvent *grpcsync.Event - state transportState - activeStreams map[uint32]*Stream - // idle is the time instant when the connection went idle. - // This is either the beginning of the connection or when the number of - // RPCs go down to 0. 
- // When the connection is busy, this value is set to 0. - idle time.Time - - // Fields below are for channelz metric collection. - channelzID *channelz.Identifier - czData *channelzData - bufferPool *bufferPool - - connectionID uint64 - - // maxStreamMu guards the maximum stream ID - // This lock may not be taken if mu is already held. - maxStreamMu sync.Mutex - maxStreamID uint32 // max stream ID ever seen - - logger *grpclog.PrefixLogger -} - -// NewServerTransport creates a http2 transport with conn and configuration -// options from config. -// -// It returns a non-nil transport and a nil error on success. On failure, it -// returns a nil transport and a non-nil error. For a special case where the -// underlying conn gets closed before the client preface could be read, it -// returns a nil transport and a nil error. -func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { - var authInfo credentials.AuthInfo - rawConn := conn - if config.Credentials != nil { - var err error - conn, authInfo, err = config.Credentials.ServerHandshake(rawConn) - if err != nil { - // ErrConnDispatched means that the connection was dispatched away - // from gRPC; those connections should be left open. io.EOF means - // the connection was closed before handshaking completed, which can - // happen naturally from probers. Return these errors directly. - if err == credentials.ErrConnDispatched || err == io.EOF { - return nil, err - } - return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) - } - } - writeBufSize := config.WriteBufferSize - readBufSize := config.ReadBufferSize - maxHeaderListSize := defaultServerMaxHeaderListSize - if config.MaxHeaderListSize != nil { - maxHeaderListSize = *config.MaxHeaderListSize - } - framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize) - // Send initial settings as connection preface to client. - isettings := []http2.Setting{{ - ID: http2.SettingMaxFrameSize, - Val: http2MaxFrameLen, - }} - if config.MaxStreams != math.MaxUint32 { - isettings = append(isettings, http2.Setting{ - ID: http2.SettingMaxConcurrentStreams, - Val: config.MaxStreams, - }) - } - dynamicWindow := true - iwz := int32(initialWindowSize) - if config.InitialWindowSize >= defaultWindowSize { - iwz = config.InitialWindowSize - dynamicWindow = false - } - icwz := int32(initialWindowSize) - if config.InitialConnWindowSize >= defaultWindowSize { - icwz = config.InitialConnWindowSize - dynamicWindow = false - } - if iwz != defaultWindowSize { - isettings = append(isettings, http2.Setting{ - ID: http2.SettingInitialWindowSize, - Val: uint32(iwz)}) - } - if config.MaxHeaderListSize != nil { - isettings = append(isettings, http2.Setting{ - ID: http2.SettingMaxHeaderListSize, - Val: *config.MaxHeaderListSize, - }) - } - if config.HeaderTableSize != nil { - isettings = append(isettings, http2.Setting{ - ID: http2.SettingHeaderTableSize, - Val: *config.HeaderTableSize, - }) - } - if err := framer.fr.WriteSettings(isettings...); err != nil { - return nil, connectionErrorf(false, err, "transport: %v", err) - } - // Adjust the connection flow control window if needed. 
- if delta := uint32(icwz - defaultWindowSize); delta > 0 { - if err := framer.fr.WriteWindowUpdate(0, delta); err != nil { - return nil, connectionErrorf(false, err, "transport: %v", err) - } - } - kp := config.KeepaliveParams - if kp.MaxConnectionIdle == 0 { - kp.MaxConnectionIdle = defaultMaxConnectionIdle - } - if kp.MaxConnectionAge == 0 { - kp.MaxConnectionAge = defaultMaxConnectionAge - } - // Add a jitter to MaxConnectionAge. - kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge) - if kp.MaxConnectionAgeGrace == 0 { - kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace - } - if kp.Time == 0 { - kp.Time = defaultServerKeepaliveTime - } - if kp.Timeout == 0 { - kp.Timeout = defaultServerKeepaliveTimeout - } - if kp.Time != infinity { - if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil { - return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) - } - } - kep := config.KeepalivePolicy - if kep.MinTime == 0 { - kep.MinTime = defaultKeepalivePolicyMinTime - } - - done := make(chan struct{}) - peer := peer.Peer{ - Addr: conn.RemoteAddr(), - LocalAddr: conn.LocalAddr(), - AuthInfo: authInfo, - } - t := &http2Server{ - done: done, - conn: conn, - peer: peer, - framer: framer, - readerDone: make(chan struct{}), - loopyWriterDone: make(chan struct{}), - maxStreams: config.MaxStreams, - inTapHandle: config.InTapHandle, - fc: &trInFlow{limit: uint32(icwz)}, - state: reachable, - activeStreams: make(map[uint32]*Stream), - stats: config.StatsHandlers, - kp: kp, - idle: time.Now(), - kep: kep, - initialWindowSize: iwz, - czData: new(channelzData), - bufferPool: newBufferPool(), - } - t.logger = prefixLoggerForServerTransport(t) - - t.controlBuf = newControlBuffer(t.done) - if dynamicWindow { - t.bdpEst = &bdpEstimator{ - bdp: initialWindowSize, - updateFlowControl: t.updateFlowControl, - } - } - t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.peer.Addr, t.peer.LocalAddr)) - if err != nil { - return nil, err - } - - t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) - t.framer.writer.Flush() - - defer func() { - if err != nil { - t.Close(err) - } - }() - - // Check the validity of client preface. - preface := make([]byte, len(clientPreface)) - if _, err := io.ReadFull(t.conn, preface); err != nil { - // In deployments where a gRPC server runs behind a cloud load balancer - // which performs regular TCP level health checks, the connection is - // closed immediately by the latter. Returning io.EOF here allows the - // grpc server implementation to recognize this scenario and suppress - // logging to reduce spam. 
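Aside: NewServerTransport above fills in every unset keepalive knob and deliberately jitters MaxConnectionAge so that a fleet of connections opened together does not expire in lockstep. A sketch of that defaulting; the default durations below are placeholders of my own (the real package-level defaults, some effectively infinite, live elsewhere in this package):

    package main

    import (
        "fmt"
        "math/rand"
        "time"

        "google.golang.org/grpc/keepalive"
    )

    // withDefaults sketches NewServerTransport above: zero-valued fields get
    // defaults and MaxConnectionAge gains up to +/-10% of jitter.
    func withDefaults(kp keepalive.ServerParameters) keepalive.ServerParameters {
        if kp.MaxConnectionIdle == 0 {
            kp.MaxConnectionIdle = 15 * time.Minute // placeholder default
        }
        if kp.MaxConnectionAge == 0 {
            kp.MaxConnectionAge = 30 * time.Minute // placeholder default
        }
        r := int64(kp.MaxConnectionAge / 10)
        kp.MaxConnectionAge += time.Duration(rand.Int63n(2*r+1) - r) // +/-10% jitter
        return kp
    }

    func main() {
        fmt.Println(withDefaults(keepalive.ServerParameters{}).MaxConnectionAge)
    }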
- if err == io.EOF { - return nil, io.EOF - } - return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) - } - if !bytes.Equal(preface, clientPreface) { - return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) - } - - frame, err := t.framer.fr.ReadFrame() - if err == io.EOF || err == io.ErrUnexpectedEOF { - return nil, err - } - if err != nil { - return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) - } - atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) - sf, ok := frame.(*http2.SettingsFrame) - if !ok { - return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) - } - t.handleSettings(sf) - - go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) - t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - err := t.loopy.run() - close(t.loopyWriterDone) - if !isIOError(err) { - // Close the connection if a non-I/O error occurs (for I/O errors - // the reader will also encounter the error and close). Wait 1 - // second before closing the connection, or when the reader is done - // (i.e. the client already closed the connection or a connection - // error occurred). This avoids the potential problem where there - // is unread data on the receive side of the connection, which, if - // closed, would lead to a TCP RST instead of FIN, and the client - // encountering errors. For more info: - // https://github.com/grpc/grpc-go/issues/5358 - select { - case <-t.readerDone: - case <-time.After(time.Second): - } - t.conn.Close() - } - }() - go t.keepalive() - return t, nil -} - -// operateHeaders takes action on the decoded headers. Returns an error if fatal -// error encountered and transport needs to close, otherwise returns nil. -func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { - // Acquire max stream ID lock for entire duration - t.maxStreamMu.Lock() - defer t.maxStreamMu.Unlock() - - streamID := frame.Header().StreamID - - // frame.Truncated is set to true when framer detects that the current header - // list size hits MaxHeaderListSize limit. - if frame.Truncated { - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: http2.ErrCodeFrameSize, - onWrite: func() {}, - }) - return nil - } - - if streamID%2 != 1 || streamID <= t.maxStreamID { - // illegal gRPC stream id. - return fmt.Errorf("received an illegal stream id: %v. 
headers frame: %+v", streamID, frame) - } - t.maxStreamID = streamID - - buf := newRecvBuffer() - s := &Stream{ - id: streamID, - st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, - headerWireLength: int(frame.Header().Length), - } - var ( - // if false, content-type was missing or invalid - isGRPC = false - contentType = "" - mdata = make(metadata.MD, len(frame.Fields)) - httpMethod string - // these are set if an error is encountered while parsing the headers - protocolError bool - headerError *status.Status - - timeoutSet bool - timeout time.Duration - ) - - for _, hf := range frame.Fields { - switch hf.Name { - case "content-type": - contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) - if !validContentType { - contentType = hf.Value - break - } - mdata[hf.Name] = append(mdata[hf.Name], hf.Value) - s.contentSubtype = contentSubtype - isGRPC = true - - case "grpc-accept-encoding": - mdata[hf.Name] = append(mdata[hf.Name], hf.Value) - if hf.Value == "" { - continue - } - compressors := hf.Value - if s.clientAdvertisedCompressors != "" { - compressors = s.clientAdvertisedCompressors + "," + compressors - } - s.clientAdvertisedCompressors = compressors - case "grpc-encoding": - s.recvCompress = hf.Value - case ":method": - httpMethod = hf.Value - case ":path": - s.method = hf.Value - case "grpc-timeout": - timeoutSet = true - var err error - if timeout, err = decodeTimeout(hf.Value); err != nil { - headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err) - } - // "Transports must consider requests containing the Connection header - // as malformed." - A41 - case "connection": - if t.logger.V(logLevel) { - t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec") - } - protocolError = true - default: - if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { - break - } - v, err := decodeMetadataHeader(hf.Name, hf.Value) - if err != nil { - headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err) - t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) - break - } - mdata[hf.Name] = append(mdata[hf.Name], v) - } - } - - // "If multiple Host headers or multiple :authority headers are present, the - // request must be rejected with an HTTP status code 400 as required by Host - // validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM - // with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is a HTTP/2 - // error, this takes precedence over a client not speaking gRPC. 
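Aside: grpc-timeout arrives on the wire as ASCII digits plus a single unit suffix (H, M, S, m, u, n). The decodeTimeout helper this loop calls is defined in http_util.go further down; a trimmed re-statement of it, with the overflow clamping omitted for brevity:

    package main

    import (
        "fmt"
        "strconv"
        "time"
    )

    // parseTimeout condenses decodeTimeout below: at most 8 digits followed by
    // exactly one unit byte.
    func parseTimeout(s string) (time.Duration, error) {
        units := map[byte]time.Duration{
            'H': time.Hour, 'M': time.Minute, 'S': time.Second,
            'm': time.Millisecond, 'u': time.Microsecond, 'n': time.Nanosecond,
        }
        if len(s) < 2 || len(s) > 9 {
            return 0, fmt.Errorf("bad timeout %q", s)
        }
        u, ok := units[s[len(s)-1]]
        if !ok {
            return 0, fmt.Errorf("bad unit in %q", s)
        }
        n, err := strconv.ParseInt(s[:len(s)-1], 10, 64)
        if err != nil {
            return 0, err
        }
        return u * time.Duration(n), nil
    }

    func main() {
        d, _ := parseTimeout("250m")
        fmt.Println(d) // 250ms
    }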
- if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 { - errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"])) - if t.logger.V(logLevel) { - t.logger.Infof("Aborting the stream early: %v", errMsg) - } - t.controlBuf.put(&earlyAbortStream{ - httpStatus: http.StatusBadRequest, - streamID: streamID, - contentSubtype: s.contentSubtype, - status: status.New(codes.Internal, errMsg), - rst: !frame.StreamEnded(), - }) - return nil - } - - if protocolError { - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: http2.ErrCodeProtocol, - onWrite: func() {}, - }) - return nil - } - if !isGRPC { - t.controlBuf.put(&earlyAbortStream{ - httpStatus: http.StatusUnsupportedMediaType, - streamID: streamID, - contentSubtype: s.contentSubtype, - status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType), - rst: !frame.StreamEnded(), - }) - return nil - } - if headerError != nil { - t.controlBuf.put(&earlyAbortStream{ - httpStatus: http.StatusBadRequest, - streamID: streamID, - contentSubtype: s.contentSubtype, - status: headerError, - rst: !frame.StreamEnded(), - }) - return nil - } - - // "If :authority is missing, Host must be renamed to :authority." - A41 - if len(mdata[":authority"]) == 0 { - // No-op if host isn't present, no eventual :authority header is a valid - // RPC. - if host, ok := mdata["host"]; ok { - mdata[":authority"] = host - delete(mdata, "host") - } - } else { - // "If :authority is present, Host must be discarded" - A41 - delete(mdata, "host") - } - - if frame.StreamEnded() { - // s is just created by the caller. No lock needed. - s.state = streamReadDone - } - if timeoutSet { - s.ctx, s.cancel = context.WithTimeout(ctx, timeout) - } else { - s.ctx, s.cancel = context.WithCancel(ctx) - } - - // Attach the received metadata to the context. 
- if len(mdata) > 0 { - s.ctx = metadata.NewIncomingContext(s.ctx, mdata) - if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 { - s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1])) - } - if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 { - s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1])) - } - } - t.mu.Lock() - if t.state != reachable { - t.mu.Unlock() - s.cancel() - return nil - } - if uint32(len(t.activeStreams)) >= t.maxStreams { - t.mu.Unlock() - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: http2.ErrCodeRefusedStream, - onWrite: func() {}, - }) - s.cancel() - return nil - } - if httpMethod != http.MethodPost { - t.mu.Unlock() - errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod) - if t.logger.V(logLevel) { - t.logger.Infof("Aborting the stream early: %v", errMsg) - } - t.controlBuf.put(&earlyAbortStream{ - httpStatus: 405, - streamID: streamID, - contentSubtype: s.contentSubtype, - status: status.New(codes.Internal, errMsg), - rst: !frame.StreamEnded(), - }) - s.cancel() - return nil - } - if t.inTapHandle != nil { - var err error - if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil { - t.mu.Unlock() - if t.logger.V(logLevel) { - t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) - } - stat, ok := status.FromError(err) - if !ok { - stat = status.New(codes.PermissionDenied, err.Error()) - } - t.controlBuf.put(&earlyAbortStream{ - httpStatus: 200, - streamID: s.id, - contentSubtype: s.contentSubtype, - status: stat, - rst: !frame.StreamEnded(), - }) - return nil - } - } - t.activeStreams[streamID] = s - if len(t.activeStreams) == 1 { - t.idle = time.Time{} - } - t.mu.Unlock() - if channelz.IsOn() { - atomic.AddInt64(&t.czData.streamsStarted, 1) - atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) - } - s.requestRead = func(n int) { - t.adjustWindow(s, uint32(n)) - } - s.ctxDone = s.ctx.Done() - s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) - s.trReader = &transportReader{ - reader: &recvBufferReader{ - ctx: s.ctx, - ctxDone: s.ctxDone, - recv: s.buf, - freeBuffer: t.bufferPool.put, - }, - windowHandler: func(n int) { - t.updateWindow(s, uint32(n)) - }, - } - // Register the stream with loopy. - t.controlBuf.put(®isterStream{ - streamID: s.id, - wq: s.wq, - }) - handle(s) - return nil -} - -// HandleStreams receives incoming streams using the given handler. This is -// typically run in a separate goroutine. -// traceCtx attaches trace to ctx and returns the new context. 
-func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { - defer func() { - close(t.readerDone) - <-t.loopyWriterDone - }() - for { - t.controlBuf.throttle() - frame, err := t.framer.fr.ReadFrame() - atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) - if err != nil { - if se, ok := err.(http2.StreamError); ok { - if t.logger.V(logLevel) { - t.logger.Warningf("Encountered http2.StreamError: %v", se) - } - t.mu.Lock() - s := t.activeStreams[se.StreamID] - t.mu.Unlock() - if s != nil { - t.closeStream(s, true, se.Code, false) - } else { - t.controlBuf.put(&cleanupStream{ - streamID: se.StreamID, - rst: true, - rstCode: se.Code, - onWrite: func() {}, - }) - } - continue - } - t.Close(err) - return - } - switch frame := frame.(type) { - case *http2.MetaHeadersFrame: - if err := t.operateHeaders(ctx, frame, handle); err != nil { - t.Close(err) - break - } - case *http2.DataFrame: - t.handleData(frame) - case *http2.RSTStreamFrame: - t.handleRSTStream(frame) - case *http2.SettingsFrame: - t.handleSettings(frame) - case *http2.PingFrame: - t.handlePing(frame) - case *http2.WindowUpdateFrame: - t.handleWindowUpdate(frame) - case *http2.GoAwayFrame: - // TODO: Handle GoAway from the client appropriately. - default: - if t.logger.V(logLevel) { - t.logger.Infof("Received unsupported frame type %T", frame) - } - } - } -} - -func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { - t.mu.Lock() - defer t.mu.Unlock() - if t.activeStreams == nil { - // The transport is closing. - return nil, false - } - s, ok := t.activeStreams[f.Header().StreamID] - if !ok { - // The stream is already done. - return nil, false - } - return s, true -} - -// adjustWindow sends out extra window update over the initial window size -// of stream if the application is requesting data larger in size than -// the window. -func (t *http2Server) adjustWindow(s *Stream, n uint32) { - if w := s.fc.maybeAdjust(n); w > 0 { - t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) - } - -} - -// updateWindow adjusts the inbound quota for the stream and the transport. -// Window updates will deliver to the controller for sending when -// the cumulative quota exceeds the corresponding threshold. -func (t *http2Server) updateWindow(s *Stream, n uint32) { - if w := s.fc.onRead(n); w > 0 { - t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, - increment: w, - }) - } -} - -// updateFlowControl updates the incoming flow control windows -// for the transport and the stream based on the current bdp -// estimation. -func (t *http2Server) updateFlowControl(n uint32) { - t.mu.Lock() - for _, s := range t.activeStreams { - s.fc.newLimit(n) - } - t.initialWindowSize = int32(n) - t.mu.Unlock() - t.controlBuf.put(&outgoingWindowUpdate{ - streamID: 0, - increment: t.fc.newLimit(n), - }) - t.controlBuf.put(&outgoingSettings{ - ss: []http2.Setting{ - { - ID: http2.SettingInitialWindowSize, - Val: n, - }, - }, - }) - -} - -func (t *http2Server) handleData(f *http2.DataFrame) { - size := f.Header().Length - var sendBDPPing bool - if t.bdpEst != nil { - sendBDPPing = t.bdpEst.add(size) - } - // Decouple connection's flow control from application's read. - // An update on connection's flow control should not depend on - // whether user application has read the data or not. Such a - // restriction is already imposed on the stream's flow control, - // and therefore the sender will be blocked anyways. 
- // Decoupling the connection flow control will prevent other - // active(fast) streams from starving in presence of slow or - // inactive streams. - if w := t.fc.onData(size); w > 0 { - t.controlBuf.put(&outgoingWindowUpdate{ - streamID: 0, - increment: w, - }) - } - if sendBDPPing { - // Avoid excessive ping detection (e.g. in an L7 proxy) - // by sending a window update prior to the BDP ping. - if w := t.fc.reset(); w > 0 { - t.controlBuf.put(&outgoingWindowUpdate{ - streamID: 0, - increment: w, - }) - } - t.controlBuf.put(bdpPing) - } - // Select the right stream to dispatch. - s, ok := t.getStream(f) - if !ok { - return - } - if s.getState() == streamReadDone { - t.closeStream(s, true, http2.ErrCodeStreamClosed, false) - return - } - if size > 0 { - if err := s.fc.onData(size); err != nil { - t.closeStream(s, true, http2.ErrCodeFlowControl, false) - return - } - if f.Header().Flags.Has(http2.FlagDataPadded) { - if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { - t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) - } - } - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? - if len(f.Data()) > 0 { - buffer := t.bufferPool.get() - buffer.Reset() - buffer.Write(f.Data()) - s.write(recvMsg{buffer: buffer}) - } - } - if f.StreamEnded() { - // Received the end of stream from the client. - s.compareAndSwapState(streamActive, streamReadDone) - s.write(recvMsg{err: io.EOF}) - } -} - -func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { - // If the stream is not deleted from the transport's active streams map, then do a regular close stream. - if s, ok := t.getStream(f); ok { - t.closeStream(s, false, 0, false) - return - } - // If the stream is already deleted from the active streams map, then put a cleanupStream item into controlbuf to delete the stream from loopy writer's established streams map. - t.controlBuf.put(&cleanupStream{ - streamID: f.Header().StreamID, - rst: false, - rstCode: 0, - onWrite: func() {}, - }) -} - -func (t *http2Server) handleSettings(f *http2.SettingsFrame) { - if f.IsAck() { - return - } - var ss []http2.Setting - var updateFuncs []func() - f.ForeachSetting(func(s http2.Setting) error { - switch s.ID { - case http2.SettingMaxHeaderListSize: - updateFuncs = append(updateFuncs, func() { - t.maxSendHeaderListSize = new(uint32) - *t.maxSendHeaderListSize = s.Val - }) - default: - ss = append(ss, s) - } - return nil - }) - t.controlBuf.executeAndPut(func(any) bool { - for _, f := range updateFuncs { - f() - } - return true - }, &incomingSettings{ - ss: ss, - }) -} - -const ( - maxPingStrikes = 2 - defaultPingTimeout = 2 * time.Hour -) - -func (t *http2Server) handlePing(f *http2.PingFrame) { - if f.IsAck() { - if f.Data == goAwayPing.data && t.drainEvent != nil { - t.drainEvent.Fire() - return - } - // Maybe it's a BDP ping. - if t.bdpEst != nil { - t.bdpEst.calculate(f.Data) - } - return - } - pingAck := &ping{ack: true} - copy(pingAck.data[:], f.Data[:]) - t.controlBuf.put(pingAck) - - now := time.Now() - defer func() { - t.lastPingAt = now - }() - // A reset ping strikes means that we don't need to check for policy - // violation for this ping and the pingStrikes counter should be set - // to 0. 
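Aside: one easy-to-miss detail in handleData above is that DATA padding counts against flow control even though it never reaches the application, so the padding bytes are credited back to the stream window immediately. The arithmetic in isolation (helper invented here):

    package main

    import "fmt"

    // paddingCredit returns the flow-control bytes to hand back right away for
    // a padded DATA frame: the frame's wire length minus its actual payload,
    // matching s.fc.onRead(size - uint32(len(f.Data()))) above.
    func paddingCredit(frameLen uint32, payload []byte) uint32 {
        return frameLen - uint32(len(payload))
    }

    func main() {
        // 1000 bytes on the wire, 900 bytes of payload: 100 bytes of padding.
        fmt.Println(paddingCredit(1000, make([]byte, 900))) // 100
    }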
- if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) { - t.pingStrikes = 0 - return - } - t.mu.Lock() - ns := len(t.activeStreams) - t.mu.Unlock() - if ns < 1 && !t.kep.PermitWithoutStream { - // Keepalive shouldn't be active thus, this new ping should - // have come after at least defaultPingTimeout. - if t.lastPingAt.Add(defaultPingTimeout).After(now) { - t.pingStrikes++ - } - } else { - // Check if keepalive policy is respected. - if t.lastPingAt.Add(t.kep.MinTime).After(now) { - t.pingStrikes++ - } - } - - if t.pingStrikes > maxPingStrikes { - // Send goaway and close the connection. - t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")}) - } -} - -func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { - t.controlBuf.put(&incomingWindowUpdate{ - streamID: f.Header().StreamID, - increment: f.Increment, - }) -} - -func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField { - for k, vv := range md { - if isReservedHeader(k) { - // Clients don't tolerate reading restricted headers after some non restricted ones were sent. - continue - } - for _, v := range vv { - headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) - } - } - return headerFields -} - -func (t *http2Server) checkForHeaderListSize(it any) bool { - if t.maxSendHeaderListSize == nil { - return true - } - hdrFrame := it.(*headerFrame) - var sz int64 - for _, f := range hdrFrame.hf { - if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { - if t.logger.V(logLevel) { - t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) - } - return false - } - } - return true -} - -func (t *http2Server) streamContextErr(s *Stream) error { - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) -} - -// WriteHeader sends the header metadata md back to the client. -func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { - s.hdrMu.Lock() - defer s.hdrMu.Unlock() - if s.getState() == streamDone { - return t.streamContextErr(s) - } - - if s.updateHeaderSent() { - return ErrIllegalHeaderWrite - } - - if md.Len() > 0 { - if s.header.Len() > 0 { - s.header = metadata.Join(s.header, md) - } else { - s.header = md - } - } - if err := t.writeHeaderLocked(s); err != nil { - switch e := err.(type) { - case ConnectionError: - return status.Error(codes.Unavailable, e.Desc) - default: - return status.Convert(err).Err() - } - } - return nil -} - -func (t *http2Server) setResetPingStrikes() { - atomic.StoreUint32(&t.resetPingStrikes, 1) -} - -func (t *http2Server) writeHeaderLocked(s *Stream) error { - // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields - // first and create a slice of that exact size. - headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. 
- headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) - headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)}) - if s.sendCompress != "" { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) - } - headerFields = appendHeaderFieldsFromMD(headerFields, s.header) - success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{ - streamID: s.id, - hf: headerFields, - endStream: false, - onWrite: t.setResetPingStrikes, - }) - if !success { - if err != nil { - return err - } - t.closeStream(s, true, http2.ErrCodeInternal, false) - return ErrHeaderListSizeLimitViolation - } - for _, sh := range t.stats { - // Note: Headers are compressed with hpack after this call returns. - // No WireLength field is set here. - outHeader := &stats.OutHeader{ - Header: s.header.Copy(), - Compression: s.sendCompress, - } - sh.HandleRPC(s.Context(), outHeader) - } - return nil -} - -// WriteStatus sends stream status to the client and terminates the stream. -// There is no further I/O operations being able to perform on this stream. -// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early -// OK is adopted. -func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { - s.hdrMu.Lock() - defer s.hdrMu.Unlock() - - if s.getState() == streamDone { - return nil - } - - // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields - // first and create a slice of that exact size. - headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. - if !s.updateHeaderSent() { // No headers have been sent. - if len(s.header) > 0 { // Send a separate header frame. - if err := t.writeHeaderLocked(s); err != nil { - return err - } - } else { // Send a trailer only response. - headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) - headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)}) - } - } - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) - - if p := st.Proto(); p != nil && len(p.Details) > 0 { - // Do not use the user's grpc-status-details-bin (if present) if we are - // even attempting to set our own. - delete(s.trailer, grpcStatusDetailsBinHeader) - stBytes, err := proto.Marshal(p) - if err != nil { - // TODO: return error instead, when callers are able to handle it. - t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) - } else { - headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)}) - } - } - - // Attach the trailer metadata. - headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer) - trailingHeader := &headerFrame{ - streamID: s.id, - hf: headerFields, - endStream: true, - onWrite: t.setResetPingStrikes, - } - - success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) - if !success { - if err != nil { - return err - } - t.closeStream(s, true, http2.ErrCodeInternal, false) - return ErrHeaderListSizeLimitViolation - } - // Send a RST_STREAM after the trailers if the client has not already half-closed. 
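Aside: the trailer block WriteStatus assembles above always ends with grpc-status and grpc-message, plus grpc-status-details-bin when the status carries detail protos. A reduced sketch of the field assembly for the trailers-only case (the percent- and binary-encoding helpers from http_util.go are stubbed out):

    package main

    import (
        "fmt"
        "strconv"

        "golang.org/x/net/http2/hpack"
        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // trailerFields sketches the trailers-only branch of WriteStatus above:
    // pseudo-headers first, then the gRPC status pair. Details are omitted.
    func trailerFields(st *status.Status) []hpack.HeaderField {
        return []hpack.HeaderField{
            {Name: ":status", Value: "200"},
            {Name: "content-type", Value: "application/grpc"},
            {Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))},
            {Name: "grpc-message", Value: st.Message()}, // real code percent-encodes this
        }
    }

    func main() {
        for _, f := range trailerFields(status.New(codes.NotFound, "no such thing")) {
            fmt.Printf("%s: %s\n", f.Name, f.Value)
        }
    }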
- rst := s.getState() == streamActive - t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) - for _, sh := range t.stats { - // Note: The trailer fields are compressed with hpack after this call returns. - // No WireLength field is set here. - sh.HandleRPC(s.Context(), &stats.OutTrailer{ - Trailer: s.trailer.Copy(), - }) - } - return nil -} - -// Write converts the data into HTTP2 data frame and sends it out. Non-nil error -// is returns if it fails (e.g., framing error, transport error). -func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { - if !s.isHeaderSent() { // Headers haven't been written yet. - if err := t.WriteHeader(s, nil); err != nil { - return err - } - } else { - // Writing headers checks for this condition. - if s.getState() == streamDone { - return t.streamContextErr(s) - } - } - df := &dataFrame{ - streamID: s.id, - h: hdr, - d: data, - onEachWrite: t.setResetPingStrikes, - } - if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { - return t.streamContextErr(s) - } - return t.controlBuf.put(df) -} - -// keepalive running in a separate goroutine does the following: -// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle. -// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge. -// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge. -// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection -// after an additional duration of keepalive.Timeout. -func (t *http2Server) keepalive() { - p := &ping{} - // True iff a ping has been sent, and no data has been received since then. - outstandingPing := false - // Amount of time remaining before which we should receive an ACK for the - // last sent ping. - kpTimeoutLeft := time.Duration(0) - // Records the last value of t.lastRead before we go block on the timer. - // This is required to check for read activity since then. - prevNano := time.Now().UnixNano() - // Initialize the different timers to their default values. - idleTimer := time.NewTimer(t.kp.MaxConnectionIdle) - ageTimer := time.NewTimer(t.kp.MaxConnectionAge) - kpTimer := time.NewTimer(t.kp.Time) - defer func() { - // We need to drain the underlying channel in these timers after a call - // to Stop(), only if we are interested in resetting them. Clearly we - // are not interested in resetting them here. - idleTimer.Stop() - ageTimer.Stop() - kpTimer.Stop() - }() - - for { - select { - case <-idleTimer.C: - t.mu.Lock() - idle := t.idle - if idle.IsZero() { // The connection is non-idle. - t.mu.Unlock() - idleTimer.Reset(t.kp.MaxConnectionIdle) - continue - } - val := t.kp.MaxConnectionIdle - time.Since(idle) - t.mu.Unlock() - if val <= 0 { - // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. - // Gracefully close the connection. - t.Drain("max_idle") - return - } - idleTimer.Reset(val) - case <-ageTimer.C: - t.Drain("max_age") - ageTimer.Reset(t.kp.MaxConnectionAgeGrace) - select { - case <-ageTimer.C: - // Close the connection after grace period. 
- if t.logger.V(logLevel) { - t.logger.Infof("Closing server transport due to maximum connection age") - } - t.controlBuf.put(closeConnection{}) - case <-t.done: - } - return - case <-kpTimer.C: - lastRead := atomic.LoadInt64(&t.lastRead) - if lastRead > prevNano { - // There has been read activity since the last time we were - // here. Setup the timer to fire at kp.Time seconds from - // lastRead time and continue. - outstandingPing = false - kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) - prevNano = lastRead - continue - } - if outstandingPing && kpTimeoutLeft <= 0 { - t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time)) - return - } - if !outstandingPing { - if channelz.IsOn() { - atomic.AddInt64(&t.czData.kpCount, 1) - } - t.controlBuf.put(p) - kpTimeoutLeft = t.kp.Timeout - outstandingPing = true - } - // The amount of time to sleep here is the minimum of kp.Time and - // timeoutLeft. This will ensure that we wait only for kp.Time - // before sending out the next ping (for cases where the ping is - // acked). - sleepDuration := minTime(t.kp.Time, kpTimeoutLeft) - kpTimeoutLeft -= sleepDuration - kpTimer.Reset(sleepDuration) - case <-t.done: - return - } - } -} - -// Close starts shutting down the http2Server transport. -// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This -// could cause some resource issue. Revisit this later. -func (t *http2Server) Close(err error) { - t.mu.Lock() - if t.state == closing { - t.mu.Unlock() - return - } - if t.logger.V(logLevel) { - t.logger.Infof("Closing: %v", err) - } - t.state = closing - streams := t.activeStreams - t.activeStreams = nil - t.mu.Unlock() - t.controlBuf.finish() - close(t.done) - if err := t.conn.Close(); err != nil && t.logger.V(logLevel) { - t.logger.Infof("Error closing underlying net.Conn during Close: %v", err) - } - channelz.RemoveEntry(t.channelzID) - // Cancel all active streams. - for _, s := range streams { - s.cancel() - } -} - -// deleteStream deletes the stream s from transport's active streams. -func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { - - t.mu.Lock() - if _, ok := t.activeStreams[s.id]; ok { - delete(t.activeStreams, s.id) - if len(t.activeStreams) == 0 { - t.idle = time.Now() - } - } - t.mu.Unlock() - - if channelz.IsOn() { - if eosReceived { - atomic.AddInt64(&t.czData.streamsSucceeded, 1) - } else { - atomic.AddInt64(&t.czData.streamsFailed, 1) - } - } -} - -// finishStream closes the stream and puts the trailing headerFrame into controlbuf. -func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), cancel needs to be - // called to interrupt the potential blocking on other goroutines. - s.cancel() - - oldState := s.swapState(streamDone) - if oldState == streamDone { - // If the stream was already done, return. - return - } - - hdr.cleanup = &cleanupStream{ - streamID: s.id, - rst: rst, - rstCode: rstCode, - onWrite: func() { - t.deleteStream(s, eosReceived) - }, - } - t.controlBuf.put(hdr) -} - -// closeStream clears the footprint of a stream when the stream is not needed any more. 
-func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), cancel needs to be - // called to interrupt the potential blocking on other goroutines. - s.cancel() - - s.swapState(streamDone) - t.deleteStream(s, eosReceived) - - t.controlBuf.put(&cleanupStream{ - streamID: s.id, - rst: rst, - rstCode: rstCode, - onWrite: func() {}, - }) -} - -func (t *http2Server) Drain(debugData string) { - t.mu.Lock() - defer t.mu.Unlock() - if t.drainEvent != nil { - return - } - t.drainEvent = grpcsync.NewEvent() - t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true}) -} - -var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} - -// Handles outgoing GoAway and returns true if loopy needs to put itself -// in draining mode. -func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { - t.maxStreamMu.Lock() - t.mu.Lock() - if t.state == closing { // TODO(mmukhi): This seems unnecessary. - t.mu.Unlock() - t.maxStreamMu.Unlock() - // The transport is closing. - return false, ErrConnClosing - } - if !g.headsUp { - // Stop accepting more streams now. - t.state = draining - sid := t.maxStreamID - retErr := g.closeConn - if len(t.activeStreams) == 0 { - retErr = errors.New("second GOAWAY written and no active streams left to process") - } - t.mu.Unlock() - t.maxStreamMu.Unlock() - if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { - return false, err - } - t.framer.writer.Flush() - if retErr != nil { - return false, retErr - } - return true, nil - } - t.mu.Unlock() - t.maxStreamMu.Unlock() - // For a graceful close, send out a GoAway with stream ID of MaxUInt32, - // Follow that with a ping and wait for the ack to come back or a timer - // to expire. During this time accept new streams since they might have - // originated before the GoAway reaches the client. - // After getting the ack or timer expiration send out another GoAway this - // time with an ID of the max stream server intends to process. 
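Aside: outgoingGoAwayHandler below performs the standard HTTP/2 graceful-shutdown dance: a "heads up" GOAWAY with stream ID MaxUint32 so no in-flight stream is torn down, a ping to bound the round trip, then the real GOAWAY pinned to the highest stream the server will finish. Sketched against a bare framer, with the ack/timeout wait elided:

    package main

    import (
        "bytes"
        "math"

        "golang.org/x/net/http2"
    )

    // gracefulGoAway sketches the sequence above: advertise MaxUint32 first,
    // ping, and (after the ping ack or a timer, elided here) send the final
    // GOAWAY with the last stream ID the server intends to process.
    func gracefulGoAway(fr *http2.Framer, lastProcessed uint32) error {
        if err := fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, nil); err != nil {
            return err
        }
        if err := fr.WritePing(false, [8]byte{1, 6, 1, 8, 0, 3, 3, 9}); err != nil {
            return err
        }
        // ... wait for the ping ack or a timeout, then:
        return fr.WriteGoAway(lastProcessed, http2.ErrCodeNo, nil)
    }

    func main() {
        var buf bytes.Buffer
        _ = gracefulGoAway(http2.NewFramer(&buf, nil), 41)
    }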
- if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil { - return false, err - } - if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { - return false, err - } - go func() { - timer := time.NewTimer(5 * time.Second) - defer timer.Stop() - select { - case <-t.drainEvent.Done(): - case <-timer.C: - case <-t.done: - return - } - t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData}) - }() - return false, nil -} - -func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { - s := channelz.SocketInternalMetric{ - StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), - StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), - StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), - MessagesSent: atomic.LoadInt64(&t.czData.msgSent), - MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), - KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), - LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), - LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), - LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), - LocalFlowControlWindow: int64(t.fc.getSize()), - SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.peer.LocalAddr, - RemoteAddr: t.peer.Addr, - // RemoteName : - } - if au, ok := t.peer.AuthInfo.(credentials.ChannelzSecurityInfo); ok { - s.Security = au.GetSecurityValue() - } - s.RemoteFlowControlWindow = t.getOutFlowWindow() - return &s -} - -func (t *http2Server) IncrMsgSent() { - atomic.AddInt64(&t.czData.msgSent, 1) - atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) -} - -func (t *http2Server) IncrMsgRecv() { - atomic.AddInt64(&t.czData.msgRecv, 1) - atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) -} - -func (t *http2Server) getOutFlowWindow() int64 { - resp := make(chan uint32, 1) - timer := time.NewTimer(time.Second) - defer timer.Stop() - t.controlBuf.put(&outFlowControlSizeRequest{resp}) - select { - case sz := <-resp: - return int64(sz) - case <-t.done: - return -1 - case <-timer.C: - return -2 - } -} - -// Peer returns the peer of the transport. -func (t *http2Server) Peer() *peer.Peer { - return &peer.Peer{ - Addr: t.peer.Addr, - LocalAddr: t.peer.LocalAddr, - AuthInfo: t.peer.AuthInfo, // Can be nil - } -} - -func getJitter(v time.Duration) time.Duration { - if v == infinity { - return 0 - } - // Generate a jitter between +/- 10% of the value. - r := int64(v / 10) - j := grpcrand.Int63n(2*r) - r - return time.Duration(j) -} - -type connectionKey struct{} - -// GetConnection gets the connection from the context. -func GetConnection(ctx context.Context) net.Conn { - conn, _ := ctx.Value(connectionKey{}).(net.Conn) - return conn -} - -// SetConnection adds the connection to the context to be able to get -// information about the destination ip and port for an incoming RPC. This also -// allows any unary or streaming interceptors to see the connection. -func SetConnection(ctx context.Context, conn net.Conn) context.Context { - return context.WithValue(ctx, connectionKey{}, conn) -} diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go deleted file mode 100644 index dc29d590e9..0000000000 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ /dev/null @@ -1,465 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package transport - -import ( - "bufio" - "encoding/base64" - "errors" - "fmt" - "io" - "math" - "net" - "net/http" - "net/url" - "strconv" - "strings" - "sync" - "time" - "unicode/utf8" - - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" - "google.golang.org/grpc/codes" -) - -const ( - // http2MaxFrameLen specifies the max length of a HTTP2 frame. - http2MaxFrameLen = 16384 // 16KB frame - // https://httpwg.org/specs/rfc7540.html#SettingValues - http2InitHeaderTableSize = 4096 -) - -var ( - clientPreface = []byte(http2.ClientPreface) - http2ErrConvTab = map[http2.ErrCode]codes.Code{ - http2.ErrCodeNo: codes.Internal, - http2.ErrCodeProtocol: codes.Internal, - http2.ErrCodeInternal: codes.Internal, - http2.ErrCodeFlowControl: codes.ResourceExhausted, - http2.ErrCodeSettingsTimeout: codes.Internal, - http2.ErrCodeStreamClosed: codes.Internal, - http2.ErrCodeFrameSize: codes.Internal, - http2.ErrCodeRefusedStream: codes.Unavailable, - http2.ErrCodeCancel: codes.Canceled, - http2.ErrCodeCompression: codes.Internal, - http2.ErrCodeConnect: codes.Internal, - http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, - http2.ErrCodeInadequateSecurity: codes.PermissionDenied, - http2.ErrCodeHTTP11Required: codes.Internal, - } - // HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table. - HTTPStatusConvTab = map[int]codes.Code{ - // 400 Bad Request - INTERNAL. - http.StatusBadRequest: codes.Internal, - // 401 Unauthorized - UNAUTHENTICATED. - http.StatusUnauthorized: codes.Unauthenticated, - // 403 Forbidden - PERMISSION_DENIED. - http.StatusForbidden: codes.PermissionDenied, - // 404 Not Found - UNIMPLEMENTED. - http.StatusNotFound: codes.Unimplemented, - // 429 Too Many Requests - UNAVAILABLE. - http.StatusTooManyRequests: codes.Unavailable, - // 502 Bad Gateway - UNAVAILABLE. - http.StatusBadGateway: codes.Unavailable, - // 503 Service Unavailable - UNAVAILABLE. - http.StatusServiceUnavailable: codes.Unavailable, - // 504 Gateway timeout - UNAVAILABLE. - http.StatusGatewayTimeout: codes.Unavailable, - } -) - -var grpcStatusDetailsBinHeader = "grpc-status-details-bin" - -// isReservedHeader checks whether hdr belongs to HTTP2 headers -// reserved by gRPC protocol. Any other headers are classified as the -// user-specified metadata. -func isReservedHeader(hdr string) bool { - if hdr != "" && hdr[0] == ':' { - return true - } - switch hdr { - case "content-type", - "user-agent", - "grpc-message-type", - "grpc-encoding", - "grpc-message", - "grpc-status", - "grpc-timeout", - // Intentionally exclude grpc-previous-rpc-attempts and - // grpc-retry-pushback-ms, which are "reserved", but their API - // intentionally works via metadata. - "te": - return true - default: - return false - } -} - -// isWhitelistedHeader checks whether hdr should be propagated into metadata -// visible to users, even though it is classified as "reserved", above. 
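Aside: http2ErrConvTab above is what the client-side reader consults when the framer surfaces an http2.StreamError, turning a wire-level RST code into a gRPC status code. Usage is just a map lookup (table abbreviated here; the vendored one covers every http2.ErrCode):

    package main

    import (
        "fmt"

        "golang.org/x/net/http2"
        "google.golang.org/grpc/codes"
    )

    func main() {
        // Abbreviated copy of http2ErrConvTab above.
        conv := map[http2.ErrCode]codes.Code{
            http2.ErrCodeRefusedStream:   codes.Unavailable,
            http2.ErrCodeCancel:          codes.Canceled,
            http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted,
        }
        fmt.Println(conv[http2.ErrCodeRefusedStream]) // Unavailable
    }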
-func isWhitelistedHeader(hdr string) bool { - switch hdr { - case ":authority", "user-agent": - return true - default: - return false - } -} - -const binHdrSuffix = "-bin" - -func encodeBinHeader(v []byte) string { - return base64.RawStdEncoding.EncodeToString(v) -} - -func decodeBinHeader(v string) ([]byte, error) { - if len(v)%4 == 0 { - // Input was padded, or padding was not necessary. - return base64.StdEncoding.DecodeString(v) - } - return base64.RawStdEncoding.DecodeString(v) -} - -func encodeMetadataHeader(k, v string) string { - if strings.HasSuffix(k, binHdrSuffix) { - return encodeBinHeader(([]byte)(v)) - } - return v -} - -func decodeMetadataHeader(k, v string) (string, error) { - if strings.HasSuffix(k, binHdrSuffix) { - b, err := decodeBinHeader(v) - return string(b), err - } - return v, nil -} - -type timeoutUnit uint8 - -const ( - hour timeoutUnit = 'H' - minute timeoutUnit = 'M' - second timeoutUnit = 'S' - millisecond timeoutUnit = 'm' - microsecond timeoutUnit = 'u' - nanosecond timeoutUnit = 'n' -) - -func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { - switch u { - case hour: - return time.Hour, true - case minute: - return time.Minute, true - case second: - return time.Second, true - case millisecond: - return time.Millisecond, true - case microsecond: - return time.Microsecond, true - case nanosecond: - return time.Nanosecond, true - default: - } - return -} - -func decodeTimeout(s string) (time.Duration, error) { - size := len(s) - if size < 2 { - return 0, fmt.Errorf("transport: timeout string is too short: %q", s) - } - if size > 9 { - // Spec allows for 8 digits plus the unit. - return 0, fmt.Errorf("transport: timeout string is too long: %q", s) - } - unit := timeoutUnit(s[size-1]) - d, ok := timeoutUnitToDuration(unit) - if !ok { - return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) - } - t, err := strconv.ParseInt(s[:size-1], 10, 64) - if err != nil { - return 0, err - } - const maxHours = math.MaxInt64 / int64(time.Hour) - if d == time.Hour && t > maxHours { - // This timeout would overflow math.MaxInt64; clamp it. - return time.Duration(math.MaxInt64), nil - } - return d * time.Duration(t), nil -} - -const ( - spaceByte = ' ' - tildeByte = '~' - percentByte = '%' -) - -// encodeGrpcMessage is used to encode status code in header field -// "grpc-message". It does percent encoding and also replaces invalid utf-8 -// characters with Unicode replacement character. -// -// It checks to see if each individual byte in msg is an allowable byte, and -// then either percent encoding or passing it through. When percent encoding, -// the byte is converted into hexadecimal notation with a '%' prepended. -func encodeGrpcMessage(msg string) string { - if msg == "" { - return "" - } - lenMsg := len(msg) - for i := 0; i < lenMsg; i++ { - c := msg[i] - if !(c >= spaceByte && c <= tildeByte && c != percentByte) { - return encodeGrpcMessageUnchecked(msg) - } - } - return msg -} - -func encodeGrpcMessageUnchecked(msg string) string { - var sb strings.Builder - for len(msg) > 0 { - r, size := utf8.DecodeRuneInString(msg) - for _, b := range []byte(string(r)) { - if size > 1 { - // If size > 1, r is not ascii. Always do percent encoding. - fmt.Fprintf(&sb, "%%%02X", b) - continue - } - - // The for loop is necessary even if size == 1. r could be - // utf8.RuneError. - // - // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". 
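decodeBinHeader above accepts both padded and unpadded base64 because "-bin" metadata values are written without padding but may arrive either way. A short round-trip sketch using only the standard library:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// "-bin" metadata values travel base64-encoded without padding,
	// as encodeBinHeader does above.
	raw := []byte{0x01, 0x02, 0xFF}
	wire := base64.RawStdEncoding.EncodeToString(raw)
	fmt.Println(wire) // AQL/

	// decodeBinHeader picks a decoder by checking len(v)%4; unpadded
	// input like this goes through RawStdEncoding.
	got, err := base64.RawStdEncoding.DecodeString(wire)
	fmt.Println(got, err) // [1 2 255] <nil>
}
```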
- if b >= spaceByte && b <= tildeByte && b != percentByte { - sb.WriteByte(b) - } else { - fmt.Fprintf(&sb, "%%%02X", b) - } - } - msg = msg[size:] - } - return sb.String() -} - -// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. -func decodeGrpcMessage(msg string) string { - if msg == "" { - return "" - } - lenMsg := len(msg) - for i := 0; i < lenMsg; i++ { - if msg[i] == percentByte && i+2 < lenMsg { - return decodeGrpcMessageUnchecked(msg) - } - } - return msg -} - -func decodeGrpcMessageUnchecked(msg string) string { - var sb strings.Builder - lenMsg := len(msg) - for i := 0; i < lenMsg; i++ { - c := msg[i] - if c == percentByte && i+2 < lenMsg { - parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) - if err != nil { - sb.WriteByte(c) - } else { - sb.WriteByte(byte(parsed)) - i += 2 - } - } else { - sb.WriteByte(c) - } - } - return sb.String() -} - -type bufWriter struct { - pool *sync.Pool - buf []byte - offset int - batchSize int - conn net.Conn - err error -} - -func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { - w := &bufWriter{ - batchSize: batchSize, - conn: conn, - pool: pool, - } - // this indicates that we should use non shared buf - if pool == nil { - w.buf = make([]byte, batchSize) - } - return w -} - -func (w *bufWriter) Write(b []byte) (n int, err error) { - if w.err != nil { - return 0, w.err - } - if w.batchSize == 0 { // Buffer has been disabled. - n, err = w.conn.Write(b) - return n, toIOError(err) - } - if w.buf == nil { - b := w.pool.Get().(*[]byte) - w.buf = *b - } - for len(b) > 0 { - nn := copy(w.buf[w.offset:], b) - b = b[nn:] - w.offset += nn - n += nn - if w.offset >= w.batchSize { - err = w.flushKeepBuffer() - } - } - return n, err -} - -func (w *bufWriter) Flush() error { - err := w.flushKeepBuffer() - // Only release the buffer if we are in a "shared" mode - if w.buf != nil && w.pool != nil { - b := w.buf - w.pool.Put(&b) - w.buf = nil - } - return err -} - -func (w *bufWriter) flushKeepBuffer() error { - if w.err != nil { - return w.err - } - if w.offset == 0 { - return nil - } - _, w.err = w.conn.Write(w.buf[:w.offset]) - w.err = toIOError(w.err) - w.offset = 0 - return w.err -} - -type ioError struct { - error -} - -func (i ioError) Unwrap() error { - return i.error -} - -func isIOError(err error) bool { - return errors.As(err, &ioError{}) -} - -func toIOError(err error) error { - if err == nil { - return nil - } - return ioError{error: err} -} - -type framer struct { - writer *bufWriter - fr *http2.Framer -} - -var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) -var writeBufferMutex sync.Mutex - -func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { - if writeBufferSize < 0 { - writeBufferSize = 0 - } - var r io.Reader = conn - if readBufferSize > 0 { - r = bufio.NewReaderSize(r, readBufferSize) - } - var pool *sync.Pool - if sharedWriteBuffer { - pool = getWriteBufferPool(writeBufferSize) - } - w := newBufWriter(conn, writeBufferSize, pool) - f := &framer{ - writer: w, - fr: http2.NewFramer(w, r), - } - f.fr.SetMaxReadFrameSize(http2MaxFrameLen) - // Opt-in to Frame reuse API on framer to reduce garbage. - // Frames aren't safe to read from after a subsequent call to ReadFrame. 
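encodeGrpcMessage above keeps printable ASCII as-is and %XX-escapes everything else so the grpc-message header stays header-safe. A condensed, runnable sketch of the same loop; the behavior mirrors the deleted code, but the function name is local:

```go
package main

import (
	"fmt"
	"strings"
	"unicode/utf8"
)

// percentEncode condenses encodeGrpcMessageUnchecked: printable ASCII
// other than '%' passes through, every other byte is %XX-escaped
// (invalid UTF-8 becomes the replacement rune first).
func percentEncode(msg string) string {
	var sb strings.Builder
	for len(msg) > 0 {
		r, size := utf8.DecodeRuneInString(msg)
		for _, b := range []byte(string(r)) {
			if size == 1 && b >= ' ' && b <= '~' && b != '%' {
				sb.WriteByte(b)
			} else {
				fmt.Fprintf(&sb, "%%%02X", b)
			}
		}
		msg = msg[size:]
	}
	return sb.String()
}

func main() {
	fmt.Println(percentEncode("50% done ✓")) // 50%25 done %E2%9C%93
}
```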
- f.fr.SetReuseFrames() - f.fr.MaxHeaderListSize = maxHeaderListSize - f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) - return f -} - -func getWriteBufferPool(writeBufferSize int) *sync.Pool { - writeBufferMutex.Lock() - defer writeBufferMutex.Unlock() - size := writeBufferSize * 2 - pool, ok := writeBufferPoolMap[size] - if ok { - return pool - } - pool = &sync.Pool{ - New: func() any { - b := make([]byte, size) - return &b - }, - } - writeBufferPoolMap[size] = pool - return pool -} - -// parseDialTarget returns the network and address to pass to dialer. -func parseDialTarget(target string) (string, string) { - net := "tcp" - m1 := strings.Index(target, ":") - m2 := strings.Index(target, ":/") - // handle unix:addr which will fail with url.Parse - if m1 >= 0 && m2 < 0 { - if n := target[0:m1]; n == "unix" { - return n, target[m1+1:] - } - } - if m2 >= 0 { - t, err := url.Parse(target) - if err != nil { - return net, target - } - scheme := t.Scheme - addr := t.Path - if scheme == "unix" { - if addr == "" { - addr = t.Host - } - return scheme, addr - } - } - return net, target -} diff --git a/vendor/google.golang.org/grpc/internal/transport/logging.go b/vendor/google.golang.org/grpc/internal/transport/logging.go deleted file mode 100644 index 42ed2b07af..0000000000 --- a/vendor/google.golang.org/grpc/internal/transport/logging.go +++ /dev/null @@ -1,40 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package transport - -import ( - "fmt" - - "google.golang.org/grpc/grpclog" - internalgrpclog "google.golang.org/grpc/internal/grpclog" -) - -var logger = grpclog.Component("transport") - -func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger { - return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p)) -} - -func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger { - return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p)) -} - -func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger { - return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p)) -} diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go deleted file mode 100644 index c11b527827..0000000000 --- a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go +++ /dev/null @@ -1,46 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
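getWriteBufferPool above memoizes one sync.Pool per buffer size so connections with the same write-buffer setting share allocations (the vendored code doubles the configured size; this sketch skips that detail). A minimal standalone version:

```go
package main

import (
	"fmt"
	"sync"
)

// pools maps buffer size -> *sync.Pool, the same shape as
// writeBufferPoolMap above; pooling avoids allocating a fresh write
// buffer per connection when the shared-buffer option is enabled.
var (
	mu    sync.Mutex
	pools = make(map[int]*sync.Pool)
)

func getPool(size int) *sync.Pool {
	mu.Lock()
	defer mu.Unlock()
	if p, ok := pools[size]; ok {
		return p
	}
	p := &sync.Pool{New: func() any {
		b := make([]byte, size)
		return &b // store *[]byte to avoid an allocation per Put
	}}
	pools[size] = p
	return p
}

func main() {
	p := getPool(32 * 1024)
	buf := p.Get().(*[]byte)
	fmt.Println(len(*buf)) // 32768
	p.Put(buf)
}
```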
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package networktype declares the network type to be used in the default -// dialer. Attribute of a resolver.Address. -package networktype - -import ( - "google.golang.org/grpc/resolver" -) - -// keyType is the key to use for storing State in Attributes. -type keyType string - -const key = keyType("grpc.internal.transport.networktype") - -// Set returns a copy of the provided address with attributes containing networkType. -func Set(address resolver.Address, networkType string) resolver.Address { - address.Attributes = address.Attributes.WithValue(key, networkType) - return address -} - -// Get returns the network type in the resolver.Address and true, or "", false -// if not present. -func Get(address resolver.Address) (string, bool) { - v := address.Attributes.Value(key) - if v == nil { - return "", false - } - return v.(string), true -} diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go deleted file mode 100644 index 24fa103257..0000000000 --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ /dev/null @@ -1,144 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package transport - -import ( - "bufio" - "context" - "encoding/base64" - "fmt" - "io" - "net" - "net/http" - "net/http/httputil" - "net/url" - - "google.golang.org/grpc/internal" -) - -const proxyAuthHeaderKey = "Proxy-Authorization" - -var ( - // The following variable will be overwritten in the tests. - httpProxyFromEnvironment = http.ProxyFromEnvironment -) - -func mapAddress(address string) (*url.URL, error) { - req := &http.Request{ - URL: &url.URL{ - Scheme: "https", - Host: address, - }, - } - url, err := httpProxyFromEnvironment(req) - if err != nil { - return nil, err - } - return url, nil -} - -// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader. -// It's possible that this reader reads more than what's need for the response and stores -// those bytes in the buffer. -// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the -// bytes in the buffer. 
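The networktype package above is just a typed key over resolver.Address attributes. A sketch of the same round trip with a local key; the key string is illustrative, since the internal package cannot be imported from outside the grpc module:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

// keyType mimics the unexported attribute key in the networktype
// package; attributes are matched by key identity, so a private type
// keeps entries collision-free.
type keyType string

const key = keyType("example.networktype")

func main() {
	addr := resolver.Address{Addr: "/tmp/grpc.sock"}
	addr.Attributes = addr.Attributes.WithValue(key, "unix")

	if v := addr.Attributes.Value(key); v != nil {
		fmt.Println(v.(string)) // unix
	}
}
```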
-type bufConn struct { - net.Conn - r io.Reader -} - -func (c *bufConn) Read(b []byte) (int, error) { - return c.r.Read(b) -} - -func basicAuth(username, password string) string { - auth := username + ":" + password - return base64.StdEncoding.EncodeToString([]byte(auth)) -} - -func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) { - defer func() { - if err != nil { - conn.Close() - } - }() - - req := &http.Request{ - Method: http.MethodConnect, - URL: &url.URL{Host: backendAddr}, - Header: map[string][]string{"User-Agent": {grpcUA}}, - } - if t := proxyURL.User; t != nil { - u := t.Username() - p, _ := t.Password() - req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p)) - } - - if err := sendHTTPRequest(ctx, req, conn); err != nil { - return nil, fmt.Errorf("failed to write the HTTP request: %v", err) - } - - r := bufio.NewReader(conn) - resp, err := http.ReadResponse(r, req) - if err != nil { - return nil, fmt.Errorf("reading server HTTP response: %v", err) - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - dump, err := httputil.DumpResponse(resp, true) - if err != nil { - return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status) - } - return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) - } - - return &bufConn{Conn: conn, r: r}, nil -} - -// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy -// is necessary, dials, does the HTTP CONNECT handshake, and returns the -// connection. -func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) { - newAddr := addr - proxyURL, err := mapAddress(addr) - if err != nil { - return nil, err - } - if proxyURL != nil { - newAddr = proxyURL.Host - } - - conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr) - if err != nil { - return nil, err - } - if proxyURL == nil { - // proxy is disabled if proxyURL is nil. - return conn, err - } - return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) -} - -func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { - req = req.WithContext(ctx) - if err := req.Write(conn); err != nil { - return fmt.Errorf("failed to write the HTTP request: %v", err) - } - return nil -} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go deleted file mode 100644 index b7b8fec180..0000000000 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ /dev/null @@ -1,851 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package transport defines and implements message oriented communication -// channel to complete various transactions (e.g., an RPC). It is meant for -// grpc-internal usage and is not intended to be imported directly by users. 
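The CONNECT handshake deleted above tunnels gRPC through an HTTP proxy by sending a CONNECT request before any HTTP/2 traffic. A standard-library-only sketch of how that request and its Proxy-Authorization header are assembled; the User-Agent value is illustrative:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
	"net/url"
)

// buildConnect shows the shape of the request that
// doHTTPConnectHandshake writes; credential handling matches basicAuth
// in the deleted file.
func buildConnect(backendAddr string, proxyUser *url.Userinfo) *http.Request {
	req := &http.Request{
		Method: http.MethodConnect,
		URL:    &url.URL{Host: backendAddr},
		Header: map[string][]string{"User-Agent": {"grpc-go-example"}},
	}
	if proxyUser != nil {
		u := proxyUser.Username()
		p, _ := proxyUser.Password()
		cred := base64.StdEncoding.EncodeToString([]byte(u + ":" + p))
		req.Header.Add("Proxy-Authorization", "Basic "+cred)
	}
	return req
}

func main() {
	req := buildConnect("10.0.0.1:443", url.UserPassword("alice", "secret"))
	fmt.Println(req.Method, req.URL.Host, req.Header.Get("Proxy-Authorization"))
}
```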
-package transport - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "net" - "sync" - "sync/atomic" - "time" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/stats" - "google.golang.org/grpc/status" - "google.golang.org/grpc/tap" -) - -const logLevel = 2 - -type bufferPool struct { - pool sync.Pool -} - -func newBufferPool() *bufferPool { - return &bufferPool{ - pool: sync.Pool{ - New: func() any { - return new(bytes.Buffer) - }, - }, - } -} - -func (p *bufferPool) get() *bytes.Buffer { - return p.pool.Get().(*bytes.Buffer) -} - -func (p *bufferPool) put(b *bytes.Buffer) { - p.pool.Put(b) -} - -// recvMsg represents the received msg from the transport. All transport -// protocol specific info has been removed. -type recvMsg struct { - buffer *bytes.Buffer - // nil: received some data - // io.EOF: stream is completed. data is nil. - // other non-nil error: transport failure. data is nil. - err error -} - -// recvBuffer is an unbounded channel of recvMsg structs. -// -// Note: recvBuffer differs from buffer.Unbounded only in the fact that it -// holds a channel of recvMsg structs instead of objects implementing "item" -// interface. recvBuffer is written to much more often and using strict recvMsg -// structs helps avoid allocation in "recvBuffer.put" -type recvBuffer struct { - c chan recvMsg - mu sync.Mutex - backlog []recvMsg - err error -} - -func newRecvBuffer() *recvBuffer { - b := &recvBuffer{ - c: make(chan recvMsg, 1), - } - return b -} - -func (b *recvBuffer) put(r recvMsg) { - b.mu.Lock() - if b.err != nil { - b.mu.Unlock() - // An error had occurred earlier, don't accept more - // data or errors. - return - } - b.err = r.err - if len(b.backlog) == 0 { - select { - case b.c <- r: - b.mu.Unlock() - return - default: - } - } - b.backlog = append(b.backlog, r) - b.mu.Unlock() -} - -func (b *recvBuffer) load() { - b.mu.Lock() - if len(b.backlog) > 0 { - select { - case b.c <- b.backlog[0]: - b.backlog[0] = recvMsg{} - b.backlog = b.backlog[1:] - default: - } - } - b.mu.Unlock() -} - -// get returns the channel that receives a recvMsg in the buffer. -// -// Upon receipt of a recvMsg, the caller should call load to send another -// recvMsg onto the channel if there is any. -func (b *recvBuffer) get() <-chan recvMsg { - return b.c -} - -// recvBufferReader implements io.Reader interface to read the data from -// recvBuffer. -type recvBufferReader struct { - closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata. - ctx context.Context - ctxDone <-chan struct{} // cache of ctx.Done() (for performance). - recv *recvBuffer - last *bytes.Buffer // Stores the remaining data in the previous calls. - err error - freeBuffer func(*bytes.Buffer) -} - -// Read reads the next len(p) bytes from last. If last is drained, it tries to -// read additional data from recv. It blocks if there no additional data available -// in recv. If Read returns any non-nil error, it will continue to return that error. -func (r *recvBufferReader) Read(p []byte) (n int, err error) { - if r.err != nil { - return 0, r.err - } - if r.last != nil { - // Read remaining data left in last call. 
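recvBuffer above is an unbounded queue built from a one-slot channel plus a mutex-guarded backlog, avoiding a goroutine per message. A pared-down copy with strings in place of recvMsg:

```go
package main

import (
	"fmt"
	"sync"
)

// miniBuffer is a pared-down recvBuffer: a 1-slot channel plus a
// backlog slice gives an unbounded, allocation-light queue.
type miniBuffer struct {
	c       chan string
	mu      sync.Mutex
	backlog []string
}

func (b *miniBuffer) put(s string) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) == 0 {
		select {
		case b.c <- s: // fast path: channel slot is free
			return
		default:
		}
	}
	b.backlog = append(b.backlog, s)
}

// load moves one backlog item into the channel; callers invoke it after
// every receive, mirroring recvBuffer.load.
func (b *miniBuffer) load() {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) > 0 {
		select {
		case b.c <- b.backlog[0]:
			b.backlog[0] = "" // drop the reference before reslicing
			b.backlog = b.backlog[1:]
		default:
		}
	}
}

func main() {
	b := &miniBuffer{c: make(chan string, 1)}
	b.put("a")
	b.put("b") // channel already full, lands in the backlog
	fmt.Println(<-b.c)
	b.load()
	fmt.Println(<-b.c) // prints a, then b
}
```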
- copied, _ := r.last.Read(p) - if r.last.Len() == 0 { - r.freeBuffer(r.last) - r.last = nil - } - return copied, nil - } - if r.closeStream != nil { - n, r.err = r.readClient(p) - } else { - n, r.err = r.read(p) - } - return n, r.err -} - -func (r *recvBufferReader) read(p []byte) (n int, err error) { - select { - case <-r.ctxDone: - return 0, ContextErr(r.ctx.Err()) - case m := <-r.recv.get(): - return r.readAdditional(m, p) - } -} - -func (r *recvBufferReader) readClient(p []byte) (n int, err error) { - // If the context is canceled, then closes the stream with nil metadata. - // closeStream writes its error parameter to r.recv as a recvMsg. - // r.readAdditional acts on that message and returns the necessary error. - select { - case <-r.ctxDone: - // Note that this adds the ctx error to the end of recv buffer, and - // reads from the head. This will delay the error until recv buffer is - // empty, thus will delay ctx cancellation in Recv(). - // - // It's done this way to fix a race between ctx cancel and trailer. The - // race was, stream.Recv() may return ctx error if ctxDone wins the - // race, but stream.Trailer() may return a non-nil md because the stream - // was not marked as done when trailer is received. This closeStream - // call will mark stream as done, thus fix the race. - // - // TODO: delaying ctx error seems like a unnecessary side effect. What - // we really want is to mark the stream as done, and return ctx error - // faster. - r.closeStream(ContextErr(r.ctx.Err())) - m := <-r.recv.get() - return r.readAdditional(m, p) - case m := <-r.recv.get(): - return r.readAdditional(m, p) - } -} - -func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) { - r.recv.load() - if m.err != nil { - return 0, m.err - } - copied, _ := m.buffer.Read(p) - if m.buffer.Len() == 0 { - r.freeBuffer(m.buffer) - r.last = nil - } else { - r.last = m.buffer - } - return copied, nil -} - -type streamState uint32 - -const ( - streamActive streamState = iota - streamWriteDone // EndStream sent - streamReadDone // EndStream received - streamDone // the entire stream is finished. -) - -// Stream represents an RPC in the transport layer. -type Stream struct { - id uint32 - st ServerTransport // nil for client side Stream - ct *http2Client // nil for server side Stream - ctx context.Context // the associated context of the stream - cancel context.CancelFunc // always nil for client side Stream - done chan struct{} // closed at the end of stream to unblock writers. On the client side. - doneFunc func() // invoked at the end of stream on client side. - ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance) - method string // the associated RPC method of the stream - recvCompress string - sendCompress string - buf *recvBuffer - trReader io.Reader - fc *inFlow - wq *writeQuota - - // Holds compressor names passed in grpc-accept-encoding metadata from the - // client. This is empty for the client side stream. - clientAdvertisedCompressors string - // Callback to state application's intentions to read data. This - // is used to adjust flow control, if needed. - requestRead func(int) - - headerChan chan struct{} // closed to indicate the end of header metadata. - headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. - // headerValid indicates whether a valid header was received. Only - // meaningful after headerChan is closed (always call waitOnHeader() before - // reading its value). 
Not valid on server side. - headerValid bool - headerWireLength int // Only set on server side. - - // hdrMu protects header and trailer metadata on the server-side. - hdrMu sync.Mutex - // On client side, header keeps the received header metadata. - // - // On server side, header keeps the header set by SetHeader(). The complete - // header will merged into this after t.WriteHeader() is called. - header metadata.MD - trailer metadata.MD // the key-value map of trailer metadata. - - noHeaders bool // set if the client never received headers (set only after the stream is done). - - // On the server-side, headerSent is atomically set to 1 when the headers are sent out. - headerSent uint32 - - state streamState - - // On client-side it is the status error received from the server. - // On server-side it is unused. - status *status.Status - - bytesReceived uint32 // indicates whether any bytes have been received on this stream - unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream - - // contentSubtype is the content-subtype for requests. - // this must be lowercase or the behavior is undefined. - contentSubtype string -} - -// isHeaderSent is only valid on the server-side. -func (s *Stream) isHeaderSent() bool { - return atomic.LoadUint32(&s.headerSent) == 1 -} - -// updateHeaderSent updates headerSent and returns true -// if it was alreay set. It is valid only on server-side. -func (s *Stream) updateHeaderSent() bool { - return atomic.SwapUint32(&s.headerSent, 1) == 1 -} - -func (s *Stream) swapState(st streamState) streamState { - return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st))) -} - -func (s *Stream) compareAndSwapState(oldState, newState streamState) bool { - return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState)) -} - -func (s *Stream) getState() streamState { - return streamState(atomic.LoadUint32((*uint32)(&s.state))) -} - -func (s *Stream) waitOnHeader() { - if s.headerChan == nil { - // On the server headerChan is always nil since a stream originates - // only after having received headers. - return - } - select { - case <-s.ctx.Done(): - // Close the stream to prevent headers/trailers from changing after - // this function returns. - s.ct.CloseStream(s, ContextErr(s.ctx.Err())) - // headerChan could possibly not be closed yet if closeStream raced - // with operateHeaders; wait until it is closed explicitly here. - <-s.headerChan - case <-s.headerChan: - } -} - -// RecvCompress returns the compression algorithm applied to the inbound -// message. It is empty string if there is no compression applied. -func (s *Stream) RecvCompress() string { - s.waitOnHeader() - return s.recvCompress -} - -// SetSendCompress sets the compression algorithm to the stream. -func (s *Stream) SetSendCompress(name string) error { - if s.isHeaderSent() || s.getState() == streamDone { - return errors.New("transport: set send compressor called after headers sent or stream done") - } - - s.sendCompress = name - return nil -} - -// SendCompress returns the send compressor name. -func (s *Stream) SendCompress() string { - return s.sendCompress -} - -// ClientAdvertisedCompressors returns the compressor names advertised by the -// client via grpc-accept-encoding header. -func (s *Stream) ClientAdvertisedCompressors() string { - return s.clientAdvertisedCompressors -} - -// Done returns a channel which is closed when it receives the final status -// from the server. 
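The stream state above lives in a single uint32 mutated with atomic swap/CAS, so reader and writer goroutines coordinate without a lock. A tiny standalone model of the transition rules, using local names rather than the vendored types:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// A local copy of the state machine above; streamDone is reached once
// both directions have finished.
type streamState uint32

const (
	streamActive streamState = iota
	streamWriteDone // EndStream sent
	streamReadDone  // EndStream received
	streamDone
)

type stream struct{ state uint32 }

func (s *stream) compareAndSwap(old, next streamState) bool {
	return atomic.CompareAndSwapUint32(&s.state, uint32(old), uint32(next))
}

func main() {
	s := &stream{}
	fmt.Println(s.compareAndSwap(streamActive, streamWriteDone)) // true
	fmt.Println(s.compareAndSwap(streamActive, streamReadDone))  // false: no longer active
}
```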
-func (s *Stream) Done() <-chan struct{} {
- return s.done
-}
-
-// Header returns the header metadata of the stream.
-//
-// On client side, it acquires the key-value pairs of header metadata once it is
-// available. It blocks until i) the metadata is ready or ii) there is no header
-// metadata or iii) the stream is canceled/expired.
-//
-// On server side, it returns the out header after t.WriteHeader is called. It
-// does not block and must not be called until after WriteHeader.
-func (s *Stream) Header() (metadata.MD, error) {
- if s.headerChan == nil {
- // On server side, return the header in stream. It will be the out
- // header after t.WriteHeader is called.
- return s.header.Copy(), nil
- }
- s.waitOnHeader()
-
- if !s.headerValid || s.noHeaders {
- return nil, s.status.Err()
- }
-
- return s.header.Copy(), nil
-}
-
-// TrailersOnly blocks until a header or trailers-only frame is received and
-// then returns true if the stream was trailers-only. If the stream ends
-// before headers are received, returns true. Client-side only.
-func (s *Stream) TrailersOnly() bool {
- s.waitOnHeader()
- return s.noHeaders
-}
-
-// Trailer returns the cached trailer metadata. Note that if it is not called
-// after the entire stream is done, it could return an empty MD. Client
-// side only.
-// It can be safely read only after the stream has ended, that is, after either
-// Read or Write has returned io.EOF.
-func (s *Stream) Trailer() metadata.MD {
- c := s.trailer.Copy()
- return c
-}
-
-// ContentSubtype returns the content-subtype for a request. For example, a
-// content-subtype of "proto" will result in a content-type of
-// "application/grpc+proto". This will always be lowercase. See
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details.
-func (s *Stream) ContentSubtype() string {
- return s.contentSubtype
-}
-
-// Context returns the context of the stream.
-func (s *Stream) Context() context.Context {
- return s.ctx
-}
-
-// SetContext sets the context of the stream. This will be deleted once the
-// stats handler callouts all move to gRPC layer.
-func (s *Stream) SetContext(ctx context.Context) {
- s.ctx = ctx
-}
-
-// Method returns the method for the stream.
-func (s *Stream) Method() string {
- return s.method
-}
-
-// Status returns the status received from the server.
-// Status can be read safely only after the stream has ended,
-// that is, after Done() is closed.
-func (s *Stream) Status() *status.Status {
- return s.status
-}
-
-// HeaderWireLength returns the size of the headers of the stream as received
-// from the wire. Valid only on the server.
-func (s *Stream) HeaderWireLength() int {
- return s.headerWireLength
-}
-
-// SetHeader sets the header metadata. This can be called multiple times.
-// Server side only.
-// This should not be called in parallel to other data writes.
-func (s *Stream) SetHeader(md metadata.MD) error {
- if md.Len() == 0 {
- return nil
- }
- if s.isHeaderSent() || s.getState() == streamDone {
- return ErrIllegalHeaderWrite
- }
- s.hdrMu.Lock()
- s.header = metadata.Join(s.header, md)
- s.hdrMu.Unlock()
- return nil
-}
-
-// SendHeader sends the given header metadata. The given metadata is
-// combined with any metadata set by previous calls to SetHeader and
-// then written to the transport stream.
-func (s *Stream) SendHeader(md metadata.MD) error {
- return s.st.WriteHeader(s, md)
-}
-
-// SetTrailer sets the trailer metadata which will be sent with the RPC status
-// by the server.
This can be called multiple times. Server side only.
-// This should not be called in parallel to other data writes.
-func (s *Stream) SetTrailer(md metadata.MD) error {
- if md.Len() == 0 {
- return nil
- }
- if s.getState() == streamDone {
- return ErrIllegalHeaderWrite
- }
- s.hdrMu.Lock()
- s.trailer = metadata.Join(s.trailer, md)
- s.hdrMu.Unlock()
- return nil
-}
-
-func (s *Stream) write(m recvMsg) {
- s.buf.put(m)
-}
-
-// Read reads all p bytes from the wire for this stream.
-func (s *Stream) Read(p []byte) (n int, err error) {
- // Don't request a read if there was an error earlier
- if er := s.trReader.(*transportReader).er; er != nil {
- return 0, er
- }
- s.requestRead(len(p))
- return io.ReadFull(s.trReader, p)
-}
-
-// transportReader reads all the data available for this Stream from the transport and
-// passes them into the decoder, which converts them into a gRPC message stream.
-// The error is io.EOF when the stream is done or another non-nil error if
-// the stream broke.
-type transportReader struct {
- reader io.Reader
- // The handler to control the window update procedure for both this
- // particular stream and the associated transport.
- windowHandler func(int)
- er error
-}
-
-func (t *transportReader) Read(p []byte) (n int, err error) {
- n, err = t.reader.Read(p)
- if err != nil {
- t.er = err
- return
- }
- t.windowHandler(n)
- return
-}
-
-// BytesReceived indicates whether any bytes have been received on this stream.
-func (s *Stream) BytesReceived() bool {
- return atomic.LoadUint32(&s.bytesReceived) == 1
-}
-
-// Unprocessed indicates whether the server did not process this stream --
-// i.e. it sent a refused stream or GOAWAY including this stream ID.
-func (s *Stream) Unprocessed() bool {
- return atomic.LoadUint32(&s.unprocessed) == 1
-}
-
-// GoString is implemented by Stream so context.String() won't
-// race when printing %#v.
-func (s *Stream) GoString() string {
- return fmt.Sprintf("<stream: %p, %v>", s, s.method)
-}
-
-// state of transport
-type transportState int
-
-const (
- reachable transportState = iota
- closing
- draining
-)
-
-// ServerConfig consists of all the configurations to establish a server transport.
-type ServerConfig struct {
- MaxStreams uint32
- ConnectionTimeout time.Duration
- Credentials credentials.TransportCredentials
- InTapHandle tap.ServerInHandle
- StatsHandlers []stats.Handler
- KeepaliveParams keepalive.ServerParameters
- KeepalivePolicy keepalive.EnforcementPolicy
- InitialWindowSize int32
- InitialConnWindowSize int32
- WriteBufferSize int
- ReadBufferSize int
- SharedWriteBuffer bool
- ChannelzParentID *channelz.Identifier
- MaxHeaderListSize *uint32
- HeaderTableSize *uint32
-}
-
-// ConnectOptions covers all relevant options for communicating with the server.
-type ConnectOptions struct {
- // UserAgent is the application user agent.
- UserAgent string
- // Dialer specifies how to dial a network address.
- Dialer func(context.Context, string) (net.Conn, error)
- // FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors.
- FailOnNonTempDialError bool
- // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
- PerRPCCredentials []credentials.PerRPCCredentials
- // TransportCredentials stores the Authenticator required to setup a client
- // connection. Only one of TransportCredentials and CredsBundle is non-nil.
- CredsBundle credentials.Bundle - // KeepaliveParams stores the keepalive parameters. - KeepaliveParams keepalive.ClientParameters - // StatsHandlers stores the handler for stats. - StatsHandlers []stats.Handler - // InitialWindowSize sets the initial window size for a stream. - InitialWindowSize int32 - // InitialConnWindowSize sets the initial window size for a connection. - InitialConnWindowSize int32 - // WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire. - WriteBufferSize int - // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. - ReadBufferSize int - // SharedWriteBuffer indicates whether connections should reuse write buffer - SharedWriteBuffer bool - // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. - ChannelzParentID *channelz.Identifier - // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. - MaxHeaderListSize *uint32 - // UseProxy specifies if a proxy should be used. - UseProxy bool -} - -// NewClientTransport establishes the transport with the required ConnectOptions -// and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) -} - -// Options provides additional hints and information for message -// transmission. -type Options struct { - // Last indicates whether this write is the last piece for - // this stream. - Last bool -} - -// CallHdr carries the information of a particular RPC. -type CallHdr struct { - // Host specifies the peer's host. - Host string - - // Method specifies the operation to perform. - Method string - - // SendCompress specifies the compression algorithm applied on - // outbound message. - SendCompress string - - // Creds specifies credentials.PerRPCCredentials for a call. - Creds credentials.PerRPCCredentials - - // ContentSubtype specifies the content-subtype for a request. For example, a - // content-subtype of "proto" will result in a content-type of - // "application/grpc+proto". The value of ContentSubtype must be all - // lowercase, otherwise the behavior is undefined. See - // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests - // for more details. - ContentSubtype string - - PreviousAttempts int // value of grpc-previous-rpc-attempts header to set - - DoneFunc func() // called when the stream is finished -} - -// ClientTransport is the common interface for all gRPC client-side transport -// implementations. -type ClientTransport interface { - // Close tears down this transport. Once it returns, the transport - // should not be accessed any more. The caller must make sure this - // is called only once. - Close(err error) - - // GracefulClose starts to tear down the transport: the transport will stop - // accepting new RPCs and NewStream will return error. Once all streams are - // finished, the transport will close. - // - // It does not block. - GracefulClose() - - // Write sends the data for the given stream. A nil stream indicates - // the write is to be performed on the transport as a whole. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error - - // NewStream creates a Stream for an RPC. 
- NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) - - // CloseStream clears the footprint of a stream when the stream is - // not needed any more. The err indicates the error incurred when - // CloseStream is called. Must be called when a stream is finished - // unless the associated transport is closing. - CloseStream(stream *Stream, err error) - - // Error returns a channel that is closed when some I/O error - // happens. Typically the caller should have a goroutine to monitor - // this in order to take action (e.g., close the current transport - // and create a new one) in error case. It should not return nil - // once the transport is initiated. - Error() <-chan struct{} - - // GoAway returns a channel that is closed when ClientTransport - // receives the draining signal from the server (e.g., GOAWAY frame in - // HTTP/2). - GoAway() <-chan struct{} - - // GetGoAwayReason returns the reason why GoAway frame was received, along - // with a human readable string with debug info. - GetGoAwayReason() (GoAwayReason, string) - - // RemoteAddr returns the remote network address. - RemoteAddr() net.Addr - - // IncrMsgSent increments the number of message sent through this transport. - IncrMsgSent() - - // IncrMsgRecv increments the number of message received through this transport. - IncrMsgRecv() -} - -// ServerTransport is the common interface for all gRPC server-side transport -// implementations. -// -// Methods may be called concurrently from multiple goroutines, but -// Write methods for a given Stream will be called serially. -type ServerTransport interface { - // HandleStreams receives incoming streams using the given handler. - HandleStreams(context.Context, func(*Stream)) - - // WriteHeader sends the header metadata for the given stream. - // WriteHeader may not be called on all streams. - WriteHeader(s *Stream, md metadata.MD) error - - // Write sends the data for the given stream. - // Write may not be called on all streams. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error - - // WriteStatus sends the status of a stream to the client. WriteStatus is - // the final call made on a stream and always occurs. - WriteStatus(s *Stream, st *status.Status) error - - // Close tears down the transport. Once it is called, the transport - // should not be accessed any more. All the pending streams and their - // handlers will be terminated asynchronously. - Close(err error) - - // Peer returns the peer of the server transport. - Peer() *peer.Peer - - // Drain notifies the client this ServerTransport stops accepting new RPCs. - Drain(debugData string) - - // IncrMsgSent increments the number of message sent through this transport. - IncrMsgSent() - - // IncrMsgRecv increments the number of message received through this transport. - IncrMsgRecv() -} - -// connectionErrorf creates an ConnectionError with the specified error description. -func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError { - return ConnectionError{ - Desc: fmt.Sprintf(format, a...), - temp: temp, - err: e, - } -} - -// ConnectionError is an error that results in the termination of the -// entire connection and the retry of all the active streams. -type ConnectionError struct { - Desc string - temp bool - err error -} - -func (e ConnectionError) Error() string { - return fmt.Sprintf("connection error: desc = %q", e.Desc) -} - -// Temporary indicates if this connection error is temporary or fatal. 
-func (e ConnectionError) Temporary() bool { - return e.temp -} - -// Origin returns the original error of this connection error. -func (e ConnectionError) Origin() error { - // Never return nil error here. - // If the original error is nil, return itself. - if e.err == nil { - return e - } - return e.err -} - -// Unwrap returns the original error of this connection error or nil when the -// origin is nil. -func (e ConnectionError) Unwrap() error { - return e.err -} - -var ( - // ErrConnClosing indicates that the transport is closing. - ErrConnClosing = connectionErrorf(true, nil, "transport is closing") - // errStreamDrain indicates that the stream is rejected because the - // connection is draining. This could be caused by goaway or balancer - // removing the address. - errStreamDrain = status.Error(codes.Unavailable, "the connection is draining") - // errStreamDone is returned from write at the client side to indiacte application - // layer of an error. - errStreamDone = errors.New("the stream is done") - // StatusGoAway indicates that the server sent a GOAWAY that included this - // stream's ID in unprocessed RPCs. - statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection") -) - -// GoAwayReason contains the reason for the GoAway frame received. -type GoAwayReason uint8 - -const ( - // GoAwayInvalid indicates that no GoAway frame is received. - GoAwayInvalid GoAwayReason = 0 - // GoAwayNoReason is the default value when GoAway frame is received. - GoAwayNoReason GoAwayReason = 1 - // GoAwayTooManyPings indicates that a GoAway frame with - // ErrCodeEnhanceYourCalm was received and that the debug data said - // "too_many_pings". - GoAwayTooManyPings GoAwayReason = 2 -) - -// channelzData is used to store channelz related data for http2Client and http2Server. -// These fields cannot be embedded in the original structs (e.g. http2Client), since to do atomic -// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. -// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. -type channelzData struct { - kpCount int64 - // The number of streams that have started, including already finished ones. - streamsStarted int64 - // Client side: The number of streams that have ended successfully by receiving - // EoS bit set frame from server. - // Server side: The number of streams that have ended successfully by sending - // frame with EoS bit set. - streamsSucceeded int64 - streamsFailed int64 - // lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type - // instead of time.Time since it's more costly to atomically update time.Time variable than int64 - // variable. The same goes for lastMsgSentTime and lastMsgRecvTime. - lastStreamCreatedTime int64 - msgSent int64 - msgRecv int64 - lastMsgSentTime int64 - lastMsgRecvTime int64 -} - -// ContextErr converts the error from context package into a status error. 
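ConnectionError above implements Temporary and Unwrap, so callers can classify transport failures through wrapped error chains. A sketch with a local stand-in type, since the internal type itself cannot be imported:

```go
package main

import (
	"errors"
	"fmt"
)

// connError mirrors the shape of transport.ConnectionError: a
// description, a temporary flag, and a wrapped cause.
type connError struct {
	desc string
	temp bool
	err  error
}

func (e connError) Error() string   { return fmt.Sprintf("connection error: desc = %q", e.desc) }
func (e connError) Temporary() bool { return e.temp }
func (e connError) Unwrap() error   { return e.err }

func main() {
	cause := errors.New("read: connection reset by peer")
	err := fmt.Errorf("rpc failed: %w", connError{desc: "transport closed", temp: true, err: cause})

	var ce connError
	if errors.As(err, &ce) {
		fmt.Println(ce.Temporary(), errors.Is(err, cause)) // true true
	}
}
```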
-func ContextErr(err error) error { - switch err { - case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - return status.Error(codes.Canceled, err.Error()) - } - return status.Errorf(codes.Internal, "Unexpected error from context packet: %v", err) -} diff --git a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go deleted file mode 100644 index e8b492774d..0000000000 --- a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package internal - -import ( - "google.golang.org/grpc/attributes" - "google.golang.org/grpc/resolver" -) - -// handshakeClusterNameKey is the type used as the key to store cluster name in -// the Attributes field of resolver.Address. -type handshakeClusterNameKey struct{} - -// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field -// is updated with the cluster name. -func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address { - addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName) - return addr -} - -// GetXDSHandshakeClusterName returns cluster name stored in attr. -func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) { - v := attr.Value(handshakeClusterNameKey{}) - name, ok := v.(string) - return name, ok -} diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go deleted file mode 100644 index 34d31b5e7d..0000000000 --- a/vendor/google.golang.org/grpc/keepalive/keepalive.go +++ /dev/null @@ -1,85 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package keepalive defines configurable parameters for point-to-point -// healthcheck. -package keepalive - -import ( - "time" -) - -// ClientParameters is used to set keepalive parameters on the client-side. -// These configure how the client will actively probe to notice when a -// connection is broken and send pings so intermediaries will be aware of the -// liveness of the connection. Make sure these parameters are set in -// coordination with the keepalive policy on the server, as incompatible -// settings can result in closing of connection. 
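ContextErr above is the canonical mapping from context errors to status codes. A runnable sketch of the same switch using the public status and codes packages; the fallback message is paraphrased:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// toStatusErr follows the ContextErr mapping above: deadline and
// cancellation get dedicated codes, anything else becomes Internal.
func toStatusErr(err error) error {
	switch err {
	case context.DeadlineExceeded:
		return status.Error(codes.DeadlineExceeded, err.Error())
	case context.Canceled:
		return status.Error(codes.Canceled, err.Error())
	}
	return status.Errorf(codes.Internal, "unexpected error from context package: %v", err)
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	fmt.Println(status.Code(toStatusErr(ctx.Err()))) // Canceled
}
```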
-type ClientParameters struct { - // After a duration of this time if the client doesn't see any activity it - // pings the server to see if the transport is still alive. - // If set below 10s, a minimum value of 10s will be used instead. - Time time.Duration // The current default value is infinity. - // After having pinged for keepalive check, the client waits for a duration - // of Timeout and if no activity is seen even after that the connection is - // closed. - Timeout time.Duration // The current default value is 20 seconds. - // If true, client sends keepalive pings even with no active RPCs. If false, - // when there are no active RPCs, Time and Timeout will be ignored and no - // keepalive pings will be sent. - PermitWithoutStream bool // false by default. -} - -// ServerParameters is used to set keepalive and max-age parameters on the -// server-side. -type ServerParameters struct { - // MaxConnectionIdle is a duration for the amount of time after which an - // idle connection would be closed by sending a GoAway. Idleness duration is - // defined since the most recent time the number of outstanding RPCs became - // zero or the connection establishment. - MaxConnectionIdle time.Duration // The current default value is infinity. - // MaxConnectionAge is a duration for the maximum amount of time a - // connection may exist before it will be closed by sending a GoAway. A - // random jitter of +/-10% will be added to MaxConnectionAge to spread out - // connection storms. - MaxConnectionAge time.Duration // The current default value is infinity. - // MaxConnectionAgeGrace is an additive period after MaxConnectionAge after - // which the connection will be forcibly closed. - MaxConnectionAgeGrace time.Duration // The current default value is infinity. - // After a duration of this time if the server doesn't see any activity it - // pings the client to see if the transport is still alive. - // If set below 1s, a minimum value of 1s will be used instead. - Time time.Duration // The current default value is 2 hours. - // After having pinged for keepalive check, the server waits for a duration - // of Timeout and if no activity is seen even after that the connection is - // closed. - Timeout time.Duration // The current default value is 20 seconds. -} - -// EnforcementPolicy is used to set keepalive enforcement policy on the -// server-side. Server will close connection with a client that violates this -// policy. -type EnforcementPolicy struct { - // MinTime is the minimum amount of time a client should wait before sending - // a keepalive ping. - MinTime time.Duration // The current default value is 5 minutes. - // If true, server allows keepalive pings even when there are no active - // streams(RPCs). If false, and client sends ping when there are no active - // streams, server will send GOAWAY and close the connection. - PermitWithoutStream bool // false by default. -} diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go deleted file mode 100644 index 1e9485fd6e..0000000000 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ /dev/null @@ -1,300 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
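The keepalive parameters deleted above are configured through the public grpc options. A typical pairing of client pings with a server enforcement policy; the address and durations are illustrative only:

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// Client: ping after 30s of inactivity, close 10s later if the ping
	// gets no reply, even when no RPC is in flight.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:                30 * time.Second,
			Timeout:             10 * time.Second,
			PermitWithoutStream: true,
		}))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Server: tolerate pings at most every 15s; stricter clients get a
	// GOAWAY per the EnforcementPolicy semantics above.
	_ = grpc.NewServer(
		grpc.KeepaliveParams(keepalive.ServerParameters{Time: 2 * time.Hour}),
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime:             15 * time.Second,
			PermitWithoutStream: true,
		}))
}
```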
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package metadata define the structure of the metadata supported by gRPC library. -// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md -// for more information about custom-metadata. -package metadata // import "google.golang.org/grpc/metadata" - -import ( - "context" - "fmt" - "strings" - - "google.golang.org/grpc/internal" -) - -func init() { - internal.FromOutgoingContextRaw = fromOutgoingContextRaw -} - -// DecodeKeyValue returns k, v, nil. -// -// Deprecated: use k and v directly instead. -func DecodeKeyValue(k, v string) (string, string, error) { - return k, v, nil -} - -// MD is a mapping from metadata keys to values. Users should use the following -// two convenience functions New and Pairs to generate MD. -type MD map[string][]string - -// New creates an MD from a given key-value map. -// -// Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. -// -// Uppercase letters are automatically converted to lowercase. -// -// Keys beginning with "grpc-" are reserved for grpc-internal use only and may -// result in errors if set in metadata. -func New(m map[string]string) MD { - md := make(MD, len(m)) - for k, val := range m { - key := strings.ToLower(k) - md[key] = append(md[key], val) - } - return md -} - -// Pairs returns an MD formed by the mapping of key, value ... -// Pairs panics if len(kv) is odd. -// -// Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. -// -// Uppercase letters are automatically converted to lowercase. -// -// Keys beginning with "grpc-" are reserved for grpc-internal use only and may -// result in errors if set in metadata. -func Pairs(kv ...string) MD { - if len(kv)%2 == 1 { - panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) - } - md := make(MD, len(kv)/2) - for i := 0; i < len(kv); i += 2 { - key := strings.ToLower(kv[i]) - md[key] = append(md[key], kv[i+1]) - } - return md -} - -// Len returns the number of items in md. -func (md MD) Len() int { - return len(md) -} - -// Copy returns a copy of md. -func (md MD) Copy() MD { - out := make(MD, len(md)) - for k, v := range md { - out[k] = copyOf(v) - } - return out -} - -// Get obtains the values for a given key. -// -// k is converted to lowercase before searching in md. -func (md MD) Get(k string) []string { - k = strings.ToLower(k) - return md[k] -} - -// Set sets the value of a given key with a slice of values. -// -// k is converted to lowercase before storing in md. -func (md MD) Set(k string, vals ...string) { - if len(vals) == 0 { - return - } - k = strings.ToLower(k) - md[k] = vals -} - -// Append adds the values to key k, not overwriting what was already stored at -// that key. -// -// k is converted to lowercase before storing in md. 
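New and Pairs above lowercase keys and accumulate repeated keys into a value slice. A short example with the public metadata package:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// Keys are lowercased, and repeated keys accumulate values.
	md := metadata.Pairs("X-Request-ID", "42", "x-request-id", "43")
	fmt.Println(md.Get("x-request-id")) // [42 43]

	// Attach the metadata to an outgoing RPC context.
	ctx := metadata.NewOutgoingContext(context.Background(), md)
	_ = ctx
}
```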
-func (md MD) Append(k string, vals ...string) { - if len(vals) == 0 { - return - } - k = strings.ToLower(k) - md[k] = append(md[k], vals...) -} - -// Delete removes the values for a given key k which is converted to lowercase -// before removing it from md. -func (md MD) Delete(k string) { - k = strings.ToLower(k) - delete(md, k) -} - -// Join joins any number of mds into a single MD. -// -// The order of values for each key is determined by the order in which the mds -// containing those values are presented to Join. -func Join(mds ...MD) MD { - out := MD{} - for _, md := range mds { - for k, v := range md { - out[k] = append(out[k], v...) - } - } - return out -} - -type mdIncomingKey struct{} -type mdOutgoingKey struct{} - -// NewIncomingContext creates a new context with incoming md attached. md must -// not be modified after calling this function. -func NewIncomingContext(ctx context.Context, md MD) context.Context { - return context.WithValue(ctx, mdIncomingKey{}, md) -} - -// NewOutgoingContext creates a new context with outgoing md attached. If used -// in conjunction with AppendToOutgoingContext, NewOutgoingContext will -// overwrite any previously-appended metadata. md must not be modified after -// calling this function. -func NewOutgoingContext(ctx context.Context, md MD) context.Context { - return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) -} - -// AppendToOutgoingContext returns a new context with the provided kv merged -// with any existing metadata in the context. Please refer to the documentation -// of Pairs for a description of kv. -func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { - if len(kv)%2 == 1 { - panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) - } - md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) - added := make([][]string, len(md.added)+1) - copy(added, md.added) - kvCopy := make([]string, 0, len(kv)) - for i := 0; i < len(kv); i += 2 { - kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1]) - } - added[len(added)-1] = kvCopy - return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) -} - -// FromIncomingContext returns the incoming metadata in ctx if it exists. -// -// All keys in the returned MD are lowercase. -func FromIncomingContext(ctx context.Context) (MD, bool) { - md, ok := ctx.Value(mdIncomingKey{}).(MD) - if !ok { - return nil, false - } - out := make(MD, len(md)) - for k, v := range md { - // We need to manually convert all keys to lower case, because MD is a - // map, and there's no guarantee that the MD attached to the context is - // created using our helper functions. - key := strings.ToLower(k) - out[key] = copyOf(v) - } - return out, true -} - -// ValueFromIncomingContext returns the metadata value corresponding to the metadata -// key from the incoming metadata if it exists. Keys are matched in a case insensitive -// manner. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func ValueFromIncomingContext(ctx context.Context, key string) []string { - md, ok := ctx.Value(mdIncomingKey{}).(MD) - if !ok { - return nil - } - - if v, ok := md[key]; ok { - return copyOf(v) - } - for k, v := range md { - // Case insenitive comparison: MD is a map, and there's no guarantee - // that the MD attached to the context is created using our helper - // functions. 
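FromIncomingContext and AppendToOutgoingContext above are the usual server-side read and client-side append paths. A sketch of both, with NewIncomingContext standing in for a context that gRPC would normally supply; the metadata keys are illustrative:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

// handle plays the role of a server handler or interceptor: read what
// the client sent, then append to the outgoing side for a follow-up call.
func handle(ctx context.Context) {
	if md, ok := metadata.FromIncomingContext(ctx); ok {
		fmt.Println(md.Get("x-request-id")) // [42]
	}
	// Append merges with earlier outgoing metadata instead of replacing it.
	ctx = metadata.AppendToOutgoingContext(ctx, "retry-attempt", "1")
	_ = ctx
}

func main() {
	md := metadata.Pairs("x-request-id", "42")
	handle(metadata.NewIncomingContext(context.Background(), md))
}
```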
- if strings.EqualFold(k, key) { - return copyOf(v) - } - } - return nil -} - -func copyOf(v []string) []string { - vals := make([]string, len(v)) - copy(vals, v) - return vals -} - -// fromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. -// -// Remember to perform strings.ToLower on the keys, for both the returned MD (MD -// is a map, there's no guarantee it's created using our helper functions) and -// the extra kv pairs (AppendToOutgoingContext doesn't turn them into -// lowercase). -func fromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { - raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) - if !ok { - return nil, nil, false - } - - return raw.md, raw.added, true -} - -// FromOutgoingContext returns the outgoing metadata in ctx if it exists. -// -// All keys in the returned MD are lowercase. -func FromOutgoingContext(ctx context.Context) (MD, bool) { - raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) - if !ok { - return nil, false - } - - mdSize := len(raw.md) - for i := range raw.added { - mdSize += len(raw.added[i]) / 2 - } - - out := make(MD, mdSize) - for k, v := range raw.md { - // We need to manually convert all keys to lower case, because MD is a - // map, and there's no guarantee that the MD attached to the context is - // created using our helper functions. - key := strings.ToLower(k) - out[key] = copyOf(v) - } - for _, added := range raw.added { - if len(added)%2 == 1 { - panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added))) - } - - for i := 0; i < len(added); i += 2 { - key := strings.ToLower(added[i]) - out[key] = append(out[key], added[i+1]) - } - } - return out, ok -} - -type rawMD struct { - md MD - added [][]string -} diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go deleted file mode 100644 index a821ff9b2b..0000000000 --- a/vendor/google.golang.org/grpc/peer/peer.go +++ /dev/null @@ -1,53 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package peer defines various peer information associated with RPCs and -// corresponding utils. -package peer - -import ( - "context" - "net" - - "google.golang.org/grpc/credentials" -) - -// Peer contains the information of the peer for an RPC, such as the address -// and authentication information. -type Peer struct { - // Addr is the peer address. - Addr net.Addr - // LocalAddr is the local address. - LocalAddr net.Addr - // AuthInfo is the authentication information of the transport. - // It is nil if there is no transport security being used. - AuthInfo credentials.AuthInfo -} - -type peerKey struct{} - -// NewContext creates a new context with peer information attached. -func NewContext(ctx context.Context, p *Peer) context.Context { - return context.WithValue(ctx, peerKey{}, p) -} - -// FromContext returns the peer information in ctx if it exists. 
-func FromContext(ctx context.Context) (p *Peer, ok bool) {
-	p, ok = ctx.Value(peerKey{}).(*Peer)
-	return
-}
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
deleted file mode 100644
index bf56faa76d..0000000000
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
-	"context"
-	"io"
-	"sync"
-
-	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/internal/channelz"
-	istatus "google.golang.org/grpc/internal/status"
-	"google.golang.org/grpc/internal/transport"
-	"google.golang.org/grpc/stats"
-	"google.golang.org/grpc/status"
-)
-
-// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
-// actions and unblocks when there's a picker update.
-type pickerWrapper struct {
-	mu            sync.Mutex
-	done          bool
-	blockingCh    chan struct{}
-	picker        balancer.Picker
-	statsHandlers []stats.Handler // to record blocking picker calls
-}
-
-func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper {
-	return &pickerWrapper{
-		blockingCh:    make(chan struct{}),
-		statsHandlers: statsHandlers,
-	}
-}
-
-// updatePicker is called by UpdateBalancerState. It unblocks all blocked picks.
-func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
-	pw.mu.Lock()
-	if pw.done {
-		pw.mu.Unlock()
-		return
-	}
-	pw.picker = p
-	// pw.blockingCh should never be nil.
-	close(pw.blockingCh)
-	pw.blockingCh = make(chan struct{})
-	pw.mu.Unlock()
-}
-
-// doneChannelzWrapper performs the following:
-//  - increments the calls started channelz counter
-//  - wraps the done function in the passed in result to increment the calls
-//    failed or calls succeeded channelz counter before invoking the actual
-//    done function.
-func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) {
-	ac := acbw.ac
-	ac.incrCallsStarted()
-	done := result.Done
-	result.Done = func(b balancer.DoneInfo) {
-		if b.Err != nil && b.Err != io.EOF {
-			ac.incrCallsFailed()
-		} else {
-			ac.incrCallsSucceeded()
-		}
-		if done != nil {
-			done(b)
-		}
-	}
-}
-
-// pick returns the transport that will be used for the RPC.
-// It may block in the following cases:
-//  - there's no picker
-//  - the current picker returns ErrNoSubConnAvailable
-//  - the current picker returns other errors and failfast is false.
-//  - the subConn returned by the current picker is not READY
-// When one of these situations happens, pick blocks until the picker gets updated.
-func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) { - var ch chan struct{} - - var lastPickErr error - - for { - pw.mu.Lock() - if pw.done { - pw.mu.Unlock() - return nil, balancer.PickResult{}, ErrClientConnClosing - } - - if pw.picker == nil { - ch = pw.blockingCh - } - if ch == pw.blockingCh { - // This could happen when either: - // - pw.picker is nil (the previous if condition), or - // - has called pick on the current picker. - pw.mu.Unlock() - select { - case <-ctx.Done(): - var errStr string - if lastPickErr != nil { - errStr = "latest balancer error: " + lastPickErr.Error() - } else { - errStr = ctx.Err().Error() - } - switch ctx.Err() { - case context.DeadlineExceeded: - return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr) - case context.Canceled: - return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr) - } - case <-ch: - } - continue - } - - // If the channel is set, it means that the pick call had to wait for a - // new picker at some point. Either it's the first iteration and this - // function received the first picker, or a picker errored with - // ErrNoSubConnAvailable or errored with failfast set to false, which - // will trigger a continue to the next iteration. In the first case this - // conditional will hit if this call had to block (the channel is set). - // In the second case, the only way it will get to this conditional is - // if there is a new picker. - if ch != nil { - for _, sh := range pw.statsHandlers { - sh.HandleRPC(ctx, &stats.PickerUpdated{}) - } - } - - ch = pw.blockingCh - p := pw.picker - pw.mu.Unlock() - - pickResult, err := p.Pick(info) - if err != nil { - if err == balancer.ErrNoSubConnAvailable { - continue - } - if st, ok := status.FromError(err); ok { - // Status error: end the RPC unconditionally with this status. - // First restrict the code to the list allowed by gRFC A54. - if istatus.IsRestrictedControlPlaneCode(st) { - err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) - } - return nil, balancer.PickResult{}, dropError{error: err} - } - // For all other errors, wait for ready RPCs should block and other - // RPCs should fail with unavailable. - if !failfast { - lastPickErr = err - continue - } - return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) - } - - acbw, ok := pickResult.SubConn.(*acBalancerWrapper) - if !ok { - logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn) - continue - } - if t := acbw.ac.getReadyTransport(); t != nil { - if channelz.IsOn() { - doneChannelzWrapper(acbw, &pickResult) - return t, pickResult, nil - } - return t, pickResult, nil - } - if pickResult.Done != nil { - // Calling done with nil error, no bytes sent and no bytes received. - // DoneInfo with default value works. - pickResult.Done(balancer.DoneInfo{}) - } - logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick") - // If ok == false, ac.state is not READY. - // A valid picker always returns READY subConn. This means the state of ac - // just changed, and picker will be updated shortly. - // continue back to the beginning of the for loop to repick. 
- } -} - -func (pw *pickerWrapper) close() { - pw.mu.Lock() - defer pw.mu.Unlock() - if pw.done { - return - } - pw.done = true - close(pw.blockingCh) -} - -// reset clears the pickerWrapper and prepares it for being used again when idle -// mode is exited. -func (pw *pickerWrapper) reset() { - pw.mu.Lock() - defer pw.mu.Unlock() - if pw.done { - return - } - pw.blockingCh = make(chan struct{}) -} - -// dropError is a wrapper error that indicates the LB policy wishes to drop the -// RPC and not retry it. -type dropError struct { - error -} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go deleted file mode 100644 index 5128f9364d..0000000000 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ /dev/null @@ -1,249 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "encoding/json" - "errors" - "fmt" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" - internalgrpclog "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcrand" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" -) - -const ( - // PickFirstBalancerName is the name of the pick_first balancer. - PickFirstBalancerName = "pick_first" - logPrefix = "[pick-first-lb %p] " -) - -func newPickfirstBuilder() balancer.Builder { - return &pickfirstBuilder{} -} - -type pickfirstBuilder struct{} - -func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { - b := &pickfirstBalancer{cc: cc} - b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) - return b -} - -func (*pickfirstBuilder) Name() string { - return PickFirstBalancerName -} - -type pfConfig struct { - serviceconfig.LoadBalancingConfig `json:"-"` - - // If set to true, instructs the LB policy to shuffle the order of the list - // of addresses received from the name resolver before attempting to - // connect to them. - ShuffleAddressList bool `json:"shuffleAddressList"` -} - -func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - var cfg pfConfig - if err := json.Unmarshal(js, &cfg); err != nil { - return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) - } - return cfg, nil -} - -type pickfirstBalancer struct { - logger *internalgrpclog.PrefixLogger - state connectivity.State - cc balancer.ClientConn - subConn balancer.SubConn -} - -func (b *pickfirstBalancer) ResolverError(err error) { - if b.logger.V(2) { - b.logger.Infof("Received error from the name resolver: %v", err) - } - if b.subConn == nil { - b.state = connectivity.TransientFailure - } - - if b.state != connectivity.TransientFailure { - // The picker will not change since the balancer does not currently - // report an error. 
- return - } - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, - }) -} - -func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { - addrs := state.ResolverState.Addresses - if len(addrs) == 0 { - // The resolver reported an empty address list. Treat it like an error by - // calling b.ResolverError. - if b.subConn != nil { - // Shut down the old subConn. All addresses were removed, so it is - // no longer valid. - b.subConn.Shutdown() - b.subConn = nil - } - b.ResolverError(errors.New("produced zero addresses")) - return balancer.ErrBadResolverState - } - - // We don't have to guard this block with the env var because ParseConfig - // already does so. - cfg, ok := state.BalancerConfig.(pfConfig) - if state.BalancerConfig != nil && !ok { - return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) - } - if cfg.ShuffleAddressList { - addrs = append([]resolver.Address{}, addrs...) - grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) - } - - if b.logger.V(2) { - b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) - } - - if b.subConn != nil { - b.cc.UpdateAddresses(b.subConn, addrs) - return nil - } - - var subConn balancer.SubConn - subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{ - StateListener: func(state balancer.SubConnState) { - b.updateSubConnState(subConn, state) - }, - }) - if err != nil { - if b.logger.V(2) { - b.logger.Infof("Failed to create new SubConn: %v", err) - } - b.state = connectivity.TransientFailure - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, - }) - return balancer.ErrBadResolverState - } - b.subConn = subConn - b.state = connectivity.Idle - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - b.subConn.Connect() - return nil -} - -// UpdateSubConnState is unused as a StateListener is always registered when -// creating SubConns. -func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { - b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) -} - -func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { - if b.logger.V(2) { - b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state) - } - if b.subConn != subConn { - if b.logger.V(2) { - b.logger.Infof("Ignored state change because subConn is not recognized") - } - return - } - if state.ConnectivityState == connectivity.Shutdown { - b.subConn = nil - return - } - - switch state.ConnectivityState { - case connectivity.Ready: - b.cc.UpdateState(balancer.State{ - ConnectivityState: state.ConnectivityState, - Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, - }) - case connectivity.Connecting: - if b.state == connectivity.TransientFailure { - // We stay in TransientFailure until we are Ready. See A62. 
- return - } - b.cc.UpdateState(balancer.State{ - ConnectivityState: state.ConnectivityState, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - case connectivity.Idle: - if b.state == connectivity.TransientFailure { - // We stay in TransientFailure until we are Ready. Also kick the - // subConn out of Idle into Connecting. See A62. - b.subConn.Connect() - return - } - b.cc.UpdateState(balancer.State{ - ConnectivityState: state.ConnectivityState, - Picker: &idlePicker{subConn: subConn}, - }) - case connectivity.TransientFailure: - b.cc.UpdateState(balancer.State{ - ConnectivityState: state.ConnectivityState, - Picker: &picker{err: state.ConnectionError}, - }) - } - b.state = state.ConnectivityState -} - -func (b *pickfirstBalancer) Close() { -} - -func (b *pickfirstBalancer) ExitIdle() { - if b.subConn != nil && b.state == connectivity.Idle { - b.subConn.Connect() - } -} - -type picker struct { - result balancer.PickResult - err error -} - -func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - return p.result, p.err -} - -// idlePicker is used when the SubConn is IDLE and kicks the SubConn into -// CONNECTING when Pick is called. -type idlePicker struct { - subConn balancer.SubConn -} - -func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - i.subConn.Connect() - return balancer.PickResult{}, balancer.ErrNoSubConnAvailable -} - -func init() { - balancer.Register(newPickfirstBuilder()) -} diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go deleted file mode 100644 index 73bd633643..0000000000 --- a/vendor/google.golang.org/grpc/preloader.go +++ /dev/null @@ -1,67 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// PreparedMsg is responsible for creating a Marshalled and Compressed object. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type PreparedMsg struct { - // Struct for preparing msg before sending them - encodedData []byte - hdr []byte - payload []byte -} - -// Encode marshalls and compresses the message using the codec and compressor for the stream. 
-func (p *PreparedMsg) Encode(s Stream, msg any) error { - ctx := s.Context() - rpcInfo, ok := rpcInfoFromContext(ctx) - if !ok { - return status.Errorf(codes.Internal, "grpc: unable to get rpcInfo") - } - - // check if the context has the relevant information to prepareMsg - if rpcInfo.preloaderInfo == nil { - return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil") - } - if rpcInfo.preloaderInfo.codec == nil { - return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil") - } - - // prepare the msg - data, err := encode(rpcInfo.preloaderInfo.codec, msg) - if err != nil { - return err - } - p.encodedData = data - compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp) - if err != nil { - return err - } - p.hdr, p.payload = msgHeader(data, compData) - return nil -} diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh deleted file mode 100644 index a6f26c8ab0..0000000000 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/bin/bash -# Copyright 2020 gRPC authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eu -o pipefail - -WORKDIR=$(mktemp -d) - -function finish { - rm -rf "$WORKDIR" -} -trap finish EXIT - -export GOBIN=${WORKDIR}/bin -export PATH=${GOBIN}:${PATH} -mkdir -p ${GOBIN} - -echo "remove existing generated files" -# grpc_testing_not_regenerate/*.pb.go is not re-generated, -# see grpc_testing_not_regenerate/README.md for details. -rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') - -echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" -(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) - -echo "go install cmd/protoc-gen-go-grpc" -(cd cmd/protoc-gen-go-grpc && go install .) 
- -echo "git clone https://github.com/grpc/grpc-proto" -git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto - -echo "git clone https://github.com/protocolbuffers/protobuf" -git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf - -# Pull in code.proto as a proto dependency -mkdir -p ${WORKDIR}/googleapis/google/rpc -echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" -curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto - -mkdir -p ${WORKDIR}/out - -# Generates sources without the embed requirement -LEGACY_SOURCES=( - ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto - ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto - ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto - ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto - profiling/proto/service.proto - ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto - ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto -) - -# Generates only the new gRPC Service symbols -SOURCES=( - $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^\(profiling/proto/service.proto\|reflection/grpc_reflection_v1alpha/reflection.proto\)$') - ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto - ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto - ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto - ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto - ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto - ${WORKDIR}/grpc-proto/grpc/testing/*.proto - ${WORKDIR}/grpc-proto/grpc/core/*.proto -) - -# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an -# import path of 'bar' in the generated code when 'foo.proto' is imported in -# one of the sources. -# -# Note that the protos listed here are all for testing purposes. All protos to -# be used externally should have a go_package option (and they don't need to be -# listed here). -OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ -Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing - -for src in ${SOURCES[@]}; do - echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS}:${WORKDIR}/out \ - -I"." \ - -I${WORKDIR}/grpc-proto \ - -I${WORKDIR}/googleapis \ - -I${WORKDIR}/protobuf/src \ - ${src} -done - -for src in ${LEGACY_SOURCES[@]}; do - echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \ - -I"." \ - -I${WORKDIR}/grpc-proto \ - -I${WORKDIR}/googleapis \ - -I${WORKDIR}/protobuf/src \ - ${src} -done - -# The go_package option in grpc/lookup/v1/rls.proto doesn't match the -# current location. Move it into the right place. 
-mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 -mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 - -# grpc_testing_not_regenerate/*.pb.go are not re-generated, -# see grpc_testing_not_regenerate/README.md for details. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go - -cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go deleted file mode 100644 index 14aa6f20ae..0000000000 --- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go +++ /dev/null @@ -1,36 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package dns implements a dns resolver to be installed as the default resolver -// in grpc. -// -// Deprecated: this package is imported by grpc and should not need to be -// imported directly by users. -package dns - -import ( - "google.golang.org/grpc/internal/resolver/dns" - "google.golang.org/grpc/resolver" -) - -// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. -// -// Deprecated: import grpc and use resolver.Get("dns") instead. -func NewBuilder() resolver.Builder { - return dns.NewBuilder() -} diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go deleted file mode 100644 index ada5b9bb79..0000000000 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ /dev/null @@ -1,251 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package resolver - -type addressMapEntry struct { - addr Address - value any -} - -// AddressMap is a map of addresses to arbitrary values taking into account -// Attributes. BalancerAttributes are ignored, as are Metadata and Type. -// Multiple accesses may not be performed concurrently. Must be created via -// NewAddressMap; do not construct directly. -type AddressMap struct { - // The underlying map is keyed by an Address with fields that we don't care - // about being set to their zero values. The only fields that we care about - // are `Addr`, `ServerName` and `Attributes`. Since we need to be able to - // distinguish between addresses with same `Addr` and `ServerName`, but - // different `Attributes`, we cannot store the `Attributes` in the map key. 
- // - // The comparison operation for structs work as follows: - // Struct values are comparable if all their fields are comparable. Two - // struct values are equal if their corresponding non-blank fields are equal. - // - // The value type of the map contains a slice of addresses which match the key - // in their `Addr` and `ServerName` fields and contain the corresponding value - // associated with them. - m map[Address]addressMapEntryList -} - -func toMapKey(addr *Address) Address { - return Address{Addr: addr.Addr, ServerName: addr.ServerName} -} - -type addressMapEntryList []*addressMapEntry - -// NewAddressMap creates a new AddressMap. -func NewAddressMap() *AddressMap { - return &AddressMap{m: make(map[Address]addressMapEntryList)} -} - -// find returns the index of addr in the addressMapEntry slice, or -1 if not -// present. -func (l addressMapEntryList) find(addr Address) int { - for i, entry := range l { - // Attributes are the only thing to match on here, since `Addr` and - // `ServerName` are already equal. - if entry.addr.Attributes.Equal(addr.Attributes) { - return i - } - } - return -1 -} - -// Get returns the value for the address in the map, if present. -func (a *AddressMap) Get(addr Address) (value any, ok bool) { - addrKey := toMapKey(&addr) - entryList := a.m[addrKey] - if entry := entryList.find(addr); entry != -1 { - return entryList[entry].value, true - } - return nil, false -} - -// Set updates or adds the value to the address in the map. -func (a *AddressMap) Set(addr Address, value any) { - addrKey := toMapKey(&addr) - entryList := a.m[addrKey] - if entry := entryList.find(addr); entry != -1 { - entryList[entry].value = value - return - } - a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value}) -} - -// Delete removes addr from the map. -func (a *AddressMap) Delete(addr Address) { - addrKey := toMapKey(&addr) - entryList := a.m[addrKey] - entry := entryList.find(addr) - if entry == -1 { - return - } - if len(entryList) == 1 { - entryList = nil - } else { - copy(entryList[entry:], entryList[entry+1:]) - entryList = entryList[:len(entryList)-1] - } - a.m[addrKey] = entryList -} - -// Len returns the number of entries in the map. -func (a *AddressMap) Len() int { - ret := 0 - for _, entryList := range a.m { - ret += len(entryList) - } - return ret -} - -// Keys returns a slice of all current map keys. -func (a *AddressMap) Keys() []Address { - ret := make([]Address, 0, a.Len()) - for _, entryList := range a.m { - for _, entry := range entryList { - ret = append(ret, entry.addr) - } - } - return ret -} - -// Values returns a slice of all current map values. -func (a *AddressMap) Values() []any { - ret := make([]any, 0, a.Len()) - for _, entryList := range a.m { - for _, entry := range entryList { - ret = append(ret, entry.value) - } - } - return ret -} - -type endpointNode struct { - addrs map[string]struct{} -} - -// Equal returns whether the unordered set of addrs are the same between the -// endpoint nodes. 
-func (en *endpointNode) Equal(en2 *endpointNode) bool { - if len(en.addrs) != len(en2.addrs) { - return false - } - for addr := range en.addrs { - if _, ok := en2.addrs[addr]; !ok { - return false - } - } - return true -} - -func toEndpointNode(endpoint Endpoint) endpointNode { - en := make(map[string]struct{}) - for _, addr := range endpoint.Addresses { - en[addr.Addr] = struct{}{} - } - return endpointNode{ - addrs: en, - } -} - -// EndpointMap is a map of endpoints to arbitrary values keyed on only the -// unordered set of address strings within an endpoint. This map is not thread -// safe, thus it is unsafe to access concurrently. Must be created via -// NewEndpointMap; do not construct directly. -type EndpointMap struct { - endpoints map[*endpointNode]any -} - -// NewEndpointMap creates a new EndpointMap. -func NewEndpointMap() *EndpointMap { - return &EndpointMap{ - endpoints: make(map[*endpointNode]any), - } -} - -// Get returns the value for the address in the map, if present. -func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) { - en := toEndpointNode(e) - if endpoint := em.find(en); endpoint != nil { - return em.endpoints[endpoint], true - } - return nil, false -} - -// Set updates or adds the value to the address in the map. -func (em *EndpointMap) Set(e Endpoint, value any) { - en := toEndpointNode(e) - if endpoint := em.find(en); endpoint != nil { - em.endpoints[endpoint] = value - return - } - em.endpoints[&en] = value -} - -// Len returns the number of entries in the map. -func (em *EndpointMap) Len() int { - return len(em.endpoints) -} - -// Keys returns a slice of all current map keys, as endpoints specifying the -// addresses present in the endpoint keys, in which uniqueness is determined by -// the unordered set of addresses. Thus, endpoint information returned is not -// the full endpoint data (drops duplicated addresses and attributes) but can be -// used for EndpointMap accesses. -func (em *EndpointMap) Keys() []Endpoint { - ret := make([]Endpoint, 0, len(em.endpoints)) - for en := range em.endpoints { - var endpoint Endpoint - for addr := range en.addrs { - endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr}) - } - ret = append(ret, endpoint) - } - return ret -} - -// Values returns a slice of all current map values. -func (em *EndpointMap) Values() []any { - ret := make([]any, 0, len(em.endpoints)) - for _, val := range em.endpoints { - ret = append(ret, val) - } - return ret -} - -// find returns a pointer to the endpoint node in em if the endpoint node is -// already present. If not found, nil is returned. The comparisons are done on -// the unordered set of addresses within an endpoint. -func (em EndpointMap) find(e endpointNode) *endpointNode { - for endpoint := range em.endpoints { - if e.Equal(endpoint) { - return endpoint - } - } - return nil -} - -// Delete removes the specified endpoint from the map. -func (em *EndpointMap) Delete(e Endpoint) { - en := toEndpointNode(e) - if entry := em.find(en); entry != nil { - delete(em.endpoints, entry) - } -} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go deleted file mode 100644 index adf89dd9cf..0000000000 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ /dev/null @@ -1,326 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package resolver defines APIs for name resolution in gRPC.
-// All APIs in this package are experimental.
-package resolver
-
-import (
-	"context"
-	"fmt"
-	"net"
-	"net/url"
-	"strings"
-
-	"google.golang.org/grpc/attributes"
-	"google.golang.org/grpc/credentials"
-	"google.golang.org/grpc/serviceconfig"
-)
-
-var (
-	// m is a map from scheme to resolver builder.
-	m = make(map[string]Builder)
-	// defaultScheme is the default scheme to use.
-	defaultScheme = "passthrough"
-)
-
-// TODO(bar) install dns resolver in init(){}.
-
-// Register registers the resolver builder to the resolver map. b.Scheme will
-// be used as the scheme registered with this builder. The registry is case
-// sensitive, and schemes should not contain any uppercase characters.
-//
-// NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe. If multiple Resolvers are
-// registered with the same name, the one registered last will take effect.
-func Register(b Builder) {
-	m[b.Scheme()] = b
-}
-
-// Get returns the resolver builder registered with the given scheme.
-//
-// If no builder is registered with the scheme, nil will be returned.
-func Get(scheme string) Builder {
-	if b, ok := m[scheme]; ok {
-		return b
-	}
-	return nil
-}
-
-// SetDefaultScheme sets the default scheme that will be used. The default
-// scheme is "passthrough".
-//
-// NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe. The scheme set last overrides
-// previously set values.
-func SetDefaultScheme(scheme string) {
-	defaultScheme = scheme
-}
-
-// GetDefaultScheme gets the default scheme that will be used.
-func GetDefaultScheme() string {
-	return defaultScheme
-}
-
-// Address represents a server the client connects to.
-//
-// # Experimental
-//
-// Notice: This type is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type Address struct {
-	// Addr is the server address on which a connection will be established.
-	Addr string
-
-	// ServerName is the name of this address.
-	// If non-empty, the ServerName is used as the transport certification authority for
-	// the address, instead of the hostname from the Dial target string. In most cases,
-	// this should not be set.
-	//
-	// WARNING: ServerName must only be populated with trusted values. It
-	// is insecure to populate it with data from untrusted inputs since untrusted
-	// values could be used to bypass the authority checks performed by TLS.
-	ServerName string
-
-	// Attributes contains arbitrary data about this address intended for
-	// consumption by the SubConn.
-	Attributes *attributes.Attributes
-
-	// BalancerAttributes contains arbitrary data about this address intended
-	// for consumption by the LB policy. These attributes do not affect SubConn
-	// creation, connection establishment, handshaking, etc.
-	//
-	// Deprecated: when an Address is inside an Endpoint, this field should not
-	// be used, and it will eventually be removed entirely.
- BalancerAttributes *attributes.Attributes - - // Metadata is the information associated with Addr, which may be used - // to make load balancing decision. - // - // Deprecated: use Attributes instead. - Metadata any -} - -// Equal returns whether a and o are identical. Metadata is compared directly, -// not with any recursive introspection. -// -// This method compares all fields of the address. When used to tell apart -// addresses during subchannel creation or connection establishment, it might be -// more appropriate for the caller to implement custom equality logic. -func (a Address) Equal(o Address) bool { - return a.Addr == o.Addr && a.ServerName == o.ServerName && - a.Attributes.Equal(o.Attributes) && - a.BalancerAttributes.Equal(o.BalancerAttributes) && - a.Metadata == o.Metadata -} - -// String returns JSON formatted string representation of the address. -func (a Address) String() string { - var sb strings.Builder - sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr)) - sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName)) - if a.Attributes != nil { - sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String())) - } - if a.BalancerAttributes != nil { - sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String())) - } - sb.WriteString("}") - return sb.String() -} - -// BuildOptions includes additional information for the builder to create -// the resolver. -type BuildOptions struct { - // DisableServiceConfig indicates whether a resolver implementation should - // fetch service config data. - DisableServiceConfig bool - // DialCreds is the transport credentials used by the ClientConn for - // communicating with the target gRPC service (set via - // WithTransportCredentials). In cases where a name resolution service - // requires the same credentials, the resolver may use this field. In most - // cases though, it is not appropriate, and this field may be ignored. - DialCreds credentials.TransportCredentials - // CredsBundle is the credentials bundle used by the ClientConn for - // communicating with the target gRPC service (set via - // WithCredentialsBundle). In cases where a name resolution service - // requires the same credentials, the resolver may use this field. In most - // cases though, it is not appropriate, and this field may be ignored. - CredsBundle credentials.Bundle - // Dialer is the custom dialer used by the ClientConn for dialling the - // target gRPC service (set via WithDialer). In cases where a name - // resolution service requires the same dialer, the resolver may use this - // field. In most cases though, it is not appropriate, and this field may - // be ignored. - Dialer func(context.Context, string) (net.Conn, error) -} - -// An Endpoint is one network endpoint, or server, which may have multiple -// addresses with which it can be accessed. -type Endpoint struct { - // Addresses contains a list of addresses used to access this endpoint. - Addresses []Address - - // Attributes contains arbitrary data about this endpoint intended for - // consumption by the LB policy. - Attributes *attributes.Attributes -} - -// State contains the current Resolver state relevant to the ClientConn. -type State struct { - // Addresses is the latest set of resolved addresses for the target. - // - // If a resolver sets Addresses but does not set Endpoints, one Endpoint - // will be created for each Address before the State is passed to the LB - // policy. 
The BalancerAttributes of each entry in Addresses will be set - // in Endpoints.Attributes, and be cleared in the Endpoint's Address's - // BalancerAttributes. - // - // Soon, Addresses will be deprecated and replaced fully by Endpoints. - Addresses []Address - - // Endpoints is the latest set of resolved endpoints for the target. - // - // If a resolver produces a State containing Endpoints but not Addresses, - // it must take care to ensure the LB policies it selects will support - // Endpoints. - Endpoints []Endpoint - - // ServiceConfig contains the result from parsing the latest service - // config. If it is nil, it indicates no service config is present or the - // resolver does not provide service configs. - ServiceConfig *serviceconfig.ParseResult - - // Attributes contains arbitrary data about the resolver intended for - // consumption by the load balancing policy. - Attributes *attributes.Attributes -} - -// ClientConn contains the callbacks for resolver to notify any updates -// to the gRPC ClientConn. -// -// This interface is to be implemented by gRPC. Users should not need a -// brand new implementation of this interface. For the situations like -// testing, the new implementation should embed this interface. This allows -// gRPC to add new methods to this interface. -type ClientConn interface { - // UpdateState updates the state of the ClientConn appropriately. - // - // If an error is returned, the resolver should try to resolve the - // target again. The resolver should use a backoff timer to prevent - // overloading the server with requests. If a resolver is certain that - // reresolving will not change the result, e.g. because it is - // a watch-based resolver, returned errors can be ignored. - // - // If the resolved State is the same as the last reported one, calling - // UpdateState can be omitted. - UpdateState(State) error - // ReportError notifies the ClientConn that the Resolver encountered an - // error. The ClientConn will notify the load balancer and begin calling - // ResolveNow on the Resolver with exponential backoff. - ReportError(error) - // NewAddress is called by resolver to notify ClientConn a new list - // of resolved addresses. - // The address list should be the complete list of resolved addresses. - // - // Deprecated: Use UpdateState instead. - NewAddress(addresses []Address) - // ParseServiceConfig parses the provided service config and returns an - // object that provides the parsed config. - ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult -} - -// Target represents a target for gRPC, as specified in: -// https://github.com/grpc/grpc/blob/master/doc/naming.md. -// It is parsed from the target string that gets passed into Dial or DialContext -// by the user. And gRPC passes it to the resolver and the balancer. -// -// If the target follows the naming spec, and the parsed scheme is registered -// with gRPC, we will parse the target string according to the spec. If the -// target does not contain a scheme or if the parsed scheme is not registered -// (i.e. no corresponding resolver available to resolve the endpoint), we will -// apply the default scheme, and will attempt to reparse it. -type Target struct { - // URL contains the parsed dial target with an optional default scheme added - // to it if the original dial target contained no scheme or contained an - // unregistered scheme. Any query params specified in the original dial - // target can be accessed from here. 
- URL url.URL -} - -// Endpoint retrieves endpoint without leading "/" from either `URL.Path` -// or `URL.Opaque`. The latter is used when the former is empty. -func (t Target) Endpoint() string { - endpoint := t.URL.Path - if endpoint == "" { - endpoint = t.URL.Opaque - } - // For targets of the form "[scheme]://[authority]/endpoint, the endpoint - // value returned from url.Parse() contains a leading "/". Although this is - // in accordance with RFC 3986, we do not want to break existing resolver - // implementations which expect the endpoint without the leading "/". So, we - // end up stripping the leading "/" here. But this will result in an - // incorrect parsing for something like "unix:///path/to/socket". Since we - // own the "unix" resolver, we can workaround in the unix resolver by using - // the `URL` field. - return strings.TrimPrefix(endpoint, "/") -} - -// String returns a string representation of Target. -func (t Target) String() string { - return t.URL.String() -} - -// Builder creates a resolver that will be used to watch name resolution updates. -type Builder interface { - // Build creates a new resolver for the given target. - // - // gRPC dial calls Build synchronously, and fails if the returned error is - // not nil. - Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error) - // Scheme returns the scheme supported by this resolver. Scheme is defined - // at https://github.com/grpc/grpc/blob/master/doc/naming.md. The returned - // string should not contain uppercase characters, as they will not match - // the parsed target's scheme as defined in RFC 3986. - Scheme() string -} - -// ResolveNowOptions includes additional information for ResolveNow. -type ResolveNowOptions struct{} - -// Resolver watches for the updates on the specified target. -// Updates include address updates and service config updates. -type Resolver interface { - // ResolveNow will be called by gRPC to try to resolve the target name - // again. It's just a hint, resolver can ignore this if it's not necessary. - // - // It could be called multiple times concurrently. - ResolveNow(ResolveNowOptions) - // Close closes the resolver. - Close() -} - -// AuthorityOverrider is implemented by Builders that wish to override the -// default authority for the ClientConn. -// By default, the authority used is target.Endpoint(). -type AuthorityOverrider interface { - // OverrideAuthority returns the authority to use for a ClientConn with the - // given target. The implementation must generate it without blocking, - // typically in line, and must keep it unchanged. - OverrideAuthority(Target) string -} diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go deleted file mode 100644 index c79bab1214..0000000000 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ /dev/null @@ -1,197 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package grpc - -import ( - "context" - "strings" - "sync" - - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" -) - -// ccResolverWrapper is a wrapper on top of cc for resolvers. -// It implements resolver.ClientConn interface. -type ccResolverWrapper struct { - // The following fields are initialized when the wrapper is created and are - // read-only afterwards, and therefore can be accessed without a mutex. - cc *ClientConn - ignoreServiceConfig bool - serializer *grpcsync.CallbackSerializer - serializerCancel context.CancelFunc - - resolver resolver.Resolver // only accessed within the serializer - - // The following fields are protected by mu. Caller must take cc.mu before - // taking mu. - mu sync.Mutex - curState resolver.State - closed bool -} - -// newCCResolverWrapper initializes the ccResolverWrapper. It can only be used -// after calling start, which builds the resolver. -func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { - ctx, cancel := context.WithCancel(cc.ctx) - return &ccResolverWrapper{ - cc: cc, - ignoreServiceConfig: cc.dopts.disableServiceConfig, - serializer: grpcsync.NewCallbackSerializer(ctx), - serializerCancel: cancel, - } -} - -// start builds the name resolver using the resolver.Builder in cc and returns -// any error encountered. It must always be the first operation performed on -// any newly created ccResolverWrapper, except that close may be called instead. -func (ccr *ccResolverWrapper) start() error { - errCh := make(chan error) - ccr.serializer.Schedule(func(ctx context.Context) { - if ctx.Err() != nil { - return - } - opts := resolver.BuildOptions{ - DisableServiceConfig: ccr.cc.dopts.disableServiceConfig, - DialCreds: ccr.cc.dopts.copts.TransportCredentials, - CredsBundle: ccr.cc.dopts.copts.CredsBundle, - Dialer: ccr.cc.dopts.copts.Dialer, - } - var err error - ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) - errCh <- err - }) - return <-errCh -} - -func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.serializer.Schedule(func(ctx context.Context) { - if ctx.Err() != nil || ccr.resolver == nil { - return - } - ccr.resolver.ResolveNow(o) - }) -} - -// close initiates async shutdown of the wrapper. To determine the wrapper has -// finished shutting down, the channel should block on ccr.serializer.Done() -// without cc.mu held. -func (ccr *ccResolverWrapper) close() { - channelz.Info(logger, ccr.cc.channelzID, "Closing the name resolver") - ccr.mu.Lock() - ccr.closed = true - ccr.mu.Unlock() - - ccr.serializer.Schedule(func(context.Context) { - if ccr.resolver == nil { - return - } - ccr.resolver.Close() - ccr.resolver = nil - }) - ccr.serializerCancel() -} - -// UpdateState is called by resolver implementations to report new state to gRPC -// which includes addresses and service config. 
-func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { - ccr.cc.mu.Lock() - ccr.mu.Lock() - if ccr.closed { - ccr.mu.Unlock() - ccr.cc.mu.Unlock() - return nil - } - if s.Endpoints == nil { - s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) - for _, a := range s.Addresses { - ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} - ep.Addresses[0].BalancerAttributes = nil - s.Endpoints = append(s.Endpoints, ep) - } - } - ccr.addChannelzTraceEvent(s) - ccr.curState = s - ccr.mu.Unlock() - return ccr.cc.updateResolverStateAndUnlock(s, nil) -} - -// ReportError is called by resolver implementations to report errors -// encountered during name resolution to gRPC. -func (ccr *ccResolverWrapper) ReportError(err error) { - ccr.cc.mu.Lock() - ccr.mu.Lock() - if ccr.closed { - ccr.mu.Unlock() - ccr.cc.mu.Unlock() - return - } - ccr.mu.Unlock() - channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err) -} - -// NewAddress is called by the resolver implementation to send addresses to -// gRPC. -func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - ccr.cc.mu.Lock() - ccr.mu.Lock() - if ccr.closed { - ccr.mu.Unlock() - ccr.cc.mu.Unlock() - return - } - s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig} - ccr.addChannelzTraceEvent(s) - ccr.curState = s - ccr.mu.Unlock() - ccr.cc.updateResolverStateAndUnlock(s, nil) -} - -// ParseServiceConfig is called by resolver implementations to parse a JSON -// representation of the service config. -func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { - return parseServiceConfig(scJSON) -} - -// addChannelzTraceEvent adds a channelz trace event containing the new -// state received from resolver implementations. -func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { - var updates []string - var oldSC, newSC *ServiceConfig - var oldOK, newOK bool - if ccr.curState.ServiceConfig != nil { - oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) - } - if s.ServiceConfig != nil { - newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) - } - if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { - updates = append(updates, "service config updated") - } - if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { - updates = append(updates, "resolver returned an empty address list") - } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { - updates = append(updates, "resolver returned new addresses") - } - channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) -} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go deleted file mode 100644 index d17ede0fa4..0000000000 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ /dev/null @@ -1,978 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "bytes" - "compress/gzip" - "context" - "encoding/binary" - "fmt" - "io" - "math" - "strings" - "sync" - "time" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/encoding" - "google.golang.org/grpc/encoding/proto" - "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/stats" - "google.golang.org/grpc/status" -) - -// Compressor defines the interface gRPC uses to compress a message. -// -// Deprecated: use package encoding. -type Compressor interface { - // Do compresses p into w. - Do(w io.Writer, p []byte) error - // Type returns the compression algorithm the Compressor uses. - Type() string -} - -type gzipCompressor struct { - pool sync.Pool -} - -// NewGZIPCompressor creates a Compressor based on GZIP. -// -// Deprecated: use package encoding/gzip. -func NewGZIPCompressor() Compressor { - c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression) - return c -} - -// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead -// of assuming DefaultCompression. -// -// The error returned will be nil if the level is valid. -// -// Deprecated: use package encoding/gzip. -func NewGZIPCompressorWithLevel(level int) (Compressor, error) { - if level < gzip.DefaultCompression || level > gzip.BestCompression { - return nil, fmt.Errorf("grpc: invalid compression level: %d", level) - } - return &gzipCompressor{ - pool: sync.Pool{ - New: func() any { - w, err := gzip.NewWriterLevel(io.Discard, level) - if err != nil { - panic(err) - } - return w - }, - }, - }, nil -} - -func (c *gzipCompressor) Do(w io.Writer, p []byte) error { - z := c.pool.Get().(*gzip.Writer) - defer c.pool.Put(z) - z.Reset(w) - if _, err := z.Write(p); err != nil { - return err - } - return z.Close() -} - -func (c *gzipCompressor) Type() string { - return "gzip" -} - -// Decompressor defines the interface gRPC uses to decompress a message. -// -// Deprecated: use package encoding. -type Decompressor interface { - // Do reads the data from r and uncompress them. - Do(r io.Reader) ([]byte, error) - // Type returns the compression algorithm the Decompressor uses. - Type() string -} - -type gzipDecompressor struct { - pool sync.Pool -} - -// NewGZIPDecompressor creates a Decompressor based on GZIP. -// -// Deprecated: use package encoding/gzip. -func NewGZIPDecompressor() Decompressor { - return &gzipDecompressor{} -} - -func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { - var z *gzip.Reader - switch maybeZ := d.pool.Get().(type) { - case nil: - newZ, err := gzip.NewReader(r) - if err != nil { - return nil, err - } - z = newZ - case *gzip.Reader: - z = maybeZ - if err := z.Reset(r); err != nil { - d.pool.Put(z) - return nil, err - } - } - - defer func() { - z.Close() - d.pool.Put(z) - }() - return io.ReadAll(z) -} - -func (d *gzipDecompressor) Type() string { - return "gzip" -} - -// callInfo contains all related configuration and information about an RPC. 
-type callInfo struct { - compressorType string - failFast bool - maxReceiveMessageSize *int - maxSendMessageSize *int - creds credentials.PerRPCCredentials - contentSubtype string - codec baseCodec - maxRetryRPCBufferSize int - onFinish []func(err error) -} - -func defaultCallInfo() *callInfo { - return &callInfo{ - failFast: true, - maxRetryRPCBufferSize: 256 * 1024, // 256KB - } -} - -// CallOption configures a Call before it starts or extracts information from -// a Call after it completes. -type CallOption interface { - // before is called before the call is sent to any server. If before - // returns a non-nil error, the RPC fails with that error. - before(*callInfo) error - - // after is called after the call has completed. after cannot return an - // error, so any failures should be reported via output parameters. - after(*callInfo, *csAttempt) -} - -// EmptyCallOption does not alter the Call configuration. -// It can be embedded in another structure to carry satellite data for use -// by interceptors. -type EmptyCallOption struct{} - -func (EmptyCallOption) before(*callInfo) error { return nil } -func (EmptyCallOption) after(*callInfo, *csAttempt) {} - -// StaticMethod returns a CallOption which specifies that a call is being made -// to a method that is static, which means the method is known at compile time -// and doesn't change at runtime. This can be used as a signal to stats plugins -// that this method is safe to include as a key to a measurement. -func StaticMethod() CallOption { - return StaticMethodCallOption{} -} - -// StaticMethodCallOption is a CallOption that specifies that a call comes -// from a static method. -type StaticMethodCallOption struct { - EmptyCallOption -} - -// Header returns a CallOptions that retrieves the header metadata -// for a unary RPC. -func Header(md *metadata.MD) CallOption { - return HeaderCallOption{HeaderAddr: md} -} - -// HeaderCallOption is a CallOption for collecting response header metadata. -// The metadata field will be populated *after* the RPC completes. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type HeaderCallOption struct { - HeaderAddr *metadata.MD -} - -func (o HeaderCallOption) before(c *callInfo) error { return nil } -func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) { - *o.HeaderAddr, _ = attempt.s.Header() -} - -// Trailer returns a CallOptions that retrieves the trailer metadata -// for a unary RPC. -func Trailer(md *metadata.MD) CallOption { - return TrailerCallOption{TrailerAddr: md} -} - -// TrailerCallOption is a CallOption for collecting response trailer metadata. -// The metadata field will be populated *after* the RPC completes. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type TrailerCallOption struct { - TrailerAddr *metadata.MD -} - -func (o TrailerCallOption) before(c *callInfo) error { return nil } -func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) { - *o.TrailerAddr = attempt.s.Trailer() -} - -// Peer returns a CallOption that retrieves peer information for a unary RPC. -// The peer field will be populated *after* the RPC completes. -func Peer(p *peer.Peer) CallOption { - return PeerCallOption{PeerAddr: p} -} - -// PeerCallOption is a CallOption for collecting the identity of the remote -// peer. The peer field will be populated *after* the RPC completes. 
-// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type PeerCallOption struct { - PeerAddr *peer.Peer -} - -func (o PeerCallOption) before(c *callInfo) error { return nil } -func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { - if x, ok := peer.FromContext(attempt.s.Context()); ok { - *o.PeerAddr = *x - } -} - -// WaitForReady configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. If waitForReady is false and the -// connection is in the TRANSIENT_FAILURE state, the RPC will fail -// immediately. Otherwise, the RPC client will block the call until a -// connection is available (or the call is canceled or times out) and will -// retry the call if it fails due to a transient error. gRPC will not retry if -// data was written to the wire unless the server indicates it did not process -// the data. Please refer to -// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. -// -// By default, RPCs don't "wait for ready". -func WaitForReady(waitForReady bool) CallOption { - return FailFastCallOption{FailFast: !waitForReady} -} - -// FailFast is the opposite of WaitForReady. -// -// Deprecated: use WaitForReady. -func FailFast(failFast bool) CallOption { - return FailFastCallOption{FailFast: failFast} -} - -// FailFastCallOption is a CallOption for indicating whether an RPC should fail -// fast or not. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type FailFastCallOption struct { - FailFast bool -} - -func (o FailFastCallOption) before(c *callInfo) error { - c.failFast = o.FailFast - return nil -} -func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} - -// OnFinish returns a CallOption that configures a callback to be called when -// the call completes. The error passed to the callback is the status of the -// RPC, and may be nil. The onFinish callback provided will only be called once -// by gRPC. This is mainly used to be used by streaming interceptors, to be -// notified when the RPC completes along with information about the status of -// the RPC. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func OnFinish(onFinish func(err error)) CallOption { - return OnFinishCallOption{ - OnFinish: onFinish, - } -} - -// OnFinishCallOption is CallOption that indicates a callback to be called when -// the call completes. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type OnFinishCallOption struct { - OnFinish func(error) -} - -func (o OnFinishCallOption) before(c *callInfo) error { - c.onFinish = append(c.onFinish, o.OnFinish) - return nil -} - -func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {} - -// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size -// in bytes the client can receive. If this is not set, gRPC uses the default -// 4MB. -func MaxCallRecvMsgSize(bytes int) CallOption { - return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} -} - -// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message -// size in bytes the client can receive. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. 
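The CallOptions deleted in this file are applied per RPC. A hedged usage sketch follows: the target address, method path, and empty placeholder messages are hypothetical, and a real client would call generated stubs with proto messages rather than raw Invoke (so this run ends in a marshal/timeout error, which the sketch just logs):

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/metadata"
)

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	var hdr metadata.MD
	var in, out struct{} // stand-ins for generated request/response types
	err = conn.Invoke(ctx, "/example.Echo/Ping", &in, &out,
		grpc.WaitForReady(true),        // block through TRANSIENT_FAILURE
		grpc.MaxCallRecvMsgSize(1<<20), // cap the response at 1 MiB
		grpc.Header(&hdr),              // populated only after the RPC completes
		grpc.OnFinish(func(err error) { log.Printf("rpc finished: %v", err) }),
	)
	log.Printf("status: %v, header: %v", err, hdr)
}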
-type MaxRecvMsgSizeCallOption struct { - MaxRecvMsgSize int -} - -func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { - c.maxReceiveMessageSize = &o.MaxRecvMsgSize - return nil -} -func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} - -// MaxCallSendMsgSize returns a CallOption which sets the maximum message size -// in bytes the client can send. If this is not set, gRPC uses the default -// `math.MaxInt32`. -func MaxCallSendMsgSize(bytes int) CallOption { - return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} -} - -// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message -// size in bytes the client can send. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type MaxSendMsgSizeCallOption struct { - MaxSendMsgSize int -} - -func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { - c.maxSendMessageSize = &o.MaxSendMsgSize - return nil -} -func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} - -// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials -// for a call. -func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { - return PerRPCCredsCallOption{Creds: creds} -} - -// PerRPCCredsCallOption is a CallOption that indicates the per-RPC -// credentials to use for the call. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type PerRPCCredsCallOption struct { - Creds credentials.PerRPCCredentials -} - -func (o PerRPCCredsCallOption) before(c *callInfo) error { - c.creds = o.Creds - return nil -} -func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} - -// UseCompressor returns a CallOption which sets the compressor used when -// sending the request. If WithCompressor is also set, UseCompressor has -// higher priority. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func UseCompressor(name string) CallOption { - return CompressorCallOption{CompressorType: name} -} - -// CompressorCallOption is a CallOption that indicates the compressor to use. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type CompressorCallOption struct { - CompressorType string -} - -func (o CompressorCallOption) before(c *callInfo) error { - c.compressorType = o.CompressorType - return nil -} -func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {} - -// CallContentSubtype returns a CallOption that will set the content-subtype -// for a call. For example, if content-subtype is "json", the Content-Type over -// the wire will be "application/grpc+json". The content-subtype is converted -// to lowercase before being included in Content-Type. See Content-Type on -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. -// -// If ForceCodec is not also used, the content-subtype will be used to look up -// the Codec to use in the registry controlled by RegisterCodec. See the -// documentation on RegisterCodec for details on registration. The lookup of -// content-subtype is case-insensitive. If no such Codec is found, the call -// will result in an error with code codes.Internal. 
-// -// If ForceCodec is also used, that Codec will be used for all request and -// response messages, with the content-subtype set to the given contentSubtype -// here for requests. -func CallContentSubtype(contentSubtype string) CallOption { - return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)} -} - -// ContentSubtypeCallOption is a CallOption that indicates the content-subtype -// used for marshaling messages. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type ContentSubtypeCallOption struct { - ContentSubtype string -} - -func (o ContentSubtypeCallOption) before(c *callInfo) error { - c.contentSubtype = o.ContentSubtype - return nil -} -func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} - -// ForceCodec returns a CallOption that will set codec to be used for all -// request and response messages for a call. The result of calling Name() will -// be used as the content-subtype after converting to lowercase, unless -// CallContentSubtype is also used. -// -// See Content-Type on -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. Also see the documentation on RegisterCodec and -// CallContentSubtype for more details on the interaction between Codec and -// content-subtype. -// -// This function is provided for advanced users; prefer to use only -// CallContentSubtype to select a registered codec instead. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func ForceCodec(codec encoding.Codec) CallOption { - return ForceCodecCallOption{Codec: codec} -} - -// ForceCodecCallOption is a CallOption that indicates the codec used for -// marshaling messages. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type ForceCodecCallOption struct { - Codec encoding.Codec -} - -func (o ForceCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec - return nil -} -func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {} - -// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of -// an encoding.Codec. -// -// Deprecated: use ForceCodec instead. -func CallCustomCodec(codec Codec) CallOption { - return CustomCodecCallOption{Codec: codec} -} - -// CustomCodecCallOption is a CallOption that indicates the codec used for -// marshaling messages. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type CustomCodecCallOption struct { - Codec Codec -} - -func (o CustomCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec - return nil -} -func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} - -// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory -// used for buffering this RPC's requests for retry purposes. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func MaxRetryRPCBufferSize(bytes int) CallOption { - return MaxRetryRPCBufferSizeCallOption{bytes} -} - -// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of -// memory to be used for caching this RPC for retry purposes. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. 
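CallContentSubtype's lookup resolves against codecs registered with encoding.RegisterCodec. A small sketch of a codec that would answer to content-subtype "json" (a toy JSON codec for illustration, not gRPC's production JSON support):

package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/grpc/encoding"
)

// jsonCodec is registered under the name "json", so calls made with
// CallContentSubtype("json") resolve to it and put
// "application/grpc+json" on the wire as the Content-Type.
type jsonCodec struct{}

func (jsonCodec) Marshal(v any) ([]byte, error)      { return json.Marshal(v) }
func (jsonCodec) Unmarshal(data []byte, v any) error { return json.Unmarshal(data, v) }
func (jsonCodec) Name() string                       { return "json" }

func init() {
	encoding.RegisterCodec(jsonCodec{})
}

func main() {
	c := encoding.GetCodec("json") // the registry lookup described above
	b, _ := c.Marshal(map[string]string{"msg": "hi"})
	fmt.Println(string(b))
}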
-type MaxRetryRPCBufferSizeCallOption struct { - MaxRetryRPCBufferSize int -} - -func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { - c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize - return nil -} -func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {} - -// The format of the payload: compressed or not? -type payloadFormat uint8 - -const ( - compressionNone payloadFormat = 0 // no compression - compressionMade payloadFormat = 1 // compressed -) - -// parser reads complete gRPC messages from the underlying reader. -type parser struct { - // r is the underlying reader. - // See the comment on recvMsg for the permissible - // error types. - r io.Reader - - // The header of a gRPC message. Find more detail at - // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md - header [5]byte - - // recvBufferPool is the pool of shared receive buffers. - recvBufferPool SharedBufferPool -} - -// recvMsg reads a complete gRPC message from the stream. -// -// It returns the message and its payload (compression/encoding) -// format. The caller owns the returned msg memory. -// -// If there is an error, possible values are: -// - io.EOF, when no messages remain -// - io.ErrUnexpectedEOF -// - of type transport.ConnectionError -// - an error from the status package -// -// No other error values or types must be returned, which also means -// that the underlying io.Reader must not return an incompatible -// error. -func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { - if _, err := p.r.Read(p.header[:]); err != nil { - return 0, nil, err - } - - pf = payloadFormat(p.header[0]) - length := binary.BigEndian.Uint32(p.header[1:]) - - if length == 0 { - return pf, nil, nil - } - if int64(length) > int64(maxInt) { - return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) - } - if int(length) > maxReceiveMessageSize { - return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) - } - msg = p.recvBufferPool.Get(int(length)) - if _, err := p.r.Read(msg); err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return 0, nil, err - } - return pf, msg, nil -} - -// encode serializes msg and returns a buffer containing the message, or an -// error if it is too large to be transmitted by grpc. If msg is nil, it -// generates an empty message. -func encode(c baseCodec, msg any) ([]byte, error) { - if msg == nil { // NOTE: typed nils will not be caught by this check - return nil, nil - } - b, err := c.Marshal(msg) - if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) - } - if uint(len(b)) > math.MaxUint32 { - return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) - } - return b, nil -} - -// compress returns the input bytes compressed by compressor or cp. -// If both compressors are nil, or if the message has zero length, returns nil, -// indicating no compression was done. -// -// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. 
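The parser deleted above reads the 5-byte message prefix defined by the gRPC HTTP/2 protocol: one payload-format byte (compressed or not) followed by a big-endian uint32 length. A self-contained sketch of producing that prefix, matching the layout msgHeader writes:

package main

import (
	"encoding/binary"
	"fmt"
)

// frame prepends the 5-byte gRPC message prefix: byte 0 is the
// compressed-flag (0 = compressionNone, 1 = compressionMade), bytes
// 1-4 are the big-endian payload length.
func frame(payload []byte, compressed bool) []byte {
	hdr := make([]byte, 5, 5+len(payload))
	if compressed {
		hdr[0] = 1
	}
	binary.BigEndian.PutUint32(hdr[1:], uint32(len(payload)))
	return append(hdr, payload...)
}

func main() {
	msg := frame([]byte("abc"), false)
	fmt.Printf("flag=%d length=%d body=%q\n",
		msg[0], binary.BigEndian.Uint32(msg[1:5]), msg[5:])
}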
-func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { - if compressor == nil && cp == nil { - return nil, nil - } - if len(in) == 0 { - return nil, nil - } - wrapErr := func(err error) error { - return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) - } - cbuf := &bytes.Buffer{} - if compressor != nil { - z, err := compressor.Compress(cbuf) - if err != nil { - return nil, wrapErr(err) - } - if _, err := z.Write(in); err != nil { - return nil, wrapErr(err) - } - if err := z.Close(); err != nil { - return nil, wrapErr(err) - } - } else { - if err := cp.Do(cbuf, in); err != nil { - return nil, wrapErr(err) - } - } - return cbuf.Bytes(), nil -} - -const ( - payloadLen = 1 - sizeLen = 4 - headerLen = payloadLen + sizeLen -) - -// msgHeader returns a 5-byte header for the message being transmitted and the -// payload, which is compData if non-nil or data otherwise. -func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { - hdr = make([]byte, headerLen) - if compData != nil { - hdr[0] = byte(compressionMade) - data = compData - } else { - hdr[0] = byte(compressionNone) - } - - // Write length of payload into buf - binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) - return hdr, data -} - -func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { - return &stats.OutPayload{ - Client: client, - Payload: msg, - Data: data, - Length: len(data), - WireLength: len(payload) + headerLen, - CompressedLength: len(payload), - SentTime: t, - } -} - -func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { - switch pf { - case compressionNone: - case compressionMade: - if recvCompress == "" || recvCompress == encoding.Identity { - return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") - } - if !haveCompressor { - return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) - } - default: - return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) - } - return nil -} - -type payloadInfo struct { - compressedLength int // The compressed length got from wire. - uncompressedBytes []byte -} - -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { - pf, buf, err := p.recvMsg(maxReceiveMessageSize) - if err != nil { - return nil, err - } - if payInfo != nil { - payInfo.compressedLength = len(buf) - } - - if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { - return nil, st.Err() - } - - var size int - if pf == compressionMade { - // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, - // use this decompressor as the default. - if dc != nil { - buf, err = dc.Do(bytes.NewReader(buf)) - size = len(buf) - } else { - buf, size, err = decompress(compressor, buf, maxReceiveMessageSize) - } - if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) - } - if size > maxReceiveMessageSize { - // TODO: Revisit the error code. Currently keep it consistent with java - // implementation. - return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. 
%d)", size, maxReceiveMessageSize) - } - } - return buf, nil -} - -// Using compressor, decompress d, returning data and size. -// Optionally, if data will be over maxReceiveMessageSize, just return the size. -func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) { - dcReader, err := compressor.Decompress(bytes.NewReader(d)) - if err != nil { - return nil, 0, err - } - if sizer, ok := compressor.(interface { - DecompressedSize(compressedBytes []byte) int - }); ok { - if size := sizer.DecompressedSize(d); size >= 0 { - if size > maxReceiveMessageSize { - return nil, size, nil - } - // size is used as an estimate to size the buffer, but we - // will read more data if available. - // +MinRead so ReadFrom will not reallocate if size is correct. - buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) - bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - return buf.Bytes(), int(bytesRead), err - } - } - // Read from LimitReader with limit max+1. So if the underlying - // reader is over limit, the result will be bigger than max. - d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - return d, len(d), err -} - -// For the two compressor parameters, both should not be set, but if they are, -// dc takes precedence over compressor. -// TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) - if err != nil { - return err - } - if err := c.Unmarshal(buf, m); err != nil { - return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) - } - if payInfo != nil { - payInfo.uncompressedBytes = buf - } else { - p.recvBufferPool.Put(&buf) - } - return nil -} - -// Information about RPC -type rpcInfo struct { - failfast bool - preloaderInfo *compressorInfo -} - -// Information about Preloader -// Responsible for storing codec, and compressors -// If stream (s) has context s.Context which stores rpcInfo that has non nil -// pointers to codec, and compressors, then we can use preparedMsg for Async message prep -// and reuse marshalled bytes -type compressorInfo struct { - codec baseCodec - cp Compressor - comp encoding.Compressor -} - -type rpcInfoContextKey struct{} - -func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context { - return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{ - failfast: failfast, - preloaderInfo: &compressorInfo{ - codec: codec, - cp: cp, - comp: comp, - }, - }) -} - -func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { - s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo) - return -} - -// Code returns the error code for err if it was produced by the rpc system. -// Otherwise, it returns codes.Unknown. -// -// Deprecated: use status.Code instead. -func Code(err error) codes.Code { - return status.Code(err) -} - -// ErrorDesc returns the error description of err if it was produced by the rpc system. -// Otherwise, it returns err.Error() or empty string when err is nil. -// -// Deprecated: use status.Convert and Message method instead. 
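decompress above caps memory by reading through io.LimitReader with limit max+1, so a result longer than max proves the input was oversized without ever buffering the whole stream. The same trick in isolation, under illustrative names:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// readCapped buffers at most max+1 bytes from r; getting back more
// than max bytes tells the caller the stream exceeded the limit
// without consuming or allocating the rest of it.
func readCapped(r io.Reader, max int) ([]byte, error) {
	data, err := io.ReadAll(io.LimitReader(r, int64(max)+1))
	if err != nil {
		return nil, err
	}
	if len(data) > max {
		return nil, fmt.Errorf("message larger than max (%d vs. %d)", len(data), max)
	}
	return data, nil
}

func main() {
	_, err := readCapped(strings.NewReader(strings.Repeat("x", 100)), 10)
	fmt.Println(err) // reports oversize after reading only 11 bytes

	data, _ := readCapped(bytes.NewReader([]byte("small")), 10)
	fmt.Printf("%q\n", data)
}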
-func ErrorDesc(err error) string { - return status.Convert(err).Message() -} - -// Errorf returns an error containing an error code and a description; -// Errorf returns nil if c is OK. -// -// Deprecated: use status.Errorf instead. -func Errorf(c codes.Code, format string, a ...any) error { - return status.Errorf(c, format, a...) -} - -var errContextCanceled = status.Error(codes.Canceled, context.Canceled.Error()) -var errContextDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error()) - -// toRPCErr converts an error into an error from the status package. -func toRPCErr(err error) error { - switch err { - case nil, io.EOF: - return err - case context.DeadlineExceeded: - return errContextDeadline - case context.Canceled: - return errContextCanceled - case io.ErrUnexpectedEOF: - return status.Error(codes.Internal, err.Error()) - } - - switch e := err.(type) { - case transport.ConnectionError: - return status.Error(codes.Unavailable, e.Desc) - case *transport.NewStreamError: - return toRPCErr(e.Err) - } - - if _, ok := status.FromError(err); ok { - return err - } - - return status.Error(codes.Unknown, err.Error()) -} - -// setCallInfoCodec should only be called after CallOptions have been applied. -func setCallInfoCodec(c *callInfo) error { - if c.codec != nil { - // codec was already set by a CallOption; use it, but set the content - // subtype if it is not set. - if c.contentSubtype == "" { - // c.codec is a baseCodec to hide the difference between grpc.Codec and - // encoding.Codec (Name vs. String method name). We only support - // setting content subtype from encoding.Codec to avoid a behavior - // change with the deprecated version. - if ec, ok := c.codec.(encoding.Codec); ok { - c.contentSubtype = strings.ToLower(ec.Name()) - } - } - return nil - } - - if c.contentSubtype == "" { - // No codec specified in CallOptions; use proto by default. - c.codec = encoding.GetCodec(proto.Name) - return nil - } - - // c.contentSubtype is already lowercased in CallContentSubtype - c.codec = encoding.GetCodec(c.contentSubtype) - if c.codec == nil { - return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) - } - return nil -} - -// channelzData is used to store channelz related data for ClientConn, addrConn and Server. -// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic -// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. -// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. -type channelzData struct { - callsStarted int64 - callsFailed int64 - callsSucceeded int64 - // lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of - // time.Time since it's more costly to atomically update time.Time variable than int64 variable. - lastCallStartedTime int64 -} - -// The SupportPackageIsVersion variables are referenced from generated protocol -// buffer files to ensure compatibility with the gRPC version used. The latest -// support package version is 7. -// -// Older versions are kept for compatibility. -// -// These constants should not be referenced from any other code. 
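toRPCErr's shape, normalizing arbitrary errors into status-package errors, is straightforward to mirror in application code. A hedged sketch of the same mapping (the transport-specific cases are omitted since they rely on internal types):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// toStatus maps well-known sentinel errors onto canonical codes,
// passes through errors that already carry a status, and falls back
// to codes.Unknown, mirroring the deleted toRPCErr.
func toStatus(err error) error {
	switch err {
	case nil:
		return nil
	case context.DeadlineExceeded:
		return status.Error(codes.DeadlineExceeded, err.Error())
	case context.Canceled:
		return status.Error(codes.Canceled, err.Error())
	}
	if _, ok := status.FromError(err); ok {
		return err // already a status error
	}
	return status.Error(codes.Unknown, err.Error())
}

func main() {
	err := toStatus(context.DeadlineExceeded)
	fmt.Println(status.Code(err)) // DeadlineExceeded
}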
-const ( - SupportPackageIsVersion3 = true - SupportPackageIsVersion4 = true - SupportPackageIsVersion5 = true - SupportPackageIsVersion6 = true - SupportPackageIsVersion7 = true - SupportPackageIsVersion8 = true -) - -const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go deleted file mode 100644 index 0bf5c78b0d..0000000000 --- a/vendor/google.golang.org/grpc/server.go +++ /dev/null @@ -1,2206 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "errors" - "fmt" - "io" - "math" - "net" - "net/http" - "reflect" - "runtime" - "strings" - "sync" - "sync/atomic" - "time" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/encoding" - "google.golang.org/grpc/encoding/proto" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/binarylog" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/grpcutil" - "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/stats" - "google.golang.org/grpc/status" - "google.golang.org/grpc/tap" -) - -const ( - defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 - defaultServerMaxSendMessageSize = math.MaxInt32 - - // Server transports are tracked in a map which is keyed on listener - // address. For regular gRPC traffic, connections are accepted in Serve() - // through a call to Accept(), and we use the actual listener address as key - // when we add it to the map. But for connections received through - // ServeHTTP(), we do not have a listener and hence use this dummy value. - listenerAddressForServeHTTP = "listenerAddressForServeHTTP" -) - -func init() { - internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { - return srv.opts.creds - } - internal.IsRegisteredMethod = func(srv *Server, method string) bool { - return srv.isRegisteredMethod(method) - } - internal.ServerFromContext = serverFromContext - internal.AddGlobalServerOptions = func(opt ...ServerOption) { - globalServerOptions = append(globalServerOptions, opt...) - } - internal.ClearGlobalServerOptions = func() { - globalServerOptions = nil - } - internal.BinaryLogger = binaryLogger - internal.JoinServerOptions = newJoinServerOption - internal.RecvBufferPool = recvBufferPool -} - -var statusOK = status.New(codes.OK, "") -var logger = grpclog.Component("core") - -type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) - -// MethodDesc represents an RPC service's method specification. -type MethodDesc struct { - MethodName string - Handler methodHandler -} - -// ServiceDesc represents an RPC service's specification. 
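MethodDesc and ServiceDesc are normally emitted by protoc-gen-go-grpc; a hand-rolled sketch shows the plumbing they describe. Everything here is hypothetical ("example.Echo", echoServer, *string messages), and because the default proto codec cannot marshal *string this only demonstrates registration, not a servable method:

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
)

type echoServer struct{}

func (echoServer) Ping(ctx context.Context, in *string) (*string, error) {
	out := "pong: " + *in
	return &out, nil
}

var echoDesc = grpc.ServiceDesc{
	ServiceName: "example.Echo", // hypothetical service name
	// HandlerType lets RegisterService verify the implementation
	// satisfies the service interface via reflection.
	HandlerType: (*interface {
		Ping(context.Context, *string) (*string, error)
	})(nil),
	Methods: []grpc.MethodDesc{{
		MethodName: "Ping",
		// Handler has the methodHandler signature shown above: decode
		// into a fresh request, then dispatch to the implementation.
		Handler: func(srv any, ctx context.Context, dec func(any) error, _ grpc.UnaryServerInterceptor) (any, error) {
			in := new(string)
			if err := dec(in); err != nil {
				return nil, err
			}
			return srv.(echoServer).Ping(ctx, in)
		},
	}},
}

func main() {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	s.RegisterService(&echoDesc, echoServer{})
	log.Printf("serving on %v", lis.Addr())
	log.Fatal(s.Serve(lis))
}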
-type ServiceDesc struct { - ServiceName string - // The pointer to the service interface. Used to check whether the user - // provided implementation satisfies the interface requirements. - HandlerType any - Methods []MethodDesc - Streams []StreamDesc - Metadata any -} - -// serviceInfo wraps information about a service. It is very similar to -// ServiceDesc and is constructed from it for internal purposes. -type serviceInfo struct { - // Contains the implementation for the methods in this service. - serviceImpl any - methods map[string]*MethodDesc - streams map[string]*StreamDesc - mdata any -} - -// Server is a gRPC server to serve RPC requests. -type Server struct { - opts serverOptions - - mu sync.Mutex // guards following - lis map[net.Listener]bool - // conns contains all active server transports. It is a map keyed on a - // listener address with the value being the set of active transports - // belonging to that listener. - conns map[string]map[transport.ServerTransport]bool - serve bool - drain bool - cv *sync.Cond // signaled when connections close for GracefulStop - services map[string]*serviceInfo // service name -> service info - events traceEventLog - - quit *grpcsync.Event - done *grpcsync.Event - channelzRemoveOnce sync.Once - serveWG sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop - handlersWG sync.WaitGroup // counts active method handler goroutines - - channelzID *channelz.Identifier - czData *channelzData - - serverWorkerChannel chan func() - serverWorkerChannelClose func() -} - -type serverOptions struct { - creds credentials.TransportCredentials - codec baseCodec - cp Compressor - dc Decompressor - unaryInt UnaryServerInterceptor - streamInt StreamServerInterceptor - chainUnaryInts []UnaryServerInterceptor - chainStreamInts []StreamServerInterceptor - binaryLogger binarylog.Logger - inTapHandle tap.ServerInHandle - statsHandlers []stats.Handler - maxConcurrentStreams uint32 - maxReceiveMessageSize int - maxSendMessageSize int - unknownStreamDesc *StreamDesc - keepaliveParams keepalive.ServerParameters - keepalivePolicy keepalive.EnforcementPolicy - initialWindowSize int32 - initialConnWindowSize int32 - writeBufferSize int - readBufferSize int - sharedWriteBuffer bool - connectionTimeout time.Duration - maxHeaderListSize *uint32 - headerTableSize *uint32 - numServerWorkers uint32 - recvBufferPool SharedBufferPool - waitForHandlers bool -} - -var defaultServerOptions = serverOptions{ - maxConcurrentStreams: math.MaxUint32, - maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, - maxSendMessageSize: defaultServerMaxSendMessageSize, - connectionTimeout: 120 * time.Second, - writeBufferSize: defaultWriteBufSize, - readBufferSize: defaultReadBufSize, - recvBufferPool: nopBufferPool{}, -} -var globalServerOptions []ServerOption - -// A ServerOption sets options such as credentials, codec and keepalive parameters, etc. -type ServerOption interface { - apply(*serverOptions) -} - -// EmptyServerOption does not alter the server configuration. It can be embedded -// in another structure to build custom server options. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type EmptyServerOption struct{} - -func (EmptyServerOption) apply(*serverOptions) {} - -// funcServerOption wraps a function that modifies serverOptions into an -// implementation of the ServerOption interface. 
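Each field of the serverOptions struct above is populated through a functional option passed to NewServer. A usage sketch (the sizes and timeouts are arbitrary example values):

package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// Each option lands in one serverOptions field shown above.
	s := grpc.NewServer(
		grpc.MaxRecvMsgSize(8<<20),             // maxReceiveMessageSize
		grpc.MaxConcurrentStreams(256),         // maxConcurrentStreams
		grpc.ConnectionTimeout(30*time.Second), // connectionTimeout
		grpc.KeepaliveParams(keepalive.ServerParameters{
			Time:    2 * time.Minute,  // ping after 2m of inactivity
			Timeout: 20 * time.Second, // close if the ping goes unanswered
		}),
	)
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("listening on %v", lis.Addr())
	log.Fatal(s.Serve(lis))
}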
-type funcServerOption struct { - f func(*serverOptions) -} - -func (fdo *funcServerOption) apply(do *serverOptions) { - fdo.f(do) -} - -func newFuncServerOption(f func(*serverOptions)) *funcServerOption { - return &funcServerOption{ - f: f, - } -} - -// joinServerOption provides a way to combine arbitrary number of server -// options into one. -type joinServerOption struct { - opts []ServerOption -} - -func (mdo *joinServerOption) apply(do *serverOptions) { - for _, opt := range mdo.opts { - opt.apply(do) - } -} - -func newJoinServerOption(opts ...ServerOption) ServerOption { - return &joinServerOption{opts: opts} -} - -// SharedWriteBuffer allows reusing per-connection transport write buffer. -// If this option is set to true every connection will release the buffer after -// flushing the data on the wire. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func SharedWriteBuffer(val bool) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.sharedWriteBuffer = val - }) -} - -// WriteBufferSize determines how much data can be batched before doing a write -// on the wire. The corresponding memory allocation for this buffer will be -// twice the size to keep syscalls low. The default value for this buffer is -// 32KB. Zero or negative values will disable the write buffer such that each -// write will be on underlying connection. -// Note: A Send call may not directly translate to a write. -func WriteBufferSize(s int) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.writeBufferSize = s - }) -} - -// ReadBufferSize lets you set the size of read buffer, this determines how much -// data can be read at most for one read syscall. The default value for this -// buffer is 32KB. Zero or negative values will disable read buffer for a -// connection so data framer can access the underlying conn directly. -func ReadBufferSize(s int) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.readBufferSize = s - }) -} - -// InitialWindowSize returns a ServerOption that sets window size for stream. -// The lower bound for window size is 64K and any value smaller than that will be ignored. -func InitialWindowSize(s int32) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.initialWindowSize = s - }) -} - -// InitialConnWindowSize returns a ServerOption that sets window size for a connection. -// The lower bound for window size is 64K and any value smaller than that will be ignored. -func InitialConnWindowSize(s int32) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.initialConnWindowSize = s - }) -} - -// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. -func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { - if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime { - logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") - kp.Time = internal.KeepaliveMinServerPingTime - } - - return newFuncServerOption(func(o *serverOptions) { - o.keepaliveParams = kp - }) -} - -// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server. -func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.keepalivePolicy = kep - }) -} - -// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. 
-// -// This will override any lookups by content-subtype for Codecs registered with RegisterCodec. -// -// Deprecated: register codecs using encoding.RegisterCodec. The server will -// automatically use registered codecs based on the incoming requests' headers. -// See also -// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. -// Will be supported throughout 1.x. -func CustomCodec(codec Codec) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.codec = codec - }) -} - -// ForceServerCodec returns a ServerOption that sets a codec for message -// marshaling and unmarshaling. -// -// This will override any lookups by content-subtype for Codecs registered -// with RegisterCodec. -// -// See Content-Type on -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. Also see the documentation on RegisterCodec and -// CallContentSubtype for more details on the interaction between encoding.Codec -// and content-subtype. -// -// This function is provided for advanced users; prefer to register codecs -// using encoding.RegisterCodec. -// The server will automatically use registered codecs based on the incoming -// requests' headers. See also -// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. -// Will be supported throughout 1.x. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func ForceServerCodec(codec encoding.Codec) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.codec = codec - }) -} - -// RPCCompressor returns a ServerOption that sets a compressor for outbound -// messages. For backward compatibility, all outbound messages will be sent -// using this compressor, regardless of incoming message compression. By -// default, server messages will be sent using the same compressor with which -// request messages were sent. -// -// Deprecated: use encoding.RegisterCompressor instead. Will be supported -// throughout 1.x. -func RPCCompressor(cp Compressor) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.cp = cp - }) -} - -// RPCDecompressor returns a ServerOption that sets a decompressor for inbound -// messages. It has higher priority than decompressors registered via -// encoding.RegisterCompressor. -// -// Deprecated: use encoding.RegisterCompressor instead. Will be supported -// throughout 1.x. -func RPCDecompressor(dc Decompressor) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.dc = dc - }) -} - -// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. -// If this is not set, gRPC uses the default limit. -// -// Deprecated: use MaxRecvMsgSize instead. Will be supported throughout 1.x. -func MaxMsgSize(m int) ServerOption { - return MaxRecvMsgSize(m) -} - -// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. -// If this is not set, gRPC uses the default 4MB. -func MaxRecvMsgSize(m int) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.maxReceiveMessageSize = m - }) -} - -// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. -// If this is not set, gRPC uses the default `math.MaxInt32`. 
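The deprecation notices on RPCCompressor and RPCDecompressor point at the encoding registry; registering the stock gzip compressor is just a blank import, after which clients opt in per call with grpc.UseCompressor("gzip"). A minimal check that the registration took effect:

package main

import (
	"fmt"

	"google.golang.org/grpc/encoding"
	// Registration happens as a side effect of the import; this is the
	// replacement the deprecation notices above recommend.
	_ "google.golang.org/grpc/encoding/gzip"
)

func main() {
	// A server needs nothing further: requests arriving with
	// "grpc-encoding: gzip" are decompressed via the registered
	// compressor.
	fmt.Println(encoding.GetCompressor("gzip") != nil) // true
}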
-func MaxSendMsgSize(m int) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.maxSendMessageSize = m - }) -} - -// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number -// of concurrent streams to each ServerTransport. -func MaxConcurrentStreams(n uint32) ServerOption { - if n == 0 { - n = math.MaxUint32 - } - return newFuncServerOption(func(o *serverOptions) { - o.maxConcurrentStreams = n - }) -} - -// Creds returns a ServerOption that sets credentials for server connections. -func Creds(c credentials.TransportCredentials) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.creds = c - }) -} - -// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the -// server. Only one unary interceptor can be installed. The construction of multiple -// interceptors (e.g., chaining) can be implemented at the caller. -func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - if o.unaryInt != nil { - panic("The unary server interceptor was already set and may not be reset.") - } - o.unaryInt = i - }) -} - -// ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor -// for unary RPCs. The first interceptor will be the outer most, -// while the last interceptor will be the inner most wrapper around the real call. -// All unary interceptors added by this method will be chained. -func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) - }) -} - -// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the -// server. Only one stream interceptor can be installed. -func StreamInterceptor(i StreamServerInterceptor) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - if o.streamInt != nil { - panic("The stream server interceptor was already set and may not be reset.") - } - o.streamInt = i - }) -} - -// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor -// for streaming RPCs. The first interceptor will be the outer most, -// while the last interceptor will be the inner most wrapper around the real call. -// All stream interceptors added by this method will be chained. -func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.chainStreamInts = append(o.chainStreamInts, interceptors...) - }) -} - -// InTapHandle returns a ServerOption that sets the tap handle for all the server -// transport to be created. Only one can be installed. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func InTapHandle(h tap.ServerInHandle) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - if o.inTapHandle != nil { - panic("The tap handle was already set and may not be reset.") - } - o.inTapHandle = h - }) -} - -// StatsHandler returns a ServerOption that sets the stats handler for the server. -func StatsHandler(h stats.Handler) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - if h == nil { - logger.Error("ignoring nil parameter in grpc.StatsHandler ServerOption") - // Do not allow a nil stats handler, which would otherwise cause - // panics. 
- return - } - o.statsHandlers = append(o.statsHandlers, h) - }) -} - -// binaryLogger returns a ServerOption that can set the binary logger for the -// server. -func binaryLogger(bl binarylog.Logger) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.binaryLogger = bl - }) -} - -// UnknownServiceHandler returns a ServerOption that allows for adding a custom -// unknown service handler. The provided method is a bidi-streaming RPC service -// handler that will be invoked instead of returning the "unimplemented" gRPC -// error whenever a request is received for an unregistered service or method. -// The handling function and stream interceptor (if set) have full access to -// the ServerStream, including its Context. -func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.unknownStreamDesc = &StreamDesc{ - StreamName: "unknown_service_handler", - Handler: streamHandler, - // We need to assume that the users of the streamHandler will want to use both. - ClientStreams: true, - ServerStreams: true, - } - }) -} - -// ConnectionTimeout returns a ServerOption that sets the timeout for -// connection establishment (up to and including HTTP/2 handshaking) for all -// new connections. If this is not set, the default is 120 seconds. A zero or -// negative value will result in an immediate timeout. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func ConnectionTimeout(d time.Duration) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.connectionTimeout = d - }) -} - -// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size -// of header list that the server is prepared to accept. -func MaxHeaderListSize(s uint32) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.maxHeaderListSize = &s - }) -} - -// HeaderTableSize returns a ServerOption that sets the size of dynamic -// header table for stream. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func HeaderTableSize(s uint32) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.headerTableSize = &s - }) -} - -// NumStreamWorkers returns a ServerOption that sets the number of worker -// goroutines that should be used to process incoming streams. Setting this to -// zero (default) will disable workers and spawn a new goroutine for each -// stream. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func NumStreamWorkers(numServerWorkers uint32) ServerOption { - // TODO: If/when this API gets stabilized (i.e. stream workers become the - // only way streams are processed), change the behavior of the zero value to - // a sane default. Preliminary experiments suggest that a value equal to the - // number of CPUs available is most performant; requires thorough testing. - return newFuncServerOption(func(o *serverOptions) { - o.numServerWorkers = numServerWorkers - }) -} - -// WaitForHandlers cause Stop to wait until all outstanding method handlers have -// exited before returning. If false, Stop will return as soon as all -// connections have closed, but method handlers may still be running. By -// default, Stop does not wait for method handlers to return. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. 
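A sketch of UnknownServiceHandler as documented above: the fallback receives the raw ServerStream for any unregistered service or method. Mapping to codes.NotFound is an arbitrary choice for this example; gRPC's own default is codes.Unimplemented:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Every request for an unregistered service/method lands here
	// instead of failing with the stock "unimplemented" error.
	fallback := func(srv any, stream grpc.ServerStream) error {
		method, _ := grpc.MethodFromServerStream(stream)
		log.Printf("unknown method called: %s", method)
		return status.Errorf(codes.NotFound, "no handler for %s", method)
	}

	s := grpc.NewServer(grpc.UnknownServiceHandler(fallback))
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(s.Serve(lis))
}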
-func WaitForHandlers(w bool) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.waitForHandlers = w - }) -} - -// RecvBufferPool returns a ServerOption that configures the server -// to use the provided shared buffer pool for parsing incoming messages. Depending -// on the application's workload, this could result in reduced memory allocation. -// -// If you are unsure about how to implement a memory pool but want to utilize one, -// begin with grpc.NewSharedBufferPool. -// -// Note: The shared buffer pool feature will not be active if any of the following -// options are used: StatsHandler, EnableTracing, or binary logging. In such -// cases, the shared buffer pool will be ignored. -// -// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in -// v1.60.0 or later. -func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { - return recvBufferPool(bufferPool) -} - -func recvBufferPool(bufferPool SharedBufferPool) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.recvBufferPool = bufferPool - }) -} - -// serverWorkerResetThreshold defines how often the stack must be reset. Every -// N requests, by spawning a new goroutine in its place, a worker can reset its -// stack so that large stacks don't live in memory forever. 2^16 should allow -// each goroutine stack to live for at least a few seconds in a typical -// workload (assuming a QPS of a few thousand requests/sec). -const serverWorkerResetThreshold = 1 << 16 - -// serverWorkers blocks on a *transport.Stream channel forever and waits for -// data to be fed by serveStreams. This allows multiple requests to be -// processed by the same goroutine, removing the need for expensive stack -// re-allocations (see the runtime.morestack problem [1]). -// -// [1] https://github.com/golang/go/issues/18138 -func (s *Server) serverWorker() { - for completed := 0; completed < serverWorkerResetThreshold; completed++ { - f, ok := <-s.serverWorkerChannel - if !ok { - return - } - f() - } - go s.serverWorker() -} - -// initServerWorkers creates worker goroutines and a channel to process incoming -// connections to reduce the time spent overall on runtime.morestack. -func (s *Server) initServerWorkers() { - s.serverWorkerChannel = make(chan func()) - s.serverWorkerChannelClose = grpcsync.OnceFunc(func() { - close(s.serverWorkerChannel) - }) - for i := uint32(0); i < s.opts.numServerWorkers; i++ { - go s.serverWorker() - } -} - -// NewServer creates a gRPC server which has no service registered and has not -// started to accept requests yet. 
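The serverWorker comment above describes a general pattern: recycle a worker goroutine every N tasks so a stack grown by runtime.morestack is eventually released instead of living forever. Stripped of the gRPC specifics:

package main

import "fmt"

const resetThreshold = 1 << 16

// worker drains tasks from ch, but replaces itself with a fresh
// goroutine (and hence a fresh, small stack) every resetThreshold
// tasks, mirroring the deleted serverWorker.
func worker(ch <-chan func()) {
	for completed := 0; completed < resetThreshold; completed++ {
		f, ok := <-ch
		if !ok {
			return
		}
		f()
	}
	go worker(ch)
}

func main() {
	ch := make(chan func())
	for i := 0; i < 4; i++ {
		go worker(ch)
	}
	done := make(chan struct{})
	ch <- func() { fmt.Println("ran on a pooled worker"); close(done) }
	<-done
	close(ch) // workers observe the close and exit
}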
-func NewServer(opt ...ServerOption) *Server { - opts := defaultServerOptions - for _, o := range globalServerOptions { - o.apply(&opts) - } - for _, o := range opt { - o.apply(&opts) - } - s := &Server{ - lis: make(map[net.Listener]bool), - opts: opts, - conns: make(map[string]map[transport.ServerTransport]bool), - services: make(map[string]*serviceInfo), - quit: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), - czData: new(channelzData), - } - chainUnaryServerInterceptors(s) - chainStreamServerInterceptors(s) - s.cv = sync.NewCond(&s.mu) - if EnableTracing { - _, file, line, _ := runtime.Caller(1) - s.events = newTraceEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) - } - - if s.opts.numServerWorkers > 0 { - s.initServerWorkers() - } - - s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") - channelz.Info(logger, s.channelzID, "Server created") - return s -} - -// printf records an event in s's event log, unless s has been stopped. -// REQUIRES s.mu is held. -func (s *Server) printf(format string, a ...any) { - if s.events != nil { - s.events.Printf(format, a...) - } -} - -// errorf records an error in s's event log, unless s has been stopped. -// REQUIRES s.mu is held. -func (s *Server) errorf(format string, a ...any) { - if s.events != nil { - s.events.Errorf(format, a...) - } -} - -// ServiceRegistrar wraps a single method that supports service registration. It -// enables users to pass concrete types other than grpc.Server to the service -// registration methods exported by the IDL generated code. -type ServiceRegistrar interface { - // RegisterService registers a service and its implementation to the - // concrete type implementing this interface. It may not be called - // once the server has started serving. - // desc describes the service and its methods and handlers. impl is the - // service implementation which is passed to the method handlers. - RegisterService(desc *ServiceDesc, impl any) -} - -// RegisterService registers a service and its implementation to the gRPC -// server. It is called from the IDL generated code. This must be called before -// invoking Serve. If ss is non-nil (for legacy code), its type is checked to -// ensure it implements sd.HandlerType. -func (s *Server) RegisterService(sd *ServiceDesc, ss any) { - if ss != nil { - ht := reflect.TypeOf(sd.HandlerType).Elem() - st := reflect.TypeOf(ss) - if !st.Implements(ht) { - logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) - } - } - s.register(sd, ss) -} - -func (s *Server) register(sd *ServiceDesc, ss any) { - s.mu.Lock() - defer s.mu.Unlock() - s.printf("RegisterService(%q)", sd.ServiceName) - if s.serve { - logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) - } - if _, ok := s.services[sd.ServiceName]; ok { - logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) - } - info := &serviceInfo{ - serviceImpl: ss, - methods: make(map[string]*MethodDesc), - streams: make(map[string]*StreamDesc), - mdata: sd.Metadata, - } - for i := range sd.Methods { - d := &sd.Methods[i] - info.methods[d.MethodName] = d - } - for i := range sd.Streams { - d := &sd.Streams[i] - info.streams[d.StreamName] = d - } - s.services[sd.ServiceName] = info -} - -// MethodInfo contains the information of an RPC including its method name and type. -type MethodInfo struct { - // Name is the method name only, without the service name or package name. 
- Name string - // IsClientStream indicates whether the RPC is a client streaming RPC. - IsClientStream bool - // IsServerStream indicates whether the RPC is a server streaming RPC. - IsServerStream bool -} - -// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service. -type ServiceInfo struct { - Methods []MethodInfo - // Metadata is the metadata specified in ServiceDesc when registering service. - Metadata any -} - -// GetServiceInfo returns a map from service names to ServiceInfo. -// Service names include the package names, in the form of .. -func (s *Server) GetServiceInfo() map[string]ServiceInfo { - ret := make(map[string]ServiceInfo) - for n, srv := range s.services { - methods := make([]MethodInfo, 0, len(srv.methods)+len(srv.streams)) - for m := range srv.methods { - methods = append(methods, MethodInfo{ - Name: m, - IsClientStream: false, - IsServerStream: false, - }) - } - for m, d := range srv.streams { - methods = append(methods, MethodInfo{ - Name: m, - IsClientStream: d.ClientStreams, - IsServerStream: d.ServerStreams, - }) - } - - ret[n] = ServiceInfo{ - Methods: methods, - Metadata: srv.mdata, - } - } - return ret -} - -// ErrServerStopped indicates that the operation is now illegal because of -// the server being stopped. -var ErrServerStopped = errors.New("grpc: the server has been stopped") - -type listenSocket struct { - net.Listener - channelzID *channelz.Identifier -} - -func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { - return &channelz.SocketInternalMetric{ - SocketOptions: channelz.GetSocketOption(l.Listener), - LocalAddr: l.Listener.Addr(), - } -} - -func (l *listenSocket) Close() error { - err := l.Listener.Close() - channelz.RemoveEntry(l.channelzID) - channelz.Info(logger, l.channelzID, "ListenSocket deleted") - return err -} - -// Serve accepts incoming connections on the listener lis, creating a new -// ServerTransport and service goroutine for each. The service goroutines -// read gRPC requests and then call the registered handlers to reply to them. -// Serve returns when lis.Accept fails with fatal errors. lis will be closed when -// this method returns. -// Serve will return a non-nil error unless Stop or GracefulStop is called. -// -// Note: All supported releases of Go (as of December 2023) override the OS -// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive -// with OS defaults for keepalive time and interval, callers need to do the -// following two things: -// - pass a net.Listener created by calling the Listen method on a -// net.ListenConfig with the `KeepAlive` field set to a negative value. This -// will result in the Go standard library not overriding OS defaults for TCP -// keepalive interval and time. But this will also result in the Go standard -// library not enabling TCP keepalives by default. -// - override the Accept method on the passed in net.Listener and set the -// SO_KEEPALIVE socket option to enable TCP keepalives, with OS defaults. -func (s *Server) Serve(lis net.Listener) error { - s.mu.Lock() - s.printf("serving") - s.serve = true - if s.lis == nil { - // Serve called after Stop or GracefulStop. - s.mu.Unlock() - lis.Close() - return ErrServerStopped - } - - s.serveWG.Add(1) - defer func() { - s.serveWG.Done() - if s.quit.HasFired() { - // Stop or GracefulStop called; block until done and return nil. 
- <-s.done.Done() - } - }() - - ls := &listenSocket{Listener: lis} - s.lis[ls] = true - - defer func() { - s.mu.Lock() - if s.lis != nil && s.lis[ls] { - ls.Close() - delete(s.lis, ls) - } - s.mu.Unlock() - }() - - var err error - ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) - if err != nil { - s.mu.Unlock() - return err - } - s.mu.Unlock() - channelz.Info(logger, ls.channelzID, "ListenSocket created") - - var tempDelay time.Duration // how long to sleep on accept failure - for { - rawConn, err := lis.Accept() - if err != nil { - if ne, ok := err.(interface { - Temporary() bool - }); ok && ne.Temporary() { - if tempDelay == 0 { - tempDelay = 5 * time.Millisecond - } else { - tempDelay *= 2 - } - if max := 1 * time.Second; tempDelay > max { - tempDelay = max - } - s.mu.Lock() - s.printf("Accept error: %v; retrying in %v", err, tempDelay) - s.mu.Unlock() - timer := time.NewTimer(tempDelay) - select { - case <-timer.C: - case <-s.quit.Done(): - timer.Stop() - return nil - } - continue - } - s.mu.Lock() - s.printf("done serving; Accept = %v", err) - s.mu.Unlock() - - if s.quit.HasFired() { - return nil - } - return err - } - tempDelay = 0 - // Start a new goroutine to deal with rawConn so we don't stall this Accept - // loop goroutine. - // - // Make sure we account for the goroutine so GracefulStop doesn't nil out - // s.conns before this conn can be added. - s.serveWG.Add(1) - go func() { - s.handleRawConn(lis.Addr().String(), rawConn) - s.serveWG.Done() - }() - } -} - -// handleRawConn forks a goroutine to handle a just-accepted connection that -// has not had any I/O performed on it yet. -func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { - if s.quit.HasFired() { - rawConn.Close() - return - } - rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) - - // Finish handshaking (HTTP2) - st := s.newHTTP2Transport(rawConn) - rawConn.SetDeadline(time.Time{}) - if st == nil { - return - } - - if cc, ok := rawConn.(interface { - PassServerTransport(transport.ServerTransport) - }); ok { - cc.PassServerTransport(st) - } - - if !s.addConn(lisAddr, st) { - return - } - go func() { - s.serveStreams(context.Background(), st, rawConn) - s.removeConn(lisAddr, st) - }() -} - -// newHTTP2Transport sets up a http/2 transport (using the -// gRPC http2 server transport in transport/http2_server.go). -func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { - config := &transport.ServerConfig{ - MaxStreams: s.opts.maxConcurrentStreams, - ConnectionTimeout: s.opts.connectionTimeout, - Credentials: s.opts.creds, - InTapHandle: s.opts.inTapHandle, - StatsHandlers: s.opts.statsHandlers, - KeepaliveParams: s.opts.keepaliveParams, - KeepalivePolicy: s.opts.keepalivePolicy, - InitialWindowSize: s.opts.initialWindowSize, - InitialConnWindowSize: s.opts.initialConnWindowSize, - WriteBufferSize: s.opts.writeBufferSize, - ReadBufferSize: s.opts.readBufferSize, - SharedWriteBuffer: s.opts.sharedWriteBuffer, - ChannelzParentID: s.channelzID, - MaxHeaderListSize: s.opts.maxHeaderListSize, - HeaderTableSize: s.opts.headerTableSize, - } - st, err := transport.NewServerTransport(c, config) - if err != nil { - s.mu.Lock() - s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) - s.mu.Unlock() - // ErrConnDispatched means that the connection was dispatched away from - // gRPC; those connections should be left open. - if err != credentials.ErrConnDispatched { - // Don't log on ErrConnDispatched and io.EOF to prevent log spam. 
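The Serve doc comment above prescribes two steps for getting OS-default TCP keepalives instead of Go's 15s override. A sketch of one way to follow it, assuming connections are *net.TCPConn underneath (error handling kept minimal):

```go
package main

import (
	"context"
	"net"

	"google.golang.org/grpc"
)

// keepAliveListener re-enables TCP keepalives on accepted connections,
// because KeepAlive: -1 below also stops Go from enabling them at all.
type keepAliveListener struct{ net.Listener }

func (l keepAliveListener) Accept() (net.Conn, error) {
	c, err := l.Listener.Accept()
	if err != nil {
		return nil, err
	}
	if tc, ok := c.(*net.TCPConn); ok {
		tc.SetKeepAlive(true) // SO_KEEPALIVE on, with OS defaults for time/interval
	}
	return c, nil
}

func main() {
	lc := net.ListenConfig{KeepAlive: -1} // negative: don't override OS defaults
	lis, err := lc.Listen(context.Background(), "tcp", ":50051")
	if err != nil {
		panic(err)
	}
	s := grpc.NewServer()
	_ = s.Serve(keepAliveListener{lis})
}
```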
- if err != io.EOF { - channelz.Info(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) - } - c.Close() - } - return nil - } - - return st -} - -func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) { - ctx = transport.SetConnection(ctx, rawConn) - ctx = peer.NewContext(ctx, st.Peer()) - for _, sh := range s.opts.statsHandlers { - ctx = sh.TagConn(ctx, &stats.ConnTagInfo{ - RemoteAddr: st.Peer().Addr, - LocalAddr: st.Peer().LocalAddr, - }) - sh.HandleConn(ctx, &stats.ConnBegin{}) - } - - defer func() { - st.Close(errors.New("finished serving streams for the server transport")) - for _, sh := range s.opts.statsHandlers { - sh.HandleConn(ctx, &stats.ConnEnd{}) - } - }() - - streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) - st.HandleStreams(ctx, func(stream *transport.Stream) { - s.handlersWG.Add(1) - streamQuota.acquire() - f := func() { - defer streamQuota.release() - defer s.handlersWG.Done() - s.handleStream(st, stream) - } - - if s.opts.numServerWorkers > 0 { - select { - case s.serverWorkerChannel <- f: - return - default: - // If all stream workers are busy, fallback to the default code path. - } - } - go f() - }) -} - -var _ http.Handler = (*Server)(nil) - -// ServeHTTP implements the Go standard library's http.Handler -// interface by responding to the gRPC request r, by looking up -// the requested gRPC method in the gRPC server s. -// -// The provided HTTP request must have arrived on an HTTP/2 -// connection. When using the Go standard library's server, -// practically this means that the Request must also have arrived -// over TLS. -// -// To share one port (such as 443 for https) between gRPC and an -// existing http.Handler, use a root http.Handler such as: -// -// if r.ProtoMajor == 2 && strings.HasPrefix( -// r.Header.Get("Content-Type"), "application/grpc") { -// grpcServer.ServeHTTP(w, r) -// } else { -// yourMux.ServeHTTP(w, r) -// } -// -// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally -// separate from grpc-go's HTTP/2 server. Performance and features may vary -// between the two paths. ServeHTTP does not support some gRPC features -// available through grpc-go's HTTP/2 server. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) - if err != nil { - // Errors returned from transport.NewServerHandlerTransport have - // already been written to w. - return - } - if !s.addConn(listenerAddressForServeHTTP, st) { - return - } - defer s.removeConn(listenerAddressForServeHTTP, st) - s.serveStreams(r.Context(), st, nil) -} - -func (s *Server) addConn(addr string, st transport.ServerTransport) bool { - s.mu.Lock() - defer s.mu.Unlock() - if s.conns == nil { - st.Close(errors.New("Server.addConn called when server has already been stopped")) - return false - } - if s.drain { - // Transport added after we drained our existing conns: drain it - // immediately. - st.Drain("") - } - - if s.conns[addr] == nil { - // Create a map entry if this is the first connection on this listener. 
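The ServeHTTP doc comment above gives the routing predicate for sharing one port between gRPC and plain HTTP. Expanded into a runnable shape (the cert/key paths are placeholders; TLS matters because Go's net/http only negotiates HTTP/2 over TLS by default, and ServeHTTP requires an HTTP/2 request):

```go
package main

import (
	"net/http"
	"strings"

	"google.golang.org/grpc"
)

func main() {
	grpcServer := grpc.NewServer()
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("plain HTTP"))
	})

	root := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// gRPC requires HTTP/2 and a grpc content type; everything else
		// falls through to the ordinary mux.
		if r.ProtoMajor == 2 && strings.HasPrefix(r.Header.Get("Content-Type"), "application/grpc") {
			grpcServer.ServeHTTP(w, r)
			return
		}
		mux.ServeHTTP(w, r)
	})

	http.ListenAndServeTLS(":443", "server.crt", "server.key", root) // placeholder cert paths
}
```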
- s.conns[addr] = make(map[transport.ServerTransport]bool) - } - s.conns[addr][st] = true - return true -} - -func (s *Server) removeConn(addr string, st transport.ServerTransport) { - s.mu.Lock() - defer s.mu.Unlock() - - conns := s.conns[addr] - if conns != nil { - delete(conns, st) - if len(conns) == 0 { - // If the last connection for this address is being removed, also - // remove the map entry corresponding to the address. This is used - // in GracefulStop() when waiting for all connections to be closed. - delete(s.conns, addr) - } - s.cv.Broadcast() - } -} - -func (s *Server) channelzMetric() *channelz.ServerInternalMetric { - return &channelz.ServerInternalMetric{ - CallsStarted: atomic.LoadInt64(&s.czData.callsStarted), - CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded), - CallsFailed: atomic.LoadInt64(&s.czData.callsFailed), - LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)), - } -} - -func (s *Server) incrCallsStarted() { - atomic.AddInt64(&s.czData.callsStarted, 1) - atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano()) -} - -func (s *Server) incrCallsSucceeded() { - atomic.AddInt64(&s.czData.callsSucceeded, 1) -} - -func (s *Server) incrCallsFailed() { - atomic.AddInt64(&s.czData.callsFailed, 1) -} - -func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { - data, err := encode(s.getCodec(stream.ContentSubtype()), msg) - if err != nil { - channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) - return err - } - compData, err := compress(data, cp, comp) - if err != nil { - channelz.Error(logger, s.channelzID, "grpc: server failed to compress response: ", err) - return err - } - hdr, payload := msgHeader(data, compData) - // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > s.opts.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize) - } - err = t.Write(stream, hdr, payload, opts) - if err == nil { - for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) - } - } - return err -} - -// chainUnaryServerInterceptors chains all unary server interceptors into one. -func chainUnaryServerInterceptors(s *Server) { - // Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will - // be executed before any other chained interceptors. - interceptors := s.opts.chainUnaryInts - if s.opts.unaryInt != nil { - interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...) 
- } - - var chainedInt UnaryServerInterceptor - if len(interceptors) == 0 { - chainedInt = nil - } else if len(interceptors) == 1 { - chainedInt = interceptors[0] - } else { - chainedInt = chainUnaryInterceptors(interceptors) - } - - s.opts.unaryInt = chainedInt -} - -func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { - return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) { - return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) - } -} - -func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { - if curr == len(interceptors)-1 { - return finalHandler - } - return func(ctx context.Context, req any) (any, error) { - return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) - } -} - -func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { - shs := s.opts.statsHandlers - if len(shs) != 0 || trInfo != nil || channelz.IsOn() { - if channelz.IsOn() { - s.incrCallsStarted() - } - var statsBegin *stats.Begin - for _, sh := range shs { - beginTime := time.Now() - statsBegin = &stats.Begin{ - BeginTime: beginTime, - IsClientStream: false, - IsServerStream: false, - } - sh.HandleRPC(ctx, statsBegin) - } - if trInfo != nil { - trInfo.tr.LazyLog(&trInfo.firstLine, false) - } - // The deferred error handling for tracing, stats handler and channelz are - // combined into one function to reduce stack usage -- a defer takes ~56-64 - // bytes on the stack, so overflowing the stack will require a stack - // re-allocation, which is expensive. - // - // To maintain behavior similar to separate deferred statements, statements - // should be executed in the reverse order. That is, tracing first, stats - // handler second, and channelz last. Note that panics *within* defers will - // lead to different behavior, but that's an acceptable compromise; that - // would be undefined behavior territory anyway. 
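getChainUnaryHandler above builds the chain recursively: interceptor i receives a handler closure that invokes interceptor i+1, so the first interceptor configured runs outermost. A small sketch of the resulting call order, using the public ChainUnaryInterceptor option (service registration elided):

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

func outer(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
	log.Println("outer: before") // logged first
	resp, err := handler(ctx, req) // handler is a closure wrapping inner
	log.Println("outer: after") // logged last
	return resp, err
}

func inner(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
	log.Println("inner: immediately around the real method handler")
	return handler(ctx, req)
}

func main() {
	// Equivalent to what chainUnaryServerInterceptors produces: outer wraps inner.
	s := grpc.NewServer(grpc.ChainUnaryInterceptor(outer, inner))
	_ = s
}
```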
- defer func() { - if trInfo != nil { - if err != nil && err != io.EOF { - trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) - trInfo.tr.SetError() - } - trInfo.tr.Finish() - } - - for _, sh := range shs { - end := &stats.End{ - BeginTime: statsBegin.BeginTime, - EndTime: time.Now(), - } - if err != nil && err != io.EOF { - end.Error = toRPCErr(err) - } - sh.HandleRPC(ctx, end) - } - - if channelz.IsOn() { - if err != nil && err != io.EOF { - s.incrCallsFailed() - } else { - s.incrCallsSucceeded() - } - } - }() - } - var binlogs []binarylog.MethodLogger - if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { - binlogs = append(binlogs, ml) - } - if s.opts.binaryLogger != nil { - if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { - binlogs = append(binlogs, ml) - } - } - if len(binlogs) != 0 { - md, _ := metadata.FromIncomingContext(ctx) - logEntry := &binarylog.ClientHeader{ - Header: md, - MethodName: stream.Method(), - PeerAddr: nil, - } - if deadline, ok := ctx.Deadline(); ok { - logEntry.Timeout = time.Until(deadline) - if logEntry.Timeout < 0 { - logEntry.Timeout = 0 - } - } - if a := md[":authority"]; len(a) > 0 { - logEntry.Authority = a[0] - } - if peer, ok := peer.FromContext(ctx); ok { - logEntry.PeerAddr = peer.Addr - } - for _, binlog := range binlogs { - binlog.Log(ctx, logEntry) - } - } - - // comp and cp are used for compression. decomp and dc are used for - // decompression. If comp and decomp are both set, they are the same; - // however they are kept separate to ensure that at most one of the - // compressor/decompressor variable pairs are set for use later. - var comp, decomp encoding.Compressor - var cp Compressor - var dc Decompressor - var sendCompressorName string - - // If dc is set and matches the stream's compression, use it. Otherwise, try - // to find a matching registered compressor for decomp. - if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { - dc = s.opts.dc - } else if rc != "" && rc != encoding.Identity { - decomp = encoding.GetCompressor(rc) - if decomp == nil { - st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) - t.WriteStatus(stream, st) - return st.Err() - } - } - - // If cp is set, use it. Otherwise, attempt to compress the response using - // the incoming message compression method. - // - // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. - if s.opts.cp != nil { - cp = s.opts.cp - sendCompressorName = cp.Type() - } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { - // Legacy compressor not specified; attempt to respond with same encoding. 
- comp = encoding.GetCompressor(rc) - if comp != nil { - sendCompressorName = comp.Name() - } - } - - if sendCompressorName != "" { - if err := stream.SetSendCompress(sendCompressorName); err != nil { - return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) - } - } - - var payInfo *payloadInfo - if len(shs) != 0 || len(binlogs) != 0 { - payInfo = &payloadInfo{} - } - d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) - if err != nil { - if e := t.WriteStatus(stream, status.Convert(err)); e != nil { - channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) - } - return err - } - if channelz.IsOn() { - t.IncrMsgRecv() - } - df := func(v any) error { - if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { - return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) - } - for _, sh := range shs { - sh.HandleRPC(ctx, &stats.InPayload{ - RecvTime: time.Now(), - Payload: v, - Length: len(d), - WireLength: payInfo.compressedLength + headerLen, - CompressedLength: payInfo.compressedLength, - Data: d, - }) - } - if len(binlogs) != 0 { - cm := &binarylog.ClientMessage{ - Message: d, - } - for _, binlog := range binlogs { - binlog.Log(ctx, cm) - } - } - if trInfo != nil { - trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) - } - return nil - } - ctx = NewContextWithServerTransportStream(ctx, stream) - reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) - if appErr != nil { - appStatus, ok := status.FromError(appErr) - if !ok { - // Convert non-status application error to a status error with code - // Unknown, but handle context errors specifically. - appStatus = status.FromContextError(appErr) - appErr = appStatus.Err() - } - if trInfo != nil { - trInfo.tr.LazyLog(stringer(appStatus.Message()), true) - trInfo.tr.SetError() - } - if e := t.WriteStatus(stream, appStatus); e != nil { - channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) - } - if len(binlogs) != 0 { - if h, _ := stream.Header(); h.Len() > 0 { - // Only log serverHeader if there was header. Otherwise it can - // be trailer only. - sh := &binarylog.ServerHeader{ - Header: h, - } - for _, binlog := range binlogs { - binlog.Log(ctx, sh) - } - } - st := &binarylog.ServerTrailer{ - Trailer: stream.Trailer(), - Err: appErr, - } - for _, binlog := range binlogs { - binlog.Log(ctx, st) - } - } - return appErr - } - if trInfo != nil { - trInfo.tr.LazyLog(stringer("OK"), false) - } - opts := &transport.Options{Last: true} - - // Server handler could have set new compressor by calling SetSendCompressor. - // In case it is set, we need to use it for compressing outbound message. - if stream.SendCompress() != sendCompressorName { - comp = encoding.GetCompressor(stream.SendCompress()) - } - if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { - if err == io.EOF { - // The entire stream is done (for unary RPC only). - return err - } - if sts, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, sts); e != nil { - channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) - } - } else { - switch st := err.(type) { - case transport.ConnectionError: - // Nothing to do here. 
- default: - panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) - } - } - if len(binlogs) != 0 { - h, _ := stream.Header() - sh := &binarylog.ServerHeader{ - Header: h, - } - st := &binarylog.ServerTrailer{ - Trailer: stream.Trailer(), - Err: appErr, - } - for _, binlog := range binlogs { - binlog.Log(ctx, sh) - binlog.Log(ctx, st) - } - } - return err - } - if len(binlogs) != 0 { - h, _ := stream.Header() - sh := &binarylog.ServerHeader{ - Header: h, - } - sm := &binarylog.ServerMessage{ - Message: reply, - } - for _, binlog := range binlogs { - binlog.Log(ctx, sh) - binlog.Log(ctx, sm) - } - } - if channelz.IsOn() { - t.IncrMsgSent() - } - if trInfo != nil { - trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) - } - // TODO: Should we be logging if writing status failed here, like above? - // Should the logging be in WriteStatus? Should we ignore the WriteStatus - // error or allow the stats handler to see it? - if len(binlogs) != 0 { - st := &binarylog.ServerTrailer{ - Trailer: stream.Trailer(), - Err: appErr, - } - for _, binlog := range binlogs { - binlog.Log(ctx, st) - } - } - return t.WriteStatus(stream, statusOK) -} - -// chainStreamServerInterceptors chains all stream server interceptors into one. -func chainStreamServerInterceptors(s *Server) { - // Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will - // be executed before any other chained interceptors. - interceptors := s.opts.chainStreamInts - if s.opts.streamInt != nil { - interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...) - } - - var chainedInt StreamServerInterceptor - if len(interceptors) == 0 { - chainedInt = nil - } else if len(interceptors) == 1 { - chainedInt = interceptors[0] - } else { - chainedInt = chainStreamInterceptors(interceptors) - } - - s.opts.streamInt = chainedInt -} - -func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { - return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { - return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) - } -} - -func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { - if curr == len(interceptors)-1 { - return finalHandler - } - return func(srv any, stream ServerStream) error { - return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) - } -} - -func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { - if channelz.IsOn() { - s.incrCallsStarted() - } - shs := s.opts.statsHandlers - var statsBegin *stats.Begin - if len(shs) != 0 { - beginTime := time.Now() - statsBegin = &stats.Begin{ - BeginTime: beginTime, - IsClientStream: sd.ClientStreams, - IsServerStream: sd.ServerStreams, - } - for _, sh := range shs { - sh.HandleRPC(ctx, statsBegin) - } - } - ctx = NewContextWithServerTransportStream(ctx, stream) - ss := &serverStream{ - ctx: ctx, - t: t, - s: stream, - p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, - codec: s.getCodec(stream.ContentSubtype()), - maxReceiveMessageSize: s.opts.maxReceiveMessageSize, - maxSendMessageSize: s.opts.maxSendMessageSize, - trInfo: trInfo, - statsHandler: shs, - } - - if len(shs) != 0 || trInfo != nil || channelz.IsOn() { - // See 
comment in processUnaryRPC on defers. - defer func() { - if trInfo != nil { - ss.mu.Lock() - if err != nil && err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) - ss.trInfo.tr.SetError() - } - ss.trInfo.tr.Finish() - ss.trInfo.tr = nil - ss.mu.Unlock() - } - - if len(shs) != 0 { - end := &stats.End{ - BeginTime: statsBegin.BeginTime, - EndTime: time.Now(), - } - if err != nil && err != io.EOF { - end.Error = toRPCErr(err) - } - for _, sh := range shs { - sh.HandleRPC(ctx, end) - } - } - - if channelz.IsOn() { - if err != nil && err != io.EOF { - s.incrCallsFailed() - } else { - s.incrCallsSucceeded() - } - } - }() - } - - if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { - ss.binlogs = append(ss.binlogs, ml) - } - if s.opts.binaryLogger != nil { - if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { - ss.binlogs = append(ss.binlogs, ml) - } - } - if len(ss.binlogs) != 0 { - md, _ := metadata.FromIncomingContext(ctx) - logEntry := &binarylog.ClientHeader{ - Header: md, - MethodName: stream.Method(), - PeerAddr: nil, - } - if deadline, ok := ctx.Deadline(); ok { - logEntry.Timeout = time.Until(deadline) - if logEntry.Timeout < 0 { - logEntry.Timeout = 0 - } - } - if a := md[":authority"]; len(a) > 0 { - logEntry.Authority = a[0] - } - if peer, ok := peer.FromContext(ss.Context()); ok { - logEntry.PeerAddr = peer.Addr - } - for _, binlog := range ss.binlogs { - binlog.Log(ctx, logEntry) - } - } - - // If dc is set and matches the stream's compression, use it. Otherwise, try - // to find a matching registered compressor for decomp. - if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { - ss.dc = s.opts.dc - } else if rc != "" && rc != encoding.Identity { - ss.decomp = encoding.GetCompressor(rc) - if ss.decomp == nil { - st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) - t.WriteStatus(ss.s, st) - return st.Err() - } - } - - // If cp is set, use it. Otherwise, attempt to compress the response using - // the incoming message compression method. - // - // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. - if s.opts.cp != nil { - ss.cp = s.opts.cp - ss.sendCompressorName = s.opts.cp.Type() - } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { - // Legacy compressor not specified; attempt to respond with same encoding. - ss.comp = encoding.GetCompressor(rc) - if ss.comp != nil { - ss.sendCompressorName = rc - } - } - - if ss.sendCompressorName != "" { - if err := stream.SetSendCompress(ss.sendCompressorName); err != nil { - return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) - } - } - - ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp) - - if trInfo != nil { - trInfo.tr.LazyLog(&trInfo.firstLine, false) - } - var appErr error - var server any - if info != nil { - server = info.serviceImpl - } - if s.opts.streamInt == nil { - appErr = sd.Handler(server, ss) - } else { - info := &StreamServerInfo{ - FullMethod: stream.Method(), - IsClientStream: sd.ClientStreams, - IsServerStream: sd.ServerStreams, - } - appErr = s.opts.streamInt(server, ss, info, sd.Handler) - } - if appErr != nil { - appStatus, ok := status.FromError(appErr) - if !ok { - // Convert non-status application error to a status error with code - // Unknown, but handle context errors specifically. 
- appStatus = status.FromContextError(appErr) - appErr = appStatus.Err() - } - if trInfo != nil { - ss.mu.Lock() - ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true) - ss.trInfo.tr.SetError() - ss.mu.Unlock() - } - if len(ss.binlogs) != 0 { - st := &binarylog.ServerTrailer{ - Trailer: ss.s.Trailer(), - Err: appErr, - } - for _, binlog := range ss.binlogs { - binlog.Log(ctx, st) - } - } - t.WriteStatus(ss.s, appStatus) - // TODO: Should we log an error from WriteStatus here and below? - return appErr - } - if trInfo != nil { - ss.mu.Lock() - ss.trInfo.tr.LazyLog(stringer("OK"), false) - ss.mu.Unlock() - } - if len(ss.binlogs) != 0 { - st := &binarylog.ServerTrailer{ - Trailer: ss.s.Trailer(), - Err: appErr, - } - for _, binlog := range ss.binlogs { - binlog.Log(ctx, st) - } - } - return t.WriteStatus(ss.s, statusOK) -} - -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { - ctx := stream.Context() - ctx = contextWithServer(ctx, s) - var ti *traceInfo - if EnableTracing { - tr := newTrace("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) - ctx = newTraceContext(ctx, tr) - ti = &traceInfo{ - tr: tr, - firstLine: firstLine{ - client: false, - remoteAddr: t.Peer().Addr, - }, - } - if dl, ok := ctx.Deadline(); ok { - ti.firstLine.deadline = time.Until(dl) - } - } - - sm := stream.Method() - if sm != "" && sm[0] == '/' { - sm = sm[1:] - } - pos := strings.LastIndex(sm, "/") - if pos == -1 { - if ti != nil { - ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) - ti.tr.SetError() - } - errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) - if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if ti != nil { - ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) - ti.tr.SetError() - } - channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) - } - if ti != nil { - ti.tr.Finish() - } - return - } - service := sm[:pos] - method := sm[pos+1:] - - md, _ := metadata.FromIncomingContext(ctx) - for _, sh := range s.opts.statsHandlers { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) - sh.HandleRPC(ctx, &stats.InHeader{ - FullMethod: stream.Method(), - RemoteAddr: t.Peer().Addr, - LocalAddr: t.Peer().LocalAddr, - Compression: stream.RecvCompress(), - WireLength: stream.HeaderWireLength(), - Header: md, - }) - } - // To have calls in stream callouts work. Will delete once all stats handler - // calls come from the gRPC layer. - stream.SetContext(ctx) - - srv, knownService := s.services[service] - if knownService { - if md, ok := srv.methods[method]; ok { - s.processUnaryRPC(ctx, t, stream, srv, md, ti) - return - } - if sd, ok := srv.streams[method]; ok { - s.processStreamingRPC(ctx, t, stream, srv, sd, ti) - return - } - } - // Unknown service, or known server unknown method. 
- if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) - return - } - var errDesc string - if !knownService { - errDesc = fmt.Sprintf("unknown service %v", service) - } else { - errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) - } - if ti != nil { - ti.tr.LazyPrintf("%s", errDesc) - ti.tr.SetError() - } - if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if ti != nil { - ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) - ti.tr.SetError() - } - channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) - } - if ti != nil { - ti.tr.Finish() - } -} - -// The key to save ServerTransportStream in the context. -type streamKey struct{} - -// NewContextWithServerTransportStream creates a new context from ctx and -// attaches stream to it. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context { - return context.WithValue(ctx, streamKey{}, stream) -} - -// ServerTransportStream is a minimal interface that a transport stream must -// implement. This can be used to mock an actual transport stream for tests of -// handler code that use, for example, grpc.SetHeader (which requires some -// stream to be in context). -// -// See also NewContextWithServerTransportStream. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type ServerTransportStream interface { - Method() string - SetHeader(md metadata.MD) error - SendHeader(md metadata.MD) error - SetTrailer(md metadata.MD) error -} - -// ServerTransportStreamFromContext returns the ServerTransportStream saved in -// ctx. Returns nil if the given context has no stream associated with it -// (which implies it is not an RPC invocation context). -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream { - s, _ := ctx.Value(streamKey{}).(ServerTransportStream) - return s -} - -// Stop stops the gRPC server. It immediately closes all open -// connections and listeners. -// It cancels all active RPCs on the server side and the corresponding -// pending RPCs on the client side will get notified by connection -// errors. -func (s *Server) Stop() { - s.stop(false) -} - -// GracefulStop stops the gRPC server gracefully. It stops the server from -// accepting new connections and RPCs and blocks until all the pending RPCs are -// finished. -func (s *Server) GracefulStop() { - s.stop(true) -} - -func (s *Server) stop(graceful bool) { - s.quit.Fire() - defer s.done.Fire() - - s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) - - s.mu.Lock() - s.closeListenersLocked() - // Wait for serving threads to be ready to exit. Only then can we be sure no - // new conns will be created. 
- s.mu.Unlock() - s.serveWG.Wait() - - s.mu.Lock() - defer s.mu.Unlock() - - if graceful { - s.drainAllServerTransportsLocked() - } else { - s.closeServerTransportsLocked() - } - - for len(s.conns) != 0 { - s.cv.Wait() - } - s.conns = nil - - if s.opts.numServerWorkers > 0 { - // Closing the channel (only once, via grpcsync.OnceFunc) after all the - // connections have been closed above ensures that there are no - // goroutines executing the callback passed to st.HandleStreams (where - // the channel is written to). - s.serverWorkerChannelClose() - } - - if graceful || s.opts.waitForHandlers { - s.handlersWG.Wait() - } - - if s.events != nil { - s.events.Finish() - s.events = nil - } -} - -// s.mu must be held by the caller. -func (s *Server) closeServerTransportsLocked() { - for _, conns := range s.conns { - for st := range conns { - st.Close(errors.New("Server.Stop called")) - } - } -} - -// s.mu must be held by the caller. -func (s *Server) drainAllServerTransportsLocked() { - if !s.drain { - for _, conns := range s.conns { - for st := range conns { - st.Drain("graceful_stop") - } - } - s.drain = true - } -} - -// s.mu must be held by the caller. -func (s *Server) closeListenersLocked() { - for lis := range s.lis { - lis.Close() - } - s.lis = nil -} - -// contentSubtype must be lowercase -// cannot return nil -func (s *Server) getCodec(contentSubtype string) baseCodec { - if s.opts.codec != nil { - return s.opts.codec - } - if contentSubtype == "" { - return encoding.GetCodec(proto.Name) - } - codec := encoding.GetCodec(contentSubtype) - if codec == nil { - logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name) - return encoding.GetCodec(proto.Name) - } - return codec -} - -type serverKey struct{} - -// serverFromContext gets the Server from the context. -func serverFromContext(ctx context.Context) *Server { - s, _ := ctx.Value(serverKey{}).(*Server) - return s -} - -// contextWithServer sets the Server in the context. -func contextWithServer(ctx context.Context, server *Server) context.Context { - return context.WithValue(ctx, serverKey{}, server) -} - -// isRegisteredMethod returns whether the passed in method is registered as a -// method on the server. /service/method and service/method will match if the -// service and method are registered on the server. -func (s *Server) isRegisteredMethod(serviceMethod string) bool { - if serviceMethod != "" && serviceMethod[0] == '/' { - serviceMethod = serviceMethod[1:] - } - pos := strings.LastIndex(serviceMethod, "/") - if pos == -1 { // Invalid method name syntax. - return false - } - service := serviceMethod[:pos] - method := serviceMethod[pos+1:] - srv, knownService := s.services[service] - if knownService { - if _, ok := srv.methods[method]; ok { - return true - } - if _, ok := srv.streams[method]; ok { - return true - } - } - return false -} - -// SetHeader sets the header metadata to be sent from the server to the client. -// The context provided must be the context passed to the server's handler. -// -// Streaming RPCs should prefer the SetHeader method of the ServerStream. -// -// When called multiple times, all the provided metadata will be merged. All -// the metadata will be sent out when one of the following happens: -// -// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader. -// - The first response message is sent. 
For unary handlers, this occurs when -// the handler returns; for streaming handlers, this can happen when stream's -// SendMsg method is called. -// - An RPC status is sent out (error or success). This occurs when the handler -// returns. -// -// SetHeader will fail if called after any of the events above. -// -// The error returned is compatible with the status package. However, the -// status code will often not match the RPC status as seen by the client -// application, and therefore, should not be relied upon for this purpose. -func SetHeader(ctx context.Context, md metadata.MD) error { - if md.Len() == 0 { - return nil - } - stream := ServerTransportStreamFromContext(ctx) - if stream == nil { - return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) - } - return stream.SetHeader(md) -} - -// SendHeader sends header metadata. It may be called at most once, and may not -// be called after any event that causes headers to be sent (see SetHeader for -// a complete list). The provided md and headers set by SetHeader() will be -// sent. -// -// The error returned is compatible with the status package. However, the -// status code will often not match the RPC status as seen by the client -// application, and therefore, should not be relied upon for this purpose. -func SendHeader(ctx context.Context, md metadata.MD) error { - stream := ServerTransportStreamFromContext(ctx) - if stream == nil { - return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) - } - if err := stream.SendHeader(md); err != nil { - return toRPCErr(err) - } - return nil -} - -// SetSendCompressor sets a compressor for outbound messages from the server. -// It must not be called after any event that causes headers to be sent -// (see ServerStream.SetHeader for the complete list). Provided compressor is -// used when below conditions are met: -// -// - compressor is registered via encoding.RegisterCompressor -// - compressor name must exist in the client advertised compressor names -// sent in grpc-accept-encoding header. Use ClientSupportedCompressors to -// get client supported compressor names. -// -// The context provided must be the context passed to the server's handler. -// It must be noted that compressor name encoding.Identity disables the -// outbound compression. -// By default, server messages will be sent using the same compressor with -// which request messages were sent. -// -// It is not safe to call SetSendCompressor concurrently with SendHeader and -// SendMsg. -// -// # Experimental -// -// Notice: This function is EXPERIMENTAL and may be changed or removed in a -// later release. -func SetSendCompressor(ctx context.Context, name string) error { - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) - if !ok || stream == nil { - return fmt.Errorf("failed to fetch the stream from the given context") - } - - if err := validateSendCompressor(name, stream.ClientAdvertisedCompressors()); err != nil { - return fmt.Errorf("unable to set send compressor: %w", err) - } - - return stream.SetSendCompress(name) -} - -// ClientSupportedCompressors returns compressor names advertised by the client -// via grpc-accept-encoding header. -// -// The context provided must be the context passed to the server's handler. -// -// # Experimental -// -// Notice: This function is EXPERIMENTAL and may be changed or removed in a -// later release. 
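To make the ordering rules above concrete: SetHeader buffers metadata, which hits the wire at the first of SendHeader, the first response message, or the RPC status; SetTrailer (deleted just below) rides along with the status. A minimal sketch of a helper meant to be called from inside a unary handler with the handler's own context:

```go
package server

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// annotate must receive the context the gRPC server passed to the handler.
func annotate(ctx context.Context) error {
	// Buffered; nothing is written to the wire yet.
	if err := grpc.SetHeader(ctx, metadata.Pairs("x-request-id", "abc123")); err != nil {
		return err
	}
	// Flush headers now; any later SetHeader call will fail.
	if err := grpc.SendHeader(ctx, metadata.Pairs("x-early", "true")); err != nil {
		return err
	}
	// Merged into the trailers sent with the final RPC status.
	return grpc.SetTrailer(ctx, metadata.Pairs("x-cache", "hit"))
}
```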
-func ClientSupportedCompressors(ctx context.Context) ([]string, error) { - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) - if !ok || stream == nil { - return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) - } - - return strings.Split(stream.ClientAdvertisedCompressors(), ","), nil -} - -// SetTrailer sets the trailer metadata that will be sent when an RPC returns. -// When called more than once, all the provided metadata will be merged. -// -// The error returned is compatible with the status package. However, the -// status code will often not match the RPC status as seen by the client -// application, and therefore, should not be relied upon for this purpose. -func SetTrailer(ctx context.Context, md metadata.MD) error { - if md.Len() == 0 { - return nil - } - stream := ServerTransportStreamFromContext(ctx) - if stream == nil { - return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) - } - return stream.SetTrailer(md) -} - -// Method returns the method string for the server context. The returned -// string is in the format of "/service/method". -func Method(ctx context.Context) (string, bool) { - s := ServerTransportStreamFromContext(ctx) - if s == nil { - return "", false - } - return s.Method(), true -} - -type channelzServer struct { - s *Server -} - -func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { - return c.s.channelzMetric() -} - -// validateSendCompressor returns an error when given compressor name cannot be -// handled by the server or the client based on the advertised compressors. -func validateSendCompressor(name, clientCompressors string) error { - if name == encoding.Identity { - return nil - } - - if !grpcutil.IsCompressorNameRegistered(name) { - return fmt.Errorf("compressor not registered %q", name) - } - - for _, c := range strings.Split(clientCompressors, ",") { - if c == name { - return nil // found match - } - } - return fmt.Errorf("client does not support compressor %q", name) -} - -// atomicSemaphore implements a blocking, counting semaphore. acquire should be -// called synchronously; release may be called asynchronously. -type atomicSemaphore struct { - n atomic.Int64 - wait chan struct{} -} - -func (q *atomicSemaphore) acquire() { - if q.n.Add(-1) < 0 { - // We ran out of quota. Block until a release happens. - <-q.wait - } -} - -func (q *atomicSemaphore) release() { - // N.B. the "<= 0" check below should allow for this to work with multiple - // concurrent calls to acquire, but also note that with synchronous calls to - // acquire, as our system does, n will never be less than -1. There are - // fairness issues (queuing) to consider if this was to be generalized. - if q.n.Add(1) <= 0 { - // An acquire was waiting on us. Unblock it. - q.wait <- struct{}{} - } -} - -func newHandlerQuota(n uint32) *atomicSemaphore { - a := &atomicSemaphore{wait: make(chan struct{}, 1)} - a.n.Store(int64(n)) - return a -} diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go deleted file mode 100644 index 0df11fc098..0000000000 --- a/vendor/google.golang.org/grpc/service_config.go +++ /dev/null @@ -1,347 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "encoding/json" - "errors" - "fmt" - "reflect" - "time" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal" - internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" - "google.golang.org/grpc/serviceconfig" -) - -const maxInt = int(^uint(0) >> 1) - -// MethodConfig defines the configuration recommended by the service providers for a -// particular method. -// -// Deprecated: Users should not use this struct. Service config should be received -// through name resolver, as specified here -// https://github.com/grpc/grpc/blob/master/doc/service_config.md -type MethodConfig = internalserviceconfig.MethodConfig - -type lbConfig struct { - name string - cfg serviceconfig.LoadBalancingConfig -} - -// ServiceConfig is provided by the service provider and contains parameters for how -// clients that connect to the service should behave. -// -// Deprecated: Users should not use this struct. Service config should be received -// through name resolver, as specified here -// https://github.com/grpc/grpc/blob/master/doc/service_config.md -type ServiceConfig struct { - serviceconfig.Config - - // LB is the load balancer the service providers recommends. This is - // deprecated; lbConfigs is preferred. If lbConfig and LB are both present, - // lbConfig will be used. - LB *string - - // lbConfig is the service config's load balancing configuration. If - // lbConfig and LB are both present, lbConfig will be used. - lbConfig *lbConfig - - // Methods contains a map for the methods in this service. If there is an - // exact match for a method (i.e. /service/method) in the map, use the - // corresponding MethodConfig. If there's no exact match, look for the - // default config for the service (/service/) and use the corresponding - // MethodConfig if it exists. Otherwise, the method has no MethodConfig to - // use. - Methods map[string]MethodConfig - - // If a retryThrottlingPolicy is provided, gRPC will automatically throttle - // retry attempts and hedged RPCs when the client’s ratio of failures to - // successes exceeds a threshold. - // - // For each server name, the gRPC client will maintain a token_count which is - // initially set to maxTokens, and can take values between 0 and maxTokens. - // - // Every outgoing RPC (regardless of service or method invoked) will change - // token_count as follows: - // - // - Every failed RPC will decrement the token_count by 1. - // - Every successful RPC will increment the token_count by tokenRatio. - // - // If token_count is less than or equal to maxTokens / 2, then RPCs will not - // be retried and hedged RPCs will not be sent. - retryThrottling *retryThrottlingPolicy - // healthCheckConfig must be set as one of the requirement to enable LB channel - // health check. - healthCheckConfig *healthCheckConfig - // rawJSONString stores service config json string that get parsed into - // this service config struct. - rawJSONString string -} - -// healthCheckConfig defines the go-native version of the LB channel health check config. 
-type healthCheckConfig struct { - // serviceName is the service name to use in the health-checking request. - ServiceName string -} - -type jsonRetryPolicy struct { - MaxAttempts int - InitialBackoff internalserviceconfig.Duration - MaxBackoff internalserviceconfig.Duration - BackoffMultiplier float64 - RetryableStatusCodes []codes.Code -} - -// retryThrottlingPolicy defines the go-native version of the retry throttling -// policy defined by the service config here: -// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config -type retryThrottlingPolicy struct { - // The number of tokens starts at maxTokens. The token_count will always be - // between 0 and maxTokens. - // - // This field is required and must be greater than zero. - MaxTokens float64 - // The amount of tokens to add on each successful RPC. Typically this will - // be some number between 0 and 1, e.g., 0.1. - // - // This field is required and must be greater than zero. Up to 3 decimal - // places are supported. - TokenRatio float64 -} - -type jsonName struct { - Service string - Method string -} - -var ( - errDuplicatedName = errors.New("duplicated name") - errEmptyServiceNonEmptyMethod = errors.New("cannot combine empty 'service' and non-empty 'method'") -) - -func (j jsonName) generatePath() (string, error) { - if j.Service == "" { - if j.Method != "" { - return "", errEmptyServiceNonEmptyMethod - } - return "", nil - } - res := "/" + j.Service + "/" - if j.Method != "" { - res += j.Method - } - return res, nil -} - -// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. -type jsonMC struct { - Name *[]jsonName - WaitForReady *bool - Timeout *internalserviceconfig.Duration - MaxRequestMessageBytes *int64 - MaxResponseMessageBytes *int64 - RetryPolicy *jsonRetryPolicy -} - -// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. 
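The jsonName/jsonMC/jsonRetryPolicy shapes above mirror the JSON service config wire format parsed by parseServiceConfig below. A hedged example of such a config, supplied as a client-side fallback (the service and method names are hypothetical; normally the config arrives via the name resolver):

```go
package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

const serviceConfigJSON = `{
  "methodConfig": [{
    "name": [{"service": "echo.Echo", "method": "UnaryEcho"}],
    "waitForReady": true,
    "timeout": "1.5s",
    "retryPolicy": {
      "maxAttempts": 4,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2.0,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`

func main() {
	// WithDefaultServiceConfig is a fallback used when the resolver
	// returns no config for the target.
	conn, err := grpc.Dial("dns:///example.local:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(serviceConfigJSON))
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```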
-type jsonSC struct { - LoadBalancingPolicy *string - LoadBalancingConfig *internalserviceconfig.BalancerConfig - MethodConfig *[]jsonMC - RetryThrottling *retryThrottlingPolicy - HealthCheckConfig *healthCheckConfig -} - -func init() { - internal.ParseServiceConfig = parseServiceConfig -} -func parseServiceConfig(js string) *serviceconfig.ParseResult { - if len(js) == 0 { - return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} - } - var rsc jsonSC - err := json.Unmarshal([]byte(js), &rsc) - if err != nil { - logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) - return &serviceconfig.ParseResult{Err: err} - } - sc := ServiceConfig{ - LB: rsc.LoadBalancingPolicy, - Methods: make(map[string]MethodConfig), - retryThrottling: rsc.RetryThrottling, - healthCheckConfig: rsc.HealthCheckConfig, - rawJSONString: js, - } - if c := rsc.LoadBalancingConfig; c != nil { - sc.lbConfig = &lbConfig{ - name: c.Name, - cfg: c.Config, - } - } - - if rsc.MethodConfig == nil { - return &serviceconfig.ParseResult{Config: &sc} - } - - paths := map[string]struct{}{} - for _, m := range *rsc.MethodConfig { - if m.Name == nil { - continue - } - - mc := MethodConfig{ - WaitForReady: m.WaitForReady, - Timeout: (*time.Duration)(m.Timeout), - } - if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { - logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) - return &serviceconfig.ParseResult{Err: err} - } - if m.MaxRequestMessageBytes != nil { - if *m.MaxRequestMessageBytes > int64(maxInt) { - mc.MaxReqSize = newInt(maxInt) - } else { - mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes)) - } - } - if m.MaxResponseMessageBytes != nil { - if *m.MaxResponseMessageBytes > int64(maxInt) { - mc.MaxRespSize = newInt(maxInt) - } else { - mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes)) - } - } - for i, n := range *m.Name { - path, err := n.generatePath() - if err != nil { - logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) - return &serviceconfig.ParseResult{Err: err} - } - - if _, ok := paths[path]; ok { - err = errDuplicatedName - logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) - return &serviceconfig.ParseResult{Err: err} - } - paths[path] = struct{}{} - sc.Methods[path] = mc - } - } - - if sc.retryThrottling != nil { - if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 { - return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)} - } - if tr := sc.retryThrottling.TokenRatio; tr <= 0 { - return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)} - } - } - return &serviceconfig.ParseResult{Config: &sc} -} - -func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPolicy, err error) { - if jrp == nil { - return nil, nil - } - - if jrp.MaxAttempts <= 1 || - jrp.InitialBackoff <= 0 || - jrp.MaxBackoff <= 0 || - jrp.BackoffMultiplier <= 0 || - len(jrp.RetryableStatusCodes) == 0 { - logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) - return nil, nil - } - - rp := &internalserviceconfig.RetryPolicy{ - MaxAttempts: jrp.MaxAttempts, - InitialBackoff: time.Duration(jrp.InitialBackoff), - MaxBackoff: time.Duration(jrp.MaxBackoff), - BackoffMultiplier: jrp.BackoffMultiplier, - RetryableStatusCodes: make(map[codes.Code]bool), - } - if 
rp.MaxAttempts > 5 { - // TODO(retry): Make the max maxAttempts configurable. - rp.MaxAttempts = 5 - } - for _, code := range jrp.RetryableStatusCodes { - rp.RetryableStatusCodes[code] = true - } - return rp, nil -} - -func min(a, b *int) *int { - if *a < *b { - return a - } - return b -} - -func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { - if mcMax == nil && doptMax == nil { - return &defaultVal - } - if mcMax != nil && doptMax != nil { - return min(mcMax, doptMax) - } - if mcMax != nil { - return mcMax - } - return doptMax -} - -func newInt(b int) *int { - return &b -} - -func init() { - internal.EqualServiceConfigForTesting = equalServiceConfig -} - -// equalServiceConfig compares two configs. The rawJSONString field is ignored, -// because they may diff in white spaces. -// -// If any of them is NOT *ServiceConfig, return false. -func equalServiceConfig(a, b serviceconfig.Config) bool { - if a == nil && b == nil { - return true - } - aa, ok := a.(*ServiceConfig) - if !ok { - return false - } - bb, ok := b.(*ServiceConfig) - if !ok { - return false - } - aaRaw := aa.rawJSONString - aa.rawJSONString = "" - bbRaw := bb.rawJSONString - bb.rawJSONString = "" - defer func() { - aa.rawJSONString = aaRaw - bb.rawJSONString = bbRaw - }() - // Using reflect.DeepEqual instead of cmp.Equal because many balancer - // configs are unexported, and cmp.Equal cannot compare unexported fields - // from unexported structs. - return reflect.DeepEqual(aa, bb) -} diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go deleted file mode 100644 index 35e7a20a04..0000000000 --- a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go +++ /dev/null @@ -1,44 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package serviceconfig defines types and methods for operating on gRPC -// service configs. -// -// # Experimental -// -// Notice: This package is EXPERIMENTAL and may be changed or removed in a -// later release. -package serviceconfig - -// Config represents an opaque data structure holding a service config. -type Config interface { - isServiceConfig() -} - -// LoadBalancingConfig represents an opaque data structure holding a load -// balancing config. -type LoadBalancingConfig interface { - isLoadBalancingConfig() -} - -// ParseResult contains a service config or an error. Exactly one must be -// non-nil. -type ParseResult struct { - Config Config - Err error -} diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go deleted file mode 100644 index 48a64cfe8e..0000000000 --- a/vendor/google.golang.org/grpc/shared_buffer_pool.go +++ /dev/null @@ -1,154 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import "sync" - -// SharedBufferPool is a pool of buffers that can be shared, resulting in -// decreased memory allocation. Currently, in gRPC-go, it is only utilized -// for parsing incoming messages. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -type SharedBufferPool interface { - // Get returns a buffer with specified length from the pool. - // - // The returned byte slice may be not zero initialized. - Get(length int) []byte - - // Put returns a buffer to the pool. - Put(*[]byte) -} - -// NewSharedBufferPool creates a simple SharedBufferPool with buckets -// of different sizes to optimize memory usage. This prevents the pool from -// wasting large amounts of memory, even when handling messages of varying sizes. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func NewSharedBufferPool() SharedBufferPool { - return &simpleSharedBufferPool{ - pools: [poolArraySize]simpleSharedBufferChildPool{ - newBytesPool(level0PoolMaxSize), - newBytesPool(level1PoolMaxSize), - newBytesPool(level2PoolMaxSize), - newBytesPool(level3PoolMaxSize), - newBytesPool(level4PoolMaxSize), - newBytesPool(0), - }, - } -} - -// simpleSharedBufferPool is a simple implementation of SharedBufferPool. -type simpleSharedBufferPool struct { - pools [poolArraySize]simpleSharedBufferChildPool -} - -func (p *simpleSharedBufferPool) Get(size int) []byte { - return p.pools[p.poolIdx(size)].Get(size) -} - -func (p *simpleSharedBufferPool) Put(bs *[]byte) { - p.pools[p.poolIdx(cap(*bs))].Put(bs) -} - -func (p *simpleSharedBufferPool) poolIdx(size int) int { - switch { - case size <= level0PoolMaxSize: - return level0PoolIdx - case size <= level1PoolMaxSize: - return level1PoolIdx - case size <= level2PoolMaxSize: - return level2PoolIdx - case size <= level3PoolMaxSize: - return level3PoolIdx - case size <= level4PoolMaxSize: - return level4PoolIdx - default: - return levelMaxPoolIdx - } -} - -const ( - level0PoolMaxSize = 16 // 16 B - level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B - level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB - level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB - level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB -) - -const ( - level0PoolIdx = iota - level1PoolIdx - level2PoolIdx - level3PoolIdx - level4PoolIdx - levelMaxPoolIdx - poolArraySize -) - -type simpleSharedBufferChildPool interface { - Get(size int) []byte - Put(any) -} - -type bufferPool struct { - sync.Pool - - defaultSize int -} - -func (p *bufferPool) Get(size int) []byte { - bs := p.Pool.Get().(*[]byte) - - if cap(*bs) < size { - p.Pool.Put(bs) - - return make([]byte, size) - } - - return (*bs)[:size] -} - -func newBytesPool(size int) simpleSharedBufferChildPool { - return &bufferPool{ - Pool: sync.Pool{ - New: func() any { - bs := make([]byte, size) - return &bs - }, - }, - defaultSize: size, - } -} - -// nopBufferPool is a buffer pool just makes new buffer without pooling. 
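poolIdx above routes each request to the smallest bucket that fits: 16 B, 256 B, 4 KB, 64 KB, 1 MB, then an unbounded fallback. A short usage sketch; the RecvBufferPool server option is assumed from the experimental API in the grpc-go release this vendor snapshot tracks:

```go
package main

import "google.golang.org/grpc"

func main() {
	pool := grpc.NewSharedBufferPool()

	// 4000 <= 4096 (level2PoolMaxSize), so this comes from the 4 KB bucket;
	// the slice is re-sliced to the requested length but keeps the 4 KB cap.
	buf := pool.Get(4000)
	// ... fill and consume buf ...
	pool.Put(&buf) // returned to the bucket chosen by cap(buf)

	// Experimental: have the server parse incoming messages out of the pool.
	s := grpc.NewServer(grpc.RecvBufferPool(pool))
	_ = s
}
```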
-type nopBufferPool struct { -} - -func (nopBufferPool) Get(length int) []byte { - return make([]byte, length) -} - -func (nopBufferPool) Put(*[]byte) { -} diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go deleted file mode 100644 index dc03731e45..0000000000 --- a/vendor/google.golang.org/grpc/stats/handlers.go +++ /dev/null @@ -1,63 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package stats - -import ( - "context" - "net" -) - -// ConnTagInfo defines the relevant information needed by connection context tagger. -type ConnTagInfo struct { - // RemoteAddr is the remote address of the corresponding connection. - RemoteAddr net.Addr - // LocalAddr is the local address of the corresponding connection. - LocalAddr net.Addr -} - -// RPCTagInfo defines the relevant information needed by RPC context tagger. -type RPCTagInfo struct { - // FullMethodName is the RPC method in the format of /package.service/method. - FullMethodName string - // FailFast indicates if this RPC is failfast. - // This field is only valid on client side, it's always false on server side. - FailFast bool -} - -// Handler defines the interface for the related stats handling (e.g., RPCs, connections). -type Handler interface { - // TagRPC can attach some information to the given context. - // The context used for the rest lifetime of the RPC will be derived from - // the returned context. - TagRPC(context.Context, *RPCTagInfo) context.Context - // HandleRPC processes the RPC stats. - HandleRPC(context.Context, RPCStats) - - // TagConn can attach some information to the given context. - // The returned context will be used for stats handling. - // For conn stats handling, the context used in HandleConn for this - // connection will be derived from the context returned. - // For RPC stats handling, - // - On server side, the context used in HandleRPC for all RPCs on this - // connection will be derived from the context returned. - // - On client side, the context is not derived from the context returned. - TagConn(context.Context, *ConnTagInfo) context.Context - // HandleConn processes the Conn stats. - HandleConn(context.Context, ConnStats) -} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go deleted file mode 100644 index 4ab70e2d46..0000000000 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ /dev/null @@ -1,343 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package stats is for collecting and reporting various network and RPC stats. -// This package is for monitoring purpose only. All fields are read-only. -// All APIs are experimental. -package stats // import "google.golang.org/grpc/stats" - -import ( - "context" - "net" - "time" - - "google.golang.org/grpc/metadata" -) - -// RPCStats contains stats information about RPCs. -type RPCStats interface { - isRPCStats() - // IsClient returns true if this RPCStats is from client side. - IsClient() bool -} - -// Begin contains stats when an RPC attempt begins. -// FailFast is only valid if this Begin is from client side. -type Begin struct { - // Client is true if this Begin is from client side. - Client bool - // BeginTime is the time when the RPC attempt begins. - BeginTime time.Time - // FailFast indicates if this RPC is failfast. - FailFast bool - // IsClientStream indicates whether the RPC is a client streaming RPC. - IsClientStream bool - // IsServerStream indicates whether the RPC is a server streaming RPC. - IsServerStream bool - // IsTransparentRetryAttempt indicates whether this attempt was initiated - // due to transparently retrying a previous attempt. - IsTransparentRetryAttempt bool -} - -// IsClient indicates if the stats information is from client side. -func (s *Begin) IsClient() bool { return s.Client } - -func (s *Begin) isRPCStats() {} - -// PickerUpdated indicates that the LB policy provided a new picker while the -// RPC was waiting for one. -type PickerUpdated struct{} - -// IsClient indicates if the stats information is from client side. Only Client -// Side interfaces with a Picker, thus always returns true. -func (*PickerUpdated) IsClient() bool { return true } - -func (*PickerUpdated) isRPCStats() {} - -// InPayload contains the information for an incoming payload. -type InPayload struct { - // Client is true if this InPayload is from client side. - Client bool - // Payload is the payload with original type. - Payload any - // Data is the serialized message payload. - Data []byte - - // Length is the size of the uncompressed payload data. Does not include any - // framing (gRPC or HTTP/2). - Length int - // CompressedLength is the size of the compressed payload data. Does not - // include any framing (gRPC or HTTP/2). Same as Length if compression not - // enabled. - CompressedLength int - // WireLength is the size of the compressed payload data plus gRPC framing. - // Does not include HTTP/2 framing. - WireLength int - - // RecvTime is the time when the payload is received. - RecvTime time.Time -} - -// IsClient indicates if the stats information is from client side. -func (s *InPayload) IsClient() bool { return s.Client } - -func (s *InPayload) isRPCStats() {} - -// InHeader contains stats when a header is received. -type InHeader struct { - // Client is true if this InHeader is from client side. - Client bool - // WireLength is the wire length of header. - WireLength int - // Compression is the compression algorithm used for the RPC. - Compression string - // Header contains the header metadata received. - Header metadata.MD - - // The following fields are valid only if Client is false. - // FullMethod is the full RPC method string, i.e., /package.service/method. - FullMethod string - // RemoteAddr is the remote address of the corresponding connection. - RemoteAddr net.Addr - // LocalAddr is the local address of the corresponding connection. 
- LocalAddr net.Addr -} - -// IsClient indicates if the stats information is from client side. -func (s *InHeader) IsClient() bool { return s.Client } - -func (s *InHeader) isRPCStats() {} - -// InTrailer contains stats when a trailer is received. -type InTrailer struct { - // Client is true if this InTrailer is from client side. - Client bool - // WireLength is the wire length of trailer. - WireLength int - // Trailer contains the trailer metadata received from the server. This - // field is only valid if this InTrailer is from the client side. - Trailer metadata.MD -} - -// IsClient indicates if the stats information is from client side. -func (s *InTrailer) IsClient() bool { return s.Client } - -func (s *InTrailer) isRPCStats() {} - -// OutPayload contains the information for an outgoing payload. -type OutPayload struct { - // Client is true if this OutPayload is from client side. - Client bool - // Payload is the payload with original type. - Payload any - // Data is the serialized message payload. - Data []byte - // Length is the size of the uncompressed payload data. Does not include any - // framing (gRPC or HTTP/2). - Length int - // CompressedLength is the size of the compressed payload data. Does not - // include any framing (gRPC or HTTP/2). Same as Length if compression not - // enabled. - CompressedLength int - // WireLength is the size of the compressed payload data plus gRPC framing. - // Does not include HTTP/2 framing. - WireLength int - // SentTime is the time when the payload is sent. - SentTime time.Time -} - -// IsClient indicates if this stats information is from client side. -func (s *OutPayload) IsClient() bool { return s.Client } - -func (s *OutPayload) isRPCStats() {} - -// OutHeader contains stats when a header is sent. -type OutHeader struct { - // Client is true if this OutHeader is from client side. - Client bool - // Compression is the compression algorithm used for the RPC. - Compression string - // Header contains the header metadata sent. - Header metadata.MD - - // The following fields are valid only if Client is true. - // FullMethod is the full RPC method string, i.e., /package.service/method. - FullMethod string - // RemoteAddr is the remote address of the corresponding connection. - RemoteAddr net.Addr - // LocalAddr is the local address of the corresponding connection. - LocalAddr net.Addr -} - -// IsClient indicates if this stats information is from client side. -func (s *OutHeader) IsClient() bool { return s.Client } - -func (s *OutHeader) isRPCStats() {} - -// OutTrailer contains stats when a trailer is sent. -type OutTrailer struct { - // Client is true if this OutTrailer is from client side. - Client bool - // WireLength is the wire length of trailer. - // - // Deprecated: This field is never set. The length is not known when this message is - // emitted because the trailer fields are compressed with hpack after that. - WireLength int - // Trailer contains the trailer metadata sent to the client. This - // field is only valid if this OutTrailer is from the server side. - Trailer metadata.MD -} - -// IsClient indicates if this stats information is from client side. -func (s *OutTrailer) IsClient() bool { return s.Client } - -func (s *OutTrailer) isRPCStats() {} - -// End contains stats when an RPC ends. -type End struct { - // Client is true if this End is from client side. - Client bool - // BeginTime is the time when the RPC began. - BeginTime time.Time - // EndTime is the time when the RPC ends. 
- EndTime time.Time - // Trailer contains the trailer metadata received from the server. This - // field is only valid if this End is from the client side. - // Deprecated: use Trailer in InTrailer instead. - Trailer metadata.MD - // Error is the error the RPC ended with. It is an error generated from - // status.Status and can be converted back to status.Status using - // status.FromError if non-nil. - Error error -} - -// IsClient indicates if this is from client side. -func (s *End) IsClient() bool { return s.Client } - -func (s *End) isRPCStats() {} - -// ConnStats contains stats information about connections. -type ConnStats interface { - isConnStats() - // IsClient returns true if this ConnStats is from client side. - IsClient() bool -} - -// ConnBegin contains the stats of a connection when it is established. -type ConnBegin struct { - // Client is true if this ConnBegin is from client side. - Client bool -} - -// IsClient indicates if this is from client side. -func (s *ConnBegin) IsClient() bool { return s.Client } - -func (s *ConnBegin) isConnStats() {} - -// ConnEnd contains the stats of a connection when it ends. -type ConnEnd struct { - // Client is true if this ConnEnd is from client side. - Client bool -} - -// IsClient indicates if this is from client side. -func (s *ConnEnd) IsClient() bool { return s.Client } - -func (s *ConnEnd) isConnStats() {} - -type incomingTagsKey struct{} -type outgoingTagsKey struct{} - -// SetTags attaches stats tagging data to the context, which will be sent in -// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to -// SetTags will overwrite the values from earlier calls. -// -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. -func SetTags(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, outgoingTagsKey{}, b) -} - -// Tags returns the tags from the context for the inbound RPC. -// -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. -func Tags(ctx context.Context) []byte { - b, _ := ctx.Value(incomingTagsKey{}).([]byte) - return b -} - -// SetIncomingTags attaches stats tagging data to the context, to be read by -// the application (not sent in outgoing RPCs). -// -// This is intended for gRPC-internal use ONLY. -func SetIncomingTags(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, incomingTagsKey{}, b) -} - -// OutgoingTags returns the tags from the context for the outbound RPC. -// -// This is intended for gRPC-internal use ONLY. -func OutgoingTags(ctx context.Context) []byte { - b, _ := ctx.Value(outgoingTagsKey{}).([]byte) - return b -} - -type incomingTraceKey struct{} -type outgoingTraceKey struct{} - -// SetTrace attaches stats tagging data to the context, which will be sent in -// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to -// SetTrace will overwrite the values from earlier calls. -// -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. 
New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. -func SetTrace(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, outgoingTraceKey{}, b) -} - -// Trace returns the trace from the context for the inbound RPC. -// -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. -func Trace(ctx context.Context) []byte { - b, _ := ctx.Value(incomingTraceKey{}).([]byte) - return b -} - -// SetIncomingTrace attaches stats tagging data to the context, to be read by -// the application (not sent in outgoing RPCs). It is intended for -// gRPC-internal use. -func SetIncomingTrace(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, incomingTraceKey{}, b) -} - -// OutgoingTrace returns the trace from the context for the outbound RPC. It is -// intended for gRPC-internal use. -func OutgoingTrace(ctx context.Context) []byte { - b, _ := ctx.Value(outgoingTraceKey{}).([]byte) - return b -} diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go deleted file mode 100644 index a93360efb8..0000000000 --- a/vendor/google.golang.org/grpc/status/status.go +++ /dev/null @@ -1,162 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package status implements errors returned by gRPC. These errors are -// serialized and transmitted on the wire between server and client, and allow -// for additional data to be transmitted via the Details field in the status -// proto. gRPC service handlers should return an error created by this -// package, and gRPC clients should expect a corresponding error to be -// returned from the RPC call. -// -// This package upholds the invariants that a non-nil error may not -// contain an OK code, and an OK code must result in a nil error. -package status - -import ( - "context" - "errors" - "fmt" - - spb "google.golang.org/genproto/googleapis/rpc/status" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/status" -) - -// Status references google.golang.org/grpc/internal/status. It represents an -// RPC status code, message, and details. It is immutable and should be -// created with New, Newf, or FromProto. -// https://godoc.org/google.golang.org/grpc/internal/status -type Status = status.Status - -// New returns a Status representing c and msg. -func New(c codes.Code, msg string) *Status { - return status.New(c, msg) -} - -// Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...any) *Status { - return New(c, fmt.Sprintf(format, a...)) -} - -// Error returns an error representing c and msg. If c is OK, returns nil. 
-func Error(c codes.Code, msg string) error { - return New(c, msg).Err() -} - -// Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...any) error { - return Error(c, fmt.Sprintf(format, a...)) -} - -// ErrorProto returns an error representing s. If s.Code is OK, returns nil. -func ErrorProto(s *spb.Status) error { - return FromProto(s).Err() -} - -// FromProto returns a Status representing s. -func FromProto(s *spb.Status) *Status { - return status.FromProto(s) -} - -// FromError returns a Status representation of err. -// -// - If err was produced by this package or implements the method `GRPCStatus() -// *Status` and `GRPCStatus()` does not return nil, or if err wraps a type -// satisfying this, the Status from `GRPCStatus()` is returned. For wrapped -// errors, the message returned contains the entire err.Error() text and not -// just the wrapped status. In that case, ok is true. -// -// - If err is nil, a Status is returned with codes.OK and no message, and ok -// is true. -// -// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()` -// returns nil (which maps to Codes.OK), or if err wraps a type -// satisfying this, a Status is returned with codes.Unknown and err's -// Error() message, and ok is false. -// -// - Otherwise, err is an error not compatible with this package. In this -// case, a Status is returned with codes.Unknown and err's Error() message, -// and ok is false. -func FromError(err error) (s *Status, ok bool) { - if err == nil { - return nil, true - } - type grpcstatus interface{ GRPCStatus() *Status } - if gs, ok := err.(grpcstatus); ok { - grpcStatus := gs.GRPCStatus() - if grpcStatus == nil { - // Error has status nil, which maps to codes.OK. There - // is no sensible behavior for this, so we turn it into - // an error with codes.Unknown and discard the existing - // status. - return New(codes.Unknown, err.Error()), false - } - return grpcStatus, true - } - var gs grpcstatus - if errors.As(err, &gs) { - grpcStatus := gs.GRPCStatus() - if grpcStatus == nil { - // Error wraps an error that has status nil, which maps - // to codes.OK. There is no sensible behavior for this, - // so we turn it into an error with codes.Unknown and - // discard the existing status. - return New(codes.Unknown, err.Error()), false - } - p := grpcStatus.Proto() - p.Message = err.Error() - return status.FromProto(p), true - } - return New(codes.Unknown, err.Error()), false -} - -// Convert is a convenience function which removes the need to handle the -// boolean return value from FromError. -func Convert(err error) *Status { - s, _ := FromError(err) - return s -} - -// Code returns the Code of the error if it is a Status error or if it wraps a -// Status error. If that is not the case, it returns codes.OK if err is nil, or -// codes.Unknown otherwise. -func Code(err error) codes.Code { - // Don't use FromError to avoid allocation of OK status. - if err == nil { - return codes.OK - } - - return Convert(err).Code() -} - -// FromContextError converts a context error or wrapped context error into a -// Status. It returns a Status with codes.OK if err is nil, or a Status with -// codes.Unknown if err is non-nil and not a context error. 
-func FromContextError(err error) *Status { - if err == nil { - return nil - } - if errors.Is(err, context.DeadlineExceeded) { - return New(codes.DeadlineExceeded, err.Error()) - } - if errors.Is(err, context.Canceled) { - return New(codes.Canceled, err.Error()) - } - return New(codes.Unknown, err.Error()) -} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go deleted file mode 100644 index 814e998354..0000000000 --- a/vendor/google.golang.org/grpc/stream.go +++ /dev/null @@ -1,1781 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "errors" - "io" - "math" - "strconv" - "sync" - "time" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/encoding" - "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/balancerload" - "google.golang.org/grpc/internal/binarylog" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" - "google.golang.org/grpc/internal/grpcutil" - imetadata "google.golang.org/grpc/internal/metadata" - iresolver "google.golang.org/grpc/internal/resolver" - "google.golang.org/grpc/internal/serviceconfig" - istatus "google.golang.org/grpc/internal/status" - "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/stats" - "google.golang.org/grpc/status" -) - -var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) - -// StreamHandler defines the handler called by gRPC server to complete the -// execution of a streaming RPC. -// -// If a StreamHandler returns an error, it should either be produced by the -// status package, or be one of the context errors. Otherwise, gRPC will use -// codes.Unknown as the status code and err.Error() as the status message of the -// RPC. -type StreamHandler func(srv any, stream ServerStream) error - -// StreamDesc represents a streaming RPC service's method specification. Used -// on the server when registering services and on the client when initiating -// new streams. -type StreamDesc struct { - // StreamName and Handler are only used when registering handlers on a - // server. - StreamName string // the name of the method excluding the service - Handler StreamHandler // the handler called for the method - - // ServerStreams and ClientStreams are used for registering handlers on a - // server as well as defining RPC behavior when passed to NewClientStream - // and ClientConn.NewStream. At least one must be true. - ServerStreams bool // indicates the server can perform streaming sends - ClientStreams bool // indicates the client can perform streaming sends -} - -// Stream defines the common interface a client or server stream has to satisfy. -// -// Deprecated: See ClientStream and ServerStream documentation instead. 
-type Stream interface { - // Deprecated: See ClientStream and ServerStream documentation instead. - Context() context.Context - // Deprecated: See ClientStream and ServerStream documentation instead. - SendMsg(m any) error - // Deprecated: See ClientStream and ServerStream documentation instead. - RecvMsg(m any) error -} - -// ClientStream defines the client-side behavior of a streaming RPC. -// -// All errors returned from ClientStream methods are compatible with the -// status package. -type ClientStream interface { - // Header returns the header metadata received from the server if there - // is any. It blocks if the metadata is not ready to read. If the metadata - // is nil and the error is also nil, then the stream was terminated without - // headers, and the status can be discovered by calling RecvMsg. - Header() (metadata.MD, error) - // Trailer returns the trailer metadata from the server, if there is any. - // It must only be called after stream.CloseAndRecv has returned, or - // stream.Recv has returned a non-nil error (including io.EOF). - Trailer() metadata.MD - // CloseSend closes the send direction of the stream. It closes the stream - // when non-nil error is met. It is also not safe to call CloseSend - // concurrently with SendMsg. - CloseSend() error - // Context returns the context for this stream. - // - // It should not be called until after Header or RecvMsg has returned. Once - // called, subsequent client-side retries are disabled. - Context() context.Context - // SendMsg is generally called by generated code. On error, SendMsg aborts - // the stream. If the error was generated by the client, the status is - // returned directly; otherwise, io.EOF is returned and the status of - // the stream may be discovered using RecvMsg. - // - // SendMsg blocks until: - // - There is sufficient flow control to schedule m with the transport, or - // - The stream is done, or - // - The stream breaks. - // - // SendMsg does not wait until the message is received by the server. An - // untimely stream closure may result in lost messages. To ensure delivery, - // users should ensure the RPC completed successfully using RecvMsg. - // - // It is safe to have a goroutine calling SendMsg and another goroutine - // calling RecvMsg on the same stream at the same time, but it is not safe - // to call SendMsg on the same stream in different goroutines. It is also - // not safe to call CloseSend concurrently with SendMsg. - // - // It is not safe to modify the message after calling SendMsg. Tracing - // libraries and stats handlers may use the message lazily. - SendMsg(m any) error - // RecvMsg blocks until it receives a message into m or the stream is - // done. It returns io.EOF when the stream completes successfully. On - // any other error, the stream is aborted and the error contains the RPC - // status. - // - // It is safe to have a goroutine calling SendMsg and another goroutine - // calling RecvMsg on the same stream at the same time, but it is not - // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m any) error -} - -// NewStream creates a new Stream for the client side. This is typically -// called by generated code. ctx is used for the lifetime of the stream. -// -// To ensure resources are not leaked due to the stream returned, one of the following -// actions must be performed: -// -// 1. Call Close on the ClientConn. -// 2. Cancel the context provided. -// 3. Call RecvMsg until a non-nil error is returned. 
A protobuf-generated -// client-streaming RPC, for instance, might use the helper function -// CloseAndRecv (note that CloseSend does not Recv, therefore is not -// guaranteed to release all resources). -// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. -// -// If none of the above happen, a goroutine and a context will be leaked, and grpc -// will not call the optionally-configured stats handler with a stats.End message. -func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - // allow interceptor to see all applicable call options, which means those - // configured as defaults from dial option as well as per-call options - opts = combine(cc.dopts.callOptions, opts) - - if cc.dopts.streamInt != nil { - return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) - } - return newClientStream(ctx, desc, cc, method, opts...) -} - -// NewClientStream is a wrapper for ClientConn.NewStream. -func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { - return cc.NewStream(ctx, desc, method, opts...) -} - -func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { - // Start tracking the RPC for idleness purposes. This is where a stream is - // created for both streaming and unary RPCs, and hence is a good place to - // track active RPC count. - if err := cc.idlenessMgr.OnCallBegin(); err != nil { - return nil, err - } - // Add a calloption, to decrement the active call count, that gets executed - // when the RPC completes. - opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) - - if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { - // validate md - if err := imetadata.Validate(md); err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - // validate added - for _, kvs := range added { - for i := 0; i < len(kvs); i += 2 { - if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - } - } - } - if channelz.IsOn() { - cc.incrCallsStarted() - defer func() { - if err != nil { - cc.incrCallsFailed() - } - }() - } - // Provide an opportunity for the first RPC to see the first service config - // provided by the resolver. - if err := cc.waitForResolvedAddrs(ctx); err != nil { - return nil, err - } - - var mc serviceconfig.MethodConfig - var onCommit func() - var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { - return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) - } - - rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} - rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo) - if err != nil { - if st, ok := status.FromError(err); ok { - // Restrict the code to the list allowed by gRFC A54. 
- if istatus.IsRestrictedControlPlaneCode(st) { - err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err) - } - return nil, err - } - return nil, toRPCErr(err) - } - - if rpcConfig != nil { - if rpcConfig.Context != nil { - ctx = rpcConfig.Context - } - mc = rpcConfig.MethodConfig - onCommit = rpcConfig.OnCommitted - if rpcConfig.Interceptor != nil { - rpcInfo.Context = nil - ns := newStream - newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { - cs, err := rpcConfig.Interceptor.NewStream(ctx, rpcInfo, done, ns) - if err != nil { - return nil, toRPCErr(err) - } - return cs, nil - } - } - } - - return newStream(ctx, func() {}) -} - -func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) { - c := defaultCallInfo() - if mc.WaitForReady != nil { - c.failFast = !*mc.WaitForReady - } - - // Possible context leak: - // The cancel function for the child context we create will only be called - // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if - // an error is generated by SendMsg. - // https://github.com/grpc/grpc-go/issues/1818. - var cancel context.CancelFunc - if mc.Timeout != nil && *mc.Timeout >= 0 { - ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) - } else { - ctx, cancel = context.WithCancel(ctx) - } - defer func() { - if err != nil { - cancel() - } - }() - - for _, o := range opts { - if err := o.before(c); err != nil { - return nil, toRPCErr(err) - } - } - c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) - c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) - if err := setCallInfoCodec(c); err != nil { - return nil, err - } - - callHdr := &transport.CallHdr{ - Host: cc.authority, - Method: method, - ContentSubtype: c.contentSubtype, - DoneFunc: doneFunc, - } - - // Set our outgoing compression according to the UseCompressor CallOption, if - // set. In that case, also find the compressor from the encoding package. - // Otherwise, use the compressor configured by the WithCompressor DialOption, - // if set. - var cp Compressor - var comp encoding.Compressor - if ct := c.compressorType; ct != "" { - callHdr.SendCompress = ct - if ct != encoding.Identity { - comp = encoding.GetCompressor(ct) - if comp == nil { - return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) - } - } - } else if cc.dopts.cp != nil { - callHdr.SendCompress = cc.dopts.cp.Type() - cp = cc.dopts.cp - } - if c.creds != nil { - callHdr.Creds = c.creds - } - - cs := &clientStream{ - callHdr: callHdr, - ctx: ctx, - methodConfig: &mc, - opts: opts, - callInfo: c, - cc: cc, - desc: desc, - codec: c.codec, - cp: cp, - comp: comp, - cancel: cancel, - firstAttempt: true, - onCommit: onCommit, - } - if !cc.dopts.disableRetry { - cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) - } - if ml := binarylog.GetMethodLogger(method); ml != nil { - cs.binlogs = append(cs.binlogs, ml) - } - if cc.dopts.binaryLogger != nil { - if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil { - cs.binlogs = append(cs.binlogs, ml) - } - } - - // Pick the transport to use and create a new stream on the transport. - // Assign cs.attempt upon success. 
- op := func(a *csAttempt) error { - if err := a.getTransport(); err != nil { - return err - } - if err := a.newStream(); err != nil { - return err - } - // Because this operation is always called either here (while creating - // the clientStream) or by the retry code while locked when replaying - // the operation, it is safe to access cs.attempt directly. - cs.attempt = a - return nil - } - if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { - return nil, err - } - - if len(cs.binlogs) != 0 { - md, _ := metadata.FromOutgoingContext(ctx) - logEntry := &binarylog.ClientHeader{ - OnClientSide: true, - Header: md, - MethodName: method, - Authority: cs.cc.authority, - } - if deadline, ok := ctx.Deadline(); ok { - logEntry.Timeout = time.Until(deadline) - if logEntry.Timeout < 0 { - logEntry.Timeout = 0 - } - } - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, logEntry) - } - } - - if desc != unaryStreamDesc { - // Listen on cc and stream contexts to cleanup when the user closes the - // ClientConn or cancels the stream context. In all other cases, an error - // should already be injected into the recv buffer by the transport, which - // the client will eventually receive, and then we will cancel the stream's - // context in clientStream.finish. - go func() { - select { - case <-cc.ctx.Done(): - cs.finish(ErrClientConnClosing) - case <-ctx.Done(): - cs.finish(toRPCErr(ctx.Err())) - } - }() - } - return cs, nil -} - -// newAttemptLocked creates a new csAttempt without a transport or stream. -func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) { - if err := cs.ctx.Err(); err != nil { - return nil, toRPCErr(err) - } - if err := cs.cc.ctx.Err(); err != nil { - return nil, ErrClientConnClosing - } - - ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) - method := cs.callHdr.Method - var beginTime time.Time - shs := cs.cc.dopts.copts.StatsHandlers - for _, sh := range shs { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) - beginTime = time.Now() - begin := &stats.Begin{ - Client: true, - BeginTime: beginTime, - FailFast: cs.callInfo.failFast, - IsClientStream: cs.desc.ClientStreams, - IsServerStream: cs.desc.ServerStreams, - IsTransparentRetryAttempt: isTransparent, - } - sh.HandleRPC(ctx, begin) - } - - var trInfo *traceInfo - if EnableTracing { - trInfo = &traceInfo{ - tr: newTrace("grpc.Sent."+methodFamily(method), method), - firstLine: firstLine{ - client: true, - }, - } - if deadline, ok := ctx.Deadline(); ok { - trInfo.firstLine.deadline = time.Until(deadline) - } - trInfo.tr.LazyLog(&trInfo.firstLine, false) - ctx = newTraceContext(ctx, trInfo.tr) - } - - if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata { - // Add extra metadata (metadata that will be added by transport) to context - // so the balancer can see them. 
- ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( - "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), - )) - } - - return &csAttempt{ - ctx: ctx, - beginTime: beginTime, - cs: cs, - dc: cs.cc.dopts.dc, - statsHandlers: shs, - trInfo: trInfo, - }, nil -} - -func (a *csAttempt) getTransport() error { - cs := a.cs - - var err error - a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) - if err != nil { - if de, ok := err.(dropError); ok { - err = de.error - a.drop = true - } - return err - } - if a.trInfo != nil { - a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr()) - } - return nil -} - -func (a *csAttempt) newStream() error { - cs := a.cs - cs.callHdr.PreviousAttempts = cs.numRetries - - // Merge metadata stored in PickResult, if any, with existing call metadata. - // It is safe to overwrite the csAttempt's context here, since all state - // maintained in it are local to the attempt. When the attempt has to be - // retried, a new instance of csAttempt will be created. - if a.pickResult.Metadata != nil { - // We currently do not have a function it the metadata package which - // merges given metadata with existing metadata in a context. Existing - // function `AppendToOutgoingContext()` takes a variadic argument of key - // value pairs. - // - // TODO: Make it possible to retrieve key value pairs from metadata.MD - // in a form passable to AppendToOutgoingContext(), or create a version - // of AppendToOutgoingContext() that accepts a metadata.MD. - md, _ := metadata.FromOutgoingContext(a.ctx) - md = metadata.Join(md, a.pickResult.Metadata) - a.ctx = metadata.NewOutgoingContext(a.ctx, md) - } - - s, err := a.t.NewStream(a.ctx, cs.callHdr) - if err != nil { - nse, ok := err.(*transport.NewStreamError) - if !ok { - // Unexpected. - return err - } - - if nse.AllowTransparentRetry { - a.allowTransparentRetry = true - } - - // Unwrap and convert error. - return toRPCErr(nse.Err) - } - a.s = s - a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} - return nil -} - -// clientStream implements a client side Stream. -type clientStream struct { - callHdr *transport.CallHdr - opts []CallOption - callInfo *callInfo - cc *ClientConn - desc *StreamDesc - - codec baseCodec - cp Compressor - comp encoding.Compressor - - cancel context.CancelFunc // cancels all attempts - - sentLast bool // sent an end stream - - methodConfig *MethodConfig - - ctx context.Context // the application's context, wrapped by stats/tracing - - retryThrottler *retryThrottler // The throttler active when the RPC began. - - binlogs []binarylog.MethodLogger - // serverHeaderBinlogged is a boolean for whether server header has been - // logged. Server header will be logged when the first time one of those - // happens: stream.Header(), stream.Recv(). - // - // It's only read and used by Recv() and Header(), so it doesn't need to be - // synchronized. - serverHeaderBinlogged bool - - mu sync.Mutex - firstAttempt bool // if true, transparent retry is valid - numRetries int // exclusive of transparent retry attempt(s) - numRetriesSincePushback int // retries since pushback; to reset backoff - finished bool // TODO: replace with atomic cmpxchg or sync.Once? - // attempt is the active client stream attempt. - // The only place where it is written is the newAttemptLocked method and this method never writes nil. - // So, attempt can be nil only inside newClientStream function when clientStream is first created. 
- // One of the first things done after clientStream's creation, is to call newAttemptLocked which either - // assigns a non nil value to the attempt or returns an error. If an error is returned from newAttemptLocked, - // then newClientStream calls finish on the clientStream and returns. So, finish method is the only - // place where we need to check if the attempt is nil. - attempt *csAttempt - // TODO(hedging): hedging will have multiple attempts simultaneously. - committed bool // active attempt committed for retry? - onCommit func() - buffer []func(a *csAttempt) error // operations to replay on retry - bufferSize int // current size of buffer -} - -// csAttempt implements a single transport stream attempt within a -// clientStream. -type csAttempt struct { - ctx context.Context - cs *clientStream - t transport.ClientTransport - s *transport.Stream - p *parser - pickResult balancer.PickResult - - finished bool - dc Decompressor - decomp encoding.Compressor - decompSet bool - - mu sync.Mutex // guards trInfo.tr - // trInfo may be nil (if EnableTracing is false). - // trInfo.tr is set when created (if EnableTracing is true), - // and cleared when the finish method is called. - trInfo *traceInfo - - statsHandlers []stats.Handler - beginTime time.Time - - // set for newStream errors that may be transparently retried - allowTransparentRetry bool - // set for pick errors that are returned as a status - drop bool -} - -func (cs *clientStream) commitAttemptLocked() { - if !cs.committed && cs.onCommit != nil { - cs.onCommit() - } - cs.committed = true - cs.buffer = nil -} - -func (cs *clientStream) commitAttempt() { - cs.mu.Lock() - cs.commitAttemptLocked() - cs.mu.Unlock() -} - -// shouldRetry returns nil if the RPC should be retried; otherwise it returns -// the error that should be returned by the operation. If the RPC should be -// retried, the bool indicates whether it is being retried transparently. -func (a *csAttempt) shouldRetry(err error) (bool, error) { - cs := a.cs - - if cs.finished || cs.committed || a.drop { - // RPC is finished or committed or was dropped by the picker; cannot retry. - return false, err - } - if a.s == nil && a.allowTransparentRetry { - return true, nil - } - // Wait for the trailers. - unprocessed := false - if a.s != nil { - <-a.s.Done() - unprocessed = a.s.Unprocessed() - } - if cs.firstAttempt && unprocessed { - // First attempt, stream unprocessed: transparently retry. - return true, nil - } - if cs.cc.dopts.disableRetry { - return false, err - } - - pushback := 0 - hasPushback := false - if a.s != nil { - if !a.s.TrailersOnly() { - return false, err - } - - // TODO(retry): Move down if the spec changes to not check server pushback - // before considering this a failure for throttling. - sps := a.s.Trailer()["grpc-retry-pushback-ms"] - if len(sps) == 1 { - var e error - if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { - channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) - cs.retryThrottler.throttle() // This counts as a failure for throttling. - return false, err - } - hasPushback = true - } else if len(sps) > 1 { - channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) - cs.retryThrottler.throttle() // This counts as a failure for throttling. 
- return false, err - } - } - - var code codes.Code - if a.s != nil { - code = a.s.Status().Code() - } else { - code = status.Code(err) - } - - rp := cs.methodConfig.RetryPolicy - if rp == nil || !rp.RetryableStatusCodes[code] { - return false, err - } - - // Note: the ordering here is important; we count this as a failure - // only if the code matched a retryable code. - if cs.retryThrottler.throttle() { - return false, err - } - if cs.numRetries+1 >= rp.MaxAttempts { - return false, err - } - - var dur time.Duration - if hasPushback { - dur = time.Millisecond * time.Duration(pushback) - cs.numRetriesSincePushback = 0 - } else { - fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback)) - cur := float64(rp.InitialBackoff) * fact - if max := float64(rp.MaxBackoff); cur > max { - cur = max - } - dur = time.Duration(grpcrand.Int63n(int64(cur))) - cs.numRetriesSincePushback++ - } - - // TODO(dfawley): we could eagerly fail here if dur puts us past the - // deadline, but unsure if it is worth doing. - t := time.NewTimer(dur) - select { - case <-t.C: - cs.numRetries++ - return false, nil - case <-cs.ctx.Done(): - t.Stop() - return false, status.FromContextError(cs.ctx.Err()).Err() - } -} - -// Returns nil if a retry was performed and succeeded; error otherwise. -func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { - for { - attempt.finish(toRPCErr(lastErr)) - isTransparent, err := attempt.shouldRetry(lastErr) - if err != nil { - cs.commitAttemptLocked() - return err - } - cs.firstAttempt = false - attempt, err = cs.newAttemptLocked(isTransparent) - if err != nil { - // Only returns error if the clientconn is closed or the context of - // the stream is canceled. - return err - } - // Note that the first op in the replay buffer always sets cs.attempt - // if it is able to pick a transport and create a stream. - if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { - return nil - } - } -} - -func (cs *clientStream) Context() context.Context { - cs.commitAttempt() - // No need to lock before using attempt, since we know it is committed and - // cannot change. - if cs.attempt.s != nil { - return cs.attempt.s.Context() - } - return cs.ctx -} - -func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { - cs.mu.Lock() - for { - if cs.committed { - cs.mu.Unlock() - // toRPCErr is used in case the error from the attempt comes from - // NewClientStream, which intentionally doesn't return a status - // error to allow for further inspection; all other errors should - // already be status errors. - return toRPCErr(op(cs.attempt)) - } - if len(cs.buffer) == 0 { - // For the first op, which controls creation of the stream and - // assigns cs.attempt, we need to create a new attempt inline - // before executing the first op. On subsequent ops, the attempt - // is created immediately before replaying the ops. - var err error - if cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */); err != nil { - cs.mu.Unlock() - cs.finish(err) - return err - } - } - a := cs.attempt - cs.mu.Unlock() - err := op(a) - cs.mu.Lock() - if a != cs.attempt { - // We started another attempt already. 
- continue - } - if err == io.EOF { - <-a.s.Done() - } - if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) { - onSuccess() - cs.mu.Unlock() - return err - } - if err := cs.retryLocked(a, err); err != nil { - cs.mu.Unlock() - return err - } - } -} - -func (cs *clientStream) Header() (metadata.MD, error) { - var m metadata.MD - err := cs.withRetry(func(a *csAttempt) error { - var err error - m, err = a.s.Header() - return toRPCErr(err) - }, cs.commitAttemptLocked) - - if m == nil && err == nil { - // The stream ended with success. Finish the clientStream. - err = io.EOF - } - - if err != nil { - cs.finish(err) - // Do not return the error. The user should get it by calling Recv(). - return nil, nil - } - - if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil { - // Only log if binary log is on and header has not been logged, and - // there is actually headers to log. - logEntry := &binarylog.ServerHeader{ - OnClientSide: true, - Header: m, - PeerAddr: nil, - } - if peer, ok := peer.FromContext(cs.Context()); ok { - logEntry.PeerAddr = peer.Addr - } - cs.serverHeaderBinlogged = true - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, logEntry) - } - } - - return m, nil -} - -func (cs *clientStream) Trailer() metadata.MD { - // On RPC failure, we never need to retry, because usage requires that - // RecvMsg() returned a non-nil error before calling this function is valid. - // We would have retried earlier if necessary. - // - // Commit the attempt anyway, just in case users are not following those - // directions -- it will prevent races and should not meaningfully impact - // performance. - cs.commitAttempt() - if cs.attempt.s == nil { - return nil - } - return cs.attempt.s.Trailer() -} - -func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { - for _, f := range cs.buffer { - if err := f(attempt); err != nil { - return err - } - } - return nil -} - -func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) { - // Note: we still will buffer if retry is disabled (for transparent retries). - if cs.committed { - return - } - cs.bufferSize += sz - if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize { - cs.commitAttemptLocked() - return - } - cs.buffer = append(cs.buffer, op) -} - -func (cs *clientStream) SendMsg(m any) (err error) { - defer func() { - if err != nil && err != io.EOF { - // Call finish on the client stream for errors generated by this SendMsg - // call, as these indicate problems created by this client. (Transport - // errors are converted to an io.EOF error in csAttempt.sendMsg; the real - // error will be returned from RecvMsg eventually in that case, or be - // retried.) - cs.finish(err) - } - }() - if cs.sentLast { - return status.Errorf(codes.Internal, "SendMsg called after CloseSend") - } - if !cs.desc.ClientStreams { - cs.sentLast = true - } - - // load hdr, payload, data - hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp) - if err != nil { - return err - } - - // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > *cs.callInfo.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", len(payload), *cs.callInfo.maxSendMessageSize) - } - op := func(a *csAttempt) error { - return a.sendMsg(m, hdr, payload, data) - } - err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) - if len(cs.binlogs) != 0 && err == nil { - cm := &binarylog.ClientMessage{ - OnClientSide: true, - Message: data, - } - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, cm) - } - } - return err -} - -func (cs *clientStream) RecvMsg(m any) error { - if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { - // Call Header() to binary log header if it's not already logged. - cs.Header() - } - var recvInfo *payloadInfo - if len(cs.binlogs) != 0 { - recvInfo = &payloadInfo{} - } - err := cs.withRetry(func(a *csAttempt) error { - return a.recvMsg(m, recvInfo) - }, cs.commitAttemptLocked) - if len(cs.binlogs) != 0 && err == nil { - sm := &binarylog.ServerMessage{ - OnClientSide: true, - Message: recvInfo.uncompressedBytes, - } - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, sm) - } - } - if err != nil || !cs.desc.ServerStreams { - // err != nil or non-server-streaming indicates end of stream. - cs.finish(err) - } - return err -} - -func (cs *clientStream) CloseSend() error { - if cs.sentLast { - // TODO: return an error and finish the stream instead, due to API misuse? - return nil - } - cs.sentLast = true - op := func(a *csAttempt) error { - a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) - // Always return nil; io.EOF is the only error that might make sense - // instead, but there is no need to signal the client to call RecvMsg - // as the only use left for the stream after CloseSend is to call - // RecvMsg. This also matches historical behavior. - return nil - } - cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) - if len(cs.binlogs) != 0 { - chc := &binarylog.ClientHalfClose{ - OnClientSide: true, - } - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, chc) - } - } - // We never returned an error here for reasons. - return nil -} - -func (cs *clientStream) finish(err error) { - if err == io.EOF { - // Ending a stream with EOF indicates a success. - err = nil - } - cs.mu.Lock() - if cs.finished { - cs.mu.Unlock() - return - } - cs.finished = true - for _, onFinish := range cs.callInfo.onFinish { - onFinish(err) - } - cs.commitAttemptLocked() - if cs.attempt != nil { - cs.attempt.finish(err) - // after functions all rely upon having a stream. - if cs.attempt.s != nil { - for _, o := range cs.opts { - o.after(cs.callInfo, cs.attempt) - } - } - } - - cs.mu.Unlock() - // Only one of cancel or trailer needs to be logged. 
- if len(cs.binlogs) != 0 { - switch err { - case errContextCanceled, errContextDeadline, ErrClientConnClosing: - c := &binarylog.Cancel{ - OnClientSide: true, - } - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, c) - } - default: - logEntry := &binarylog.ServerTrailer{ - OnClientSide: true, - Trailer: cs.Trailer(), - Err: err, - } - if peer, ok := peer.FromContext(cs.Context()); ok { - logEntry.PeerAddr = peer.Addr - } - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, logEntry) - } - } - } - if err == nil { - cs.retryThrottler.successfulRPC() - } - if channelz.IsOn() { - if err != nil { - cs.cc.incrCallsFailed() - } else { - cs.cc.incrCallsSucceeded() - } - } - cs.cancel() -} - -func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { - cs := a.cs - if a.trInfo != nil { - a.mu.Lock() - if a.trInfo.tr != nil { - a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) - } - a.mu.Unlock() - } - if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { - if !cs.desc.ClientStreams { - // For non-client-streaming RPCs, we return nil instead of EOF on error - // because the generated code requires it. finish is not called; RecvMsg() - // will call it with the stream's status independently. - return nil - } - return io.EOF - } - for _, sh := range a.statsHandlers { - sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) - } - if channelz.IsOn() { - a.t.IncrMsgSent() - } - return nil -} - -func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { - cs := a.cs - if len(a.statsHandlers) != 0 && payInfo == nil { - payInfo = &payloadInfo{} - } - - if !a.decompSet { - // Block until we receive headers containing received message encoding. - if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity { - if a.dc == nil || a.dc.Type() != ct { - // No configured decompressor, or it does not match the incoming - // message encoding; attempt to find a registered compressor that does. - a.dc = nil - a.decomp = encoding.GetCompressor(ct) - } - } else { - // No compression is used; disable our decompressor. - a.dc = nil - } - // Only initialize this state once per stream. - a.decompSet = true - } - err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp) - if err != nil { - if err == io.EOF { - if statusErr := a.s.Status().Err(); statusErr != nil { - return statusErr - } - return io.EOF // indicates successful end of stream. - } - - return toRPCErr(err) - } - if a.trInfo != nil { - a.mu.Lock() - if a.trInfo.tr != nil { - a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) - } - a.mu.Unlock() - } - for _, sh := range a.statsHandlers { - sh.HandleRPC(a.ctx, &stats.InPayload{ - Client: true, - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - WireLength: payInfo.compressedLength + headerLen, - CompressedLength: payInfo.compressedLength, - Length: len(payInfo.uncompressedBytes), - }) - } - if channelz.IsOn() { - a.t.IncrMsgRecv() - } - if cs.desc.ServerStreams { - // Subsequent messages should be received by subsequent RecvMsg calls. - return nil - } - // Special handling for non-server-stream rpcs. - // This recv expects EOF or errors, so we don't collect inPayload. 
-	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
-	if err == nil {
-		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
-	}
-	if err == io.EOF {
-		return a.s.Status().Err() // non-server streaming Recv returns nil on success
-	}
-	return toRPCErr(err)
-}
-
-func (a *csAttempt) finish(err error) {
-	a.mu.Lock()
-	if a.finished {
-		a.mu.Unlock()
-		return
-	}
-	a.finished = true
-	if err == io.EOF {
-		// Ending a stream with EOF indicates a success.
-		err = nil
-	}
-	var tr metadata.MD
-	if a.s != nil {
-		a.t.CloseStream(a.s, err)
-		tr = a.s.Trailer()
-	}
-
-	if a.pickResult.Done != nil {
-		br := false
-		if a.s != nil {
-			br = a.s.BytesReceived()
-		}
-		a.pickResult.Done(balancer.DoneInfo{
-			Err:           err,
-			Trailer:       tr,
-			BytesSent:     a.s != nil,
-			BytesReceived: br,
-			ServerLoad:    balancerload.Parse(tr),
-		})
-	}
-	for _, sh := range a.statsHandlers {
-		end := &stats.End{
-			Client:    true,
-			BeginTime: a.beginTime,
-			EndTime:   time.Now(),
-			Trailer:   tr,
-			Error:     err,
-		}
-		sh.HandleRPC(a.ctx, end)
-	}
-	if a.trInfo != nil && a.trInfo.tr != nil {
-		if err == nil {
-			a.trInfo.tr.LazyPrintf("RPC: [OK]")
-		} else {
-			a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
-			a.trInfo.tr.SetError()
-		}
-		a.trInfo.tr.Finish()
-		a.trInfo.tr = nil
-	}
-	a.mu.Unlock()
-}
-
-// newClientStream creates a ClientStream with the specified transport, on the
-// given addrConn.
-//
-// It's expected that the given transport is either the same one in addrConn, or
-// is already closed. To avoid race, transport is specified separately, instead
-// of using ac.transpot.
-//
-// Main difference between this and ClientConn.NewStream:
-// - no retry
-// - no service config (or wait for service config)
-// - no tracing or stats
-func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) {
-	if t == nil {
-		// TODO: return RPC error here?
-		return nil, errors.New("transport provided is nil")
-	}
-	// defaultCallInfo contains unnecessary info(i.e. failfast, maxRetryRPCBufferSize), so we just initialize an empty struct.
-	c := &callInfo{}
-
-	// Possible context leak:
-	// The cancel function for the child context we create will only be called
-	// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
-	// an error is generated by SendMsg.
-	// https://github.com/grpc/grpc-go/issues/1818.
-	ctx, cancel := context.WithCancel(ctx)
-	defer func() {
-		if err != nil {
-			cancel()
-		}
-	}()
-
-	for _, o := range opts {
-		if err := o.before(c); err != nil {
-			return nil, toRPCErr(err)
-		}
-	}
-	c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
-	c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
-	if err := setCallInfoCodec(c); err != nil {
-		return nil, err
-	}
-
-	callHdr := &transport.CallHdr{
-		Host:           ac.cc.authority,
-		Method:         method,
-		ContentSubtype: c.contentSubtype,
-	}
-
-	// Set our outgoing compression according to the UseCompressor CallOption, if
-	// set. In that case, also find the compressor from the encoding package.
-	// Otherwise, use the compressor configured by the WithCompressor DialOption,
-	// if set.
- var cp Compressor - var comp encoding.Compressor - if ct := c.compressorType; ct != "" { - callHdr.SendCompress = ct - if ct != encoding.Identity { - comp = encoding.GetCompressor(ct) - if comp == nil { - return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) - } - } - } else if ac.cc.dopts.cp != nil { - callHdr.SendCompress = ac.cc.dopts.cp.Type() - cp = ac.cc.dopts.cp - } - if c.creds != nil { - callHdr.Creds = c.creds - } - - // Use a special addrConnStream to avoid retry. - as := &addrConnStream{ - callHdr: callHdr, - ac: ac, - ctx: ctx, - cancel: cancel, - opts: opts, - callInfo: c, - desc: desc, - codec: c.codec, - cp: cp, - comp: comp, - t: t, - } - - s, err := as.t.NewStream(as.ctx, as.callHdr) - if err != nil { - err = toRPCErr(err) - return nil, err - } - as.s = s - as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} - ac.incrCallsStarted() - if desc != unaryStreamDesc { - // Listen on stream context to cleanup when the stream context is - // canceled. Also listen for the addrConn's context in case the - // addrConn is closed or reconnects to a different address. In all - // other cases, an error should already be injected into the recv - // buffer by the transport, which the client will eventually receive, - // and then we will cancel the stream's context in - // addrConnStream.finish. - go func() { - ac.mu.Lock() - acCtx := ac.ctx - ac.mu.Unlock() - select { - case <-acCtx.Done(): - as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) - case <-ctx.Done(): - as.finish(toRPCErr(ctx.Err())) - } - }() - } - return as, nil -} - -type addrConnStream struct { - s *transport.Stream - ac *addrConn - callHdr *transport.CallHdr - cancel context.CancelFunc - opts []CallOption - callInfo *callInfo - t transport.ClientTransport - ctx context.Context - sentLast bool - desc *StreamDesc - codec baseCodec - cp Compressor - comp encoding.Compressor - decompSet bool - dc Decompressor - decomp encoding.Compressor - p *parser - mu sync.Mutex - finished bool -} - -func (as *addrConnStream) Header() (metadata.MD, error) { - m, err := as.s.Header() - if err != nil { - as.finish(toRPCErr(err)) - } - return m, err -} - -func (as *addrConnStream) Trailer() metadata.MD { - return as.s.Trailer() -} - -func (as *addrConnStream) CloseSend() error { - if as.sentLast { - // TODO: return an error and finish the stream instead, due to API misuse? - return nil - } - as.sentLast = true - - as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) - // Always return nil; io.EOF is the only error that might make sense - // instead, but there is no need to signal the client to call RecvMsg - // as the only use left for the stream after CloseSend is to call - // RecvMsg. This also matches historical behavior. - return nil -} - -func (as *addrConnStream) Context() context.Context { - return as.s.Context() -} - -func (as *addrConnStream) SendMsg(m any) (err error) { - defer func() { - if err != nil && err != io.EOF { - // Call finish on the client stream for errors generated by this SendMsg - // call, as these indicate problems created by this client. (Transport - // errors are converted to an io.EOF error in csAttempt.sendMsg; the real - // error will be returned from RecvMsg eventually in that case, or be - // retried.) 
- as.finish(err)
- }
- }()
- if as.sentLast {
- return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
- }
- if !as.desc.ClientStreams {
- as.sentLast = true
- }
-
- // load hdr, payload, data
- hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp)
- if err != nil {
- return err
- }
-
- // TODO(dfawley): should we be checking len(data) instead?
- if len(payld) > *as.callInfo.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize)
- }
-
- if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
- if !as.desc.ClientStreams {
- // For non-client-streaming RPCs, we return nil instead of EOF on error
- // because the generated code requires it. finish is not called; RecvMsg()
- // will call it with the stream's status independently.
- return nil
- }
- return io.EOF
- }
-
- if channelz.IsOn() {
- as.t.IncrMsgSent()
- }
- return nil
-}
-
-func (as *addrConnStream) RecvMsg(m any) (err error) {
- defer func() {
- if err != nil || !as.desc.ServerStreams {
- // err != nil or non-server-streaming indicates end of stream.
- as.finish(err)
- }
- }()
-
- if !as.decompSet {
- // Block until we receive headers containing received message encoding.
- if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity {
- if as.dc == nil || as.dc.Type() != ct {
- // No configured decompressor, or it does not match the incoming
- // message encoding; attempt to find a registered compressor that does.
- as.dc = nil
- as.decomp = encoding.GetCompressor(ct)
- }
- } else {
- // No compression is used; disable our decompressor.
- as.dc = nil
- }
- // Only initialize this state once per stream.
- as.decompSet = true
- }
- err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
- if err != nil {
- if err == io.EOF {
- if statusErr := as.s.Status().Err(); statusErr != nil {
- return statusErr
- }
- return io.EOF // indicates successful end of stream.
- }
- return toRPCErr(err)
- }
-
- if channelz.IsOn() {
- as.t.IncrMsgRecv()
- }
- if as.desc.ServerStreams {
- // Subsequent messages should be received by subsequent RecvMsg calls.
- return nil
- }
-
- // Special handling for non-server-stream rpcs.
- // This recv expects EOF or errors, so we don't collect inPayload.
- err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
- if err == nil {
- return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
- }
- if err == io.EOF {
- return as.s.Status().Err() // non-server streaming Recv returns nil on success
- }
- return toRPCErr(err)
-}
-
-func (as *addrConnStream) finish(err error) {
- as.mu.Lock()
- if as.finished {
- as.mu.Unlock()
- return
- }
- as.finished = true
- if err == io.EOF {
- // Ending a stream with EOF indicates a success.
- err = nil
- }
- if as.s != nil {
- as.t.CloseStream(as.s, err)
- }
-
- if err != nil {
- as.ac.incrCallsFailed()
- } else {
- as.ac.incrCallsSucceeded()
- }
- as.cancel()
- as.mu.Unlock()
-}
-
-// ServerStream defines the server-side behavior of a streaming RPC.
-//
-// Errors returned from ServerStream methods are compatible with the status
-// package. However, the status code will often not match the RPC status as
-// seen by the client application, and therefore, should not be relied upon for
-// this purpose.
-type ServerStream interface {
- // SetHeader sets the header metadata.
It may be called multiple times. - // When call multiple times, all the provided metadata will be merged. - // All the metadata will be sent out when one of the following happens: - // - ServerStream.SendHeader() is called; - // - The first response is sent out; - // - An RPC status is sent out (error or success). - SetHeader(metadata.MD) error - // SendHeader sends the header metadata. - // The provided md and headers set by SetHeader() will be sent. - // It fails if called multiple times. - SendHeader(metadata.MD) error - // SetTrailer sets the trailer metadata which will be sent with the RPC status. - // When called more than once, all the provided metadata will be merged. - SetTrailer(metadata.MD) - // Context returns the context for this stream. - Context() context.Context - // SendMsg sends a message. On error, SendMsg aborts the stream and the - // error is returned directly. - // - // SendMsg blocks until: - // - There is sufficient flow control to schedule m with the transport, or - // - The stream is done, or - // - The stream breaks. - // - // SendMsg does not wait until the message is received by the client. An - // untimely stream closure may result in lost messages. - // - // It is safe to have a goroutine calling SendMsg and another goroutine - // calling RecvMsg on the same stream at the same time, but it is not safe - // to call SendMsg on the same stream in different goroutines. - // - // It is not safe to modify the message after calling SendMsg. Tracing - // libraries and stats handlers may use the message lazily. - SendMsg(m any) error - // RecvMsg blocks until it receives a message into m or the stream is - // done. It returns io.EOF when the client has performed a CloseSend. On - // any non-EOF error, the stream is aborted and the error contains the - // RPC status. - // - // It is safe to have a goroutine calling SendMsg and another goroutine - // calling RecvMsg on the same stream at the same time, but it is not - // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m any) error -} - -// serverStream implements a server side Stream. -type serverStream struct { - ctx context.Context - t transport.ServerTransport - s *transport.Stream - p *parser - codec baseCodec - - cp Compressor - dc Decompressor - comp encoding.Compressor - decomp encoding.Compressor - - sendCompressorName string - - maxReceiveMessageSize int - maxSendMessageSize int - trInfo *traceInfo - - statsHandler []stats.Handler - - binlogs []binarylog.MethodLogger - // serverHeaderBinlogged indicates whether server header has been logged. It - // will happen when one of the following two happens: stream.SendHeader(), - // stream.Send(). - // - // It's only checked in send and sendHeader, doesn't need to be - // synchronized. - serverHeaderBinlogged bool - - mu sync.Mutex // protects trInfo.tr after the service handler runs. 
-} - -func (ss *serverStream) Context() context.Context { - return ss.ctx -} - -func (ss *serverStream) SetHeader(md metadata.MD) error { - if md.Len() == 0 { - return nil - } - err := imetadata.Validate(md) - if err != nil { - return status.Error(codes.Internal, err.Error()) - } - return ss.s.SetHeader(md) -} - -func (ss *serverStream) SendHeader(md metadata.MD) error { - err := imetadata.Validate(md) - if err != nil { - return status.Error(codes.Internal, err.Error()) - } - - err = ss.t.WriteHeader(ss.s, md) - if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { - h, _ := ss.s.Header() - sh := &binarylog.ServerHeader{ - Header: h, - } - ss.serverHeaderBinlogged = true - for _, binlog := range ss.binlogs { - binlog.Log(ss.ctx, sh) - } - } - return err -} - -func (ss *serverStream) SetTrailer(md metadata.MD) { - if md.Len() == 0 { - return - } - if err := imetadata.Validate(md); err != nil { - logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err) - } - ss.s.SetTrailer(md) -} - -func (ss *serverStream) SendMsg(m any) (err error) { - defer func() { - if ss.trInfo != nil { - ss.mu.Lock() - if ss.trInfo.tr != nil { - if err == nil { - ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) - } else { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) - ss.trInfo.tr.SetError() - } - } - ss.mu.Unlock() - } - if err != nil && err != io.EOF { - st, _ := status.FromError(toRPCErr(err)) - ss.t.WriteStatus(ss.s, st) - // Non-user specified status was sent out. This should be an error - // case (as a server side Cancel maybe). - // - // This is not handled specifically now. User will return a final - // status from the service handler, we will log that error instead. - // This behavior is similar to an interceptor. - } - if channelz.IsOn() && err == nil { - ss.t.IncrMsgSent() - } - }() - - // Server handler could have set new compressor by calling SetSendCompressor. - // In case it is set, we need to use it for compressing outbound message. - if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName { - ss.comp = encoding.GetCompressor(sendCompressorsName) - ss.sendCompressorName = sendCompressorsName - } - - // load hdr, payload, data - hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) - if err != nil { - return err - } - - // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > ss.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", len(payload), ss.maxSendMessageSize) - } - if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { - return toRPCErr(err) - } - if len(ss.binlogs) != 0 { - if !ss.serverHeaderBinlogged { - h, _ := ss.s.Header() - sh := &binarylog.ServerHeader{ - Header: h, - } - ss.serverHeaderBinlogged = true - for _, binlog := range ss.binlogs { - binlog.Log(ss.ctx, sh) - } - } - sm := &binarylog.ServerMessage{ - Message: data, - } - for _, binlog := range ss.binlogs { - binlog.Log(ss.ctx, sm) - } - } - if len(ss.statsHandler) != 0 { - for _, sh := range ss.statsHandler { - sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) - } - } - return nil -} - -func (ss *serverStream) RecvMsg(m any) (err error) { - defer func() { - if ss.trInfo != nil { - ss.mu.Lock() - if ss.trInfo.tr != nil { - if err == nil { - ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) - } else if err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) - ss.trInfo.tr.SetError() - } - } - ss.mu.Unlock() - } - if err != nil && err != io.EOF { - st, _ := status.FromError(toRPCErr(err)) - ss.t.WriteStatus(ss.s, st) - // Non-user specified status was sent out. This should be an error - // case (as a server side Cancel maybe). - // - // This is not handled specifically now. User will return a final - // status from the service handler, we will log that error instead. - // This behavior is similar to an interceptor. - } - if channelz.IsOn() && err == nil { - ss.t.IncrMsgRecv() - } - }() - var payInfo *payloadInfo - if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { - payInfo = &payloadInfo{} - } - if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { - if err == io.EOF { - if len(ss.binlogs) != 0 { - chc := &binarylog.ClientHalfClose{} - for _, binlog := range ss.binlogs { - binlog.Log(ss.ctx, chc) - } - } - return err - } - if err == io.ErrUnexpectedEOF { - err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) - } - return toRPCErr(err) - } - if len(ss.statsHandler) != 0 { - for _, sh := range ss.statsHandler { - sh.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - Length: len(payInfo.uncompressedBytes), - WireLength: payInfo.compressedLength + headerLen, - CompressedLength: payInfo.compressedLength, - }) - } - } - if len(ss.binlogs) != 0 { - cm := &binarylog.ClientMessage{ - Message: payInfo.uncompressedBytes, - } - for _, binlog := range ss.binlogs { - binlog.Log(ss.ctx, cm) - } - } - return nil -} - -// MethodFromServerStream returns the method string for the input stream. -// The returned string is in the format of "/service/method". -func MethodFromServerStream(stream ServerStream) (string, bool) { - return Method(stream.Context()) -} - -// prepareMsg returns the hdr, payload and data -// using the compressors passed or using the -// passed preparedmsg -func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { - if preparedMsg, ok := m.(*PreparedMsg); ok { - return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil - } - // The input interface is not a prepared msg. 
- // Marshal and Compress the data at this point - data, err = encode(codec, m) - if err != nil { - return nil, nil, nil, err - } - compData, err := compress(data, cp, comp) - if err != nil { - return nil, nil, nil, err - } - hdr, payload = msgHeader(data, compData) - return hdr, payload, data, nil -} diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go deleted file mode 100644 index 07f0125768..0000000000 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package tap defines the function handles which are executed on the transport -// layer of gRPC-Go and related information. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -package tap - -import ( - "context" - - "google.golang.org/grpc/metadata" -) - -// Info defines the relevant information needed by the handles. -type Info struct { - // FullMethodName is the string of grpc method (in the format of - // /package.service/method). - FullMethodName string - - // Header contains the header metadata received. - Header metadata.MD - - // TODO: More to be added. -} - -// ServerInHandle defines the function which runs before a new stream is -// created on the server side. If it returns a non-nil error, the stream will -// not be created and an error will be returned to the client. If the error -// returned is a status error, that status code and message will be used, -// otherwise PermissionDenied will be the code and err.Error() will be the -// message. -// -// It's intended to be used in situations where you don't want to waste the -// resources to accept the new stream (e.g. rate-limiting). For other general -// usages, please use interceptors. -// -// Note that it is executed in the per-connection I/O goroutine(s) instead of -// per-RPC goroutine. Therefore, users should NOT have any -// blocking/time-consuming work in this handle. Otherwise all the RPCs would -// slow down. Also, for the same reason, this handle won't be called -// concurrently by gRPC. -type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error) diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go deleted file mode 100644 index 10f4f798f5..0000000000 --- a/vendor/google.golang.org/grpc/trace.go +++ /dev/null @@ -1,143 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "bytes" - "fmt" - "io" - "net" - "strings" - "sync" - "time" -) - -// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. -// This should only be set before any RPCs are sent or received by this program. -var EnableTracing bool - -// methodFamily returns the trace family for the given method. -// It turns "/pkg.Service/GetFoo" into "pkg.Service". -func methodFamily(m string) string { - m = strings.TrimPrefix(m, "/") // remove leading slash - if i := strings.Index(m, "/"); i >= 0 { - m = m[:i] // remove everything from second slash - } - return m -} - -// traceEventLog mirrors golang.org/x/net/trace.EventLog. -// -// It exists in order to avoid importing x/net/trace on grpcnotrace builds. -type traceEventLog interface { - Printf(format string, a ...any) - Errorf(format string, a ...any) - Finish() -} - -// traceLog mirrors golang.org/x/net/trace.Trace. -// -// It exists in order to avoid importing x/net/trace on grpcnotrace builds. -type traceLog interface { - LazyLog(x fmt.Stringer, sensitive bool) - LazyPrintf(format string, a ...any) - SetError() - SetRecycler(f func(any)) - SetTraceInfo(traceID, spanID uint64) - SetMaxEvents(m int) - Finish() -} - -// traceInfo contains tracing information for an RPC. -type traceInfo struct { - tr traceLog - firstLine firstLine -} - -// firstLine is the first line of an RPC trace. -// It may be mutated after construction; remoteAddr specifically may change -// during client-side use. -type firstLine struct { - mu sync.Mutex - client bool // whether this is a client (outgoing) RPC - remoteAddr net.Addr - deadline time.Duration // may be zero -} - -func (f *firstLine) SetRemoteAddr(addr net.Addr) { - f.mu.Lock() - f.remoteAddr = addr - f.mu.Unlock() -} - -func (f *firstLine) String() string { - f.mu.Lock() - defer f.mu.Unlock() - - var line bytes.Buffer - io.WriteString(&line, "RPC: ") - if f.client { - io.WriteString(&line, "to") - } else { - io.WriteString(&line, "from") - } - fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) - if f.deadline != 0 { - fmt.Fprint(&line, f.deadline) - } else { - io.WriteString(&line, "none") - } - return line.String() -} - -const truncateSize = 100 - -func truncate(x string, l int) string { - if l > len(x) { - return x - } - return x[:l] -} - -// payload represents an RPC request or response payload. -type payload struct { - sent bool // whether this is an outgoing payload - msg any // e.g. a proto.Message - // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? -} - -func (p payload) String() string { - if p.sent { - return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize) - } - return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize) -} - -type fmtStringer struct { - format string - a []any -} - -func (f *fmtStringer) String() string { - return fmt.Sprintf(f.format, f.a...) -} - -type stringer string - -func (s stringer) String() string { return string(s) } diff --git a/vendor/google.golang.org/grpc/trace_notrace.go b/vendor/google.golang.org/grpc/trace_notrace.go deleted file mode 100644 index 1da3a2308e..0000000000 --- a/vendor/google.golang.org/grpc/trace_notrace.go +++ /dev/null @@ -1,52 +0,0 @@ -//go:build grpcnotrace - -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -// grpcnotrace can be used to avoid importing golang.org/x/net/trace, which in -// turn enables binaries using gRPC-Go for dead code elimination, which can -// yield 10-15% improvements in binary size when tracing is not needed. - -import ( - "context" - "fmt" -) - -type notrace struct{} - -func (notrace) LazyLog(x fmt.Stringer, sensitive bool) {} -func (notrace) LazyPrintf(format string, a ...any) {} -func (notrace) SetError() {} -func (notrace) SetRecycler(f func(any)) {} -func (notrace) SetTraceInfo(traceID, spanID uint64) {} -func (notrace) SetMaxEvents(m int) {} -func (notrace) Finish() {} - -func newTrace(family, title string) traceLog { - return notrace{} -} - -func newTraceContext(ctx context.Context, tr traceLog) context.Context { - return ctx -} - -func newTraceEventLog(family, title string) traceEventLog { - return nil -} diff --git a/vendor/google.golang.org/grpc/trace_withtrace.go b/vendor/google.golang.org/grpc/trace_withtrace.go deleted file mode 100644 index 88d6e8571e..0000000000 --- a/vendor/google.golang.org/grpc/trace_withtrace.go +++ /dev/null @@ -1,39 +0,0 @@ -//go:build !grpcnotrace - -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - - t "golang.org/x/net/trace" -) - -func newTrace(family, title string) traceLog { - return t.New(family, title) -} - -func newTraceContext(ctx context.Context, tr traceLog) context.Context { - return t.NewContext(ctx, tr) -} - -func newTraceEventLog(family, title string) traceEventLog { - return t.NewEventLog(family, title) -} diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go deleted file mode 100644 index df85a021ad..0000000000 --- a/vendor/google.golang.org/grpc/version.go +++ /dev/null @@ -1,22 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -// Version is the current grpc version. 
-const Version = "1.62.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh deleted file mode 100644 index 7a33c215b5..0000000000 --- a/vendor/google.golang.org/grpc/vet.sh +++ /dev/null @@ -1,190 +0,0 @@ -#!/bin/bash - -set -ex # Exit on error; debugging enabled. -set -o pipefail # Fail a pipe if any sub-command fails. - -# not makes sure the command passed to it does not exit with a return code of 0. -not() { - # This is required instead of the earlier (! $COMMAND) because subshells and - # pipefail don't work the same on Darwin as in Linux. - ! "$@" -} - -die() { - echo "$@" >&2 - exit 1 -} - -fail_on_output() { - tee /dev/stderr | not read -} - -# Check to make sure it's safe to modify the user's git repo. -git status --porcelain | fail_on_output - -# Undo any edits made by this script. -cleanup() { - git reset --hard HEAD -} -trap cleanup EXIT - -PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}" -go version - -if [[ "$1" = "-install" ]]; then - # Install the pinned versions as defined in module tools. - pushd ./test/tools - go install \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell - popd - if [[ -z "${VET_SKIP_PROTO}" ]]; then - if [[ "${GITHUB_ACTIONS}" = "true" ]]; then - PROTOBUF_VERSION=25.2 # a.k.a. v4.22.0 in pb.go files. - PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip - pushd /home/runner/go - wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} - unzip ${PROTOC_FILENAME} - bin/protoc --version - popd - elif not which protoc > /dev/null; then - die "Please install protoc into your path" - fi - fi - exit 0 -elif [[ "$#" -ne 0 ]]; then - die "Unknown argument(s): $*" -fi - -# - Check that generated proto files are up to date. -if [[ -z "${VET_SKIP_PROTO}" ]]; then - make proto && git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) -fi - -if [[ -n "${VET_ONLY_PROTO}" ]]; then - exit 0 -fi - -# - Ensure all source files contain a copyright message. -# (Done in two parts because Darwin "git grep" has broken support for compound -# exclusion matches.) -(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output - -# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. -not grep 'func Test[^(]' *_test.go -not grep 'func Test[^(]' test/*.go - -# - Check for typos in test function names -git grep 'func (s) ' -- "*_test.go" | not grep -v 'func (s) Test' -git grep 'func [A-Z]' -- "*_test.go" | not grep -v 'func Test\|Benchmark\|Example' - -# - Do not import x/net/context. -not git grep -l 'x/net/context' -- "*.go" - -# - Do not import math/rand for real library code. Use internal/grpcrand for -# thread safety. -git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test' - -# - Do not use "interface{}"; use "any" instead. -git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc\|grpc_testing_not_regenerate' - -# - Do not call grpclog directly. Use grpclog.Component instead. -git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' - -# - Ensure all ptypes proto packages are renamed when importing. 
-not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" - -# - Ensure all usages of grpc_testing package are renamed when importing. -not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" - -# - Ensure all xds proto imports are renamed to *pb or *grpc. -git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' - -misspell -error . - -# - gofmt, goimports, go vet, go mod tidy. -# Perform these checks on each module inside gRPC. -for MOD_FILE in $(find . -name 'go.mod'); do - MOD_DIR=$(dirname ${MOD_FILE}) - pushd ${MOD_DIR} - go vet -all ./... | fail_on_output - gofmt -s -d -l . 2>&1 | fail_on_output - goimports -l . 2>&1 | not grep -vE "\.pb\.go" - - go mod tidy -compat=1.19 - git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) - popd -done - -# - Collection of static analysis checks -SC_OUT="$(mktemp)" -staticcheck -go 1.19 -checks 'all' ./... > "${SC_OUT}" || true - -# Error for anything other than checks that need exclusions. -grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)" - -# Exclude underscore checks for generated code. -grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)\|\(grpc_testing_not_regenerate\)' - -# Error for duplicate imports not including grpc protos. -grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused -channelz/grpc_channelz_v1" -go-control-plane/envoy -grpclb/grpc_lb_v1" -health/grpc_health_v1" -interop/grpc_testing" -orca/v3" -proto/grpc_gcp" -proto/grpc_lookup_v1" -reflection/grpc_reflection_v1" -reflection/grpc_reflection_v1alpha" -XXXXX PleaseIgnoreUnused' - -# Error for any package comments not in generated code. -grep "(ST1000)" "${SC_OUT}" | not grep -v "\.pb\.go:" - -# Only ignore the following deprecated types/fields/functions and exclude -# generated code. -grep "(SA1019)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused -XXXXX Protobuf related deprecation errors: -"github.com/golang/protobuf -.pb.go: -grpc_testing_not_regenerate -: ptypes. -proto.RegisterType -XXXXX gRPC internal usage deprecation errors: -"google.golang.org/grpc -: grpc. -: v1alpha. -: v1alphareflectionpb. -BalancerAttributes is deprecated: -CredsBundle is deprecated: -Metadata is deprecated: use Attributes instead. -NewSubConn is deprecated: -OverrideServerName is deprecated: -RemoveSubConn is deprecated: -SecurityVersion is deprecated: -Target is deprecated: Use the Target field in the BuildOptions instead. 
-UpdateAddresses is deprecated: -UpdateSubConnState is deprecated: -balancer.ErrTransientFailure is deprecated: -grpc/reflection/v1alpha/reflection.proto -XXXXX xDS deprecated fields we support -.ExactMatch -.PrefixMatch -.SafeRegexMatch -.SuffixMatch -GetContainsMatch -GetExactMatch -GetMatchSubjectAltNames -GetPrefixMatch -GetSafeRegexMatch -GetSuffixMatch -GetTlsCertificateCertificateProviderInstance -GetValidationContextCertificateProviderInstance -XXXXX PleaseIgnoreUnused' - -echo SUCCESS diff --git a/vendor/k8s.io/dynamic-resource-allocation/LICENSE b/vendor/k8s.io/dynamic-resource-allocation/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/k8s.io/dynamic-resource-allocation/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/k8s.io/dynamic-resource-allocation/controller/controller.go b/vendor/k8s.io/dynamic-resource-allocation/controller/controller.go deleted file mode 100644 index 957b83f91c..0000000000 --- a/vendor/k8s.io/dynamic-resource-allocation/controller/controller.go +++ /dev/null @@ -1,957 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "strings" - "time" - - "github.com/go-logr/logr" - "github.com/google/go-cmp/cmp" - - v1 "k8s.io/api/core/v1" - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" - corev1types "k8s.io/client-go/kubernetes/typed/core/v1" - resourcev1alpha2listers "k8s.io/client-go/listers/resource/v1alpha2" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/workqueue" - "k8s.io/dynamic-resource-allocation/resourceclaim" - "k8s.io/klog/v2" -) - -// Controller watches ResourceClaims and triggers allocation and deallocation -// as needed. -type Controller interface { - // Run starts the controller. - Run(workers int) - - // SetReservedFor can be used to disable adding the Pod which - // triggered allocation to the status.reservedFor. Normally, - // DRA drivers should always do that, so it's the default. - // But nothing in the protocol between the scheduler and - // a driver requires it, so at least for testing the control - // plane components it is useful to disable it. - SetReservedFor(enabled bool) -} - -// Driver provides the actual allocation and deallocation operations. 
-type Driver interface { - // GetClassParameters gets called to retrieve the parameter object - // referenced by a class. The content should be validated now if - // possible. class.Parameters may be nil. - // - // The caller will wrap the error to include the parameter reference. - GetClassParameters(ctx context.Context, class *resourcev1alpha2.ResourceClass) (interface{}, error) - - // GetClaimParameters gets called to retrieve the parameter object - // referenced by a claim. The content should be validated now if - // possible. claim.Spec.Parameters may be nil. - // - // The caller will wrap the error to include the parameter reference. - GetClaimParameters(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, class *resourcev1alpha2.ResourceClass, classParameters interface{}) (interface{}, error) - - // Allocate gets called when all same-driver ResourceClaims for Pod are ready - // to be allocated. The selectedNode is empty for ResourceClaims with immediate - // allocation, in which case the resource driver decides itself where - // to allocate. If there is already an on-going allocation, the driver - // may finish it and ignore the new parameters or abort the on-going - // allocation and try again with the new parameters. - // - // Parameters have been retrieved earlier. - // - // Driver must set the result of allocation for every claim in "claims" - // parameter items. In case if there was no error encountered and allocation - // was successful - claims[i].Allocation field should be set. In case of - // particular claim allocation fail - respective item's claims[i].Error field - // should be set, in this case claims[i].Allocation will be ignored. - // - // If selectedNode is set, the driver must attempt to allocate for that - // node. If that is not possible, it must return an error. The - // controller will call UnsuitableNodes and pass the new information to - // the scheduler, which then will lead to selecting a different node - // if the current one is not suitable. - // - // The Claim, ClaimParameters, Class, ClassParameters fields of "claims" parameter - // items are read-only and must not be modified. This call must be idempotent. - Allocate(ctx context.Context, claims []*ClaimAllocation, selectedNode string) - - // Deallocate gets called when a ResourceClaim is ready to be - // freed. - // - // The claim is read-only and must not be modified. This call must be - // idempotent. In particular it must not return an error when the claim - // is currently not allocated. - // - // Deallocate may get called when a previous allocation got - // interrupted. Deallocate must then stop any on-going allocation - // activity and free resources before returning without an error. - Deallocate(ctx context.Context, claim *resourcev1alpha2.ResourceClaim) error - - // UnsuitableNodes checks all pending claims with delayed allocation - // for a pod. All claims are ready for allocation by the driver - // and parameters have been retrieved. - // - // The driver may consider each claim in isolation, but it's better - // to mark nodes as unsuitable for all claims if it not all claims - // can be allocated for it (for example, two GPUs requested but - // the node only has one). - // - // The potentialNodes slice contains all potential nodes selected - // by the scheduler plus the selected node. The response must - // not contain any other nodes. Implementations do not have to - // care about size limits in the PodSchedulingContext status, the - // caller will handle that. 
- // - // The result of the check is in ClaimAllocation.UnsuitableNodes. - // An error indicates that the entire check must be repeated. - UnsuitableNodes(ctx context.Context, pod *v1.Pod, claims []*ClaimAllocation, potentialNodes []string) error -} - -// ClaimAllocation represents information about one particular -// pod.Spec.ResourceClaim entry. -type ClaimAllocation struct { - PodClaimName string - Claim *resourcev1alpha2.ResourceClaim - Class *resourcev1alpha2.ResourceClass - ClaimParameters interface{} - ClassParameters interface{} - - // UnsuitableNodes needs to be filled in by the driver when - // Driver.UnsuitableNodes gets called. - UnsuitableNodes []string - - // Driver must populate this field with resources that were - // allocated for the claim in case of successful allocation. - Allocation *resourcev1alpha2.AllocationResult - // In case of error allocating particular claim, driver must - // populate this field. - Error error -} - -type controller struct { - ctx context.Context - logger klog.Logger - name string - finalizer string - driver Driver - setReservedFor bool - kubeClient kubernetes.Interface - claimNameLookup *resourceclaim.Lookup - queue workqueue.RateLimitingInterface - eventRecorder record.EventRecorder - rcLister resourcev1alpha2listers.ResourceClassLister - rcSynced cache.InformerSynced - claimCache cache.MutationCache - schedulingCtxLister resourcev1alpha2listers.PodSchedulingContextLister - claimSynced cache.InformerSynced - schedulingCtxSynced cache.InformerSynced -} - -// TODO: make it configurable -var recheckDelay = 30 * time.Second - -// New creates a new controller. -func New( - ctx context.Context, - name string, - driver Driver, - kubeClient kubernetes.Interface, - informerFactory informers.SharedInformerFactory) Controller { - logger := klog.LoggerWithName(klog.FromContext(ctx), "resource controller") - rcInformer := informerFactory.Resource().V1alpha2().ResourceClasses() - claimInformer := informerFactory.Resource().V1alpha2().ResourceClaims() - schedulingCtxInformer := informerFactory.Resource().V1alpha2().PodSchedulingContexts() - claimNameLookup := resourceclaim.NewNameLookup(kubeClient) - - eventBroadcaster := record.NewBroadcaster() - go func() { - <-ctx.Done() - eventBroadcaster.Shutdown() - }() - // TODO: use contextual logging in eventBroadcaster once it - // supports it. There is a StartStructuredLogging API, but it - // uses the global klog, which is worse than redirecting an unstructured - // string into our logger, in particular during testing. - eventBroadcaster.StartLogging(func(format string, args ...interface{}) { - helper, logger := logger.WithCallStackHelper() - helper() - logger.V(2).Info(fmt.Sprintf(format, args...)) - }) - eventBroadcaster.StartRecordingToSink(&corev1types.EventSinkImpl{Interface: kubeClient.CoreV1().Events(v1.NamespaceAll)}) - eventRecorder := eventBroadcaster.NewRecorder(scheme.Scheme, - v1.EventSource{Component: fmt.Sprintf("resource driver %s", name)}) - - // The work queue contains either keys for claims or PodSchedulingContext objects. - queue := workqueue.NewNamedRateLimitingQueue( - workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("%s-queue", name)) - - // The mutation cache acts as an additional layer for the informer - // cache and after an update made by the controller returns a more - // recent copy until the informer catches up. 
- claimInformerCache := claimInformer.Informer().GetIndexer() - claimCache := cache.NewIntegerResourceVersionMutationCache(claimInformerCache, claimInformerCache, 60*time.Second, - false /* only cache updated claims that exist in the informer cache */) - - ctrl := &controller{ - ctx: ctx, - logger: logger, - name: name, - finalizer: name + "/deletion-protection", - driver: driver, - setReservedFor: true, - kubeClient: kubeClient, - claimNameLookup: claimNameLookup, - rcLister: rcInformer.Lister(), - rcSynced: rcInformer.Informer().HasSynced, - claimCache: claimCache, - claimSynced: claimInformer.Informer().HasSynced, - schedulingCtxLister: schedulingCtxInformer.Lister(), - schedulingCtxSynced: schedulingCtxInformer.Informer().HasSynced, - queue: queue, - eventRecorder: eventRecorder, - } - - loggerV6 := logger.V(6) - if loggerV6.Enabled() { - resourceClaimLogger := klog.LoggerWithValues(loggerV6, "type", "ResourceClaim") - _, _ = claimInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(&resourceClaimLogger, ctrl)) - schedulingCtxLogger := klog.LoggerWithValues(loggerV6, "type", "PodSchedulingContext") - _, _ = schedulingCtxInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(&schedulingCtxLogger, ctrl)) - } else { - _, _ = claimInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(nil, ctrl)) - _, _ = schedulingCtxInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(nil, ctrl)) - } - - return ctrl -} - -func (ctrl *controller) SetReservedFor(enabled bool) { - ctrl.setReservedFor = enabled -} - -func resourceEventHandlerFuncs(logger *klog.Logger, ctrl *controller) cache.ResourceEventHandlerFuncs { - return cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - ctrl.add(logger, obj) - }, - UpdateFunc: func(oldObj, newObj interface{}) { - ctrl.update(logger, oldObj, newObj) - }, - DeleteFunc: ctrl.delete, - } -} - -const ( - claimKeyPrefix = "claim:" - schedulingCtxKeyPrefix = "schedulingCtx:" -) - -func (ctrl *controller) add(loggerV6 *klog.Logger, obj interface{}) { - var logger klog.Logger - if loggerV6 != nil { - logger = loggerV6.WithValues("object", prettyPrint(obj)) - } else { - logger = ctrl.logger.V(5) - } - ctrl.addNewOrUpdated(logger, "Adding new work item", obj) -} - -func (ctrl *controller) update(loggerV6 *klog.Logger, oldObj, newObj interface{}) { - var logger klog.Logger - if loggerV6 != nil { - diff := cmp.Diff(oldObj, newObj) - logger = loggerV6.WithValues("object", prettyPrint(newObj), "diff", diff) - } else { - logger = ctrl.logger.V(5) - } - ctrl.addNewOrUpdated(logger, "Adding updated work item", newObj) -} - -func (ctrl *controller) addNewOrUpdated(loggerV klog.Logger, msg string, obj interface{}) { - objKey, err := getKey(obj) - if err != nil { - loggerV.Error(err, "Failed to get key", "obj", obj) - return - } - loggerV.Info(msg, "key", objKey) - ctrl.queue.Add(objKey) -} - -func (ctrl *controller) delete(obj interface{}) { - objKey, err := getKey(obj) - if err != nil { - return - } - ctrl.logger.V(5).Info("Removing deleted work item", "key", objKey) - ctrl.queue.Forget(objKey) -} - -func getKey(obj interface{}) (string, error) { - objKey, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) - if err != nil { - return "", err - } - prefix := "" - switch obj.(type) { - case *resourcev1alpha2.ResourceClaim: - prefix = claimKeyPrefix - case *resourcev1alpha2.PodSchedulingContext: - prefix = schedulingCtxKeyPrefix - default: - return "", fmt.Errorf("unexpected object: %T", obj) - } - - return prefix + objKey, nil -} - 
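The deleted controller above multiplexes two object types through one rate-limited work queue by prefixing each namespaced key with its type (`claim:` or `schedulingCtx:`), as `getKey` shows and as `syncKey` below undoes. A minimal, self-contained sketch of that key scheme, assuming nothing beyond the standard library; `splitKey` and `main` are illustrative helpers, not part of the deleted package:

```go
package main

import (
	"fmt"
	"strings"
)

// Prefixes mirror the constants in the deleted controller.go above.
const (
	claimKeyPrefix         = "claim:"
	schedulingCtxKeyPrefix = "schedulingCtx:"
)

// splitKey reverses getKey: it recovers the type prefix and the
// namespace/name pair, the same parsing syncKey performs on dequeued keys.
func splitKey(key string) (prefix, namespace, name string, err error) {
	sep := strings.Index(key, ":")
	if sep < 0 {
		return "", "", "", fmt.Errorf("unexpected key: %s", key)
	}
	prefix, object := key[:sep+1], key[sep+1:]
	if ns, n, found := strings.Cut(object, "/"); found {
		return prefix, ns, n, nil
	}
	// Cluster-scoped objects carry no namespace component.
	return prefix, "", object, nil
}

func main() {
	for _, key := range []string{
		claimKeyPrefix + "default/my-claim",
		schedulingCtxKeyPrefix + "default/my-pod",
	} {
		prefix, ns, name, err := splitKey(key)
		if err != nil {
			panic(err)
		}
		fmt.Printf("prefix=%q namespace=%q name=%q\n", prefix, ns, name)
	}
}
```

Prefixing lets a single worker pool and a single backoff policy serve both ResourceClaims and PodSchedulingContexts instead of maintaining two queues.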
-// Run starts the controller. -func (ctrl *controller) Run(workers int) { - defer ctrl.queue.ShutDown() - - ctrl.logger.Info("Starting", "driver", ctrl.name) - defer ctrl.logger.Info("Shutting down", "driver", ctrl.name) - - stopCh := ctrl.ctx.Done() - - if !cache.WaitForCacheSync(stopCh, ctrl.rcSynced, ctrl.claimSynced, ctrl.schedulingCtxSynced) { - ctrl.logger.Error(nil, "Cannot sync caches") - return - } - - for i := 0; i < workers; i++ { - go wait.Until(ctrl.sync, 0, stopCh) - } - - <-stopCh -} - -// errRequeue is a special error instance that functions can return -// to request silent requeueing (not logged as error, no event). -// Uses exponential backoff. -var errRequeue = errors.New("requeue") - -// errPeriodic is a special error instance that functions can return -// to request silent retrying at a fixed rate. -var errPeriodic = errors.New("periodic") - -// sync is the main worker. -func (ctrl *controller) sync() { - key, quit := ctrl.queue.Get() - if quit { - return - } - defer ctrl.queue.Done(key) - - logger := klog.LoggerWithValues(ctrl.logger, "key", key) - ctx := klog.NewContext(ctrl.ctx, logger) - logger.V(4).Info("processing") - obj, err := ctrl.syncKey(ctx, key.(string)) - switch err { - case nil: - logger.V(5).Info("completed") - ctrl.queue.Forget(key) - case errRequeue: - logger.V(5).Info("requeue") - ctrl.queue.AddRateLimited(key) - case errPeriodic: - logger.V(5).Info("recheck periodically") - ctrl.queue.AddAfter(key, recheckDelay) - default: - logger.Error(err, "processing failed") - if obj != nil { - // TODO: We don't know here *what* failed. Determine based on error? - ctrl.eventRecorder.Event(obj, v1.EventTypeWarning, "Failed", err.Error()) - } - ctrl.queue.AddRateLimited(key) - } -} - -// syncKey looks up a ResourceClaim by its key and processes it. -func (ctrl *controller) syncKey(ctx context.Context, key string) (obj runtime.Object, finalErr error) { - sep := strings.Index(key, ":") - if sep < 0 { - return nil, fmt.Errorf("unexpected key: %s", key) - } - prefix, object := key[0:sep+1], key[sep+1:] - namespace, name, err := cache.SplitMetaNamespaceKey(object) - if err != nil { - return nil, err - } - - switch prefix { - case claimKeyPrefix: - claim, err := ctrl.getCachedClaim(ctx, object) - if claim == nil || err != nil { - return nil, err - } - obj, finalErr = claim, ctrl.syncClaim(ctx, claim) - case schedulingCtxKeyPrefix: - schedulingCtx, err := ctrl.schedulingCtxLister.PodSchedulingContexts(namespace).Get(name) - if err != nil { - if k8serrors.IsNotFound(err) { - klog.FromContext(ctx).V(5).Info("PodSchedulingContext was deleted, no need to process it") - return nil, nil - } - return nil, err - } - obj, finalErr = schedulingCtx, ctrl.syncPodSchedulingContexts(ctx, schedulingCtx) - } - return -} - -func (ctrl *controller) getCachedClaim(ctx context.Context, key string) (*resourcev1alpha2.ResourceClaim, error) { - claimObj, exists, err := ctrl.claimCache.GetByKey(key) - if !exists || k8serrors.IsNotFound(err) { - klog.FromContext(ctx).V(5).Info("ResourceClaim not found, no need to process it") - return nil, nil - } - if err != nil { - return nil, err - } - claim, ok := claimObj.(*resourcev1alpha2.ResourceClaim) - if !ok { - return nil, fmt.Errorf("internal error: got %T instead of *resourcev1alpha2.ResourceClaim from claim cache", claimObj) - } - return claim, nil -} - -// syncClaim determines which next action may be needed for a ResourceClaim -// and does it. 
-func (ctrl *controller) syncClaim(ctx context.Context, claim *resourcev1alpha2.ResourceClaim) error { - var err error - logger := klog.FromContext(ctx) - - if len(claim.Status.ReservedFor) > 0 { - // In use. Nothing that we can do for it now. - if loggerV6 := logger.V(6); loggerV6.Enabled() { - loggerV6.Info("ResourceClaim in use", "reservedFor", claim.Status.ReservedFor) - } else { - logger.V(5).Info("ResourceClaim in use") - } - return nil - } - - if claim.DeletionTimestamp != nil || - claim.Status.DeallocationRequested { - // Ready for deallocation. We might have our finalizer set. The - // finalizer is specific to the driver, therefore we know that - // this claim is "ours" when the finalizer is set. - hasFinalizer := ctrl.hasFinalizer(claim) - logger.V(5).Info("ResourceClaim ready for deallocation", "deallocationRequested", claim.Status.DeallocationRequested, "deletionTimestamp", claim.DeletionTimestamp, "allocated", claim.Status.Allocation != nil, "hasFinalizer", hasFinalizer) - if hasFinalizer { - claim = claim.DeepCopy() - if claim.Status.Allocation != nil { - // Allocation was completed. Deallocate before proceeding. - if err := ctrl.driver.Deallocate(ctx, claim); err != nil { - return fmt.Errorf("deallocate: %v", err) - } - claim.Status.Allocation = nil - claim.Status.DriverName = "" - claim.Status.DeallocationRequested = false - claim, err = ctrl.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}) - if err != nil { - return fmt.Errorf("remove allocation: %v", err) - } - ctrl.claimCache.Mutation(claim) - } else { - // Ensure that there is no on-going allocation. - if err := ctrl.driver.Deallocate(ctx, claim); err != nil { - return fmt.Errorf("stop allocation: %v", err) - } - } - - if claim.Status.DeallocationRequested { - // Still need to remove it. - claim.Status.DeallocationRequested = false - claim, err = ctrl.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}) - if err != nil { - return fmt.Errorf("remove deallocation: %v", err) - } - ctrl.claimCache.Mutation(claim) - } - - claim.Finalizers = ctrl.removeFinalizer(claim.Finalizers) - claim, err = ctrl.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{}) - if err != nil { - return fmt.Errorf("remove finalizer: %v", err) - } - ctrl.claimCache.Mutation(claim) - } - - // Nothing further to do. The apiserver should remove it shortly. - return nil - - } - - if claim.Status.Allocation != nil { - logger.V(5).Info("ResourceClaim is allocated") - return nil - } - if claim.Spec.AllocationMode != resourcev1alpha2.AllocationModeImmediate { - logger.V(5).Info("ResourceClaim waiting for first consumer") - return nil - } - - // We need the ResourceClass to determine whether we should allocate it. - class, err := ctrl.rcLister.Get(claim.Spec.ResourceClassName) - if err != nil { - return err - } - if class.DriverName != ctrl.name { - // Not ours *at the moment*. This can change, so requeue and - // check again. We could trigger a faster check when the - // ResourceClass changes, but that shouldn't occur much in - // practice and thus isn't worth the effort. - // - // We use exponential backoff because it is unlikely that - // the ResourceClass changes much. - logger.V(5).Info("ResourceClaim is handled by other driver", "driver", class.DriverName) - return errRequeue - } - - // Check parameters. 
Do not record an event on the Claim if its parameters are invalid;
-	// syncKey will record the error.
-	claimParameters, classParameters, err := ctrl.getParameters(ctx, claim, class, false)
-	if err != nil {
-		return err
-	}
-
-	claimAllocations := claimAllocations{&ClaimAllocation{
-		Claim:           claim,
-		ClaimParameters: claimParameters,
-		Class:           class,
-		ClassParameters: classParameters,
-	}}
-
-	ctrl.allocateClaims(ctx, claimAllocations, "", nil)
-
-	if claimAllocations[0].Error != nil {
-		return fmt.Errorf("allocate: %v", claimAllocations[0].Error)
-	}
-
-	return nil
-}
-
-func (ctrl *controller) getParameters(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, class *resourcev1alpha2.ResourceClass, notifyClaim bool) (claimParameters, classParameters interface{}, err error) {
-	classParameters, err = ctrl.driver.GetClassParameters(ctx, class)
-	if err != nil {
-		ctrl.eventRecorder.Event(class, v1.EventTypeWarning, "Failed", err.Error())
-		err = fmt.Errorf("class parameters %s: %v", class.ParametersRef, err)
-		return
-	}
-	claimParameters, err = ctrl.driver.GetClaimParameters(ctx, claim, class, classParameters)
-	if err != nil {
-		if notifyClaim {
-			ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "Failed", err.Error())
-		}
-		err = fmt.Errorf("claim parameters %s: %v", claim.Spec.ParametersRef, err)
-		return
-	}
-	return
-}
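getParameters leaves the meaning of ParametersRef entirely to the driver: whatever GetClassParameters and GetClaimParameters return is passed through opaquely to Allocate. As a hedged sketch only (the classParameters struct and the ConfigMap layout are invented here, not part of this package), a driver's class-parameter lookup might look like this:

    package exampledriver

    import (
    	"context"
    	"fmt"

    	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    )

    // classParameters is a hypothetical driver-defined type; the controller
    // treats it as an opaque interface{} value.
    type classParameters struct {
    	DeviceType string
    }

    type exampleDriver struct {
    	kubeClient kubernetes.Interface
    }

    // GetClassParameters resolves class.ParametersRef, assumed here to point
    // at a ConfigMap with a "deviceType" entry.
    func (d *exampleDriver) GetClassParameters(ctx context.Context, class *resourcev1alpha2.ResourceClass) (interface{}, error) {
    	if class.ParametersRef == nil {
    		return &classParameters{}, nil // no reference: use defaults
    	}
    	cm, err := d.kubeClient.CoreV1().ConfigMaps(class.ParametersRef.Namespace).Get(ctx, class.ParametersRef.Name, metav1.GetOptions{})
    	if err != nil {
    		return nil, fmt.Errorf("get parameters ConfigMap: %v", err)
    	}
    	return &classParameters{DeviceType: cm.Data["deviceType"]}, nil
    }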
-
-// allocateClaims filters the list of claims, keeps those needing allocation and asks the driver to do the allocations.
-// The driver is supposed to write the AllocationResult and Error fields into the argument claims slice.
-func (ctrl *controller) allocateClaims(ctx context.Context, claims []*ClaimAllocation, selectedNode string, selectedUser *resourcev1alpha2.ResourceClaimConsumerReference) {
-	logger := klog.FromContext(ctx)
-
-	needAllocation := make([]*ClaimAllocation, 0, len(claims))
-	for _, claim := range claims {
-		if claim.Claim.Status.Allocation != nil {
-			// This can happen when two PodSchedulingContext objects trigger
-			// allocation attempts (first one wins) or when we see the
-			// update of the PodSchedulingContext object.
-			logger.V(5).Info("Claim is already allocated, skipping allocation", "claim", claim.PodClaimName)
-			continue
-		}
-		needAllocation = append(needAllocation, claim)
-	}
-
-	if len(needAllocation) == 0 {
-		logger.V(5).Info("No claims need allocation, nothing to do")
-		return
-	}
-
-	// Keep the claims whose finalizer was added successfully in a separate slice;
-	// only those will be passed to the driver's Allocate.
-	claimsWithFinalizers := make([]*ClaimAllocation, 0, len(needAllocation))
-	for _, claimAllocation := range needAllocation {
-		if !ctrl.hasFinalizer(claimAllocation.Claim) {
-			claim := claimAllocation.Claim.DeepCopy()
-			// Set finalizer before doing anything. We continue with the updated claim.
-			logger.V(5).Info("Adding finalizer", "claim", claim.Name)
-			claim.Finalizers = append(claim.Finalizers, ctrl.finalizer)
-			var err error
-			claim, err = ctrl.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{})
-			if err != nil {
-				logger.Error(err, "add finalizer", "claim", claim.Name)
-				claimAllocation.Error = fmt.Errorf("add finalizer: %v", err)
-				// Do not pass this claim on to the driver for Allocate.
-				continue
-			}
-			ctrl.claimCache.Mutation(claim)
-			claimAllocation.Claim = claim
-		}
-		claimsWithFinalizers = append(claimsWithFinalizers, claimAllocation)
-	}
-
-	// Beyond here we only operate with claimsWithFinalizers because those are ready for allocation.
-
-	logger.V(5).Info("Allocating")
-	ctrl.driver.Allocate(ctx, claimsWithFinalizers, selectedNode)
-
-	// Update successfully allocated claims' status with allocation info.
-	for _, claimAllocation := range claimsWithFinalizers {
-		if claimAllocation.Error != nil {
-			logger.Error(claimAllocation.Error, "allocating claim", "claim", claimAllocation.Claim.Name)
-			continue
-		}
-		if claimAllocation.Allocation == nil {
-			logger.Error(nil, "allocating claim: missing allocation from driver", "claim", claimAllocation.Claim.Name)
-			claimAllocation.Error = fmt.Errorf("allocating claim: missing allocation from driver")
-			// Do not update this claim with allocation, it might succeed next time.
-			continue
-		}
-		logger.V(5).Info("successfully allocated", "claim", klog.KObj(claimAllocation.Claim))
-		claim := claimAllocation.Claim.DeepCopy()
-		claim.Status.Allocation = claimAllocation.Allocation
-		claim.Status.DriverName = ctrl.name
-		if selectedUser != nil && ctrl.setReservedFor {
-			claim.Status.ReservedFor = append(claim.Status.ReservedFor, *selectedUser)
-		}
-		logger.V(6).Info("Updating claim after allocation", "claim", claim)
-		claim, err := ctrl.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
-		if err != nil {
-			claimAllocation.Error = fmt.Errorf("add allocation: %v", err)
-			continue
-		}
-
-		ctrl.claimCache.Mutation(claim)
-	}
-	return
-}
-
-func (ctrl *controller) checkPodClaim(ctx context.Context, pod *v1.Pod, podClaim v1.PodResourceClaim) (*ClaimAllocation, error) {
-	claimName, mustCheckOwner, err := ctrl.claimNameLookup.Name(pod, &podClaim)
-	if err != nil {
-		return nil, err
-	}
-	if claimName == nil {
-		// Nothing to do.
-		return nil, nil
-	}
-	key := pod.Namespace + "/" + *claimName
-	claim, err := ctrl.getCachedClaim(ctx, key)
-	if claim == nil || err != nil {
-		return nil, err
-	}
-	if mustCheckOwner {
-		if err := resourceclaim.IsForPod(pod, claim); err != nil {
-			return nil, err
-		}
-	}
-	if claim.Spec.AllocationMode != resourcev1alpha2.AllocationModeWaitForFirstConsumer {
-		// Nothing to do for it as part of pod scheduling.
-		return nil, nil
-	}
-	if claim.Status.Allocation != nil {
-		// Already allocated, so class and parameters are not needed and nothing
-		// needs to be done for the claim either.
-		return nil, nil
-	}
-	class, err := ctrl.rcLister.Get(claim.Spec.ResourceClassName)
-	if err != nil {
-		return nil, err
-	}
-	if class.DriverName != ctrl.name {
-		return nil, nil
-	}
-	// Check parameters. Record an event on the claim and the pod if parameters are invalid.
-	claimParameters, classParameters, err := ctrl.getParameters(ctx, claim, class, true)
-	if err != nil {
-		ctrl.eventRecorder.Event(pod, v1.EventTypeWarning, "Failed", fmt.Sprintf("claim %v: %v", claim.Name, err.Error()))
-		return nil, err
-	}
-	return &ClaimAllocation{
-		PodClaimName:    podClaim.Name,
-		Claim:           claim,
-		Class:           class,
-		ClaimParameters: claimParameters,
-		ClassParameters: classParameters,
-	}, nil
-}
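The contract encoded above is that Allocate works in place: for every ClaimAllocation the driver must set either Allocation or Error, and an entry with neither is reported as "missing allocation from driver". A toy driver method honoring that contract, assuming the usual import path for this vendored package (the driver name and handle data are placeholders):

    package exampledriver

    import (
    	"context"

    	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
    	"k8s.io/dynamic-resource-allocation/controller"
    )

    // Allocate fills in Allocation or Error on every entry, in place.
    func (d *exampleDriver) Allocate(ctx context.Context, claims []*controller.ClaimAllocation, selectedNode string) {
    	for _, ca := range claims {
    		ca.Allocation = &resourcev1alpha2.AllocationResult{
    			ResourceHandles: []resourcev1alpha2.ResourceHandle{{
    				DriverName: "example.com/driver", // placeholder driver name
    				Data:       "opaque driver state",
    			}},
    			Shareable: true,
    		}
    	}
    }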
-
-// syncPodSchedulingContexts determines which next action may be needed for a PodSchedulingContext object
-// and does it.
-func (ctrl *controller) syncPodSchedulingContexts(ctx context.Context, schedulingCtx *resourcev1alpha2.PodSchedulingContext) error {
-	logger := klog.FromContext(ctx)
-
-	// Ignore deleted objects.
-	if schedulingCtx.DeletionTimestamp != nil {
-		logger.V(5).Info("PodSchedulingContext marked for deletion")
-		return nil
-	}
-
-	if schedulingCtx.Spec.SelectedNode == "" &&
-		len(schedulingCtx.Spec.PotentialNodes) == 0 {
-		// Nothing to do? Shouldn't occur.
-		logger.V(5).Info("Waiting for scheduler to set fields")
-		return nil
-	}
-
-	// Check pod.
-	// TODO (?): use an informer - only useful when many (most?) pods have claims
-	// TODO (?): let the scheduler copy all claim names + UIDs into PodSchedulingContext - then we don't need the pod
-	pod, err := ctrl.kubeClient.CoreV1().Pods(schedulingCtx.Namespace).Get(ctx, schedulingCtx.Name, metav1.GetOptions{})
-	if err != nil {
-		return err
-	}
-	if pod.DeletionTimestamp != nil {
-		logger.V(5).Info("Pod marked for deletion")
-		return nil
-	}
-
-	// Still the owner?
-	if !metav1.IsControlledBy(schedulingCtx, pod) {
-		// Must be an obsolete object, do nothing for it.
-		logger.V(5).Info("Pod not owner, PodSchedulingContext is obsolete")
-		return nil
-	}
-
-	// Find all pending claims that are owned by us. We bail out if any of the prerequisites
-	// for pod scheduling (claims exist, classes exist, parameters exist) are not met.
-	// The scheduler will do the same, except for checking parameters, so usually
-	// everything should be ready once the PodSchedulingContext object exists.
-	var claims claimAllocations
-	for _, podClaim := range pod.Spec.ResourceClaims {
-		delayed, err := ctrl.checkPodClaim(ctx, pod, podClaim)
-		if err != nil {
-			return fmt.Errorf("pod claim %s: %v", podClaim.Name, err)
-		}
-		if delayed == nil {
-			// Nothing to do for it. This can change, so keep checking.
-			continue
-		}
-		claims = append(claims, delayed)
-	}
-	if len(claims) == 0 {
-		logger.V(5).Info("Found no pending pod claims")
-		return errPeriodic
-	}
-
-	// Check current resource availability *before* triggering the
-	// allocations. If we find that any of the claims cannot be allocated
-	// for the selected node, we don't need to try for the others either
-	// and shouldn't, because those allocations might have to be undone to
-	// pick a better node. If we don't need to allocate now, then we'll
-	// simply report back the gathered information.
-	//
-	// We shouldn't assume that the scheduler has included the selected node
-	// in the list of potential nodes. Usually it does, but let's make sure
-	// that we check it.
-	selectedNode := schedulingCtx.Spec.SelectedNode
-	potentialNodes := schedulingCtx.Spec.PotentialNodes
-	if selectedNode != "" && !hasString(potentialNodes, selectedNode) {
-		potentialNodes = append(potentialNodes, selectedNode)
-	}
-	if len(schedulingCtx.Spec.PotentialNodes) > 0 {
-		if err := ctrl.driver.UnsuitableNodes(ctx, pod, claims, potentialNodes); err != nil {
-			return fmt.Errorf("checking potential nodes: %v", err)
-		}
-	}
-	logger.V(5).Info("pending pod claims", "claims", claims, "selectedNode", selectedNode)
-	if selectedNode != "" {
-		unsuitable := false
-		for _, delayed := range claims {
-			if hasString(delayed.UnsuitableNodes, selectedNode) {
-				unsuitable = true
-				break
-			}
-		}
-
-		if unsuitable {
-			logger.V(2).Info("skipping allocation for unsuitable selected node", "node", selectedNode)
-		} else {
-			logger.V(2).Info("allocation for selected node", "node", selectedNode)
-			selectedUser := &resourcev1alpha2.ResourceClaimConsumerReference{
-				Resource: "pods",
-				Name:     pod.Name,
-				UID:      pod.UID,
-			}
-
-			ctrl.allocateClaims(ctx, claims, selectedNode, selectedUser)
-
-			var allErrors []error
-			for _, delayed := range claims {
-				if delayed.Error != nil {
-					if strings.Contains(delayed.Error.Error(), delayed.Claim.Name) {
-						// Avoid adding redundant information.
-						allErrors = append(allErrors, delayed.Error)
-					} else {
-						// Include the claim name, it's not in the underlying error.
- allErrors = append(allErrors, fmt.Errorf("claim %s: %v", delayed.Claim.Name, delayed.Error)) - } - } - } - if len(allErrors) > 0 { - return errors.Join(allErrors...) - } - } - } - - // Now update unsuitable nodes. This is useful information for the scheduler even if - // we managed to allocate because we might have to undo that. - // TODO: replace with patching the array. We can do that without race conditions - // because each driver is responsible for its own entries. - modified := false - schedulingCtx = schedulingCtx.DeepCopy() - for _, delayed := range claims { - i := findClaim(schedulingCtx.Status.ResourceClaims, delayed.PodClaimName) - if i < 0 { - // Add new entry. - schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims, - resourcev1alpha2.ResourceClaimSchedulingStatus{ - Name: delayed.PodClaimName, - UnsuitableNodes: truncateNodes(delayed.UnsuitableNodes, selectedNode), - }) - modified = true - } else if stringsDiffer(schedulingCtx.Status.ResourceClaims[i].UnsuitableNodes, delayed.UnsuitableNodes) { - // Update existing entry. - schedulingCtx.Status.ResourceClaims[i].UnsuitableNodes = truncateNodes(delayed.UnsuitableNodes, selectedNode) - modified = true - } - } - if modified { - logger.V(6).Info("Updating pod scheduling with modified unsuitable nodes", "podSchedulingCtx", schedulingCtx) - if _, err := ctrl.kubeClient.ResourceV1alpha2().PodSchedulingContexts(schedulingCtx.Namespace).UpdateStatus(ctx, schedulingCtx, metav1.UpdateOptions{}); err != nil { - return fmt.Errorf("update unsuitable node status: %v", err) - } - } - - // We must keep the object in our queue and keep updating the - // UnsuitableNodes fields. - return errPeriodic -} - -func truncateNodes(nodes []string, selectedNode string) []string { - // We might have checked "potential nodes + selected node" above, so - // this list might be too long by one element. When truncating it, make - // sure that the selected node is listed. - lenUnsuitable := len(nodes) - if lenUnsuitable > resourcev1alpha2.PodSchedulingNodeListMaxSize { - if nodes[0] == selectedNode { - // Truncate at the end and keep selected node in the first element. - nodes = nodes[0 : lenUnsuitable-1] - } else { - // Truncate at the front, it's not the selected node. - nodes = nodes[1:lenUnsuitable] - } - } - return nodes -} - -type claimAllocations []*ClaimAllocation - -// MarshalLog replaces the pointers with the actual structs because -// we care about the content, not the pointer values. -func (claims claimAllocations) MarshalLog() interface{} { - content := make([]ClaimAllocation, 0, len(claims)) - for _, claim := range claims { - content = append(content, *claim) - } - return content -} - -var _ logr.Marshaler = claimAllocations{} - -// findClaim returns the index of the specified pod claim, -1 if not found. -func findClaim(claims []resourcev1alpha2.ResourceClaimSchedulingStatus, podClaimName string) int { - for i := range claims { - if claims[i].Name == podClaimName { - return i - } - } - return -1 -} - -// hasString checks for a string in a slice. -func hasString(strings []string, str string) bool { - for _, s := range strings { - if s == str { - return true - } - } - return false -} - -// stringsDiffer does a strict comparison of two string arrays, order of entries matters. -func stringsDiffer(a, b []string) bool { - if len(a) != len(b) { - return true - } - for i := range a { - if a[i] != b[i] { - return true - } - } - return false -} - -// hasFinalizer checks if the claim has the finalizer of the driver. 
-func (ctrl *controller) hasFinalizer(claim *resourcev1alpha2.ResourceClaim) bool { - for _, finalizer := range claim.Finalizers { - if finalizer == ctrl.finalizer { - return true - } - } - return false -} - -// removeFinalizer creates a new slice without the finalizer of the driver. -func (ctrl *controller) removeFinalizer(in []string) []string { - out := make([]string, 0, len(in)) - for _, finalizer := range in { - if finalizer != ctrl.finalizer { - out = append(out, finalizer) - } - } - if len(out) == 0 { - return nil - } - return out -} - -// prettyPrint formats arbitrary objects as JSON or, if that fails, with Sprintf. -func prettyPrint(obj interface{}) string { - buffer, err := json.Marshal(obj) - if err != nil { - return fmt.Sprintf("%s", obj) - } - return string(buffer) -} diff --git a/vendor/k8s.io/dynamic-resource-allocation/kubeletplugin/doc.go b/vendor/k8s.io/dynamic-resource-allocation/kubeletplugin/doc.go deleted file mode 100644 index cdc33100a3..0000000000 --- a/vendor/k8s.io/dynamic-resource-allocation/kubeletplugin/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package kubeletplugin provides helper functions for running a dynamic -// resource allocation kubelet plugin. -package kubeletplugin diff --git a/vendor/k8s.io/dynamic-resource-allocation/kubeletplugin/draplugin.go b/vendor/k8s.io/dynamic-resource-allocation/kubeletplugin/draplugin.go deleted file mode 100644 index e3a0bafe2b..0000000000 --- a/vendor/k8s.io/dynamic-resource-allocation/kubeletplugin/draplugin.go +++ /dev/null @@ -1,273 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubeletplugin - -import ( - "errors" - "fmt" - "net" - - "google.golang.org/grpc" - "k8s.io/klog/v2" - - drapbv1alpha2 "k8s.io/kubelet/pkg/apis/dra/v1alpha2" - drapbv1alpha3 "k8s.io/kubelet/pkg/apis/dra/v1alpha3" - registerapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1" -) - -// DRAPlugin gets returned by Start and defines the public API of the generic -// dynamic resource allocation plugin. -type DRAPlugin interface { - // Stop ensures that all spawned goroutines are stopped and frees - // resources. - Stop() - - // RegistrationStatus returns the result of registration, nil if none - // received yet. 
-	RegistrationStatus() *registerapi.RegistrationStatus
-
-	// This unexported method ensures that we can modify the interface
-	// without causing an API break of the package
-	// (https://pkg.go.dev/golang.org/x/exp/apidiff#section-readme).
-	internal()
-}
-
-// Option implements the functional options pattern for Start.
-type Option func(o *options) error
-
-// DriverName defines the driver name for the dynamic resource allocation driver.
-// Must be set.
-func DriverName(driverName string) Option {
-	return func(o *options) error {
-		o.driverName = driverName
-		return nil
-	}
-}
-
-// Logger overrides the default klog.Background logger.
-func Logger(logger klog.Logger) Option {
-	return func(o *options) error {
-		o.logger = logger
-		return nil
-	}
-}
-
-// GRPCVerbosity sets the verbosity for logging gRPC calls. Default is 4. A negative
-// value disables logging.
-func GRPCVerbosity(level int) Option {
-	return func(o *options) error {
-		o.grpcVerbosity = level
-		return nil
-	}
-}
-
-// RegistrarSocketPath sets the file path for a Unix domain socket.
-// If RegistrarListener is not used, then Start removes
-// a file at that path, should one exist, and creates a socket
-// itself. Otherwise it uses the provided listener and only
-// removes the socket at the specified path during shutdown.
-//
-// At least one of these two options is required.
-func RegistrarSocketPath(path string) Option {
-	return func(o *options) error {
-		o.pluginRegistrationEndpoint.path = path
-		return nil
-	}
-}
-
-// RegistrarListener sets an already created listener for the plugin
-// registration API. Can be combined with RegistrarSocketPath.
-//
-// At least one of these two options is required.
-func RegistrarListener(listener net.Listener) Option {
-	return func(o *options) error {
-		o.pluginRegistrationEndpoint.listener = listener
-		return nil
-	}
-}
-
-// PluginSocketPath sets the file path for a Unix domain socket.
-// If PluginListener is not used, then Start removes
-// a file at that path, should one exist, and creates a socket
-// itself. Otherwise it uses the provided listener and only
-// removes the socket at the specified path during shutdown.
-//
-// At least one of these two options is required.
-func PluginSocketPath(path string) Option {
-	return func(o *options) error {
-		o.draEndpoint.path = path
-		return nil
-	}
-}
-
-// PluginListener sets an already created listener for the dynamic resource
-// allocation plugin API. Can be combined with PluginSocketPath.
-//
-// At least one of these two options is required.
-func PluginListener(listener net.Listener) Option {
-	return func(o *options) error {
-		o.draEndpoint.listener = listener
-		return nil
-	}
-}
-
-// KubeletPluginSocketPath defines how kubelet will connect to the dynamic
-// resource allocation plugin. This corresponds to PluginSocketPath, except
-// that PluginSocketPath defines the path in the filesystem of the caller and
-// KubeletPluginSocketPath in the filesystem of kubelet.
-func KubeletPluginSocketPath(path string) Option {
-	return func(o *options) error {
-		o.draAddress = path
-		return nil
-	}
-}
-
-// GRPCInterceptor is called for each incoming gRPC method call. This option
-// may be used more than once and each interceptor will get called.
-func GRPCInterceptor(interceptor grpc.UnaryServerInterceptor) Option {
-	return func(o *options) error {
-		o.interceptors = append(o.interceptors, interceptor)
-		return nil
-	}
-}
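Together these options cover everything that Start validates below: a driver name, the two endpoints on the plugin's side, and the socket path from kubelet's point of view. A hedged usage sketch (the socket paths and the nodeServer value are placeholders; kubelet's plugin registration directory is the conventional location for the registrar socket):

    package exampledriver

    import (
    	"k8s.io/dynamic-resource-allocation/kubeletplugin"
    )

    func runPlugin(nodeServer interface{}) error {
    	plugin, err := kubeletplugin.Start(nodeServer,
    		kubeletplugin.DriverName("example.com/driver"),
    		kubeletplugin.RegistrarSocketPath("/var/lib/kubelet/plugins_registry/example.sock"),
    		kubeletplugin.PluginSocketPath("/var/lib/kubelet/plugins/example/dra.sock"),
    		// The same socket, expressed as the path kubelet itself uses.
    		kubeletplugin.KubeletPluginSocketPath("/var/lib/kubelet/plugins/example/dra.sock"),
    	)
    	if err != nil {
    		return err
    	}
    	defer plugin.Stop()
    	// ... block here until the process is asked to shut down ...
    	return nil
    }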
-
-// NodeV1alpha2 explicitly chooses whether the DRA gRPC API v1alpha2
-// gets enabled.
-func NodeV1alpha2(enabled bool) Option {
-	return func(o *options) error {
-		o.nodeV1alpha2 = enabled
-		return nil
-	}
-}
-
-// NodeV1alpha3 explicitly chooses whether the DRA gRPC API v1alpha3
-// gets enabled.
-func NodeV1alpha3(enabled bool) Option {
-	return func(o *options) error {
-		o.nodeV1alpha3 = enabled
-		return nil
-	}
-}
-
-type options struct {
-	logger                     klog.Logger
-	grpcVerbosity              int
-	driverName                 string
-	draEndpoint                endpoint
-	draAddress                 string
-	pluginRegistrationEndpoint endpoint
-	interceptors               []grpc.UnaryServerInterceptor
-
-	nodeV1alpha2, nodeV1alpha3 bool
-}
-
-// draPlugin combines the kubelet registration service and the DRA node plugin
-// service.
-type draPlugin struct {
-	registrar *nodeRegistrar
-	plugin    *grpcServer
-}
-
-// Start sets up two gRPC servers (one for registration, one for the DRA node
-// client). By default, all APIs implemented by the nodeServer get registered.
-func Start(nodeServer interface{}, opts ...Option) (result DRAPlugin, finalErr error) {
-	d := &draPlugin{}
-
-	o := options{
-		logger:        klog.Background(),
-		grpcVerbosity: 4,
-		nodeV1alpha2:  true,
-		nodeV1alpha3:  true,
-	}
-	for _, option := range opts {
-		if err := option(&o); err != nil {
-			return nil, err
-		}
-	}
-
-	if o.driverName == "" {
-		return nil, errors.New("driver name must be set")
-	}
-	if o.draAddress == "" {
-		return nil, errors.New("DRA address must be set")
-	}
-	var emptyEndpoint endpoint
-	if o.draEndpoint == emptyEndpoint {
-		return nil, errors.New("a Unix domain socket path and/or listener must be set for the kubelet plugin")
-	}
-	if o.pluginRegistrationEndpoint == emptyEndpoint {
-		return nil, errors.New("a Unix domain socket path and/or listener must be set for the registrar")
-	}
-
-	// Run the node plugin gRPC server first to ensure that it is ready.
-	implemented := false
-	plugin, err := startGRPCServer(klog.LoggerWithName(o.logger, "dra"), o.grpcVerbosity, o.interceptors, o.draEndpoint, func(grpcServer *grpc.Server) {
-		if nodeServer, ok := nodeServer.(drapbv1alpha3.NodeServer); ok && o.nodeV1alpha3 {
-			o.logger.V(5).Info("registering drapbv1alpha3.NodeServer")
-			drapbv1alpha3.RegisterNodeServer(grpcServer, nodeServer)
-			implemented = true
-		}
-		if nodeServer, ok := nodeServer.(drapbv1alpha2.NodeServer); ok && o.nodeV1alpha2 {
-			o.logger.V(5).Info("registering drapbv1alpha2.NodeServer")
-			drapbv1alpha2.RegisterNodeServer(grpcServer, nodeServer)
-			implemented = true
-		}
-	})
-	if err != nil {
-		return nil, fmt.Errorf("start node client: %v", err)
-	}
-	d.plugin = plugin
-	defer func() {
-		// Clean up if we didn't finish successfully.
-		if r := recover(); r != nil {
-			plugin.stop()
-			panic(r)
-		}
-		if finalErr != nil {
-			plugin.stop()
-		}
-	}()
-	if !implemented {
-		return nil, errors.New("no supported DRA gRPC API is implemented and enabled")
-	}
-
-	// Now make it available to kubelet.
- registrar, err := startRegistrar(klog.LoggerWithName(o.logger, "registrar"), o.grpcVerbosity, o.interceptors, o.driverName, o.draAddress, o.pluginRegistrationEndpoint) - if err != nil { - return nil, fmt.Errorf("start registrar: %v", err) - } - d.registrar = registrar - - return d, nil -} - -func (d *draPlugin) Stop() { - if d == nil { - return - } - d.registrar.stop() - d.plugin.stop() -} - -func (d *draPlugin) RegistrationStatus() *registerapi.RegistrationStatus { - if d.registrar == nil { - return nil - } - return d.registrar.status -} - -func (d *draPlugin) internal() {} diff --git a/vendor/k8s.io/dynamic-resource-allocation/kubeletplugin/noderegistrar.go b/vendor/k8s.io/dynamic-resource-allocation/kubeletplugin/noderegistrar.go deleted file mode 100644 index f5148e4c9c..0000000000 --- a/vendor/k8s.io/dynamic-resource-allocation/kubeletplugin/noderegistrar.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubeletplugin - -import ( - "fmt" - - "google.golang.org/grpc" - "k8s.io/klog/v2" - registerapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1" -) - -type nodeRegistrar struct { - logger klog.Logger - registrationServer - server *grpcServer -} - -// startRegistrar returns a running instance. -func startRegistrar(logger klog.Logger, grpcVerbosity int, interceptors []grpc.UnaryServerInterceptor, driverName string, endpoint string, pluginRegistrationEndpoint endpoint) (*nodeRegistrar, error) { - n := &nodeRegistrar{ - logger: logger, - registrationServer: registrationServer{ - driverName: driverName, - endpoint: endpoint, - supportedVersions: []string{"1.0.0"}, // TODO: is this correct? - }, - } - s, err := startGRPCServer(logger, grpcVerbosity, interceptors, pluginRegistrationEndpoint, func(grpcServer *grpc.Server) { - registerapi.RegisterRegistrationServer(grpcServer, n) - }) - if err != nil { - return nil, fmt.Errorf("start gRPC server: %v", err) - } - n.server = s - return n, nil -} - -// stop ensures that the registrar is not running anymore and cleans up all resources. -// It is idempotent and may be called with a nil pointer. -func (s *nodeRegistrar) stop() { - if s == nil { - return - } - s.server.stop() -} diff --git a/vendor/k8s.io/dynamic-resource-allocation/kubeletplugin/nonblockinggrpcserver.go b/vendor/k8s.io/dynamic-resource-allocation/kubeletplugin/nonblockinggrpcserver.go deleted file mode 100644 index e6a835d969..0000000000 --- a/vendor/k8s.io/dynamic-resource-allocation/kubeletplugin/nonblockinggrpcserver.go +++ /dev/null @@ -1,149 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kubeletplugin
-
-import (
-	"context"
-	"fmt"
-	"net"
-	"os"
-	"sync"
-	"sync/atomic"
-
-	"google.golang.org/grpc"
-	"k8s.io/klog/v2"
-)
-
-type grpcServer struct {
-	logger        klog.Logger
-	grpcVerbosity int
-	wg            sync.WaitGroup
-	endpoint      endpoint
-	server        *grpc.Server
-	requestID     int64
-}
-
-type registerService func(s *grpc.Server)
-
-// endpoint defines where to listen for incoming connections.
-// The listener always gets closed when shutting down.
-//
-// If the listener is not set, a new listener for a Unix domain socket gets
-// created at the path.
-//
-// If the path is non-empty, then the socket will get removed when shutting
-// down, regardless of who created the listener.
-type endpoint struct {
-	path     string
-	listener net.Listener
-}
-
-// startGRPCServer sets up the GRPC server on a Unix domain socket and spawns a goroutine
-// which handles requests for arbitrary services.
-func startGRPCServer(logger klog.Logger, grpcVerbosity int, interceptors []grpc.UnaryServerInterceptor, endpoint endpoint, services ...registerService) (*grpcServer, error) {
-	s := &grpcServer{
-		logger:        logger,
-		endpoint:      endpoint,
-		grpcVerbosity: grpcVerbosity,
-	}
-
-	listener := endpoint.listener
-	if listener == nil {
-		// Remove any (probably stale) existing socket.
-		if err := os.Remove(endpoint.path); err != nil && !os.IsNotExist(err) {
-			return nil, fmt.Errorf("remove Unix domain socket: %v", err)
-		}
-
-		// Now we can use the endpoint for listening.
-		l, err := net.Listen("unix", endpoint.path)
-		if err != nil {
-			return nil, fmt.Errorf("listen on %q: %v", endpoint.path, err)
-		}
-		listener = l
-	}
-
-	// Run a gRPC server. It will close the listening socket when
-	// shutting down, so we don't need to do that.
-	var opts []grpc.ServerOption
-	var finalInterceptors []grpc.UnaryServerInterceptor
-	if grpcVerbosity >= 0 {
-		finalInterceptors = append(finalInterceptors, s.interceptor)
-	}
-	finalInterceptors = append(finalInterceptors, interceptors...)
-	if len(finalInterceptors) > 0 {
-		opts = append(opts, grpc.ChainUnaryInterceptor(finalInterceptors...))
-	}
-	s.server = grpc.NewServer(opts...)
-	for _, service := range services {
-		service(s.server)
-	}
-	s.wg.Add(1)
-	go func() {
-		defer s.wg.Done()
-		err := s.server.Serve(listener)
-		if err != nil {
-			logger.Error(err, "GRPC server failed")
-		} else {
-			logger.V(3).Info("GRPC server terminated gracefully")
-		}
-	}()
-
-	logger.Info("GRPC server started")
-	return s, nil
-}
-
-// interceptor is called for each request. It creates a logger with a unique,
-// sequentially increasing request ID and adds that logger to the context. It
-// also logs request and response.
-func (s *grpcServer) interceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { - requestID := atomic.AddInt64(&s.requestID, 1) - logger := klog.LoggerWithValues(s.logger, "requestID", requestID) - ctx = klog.NewContext(ctx, logger) - logger.V(s.grpcVerbosity).Info("handling request", "request", req) - defer func() { - if r := recover(); r != nil { - logger.Error(nil, "handling request panicked", "panic", r, "request", req) - panic(r) - } - }() - resp, err = handler(ctx, req) - if err != nil { - logger.Error(err, "handling request failed", "request", req) - } else { - logger.V(s.grpcVerbosity).Info("handling request succeeded", "response", resp) - } - return -} - -// stop ensures that the server is not running anymore and cleans up all resources. -// It is idempotent and may be called with a nil pointer. -func (s *grpcServer) stop() { - if s == nil { - return - } - if s.server != nil { - s.server.Stop() - } - s.wg.Wait() - s.server = nil - if s.endpoint.path != "" { - if err := os.Remove(s.endpoint.path); err != nil && !os.IsNotExist(err) { - s.logger.Error(err, "remove Unix socket") - } - } - s.logger.V(3).Info("GRPC server stopped") -} diff --git a/vendor/k8s.io/dynamic-resource-allocation/kubeletplugin/registrationserver.go b/vendor/k8s.io/dynamic-resource-allocation/kubeletplugin/registrationserver.go deleted file mode 100644 index 4f0adefb30..0000000000 --- a/vendor/k8s.io/dynamic-resource-allocation/kubeletplugin/registrationserver.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubeletplugin - -import ( - "context" - "fmt" - - registerapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1" -) - -// registrationServer implements the kubelet plugin registration gRPC interface. -type registrationServer struct { - driverName string - endpoint string - supportedVersions []string - status *registerapi.RegistrationStatus -} - -var _ registerapi.RegistrationServer = ®istrationServer{} - -// GetInfo is the RPC invoked by plugin watcher. -func (e *registrationServer) GetInfo(ctx context.Context, req *registerapi.InfoRequest) (*registerapi.PluginInfo, error) { - return ®isterapi.PluginInfo{ - Type: registerapi.DRAPlugin, - Name: e.driverName, - Endpoint: e.endpoint, - SupportedVersions: e.supportedVersions, - }, nil -} - -// NotifyRegistrationStatus is the RPC invoked by plugin watcher. 
-func (e *registrationServer) NotifyRegistrationStatus(ctx context.Context, status *registerapi.RegistrationStatus) (*registerapi.RegistrationStatusResponse, error) { - e.status = status - if !status.PluginRegistered { - return nil, fmt.Errorf("failed registration process: %+v", status.Error) - } - - return ®isterapi.RegistrationStatusResponse{}, nil -} diff --git a/vendor/k8s.io/dynamic-resource-allocation/resourceclaim/resourceclaim.go b/vendor/k8s.io/dynamic-resource-allocation/resourceclaim/resourceclaim.go deleted file mode 100644 index 93d69695ef..0000000000 --- a/vendor/k8s.io/dynamic-resource-allocation/resourceclaim/resourceclaim.go +++ /dev/null @@ -1,179 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package resourceclaim provides code that supports the usual pattern -// for accessing the ResourceClaim that is referenced by a PodResourceClaim: -// -// - determine the ResourceClaim name that corresponds to the PodResourceClaim -// - retrieve the ResourceClaim -// - verify that the ResourceClaim is owned by the pod if generated from a template -// - use the ResourceClaim -package resourceclaim - -import ( - "errors" - "fmt" - "os" - "strings" - - v1 "k8s.io/api/core/v1" - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/utils/ptr" -) - -var ( - // ErrAPIUnsupported is wrapped by the actual errors returned by Name and - // indicates that none of the required fields are set. - ErrAPIUnsupported = errors.New("none of the supported fields are set") - - // ErrClaimNotFound is wrapped by the actual errors returned by Name and - // indicates that the claim has not been created yet. - ErrClaimNotFound = errors.New("ResourceClaim not created yet") -) - -// Name returns the name of the ResourceClaim object that gets referenced by or -// created for the PodResourceClaim. Three different results are possible: -// -// - An error is returned when some field is not set as expected (either the -// input is invalid or the API got extended and the library and the client -// using it need to be updated) or the claim hasn't been created yet. -// -// The error includes pod and pod claim name and the unexpected field and -// is derived from one of the pre-defined errors in this package. -// -// - A nil string pointer and no error when the ResourceClaim intentionally -// didn't get created and the PodResourceClaim can be ignored. -// -// - A pointer to the name and no error when the ResourceClaim got created. -// In this case the boolean determines whether IsForPod must be called -// after retrieving the ResourceClaim and before using it. -// -// Determining the name depends on Kubernetes >= 1.28. 
-func Name(pod *v1.Pod, podClaim *v1.PodResourceClaim) (name *string, mustCheckOwner bool, err error) { - switch { - case podClaim.Source.ResourceClaimName != nil: - return podClaim.Source.ResourceClaimName, false, nil - case podClaim.Source.ResourceClaimTemplateName != nil: - for _, status := range pod.Status.ResourceClaimStatuses { - if status.Name == podClaim.Name { - return status.ResourceClaimName, true, nil - } - } - return nil, false, fmt.Errorf(`pod "%s/%s": %w`, pod.Namespace, pod.Name, ErrClaimNotFound) - default: - return nil, false, fmt.Errorf(`pod "%s/%s", spec.resourceClaim %q: %w`, pod.Namespace, pod.Name, podClaim.Name, ErrAPIUnsupported) - } -} - -// NewNameLookup returns an object which handles determining the name of -// a ResourceClaim. In contrast to the stand-alone Name it is compatible -// also with Kubernetes < 1.28. -// -// Providing a client is optional. If none is available, then code can pass nil -// and users can set the DRA_WITH_DETERMINISTIC_RESOURCE_CLAIM_NAMES env -// variable to an arbitrary non-empty value to use the naming from Kubernetes < -// 1.28. -func NewNameLookup(client kubernetes.Interface) *Lookup { - return &Lookup{client: client} -} - -// Lookup stores the state which is necessary to look up ResourceClaim names. -type Lookup struct { - client kubernetes.Interface - usePodStatus *bool -} - -// Name is a variant of the stand-alone Name with support also for Kubernetes < 1.28. -func (l *Lookup) Name(pod *v1.Pod, podClaim *v1.PodResourceClaim) (name *string, mustCheckOwner bool, err error) { - if l.usePodStatus == nil { - if value, _ := os.LookupEnv("DRA_WITH_DETERMINISTIC_RESOURCE_CLAIM_NAMES"); value != "" { - l.usePodStatus = ptr.To(false) - } else if l.client != nil { - // Check once. This does not detect upgrades or - // downgrades, but that is good enough for the simple - // test scenarios that the Kubernetes < 1.28 support is - // meant for. - info, err := l.client.Discovery().ServerVersion() - if err != nil { - return nil, false, fmt.Errorf("look up server version: %v", err) - } - if info.Major == "" { - // Fake client... - l.usePodStatus = ptr.To(true) - } else { - switch strings.Compare(info.Major, "1") { - case -1: - // Huh? - l.usePodStatus = ptr.To(false) - case 0: - // info.Minor may have a suffix which makes it larger than 28. - // We don't care about pre-releases here. - l.usePodStatus = ptr.To(strings.Compare("28", info.Minor) <= 0) - case 1: - // Kubernetes 2? Yeah! - l.usePodStatus = ptr.To(true) - } - } - } - } - - if *l.usePodStatus { - return Name(pod, podClaim) - } - - switch { - case podClaim.Source.ResourceClaimName != nil: - return podClaim.Source.ResourceClaimName, false, nil - case podClaim.Source.ResourceClaimTemplateName != nil: - name := pod.Name + "-" + podClaim.Name - return &name, true, nil - default: - return nil, false, fmt.Errorf(`pod "%s/%s", spec.resourceClaim %q: %w`, pod.Namespace, pod.Name, podClaim.Name, ErrAPIUnsupported) - } -} - -// IsForPod checks that the ResourceClaim is the one that -// was created for the Pod. It returns an error that is informative -// enough to be returned by the caller without adding further details -// about the Pod or ResourceClaim. -func IsForPod(pod *v1.Pod, claim *resourcev1alpha2.ResourceClaim) error { - // Checking the namespaces is just a precaution. The caller should - // never pass in a ResourceClaim that isn't from the same namespace as the - // Pod. 
- if claim.Namespace != pod.Namespace || !metav1.IsControlledBy(claim, pod) { - return fmt.Errorf("ResourceClaim %s/%s was not created for pod %s/%s (pod is not owner)", claim.Namespace, claim.Name, pod.Namespace, pod.Name) - } - return nil -} - -// IsReservedForPod checks whether a claim lists the pod as one of the objects -// that the claim was reserved for. -func IsReservedForPod(pod *v1.Pod, claim *resourcev1alpha2.ResourceClaim) bool { - for _, reserved := range claim.Status.ReservedFor { - if reserved.UID == pod.UID { - return true - } - } - return false -} - -// CanBeReserved checks whether the claim could be reserved for another object. -func CanBeReserved(claim *resourcev1alpha2.ResourceClaim) bool { - return claim.Status.Allocation.Shareable || - len(claim.Status.ReservedFor) == 0 -} diff --git a/vendor/k8s.io/kubelet/LICENSE b/vendor/k8s.io/kubelet/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/k8s.io/kubelet/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/k8s.io/kubelet/pkg/apis/dra/v1alpha2/api.pb.go b/vendor/k8s.io/kubelet/pkg/apis/dra/v1alpha2/api.pb.go deleted file mode 100644 index 9b0bd9cea8..0000000000 --- a/vendor/k8s.io/kubelet/pkg/apis/dra/v1alpha2/api.pb.go +++ /dev/null @@ -1,1312 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: api.proto - -package v1alpha2 - -import ( - context "context" - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type NodePrepareResourceRequest struct { - // The ResourceClaim namespace (ResourceClaim.meta.Namespace). - // This field is REQUIRED. - Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` - // The UID of the Resource claim (ResourceClaim.meta.UUID). - // This field is REQUIRED. - ClaimUid string `protobuf:"bytes,2,opt,name=claim_uid,json=claimUid,proto3" json:"claim_uid,omitempty"` - // The name of the Resource claim (ResourceClaim.meta.Name) - // This field is REQUIRED. 
- ClaimName string `protobuf:"bytes,3,opt,name=claim_name,json=claimName,proto3" json:"claim_name,omitempty"` - // Resource handle (AllocationResult.ResourceHandles[*].Data) - // This field is REQUIRED. - ResourceHandle string `protobuf:"bytes,4,opt,name=resource_handle,json=resourceHandle,proto3" json:"resource_handle,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodePrepareResourceRequest) Reset() { *m = NodePrepareResourceRequest{} } -func (*NodePrepareResourceRequest) ProtoMessage() {} -func (*NodePrepareResourceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{0} -} -func (m *NodePrepareResourceRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodePrepareResourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NodePrepareResourceRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *NodePrepareResourceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodePrepareResourceRequest.Merge(m, src) -} -func (m *NodePrepareResourceRequest) XXX_Size() int { - return m.Size() -} -func (m *NodePrepareResourceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodePrepareResourceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NodePrepareResourceRequest proto.InternalMessageInfo - -func (m *NodePrepareResourceRequest) GetNamespace() string { - if m != nil { - return m.Namespace - } - return "" -} - -func (m *NodePrepareResourceRequest) GetClaimUid() string { - if m != nil { - return m.ClaimUid - } - return "" -} - -func (m *NodePrepareResourceRequest) GetClaimName() string { - if m != nil { - return m.ClaimName - } - return "" -} - -func (m *NodePrepareResourceRequest) GetResourceHandle() string { - if m != nil { - return m.ResourceHandle - } - return "" -} - -type NodePrepareResourceResponse struct { - // These are the additional devices that kubelet must - // make available via the container runtime. A resource - // may have zero or more devices. 
- CdiDevices []string `protobuf:"bytes,1,rep,name=cdi_devices,json=cdiDevices,proto3" json:"cdi_devices,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodePrepareResourceResponse) Reset() { *m = NodePrepareResourceResponse{} } -func (*NodePrepareResourceResponse) ProtoMessage() {} -func (*NodePrepareResourceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{1} -} -func (m *NodePrepareResourceResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodePrepareResourceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NodePrepareResourceResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *NodePrepareResourceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodePrepareResourceResponse.Merge(m, src) -} -func (m *NodePrepareResourceResponse) XXX_Size() int { - return m.Size() -} -func (m *NodePrepareResourceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodePrepareResourceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodePrepareResourceResponse proto.InternalMessageInfo - -func (m *NodePrepareResourceResponse) GetCdiDevices() []string { - if m != nil { - return m.CdiDevices - } - return nil -} - -type NodeUnprepareResourceRequest struct { - // The ResourceClaim namespace (ResourceClaim.meta.Namespace). - // This field is REQUIRED. - Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` - // The UID of the Resource claim (ResourceClaim.meta.UUID). - // This field is REQUIRED. - ClaimUid string `protobuf:"bytes,2,opt,name=claim_uid,json=claimUid,proto3" json:"claim_uid,omitempty"` - // The name of the Resource claim (ResourceClaim.meta.Name) - // This field is REQUIRED. - ClaimName string `protobuf:"bytes,3,opt,name=claim_name,json=claimName,proto3" json:"claim_name,omitempty"` - // Resource handle (AllocationResult.ResourceHandles[*].Data) - // This field is REQUIRED. 
- ResourceHandle string `protobuf:"bytes,4,opt,name=resource_handle,json=resourceHandle,proto3" json:"resource_handle,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeUnprepareResourceRequest) Reset() { *m = NodeUnprepareResourceRequest{} } -func (*NodeUnprepareResourceRequest) ProtoMessage() {} -func (*NodeUnprepareResourceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{2} -} -func (m *NodeUnprepareResourceRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodeUnprepareResourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NodeUnprepareResourceRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *NodeUnprepareResourceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeUnprepareResourceRequest.Merge(m, src) -} -func (m *NodeUnprepareResourceRequest) XXX_Size() int { - return m.Size() -} -func (m *NodeUnprepareResourceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeUnprepareResourceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeUnprepareResourceRequest proto.InternalMessageInfo - -func (m *NodeUnprepareResourceRequest) GetNamespace() string { - if m != nil { - return m.Namespace - } - return "" -} - -func (m *NodeUnprepareResourceRequest) GetClaimUid() string { - if m != nil { - return m.ClaimUid - } - return "" -} - -func (m *NodeUnprepareResourceRequest) GetClaimName() string { - if m != nil { - return m.ClaimName - } - return "" -} - -func (m *NodeUnprepareResourceRequest) GetResourceHandle() string { - if m != nil { - return m.ResourceHandle - } - return "" -} - -type NodeUnprepareResourceResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeUnprepareResourceResponse) Reset() { *m = NodeUnprepareResourceResponse{} } -func (*NodeUnprepareResourceResponse) ProtoMessage() {} -func (*NodeUnprepareResourceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{3} -} -func (m *NodeUnprepareResourceResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodeUnprepareResourceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NodeUnprepareResourceResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *NodeUnprepareResourceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeUnprepareResourceResponse.Merge(m, src) -} -func (m *NodeUnprepareResourceResponse) XXX_Size() int { - return m.Size() -} -func (m *NodeUnprepareResourceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeUnprepareResourceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeUnprepareResourceResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*NodePrepareResourceRequest)(nil), "v1alpha2.NodePrepareResourceRequest") - proto.RegisterType((*NodePrepareResourceResponse)(nil), "v1alpha2.NodePrepareResourceResponse") - proto.RegisterType((*NodeUnprepareResourceRequest)(nil), "v1alpha2.NodeUnprepareResourceRequest") - proto.RegisterType((*NodeUnprepareResourceResponse)(nil), "v1alpha2.NodeUnprepareResourceResponse") -} - -func init() { 
proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) } - -var fileDescriptor_00212fb1f9d3bf1c = []byte{ - // 369 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x52, 0xb1, 0x6e, 0xe2, 0x40, - 0x10, 0x65, 0x0f, 0x74, 0xc2, 0x7b, 0xd2, 0x9d, 0xb4, 0xa7, 0x93, 0x2c, 0x03, 0x06, 0x59, 0xdc, - 0x41, 0x73, 0xb6, 0x8e, 0x6b, 0xae, 0xba, 0x02, 0xa5, 0x48, 0x85, 0x22, 0x4b, 0x34, 0x69, 0xd0, - 0xda, 0x3b, 0x31, 0x1b, 0x6c, 0xef, 0xc6, 0x6b, 0x53, 0xe7, 0x13, 0xf2, 0x07, 0x51, 0xfe, 0x86, - 0x32, 0x25, 0x65, 0x70, 0x7e, 0x24, 0x62, 0x1d, 0x2b, 0x8a, 0x04, 0xa2, 0x4d, 0xb7, 0xf3, 0xe6, - 0xcd, 0xbc, 0x37, 0xb3, 0x83, 0x0d, 0x2a, 0xb9, 0x2b, 0x33, 0x91, 0x0b, 0xd2, 0x5e, 0xff, 0xa1, - 0xb1, 0x5c, 0xd2, 0x89, 0xf5, 0x3b, 0xe2, 0xf9, 0xb2, 0x08, 0xdc, 0x50, 0x24, 0x5e, 0x24, 0x22, - 0xe1, 0x69, 0x42, 0x50, 0x5c, 0xe9, 0x48, 0x07, 0xfa, 0x55, 0x15, 0x3a, 0xf7, 0x08, 0x5b, 0x33, - 0xc1, 0xe0, 0x22, 0x03, 0x49, 0x33, 0xf0, 0x41, 0x89, 0x22, 0x0b, 0xc1, 0x87, 0x9b, 0x02, 0x54, - 0x4e, 0xba, 0xd8, 0x48, 0x69, 0x02, 0x4a, 0xd2, 0x10, 0x4c, 0x34, 0x40, 0x63, 0xc3, 0x7f, 0x03, - 0x48, 0x07, 0x1b, 0x61, 0x4c, 0x79, 0xb2, 0x28, 0x38, 0x33, 0x3f, 0xe9, 0x6c, 0x5b, 0x03, 0x73, - 0xce, 0x48, 0x0f, 0xe3, 0x2a, 0xb9, 0xe7, 0x9b, 0xcd, 0xaa, 0x56, 0x23, 0x33, 0x9a, 0x00, 0x19, - 0xe1, 0x6f, 0xd9, 0xab, 0xd8, 0x62, 0x49, 0x53, 0x16, 0x83, 0xd9, 0xd2, 0x9c, 0xaf, 0x35, 0x7c, - 0xae, 0x51, 0xe7, 0x3f, 0xee, 0x1c, 0x34, 0xa8, 0xa4, 0x48, 0x15, 0x90, 0x3e, 0xfe, 0x12, 0x32, - 0xbe, 0x60, 0xb0, 0xe6, 0x21, 0x28, 0x13, 0x0d, 0x9a, 0x63, 0xc3, 0xc7, 0x21, 0xe3, 0x67, 0x15, - 0xe2, 0x3c, 0x20, 0xdc, 0xdd, 0x37, 0x98, 0xa7, 0xf2, 0xc3, 0xce, 0xd8, 0xc7, 0xbd, 0x23, 0x16, - 0xab, 0x29, 0x27, 0x5b, 0x84, 0x5b, 0x7b, 0x06, 0x61, 0xf8, 0xfb, 0x81, 0x6d, 0x90, 0xa1, 0x5b, - 0x1f, 0x80, 0x7b, 0xfc, 0x37, 0xad, 0x9f, 0x27, 0x58, 0x95, 0x98, 0xd3, 0x20, 0xd7, 0xf8, 0xc7, - 0x41, 0x3f, 0xe4, 0xd7, 0xfb, 0x0e, 0xc7, 0x76, 0x6a, 0x8d, 0x4e, 0xf2, 0x6a, 0xad, 0xe9, 0x74, - 0xb3, 0xb3, 0xd1, 0x76, 0x67, 0x37, 0x6e, 0x4b, 0x1b, 0x6d, 0x4a, 0x1b, 0x3d, 0x96, 0x36, 0x7a, - 0x2a, 0x6d, 0x74, 0xf7, 0x6c, 0x37, 0x2e, 0x87, 0xab, 0x7f, 0xca, 0xe5, 0xc2, 0x5b, 0x15, 0x01, - 0xc4, 0x90, 0x7b, 0x72, 0x15, 0x79, 0x54, 0x72, 0xe5, 0xb1, 0x8c, 0x7a, 0xb5, 0x46, 0xf0, 0x59, - 0x1f, 0xf3, 0xdf, 0x97, 0x00, 0x00, 0x00, 0xff, 0xff, 0x89, 0x2f, 0x77, 0x8e, 0x12, 0x03, 0x00, - 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// NodeClient is the client API for Node service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type NodeClient interface { - NodePrepareResource(ctx context.Context, in *NodePrepareResourceRequest, opts ...grpc.CallOption) (*NodePrepareResourceResponse, error) - NodeUnprepareResource(ctx context.Context, in *NodeUnprepareResourceRequest, opts ...grpc.CallOption) (*NodeUnprepareResourceResponse, error) -} - -type nodeClient struct { - cc *grpc.ClientConn -} - -func NewNodeClient(cc *grpc.ClientConn) NodeClient { - return &nodeClient{cc} -} - -func (c *nodeClient) NodePrepareResource(ctx context.Context, in *NodePrepareResourceRequest, opts ...grpc.CallOption) (*NodePrepareResourceResponse, error) { - out := new(NodePrepareResourceResponse) - err := c.cc.Invoke(ctx, "/v1alpha2.Node/NodePrepareResource", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *nodeClient) NodeUnprepareResource(ctx context.Context, in *NodeUnprepareResourceRequest, opts ...grpc.CallOption) (*NodeUnprepareResourceResponse, error) { - out := new(NodeUnprepareResourceResponse) - err := c.cc.Invoke(ctx, "/v1alpha2.Node/NodeUnprepareResource", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// NodeServer is the server API for Node service. -type NodeServer interface { - NodePrepareResource(context.Context, *NodePrepareResourceRequest) (*NodePrepareResourceResponse, error) - NodeUnprepareResource(context.Context, *NodeUnprepareResourceRequest) (*NodeUnprepareResourceResponse, error) -} - -// UnimplementedNodeServer can be embedded to have forward compatible implementations. -type UnimplementedNodeServer struct { -} - -func (*UnimplementedNodeServer) NodePrepareResource(ctx context.Context, req *NodePrepareResourceRequest) (*NodePrepareResourceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NodePrepareResource not implemented") -} -func (*UnimplementedNodeServer) NodeUnprepareResource(ctx context.Context, req *NodeUnprepareResourceRequest) (*NodeUnprepareResourceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NodeUnprepareResource not implemented") -} - -func RegisterNodeServer(s *grpc.Server, srv NodeServer) { - s.RegisterService(&_Node_serviceDesc, srv) -} - -func _Node_NodePrepareResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodePrepareResourceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodePrepareResource(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/v1alpha2.Node/NodePrepareResource", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodePrepareResource(ctx, req.(*NodePrepareResourceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Node_NodeUnprepareResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeUnprepareResourceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodeUnprepareResource(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/v1alpha2.Node/NodeUnprepareResource", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodeUnprepareResource(ctx, req.(*NodeUnprepareResourceRequest)) - } - return interceptor(ctx, in, info, 
handler) -} - -var _Node_serviceDesc = grpc.ServiceDesc{ - ServiceName: "v1alpha2.Node", - HandlerType: (*NodeServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "NodePrepareResource", - Handler: _Node_NodePrepareResource_Handler, - }, - { - MethodName: "NodeUnprepareResource", - Handler: _Node_NodeUnprepareResource_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "api.proto", -} - -func (m *NodePrepareResourceRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NodePrepareResourceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodePrepareResourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ResourceHandle) > 0 { - i -= len(m.ResourceHandle) - copy(dAtA[i:], m.ResourceHandle) - i = encodeVarintApi(dAtA, i, uint64(len(m.ResourceHandle))) - i-- - dAtA[i] = 0x22 - } - if len(m.ClaimName) > 0 { - i -= len(m.ClaimName) - copy(dAtA[i:], m.ClaimName) - i = encodeVarintApi(dAtA, i, uint64(len(m.ClaimName))) - i-- - dAtA[i] = 0x1a - } - if len(m.ClaimUid) > 0 { - i -= len(m.ClaimUid) - copy(dAtA[i:], m.ClaimUid) - i = encodeVarintApi(dAtA, i, uint64(len(m.ClaimUid))) - i-- - dAtA[i] = 0x12 - } - if len(m.Namespace) > 0 { - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintApi(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *NodePrepareResourceResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NodePrepareResourceResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodePrepareResourceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.CdiDevices) > 0 { - for iNdEx := len(m.CdiDevices) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.CdiDevices[iNdEx]) - copy(dAtA[i:], m.CdiDevices[iNdEx]) - i = encodeVarintApi(dAtA, i, uint64(len(m.CdiDevices[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *NodeUnprepareResourceRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NodeUnprepareResourceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeUnprepareResourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ResourceHandle) > 0 { - i -= len(m.ResourceHandle) - copy(dAtA[i:], m.ResourceHandle) - i = encodeVarintApi(dAtA, i, uint64(len(m.ResourceHandle))) - i-- - dAtA[i] = 0x22 - } - if len(m.ClaimName) > 0 { - i -= len(m.ClaimName) - copy(dAtA[i:], m.ClaimName) - i = encodeVarintApi(dAtA, i, uint64(len(m.ClaimName))) - i-- - dAtA[i] = 0x1a - } - if len(m.ClaimUid) > 0 { - i -= len(m.ClaimUid) - copy(dAtA[i:], m.ClaimUid) - i = encodeVarintApi(dAtA, i, uint64(len(m.ClaimUid))) - i-- - dAtA[i] = 0x12 - } - if len(m.Namespace) > 0 { - i -= len(m.Namespace) - copy(dAtA[i:], 
m.Namespace) - i = encodeVarintApi(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *NodeUnprepareResourceResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NodeUnprepareResourceResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeUnprepareResourceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintApi(dAtA []byte, offset int, v uint64) int { - offset -= sovApi(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *NodePrepareResourceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Namespace) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - l = len(m.ClaimUid) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - l = len(m.ClaimName) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - l = len(m.ResourceHandle) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - return n -} - -func (m *NodePrepareResourceResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.CdiDevices) > 0 { - for _, s := range m.CdiDevices { - l = len(s) - n += 1 + l + sovApi(uint64(l)) - } - } - return n -} - -func (m *NodeUnprepareResourceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Namespace) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - l = len(m.ClaimUid) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - l = len(m.ClaimName) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - l = len(m.ResourceHandle) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - return n -} - -func (m *NodeUnprepareResourceResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovApi(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozApi(x uint64) (n int) { - return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *NodePrepareResourceRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NodePrepareResourceRequest{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `ClaimUid:` + fmt.Sprintf("%v", this.ClaimUid) + `,`, - `ClaimName:` + fmt.Sprintf("%v", this.ClaimName) + `,`, - `ResourceHandle:` + fmt.Sprintf("%v", this.ResourceHandle) + `,`, - `}`, - }, "") - return s -} -func (this *NodePrepareResourceResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NodePrepareResourceResponse{`, - `CdiDevices:` + fmt.Sprintf("%v", this.CdiDevices) + `,`, - `}`, - }, "") - return s -} -func (this *NodeUnprepareResourceRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NodeUnprepareResourceRequest{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `ClaimUid:` + fmt.Sprintf("%v", this.ClaimUid) + `,`, - `ClaimName:` + fmt.Sprintf("%v", this.ClaimName) + `,`, - `ResourceHandle:` + fmt.Sprintf("%v", this.ResourceHandle) + `,`, - `}`, - }, "") - return s -} -func (this *NodeUnprepareResourceResponse) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&NodeUnprepareResourceResponse{`, - `}`, - }, "") - return s -} -func valueToStringApi(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *NodePrepareResourceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodePrepareResourceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodePrepareResourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClaimUid", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClaimUid = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClaimName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClaimName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceHandle", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - 
return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceHandle = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NodePrepareResourceResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodePrepareResourceResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodePrepareResourceResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CdiDevices", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CdiDevices = append(m.CdiDevices, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NodeUnprepareResourceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodeUnprepareResourceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodeUnprepareResourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClaimUid", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClaimUid = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClaimName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClaimName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceHandle", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceHandle = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NodeUnprepareResourceResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodeUnprepareResourceResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodeUnprepareResourceResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipApi(dAtA []byte) (n 
int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthApi - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupApi - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthApi - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupApi = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/k8s.io/kubelet/pkg/apis/dra/v1alpha2/api.proto b/vendor/k8s.io/kubelet/pkg/apis/dra/v1alpha2/api.proto deleted file mode 100644 index a7a2b1db0a..0000000000 --- a/vendor/k8s.io/kubelet/pkg/apis/dra/v1alpha2/api.proto +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// To regenerate api.pb.go run `hack/update-codegen.sh protobindings` - -syntax = "proto3"; - -package v1alpha2; -option go_package = "k8s.io/kubelet/pkg/apis/dra/v1alpha2"; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; - -option (gogoproto.goproto_stringer_all) = false; -option (gogoproto.stringer_all) = true; -option (gogoproto.goproto_getters_all) = true; -option (gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.goproto_unrecognized_all) = false; - -service Node { - rpc NodePrepareResource (NodePrepareResourceRequest) - returns (NodePrepareResourceResponse) {} - - rpc NodeUnprepareResource (NodeUnprepareResourceRequest) - returns (NodeUnprepareResourceResponse) {} -} - -message NodePrepareResourceRequest { - // The ResourceClaim namespace (ResourceClaim.meta.Namespace). - // This field is REQUIRED. - string namespace = 1; - // The UID of the Resource claim (ResourceClaim.meta.UUID). - // This field is REQUIRED. - string claim_uid = 2; - // The name of the Resource claim (ResourceClaim.meta.Name) - // This field is REQUIRED. 
- string claim_name = 3; - // Resource handle (AllocationResult.ResourceHandles[*].Data) - // This field is REQUIRED. - string resource_handle = 4; -} - -message NodePrepareResourceResponse { - // These are the additional devices that kubelet must - // make available via the container runtime. A resource - // may have zero or more devices. - repeated string cdi_devices = 1; -} - -message NodeUnprepareResourceRequest { - // The ResourceClaim namespace (ResourceClaim.meta.Namespace). - // This field is REQUIRED. - string namespace = 1; - // The UID of the Resource claim (ResourceClaim.meta.UUID). - // This field is REQUIRED. - string claim_uid = 2; - // The name of the Resource claim (ResourceClaim.meta.Name) - // This field is REQUIRED. - string claim_name = 3; - // Resource handle (AllocationResult.ResourceHandles[*].Data) - // This field is REQUIRED. - string resource_handle = 4; -} - -message NodeUnprepareResourceResponse { - // Intentionally empty. -} diff --git a/vendor/k8s.io/kubelet/pkg/apis/dra/v1alpha3/api.pb.go b/vendor/k8s.io/kubelet/pkg/apis/dra/v1alpha3/api.pb.go deleted file mode 100644 index 92233f98ee..0000000000 --- a/vendor/k8s.io/kubelet/pkg/apis/dra/v1alpha3/api.pb.go +++ /dev/null @@ -1,2134 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: api.proto - -package v1alpha3 - -import ( - context "context" - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type NodePrepareResourcesRequest struct { - // The list of ResourceClaims that are to be prepared. 
- Claims []*Claim `protobuf:"bytes,1,rep,name=claims,proto3" json:"claims,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodePrepareResourcesRequest) Reset() { *m = NodePrepareResourcesRequest{} } -func (*NodePrepareResourcesRequest) ProtoMessage() {} -func (*NodePrepareResourcesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{0} -} -func (m *NodePrepareResourcesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodePrepareResourcesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NodePrepareResourcesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *NodePrepareResourcesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodePrepareResourcesRequest.Merge(m, src) -} -func (m *NodePrepareResourcesRequest) XXX_Size() int { - return m.Size() -} -func (m *NodePrepareResourcesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodePrepareResourcesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NodePrepareResourcesRequest proto.InternalMessageInfo - -func (m *NodePrepareResourcesRequest) GetClaims() []*Claim { - if m != nil { - return m.Claims - } - return nil -} - -type NodePrepareResourcesResponse struct { - // The ResourceClaims for which preparation was done - // or attempted, with claim_uid as key. - // - // It is an error if some claim listed in NodePrepareResourcesRequest - // does not get prepared. NodePrepareResources - // will be called again for those that are missing. - Claims map[string]*NodePrepareResourceResponse `protobuf:"bytes,1,rep,name=claims,proto3" json:"claims,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodePrepareResourcesResponse) Reset() { *m = NodePrepareResourcesResponse{} } -func (*NodePrepareResourcesResponse) ProtoMessage() {} -func (*NodePrepareResourcesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{1} -} -func (m *NodePrepareResourcesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodePrepareResourcesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NodePrepareResourcesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *NodePrepareResourcesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodePrepareResourcesResponse.Merge(m, src) -} -func (m *NodePrepareResourcesResponse) XXX_Size() int { - return m.Size() -} -func (m *NodePrepareResourcesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodePrepareResourcesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodePrepareResourcesResponse proto.InternalMessageInfo - -func (m *NodePrepareResourcesResponse) GetClaims() map[string]*NodePrepareResourceResponse { - if m != nil { - return m.Claims - } - return nil -} - -type NodePrepareResourceResponse struct { - // These are the additional devices that kubelet must - // make available via the container runtime. A resource - // may have zero or more devices. 
- CDIDevices []string `protobuf:"bytes,1,rep,name=cdi_devices,json=cdiDevices,proto3" json:"cdi_devices,omitempty"` - // If non-empty, preparing the ResourceClaim failed. - // cdi_devices is ignored in that case. - Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodePrepareResourceResponse) Reset() { *m = NodePrepareResourceResponse{} } -func (*NodePrepareResourceResponse) ProtoMessage() {} -func (*NodePrepareResourceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{2} -} -func (m *NodePrepareResourceResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodePrepareResourceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NodePrepareResourceResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *NodePrepareResourceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodePrepareResourceResponse.Merge(m, src) -} -func (m *NodePrepareResourceResponse) XXX_Size() int { - return m.Size() -} -func (m *NodePrepareResourceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodePrepareResourceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodePrepareResourceResponse proto.InternalMessageInfo - -func (m *NodePrepareResourceResponse) GetCDIDevices() []string { - if m != nil { - return m.CDIDevices - } - return nil -} - -func (m *NodePrepareResourceResponse) GetError() string { - if m != nil { - return m.Error - } - return "" -} - -type NodeUnprepareResourcesRequest struct { - // The list of ResourceClaims that are to be unprepared. - Claims []*Claim `protobuf:"bytes,1,rep,name=claims,proto3" json:"claims,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeUnprepareResourcesRequest) Reset() { *m = NodeUnprepareResourcesRequest{} } -func (*NodeUnprepareResourcesRequest) ProtoMessage() {} -func (*NodeUnprepareResourcesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{3} -} -func (m *NodeUnprepareResourcesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodeUnprepareResourcesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NodeUnprepareResourcesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *NodeUnprepareResourcesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeUnprepareResourcesRequest.Merge(m, src) -} -func (m *NodeUnprepareResourcesRequest) XXX_Size() int { - return m.Size() -} -func (m *NodeUnprepareResourcesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeUnprepareResourcesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeUnprepareResourcesRequest proto.InternalMessageInfo - -func (m *NodeUnprepareResourcesRequest) GetClaims() []*Claim { - if m != nil { - return m.Claims - } - return nil -} - -type NodeUnprepareResourcesResponse struct { - // The ResourceClaims for which preparation was reverted. - // The same rules as for NodePrepareResourcesResponse.claims - // apply. 
- Claims map[string]*NodeUnprepareResourceResponse `protobuf:"bytes,1,rep,name=claims,proto3" json:"claims,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeUnprepareResourcesResponse) Reset() { *m = NodeUnprepareResourcesResponse{} } -func (*NodeUnprepareResourcesResponse) ProtoMessage() {} -func (*NodeUnprepareResourcesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{4} -} -func (m *NodeUnprepareResourcesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodeUnprepareResourcesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NodeUnprepareResourcesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *NodeUnprepareResourcesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeUnprepareResourcesResponse.Merge(m, src) -} -func (m *NodeUnprepareResourcesResponse) XXX_Size() int { - return m.Size() -} -func (m *NodeUnprepareResourcesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeUnprepareResourcesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeUnprepareResourcesResponse proto.InternalMessageInfo - -func (m *NodeUnprepareResourcesResponse) GetClaims() map[string]*NodeUnprepareResourceResponse { - if m != nil { - return m.Claims - } - return nil -} - -type NodeUnprepareResourceResponse struct { - // If non-empty, unpreparing the ResourceClaim failed. - Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeUnprepareResourceResponse) Reset() { *m = NodeUnprepareResourceResponse{} } -func (*NodeUnprepareResourceResponse) ProtoMessage() {} -func (*NodeUnprepareResourceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{5} -} -func (m *NodeUnprepareResourceResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodeUnprepareResourceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NodeUnprepareResourceResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *NodeUnprepareResourceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeUnprepareResourceResponse.Merge(m, src) -} -func (m *NodeUnprepareResourceResponse) XXX_Size() int { - return m.Size() -} -func (m *NodeUnprepareResourceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeUnprepareResourceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeUnprepareResourceResponse proto.InternalMessageInfo - -func (m *NodeUnprepareResourceResponse) GetError() string { - if m != nil { - return m.Error - } - return "" -} - -type Claim struct { - // The ResourceClaim namespace (ResourceClaim.meta.Namespace). - // This field is REQUIRED. - Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` - // The UID of the Resource claim (ResourceClaim.meta.UUID). - // This field is REQUIRED. 
- Uid string `protobuf:"bytes,2,opt,name=uid,proto3" json:"uid,omitempty"` - // The name of the Resource claim (ResourceClaim.meta.Name) - // This field is REQUIRED. - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - // Resource handle (AllocationResult.ResourceHandles[*].Data) - // This field is REQUIRED. - ResourceHandle string `protobuf:"bytes,4,opt,name=resource_handle,json=resourceHandle,proto3" json:"resource_handle,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Claim) Reset() { *m = Claim{} } -func (*Claim) ProtoMessage() {} -func (*Claim) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{6} -} -func (m *Claim) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Claim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Claim.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Claim) XXX_Merge(src proto.Message) { - xxx_messageInfo_Claim.Merge(m, src) -} -func (m *Claim) XXX_Size() int { - return m.Size() -} -func (m *Claim) XXX_DiscardUnknown() { - xxx_messageInfo_Claim.DiscardUnknown(m) -} - -var xxx_messageInfo_Claim proto.InternalMessageInfo - -func (m *Claim) GetNamespace() string { - if m != nil { - return m.Namespace - } - return "" -} - -func (m *Claim) GetUid() string { - if m != nil { - return m.Uid - } - return "" -} - -func (m *Claim) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Claim) GetResourceHandle() string { - if m != nil { - return m.ResourceHandle - } - return "" -} - -func init() { - proto.RegisterType((*NodePrepareResourcesRequest)(nil), "v1alpha3.NodePrepareResourcesRequest") - proto.RegisterType((*NodePrepareResourcesResponse)(nil), "v1alpha3.NodePrepareResourcesResponse") - proto.RegisterMapType((map[string]*NodePrepareResourceResponse)(nil), "v1alpha3.NodePrepareResourcesResponse.ClaimsEntry") - proto.RegisterType((*NodePrepareResourceResponse)(nil), "v1alpha3.NodePrepareResourceResponse") - proto.RegisterType((*NodeUnprepareResourcesRequest)(nil), "v1alpha3.NodeUnprepareResourcesRequest") - proto.RegisterType((*NodeUnprepareResourcesResponse)(nil), "v1alpha3.NodeUnprepareResourcesResponse") - proto.RegisterMapType((map[string]*NodeUnprepareResourceResponse)(nil), "v1alpha3.NodeUnprepareResourcesResponse.ClaimsEntry") - proto.RegisterType((*NodeUnprepareResourceResponse)(nil), "v1alpha3.NodeUnprepareResourceResponse") - proto.RegisterType((*Claim)(nil), "v1alpha3.Claim") -} - -func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) } - -var fileDescriptor_00212fb1f9d3bf1c = []byte{ - // 500 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4d, 0x6f, 0xd3, 0x40, - 0x10, 0xcd, 0x36, 0x49, 0x45, 0x26, 0x52, 0x8b, 0x56, 0x15, 0xb2, 0x42, 0x31, 0x91, 0x45, 0x49, - 0x2e, 0xd8, 0x22, 0x05, 0xa9, 0x02, 0x71, 0x49, 0x0b, 0x2a, 0x08, 0x21, 0x64, 0x89, 0x0b, 0x97, - 0xb2, 0xb6, 0x07, 0xc7, 0x8a, 0xe3, 0x35, 0xbb, 0x76, 0xa4, 0xde, 0xf8, 0x09, 0xfc, 0xac, 0x1e, - 0x38, 0x20, 0x4e, 0x9c, 0x2a, 0x6a, 0xfe, 0x08, 0xf2, 0xda, 0x4e, 0x3f, 0xe4, 0x34, 0x95, 0x7a, - 0x9b, 0x7d, 0xbb, 0x33, 0x6f, 0xe6, 0xbd, 0xb1, 0xa1, 0xc3, 0xe2, 0xc0, 0x8c, 0x05, 0x4f, 0x38, - 0xbd, 0x33, 0x7f, 0xca, 0xc2, 0x78, 0xc2, 0x76, 0x7b, 0x4f, 0xfc, 0x20, 0x99, 0xa4, 0x8e, 
0xe9, - 0xf2, 0x99, 0xe5, 0x73, 0x9f, 0x5b, 0xea, 0x81, 0x93, 0x7e, 0x55, 0x27, 0x75, 0x50, 0x51, 0x91, - 0x68, 0xbc, 0x81, 0xfb, 0x1f, 0xb8, 0x87, 0x1f, 0x05, 0xc6, 0x4c, 0xa0, 0x8d, 0x92, 0xa7, 0xc2, - 0x45, 0x69, 0xe3, 0xb7, 0x14, 0x65, 0x42, 0x07, 0xb0, 0xee, 0x86, 0x2c, 0x98, 0x49, 0x8d, 0xf4, - 0x9b, 0xc3, 0xee, 0x68, 0xd3, 0xac, 0x88, 0xcc, 0xfd, 0x1c, 0xb7, 0xcb, 0x6b, 0xe3, 0x27, 0x81, - 0xed, 0xfa, 0x42, 0x32, 0xe6, 0x91, 0x44, 0xfa, 0xee, 0x4a, 0xa5, 0xd1, 0x79, 0xa5, 0xeb, 0xf2, - 0x0a, 0x1a, 0xf9, 0x3a, 0x4a, 0xc4, 0x71, 0x45, 0xd6, 0xfb, 0x02, 0xdd, 0x0b, 0x30, 0xbd, 0x0b, - 0xcd, 0x29, 0x1e, 0x6b, 0xa4, 0x4f, 0x86, 0x1d, 0x3b, 0x0f, 0xe9, 0x4b, 0x68, 0xcf, 0x59, 0x98, - 0xa2, 0xb6, 0xd6, 0x27, 0xc3, 0xee, 0x68, 0xe7, 0x5a, 0xae, 0x8a, 0xca, 0x2e, 0x72, 0x5e, 0xac, - 0xed, 0x11, 0xc3, 0xab, 0x95, 0x65, 0x31, 0x8c, 0x05, 0x5d, 0xd7, 0x0b, 0x8e, 0x3c, 0x9c, 0x07, - 0x2e, 0x16, 0x13, 0x75, 0xc6, 0x1b, 0xd9, 0xe9, 0x43, 0xd8, 0x3f, 0x78, 0x7b, 0x50, 0xa0, 0x36, - 0xb8, 0x5e, 0x50, 0xc6, 0x74, 0x0b, 0xda, 0x28, 0x04, 0x17, 0xaa, 0xa1, 0x8e, 0x5d, 0x1c, 0x8c, - 0x43, 0x78, 0x90, 0xb3, 0x7c, 0x8a, 0xe2, 0xdb, 0xca, 0xff, 0x9b, 0x80, 0xbe, 0xac, 0x54, 0xd9, - 0xf3, 0xfb, 0x2b, 0xb5, 0x9e, 0x5d, 0x16, 0x65, 0x79, 0x66, 0xad, 0x05, 0xce, 0x2a, 0x0b, 0x5e, - 0x5d, 0xb6, 0x60, 0xb0, 0x82, 0xad, 0xce, 0x84, 0xe7, 0x4b, 0xe4, 0x59, 0x8c, 0xb4, 0x50, 0x95, - 0x5c, 0x54, 0x35, 0x81, 0xb6, 0x6a, 0x8d, 0x6e, 0x43, 0x27, 0x62, 0x33, 0x94, 0x31, 0x73, 0xb1, - 0x7c, 0x72, 0x0e, 0xe4, 0x2d, 0xa7, 0x81, 0x57, 0x1a, 0x92, 0x87, 0x94, 0x42, 0x2b, 0xbf, 0xd6, - 0x9a, 0x0a, 0x52, 0x31, 0x1d, 0xc0, 0xa6, 0x28, 0x69, 0x8f, 0x26, 0x2c, 0xf2, 0x42, 0xd4, 0x5a, - 0xea, 0x7a, 0xa3, 0x82, 0x0f, 0x15, 0x3a, 0x3a, 0x25, 0xd0, 0xca, 0xbb, 0xa5, 0x3e, 0x6c, 0xd5, - 0x2d, 0x34, 0xdd, 0x59, 0xb5, 0xf0, 0xca, 0xf2, 0xde, 0xe3, 0x9b, 0x7d, 0x17, 0x46, 0x83, 0xce, - 0xe0, 0x5e, 0xbd, 0x71, 0x74, 0xb0, 0xda, 0xda, 0x82, 0x6c, 0x78, 0xd3, 0x1d, 0x30, 0x1a, 0xe3, - 0xf1, 0xc9, 0x99, 0x4e, 0xfe, 0x9c, 0xe9, 0x8d, 0xef, 0x99, 0x4e, 0x4e, 0x32, 0x9d, 0xfc, 0xca, - 0x74, 0xf2, 0x37, 0xd3, 0xc9, 0x8f, 0x7f, 0x7a, 0xe3, 0xf3, 0xa3, 0xe9, 0x9e, 0x34, 0x03, 0x6e, - 0x4d, 0x53, 0x07, 0x43, 0x4c, 0xac, 0x78, 0xea, 0x5b, 0x2c, 0x0e, 0xa4, 0xe5, 0x09, 0x66, 0x55, - 0x24, 0xce, 0xba, 0xfa, 0xe9, 0xec, 0xfe, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x42, 0xff, 0x15, 0x6b, - 0xba, 0x04, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// NodeClient is the client API for Node service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type NodeClient interface { - // NodePrepareResources prepares several ResourceClaims - // for use on the node. If an error is returned, the - // response is ignored. Failures for individual claims - // can be reported inside NodePrepareResourcesResponse. - NodePrepareResources(ctx context.Context, in *NodePrepareResourcesRequest, opts ...grpc.CallOption) (*NodePrepareResourcesResponse, error) - // NodeUnprepareResources is the opposite of NodePrepareResources. 
- // The same error handling rules apply. - NodeUnprepareResources(ctx context.Context, in *NodeUnprepareResourcesRequest, opts ...grpc.CallOption) (*NodeUnprepareResourcesResponse, error) -} - -type nodeClient struct { - cc *grpc.ClientConn -} - -func NewNodeClient(cc *grpc.ClientConn) NodeClient { - return &nodeClient{cc} -} - -func (c *nodeClient) NodePrepareResources(ctx context.Context, in *NodePrepareResourcesRequest, opts ...grpc.CallOption) (*NodePrepareResourcesResponse, error) { - out := new(NodePrepareResourcesResponse) - err := c.cc.Invoke(ctx, "/v1alpha3.Node/NodePrepareResources", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *nodeClient) NodeUnprepareResources(ctx context.Context, in *NodeUnprepareResourcesRequest, opts ...grpc.CallOption) (*NodeUnprepareResourcesResponse, error) { - out := new(NodeUnprepareResourcesResponse) - err := c.cc.Invoke(ctx, "/v1alpha3.Node/NodeUnprepareResources", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// NodeServer is the server API for Node service. -type NodeServer interface { - // NodePrepareResources prepares several ResourceClaims - // for use on the node. If an error is returned, the - // response is ignored. Failures for individual claims - // can be reported inside NodePrepareResourcesResponse. - NodePrepareResources(context.Context, *NodePrepareResourcesRequest) (*NodePrepareResourcesResponse, error) - // NodeUnprepareResources is the opposite of NodePrepareResources. - // The same error handling rules apply. - NodeUnprepareResources(context.Context, *NodeUnprepareResourcesRequest) (*NodeUnprepareResourcesResponse, error) -} - -// UnimplementedNodeServer can be embedded to have forward compatible implementations.
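As a companion sketch, this is one hypothetical way a DRA plugin could implement the v1alpha3 NodeServer interface above, embedding UnimplementedNodeServer (defined next) for forward compatibility as the comment suggests. Per the NodePrepareResourcesResponse contract, results are keyed by claim UID and per-claim failures are reported via the Error field rather than failing the whole RPC; the socket path and CDI device name are illustrative placeholders:

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	drav1alpha3 "k8s.io/kubelet/pkg/apis/dra/v1alpha3"
)

// examplePlugin embeds UnimplementedNodeServer so that RPCs added to the
// Node service later return codes.Unimplemented instead of breaking the build.
type examplePlugin struct {
	drav1alpha3.UnimplementedNodeServer
}

func (p *examplePlugin) NodePrepareResources(ctx context.Context, req *drav1alpha3.NodePrepareResourcesRequest) (*drav1alpha3.NodePrepareResourcesResponse, error) {
	resp := &drav1alpha3.NodePrepareResourcesResponse{
		Claims: map[string]*drav1alpha3.NodePrepareResourceResponse{},
	}
	for _, claim := range req.Claims {
		// Results are keyed by claim UID; a per-claim failure would set
		// Error here instead of CDIDevices.
		resp.Claims[claim.Uid] = &drav1alpha3.NodePrepareResourceResponse{
			CDIDevices: []string{"vendor.example.com/gpu=" + claim.Name}, // placeholder CDI name
		}
	}
	return resp, nil
}

func main() {
	// The socket path is a made-up example; kubelet discovers real plugin
	// sockets through its plugin registration mechanism.
	lis, err := net.Listen("unix", "/var/lib/kubelet/plugins/example-dra/plugin.sock")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	srv := grpc.NewServer()
	drav1alpha3.RegisterNodeServer(srv, &examplePlugin{})
	log.Fatal(srv.Serve(lis))
}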
-type UnimplementedNodeServer struct { -} - -func (*UnimplementedNodeServer) NodePrepareResources(ctx context.Context, req *NodePrepareResourcesRequest) (*NodePrepareResourcesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NodePrepareResources not implemented") -} -func (*UnimplementedNodeServer) NodeUnprepareResources(ctx context.Context, req *NodeUnprepareResourcesRequest) (*NodeUnprepareResourcesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NodeUnprepareResources not implemented") -} - -func RegisterNodeServer(s *grpc.Server, srv NodeServer) { - s.RegisterService(&_Node_serviceDesc, srv) -} - -func _Node_NodePrepareResources_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodePrepareResourcesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodePrepareResources(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/v1alpha3.Node/NodePrepareResources", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodePrepareResources(ctx, req.(*NodePrepareResourcesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Node_NodeUnprepareResources_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeUnprepareResourcesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodeUnprepareResources(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/v1alpha3.Node/NodeUnprepareResources", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodeUnprepareResources(ctx, req.(*NodeUnprepareResourcesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Node_serviceDesc = grpc.ServiceDesc{ - ServiceName: "v1alpha3.Node", - HandlerType: (*NodeServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "NodePrepareResources", - Handler: _Node_NodePrepareResources_Handler, - }, - { - MethodName: "NodeUnprepareResources", - Handler: _Node_NodeUnprepareResources_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "api.proto", -} - -func (m *NodePrepareResourcesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NodePrepareResourcesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodePrepareResourcesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Claims) > 0 { - for iNdEx := len(m.Claims) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Claims[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintApi(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *NodePrepareResourcesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NodePrepareResourcesResponse) 
MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodePrepareResourcesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Claims) > 0 { - for k := range m.Claims { - v := m.Claims[k] - baseI := i - if v != nil { - { - size, err := v.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintApi(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintApi(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintApi(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *NodePrepareResourceResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NodePrepareResourceResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodePrepareResourceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Error) > 0 { - i -= len(m.Error) - copy(dAtA[i:], m.Error) - i = encodeVarintApi(dAtA, i, uint64(len(m.Error))) - i-- - dAtA[i] = 0x12 - } - if len(m.CDIDevices) > 0 { - for iNdEx := len(m.CDIDevices) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.CDIDevices[iNdEx]) - copy(dAtA[i:], m.CDIDevices[iNdEx]) - i = encodeVarintApi(dAtA, i, uint64(len(m.CDIDevices[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *NodeUnprepareResourcesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NodeUnprepareResourcesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeUnprepareResourcesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Claims) > 0 { - for iNdEx := len(m.Claims) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Claims[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintApi(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *NodeUnprepareResourcesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NodeUnprepareResourcesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeUnprepareResourcesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Claims) > 0 { - for k := range m.Claims { - v := m.Claims[k] - baseI := i - if v != nil { - { - size, err := v.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintApi(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintApi(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintApi(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m 
*NodeUnprepareResourceResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NodeUnprepareResourceResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeUnprepareResourceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Error) > 0 { - i -= len(m.Error) - copy(dAtA[i:], m.Error) - i = encodeVarintApi(dAtA, i, uint64(len(m.Error))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Claim) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Claim) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Claim) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ResourceHandle) > 0 { - i -= len(m.ResourceHandle) - copy(dAtA[i:], m.ResourceHandle) - i = encodeVarintApi(dAtA, i, uint64(len(m.ResourceHandle))) - i-- - dAtA[i] = 0x22 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x1a - } - if len(m.Uid) > 0 { - i -= len(m.Uid) - copy(dAtA[i:], m.Uid) - i = encodeVarintApi(dAtA, i, uint64(len(m.Uid))) - i-- - dAtA[i] = 0x12 - } - if len(m.Namespace) > 0 { - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintApi(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintApi(dAtA []byte, offset int, v uint64) int { - offset -= sovApi(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *NodePrepareResourcesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Claims) > 0 { - for _, e := range m.Claims { - l = e.Size() - n += 1 + l + sovApi(uint64(l)) - } - } - return n -} - -func (m *NodePrepareResourcesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Claims) > 0 { - for k, v := range m.Claims { - _ = k - _ = v - l = 0 - if v != nil { - l = v.Size() - l += 1 + sovApi(uint64(l)) - } - mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + l - n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) - } - } - return n -} - -func (m *NodePrepareResourceResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.CDIDevices) > 0 { - for _, s := range m.CDIDevices { - l = len(s) - n += 1 + l + sovApi(uint64(l)) - } - } - l = len(m.Error) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - return n -} - -func (m *NodeUnprepareResourcesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Claims) > 0 { - for _, e := range m.Claims { - l = e.Size() - n += 1 + l + sovApi(uint64(l)) - } - } - return n -} - -func (m *NodeUnprepareResourcesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Claims) > 0 { - for k, v := range m.Claims { - _ = k - _ = v - l = 0 - if v != nil { - l = v.Size() - l += 1 + sovApi(uint64(l)) - } - mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + l - n += mapEntrySize + 1 
+ sovApi(uint64(mapEntrySize)) - } - } - return n -} - -func (m *NodeUnprepareResourceResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Error) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - return n -} - -func (m *Claim) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Namespace) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - l = len(m.Uid) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - l = len(m.ResourceHandle) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - return n -} - -func sovApi(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozApi(x uint64) (n int) { - return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *NodePrepareResourcesRequest) String() string { - if this == nil { - return "nil" - } - repeatedStringForClaims := "[]*Claim{" - for _, f := range this.Claims { - repeatedStringForClaims += strings.Replace(f.String(), "Claim", "Claim", 1) + "," - } - repeatedStringForClaims += "}" - s := strings.Join([]string{`&NodePrepareResourcesRequest{`, - `Claims:` + repeatedStringForClaims + `,`, - `}`, - }, "") - return s -} -func (this *NodePrepareResourcesResponse) String() string { - if this == nil { - return "nil" - } - keysForClaims := make([]string, 0, len(this.Claims)) - for k := range this.Claims { - keysForClaims = append(keysForClaims, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForClaims) - mapStringForClaims := "map[string]*NodePrepareResourceResponse{" - for _, k := range keysForClaims { - mapStringForClaims += fmt.Sprintf("%v: %v,", k, this.Claims[k]) - } - mapStringForClaims += "}" - s := strings.Join([]string{`&NodePrepareResourcesResponse{`, - `Claims:` + mapStringForClaims + `,`, - `}`, - }, "") - return s -} -func (this *NodePrepareResourceResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NodePrepareResourceResponse{`, - `CDIDevices:` + fmt.Sprintf("%v", this.CDIDevices) + `,`, - `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `}`, - }, "") - return s -} -func (this *NodeUnprepareResourcesRequest) String() string { - if this == nil { - return "nil" - } - repeatedStringForClaims := "[]*Claim{" - for _, f := range this.Claims { - repeatedStringForClaims += strings.Replace(f.String(), "Claim", "Claim", 1) + "," - } - repeatedStringForClaims += "}" - s := strings.Join([]string{`&NodeUnprepareResourcesRequest{`, - `Claims:` + repeatedStringForClaims + `,`, - `}`, - }, "") - return s -} -func (this *NodeUnprepareResourcesResponse) String() string { - if this == nil { - return "nil" - } - keysForClaims := make([]string, 0, len(this.Claims)) - for k := range this.Claims { - keysForClaims = append(keysForClaims, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForClaims) - mapStringForClaims := "map[string]*NodeUnprepareResourceResponse{" - for _, k := range keysForClaims { - mapStringForClaims += fmt.Sprintf("%v: %v,", k, this.Claims[k]) - } - mapStringForClaims += "}" - s := strings.Join([]string{`&NodeUnprepareResourcesResponse{`, - `Claims:` + mapStringForClaims + `,`, - `}`, - }, "") - return s -} -func (this *NodeUnprepareResourceResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NodeUnprepareResourceResponse{`, - `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `}`, - }, "") - return s -} -func (this *Claim) String() string { - if this == nil { - return "nil" - } - s 
:= strings.Join([]string{`&Claim{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Uid:` + fmt.Sprintf("%v", this.Uid) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ResourceHandle:` + fmt.Sprintf("%v", this.ResourceHandle) + `,`, - `}`, - }, "") - return s -} -func valueToStringApi(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *NodePrepareResourcesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodePrepareResourcesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodePrepareResourcesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Claims", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Claims = append(m.Claims, &Claim{}) - if err := m.Claims[len(m.Claims)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NodePrepareResourcesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodePrepareResourcesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodePrepareResourcesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Claims", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Claims == nil { - m.Claims = 
make(map[string]*NodePrepareResourceResponse) - } - var mapkey string - var mapvalue *NodePrepareResourceResponse - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthApi - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthApi - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthApi - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &NodePrepareResourceResponse{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Claims[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NodePrepareResourceResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodePrepareResourceResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodePrepareResourceResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CDIDevices", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := 
int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CDIDevices = append(m.CDIDevices, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NodeUnprepareResourcesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodeUnprepareResourcesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodeUnprepareResourcesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Claims", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Claims = append(m.Claims, &Claim{}) - if err := m.Claims[len(m.Claims)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NodeUnprepareResourcesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 
4 { - return fmt.Errorf("proto: NodeUnprepareResourcesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodeUnprepareResourcesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Claims", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Claims == nil { - m.Claims = make(map[string]*NodeUnprepareResourceResponse) - } - var mapkey string - var mapvalue *NodeUnprepareResourceResponse - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthApi - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthApi - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthApi - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &NodeUnprepareResourceResponse{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Claims[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NodeUnprepareResourceResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodeUnprepareResourceResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodeUnprepareResourceResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Claim) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Claim: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Claim: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Uid = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", 
wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceHandle", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceHandle = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipApi(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthApi - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupApi - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthApi - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupApi = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/k8s.io/kubelet/pkg/apis/dra/v1alpha3/api.proto b/vendor/k8s.io/kubelet/pkg/apis/dra/v1alpha3/api.proto deleted file mode 100644 index c729aaf271..0000000000 --- a/vendor/k8s.io/kubelet/pkg/apis/dra/v1alpha3/api.proto +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. 
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// To regenerate api.pb.go run `hack/update-codegen.sh protobindings`
-
-syntax = "proto3";
-
-package v1alpha3;
-option go_package = "k8s.io/kubelet/pkg/apis/dra/v1alpha3";
-
-import "github.com/gogo/protobuf/gogoproto/gogo.proto";
-
-option (gogoproto.goproto_stringer_all) = false;
-option (gogoproto.stringer_all) = true;
-option (gogoproto.goproto_getters_all) = true;
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.goproto_unrecognized_all) = false;
-
-service Node {
-	// NodePrepareResources prepares several ResourceClaims
-	// for use on the node. If an error is returned, the
-	// response is ignored. Failures for individual claims
-	// can be reported inside NodePrepareResourcesResponse.
-	rpc NodePrepareResources (NodePrepareResourcesRequest)
-		returns (NodePrepareResourcesResponse) {}
-
-	// NodeUnprepareResources is the opposite of NodePrepareResources.
-	// The same error handling rules apply,
-	rpc NodeUnprepareResources (NodeUnprepareResourcesRequest)
-		returns (NodeUnprepareResourcesResponse) {}
-}
-
-message NodePrepareResourcesRequest {
-	// The list of ResourceClaims that are to be prepared.
-	repeated Claim claims = 1;
-}
-
-message NodePrepareResourcesResponse {
-	// The ResourceClaims for which preparation was done
-	// or attempted, with claim_uid as key.
-	//
-	// It is an error if some claim listed in NodePrepareResourcesRequest
-	// does not get prepared. NodePrepareResources
-	// will be called again for those that are missing.
-	map<string, NodePrepareResourceResponse> claims = 1;
-}
-
-message NodePrepareResourceResponse {
-	// These are the additional devices that kubelet must
-	// make available via the container runtime. A resource
-	// may have zero or more devices.
-	repeated string cdi_devices = 1 [(gogoproto.customname) = "CDIDevices"];
-	// If non-empty, preparing the ResourceClaim failed.
-	// cdi_devices is ignored in that case.
-	string error = 2;
-}
-
-message NodeUnprepareResourcesRequest {
-	// The list of ResourceClaims that are to be unprepared.
-	repeated Claim claims = 1;
-}
-
-message NodeUnprepareResourcesResponse {
-	// The ResourceClaims for which preparation was reverted.
-	// The same rules as for NodePrepareResourcesResponse.claims
-	// apply.
-	map<string, NodeUnprepareResourceResponse> claims = 1;
-}
-
-message NodeUnprepareResourceResponse {
-	// If non-empty, unpreparing the ResourceClaim failed.
-	string error = 1;
-}
-
-message Claim {
-	// The ResourceClaim namespace (ResourceClaim.meta.Namespace).
-	// This field is REQUIRED.
-	string namespace = 1;
-	// The UID of the Resource claim (ResourceClaim.meta.UUID).
-	// This field is REQUIRED.
-	string uid = 2;
-	// The name of the Resource claim (ResourceClaim.meta.Name)
-	// This field is REQUIRED.
-	string name = 3;
-	// Resource handle (AllocationResult.ResourceHandles[*].Data)
-	// This field is REQUIRED.
-	string resource_handle = 4;
-}
diff --git a/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/api.pb.go b/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/api.pb.go
deleted file mode 100644
index ecfc843176..0000000000
--- a/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/api.pb.go
+++ /dev/null
@@ -1,1148 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: api.proto
-
-package v1
-
-import (
-	context "context"
-	fmt "fmt"
-	_ "github.com/gogo/protobuf/gogoproto"
-	proto "github.com/gogo/protobuf/proto"
-	grpc "google.golang.org/grpc"
-	codes "google.golang.org/grpc/codes"
-	status "google.golang.org/grpc/status"
-	io "io"
-	math "math"
-	math_bits "math/bits"
-	reflect "reflect"
-	strings "strings"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-// PluginInfo is the message sent from a plugin to the Kubelet pluginwatcher for plugin registration
-type PluginInfo struct {
-	// Type of the Plugin. CSIPlugin or DevicePlugin
-	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
-	// Plugin name that uniquely identifies the plugin for the given plugin type.
-	// For DevicePlugin, this is the resource name that the plugin manages and
-	// should follow the extended resource name convention.
-	// For CSI, this is the CSI driver registrar name.
-	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
-	// Optional endpoint location. If found set by Kubelet component,
-	// Kubelet component will use this endpoint for specific requests.
-	// This allows the plugin to register using one endpoint and possibly use
-	// a different socket for control operations. CSI uses this model to delegate
-	// its registration external from the plugin.
-	Endpoint string `protobuf:"bytes,3,opt,name=endpoint,proto3" json:"endpoint,omitempty"`
-	// Plugin service API versions the plugin supports.
-	// For DevicePlugin, this maps to the deviceplugin API versions the
-	// plugin supports at the given socket.
-	// The Kubelet component communicating with the plugin should be able
-	// to choose any preferred version from this list, or returns an error
-	// if none of the listed versions is supported.
- SupportedVersions []string `protobuf:"bytes,4,rep,name=supported_versions,json=supportedVersions,proto3" json:"supported_versions,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PluginInfo) Reset() { *m = PluginInfo{} } -func (*PluginInfo) ProtoMessage() {} -func (*PluginInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{0} -} -func (m *PluginInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PluginInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PluginInfo.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PluginInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_PluginInfo.Merge(m, src) -} -func (m *PluginInfo) XXX_Size() int { - return m.Size() -} -func (m *PluginInfo) XXX_DiscardUnknown() { - xxx_messageInfo_PluginInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_PluginInfo proto.InternalMessageInfo - -func (m *PluginInfo) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *PluginInfo) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginInfo) GetEndpoint() string { - if m != nil { - return m.Endpoint - } - return "" -} - -func (m *PluginInfo) GetSupportedVersions() []string { - if m != nil { - return m.SupportedVersions - } - return nil -} - -// RegistrationStatus is the message sent from Kubelet pluginwatcher to the plugin for notification on registration status -type RegistrationStatus struct { - // True if plugin gets registered successfully at Kubelet - PluginRegistered bool `protobuf:"varint,1,opt,name=plugin_registered,json=pluginRegistered,proto3" json:"plugin_registered,omitempty"` - // Error message in case plugin fails to register, empty string otherwise - Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RegistrationStatus) Reset() { *m = RegistrationStatus{} } -func (*RegistrationStatus) ProtoMessage() {} -func (*RegistrationStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{1} -} -func (m *RegistrationStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RegistrationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RegistrationStatus.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RegistrationStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_RegistrationStatus.Merge(m, src) -} -func (m *RegistrationStatus) XXX_Size() int { - return m.Size() -} -func (m *RegistrationStatus) XXX_DiscardUnknown() { - xxx_messageInfo_RegistrationStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_RegistrationStatus proto.InternalMessageInfo - -func (m *RegistrationStatus) GetPluginRegistered() bool { - if m != nil { - return m.PluginRegistered - } - return false -} - -func (m *RegistrationStatus) GetError() string { - if m != nil { - return m.Error - } - return "" -} - -// RegistrationStatusResponse is sent by plugin to kubelet in response to RegistrationStatus RPC -type RegistrationStatusResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache 
int32 `json:"-"` -} - -func (m *RegistrationStatusResponse) Reset() { *m = RegistrationStatusResponse{} } -func (*RegistrationStatusResponse) ProtoMessage() {} -func (*RegistrationStatusResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{2} -} -func (m *RegistrationStatusResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RegistrationStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RegistrationStatusResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RegistrationStatusResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_RegistrationStatusResponse.Merge(m, src) -} -func (m *RegistrationStatusResponse) XXX_Size() int { - return m.Size() -} -func (m *RegistrationStatusResponse) XXX_DiscardUnknown() { - xxx_messageInfo_RegistrationStatusResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_RegistrationStatusResponse proto.InternalMessageInfo - -// InfoRequest is the empty request message from Kubelet -type InfoRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InfoRequest) Reset() { *m = InfoRequest{} } -func (*InfoRequest) ProtoMessage() {} -func (*InfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{3} -} -func (m *InfoRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *InfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_InfoRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *InfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_InfoRequest.Merge(m, src) -} -func (m *InfoRequest) XXX_Size() int { - return m.Size() -} -func (m *InfoRequest) XXX_DiscardUnknown() { - xxx_messageInfo_InfoRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_InfoRequest proto.InternalMessageInfo - -func init() { - proto.RegisterType((*PluginInfo)(nil), "pluginregistration.PluginInfo") - proto.RegisterType((*RegistrationStatus)(nil), "pluginregistration.RegistrationStatus") - proto.RegisterType((*RegistrationStatusResponse)(nil), "pluginregistration.RegistrationStatusResponse") - proto.RegisterType((*InfoRequest)(nil), "pluginregistration.InfoRequest") -} - -func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) } - -var fileDescriptor_00212fb1f9d3bf1c = []byte{ - // 365 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0xc1, 0x4a, 0xeb, 0x40, - 0x14, 0xcd, 0xbc, 0xf6, 0xbd, 0xd7, 0x8e, 0x0a, 0x76, 0x70, 0x11, 0x82, 0x8c, 0x25, 0x0b, 0x29, - 0x48, 0x13, 0xd4, 0x8d, 0x6b, 0x37, 0x22, 0x8a, 0x48, 0x04, 0x05, 0x37, 0x25, 0xb1, 0xb7, 0x71, - 0x68, 0x3b, 0x33, 0xce, 0x4c, 0x0a, 0x5d, 0xe9, 0x27, 0xf8, 0x59, 0x5d, 0x8a, 0x2b, 0x97, 0x36, - 0xfe, 0x88, 0x74, 0x52, 0x62, 0x21, 0x5d, 0xb8, 0xbb, 0xe7, 0xdc, 0x73, 0xef, 0xdc, 0x73, 0x18, - 0xdc, 0x8c, 0x25, 0x0b, 0xa4, 0x12, 0x46, 0x10, 0x22, 0x47, 0x59, 0xca, 0xb8, 0x82, 0x94, 0x69, - 0xa3, 0x62, 0xc3, 0x04, 0xf7, 0xba, 0x29, 0x33, 0x8f, 0x59, 0x12, 0x3c, 0x88, 0x71, 0x98, 0x8a, - 0x54, 0x84, 0x56, 0x9a, 0x64, 0x03, 0x8b, 0x2c, 0xb0, 0x55, 0xb1, 0xc2, 0x7f, 0xc6, 0xf8, 0xda, - 0x2e, 0x39, 0xe7, 0x03, 
0x41, 0x08, 0xae, 0x9b, 0xa9, 0x04, 0x17, 0xb5, 0x51, 0xa7, 0x19, 0xd9, - 0x7a, 0xc1, 0xf1, 0x78, 0x0c, 0xee, 0x9f, 0x82, 0x5b, 0xd4, 0xc4, 0xc3, 0x0d, 0xe0, 0x7d, 0x29, - 0x18, 0x37, 0x6e, 0xcd, 0xf2, 0x25, 0x26, 0x5d, 0x4c, 0x74, 0x26, 0xa5, 0x50, 0x06, 0xfa, 0xbd, - 0x09, 0x28, 0xcd, 0x04, 0xd7, 0x6e, 0xbd, 0x5d, 0xeb, 0x34, 0xa3, 0x56, 0xd9, 0xb9, 0x5d, 0x36, - 0xfc, 0x3b, 0x4c, 0xa2, 0x95, 0xfb, 0x6f, 0x4c, 0x6c, 0x32, 0x4d, 0x0e, 0x70, 0xab, 0xf0, 0xd6, - 0x2b, 0xcc, 0x81, 0x82, 0xbe, 0xbd, 0xaa, 0x11, 0x6d, 0x17, 0x8d, 0xa8, 0xe4, 0xc9, 0x0e, 0xfe, - 0x0b, 0x4a, 0x09, 0xb5, 0x3c, 0xb1, 0x00, 0xfe, 0x2e, 0xf6, 0xaa, 0x8b, 0x23, 0xd0, 0x52, 0x70, - 0x0d, 0xfe, 0x16, 0xde, 0x58, 0x38, 0x8e, 0xe0, 0x29, 0x03, 0x6d, 0x8e, 0xde, 0x11, 0xde, 0x5c, - 0x55, 0x93, 0x4b, 0xfc, 0xff, 0x0c, 0x8c, 0x0d, 0x65, 0x2f, 0xa8, 0xc6, 0x1c, 0xac, 0x0c, 0x7b, - 0x74, 0x9d, 0xe0, 0x27, 0x55, 0xdf, 0x21, 0x06, 0xbb, 0x57, 0xc2, 0xb0, 0xc1, 0x74, 0x8d, 0xd5, - 0xfd, 0x75, 0xd3, 0x55, 0x9d, 0x17, 0xfc, 0x4e, 0x57, 0x3a, 0x74, 0x4e, 0x2f, 0x66, 0x73, 0x8a, - 0x3e, 0xe6, 0xd4, 0x79, 0xc9, 0x29, 0x9a, 0xe5, 0x14, 0xbd, 0xe5, 0x14, 0x7d, 0xe6, 0x14, 0xbd, - 0x7e, 0x51, 0xe7, 0xbe, 0x3b, 0x3c, 0xd1, 0x01, 0x13, 0xe1, 0x30, 0x4b, 0x60, 0x04, 0x26, 0x94, - 0xc3, 0x34, 0x8c, 0x25, 0xd3, 0x61, 0xf5, 0x99, 0x70, 0x72, 0x98, 0xfc, 0xb3, 0xff, 0xe5, 0xf8, - 0x3b, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x5f, 0xd4, 0xb2, 0x7f, 0x02, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// RegistrationClient is the client API for Registration service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type RegistrationClient interface { - GetInfo(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*PluginInfo, error) - NotifyRegistrationStatus(ctx context.Context, in *RegistrationStatus, opts ...grpc.CallOption) (*RegistrationStatusResponse, error) -} - -type registrationClient struct { - cc *grpc.ClientConn -} - -func NewRegistrationClient(cc *grpc.ClientConn) RegistrationClient { - return ®istrationClient{cc} -} - -func (c *registrationClient) GetInfo(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*PluginInfo, error) { - out := new(PluginInfo) - err := c.cc.Invoke(ctx, "/pluginregistration.Registration/GetInfo", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *registrationClient) NotifyRegistrationStatus(ctx context.Context, in *RegistrationStatus, opts ...grpc.CallOption) (*RegistrationStatusResponse, error) { - out := new(RegistrationStatusResponse) - err := c.cc.Invoke(ctx, "/pluginregistration.Registration/NotifyRegistrationStatus", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// RegistrationServer is the server API for Registration service. -type RegistrationServer interface { - GetInfo(context.Context, *InfoRequest) (*PluginInfo, error) - NotifyRegistrationStatus(context.Context, *RegistrationStatus) (*RegistrationStatusResponse, error) -} - -// UnimplementedRegistrationServer can be embedded to have forward compatible implementations. 
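For orientation, the Registration service being deleted here is kubelet's pluginwatcher handshake: kubelet discovers a socket under its plugin registry directory, calls GetInfo to learn the plugin's type, name, endpoint, and supported versions, then reports the outcome via NotifyRegistrationStatus. A minimal sketch of the plugin side, assuming this vendored import path; the plugin name, type string, and socket paths are illustrative assumptions:

```go
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	registerapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1"
)

// exampleRegistrar answers the pluginwatcher handshake. Embedding
// UnimplementedRegistrationServer (defined just below) keeps the type
// forward compatible if RPCs are added to the Registration service.
type exampleRegistrar struct {
	registerapi.UnimplementedRegistrationServer
}

// GetInfo tells kubelet what this plugin is and where its real service socket lives.
func (r *exampleRegistrar) GetInfo(ctx context.Context, req *registerapi.InfoRequest) (*registerapi.PluginInfo, error) {
	return &registerapi.PluginInfo{
		Type:              "CSIPlugin", // per the PluginInfo docs above; illustrative choice
		Name:              "example.com/plugin",
		Endpoint:          "/var/lib/kubelet/plugins/example/plugin.sock",
		SupportedVersions: []string{"v1"},
	}, nil
}

// NotifyRegistrationStatus receives kubelet's verdict on the registration attempt.
func (r *exampleRegistrar) NotifyRegistrationStatus(ctx context.Context, status *registerapi.RegistrationStatus) (*registerapi.RegistrationStatusResponse, error) {
	if !status.PluginRegistered {
		log.Printf("registration failed: %s", status.Error)
	}
	return &registerapi.RegistrationStatusResponse{}, nil
}

func main() {
	// Kubelet watches this directory for new registration sockets (path illustrative).
	lis, err := net.Listen("unix", "/var/lib/kubelet/plugins_registry/example-reg.sock")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	registerapi.RegisterRegistrationServer(s, &exampleRegistrar{})
	log.Fatal(s.Serve(lis))
}
```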
-type UnimplementedRegistrationServer struct { -} - -func (*UnimplementedRegistrationServer) GetInfo(ctx context.Context, req *InfoRequest) (*PluginInfo, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetInfo not implemented") -} -func (*UnimplementedRegistrationServer) NotifyRegistrationStatus(ctx context.Context, req *RegistrationStatus) (*RegistrationStatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NotifyRegistrationStatus not implemented") -} - -func RegisterRegistrationServer(s *grpc.Server, srv RegistrationServer) { - s.RegisterService(&_Registration_serviceDesc, srv) -} - -func _Registration_GetInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(InfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RegistrationServer).GetInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/pluginregistration.Registration/GetInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RegistrationServer).GetInfo(ctx, req.(*InfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Registration_NotifyRegistrationStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RegistrationStatus) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RegistrationServer).NotifyRegistrationStatus(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/pluginregistration.Registration/NotifyRegistrationStatus", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RegistrationServer).NotifyRegistrationStatus(ctx, req.(*RegistrationStatus)) - } - return interceptor(ctx, in, info, handler) -} - -var _Registration_serviceDesc = grpc.ServiceDesc{ - ServiceName: "pluginregistration.Registration", - HandlerType: (*RegistrationServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetInfo", - Handler: _Registration_GetInfo_Handler, - }, - { - MethodName: "NotifyRegistrationStatus", - Handler: _Registration_NotifyRegistrationStatus_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "api.proto", -} - -func (m *PluginInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PluginInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PluginInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.SupportedVersions) > 0 { - for iNdEx := len(m.SupportedVersions) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.SupportedVersions[iNdEx]) - copy(dAtA[i:], m.SupportedVersions[iNdEx]) - i = encodeVarintApi(dAtA, i, uint64(len(m.SupportedVersions[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - if len(m.Endpoint) > 0 { - i -= len(m.Endpoint) - copy(dAtA[i:], m.Endpoint) - i = encodeVarintApi(dAtA, i, uint64(len(m.Endpoint))) - i-- - dAtA[i] = 0x1a - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], 
m.Type) - i = encodeVarintApi(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *RegistrationStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RegistrationStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RegistrationStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Error) > 0 { - i -= len(m.Error) - copy(dAtA[i:], m.Error) - i = encodeVarintApi(dAtA, i, uint64(len(m.Error))) - i-- - dAtA[i] = 0x12 - } - if m.PluginRegistered { - i-- - if m.PluginRegistered { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *RegistrationStatusResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RegistrationStatusResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RegistrationStatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *InfoRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *InfoRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *InfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintApi(dAtA []byte, offset int, v uint64) int { - offset -= sovApi(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *PluginInfo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Type) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - l = len(m.Endpoint) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - if len(m.SupportedVersions) > 0 { - for _, s := range m.SupportedVersions { - l = len(s) - n += 1 + l + sovApi(uint64(l)) - } - } - return n -} - -func (m *RegistrationStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PluginRegistered { - n += 2 - } - l = len(m.Error) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - return n -} - -func (m *RegistrationStatusResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *InfoRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovApi(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozApi(x uint64) (n int) { - return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *PluginInfo) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PluginInfo{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Endpoint:` + fmt.Sprintf("%v", this.Endpoint) + `,`, - 
`SupportedVersions:` + fmt.Sprintf("%v", this.SupportedVersions) + `,`, - `}`, - }, "") - return s -} -func (this *RegistrationStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RegistrationStatus{`, - `PluginRegistered:` + fmt.Sprintf("%v", this.PluginRegistered) + `,`, - `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `}`, - }, "") - return s -} -func (this *RegistrationStatusResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RegistrationStatusResponse{`, - `}`, - }, "") - return s -} -func (this *InfoRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&InfoRequest{`, - `}`, - }, "") - return s -} -func valueToStringApi(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *PluginInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - 
m.Endpoint = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SupportedVersions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SupportedVersions = append(m.SupportedVersions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RegistrationStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RegistrationStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RegistrationStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PluginRegistered", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.PluginRegistered = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RegistrationStatusResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RegistrationStatusResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RegistrationStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InfoRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InfoRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipApi(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthApi - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupApi - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthApi - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupApi = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/api.proto b/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/api.proto deleted file mode 100644 index 8972e7e82c..0000000000 --- a/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/api.proto 
+++ /dev/null
@@ -1,61 +0,0 @@
-// To regenerate api.pb.go run `hack/update-codegen.sh protobindings`
-syntax = "proto3";
-
-package pluginregistration; // This should have been v1.
-option go_package = "k8s.io/kubelet/pkg/apis/pluginregistration/v1";
-
-import "github.com/gogo/protobuf/gogoproto/gogo.proto";
-
-option (gogoproto.goproto_stringer_all) = false;
-option (gogoproto.stringer_all) = true;
-option (gogoproto.goproto_getters_all) = true;
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.goproto_unrecognized_all) = false;
-
-// PluginInfo is the message sent from a plugin to the Kubelet pluginwatcher for plugin registration
-message PluginInfo {
-	// Type of the Plugin. CSIPlugin or DevicePlugin
-	string type = 1;
-	// Plugin name that uniquely identifies the plugin for the given plugin type.
-	// For DevicePlugin, this is the resource name that the plugin manages and
-	// should follow the extended resource name convention.
-	// For CSI, this is the CSI driver registrar name.
-	string name = 2;
-	// Optional endpoint location. If set, the Kubelet component will use
-	// this endpoint for specific requests. This allows the plugin to register
-	// using one endpoint and possibly use a different socket for control
-	// operations. CSI uses this model to delegate its registration external
-	// from the plugin.
-	string endpoint = 3;
-	// Plugin service API versions the plugin supports.
-	// For DevicePlugin, this maps to the deviceplugin API versions the
-	// plugin supports at the given socket.
-	// The Kubelet component communicating with the plugin should be able
-	// to choose any preferred version from this list, or return an error
-	// if none of the listed versions is supported.
-	repeated string supported_versions = 4;
-}
-
-// RegistrationStatus is the message sent from Kubelet pluginwatcher to the plugin for notification on registration status
-message RegistrationStatus {
-	// True if the plugin gets registered successfully at Kubelet
-	bool plugin_registered = 1;
-	// Error message in case the plugin fails to register, empty string otherwise
-	string error = 2;
-}
-
-// RegistrationStatusResponse is sent by the plugin to the kubelet in response to the RegistrationStatus RPC
-message RegistrationStatusResponse {
-}
-
-// InfoRequest is the empty request message from Kubelet
-message InfoRequest {
-}
-
-// Registration is the service advertised by the Plugins.
-service Registration {
-	rpc GetInfo(InfoRequest) returns (PluginInfo) {}
-	rpc NotifyRegistrationStatus(RegistrationStatus) returns (RegistrationStatusResponse) {}
-}
diff --git a/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/constants.go b/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/constants.go
deleted file mode 100644
index 475c0404b9..0000000000
--- a/vendor/k8s.io/kubelet/pkg/apis/pluginregistration/v1/constants.go
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
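
For context on the protocol the deleted files above implemented: a plugin serves the Registration service on a socket under the kubelet's plugin registry directory; the kubelet pluginwatcher calls GetInfo, picks a supported version, and reports the outcome through NotifyRegistrationStatus. The following plugin-side sketch is hypothetical; the plugin name, socket paths, and version string are illustrative assumptions, not values taken from this repository.

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
	registerapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1"
)

// registrationServer implements the Registration service that the kubelet
// pluginwatcher probes; names and paths below are made up for illustration.
type registrationServer struct {
	endpoint string // socket the plugin serves its actual plugin API on
}

func (s *registrationServer) GetInfo(ctx context.Context, req *registerapi.InfoRequest) (*registerapi.PluginInfo, error) {
	return &registerapi.PluginInfo{
		Type:              registerapi.DevicePlugin, // or CSIPlugin / DRAPlugin, per constants.go below
		Name:              "vendor.example.com/widget",
		Endpoint:          s.endpoint,
		SupportedVersions: []string{"v1beta1"},
	}, nil
}

func (s *registrationServer) NotifyRegistrationStatus(ctx context.Context, status *registerapi.RegistrationStatus) (*registerapi.RegistrationStatusResponse, error) {
	if !status.PluginRegistered {
		log.Printf("registration failed: %s", status.Error)
	}
	return &registerapi.RegistrationStatusResponse{}, nil
}

func main() {
	// The pluginwatcher discovers sockets under the kubelet's plugins_registry dir.
	lis, err := net.Listen("unix", "/var/lib/kubelet/plugins_registry/widget-reg.sock")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	registerapi.RegisterRegistrationServer(srv, &registrationServer{
		endpoint: "/var/lib/kubelet/plugins/widget/widget.sock",
	})
	log.Fatal(srv.Serve(lis))
}
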
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1
-
-const (
-	// CSIPlugin identifier for registered CSI plugins
-	CSIPlugin = "CSIPlugin"
-	// DevicePlugin identifier for registered device plugins
-	DevicePlugin = "DevicePlugin"
-	// DRAPlugin identifier for registered Dynamic Resource Allocation plugins
-	DRAPlugin = "DRAPlugin"
-)
diff --git a/vendor/modules.txt b/vendor/modules.txt
index cceb100684..28b0cd689b 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -107,6 +107,8 @@ github.com/cilium/cilium/pkg/versioncheck
 # github.com/cilium/proxy v0.0.0-20230623092907-8fddead4e52c
 ## explicit; go 1.20
 github.com/cilium/proxy/pkg/policy/api/kafka
+# github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa
+## explicit; go 1.19
 # github.com/containernetworking/cni v1.1.2
 ## explicit; go 1.14
 github.com/containernetworking/cni/libcni
@@ -142,6 +144,8 @@ github.com/docker/go-units
 ## explicit; go 1.13
 github.com/emicklei/go-restful/v3
 github.com/emicklei/go-restful/v3/log
+# github.com/envoyproxy/protoc-gen-validate v1.0.4
+## explicit; go 1.19
 # github.com/evanphx/json-patch v5.6.0+incompatible
 ## explicit
 github.com/evanphx/json-patch
@@ -451,14 +455,6 @@ github.com/onsi/gomega/matchers/support/goraph/edge
 github.com/onsi/gomega/matchers/support/goraph/node
 github.com/onsi/gomega/matchers/support/goraph/util
 github.com/onsi/gomega/types
-# github.com/opencontainers/runtime-spec v1.1.0
-## explicit
-github.com/opencontainers/runtime-spec/specs-go
-# github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626
-## explicit; go 1.16
-github.com/opencontainers/runtime-tools/generate
-github.com/opencontainers/runtime-tools/generate/seccomp
-github.com/opencontainers/runtime-tools/validate/capabilities
 # github.com/openkruise/kruise-api v1.3.0
 ## explicit; go 1.18
 github.com/openkruise/kruise-api
@@ -543,7 +539,7 @@ github.com/shopspring/decimal
 ## explicit; go 1.13
 github.com/sirupsen/logrus
 github.com/sirupsen/logrus/hooks/syslog
-# github.com/spf13/afero v1.9.5
+# github.com/spf13/afero v1.10.0
 ## explicit; go 1.16
 github.com/spf13/afero
 github.com/spf13/afero/internal/common
@@ -579,9 +575,6 @@ github.com/spidernet-io/e2eframework/tools
 # github.com/subosito/gotenv v1.4.2
 ## explicit; go 1.18
 github.com/subosito/gotenv
-# github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
-## explicit
-github.com/syndtr/gocapability/capability
 # github.com/tigera/api v0.0.0-20230406222214-ca74195900cb
 ## explicit; go 1.18
 github.com/tigera/api/pkg/lib/numorstring
@@ -716,11 +709,9 @@ golang.org/x/net/icmp
 golang.org/x/net/idna
 golang.org/x/net/internal/iana
 golang.org/x/net/internal/socket
-golang.org/x/net/internal/timeseries
 golang.org/x/net/ipv4
 golang.org/x/net/ipv6
 golang.org/x/net/netutil
-golang.org/x/net/trace
 # golang.org/x/oauth2 v0.16.0
 ## explicit; go 1.18
 golang.org/x/oauth2
@@ -804,62 +795,8 @@ google.golang.org/appengine/internal/log
 google.golang.org/appengine/internal/remote_api
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80
+# google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80
 ## explicit; go 1.19
-google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.62.0
-## explicit; go 1.19
-google.golang.org/grpc
-google.golang.org/grpc/attributes
-google.golang.org/grpc/backoff
-google.golang.org/grpc/balancer
-google.golang.org/grpc/balancer/base -google.golang.org/grpc/balancer/grpclb/state -google.golang.org/grpc/balancer/roundrobin -google.golang.org/grpc/binarylog/grpc_binarylog_v1 -google.golang.org/grpc/channelz -google.golang.org/grpc/codes -google.golang.org/grpc/connectivity -google.golang.org/grpc/credentials -google.golang.org/grpc/credentials/insecure -google.golang.org/grpc/encoding -google.golang.org/grpc/encoding/proto -google.golang.org/grpc/grpclog -google.golang.org/grpc/internal -google.golang.org/grpc/internal/backoff -google.golang.org/grpc/internal/balancer/gracefulswitch -google.golang.org/grpc/internal/balancerload -google.golang.org/grpc/internal/binarylog -google.golang.org/grpc/internal/buffer -google.golang.org/grpc/internal/channelz -google.golang.org/grpc/internal/credentials -google.golang.org/grpc/internal/envconfig -google.golang.org/grpc/internal/grpclog -google.golang.org/grpc/internal/grpcrand -google.golang.org/grpc/internal/grpcsync -google.golang.org/grpc/internal/grpcutil -google.golang.org/grpc/internal/idle -google.golang.org/grpc/internal/metadata -google.golang.org/grpc/internal/pretty -google.golang.org/grpc/internal/resolver -google.golang.org/grpc/internal/resolver/dns -google.golang.org/grpc/internal/resolver/dns/internal -google.golang.org/grpc/internal/resolver/passthrough -google.golang.org/grpc/internal/resolver/unix -google.golang.org/grpc/internal/serviceconfig -google.golang.org/grpc/internal/status -google.golang.org/grpc/internal/syscall -google.golang.org/grpc/internal/transport -google.golang.org/grpc/internal/transport/networktype -google.golang.org/grpc/keepalive -google.golang.org/grpc/metadata -google.golang.org/grpc/peer -google.golang.org/grpc/resolver -google.golang.org/grpc/resolver/dns -google.golang.org/grpc/serviceconfig -google.golang.org/grpc/stats -google.golang.org/grpc/status -google.golang.org/grpc/tap # google.golang.org/protobuf v1.33.0 ## explicit; go 1.17 google.golang.org/protobuf/encoding/protojson @@ -1378,11 +1315,6 @@ k8s.io/code-generator/third_party/forked/golang/reflect ## explicit; go 1.21 k8s.io/component-base/config k8s.io/component-base/config/v1alpha1 -# k8s.io/dynamic-resource-allocation v0.29.2 -## explicit; go 1.21 -k8s.io/dynamic-resource-allocation/controller -k8s.io/dynamic-resource-allocation/kubeletplugin -k8s.io/dynamic-resource-allocation/resourceclaim # k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 ## explicit; go 1.20 k8s.io/gengo/v2 @@ -1413,11 +1345,6 @@ k8s.io/kube-openapi/pkg/validation/spec # k8s.io/kubectl v0.26.3 ## explicit; go 1.19 k8s.io/kubectl/pkg/util/podutils -# k8s.io/kubelet v0.29.2 -## explicit; go 1.21 -k8s.io/kubelet/pkg/apis/dra/v1alpha2 -k8s.io/kubelet/pkg/apis/dra/v1alpha3 -k8s.io/kubelet/pkg/apis/pluginregistration/v1 # k8s.io/kubernetes v1.29.0 ## explicit; go 1.21 k8s.io/kubernetes/pkg/util/iptables @@ -1521,13 +1448,3 @@ sigs.k8s.io/structured-merge-diff/v4/value # sigs.k8s.io/yaml v1.3.0 ## explicit; go 1.12 sigs.k8s.io/yaml -# tags.cncf.io/container-device-interface v0.6.2 -## explicit; go 1.19 -tags.cncf.io/container-device-interface/internal/multierror -tags.cncf.io/container-device-interface/internal/validation -tags.cncf.io/container-device-interface/internal/validation/k8s -tags.cncf.io/container-device-interface/pkg/cdi -tags.cncf.io/container-device-interface/pkg/parser -# tags.cncf.io/container-device-interface/specs-go v0.6.0 -## explicit; go 1.19 -tags.cncf.io/container-device-interface/specs-go diff --git 
a/vendor/tags.cncf.io/container-device-interface/LICENSE b/vendor/tags.cncf.io/container-device-interface/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/vendor/tags.cncf.io/container-device-interface/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/vendor/tags.cncf.io/container-device-interface/internal/multierror/multierror.go b/vendor/tags.cncf.io/container-device-interface/internal/multierror/multierror.go
deleted file mode 100644
index 07aca4a1d3..0000000000
--- a/vendor/tags.cncf.io/container-device-interface/internal/multierror/multierror.go
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
-   Copyright © 2022 The CDI Authors
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package multierror
-
-import (
-	"strings"
-)
-
-// New combines several errors into a single error. Parameters that are nil are
-// ignored. If no errors are passed in or all parameters are nil, then the
-// result is also nil.
-func New(errors ...error) error {
-	// Filter out nil entries.
-	numErrors := 0
-	for _, err := range errors {
-		if err != nil {
-			errors[numErrors] = err
-			numErrors++
-		}
-	}
-	if numErrors == 0 {
-		return nil
-	}
-	return multiError(errors[0:numErrors])
-}
-
-// multiError is the underlying implementation used by New.
-//
-// Beware that a null multiError is not the same as a nil error.
-type multiError []error
-
-// Error returns all individual error strings concatenated with "\n"
-func (e multiError) Error() string {
-	var builder strings.Builder
-	for i, err := range e {
-		if i > 0 {
-			_, _ = builder.WriteString("\n")
-		}
-		_, _ = builder.WriteString(err.Error())
-	}
-	return builder.String()
-}
-
-// Append returns a new multi error with all errors concatenated. Errors that are
-// multi errors get flattened, nil is ignored.
-func Append(err error, errors ...error) error {
-	var result multiError
-	if m, ok := err.(multiError); ok {
-		result = m
-	} else if err != nil {
-		result = append(result, err)
-	}
-
-	for _, e := range errors {
-		if e == nil {
-			continue
-		}
-		if m, ok := e.(multiError); ok {
-			result = append(result, m...)
-		} else {
-			result = append(result, e)
-		}
-	}
-	if len(result) == 0 {
-		return nil
-	}
-	return result
-}
diff --git a/vendor/tags.cncf.io/container-device-interface/internal/validation/k8s/objectmeta.go b/vendor/tags.cncf.io/container-device-interface/internal/validation/k8s/objectmeta.go
deleted file mode 100644
index 5cf63dabf4..0000000000
--- a/vendor/tags.cncf.io/container-device-interface/internal/validation/k8s/objectmeta.go
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
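
A short usage sketch for the multierror helper removed above. This is hypothetical; note that the package sits under internal/, so it was only importable from within the CDI module itself.

package main

import (
	"errors"
	"fmt"

	// internal/ path: only importable inside the CDI module.
	"tags.cncf.io/container-device-interface/internal/multierror"
)

func main() {
	// New drops nil entries and returns nil when nothing remains.
	err := multierror.New(nil, errors.New("spec dir missing"), nil)

	// Append flattens nested multi errors instead of wrapping them.
	err = multierror.Append(err,
		errors.New("conflicting device"),
		multierror.New(errors.New("bad annotation")))

	// Error() joins the individual messages with "\n", one per line.
	fmt.Println(err)
}
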
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Adapted from k8s.io/apimachinery/pkg/api/validation: -// https://github.com/kubernetes/apimachinery/blob/7687996c715ee7d5c8cf1e3215e607eb065a4221/pkg/api/validation/objectmeta.go - -package k8s - -import ( - "fmt" - "strings" - - "tags.cncf.io/container-device-interface/internal/multierror" -) - -// TotalAnnotationSizeLimitB defines the maximum size of all annotations in characters. -const TotalAnnotationSizeLimitB int = 256 * (1 << 10) // 256 kB - -// ValidateAnnotations validates that a set of annotations are correctly defined. -func ValidateAnnotations(annotations map[string]string, path string) error { - errors := multierror.New() - for k := range annotations { - // The rule is QualifiedName except that case doesn't matter, so convert to lowercase before checking. - for _, msg := range IsQualifiedName(strings.ToLower(k)) { - errors = multierror.Append(errors, fmt.Errorf("%v.%v is invalid: %v", path, k, msg)) - } - } - if err := ValidateAnnotationsSize(annotations); err != nil { - errors = multierror.Append(errors, fmt.Errorf("%v is too long: %v", path, err)) - } - return errors -} - -// ValidateAnnotationsSize validates that a set of annotations is not too large. -func ValidateAnnotationsSize(annotations map[string]string) error { - var totalSize int64 - for k, v := range annotations { - totalSize += (int64)(len(k)) + (int64)(len(v)) - } - if totalSize > (int64)(TotalAnnotationSizeLimitB) { - return fmt.Errorf("annotations size %d is larger than limit %d", totalSize, TotalAnnotationSizeLimitB) - } - return nil -} diff --git a/vendor/tags.cncf.io/container-device-interface/internal/validation/k8s/validation.go b/vendor/tags.cncf.io/container-device-interface/internal/validation/k8s/validation.go deleted file mode 100644 index 5ad6ce2776..0000000000 --- a/vendor/tags.cncf.io/container-device-interface/internal/validation/k8s/validation.go +++ /dev/null @@ -1,217 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Adapted from k8s.io/apimachinery/pkg/util/validation: -// https://github.com/kubernetes/apimachinery/blob/7687996c715ee7d5c8cf1e3215e607eb065a4221/pkg/util/validation/validation.go - -package k8s - -import ( - "fmt" - "regexp" - "strings" -) - -const qnameCharFmt string = "[A-Za-z0-9]" -const qnameExtCharFmt string = "[-A-Za-z0-9_.]" -const qualifiedNameFmt string = "(" + qnameCharFmt + qnameExtCharFmt + "*)?" 
+ qnameCharFmt -const qualifiedNameErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" -const qualifiedNameMaxLength int = 63 - -var qualifiedNameRegexp = regexp.MustCompile("^" + qualifiedNameFmt + "$") - -// IsQualifiedName tests whether the value passed is what Kubernetes calls a -// "qualified name". This is a format used in various places throughout the -// system. If the value is not valid, a list of error strings is returned. -// Otherwise an empty list (or nil) is returned. -func IsQualifiedName(value string) []string { - var errs []string - parts := strings.Split(value, "/") - var name string - switch len(parts) { - case 1: - name = parts[0] - case 2: - var prefix string - prefix, name = parts[0], parts[1] - if len(prefix) == 0 { - errs = append(errs, "prefix part "+EmptyError()) - } else if msgs := IsDNS1123Subdomain(prefix); len(msgs) != 0 { - errs = append(errs, prefixEach(msgs, "prefix part ")...) - } - default: - return append(errs, "a qualified name "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")+ - " with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')") - } - - if len(name) == 0 { - errs = append(errs, "name part "+EmptyError()) - } else if len(name) > qualifiedNameMaxLength { - errs = append(errs, "name part "+MaxLenError(qualifiedNameMaxLength)) - } - if !qualifiedNameRegexp.MatchString(name) { - errs = append(errs, "name part "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")) - } - return errs -} - -const labelValueFmt string = "(" + qualifiedNameFmt + ")?" -const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" - -// LabelValueMaxLength is a label's max length -const LabelValueMaxLength int = 63 - -var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$") - -// IsValidLabelValue tests whether the value passed is a valid label value. If -// the value is not valid, a list of error strings is returned. Otherwise an -// empty list (or nil) is returned. -func IsValidLabelValue(value string) []string { - var errs []string - if len(value) > LabelValueMaxLength { - errs = append(errs, MaxLenError(LabelValueMaxLength)) - } - if !labelValueRegexp.MatchString(value) { - errs = append(errs, RegexError(labelValueErrMsg, labelValueFmt, "MyValue", "my_value", "12345")) - } - return errs -} - -const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" -const dns1123LabelErrMsg string = "a lowercase RFC 1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" - -// DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123) -const DNS1123LabelMaxLength int = 63 - -var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$") - -// IsDNS1123Label tests for a string that conforms to the definition of a label in -// DNS (RFC 1123). -func IsDNS1123Label(value string) []string { - var errs []string - if len(value) > DNS1123LabelMaxLength { - errs = append(errs, MaxLenError(DNS1123LabelMaxLength)) - } - if !dns1123LabelRegexp.MatchString(value) { - errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc")) - } - return errs -} - -const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." 
+ dns1123LabelFmt + ")*" -const dns1123SubdomainErrorMsg string = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" - -// DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123) -const DNS1123SubdomainMaxLength int = 253 - -var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$") - -// IsDNS1123Subdomain tests for a string that conforms to the definition of a -// subdomain in DNS (RFC 1123). -func IsDNS1123Subdomain(value string) []string { - var errs []string - if len(value) > DNS1123SubdomainMaxLength { - errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) - } - if !dns1123SubdomainRegexp.MatchString(value) { - errs = append(errs, RegexError(dns1123SubdomainErrorMsg, dns1123SubdomainFmt, "example.com")) - } - return errs -} - -const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?" -const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character" - -// DNS1035LabelMaxLength is a label's max length in DNS (RFC 1035) -const DNS1035LabelMaxLength int = 63 - -var dns1035LabelRegexp = regexp.MustCompile("^" + dns1035LabelFmt + "$") - -// IsDNS1035Label tests for a string that conforms to the definition of a label in -// DNS (RFC 1035). -func IsDNS1035Label(value string) []string { - var errs []string - if len(value) > DNS1035LabelMaxLength { - errs = append(errs, MaxLenError(DNS1035LabelMaxLength)) - } - if !dns1035LabelRegexp.MatchString(value) { - errs = append(errs, RegexError(dns1035LabelErrMsg, dns1035LabelFmt, "my-name", "abc-123")) - } - return errs -} - -// wildcard definition - RFC 1034 section 4.3.3. -// examples: -// - valid: *.bar.com, *.foo.bar.com -// - invalid: *.*.bar.com, *.foo.*.com, *bar.com, f*.bar.com, * -const wildcardDNS1123SubdomainFmt = "\\*\\." + dns1123SubdomainFmt -const wildcardDNS1123SubdomainErrMsg = "a wildcard DNS-1123 subdomain must start with '*.', followed by a valid DNS subdomain, which must consist of lower case alphanumeric characters, '-' or '.' and end with an alphanumeric character" - -// IsWildcardDNS1123Subdomain tests for a string that conforms to the definition of a -// wildcard subdomain in DNS (RFC 1034 section 4.3.3). -func IsWildcardDNS1123Subdomain(value string) []string { - wildcardDNS1123SubdomainRegexp := regexp.MustCompile("^" + wildcardDNS1123SubdomainFmt + "$") - - var errs []string - if len(value) > DNS1123SubdomainMaxLength { - errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) - } - if !wildcardDNS1123SubdomainRegexp.MatchString(value) { - errs = append(errs, RegexError(wildcardDNS1123SubdomainErrMsg, wildcardDNS1123SubdomainFmt, "*.example.com")) - } - return errs -} - -// MaxLenError returns a string explanation of a "string too long" validation -// failure. -func MaxLenError(length int) string { - return fmt.Sprintf("must be no more than %d characters", length) -} - -// RegexError returns a string explanation of a regex validation failure. -func RegexError(msg string, fmt string, examples ...string) string { - if len(examples) == 0 { - return msg + " (regex used for validation is '" + fmt + "')" - } - msg += " (e.g. 
" - for i := range examples { - if i > 0 { - msg += " or " - } - msg += "'" + examples[i] + "', " - } - msg += "regex used for validation is '" + fmt + "')" - return msg -} - -// EmptyError returns a string explanation of a "must not be empty" validation -// failure. -func EmptyError() string { - return "must be non-empty" -} - -func prefixEach(msgs []string, prefix string) []string { - for i := range msgs { - msgs[i] = prefix + msgs[i] - } - return msgs -} - -// InclusiveRangeError returns a string explanation of a numeric "must be -// between" validation failure. -func InclusiveRangeError(lo, hi int) string { - return fmt.Sprintf(`must be between %d and %d, inclusive`, lo, hi) -} diff --git a/vendor/tags.cncf.io/container-device-interface/internal/validation/validate.go b/vendor/tags.cncf.io/container-device-interface/internal/validation/validate.go deleted file mode 100644 index 5d9b55ff3f..0000000000 --- a/vendor/tags.cncf.io/container-device-interface/internal/validation/validate.go +++ /dev/null @@ -1,56 +0,0 @@ -/* - Copyright © The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package validation - -import ( - "fmt" - "strings" - - "tags.cncf.io/container-device-interface/internal/validation/k8s" -) - -// ValidateSpecAnnotations checks whether spec annotations are valid. -func ValidateSpecAnnotations(name string, any interface{}) error { - if any == nil { - return nil - } - - switch v := any.(type) { - case map[string]interface{}: - annotations := make(map[string]string) - for k, v := range v { - if s, ok := v.(string); ok { - annotations[k] = s - } else { - return fmt.Errorf("invalid annotation %v.%v; %v is not a string", name, k, any) - } - } - return validateSpecAnnotations(name, annotations) - } - - return nil -} - -// validateSpecAnnotations checks whether spec annotations are valid. -func validateSpecAnnotations(name string, annotations map[string]string) error { - path := "annotations" - if name != "" { - path = strings.Join([]string{name, path}, ".") - } - - return k8s.ValidateAnnotations(annotations, path) -} diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/annotations.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/annotations.go deleted file mode 100644 index a38b0f1bcf..0000000000 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/annotations.go +++ /dev/null @@ -1,141 +0,0 @@ -/* - Copyright © 2021-2022 The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/
-
-package cdi
-
-import (
-	"errors"
-	"fmt"
-	"strings"
-
-	"tags.cncf.io/container-device-interface/pkg/parser"
-)
-
-const (
-	// AnnotationPrefix is the prefix for CDI container annotation keys.
-	AnnotationPrefix = "cdi.k8s.io/"
-)
-
-// UpdateAnnotations updates annotations with a plugin-specific CDI device
-// injection request for the given devices. Upon any error a non-nil error
-// is returned and annotations are left intact. By convention plugin should
-// be in the format of "vendor.device-type".
-func UpdateAnnotations(annotations map[string]string, plugin string, deviceID string, devices []string) (map[string]string, error) {
-	key, err := AnnotationKey(plugin, deviceID)
-	if err != nil {
-		return annotations, fmt.Errorf("CDI annotation failed: %w", err)
-	}
-	if _, ok := annotations[key]; ok {
-		return annotations, fmt.Errorf("CDI annotation failed, key %q used", key)
-	}
-	value, err := AnnotationValue(devices)
-	if err != nil {
-		return annotations, fmt.Errorf("CDI annotation failed: %w", err)
-	}
-
-	if annotations == nil {
-		annotations = make(map[string]string)
-	}
-	annotations[key] = value
-
-	return annotations, nil
-}
-
-// ParseAnnotations parses annotations for CDI device injection requests.
-// The keys and devices from all such requests are collected into slices
-// which are returned as the result. All devices are expected to be fully
-// qualified CDI device names. If any device fails this check empty slices
-// are returned along with a non-nil error. The annotations are expected
-// to be formatted by, or in a compatible fashion to UpdateAnnotations().
-func ParseAnnotations(annotations map[string]string) ([]string, []string, error) {
-	var (
-		keys    []string
-		devices []string
-	)
-
-	for key, value := range annotations {
-		if !strings.HasPrefix(key, AnnotationPrefix) {
-			continue
-		}
-		for _, d := range strings.Split(value, ",") {
-			if !IsQualifiedName(d) {
-				return nil, nil, fmt.Errorf("invalid CDI device name %q", d)
-			}
-			devices = append(devices, d)
-		}
-		keys = append(keys, key)
-	}
-
-	return keys, devices, nil
-}
-
-// AnnotationKey returns a unique annotation key for a device allocation
-// by a K8s device plugin. pluginName should be in the format of
-// "vendor.device-type". deviceID is the ID of the device the plugin is
-// allocating. It is used to make sure that the generated key is unique
-// even if multiple allocations by a single plugin need to be annotated.
-func AnnotationKey(pluginName, deviceID string) (string, error) { - const maxNameLen = 63 - - if pluginName == "" { - return "", errors.New("invalid plugin name, empty") - } - if deviceID == "" { - return "", errors.New("invalid deviceID, empty") - } - - name := pluginName + "_" + strings.ReplaceAll(deviceID, "/", "_") - - if len(name) > maxNameLen { - return "", fmt.Errorf("invalid plugin+deviceID %q, too long", name) - } - - if c := rune(name[0]); !parser.IsAlphaNumeric(c) { - return "", fmt.Errorf("invalid name %q, first '%c' should be alphanumeric", - name, c) - } - if len(name) > 2 { - for _, c := range name[1 : len(name)-1] { - switch { - case parser.IsAlphaNumeric(c): - case c == '_' || c == '-' || c == '.': - default: - return "", fmt.Errorf("invalid name %q, invalid character '%c'", - name, c) - } - } - } - if c := rune(name[len(name)-1]); !parser.IsAlphaNumeric(c) { - return "", fmt.Errorf("invalid name %q, last '%c' should be alphanumeric", - name, c) - } - - return AnnotationPrefix + name, nil -} - -// AnnotationValue returns an annotation value for the given devices. -func AnnotationValue(devices []string) (string, error) { - value, sep := "", "" - for _, d := range devices { - if _, _, _, err := ParseQualifiedName(d); err != nil { - return "", err - } - value += sep + d - sep = "," - } - - return value, nil -} diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/cache.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/cache.go deleted file mode 100644 index c807b55fd4..0000000000 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/cache.go +++ /dev/null @@ -1,581 +0,0 @@ -/* - Copyright © 2021 The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cdi - -import ( - "errors" - "fmt" - "io/fs" - "os" - "path/filepath" - "sort" - "strings" - "sync" - - "github.com/fsnotify/fsnotify" - oci "github.com/opencontainers/runtime-spec/specs-go" - "tags.cncf.io/container-device-interface/internal/multierror" - cdi "tags.cncf.io/container-device-interface/specs-go" -) - -// Option is an option to change some aspect of default CDI behavior. -type Option func(*Cache) error - -// Cache stores CDI Specs loaded from Spec directories. -type Cache struct { - sync.Mutex - specDirs []string - specs map[string][]*Spec - devices map[string]*Device - errors map[string][]error - dirErrors map[string]error - - autoRefresh bool - watch *watch -} - -// WithAutoRefresh returns an option to control automatic Cache refresh. -// By default, auto-refresh is enabled, the list of Spec directories are -// monitored and the Cache is automatically refreshed whenever a change -// is detected. This option can be used to disable this behavior when a -// manually refreshed mode is preferable. -func WithAutoRefresh(autoRefresh bool) Option { - return func(c *Cache) error { - c.autoRefresh = autoRefresh - return nil - } -} - -// NewCache creates a new CDI Cache. The cache is populated from a set -// of CDI Spec directories. 
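
Tying the annotation helpers above together, a hypothetical caller might look like this (plugin name, device ID, and device names are illustrative):

package main

import (
	"fmt"

	"tags.cncf.io/container-device-interface/pkg/cdi"
)

func main() {
	// Internally, AnnotationKey("nvidia.gpu", "dev0") yields
	// "cdi.k8s.io/nvidia.gpu_dev0", and AnnotationValue validates each
	// device as a fully qualified CDI device name.
	annotations, err := cdi.UpdateAnnotations(
		nil,                          // starting annotations; a nil map is allowed
		"nvidia.gpu",                 // plugin, by convention "vendor.device-type"
		"dev0",                       // device ID for this allocation
		[]string{"nvidia.com/gpu=0"}, // fully qualified CDI device names
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(annotations) // map[cdi.k8s.io/nvidia.gpu_dev0:nvidia.com/gpu=0]
}
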
These can be specified using a WithSpecDirs -// option. The default set of directories is exposed in DefaultSpecDirs. -func NewCache(options ...Option) (*Cache, error) { - c := &Cache{ - autoRefresh: true, - watch: &watch{}, - } - - WithSpecDirs(DefaultSpecDirs...)(c) - c.Lock() - defer c.Unlock() - - return c, c.configure(options...) -} - -// Configure applies options to the Cache. Updates and refreshes the -// Cache if options have changed. -func (c *Cache) Configure(options ...Option) error { - if len(options) == 0 { - return nil - } - - c.Lock() - defer c.Unlock() - - return c.configure(options...) -} - -// Configure the Cache. Start/stop CDI Spec directory watch, refresh -// the Cache if necessary. -func (c *Cache) configure(options ...Option) error { - var err error - - for _, o := range options { - if err = o(c); err != nil { - return fmt.Errorf("failed to apply cache options: %w", err) - } - } - - c.dirErrors = make(map[string]error) - - c.watch.stop() - if c.autoRefresh { - c.watch.setup(c.specDirs, c.dirErrors) - c.watch.start(&c.Mutex, c.refresh, c.dirErrors) - } - c.refresh() - - return nil -} - -// Refresh rescans the CDI Spec directories and refreshes the Cache. -// In manual refresh mode the cache is always refreshed. In auto- -// refresh mode the cache is only refreshed if it is out of date. -func (c *Cache) Refresh() error { - c.Lock() - defer c.Unlock() - - // force a refresh in manual mode - if refreshed, err := c.refreshIfRequired(!c.autoRefresh); refreshed { - return err - } - - // collect and return cached errors, much like refresh() does it - var result error - for _, errors := range c.errors { - result = multierror.Append(result, errors...) - } - return result -} - -// Refresh the Cache by rescanning CDI Spec directories and files. -func (c *Cache) refresh() error { - var ( - specs = map[string][]*Spec{} - devices = map[string]*Device{} - conflicts = map[string]struct{}{} - specErrors = map[string][]error{} - result []error - ) - - // collect errors per spec file path and once globally - collectError := func(err error, paths ...string) { - result = append(result, err) - for _, path := range paths { - specErrors[path] = append(specErrors[path], err) - } - } - // resolve conflicts based on device Spec priority (order of precedence) - resolveConflict := func(name string, dev *Device, old *Device) bool { - devSpec, oldSpec := dev.GetSpec(), old.GetSpec() - devPrio, oldPrio := devSpec.GetPriority(), oldSpec.GetPriority() - switch { - case devPrio > oldPrio: - return false - case devPrio == oldPrio: - devPath, oldPath := devSpec.GetPath(), oldSpec.GetPath() - collectError(fmt.Errorf("conflicting device %q (specs %q, %q)", - name, devPath, oldPath), devPath, oldPath) - conflicts[name] = struct{}{} - } - return true - } - - _ = scanSpecDirs(c.specDirs, func(path string, priority int, spec *Spec, err error) error { - path = filepath.Clean(path) - if err != nil { - collectError(fmt.Errorf("failed to load CDI Spec %w", err), path) - return nil - } - - vendor := spec.GetVendor() - specs[vendor] = append(specs[vendor], spec) - - for _, dev := range spec.devices { - qualified := dev.GetQualifiedName() - other, ok := devices[qualified] - if ok { - if resolveConflict(qualified, dev, other) { - continue - } - } - devices[qualified] = dev - } - - return nil - }) - - for conflict := range conflicts { - delete(devices, conflict) - } - - c.specs = specs - c.devices = devices - c.errors = specErrors - - return multierror.New(result...) 
-}
-
-// RefreshIfRequired triggers a refresh if necessary.
-func (c *Cache) refreshIfRequired(force bool) (bool, error) {
-	// We need to refresh if
-	// - it's forced by an explicit call to Refresh() in manual mode
-	// - a missing Spec dir appears (added to watch) in auto-refresh mode
-	if force || (c.autoRefresh && c.watch.update(c.dirErrors)) {
-		return true, c.refresh()
-	}
-	return false, nil
-}
-
-// InjectDevices injects the given qualified devices to an OCI Spec. It
-// returns any unresolvable devices and an error if injection fails for
-// any of the devices.
-func (c *Cache) InjectDevices(ociSpec *oci.Spec, devices ...string) ([]string, error) {
-	var unresolved []string
-
-	if ociSpec == nil {
-		return devices, fmt.Errorf("can't inject devices, nil OCI Spec")
-	}
-
-	c.Lock()
-	defer c.Unlock()
-
-	c.refreshIfRequired(false)
-
-	edits := &ContainerEdits{}
-	specs := map[*Spec]struct{}{}
-
-	for _, device := range devices {
-		d := c.devices[device]
-		if d == nil {
-			unresolved = append(unresolved, device)
-			continue
-		}
-		if _, ok := specs[d.GetSpec()]; !ok {
-			specs[d.GetSpec()] = struct{}{}
-			edits.Append(d.GetSpec().edits())
-		}
-		edits.Append(d.edits())
-	}
-
-	if unresolved != nil {
-		return unresolved, fmt.Errorf("unresolvable CDI devices %s",
-			strings.Join(unresolved, ", "))
-	}
-
-	if err := edits.Apply(ociSpec); err != nil {
-		return nil, fmt.Errorf("failed to inject devices: %w", err)
-	}
-
-	return nil, nil
-}
-
-// highestPrioritySpecDir returns the Spec directory with highest priority
-// and its priority.
-func (c *Cache) highestPrioritySpecDir() (string, int) {
-	if len(c.specDirs) == 0 {
-		return "", -1
-	}
-
-	prio := len(c.specDirs) - 1
-	dir := c.specDirs[prio]
-
-	return dir, prio
-}
-
-// WriteSpec writes a Spec file with the given content into the highest
-// priority Spec directory. If name has a "json" or "yaml" extension it
-// chooses the encoding. Otherwise the default YAML encoding is used.
-func (c *Cache) WriteSpec(raw *cdi.Spec, name string) error {
-	var (
-		specDir string
-		path    string
-		prio    int
-		spec    *Spec
-		err     error
-	)
-
-	specDir, prio = c.highestPrioritySpecDir()
-	if specDir == "" {
-		return errors.New("no Spec directories to write to")
-	}
-
-	path = filepath.Join(specDir, name)
-	if ext := filepath.Ext(path); ext != ".json" && ext != ".yaml" {
-		path += defaultSpecExt
-	}
-
-	spec, err = newSpec(raw, path, prio)
-	if err != nil {
-		return err
-	}
-
-	return spec.write(true)
-}
-
-// RemoveSpec removes a Spec with the given name from the highest
-// priority Spec directory. This function can be used to remove a
-// Spec previously written by WriteSpec(). If the file exists and
-// its removal fails RemoveSpec returns an error.
-func (c *Cache) RemoveSpec(name string) error {
-	var (
-		specDir string
-		path    string
-		err     error
-	)
-
-	specDir, _ = c.highestPrioritySpecDir()
-	if specDir == "" {
-		return errors.New("no Spec directories to remove from")
-	}
-
-	path = filepath.Join(specDir, name)
-	if ext := filepath.Ext(path); ext != ".json" && ext != ".yaml" {
-		path += defaultSpecExt
-	}
-
-	err = os.Remove(path)
-	if err != nil && errors.Is(err, fs.ErrNotExist) {
-		err = nil
-	}
-
-	return err
-}
-
-// GetDevice returns the cached device for the given qualified name.
-func (c *Cache) GetDevice(device string) *Device {
-	c.Lock()
-	defer c.Unlock()
-
-	c.refreshIfRequired(false)
-
-	return c.devices[device]
-}
-
-// ListDevices lists all cached devices by qualified name.
-func (c *Cache) ListDevices() []string { - var devices []string - - c.Lock() - defer c.Unlock() - - c.refreshIfRequired(false) - - for name := range c.devices { - devices = append(devices, name) - } - sort.Strings(devices) - - return devices -} - -// ListVendors lists all vendors known to the cache. -func (c *Cache) ListVendors() []string { - var vendors []string - - c.Lock() - defer c.Unlock() - - c.refreshIfRequired(false) - - for vendor := range c.specs { - vendors = append(vendors, vendor) - } - sort.Strings(vendors) - - return vendors -} - -// ListClasses lists all device classes known to the cache. -func (c *Cache) ListClasses() []string { - var ( - cmap = map[string]struct{}{} - classes []string - ) - - c.Lock() - defer c.Unlock() - - c.refreshIfRequired(false) - - for _, specs := range c.specs { - for _, spec := range specs { - cmap[spec.GetClass()] = struct{}{} - } - } - for class := range cmap { - classes = append(classes, class) - } - sort.Strings(classes) - - return classes -} - -// GetVendorSpecs returns all specs for the given vendor. -func (c *Cache) GetVendorSpecs(vendor string) []*Spec { - c.Lock() - defer c.Unlock() - - c.refreshIfRequired(false) - - return c.specs[vendor] -} - -// GetSpecErrors returns all errors encountered for the spec during the -// last cache refresh. -func (c *Cache) GetSpecErrors(spec *Spec) []error { - var errors []error - - c.Lock() - defer c.Unlock() - - if errs, ok := c.errors[spec.GetPath()]; ok { - errors = make([]error, len(errs)) - copy(errors, errs) - } - - return errors -} - -// GetErrors returns all errors encountered during the last -// cache refresh. -func (c *Cache) GetErrors() map[string][]error { - c.Lock() - defer c.Unlock() - - errors := map[string][]error{} - for path, errs := range c.errors { - errors[path] = errs - } - for path, err := range c.dirErrors { - errors[path] = []error{err} - } - - return errors -} - -// GetSpecDirectories returns the CDI Spec directories currently in use. -func (c *Cache) GetSpecDirectories() []string { - c.Lock() - defer c.Unlock() - - dirs := make([]string, len(c.specDirs)) - copy(dirs, c.specDirs) - return dirs -} - -// GetSpecDirErrors returns any errors related to configured Spec directories. -func (c *Cache) GetSpecDirErrors() map[string]error { - if c.dirErrors == nil { - return nil - } - - c.Lock() - defer c.Unlock() - - errors := make(map[string]error) - for dir, err := range c.dirErrors { - errors[dir] = err - } - return errors -} - -// Our fsnotify helper wrapper. -type watch struct { - watcher *fsnotify.Watcher - tracked map[string]bool -} - -// Setup monitoring for the given Spec directories. -func (w *watch) setup(dirs []string, dirErrors map[string]error) { - var ( - dir string - err error - ) - w.tracked = make(map[string]bool) - for _, dir = range dirs { - w.tracked[dir] = false - } - - w.watcher, err = fsnotify.NewWatcher() - if err != nil { - for _, dir := range dirs { - dirErrors[dir] = fmt.Errorf("failed to create watcher: %w", err) - } - return - } - - w.update(dirErrors) -} - -// Start watching Spec directories for relevant changes. -func (w *watch) start(m *sync.Mutex, refresh func() error, dirErrors map[string]error) { - go w.watch(w.watcher, m, refresh, dirErrors) -} - -// Stop watching directories. -func (w *watch) stop() { - if w.watcher == nil { - return - } - - w.watcher.Close() - w.tracked = nil -} - -// Watch Spec directory changes, triggering a refresh if necessary. 
-func (w *watch) watch(fsw *fsnotify.Watcher, m *sync.Mutex, refresh func() error, dirErrors map[string]error) { - watch := fsw - if watch == nil { - return - } - for { - select { - case event, ok := <-watch.Events: - if !ok { - return - } - - if (event.Op & (fsnotify.Rename | fsnotify.Remove | fsnotify.Write)) == 0 { - continue - } - if event.Op == fsnotify.Write { - if ext := filepath.Ext(event.Name); ext != ".json" && ext != ".yaml" { - continue - } - } - - m.Lock() - if event.Op == fsnotify.Remove && w.tracked[event.Name] { - w.update(dirErrors, event.Name) - } else { - w.update(dirErrors) - } - refresh() - m.Unlock() - - case _, ok := <-watch.Errors: - if !ok { - return - } - } - } -} - -// Update watch with pending/missing or removed directories. -func (w *watch) update(dirErrors map[string]error, removed ...string) bool { - var ( - dir string - ok bool - err error - update bool - ) - - for dir, ok = range w.tracked { - if ok { - continue - } - - err = w.watcher.Add(dir) - if err == nil { - w.tracked[dir] = true - delete(dirErrors, dir) - update = true - } else { - w.tracked[dir] = false - dirErrors[dir] = fmt.Errorf("failed to monitor for changes: %w", err) - } - } - - for _, dir = range removed { - w.tracked[dir] = false - dirErrors[dir] = errors.New("directory removed") - update = true - } - - return update -} diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/cache_test_unix.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/cache_test_unix.go deleted file mode 100644 index 0ee5fb86f5..0000000000 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/cache_test_unix.go +++ /dev/null @@ -1,26 +0,0 @@ -//go:build !windows -// +build !windows - -/* - Copyright © 2021 The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cdi - -import "syscall" - -func osSync() { - syscall.Sync() -} diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/cache_test_windows.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/cache_test_windows.go deleted file mode 100644 index c6dabf5fa8..0000000000 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/cache_test_windows.go +++ /dev/null @@ -1,22 +0,0 @@ -//go:build windows -// +build windows - -/* - Copyright © 2021 The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/
-
-package cdi
-
-func osSync() {}
diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/container-edits.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/container-edits.go
deleted file mode 100644
index 688ddf78b6..0000000000
--- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/container-edits.go
+++ /dev/null
@@ -1,332 +0,0 @@
-/*
-   Copyright © 2021 The CDI Authors
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package cdi
-
-import (
-	"errors"
-	"fmt"
-	"os"
-	"path/filepath"
-	"sort"
-	"strings"
-
-	oci "github.com/opencontainers/runtime-spec/specs-go"
-	ocigen "github.com/opencontainers/runtime-tools/generate"
-	"tags.cncf.io/container-device-interface/specs-go"
-)
-
-const (
-	// PrestartHook is the name of the OCI "prestart" hook.
-	PrestartHook = "prestart"
-	// CreateRuntimeHook is the name of the OCI "createRuntime" hook.
-	CreateRuntimeHook = "createRuntime"
-	// CreateContainerHook is the name of the OCI "createContainer" hook.
-	CreateContainerHook = "createContainer"
-	// StartContainerHook is the name of the OCI "startContainer" hook.
-	StartContainerHook = "startContainer"
-	// PoststartHook is the name of the OCI "poststart" hook.
-	PoststartHook = "poststart"
-	// PoststopHook is the name of the OCI "poststop" hook.
-	PoststopHook = "poststop"
-)
-
-var (
-	// Names of recognized hooks.
-	validHookNames = map[string]struct{}{
-		PrestartHook:        {},
-		CreateRuntimeHook:   {},
-		CreateContainerHook: {},
-		StartContainerHook:  {},
-		PoststartHook:       {},
-		PoststopHook:        {},
-	}
-)
-
-// ContainerEdits represent updates to be applied to an OCI Spec.
-// These updates can be specific to a CDI device, or they can be
-// specific to a CDI Spec. In the former case these edits should
-// be applied to all OCI Specs where the corresponding CDI device
-// is injected. In the latter case, these edits should be applied
-// to all OCI Specs where at least one device from the CDI Spec
-// is injected.
-type ContainerEdits struct {
-	*specs.ContainerEdits
-}
-
-// Apply edits to the given OCI Spec. Updates the OCI Spec in place.
-// Returns an error if the update fails.
-func (e *ContainerEdits) Apply(spec *oci.Spec) error { - if spec == nil { - return errors.New("can't edit nil OCI Spec") - } - if e == nil || e.ContainerEdits == nil { - return nil - } - - specgen := ocigen.NewFromSpec(spec) - if len(e.Env) > 0 { - specgen.AddMultipleProcessEnv(e.Env) - } - - for _, d := range e.DeviceNodes { - dn := DeviceNode{d} - - err := dn.fillMissingInfo() - if err != nil { - return err - } - dev := d.ToOCI() - if dev.UID == nil && spec.Process != nil { - if uid := spec.Process.User.UID; uid > 0 { - dev.UID = &uid - } - } - if dev.GID == nil && spec.Process != nil { - if gid := spec.Process.User.GID; gid > 0 { - dev.GID = &gid - } - } - - specgen.RemoveDevice(dev.Path) - specgen.AddDevice(dev) - - if dev.Type == "b" || dev.Type == "c" { - access := d.Permissions - if access == "" { - access = "rwm" - } - specgen.AddLinuxResourcesDevice(true, dev.Type, &dev.Major, &dev.Minor, access) - } - } - - if len(e.Mounts) > 0 { - for _, m := range e.Mounts { - specgen.RemoveMount(m.ContainerPath) - specgen.AddMount(m.ToOCI()) - } - sortMounts(&specgen) - } - - for _, h := range e.Hooks { - switch h.HookName { - case PrestartHook: - specgen.AddPreStartHook(h.ToOCI()) - case PoststartHook: - specgen.AddPostStartHook(h.ToOCI()) - case PoststopHook: - specgen.AddPostStopHook(h.ToOCI()) - // TODO: Maybe runtime-tools/generate should be updated with these... - case CreateRuntimeHook: - ensureOCIHooks(spec) - spec.Hooks.CreateRuntime = append(spec.Hooks.CreateRuntime, h.ToOCI()) - case CreateContainerHook: - ensureOCIHooks(spec) - spec.Hooks.CreateContainer = append(spec.Hooks.CreateContainer, h.ToOCI()) - case StartContainerHook: - ensureOCIHooks(spec) - spec.Hooks.StartContainer = append(spec.Hooks.StartContainer, h.ToOCI()) - default: - return fmt.Errorf("unknown hook name %q", h.HookName) - } - } - - return nil -} - -// Validate container edits. -func (e *ContainerEdits) Validate() error { - if e == nil || e.ContainerEdits == nil { - return nil - } - - if err := ValidateEnv(e.Env); err != nil { - return fmt.Errorf("invalid container edits: %w", err) - } - for _, d := range e.DeviceNodes { - if err := (&DeviceNode{d}).Validate(); err != nil { - return err - } - } - for _, h := range e.Hooks { - if err := (&Hook{h}).Validate(); err != nil { - return err - } - } - for _, m := range e.Mounts { - if err := (&Mount{m}).Validate(); err != nil { - return err - } - } - - return nil -} - -// Append other edits into this one. If called with a nil receiver, -// allocates and returns newly allocated edits. -func (e *ContainerEdits) Append(o *ContainerEdits) *ContainerEdits { - if o == nil || o.ContainerEdits == nil { - return e - } - if e == nil { - e = &ContainerEdits{} - } - if e.ContainerEdits == nil { - e.ContainerEdits = &specs.ContainerEdits{} - } - - e.Env = append(e.Env, o.Env...) - e.DeviceNodes = append(e.DeviceNodes, o.DeviceNodes...) - e.Hooks = append(e.Hooks, o.Hooks...) - e.Mounts = append(e.Mounts, o.Mounts...) - - return e -} - -// isEmpty returns true if these edits are empty. This is valid in a -// global Spec context but invalid in a Device context. -func (e *ContainerEdits) isEmpty() bool { - if e == nil { - return false - } - return len(e.Env)+len(e.DeviceNodes)+len(e.Hooks)+len(e.Mounts) == 0 -} - -// ValidateEnv validates the given environment variables. 
-func ValidateEnv(env []string) error {
-	for _, v := range env {
-		if strings.IndexByte(v, byte('=')) <= 0 {
-			return fmt.Errorf("invalid environment variable %q", v)
-		}
-	}
-	return nil
-}
-
-// DeviceNode is a CDI Spec DeviceNode wrapper, used for validating DeviceNodes.
-type DeviceNode struct {
-	*specs.DeviceNode
-}
-
-// Validate a CDI Spec DeviceNode.
-func (d *DeviceNode) Validate() error {
-	validTypes := map[string]struct{}{
-		"":  {},
-		"b": {},
-		"c": {},
-		"u": {},
-		"p": {},
-	}
-
-	if d.Path == "" {
-		return errors.New("invalid (empty) device path")
-	}
-	if _, ok := validTypes[d.Type]; !ok {
-		return fmt.Errorf("device %q: invalid type %q", d.Path, d.Type)
-	}
-	for _, bit := range d.Permissions {
-		if bit != 'r' && bit != 'w' && bit != 'm' {
-			return fmt.Errorf("device %q: invalid permissions %q",
-				d.Path, d.Permissions)
-		}
-	}
-	return nil
-}
-
-// Hook is a CDI Spec Hook wrapper, used for validating hooks.
-type Hook struct {
-	*specs.Hook
-}
-
-// Validate a hook.
-func (h *Hook) Validate() error {
-	if _, ok := validHookNames[h.HookName]; !ok {
-		return fmt.Errorf("invalid hook name %q", h.HookName)
-	}
-	if h.Path == "" {
-		return fmt.Errorf("invalid hook %q with empty path", h.HookName)
-	}
-	if err := ValidateEnv(h.Env); err != nil {
-		return fmt.Errorf("invalid hook %q: %w", h.HookName, err)
-	}
-	return nil
-}
-
-// Mount is a CDI Mount wrapper, used for validating mounts.
-type Mount struct {
-	*specs.Mount
-}
-
-// Validate a mount.
-func (m *Mount) Validate() error {
-	if m.HostPath == "" {
-		return errors.New("invalid mount, empty host path")
-	}
-	if m.ContainerPath == "" {
-		return errors.New("invalid mount, empty container path")
-	}
-	return nil
-}
-
-// Ensure OCI Spec hooks are not nil so we can add hooks.
-func ensureOCIHooks(spec *oci.Spec) {
-	if spec.Hooks == nil {
-		spec.Hooks = &oci.Hooks{}
-	}
-}
-
-// sortMounts sorts the mounts in the given OCI Spec.
-func sortMounts(specgen *ocigen.Generator) {
-	mounts := specgen.Mounts()
-	specgen.ClearMounts()
-	sort.Sort(orderedMounts(mounts))
-	specgen.Config.Mounts = mounts
-}
-
-// orderedMounts defines how to sort an OCI Spec Mount slice.
-// This is almost the same implementation as used by CRI-O and Docker,
-// with a minor tweak for stable sorting order (easier to test):
-//
-//	https://github.com/moby/moby/blob/17.05.x/daemon/volumes.go#L26
-type orderedMounts []oci.Mount
-
-// Len returns the number of mounts. Used in sorting.
-func (m orderedMounts) Len() int {
-	return len(m)
-}
-
-// Less returns true if the number of parts (a/b/c would be 3 parts) in the
-// mount indexed by parameter 1 is less than that of the mount indexed by
-// parameter 2. Used in sorting.
-func (m orderedMounts) Less(i, j int) bool {
-	ip, jp := m.parts(i), m.parts(j)
-	if ip < jp {
-		return true
-	}
-	if jp < ip {
-		return false
-	}
-	return m[i].Destination < m[j].Destination
-}
-
-// Swap swaps two items in an array of mounts. Used in sorting
-func (m orderedMounts) Swap(i, j int) {
-	m[i], m[j] = m[j], m[i]
-}
-
-// parts returns the number of parts in the destination of a mount. Used in sorting.
-func (m orderedMounts) parts(i int) int { - return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator)) -} diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/container-edits_unix.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/container-edits_unix.go deleted file mode 100644 index 59977b2171..0000000000 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/container-edits_unix.go +++ /dev/null @@ -1,88 +0,0 @@ -//go:build !windows -// +build !windows - -/* - Copyright © 2021 The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cdi - -import ( - "errors" - "fmt" - - "golang.org/x/sys/unix" -) - -const ( - blockDevice = "b" - charDevice = "c" // or "u" - fifoDevice = "p" -) - -// deviceInfoFromPath takes the path to a device and returns its type, -// major and minor device numbers. -// -// It was adapted from https://github.com/opencontainers/runc/blob/v1.1.9/libcontainer/devices/device_unix.go#L30-L69 -func deviceInfoFromPath(path string) (devType string, major, minor int64, _ error) { - var stat unix.Stat_t - err := unix.Lstat(path, &stat) - if err != nil { - return "", 0, 0, err - } - switch stat.Mode & unix.S_IFMT { - case unix.S_IFBLK: - devType = blockDevice - case unix.S_IFCHR: - devType = charDevice - case unix.S_IFIFO: - devType = fifoDevice - default: - return "", 0, 0, errors.New("not a device node") - } - devNumber := uint64(stat.Rdev) //nolint:unconvert // Rdev is uint32 on e.g. MIPS. - return devType, int64(unix.Major(devNumber)), int64(unix.Minor(devNumber)), nil -} - -// fillMissingInfo fills in missing mandatory attributes from the host device. -func (d *DeviceNode) fillMissingInfo() error { - if d.HostPath == "" { - d.HostPath = d.Path - } - - if d.Type != "" && (d.Major != 0 || d.Type == "p") { - return nil - } - - deviceType, major, minor, err := deviceInfoFromPath(d.HostPath) - if err != nil { - return fmt.Errorf("failed to stat CDI host device %q: %w", d.HostPath, err) - } - - if d.Type == "" { - d.Type = deviceType - } else { - if d.Type != deviceType { - return fmt.Errorf("CDI device (%q, %q), host type mismatch (%s, %s)", - d.Path, d.HostPath, d.Type, deviceType) - } - } - if d.Major == 0 && d.Type != "p" { - d.Major = major - d.Minor = minor - } - - return nil -} diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/container-edits_windows.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/container-edits_windows.go deleted file mode 100644 index fd91afa926..0000000000 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/container-edits_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -//go:build windows -// +build windows - -/* - Copyright © 2021 The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package cdi
-
-import "fmt"
-
-// fillMissingInfo fills in missing mandatory attributes from the host device.
-func (d *DeviceNode) fillMissingInfo() error {
-	return fmt.Errorf("unimplemented")
-}
diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/device.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/device.go
deleted file mode 100644
index 00be48dd5e..0000000000
--- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/device.go
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
-   Copyright © 2021 The CDI Authors
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package cdi
-
-import (
-	"fmt"
-
-	oci "github.com/opencontainers/runtime-spec/specs-go"
-	"tags.cncf.io/container-device-interface/internal/validation"
-	"tags.cncf.io/container-device-interface/pkg/parser"
-	cdi "tags.cncf.io/container-device-interface/specs-go"
-)
-
-// Device represents a CDI device of a Spec.
-type Device struct {
-	*cdi.Device
-	spec *Spec
-}
-
-// Create a new Device, associate it with the given Spec.
-func newDevice(spec *Spec, d cdi.Device) (*Device, error) {
-	dev := &Device{
-		Device: &d,
-		spec:   spec,
-	}
-
-	if err := dev.validate(); err != nil {
-		return nil, err
-	}
-
-	return dev, nil
-}
-
-// GetSpec returns the Spec this device is defined in.
-func (d *Device) GetSpec() *Spec {
-	return d.spec
-}
-
-// GetQualifiedName returns the qualified name for this device.
-func (d *Device) GetQualifiedName() string {
-	return parser.QualifiedName(d.spec.GetVendor(), d.spec.GetClass(), d.Name)
-}
-
-// ApplyEdits applies the device-specific container edits to an OCI Spec.
-func (d *Device) ApplyEdits(ociSpec *oci.Spec) error {
-	return d.edits().Apply(ociSpec)
-}
-
-// edits returns the applicable container edits for this spec.
-func (d *Device) edits() *ContainerEdits {
-	return &ContainerEdits{&d.ContainerEdits}
-}
-
-// Validate the device.
-func (d *Device) validate() error {
-	if err := ValidateDeviceName(d.Name); err != nil {
-		return err
-	}
-	name := d.Name
-	if d.spec != nil {
-		name = d.GetQualifiedName()
-	}
-	if err := validation.ValidateSpecAnnotations(name, d.Annotations); err != nil {
-		return err
-	}
-	edits := d.edits()
-	if edits.isEmpty() {
-		return fmt.Errorf("invalid device, empty device edits")
-	}
-	if err := edits.Validate(); err != nil {
-		return fmt.Errorf("invalid device %q: %w", d.Name, err)
-	}
-	return nil
-}
diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/doc.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/doc.go
deleted file mode 100644
index 1897ef1fca..0000000000
--- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/doc.go
+++ /dev/null
@@ -1,276 +0,0 @@
-// Package cdi has the primary purpose of providing an API for
-// interacting with CDI and consuming CDI devices.
-//
-// For more information about Container Device Interface, please refer to
-// https://tags.cncf.io/container-device-interface
-//
-// # Container Device Interface
-//
-// Container Device Interface, or CDI for short, provides comprehensive
-// third party device support for container runtimes. CDI uses vendor
-// provided specification files, CDI Specs for short, to describe how a
-// container's runtime environment should be modified when one or more
-// of the vendor-specific devices is injected into the container. Beyond
-// describing the low level platform-specific details of how to gain
-// basic access to a device, CDI Specs allow more fine-grained device
-// initialization, and the automatic injection of any necessary vendor-
-// or device-specific software that might be required for a container
-// to use a device or take full advantage of it.
-//
-// In the CDI device model containers request access to a device using
-// fully qualified device names, qualified names for short, consisting of
-// a vendor identifier, a device class and a device name or identifier.
-// These pieces of information together uniquely identify a device among
-// all device vendors, classes and device instances.
-//
-// This package implements an API for easy consumption of CDI. The API
-// implements discovery, loading and caching of CDI Specs and injection
-// of CDI devices into containers. This is the most common functionality
-// the vast majority of CDI consumers need. The API should be usable both
-// by OCI runtime clients and runtime implementations.
-//
-// # CDI Registry
-//
-// The primary interface to interact with CDI devices is the Registry. It
-// is essentially a cache of all Specs and devices discovered in standard
-// CDI directories on the host. The registry has two main functions:
-// injecting devices into an OCI Spec and refreshing the cache of CDI
-// Specs and devices.
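The Registry described in the package documentation above is a thin wrapper around the Cache type from the deleted cache.go. For orientation, here is a minimal sketch (not code from this repository) of how a consumer typically drives that Cache directly; every identifier used (NewCache, WithSpecDirs, WithAutoRefresh, DefaultSpecDirs, Refresh, ListDevices) is defined in the files removed by this diff:

```go
package main

import (
	"fmt"

	"tags.cncf.io/container-device-interface/pkg/cdi"
)

func main() {
	// Build a cache over the default Spec directories (/etc/cdi and
	// /var/run/cdi), with auto-refresh disabled so rescans only happen
	// on explicit Refresh() calls.
	cache, err := cdi.NewCache(
		cdi.WithSpecDirs(cdi.DefaultSpecDirs...),
		cdi.WithAutoRefresh(false),
	)
	if err != nil {
		panic(err)
	}

	// A refresh error does not necessarily make the cache unusable:
	// a parse error in one vendor's Spec leaves other vendors intact.
	if err := cache.Refresh(); err != nil {
		fmt.Println("refresh:", err)
	}

	// List every device known to the cache by fully qualified name,
	// e.g. "vendor.com/gpu=gpu0".
	for _, device := range cache.ListDevices() {
		fmt.Println(device)
	}
}
```

Disabling auto-refresh, as sketched here, trades automatic spec-directory monitoring for deterministic refresh points, which is the manually refreshed mode the WithAutoRefresh option documentation describes.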
-//
-// # Device Injection
-//
-// Using the Registry one can inject CDI devices into a container with code
-// similar to the following snippet:
-//
-//	import (
-//		"fmt"
-//		"strings"
-//
-//		log "github.com/sirupsen/logrus"
-//
-//		"tags.cncf.io/container-device-interface/pkg/cdi"
-//		oci "github.com/opencontainers/runtime-spec/specs-go"
-//	)
-//
-//	func injectCDIDevices(spec *oci.Spec, devices []string) error {
-//		log.Debug("pristine OCI Spec: %s", dumpSpec(spec))
-//
-//		unresolved, err := cdi.GetRegistry().InjectDevices(spec, devices)
-//		if err != nil {
-//			return fmt.Errorf("CDI device injection failed: %w", err)
-//		}
-//
-//		log.Debug("CDI-updated OCI Spec: %s", dumpSpec(spec))
-//		return nil
-//	}
-//
-// # Cache Refresh
-//
-// By default the CDI Spec cache monitors the configured Spec directories
-// and automatically refreshes itself when necessary. This behavior can be
-// disabled using the WithAutoRefresh(false) option.
-//
-// Failure to set up monitoring for a Spec directory causes the directory to
-// get ignored and an error to be recorded among the Spec directory errors.
-// These errors can be queried using the GetSpecDirErrors() function. If the
-// error condition is transient, for instance a missing directory which later
-// gets created, the corresponding error will be removed once the condition
-// is over.
-//
-// With auto-refresh enabled, injecting any CDI devices can be done without
-// an explicit call to Refresh(), using a code snippet similar to the one
-// shown above.
-//
-// In a runtime implementation one typically wants to make sure the
-// CDI Spec cache is up to date before performing device injection.
-// A code snippet similar to the following accomplishes that:
-//
-//	import (
-//		"fmt"
-//		"strings"
-//
-//		log "github.com/sirupsen/logrus"
-//
-//		"tags.cncf.io/container-device-interface/pkg/cdi"
-//		oci "github.com/opencontainers/runtime-spec/specs-go"
-//	)
-//
-//	func injectCDIDevices(spec *oci.Spec, devices []string) error {
-//		registry := cdi.GetRegistry()
-//
-//		if err := registry.Refresh(); err != nil {
-//			// Note:
-//			//   It is up to the implementation to decide whether
-//			//   to abort injection on errors. A failed Refresh()
-//			//   does not necessarily render the registry unusable.
-//			//   For instance, a parse error in a Spec file for
-//			//   vendor A does not have any effect on devices of
-//			//   vendor B...
-//			log.Warnf("pre-injection Refresh() failed: %v", err)
-//		}
-//
-//		log.Debug("pristine OCI Spec: %s", dumpSpec(spec))
-//
-//		unresolved, err := registry.InjectDevices(spec, devices)
-//		if err != nil {
-//			return fmt.Errorf("CDI device injection failed: %w", err)
-//		}
-//
-//		log.Debug("CDI-updated OCI Spec: %s", dumpSpec(spec))
-//		return nil
-//	}
-//
-// # Generated Spec Files, Multiple Directories, Device Precedence
-//
-// It is often necessary to generate Spec files dynamically. On some
-// systems the available or usable set of CDI devices might change
-// dynamically which then needs to be reflected in CDI Specs. For
-// some device classes it makes sense to enumerate the available
-// devices at every boot and generate Spec file entries for each
-// device found. Some CDI devices might need special client- or
-// request-specific configuration which can only be fulfilled by
-// dynamically generated client-specific entries in transient Spec
-// files.
-//
-// CDI can collect Spec files from multiple directories. Spec files are
-// automatically assigned priorities according to which directory they
-// were loaded from. The later a directory occurs in the list of CDI
-// directories to scan, the higher the priority assigned to Spec files
-// loaded from that directory. When two or more Spec files define the
-// same device, the conflict is resolved by choosing the definition from
-// the Spec file with the highest priority.
-//
-// The default CDI directory configuration is chosen to encourage
-// separating dynamically generated CDI Spec files from static ones.
-// The default directories are '/etc/cdi' and '/var/run/cdi'. By putting
-// dynamically generated Spec files under '/var/run/cdi', those take
-// precedence over static ones in '/etc/cdi'. With this scheme, static
-// Spec files, typically installed by distro-specific packages, go into
-// '/etc/cdi' while all the dynamically generated Spec files, transient
-// or other, go into '/var/run/cdi'.
-//
-// # Spec File Generation
-//
-// CDI offers two functions for writing and removing dynamically generated
-// Specs from CDI Spec directories. These functions, WriteSpec() and
-// RemoveSpec(), implicitly follow the principle of separating dynamic Specs
-// from the rest and therefore always write to and remove Specs from the
-// last configured directory.
-//
-// Corresponding functions are also provided for generating names for Spec
-// files. These functions follow a simple naming convention to ensure that
-// multiple entities generating Spec files simultaneously on the same host
-// do not end up using conflicting Spec file names. GenerateSpecName(),
-// GenerateNameForSpec(), GenerateTransientSpecName(), and
-// GenerateNameForTransientSpec() all generate names which can be passed
-// as such to WriteSpec() and subsequently to RemoveSpec().
-//
-// Generating a Spec file for a vendor/device class can be done with a
-// code snippet similar to the following:
-//
-//	import (
-//		"fmt"
-//		...
-//		"tags.cncf.io/container-device-interface/specs-go"
-//		"tags.cncf.io/container-device-interface/pkg/cdi"
-//	)
-//
-//	func generateDeviceSpecs() error {
-//		registry := cdi.GetRegistry()
-//		spec := &specs.Spec{
-//			Version: specs.CurrentVersion,
-//			Kind:    vendor+"/"+class,
-//		}
-//
-//		for _, dev := range enumerateDevices() {
-//			spec.Devices = append(spec.Devices, specs.Device{
-//				Name:           dev.Name,
-//				ContainerEdits: getContainerEditsForDevice(dev),
-//			})
-//		}
-//
-//		specName, err := cdi.GenerateNameForSpec(spec)
-//		if err != nil {
-//			return fmt.Errorf("failed to generate Spec name: %w", err)
-//		}
-//
-//		return registry.SpecDB().WriteSpec(spec, specName)
-//	}
-//
-// Similarly, generating and later cleaning up transient Spec files can be
-// done with code fragments similar to the following. These transient Spec
-// files are temporary Spec files with container-specific parametrization.
-// They are typically created before the associated container is created
-// and removed once that container is removed.
-//
-//	import (
-//		"fmt"
-//		...
-//		"tags.cncf.io/container-device-interface/specs-go"
-//		"tags.cncf.io/container-device-interface/pkg/cdi"
-//	)
-//
-//	func generateTransientSpec(ctr Container) error {
-//		registry := cdi.GetRegistry()
-//		devices := getContainerDevs(ctr, vendor, class)
-//		spec := &specs.Spec{
-//			Version: specs.CurrentVersion,
-//			Kind:    vendor+"/"+class,
-//		}
-//
-//		for _, dev := range devices {
-//			spec.Devices = append(spec.Devices, specs.Device{
-//				// the generated name needs to be unique within the
-//				// vendor/class domain on the host/node.
-//				Name:           generateUniqueDevName(dev, ctr),
-//				ContainerEdits: getEditsForContainer(dev),
-//			})
-//		}
-//
-//		// transientID is expected to guarantee that the Spec file name
-//		// generated using <vendor, class, transientID> is unique within
-//		// the host/node. If more than one device is allocated with the
-//		// same vendor/class domain, either all generated Spec entries
-//		// should go to a single Spec file (like in this sample snippet),
-//		// or transientID should be unique for each generated Spec file.
-//		transientID := getSomeSufficientlyUniqueIDForContainer(ctr)
-//		specName, err := cdi.GenerateNameForTransientSpec(vendor, class, transientID)
-//		if err != nil {
-//			return fmt.Errorf("failed to generate Spec name: %w", err)
-//		}
-//
-//		return registry.SpecDB().WriteSpec(spec, specName)
-//	}
-//
-//	func removeTransientSpec(ctr Container) error {
-//		registry := cdi.GetRegistry()
-//		transientID := getSomeSufficientlyUniqueIDForContainer(ctr)
-//		specName := cdi.GenerateNameForTransientSpec(vendor, class, transientID)
-//
-//		return registry.SpecDB().RemoveSpec(specName)
-//	}
-//
-// # CDI Spec Validation
-//
-// This package performs both syntactic and semantic validation of CDI
-// Spec file data when a Spec file is loaded via the registry or using
-// the ReadSpec API function. As part of the semantic verification, the
-// Spec file is verified against the CDI Spec JSON validation schema.
-//
-// If a valid externally provided JSON validation schema is found in
-// the filesystem at /etc/cdi/schema/schema.json it is loaded and used
-// as the default validation schema. If such a file is not found or
-// fails to load, an embedded no-op schema is used.
-//
-// The used validation schema can also be changed programmatically using
-// the SetSchema API convenience function. This function also accepts
-// the special "builtin" (BuiltinSchemaName) and "none" (NoneSchemaName)
-// schema names which switch the used schema to the in-repo validation
-// schema embedded into the binary or to the now-default no-op schema,
-// respectively. Other names are interpreted as the path to the actual
-// validation schema to load and use.
package cdi
diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/qualified-device.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/qualified-device.go
deleted file mode 100644
index 0bdfdc1661..0000000000
--- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/qualified-device.go
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
-   Copyright © 2021 The CDI Authors
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package cdi
-
-import (
-	"tags.cncf.io/container-device-interface/pkg/parser"
-)
-
-// QualifiedName returns the qualified name for a device.
-// The syntax for a qualified device name is
-//
-//	"<vendor>/<class>=<name>".
-//
-// A valid vendor and class name may contain the following runes:
-//
-//	'A'-'Z', 'a'-'z', '0'-'9', '.', '-', '_'.
-//
-// A valid device name may contain the following runes:
-//
-//	'A'-'Z', 'a'-'z', '0'-'9', '-', '_', '.', ':'
-//
-// Deprecated: use parser.QualifiedName instead
-func QualifiedName(vendor, class, name string) string {
-	return parser.QualifiedName(vendor, class, name)
-}
-
-// IsQualifiedName tests if a device name is qualified.
-//
-// Deprecated: use parser.IsQualifiedName instead
-func IsQualifiedName(device string) bool {
-	return parser.IsQualifiedName(device)
-}
-
-// ParseQualifiedName splits a qualified name into device vendor, class,
-// and name. If the device fails to parse as a qualified name, or if any
-// of the split components fail to pass syntax validation, vendor and
-// class are returned as empty, together with the verbatim input as the
-// name and an error describing the reason for failure.
-//
-// Deprecated: use parser.ParseQualifiedName instead
-func ParseQualifiedName(device string) (string, string, string, error) {
-	return parser.ParseQualifiedName(device)
-}
-
-// ParseDevice tries to split a device name into vendor, class, and name.
-// If this fails, for instance in the case of unqualified device names,
-// ParseDevice returns an empty vendor and class together with name set
-// to the verbatim input.
-//
-// Deprecated: use parser.ParseDevice instead
-func ParseDevice(device string) (string, string, string) {
-	return parser.ParseDevice(device)
-}
-
-// ParseQualifier splits a device qualifier into vendor and class.
-// The syntax for a device qualifier is
-//
-//	"<vendor>/<class>"
-//
-// If parsing fails, an empty vendor and the class set to the
-// verbatim input is returned.
-//
-// Deprecated: use parser.ParseQualifier instead
-func ParseQualifier(kind string) (string, string) {
-	return parser.ParseQualifier(kind)
-}
-
-// ValidateVendorName checks the validity of a vendor name.
-// A vendor name may contain the following ASCII characters:
-//   - upper- and lowercase letters ('A'-'Z', 'a'-'z')
-//   - digits ('0'-'9')
-//   - underscore, dash, and dot ('_', '-', and '.')
-//
-// Deprecated: use parser.ValidateVendorName instead
-func ValidateVendorName(vendor string) error {
-	return parser.ValidateVendorName(vendor)
-}
-
-// ValidateClassName checks the validity of a class name.
-// A class name may contain the following ASCII characters:
-//   - upper- and lowercase letters ('A'-'Z', 'a'-'z')
-//   - digits ('0'-'9')
-//   - underscore, dash, and dot ('_', '-', and '.')
-//
-// Deprecated: use parser.ValidateClassName instead
-func ValidateClassName(class string) error {
-	return parser.ValidateClassName(class)
-}
-
-// ValidateDeviceName checks the validity of a device name.
-// A device name may contain the following ASCII characters:
-//   - upper- and lowercase letters ('A'-'Z', 'a'-'z')
-//   - digits ('0'-'9')
-//   - underscore, dash, dot, colon ('_', '-', '.', ':')
-//
-// Deprecated: use parser.ValidateDeviceName instead
-func ValidateDeviceName(name string) error {
-	return parser.ValidateDeviceName(name)
-}
diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/registry.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/registry.go
deleted file mode 100644
index 7f12c777e8..0000000000
--- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/registry.go
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
-   Copyright © 2021 The CDI Authors
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package cdi
-
-import (
-	"sync"
-
-	oci "github.com/opencontainers/runtime-spec/specs-go"
-	cdi "tags.cncf.io/container-device-interface/specs-go"
-)
-
-// Registry keeps a cache of all CDI Specs installed or generated on
-// the host. Registry is the primary interface clients should use to
-// interact with CDI.
-//
-// The most commonly used Registry functions are for refreshing the
-// registry and injecting CDI devices into an OCI Spec.
-type Registry interface {
-	RegistryResolver
-	RegistryRefresher
-	DeviceDB() RegistryDeviceDB
-	SpecDB() RegistrySpecDB
-}
-
-// RegistryRefresher is the registry interface for refreshing the
-// cache of CDI Specs and devices.
-//
-// Configure reconfigures the registry with the given options.
-//
-// Refresh rescans all CDI Spec directories and updates the
-// state of the cache to reflect any changes. It returns any
-// errors encountered during the refresh.
-//
-// GetErrors returns all errors encountered for any of the scanned
-// Spec files during the last cache refresh.
-//
-// GetSpecDirectories returns the set up CDI Spec directories
-// currently in use. The directories are returned in the scan
-// order of Refresh().
-//
-// GetSpecDirErrors returns any errors related to the configured
-// Spec directories.
-type RegistryRefresher interface {
-	Configure(...Option) error
-	Refresh() error
-	GetErrors() map[string][]error
-	GetSpecDirectories() []string
-	GetSpecDirErrors() map[string]error
-}
-
-// RegistryResolver is the registry interface for injecting CDI
-// devices into an OCI Spec.
-//
-// InjectDevices takes an OCI Spec and injects into it a set of
-// CDI devices given by qualified name. It returns the names of
-// any unresolved devices and an error if injection fails.
-type RegistryResolver interface {
-	InjectDevices(spec *oci.Spec, device ...string) (unresolved []string, err error)
-}
-
-// RegistryDeviceDB is the registry interface for querying devices.
-//
-// GetDevice returns the CDI device for the given qualified name. If
-// the device is not found, GetDevice returns nil.
-//
-// ListDevices returns a slice with the names of all qualified devices
-// known. The returned slice is sorted.
-type RegistryDeviceDB interface {
-	GetDevice(device string) *Device
-	ListDevices() []string
-}
-
-// RegistrySpecDB is the registry interface for querying CDI Specs.
-//
-// ListVendors returns a slice with all vendors known. The returned
-// slice is sorted.
-//
-// ListClasses returns a slice with all classes known. The returned
-// slice is sorted.
-//
-// GetVendorSpecs returns a slice of all Specs for the vendor.
-//
-// GetSpecErrors returns any errors for the Spec encountered during
-// the last cache refresh.
-//
-// WriteSpec writes the Spec with the given content and name to the
-// last Spec directory.
-type RegistrySpecDB interface {
-	ListVendors() []string
-	ListClasses() []string
-	GetVendorSpecs(vendor string) []*Spec
-	GetSpecErrors(*Spec) []error
-	WriteSpec(raw *cdi.Spec, name string) error
-	RemoveSpec(name string) error
-}
-
-type registry struct {
-	*Cache
-}
-
-var _ Registry = &registry{}
-
-var (
-	reg      *registry
-	initOnce sync.Once
-)
-
-// GetRegistry returns the CDI registry. If any options are given, those
-// are applied to the registry.
-func GetRegistry(options ...Option) Registry {
-	var new bool
-	initOnce.Do(func() {
-		reg, _ = getRegistry(options...)
-		new = true
-	})
-	if !new && len(options) > 0 {
-		reg.Configure(options...)
-		reg.Refresh()
-	}
-	return reg
-}
-
-// DeviceDB returns the registry interface for querying devices.
-func (r *registry) DeviceDB() RegistryDeviceDB {
-	return r
-}
-
-// SpecDB returns the registry interface for querying Specs.
-func (r *registry) SpecDB() RegistrySpecDB {
-	return r
-}
-
-func getRegistry(options ...Option) (*registry, error) {
-	c, err := NewCache(options...)
-	return &registry{c}, err
-}
diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec-dirs.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec-dirs.go
deleted file mode 100644
index f339349bba..0000000000
--- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec-dirs.go
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
-   Copyright © 2021 The CDI Authors
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package cdi
-
-import (
-	"errors"
-	"io/fs"
-	"os"
-	"path/filepath"
-)
-
-const (
-	// DefaultStaticDir is the default directory for static CDI Specs.
-	DefaultStaticDir = "/etc/cdi"
-	// DefaultDynamicDir is the default directory for generated CDI Specs.
-	DefaultDynamicDir = "/var/run/cdi"
-)
-
-var (
-	// DefaultSpecDirs is the default Spec directory configuration.
-	// While altering this variable changes the package defaults,
-	// the preferred way of overriding the default directories is
-	// to use a WithSpecDirs option. Otherwise the change is only
-	// effective if it takes place before creating the Registry or
-	// other Cache instances.
-	DefaultSpecDirs = []string{DefaultStaticDir, DefaultDynamicDir}
-	// ErrStopScan can be returned from a ScanSpecFunc to stop the scan.
-	ErrStopScan = errors.New("stop Spec scan")
-)
-
-// WithSpecDirs returns an option to override the CDI Spec directories.
-func WithSpecDirs(dirs ...string) Option {
-	return func(c *Cache) error {
-		specDirs := make([]string, len(dirs))
-		for i, dir := range dirs {
-			specDirs[i] = filepath.Clean(dir)
-		}
-		c.specDirs = specDirs
-		return nil
-	}
-}
-
-// scanSpecFunc is a function for processing CDI Spec files.
-type scanSpecFunc func(string, int, *Spec, error) error
-
-// ScanSpecDirs scans the given directories looking for CDI Spec files,
-// which are all files with a '.json' or '.yaml' suffix.
For every Spec -// file discovered, ScanSpecDirs loads a Spec from the file then calls -// the scan function passing it the path to the file, the priority (the -// index of the directory in the slice of directories given), the Spec -// itself, and any error encountered while loading the Spec. -// -// Scanning stops once all files have been processed or when the scan -// function returns an error. The result of ScanSpecDirs is the error -// returned by the scan function, if any. The special error ErrStopScan -// can be used to terminate the scan gracefully without ScanSpecDirs -// returning an error. ScanSpecDirs silently skips any subdirectories. -func scanSpecDirs(dirs []string, scanFn scanSpecFunc) error { - var ( - spec *Spec - err error - ) - - for priority, dir := range dirs { - err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - // for initial stat failure Walk calls us with nil info - if info == nil { - if errors.Is(err, fs.ErrNotExist) { - return nil - } - return err - } - // first call from Walk is for dir itself, others we skip - if info.IsDir() { - if path == dir { - return nil - } - return filepath.SkipDir - } - - // ignore obviously non-Spec files - if ext := filepath.Ext(path); ext != ".json" && ext != ".yaml" { - return nil - } - - if err != nil { - return scanFn(path, priority, nil, err) - } - - spec, err = ReadSpec(path, priority) - return scanFn(path, priority, spec, err) - }) - - if err != nil && err != ErrStopScan { - return err - } - } - - return nil -} diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec.go deleted file mode 100644 index 8bd63cc529..0000000000 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec.go +++ /dev/null @@ -1,352 +0,0 @@ -/* - Copyright © 2021 The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cdi - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - "sync" - - oci "github.com/opencontainers/runtime-spec/specs-go" - "sigs.k8s.io/yaml" - - "tags.cncf.io/container-device-interface/internal/validation" - cdi "tags.cncf.io/container-device-interface/specs-go" -) - -const ( - // defaultSpecExt is the file extension for the default encoding. - defaultSpecExt = ".yaml" -) - -var ( - // Externally set CDI Spec validation function. - specValidator func(*cdi.Spec) error - validatorLock sync.RWMutex -) - -// Spec represents a single CDI Spec. It is usually loaded from a -// file and stored in a cache. The Spec has an associated priority. -// This priority is inherited from the associated priority of the -// CDI Spec directory that contains the CDI Spec file and is used -// to resolve conflicts if multiple CDI Spec files contain entries -// for the same fully qualified device. -type Spec struct { - *cdi.Spec - vendor string - class string - path string - priority int - devices map[string]*Device -} - -// ReadSpec reads the given CDI Spec file. 
The resulting Spec is -// assigned the given priority. If reading or parsing the Spec -// data fails ReadSpec returns a nil Spec and an error. -func ReadSpec(path string, priority int) (*Spec, error) { - data, err := ioutil.ReadFile(path) - switch { - case os.IsNotExist(err): - return nil, err - case err != nil: - return nil, fmt.Errorf("failed to read CDI Spec %q: %w", path, err) - } - - raw, err := ParseSpec(data) - if err != nil { - return nil, fmt.Errorf("failed to parse CDI Spec %q: %w", path, err) - } - if raw == nil { - return nil, fmt.Errorf("failed to parse CDI Spec %q, no Spec data", path) - } - - spec, err := newSpec(raw, path, priority) - if err != nil { - return nil, err - } - - return spec, nil -} - -// newSpec creates a new Spec from the given CDI Spec data. The -// Spec is marked as loaded from the given path with the given -// priority. If Spec data validation fails newSpec returns a nil -// Spec and an error. -func newSpec(raw *cdi.Spec, path string, priority int) (*Spec, error) { - err := validateSpec(raw) - if err != nil { - return nil, err - } - - spec := &Spec{ - Spec: raw, - path: filepath.Clean(path), - priority: priority, - } - - if ext := filepath.Ext(spec.path); ext != ".yaml" && ext != ".json" { - spec.path += defaultSpecExt - } - - spec.vendor, spec.class = ParseQualifier(spec.Kind) - - if spec.devices, err = spec.validate(); err != nil { - return nil, fmt.Errorf("invalid CDI Spec: %w", err) - } - - return spec, nil -} - -// Write the CDI Spec to the file associated with it during instantiation -// by newSpec() or ReadSpec(). -func (s *Spec) write(overwrite bool) error { - var ( - data []byte - dir string - tmp *os.File - err error - ) - - err = validateSpec(s.Spec) - if err != nil { - return err - } - - if filepath.Ext(s.path) == ".yaml" { - data, err = yaml.Marshal(s.Spec) - data = append([]byte("---\n"), data...) - } else { - data, err = json.Marshal(s.Spec) - } - if err != nil { - return fmt.Errorf("failed to marshal Spec file: %w", err) - } - - dir = filepath.Dir(s.path) - err = os.MkdirAll(dir, 0o755) - if err != nil { - return fmt.Errorf("failed to create Spec dir: %w", err) - } - - tmp, err = os.CreateTemp(dir, "spec.*.tmp") - if err != nil { - return fmt.Errorf("failed to create Spec file: %w", err) - } - _, err = tmp.Write(data) - tmp.Close() - if err != nil { - return fmt.Errorf("failed to write Spec file: %w", err) - } - - err = renameIn(dir, filepath.Base(tmp.Name()), filepath.Base(s.path), overwrite) - - if err != nil { - os.Remove(tmp.Name()) - err = fmt.Errorf("failed to write Spec file: %w", err) - } - - return err -} - -// GetVendor returns the vendor of this Spec. -func (s *Spec) GetVendor() string { - return s.vendor -} - -// GetClass returns the device class of this Spec. -func (s *Spec) GetClass() string { - return s.class -} - -// GetDevice returns the device for the given unqualified name. -func (s *Spec) GetDevice(name string) *Device { - return s.devices[name] -} - -// GetPath returns the filesystem path of this Spec. -func (s *Spec) GetPath() string { - return s.path -} - -// GetPriority returns the priority of this Spec. -func (s *Spec) GetPriority() int { - return s.priority -} - -// ApplyEdits applies the Spec's global-scope container edits to an OCI Spec. -func (s *Spec) ApplyEdits(ociSpec *oci.Spec) error { - return s.edits().Apply(ociSpec) -} - -// edits returns the applicable global container edits for this spec. -func (s *Spec) edits() *ContainerEdits { - return &ContainerEdits{&s.ContainerEdits} -} - -// Validate the Spec. 
-func (s *Spec) validate() (map[string]*Device, error) { - if err := validateVersion(s.Version); err != nil { - return nil, err - } - - minVersion, err := MinimumRequiredVersion(s.Spec) - if err != nil { - return nil, fmt.Errorf("could not determine minimum required version: %v", err) - } - if newVersion(minVersion).IsGreaterThan(newVersion(s.Version)) { - return nil, fmt.Errorf("the spec version must be at least v%v", minVersion) - } - - if err := ValidateVendorName(s.vendor); err != nil { - return nil, err - } - if err := ValidateClassName(s.class); err != nil { - return nil, err - } - if err := validation.ValidateSpecAnnotations(s.Kind, s.Annotations); err != nil { - return nil, err - } - if err := s.edits().Validate(); err != nil { - return nil, err - } - - devices := make(map[string]*Device) - for _, d := range s.Devices { - dev, err := newDevice(s, d) - if err != nil { - return nil, fmt.Errorf("failed add device %q: %w", d.Name, err) - } - if _, conflict := devices[d.Name]; conflict { - return nil, fmt.Errorf("invalid spec, multiple device %q", d.Name) - } - devices[d.Name] = dev - } - - return devices, nil -} - -// validateVersion checks whether the specified spec version is supported. -func validateVersion(version string) error { - if !validSpecVersions.isValidVersion(version) { - return fmt.Errorf("invalid version %q", version) - } - - return nil -} - -// ParseSpec parses CDI Spec data into a raw CDI Spec. -func ParseSpec(data []byte) (*cdi.Spec, error) { - var raw *cdi.Spec - err := yaml.UnmarshalStrict(data, &raw) - if err != nil { - return nil, fmt.Errorf("failed to unmarshal CDI Spec: %w", err) - } - return raw, nil -} - -// SetSpecValidator sets a CDI Spec validator function. This function -// is used for extra CDI Spec content validation whenever a Spec file -// loaded (using ReadSpec() or written (using WriteSpec()). -func SetSpecValidator(fn func(*cdi.Spec) error) { - validatorLock.Lock() - defer validatorLock.Unlock() - specValidator = fn -} - -// validateSpec validates the Spec using the extneral validator. -func validateSpec(raw *cdi.Spec) error { - validatorLock.RLock() - defer validatorLock.RUnlock() - - if specValidator == nil { - return nil - } - err := specValidator(raw) - if err != nil { - return fmt.Errorf("Spec validation failed: %w", err) - } - return nil -} - -// GenerateSpecName generates a vendor+class scoped Spec file name. The -// name can be passed to WriteSpec() to write a Spec file to the file -// system. -// -// vendor and class should match the vendor and class of the CDI Spec. -// The file name is generated without a ".json" or ".yaml" extension. -// The caller can append the desired extension to choose a particular -// encoding. Otherwise WriteSpec() will use its default encoding. -// -// This function always returns the same name for the same vendor/class -// combination. Therefore it cannot be used as such to generate multiple -// Spec file names for a single vendor and class. -func GenerateSpecName(vendor, class string) string { - return vendor + "-" + class -} - -// GenerateTransientSpecName generates a vendor+class scoped transient -// Spec file name. The name can be passed to WriteSpec() to write a Spec -// file to the file system. -// -// Transient Specs are those whose lifecycle is tied to that of some -// external entity, for instance a container. vendor and class should -// match the vendor and class of the CDI Spec. 
-// GenerateTransientSpecName generates a vendor+class scoped transient
-// Spec file name. The name can be passed to WriteSpec() to write a Spec
-// file to the file system.
-//
-// Transient Specs are those whose lifecycle is tied to that of some
-// external entity, for instance a container. vendor and class should
-// match the vendor and class of the CDI Spec. transientID should be
-// unique among all CDI users on the same host that might generate
-// transient Spec files using the same vendor/class combination. If
-// the external entity to which the lifecycle of the transient Spec
-// is tied has a unique ID of its own, then this is usually a
-// good choice for transientID.
-//
-// The file name is generated without a ".json" or ".yaml" extension.
-// The caller can append the desired extension to choose a particular
-// encoding. Otherwise WriteSpec() will use its default encoding.
-func GenerateTransientSpecName(vendor, class, transientID string) string {
-	transientID = strings.ReplaceAll(transientID, "/", "_")
-	return GenerateSpecName(vendor, class) + "_" + transientID
-}
-
-// GenerateNameForSpec generates a name for the given Spec using
-// GenerateSpecName with the vendor and class taken from the Spec.
-// On success it returns the generated name and a nil error. If
-// the Spec does not contain a valid vendor or class, it returns
-// an empty name and a non-nil error.
-func GenerateNameForSpec(raw *cdi.Spec) (string, error) {
-	vendor, class := ParseQualifier(raw.Kind)
-	if vendor == "" {
-		return "", fmt.Errorf("invalid vendor/class %q in Spec", raw.Kind)
-	}
-
-	return GenerateSpecName(vendor, class), nil
-}
-
-// GenerateNameForTransientSpec generates a name for the given transient
-// Spec using GenerateTransientSpecName with the vendor and class taken
-// from the Spec. On success it returns the generated name and a nil error.
-// If the Spec does not contain a valid vendor or class, it returns
-// an empty name and a non-nil error.
-func GenerateNameForTransientSpec(raw *cdi.Spec, transientID string) (string, error) {
-	vendor, class := ParseQualifier(raw.Kind)
-	if vendor == "" {
-		return "", fmt.Errorf("invalid vendor/class %q in Spec", raw.Kind)
-	}
-
-	return GenerateTransientSpecName(vendor, class, transientID), nil
-}
diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec_linux.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec_linux.go
deleted file mode 100644
index 9ad2739256..0000000000
--- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec_linux.go
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
-   Copyright © 2022 The CDI Authors
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package cdi
-
-import (
-	"fmt"
-	"os"
-
-	"golang.org/x/sys/unix"
-)
-
-// Rename src to dst, both relative to the directory dir. If dst already exists
-// refuse renaming with an error unless overwrite is explicitly asked for.
-func renameIn(dir, src, dst string, overwrite bool) error { - var flags uint - - dirf, err := os.Open(dir) - if err != nil { - return fmt.Errorf("rename failed: %w", err) - } - defer dirf.Close() - - if !overwrite { - flags = unix.RENAME_NOREPLACE - } - - dirFd := int(dirf.Fd()) - err = unix.Renameat2(dirFd, src, dirFd, dst, flags) - if err != nil { - return fmt.Errorf("rename failed: %w", err) - } - - return nil -} diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec_other.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec_other.go deleted file mode 100644 index 285e04e27a..0000000000 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec_other.go +++ /dev/null @@ -1,39 +0,0 @@ -//go:build !linux -// +build !linux - -/* - Copyright © 2022 The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cdi - -import ( - "os" - "path/filepath" -) - -// Rename src to dst, both relative to the directory dir. If dst already exists -// refuse renaming with an error unless overwrite is explicitly asked for. -func renameIn(dir, src, dst string, overwrite bool) error { - src = filepath.Join(dir, src) - dst = filepath.Join(dir, dst) - - _, err := os.Stat(dst) - if err == nil && !overwrite { - return os.ErrExist - } - - return os.Rename(src, dst) -} diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/version.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/version.go deleted file mode 100644 index a617812784..0000000000 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/version.go +++ /dev/null @@ -1,188 +0,0 @@ -/* - Copyright © The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cdi - -import ( - "strings" - - "golang.org/x/mod/semver" - - "tags.cncf.io/container-device-interface/pkg/parser" - cdi "tags.cncf.io/container-device-interface/specs-go" -) - -const ( - // CurrentVersion is the current version of the CDI Spec. - CurrentVersion = cdi.CurrentVersion - - // vCurrent is the current version as a semver-comparable type - vCurrent version = "v" + CurrentVersion - - // These represent the released versions of the CDI specification - v010 version = "v0.1.0" - v020 version = "v0.2.0" - v030 version = "v0.3.0" - v040 version = "v0.4.0" - v050 version = "v0.5.0" - v060 version = "v0.6.0" - - // vEarliest is the earliest supported version of the CDI specification - vEarliest version = v030 -) - -// validSpecVersions stores a map of spec versions to functions to check the required versions. 
-// Adding new fields / spec versions requires that a `requiredFunc` be implemented and
-// this map be updated.
-var validSpecVersions = requiredVersionMap{
-	v010: nil,
-	v020: nil,
-	v030: nil,
-	v040: requiresV040,
-	v050: requiresV050,
-	v060: requiresV060,
-}
-
-// MinimumRequiredVersion determines the minimum spec version for the input spec.
-func MinimumRequiredVersion(spec *cdi.Spec) (string, error) {
-	minVersion := validSpecVersions.requiredVersion(spec)
-	return minVersion.String(), nil
-}
-
-// version represents a semantic version string
-type version string
-
-// newVersion creates a version that can be used for semantic version comparisons.
-func newVersion(v string) version {
-	return version("v" + strings.TrimPrefix(v, "v"))
-}
-
-// String returns the string representation of the version.
-// This trims a leading v if present.
-func (v version) String() string {
-	return strings.TrimPrefix(string(v), "v")
-}
-
-// IsGreaterThan checks whether a version is greater than the specified version.
-func (v version) IsGreaterThan(o version) bool {
-	return semver.Compare(string(v), string(o)) > 0
-}
-
-// IsLatest checks whether the version is the latest supported version
-func (v version) IsLatest() bool {
-	return v == vCurrent
-}
-
-type requiredFunc func(*cdi.Spec) bool
-
-type requiredVersionMap map[version]requiredFunc
-
-// isValidVersion checks whether the specified version is valid.
-// A version is valid if it is contained in the required version map.
-func (r requiredVersionMap) isValidVersion(specVersion string) bool {
-	_, ok := validSpecVersions[newVersion(specVersion)]
-
-	return ok
-}
-
-// requiredVersion returns the minimum version required for the given spec
-func (r requiredVersionMap) requiredVersion(spec *cdi.Spec) version {
-	minVersion := vEarliest
-
-	for v, isRequired := range validSpecVersions {
-		if isRequired == nil {
-			continue
-		}
-		if isRequired(spec) && v.IsGreaterThan(minVersion) {
-			minVersion = v
-		}
-		// If we have already detected the latest version then no later version could be detected
-		if minVersion.IsLatest() {
-			break
-		}
-	}
-
-	return minVersion
-}
-
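
Reviewer note: requiredVersion above keeps the highest version whose predicate fires; the requiresVXXX functions that follow are those predicates. A reduced, self-contained model of the same pattern, with hypothetical feature flags standing in for real spec fields:

    package main

    import (
        "fmt"

        "golang.org/x/mod/semver"
    )

    // doc is a stand-in for a spec document; the two flags model
    // "uses a feature introduced in version X".
    type doc struct {
        usesMountType   bool // hypothetical v0.4.0 feature
        usesAnnotations bool // hypothetical v0.6.0 feature
    }

    // required maps each version to a predicate reporting whether a
    // document needs at least that version, mirroring requiredVersionMap.
    var required = map[string]func(doc) bool{
        "v0.4.0": func(d doc) bool { return d.usesMountType },
        "v0.6.0": func(d doc) bool { return d.usesAnnotations },
    }

    func minimumVersion(d doc) string {
        min := "v0.3.0" // the earliest supported version
        for v, needs := range required {
            if needs(d) && semver.Compare(v, min) > 0 {
                min = v
            }
        }
        return min
    }

    func main() {
        fmt.Println(minimumVersion(doc{}))                    // v0.3.0
        fmt.Println(minimumVersion(doc{usesMountType: true})) // v0.4.0
        fmt.Println(minimumVersion(doc{usesMountType: true,
            usesAnnotations: true})) // v0.6.0
    }
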
-// requiresV060 returns true if the spec uses v0.6.0 features
-func requiresV060(spec *cdi.Spec) bool {
-	// The v0.6.0 spec allows annotations to be specified at a spec level
-	for range spec.Annotations {
-		return true
-	}
-
-	// The v0.6.0 spec allows annotations to be specified at a device level
-	for _, d := range spec.Devices {
-		for range d.Annotations {
-			return true
-		}
-	}
-
-	// The v0.6.0 spec allows dots "." in the kind name label (class)
-	vendor, class := parser.ParseQualifier(spec.Kind)
-	if vendor != "" {
-		if strings.ContainsRune(class, '.') {
-			return true
-		}
-	}
-
-	return false
-}
-
-// requiresV050 returns true if the spec uses v0.5.0 features
-func requiresV050(spec *cdi.Spec) bool {
-	var edits []*cdi.ContainerEdits
-
-	for _, d := range spec.Devices {
-		// The v0.5.0 spec allowed device names to start with a digit instead of requiring a letter
-		if len(d.Name) > 0 && !parser.IsLetter(rune(d.Name[0])) {
-			return true
-		}
-		edits = append(edits, &d.ContainerEdits)
-	}
-
-	edits = append(edits, &spec.ContainerEdits)
-	for _, e := range edits {
-		for _, dn := range e.DeviceNodes {
-			// The HostPath field was added in v0.5.0
-			if dn.HostPath != "" {
-				return true
-			}
-		}
-	}
-	return false
-}
-
-// requiresV040 returns true if the spec uses v0.4.0 features
-func requiresV040(spec *cdi.Spec) bool {
-	var edits []*cdi.ContainerEdits
-
-	for _, d := range spec.Devices {
-		edits = append(edits, &d.ContainerEdits)
-	}
-
-	edits = append(edits, &spec.ContainerEdits)
-	for _, e := range edits {
-		for _, m := range e.Mounts {
-			// The Type field was added in v0.4.0
-			if m.Type != "" {
-				return true
-			}
-		}
-	}
-	return false
-}
diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/parser/parser.go b/vendor/tags.cncf.io/container-device-interface/pkg/parser/parser.go
deleted file mode 100644
index 5325989541..0000000000
--- a/vendor/tags.cncf.io/container-device-interface/pkg/parser/parser.go
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
-   Copyright © The CDI Authors
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package parser
-
-import (
-	"fmt"
-	"strings"
-)
-
-// QualifiedName returns the qualified name for a device.
-// The syntax for a qualified device name is
-//
-//	"<vendor>/<class>=<name>"
-//
-// A valid vendor and class name may contain the following runes:
-//
-//	'A'-'Z', 'a'-'z', '0'-'9', '.', '-', '_'.
-//
-// A valid device name may contain the following runes:
-//
-//	'A'-'Z', 'a'-'z', '0'-'9', '-', '_', '.', ':'
-func QualifiedName(vendor, class, name string) string {
-	return vendor + "/" + class + "=" + name
-}
-
-// IsQualifiedName tests if a device name is qualified.
-func IsQualifiedName(device string) bool {
-	_, _, _, err := ParseQualifiedName(device)
-	return err == nil
-}
-
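
Reviewer note: the "<vendor>/<class>=<name>" grammar documented above is the heart of this parser. ParseQualifiedName below layers character-set validation on top of the split; a minimal sketch of just the split, with illustrative device strings:

    package main

    import (
        "fmt"
        "strings"
    )

    // splitQualified mirrors the "<vendor>/<class>=<name>" grammar without
    // the validation the real parser performs.
    func splitQualified(device string) (vendor, class, name string, ok bool) {
        qualifier, name, ok := strings.Cut(device, "=")
        if !ok || name == "" {
            return "", "", "", false
        }
        vendor, class, ok = strings.Cut(qualifier, "/")
        if !ok || vendor == "" || class == "" {
            return "", "", "", false
        }
        return vendor, class, name, true
    }

    func main() {
        v, c, n, ok := splitQualified("example.com/device=myDevice")
        fmt.Println(v, c, n, ok) // example.com device myDevice true

        _, _, _, ok = splitQualified("myDevice") // unqualified: no "/" or "="
        fmt.Println(ok)                          // false
    }
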
-// ParseQualifiedName splits a qualified name into device vendor, class,
-// and name. If the device fails to parse as a qualified name, or if any
-// of the split components fail to pass syntax validation, vendor and
-// class are returned as empty, together with the verbatim input as the
-// name and an error describing the reason for failure.
-func ParseQualifiedName(device string) (string, string, string, error) {
-	vendor, class, name := ParseDevice(device)
-
-	if vendor == "" {
-		return "", "", device, fmt.Errorf("unqualified device %q, missing vendor", device)
-	}
-	if class == "" {
-		return "", "", device, fmt.Errorf("unqualified device %q, missing class", device)
-	}
-	if name == "" {
-		return "", "", device, fmt.Errorf("unqualified device %q, missing device name", device)
-	}
-
-	if err := ValidateVendorName(vendor); err != nil {
-		return "", "", device, fmt.Errorf("invalid device %q: %w", device, err)
-	}
-	if err := ValidateClassName(class); err != nil {
-		return "", "", device, fmt.Errorf("invalid device %q: %w", device, err)
-	}
-	if err := ValidateDeviceName(name); err != nil {
-		return "", "", device, fmt.Errorf("invalid device %q: %w", device, err)
-	}
-
-	return vendor, class, name, nil
-}
-
-// ParseDevice tries to split a device name into vendor, class, and name.
-// If this fails, for instance in the case of unqualified device names,
-// ParseDevice returns an empty vendor and class together with name set
-// to the verbatim input.
-func ParseDevice(device string) (string, string, string) {
-	if device == "" || device[0] == '/' {
-		return "", "", device
-	}
-
-	parts := strings.SplitN(device, "=", 2)
-	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
-		return "", "", device
-	}
-
-	name := parts[1]
-	vendor, class := ParseQualifier(parts[0])
-	if vendor == "" {
-		return "", "", device
-	}
-
-	return vendor, class, name
-}
-
-// ParseQualifier splits a device qualifier into vendor and class.
-// The syntax for a device qualifier is
-//
-//	"<vendor>/<class>"
-//
-// If parsing fails, an empty vendor and the class set to the
-// verbatim input is returned.
-func ParseQualifier(kind string) (string, string) {
-	parts := strings.SplitN(kind, "/", 2)
-	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
-		return "", kind
-	}
-	return parts[0], parts[1]
-}
-
-// ValidateVendorName checks the validity of a vendor name.
-// A vendor name may contain the following ASCII characters:
-//   - upper- and lowercase letters ('A'-'Z', 'a'-'z')
-//   - digits ('0'-'9')
-//   - underscore, dash, and dot ('_', '-', and '.')
-func ValidateVendorName(vendor string) error {
-	err := validateVendorOrClassName(vendor)
-	if err != nil {
-		err = fmt.Errorf("invalid vendor. %w", err)
-	}
-	return err
-}
-
-// ValidateClassName checks the validity of a class name.
-// A class name may contain the following ASCII characters:
-//   - upper- and lowercase letters ('A'-'Z', 'a'-'z')
-//   - digits ('0'-'9')
-//   - underscore, dash, and dot ('_', '-', and '.')
-func ValidateClassName(class string) error {
-	err := validateVendorOrClassName(class)
-	if err != nil {
-		err = fmt.Errorf("invalid class. %w", err)
-	}
-	return err
-}
-
-// validateVendorOrClassName checks the validity of a vendor or class name.
-// A name may contain the following ASCII characters:
-//   - upper- and lowercase letters ('A'-'Z', 'a'-'z')
-//   - digits ('0'-'9')
-//   - underscore, dash, and dot ('_', '-', and '.')
-func validateVendorOrClassName(name string) error {
-	if name == "" {
-		return fmt.Errorf("empty name")
-	}
-	if !IsLetter(rune(name[0])) {
-		return fmt.Errorf("%q, should start with a letter", name)
-	}
-	for _, c := range string(name[1 : len(name)-1]) {
-		switch {
-		case IsAlphaNumeric(c):
-		case c == '_' || c == '-' || c == '.':
-		default:
-			return fmt.Errorf("invalid character '%c' in name %q",
-				c, name)
-		}
-	}
-	if !IsAlphaNumeric(rune(name[len(name)-1])) {
-		return fmt.Errorf("%q, should end with a letter or digit", name)
-	}
-
-	return nil
-}
-
-// ValidateDeviceName checks the validity of a device name.
-// A device name may contain the following ASCII characters:
-//   - upper- and lowercase letters ('A'-'Z', 'a'-'z')
-//   - digits ('0'-'9')
-//   - underscore, dash, dot, colon ('_', '-', '.', ':')
func ValidateDeviceName(name string) error {
-	if name == "" {
-		return fmt.Errorf("invalid (empty) device name")
-	}
-	if !IsAlphaNumeric(rune(name[0])) {
-		return fmt.Errorf("invalid name %q, should start with a letter or digit", name)
-	}
-	if len(name) == 1 {
-		return nil
-	}
-	for _, c := range string(name[1 : len(name)-1]) {
-		switch {
-		case IsAlphaNumeric(c):
-		case c == '_' || c == '-' || c == '.' || c == ':':
-		default:
-			return fmt.Errorf("invalid character '%c' in device name %q",
-				c, name)
-		}
-	}
-	if !IsAlphaNumeric(rune(name[len(name)-1])) {
-		return fmt.Errorf("invalid name %q, should end with a letter or digit", name)
-	}
-	return nil
-}
-
-// IsLetter reports whether the rune is a letter.
-func IsLetter(c rune) bool {
-	return ('A' <= c && c <= 'Z') || ('a' <= c && c <= 'z')
-}
-
-// IsDigit reports whether the rune is a digit.
-func IsDigit(c rune) bool {
-	return '0' <= c && c <= '9'
-}
-
-// IsAlphaNumeric reports whether the rune is a letter or digit.
-func IsAlphaNumeric(c rune) bool {
-	return IsLetter(c) || IsDigit(c)
-}
diff --git a/vendor/tags.cncf.io/container-device-interface/specs-go/LICENSE b/vendor/tags.cncf.io/container-device-interface/specs-go/LICENSE
deleted file mode 100644
index 261eeb9e9f..0000000000
--- a/vendor/tags.cncf.io/container-device-interface/specs-go/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/tags.cncf.io/container-device-interface/specs-go/config.go b/vendor/tags.cncf.io/container-device-interface/specs-go/config.go deleted file mode 100644 index 4043b858f2..0000000000 --- a/vendor/tags.cncf.io/container-device-interface/specs-go/config.go +++ /dev/null @@ -1,62 +0,0 @@ -package specs - -import "os" - -// CurrentVersion is the current version of the Spec. 
-const CurrentVersion = "0.6.0"
-
-// Spec is the base configuration for CDI
-type Spec struct {
-	Version string `json:"cdiVersion"`
-	Kind    string `json:"kind"`
-	// Annotations add meta information per CDI spec. Note these are CDI-specific and do not affect container metadata.
-	Annotations    map[string]string `json:"annotations,omitempty"`
-	Devices        []Device          `json:"devices"`
-	ContainerEdits ContainerEdits    `json:"containerEdits,omitempty"`
-}
-
-// Device is a "Device" that a container runtime can add to a container
-type Device struct {
-	Name string `json:"name"`
-	// Annotations add meta information per device. Note these are CDI-specific and do not affect container metadata.
-	Annotations    map[string]string `json:"annotations,omitempty"`
-	ContainerEdits ContainerEdits    `json:"containerEdits"`
-}
-
-// ContainerEdits are edits a container runtime must make to the OCI spec to expose the device.
-type ContainerEdits struct {
-	Env         []string      `json:"env,omitempty"`
-	DeviceNodes []*DeviceNode `json:"deviceNodes,omitempty"`
-	Hooks       []*Hook       `json:"hooks,omitempty"`
-	Mounts      []*Mount      `json:"mounts,omitempty"`
-}
-
-// DeviceNode represents a device node that needs to be added to the OCI spec.
-type DeviceNode struct {
-	Path        string       `json:"path"`
-	HostPath    string       `json:"hostPath,omitempty"`
-	Type        string       `json:"type,omitempty"`
-	Major       int64        `json:"major,omitempty"`
-	Minor       int64        `json:"minor,omitempty"`
-	FileMode    *os.FileMode `json:"fileMode,omitempty"`
-	Permissions string       `json:"permissions,omitempty"`
-	UID         *uint32      `json:"uid,omitempty"`
-	GID         *uint32      `json:"gid,omitempty"`
-}
-
-// Mount represents a mount that needs to be added to the OCI spec.
-type Mount struct {
-	HostPath      string   `json:"hostPath"`
-	ContainerPath string   `json:"containerPath"`
-	Options       []string `json:"options,omitempty"`
-	Type          string   `json:"type,omitempty"`
-}
-
-// Hook represents a hook that needs to be added to the OCI spec.
-type Hook struct {
-	HookName string   `json:"hookName"`
-	Path     string   `json:"path"`
-	Args     []string `json:"args,omitempty"`
-	Env      []string `json:"env,omitempty"`
-	Timeout  *int     `json:"timeout,omitempty"`
-}
diff --git a/vendor/tags.cncf.io/container-device-interface/specs-go/oci.go b/vendor/tags.cncf.io/container-device-interface/specs-go/oci.go
deleted file mode 100644
index 229ad52e0c..0000000000
--- a/vendor/tags.cncf.io/container-device-interface/specs-go/oci.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package specs
-
-import (
-	spec "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-// ToOCI returns the opencontainers runtime Spec Hook for this Hook.
-func (h *Hook) ToOCI() spec.Hook {
-	return spec.Hook{
-		Path:    h.Path,
-		Args:    h.Args,
-		Env:     h.Env,
-		Timeout: h.Timeout,
-	}
-}
-
-// ToOCI returns the opencontainers runtime Spec Mount for this Mount.
-func (m *Mount) ToOCI() spec.Mount {
-	return spec.Mount{
-		Source:      m.HostPath,
-		Destination: m.ContainerPath,
-		Options:     m.Options,
-		Type:        m.Type,
-	}
-}
-
-// ToOCI returns the opencontainers runtime Spec LinuxDevice for this DeviceNode.
-func (d *DeviceNode) ToOCI() spec.LinuxDevice {
-	return spec.LinuxDevice{
-		Path:     d.Path,
-		Type:     d.Type,
-		Major:    d.Major,
-		Minor:    d.Minor,
-		FileMode: d.FileMode,
-		UID:      d.UID,
-		GID:      d.GID,
-	}
-}
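
Reviewer note: the specs-go types removed above are plain serializable structs with no behavior beyond the ToOCI conversions. A self-contained sketch of how a consumer assembles a minimal spec document, using local stand-ins for the deleted types (field values are illustrative):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Local stand-ins for the deleted specs-go types, trimmed to the
    // fields used below; the authoritative definitions are in the
    // hunks above.
    type Mount struct {
        HostPath      string   `json:"hostPath"`
        ContainerPath string   `json:"containerPath"`
        Options       []string `json:"options,omitempty"`
        Type          string   `json:"type,omitempty"`
    }

    type ContainerEdits struct {
        Env    []string `json:"env,omitempty"`
        Mounts []*Mount `json:"mounts,omitempty"`
    }

    type Device struct {
        Name           string         `json:"name"`
        ContainerEdits ContainerEdits `json:"containerEdits"`
    }

    type Spec struct {
        Version string   `json:"cdiVersion"`
        Kind    string   `json:"kind"`
        Devices []Device `json:"devices"`
    }

    func main() {
        spec := Spec{
            Version: "0.6.0",
            Kind:    "example.com/device",
            Devices: []Device{{
                Name: "dev0",
                ContainerEdits: ContainerEdits{
                    Env: []string{"EXAMPLE_DEVICE=dev0"},
                    Mounts: []*Mount{{
                        HostPath:      "/dev/example0",
                        ContainerPath: "/dev/example0",
                        Options:       []string{"rw", "bind"},
                    }},
                },
            }},
        }

        out, err := json.MarshalIndent(spec, "", "  ")
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out))
    }
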