diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml
index c494f5d9d..f518c20fb 100644
--- a/.github/workflows/e2e.yaml
+++ b/.github/workflows/e2e.yaml
@@ -175,6 +175,9 @@ jobs:
          - distribution: "k3s"
            multinamespace-mode: "true"
            test-suite-path: "./test/e2e_target_namespace"
+         - distribution: "k3s"
+           multinamespace-mode: "true"
+           test-suite-path: "./test/e2e_plugin"
          - distribution: "k3s"
            multinamespace-mode: "true"
            test-suite-path: "./test/e2e_isolation_mode"
@@ -279,8 +282,12 @@ jobs:
           echo "=========================================================================================================="
           kubectl get events -n ${{ env.VCLUSTER_NAMESPACE }} --sort-by='.lastTimestamp'
           echo "=========================================================================================================="
-          kubectl logs -l app=${{ env.VCLUSTER_SUFFIX }} -n ${{ env.VCLUSTER_NAMESPACE }} -c syncer --tail=500
+          kubectl logs -l app=${{ env.VCLUSTER_SUFFIX }} -n ${{ env.VCLUSTER_NAMESPACE }} -c syncer --tail=-1 -p || kubectl logs -l app=${{ env.VCLUSTER_SUFFIX }} -n ${{ env.VCLUSTER_NAMESPACE }} -c syncer --tail=-1
           echo "=========================================================================================================="
+          if [[ "${{ matrix.test-suite-path }}" = "./test/e2e_plugin" ]]; then
+            kubectl logs -l app=${{ env.VCLUSTER_SUFFIX }} -n ${{ env.VCLUSTER_NAMESPACE }} -c bootstrap-with-deployment --tail=-1 -p || kubectl logs -l app=${{ env.VCLUSTER_SUFFIX }} -n ${{ env.VCLUSTER_NAMESPACE }} -c bootstrap-with-deployment --tail=-1
+            echo "=========================================================================================================="
+          fi
           kubectl describe pods -n ${{ env.VCLUSTER_NAMESPACE }}
           exit 1
@@ -294,6 +301,10 @@ jobs:
           sudo chmod +x $(echo "${{ matrix.test-suite-path }}" | sed "s#./test/##g").test
           VCLUSTER_SUFFIX=${{ env.VCLUSTER_SUFFIX }} VCLUSTER_NAME=${{ env.VCLUSTER_NAME }} VCLUSTER_NAMESPACE=${{ env.VCLUSTER_NAMESPACE }} MULTINAMESPACE_MODE=${{ matrix.multinamespace-mode }} ./$(echo "${{ matrix.test-suite-path }}" | sed "s#./test/##g").test -test.v --ginkgo.v --ginkgo.skip='.*NetworkPolicy.*' --ginkgo.fail-fast
+          if kubectl logs -l app=${{ env.VCLUSTER_SUFFIX }} -n ${{ env.VCLUSTER_NAMESPACE }} -c syncer --tail=-1 -p >/dev/null 2>/dev/null; then
+            echo "vCluster has restarted during testing, failing..."
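+            # "kubectl logs -p" only succeeds when a previous syncer container exists, i.e. the syncer restarted while the tests were running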
+ exit 1 + fi continue-on-error: true @@ -305,7 +316,7 @@ jobs: echo "======================================================================================================================" kubectl get events -n ${{ env.VCLUSTER_NAMESPACE }} --sort-by='.lastTimestamp' echo "======================================================================================================================" - kubectl logs -l app=${{ env.VCLUSTER_SUFFIX }} -n ${{ env.VCLUSTER_NAMESPACE }} -c syncer --tail=500 + kubectl logs -l app=${{ env.VCLUSTER_SUFFIX }} -n ${{ env.VCLUSTER_NAMESPACE }} -c syncer --tail=-1 -p || kubectl logs -l app=${{ env.VCLUSTER_SUFFIX }} -n ${{ env.VCLUSTER_NAMESPACE }} -c syncer --tail=-1 echo "======================================================================================================================" kubectl describe pods -n ${{ env.VCLUSTER_NAMESPACE }} exit 1 diff --git a/cmd/vclusterctl/cmd/connect.go b/cmd/vclusterctl/cmd/connect.go index c7223e40e..a97c6dec4 100644 --- a/cmd/vclusterctl/cmd/connect.go +++ b/cmd/vclusterctl/cmd/connect.go @@ -32,7 +32,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/tools/clientcmd/api" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" loftctlUtil "github.com/loft-sh/loftctl/v3/pkg/util" "github.com/loft-sh/log" @@ -72,7 +72,7 @@ type ConnectCmd struct { kubeClientConfig clientcmd.ClientConfig kubeClient *kubernetes.Clientset restConfig *rest.Config - rawConfig api.Config + rawConfig clientcmdapi.Config portForwarding bool interruptChan chan struct{} @@ -270,7 +270,7 @@ func (cmd *ConnectCmd) connectOss(ctx context.Context, vCluster *find.VCluster, return cmd.writeKubeConfig(kubeConfig, vCluster.Name) } -func (cmd *ConnectCmd) writeKubeConfig(kubeConfig *api.Config, vClusterName string) error { +func (cmd *ConnectCmd) writeKubeConfig(kubeConfig *clientcmdapi.Config, vClusterName string) error { // write kube config to buffer out, err := clientcmd.Write(*kubeConfig) if err != nil { @@ -279,12 +279,12 @@ func (cmd *ConnectCmd) writeKubeConfig(kubeConfig *api.Config, vClusterName stri // write kube config to file if cmd.UpdateCurrent { - var clusterConfig *api.Cluster + var clusterConfig *clientcmdapi.Cluster for _, c := range kubeConfig.Clusters { clusterConfig = c } - var authConfig *api.AuthInfo + var authConfig *clientcmdapi.AuthInfo for _, a := range kubeConfig.AuthInfos { authConfig = a } @@ -404,7 +404,7 @@ func (cmd *ConnectCmd) prepare(ctx context.Context, vCluster *find.VCluster) err return nil } -func (cmd *ConnectCmd) getVClusterProKubeConfig(ctx context.Context, proClient proclient.Client, vCluster *pro.VirtualClusterInstanceProject) (*api.Config, error) { +func (cmd *ConnectCmd) getVClusterProKubeConfig(ctx context.Context, proClient proclient.Client, vCluster *pro.VirtualClusterInstanceProject) (*clientcmdapi.Config, error) { contextOptions, err := use.CreateVirtualClusterInstanceOptions(ctx, proClient, "", vCluster.Project.Name, vCluster.VirtualCluster, false, false, cmd.Log) if err != nil { return nil, fmt.Errorf("prepare vCluster kube config: %w", err) @@ -463,7 +463,7 @@ func (cmd *ConnectCmd) getVClusterProKubeConfig(ctx context.Context, proClient p // set service account token for k := range kubeConfig.AuthInfos { - kubeConfig.AuthInfos[k] = &api.AuthInfo{ + kubeConfig.AuthInfos[k] = &clientcmdapi.AuthInfo{ Token: token, Extensions: make(map[string]runtime.Object), ImpersonateUserExtra: make(map[string][]string), @@ -474,7 +474,7 @@ func 
(cmd *ConnectCmd) getVClusterProKubeConfig(ctx context.Context, proClient p return kubeConfig, nil } -func (cmd *ConnectCmd) getVClusterKubeConfig(ctx context.Context, vclusterName string, command []string) (*api.Config, error) { +func (cmd *ConnectCmd) getVClusterKubeConfig(ctx context.Context, vclusterName string, command []string) (*clientcmdapi.Config, error) { var err error podName := cmd.PodName if podName == "" { @@ -587,7 +587,7 @@ func (cmd *ConnectCmd) getVClusterKubeConfig(ctx context.Context, vclusterName s // set service account token for k := range kubeConfig.AuthInfos { - kubeConfig.AuthInfos[k] = &api.AuthInfo{ + kubeConfig.AuthInfos[k] = &clientcmdapi.AuthInfo{ Token: token, Extensions: make(map[string]runtime.Object), ImpersonateUserExtra: make(map[string][]string), @@ -598,7 +598,7 @@ func (cmd *ConnectCmd) getVClusterKubeConfig(ctx context.Context, vclusterName s return kubeConfig, nil } -func (cmd *ConnectCmd) setServerIfExposed(ctx context.Context, vClusterName string, vClusterConfig *api.Config) error { +func (cmd *ConnectCmd) setServerIfExposed(ctx context.Context, vClusterName string, vClusterConfig *clientcmdapi.Config) error { printedWaiting := false err := wait.PollUntilContextTimeout(ctx, time.Second*2, time.Minute*5, true, func(ctx context.Context) (done bool, err error) { // first check for load balancer service, look for the other service if it's not there @@ -665,7 +665,7 @@ func (cmd *ConnectCmd) setServerIfExposed(ctx context.Context, vClusterName stri // exchangeContextName switches the context name specified in the remote kubeconfig with // the context name specified by the user. It cannot correctly handle kubeconfigs with multiple entries // for clusters, authInfos, contexts, but ideally this is pointed at a secret created by us. 
-func (cmd *ConnectCmd) exchangeContextName(kubeConfig *api.Config, vclusterName string) error { +func (cmd *ConnectCmd) exchangeContextName(kubeConfig *clientcmdapi.Config, vclusterName string) error { if cmd.KubeConfigContextName == "" { if vclusterName != "" { cmd.KubeConfigContextName = find.VClusterContextName(vclusterName, cmd.Namespace, cmd.rawConfig.CurrentContext) @@ -712,7 +712,7 @@ func (cmd *ConnectCmd) exchangeContextName(kubeConfig *api.Config, vclusterName return nil } -func (cmd *ConnectCmd) executeCommand(vKubeConfig api.Config, command []string) error { +func (cmd *ConnectCmd) executeCommand(vKubeConfig clientcmdapi.Config, command []string) error { // convert to local kube config out, err := clientcmd.Write(vKubeConfig) if err != nil { @@ -774,7 +774,7 @@ func (cmd *ConnectCmd) executeCommand(vKubeConfig api.Config, command []string) } } -func (cmd *ConnectCmd) getLocalVClusterConfig(vKubeConfig api.Config) api.Config { +func (cmd *ConnectCmd) getLocalVClusterConfig(vKubeConfig clientcmdapi.Config) clientcmdapi.Config { // wait until we can access the virtual cluster vKubeConfig = *vKubeConfig.DeepCopy() for k := range vKubeConfig.Clusters { @@ -783,7 +783,7 @@ func (cmd *ConnectCmd) getLocalVClusterConfig(vKubeConfig api.Config) api.Config return vKubeConfig } -func (cmd *ConnectCmd) getLocalVClusterClient(vKubeConfig api.Config) (kubernetes.Interface, error) { +func (cmd *ConnectCmd) getLocalVClusterClient(vKubeConfig clientcmdapi.Config) (kubernetes.Interface, error) { vRestConfig, err := clientcmd.NewDefaultClientConfig(cmd.getLocalVClusterConfig(vKubeConfig), &clientcmd.ConfigOverrides{}).ClientConfig() if err != nil { return nil, errors.Wrap(err, "create virtual rest config") @@ -797,7 +797,7 @@ func (cmd *ConnectCmd) getLocalVClusterClient(vKubeConfig api.Config) (kubernete return vKubeClient, nil } -func (cmd *ConnectCmd) waitForVCluster(ctx context.Context, vKubeConfig api.Config, errorChan chan error) error { +func (cmd *ConnectCmd) waitForVCluster(ctx context.Context, vKubeConfig clientcmdapi.Config, errorChan chan error) error { vKubeClient, err := cmd.getLocalVClusterClient(vKubeConfig) if err != nil { return err @@ -820,7 +820,7 @@ func (cmd *ConnectCmd) waitForVCluster(ctx context.Context, vKubeConfig api.Conf return nil } -func (cmd *ConnectCmd) createServiceAccountToken(ctx context.Context, vKubeConfig api.Config) (string, error) { +func (cmd *ConnectCmd) createServiceAccountToken(ctx context.Context, vKubeConfig clientcmdapi.Config) (string, error) { vKubeClient, err := cmd.getLocalVClusterClient(vKubeConfig) if err != nil { return "", err diff --git a/cmd/vclusterctl/cmd/connect_test.go b/cmd/vclusterctl/cmd/connect_test.go index 88825c721..c7f1cddb0 100644 --- a/cmd/vclusterctl/cmd/connect_test.go +++ b/cmd/vclusterctl/cmd/connect_test.go @@ -5,7 +5,7 @@ import ( "github.com/loft-sh/vcluster/cmd/vclusterctl/flags" "gotest.tools/v3/assert" - "k8s.io/client-go/tools/clientcmd/api" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ) const ( @@ -15,16 +15,16 @@ const ( func TestExchangeContextName(t *testing.T) { vclusterName := "vcluster-name" namespace := "default" - defaultContext := &api.Config{ - Clusters: map[string]*api.Cluster{ + defaultContext := &clientcmdapi.Config{ + Clusters: map[string]*clientcmdapi.Cluster{ defaultContextName: {Server: "foo"}, }, - AuthInfos: map[string]*api.AuthInfo{ + AuthInfos: map[string]*clientcmdapi.AuthInfo{ defaultContextName: { Token: "foo", }, }, - Contexts: map[string]*api.Context{ + Contexts: 
map[string]*clientcmdapi.Context{ defaultContextName: { Cluster: defaultContextName, AuthInfo: defaultContextName, @@ -35,23 +35,23 @@ func TestExchangeContextName(t *testing.T) { testTable := []struct { desc string newContextName string - config *api.Config - expectedConfig *api.Config + config *clientcmdapi.Config + expectedConfig *clientcmdapi.Config }{ { desc: "KubeConfigContextName specified", newContextName: "new-context", config: defaultContext.DeepCopy(), - expectedConfig: &api.Config{ - Clusters: map[string]*api.Cluster{ + expectedConfig: &clientcmdapi.Config{ + Clusters: map[string]*clientcmdapi.Cluster{ "new-context": {Server: "foo"}, }, - AuthInfos: map[string]*api.AuthInfo{ + AuthInfos: map[string]*clientcmdapi.AuthInfo{ "new-context": { Token: "foo", }, }, - Contexts: map[string]*api.Context{ + Contexts: map[string]*clientcmdapi.Context{ "new-context": { Cluster: "new-context", AuthInfo: "new-context", @@ -64,16 +64,16 @@ func TestExchangeContextName(t *testing.T) { desc: "KubeConfigContextName same as default", newContextName: defaultContextName, config: defaultContext.DeepCopy(), - expectedConfig: &api.Config{ - Clusters: map[string]*api.Cluster{ + expectedConfig: &clientcmdapi.Config{ + Clusters: map[string]*clientcmdapi.Cluster{ defaultContextName: {Server: "foo"}, }, - AuthInfos: map[string]*api.AuthInfo{ + AuthInfos: map[string]*clientcmdapi.AuthInfo{ defaultContextName: { Token: "foo", }, }, - Contexts: map[string]*api.Context{ + Contexts: map[string]*clientcmdapi.Context{ defaultContextName: { Cluster: defaultContextName, AuthInfo: defaultContextName, diff --git a/cmd/vclusterctl/cmd/find/find.go b/cmd/vclusterctl/cmd/find/find.go index 320d11aa2..714068b0c 100644 --- a/cmd/vclusterctl/cmd/find/find.go +++ b/cmd/vclusterctl/cmd/find/find.go @@ -21,7 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/tools/clientcmd/api" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ) const VirtualClusterSelector = "app=vcluster" @@ -45,12 +45,12 @@ const ( StatusUnknown Status = "Unknown" ) -func SwitchContext(kubeConfig *api.Config, otherContext string) error { +func SwitchContext(kubeConfig *clientcmdapi.Config, otherContext string) error { kubeConfig.CurrentContext = otherContext return clientcmd.ModifyConfig(clientcmd.NewDefaultClientConfigLoadingRules(), *kubeConfig, false) } -func CurrentContext() (string, *api.Config, error) { +func CurrentContext() (string, *clientcmdapi.Config, error) { rawConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}).RawConfig() if err != nil { return "", nil, err diff --git a/go.mod b/go.mod index db1a0e151..8300f7f77 100644 --- a/go.mod +++ b/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-openapi/loads v0.21.2 github.com/google/go-github/v53 v53.2.1-0.20230815134205-bb00f570d301 github.com/gorilla/websocket v1.5.1 + github.com/hashicorp/go-plugin v1.6.0 github.com/hashicorp/golang-lru/v2 v2.0.2 github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 github.com/loft-sh/agentapi/v3 v3.4.0-beta.11 @@ -75,16 +76,21 @@ require ( github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch/v5 v5.7.0 // indirect github.com/fatih/camelcase v1.0.0 // indirect + github.com/fatih/color v1.12.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/cel-go v0.16.1 // indirect github.com/google/gnostic-models 
v0.6.9-0.20230804172637-c7be7c783f49 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 // indirect + github.com/hashicorp/go-hclog v0.14.1 // indirect + github.com/hashicorp/yamux v0.1.1 // indirect github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213 // indirect github.com/loft-sh/admin-apis v0.0.0-20231102064836-7a6574be5f16 // indirect github.com/loft-sh/apiserver v0.0.0-20240110162222-7923cde3a92d // indirect github.com/loft-sh/jspolicy v0.2.2 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/mitchellh/go-testing-interface v1.0.0 // indirect + github.com/oklog/run v1.0.0 // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/otiai10/copy v1.11.0 // indirect diff --git a/go.sum b/go.sum index 2eade0a1c..813fe8630 100644 --- a/go.sum +++ b/go.sum @@ -125,6 +125,8 @@ github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdn github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= +github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= @@ -225,6 +227,7 @@ github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZM github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.12.0 h1:mRhaKNwANqRgUBGKmnI5ZxEk7QXmjQeCcuYFMX2bfcc= github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= @@ -533,9 +536,13 @@ github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBt github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= +github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= github.com/hashicorp/go-rootcerts v1.0.0/go.mod 
h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= @@ -551,6 +558,8 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= +github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -566,6 +575,8 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= +github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= @@ -653,11 +664,13 @@ github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsI github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= @@ -677,6 +690,7 @@ github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceT github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod 
h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= @@ -716,6 +730,8 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= @@ -1168,6 +1184,7 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/pkg/controllers/register.go b/pkg/controllers/register.go index d71e6a049..848cfe1dd 100644 --- a/pkg/controllers/register.go +++ b/pkg/controllers/register.go @@ -10,6 +10,7 @@ import ( "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd" "github.com/loft-sh/vcluster/pkg/setup/options" + "github.com/loft-sh/vcluster/pkg/util/kubeconfig" "github.com/loft-sh/vcluster/pkg/util/translate" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog/v2" @@ -19,7 +20,6 @@ import ( "github.com/loft-sh/vcluster/pkg/controllers/servicesync" "github.com/loft-sh/vcluster/pkg/controllers/syncer" "github.com/loft-sh/vcluster/pkg/helm" - "github.com/loft-sh/vcluster/pkg/plugin" "github.com/loft-sh/vcluster/pkg/util/blockingcacheclient" util "github.com/loft-sh/vcluster/pkg/util/context" "k8s.io/apimachinery/pkg/api/meta" @@ -251,7 +251,7 @@ func RegisterGenericSyncController(ctx *options.ControllerContext) error { } func RegisterInitManifestsController(controllerCtx *options.ControllerContext) error { - vconfig, err := plugin.ConvertRestConfigToClientConfig(controllerCtx.VirtualManager.GetConfig()) + vconfig, err := kubeconfig.ConvertRestConfigToClientConfig(controllerCtx.VirtualManager.GetConfig()) if err != nil { return err } diff --git a/pkg/plugin/plugin.go b/pkg/plugin/plugin.go index c1478b294..b07f0c30e 100644 --- a/pkg/plugin/plugin.go +++ b/pkg/plugin/plugin.go @@ -1,107 +1,31 @@ package plugin import ( - context "context" - "encoding/json" + "context" "fmt" - "net" - "os" - "sync" - "time" + 
plugintypes "github.com/loft-sh/vcluster/pkg/plugin/types" + pluginv1 "github.com/loft-sh/vcluster/pkg/plugin/v1" + pluginv2 "github.com/loft-sh/vcluster/pkg/plugin/v2" "github.com/loft-sh/vcluster/pkg/setup/options" - "github.com/loft-sh/vcluster/pkg/util/loghelper" - "github.com/loft-sh/vcluster/pkg/util/random" - "go.uber.org/atomic" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/klog/v2" - - remote "github.com/loft-sh/vcluster/pkg/plugin/remote" - grpc "google.golang.org/grpc" - rest "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" - - "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/client" ) -var runID = random.String(12) - -var DefaultManager Manager = &manager{ - clientHooks: map[VersionKindType][]*Plugin{}, - pluginVersions: map[string]*remote.RegisterPluginRequest{}, -} +var DefaultManager = newManager() -type Manager interface { - Start( - ctx context.Context, - currentNamespace, targetNamespace string, - virtualKubeConfig *rest.Config, - physicalKubeConfig *rest.Config, - syncerConfig *clientcmdapi.Config, - options *options.VirtualClusterOptions, - ) error - SetLeader(isLeader bool) - ClientHooksFor(versionKindType VersionKindType) []*Plugin - HasClientHooks() bool - HasPlugins() bool +func newManager() plugintypes.Manager { + return &manager{ + legacyManager: pluginv1.NewManager(), + pluginManager: pluginv2.NewManager(), + } } -var _ remote.VClusterServer = &manager{} - type manager struct { - remote.UnimplementedVClusterServer - - physicalKubeConfig string - virtualKubeConfig string - syncerKubeConfig string - - targetNamespace string - currentNamespace string - - options string - - isLeader atomic.Bool - hasPlugins atomic.Bool - - clientHooksMutex sync.Mutex - clientHooks map[VersionKindType][]*Plugin - - pluginMutex sync.Mutex - pluginVersions map[string]*remote.RegisterPluginRequest -} - -type VersionKindType struct { - APIVersion string - Kind string - Type string -} - -type Plugin struct { - Name string - Address string -} - -func (m *manager) HasClientHooks() bool { - m.clientHooksMutex.Lock() - defer m.clientHooksMutex.Unlock() - - return len(m.clientHooks) > 0 -} - -func (m *manager) ClientHooksFor(versionKindType VersionKindType) []*Plugin { - m.clientHooksMutex.Lock() - defer m.clientHooksMutex.Unlock() - - return m.clientHooks[versionKindType] -} - -func (m *manager) HasPlugins() bool { - return m.hasPlugins.Load() -} - -func (m *manager) SetLeader(isLeader bool) { - m.isLeader.Store(isLeader) + legacyManager *pluginv1.Manager + pluginManager *pluginv2.Manager } func (m *manager) Start( @@ -112,268 +36,46 @@ func (m *manager) Start( syncerConfig *clientcmdapi.Config, options *options.VirtualClusterOptions, ) error { - // set if we have plugins - m.hasPlugins.Store(len(options.Plugins) > 0) - - // base options - m.currentNamespace = currentNamespace - m.targetNamespace = targetNamespace - - // Context options - out, err := json.Marshal(options) - if err != nil { - return errors.Wrap(err, "marshal options") - } - m.options = string(out) - - // Virtual client config - convertedVirtualConfig, err := ConvertRestConfigToClientConfig(virtualKubeConfig) - if err != nil { - return errors.Wrap(err, "convert virtual client config") - } - rawVirtualConfig, err := convertedVirtualConfig.RawConfig() - if err != nil { - return errors.Wrap(err, "convert virtual client config") - } - virtualConfigBytes, err := clientcmd.Write(rawVirtualConfig) 
- if err != nil { - return errors.Wrap(err, "marshal virtual client config") - } - m.virtualKubeConfig = string(virtualConfigBytes) - - // Physical client config - convertedPhysicalConfig, err := ConvertRestConfigToClientConfig(physicalKubeConfig) - if err != nil { - return errors.Wrap(err, "convert physical client config") - } - rawPhysicalConfig, err := convertedPhysicalConfig.RawConfig() - if err != nil { - return errors.Wrap(err, "convert physical client config") - } - phyisicalConfigBytes, err := clientcmd.Write(rawPhysicalConfig) - if err != nil { - return errors.Wrap(err, "marshal physical client config") - } - m.physicalKubeConfig = string(phyisicalConfigBytes) - - // Syncer client config - syncerConfigBytes, err := clientcmd.Write(*syncerConfig) + err := m.legacyManager.Start(ctx, currentNamespace, targetNamespace, virtualKubeConfig, physicalKubeConfig, syncerConfig, options) if err != nil { - return errors.Wrap(err, "marshal syncer client config") + return fmt.Errorf("start legacy plugins: %w", err) } - m.syncerKubeConfig = string(syncerConfigBytes) - // start the grpc server - loghelper.Infof("Plugin server listening on %s", options.PluginListenAddress) - lis, err := net.Listen("tcp", options.PluginListenAddress) + err = m.pluginManager.Start(ctx, currentNamespace, targetNamespace, virtualKubeConfig, physicalKubeConfig, syncerConfig, options) if err != nil { - return fmt.Errorf("failed to listen: %w", err) - } - - var opts []grpc.ServerOption - grpcServer := grpc.NewServer(opts...) - remote.RegisterVClusterServer(grpcServer, m) - go func() { - err := grpcServer.Serve(lis) - if err != nil { - panic(err) - } - }() - - return m.waitForPlugins(ctx, options) -} - -func (m *manager) waitForPlugins(ctx context.Context, options *options.VirtualClusterOptions) error { - for _, plugin := range options.Plugins { - klog.Infof("Waiting for plugin %s to register...", plugin) - err := wait.PollUntilContextTimeout(ctx, time.Millisecond*100, time.Minute*10, true, func(context.Context) (done bool, err error) { - m.pluginMutex.Lock() - defer m.pluginMutex.Unlock() - - _, ok := m.pluginVersions[plugin] - return ok, nil - }) - if err != nil { - return fmt.Errorf("error waiting for plugin %s: %w", plugin, err) - } - klog.Infof("Plugin %s has successfully registered", plugin) + return fmt.Errorf("start plugins: %w", err) } return nil } -func (m *manager) IsLeader(context.Context, *remote.Empty) (*remote.LeaderInfo, error) { - return &remote.LeaderInfo{ - Leader: m.isLeader.Load(), - RunID: runID, - }, nil -} - -func (m *manager) GetContext(context.Context, *remote.Empty) (*remote.Context, error) { - return &remote.Context{ - VirtualClusterConfig: m.virtualKubeConfig, - PhysicalClusterConfig: m.physicalKubeConfig, - SyncerConfig: m.syncerKubeConfig, - TargetNamespace: m.targetNamespace, - CurrentNamespace: m.currentNamespace, - Options: m.options, - }, nil +func (m *manager) SetLeader(ctx context.Context) error { + m.legacyManager.SetLeader(true) + return m.pluginManager.SetLeader(ctx) } -func (m *manager) RegisterPlugin(_ context.Context, info *remote.RegisterPluginRequest) (*remote.RegisterPluginResult, error) { - if info != nil && info.Name != "" { - klog.Infof("Registering plugin %s", info.Name) - - // copy map - m.pluginMutex.Lock() - defer m.pluginMutex.Unlock() - - m.clientHooksMutex.Lock() - defer m.clientHooksMutex.Unlock() - - newPlugins := map[string]*remote.RegisterPluginRequest{} - for k, v := range m.pluginVersions { - newPlugins[k] = v - } - newPlugins[info.Name] = info - - // regenerate 
client hooks - newClientHooks, err := regenerateClientHooks(newPlugins) - if err != nil { - klog.Infof("Error regenerating client hooks for plugin %s: %v", info.Name, err) - return nil, errors.Wrap(err, "generate client hooks") - } - - m.clientHooks = newClientHooks - m.pluginVersions = newPlugins +func (m *manager) MutateObject(ctx context.Context, obj client.Object, hookType string, scheme *runtime.Scheme) error { + err := m.legacyManager.MutateObject(ctx, obj, hookType, scheme) + if err != nil { + return err } - return &remote.RegisterPluginResult{}, nil -} - -// Register is deprecated and will be removed in future -func (m *manager) Register(_ context.Context, info *remote.PluginInfo) (*remote.Context, error) { - if info != nil && info.Name != "" { - klog.Infof("Registering plugin %s", info.Name) - - m.pluginMutex.Lock() - defer m.pluginMutex.Unlock() - - m.pluginVersions[info.Name] = &remote.RegisterPluginRequest{ - Name: info.Name, - } + err = m.pluginManager.MutateObject(ctx, obj, hookType, scheme) + if err != nil { + return err } - return &remote.Context{ - VirtualClusterConfig: m.virtualKubeConfig, - PhysicalClusterConfig: m.physicalKubeConfig, - SyncerConfig: m.syncerKubeConfig, - TargetNamespace: m.targetNamespace, - CurrentNamespace: m.currentNamespace, - Options: m.options, - }, nil + return nil } -func regenerateClientHooks(plugins map[string]*remote.RegisterPluginRequest) (map[VersionKindType][]*Plugin, error) { - retMap := map[VersionKindType][]*Plugin{} - for _, pluginInfo := range plugins { - plugin := &Plugin{ - Name: pluginInfo.Name, - Address: pluginInfo.Address, - } - for _, clientHookInfo := range pluginInfo.ClientHooks { - if clientHookInfo.ApiVersion == "" { - return nil, fmt.Errorf("api version is empty in plugin %s hook", plugin.Name) - } else if clientHookInfo.Kind == "" { - return nil, fmt.Errorf("kind is empty in plugin %s hook", plugin.Name) - } - - for _, t := range clientHookInfo.Types { - if t == "" { - continue - } - - versionKindType := VersionKindType{ - APIVersion: clientHookInfo.ApiVersion, - Kind: clientHookInfo.Kind, - Type: t, - } - retMap[versionKindType] = append(retMap[versionKindType], plugin) - } - - klog.Infof("Register client hook for %s %s in plugin %s", clientHookInfo.ApiVersion, clientHookInfo.Kind, plugin.Name) - } - } - - return retMap, nil +func (m *manager) HasClientHooks() bool { + return m.legacyManager.HasClientHooks() || m.pluginManager.HasClientHooks() } -func ConvertRestConfigToClientConfig(config *rest.Config) (clientcmd.ClientConfig, error) { - contextName := "local" - kubeConfig := clientcmdapi.NewConfig() - kubeConfig.Contexts = map[string]*clientcmdapi.Context{ - contextName: { - Cluster: contextName, - AuthInfo: contextName, - }, - } - kubeConfig.Clusters = map[string]*clientcmdapi.Cluster{ - contextName: { - Server: config.Host, - InsecureSkipTLSVerify: config.Insecure, - CertificateAuthorityData: config.CAData, - CertificateAuthority: config.CAFile, - }, - } - kubeConfig.AuthInfos = map[string]*clientcmdapi.AuthInfo{ - contextName: { - Token: config.BearerToken, - TokenFile: config.BearerTokenFile, - Impersonate: config.Impersonate.UserName, - ImpersonateGroups: config.Impersonate.Groups, - ImpersonateUserExtra: config.Impersonate.Extra, - ClientCertificate: config.CertFile, - ClientCertificateData: config.CertData, - ClientKey: config.KeyFile, - ClientKeyData: config.KeyData, - Username: config.Username, - Password: config.Password, - AuthProvider: config.AuthProvider, - Exec: config.ExecProvider, - }, - } - 
kubeConfig.CurrentContext = contextName - - // resolve certificate - if kubeConfig.Clusters[contextName].CertificateAuthorityData == nil && kubeConfig.Clusters[contextName].CertificateAuthority != "" { - o, err := os.ReadFile(kubeConfig.Clusters[contextName].CertificateAuthority) - if err != nil { - return nil, err - } - - kubeConfig.Clusters[contextName].CertificateAuthority = "" - kubeConfig.Clusters[contextName].CertificateAuthorityData = o - } - - // fill in data - if kubeConfig.AuthInfos[contextName].ClientCertificateData == nil && kubeConfig.AuthInfos[contextName].ClientCertificate != "" { - o, err := os.ReadFile(kubeConfig.AuthInfos[contextName].ClientCertificate) - if err != nil { - return nil, err - } - - kubeConfig.AuthInfos[contextName].ClientCertificate = "" - kubeConfig.AuthInfos[contextName].ClientCertificateData = o - } - if kubeConfig.AuthInfos[contextName].ClientKeyData == nil && kubeConfig.AuthInfos[contextName].ClientKey != "" { - o, err := os.ReadFile(kubeConfig.AuthInfos[contextName].ClientKey) - if err != nil { - return nil, err - } - - kubeConfig.AuthInfos[contextName].ClientKey = "" - kubeConfig.AuthInfos[contextName].ClientKeyData = o - } +func (m *manager) HasClientHooksForType(versionKindType plugintypes.VersionKindType) bool { + return m.legacyManager.HasClientHooksForType(versionKindType) || m.pluginManager.HasClientHooksForType(versionKindType) +} - return clientcmd.NewDefaultClientConfig(*kubeConfig, &clientcmd.ConfigOverrides{}), nil +func (m *manager) HasPlugins() bool { + return m.legacyManager.HasPlugins() || m.pluginManager.HasPlugins() } diff --git a/pkg/plugin/types/types.go b/pkg/plugin/types/types.go new file mode 100644 index 000000000..eb638e8fb --- /dev/null +++ b/pkg/plugin/types/types.go @@ -0,0 +1,44 @@ +package types + +import ( + "context" + + "github.com/loft-sh/vcluster/pkg/setup/options" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Manager interface { + // Start starts the plugins with the given information + Start( + ctx context.Context, + currentNamespace, targetNamespace string, + virtualKubeConfig *rest.Config, + physicalKubeConfig *rest.Config, + syncerConfig *clientcmdapi.Config, + options *options.VirtualClusterOptions, + ) error + + // SetLeader sets the leader for the plugins + SetLeader(ctx context.Context) error + + // MutateObject mutates the objects of the given version kind type + MutateObject(ctx context.Context, obj client.Object, hookType string, scheme *runtime.Scheme) error + + // HasClientHooks returns if there are any plugin client hooks + HasClientHooks() bool + + // HasClientHooksForType returns if there are any plugin client hooks for the given type + HasClientHooksForType(VersionKindType) bool + + // HasPlugins returns if there are any plugins to start + HasPlugins() bool +} + +type VersionKindType struct { + APIVersion string + Kind string + Type string +} diff --git a/pkg/plugin/v1/plugin.go b/pkg/plugin/v1/plugin.go new file mode 100644 index 000000000..8377ab063 --- /dev/null +++ b/pkg/plugin/v1/plugin.go @@ -0,0 +1,365 @@ +package v1 + +import ( + context "context" + "encoding/json" + "fmt" + "net" + "sync" + "time" + + plugintypes "github.com/loft-sh/vcluster/pkg/plugin/types" + "github.com/loft-sh/vcluster/pkg/setup/options" + "github.com/loft-sh/vcluster/pkg/util/kubeconfig" + "github.com/loft-sh/vcluster/pkg/util/loghelper" + "github.com/loft-sh/vcluster/pkg/util/random" + 
"go.uber.org/atomic" + "google.golang.org/grpc/credentials/insecure" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + + remote "github.com/loft-sh/vcluster/pkg/plugin/v1/remote" + grpc "google.golang.org/grpc" + rest "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + + "github.com/pkg/errors" +) + +var runID = random.String(12) + +var _ remote.VClusterServer = &Manager{} + +func NewManager() *Manager { + return &Manager{ + clientHooks: map[plugintypes.VersionKindType][]*Plugin{}, + pluginVersions: map[string]*remote.RegisterPluginRequest{}, + } +} + +type Manager struct { + remote.UnimplementedVClusterServer + + physicalKubeConfig string + virtualKubeConfig string + syncerKubeConfig string + + targetNamespace string + currentNamespace string + + options string + + isLeader atomic.Bool + hasPlugins atomic.Bool + + clientHooksMutex sync.Mutex + clientHooks map[plugintypes.VersionKindType][]*Plugin + + pluginMutex sync.Mutex + pluginVersions map[string]*remote.RegisterPluginRequest +} + +type Plugin struct { + Name string + Address string +} + +func (m *Manager) HasClientHooks() bool { + m.clientHooksMutex.Lock() + defer m.clientHooksMutex.Unlock() + + return len(m.clientHooks) > 0 +} + +func (m *Manager) ClientHooksFor(versionKindType plugintypes.VersionKindType) []*Plugin { + m.clientHooksMutex.Lock() + defer m.clientHooksMutex.Unlock() + + return m.clientHooks[versionKindType] +} + +func (m *Manager) MutateObject(ctx context.Context, obj client.Object, hookType string, scheme *runtime.Scheme) error { + gvk, err := apiutil.GVKForObject(obj, scheme) + if err != nil { + return err + } + + apiVersion, kind := gvk.ToAPIVersionAndKind() + versionKindType := plugintypes.VersionKindType{ + APIVersion: apiVersion, + Kind: kind, + Type: hookType, + } + clientHooks := m.ClientHooksFor(versionKindType) + if len(clientHooks) == 0 { + return nil + } + + encodedObj, err := json.Marshal(obj) + if err != nil { + return errors.Wrap(err, "encode obj") + } + + for _, clientHook := range clientHooks { + encodedObj, err = m.mutateObject(ctx, versionKindType, encodedObj, clientHook) + if err != nil { + return err + } + } + + err = json.Unmarshal(encodedObj, obj) + if err != nil { + return errors.Wrap(err, "unmarshal obj") + } + + return nil +} + +func (m *Manager) mutateObject(ctx context.Context, versionKindType plugintypes.VersionKindType, obj []byte, plugin *Plugin) ([]byte, error) { + conn, err := grpc.Dial(plugin.Address, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return nil, fmt.Errorf("error dialing plugin %s: %w", plugin.Name, err) + } + defer func(conn *grpc.ClientConn) { + _ = conn.Close() + }(conn) + + ctx, cancel := context.WithTimeout(ctx, time.Second*10) + defer cancel() + + loghelper.New("mutate").Debugf("calling plugin %s to mutate object %s %s", plugin.Name, versionKindType.APIVersion, versionKindType.Kind) + mutateResult, err := remote.NewPluginClient(conn).Mutate(ctx, &remote.MutateRequest{ + ApiVersion: versionKindType.APIVersion, + Kind: versionKindType.Kind, + Object: string(obj), + Type: versionKindType.Type, + }) + if err != nil { + return nil, errors.Wrapf(err, "call plugin %s", plugin.Name) + } + + if mutateResult.Mutated { + return []byte(mutateResult.Object), nil + } + return obj, nil +} + +func (m *Manager) 
HasClientHooksForType(versionKindType plugintypes.VersionKindType) bool { + return len(m.ClientHooksFor(versionKindType)) > 0 +} + +func (m *Manager) HasPlugins() bool { + return m.hasPlugins.Load() +} + +func (m *Manager) SetLeader(isLeader bool) { + m.isLeader.Store(isLeader) +} + +func (m *Manager) Start( + ctx context.Context, + currentNamespace, targetNamespace string, + virtualKubeConfig *rest.Config, + physicalKubeConfig *rest.Config, + syncerConfig *clientcmdapi.Config, + options *options.VirtualClusterOptions, +) error { + // set if we have plugins + m.hasPlugins.Store(len(options.Plugins) > 0) + + // base options + m.currentNamespace = currentNamespace + m.targetNamespace = targetNamespace + + // Context options + out, err := json.Marshal(options) + if err != nil { + return errors.Wrap(err, "marshal options") + } + m.options = string(out) + + // Virtual client config + convertedVirtualConfig, err := kubeconfig.ConvertRestConfigToClientConfig(virtualKubeConfig) + if err != nil { + return errors.Wrap(err, "convert virtual client config") + } + rawVirtualConfig, err := convertedVirtualConfig.RawConfig() + if err != nil { + return errors.Wrap(err, "convert virtual client config") + } + virtualConfigBytes, err := clientcmd.Write(rawVirtualConfig) + if err != nil { + return errors.Wrap(err, "marshal virtual client config") + } + m.virtualKubeConfig = string(virtualConfigBytes) + + // Physical client config + convertedPhysicalConfig, err := kubeconfig.ConvertRestConfigToClientConfig(physicalKubeConfig) + if err != nil { + return errors.Wrap(err, "convert physical client config") + } + rawPhysicalConfig, err := convertedPhysicalConfig.RawConfig() + if err != nil { + return errors.Wrap(err, "convert physical client config") + } + phyisicalConfigBytes, err := clientcmd.Write(rawPhysicalConfig) + if err != nil { + return errors.Wrap(err, "marshal physical client config") + } + m.physicalKubeConfig = string(phyisicalConfigBytes) + + // Syncer client config + syncerConfigBytes, err := clientcmd.Write(*syncerConfig) + if err != nil { + return errors.Wrap(err, "marshal syncer client config") + } + m.syncerKubeConfig = string(syncerConfigBytes) + + // start the grpc server + loghelper.Infof("Plugin server listening on %s", options.PluginListenAddress) + lis, err := net.Listen("tcp", options.PluginListenAddress) + if err != nil { + return fmt.Errorf("failed to listen: %w", err) + } + + var opts []grpc.ServerOption + grpcServer := grpc.NewServer(opts...) 
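+	// register the legacy VCluster gRPC service and serve it in the background; v1 plugins dial this address and announce themselves via RegisterPlugin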
+ remote.RegisterVClusterServer(grpcServer, m) + go func() { + err := grpcServer.Serve(lis) + if err != nil { + panic(err) + } + }() + + return m.waitForPlugins(ctx, options) +} + +func (m *Manager) waitForPlugins(ctx context.Context, options *options.VirtualClusterOptions) error { + for _, plugin := range options.Plugins { + klog.Infof("Waiting for plugin %s to register...", plugin) + err := wait.PollUntilContextTimeout(ctx, time.Millisecond*100, time.Minute*10, true, func(context.Context) (done bool, err error) { + m.pluginMutex.Lock() + defer m.pluginMutex.Unlock() + + _, ok := m.pluginVersions[plugin] + return ok, nil + }) + if err != nil { + return fmt.Errorf("error waiting for plugin %s: %w", plugin, err) + } + klog.Infof("Plugin %s has successfully registered", plugin) + } + + return nil +} + +func (m *Manager) IsLeader(context.Context, *remote.Empty) (*remote.LeaderInfo, error) { + return &remote.LeaderInfo{ + Leader: m.isLeader.Load(), + RunID: runID, + }, nil +} + +func (m *Manager) GetContext(context.Context, *remote.Empty) (*remote.Context, error) { + return &remote.Context{ + VirtualClusterConfig: m.virtualKubeConfig, + PhysicalClusterConfig: m.physicalKubeConfig, + SyncerConfig: m.syncerKubeConfig, + TargetNamespace: m.targetNamespace, + CurrentNamespace: m.currentNamespace, + Options: m.options, + }, nil +} + +func (m *Manager) RegisterPlugin(_ context.Context, info *remote.RegisterPluginRequest) (*remote.RegisterPluginResult, error) { + if info != nil && info.Name != "" { + klog.Infof("Registering plugin %s", info.Name) + + // copy map + m.pluginMutex.Lock() + defer m.pluginMutex.Unlock() + + m.clientHooksMutex.Lock() + defer m.clientHooksMutex.Unlock() + + newPlugins := map[string]*remote.RegisterPluginRequest{} + for k, v := range m.pluginVersions { + newPlugins[k] = v + } + newPlugins[info.Name] = info + + // regenerate client hooks + newClientHooks, err := regenerateClientHooks(newPlugins) + if err != nil { + klog.Infof("Error regenerating client hooks for plugin %s: %v", info.Name, err) + return nil, errors.Wrap(err, "generate client hooks") + } + + m.clientHooks = newClientHooks + m.pluginVersions = newPlugins + } + + return &remote.RegisterPluginResult{}, nil +} + +// Register is deprecated and will be removed in future +func (m *Manager) Register(_ context.Context, info *remote.PluginInfo) (*remote.Context, error) { + if info != nil && info.Name != "" { + klog.Infof("Registering plugin %s", info.Name) + + m.pluginMutex.Lock() + defer m.pluginMutex.Unlock() + + m.pluginVersions[info.Name] = &remote.RegisterPluginRequest{ + Name: info.Name, + } + } + + return &remote.Context{ + VirtualClusterConfig: m.virtualKubeConfig, + PhysicalClusterConfig: m.physicalKubeConfig, + SyncerConfig: m.syncerKubeConfig, + TargetNamespace: m.targetNamespace, + CurrentNamespace: m.currentNamespace, + Options: m.options, + }, nil +} + +func regenerateClientHooks(plugins map[string]*remote.RegisterPluginRequest) (map[plugintypes.VersionKindType][]*Plugin, error) { + retMap := map[plugintypes.VersionKindType][]*Plugin{} + for _, pluginInfo := range plugins { + plugin := &Plugin{ + Name: pluginInfo.Name, + Address: pluginInfo.Address, + } + for _, clientHookInfo := range pluginInfo.ClientHooks { + if clientHookInfo.ApiVersion == "" { + return nil, fmt.Errorf("api version is empty in plugin %s hook", plugin.Name) + } else if clientHookInfo.Kind == "" { + return nil, fmt.Errorf("kind is empty in plugin %s hook", plugin.Name) + } + + for _, t := range clientHookInfo.Types { + if t == "" { + 
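+					// a plugin may send empty hook types; skip them instead of registering an empty client hook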
continue + } + + versionKindType := plugintypes.VersionKindType{ + APIVersion: clientHookInfo.ApiVersion, + Kind: clientHookInfo.Kind, + Type: t, + } + retMap[versionKindType] = append(retMap[versionKindType], plugin) + } + + klog.Infof("Register client hook for %s %s in plugin %s", clientHookInfo.ApiVersion, clientHookInfo.Kind, plugin.Name) + } + } + + return retMap, nil +} diff --git a/pkg/plugin/remote/plugin.pb.go b/pkg/plugin/v1/remote/plugin.pb.go similarity index 100% rename from pkg/plugin/remote/plugin.pb.go rename to pkg/plugin/v1/remote/plugin.pb.go diff --git a/pkg/plugin/remote/plugin.proto b/pkg/plugin/v1/remote/plugin.proto similarity index 95% rename from pkg/plugin/remote/plugin.proto rename to pkg/plugin/v1/remote/plugin.proto index b2431d0ec..5aeed8feb 100644 --- a/pkg/plugin/remote/plugin.proto +++ b/pkg/plugin/v1/remote/plugin.proto @@ -3,7 +3,7 @@ syntax = "proto3"; package remote; -option go_package = "github.com/loft-sh/vcluster/pkg/plugin/remote"; +option go_package = "github.com/loft-sh/vcluster/pkg/plugin/v1/remote"; service VCluster { // Deprecated: Use GetContext & RegisterPlugin instead diff --git a/pkg/plugin/remote/plugin_grpc.pb.go b/pkg/plugin/v1/remote/plugin_grpc.pb.go similarity index 100% rename from pkg/plugin/remote/plugin_grpc.pb.go rename to pkg/plugin/v1/remote/plugin_grpc.pb.go diff --git a/pkg/plugin/v2/grpc.go b/pkg/plugin/v2/grpc.go new file mode 100644 index 000000000..1d5a3cbf9 --- /dev/null +++ b/pkg/plugin/v2/grpc.go @@ -0,0 +1,40 @@ +package v2 + +import ( + "context" + "errors" + "net/rpc" + + "github.com/hashicorp/go-plugin" + "github.com/loft-sh/vcluster/pkg/plugin/v2/pluginv2" + "google.golang.org/grpc" +) + +// GRPCProviderPlugin is an implementation of the +// github.com/hashicorp/go-plugin#Plugin and +// github.com/hashicorp/go-plugin#GRPCPlugin interfaces +type GRPCProviderPlugin struct{} + +// Server always returns an error; we're only implementing the GRPCPlugin +// interface, not the Plugin interface. +func (p *GRPCProviderPlugin) Server(*plugin.MuxBroker) (interface{}, error) { + return nil, errors.New("vcluster-plugin only implements gRPC clients") +} + +// Client always returns an error; we're only implementing the GRPCPlugin +// interface, not the Plugin interface. +func (p *GRPCProviderPlugin) Client(*plugin.MuxBroker, *rpc.Client) (interface{}, error) { + return nil, errors.New("vcluster-plugin only implements gRPC clients") +} + +// GRPCClient always returns an error; we're only implementing the server half +// of the interface. +func (p *GRPCProviderPlugin) GRPCClient(_ context.Context, _ *plugin.GRPCBroker, clientConn *grpc.ClientConn) (interface{}, error) { + return pluginv2.NewPluginClient(clientConn), nil +} + +// GRPCServer registers the gRPC provider server with the gRPC server that +// go-plugin is standing up. 
+func (p *GRPCProviderPlugin) GRPCServer(_ *plugin.GRPCBroker, _ *grpc.Server) error { + return errors.New("vcluster-plugin only implements gRPC clients") +} diff --git a/pkg/plugin/v2/plugin.go b/pkg/plugin/v2/plugin.go new file mode 100644 index 000000000..1c2f816ef --- /dev/null +++ b/pkg/plugin/v2/plugin.go @@ -0,0 +1,349 @@ +package v2 + +import ( + "context" + "encoding/json" + "fmt" + "os" + "os/exec" + "path" + "time" + + "github.com/hashicorp/go-plugin" + plugintypes "github.com/loft-sh/vcluster/pkg/plugin/types" + "github.com/loft-sh/vcluster/pkg/plugin/v2/pluginv2" + "github.com/loft-sh/vcluster/pkg/setup/options" + "github.com/loft-sh/vcluster/pkg/util/kubeconfig" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +func NewManager() *Manager { + pluginFolder := os.Getenv("PLUGIN_FOLDER") + if pluginFolder == "" { + pluginFolder = "/plugins" + } + + return &Manager{ + PluginFolder: pluginFolder, + ClientHooks: map[plugintypes.VersionKindType][]*vClusterPlugin{}, + } +} + +type Manager struct { + // PluginFolder where to load plugins from + PluginFolder string + + // Plugins that were loaded + Plugins []*vClusterPlugin + + // ClientHooks that were loaded + ClientHooks map[plugintypes.VersionKindType][]*vClusterPlugin +} + +type vClusterPlugin struct { + // Path is the path where the plugin was loaded from + Path string + + // Client is the plugin client + Client *plugin.Client + + // GRPCClient is the direct grpc client + GRPCClient pluginv2.PluginClient +} + +func (m *Manager) Start( + ctx context.Context, + currentNamespace, targetNamespace string, + virtualKubeConfig *rest.Config, + physicalKubeConfig *rest.Config, + syncerConfig *clientcmdapi.Config, + options *options.VirtualClusterOptions, +) error { + // try to search for plugins + plugins, err := m.findPlugins(ctx) + if err != nil { + return fmt.Errorf("find plugins: %w", err) + } else if len(plugins) == 0 { + return nil + } + + // loop over plugins and load them + for _, pluginPath := range plugins { + err = m.loadPlugin(pluginPath) + if err != nil { + return fmt.Errorf("start plugin %s: %w", pluginPath, err) + } + } + + // build the start request + startRequest, err := m.buildStartRequest(currentNamespace, targetNamespace, virtualKubeConfig, physicalKubeConfig, syncerConfig, options) + if err != nil { + return fmt.Errorf("build start request: %w", err) + } + + // after loading all plugins we start them + for _, vClusterPlugin := range m.Plugins { + // start the plugin + _, err = vClusterPlugin.GRPCClient.Start(ctx, startRequest) + if err != nil { + return fmt.Errorf("error starting plugin %s: %w", vClusterPlugin.Path, err) + } + + // get client hooks + clientHooks, err := vClusterPlugin.GRPCClient.GetClientHooks(ctx, &pluginv2.GetClientHooks_Request{}) + if err != nil { + return fmt.Errorf("error retrieving client hooks for plugin %s: %w", vClusterPlugin.Path, err) + } + + // add client hooks + err = m.addClientHooks(vClusterPlugin, clientHooks.ClientHooks) + if err != nil { + return fmt.Errorf("error adding client hook for plugin %s: %w", vClusterPlugin.Path, err) + } + } + + return nil +} + +func (m *Manager) MutateObject(ctx context.Context, obj client.Object, hookType string, scheme *runtime.Scheme) error { + gvk, err := apiutil.GVKForObject(obj, scheme) + if err != nil { + return err + } + + 
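+	// resolve the client hooks registered for this object's GVK and the given hook type; if none match, the object is returned unchanged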
apiVersion, kind := gvk.ToAPIVersionAndKind() + versionKindType := plugintypes.VersionKindType{ + APIVersion: apiVersion, + Kind: kind, + Type: hookType, + } + clientHooks := m.ClientHooks[versionKindType] + if len(clientHooks) == 0 { + return nil + } + + encodedObj, err := json.Marshal(obj) + if err != nil { + return fmt.Errorf("encode object: %w", err) + } + + for _, clientHook := range clientHooks { + encodedObj, err = m.mutateObject(ctx, versionKindType, encodedObj, clientHook) + if err != nil { + return err + } + } + + err = json.Unmarshal(encodedObj, obj) + if err != nil { + return fmt.Errorf("decode object: %w", err) + } + + return nil +} + +func (m *Manager) mutateObject(ctx context.Context, versionKindType plugintypes.VersionKindType, obj []byte, plugin *vClusterPlugin) ([]byte, error) { + ctx, cancel := context.WithTimeout(ctx, time.Second*10) + defer cancel() + + klog.FromContext(ctx).V(1).Info("calling plugin to mutate object", "plugin", plugin.Path, "apiVersion", versionKindType.APIVersion, "kind", versionKindType.Kind) + mutateResult, err := plugin.GRPCClient.Mutate(ctx, &pluginv2.Mutate_Request{ + ApiVersion: versionKindType.APIVersion, + Kind: versionKindType.Kind, + Object: string(obj), + Type: versionKindType.Type, + }) + if err != nil { + return nil, fmt.Errorf("call plugin mutate %s: %w", plugin.Path, err) + } + + if mutateResult.Mutated { + return []byte(mutateResult.Object), nil + } + return obj, nil +} + +func (m *Manager) SetLeader(ctx context.Context) error { + for _, vClusterPlugin := range m.Plugins { + _, err := vClusterPlugin.GRPCClient.SetLeader(ctx, &pluginv2.SetLeader_Request{}) + if err != nil { + return fmt.Errorf("error setting leader in plugin %s: %w", vClusterPlugin.Path, err) + } + } + + return nil +} + +func (m *Manager) HasClientHooksForType(versionKindType plugintypes.VersionKindType) bool { + return len(m.ClientHooks[versionKindType]) > 0 +} + +func (m *Manager) HasClientHooks() bool { + return len(m.ClientHooks) > 0 +} + +func (m *Manager) HasPlugins() bool { + return len(m.Plugins) > 0 +} + +func (m *Manager) addClientHooks(vClusterPlugin *vClusterPlugin, clientHooks []*pluginv2.GetClientHooks_ClientHook) error { + for _, clientHookInfo := range clientHooks { + if clientHookInfo.ApiVersion == "" { + return fmt.Errorf("api version is empty in plugin %s hook", vClusterPlugin.Path) + } else if clientHookInfo.Kind == "" { + return fmt.Errorf("kind is empty in plugin %s hook", vClusterPlugin.Path) + } + + for _, t := range clientHookInfo.Types { + if t == "" { + continue + } + + versionKindType := plugintypes.VersionKindType{ + APIVersion: clientHookInfo.ApiVersion, + Kind: clientHookInfo.Kind, + Type: t, + } + + m.ClientHooks[versionKindType] = append(m.ClientHooks[versionKindType], vClusterPlugin) + } + + klog.Infof("Register client hook for %s %s in plugin %s", clientHookInfo.ApiVersion, clientHookInfo.Kind, vClusterPlugin.Path) + } + + return nil +} + +func (m *Manager) buildStartRequest( + currentNamespace, targetNamespace string, + virtualKubeConfig *rest.Config, + physicalKubeConfig *rest.Config, + syncerConfig *clientcmdapi.Config, + options *options.VirtualClusterOptions, +) (*pluginv2.Start_Request, error) { + // Context options + encodedOptions, err := json.Marshal(options) + if err != nil { + return nil, fmt.Errorf("marshal options: %w", err) + } + + // Virtual client config + convertedVirtualConfig, err := kubeconfig.ConvertRestConfigToClientConfig(virtualKubeConfig) + if err != nil { + return nil, fmt.Errorf("convert virtual client 
config: %w", err) + } + rawVirtualConfig, err := convertedVirtualConfig.RawConfig() + if err != nil { + return nil, fmt.Errorf("convert virtual client config: %w", err) + } + virtualConfigBytes, err := clientcmd.Write(rawVirtualConfig) + if err != nil { + return nil, fmt.Errorf("marshal virtual client config: %w", err) + } + + // Physical client config + convertedPhysicalConfig, err := kubeconfig.ConvertRestConfigToClientConfig(physicalKubeConfig) + if err != nil { + return nil, fmt.Errorf("convert physical client config: %w", err) + } + rawPhysicalConfig, err := convertedPhysicalConfig.RawConfig() + if err != nil { + return nil, fmt.Errorf("convert physical client config: %w", err) + } + phyisicalConfigBytes, err := clientcmd.Write(rawPhysicalConfig) + if err != nil { + return nil, fmt.Errorf("marshal physical client config: %w", err) + } + + // Syncer client config + syncerConfigBytes, err := clientcmd.Write(*syncerConfig) + if err != nil { + return nil, fmt.Errorf("marshal syncer client config: %w", err) + } + + return &pluginv2.Start_Request{ + VirtualClusterConfig: string(virtualConfigBytes), + PhysicalClusterConfig: string(phyisicalConfigBytes), + SyncerConfig: string(syncerConfigBytes), + TargetNamespace: targetNamespace, + CurrentNamespace: currentNamespace, + Options: string(encodedOptions), + }, nil +} + +func (m *Manager) loadPlugin(pluginPath string) error { + // connect to plugin + client := plugin.NewClient(&plugin.ClientConfig{ + HandshakeConfig: plugin.HandshakeConfig{ + ProtocolVersion: 1, + }, + Plugins: map[string]plugin.Plugin{ + "plugin": &GRPCProviderPlugin{}, + }, + Cmd: exec.Command(pluginPath), + AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC}, + }) + + // Connect via RPC + rpcClient, err := client.Client() + if err != nil { + client.Kill() + return err + } + + // Request the plugin + raw, err := rpcClient.Dispense("plugin") + if err != nil { + client.Kill() + return err + } + + // add to loaded plugins + m.Plugins = append(m.Plugins, &vClusterPlugin{ + Path: pluginPath, + Client: client, + GRPCClient: raw.(pluginv2.PluginClient), + }) + + return nil +} + +func (m *Manager) findPlugins(ctx context.Context) ([]string, error) { + _, err := os.Stat(m.PluginFolder) + if err != nil { + // we cannot stat plugin folder, skip reading plugins + klog.FromContext(ctx).V(1).Info("Error reading plugin folder", "error", err) + return nil, nil + } + + plugins, err := os.ReadDir(m.PluginFolder) + if err != nil { + return nil, fmt.Errorf("error reading") + } + + pluginPaths := []string{} + for _, pluginName := range plugins { + // is dir or executable? + if pluginName.IsDir() { + // check if plugin binary is there + pluginPath := path.Join(m.PluginFolder, pluginName.Name(), "plugin") + _, err := os.Stat(pluginPath) + if err != nil { + klog.FromContext(ctx).Error(fmt.Errorf("error loading plugin %s: %w", pluginPath, err), "error loading plugin") + continue + } + + pluginPaths = append(pluginPaths, pluginPath) + } else { + pluginPaths = append(pluginPaths, path.Join(m.PluginFolder, pluginName.Name())) + } + } + + return pluginPaths, nil +} diff --git a/pkg/plugin/v2/pluginv2/pluginv2.pb.go b/pkg/plugin/v2/pluginv2/pluginv2.pb.go new file mode 100644 index 000000000..2fa0a2f32 --- /dev/null +++ b/pkg/plugin/v2/pluginv2/pluginv2.pb.go @@ -0,0 +1,951 @@ +// protoc -I . pluginv2.proto --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative + +// Code generated by protoc-gen-go. DO NOT EDIT. 
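Note: Manager.loadPlugin above launches each plugin binary through hashicorp/go-plugin (gRPC only, protocol version 1) and dispenses it under the name "plugin", while findPlugins accepts either a standalone executable in the plugin folder or a "plugin" binary inside a subdirectory. For orientation, a minimal plugin-side counterpart might look roughly like the hypothetical sketch below. It is not part of this change; the handshake cookie values are placeholders (go-plugin's Serve normally requires a magic cookie, whereas the host's ClientConfig above only sets the protocol version), and a real plugin would normally be built on a higher-level SDK rather than wiring this up by hand.

```go
// Hypothetical plugin-side sketch (not part of this PR): a binary that serves
// the pluginv2.Plugin gRPC service so the manager above can dispense and
// drive it.
package main

import (
	"context"

	"github.com/hashicorp/go-plugin"
	"github.com/loft-sh/vcluster/pkg/plugin/v2/pluginv2"
	"google.golang.org/grpc"
)

// pluginServer implements pluginv2.PluginServer; RPCs not overridden here
// (e.g. Mutate) fall back to the embedded UnimplementedPluginServer.
type pluginServer struct {
	pluginv2.UnimplementedPluginServer
}

func (s *pluginServer) Start(_ context.Context, _ *pluginv2.Start_Request) (*pluginv2.Start_Response, error) {
	// the request carries the virtual/physical/syncer kube configs and the
	// JSON-encoded options; a real plugin would start its controllers here
	return &pluginv2.Start_Response{}, nil
}

func (s *pluginServer) GetClientHooks(_ context.Context, _ *pluginv2.GetClientHooks_Request) (*pluginv2.GetClientHooks_Response, error) {
	// no client hooks registered in this sketch, so the manager will never
	// call Mutate on this plugin
	return &pluginv2.GetClientHooks_Response{}, nil
}

func (s *pluginServer) SetLeader(_ context.Context, _ *pluginv2.SetLeader_Request) (*pluginv2.SetLeader_Response, error) {
	return &pluginv2.SetLeader_Response{}, nil
}

// grpcPlugin adapts pluginServer to go-plugin's GRPCPlugin interface.
type grpcPlugin struct {
	plugin.NetRPCUnsupportedPlugin
}

func (p *grpcPlugin) GRPCServer(_ *plugin.GRPCBroker, s *grpc.Server) error {
	pluginv2.RegisterPluginServer(s, &pluginServer{})
	return nil
}

// GRPCClient is only used on the host side and is never called here.
func (p *grpcPlugin) GRPCClient(context.Context, *plugin.GRPCBroker, *grpc.ClientConn) (interface{}, error) {
	return nil, nil
}

func main() {
	plugin.Serve(&plugin.ServeConfig{
		// NOTE: assumed handshake values — align these with whatever the
		// vcluster plugin SDK actually uses.
		HandshakeConfig: plugin.HandshakeConfig{
			ProtocolVersion:  1,
			MagicCookieKey:   "VCLUSTER_PLUGIN",  // placeholder
			MagicCookieValue: "vcluster-plugin",  // placeholder
		},
		Plugins:    map[string]plugin.Plugin{"plugin": &grpcPlugin{}},
		GRPCServer: plugin.DefaultGRPCServer,
	})
}
```

Dropped into the plugin folder as /plugins/&lt;name&gt;/plugin (or as a plain executable directly in /plugins), such a binary would be discovered by findPlugins and started with the Start_Request assembled in buildStartRequest.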
+// versions: +// protoc-gen-go v1.28.0 +// protoc v3.19.3 +// source: pluginv2.proto + +package pluginv2 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Start struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Start) Reset() { + *x = Start{} + if protoimpl.UnsafeEnabled { + mi := &file_pluginv2_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Start) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Start) ProtoMessage() {} + +func (x *Start) ProtoReflect() protoreflect.Message { + mi := &file_pluginv2_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Start.ProtoReflect.Descriptor instead. +func (*Start) Descriptor() ([]byte, []int) { + return file_pluginv2_proto_rawDescGZIP(), []int{0} +} + +type GetClientHooks struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetClientHooks) Reset() { + *x = GetClientHooks{} + if protoimpl.UnsafeEnabled { + mi := &file_pluginv2_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetClientHooks) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetClientHooks) ProtoMessage() {} + +func (x *GetClientHooks) ProtoReflect() protoreflect.Message { + mi := &file_pluginv2_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetClientHooks.ProtoReflect.Descriptor instead. +func (*GetClientHooks) Descriptor() ([]byte, []int) { + return file_pluginv2_proto_rawDescGZIP(), []int{1} +} + +type Mutate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Mutate) Reset() { + *x = Mutate{} + if protoimpl.UnsafeEnabled { + mi := &file_pluginv2_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Mutate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Mutate) ProtoMessage() {} + +func (x *Mutate) ProtoReflect() protoreflect.Message { + mi := &file_pluginv2_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Mutate.ProtoReflect.Descriptor instead. 
+func (*Mutate) Descriptor() ([]byte, []int) { + return file_pluginv2_proto_rawDescGZIP(), []int{2} +} + +type SetLeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *SetLeader) Reset() { + *x = SetLeader{} + if protoimpl.UnsafeEnabled { + mi := &file_pluginv2_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetLeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetLeader) ProtoMessage() {} + +func (x *SetLeader) ProtoReflect() protoreflect.Message { + mi := &file_pluginv2_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetLeader.ProtoReflect.Descriptor instead. +func (*SetLeader) Descriptor() ([]byte, []int) { + return file_pluginv2_proto_rawDescGZIP(), []int{3} +} + +type Start_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VirtualClusterConfig string `protobuf:"bytes,1,opt,name=virtualClusterConfig,proto3" json:"virtualClusterConfig,omitempty"` + PhysicalClusterConfig string `protobuf:"bytes,2,opt,name=physicalClusterConfig,proto3" json:"physicalClusterConfig,omitempty"` + SyncerConfig string `protobuf:"bytes,3,opt,name=syncerConfig,proto3" json:"syncerConfig,omitempty"` + TargetNamespace string `protobuf:"bytes,4,opt,name=targetNamespace,proto3" json:"targetNamespace,omitempty"` + CurrentNamespace string `protobuf:"bytes,5,opt,name=currentNamespace,proto3" json:"currentNamespace,omitempty"` + Options string `protobuf:"bytes,6,opt,name=options,proto3" json:"options,omitempty"` +} + +func (x *Start_Request) Reset() { + *x = Start_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_pluginv2_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Start_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Start_Request) ProtoMessage() {} + +func (x *Start_Request) ProtoReflect() protoreflect.Message { + mi := &file_pluginv2_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Start_Request.ProtoReflect.Descriptor instead. 
+func (*Start_Request) Descriptor() ([]byte, []int) { + return file_pluginv2_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Start_Request) GetVirtualClusterConfig() string { + if x != nil { + return x.VirtualClusterConfig + } + return "" +} + +func (x *Start_Request) GetPhysicalClusterConfig() string { + if x != nil { + return x.PhysicalClusterConfig + } + return "" +} + +func (x *Start_Request) GetSyncerConfig() string { + if x != nil { + return x.SyncerConfig + } + return "" +} + +func (x *Start_Request) GetTargetNamespace() string { + if x != nil { + return x.TargetNamespace + } + return "" +} + +func (x *Start_Request) GetCurrentNamespace() string { + if x != nil { + return x.CurrentNamespace + } + return "" +} + +func (x *Start_Request) GetOptions() string { + if x != nil { + return x.Options + } + return "" +} + +type Start_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Start_Response) Reset() { + *x = Start_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_pluginv2_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Start_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Start_Response) ProtoMessage() {} + +func (x *Start_Response) ProtoReflect() protoreflect.Message { + mi := &file_pluginv2_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Start_Response.ProtoReflect.Descriptor instead. +func (*Start_Response) Descriptor() ([]byte, []int) { + return file_pluginv2_proto_rawDescGZIP(), []int{0, 1} +} + +type GetClientHooks_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetClientHooks_Request) Reset() { + *x = GetClientHooks_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_pluginv2_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetClientHooks_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetClientHooks_Request) ProtoMessage() {} + +func (x *GetClientHooks_Request) ProtoReflect() protoreflect.Message { + mi := &file_pluginv2_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetClientHooks_Request.ProtoReflect.Descriptor instead. 
+func (*GetClientHooks_Request) Descriptor() ([]byte, []int) { + return file_pluginv2_proto_rawDescGZIP(), []int{1, 0} +} + +type GetClientHooks_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ClientHooks []*GetClientHooks_ClientHook `protobuf:"bytes,1,rep,name=clientHooks,proto3" json:"clientHooks,omitempty"` +} + +func (x *GetClientHooks_Response) Reset() { + *x = GetClientHooks_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_pluginv2_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetClientHooks_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetClientHooks_Response) ProtoMessage() {} + +func (x *GetClientHooks_Response) ProtoReflect() protoreflect.Message { + mi := &file_pluginv2_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetClientHooks_Response.ProtoReflect.Descriptor instead. +func (*GetClientHooks_Response) Descriptor() ([]byte, []int) { + return file_pluginv2_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *GetClientHooks_Response) GetClientHooks() []*GetClientHooks_ClientHook { + if x != nil { + return x.ClientHooks + } + return nil +} + +type GetClientHooks_ClientHook struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ApiVersion string `protobuf:"bytes,1,opt,name=apiVersion,proto3" json:"apiVersion,omitempty"` + Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"` + Types []string `protobuf:"bytes,3,rep,name=types,proto3" json:"types,omitempty"` +} + +func (x *GetClientHooks_ClientHook) Reset() { + *x = GetClientHooks_ClientHook{} + if protoimpl.UnsafeEnabled { + mi := &file_pluginv2_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetClientHooks_ClientHook) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetClientHooks_ClientHook) ProtoMessage() {} + +func (x *GetClientHooks_ClientHook) ProtoReflect() protoreflect.Message { + mi := &file_pluginv2_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetClientHooks_ClientHook.ProtoReflect.Descriptor instead. 
+func (*GetClientHooks_ClientHook) Descriptor() ([]byte, []int) { + return file_pluginv2_proto_rawDescGZIP(), []int{1, 2} +} + +func (x *GetClientHooks_ClientHook) GetApiVersion() string { + if x != nil { + return x.ApiVersion + } + return "" +} + +func (x *GetClientHooks_ClientHook) GetKind() string { + if x != nil { + return x.Kind + } + return "" +} + +func (x *GetClientHooks_ClientHook) GetTypes() []string { + if x != nil { + return x.Types + } + return nil +} + +type Mutate_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ApiVersion string `protobuf:"bytes,1,opt,name=apiVersion,proto3" json:"apiVersion,omitempty"` + Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"` + Object string `protobuf:"bytes,3,opt,name=object,proto3" json:"object,omitempty"` + Type string `protobuf:"bytes,4,opt,name=type,proto3" json:"type,omitempty"` +} + +func (x *Mutate_Request) Reset() { + *x = Mutate_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_pluginv2_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Mutate_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Mutate_Request) ProtoMessage() {} + +func (x *Mutate_Request) ProtoReflect() protoreflect.Message { + mi := &file_pluginv2_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Mutate_Request.ProtoReflect.Descriptor instead. +func (*Mutate_Request) Descriptor() ([]byte, []int) { + return file_pluginv2_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *Mutate_Request) GetApiVersion() string { + if x != nil { + return x.ApiVersion + } + return "" +} + +func (x *Mutate_Request) GetKind() string { + if x != nil { + return x.Kind + } + return "" +} + +func (x *Mutate_Request) GetObject() string { + if x != nil { + return x.Object + } + return "" +} + +func (x *Mutate_Request) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +type Mutate_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Object string `protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"` + Mutated bool `protobuf:"varint,2,opt,name=mutated,proto3" json:"mutated,omitempty"` +} + +func (x *Mutate_Response) Reset() { + *x = Mutate_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_pluginv2_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Mutate_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Mutate_Response) ProtoMessage() {} + +func (x *Mutate_Response) ProtoReflect() protoreflect.Message { + mi := &file_pluginv2_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Mutate_Response.ProtoReflect.Descriptor instead. 
+func (*Mutate_Response) Descriptor() ([]byte, []int) { + return file_pluginv2_proto_rawDescGZIP(), []int{2, 1} +} + +func (x *Mutate_Response) GetObject() string { + if x != nil { + return x.Object + } + return "" +} + +func (x *Mutate_Response) GetMutated() bool { + if x != nil { + return x.Mutated + } + return false +} + +type SetLeader_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *SetLeader_Request) Reset() { + *x = SetLeader_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_pluginv2_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetLeader_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetLeader_Request) ProtoMessage() {} + +func (x *SetLeader_Request) ProtoReflect() protoreflect.Message { + mi := &file_pluginv2_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetLeader_Request.ProtoReflect.Descriptor instead. +func (*SetLeader_Request) Descriptor() ([]byte, []int) { + return file_pluginv2_proto_rawDescGZIP(), []int{3, 0} +} + +type SetLeader_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *SetLeader_Response) Reset() { + *x = SetLeader_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_pluginv2_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetLeader_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetLeader_Response) ProtoMessage() {} + +func (x *SetLeader_Response) ProtoReflect() protoreflect.Message { + mi := &file_pluginv2_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetLeader_Response.ProtoReflect.Descriptor instead. 
+func (*SetLeader_Response) Descriptor() ([]byte, []int) { + return file_pluginv2_proto_rawDescGZIP(), []int{3, 1} +} + +var File_pluginv2_proto protoreflect.FileDescriptor + +var file_pluginv2_proto_rawDesc = []byte{ + 0x0a, 0x0e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x08, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x76, 0x32, 0x22, 0x9d, 0x02, 0x0a, 0x05, 0x53, + 0x74, 0x61, 0x72, 0x74, 0x1a, 0x87, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x32, 0x0a, 0x14, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, + 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x34, 0x0a, 0x15, 0x70, 0x68, 0x79, 0x73, 0x69, 0x63, 0x61, 0x6c, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x15, 0x70, 0x68, 0x79, 0x73, 0x69, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x79, + 0x6e, 0x63, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x73, 0x79, 0x6e, 0x63, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x28, + 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2a, 0x0a, 0x10, 0x63, 0x75, 0x72, 0x72, + 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x0a, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc6, 0x01, 0x0a, 0x0e, 0x47, + 0x65, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x6f, 0x6f, 0x6b, 0x73, 0x1a, 0x09, 0x0a, + 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x51, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x6f, + 0x6f, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x6f, + 0x6f, 0x6b, 0x73, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x0b, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x6f, 0x6f, 0x6b, 0x73, 0x1a, 0x56, 0x0a, 0x0a, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x6f, 0x6f, 0x6b, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x70, 0x69, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, + 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x14, 0x0a, + 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x22, 0xb1, 0x01, 0x0a, 0x06, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x69, + 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x70, 0x69, + 
0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, + 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x16, 0x0a, + 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x3c, 0x0a, 0x08, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x18, 0x0a, + 0x07, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, + 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x64, 0x22, 0x22, 0x0a, 0x09, 0x53, 0x65, 0x74, 0x4c, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x0a, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xa2, 0x02, 0x0a, 0x06, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x3a, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, + 0x17, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x55, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, + 0x6f, 0x6f, 0x6b, 0x73, 0x12, 0x20, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x76, 0x32, 0x2e, + 0x47, 0x65, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x6f, 0x6f, 0x6b, 0x73, 0x2e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x76, + 0x32, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x6f, 0x6f, 0x6b, 0x73, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x4d, 0x75, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x18, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x76, 0x32, 0x2e, 0x4d, + 0x75, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, 0x09, 0x53, 0x65, 0x74, 0x4c, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x1b, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x76, 0x32, + 0x2e, 0x53, 0x65, 0x74, 0x4c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x76, 0x32, 0x2e, 0x53, 0x65, + 0x74, 0x4c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x42, 0x34, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, + 0x6f, 0x66, 0x74, 0x2d, 0x73, 0x68, 0x2f, 0x76, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, + 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_pluginv2_proto_rawDescOnce sync.Once + file_pluginv2_proto_rawDescData = file_pluginv2_proto_rawDesc +) + +func file_pluginv2_proto_rawDescGZIP() []byte { + file_pluginv2_proto_rawDescOnce.Do(func() { + 
file_pluginv2_proto_rawDescData = protoimpl.X.CompressGZIP(file_pluginv2_proto_rawDescData) + }) + return file_pluginv2_proto_rawDescData +} + +var file_pluginv2_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_pluginv2_proto_goTypes = []interface{}{ + (*Start)(nil), // 0: pluginv2.Start + (*GetClientHooks)(nil), // 1: pluginv2.GetClientHooks + (*Mutate)(nil), // 2: pluginv2.Mutate + (*SetLeader)(nil), // 3: pluginv2.SetLeader + (*Start_Request)(nil), // 4: pluginv2.Start.Request + (*Start_Response)(nil), // 5: pluginv2.Start.Response + (*GetClientHooks_Request)(nil), // 6: pluginv2.GetClientHooks.Request + (*GetClientHooks_Response)(nil), // 7: pluginv2.GetClientHooks.Response + (*GetClientHooks_ClientHook)(nil), // 8: pluginv2.GetClientHooks.ClientHook + (*Mutate_Request)(nil), // 9: pluginv2.Mutate.Request + (*Mutate_Response)(nil), // 10: pluginv2.Mutate.Response + (*SetLeader_Request)(nil), // 11: pluginv2.SetLeader.Request + (*SetLeader_Response)(nil), // 12: pluginv2.SetLeader.Response +} +var file_pluginv2_proto_depIdxs = []int32{ + 8, // 0: pluginv2.GetClientHooks.Response.clientHooks:type_name -> pluginv2.GetClientHooks.ClientHook + 4, // 1: pluginv2.Plugin.Start:input_type -> pluginv2.Start.Request + 6, // 2: pluginv2.Plugin.GetClientHooks:input_type -> pluginv2.GetClientHooks.Request + 9, // 3: pluginv2.Plugin.Mutate:input_type -> pluginv2.Mutate.Request + 11, // 4: pluginv2.Plugin.SetLeader:input_type -> pluginv2.SetLeader.Request + 5, // 5: pluginv2.Plugin.Start:output_type -> pluginv2.Start.Response + 7, // 6: pluginv2.Plugin.GetClientHooks:output_type -> pluginv2.GetClientHooks.Response + 10, // 7: pluginv2.Plugin.Mutate:output_type -> pluginv2.Mutate.Response + 12, // 8: pluginv2.Plugin.SetLeader:output_type -> pluginv2.SetLeader.Response + 5, // [5:9] is the sub-list for method output_type + 1, // [1:5] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_pluginv2_proto_init() } +func file_pluginv2_proto_init() { + if File_pluginv2_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pluginv2_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Start); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pluginv2_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetClientHooks); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pluginv2_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Mutate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pluginv2_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetLeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pluginv2_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Start_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pluginv2_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + 
switch v := v.(*Start_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pluginv2_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetClientHooks_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pluginv2_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetClientHooks_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pluginv2_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetClientHooks_ClientHook); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pluginv2_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Mutate_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pluginv2_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Mutate_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pluginv2_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetLeader_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pluginv2_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetLeader_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pluginv2_proto_rawDesc, + NumEnums: 0, + NumMessages: 13, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_pluginv2_proto_goTypes, + DependencyIndexes: file_pluginv2_proto_depIdxs, + MessageInfos: file_pluginv2_proto_msgTypes, + }.Build() + File_pluginv2_proto = out.File + file_pluginv2_proto_rawDesc = nil + file_pluginv2_proto_goTypes = nil + file_pluginv2_proto_depIdxs = nil +} diff --git a/pkg/plugin/v2/pluginv2/pluginv2.proto b/pkg/plugin/v2/pluginv2/pluginv2.proto new file mode 100644 index 000000000..4b29a1be4 --- /dev/null +++ b/pkg/plugin/v2/pluginv2/pluginv2.proto @@ -0,0 +1,64 @@ +// protoc -I . pluginv2.proto --go_out=. --go_opt=paths=source_relative --go-grpc_out=. 
--go-grpc_opt=paths=source_relative +syntax = "proto3"; + +package pluginv2; + +option go_package = "github.com/loft-sh/vcluster/pkg/plugin/v2/pluginv2"; + +service Plugin { + rpc Start(Start.Request) returns (Start.Response); + rpc GetClientHooks(GetClientHooks.Request) returns (GetClientHooks.Response); + rpc Mutate(Mutate.Request) returns (Mutate.Response); + rpc SetLeader(SetLeader.Request) returns (SetLeader.Response); +} + +message Start { + message Request { + string virtualClusterConfig = 1; + string physicalClusterConfig = 2; + string syncerConfig = 3; + + string targetNamespace = 4; + string currentNamespace = 5; + + string options = 6; + } + + message Response {} +} + +message GetClientHooks { + message Request {} + + message Response { + repeated ClientHook clientHooks = 1; + } + + message ClientHook { + string apiVersion = 1; + string kind = 2; + repeated string types = 3; + } +} + +message Mutate { + message Request { + string apiVersion = 1; + string kind = 2; + string object = 3; + string type = 4; + } + + message Response { + string object = 1; + bool mutated = 2; + } +} + +message SetLeader { + message Request {} + message Response {} +} + + + diff --git a/pkg/plugin/v2/pluginv2/pluginv2_grpc.pb.go b/pkg/plugin/v2/pluginv2/pluginv2_grpc.pb.go new file mode 100644 index 000000000..b08b2129f --- /dev/null +++ b/pkg/plugin/v2/pluginv2/pluginv2_grpc.pb.go @@ -0,0 +1,213 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.19.3 +// source: pluginv2.proto + +package pluginv2 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// PluginClient is the client API for Plugin service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type PluginClient interface { + Start(ctx context.Context, in *Start_Request, opts ...grpc.CallOption) (*Start_Response, error) + GetClientHooks(ctx context.Context, in *GetClientHooks_Request, opts ...grpc.CallOption) (*GetClientHooks_Response, error) + Mutate(ctx context.Context, in *Mutate_Request, opts ...grpc.CallOption) (*Mutate_Response, error) + SetLeader(ctx context.Context, in *SetLeader_Request, opts ...grpc.CallOption) (*SetLeader_Response, error) +} + +type pluginClient struct { + cc grpc.ClientConnInterface +} + +func NewPluginClient(cc grpc.ClientConnInterface) PluginClient { + return &pluginClient{cc} +} + +func (c *pluginClient) Start(ctx context.Context, in *Start_Request, opts ...grpc.CallOption) (*Start_Response, error) { + out := new(Start_Response) + err := c.cc.Invoke(ctx, "/pluginv2.Plugin/Start", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *pluginClient) GetClientHooks(ctx context.Context, in *GetClientHooks_Request, opts ...grpc.CallOption) (*GetClientHooks_Response, error) { + out := new(GetClientHooks_Response) + err := c.cc.Invoke(ctx, "/pluginv2.Plugin/GetClientHooks", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *pluginClient) Mutate(ctx context.Context, in *Mutate_Request, opts ...grpc.CallOption) (*Mutate_Response, error) { + out := new(Mutate_Response) + err := c.cc.Invoke(ctx, "/pluginv2.Plugin/Mutate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *pluginClient) SetLeader(ctx context.Context, in *SetLeader_Request, opts ...grpc.CallOption) (*SetLeader_Response, error) { + out := new(SetLeader_Response) + err := c.cc.Invoke(ctx, "/pluginv2.Plugin/SetLeader", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// PluginServer is the server API for Plugin service. +// All implementations must embed UnimplementedPluginServer +// for forward compatibility +type PluginServer interface { + Start(context.Context, *Start_Request) (*Start_Response, error) + GetClientHooks(context.Context, *GetClientHooks_Request) (*GetClientHooks_Response, error) + Mutate(context.Context, *Mutate_Request) (*Mutate_Response, error) + SetLeader(context.Context, *SetLeader_Request) (*SetLeader_Response, error) + mustEmbedUnimplementedPluginServer() +} + +// UnimplementedPluginServer must be embedded to have forward compatible implementations. +type UnimplementedPluginServer struct { +} + +func (UnimplementedPluginServer) Start(context.Context, *Start_Request) (*Start_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method Start not implemented") +} +func (UnimplementedPluginServer) GetClientHooks(context.Context, *GetClientHooks_Request) (*GetClientHooks_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetClientHooks not implemented") +} +func (UnimplementedPluginServer) Mutate(context.Context, *Mutate_Request) (*Mutate_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method Mutate not implemented") +} +func (UnimplementedPluginServer) SetLeader(context.Context, *SetLeader_Request) (*SetLeader_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetLeader not implemented") +} +func (UnimplementedPluginServer) mustEmbedUnimplementedPluginServer() {} + +// UnsafePluginServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to PluginServer will +// result in compilation errors. 
+type UnsafePluginServer interface { + mustEmbedUnimplementedPluginServer() +} + +func RegisterPluginServer(s grpc.ServiceRegistrar, srv PluginServer) { + s.RegisterService(&Plugin_ServiceDesc, srv) +} + +func _Plugin_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Start_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PluginServer).Start(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pluginv2.Plugin/Start", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PluginServer).Start(ctx, req.(*Start_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Plugin_GetClientHooks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetClientHooks_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PluginServer).GetClientHooks(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pluginv2.Plugin/GetClientHooks", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PluginServer).GetClientHooks(ctx, req.(*GetClientHooks_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Plugin_Mutate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Mutate_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PluginServer).Mutate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pluginv2.Plugin/Mutate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PluginServer).Mutate(ctx, req.(*Mutate_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Plugin_SetLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetLeader_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PluginServer).SetLeader(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pluginv2.Plugin/SetLeader", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PluginServer).SetLeader(ctx, req.(*SetLeader_Request)) + } + return interceptor(ctx, in, info, handler) +} + +// Plugin_ServiceDesc is the grpc.ServiceDesc for Plugin service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Plugin_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "pluginv2.Plugin", + HandlerType: (*PluginServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Start", + Handler: _Plugin_Start_Handler, + }, + { + MethodName: "GetClientHooks", + Handler: _Plugin_GetClientHooks_Handler, + }, + { + MethodName: "Mutate", + Handler: _Plugin_Mutate_Handler, + }, + { + MethodName: "SetLeader", + Handler: _Plugin_SetLeader_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "pluginv2.proto", +} diff --git a/pkg/plugin/v2/types.go b/pkg/plugin/v2/types.go new file mode 100644 index 000000000..207e534f9 --- /dev/null +++ b/pkg/plugin/v2/types.go @@ -0,0 +1,36 @@ +package v2 + +import ( + "context" + + plugintypes "github.com/loft-sh/vcluster/pkg/plugin/types" + "github.com/loft-sh/vcluster/pkg/setup/options" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Plugin interface { + // Start runs the plugin and blocks until the plugin finishes + Start( + ctx context.Context, + currentNamespace, targetNamespace string, + virtualKubeConfig *rest.Config, + physicalKubeConfig *rest.Config, + syncerConfig *clientcmdapi.Config, + options *options.VirtualClusterOptions, + ) error + + // SetLeader signals the plugin that the syncer acquired leadership and starts executing controllers + SetLeader() error + + // MutateObject mutates the objects of the given version kind type + MutateObject(ctx context.Context, obj client.Object, hookType string, scheme *runtime.Scheme) error + + // HasClientHooks returns if there are any plugin client hooks + HasClientHooks() bool + + // HasClientHooksForType returns if there are any plugin client hooks for the given type + HasClientHooksForType(versionKindType plugintypes.VersionKindType) bool +} diff --git a/pkg/setup/controllers.go b/pkg/setup/controllers.go index 279bfe323..faf857247 100644 --- a/pkg/setup/controllers.go +++ b/pkg/setup/controllers.go @@ -2,6 +2,7 @@ package setup import ( "context" + "fmt" "math" "time" @@ -64,7 +65,10 @@ func StartControllers(controllerContext *options.ControllerContext) error { // set leader if !controllerContext.Options.DisablePlugins { - plugin.DefaultManager.SetLeader(true) + err = plugin.DefaultManager.SetLeader(controllerContext.Context) + if err != nil { + return fmt.Errorf("plugin set leader: %w", err) + } } return nil diff --git a/pkg/util/clihelper/clihelper.go b/pkg/util/clihelper/clihelper.go index 56d8399ba..9e0b98cc5 100644 --- a/pkg/util/clihelper/clihelper.go +++ b/pkg/util/clihelper/clihelper.go @@ -20,7 +20,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/tools/clientcmd/api" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ) // CriticalStatus container status @@ -40,26 +40,26 @@ var SortPodsByNewest = func(pods []corev1.Pod, i, j int) bool { } // GetProKubeConfig builds a pro kube config from options and client -func GetProKubeConfig(options loftkubeconfig.ContextOptions) (*api.Config, error) { +func GetProKubeConfig(options loftkubeconfig.ContextOptions) (*clientcmdapi.Config, error) { contextName := options.Name - cluster := api.NewCluster() + cluster := clientcmdapi.NewCluster() cluster.Server = options.Server cluster.CertificateAuthorityData = options.CaData 
cluster.InsecureSkipTLSVerify = options.InsecureSkipTLSVerify - authInfo := api.NewAuthInfo() + authInfo := clientcmdapi.NewAuthInfo() if options.Token != "" || options.ClientCertificateData != nil || options.ClientKeyData != nil { authInfo.Token = options.Token authInfo.ClientKeyData = options.ClientKeyData authInfo.ClientCertificateData = options.ClientCertificateData } - config := api.NewConfig() + config := clientcmdapi.NewConfig() config.Clusters[contextName] = cluster config.AuthInfos[contextName] = authInfo // Update kube context - kubeContext := api.NewContext() + kubeContext := clientcmdapi.NewContext() kubeContext.Cluster = contextName kubeContext.AuthInfo = contextName kubeContext.Namespace = options.CurrentNamespace @@ -79,8 +79,8 @@ func GetProKubeConfig(options loftkubeconfig.ContextOptions) (*api.Config, error // can be eventually removed in the future. // // This is retried until the kube config is successfully retrieve, or until 10 minute timeout is reached. -func GetKubeConfig(ctx context.Context, kubeClient *kubernetes.Clientset, vclusterName string, namespace string, log log.Logger) (*api.Config, error) { - var kubeConfig *api.Config +func GetKubeConfig(ctx context.Context, kubeClient *kubernetes.Clientset, vclusterName string, namespace string, log log.Logger) (*clientcmdapi.Config, error) { + var kubeConfig *clientcmdapi.Config printedWaiting := false podInfoPrinter := podprinter.PodInfoPrinter{LastWarning: time.Now().Add(time.Second * 6)} @@ -156,7 +156,7 @@ func CheckHelmVersion(output string) error { return nil } -func UpdateKubeConfig(contextName string, cluster *api.Cluster, authInfo *api.AuthInfo, setActive bool) error { +func UpdateKubeConfig(contextName string, cluster *clientcmdapi.Cluster, authInfo *clientcmdapi.AuthInfo, setActive bool) error { config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}).RawConfig() if err != nil { return err @@ -166,7 +166,7 @@ func UpdateKubeConfig(contextName string, cluster *api.Cluster, authInfo *api.Au config.AuthInfos[contextName] = authInfo // Update kube context - newContext := api.NewContext() + newContext := clientcmdapi.NewContext() newContext.Cluster = contextName newContext.AuthInfo = contextName diff --git a/pkg/util/kubeconfig/kubeconfig.go b/pkg/util/kubeconfig/kubeconfig.go index 20769b2e3..e4d98e195 100644 --- a/pkg/util/kubeconfig/kubeconfig.go +++ b/pkg/util/kubeconfig/kubeconfig.go @@ -10,8 +10,9 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/tools/clientcmd/api" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -25,7 +26,7 @@ const ( CertificateKeySecretKey = "client-key" ) -func WriteKubeConfig(ctx context.Context, currentNamespaceClient client.Client, secretName, secretNamespace string, config *api.Config, isRemote bool) error { +func WriteKubeConfig(ctx context.Context, currentNamespaceClient client.Client, secretName, secretNamespace string, config *clientcmdapi.Config, isRemote bool) error { out, err := clientcmd.Write(*config) if err != nil { return err @@ -88,7 +89,7 @@ func WriteKubeConfig(ctx context.Context, currentNamespaceClient client.Client, return nil } -func ReadKubeConfig(ctx context.Context, client *kubernetes.Clientset, 
suffix, namespace string) (*api.Config, error) { +func ReadKubeConfig(ctx context.Context, client *kubernetes.Clientset, suffix, namespace string) (*clientcmdapi.Config, error) { secret, err := client.CoreV1().Secrets(namespace).Get(ctx, GetDefaultSecretName(suffix), metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("could not Get the %s secret in order to read kubeconfig: %w", GetDefaultSecretName(suffix), err) @@ -103,3 +104,73 @@ func ReadKubeConfig(ctx context.Context, client *kubernetes.Clientset, suffix, n func GetDefaultSecretName(suffix string) string { return DefaultSecretPrefix + suffix } + +func ConvertRestConfigToClientConfig(config *rest.Config) (clientcmd.ClientConfig, error) { + contextName := "local" + kubeConfig := clientcmdapi.NewConfig() + kubeConfig.Contexts = map[string]*clientcmdapi.Context{ + contextName: { + Cluster: contextName, + AuthInfo: contextName, + }, + } + kubeConfig.Clusters = map[string]*clientcmdapi.Cluster{ + contextName: { + Server: config.Host, + InsecureSkipTLSVerify: config.Insecure, + CertificateAuthorityData: config.CAData, + CertificateAuthority: config.CAFile, + }, + } + kubeConfig.AuthInfos = map[string]*clientcmdapi.AuthInfo{ + contextName: { + Token: config.BearerToken, + TokenFile: config.BearerTokenFile, + Impersonate: config.Impersonate.UserName, + ImpersonateGroups: config.Impersonate.Groups, + ImpersonateUserExtra: config.Impersonate.Extra, + ClientCertificate: config.CertFile, + ClientCertificateData: config.CertData, + ClientKey: config.KeyFile, + ClientKeyData: config.KeyData, + Username: config.Username, + Password: config.Password, + AuthProvider: config.AuthProvider, + Exec: config.ExecProvider, + }, + } + kubeConfig.CurrentContext = contextName + + // resolve certificate + if kubeConfig.Clusters[contextName].CertificateAuthorityData == nil && kubeConfig.Clusters[contextName].CertificateAuthority != "" { + o, err := os.ReadFile(kubeConfig.Clusters[contextName].CertificateAuthority) + if err != nil { + return nil, err + } + + kubeConfig.Clusters[contextName].CertificateAuthority = "" + kubeConfig.Clusters[contextName].CertificateAuthorityData = o + } + + // fill in data + if kubeConfig.AuthInfos[contextName].ClientCertificateData == nil && kubeConfig.AuthInfos[contextName].ClientCertificate != "" { + o, err := os.ReadFile(kubeConfig.AuthInfos[contextName].ClientCertificate) + if err != nil { + return nil, err + } + + kubeConfig.AuthInfos[contextName].ClientCertificate = "" + kubeConfig.AuthInfos[contextName].ClientCertificateData = o + } + if kubeConfig.AuthInfos[contextName].ClientKeyData == nil && kubeConfig.AuthInfos[contextName].ClientKey != "" { + o, err := os.ReadFile(kubeConfig.AuthInfos[contextName].ClientKey) + if err != nil { + return nil, err + } + + kubeConfig.AuthInfos[contextName].ClientKey = "" + kubeConfig.AuthInfos[contextName].ClientKeyData = o + } + + return clientcmd.NewDefaultClientConfig(*kubeConfig, &clientcmd.ConfigOverrides{}), nil +} diff --git a/pkg/util/pluginhookclient/client.go b/pkg/util/pluginhookclient/client.go index 987e45080..02936e007 100644 --- a/pkg/util/pluginhookclient/client.go +++ b/pkg/util/pluginhookclient/client.go @@ -2,17 +2,10 @@ package pluginhookclient import ( "context" - "encoding/json" - "fmt" "strings" - "time" "github.com/loft-sh/vcluster/pkg/plugin" - "github.com/loft-sh/vcluster/pkg/plugin/remote" - "github.com/loft-sh/vcluster/pkg/util/loghelper" - "github.com/pkg/errors" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" + plugintypes 
"github.com/loft-sh/vcluster/pkg/plugin/types" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/rest" @@ -81,7 +74,7 @@ func (c *Client) Get(ctx context.Context, key client.ObjectKey, obj client.Objec return err } - return executeClientHooksFor(ctx, obj, "Get"+c.suffix, c.scheme) + return plugin.DefaultManager.MutateObject(ctx, obj, "Get"+c.suffix, c.scheme) } func (c *Client) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { @@ -95,12 +88,11 @@ func (c *Client) List(ctx context.Context, list client.ObjectList, opts ...clien return err } gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") - clientHooks := plugin.DefaultManager.ClientHooksFor(plugin.VersionKindType{ + if !plugin.DefaultManager.HasClientHooksForType(plugintypes.VersionKindType{ APIVersion: gvk.GroupVersion().String(), Kind: gvk.Kind, Type: "Get" + c.suffix, - }) - if len(clientHooks) == 0 { + }) { return c.Client.List(ctx, list, opts...) } @@ -115,7 +107,7 @@ func (c *Client) List(ctx context.Context, list client.ObjectList, opts ...clien } for i := range objs { - err = executeClientHooksFor(ctx, objs[i].(client.Object), "Get"+c.suffix, c.scheme) + err = plugin.DefaultManager.MutateObject(ctx, objs[i].(client.Object), "Get"+c.suffix, c.scheme) if err != nil { return err } @@ -129,7 +121,7 @@ func (c *Client) Create(ctx context.Context, obj client.Object, opts ...client.C return c.Client.Create(ctx, obj, opts...) } - err := executeClientHooksFor(ctx, obj, "Create"+c.suffix, c.scheme) + err := plugin.DefaultManager.MutateObject(ctx, obj, "Create"+c.suffix, c.scheme) if err != nil { return err } @@ -142,7 +134,7 @@ func (c *Client) Patch(ctx context.Context, obj client.Object, patch client.Patc return c.Client.Patch(ctx, obj, patch, opts...) } - err := executeClientHooksFor(ctx, obj, "Update"+c.suffix, c.scheme) + err := plugin.DefaultManager.MutateObject(ctx, obj, "Update"+c.suffix, c.scheme) if err != nil { return err } @@ -155,7 +147,7 @@ func (c *Client) Update(ctx context.Context, obj client.Object, opts ...client.U return c.Client.Update(ctx, obj, opts...) } - err := executeClientHooksFor(ctx, obj, "Update"+c.suffix, c.scheme) + err := plugin.DefaultManager.MutateObject(ctx, obj, "Update"+c.suffix, c.scheme) if err != nil { return err } @@ -168,7 +160,7 @@ func (c *Client) Delete(ctx context.Context, obj client.Object, opts ...client.D return c.Client.Delete(ctx, obj, opts...) } - err := executeClientHooksFor(ctx, obj, "Delete"+c.suffix, c.scheme) + err := plugin.DefaultManager.MutateObject(ctx, obj, "Delete"+c.suffix, c.scheme) if err != nil { return err } @@ -200,7 +192,7 @@ func (c *StatusClient) Create(ctx context.Context, obj client.Object, subResourc return c.Client.Status().Create(ctx, obj, subResource, opts...) } - err := executeClientHooksFor(ctx, obj, "Create"+c.suffix, c.scheme) + err := plugin.DefaultManager.MutateObject(ctx, obj, "Create"+c.suffix, c.scheme) if err != nil { return err } @@ -213,7 +205,7 @@ func (c *StatusClient) Update(ctx context.Context, obj client.Object, opts ...cl return c.Client.Status().Update(ctx, obj, opts...) } - err := executeClientHooksFor(ctx, obj, "Update"+c.suffix, c.scheme) + err := plugin.DefaultManager.MutateObject(ctx, obj, "Update"+c.suffix, c.scheme) if err != nil { return err } @@ -226,74 +218,10 @@ func (c *StatusClient) Patch(ctx context.Context, obj client.Object, patch clien return c.Client.Status().Patch(ctx, obj, patch, opts...) 
} - err := executeClientHooksFor(ctx, obj, "Update"+c.suffix, c.scheme) + err := plugin.DefaultManager.MutateObject(ctx, obj, "Update"+c.suffix, c.scheme) if err != nil { return err } return c.Client.Status().Patch(ctx, obj, patch, opts...) } - -func executeClientHooksFor(ctx context.Context, obj client.Object, hookType string, scheme *runtime.Scheme) error { - gvk, err := apiutil.GVKForObject(obj, scheme) - if err != nil { - return err - } - - apiVersion, kind := gvk.ToAPIVersionAndKind() - versionKindType := plugin.VersionKindType{ - APIVersion: apiVersion, - Kind: kind, - Type: hookType, - } - clientHooks := plugin.DefaultManager.ClientHooksFor(versionKindType) - if len(clientHooks) > 0 { - encodedObj, err := json.Marshal(obj) - if err != nil { - return errors.Wrap(err, "encode obj") - } - - for _, clientHook := range clientHooks { - encodedObj, err = mutateObject(ctx, versionKindType, encodedObj, clientHook) - if err != nil { - return err - } - } - - err = json.Unmarshal(encodedObj, obj) - if err != nil { - return errors.Wrap(err, "unmarshal obj") - } - } - - return nil -} - -func mutateObject(ctx context.Context, versionKindType plugin.VersionKindType, obj []byte, plugin *plugin.Plugin) ([]byte, error) { - conn, err := grpc.Dial(plugin.Address, grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - return nil, fmt.Errorf("error dialing plugin %s: %w", plugin.Name, err) - } - defer func(conn *grpc.ClientConn) { - _ = conn.Close() - }(conn) - - ctx, cancel := context.WithTimeout(ctx, time.Second*10) - defer cancel() - - loghelper.New("mutate").Debugf("calling plugin %s to mutate object %s %s", plugin.Name, versionKindType.APIVersion, versionKindType.Kind) - mutateResult, err := remote.NewPluginClient(conn).Mutate(ctx, &remote.MutateRequest{ - ApiVersion: versionKindType.APIVersion, - Kind: versionKindType.Kind, - Object: string(obj), - Type: versionKindType.Type, - }) - if err != nil { - return nil, errors.Wrapf(err, "call plugin %s", plugin.Name) - } - - if mutateResult.Mutated { - return []byte(mutateResult.Object), nil - } - return obj, nil -} diff --git a/test/e2e_plugin/e2e_plugin_suite_test.go b/test/e2e_plugin/e2e_plugin_suite_test.go new file mode 100644 index 000000000..9fdb8d642 --- /dev/null +++ b/test/e2e_plugin/e2e_plugin_suite_test.go @@ -0,0 +1,55 @@ +package e2eplugin + +import ( + "context" + "testing" + + "github.com/loft-sh/log" + "github.com/loft-sh/vcluster/test/framework" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + + // Enable cloud provider auth + _ "k8s.io/client-go/plugin/pkg/client/auth" +) + +var ( + scheme = runtime.NewScheme() +) + +func init() { + _ = clientgoscheme.AddToScheme(scheme) + // API extensions are not in the above scheme set, + // and must thus be added separately. + _ = apiextensionsv1beta1.AddToScheme(scheme) + _ = apiextensionsv1.AddToScheme(scheme) + _ = apiregistrationv1.AddToScheme(scheme) +} + +// TestRunE2EPluginModeTests checks configuration parameters (specified through flags) and then runs +// E2E tests using the Ginkgo runner. 
+// If a "report directory" is specified, one or more JUnit test reports will be +// generated in this directory, and cluster logs will also be saved. +// This function is called on each Ginkgo node in parallel mode. +func TestRunE2EPluginModeTests(t *testing.T) { + gomega.RegisterFailHandler(ginkgo.Fail) + err := framework.CreateFramework(context.Background(), scheme) + if err != nil { + log.GetInstance().Fatalf("Error setting up framework: %v", err) + } + + var _ = ginkgo.AfterSuite(func() { + err = framework.DefaultFramework.Cleanup() + if err != nil { + log.GetInstance().Warnf("Error executing testsuite cleanup: %v", err) + } + }) + + ginkgo.RunSpecs(t, "vCluster e2ePlugin suite") +} diff --git a/test/e2e_plugin/plugin.go b/test/e2e_plugin/plugin.go new file mode 100644 index 000000000..2b9985c24 --- /dev/null +++ b/test/e2e_plugin/plugin.go @@ -0,0 +1,30 @@ +package e2eplugin + +import ( + "time" + + "github.com/loft-sh/vcluster/test/framework" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + pollingInterval = time.Second * 2 + pollingDurationLong = time.Second * 60 +) + +var _ = ginkgo.Describe("plugin", func() { + f := framework.DefaultFramework + + ginkgo.It("test legacy vCluster plugin", func() { + // check if deployment is there + gomega.Eventually(func() bool { + _, err := f.VclusterClient.AppsV1().Deployments("default").Get(f.Context, "mydeployment", metav1.GetOptions{}) + return err == nil + }). + WithPolling(pollingInterval). + WithTimeout(pollingDurationLong). + Should(gomega.BeTrue()) + }) +}) diff --git a/test/e2e_plugin/values.yaml b/test/e2e_plugin/values.yaml new file mode 100644 index 000000000..5100036c7 --- /dev/null +++ b/test/e2e_plugin/values.yaml @@ -0,0 +1,6 @@ +# Plugin Definition below. This is essentially a valid helm values file that will be merged +# with the other vcluster values during vcluster create or helm install. +plugin: + bootstrap-with-deployment: + image: ghcr.io/loft-sh/vcluster-example-bootstrap-with-deployment:v2 + imagePullPolicy: IfNotPresent diff --git a/vendor/github.com/fatih/color/LICENSE.md b/vendor/github.com/fatih/color/LICENSE.md new file mode 100644 index 000000000..25fdaf639 --- /dev/null +++ b/vendor/github.com/fatih/color/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md new file mode 100644 index 000000000..5c751f215 --- /dev/null +++ b/vendor/github.com/fatih/color/README.md @@ -0,0 +1,178 @@ +# color [![](https://github.com/fatih/color/workflows/build/badge.svg)](https://github.com/fatih/color/actions) [![PkgGoDev](https://pkg.go.dev/badge/github.com/fatih/color)](https://pkg.go.dev/github.com/fatih/color) + +Color lets you use colorized outputs in terms of [ANSI Escape +Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It +has support for Windows too! The API can be used in several ways, pick one that +suits you. + +![Color](https://user-images.githubusercontent.com/438920/96832689-03b3e000-13f4-11eb-9803-46f4c4de3406.jpg) + + +## Install + +```bash +go get github.com/fatih/color +``` + +## Examples + +### Standard colors + +```go +// Print with default helper functions +color.Cyan("Prints text in cyan.") + +// A newline will be appended automatically +color.Blue("Prints %s in blue.", "text") + +// These are using the default foreground colors +color.Red("We have red") +color.Magenta("And many others ..") + +``` + +### Mix and reuse colors + +```go +// Create a new color object +c := color.New(color.FgCyan).Add(color.Underline) +c.Println("Prints cyan text with an underline.") + +// Or just add them to New() +d := color.New(color.FgCyan, color.Bold) +d.Printf("This prints bold cyan %s\n", "too!.") + +// Mix up foreground and background colors, create new mixes! +red := color.New(color.FgRed) + +boldRed := red.Add(color.Bold) +boldRed.Println("This will print text in bold red.") + +whiteBackground := red.Add(color.BgWhite) +whiteBackground.Println("Red text with white background.") +``` + +### Use your own output (io.Writer) + +```go +// Use your own io.Writer output +color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + +blue := color.New(color.FgBlue) +blue.Fprint(writer, "This will print text in blue.") +``` + +### Custom print functions (PrintFunc) + +```go +// Create a custom print function for convenience +red := color.New(color.FgRed).PrintfFunc() +red("Warning") +red("Error: %s", err) + +// Mix up multiple attributes +notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() +notice("Don't forget this...") +``` + +### Custom fprint functions (FprintFunc) + +```go +blue := color.New(FgBlue).FprintfFunc() +blue(myWriter, "important notice: %s", stars) + +// Mix up with multiple attributes +success := color.New(color.Bold, color.FgGreen).FprintlnFunc() +success(myWriter, "Don't forget this...") +``` + +### Insert into noncolor strings (SprintFunc) + +```go +// Create SprintXxx functions to mix strings with other non-colorized strings: +yellow := color.New(color.FgYellow).SprintFunc() +red := color.New(color.FgRed).SprintFunc() +fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error")) + +info := color.New(color.FgWhite, color.BgGreen).SprintFunc() +fmt.Printf("This %s rocks!\n", info("package")) + +// Use helper functions +fmt.Println("This", color.RedString("warning"), "should be not neglected.") +fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.") + +// Windows supported too! 
Just don't forget to change the output to color.Output +fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) +``` + +### Plug into existing code + +```go +// Use handy standard colors +color.Set(color.FgYellow) + +fmt.Println("Existing text will now be in yellow") +fmt.Printf("This one %s\n", "too") + +color.Unset() // Don't forget to unset + +// You can mix up parameters +color.Set(color.FgMagenta, color.Bold) +defer color.Unset() // Use it in your function + +fmt.Println("All text will now be bold magenta.") +``` + +### Disable/Enable color + +There might be a case where you want to explicitly disable/enable color output. the +`go-isatty` package will automatically disable color output for non-tty output streams +(for example if the output were piped directly to `less`). + +The `color` package also disables color output if the [`NO_COLOR`](https://no-color.org) environment +variable is set (regardless of its value). + +`Color` has support to disable/enable colors programatically both globally and +for single color definitions. For example suppose you have a CLI app and a +`--no-color` bool flag. You can easily disable the color output with: + +```go +var flagNoColor = flag.Bool("no-color", false, "Disable color output") + +if *flagNoColor { + color.NoColor = true // disables colorized output +} +``` + +It also has support for single color definitions (local). You can +disable/enable color output on the fly: + +```go +c := color.New(color.FgCyan) +c.Println("Prints cyan text") + +c.DisableColor() +c.Println("This is printed without any color") + +c.EnableColor() +c.Println("This prints again cyan...") +``` + +## GitHub Actions + +To output color in GitHub Actions (or other CI systems that support ANSI colors), make sure to set `color.NoColor = false` so that it bypasses the check for non-tty output streams. + +## Todo + +* Save/Return previous values +* Evaluate fmt.Formatter interface + + +## Credits + + * [Fatih Arslan](https://github.com/fatih) + * Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) + +## License + +The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go new file mode 100644 index 000000000..98a60f3c8 --- /dev/null +++ b/vendor/github.com/fatih/color/color.go @@ -0,0 +1,618 @@ +package color + +import ( + "fmt" + "io" + "os" + "strconv" + "strings" + "sync" + + "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" +) + +var ( + // NoColor defines if the output is colorized or not. It's dynamically set to + // false or true based on the stdout's file descriptor referring to a terminal + // or not. It's also set to true if the NO_COLOR environment variable is + // set (regardless of its value). This is a global option and affects all + // colors. For more control over each color block use the methods + // DisableColor() individually. + NoColor = noColorExists() || os.Getenv("TERM") == "dumb" || + (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) + + // Output defines the standard output of the print functions. By default + // os.Stdout is used. + Output = colorable.NewColorableStdout() + + // Error defines a color supporting writer for os.Stderr. + Error = colorable.NewColorableStderr() + + // colorsCache is used to reduce the count of created Color objects and + // allows to reuse already created objects with required Attribute. 
+ colorsCache = make(map[Attribute]*Color) + colorsCacheMu sync.Mutex // protects colorsCache +) + +// noColorExists returns true if the environment variable NO_COLOR exists. +func noColorExists() bool { + _, exists := os.LookupEnv("NO_COLOR") + return exists +} + +// Color defines a custom color object which is defined by SGR parameters. +type Color struct { + params []Attribute + noColor *bool +} + +// Attribute defines a single SGR Code +type Attribute int + +const escape = "\x1b" + +// Base attributes +const ( + Reset Attribute = iota + Bold + Faint + Italic + Underline + BlinkSlow + BlinkRapid + ReverseVideo + Concealed + CrossedOut +) + +// Foreground text colors +const ( + FgBlack Attribute = iota + 30 + FgRed + FgGreen + FgYellow + FgBlue + FgMagenta + FgCyan + FgWhite +) + +// Foreground Hi-Intensity text colors +const ( + FgHiBlack Attribute = iota + 90 + FgHiRed + FgHiGreen + FgHiYellow + FgHiBlue + FgHiMagenta + FgHiCyan + FgHiWhite +) + +// Background text colors +const ( + BgBlack Attribute = iota + 40 + BgRed + BgGreen + BgYellow + BgBlue + BgMagenta + BgCyan + BgWhite +) + +// Background Hi-Intensity text colors +const ( + BgHiBlack Attribute = iota + 100 + BgHiRed + BgHiGreen + BgHiYellow + BgHiBlue + BgHiMagenta + BgHiCyan + BgHiWhite +) + +// New returns a newly created color object. +func New(value ...Attribute) *Color { + c := &Color{ + params: make([]Attribute, 0), + } + + if noColorExists() { + c.noColor = boolPtr(true) + } + + c.Add(value...) + return c +} + +// Set sets the given parameters immediately. It will change the color of +// output with the given SGR parameters until color.Unset() is called. +func Set(p ...Attribute) *Color { + c := New(p...) + c.Set() + return c +} + +// Unset resets all escape attributes and clears the output. Usually should +// be called after Set(). +func Unset() { + if NoColor { + return + } + + fmt.Fprintf(Output, "%s[%dm", escape, Reset) +} + +// Set sets the SGR sequence. +func (c *Color) Set() *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprintf(Output, c.format()) + return c +} + +func (c *Color) unset() { + if c.isNoColorSet() { + return + } + + Unset() +} + +func (c *Color) setWriter(w io.Writer) *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprintf(w, c.format()) + return c +} + +func (c *Color) unsetWriter(w io.Writer) { + if c.isNoColorSet() { + return + } + + if NoColor { + return + } + + fmt.Fprintf(w, "%s[%dm", escape, Reset) +} + +// Add is used to chain SGR parameters. Use as many as parameters to combine +// and create custom color objects. Example: Add(color.FgRed, color.Underline). +func (c *Color) Add(value ...Attribute) *Color { + c.params = append(c.params, value...) + return c +} + +func (c *Color) prepend(value Attribute) { + c.params = append(c.params, 0) + copy(c.params[1:], c.params[0:]) + c.params[0] = value +} + +// Fprint formats using the default formats for its operands and writes to w. +// Spaces are added between operands when neither is a string. +// It returns the number of bytes written and any write error encountered. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprint(w, a...) +} + +// Print formats using the default formats for its operands and writes to +// standard output. Spaces are added between operands when neither is a +// string. 
It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Print(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprint(Output, a...) +} + +// Fprintf formats according to a format specifier and writes to w. +// It returns the number of bytes written and any write error encountered. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintf(w, format, a...) +} + +// Printf formats according to a format specifier and writes to standard output. +// It returns the number of bytes written and any write error encountered. +// This is the standard fmt.Printf() method wrapped with the given color. +func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintf(Output, format, a...) +} + +// Fprintln formats using the default formats for its operands and writes to w. +// Spaces are always added between operands and a newline is appended. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintln(w, a...) +} + +// Println formats using the default formats for its operands and writes to +// standard output. Spaces are always added between operands and a newline is +// appended. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Println(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintln(Output, a...) +} + +// Sprint is just like Print, but returns a string instead of printing it. +func (c *Color) Sprint(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) +} + +// Sprintln is just like Println, but returns a string instead of printing it. +func (c *Color) Sprintln(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) +} + +// Sprintf is just like Printf, but returns a string instead of printing it. +func (c *Color) Sprintf(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) +} + +// FprintFunc returns a new function that prints the passed arguments as +// colorized with color.Fprint(). +func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprint(w, a...) + } +} + +// PrintFunc returns a new function that prints the passed arguments as +// colorized with color.Print(). +func (c *Color) PrintFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Print(a...) + } +} + +// FprintfFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintf(). +func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) { + return func(w io.Writer, format string, a ...interface{}) { + c.Fprintf(w, format, a...) + } +} + +// PrintfFunc returns a new function that prints the passed arguments as +// colorized with color.Printf(). +func (c *Color) PrintfFunc() func(format string, a ...interface{}) { + return func(format string, a ...interface{}) { + c.Printf(format, a...) 
+ } +} + +// FprintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintln(). +func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprintln(w, a...) + } +} + +// PrintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Println(). +func (c *Color) PrintlnFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Println(a...) + } +} + +// SprintFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprint(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output, example: +// +// put := New(FgYellow).SprintFunc() +// fmt.Fprintf(color.Output, "This is a %s", put("warning")) +func (c *Color) SprintFunc() func(a ...interface{}) string { + return func(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) + } +} + +// SprintfFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprintf(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output. +func (c *Color) SprintfFunc() func(format string, a ...interface{}) string { + return func(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) + } +} + +// SprintlnFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprintln(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output. +func (c *Color) SprintlnFunc() func(a ...interface{}) string { + return func(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) + } +} + +// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m" +// an example output might be: "1;36" -> bold cyan +func (c *Color) sequence() string { + format := make([]string, len(c.params)) + for i, v := range c.params { + format[i] = strconv.Itoa(int(v)) + } + + return strings.Join(format, ";") +} + +// wrap wraps the s string with the colors attributes. The string is ready to +// be printed. +func (c *Color) wrap(s string) string { + if c.isNoColorSet() { + return s + } + + return c.format() + s + c.unformat() +} + +func (c *Color) format() string { + return fmt.Sprintf("%s[%sm", escape, c.sequence()) +} + +func (c *Color) unformat() string { + return fmt.Sprintf("%s[%dm", escape, Reset) +} + +// DisableColor disables the color output. Useful to not change any existing +// code and still being able to output. Can be used for flags like +// "--no-color". To enable back use EnableColor() method. +func (c *Color) DisableColor() { + c.noColor = boolPtr(true) +} + +// EnableColor enables the color output. Use it in conjunction with +// DisableColor(). Otherwise this method has no side effects. +func (c *Color) EnableColor() { + c.noColor = boolPtr(false) +} + +func (c *Color) isNoColorSet() bool { + // check first if we have user set action + if c.noColor != nil { + return *c.noColor + } + + // if not return the global option, which is disabled by default + return NoColor +} + +// Equals returns a boolean value indicating whether two colors are equal. 
+func (c *Color) Equals(c2 *Color) bool { + if len(c.params) != len(c2.params) { + return false + } + + for _, attr := range c.params { + if !c2.attrExists(attr) { + return false + } + } + + return true +} + +func (c *Color) attrExists(a Attribute) bool { + for _, attr := range c.params { + if attr == a { + return true + } + } + + return false +} + +func boolPtr(v bool) *bool { + return &v +} + +func getCachedColor(p Attribute) *Color { + colorsCacheMu.Lock() + defer colorsCacheMu.Unlock() + + c, ok := colorsCache[p] + if !ok { + c = New(p) + colorsCache[p] = c + } + + return c +} + +func colorPrint(format string, p Attribute, a ...interface{}) { + c := getCachedColor(p) + + if !strings.HasSuffix(format, "\n") { + format += "\n" + } + + if len(a) == 0 { + c.Print(format) + } else { + c.Printf(format, a...) + } +} + +func colorString(format string, p Attribute, a ...interface{}) string { + c := getCachedColor(p) + + if len(a) == 0 { + return c.SprintFunc()(format) + } + + return c.SprintfFunc()(format, a...) +} + +// Black is a convenient helper function to print with black foreground. A +// newline is appended to format by default. +func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) } + +// Red is a convenient helper function to print with red foreground. A +// newline is appended to format by default. +func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) } + +// Green is a convenient helper function to print with green foreground. A +// newline is appended to format by default. +func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) } + +// Yellow is a convenient helper function to print with yellow foreground. +// A newline is appended to format by default. +func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) } + +// Blue is a convenient helper function to print with blue foreground. A +// newline is appended to format by default. +func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) } + +// Magenta is a convenient helper function to print with magenta foreground. +// A newline is appended to format by default. +func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) } + +// Cyan is a convenient helper function to print with cyan foreground. A +// newline is appended to format by default. +func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) } + +// White is a convenient helper function to print with white foreground. A +// newline is appended to format by default. +func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) } + +// BlackString is a convenient helper function to return a string with black +// foreground. +func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) } + +// RedString is a convenient helper function to return a string with red +// foreground. +func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) } + +// GreenString is a convenient helper function to return a string with green +// foreground. +func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) } + +// YellowString is a convenient helper function to return a string with yellow +// foreground. +func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) 
} + +// BlueString is a convenient helper function to return a string with blue +// foreground. +func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) } + +// MagentaString is a convenient helper function to return a string with magenta +// foreground. +func MagentaString(format string, a ...interface{}) string { + return colorString(format, FgMagenta, a...) +} + +// CyanString is a convenient helper function to return a string with cyan +// foreground. +func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) } + +// WhiteString is a convenient helper function to return a string with white +// foreground. +func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) } + +// HiBlack is a convenient helper function to print with hi-intensity black foreground. A +// newline is appended to format by default. +func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) } + +// HiRed is a convenient helper function to print with hi-intensity red foreground. A +// newline is appended to format by default. +func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) } + +// HiGreen is a convenient helper function to print with hi-intensity green foreground. A +// newline is appended to format by default. +func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) } + +// HiYellow is a convenient helper function to print with hi-intensity yellow foreground. +// A newline is appended to format by default. +func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) } + +// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A +// newline is appended to format by default. +func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) } + +// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground. +// A newline is appended to format by default. +func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) } + +// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A +// newline is appended to format by default. +func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) } + +// HiWhite is a convenient helper function to print with hi-intensity white foreground. A +// newline is appended to format by default. +func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) } + +// HiBlackString is a convenient helper function to return a string with hi-intensity black +// foreground. +func HiBlackString(format string, a ...interface{}) string { + return colorString(format, FgHiBlack, a...) +} + +// HiRedString is a convenient helper function to return a string with hi-intensity red +// foreground. +func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) } + +// HiGreenString is a convenient helper function to return a string with hi-intensity green +// foreground. +func HiGreenString(format string, a ...interface{}) string { + return colorString(format, FgHiGreen, a...) +} + +// HiYellowString is a convenient helper function to return a string with hi-intensity yellow +// foreground. +func HiYellowString(format string, a ...interface{}) string { + return colorString(format, FgHiYellow, a...) 
+} + +// HiBlueString is a convenient helper function to return a string with hi-intensity blue +// foreground. +func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) } + +// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta +// foreground. +func HiMagentaString(format string, a ...interface{}) string { + return colorString(format, FgHiMagenta, a...) +} + +// HiCyanString is a convenient helper function to return a string with hi-intensity cyan +// foreground. +func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) } + +// HiWhiteString is a convenient helper function to return a string with hi-intensity white +// foreground. +func HiWhiteString(format string, a ...interface{}) string { + return colorString(format, FgHiWhite, a...) +} diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go new file mode 100644 index 000000000..04541de78 --- /dev/null +++ b/vendor/github.com/fatih/color/doc.go @@ -0,0 +1,135 @@ +/* +Package color is an ANSI color package to output colorized or SGR defined +output to the standard output. The API can be used in several way, pick one +that suits you. + +Use simple and default helper functions with predefined foreground colors: + + color.Cyan("Prints text in cyan.") + + // a newline will be appended automatically + color.Blue("Prints %s in blue.", "text") + + // More default foreground colors.. + color.Red("We have red") + color.Yellow("Yellow color too!") + color.Magenta("And many others ..") + + // Hi-intensity colors + color.HiGreen("Bright green color.") + color.HiBlack("Bright black means gray..") + color.HiWhite("Shiny white color!") + +However there are times where custom color mixes are required. Below are some +examples to create custom color objects and use the print functions of each +separate color object. + + // Create a new color object + c := color.New(color.FgCyan).Add(color.Underline) + c.Println("Prints cyan text with an underline.") + + // Or just add them to New() + d := color.New(color.FgCyan, color.Bold) + d.Printf("This prints bold cyan %s\n", "too!.") + + + // Mix up foreground and background colors, create new mixes! 
+ red := color.New(color.FgRed) + + boldRed := red.Add(color.Bold) + boldRed.Println("This will print text in bold red.") + + whiteBackground := red.Add(color.BgWhite) + whiteBackground.Println("Red text with White background.") + + // Use your own io.Writer output + color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + + blue := color.New(color.FgBlue) + blue.Fprint(myWriter, "This will print text in blue.") + +You can create PrintXxx functions to simplify even more: + + // Create a custom print function for convenient + red := color.New(color.FgRed).PrintfFunc() + red("warning") + red("error: %s", err) + + // Mix up multiple attributes + notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() + notice("don't forget this...") + +You can also FprintXxx functions to pass your own io.Writer: + + blue := color.New(FgBlue).FprintfFunc() + blue(myWriter, "important notice: %s", stars) + + // Mix up with multiple attributes + success := color.New(color.Bold, color.FgGreen).FprintlnFunc() + success(myWriter, don't forget this...") + + +Or create SprintXxx functions to mix strings with other non-colorized strings: + + yellow := New(FgYellow).SprintFunc() + red := New(FgRed).SprintFunc() + + fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error")) + + info := New(FgWhite, BgGreen).SprintFunc() + fmt.Printf("this %s rocks!\n", info("package")) + +Windows support is enabled by default. All Print functions work as intended. +However only for color.SprintXXX functions, user should use fmt.FprintXXX and +set the output to color.Output: + + fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) + + info := New(FgWhite, BgGreen).SprintFunc() + fmt.Fprintf(color.Output, "this %s rocks!\n", info("package")) + +Using with existing code is possible. Just use the Set() method to set the +standard output to the given parameters. That way a rewrite of an existing +code is not required. + + // Use handy standard colors. + color.Set(color.FgYellow) + + fmt.Println("Existing text will be now in Yellow") + fmt.Printf("This one %s\n", "too") + + color.Unset() // don't forget to unset + + // You can mix up parameters + color.Set(color.FgMagenta, color.Bold) + defer color.Unset() // use it in your function + + fmt.Println("All text will be now bold magenta.") + +There might be a case where you want to disable color output (for example to +pipe the standard output of your app to somewhere else). `Color` has support to +disable colors both globally and for single color definition. For example +suppose you have a CLI app and a `--no-color` bool flag. You can easily disable +the color output with: + + var flagNoColor = flag.Bool("no-color", false, "Disable color output") + + if *flagNoColor { + color.NoColor = true // disables colorized output + } + +You can also disable the color by setting the NO_COLOR environment variable to any value. + +It also has support for single color definitions (local). You can +disable/enable color output on the fly: + + c := color.New(color.FgCyan) + c.Println("Prints cyan text") + + c.DisableColor() + c.Println("This is printed without any color") + + c.EnableColor() + c.Println("This prints again cyan...") +*/ +package color diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go new file mode 100644 index 000000000..16686a655 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go @@ -0,0 +1,62 @@ +// Code generated by protoc-gen-go. 
DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/empty/empty.proto + +package empty + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/empty.proto. + +type Empty = emptypb.Empty + +var File_github_com_golang_protobuf_ptypes_empty_empty_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = []byte{ + 0x0a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x3b, 0x65, 0x6d, + 0x70, 0x74, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() } +func file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() { + if File_github_com_golang_protobuf_ptypes_empty_empty_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_empty_empty_proto = out.File + file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = nil +} diff --git a/vendor/github.com/hashicorp/go-hclog/.gitignore b/vendor/github.com/hashicorp/go-hclog/.gitignore new file mode 100644 index 000000000..42cc4105f --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/.gitignore @@ -0,0 +1 @@ +.idea* \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-hclog/LICENSE b/vendor/github.com/hashicorp/go-hclog/LICENSE new file mode 100644 index 000000000..abaf1e45f --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 HashiCorp + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated 
documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md new file mode 100644 index 000000000..5d56f4b59 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/README.md @@ -0,0 +1,148 @@ +# go-hclog + +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[godocs]: https://godoc.org/github.com/hashicorp/go-hclog + +`go-hclog` is a package for Go that provides a simple key/value logging +interface for use in development and production environments. + +It provides logging levels that provide decreased output based upon the +desired amount of output, unlike the standard library `log` package. + +It provides `Printf` style logging of values via `hclog.Fmt()`. + +It provides a human readable output mode for use in development as well as +JSON output mode for production. + +## Stability Note + +While this library is fully open source and HashiCorp will be maintaining it +(since we are and will be making extensive use of it), the API and output +format is subject to minor changes as we fully bake and vet it in our projects. +This notice will be removed once it's fully integrated into our major projects +and no further changes are anticipated. + +## Installation and Docs + +Install using `go get github.com/hashicorp/go-hclog`. + +Full documentation is available at +http://godoc.org/github.com/hashicorp/go-hclog + +## Usage + +### Use the global logger + +```go +hclog.Default().Info("hello world") +``` + +```text +2017-07-05T16:15:55.167-0700 [INFO ] hello world +``` + +(Note timestamps are removed in future examples for brevity.) + +### Create a new logger + +```go +appLogger := hclog.New(&hclog.LoggerOptions{ + Name: "my-app", + Level: hclog.LevelFromString("DEBUG"), +}) +``` + +### Emit an Info level message with 2 key/value pairs + +```go +input := "5.5" +_, err := strconv.ParseInt(input, 10, 32) +if err != nil { + appLogger.Info("Invalid input for ParseInt", "input", input, "error", err) +} +``` + +```text +... [INFO ] my-app: Invalid input for ParseInt: input=5.5 error="strconv.ParseInt: parsing "5.5": invalid syntax" +``` + +### Create a new Logger for a major subsystem + +```go +subsystemLogger := appLogger.Named("transport") +subsystemLogger.Info("we are transporting something") +``` + +```text +... [INFO ] my-app.transport: we are transporting something +``` + +Notice that logs emitted by `subsystemLogger` contain `my-app.transport`, +reflecting both the application and subsystem names. 
+ +### Create a new Logger with fixed key/value pairs + +Using `With()` will include a specific key-value pair in all messages emitted +by that logger. + +```go +requestID := "5fb446b6-6eba-821d-df1b-cd7501b6a363" +requestLogger := subsystemLogger.With("request", requestID) +requestLogger.Info("we are transporting a request") +``` + +```text +... [INFO ] my-app.transport: we are transporting a request: request=5fb446b6-6eba-821d-df1b-cd7501b6a363 +``` + +This allows sub Loggers to be context specific without having to thread that +into all the callers. + +### Using `hclog.Fmt()` + +```go +var int totalBandwidth = 200 +appLogger.Info("total bandwidth exceeded", "bandwidth", hclog.Fmt("%d GB/s", totalBandwidth)) +``` + +```text +... [INFO ] my-app: total bandwidth exceeded: bandwidth="200 GB/s" +``` + +### Use this with code that uses the standard library logger + +If you want to use the standard library's `log.Logger` interface you can wrap +`hclog.Logger` by calling the `StandardLogger()` method. This allows you to use +it with the familiar `Println()`, `Printf()`, etc. For example: + +```go +stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{ + InferLevels: true, +}) +// Printf() is provided by stdlib log.Logger interface, not hclog.Logger +stdLogger.Printf("[DEBUG] %+v", stdLogger) +``` + +```text +... [DEBUG] my-app: &{mu:{state:0 sema:0} prefix: flag:0 out:0xc42000a0a0 buf:[]} +``` + +Alternatively, you may configure the system-wide logger: + +```go +// log the standard logger from 'import "log"' +log.SetOutput(appLogger.StandardWriter(&hclog.StandardLoggerOptions{InferLevels: true})) +log.SetPrefix("") +log.SetFlags(0) + +log.Printf("[DEBUG] %d", 42) +``` + +```text +... [DEBUG] my-app: 42 +``` + +Notice that if `appLogger` is initialized with the `INFO` log level _and_ you +specify `InferLevels: true`, you will not see any output here. You must change +`appLogger` to `DEBUG` to see output. See the docs for more information. diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_unix.go b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go new file mode 100644 index 000000000..44aa9bf2c --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go @@ -0,0 +1,27 @@ +// +build !windows + +package hclog + +import ( + "github.com/mattn/go-isatty" +) + +// setColorization will mutate the values of this logger +// to approperately configure colorization options. It provides +// a wrapper to the output stream on Windows systems. +func (l *intLogger) setColorization(opts *LoggerOptions) { + switch opts.Color { + case ColorOff: + fallthrough + case ForceColor: + return + case AutoColor: + fi := l.checkWriterIsFile() + isUnixTerm := isatty.IsTerminal(fi.Fd()) + isCygwinTerm := isatty.IsCygwinTerminal(fi.Fd()) + isTerm := isUnixTerm || isCygwinTerm + if !isTerm { + l.writer.color = ColorOff + } + } +} diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_windows.go b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go new file mode 100644 index 000000000..23486b6d7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go @@ -0,0 +1,33 @@ +// +build windows + +package hclog + +import ( + "os" + + colorable "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" +) + +// setColorization will mutate the values of this logger +// to approperately configure colorization options. It provides +// a wrapper to the output stream on Windows systems. 
+func (l *intLogger) setColorization(opts *LoggerOptions) { + switch opts.Color { + case ColorOff: + return + case ForceColor: + fi := l.checkWriterIsFile() + l.writer.w = colorable.NewColorable(fi) + case AutoColor: + fi := l.checkWriterIsFile() + isUnixTerm := isatty.IsTerminal(os.Stdout.Fd()) + isCygwinTerm := isatty.IsCygwinTerminal(os.Stdout.Fd()) + isTerm := isUnixTerm || isCygwinTerm + if !isTerm { + l.writer.color = ColorOff + return + } + l.writer.w = colorable.NewColorable(fi) + } +} diff --git a/vendor/github.com/hashicorp/go-hclog/context.go b/vendor/github.com/hashicorp/go-hclog/context.go new file mode 100644 index 000000000..7815f5019 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/context.go @@ -0,0 +1,38 @@ +package hclog + +import ( + "context" +) + +// WithContext inserts a logger into the context and is retrievable +// with FromContext. The optional args can be set with the same syntax as +// Logger.With to set fields on the inserted logger. This will not modify +// the logger argument in-place. +func WithContext(ctx context.Context, logger Logger, args ...interface{}) context.Context { + // While we could call logger.With even with zero args, we have this + // check to avoid unnecessary allocations around creating a copy of a + // logger. + if len(args) > 0 { + logger = logger.With(args...) + } + + return context.WithValue(ctx, contextKey, logger) +} + +// FromContext returns a logger from the context. This will return L() +// (the default logger) if no logger is found in the context. Therefore, +// this will never return a nil value. +func FromContext(ctx context.Context) Logger { + logger, _ := ctx.Value(contextKey).(Logger) + if logger == nil { + return L() + } + + return logger +} + +// Unexported new type so that our context key never collides with another. +type contextKeyType struct{} + +// contextKey is the key used for the context to store the logger. +var contextKey = contextKeyType{} diff --git a/vendor/github.com/hashicorp/go-hclog/exclude.go b/vendor/github.com/hashicorp/go-hclog/exclude.go new file mode 100644 index 000000000..cfd4307a8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/exclude.go @@ -0,0 +1,71 @@ +package hclog + +import ( + "regexp" + "strings" +) + +// ExcludeByMessage provides a simple way to build a list of log messages that +// can be queried and matched. This is meant to be used with the Exclude +// option on Options to suppress log messages. This does not hold any mutexs +// within itself, so normal usage would be to Add entries at setup and none after +// Exclude is going to be called. Exclude is called with a mutex held within +// the Logger, so that doesn't need to use a mutex. Example usage: +// +// f := new(ExcludeByMessage) +// f.Add("Noisy log message text") +// appLogger.Exclude = f.Exclude +type ExcludeByMessage struct { + messages map[string]struct{} +} + +// Add a message to be filtered. Do not call this after Exclude is to be called +// due to concurrency issues. +func (f *ExcludeByMessage) Add(msg string) { + if f.messages == nil { + f.messages = make(map[string]struct{}) + } + + f.messages[msg] = struct{}{} +} + +// Return true if the given message should be included +func (f *ExcludeByMessage) Exclude(level Level, msg string, args ...interface{}) bool { + _, ok := f.messages[msg] + return ok +} + +// ExcludeByPrefix is a simple type to match a message string that has a common prefix. +type ExcludeByPrefix string + +// Matches an message that starts with the prefix. 
+func (p ExcludeByPrefix) Exclude(level Level, msg string, args ...interface{}) bool { + return strings.HasPrefix(msg, string(p)) +} + +// ExcludeByRegexp takes a regexp and uses it to match a log message string. If it matches +// the log entry is excluded. +type ExcludeByRegexp struct { + Regexp *regexp.Regexp +} + +// Exclude the log message if the message string matches the regexp +func (e ExcludeByRegexp) Exclude(level Level, msg string, args ...interface{}) bool { + return e.Regexp.MatchString(msg) +} + +// ExcludeFuncs is a slice of functions that will called to see if a log entry +// should be filtered or not. It stops calling functions once at least one returns +// true. +type ExcludeFuncs []func(level Level, msg string, args ...interface{}) bool + +// Calls each function until one of them returns true +func (ff ExcludeFuncs) Exclude(level Level, msg string, args ...interface{}) bool { + for _, f := range ff { + if f(level, msg, args...) { + return true + } + } + + return false +} diff --git a/vendor/github.com/hashicorp/go-hclog/global.go b/vendor/github.com/hashicorp/go-hclog/global.go new file mode 100644 index 000000000..22ebc57d8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/global.go @@ -0,0 +1,62 @@ +package hclog + +import ( + "sync" +) + +var ( + protect sync.Once + def Logger + + // DefaultOptions is used to create the Default logger. These are read + // only when the Default logger is created, so set them as soon as the + // process starts. + DefaultOptions = &LoggerOptions{ + Level: DefaultLevel, + Output: DefaultOutput, + } +) + +// Default returns a globally held logger. This can be a good starting +// place, and then you can use .With() and .Name() to create sub-loggers +// to be used in more specific contexts. +// The value of the Default logger can be set via SetDefault() or by +// changing the options in DefaultOptions. +// +// This method is goroutine safe, returning a global from memory, but +// cause should be used if SetDefault() is called it random times +// in the program as that may result in race conditions and an unexpected +// Logger being returned. +func Default() Logger { + protect.Do(func() { + // If SetDefault was used before Default() was called, we need to + // detect that here. + if def == nil { + def = New(DefaultOptions) + } + }) + + return def +} + +// L is a short alias for Default(). +func L() Logger { + return Default() +} + +// SetDefault changes the logger to be returned by Default()and L() +// to the one given. This allows packages to use the default logger +// and have higher level packages change it to match the execution +// environment. It returns any old default if there is one. +// +// NOTE: This is expected to be called early in the program to setup +// a default logger. As such, it does not attempt to make itself +// not racy with regard to the value of the default logger. Ergo +// if it is called in goroutines, you may experience race conditions +// with other goroutines retrieving the default logger. Basically, +// don't do that. 
+func SetDefault(log Logger) Logger { + old := def + def = log + return old +} diff --git a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go new file mode 100644 index 000000000..08a6677eb --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go @@ -0,0 +1,246 @@ +package hclog + +import ( + "io" + "log" + "sync" + "sync/atomic" +) + +var _ Logger = &interceptLogger{} + +type interceptLogger struct { + Logger + + mu *sync.Mutex + sinkCount *int32 + Sinks map[SinkAdapter]struct{} +} + +func NewInterceptLogger(opts *LoggerOptions) InterceptLogger { + intercept := &interceptLogger{ + Logger: New(opts), + mu: new(sync.Mutex), + sinkCount: new(int32), + Sinks: make(map[SinkAdapter]struct{}), + } + + atomic.StoreInt32(intercept.sinkCount, 0) + + return intercept +} + +func (i *interceptLogger) Log(level Level, msg string, args ...interface{}) { + i.Logger.Log(level, msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), level, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at TRACE level to log and sinks +func (i *interceptLogger) Trace(msg string, args ...interface{}) { + i.Logger.Trace(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Trace, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at DEBUG level to log and sinks +func (i *interceptLogger) Debug(msg string, args ...interface{}) { + i.Logger.Debug(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Debug, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at INFO level to log and sinks +func (i *interceptLogger) Info(msg string, args ...interface{}) { + i.Logger.Info(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Info, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at WARN level to log and sinks +func (i *interceptLogger) Warn(msg string, args ...interface{}) { + i.Logger.Warn(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Warn, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at ERROR level to log and sinks +func (i *interceptLogger) Error(msg string, args ...interface{}) { + i.Logger.Error(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Error, msg, i.retrieveImplied(args...)...) + } +} + +func (i *interceptLogger) retrieveImplied(args ...interface{}) []interface{} { + top := i.Logger.ImpliedArgs() + + cp := make([]interface{}, len(top)+len(args)) + copy(cp, top) + copy(cp[len(top):], args) + + return cp +} + +// Create a new sub-Logger that a name decending from the current name. +// This is used to create a subsystem specific Logger. +// Registered sinks will subscribe to these messages as well. +func (i *interceptLogger) Named(name string) Logger { + var sub interceptLogger + + sub = *i + + sub.Logger = i.Logger.Named(name) + + return &sub +} + +// Create a new sub-Logger with an explicit name. 
This ignores the current +// name. This is used to create a standalone logger that doesn't fall +// within the normal hierarchy. Registered sinks will subscribe +// to these messages as well. +func (i *interceptLogger) ResetNamed(name string) Logger { + var sub interceptLogger + + sub = *i + + sub.Logger = i.Logger.ResetNamed(name) + + return &sub +} + +// Create a new sub-Logger that a name decending from the current name. +// This is used to create a subsystem specific Logger. +// Registered sinks will subscribe to these messages as well. +func (i *interceptLogger) NamedIntercept(name string) InterceptLogger { + var sub interceptLogger + + sub = *i + + sub.Logger = i.Logger.Named(name) + + return &sub +} + +// Create a new sub-Logger with an explicit name. This ignores the current +// name. This is used to create a standalone logger that doesn't fall +// within the normal hierarchy. Registered sinks will subscribe +// to these messages as well. +func (i *interceptLogger) ResetNamedIntercept(name string) InterceptLogger { + var sub interceptLogger + + sub = *i + + sub.Logger = i.Logger.ResetNamed(name) + + return &sub +} + +// Return a sub-Logger for which every emitted log message will contain +// the given key/value pairs. This is used to create a context specific +// Logger. +func (i *interceptLogger) With(args ...interface{}) Logger { + var sub interceptLogger + + sub = *i + + sub.Logger = i.Logger.With(args...) + + return &sub +} + +// RegisterSink attaches a SinkAdapter to interceptLoggers sinks. +func (i *interceptLogger) RegisterSink(sink SinkAdapter) { + i.mu.Lock() + defer i.mu.Unlock() + + i.Sinks[sink] = struct{}{} + + atomic.AddInt32(i.sinkCount, 1) +} + +// DeregisterSink removes a SinkAdapter from interceptLoggers sinks. +func (i *interceptLogger) DeregisterSink(sink SinkAdapter) { + i.mu.Lock() + defer i.mu.Unlock() + + delete(i.Sinks, sink) + + atomic.AddInt32(i.sinkCount, -1) +} + +// Create a *log.Logger that will send it's data through this Logger. This +// allows packages that expect to be using the standard library to log to +// actually use this logger, which will also send to any registered sinks. +func (i *interceptLogger) StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger { + if opts == nil { + opts = &StandardLoggerOptions{} + } + + return log.New(i.StandardWriterIntercept(opts), "", 0) +} + +func (i *interceptLogger) StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer { + return &stdlogAdapter{ + log: i, + inferLevels: opts.InferLevels, + forceLevel: opts.ForceLevel, + } +} + +func (i *interceptLogger) ResetOutput(opts *LoggerOptions) error { + if or, ok := i.Logger.(OutputResettable); ok { + return or.ResetOutput(opts) + } else { + return nil + } +} + +func (i *interceptLogger) ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error { + if or, ok := i.Logger.(OutputResettable); ok { + return or.ResetOutputWithFlush(opts, flushable) + } else { + return nil + } +} diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go new file mode 100644 index 000000000..7158125de --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -0,0 +1,665 @@ +package hclog + +import ( + "bytes" + "encoding" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "os" + "reflect" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/fatih/color" +) + +// TimeFormat to use for logging. 
This is a version of RFC3339 that contains +// contains millisecond precision +const TimeFormat = "2006-01-02T15:04:05.000Z0700" + +// errJsonUnsupportedTypeMsg is included in log json entries, if an arg cannot be serialized to json +const errJsonUnsupportedTypeMsg = "logging contained values that don't serialize to json" + +var ( + _levelToBracket = map[Level]string{ + Debug: "[DEBUG]", + Trace: "[TRACE]", + Info: "[INFO] ", + Warn: "[WARN] ", + Error: "[ERROR]", + } + + _levelToColor = map[Level]*color.Color{ + Debug: color.New(color.FgHiWhite), + Trace: color.New(color.FgHiGreen), + Info: color.New(color.FgHiBlue), + Warn: color.New(color.FgHiYellow), + Error: color.New(color.FgHiRed), + } +) + +// Make sure that intLogger is a Logger +var _ Logger = &intLogger{} + +// intLogger is an internal logger implementation. Internal in that it is +// defined entirely by this package. +type intLogger struct { + json bool + caller bool + name string + timeFormat string + + // This is an interface so that it's shared by any derived loggers, since + // those derived loggers share the bufio.Writer as well. + mutex Locker + writer *writer + level *int32 + + implied []interface{} + + exclude func(level Level, msg string, args ...interface{}) bool +} + +// New returns a configured logger. +func New(opts *LoggerOptions) Logger { + return newLogger(opts) +} + +// NewSinkAdapter returns a SinkAdapter with configured settings +// defined by LoggerOptions +func NewSinkAdapter(opts *LoggerOptions) SinkAdapter { + return newLogger(opts) +} + +func newLogger(opts *LoggerOptions) *intLogger { + if opts == nil { + opts = &LoggerOptions{} + } + + output := opts.Output + if output == nil { + output = DefaultOutput + } + + level := opts.Level + if level == NoLevel { + level = DefaultLevel + } + + mutex := opts.Mutex + if mutex == nil { + mutex = new(sync.Mutex) + } + + l := &intLogger{ + json: opts.JSONFormat, + caller: opts.IncludeLocation, + name: opts.Name, + timeFormat: TimeFormat, + mutex: mutex, + writer: newWriter(output, opts.Color), + level: new(int32), + exclude: opts.Exclude, + } + + l.setColorization(opts) + + if opts.DisableTime { + l.timeFormat = "" + } else if opts.TimeFormat != "" { + l.timeFormat = opts.TimeFormat + } + + atomic.StoreInt32(l.level, int32(level)) + + return l +} + +// Log a message and a set of key/value pairs if the given level is at +// or more severe that the threshold configured in the Logger. +func (l *intLogger) log(name string, level Level, msg string, args ...interface{}) { + if level < Level(atomic.LoadInt32(l.level)) { + return + } + + t := time.Now() + + l.mutex.Lock() + defer l.mutex.Unlock() + + if l.exclude != nil && l.exclude(level, msg, args...) { + return + } + + if l.json { + l.logJSON(t, name, level, msg, args...) + } else { + l.logPlain(t, name, level, msg, args...) + } + + l.writer.Flush(level) +} + +// Cleanup a path by returning the last 2 segments of the path only. +func trimCallerPath(path string) string { + // lovely borrowed from zap + // nb. To make sure we trim the path correctly on Windows too, we + // counter-intuitively need to use '/' and *not* os.PathSeparator here, + // because the path given originates from Go stdlib, specifically + // runtime.Caller() which (as of Mar/17) returns forward slashes even on + // Windows. + // + // See https://github.com/golang/go/issues/3335 + // and https://github.com/golang/go/issues/18151 + // + // for discussion on the issue on Go side. + + // Find the last separator. 
+ idx := strings.LastIndexByte(path, '/') + if idx == -1 { + return path + } + + // Find the penultimate separator. + idx = strings.LastIndexByte(path[:idx], '/') + if idx == -1 { + return path + } + + return path[idx+1:] +} + +var logImplFile = regexp.MustCompile(`.+intlogger.go|.+interceptlogger.go$`) + +// Non-JSON logging format function +func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, args ...interface{}) { + if len(l.timeFormat) > 0 { + l.writer.WriteString(t.Format(l.timeFormat)) + l.writer.WriteByte(' ') + } + + s, ok := _levelToBracket[level] + if ok { + l.writer.WriteString(s) + } else { + l.writer.WriteString("[?????]") + } + + offset := 3 + if l.caller { + // Check if the caller is inside our package and inside + // a logger implementation file + if _, file, _, ok := runtime.Caller(3); ok { + match := logImplFile.MatchString(file) + if match { + offset = 4 + } + } + + if _, file, line, ok := runtime.Caller(offset); ok { + l.writer.WriteByte(' ') + l.writer.WriteString(trimCallerPath(file)) + l.writer.WriteByte(':') + l.writer.WriteString(strconv.Itoa(line)) + l.writer.WriteByte(':') + } + } + + l.writer.WriteByte(' ') + + if name != "" { + l.writer.WriteString(name) + l.writer.WriteString(": ") + } + + l.writer.WriteString(msg) + + args = append(l.implied, args...) + + var stacktrace CapturedStacktrace + + if args != nil && len(args) > 0 { + if len(args)%2 != 0 { + cs, ok := args[len(args)-1].(CapturedStacktrace) + if ok { + args = args[:len(args)-1] + stacktrace = cs + } else { + extra := args[len(args)-1] + args = append(args[:len(args)-1], MissingKey, extra) + } + } + + l.writer.WriteByte(':') + + FOR: + for i := 0; i < len(args); i = i + 2 { + var ( + val string + raw bool + ) + + switch st := args[i+1].(type) { + case string: + val = st + case int: + val = strconv.FormatInt(int64(st), 10) + case int64: + val = strconv.FormatInt(int64(st), 10) + case int32: + val = strconv.FormatInt(int64(st), 10) + case int16: + val = strconv.FormatInt(int64(st), 10) + case int8: + val = strconv.FormatInt(int64(st), 10) + case uint: + val = strconv.FormatUint(uint64(st), 10) + case uint64: + val = strconv.FormatUint(uint64(st), 10) + case uint32: + val = strconv.FormatUint(uint64(st), 10) + case uint16: + val = strconv.FormatUint(uint64(st), 10) + case uint8: + val = strconv.FormatUint(uint64(st), 10) + case Hex: + val = "0x" + strconv.FormatUint(uint64(st), 16) + case Octal: + val = "0" + strconv.FormatUint(uint64(st), 8) + case Binary: + val = "0b" + strconv.FormatUint(uint64(st), 2) + case CapturedStacktrace: + stacktrace = st + continue FOR + case Format: + val = fmt.Sprintf(st[0].(string), st[1:]...) 
+ default: + v := reflect.ValueOf(st) + if v.Kind() == reflect.Slice { + val = l.renderSlice(v) + raw = true + } else { + val = fmt.Sprintf("%v", st) + } + } + + l.writer.WriteByte(' ') + switch st := args[i].(type) { + case string: + l.writer.WriteString(st) + default: + l.writer.WriteString(fmt.Sprintf("%s", st)) + } + l.writer.WriteByte('=') + + if !raw && strings.ContainsAny(val, " \t\n\r") { + l.writer.WriteByte('"') + l.writer.WriteString(val) + l.writer.WriteByte('"') + } else { + l.writer.WriteString(val) + } + } + } + + l.writer.WriteString("\n") + + if stacktrace != "" { + l.writer.WriteString(string(stacktrace)) + } +} + +func (l *intLogger) renderSlice(v reflect.Value) string { + var buf bytes.Buffer + + buf.WriteRune('[') + + for i := 0; i < v.Len(); i++ { + if i > 0 { + buf.WriteString(", ") + } + + sv := v.Index(i) + + var val string + + switch sv.Kind() { + case reflect.String: + val = sv.String() + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: + val = strconv.FormatInt(sv.Int(), 10) + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + val = strconv.FormatUint(sv.Uint(), 10) + default: + val = fmt.Sprintf("%v", sv.Interface()) + } + + if strings.ContainsAny(val, " \t\n\r") { + buf.WriteByte('"') + buf.WriteString(val) + buf.WriteByte('"') + } else { + buf.WriteString(val) + } + } + + buf.WriteRune(']') + + return buf.String() +} + +// JSON logging function +func (l *intLogger) logJSON(t time.Time, name string, level Level, msg string, args ...interface{}) { + vals := l.jsonMapEntry(t, name, level, msg) + args = append(l.implied, args...) + + if args != nil && len(args) > 0 { + if len(args)%2 != 0 { + cs, ok := args[len(args)-1].(CapturedStacktrace) + if ok { + args = args[:len(args)-1] + vals["stacktrace"] = cs + } else { + extra := args[len(args)-1] + args = append(args[:len(args)-1], MissingKey, extra) + } + } + + for i := 0; i < len(args); i = i + 2 { + val := args[i+1] + switch sv := val.(type) { + case error: + // Check if val is of type error. If error type doesn't + // implement json.Marshaler or encoding.TextMarshaler + // then set val to err.Error() so that it gets marshaled + switch sv.(type) { + case json.Marshaler, encoding.TextMarshaler: + default: + val = sv.Error() + } + case Format: + val = fmt.Sprintf(sv[0].(string), sv[1:]...) 
+ } + + var key string + + switch st := args[i].(type) { + case string: + key = st + default: + key = fmt.Sprintf("%s", st) + } + vals[key] = val + } + } + + err := json.NewEncoder(l.writer).Encode(vals) + if err != nil { + if _, ok := err.(*json.UnsupportedTypeError); ok { + plainVal := l.jsonMapEntry(t, name, level, msg) + plainVal["@warn"] = errJsonUnsupportedTypeMsg + + json.NewEncoder(l.writer).Encode(plainVal) + } + } +} + +func (l intLogger) jsonMapEntry(t time.Time, name string, level Level, msg string) map[string]interface{} { + vals := map[string]interface{}{ + "@message": msg, + "@timestamp": t.Format("2006-01-02T15:04:05.000000Z07:00"), + } + + var levelStr string + switch level { + case Error: + levelStr = "error" + case Warn: + levelStr = "warn" + case Info: + levelStr = "info" + case Debug: + levelStr = "debug" + case Trace: + levelStr = "trace" + default: + levelStr = "all" + } + + vals["@level"] = levelStr + + if name != "" { + vals["@module"] = name + } + + if l.caller { + if _, file, line, ok := runtime.Caller(4); ok { + vals["@caller"] = fmt.Sprintf("%s:%d", file, line) + } + } + return vals +} + +// Emit the message and args at the provided level +func (l *intLogger) Log(level Level, msg string, args ...interface{}) { + l.log(l.Name(), level, msg, args...) +} + +// Emit the message and args at DEBUG level +func (l *intLogger) Debug(msg string, args ...interface{}) { + l.log(l.Name(), Debug, msg, args...) +} + +// Emit the message and args at TRACE level +func (l *intLogger) Trace(msg string, args ...interface{}) { + l.log(l.Name(), Trace, msg, args...) +} + +// Emit the message and args at INFO level +func (l *intLogger) Info(msg string, args ...interface{}) { + l.log(l.Name(), Info, msg, args...) +} + +// Emit the message and args at WARN level +func (l *intLogger) Warn(msg string, args ...interface{}) { + l.log(l.Name(), Warn, msg, args...) +} + +// Emit the message and args at ERROR level +func (l *intLogger) Error(msg string, args ...interface{}) { + l.log(l.Name(), Error, msg, args...) +} + +// Indicate that the logger would emit TRACE level logs +func (l *intLogger) IsTrace() bool { + return Level(atomic.LoadInt32(l.level)) == Trace +} + +// Indicate that the logger would emit DEBUG level logs +func (l *intLogger) IsDebug() bool { + return Level(atomic.LoadInt32(l.level)) <= Debug +} + +// Indicate that the logger would emit INFO level logs +func (l *intLogger) IsInfo() bool { + return Level(atomic.LoadInt32(l.level)) <= Info +} + +// Indicate that the logger would emit WARN level logs +func (l *intLogger) IsWarn() bool { + return Level(atomic.LoadInt32(l.level)) <= Warn +} + +// Indicate that the logger would emit ERROR level logs +func (l *intLogger) IsError() bool { + return Level(atomic.LoadInt32(l.level)) <= Error +} + +const MissingKey = "EXTRA_VALUE_AT_END" + +// Return a sub-Logger for which every emitted log message will contain +// the given key/value pairs. This is used to create a context specific +// Logger. 
+func (l *intLogger) With(args ...interface{}) Logger { + var extra interface{} + + if len(args)%2 != 0 { + extra = args[len(args)-1] + args = args[:len(args)-1] + } + + sl := *l + + result := make(map[string]interface{}, len(l.implied)+len(args)) + keys := make([]string, 0, len(l.implied)+len(args)) + + // Read existing args, store map and key for consistent sorting + for i := 0; i < len(l.implied); i += 2 { + key := l.implied[i].(string) + keys = append(keys, key) + result[key] = l.implied[i+1] + } + // Read new args, store map and key for consistent sorting + for i := 0; i < len(args); i += 2 { + key := args[i].(string) + _, exists := result[key] + if !exists { + keys = append(keys, key) + } + result[key] = args[i+1] + } + + // Sort keys to be consistent + sort.Strings(keys) + + sl.implied = make([]interface{}, 0, len(l.implied)+len(args)) + for _, k := range keys { + sl.implied = append(sl.implied, k) + sl.implied = append(sl.implied, result[k]) + } + + if extra != nil { + sl.implied = append(sl.implied, MissingKey, extra) + } + + return &sl +} + +// Create a new sub-Logger that a name decending from the current name. +// This is used to create a subsystem specific Logger. +func (l *intLogger) Named(name string) Logger { + sl := *l + + if sl.name != "" { + sl.name = sl.name + "." + name + } else { + sl.name = name + } + + return &sl +} + +// Create a new sub-Logger with an explicit name. This ignores the current +// name. This is used to create a standalone logger that doesn't fall +// within the normal hierarchy. +func (l *intLogger) ResetNamed(name string) Logger { + sl := *l + + sl.name = name + + return &sl +} + +func (l *intLogger) ResetOutput(opts *LoggerOptions) error { + if opts.Output == nil { + return errors.New("given output is nil") + } + + l.mutex.Lock() + defer l.mutex.Unlock() + + return l.resetOutput(opts) +} + +func (l *intLogger) ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error { + if opts.Output == nil { + return errors.New("given output is nil") + } + if flushable == nil { + return errors.New("flushable is nil") + } + + l.mutex.Lock() + defer l.mutex.Unlock() + + if err := flushable.Flush(); err != nil { + return err + } + + return l.resetOutput(opts) +} + +func (l *intLogger) resetOutput(opts *LoggerOptions) error { + l.writer = newWriter(opts.Output, opts.Color) + l.setColorization(opts) + return nil +} + +// Update the logging level on-the-fly. This will affect all subloggers as +// well. +func (l *intLogger) SetLevel(level Level) { + atomic.StoreInt32(l.level, int32(level)) +} + +// Create a *log.Logger that will send it's data through this Logger. This +// allows packages that expect to be using the standard library log to actually +// use this logger. +func (l *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { + if opts == nil { + opts = &StandardLoggerOptions{} + } + + return log.New(l.StandardWriter(opts), "", 0) +} + +func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { + return &stdlogAdapter{ + log: l, + inferLevels: opts.InferLevels, + forceLevel: opts.ForceLevel, + } +} + +// checks if the underlying io.Writer is a file, and +// panics if not. For use by colorization. 
+func (l *intLogger) checkWriterIsFile() *os.File { + fi, ok := l.writer.w.(*os.File) + if !ok { + panic("Cannot enable coloring of non-file Writers") + } + return fi +} + +// Accept implements the SinkAdapter interface +func (i *intLogger) Accept(name string, level Level, msg string, args ...interface{}) { + i.log(name, level, msg, args...) +} + +// ImpliedArgs returns the loggers implied args +func (i *intLogger) ImpliedArgs() []interface{} { + return i.implied +} + +// Name returns the loggers name +func (i *intLogger) Name() string { + return i.name +} diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go new file mode 100644 index 000000000..8d5eed76e --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/logger.go @@ -0,0 +1,327 @@ +package hclog + +import ( + "io" + "log" + "os" + "strings" +) + +var ( + //DefaultOutput is used as the default log output. + DefaultOutput io.Writer = os.Stderr + + // DefaultLevel is used as the default log level. + DefaultLevel = Info +) + +// Level represents a log level. +type Level int32 + +const ( + // NoLevel is a special level used to indicate that no level has been + // set and allow for a default to be used. + NoLevel Level = 0 + + // Trace is the most verbose level. Intended to be used for the tracing + // of actions in code, such as function enters/exits, etc. + Trace Level = 1 + + // Debug information for programmer lowlevel analysis. + Debug Level = 2 + + // Info information about steady state operations. + Info Level = 3 + + // Warn information about rare but handled events. + Warn Level = 4 + + // Error information about unrecoverable events. + Error Level = 5 +) + +// Format is a simple convience type for when formatting is required. When +// processing a value of this type, the logger automatically treats the first +// argument as a Printf formatting string and passes the rest as the values +// to be formatted. For example: L.Info(Fmt{"%d beans/day", beans}). +type Format []interface{} + +// Fmt returns a Format type. This is a convience function for creating a Format +// type. +func Fmt(str string, args ...interface{}) Format { + return append(Format{str}, args...) +} + +// A simple shortcut to format numbers in hex when displayed with the normal +// text output. For example: L.Info("header value", Hex(17)) +type Hex int + +// A simple shortcut to format numbers in octal when displayed with the normal +// text output. For example: L.Info("perms", Octal(17)) +type Octal int + +// A simple shortcut to format numbers in binary when displayed with the normal +// text output. For example: L.Info("bits", Binary(17)) +type Binary int + +// ColorOption expresses how the output should be colored, if at all. +type ColorOption uint8 + +const ( + // ColorOff is the default coloration, and does not + // inject color codes into the io.Writer. + ColorOff ColorOption = iota + // AutoColor checks if the io.Writer is a tty, + // and if so enables coloring. + AutoColor + // ForceColor will enable coloring, regardless of whether + // the io.Writer is a tty or not. + ForceColor +) + +// LevelFromString returns a Level type for the named log level, or "NoLevel" if +// the level string is invalid. This facilitates setting the log level via +// config or environment variable by name in a predictable way. +func LevelFromString(levelStr string) Level { + // We don't care about case. Accept both "INFO" and "info". 
+ levelStr = strings.ToLower(strings.TrimSpace(levelStr)) + switch levelStr { + case "trace": + return Trace + case "debug": + return Debug + case "info": + return Info + case "warn": + return Warn + case "error": + return Error + default: + return NoLevel + } +} + +func (l Level) String() string { + switch l { + case Trace: + return "trace" + case Debug: + return "debug" + case Info: + return "info" + case Warn: + return "warn" + case Error: + return "error" + case NoLevel: + return "none" + default: + return "unknown" + } +} + +// Logger describes the interface that must be implemeted by all loggers. +type Logger interface { + // Args are alternating key, val pairs + // keys must be strings + // vals can be any type, but display is implementation specific + // Emit a message and key/value pairs at a provided log level + Log(level Level, msg string, args ...interface{}) + + // Emit a message and key/value pairs at the TRACE level + Trace(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the DEBUG level + Debug(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the INFO level + Info(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the WARN level + Warn(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the ERROR level + Error(msg string, args ...interface{}) + + // Indicate if TRACE logs would be emitted. This and the other Is* guards + // are used to elide expensive logging code based on the current level. + IsTrace() bool + + // Indicate if DEBUG logs would be emitted. This and the other Is* guards + IsDebug() bool + + // Indicate if INFO logs would be emitted. This and the other Is* guards + IsInfo() bool + + // Indicate if WARN logs would be emitted. This and the other Is* guards + IsWarn() bool + + // Indicate if ERROR logs would be emitted. This and the other Is* guards + IsError() bool + + // ImpliedArgs returns With key/value pairs + ImpliedArgs() []interface{} + + // Creates a sublogger that will always have the given key/value pairs + With(args ...interface{}) Logger + + // Returns the Name of the logger + Name() string + + // Create a logger that will prepend the name string on the front of all messages. + // If the logger already has a name, the new value will be appended to the current + // name. That way, a major subsystem can use this to decorate all it's own logs + // without losing context. + Named(name string) Logger + + // Create a logger that will prepend the name string on the front of all messages. + // This sets the name of the logger to the value directly, unlike Named which honor + // the current name as well. + ResetNamed(name string) Logger + + // Updates the level. This should affect all sub-loggers as well. If an + // implementation cannot update the level on the fly, it should no-op. + SetLevel(level Level) + + // Return a value that conforms to the stdlib log.Logger interface + StandardLogger(opts *StandardLoggerOptions) *log.Logger + + // Return a value that conforms to io.Writer, which can be passed into log.SetOutput() + StandardWriter(opts *StandardLoggerOptions) io.Writer +} + +// StandardLoggerOptions can be used to configure a new standard logger. +type StandardLoggerOptions struct { + // Indicate that some minimal parsing should be done on strings to try + // and detect their level and re-emit them. + // This supports the strings like [ERROR], [ERR] [TRACE], [WARN], [INFO], + // [DEBUG] and strip it off before reapplying it. 
+ InferLevels bool + + // ForceLevel is used to force all output from the standard logger to be at + // the specified level. Similar to InferLevels, this will strip any level + // prefix contained in the logged string before applying the forced level. + // If set, this override InferLevels. + ForceLevel Level +} + +// LoggerOptions can be used to configure a new logger. +type LoggerOptions struct { + // Name of the subsystem to prefix logs with + Name string + + // The threshold for the logger. Anything less severe is supressed + Level Level + + // Where to write the logs to. Defaults to os.Stderr if nil + Output io.Writer + + // An optional Locker in case Output is shared. This can be a sync.Mutex or + // a NoopLocker if the caller wants control over output, e.g. for batching + // log lines. + Mutex Locker + + // Control if the output should be in JSON. + JSONFormat bool + + // Include file and line information in each log line + IncludeLocation bool + + // The time format to use instead of the default + TimeFormat string + + // Control whether or not to display the time at all. This is required + // because setting TimeFormat to empty assumes the default format. + DisableTime bool + + // Color the output. On Windows, colored logs are only avaiable for io.Writers that + // are concretely instances of *os.File. + Color ColorOption + + // A function which is called with the log information and if it returns true the value + // should not be logged. + // This is useful when interacting with a system that you wish to suppress the log + // message for (because it's too noisy, etc) + Exclude func(level Level, msg string, args ...interface{}) bool +} + +// InterceptLogger describes the interface for using a logger +// that can register different output sinks. +// This is useful for sending lower level log messages +// to a different output while keeping the root logger +// at a higher one. +type InterceptLogger interface { + // Logger is the root logger for an InterceptLogger + Logger + + // RegisterSink adds a SinkAdapter to the InterceptLogger + RegisterSink(sink SinkAdapter) + + // DeregisterSink removes a SinkAdapter from the InterceptLogger + DeregisterSink(sink SinkAdapter) + + // Create a interceptlogger that will prepend the name string on the front of all messages. + // If the logger already has a name, the new value will be appended to the current + // name. That way, a major subsystem can use this to decorate all it's own logs + // without losing context. + NamedIntercept(name string) InterceptLogger + + // Create a interceptlogger that will prepend the name string on the front of all messages. + // This sets the name of the logger to the value directly, unlike Named which honor + // the current name as well. + ResetNamedIntercept(name string) InterceptLogger + + // Return a value that conforms to the stdlib log.Logger interface + StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger + + // Return a value that conforms to io.Writer, which can be passed into log.SetOutput() + StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer +} + +// SinkAdapter describes the interface that must be implemented +// in order to Register a new sink to an InterceptLogger +type SinkAdapter interface { + Accept(name string, level Level, msg string, args ...interface{}) +} + +// Flushable represents a method for flushing an output buffer. It can be used +// if Resetting the log to use a new output, in order to flush the writes to +// the existing output beforehand. 
+type Flushable interface { + Flush() error +} + +// OutputResettable provides ways to swap the output in use at runtime +type OutputResettable interface { + // ResetOutput swaps the current output writer with the one given in the + // opts. Color options given in opts will be used for the new output. + ResetOutput(opts *LoggerOptions) error + + // ResetOutputWithFlush swaps the current output writer with the one given + // in the opts, first calling Flush on the given Flushable. Color options + // given in opts will be used for the new output. + ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error +} + +// Locker is used for locking output. If not set when creating a logger, a +// sync.Mutex will be used internally. +type Locker interface { + // Lock is called when the output is going to be changed or written to + Lock() + + // Unlock is called when the operation that called Lock() completes + Unlock() +} + +// NoopLocker implements locker but does nothing. This is useful if the client +// wants tight control over locking, in order to provide grouping of log +// entries or other functionality. +type NoopLocker struct{} + +// Lock does nothing +func (n NoopLocker) Lock() {} + +// Unlock does nothing +func (n NoopLocker) Unlock() {} + +var _ Locker = (*NoopLocker)(nil) diff --git a/vendor/github.com/hashicorp/go-hclog/nulllogger.go b/vendor/github.com/hashicorp/go-hclog/nulllogger.go new file mode 100644 index 000000000..bc14f7708 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/nulllogger.go @@ -0,0 +1,58 @@ +package hclog + +import ( + "io" + "io/ioutil" + "log" +) + +// NewNullLogger instantiates a Logger for which all calls +// will succeed without doing anything. +// Useful for testing purposes. +func NewNullLogger() Logger { + return &nullLogger{} +} + +type nullLogger struct{} + +func (l *nullLogger) Log(level Level, msg string, args ...interface{}) {} + +func (l *nullLogger) Trace(msg string, args ...interface{}) {} + +func (l *nullLogger) Debug(msg string, args ...interface{}) {} + +func (l *nullLogger) Info(msg string, args ...interface{}) {} + +func (l *nullLogger) Warn(msg string, args ...interface{}) {} + +func (l *nullLogger) Error(msg string, args ...interface{}) {} + +func (l *nullLogger) IsTrace() bool { return false } + +func (l *nullLogger) IsDebug() bool { return false } + +func (l *nullLogger) IsInfo() bool { return false } + +func (l *nullLogger) IsWarn() bool { return false } + +func (l *nullLogger) IsError() bool { return false } + +func (l *nullLogger) ImpliedArgs() []interface{} { return []interface{}{} } + +func (l *nullLogger) With(args ...interface{}) Logger { return l } + +func (l *nullLogger) Name() string { return "" } + +func (l *nullLogger) Named(name string) Logger { return l } + +func (l *nullLogger) ResetNamed(name string) Logger { return l } + +func (l *nullLogger) SetLevel(level Level) {} + +func (l *nullLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { + return log.New(l.StandardWriter(opts), "", log.LstdFlags) +} + +func (l *nullLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { + return ioutil.Discard +} diff --git a/vendor/github.com/hashicorp/go-hclog/stacktrace.go b/vendor/github.com/hashicorp/go-hclog/stacktrace.go new file mode 100644 index 000000000..9b27bd3d3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/stacktrace.go @@ -0,0 +1,109 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package hclog + +import ( + "bytes" + "runtime" + "strconv" + "strings" + "sync" +) + +var ( + _stacktraceIgnorePrefixes = []string{ + "runtime.goexit", + "runtime.main", + } + _stacktracePool = sync.Pool{ + New: func() interface{} { + return newProgramCounters(64) + }, + } +) + +// CapturedStacktrace represents a stacktrace captured by a previous call +// to log.Stacktrace. If passed to a logging function, the stacktrace +// will be appended. +type CapturedStacktrace string + +// Stacktrace captures a stacktrace of the current goroutine and returns +// it to be passed to a logging function. +func Stacktrace() CapturedStacktrace { + return CapturedStacktrace(takeStacktrace()) +} + +func takeStacktrace() string { + programCounters := _stacktracePool.Get().(*programCounters) + defer _stacktracePool.Put(programCounters) + + var buffer bytes.Buffer + + for { + // Skip the call to runtime.Counters and takeStacktrace so that the + // program counters start at the caller of takeStacktrace. + n := runtime.Callers(2, programCounters.pcs) + if n < cap(programCounters.pcs) { + programCounters.pcs = programCounters.pcs[:n] + break + } + // Don't put the too-short counter slice back into the pool; this lets + // the pool adjust if we consistently take deep stacktraces. 
+ programCounters = newProgramCounters(len(programCounters.pcs) * 2) + } + + i := 0 + frames := runtime.CallersFrames(programCounters.pcs) + for frame, more := frames.Next(); more; frame, more = frames.Next() { + if shouldIgnoreStacktraceFunction(frame.Function) { + continue + } + if i != 0 { + buffer.WriteByte('\n') + } + i++ + buffer.WriteString(frame.Function) + buffer.WriteByte('\n') + buffer.WriteByte('\t') + buffer.WriteString(frame.File) + buffer.WriteByte(':') + buffer.WriteString(strconv.Itoa(int(frame.Line))) + } + + return buffer.String() +} + +func shouldIgnoreStacktraceFunction(function string) bool { + for _, prefix := range _stacktraceIgnorePrefixes { + if strings.HasPrefix(function, prefix) { + return true + } + } + return false +} + +type programCounters struct { + pcs []uintptr +} + +func newProgramCounters(size int) *programCounters { + return &programCounters{make([]uintptr, size)} +} diff --git a/vendor/github.com/hashicorp/go-hclog/stdlog.go b/vendor/github.com/hashicorp/go-hclog/stdlog.go new file mode 100644 index 000000000..f35d875d3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/stdlog.go @@ -0,0 +1,95 @@ +package hclog + +import ( + "bytes" + "log" + "strings" +) + +// Provides a io.Writer to shim the data out of *log.Logger +// and back into our Logger. This is basically the only way to +// build upon *log.Logger. +type stdlogAdapter struct { + log Logger + inferLevels bool + forceLevel Level +} + +// Take the data, infer the levels if configured, and send it through +// a regular Logger. +func (s *stdlogAdapter) Write(data []byte) (int, error) { + str := string(bytes.TrimRight(data, " \t\n")) + + if s.forceLevel != NoLevel { + // Use pickLevel to strip log levels included in the line since we are + // forcing the level + _, str := s.pickLevel(str) + + // Log at the forced level + s.dispatch(str, s.forceLevel) + } else if s.inferLevels { + level, str := s.pickLevel(str) + s.dispatch(str, level) + } else { + s.log.Info(str) + } + + return len(data), nil +} + +func (s *stdlogAdapter) dispatch(str string, level Level) { + switch level { + case Trace: + s.log.Trace(str) + case Debug: + s.log.Debug(str) + case Info: + s.log.Info(str) + case Warn: + s.log.Warn(str) + case Error: + s.log.Error(str) + default: + s.log.Info(str) + } +} + +// Detect, based on conventions, what log level this is. 
+func (s *stdlogAdapter) pickLevel(str string) (Level, string) { + switch { + case strings.HasPrefix(str, "[DEBUG]"): + return Debug, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[TRACE]"): + return Trace, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[INFO]"): + return Info, strings.TrimSpace(str[6:]) + case strings.HasPrefix(str, "[WARN]"): + return Warn, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[ERROR]"): + return Error, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[ERR]"): + return Error, strings.TrimSpace(str[5:]) + default: + return Info, str + } +} + +type logWriter struct { + l *log.Logger +} + +func (l *logWriter) Write(b []byte) (int, error) { + l.l.Println(string(bytes.TrimRight(b, " \n\t"))) + return len(b), nil +} + +// Takes a standard library logger and returns a Logger that will write to it +func FromStandardLogger(l *log.Logger, opts *LoggerOptions) Logger { + var dl LoggerOptions = *opts + + // Use the time format that log.Logger uses + dl.DisableTime = true + dl.Output = &logWriter{l} + + return New(&dl) +} diff --git a/vendor/github.com/hashicorp/go-hclog/writer.go b/vendor/github.com/hashicorp/go-hclog/writer.go new file mode 100644 index 000000000..421a1f06c --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/writer.go @@ -0,0 +1,82 @@ +package hclog + +import ( + "bytes" + "io" +) + +type writer struct { + b bytes.Buffer + w io.Writer + color ColorOption +} + +func newWriter(w io.Writer, color ColorOption) *writer { + return &writer{w: w, color: color} +} + +func (w *writer) Flush(level Level) (err error) { + var unwritten = w.b.Bytes() + + if w.color != ColorOff { + color := _levelToColor[level] + unwritten = []byte(color.Sprintf("%s", unwritten)) + } + + if lw, ok := w.w.(LevelWriter); ok { + _, err = lw.LevelWrite(level, unwritten) + } else { + _, err = w.w.Write(unwritten) + } + w.b.Reset() + return err +} + +func (w *writer) Write(p []byte) (int, error) { + return w.b.Write(p) +} + +func (w *writer) WriteByte(c byte) error { + return w.b.WriteByte(c) +} + +func (w *writer) WriteString(s string) (int, error) { + return w.b.WriteString(s) +} + +// LevelWriter is the interface that wraps the LevelWrite method. +type LevelWriter interface { + LevelWrite(level Level, p []byte) (n int, err error) +} + +// LeveledWriter writes all log messages to the standard writer, +// except for log levels that are defined in the overrides map. +type LeveledWriter struct { + standard io.Writer + overrides map[Level]io.Writer +} + +// NewLeveledWriter returns an initialized LeveledWriter. +// +// standard will be used as the default writer for all log levels, +// except for log levels that are defined in the overrides map. +func NewLeveledWriter(standard io.Writer, overrides map[Level]io.Writer) *LeveledWriter { + return &LeveledWriter{ + standard: standard, + overrides: overrides, + } +} + +// Write implements io.Writer. +func (lw *LeveledWriter) Write(p []byte) (int, error) { + return lw.standard.Write(p) +} + +// LevelWrite implements LevelWriter. 
+func (lw *LeveledWriter) LevelWrite(level Level, p []byte) (int, error) { + w, ok := lw.overrides[level] + if !ok { + w = lw.standard + } + return w.Write(p) +} diff --git a/vendor/github.com/hashicorp/go-plugin/.gitignore b/vendor/github.com/hashicorp/go-plugin/.gitignore new file mode 100644 index 000000000..4befed30a --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/.gitignore @@ -0,0 +1,2 @@ +.DS_Store +.idea diff --git a/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md b/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md new file mode 100644 index 000000000..3d0379c50 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md @@ -0,0 +1,102 @@ +## v1.6.0 + +CHANGES: + +* plugin: Plugins written in other languages can optionally start to advertise whether they support gRPC broker multiplexing. + If the environment variable `PLUGIN_MULTIPLEX_GRPC` is set, it is safe to include a seventh field containing a boolean + value in the `|`-separated protocol negotiation line. + +ENHANCEMENTS: + +* Support muxing gRPC broker connections over a single listener [[GH-288](https://github.com/hashicorp/go-plugin/pull/288)] +* client: Configurable buffer size for reading plugin log lines [[GH-265](https://github.com/hashicorp/go-plugin/pull/265)] +* Use `buf` for proto generation [[GH-286](https://github.com/hashicorp/go-plugin/pull/286)] +* deps: bump golang.org/x/net to v0.17.0 [[GH-285](https://github.com/hashicorp/go-plugin/pull/285)] +* deps: bump golang.org/x/sys to v0.13.0 [[GH-285](https://github.com/hashicorp/go-plugin/pull/285)] +* deps: bump golang.org/x/text to v0.13.0 [[GH-285](https://github.com/hashicorp/go-plugin/pull/285)] + +## v1.5.2 + +ENHANCEMENTS: + +client: New `UnixSocketConfig.TempDir` option allows setting the directory to use when creating plugin-specific Unix socket directories [[GH-282](https://github.com/hashicorp/go-plugin/pull/282)] + +## v1.5.1 + +BUGS: + +* server: `PLUGIN_UNIX_SOCKET_DIR` is consistently used for gRPC broker sockets as well as the initial socket [[GH-277](https://github.com/hashicorp/go-plugin/pull/277)] + +ENHANCEMENTS: + +* client: New `UnixSocketConfig` option in `ClientConfig` to support making the client's Unix sockets group-writable [[GH-277](https://github.com/hashicorp/go-plugin/pull/277)] + +## v1.5.0 + +ENHANCEMENTS: + +* client: New `runner.Runner` interface to support clients providing custom plugin command runner implementations [[GH-270](https://github.com/hashicorp/go-plugin/pull/270)] + * Accessible via new `ClientConfig` field `RunnerFunc`, which is mutually exclusive with `Cmd` and `Reattach` + * Reattaching support via `ReattachConfig` field `ReattachFunc` +* client: New `ClientConfig` field `SkipHostEnv` allows omitting the client process' own environment variables from the plugin command's environment [[GH-270](https://github.com/hashicorp/go-plugin/pull/270)] +* client: Add `ID()` method to `Client` for retrieving the pid or other unique ID of a running plugin [[GH-272](https://github.com/hashicorp/go-plugin/pull/272)] +* server: Support setting the directory to create Unix sockets in with the env var `PLUGIN_UNIX_SOCKET_DIR` [[GH-270](https://github.com/hashicorp/go-plugin/pull/270)] +* server: Support setting group write permission and a custom group name or gid owner with the env var `PLUGIN_UNIX_SOCKET_GROUP` [[GH-270](https://github.com/hashicorp/go-plugin/pull/270)] + +## v1.4.11-rc1 + +ENHANCEMENTS: + +* deps: bump protoreflect to v1.15.1 [[GH-264](https://github.com/hashicorp/go-plugin/pull/264)] + 
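
For orientation, here is a minimal sketch of how the go-hclog API vendored above (logger.go, intlogger.go, interceptlogger.go, stacktrace.go, writer.go) is typically consumed. This is not part of the vendored files or of this repository's changes; the logger names, keys, and the trace.log path are illustrative, and error handling is elided for brevity.

```go
package main

import (
	"os"

	hclog "github.com/hashicorp/go-hclog"
)

func main() {
	// Plain structured logger; the fields map onto LoggerOptions in logger.go.
	logger := hclog.New(&hclog.LoggerOptions{
		Name:  "host",
		Level: hclog.LevelFromString("debug"), // returns NoLevel for unknown strings
	})
	logger.Info("starting", "port", 8080, "took", hclog.Fmt("%dms", 250))
	logger.Error("example failure", "stacktrace", hclog.Stacktrace()) // stacktrace is appended after the line

	// Sub-loggers inherit the level and carry implied key/value pairs.
	sub := logger.Named("syncer").With("component", "pods")
	if sub.IsDebug() {
		sub.Debug("expensive detail, only rendered when debug is enabled")
	}

	// An InterceptLogger fans messages out to registered sinks in addition to
	// its own output, so a trace-level file sink can coexist with an
	// info-level root logger.
	intercept := hclog.NewInterceptLogger(&hclog.LoggerOptions{Name: "root", Level: hclog.Info})
	f, _ := os.Create("trace.log") // error handling elided
	sink := hclog.NewSinkAdapter(&hclog.LoggerOptions{Level: hclog.Trace, Output: f})
	intercept.RegisterSink(sink)
	defer intercept.DeregisterSink(sink)

	intercept.Trace("only the file sink records this")
	intercept.Warn("both the root output and the file sink record this")
}
```

Because the root logger in this sketch is at info, the Trace call only reaches the registered file sink, while the Warn call goes to both outputs.
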
+## v1.4.10 + +BUG FIXES: + +* additional notes: ensure to close files [[GH-241](https://github.com/hashicorp/go-plugin/pull/241)] + +ENHANCEMENTS: + +* deps: Remove direct dependency on golang.org/x/net [[GH-240](https://github.com/hashicorp/go-plugin/pull/240)] + +## v1.4.9 + +ENHANCEMENTS: + +* client: Remove log warning introduced in 1.4.5 when SecureConfig is nil. [[GH-238](https://github.com/hashicorp/go-plugin/pull/238)] + +## v1.4.8 + +BUG FIXES: + +* Fix windows build: [[GH-227](https://github.com/hashicorp/go-plugin/pull/227)] + +## v1.4.7 + +ENHANCEMENTS: + +* More detailed error message on plugin start failure: [[GH-223](https://github.com/hashicorp/go-plugin/pull/223)] + +## v1.4.6 + +BUG FIXES: + +* server: Prevent gRPC broker goroutine leak when using `GRPCServer` type `GracefulStop()` or `Stop()` methods [[GH-220](https://github.com/hashicorp/go-plugin/pull/220)] + +## v1.4.5 + +ENHANCEMENTS: + +* client: log warning when SecureConfig is nil [[GH-207](https://github.com/hashicorp/go-plugin/pull/207)] + + +## v1.4.4 + +ENHANCEMENTS: + +* client: increase level of plugin exit logs [[GH-195](https://github.com/hashicorp/go-plugin/pull/195)] + +BUG FIXES: + +* Bidirectional communication: fix bidirectional communication when AutoMTLS is enabled [[GH-193](https://github.com/hashicorp/go-plugin/pull/193)] +* RPC: Trim a spurious log message for plugins using RPC [[GH-186](https://github.com/hashicorp/go-plugin/pull/186)] diff --git a/vendor/github.com/hashicorp/go-plugin/LICENSE b/vendor/github.com/hashicorp/go-plugin/LICENSE new file mode 100644 index 000000000..042324fb7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/LICENSE @@ -0,0 +1,355 @@ +Copyright (c) 2016 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
“Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md new file mode 100644 index 000000000..50baee06e --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/README.md @@ -0,0 +1,165 @@ +# Go Plugin System over RPC + +`go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system +that has been in use by HashiCorp tooling for over 4 years. While initially +created for [Packer](https://www.packer.io), it is additionally in use by +[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), +[Vault](https://www.vaultproject.io), +[Boundary](https://www.boundaryproject.io), +and [Waypoint](https://www.waypointproject.io). + +While the plugin system is over RPC, it is currently only designed to work +over a local [reliable] network. Plugins over a real network are not supported +and will lead to unexpected behavior. + +This plugin system has been used on millions of machines across many different +projects and has proven to be battle hardened and ready for production use. + +## Features + +The HashiCorp plugin system supports a number of features: + +**Plugins are Go interface implementations.** This makes writing and consuming +plugins feel very natural. To a plugin author: you just implement an +interface as if it were going to run in the same process. For a plugin user: +you just use and call functions on an interface as if it were in the same +process. This plugin system handles the communication in between. + +**Cross-language support.** Plugins can be written (and consumed) by +almost every major language. This library supports serving plugins via +[gRPC](http://www.grpc.io). gRPC-based plugins enable plugins to be written +in any language. + +**Complex arguments and return values are supported.** This library +provides APIs for handling complex arguments and return values such +as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library +(`MuxBroker`) for creating new connections between the client/server to +serve additional interfaces or transfer raw data. + +**Bidirectional communication.** Because the plugin system supports +complex arguments, the host process can send it interface implementations +and the plugin can call back into the host process. + +**Built-in Logging.** Any plugins that use the `log` standard library +will have log data automatically sent to the host process. The host +process will mirror this output prefixed with the path to the plugin +binary. This makes debugging with plugins simple. If the host system +uses [hclog](https://github.com/hashicorp/go-hclog) then the log data +will be structured. If the plugin also uses hclog, logs from the plugin +will be sent to the host hclog and be structured. + +**Protocol Versioning.** A very basic "protocol version" is supported that +can be incremented to invalidate any previous plugins. This is useful when +interface signatures are changing, protocol level changes are necessary, +etc. When a protocol version is incompatible, a human friendly error +message is shown to the end user. + +**Stdout/Stderr Syncing.** While plugins are subprocesses, they can continue +to use stdout/stderr as usual and the output will get mirrored back to +the host process. 
The host process can control what `io.Writer` these +streams go to to prevent this from happening. + +**TTY Preservation.** Plugin subprocesses are connected to the identical +stdin file descriptor as the host process, allowing software that requires +a TTY to work. For example, a plugin can execute `ssh` and even though there +are multiple subprocesses and RPC happening, it will look and act perfectly +to the end user. + +**Host upgrade while a plugin is running.** Plugins can be "reattached" +so that the host process can be upgraded while the plugin is still running. +This requires the host/plugin to know this is possible and daemonize +properly. `NewClient` takes a `ReattachConfig` to determine if and how to +reattach. + +**Cryptographically Secure Plugins.** Plugins can be verified with an expected +checksum and RPC communications can be configured to use TLS. The host process +must be properly secured to protect this configuration. + +## Architecture + +The HashiCorp plugin system works by launching subprocesses and communicating +over RPC (using standard `net/rpc` or [gRPC](http://www.grpc.io)). A single +connection is made between any plugin and the host process. For net/rpc-based +plugins, we use a [connection multiplexing](https://github.com/hashicorp/yamux) +library to multiplex any other connections on top. For gRPC-based plugins, +the HTTP2 protocol handles multiplexing. + +This architecture has a number of benefits: + + * Plugins can't crash your host process: A panic in a plugin doesn't + panic the plugin user. + + * Plugins are very easy to write: just write a Go application and `go build`. + Or use any other language to write a gRPC server with a tiny amount of + boilerplate to support go-plugin. + + * Plugins are very easy to install: just put the binary in a location where + the host will find it (depends on the host but this library also provides + helpers), and the plugin host handles the rest. + + * Plugins can be relatively secure: The plugin only has access to the + interfaces and args given to it, not to the entire memory space of the + process. Additionally, go-plugin can communicate with the plugin over + TLS. + +## Usage + +To use the plugin system, you must take the following steps. These are +high-level steps that must be done. Examples are available in the +`examples/` directory. + + 1. Choose the interface(s) you want to expose for plugins. + + 2. For each interface, implement an implementation of that interface + that communicates over a `net/rpc` connection or over a + [gRPC](http://www.grpc.io) connection or both. You'll have to implement + both a client and server implementation. + + 3. Create a `Plugin` implementation that knows how to create the RPC + client/server for a given plugin type. + + 4. Plugin authors call `plugin.Serve` to serve a plugin from the + `main` function. + + 5. Plugin users use `plugin.Client` to launch a subprocess and request + an interface implementation over RPC. + +That's it! In practice, step 2 is the most tedious and time consuming step. +Even so, it isn't very difficult and you can see examples in the `examples/` +directory as well as throughout our various open source projects. + +For complete API documentation, see [GoDoc](https://godoc.org/github.com/hashicorp/go-plugin). + +## Roadmap + +Our plugin system is constantly evolving. As we use the plugin system for +new projects or for new features in existing projects, we constantly find +improvements we can make. 
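
To make the five usage steps above concrete, here is a minimal sketch of the host side over `net/rpc`. Everything specific in it, the `Greeter` interface, the handshake values, and the `./greeter-plugin` binary path, is illustrative and not part of go-plugin itself:

```go
package main

import (
	"fmt"
	"net/rpc"
	"os/exec"

	"github.com/hashicorp/go-plugin"
)

// Greeter is the interface exposed to plugins (step 1).
type Greeter interface {
	Greet() string
}

// GreeterRPCClient is the host-side half of step 2: it forwards calls
// over the net/rpc connection that go-plugin established.
type GreeterRPCClient struct{ client *rpc.Client }

func (g *GreeterRPCClient) Greet() string {
	var resp string
	if err := g.client.Call("Plugin.Greet", new(interface{}), &resp); err != nil {
		panic(err)
	}
	return resp
}

// GreeterRPCServer is the plugin-side half of step 2: it receives calls
// and delegates to a real Greeter implementation.
type GreeterRPCServer struct{ Impl Greeter }

func (s *GreeterRPCServer) Greet(args interface{}, resp *string) error {
	*resp = s.Impl.Greet()
	return nil
}

// GreeterPlugin is step 3: it tells go-plugin how to build the RPC
// client and server for this plugin type.
type GreeterPlugin struct{ Impl Greeter }

func (p *GreeterPlugin) Server(*plugin.MuxBroker) (interface{}, error) {
	return &GreeterRPCServer{Impl: p.Impl}, nil
}

func (p *GreeterPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
	return &GreeterRPCClient{client: c}, nil
}

// handshakeConfig must match between host and plugin; the values are made up.
var handshakeConfig = plugin.HandshakeConfig{
	ProtocolVersion:  1,
	MagicCookieKey:   "BASIC_PLUGIN",
	MagicCookieValue: "hello",
}

var pluginMap = map[string]plugin.Plugin{
	"greeter": &GreeterPlugin{},
}

// main is step 5: the host launches the plugin binary and dispenses Greeter.
func main() {
	client := plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig: handshakeConfig,
		Plugins:         pluginMap,
		Cmd:             exec.Command("./greeter-plugin"), // illustrative binary path
	})
	defer client.Kill()

	rpcClient, err := client.Client()
	if err != nil {
		panic(err)
	}
	raw, err := rpcClient.Dispense("greeter")
	if err != nil {
		panic(err)
	}
	fmt.Println(raw.(Greeter).Greet())
}
```

The plugin binary's own `main` is step 4: it wraps a concrete `Greeter` in `GreeterPlugin{Impl: ...}` and calls `plugin.Serve(&plugin.ServeConfig{HandshakeConfig: handshakeConfig, Plugins: pluginMap})`; go-plugin then handles the handshake and all connection details described under Architecture.
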
+ +At this point in time, the roadmap for the plugin system is: + +**Semantic Versioning.** Plugins will be able to implement a semantic version. +This plugin system will give host processes a system for constraining +versions. This is in addition to the protocol versioning already present +which is more for larger underlying changes. + +## What About Shared Libraries? + +When we started using plugins (late 2012, early 2013), plugins over RPC +were the only option since Go didn't support dynamic library loading. Today, +Go supports the [plugin](https://golang.org/pkg/plugin/) standard library with +a number of limitations. Since 2012, our plugin system has stabilized +from tens of millions of users using it, and has many benefits we've come to +value greatly. + +For example, we use this plugin system in +[Vault](https://www.vaultproject.io) where dynamic library loading is +not acceptable for security reasons. That is an extreme +example, but we believe our library system has more upsides than downsides +over dynamic library loading and since we've had it built and tested for years, +we'll continue to use it. + +Shared libraries have one major advantage over our system which is much +higher performance. In real world scenarios across our various tools, +we've never required any more performance out of our plugin system and it +has seen very high throughput, so this isn't a concern for us at the moment. diff --git a/vendor/github.com/hashicorp/go-plugin/buf.gen.yaml b/vendor/github.com/hashicorp/go-plugin/buf.gen.yaml new file mode 100644 index 000000000..033d0153b --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/buf.gen.yaml @@ -0,0 +1,14 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +version: v1 +plugins: + - plugin: buf.build/protocolbuffers/go + out: . + opt: + - paths=source_relative + - plugin: buf.build/grpc/go:v1.3.0 + out: . + opt: + - paths=source_relative + - require_unimplemented_servers=false diff --git a/vendor/github.com/hashicorp/go-plugin/buf.yaml b/vendor/github.com/hashicorp/go-plugin/buf.yaml new file mode 100644 index 000000000..3d0da4c71 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/buf.yaml @@ -0,0 +1,7 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +version: v1 +build: + excludes: + - examples/ \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go new file mode 100644 index 000000000..73f6b3515 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/client.go @@ -0,0 +1,1239 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "bufio" + "context" + "crypto/subtle" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "hash" + "io" + "io/ioutil" + "net" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/cmdrunner" + "github.com/hashicorp/go-plugin/internal/grpcmux" + "github.com/hashicorp/go-plugin/runner" + "google.golang.org/grpc" +) + +// If this is 1, then we've called CleanupClients. This can be used +// by plugin RPC implementations to change error behavior since you +// can expected network connection errors at this point. This should be +// read by using sync/atomic. 
+var Killed uint32 = 0 + +// This is a slice of the "managed" clients which are cleaned up when +// calling Cleanup +var managedClients = make([]*Client, 0, 5) +var managedClientsLock sync.Mutex + +// Error types +var ( + // ErrProcessNotFound is returned when a client is instantiated to + // reattach to an existing process and it isn't found. + ErrProcessNotFound = cmdrunner.ErrProcessNotFound + + // ErrChecksumsDoNotMatch is returned when binary's checksum doesn't match + // the one provided in the SecureConfig. + ErrChecksumsDoNotMatch = errors.New("checksums did not match") + + // ErrSecureNoChecksum is returned when an empty checksum is provided to the + // SecureConfig. + ErrSecureConfigNoChecksum = errors.New("no checksum provided") + + // ErrSecureNoHash is returned when a nil Hash object is provided to the + // SecureConfig. + ErrSecureConfigNoHash = errors.New("no hash implementation provided") + + // ErrSecureConfigAndReattach is returned when both Reattach and + // SecureConfig are set. + ErrSecureConfigAndReattach = errors.New("only one of Reattach or SecureConfig can be set") + + // ErrGRPCBrokerMuxNotSupported is returned when the client requests + // multiplexing over the gRPC broker, but the plugin does not support the + // feature. In most cases, this should be resolvable by updating and + // rebuilding the plugin, or restarting the plugin with + // ClientConfig.GRPCBrokerMultiplex set to false. + ErrGRPCBrokerMuxNotSupported = errors.New("client requested gRPC broker multiplexing but plugin does not support the feature") +) + +// defaultPluginLogBufferSize is the default size of the buffer used to read from stderr for plugin log lines. +const defaultPluginLogBufferSize = 64 * 1024 + +// Client handles the lifecycle of a plugin application. It launches +// plugins, connects to them, dispenses interface implementations, and handles +// killing the process. +// +// Plugin hosts should use one Client for each plugin executable. To +// dispense a plugin type, use the `Client.Client` function, and then +// cal `Dispense`. This awkward API is mostly historical but is used to split +// the client that deals with subprocess management and the client that +// does RPC management. +// +// See NewClient and ClientConfig for using a Client. +type Client struct { + config *ClientConfig + exited bool + l sync.Mutex + address net.Addr + runner runner.AttachedRunner + client ClientProtocol + protocol Protocol + logger hclog.Logger + doneCtx context.Context + ctxCancel context.CancelFunc + negotiatedVersion int + + // clientWaitGroup is used to manage the lifecycle of the plugin management + // goroutines. + clientWaitGroup sync.WaitGroup + + // stderrWaitGroup is used to prevent the command's Wait() function from + // being called before we've finished reading from the stderr pipe. + stderrWaitGroup sync.WaitGroup + + // processKilled is used for testing only, to flag when the process was + // forcefully killed. + processKilled bool + + unixSocketCfg UnixSocketConfig + + grpcMuxerOnce sync.Once + grpcMuxer *grpcmux.GRPCClientMuxer +} + +// NegotiatedVersion returns the protocol version negotiated with the server. +// This is only valid after Start() is called. +func (c *Client) NegotiatedVersion() int { + return c.negotiatedVersion +} + +// ID returns a unique ID for the running plugin. By default this is the process +// ID (pid), but it could take other forms if RunnerFunc was provided. 
+func (c *Client) ID() string { + c.l.Lock() + defer c.l.Unlock() + + if c.runner != nil { + return c.runner.ID() + } + + return "" +} + +// ClientConfig is the configuration used to initialize a new +// plugin client. After being used to initialize a plugin client, +// that configuration must not be modified again. +type ClientConfig struct { + // HandshakeConfig is the configuration that must match servers. + HandshakeConfig + + // Plugins are the plugins that can be consumed. + // The implied version of this PluginSet is the Handshake.ProtocolVersion. + Plugins PluginSet + + // VersionedPlugins is a map of PluginSets for specific protocol versions. + // These can be used to negotiate a compatible version between client and + // server. If this is set, Handshake.ProtocolVersion is not required. + VersionedPlugins map[int]PluginSet + + // One of the following must be set, but not both. + // + // Cmd is the unstarted subprocess for starting the plugin. If this is + // set, then the Client starts the plugin process on its own and connects + // to it. + // + // Reattach is configuration for reattaching to an existing plugin process + // that is already running. This isn't common. + Cmd *exec.Cmd + Reattach *ReattachConfig + + // RunnerFunc allows consumers to provide their own implementation of + // runner.Runner and control the context within which a plugin is executed. + // The cmd argument will have been copied from the config and populated with + // environment variables that a go-plugin server expects to read such as + // AutoMTLS certs and the magic cookie key. + RunnerFunc func(l hclog.Logger, cmd *exec.Cmd, tmpDir string) (runner.Runner, error) + + // SecureConfig is configuration for verifying the integrity of the + // executable. It can not be used with Reattach. + SecureConfig *SecureConfig + + // TLSConfig is used to enable TLS on the RPC client. + TLSConfig *tls.Config + + // Managed represents if the client should be managed by the + // plugin package or not. If true, then by calling CleanupClients, + // it will automatically be cleaned up. Otherwise, the client + // user is fully responsible for making sure to Kill all plugin + // clients. By default the client is _not_ managed. + Managed bool + + // The minimum and maximum port to use for communicating with + // the subprocess. If not set, this defaults to 10,000 and 25,000 + // respectively. + MinPort, MaxPort uint + + // StartTimeout is the timeout to wait for the plugin to say it + // has started successfully. + StartTimeout time.Duration + + // If non-nil, then the stderr of the client will be written to here + // (as well as the log). This is the original os.Stderr of the subprocess. + // This isn't the output of synced stderr. + Stderr io.Writer + + // SyncStdout, SyncStderr can be set to override the + // respective os.Std* values in the plugin. Care should be taken to + // avoid races here. If these are nil, then this will be set to + // ioutil.Discard. + SyncStdout io.Writer + SyncStderr io.Writer + + // AllowedProtocols is a list of allowed protocols. If this isn't set, + // then only netrpc is allowed. This is so that older go-plugin systems + // can show friendly errors if they see a plugin with an unknown + // protocol. + // + // By setting this, you can cause an error immediately on plugin start + // if an unsupported protocol is used with a good error message. + // + // If this isn't set at all (nil value), then only net/rpc is accepted. + // This is done for legacy reasons. 
You must explicitly opt-in to + // new protocols. + AllowedProtocols []Protocol + + // Logger is the logger that the client will used. If none is provided, + // it will default to hclog's default logger. + Logger hclog.Logger + + // PluginLogBufferSize is the buffer size(bytes) to read from stderr for plugin log lines. + // If this is 0, then the default of 64KB is used. + PluginLogBufferSize int + + // AutoMTLS has the client and server automatically negotiate mTLS for + // transport authentication. This ensures that only the original client will + // be allowed to connect to the server, and all other connections will be + // rejected. The client will also refuse to connect to any server that isn't + // the original instance started by the client. + // + // In this mode of operation, the client generates a one-time use tls + // certificate, sends the public x.509 certificate to the new server, and + // the server generates a one-time use tls certificate, and sends the public + // x.509 certificate back to the client. These are used to authenticate all + // rpc connections between the client and server. + // + // Setting AutoMTLS to true implies that the server must support the + // protocol, and correctly negotiate the tls certificates, or a connection + // failure will result. + // + // The client should not set TLSConfig, nor should the server set a + // TLSProvider, because AutoMTLS implies that a new certificate and tls + // configuration will be generated at startup. + // + // You cannot Reattach to a server with this option enabled. + AutoMTLS bool + + // GRPCDialOptions allows plugin users to pass custom grpc.DialOption + // to create gRPC connections. This only affects plugins using the gRPC + // protocol. + GRPCDialOptions []grpc.DialOption + + // GRPCBrokerMultiplex turns on multiplexing for the gRPC broker. The gRPC + // broker will multiplex all brokered gRPC servers over the plugin's original + // listener socket instead of making a new listener for each server. The + // go-plugin library currently only includes a Go implementation for the + // server (i.e. plugin) side of gRPC broker multiplexing. + // + // Does not support reattaching. + // + // Multiplexed gRPC streams MUST be established sequentially, i.e. after + // calling AcceptAndServe from one side, wait for the other side to Dial + // before calling AcceptAndServe again. + GRPCBrokerMultiplex bool + + // SkipHostEnv allows plugins to run without inheriting the parent process' + // environment variables. + SkipHostEnv bool + + // UnixSocketConfig configures additional options for any Unix sockets + // that are created. Not normally required. Not supported on Windows. + UnixSocketConfig *UnixSocketConfig +} + +type UnixSocketConfig struct { + // If set, go-plugin will change the owner of any Unix sockets created to + // this group, and set them as group-writable. Can be a name or gid. The + // client process must be a member of this group or chown will fail. + Group string + + // TempDir specifies the base directory to use when creating a plugin-specific + // temporary directory. It is expected to already exist and be writable. If + // not set, defaults to the directory chosen by os.MkdirTemp. + TempDir string + + // The directory to create Unix sockets in. Internally created and managed + // by go-plugin and deleted when the plugin is killed. Will be created + // inside TempDir if specified. + socketDir string +} + +// ReattachConfig is used to configure a client to reattach to an +// already-running plugin process. 
You can retrieve this information by +// calling ReattachConfig on Client. +type ReattachConfig struct { + Protocol Protocol + ProtocolVersion int + Addr net.Addr + Pid int + + // ReattachFunc allows consumers to provide their own implementation of + // runner.AttachedRunner and attach to something other than a plain process. + // At least one of Pid or ReattachFunc must be set. + ReattachFunc runner.ReattachFunc + + // Test is set to true if this is reattaching to to a plugin in "test mode" + // (see ServeConfig.Test). In this mode, client.Kill will NOT kill the + // process and instead will rely on the plugin to terminate itself. This + // should not be used in non-test environments. + Test bool +} + +// SecureConfig is used to configure a client to verify the integrity of an +// executable before running. It does this by verifying the checksum is +// expected. Hash is used to specify the hashing method to use when checksumming +// the file. The configuration is verified by the client by calling the +// SecureConfig.Check() function. +// +// The host process should ensure the checksum was provided by a trusted and +// authoritative source. The binary should be installed in such a way that it +// can not be modified by an unauthorized user between the time of this check +// and the time of execution. +type SecureConfig struct { + Checksum []byte + Hash hash.Hash +} + +// Check takes the filepath to an executable and returns true if the checksum of +// the file matches the checksum provided in the SecureConfig. +func (s *SecureConfig) Check(filePath string) (bool, error) { + if len(s.Checksum) == 0 { + return false, ErrSecureConfigNoChecksum + } + + if s.Hash == nil { + return false, ErrSecureConfigNoHash + } + + file, err := os.Open(filePath) + if err != nil { + return false, err + } + defer file.Close() + + _, err = io.Copy(s.Hash, file) + if err != nil { + return false, err + } + + sum := s.Hash.Sum(nil) + + return subtle.ConstantTimeCompare(sum, s.Checksum) == 1, nil +} + +// This makes sure all the managed subprocesses are killed and properly +// logged. This should be called before the parent process running the +// plugins exits. +// +// This must only be called _once_. +func CleanupClients() { + // Set the killed to true so that we don't get unexpected panics + atomic.StoreUint32(&Killed, 1) + + // Kill all the managed clients in parallel and use a WaitGroup + // to wait for them all to finish up. + var wg sync.WaitGroup + managedClientsLock.Lock() + for _, client := range managedClients { + wg.Add(1) + + go func(client *Client) { + client.Kill() + wg.Done() + }(client) + } + managedClientsLock.Unlock() + + wg.Wait() +} + +// NewClient creates a new plugin client which manages the lifecycle of an external +// plugin and gets the address for the RPC connection. +// +// The client must be cleaned up at some point by calling Kill(). If +// the client is a managed client (created with ClientConfig.Managed) you +// can just call CleanupClients at the end of your program and they will +// be properly cleaned. 
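// As a rough, illustrative sketch (the "./my-plugin" path and the digest are
// placeholders, and handshakeConfig/pluginMap are assumed to exist), a host
// that wants checksum verification wires SecureConfig into its ClientConfig:
//
//	sum, err := hex.DecodeString("<trusted sha256 hex digest>")
//	if err != nil {
//		return err
//	}
//	client := plugin.NewClient(&plugin.ClientConfig{
//		HandshakeConfig: handshakeConfig, // must match the plugin's ServeConfig
//		Plugins:         pluginMap,
//		Cmd:             exec.Command("./my-plugin"),
//		SecureConfig: &plugin.SecureConfig{
//			Checksum: sum,
//			Hash:     sha256.New(),
//		},
//	})
//	defer client.Kill()
//
// Start then calls SecureConfig.Check against cmd.Path and refuses to launch
// the subprocess with ErrChecksumsDoNotMatch if the binary has changed. Note
// that SecureConfig cannot be combined with Reattach (ErrSecureConfigAndReattach).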
+func NewClient(config *ClientConfig) (c *Client) { + if config.MinPort == 0 && config.MaxPort == 0 { + config.MinPort = 10000 + config.MaxPort = 25000 + } + + if config.StartTimeout == 0 { + config.StartTimeout = 1 * time.Minute + } + + if config.Stderr == nil { + config.Stderr = ioutil.Discard + } + + if config.SyncStdout == nil { + config.SyncStdout = io.Discard + } + if config.SyncStderr == nil { + config.SyncStderr = io.Discard + } + + if config.AllowedProtocols == nil { + config.AllowedProtocols = []Protocol{ProtocolNetRPC} + } + + if config.Logger == nil { + config.Logger = hclog.New(&hclog.LoggerOptions{ + Output: hclog.DefaultOutput, + Level: hclog.Trace, + Name: "plugin", + }) + } + + if config.PluginLogBufferSize == 0 { + config.PluginLogBufferSize = defaultPluginLogBufferSize + } + + c = &Client{ + config: config, + logger: config.Logger, + } + if config.Managed { + managedClientsLock.Lock() + managedClients = append(managedClients, c) + managedClientsLock.Unlock() + } + + return +} + +// Client returns the protocol client for this connection. +// +// Subsequent calls to this will return the same client. +func (c *Client) Client() (ClientProtocol, error) { + _, err := c.Start() + if err != nil { + return nil, err + } + + c.l.Lock() + defer c.l.Unlock() + + if c.client != nil { + return c.client, nil + } + + switch c.protocol { + case ProtocolNetRPC: + c.client, err = newRPCClient(c) + + case ProtocolGRPC: + c.client, err = newGRPCClient(c.doneCtx, c) + + default: + return nil, fmt.Errorf("unknown server protocol: %s", c.protocol) + } + + if err != nil { + c.client = nil + return nil, err + } + + return c.client, nil +} + +// Tells whether or not the underlying process has exited. +func (c *Client) Exited() bool { + c.l.Lock() + defer c.l.Unlock() + return c.exited +} + +// killed is used in tests to check if a process failed to exit gracefully, and +// needed to be killed. +func (c *Client) killed() bool { + c.l.Lock() + defer c.l.Unlock() + return c.processKilled +} + +// End the executing subprocess (if it is running) and perform any cleanup +// tasks necessary such as capturing any remaining logs and so on. +// +// This method blocks until the process successfully exits. +// +// This method can safely be called multiple times. +func (c *Client) Kill() { + // Grab a lock to read some private fields. + c.l.Lock() + runner := c.runner + addr := c.address + hostSocketDir := c.unixSocketCfg.socketDir + c.l.Unlock() + + // If there is no runner or ID, there is nothing to kill. + if runner == nil || runner.ID() == "" { + return + } + + defer func() { + // Wait for the all client goroutines to finish. + c.clientWaitGroup.Wait() + + if hostSocketDir != "" { + os.RemoveAll(hostSocketDir) + } + + // Make sure there is no reference to the old process after it has been + // killed. + c.l.Lock() + c.runner = nil + c.l.Unlock() + }() + + // We need to check for address here. It is possible that the plugin + // started (process != nil) but has no address (addr == nil) if the + // plugin failed at startup. If we do have an address, we need to close + // the plugin net connections. + graceful := false + if addr != nil { + // Close the client to cleanly exit the process. + client, err := c.Client() + if err == nil { + err = client.Close() + + // If there is no error, then we attempt to wait for a graceful + // exit. If there was an error, we assume that graceful cleanup + // won't happen and just force kill. + graceful = err == nil + if err != nil { + // If there was an error just log it. 
We're going to force + // kill in a moment anyways. + c.logger.Warn("error closing client during Kill", "err", err) + } + } else { + c.logger.Error("client", "error", err) + } + } + + // If we're attempting a graceful exit, then we wait for a short period + // of time to allow that to happen. To wait for this we just wait on the + // doneCh which would be closed if the process exits. + if graceful { + select { + case <-c.doneCtx.Done(): + c.logger.Debug("plugin exited") + return + case <-time.After(2 * time.Second): + } + } + + // If graceful exiting failed, just kill it + c.logger.Warn("plugin failed to exit gracefully") + if err := runner.Kill(context.Background()); err != nil { + c.logger.Debug("error killing plugin", "error", err) + } + + c.l.Lock() + c.processKilled = true + c.l.Unlock() +} + +// Start the underlying subprocess, communicating with it to negotiate +// a port for RPC connections, and returning the address to connect via RPC. +// +// This method is safe to call multiple times. Subsequent calls have no effect. +// Once a client has been started once, it cannot be started again, even if +// it was killed. +func (c *Client) Start() (addr net.Addr, err error) { + c.l.Lock() + defer c.l.Unlock() + + if c.address != nil { + return c.address, nil + } + + // If one of cmd or reattach isn't set, then it is an error. We wrap + // this in a {} for scoping reasons, and hopeful that the escape + // analysis will pop the stack here. + { + var mutuallyExclusiveOptions int + if c.config.Cmd != nil { + mutuallyExclusiveOptions += 1 + } + if c.config.Reattach != nil { + mutuallyExclusiveOptions += 1 + } + if c.config.RunnerFunc != nil { + mutuallyExclusiveOptions += 1 + } + if mutuallyExclusiveOptions != 1 { + return nil, fmt.Errorf("exactly one of Cmd, or Reattach, or RunnerFunc must be set") + } + + if c.config.SecureConfig != nil && c.config.Reattach != nil { + return nil, ErrSecureConfigAndReattach + } + + if c.config.GRPCBrokerMultiplex && c.config.Reattach != nil { + return nil, fmt.Errorf("gRPC broker multiplexing is not supported with Reattach config") + } + } + + if c.config.Reattach != nil { + return c.reattach() + } + + if c.config.VersionedPlugins == nil { + c.config.VersionedPlugins = make(map[int]PluginSet) + } + + // handle all plugins as versioned, using the handshake config as the default. + version := int(c.config.ProtocolVersion) + + // Make sure we're not overwriting a real version 0. If ProtocolVersion was + // non-zero, then we have to just assume the user made sure that + // VersionedPlugins doesn't conflict. + if _, ok := c.config.VersionedPlugins[version]; !ok && c.config.Plugins != nil { + c.config.VersionedPlugins[version] = c.config.Plugins + } + + var versionStrings []string + for v := range c.config.VersionedPlugins { + versionStrings = append(versionStrings, strconv.Itoa(v)) + } + + env := []string{ + fmt.Sprintf("%s=%s", c.config.MagicCookieKey, c.config.MagicCookieValue), + fmt.Sprintf("PLUGIN_MIN_PORT=%d", c.config.MinPort), + fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort), + fmt.Sprintf("PLUGIN_PROTOCOL_VERSIONS=%s", strings.Join(versionStrings, ",")), + } + if c.config.GRPCBrokerMultiplex { + env = append(env, fmt.Sprintf("%s=true", envMultiplexGRPC)) + } + + cmd := c.config.Cmd + if cmd == nil { + // It's only possible to get here if RunnerFunc is non-nil, but we'll + // still use cmd as a spec to populate metadata for the external + // implementation to consume. 
+ cmd = exec.Command("") + } + if !c.config.SkipHostEnv { + cmd.Env = append(cmd.Env, os.Environ()...) + } + cmd.Env = append(cmd.Env, env...) + cmd.Stdin = os.Stdin + + if c.config.SecureConfig != nil { + if ok, err := c.config.SecureConfig.Check(cmd.Path); err != nil { + return nil, fmt.Errorf("error verifying checksum: %s", err) + } else if !ok { + return nil, ErrChecksumsDoNotMatch + } + } + + // Setup a temporary certificate for client/server mtls, and send the public + // certificate to the plugin. + if c.config.AutoMTLS { + c.logger.Info("configuring client automatic mTLS") + certPEM, keyPEM, err := generateCert() + if err != nil { + c.logger.Error("failed to generate client certificate", "error", err) + return nil, err + } + cert, err := tls.X509KeyPair(certPEM, keyPEM) + if err != nil { + c.logger.Error("failed to parse client certificate", "error", err) + return nil, err + } + + cmd.Env = append(cmd.Env, fmt.Sprintf("PLUGIN_CLIENT_CERT=%s", certPEM)) + + c.config.TLSConfig = &tls.Config{ + Certificates: []tls.Certificate{cert}, + ClientAuth: tls.RequireAndVerifyClientCert, + MinVersion: tls.VersionTLS12, + ServerName: "localhost", + } + } + + if c.config.UnixSocketConfig != nil { + c.unixSocketCfg = *c.config.UnixSocketConfig + } + + if c.unixSocketCfg.Group != "" { + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", EnvUnixSocketGroup, c.unixSocketCfg.Group)) + } + + var runner runner.Runner + switch { + case c.config.RunnerFunc != nil: + c.unixSocketCfg.socketDir, err = os.MkdirTemp(c.unixSocketCfg.TempDir, "plugin-dir") + if err != nil { + return nil, err + } + // os.MkdirTemp creates folders with 0o700, so if we have a group + // configured we need to make it group-writable. + if c.unixSocketCfg.Group != "" { + err = setGroupWritable(c.unixSocketCfg.socketDir, c.unixSocketCfg.Group, 0o770) + if err != nil { + return nil, err + } + } + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", EnvUnixSocketDir, c.unixSocketCfg.socketDir)) + c.logger.Trace("created temporary directory for unix sockets", "dir", c.unixSocketCfg.socketDir) + + runner, err = c.config.RunnerFunc(c.logger, cmd, c.unixSocketCfg.socketDir) + if err != nil { + return nil, err + } + default: + runner, err = cmdrunner.NewCmdRunner(c.logger, cmd) + if err != nil { + return nil, err + } + + } + + c.runner = runner + startCtx, startCtxCancel := context.WithTimeout(context.Background(), c.config.StartTimeout) + defer startCtxCancel() + err = runner.Start(startCtx) + if err != nil { + return nil, err + } + + // Make sure the command is properly cleaned up if there is an error + defer func() { + rErr := recover() + + if err != nil || rErr != nil { + runner.Kill(context.Background()) + } + + if rErr != nil { + panic(rErr) + } + }() + + // Create a context for when we kill + c.doneCtx, c.ctxCancel = context.WithCancel(context.Background()) + + // Start goroutine that logs the stderr + c.clientWaitGroup.Add(1) + c.stderrWaitGroup.Add(1) + // logStderr calls Done() + go c.logStderr(runner.Name(), runner.Stderr()) + + c.clientWaitGroup.Add(1) + go func() { + // ensure the context is cancelled when we're done + defer c.ctxCancel() + + defer c.clientWaitGroup.Done() + + // wait to finish reading from stderr since the stderr pipe reader + // will be closed by the subsequent call to cmd.Wait(). + c.stderrWaitGroup.Wait() + + // Wait for the command to end. 
+ err := runner.Wait(context.Background()) + if err != nil { + c.logger.Error("plugin process exited", "plugin", runner.Name(), "id", runner.ID(), "error", err.Error()) + } else { + // Log and make sure to flush the logs right away + c.logger.Info("plugin process exited", "plugin", runner.Name(), "id", runner.ID()) + } + + os.Stderr.Sync() + + // Set that we exited, which takes a lock + c.l.Lock() + defer c.l.Unlock() + c.exited = true + }() + + // Start a goroutine that is going to be reading the lines + // out of stdout + linesCh := make(chan string) + c.clientWaitGroup.Add(1) + go func() { + defer c.clientWaitGroup.Done() + defer close(linesCh) + + scanner := bufio.NewScanner(runner.Stdout()) + for scanner.Scan() { + linesCh <- scanner.Text() + } + if scanner.Err() != nil { + c.logger.Error("error encountered while scanning stdout", "error", scanner.Err()) + } + }() + + // Make sure after we exit we read the lines from stdout forever + // so they don't block since it is a pipe. + // The scanner goroutine above will close this, but track it with a wait + // group for completeness. + c.clientWaitGroup.Add(1) + defer func() { + go func() { + defer c.clientWaitGroup.Done() + for range linesCh { + } + }() + }() + + // Some channels for the next step + timeout := time.After(c.config.StartTimeout) + + // Start looking for the address + c.logger.Debug("waiting for RPC address", "plugin", runner.Name()) + select { + case <-timeout: + err = errors.New("timeout while waiting for plugin to start") + case <-c.doneCtx.Done(): + err = errors.New("plugin exited before we could connect") + case line, ok := <-linesCh: + // Trim the line and split by "|" in order to get the parts of + // the output. + line = strings.TrimSpace(line) + parts := strings.Split(line, "|") + if len(parts) < 4 { + errText := fmt.Sprintf("Unrecognized remote plugin message: %s", line) + if !ok { + errText += "\n" + "Failed to read any lines from plugin's stdout" + } + additionalNotes := runner.Diagnose(context.Background()) + if additionalNotes != "" { + errText += "\n" + additionalNotes + } + err = errors.New(errText) + return + } + + // Check the core protocol. Wrapped in a {} for scoping. + { + var coreProtocol int + coreProtocol, err = strconv.Atoi(parts[0]) + if err != nil { + err = fmt.Errorf("Error parsing core protocol version: %s", err) + return + } + + if coreProtocol != CoreProtocolVersion { + err = fmt.Errorf("Incompatible core API version with plugin. "+ + "Plugin version: %s, Core version: %d\n\n"+ + "To fix this, the plugin usually only needs to be recompiled.\n"+ + "Please report this to the plugin author.", parts[0], CoreProtocolVersion) + return + } + } + + // Test the API version + version, pluginSet, err := c.checkProtoVersion(parts[1]) + if err != nil { + return addr, err + } + + // set the Plugins value to the compatible set, so the version + // doesn't need to be passed through to the ClientProtocol + // implementation. + c.config.Plugins = pluginSet + c.negotiatedVersion = version + c.logger.Debug("using plugin", "version", version) + + network, address, err := runner.PluginToHost(parts[2], parts[3]) + if err != nil { + return addr, err + } + + switch network { + case "tcp": + addr, err = net.ResolveTCPAddr("tcp", address) + case "unix": + addr, err = net.ResolveUnixAddr("unix", address) + default: + err = fmt.Errorf("Unknown address type: %s", address) + } + + // If we have a server type, then record that. We default to net/rpc + // for backwards compatibility. 
+ c.protocol = ProtocolNetRPC + if len(parts) >= 5 { + c.protocol = Protocol(parts[4]) + } + + found := false + for _, p := range c.config.AllowedProtocols { + if p == c.protocol { + found = true + break + } + } + if !found { + err = fmt.Errorf("Unsupported plugin protocol %q. Supported: %v", + c.protocol, c.config.AllowedProtocols) + return addr, err + } + + // See if we have a TLS certificate from the server. + // Checking if the length is > 50 rules out catching the unused "extra" + // data returned from some older implementations. + if len(parts) >= 6 && len(parts[5]) > 50 { + err := c.loadServerCert(parts[5]) + if err != nil { + return nil, fmt.Errorf("error parsing server cert: %s", err) + } + } + + if c.config.GRPCBrokerMultiplex && c.protocol == ProtocolGRPC { + if len(parts) <= 6 { + return nil, fmt.Errorf("%w; for Go plugins, you will need to update the "+ + "github.com/hashicorp/go-plugin dependency and recompile", ErrGRPCBrokerMuxNotSupported) + } + if muxSupported, err := strconv.ParseBool(parts[6]); err != nil { + return nil, fmt.Errorf("error parsing %q as a boolean for gRPC broker multiplexing support", parts[6]) + } else if !muxSupported { + return nil, ErrGRPCBrokerMuxNotSupported + } + } + } + + c.address = addr + return +} + +// loadServerCert is used by AutoMTLS to read an x.509 cert returned by the +// server, and load it as the RootCA and ClientCA for the client TLSConfig. +func (c *Client) loadServerCert(cert string) error { + certPool := x509.NewCertPool() + + asn1, err := base64.RawStdEncoding.DecodeString(cert) + if err != nil { + return err + } + + x509Cert, err := x509.ParseCertificate([]byte(asn1)) + if err != nil { + return err + } + + certPool.AddCert(x509Cert) + + c.config.TLSConfig.RootCAs = certPool + c.config.TLSConfig.ClientCAs = certPool + return nil +} + +func (c *Client) reattach() (net.Addr, error) { + reattachFunc := c.config.Reattach.ReattachFunc + // For backwards compatibility default to cmdrunner.ReattachFunc + if reattachFunc == nil { + reattachFunc = cmdrunner.ReattachFunc(c.config.Reattach.Pid, c.config.Reattach.Addr) + } + + r, err := reattachFunc() + if err != nil { + return nil, err + } + + // Create a context for when we kill + c.doneCtx, c.ctxCancel = context.WithCancel(context.Background()) + + c.clientWaitGroup.Add(1) + // Goroutine to mark exit status + go func(r runner.AttachedRunner) { + defer c.clientWaitGroup.Done() + + // ensure the context is cancelled when we're done + defer c.ctxCancel() + + // Wait for the process to die + r.Wait(context.Background()) + + // Log so we can see it + c.logger.Debug("reattached plugin process exited") + + // Mark it + c.l.Lock() + defer c.l.Unlock() + c.exited = true + }(r) + + // Set the address and protocol + c.address = c.config.Reattach.Addr + c.protocol = c.config.Reattach.Protocol + if c.protocol == "" { + // Default the protocol to net/rpc for backwards compatibility + c.protocol = ProtocolNetRPC + } + + if c.config.Reattach.Test { + c.negotiatedVersion = c.config.Reattach.ProtocolVersion + } else { + // If we're in test mode, we do NOT set the runner. This avoids the + // runner being killed (the only purpose we have for setting c.runner + // when reattaching), since in test mode the process is responsible for + // exiting on its own. + c.runner = r + } + + return c.address, nil +} + +// checkProtoVersion returns the negotiated version and PluginSet. +// This returns an error if the server returned an incompatible protocol +// version, or an invalid handshake response. 
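// As a rough, illustrative sketch (KVPluginV2, KVPluginV3, handshakeConfig and
// the "./kv-plugin" path are made-up names), a host can offer several protocol
// versions and let this negotiation pick one:
//
//	client := plugin.NewClient(&plugin.ClientConfig{
//		HandshakeConfig: handshakeConfig, // ProtocolVersion is optional when VersionedPlugins is set
//		VersionedPlugins: map[int]plugin.PluginSet{
//			2: {"kv": &KVPluginV2{}},
//			3: {"kv": &KVPluginV3{}},
//		},
//		Cmd: exec.Command("./kv-plugin"),
//	})
//
// After Start succeeds, NegotiatedVersion reports which of the offered
// versions the plugin accepted, and Dispense hands back an implementation
// from the matching PluginSet.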
+func (c *Client) checkProtoVersion(protoVersion string) (int, PluginSet, error) { + serverVersion, err := strconv.Atoi(protoVersion) + if err != nil { + return 0, nil, fmt.Errorf("Error parsing protocol version %q: %s", protoVersion, err) + } + + // record these for the error message + var clientVersions []int + + // all versions, including the legacy ProtocolVersion have been added to + // the versions set + for version, plugins := range c.config.VersionedPlugins { + clientVersions = append(clientVersions, version) + + if serverVersion != version { + continue + } + return version, plugins, nil + } + + return 0, nil, fmt.Errorf("Incompatible API version with plugin. "+ + "Plugin version: %d, Client versions: %d", serverVersion, clientVersions) +} + +// ReattachConfig returns the information that must be provided to NewClient +// to reattach to the plugin process that this client started. This is +// useful for plugins that detach from their parent process. +// +// If this returns nil then the process hasn't been started yet. Please +// call Start or Client before calling this. +// +// Clients who specified a RunnerFunc will need to populate their own +// ReattachFunc in the returned ReattachConfig before it can be used. +func (c *Client) ReattachConfig() *ReattachConfig { + c.l.Lock() + defer c.l.Unlock() + + if c.address == nil { + return nil + } + + if c.config.Cmd != nil && c.config.Cmd.Process == nil { + return nil + } + + // If we connected via reattach, just return the information as-is + if c.config.Reattach != nil { + return c.config.Reattach + } + + reattach := &ReattachConfig{ + Protocol: c.protocol, + Addr: c.address, + } + + if c.config.Cmd != nil && c.config.Cmd.Process != nil { + reattach.Pid = c.config.Cmd.Process.Pid + } + + return reattach +} + +// Protocol returns the protocol of server on the remote end. This will +// start the plugin process if it isn't already started. Errors from +// starting the plugin are surpressed and ProtocolInvalid is returned. It +// is recommended you call Start explicitly before calling Protocol to ensure +// no errors occur. +func (c *Client) Protocol() Protocol { + _, err := c.Start() + if err != nil { + return ProtocolInvalid + } + + return c.protocol +} + +func netAddrDialer(addr net.Addr) func(string, time.Duration) (net.Conn, error) { + return func(_ string, _ time.Duration) (net.Conn, error) { + // Connect to the client + conn, err := net.Dial(addr.Network(), addr.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + tcpConn.SetKeepAlive(true) + } + + return conn, nil + } +} + +// dialer is compatible with grpc.WithDialer and creates the connection +// to the plugin. +func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) { + muxer, err := c.getGRPCMuxer(c.address) + if err != nil { + return nil, err + } + + var conn net.Conn + if muxer.Enabled() { + conn, err = muxer.Dial() + if err != nil { + return nil, err + } + } else { + conn, err = netAddrDialer(c.address)("", timeout) + if err != nil { + return nil, err + } + } + + // If we have a TLS config we wrap our connection. We only do this + // for net/rpc since gRPC uses its own mechanism for TLS. 
+ if c.protocol == ProtocolNetRPC && c.config.TLSConfig != nil { + conn = tls.Client(conn, c.config.TLSConfig) + } + + return conn, nil +} + +func (c *Client) getGRPCMuxer(addr net.Addr) (*grpcmux.GRPCClientMuxer, error) { + if c.protocol != ProtocolGRPC || !c.config.GRPCBrokerMultiplex { + return nil, nil + } + + var err error + c.grpcMuxerOnce.Do(func() { + c.grpcMuxer, err = grpcmux.NewGRPCClientMuxer(c.logger, addr) + }) + if err != nil { + return nil, err + } + + return c.grpcMuxer, nil +} + +func (c *Client) logStderr(name string, r io.Reader) { + defer c.clientWaitGroup.Done() + defer c.stderrWaitGroup.Done() + l := c.logger.Named(filepath.Base(name)) + + reader := bufio.NewReaderSize(r, c.config.PluginLogBufferSize) + // continuation indicates the previous line was a prefix + continuation := false + + for { + line, isPrefix, err := reader.ReadLine() + switch { + case err == io.EOF: + return + case err != nil: + l.Error("reading plugin stderr", "error", err) + return + } + + c.config.Stderr.Write(line) + + // The line was longer than our max token size, so it's likely + // incomplete and won't unmarshal. + if isPrefix || continuation { + l.Debug(string(line)) + + // if we're finishing a continued line, add the newline back in + if !isPrefix { + c.config.Stderr.Write([]byte{'\n'}) + } + + continuation = isPrefix + continue + } + + c.config.Stderr.Write([]byte{'\n'}) + + entry, err := parseJSON(line) + // If output is not JSON format, print directly to Debug + if err != nil { + // Attempt to infer the desired log level from the commonly used + // string prefixes + switch line := string(line); { + case strings.HasPrefix(line, "[TRACE]"): + l.Trace(line) + case strings.HasPrefix(line, "[DEBUG]"): + l.Debug(line) + case strings.HasPrefix(line, "[INFO]"): + l.Info(line) + case strings.HasPrefix(line, "[WARN]"): + l.Warn(line) + case strings.HasPrefix(line, "[ERROR]"): + l.Error(line) + default: + l.Debug(line) + } + } else { + out := flattenKVPairs(entry.KVPairs) + + out = append(out, "timestamp", entry.Timestamp.Format(hclog.TimeFormat)) + switch hclog.LevelFromString(entry.Level) { + case hclog.Trace: + l.Trace(entry.Message, out...) + case hclog.Debug: + l.Debug(entry.Message, out...) + case hclog.Info: + l.Info(entry.Message, out...) + case hclog.Warn: + l.Warn(entry.Message, out...) + case hclog.Error: + l.Error(entry.Message, out...) + default: + // if there was no log level, it's likely this is unexpected + // json from something other than hclog, and we should output + // it verbatim. + l.Debug(string(line)) + } + } + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/constants.go b/vendor/github.com/hashicorp/go-plugin/constants.go new file mode 100644 index 000000000..e7f5bbe5f --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/constants.go @@ -0,0 +1,16 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +const ( + // EnvUnixSocketDir specifies the directory that _plugins_ should create unix + // sockets in. Does not affect client behavior. + EnvUnixSocketDir = "PLUGIN_UNIX_SOCKET_DIR" + + // EnvUnixSocketGroup specifies the owning, writable group to set for Unix + // sockets created by _plugins_. Does not affect client behavior. 
+ EnvUnixSocketGroup = "PLUGIN_UNIX_SOCKET_GROUP" + + envMultiplexGRPC = "PLUGIN_MULTIPLEX_GRPC" +) diff --git a/vendor/github.com/hashicorp/go-plugin/discover.go b/vendor/github.com/hashicorp/go-plugin/discover.go new file mode 100644 index 000000000..c5b96242b --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/discover.go @@ -0,0 +1,31 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "path/filepath" +) + +// Discover discovers plugins that are in a given directory. +// +// The directory doesn't need to be absolute. For example, "." will work fine. +// +// This currently assumes any file matching the glob is a plugin. +// In the future this may be smarter about checking that a file is +// executable and so on. +// +// TODO: test +func Discover(glob, dir string) ([]string, error) { + var err error + + // Make the directory absolute if it isn't already + if !filepath.IsAbs(dir) { + dir, err = filepath.Abs(dir) + if err != nil { + return nil, err + } + } + + return filepath.Glob(filepath.Join(dir, glob)) +} diff --git a/vendor/github.com/hashicorp/go-plugin/error.go b/vendor/github.com/hashicorp/go-plugin/error.go new file mode 100644 index 000000000..e62a21913 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/error.go @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +// This is a type that wraps error types so that they can be messaged +// across RPC channels. Since "error" is an interface, we can't always +// gob-encode the underlying structure. This is a valid error interface +// implementer that we will push across. +type BasicError struct { + Message string +} + +// NewBasicError is used to create a BasicError. +// +// err is allowed to be nil. +func NewBasicError(err error) *BasicError { + if err == nil { + return nil + } + + return &BasicError{err.Error()} +} + +func (e *BasicError) Error() string { + return e.Message +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_broker.go b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go new file mode 100644 index 000000000..5b17e37fe --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go @@ -0,0 +1,654 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "log" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/go-plugin/internal/grpcmux" + "github.com/hashicorp/go-plugin/internal/plugin" + "github.com/hashicorp/go-plugin/runner" + + "github.com/oklog/run" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +// streamer interface is used in the broker to send/receive connection +// information. +type streamer interface { + Send(*plugin.ConnInfo) error + Recv() (*plugin.ConnInfo, error) + Close() +} + +// sendErr is used to pass errors back during a send. +type sendErr struct { + i *plugin.ConnInfo + ch chan error +} + +// gRPCBrokerServer is used by the plugin to start a stream and to send +// connection information to/from the plugin. Implements GRPCBrokerServer and +// streamer interfaces. +type gRPCBrokerServer struct { + plugin.UnimplementedGRPCBrokerServer + + // send is used to send connection info to the gRPC stream. + send chan *sendErr + + // recv is used to receive connection info from the gRPC stream. + recv chan *plugin.ConnInfo + + // quit closes down the stream. 
+ quit chan struct{} + + // o is used to ensure we close the quit channel only once. + o sync.Once +} + +func newGRPCBrokerServer() *gRPCBrokerServer { + return &gRPCBrokerServer{ + send: make(chan *sendErr), + recv: make(chan *plugin.ConnInfo), + quit: make(chan struct{}), + } +} + +// StartStream implements the GRPCBrokerServer interface and will block until +// the quit channel is closed or the context reports Done. The stream will pass +// connection information to/from the client. +func (s *gRPCBrokerServer) StartStream(stream plugin.GRPCBroker_StartStreamServer) error { + doneCh := stream.Context().Done() + defer s.Close() + + // Proccess send stream + go func() { + for { + select { + case <-doneCh: + return + case <-s.quit: + return + case se := <-s.send: + err := stream.Send(se.i) + se.ch <- err + } + } + }() + + // Process receive stream + for { + i, err := stream.Recv() + if err != nil { + return err + } + select { + case <-doneCh: + return nil + case <-s.quit: + return nil + case s.recv <- i: + } + } + + return nil +} + +// Send is used by the GRPCBroker to pass connection information into the stream +// to the client. +func (s *gRPCBrokerServer) Send(i *plugin.ConnInfo) error { + ch := make(chan error) + defer close(ch) + + select { + case <-s.quit: + return errors.New("broker closed") + case s.send <- &sendErr{ + i: i, + ch: ch, + }: + } + + return <-ch +} + +// Recv is used by the GRPCBroker to pass connection information that has been +// sent from the client from the stream to the broker. +func (s *gRPCBrokerServer) Recv() (*plugin.ConnInfo, error) { + select { + case <-s.quit: + return nil, errors.New("broker closed") + case i := <-s.recv: + return i, nil + } +} + +// Close closes the quit channel, shutting down the stream. +func (s *gRPCBrokerServer) Close() { + s.o.Do(func() { + close(s.quit) + }) +} + +// gRPCBrokerClientImpl is used by the client to start a stream and to send +// connection information to/from the client. Implements GRPCBrokerClient and +// streamer interfaces. +type gRPCBrokerClientImpl struct { + // client is the underlying GRPC client used to make calls to the server. + client plugin.GRPCBrokerClient + + // send is used to send connection info to the gRPC stream. + send chan *sendErr + + // recv is used to receive connection info from the gRPC stream. + recv chan *plugin.ConnInfo + + // quit closes down the stream. + quit chan struct{} + + // o is used to ensure we close the quit channel only once. + o sync.Once +} + +func newGRPCBrokerClient(conn *grpc.ClientConn) *gRPCBrokerClientImpl { + return &gRPCBrokerClientImpl{ + client: plugin.NewGRPCBrokerClient(conn), + send: make(chan *sendErr), + recv: make(chan *plugin.ConnInfo), + quit: make(chan struct{}), + } +} + +// StartStream implements the GRPCBrokerClient interface and will block until +// the quit channel is closed or the context reports Done. The stream will pass +// connection information to/from the plugin. 
+func (s *gRPCBrokerClientImpl) StartStream() error { + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + defer s.Close() + + stream, err := s.client.StartStream(ctx) + if err != nil { + return err + } + doneCh := stream.Context().Done() + + go func() { + for { + select { + case <-doneCh: + return + case <-s.quit: + return + case se := <-s.send: + err := stream.Send(se.i) + se.ch <- err + } + } + }() + + for { + i, err := stream.Recv() + if err != nil { + return err + } + select { + case <-doneCh: + return nil + case <-s.quit: + return nil + case s.recv <- i: + } + } + + return nil +} + +// Send is used by the GRPCBroker to pass connection information into the stream +// to the plugin. +func (s *gRPCBrokerClientImpl) Send(i *plugin.ConnInfo) error { + ch := make(chan error) + defer close(ch) + + select { + case <-s.quit: + return errors.New("broker closed") + case s.send <- &sendErr{ + i: i, + ch: ch, + }: + } + + return <-ch +} + +// Recv is used by the GRPCBroker to pass connection information that has been +// sent from the plugin to the broker. +func (s *gRPCBrokerClientImpl) Recv() (*plugin.ConnInfo, error) { + select { + case <-s.quit: + return nil, errors.New("broker closed") + case i := <-s.recv: + return i, nil + } +} + +// Close closes the quit channel, shutting down the stream. +func (s *gRPCBrokerClientImpl) Close() { + s.o.Do(func() { + close(s.quit) + }) +} + +// GRPCBroker is responsible for brokering connections by unique ID. +// +// It is used by plugins to create multiple gRPC connections and data +// streams between the plugin process and the host process. +// +// This allows a plugin to request a channel with a specific ID to connect to +// or accept a connection from, and the broker handles the details of +// holding these channels open while they're being negotiated. +// +// The Plugin interface has access to these for both Server and Client. +// The broker can be used by either (optionally) to reserve and connect to +// new streams. This is useful for complex args and return values, +// or anything else you might need a data stream for. +type GRPCBroker struct { + nextId uint32 + streamer streamer + tls *tls.Config + doneCh chan struct{} + o sync.Once + + clientStreams map[uint32]*gRPCBrokerPending + serverStreams map[uint32]*gRPCBrokerPending + + unixSocketCfg UnixSocketConfig + addrTranslator runner.AddrTranslator + + dialMutex sync.Mutex + + muxer grpcmux.GRPCMuxer + + sync.Mutex +} + +type gRPCBrokerPending struct { + ch chan *plugin.ConnInfo + doneCh chan struct{} + once sync.Once +} + +func newGRPCBroker(s streamer, tls *tls.Config, unixSocketCfg UnixSocketConfig, addrTranslator runner.AddrTranslator, muxer grpcmux.GRPCMuxer) *GRPCBroker { + return &GRPCBroker{ + streamer: s, + tls: tls, + doneCh: make(chan struct{}), + + clientStreams: make(map[uint32]*gRPCBrokerPending), + serverStreams: make(map[uint32]*gRPCBrokerPending), + muxer: muxer, + + unixSocketCfg: unixSocketCfg, + addrTranslator: addrTranslator, + } +} + +// Accept accepts a connection by ID. +// +// This should not be called multiple times with the same ID at one time. 
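// As a rough, illustrative sketch of the brokering pattern (the gRPC service
// registered on the server and the generated yourpb client are assumptions;
// go-plugin only brokers the connection between the two processes):
//
//	// One side reserves an ID and serves a gRPC server on it.
//	id := broker.NextId()
//	go broker.AcceptAndServe(id, func(opts []grpc.ServerOption) *grpc.Server {
//		s := grpc.NewServer(opts...)
//		// register your own gRPC service on s here
//		return s
//	})
//	// ...pass id to the other side as a field of a normal plugin RPC...
//
//	// The other side dials the brokered connection by that same ID.
//	conn, err := broker.Dial(id)
//	if err != nil {
//		return err
//	}
//	defer conn.Close()
//	client := yourpb.NewYourServiceClient(conn)
//
// AcceptAndServe blocks while serving, which is why it is usually started in
// its own goroutine, and brokered streams should be opened sparingly, as the
// AcceptAndServe documentation below notes.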
+func (b *GRPCBroker) Accept(id uint32) (net.Listener, error) { + if b.muxer.Enabled() { + p := b.getServerStream(id) + go func() { + err := b.listenForKnocks(id) + if err != nil { + log.Printf("[ERR]: error listening for knocks, id: %d, error: %s", id, err) + } + }() + + ln, err := b.muxer.Listener(id, p.doneCh) + if err != nil { + return nil, err + } + + ln = &rmListener{ + Listener: ln, + close: func() error { + // We could have multiple listeners on the same ID, so use sync.Once + // for closing doneCh to ensure we don't get a panic. + p.once.Do(func() { + close(p.doneCh) + }) + + b.Lock() + defer b.Unlock() + + // No longer need to listen for knocks once the listener is closed. + delete(b.serverStreams, id) + + return nil + }, + } + + return ln, nil + } + + listener, err := serverListener(b.unixSocketCfg) + if err != nil { + return nil, err + } + + advertiseNet := listener.Addr().Network() + advertiseAddr := listener.Addr().String() + if b.addrTranslator != nil { + advertiseNet, advertiseAddr, err = b.addrTranslator.HostToPlugin(advertiseNet, advertiseAddr) + if err != nil { + return nil, err + } + } + err = b.streamer.Send(&plugin.ConnInfo{ + ServiceId: id, + Network: advertiseNet, + Address: advertiseAddr, + }) + if err != nil { + return nil, err + } + + return listener, nil +} + +// AcceptAndServe is used to accept a specific stream ID and immediately +// serve a gRPC server on that stream ID. This is used to easily serve +// complex arguments. Each AcceptAndServe call opens a new listener socket and +// sends the connection info down the stream to the dialer. Since a new +// connection is opened every call, these calls should be used sparingly. +// Multiple gRPC server implementations can be registered to a single +// AcceptAndServe call. +func (b *GRPCBroker) AcceptAndServe(id uint32, newGRPCServer func([]grpc.ServerOption) *grpc.Server) { + ln, err := b.Accept(id) + if err != nil { + log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) + return + } + defer ln.Close() + + var opts []grpc.ServerOption + if b.tls != nil { + opts = []grpc.ServerOption{grpc.Creds(credentials.NewTLS(b.tls))} + } + + server := newGRPCServer(opts) + + // Here we use a run group to close this goroutine if the server is shutdown + // or the broker is shutdown. + var g run.Group + { + // Serve on the listener, if shutting down call GracefulStop. + g.Add(func() error { + return server.Serve(ln) + }, func(err error) { + server.GracefulStop() + }) + } + { + // block on the closeCh or the doneCh. If we are shutting down close the + // closeCh. + closeCh := make(chan struct{}) + g.Add(func() error { + select { + case <-b.doneCh: + case <-closeCh: + } + return nil + }, func(err error) { + close(closeCh) + }) + } + + // Block until we are done + g.Run() +} + +// Close closes the stream and all servers. +func (b *GRPCBroker) Close() error { + b.streamer.Close() + b.o.Do(func() { + close(b.doneCh) + }) + return nil +} + +func (b *GRPCBroker) listenForKnocks(id uint32) error { + p := b.getServerStream(id) + for { + select { + case msg := <-p.ch: + // Shouldn't be possible. + if msg.ServiceId != id { + return fmt.Errorf("knock received with wrong service ID; expected %d but got %d", id, msg.ServiceId) + } + + // Also shouldn't be possible. + if msg.Knock == nil || !msg.Knock.Knock || msg.Knock.Ack { + return fmt.Errorf("knock received for service ID %d with incorrect values; knock=%+v", id, msg.Knock) + } + + // Successful knock, open the door for the given ID. 
+ var ackError string + err := b.muxer.AcceptKnock(id) + if err != nil { + ackError = err.Error() + } + + // Send back an acknowledgement to allow the client to start dialling. + err = b.streamer.Send(&plugin.ConnInfo{ + ServiceId: id, + Knock: &plugin.ConnInfo_Knock{ + Knock: true, + Ack: true, + Error: ackError, + }, + }) + if err != nil { + return fmt.Errorf("error sending back knock acknowledgement: %w", err) + } + case <-p.doneCh: + return nil + } + } +} + +func (b *GRPCBroker) knock(id uint32) error { + // Send a knock. + err := b.streamer.Send(&plugin.ConnInfo{ + ServiceId: id, + Knock: &plugin.ConnInfo_Knock{ + Knock: true, + }, + }) + if err != nil { + return err + } + + // Wait for the ack. + p := b.getClientStream(id) + select { + case msg := <-p.ch: + if msg.ServiceId != id { + return fmt.Errorf("handshake failed for multiplexing on id %d; got response for %d", id, msg.ServiceId) + } + if msg.Knock == nil || !msg.Knock.Knock || !msg.Knock.Ack { + return fmt.Errorf("handshake failed for multiplexing on id %d; expected knock and ack, but got %+v", id, msg.Knock) + } + if msg.Knock.Error != "" { + return fmt.Errorf("failed to knock for id %d: %s", id, msg.Knock.Error) + } + case <-time.After(5 * time.Second): + return fmt.Errorf("timeout waiting for multiplexing knock handshake on id %d", id) + } + + return nil +} + +func (b *GRPCBroker) muxDial(id uint32) func(string, time.Duration) (net.Conn, error) { + return func(string, time.Duration) (net.Conn, error) { + b.dialMutex.Lock() + defer b.dialMutex.Unlock() + + // Tell the other side the listener ID it should give the next stream to. + err := b.knock(id) + if err != nil { + return nil, fmt.Errorf("failed to knock before dialling client: %w", err) + } + + conn, err := b.muxer.Dial() + if err != nil { + return nil, err + } + + return conn, nil + } +} + +// Dial opens a connection by ID. +func (b *GRPCBroker) Dial(id uint32) (conn *grpc.ClientConn, err error) { + if b.muxer.Enabled() { + return dialGRPCConn(b.tls, b.muxDial(id)) + } + + var c *plugin.ConnInfo + + // Open the stream + p := b.getClientStream(id) + select { + case c = <-p.ch: + close(p.doneCh) + case <-time.After(5 * time.Second): + return nil, fmt.Errorf("timeout waiting for connection info") + } + + network, address := c.Network, c.Address + if b.addrTranslator != nil { + network, address, err = b.addrTranslator.PluginToHost(network, address) + if err != nil { + return nil, err + } + } + + var addr net.Addr + switch network { + case "tcp": + addr, err = net.ResolveTCPAddr("tcp", address) + case "unix": + addr, err = net.ResolveUnixAddr("unix", address) + default: + err = fmt.Errorf("Unknown address type: %s", c.Address) + } + if err != nil { + return nil, err + } + + return dialGRPCConn(b.tls, netAddrDialer(addr)) +} + +// NextId returns a unique ID to use next. +// +// It is possible for very long-running plugin hosts to wrap this value, +// though it would require a very large amount of calls. In practice +// we've never seen it happen. +func (m *GRPCBroker) NextId() uint32 { + return atomic.AddUint32(&m.nextId, 1) +} + +// Run starts the brokering and should be executed in a goroutine, since it +// blocks forever, or until the session closes. +// +// Uses of GRPCBroker never need to call this. It is called internally by +// the plugin host/client. 
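+//
+// Internally, Run receives ConnInfo messages from the broker stream and hands
+// each one to a per-ID pending buffer: knock requests are routed to the
+// server-side buffer for that ID (only used when multiplexing is enabled),
+// while connection info and knock acknowledgements are routed to the
+// client-side buffer that a pending Dial or knock call is waiting on.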
+func (m *GRPCBroker) Run() { + for { + msg, err := m.streamer.Recv() + if err != nil { + // Once we receive an error, just exit + break + } + + // Initialize the waiter + var p *gRPCBrokerPending + if msg.Knock != nil && msg.Knock.Knock && !msg.Knock.Ack { + p = m.getServerStream(msg.ServiceId) + // The server side doesn't close the channel immediately as it needs + // to continuously listen for knocks. + } else { + p = m.getClientStream(msg.ServiceId) + go m.timeoutWait(msg.ServiceId, p) + } + select { + case p.ch <- msg: + default: + } + } +} + +// getClientStream is a buffer to receive new connection info and knock acks +// by stream ID. +func (m *GRPCBroker) getClientStream(id uint32) *gRPCBrokerPending { + m.Lock() + defer m.Unlock() + + p, ok := m.clientStreams[id] + if ok { + return p + } + + m.clientStreams[id] = &gRPCBrokerPending{ + ch: make(chan *plugin.ConnInfo, 1), + doneCh: make(chan struct{}), + } + return m.clientStreams[id] +} + +// getServerStream is a buffer to receive knocks to a multiplexed stream ID +// that its side is listening on. Not used unless multiplexing is enabled. +func (m *GRPCBroker) getServerStream(id uint32) *gRPCBrokerPending { + m.Lock() + defer m.Unlock() + + p, ok := m.serverStreams[id] + if ok { + return p + } + + m.serverStreams[id] = &gRPCBrokerPending{ + ch: make(chan *plugin.ConnInfo, 1), + doneCh: make(chan struct{}), + } + return m.serverStreams[id] +} + +func (m *GRPCBroker) timeoutWait(id uint32, p *gRPCBrokerPending) { + // Wait for the stream to either be picked up and connected, or + // for a timeout. + select { + case <-p.doneCh: + case <-time.After(5 * time.Second): + } + + m.Lock() + defer m.Unlock() + + // Delete the stream so no one else can grab it + delete(m.clientStreams, id) +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go new file mode 100644 index 000000000..627649d83 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_client.go @@ -0,0 +1,134 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "context" + "crypto/tls" + "fmt" + "math" + "net" + "time" + + "github.com/hashicorp/go-plugin/internal/plugin" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/health/grpc_health_v1" +) + +func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, error), dialOpts ...grpc.DialOption) (*grpc.ClientConn, error) { + // Build dialing options. + opts := make([]grpc.DialOption, 0) + + // We use a custom dialer so that we can connect over unix domain sockets. + opts = append(opts, grpc.WithDialer(dialer)) + + // Fail right away + opts = append(opts, grpc.FailOnNonTempDialError(true)) + + // If we have no TLS configuration set, we need to explicitly tell grpc + // that we're connecting with an insecure connection. + if tls == nil { + opts = append(opts, grpc.WithInsecure()) + } else { + opts = append(opts, grpc.WithTransportCredentials( + credentials.NewTLS(tls))) + } + + opts = append(opts, + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32)), + grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(math.MaxInt32))) + + // Add our custom options if we have any + opts = append(opts, dialOpts...) + + // Connect. Note the first parameter is unused because we use a custom + // dialer that has the state to see the address. + conn, err := grpc.Dial("unused", opts...) 
+ if err != nil { + return nil, err + } + + return conn, nil +} + +// newGRPCClient creates a new GRPCClient. The Client argument is expected +// to be successfully started already with a lock held. +func newGRPCClient(doneCtx context.Context, c *Client) (*GRPCClient, error) { + conn, err := dialGRPCConn(c.config.TLSConfig, c.dialer, c.config.GRPCDialOptions...) + if err != nil { + return nil, err + } + + muxer, err := c.getGRPCMuxer(c.address) + if err != nil { + return nil, err + } + + // Start the broker. + brokerGRPCClient := newGRPCBrokerClient(conn) + broker := newGRPCBroker(brokerGRPCClient, c.config.TLSConfig, c.unixSocketCfg, c.runner, muxer) + go broker.Run() + go brokerGRPCClient.StartStream() + + // Start the stdio client + stdioClient, err := newGRPCStdioClient(doneCtx, c.logger.Named("stdio"), conn) + if err != nil { + return nil, err + } + go stdioClient.Run(c.config.SyncStdout, c.config.SyncStderr) + + cl := &GRPCClient{ + Conn: conn, + Plugins: c.config.Plugins, + doneCtx: doneCtx, + broker: broker, + controller: plugin.NewGRPCControllerClient(conn), + } + + return cl, nil +} + +// GRPCClient connects to a GRPCServer over gRPC to dispense plugin types. +type GRPCClient struct { + Conn *grpc.ClientConn + Plugins map[string]Plugin + + doneCtx context.Context + broker *GRPCBroker + + controller plugin.GRPCControllerClient +} + +// ClientProtocol impl. +func (c *GRPCClient) Close() error { + c.broker.Close() + c.controller.Shutdown(c.doneCtx, &plugin.Empty{}) + return c.Conn.Close() +} + +// ClientProtocol impl. +func (c *GRPCClient) Dispense(name string) (interface{}, error) { + raw, ok := c.Plugins[name] + if !ok { + return nil, fmt.Errorf("unknown plugin type: %s", name) + } + + p, ok := raw.(GRPCPlugin) + if !ok { + return nil, fmt.Errorf("plugin %q doesn't support gRPC", name) + } + + return p.GRPCClient(c.doneCtx, c.broker, c.Conn) +} + +// ClientProtocol impl. +func (c *GRPCClient) Ping() error { + client := grpc_health_v1.NewHealthClient(c.Conn) + _, err := client.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{ + Service: GRPCServiceName, + }) + + return err +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_controller.go b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go new file mode 100644 index 000000000..2085356cd --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go @@ -0,0 +1,26 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "context" + + "github.com/hashicorp/go-plugin/internal/plugin" +) + +// GRPCControllerServer handles shutdown calls to terminate the server when the +// plugin client is closed. +type grpcControllerServer struct { + server *GRPCServer +} + +// Shutdown stops the grpc server. It first will attempt a graceful stop, then a +// full stop on the server. +func (s *grpcControllerServer) Shutdown(ctx context.Context, _ *plugin.Empty) (*plugin.Empty, error) { + resp := &plugin.Empty{} + + // TODO: figure out why GracefullStop doesn't work. + s.server.Stop() + return resp, nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_server.go b/vendor/github.com/hashicorp/go-plugin/grpc_server.go new file mode 100644 index 000000000..a5f40c7f0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_server.go @@ -0,0 +1,167 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "net" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/grpcmux" + "github.com/hashicorp/go-plugin/internal/plugin" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/reflection" +) + +// GRPCServiceName is the name of the service that the health check should +// return as passing. +const GRPCServiceName = "plugin" + +// DefaultGRPCServer can be used with the "GRPCServer" field for Server +// as a default factory method to create a gRPC server with no extra options. +func DefaultGRPCServer(opts []grpc.ServerOption) *grpc.Server { + return grpc.NewServer(opts...) +} + +// GRPCServer is a ServerType implementation that serves plugins over +// gRPC. This allows plugins to easily be written for other languages. +// +// The GRPCServer outputs a custom configuration as a base64-encoded +// JSON structure represented by the GRPCServerConfig config structure. +type GRPCServer struct { + // Plugins are the list of plugins to serve. + Plugins map[string]Plugin + + // Server is the actual server that will accept connections. This + // will be used for plugin registration as well. + Server func([]grpc.ServerOption) *grpc.Server + + // TLS should be the TLS configuration if available. If this is nil, + // the connection will not have transport security. + TLS *tls.Config + + // DoneCh is the channel that is closed when this server has exited. + DoneCh chan struct{} + + // Stdout/StderrLis are the readers for stdout/stderr that will be copied + // to the stdout/stderr connection that is output. + Stdout io.Reader + Stderr io.Reader + + config GRPCServerConfig + server *grpc.Server + broker *GRPCBroker + stdioServer *grpcStdioServer + + logger hclog.Logger + + muxer *grpcmux.GRPCServerMuxer +} + +// ServerProtocol impl. +func (s *GRPCServer) Init() error { + // Create our server + var opts []grpc.ServerOption + if s.TLS != nil { + opts = append(opts, grpc.Creds(credentials.NewTLS(s.TLS))) + } + s.server = s.Server(opts) + + // Register the health service + healthCheck := health.NewServer() + healthCheck.SetServingStatus( + GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING) + grpc_health_v1.RegisterHealthServer(s.server, healthCheck) + + // Register the reflection service + reflection.Register(s.server) + + // Register the broker service + brokerServer := newGRPCBrokerServer() + plugin.RegisterGRPCBrokerServer(s.server, brokerServer) + s.broker = newGRPCBroker(brokerServer, s.TLS, unixSocketConfigFromEnv(), nil, s.muxer) + go s.broker.Run() + + // Register the controller + controllerServer := &grpcControllerServer{server: s} + plugin.RegisterGRPCControllerServer(s.server, controllerServer) + + // Register the stdio service + s.stdioServer = newGRPCStdioServer(s.logger, s.Stdout, s.Stderr) + plugin.RegisterGRPCStdioServer(s.server, s.stdioServer) + + // Register all our plugins onto the gRPC server. + for k, raw := range s.Plugins { + p, ok := raw.(GRPCPlugin) + if !ok { + return fmt.Errorf("%q is not a GRPC-compatible plugin", k) + } + + if err := p.GRPCServer(s.broker, s.server); err != nil { + return fmt.Errorf("error registering %q: %s", k, err) + } + } + + return nil +} + +// Stop calls Stop on the underlying grpc.Server and Close on the underlying +// grpc.Broker if present. 
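+//
+// Stop is also what the GRPCController service invokes when the host client
+// shuts the plugin down (see grpcControllerServer.Shutdown).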
+func (s *GRPCServer) Stop() { + s.server.Stop() + + if s.broker != nil { + s.broker.Close() + s.broker = nil + } +} + +// GracefulStop calls GracefulStop on the underlying grpc.Server and Close on +// the underlying grpc.Broker if present. +func (s *GRPCServer) GracefulStop() { + s.server.GracefulStop() + + if s.broker != nil { + s.broker.Close() + s.broker = nil + } +} + +// Config is the GRPCServerConfig encoded as JSON then base64. +func (s *GRPCServer) Config() string { + // Create a buffer that will contain our final contents + var buf bytes.Buffer + + // Wrap the base64 encoding with JSON encoding. + if err := json.NewEncoder(&buf).Encode(s.config); err != nil { + // We panic since ths shouldn't happen under any scenario. We + // carefully control the structure being encoded here and it should + // always be successful. + panic(err) + } + + return buf.String() +} + +func (s *GRPCServer) Serve(lis net.Listener) { + defer close(s.DoneCh) + err := s.server.Serve(lis) + if err != nil { + s.logger.Error("grpc server", "error", err) + } +} + +// GRPCServerConfig is the extra configuration passed along for consumers +// to facilitate using GRPC plugins. +type GRPCServerConfig struct { + StdoutAddr string `json:"stdout_addr"` + StderrAddr string `json:"stderr_addr"` +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go b/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go new file mode 100644 index 000000000..ae06c1163 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go @@ -0,0 +1,210 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "bufio" + "bytes" + "context" + "io" + + empty "github.com/golang/protobuf/ptypes/empty" + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/plugin" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// grpcStdioBuffer is the buffer size we try to fill when sending a chunk of +// stdio data. This is currently 1 KB for no reason other than that seems like +// enough (stdio data isn't that common) and is fairly low. +const grpcStdioBuffer = 1 * 1024 + +// grpcStdioServer implements the Stdio service and streams stdiout/stderr. +type grpcStdioServer struct { + stdoutCh <-chan []byte + stderrCh <-chan []byte +} + +// newGRPCStdioServer creates a new grpcStdioServer and starts the stream +// copying for the given out and err readers. +// +// This must only be called ONCE per srcOut, srcErr. +func newGRPCStdioServer(log hclog.Logger, srcOut, srcErr io.Reader) *grpcStdioServer { + stdoutCh := make(chan []byte) + stderrCh := make(chan []byte) + + // Begin copying the streams + go copyChan(log, stdoutCh, srcOut) + go copyChan(log, stderrCh, srcErr) + + // Construct our server + return &grpcStdioServer{ + stdoutCh: stdoutCh, + stderrCh: stderrCh, + } +} + +// StreamStdio streams our stdout/err as the response. +func (s *grpcStdioServer) StreamStdio( + _ *empty.Empty, + srv plugin.GRPCStdio_StreamStdioServer, +) error { + // Share the same data value between runs. Sending this over the wire + // marshals it so we can reuse this. 
+ var data plugin.StdioData + + for { + // Read our data + select { + case data.Data = <-s.stdoutCh: + data.Channel = plugin.StdioData_STDOUT + + case data.Data = <-s.stderrCh: + data.Channel = plugin.StdioData_STDERR + + case <-srv.Context().Done(): + return nil + } + + // Not sure if this is possible, but if we somehow got here and + // we didn't populate any data at all, then just continue. + if len(data.Data) == 0 { + continue + } + + // Send our data to the client. + if err := srv.Send(&data); err != nil { + return err + } + } +} + +// grpcStdioClient wraps the stdio service as a client to copy +// the stdio data to output writers. +type grpcStdioClient struct { + log hclog.Logger + stdioClient plugin.GRPCStdio_StreamStdioClient +} + +// newGRPCStdioClient creates a grpcStdioClient. This will perform the +// initial connection to the stdio service. If the stdio service is unavailable +// then this will be a no-op. This allows this to work without error for +// plugins that don't support this. +func newGRPCStdioClient( + ctx context.Context, + log hclog.Logger, + conn *grpc.ClientConn, +) (*grpcStdioClient, error) { + client := plugin.NewGRPCStdioClient(conn) + + // Connect immediately to the endpoint + stdioClient, err := client.StreamStdio(ctx, &empty.Empty{}) + + // If we get an Unavailable or Unimplemented error, this means that the plugin isn't + // updated and linking to the latest version of go-plugin that supports + // this. We fall back to the previous behavior of just not syncing anything. + if status.Code(err) == codes.Unavailable || status.Code(err) == codes.Unimplemented { + log.Warn("stdio service not available, stdout/stderr syncing unavailable") + stdioClient = nil + err = nil + } + if err != nil { + return nil, err + } + + return &grpcStdioClient{ + log: log, + stdioClient: stdioClient, + }, nil +} + +// Run starts the loop that receives stdio data and writes it to the given +// writers. This blocks and should be run in a goroutine. +func (c *grpcStdioClient) Run(stdout, stderr io.Writer) { + // This will be nil if stdio is not supported by the plugin + if c.stdioClient == nil { + c.log.Warn("stdio service unavailable, run will do nothing") + return + } + + for { + c.log.Trace("waiting for stdio data") + data, err := c.stdioClient.Recv() + if err != nil { + if err == io.EOF || + status.Code(err) == codes.Unavailable || + status.Code(err) == codes.Canceled || + status.Code(err) == codes.Unimplemented || + err == context.Canceled { + c.log.Debug("received EOF, stopping recv loop", "err", err) + return + } + + c.log.Error("error receiving data", "err", err) + return + } + + // Determine our output writer based on channel + var w io.Writer + switch data.Channel { + case plugin.StdioData_STDOUT: + w = stdout + + case plugin.StdioData_STDERR: + w = stderr + + default: + c.log.Warn("unknown channel, dropping", "channel", data.Channel) + continue + } + + // Write! In the event of an error we just continue. + if c.log.IsTrace() { + c.log.Trace("received data", "channel", data.Channel.String(), "len", len(data.Data)) + } + if _, err := io.Copy(w, bytes.NewReader(data.Data)); err != nil { + c.log.Error("failed to copy all bytes", "err", err) + } + } +} + +// copyChan copies an io.Reader into a channel. +func copyChan(log hclog.Logger, dst chan<- []byte, src io.Reader) { + bufsrc := bufio.NewReader(src) + + for { + // Make our data buffer. We allocate a new one per loop iteration + // so that we can send it over the channel. 
+ var data [1024]byte + + // Read the data, this will block until data is available + n, err := bufsrc.Read(data[:]) + + // We have to check if we have data BEFORE err != nil. The bufio + // docs guarantee n == 0 on EOF but its better to be safe here. + if n > 0 { + // We have data! Send it on the channel. This will block if there + // is no reader on the other side. We expect that go-plugin will + // connect immediately to the stdio server to drain this so we want + // this block to happen for backpressure. + dst <- data[:n] + } + + // If we hit EOF we're done copying + if err == io.EOF { + log.Debug("stdio EOF, exiting copy loop") + return + } + + // Any other error we just exit the loop. We don't expect there to + // be errors since our use case for this is reading/writing from + // a in-process pipe (os.Pipe). + if err != nil { + log.Warn("error copying stdio data, stopping copy", "err", err) + return + } + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/addr_translator.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/addr_translator.go new file mode 100644 index 000000000..1854d2dd5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/addr_translator.go @@ -0,0 +1,16 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cmdrunner + +// addrTranslator implements stateless identity functions, as the host and plugin +// run in the same context wrt Unix and network addresses. +type addrTranslator struct{} + +func (*addrTranslator) PluginToHost(pluginNet, pluginAddr string) (string, string, error) { + return pluginNet, pluginAddr, nil +} + +func (*addrTranslator) HostToPlugin(hostNet, hostAddr string) (string, string, error) { + return hostNet, hostAddr, nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_reattach.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_reattach.go new file mode 100644 index 000000000..dce1a86a8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_reattach.go @@ -0,0 +1,63 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cmdrunner + +import ( + "context" + "fmt" + "net" + "os" + + "github.com/hashicorp/go-plugin/runner" +) + +// ReattachFunc returns a function that allows reattaching to a plugin running +// as a plain process. The process may or may not be a child process. +func ReattachFunc(pid int, addr net.Addr) runner.ReattachFunc { + return func() (runner.AttachedRunner, error) { + p, err := os.FindProcess(pid) + if err != nil { + // On Unix systems, FindProcess never returns an error. + // On Windows, for non-existent pids it returns: + // os.SyscallError - 'OpenProcess: the paremter is incorrect' + return nil, ErrProcessNotFound + } + + // Attempt to connect to the addr since on Unix systems FindProcess + // doesn't actually return an error if it can't find the process. + conn, err := net.Dial(addr.Network(), addr.String()) + if err != nil { + p.Kill() + return nil, ErrProcessNotFound + } + conn.Close() + + return &CmdAttachedRunner{ + pid: pid, + process: p, + }, nil + } +} + +// CmdAttachedRunner is mostly a subset of CmdRunner, except the Wait function +// does not assume the process is a child of the host process, and so uses a +// different implementation to wait on the process. 
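+//
+// Because the process is not necessarily a child of this one, Wait cannot use
+// os/exec's Wait and instead polls the OS for liveness via pidWait (roughly
+// once per second) until the process goes away.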
+type CmdAttachedRunner struct { + pid int + process *os.Process + + addrTranslator +} + +func (c *CmdAttachedRunner) Wait(_ context.Context) error { + return pidWait(c.pid) +} + +func (c *CmdAttachedRunner) Kill(_ context.Context) error { + return c.process.Kill() +} + +func (c *CmdAttachedRunner) ID() string { + return fmt.Sprintf("%d", c.pid) +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_runner.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_runner.go new file mode 100644 index 000000000..b26fea928 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_runner.go @@ -0,0 +1,129 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cmdrunner + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "os/exec" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/runner" +) + +var ( + _ runner.Runner = (*CmdRunner)(nil) + + // ErrProcessNotFound is returned when a client is instantiated to + // reattach to an existing process and it isn't found. + ErrProcessNotFound = errors.New("Reattachment process not found") +) + +const unrecognizedRemotePluginMessage = `This usually means + the plugin was not compiled for this architecture, + the plugin is missing dynamic-link libraries necessary to run, + the plugin is not executable by this process due to file permissions, or + the plugin failed to negotiate the initial go-plugin protocol handshake +%s` + +// CmdRunner implements the runner.Runner interface. It mostly just passes through +// to exec.Cmd methods. +type CmdRunner struct { + logger hclog.Logger + cmd *exec.Cmd + + stdout io.ReadCloser + stderr io.ReadCloser + + // Cmd info is persisted early, since the process information will be removed + // after Kill is called. + path string + pid int + + addrTranslator +} + +// NewCmdRunner returns an implementation of runner.Runner for running a plugin +// as a subprocess. It must be passed a cmd that hasn't yet been started. +func NewCmdRunner(logger hclog.Logger, cmd *exec.Cmd) (*CmdRunner, error) { + stdout, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + + stderr, err := cmd.StderrPipe() + if err != nil { + return nil, err + } + + return &CmdRunner{ + logger: logger, + cmd: cmd, + stdout: stdout, + stderr: stderr, + path: cmd.Path, + }, nil +} + +func (c *CmdRunner) Start(_ context.Context) error { + c.logger.Debug("starting plugin", "path", c.cmd.Path, "args", c.cmd.Args) + err := c.cmd.Start() + if err != nil { + return err + } + + c.pid = c.cmd.Process.Pid + c.logger.Debug("plugin started", "path", c.path, "pid", c.pid) + return nil +} + +func (c *CmdRunner) Wait(_ context.Context) error { + return c.cmd.Wait() +} + +func (c *CmdRunner) Kill(_ context.Context) error { + if c.cmd.Process != nil { + err := c.cmd.Process.Kill() + // Swallow ErrProcessDone, we support calling Kill multiple times. + if !errors.Is(err, os.ErrProcessDone) { + return err + } + return nil + } + + return nil +} + +func (c *CmdRunner) Stdout() io.ReadCloser { + return c.stdout +} + +func (c *CmdRunner) Stderr() io.ReadCloser { + return c.stderr +} + +func (c *CmdRunner) Name() string { + return c.path +} + +func (c *CmdRunner) ID() string { + return fmt.Sprintf("%d", c.pid) +} + +// peTypes is a list of Portable Executable (PE) machine types from https://learn.microsoft.com/en-us/windows/win32/debug/pe-format +// mapped to GOARCH types. It is not comprehensive, and only includes machine types that Go supports. 
+var peTypes = map[uint16]string{ + 0x14c: "386", + 0x1c0: "arm", + 0x6264: "loong64", + 0x8664: "amd64", + 0xaa64: "arm64", +} + +func (c *CmdRunner) Diagnose(_ context.Context) string { + return fmt.Sprintf(unrecognizedRemotePluginMessage, additionalNotesAboutCommand(c.cmd.Path)) +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_unix.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_unix.go new file mode 100644 index 000000000..ce04cfebc --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_unix.go @@ -0,0 +1,70 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !windows +// +build !windows + +package cmdrunner + +import ( + "debug/elf" + "debug/macho" + "debug/pe" + "fmt" + "os" + "os/user" + "runtime" + "strconv" + "syscall" +) + +// additionalNotesAboutCommand tries to get additional information about a command that might help diagnose +// why it won't run correctly. It runs as a best effort only. +func additionalNotesAboutCommand(path string) string { + notes := "" + stat, err := os.Stat(path) + if err != nil { + return notes + } + + notes += "\nAdditional notes about plugin:\n" + notes += fmt.Sprintf(" Path: %s\n", path) + notes += fmt.Sprintf(" Mode: %s\n", stat.Mode()) + statT, ok := stat.Sys().(*syscall.Stat_t) + if ok { + currentUsername := "?" + if u, err := user.LookupId(strconv.FormatUint(uint64(os.Getuid()), 10)); err == nil { + currentUsername = u.Username + } + currentGroup := "?" + if g, err := user.LookupGroupId(strconv.FormatUint(uint64(os.Getgid()), 10)); err == nil { + currentGroup = g.Name + } + username := "?" + if u, err := user.LookupId(strconv.FormatUint(uint64(statT.Uid), 10)); err == nil { + username = u.Username + } + group := "?" + if g, err := user.LookupGroupId(strconv.FormatUint(uint64(statT.Gid), 10)); err == nil { + group = g.Name + } + notes += fmt.Sprintf(" Owner: %d [%s] (current: %d [%s])\n", statT.Uid, username, os.Getuid(), currentUsername) + notes += fmt.Sprintf(" Group: %d [%s] (current: %d [%s])\n", statT.Gid, group, os.Getgid(), currentGroup) + } + + if elfFile, err := elf.Open(path); err == nil { + defer elfFile.Close() + notes += fmt.Sprintf(" ELF architecture: %s (current architecture: %s)\n", elfFile.Machine, runtime.GOARCH) + } else if machoFile, err := macho.Open(path); err == nil { + defer machoFile.Close() + notes += fmt.Sprintf(" MachO architecture: %s (current architecture: %s)\n", machoFile.Cpu, runtime.GOARCH) + } else if peFile, err := pe.Open(path); err == nil { + defer peFile.Close() + machine, ok := peTypes[peFile.Machine] + if !ok { + machine = "unknown" + } + notes += fmt.Sprintf(" PE architecture: %s (current architecture: %s)\n", machine, runtime.GOARCH) + } + return notes +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_windows.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_windows.go new file mode 100644 index 000000000..39c51dd1e --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_windows.go @@ -0,0 +1,46 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build windows +// +build windows + +package cmdrunner + +import ( + "debug/elf" + "debug/macho" + "debug/pe" + "fmt" + "os" + "runtime" +) + +// additionalNotesAboutCommand tries to get additional information about a command that might help diagnose +// why it won't run correctly. It runs as a best effort only. 
+func additionalNotesAboutCommand(path string) string { + notes := "" + stat, err := os.Stat(path) + if err != nil { + return notes + } + + notes += "\nAdditional notes about plugin:\n" + notes += fmt.Sprintf(" Path: %s\n", path) + notes += fmt.Sprintf(" Mode: %s\n", stat.Mode()) + + if elfFile, err := elf.Open(path); err == nil { + defer elfFile.Close() + notes += fmt.Sprintf(" ELF architecture: %s (current architecture: %s)\n", elfFile.Machine, runtime.GOARCH) + } else if machoFile, err := macho.Open(path); err == nil { + defer machoFile.Close() + notes += fmt.Sprintf(" MachO architecture: %s (current architecture: %s)\n", machoFile.Cpu, runtime.GOARCH) + } else if peFile, err := pe.Open(path); err == nil { + defer peFile.Close() + machine, ok := peTypes[peFile.Machine] + if !ok { + machine = "unknown" + } + notes += fmt.Sprintf(" PE architecture: %s (current architecture: %s)\n", machine, runtime.GOARCH) + } + return notes +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process.go new file mode 100644 index 000000000..6c34dc774 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process.go @@ -0,0 +1,25 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cmdrunner + +import "time" + +// pidAlive checks whether a pid is alive. +func pidAlive(pid int) bool { + return _pidAlive(pid) +} + +// pidWait blocks for a process to exit. +func pidWait(pid int) error { + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + + for range ticker.C { + if !pidAlive(pid) { + break + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_posix.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_posix.go new file mode 100644 index 000000000..bf3fc5b68 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_posix.go @@ -0,0 +1,23 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !windows +// +build !windows + +package cmdrunner + +import ( + "os" + "syscall" +) + +// _pidAlive tests whether a process is alive or not by sending it Signal 0, +// since Go otherwise has no way to test this. +func _pidAlive(pid int) bool { + proc, err := os.FindProcess(pid) + if err == nil { + err = proc.Signal(syscall.Signal(0)) + } + + return err == nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_windows.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_windows.go new file mode 100644 index 000000000..6c39df28f --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_windows.go @@ -0,0 +1,33 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cmdrunner + +import ( + "syscall" +) + +const ( + // Weird name but matches the MSDN docs + exit_STILL_ACTIVE = 259 + + processDesiredAccess = syscall.STANDARD_RIGHTS_READ | + syscall.PROCESS_QUERY_INFORMATION | + syscall.SYNCHRONIZE +) + +// _pidAlive tests whether a process is alive or not +func _pidAlive(pid int) bool { + h, err := syscall.OpenProcess(processDesiredAccess, false, uint32(pid)) + if err != nil { + return false + } + defer syscall.CloseHandle(h) + + var ec uint32 + if e := syscall.GetExitCodeProcess(h, &ec); e != nil { + return false + } + + return ec == exit_STILL_ACTIVE +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_client_listener.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_client_listener.go new file mode 100644 index 000000000..e8a3a152a --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_client_listener.go @@ -0,0 +1,51 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package grpcmux + +import ( + "io" + "net" + + "github.com/hashicorp/yamux" +) + +var _ net.Listener = (*blockedClientListener)(nil) + +// blockedClientListener accepts connections for a specific gRPC broker stream +// ID on the client (host) side of the connection. +type blockedClientListener struct { + session *yamux.Session + waitCh chan struct{} + doneCh <-chan struct{} +} + +func newBlockedClientListener(session *yamux.Session, doneCh <-chan struct{}) *blockedClientListener { + return &blockedClientListener{ + waitCh: make(chan struct{}, 1), + doneCh: doneCh, + session: session, + } +} + +func (b *blockedClientListener) Accept() (net.Conn, error) { + select { + case <-b.waitCh: + return b.session.Accept() + case <-b.doneCh: + return nil, io.EOF + } +} + +func (b *blockedClientListener) Addr() net.Addr { + return b.session.Addr() +} + +func (b *blockedClientListener) Close() error { + // We don't close the session, the client muxer is responsible for that. + return nil +} + +func (b *blockedClientListener) unblock() { + b.waitCh <- struct{}{} +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_server_listener.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_server_listener.go new file mode 100644 index 000000000..0edb2c05d --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_server_listener.go @@ -0,0 +1,49 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package grpcmux + +import ( + "io" + "net" +) + +var _ net.Listener = (*blockedServerListener)(nil) + +// blockedServerListener accepts connections for a specific gRPC broker stream +// ID on the server (plugin) side of the connection. 
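+//
+// Unlike the client-side listener, it never accepts from the yamux session
+// itself: the muxer's main Accept loop accepts every incoming stream and, when
+// a knock for this listener's ID is pending, delivers the already-accepted
+// connection over acceptCh.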
+type blockedServerListener struct { + addr net.Addr + acceptCh chan acceptResult + doneCh <-chan struct{} +} + +type acceptResult struct { + conn net.Conn + err error +} + +func newBlockedServerListener(addr net.Addr, doneCh <-chan struct{}) *blockedServerListener { + return &blockedServerListener{ + addr: addr, + acceptCh: make(chan acceptResult), + doneCh: doneCh, + } +} + +func (b *blockedServerListener) Accept() (net.Conn, error) { + select { + case accept := <-b.acceptCh: + return accept.conn, accept.err + case <-b.doneCh: + return nil, io.EOF + } +} + +func (b *blockedServerListener) Addr() net.Addr { + return b.addr +} + +func (b *blockedServerListener) Close() error { + return nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_client_muxer.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_client_muxer.go new file mode 100644 index 000000000..b203ba467 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_client_muxer.go @@ -0,0 +1,105 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package grpcmux + +import ( + "fmt" + "net" + "sync" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/yamux" +) + +var _ GRPCMuxer = (*GRPCClientMuxer)(nil) + +// GRPCClientMuxer implements the client (host) side of the gRPC broker's +// GRPCMuxer interface for multiplexing multiple gRPC broker connections over +// a single net.Conn. +// +// The client dials the initial net.Conn eagerly, and creates a yamux.Session +// as the implementation for multiplexing any additional connections. +// +// Each net.Listener returned from Listener will block until the client receives +// a knock that matches its gRPC broker stream ID. There is no default listener +// on the client, as it is a client for the gRPC broker's control services. (See +// GRPCServerMuxer for more details). +type GRPCClientMuxer struct { + logger hclog.Logger + session *yamux.Session + + acceptMutex sync.Mutex + acceptListeners map[uint32]*blockedClientListener +} + +func NewGRPCClientMuxer(logger hclog.Logger, addr net.Addr) (*GRPCClientMuxer, error) { + // Eagerly establish the underlying connection as early as possible. 
+ logger.Debug("making new client mux initial connection", "addr", addr) + conn, err := net.Dial(addr.Network(), addr.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + _ = tcpConn.SetKeepAlive(true) + } + + cfg := yamux.DefaultConfig() + cfg.Logger = logger.Named("yamux").StandardLogger(&hclog.StandardLoggerOptions{ + InferLevels: true, + }) + cfg.LogOutput = nil + sess, err := yamux.Client(conn, cfg) + if err != nil { + return nil, err + } + + logger.Debug("client muxer connected", "addr", addr) + m := &GRPCClientMuxer{ + logger: logger, + session: sess, + acceptListeners: make(map[uint32]*blockedClientListener), + } + + return m, nil +} + +func (m *GRPCClientMuxer) Enabled() bool { + return m != nil +} + +func (m *GRPCClientMuxer) Listener(id uint32, doneCh <-chan struct{}) (net.Listener, error) { + ln := newBlockedClientListener(m.session, doneCh) + + m.acceptMutex.Lock() + m.acceptListeners[id] = ln + m.acceptMutex.Unlock() + + return ln, nil +} + +func (m *GRPCClientMuxer) AcceptKnock(id uint32) error { + m.acceptMutex.Lock() + defer m.acceptMutex.Unlock() + + ln, ok := m.acceptListeners[id] + if !ok { + return fmt.Errorf("no listener for id %d", id) + } + ln.unblock() + return nil +} + +func (m *GRPCClientMuxer) Dial() (net.Conn, error) { + stream, err := m.session.Open() + if err != nil { + return nil, fmt.Errorf("error dialling new client stream: %w", err) + } + + return stream, nil +} + +func (m *GRPCClientMuxer) Close() error { + return m.session.Close() +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_muxer.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_muxer.go new file mode 100644 index 000000000..c52aaf553 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_muxer.go @@ -0,0 +1,41 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package grpcmux + +import ( + "net" +) + +// GRPCMuxer enables multiple implementations of net.Listener to accept +// connections over a single "main" multiplexed net.Conn, and dial multiple +// client connections over the same multiplexed net.Conn. +// +// The first multiplexed connection is used to serve the gRPC broker's own +// control services: plugin.GRPCBroker, plugin.GRPCController, plugin.GRPCStdio. +// +// Clients must "knock" before dialling, to tell the server side that the +// next net.Conn should be accepted onto a specific stream ID. The knock is a +// bidirectional streaming message on the plugin.GRPCBroker service. +type GRPCMuxer interface { + // Enabled determines whether multiplexing should be used. It saves users + // of the interface from having to compare an interface with nil, which + // is a bit awkward to do correctly. + Enabled() bool + + // Listener returns a multiplexed listener that will wait until AcceptKnock + // is called with a matching ID before its Accept function returns. + Listener(id uint32, doneCh <-chan struct{}) (net.Listener, error) + + // AcceptKnock unblocks the listener with the matching ID, and returns an + // error if it hasn't been created yet. + AcceptKnock(id uint32) error + + // Dial makes a new multiplexed client connection. To dial a specific ID, + // a knock must be sent first. + Dial() (net.Conn, error) + + // Close closes connections and releases any resources associated with the + // muxer. 
+ Close() error +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_server_muxer.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_server_muxer.go new file mode 100644 index 000000000..27696ee76 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_server_muxer.go @@ -0,0 +1,190 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package grpcmux + +import ( + "errors" + "fmt" + "net" + "sync" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/yamux" +) + +var _ GRPCMuxer = (*GRPCServerMuxer)(nil) +var _ net.Listener = (*GRPCServerMuxer)(nil) + +// GRPCServerMuxer implements the server (plugin) side of the gRPC broker's +// GRPCMuxer interface for multiplexing multiple gRPC broker connections over +// a single net.Conn. +// +// The server side needs a listener to serve the gRPC broker's control services, +// which includes the service we will receive knocks on. That means we always +// accept the first connection onto a "default" main listener, and if we accept +// any further connections without receiving a knock first, they are also given +// to the default listener. +// +// When creating additional multiplexed listeners for specific stream IDs, we +// can't control the order in which gRPC servers will call Accept() on each +// listener, but we do need to control which gRPC server accepts which connection. +// As such, each multiplexed listener blocks waiting on a channel. It will be +// unblocked when a knock is received for the matching stream ID. +type GRPCServerMuxer struct { + addr net.Addr + logger hclog.Logger + + sessionErrCh chan error + sess *yamux.Session + + knockCh chan uint32 + + acceptMutex sync.Mutex + acceptChannels map[uint32]chan acceptResult +} + +func NewGRPCServerMuxer(logger hclog.Logger, ln net.Listener) *GRPCServerMuxer { + m := &GRPCServerMuxer{ + addr: ln.Addr(), + logger: logger, + + sessionErrCh: make(chan error), + + knockCh: make(chan uint32, 1), + acceptChannels: make(map[uint32]chan acceptResult), + } + + go m.acceptSession(ln) + + return m +} + +// acceptSessionAndMuxAccept is responsible for establishing the yamux session, +// and then kicking off the acceptLoop function. +func (m *GRPCServerMuxer) acceptSession(ln net.Listener) { + defer close(m.sessionErrCh) + + m.logger.Debug("accepting initial connection", "addr", m.addr) + conn, err := ln.Accept() + if err != nil { + m.sessionErrCh <- err + return + } + + m.logger.Debug("initial server connection accepted", "addr", m.addr) + cfg := yamux.DefaultConfig() + cfg.Logger = m.logger.Named("yamux").StandardLogger(&hclog.StandardLoggerOptions{ + InferLevels: true, + }) + cfg.LogOutput = nil + m.sess, err = yamux.Server(conn, cfg) + if err != nil { + m.sessionErrCh <- err + return + } +} + +func (m *GRPCServerMuxer) session() (*yamux.Session, error) { + select { + case err := <-m.sessionErrCh: + if err != nil { + return nil, err + } + case <-time.After(5 * time.Second): + return nil, errors.New("timed out waiting for connection to be established") + } + + // Should never happen. + if m.sess == nil { + return nil, errors.New("no connection established and no error received") + } + + return m.sess, nil +} + +// Accept accepts all incoming connections and routes them to the correct +// stream ID based on the most recent knock received. 
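+//
+// If no knock is pending, the connection is handed to the default (main)
+// listener that serves the broker's own control services; a knock for an ID
+// without a registered listener is treated as an error.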
+func (m *GRPCServerMuxer) Accept() (net.Conn, error) { + session, err := m.session() + if err != nil { + return nil, fmt.Errorf("error establishing yamux session: %w", err) + } + + for { + conn, acceptErr := session.Accept() + + select { + case id := <-m.knockCh: + m.acceptMutex.Lock() + acceptCh, ok := m.acceptChannels[id] + m.acceptMutex.Unlock() + + if !ok { + if conn != nil { + _ = conn.Close() + } + return nil, fmt.Errorf("received knock on ID %d that doesn't have a listener", id) + } + m.logger.Debug("sending conn to brokered listener", "id", id) + acceptCh <- acceptResult{ + conn: conn, + err: acceptErr, + } + default: + m.logger.Debug("sending conn to default listener") + return conn, acceptErr + } + } +} + +func (m *GRPCServerMuxer) Addr() net.Addr { + return m.addr +} + +func (m *GRPCServerMuxer) Close() error { + session, err := m.session() + if err != nil { + return err + } + + return session.Close() +} + +func (m *GRPCServerMuxer) Enabled() bool { + return m != nil +} + +func (m *GRPCServerMuxer) Listener(id uint32, doneCh <-chan struct{}) (net.Listener, error) { + sess, err := m.session() + if err != nil { + return nil, err + } + + ln := newBlockedServerListener(sess.Addr(), doneCh) + m.acceptMutex.Lock() + m.acceptChannels[id] = ln.acceptCh + m.acceptMutex.Unlock() + + return ln, nil +} + +func (m *GRPCServerMuxer) Dial() (net.Conn, error) { + sess, err := m.session() + if err != nil { + return nil, err + } + + stream, err := sess.OpenStream() + if err != nil { + return nil, fmt.Errorf("error dialling new server stream: %w", err) + } + + return stream, nil +} + +func (m *GRPCServerMuxer) AcceptKnock(id uint32) error { + m.knockCh <- id + return nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go new file mode 100644 index 000000000..acc6dc9c7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go @@ -0,0 +1,264 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc (unknown) +// source: internal/plugin/grpc_broker.proto + +package plugin + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ConnInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ServiceId uint32 `protobuf:"varint,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty"` + Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` + Knock *ConnInfo_Knock `protobuf:"bytes,4,opt,name=knock,proto3" json:"knock,omitempty"` +} + +func (x *ConnInfo) Reset() { + *x = ConnInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_plugin_grpc_broker_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConnInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnInfo) ProtoMessage() {} + +func (x *ConnInfo) ProtoReflect() protoreflect.Message { + mi := &file_internal_plugin_grpc_broker_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConnInfo.ProtoReflect.Descriptor instead. +func (*ConnInfo) Descriptor() ([]byte, []int) { + return file_internal_plugin_grpc_broker_proto_rawDescGZIP(), []int{0} +} + +func (x *ConnInfo) GetServiceId() uint32 { + if x != nil { + return x.ServiceId + } + return 0 +} + +func (x *ConnInfo) GetNetwork() string { + if x != nil { + return x.Network + } + return "" +} + +func (x *ConnInfo) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (x *ConnInfo) GetKnock() *ConnInfo_Knock { + if x != nil { + return x.Knock + } + return nil +} + +type ConnInfo_Knock struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Knock bool `protobuf:"varint,1,opt,name=knock,proto3" json:"knock,omitempty"` + Ack bool `protobuf:"varint,2,opt,name=ack,proto3" json:"ack,omitempty"` + Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *ConnInfo_Knock) Reset() { + *x = ConnInfo_Knock{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_plugin_grpc_broker_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConnInfo_Knock) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnInfo_Knock) ProtoMessage() {} + +func (x *ConnInfo_Knock) ProtoReflect() protoreflect.Message { + mi := &file_internal_plugin_grpc_broker_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConnInfo_Knock.ProtoReflect.Descriptor instead. 
+func (*ConnInfo_Knock) Descriptor() ([]byte, []int) { + return file_internal_plugin_grpc_broker_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *ConnInfo_Knock) GetKnock() bool { + if x != nil { + return x.Knock + } + return false +} + +func (x *ConnInfo_Knock) GetAck() bool { + if x != nil { + return x.Ack + } + return false +} + +func (x *ConnInfo_Knock) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +var File_internal_plugin_grpc_broker_proto protoreflect.FileDescriptor + +var file_internal_plugin_grpc_broker_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x22, 0xd2, 0x01, 0x0a, 0x08, + 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, + 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2c, 0x0a, 0x05, 0x6b, + 0x6e, 0x6f, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4b, 0x6e, 0x6f, + 0x63, 0x6b, 0x52, 0x05, 0x6b, 0x6e, 0x6f, 0x63, 0x6b, 0x1a, 0x45, 0x0a, 0x05, 0x4b, 0x6e, 0x6f, + 0x63, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x6b, 0x6e, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x05, 0x6b, 0x6e, 0x6f, 0x63, 0x6b, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x63, 0x6b, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x61, 0x63, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x32, 0x43, 0x0a, 0x0a, 0x47, 0x52, 0x50, 0x43, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x12, 0x35, + 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x10, 0x2e, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x1a, + 0x10, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66, + 0x6f, 0x28, 0x01, 0x30, 0x01, 0x42, 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_internal_plugin_grpc_broker_proto_rawDescOnce sync.Once + file_internal_plugin_grpc_broker_proto_rawDescData = file_internal_plugin_grpc_broker_proto_rawDesc +) + +func file_internal_plugin_grpc_broker_proto_rawDescGZIP() []byte { + file_internal_plugin_grpc_broker_proto_rawDescOnce.Do(func() { + file_internal_plugin_grpc_broker_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_plugin_grpc_broker_proto_rawDescData) + }) + return file_internal_plugin_grpc_broker_proto_rawDescData +} + +var file_internal_plugin_grpc_broker_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_internal_plugin_grpc_broker_proto_goTypes = []interface{}{ + (*ConnInfo)(nil), // 0: plugin.ConnInfo + (*ConnInfo_Knock)(nil), // 1: plugin.ConnInfo.Knock +} +var file_internal_plugin_grpc_broker_proto_depIdxs = []int32{ + 1, // 0: 
plugin.ConnInfo.knock:type_name -> plugin.ConnInfo.Knock + 0, // 1: plugin.GRPCBroker.StartStream:input_type -> plugin.ConnInfo + 0, // 2: plugin.GRPCBroker.StartStream:output_type -> plugin.ConnInfo + 2, // [2:3] is the sub-list for method output_type + 1, // [1:2] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_internal_plugin_grpc_broker_proto_init() } +func file_internal_plugin_grpc_broker_proto_init() { + if File_internal_plugin_grpc_broker_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_internal_plugin_grpc_broker_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConnInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_plugin_grpc_broker_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConnInfo_Knock); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_plugin_grpc_broker_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_internal_plugin_grpc_broker_proto_goTypes, + DependencyIndexes: file_internal_plugin_grpc_broker_proto_depIdxs, + MessageInfos: file_internal_plugin_grpc_broker_proto_msgTypes, + }.Build() + File_internal_plugin_grpc_broker_proto = out.File + file_internal_plugin_grpc_broker_proto_rawDesc = nil + file_internal_plugin_grpc_broker_proto_goTypes = nil + file_internal_plugin_grpc_broker_proto_depIdxs = nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto new file mode 100644 index 000000000..c92cd645c --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto @@ -0,0 +1,22 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +syntax = "proto3"; +package plugin; +option go_package = "./plugin"; + +message ConnInfo { + uint32 service_id = 1; + string network = 2; + string address = 3; + message Knock { + bool knock = 1; + bool ack = 2; + string error = 3; + } + Knock knock = 4; +} + +service GRPCBroker { + rpc StartStream(stream ConnInfo) returns (stream ConnInfo); +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker_grpc.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker_grpc.pb.go new file mode 100644 index 000000000..1b0f80705 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker_grpc.pb.go @@ -0,0 +1,142 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: internal/plugin/grpc_broker.proto + +package plugin + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + GRPCBroker_StartStream_FullMethodName = "/plugin.GRPCBroker/StartStream" +) + +// GRPCBrokerClient is the client API for GRPCBroker service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type GRPCBrokerClient interface { + StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) +} + +type gRPCBrokerClient struct { + cc grpc.ClientConnInterface +} + +func NewGRPCBrokerClient(cc grpc.ClientConnInterface) GRPCBrokerClient { + return &gRPCBrokerClient{cc} +} + +func (c *gRPCBrokerClient) StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &GRPCBroker_ServiceDesc.Streams[0], GRPCBroker_StartStream_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &gRPCBrokerStartStreamClient{stream} + return x, nil +} + +type GRPCBroker_StartStreamClient interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + grpc.ClientStream +} + +type gRPCBrokerStartStreamClient struct { + grpc.ClientStream +} + +func (x *gRPCBrokerStartStreamClient) Send(m *ConnInfo) error { + return x.ClientStream.SendMsg(m) +} + +func (x *gRPCBrokerStartStreamClient) Recv() (*ConnInfo, error) { + m := new(ConnInfo) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GRPCBrokerServer is the server API for GRPCBroker service. +// All implementations should embed UnimplementedGRPCBrokerServer +// for forward compatibility +type GRPCBrokerServer interface { + StartStream(GRPCBroker_StartStreamServer) error +} + +// UnimplementedGRPCBrokerServer should be embedded to have forward compatible implementations. +type UnimplementedGRPCBrokerServer struct { +} + +func (UnimplementedGRPCBrokerServer) StartStream(GRPCBroker_StartStreamServer) error { + return status.Errorf(codes.Unimplemented, "method StartStream not implemented") +} + +// UnsafeGRPCBrokerServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to GRPCBrokerServer will +// result in compilation errors. +type UnsafeGRPCBrokerServer interface { + mustEmbedUnimplementedGRPCBrokerServer() +} + +func RegisterGRPCBrokerServer(s grpc.ServiceRegistrar, srv GRPCBrokerServer) { + s.RegisterService(&GRPCBroker_ServiceDesc, srv) +} + +func _GRPCBroker_StartStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(GRPCBrokerServer).StartStream(&gRPCBrokerStartStreamServer{stream}) +} + +type GRPCBroker_StartStreamServer interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + grpc.ServerStream +} + +type gRPCBrokerStartStreamServer struct { + grpc.ServerStream +} + +func (x *gRPCBrokerStartStreamServer) Send(m *ConnInfo) error { + return x.ServerStream.SendMsg(m) +} + +func (x *gRPCBrokerStartStreamServer) Recv() (*ConnInfo, error) { + m := new(ConnInfo) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GRPCBroker_ServiceDesc is the grpc.ServiceDesc for GRPCBroker service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var GRPCBroker_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCBroker", + HandlerType: (*GRPCBrokerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StartStream", + Handler: _GRPCBroker_StartStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "internal/plugin/grpc_broker.proto", +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go new file mode 100644 index 000000000..8ca48e0d9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go @@ -0,0 +1,141 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc (unknown) +// source: internal/plugin/grpc_controller.proto + +package plugin + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Empty struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Empty) Reset() { + *x = Empty{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_plugin_grpc_controller_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Empty) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Empty) ProtoMessage() {} + +func (x *Empty) ProtoReflect() protoreflect.Message { + mi := &file_internal_plugin_grpc_controller_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Empty.ProtoReflect.Descriptor instead. 
+func (*Empty) Descriptor() ([]byte, []int) { + return file_internal_plugin_grpc_controller_proto_rawDescGZIP(), []int{0} +} + +var File_internal_plugin_grpc_controller_proto protoreflect.FileDescriptor + +var file_internal_plugin_grpc_controller_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, + 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x22, + 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x32, 0x3a, 0x0a, 0x0e, 0x47, 0x52, 0x50, 0x43, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x08, 0x53, 0x68, + 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x12, 0x0d, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x0d, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x42, 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_internal_plugin_grpc_controller_proto_rawDescOnce sync.Once + file_internal_plugin_grpc_controller_proto_rawDescData = file_internal_plugin_grpc_controller_proto_rawDesc +) + +func file_internal_plugin_grpc_controller_proto_rawDescGZIP() []byte { + file_internal_plugin_grpc_controller_proto_rawDescOnce.Do(func() { + file_internal_plugin_grpc_controller_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_plugin_grpc_controller_proto_rawDescData) + }) + return file_internal_plugin_grpc_controller_proto_rawDescData +} + +var file_internal_plugin_grpc_controller_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_internal_plugin_grpc_controller_proto_goTypes = []interface{}{ + (*Empty)(nil), // 0: plugin.Empty +} +var file_internal_plugin_grpc_controller_proto_depIdxs = []int32{ + 0, // 0: plugin.GRPCController.Shutdown:input_type -> plugin.Empty + 0, // 1: plugin.GRPCController.Shutdown:output_type -> plugin.Empty + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_internal_plugin_grpc_controller_proto_init() } +func file_internal_plugin_grpc_controller_proto_init() { + if File_internal_plugin_grpc_controller_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_internal_plugin_grpc_controller_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Empty); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_plugin_grpc_controller_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_internal_plugin_grpc_controller_proto_goTypes, + DependencyIndexes: file_internal_plugin_grpc_controller_proto_depIdxs, + MessageInfos: file_internal_plugin_grpc_controller_proto_msgTypes, + }.Build() + File_internal_plugin_grpc_controller_proto = out.File + file_internal_plugin_grpc_controller_proto_rawDesc = nil + file_internal_plugin_grpc_controller_proto_goTypes = nil + file_internal_plugin_grpc_controller_proto_depIdxs = nil 
+} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto new file mode 100644 index 000000000..2755fa638 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto @@ -0,0 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +syntax = "proto3"; +package plugin; +option go_package = "./plugin"; + +message Empty { +} + +// The GRPCController is responsible for telling the plugin server to shutdown. +service GRPCController { + rpc Shutdown(Empty) returns (Empty); +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller_grpc.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller_grpc.pb.go new file mode 100644 index 000000000..427611aa0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller_grpc.pb.go @@ -0,0 +1,110 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: internal/plugin/grpc_controller.proto + +package plugin + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + GRPCController_Shutdown_FullMethodName = "/plugin.GRPCController/Shutdown" +) + +// GRPCControllerClient is the client API for GRPCController service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type GRPCControllerClient interface { + Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) +} + +type gRPCControllerClient struct { + cc grpc.ClientConnInterface +} + +func NewGRPCControllerClient(cc grpc.ClientConnInterface) GRPCControllerClient { + return &gRPCControllerClient{cc} +} + +func (c *gRPCControllerClient) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, GRPCController_Shutdown_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// GRPCControllerServer is the server API for GRPCController service. +// All implementations should embed UnimplementedGRPCControllerServer +// for forward compatibility +type GRPCControllerServer interface { + Shutdown(context.Context, *Empty) (*Empty, error) +} + +// UnimplementedGRPCControllerServer should be embedded to have forward compatible implementations. +type UnimplementedGRPCControllerServer struct { +} + +func (UnimplementedGRPCControllerServer) Shutdown(context.Context, *Empty) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Shutdown not implemented") +} + +// UnsafeGRPCControllerServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to GRPCControllerServer will +// result in compilation errors. 
+type UnsafeGRPCControllerServer interface { + mustEmbedUnimplementedGRPCControllerServer() +} + +func RegisterGRPCControllerServer(s grpc.ServiceRegistrar, srv GRPCControllerServer) { + s.RegisterService(&GRPCController_ServiceDesc, srv) +} + +func _GRPCController_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GRPCControllerServer).Shutdown(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: GRPCController_Shutdown_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GRPCControllerServer).Shutdown(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +// GRPCController_ServiceDesc is the grpc.ServiceDesc for GRPCController service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var GRPCController_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCController", + HandlerType: (*GRPCControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Shutdown", + Handler: _GRPCController_Shutdown_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "internal/plugin/grpc_controller.proto", +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go new file mode 100644 index 000000000..139cbb4a9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go @@ -0,0 +1,225 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc (unknown) +// source: internal/plugin/grpc_stdio.proto + +package plugin + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type StdioData_Channel int32 + +const ( + StdioData_INVALID StdioData_Channel = 0 + StdioData_STDOUT StdioData_Channel = 1 + StdioData_STDERR StdioData_Channel = 2 +) + +// Enum value maps for StdioData_Channel. 
+var ( + StdioData_Channel_name = map[int32]string{ + 0: "INVALID", + 1: "STDOUT", + 2: "STDERR", + } + StdioData_Channel_value = map[string]int32{ + "INVALID": 0, + "STDOUT": 1, + "STDERR": 2, + } +) + +func (x StdioData_Channel) Enum() *StdioData_Channel { + p := new(StdioData_Channel) + *p = x + return p +} + +func (x StdioData_Channel) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (StdioData_Channel) Descriptor() protoreflect.EnumDescriptor { + return file_internal_plugin_grpc_stdio_proto_enumTypes[0].Descriptor() +} + +func (StdioData_Channel) Type() protoreflect.EnumType { + return &file_internal_plugin_grpc_stdio_proto_enumTypes[0] +} + +func (x StdioData_Channel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use StdioData_Channel.Descriptor instead. +func (StdioData_Channel) EnumDescriptor() ([]byte, []int) { + return file_internal_plugin_grpc_stdio_proto_rawDescGZIP(), []int{0, 0} +} + +// StdioData is a single chunk of stdout or stderr data that is streamed +// from GRPCStdio. +type StdioData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Channel StdioData_Channel `protobuf:"varint,1,opt,name=channel,proto3,enum=plugin.StdioData_Channel" json:"channel,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *StdioData) Reset() { + *x = StdioData{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_plugin_grpc_stdio_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StdioData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StdioData) ProtoMessage() {} + +func (x *StdioData) ProtoReflect() protoreflect.Message { + mi := &file_internal_plugin_grpc_stdio_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StdioData.ProtoReflect.Descriptor instead. 
+func (*StdioData) Descriptor() ([]byte, []int) { + return file_internal_plugin_grpc_stdio_proto_rawDescGZIP(), []int{0} +} + +func (x *StdioData) GetChannel() StdioData_Channel { + if x != nil { + return x.Channel + } + return StdioData_INVALID +} + +func (x *StdioData) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +var File_internal_plugin_grpc_stdio_proto protoreflect.FileDescriptor + +var file_internal_plugin_grpc_stdio_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x74, 0x64, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x84, 0x01, 0x0a, 0x09, 0x53, 0x74, 0x64, 0x69, + 0x6f, 0x44, 0x61, 0x74, 0x61, 0x12, 0x33, 0x0a, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, + 0x53, 0x74, 0x64, 0x69, 0x6f, 0x44, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, + 0x6c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x2e, + 0x0a, 0x07, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, + 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x44, 0x4f, 0x55, 0x54, + 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x44, 0x45, 0x52, 0x52, 0x10, 0x02, 0x32, 0x47, + 0x0a, 0x09, 0x47, 0x52, 0x50, 0x43, 0x53, 0x74, 0x64, 0x69, 0x6f, 0x12, 0x3a, 0x0a, 0x0b, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, 0x64, 0x69, 0x6f, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x1a, 0x11, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x64, 0x69, + 0x6f, 0x44, 0x61, 0x74, 0x61, 0x30, 0x01, 0x42, 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_internal_plugin_grpc_stdio_proto_rawDescOnce sync.Once + file_internal_plugin_grpc_stdio_proto_rawDescData = file_internal_plugin_grpc_stdio_proto_rawDesc +) + +func file_internal_plugin_grpc_stdio_proto_rawDescGZIP() []byte { + file_internal_plugin_grpc_stdio_proto_rawDescOnce.Do(func() { + file_internal_plugin_grpc_stdio_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_plugin_grpc_stdio_proto_rawDescData) + }) + return file_internal_plugin_grpc_stdio_proto_rawDescData +} + +var file_internal_plugin_grpc_stdio_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_internal_plugin_grpc_stdio_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_internal_plugin_grpc_stdio_proto_goTypes = []interface{}{ + (StdioData_Channel)(0), // 0: plugin.StdioData.Channel + (*StdioData)(nil), // 1: plugin.StdioData + (*emptypb.Empty)(nil), // 2: google.protobuf.Empty +} +var file_internal_plugin_grpc_stdio_proto_depIdxs = []int32{ + 0, // 0: plugin.StdioData.channel:type_name -> plugin.StdioData.Channel + 2, // 1: plugin.GRPCStdio.StreamStdio:input_type -> google.protobuf.Empty + 1, // 2: plugin.GRPCStdio.StreamStdio:output_type -> plugin.StdioData + 2, // [2:3] is the sub-list for method output_type 
+ 1, // [1:2] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_internal_plugin_grpc_stdio_proto_init() } +func file_internal_plugin_grpc_stdio_proto_init() { + if File_internal_plugin_grpc_stdio_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_internal_plugin_grpc_stdio_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StdioData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_plugin_grpc_stdio_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_internal_plugin_grpc_stdio_proto_goTypes, + DependencyIndexes: file_internal_plugin_grpc_stdio_proto_depIdxs, + EnumInfos: file_internal_plugin_grpc_stdio_proto_enumTypes, + MessageInfos: file_internal_plugin_grpc_stdio_proto_msgTypes, + }.Build() + File_internal_plugin_grpc_stdio_proto = out.File + file_internal_plugin_grpc_stdio_proto_rawDesc = nil + file_internal_plugin_grpc_stdio_proto_goTypes = nil + file_internal_plugin_grpc_stdio_proto_depIdxs = nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto new file mode 100644 index 000000000..f48ac76c9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto @@ -0,0 +1,33 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +syntax = "proto3"; +package plugin; +option go_package = "./plugin"; + +import "google/protobuf/empty.proto"; + +// GRPCStdio is a service that is automatically run by the plugin process +// to stream any stdout/err data so that it can be mirrored on the plugin +// host side. +service GRPCStdio { + // StreamStdio returns a stream that contains all the stdout/stderr. + // This RPC endpoint must only be called ONCE. Once stdio data is consumed + // it is not sent again. + // + // Callers should connect early to prevent blocking on the plugin process. + rpc StreamStdio(google.protobuf.Empty) returns (stream StdioData); +} + +// StdioData is a single chunk of stdout or stderr data that is streamed +// from GRPCStdio. +message StdioData { + enum Channel { + INVALID = 0; + STDOUT = 1; + STDERR = 2; + } + + Channel channel = 1; + bytes data = 2; +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio_grpc.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio_grpc.pb.go new file mode 100644 index 000000000..f82b15035 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio_grpc.pb.go @@ -0,0 +1,148 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: internal/plugin/grpc_stdio.proto + +package plugin + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + GRPCStdio_StreamStdio_FullMethodName = "/plugin.GRPCStdio/StreamStdio" +) + +// GRPCStdioClient is the client API for GRPCStdio service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type GRPCStdioClient interface { + // StreamStdio returns a stream that contains all the stdout/stderr. + // This RPC endpoint must only be called ONCE. Once stdio data is consumed + // it is not sent again. + // + // Callers should connect early to prevent blocking on the plugin process. + StreamStdio(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) +} + +type gRPCStdioClient struct { + cc grpc.ClientConnInterface +} + +func NewGRPCStdioClient(cc grpc.ClientConnInterface) GRPCStdioClient { + return &gRPCStdioClient{cc} +} + +func (c *gRPCStdioClient) StreamStdio(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) { + stream, err := c.cc.NewStream(ctx, &GRPCStdio_ServiceDesc.Streams[0], GRPCStdio_StreamStdio_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &gRPCStdioStreamStdioClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type GRPCStdio_StreamStdioClient interface { + Recv() (*StdioData, error) + grpc.ClientStream +} + +type gRPCStdioStreamStdioClient struct { + grpc.ClientStream +} + +func (x *gRPCStdioStreamStdioClient) Recv() (*StdioData, error) { + m := new(StdioData) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GRPCStdioServer is the server API for GRPCStdio service. +// All implementations should embed UnimplementedGRPCStdioServer +// for forward compatibility +type GRPCStdioServer interface { + // StreamStdio returns a stream that contains all the stdout/stderr. + // This RPC endpoint must only be called ONCE. Once stdio data is consumed + // it is not sent again. + // + // Callers should connect early to prevent blocking on the plugin process. + StreamStdio(*emptypb.Empty, GRPCStdio_StreamStdioServer) error +} + +// UnimplementedGRPCStdioServer should be embedded to have forward compatible implementations. +type UnimplementedGRPCStdioServer struct { +} + +func (UnimplementedGRPCStdioServer) StreamStdio(*emptypb.Empty, GRPCStdio_StreamStdioServer) error { + return status.Errorf(codes.Unimplemented, "method StreamStdio not implemented") +} + +// UnsafeGRPCStdioServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to GRPCStdioServer will +// result in compilation errors. 
+type UnsafeGRPCStdioServer interface { + mustEmbedUnimplementedGRPCStdioServer() +} + +func RegisterGRPCStdioServer(s grpc.ServiceRegistrar, srv GRPCStdioServer) { + s.RegisterService(&GRPCStdio_ServiceDesc, srv) +} + +func _GRPCStdio_StreamStdio_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(emptypb.Empty) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(GRPCStdioServer).StreamStdio(m, &gRPCStdioStreamStdioServer{stream}) +} + +type GRPCStdio_StreamStdioServer interface { + Send(*StdioData) error + grpc.ServerStream +} + +type gRPCStdioStreamStdioServer struct { + grpc.ServerStream +} + +func (x *gRPCStdioStreamStdioServer) Send(m *StdioData) error { + return x.ServerStream.SendMsg(m) +} + +// GRPCStdio_ServiceDesc is the grpc.ServiceDesc for GRPCStdio service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var GRPCStdio_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCStdio", + HandlerType: (*GRPCStdioServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamStdio", + Handler: _GRPCStdio_StreamStdio_Handler, + ServerStreams: true, + }, + }, + Metadata: "internal/plugin/grpc_stdio.proto", +} diff --git a/vendor/github.com/hashicorp/go-plugin/log_entry.go b/vendor/github.com/hashicorp/go-plugin/log_entry.go new file mode 100644 index 000000000..ab963d56b --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/log_entry.go @@ -0,0 +1,76 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "encoding/json" + "time" +) + +// logEntry is the JSON payload that gets sent to Stderr from the plugin to the host +type logEntry struct { + Message string `json:"@message"` + Level string `json:"@level"` + Timestamp time.Time `json:"timestamp"` + KVPairs []*logEntryKV `json:"kv_pairs"` +} + +// logEntryKV is a key value pair within the Output payload +type logEntryKV struct { + Key string `json:"key"` + Value interface{} `json:"value"` +} + +// flattenKVPairs is used to flatten KVPair slice into []interface{} +// for hclog consumption. +func flattenKVPairs(kvs []*logEntryKV) []interface{} { + var result []interface{} + for _, kv := range kvs { + result = append(result, kv.Key) + result = append(result, kv.Value) + } + + return result +} + +// parseJSON handles parsing JSON output +func parseJSON(input []byte) (*logEntry, error) { + var raw map[string]interface{} + entry := &logEntry{} + + err := json.Unmarshal(input, &raw) + if err != nil { + return nil, err + } + + // Parse hclog-specific objects + if v, ok := raw["@message"]; ok { + entry.Message = v.(string) + delete(raw, "@message") + } + + if v, ok := raw["@level"]; ok { + entry.Level = v.(string) + delete(raw, "@level") + } + + if v, ok := raw["@timestamp"]; ok { + t, err := time.Parse("2006-01-02T15:04:05.000000Z07:00", v.(string)) + if err != nil { + return nil, err + } + entry.Timestamp = t + delete(raw, "@timestamp") + } + + // Parse dynamic KV args from the hclog payload. + for k, v := range raw { + entry.KVPairs = append(entry.KVPairs, &logEntryKV{ + Key: k, + Value: v, + }) + } + + return entry, nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/mtls.go b/vendor/github.com/hashicorp/go-plugin/mtls.go new file mode 100644 index 000000000..09ecafaf4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/mtls.go @@ -0,0 +1,76 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "time" +) + +// generateCert generates a temporary certificate for plugin authentication. The +// certificate and private key are returns in PEM format. +func generateCert() (cert []byte, privateKey []byte, err error) { + key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + return nil, nil, err + } + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + sn, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return nil, nil, err + } + + host := "localhost" + + template := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + Organization: []string{"HashiCorp"}, + }, + DNSNames: []string{host}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageServerAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + SerialNumber: sn, + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + IsCA: true, + } + + der, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key) + if err != nil { + return nil, nil, err + } + + var certOut bytes.Buffer + if err := pem.Encode(&certOut, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil { + return nil, nil, err + } + + keyBytes, err := x509.MarshalECPrivateKey(key) + if err != nil { + return nil, nil, err + } + + var keyOut bytes.Buffer + if err := pem.Encode(&keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}); err != nil { + return nil, nil, err + } + + cert = certOut.Bytes() + privateKey = keyOut.Bytes() + + return cert, privateKey, nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/mux_broker.go b/vendor/github.com/hashicorp/go-plugin/mux_broker.go new file mode 100644 index 000000000..4eb1208fb --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/mux_broker.go @@ -0,0 +1,207 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "encoding/binary" + "fmt" + "log" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/yamux" +) + +// MuxBroker is responsible for brokering multiplexed connections by unique ID. +// +// It is used by plugins to multiplex multiple RPC connections and data +// streams on top of a single connection between the plugin process and the +// host process. +// +// This allows a plugin to request a channel with a specific ID to connect to +// or accept a connection from, and the broker handles the details of +// holding these channels open while they're being negotiated. +// +// The Plugin interface has access to these for both Server and Client. +// The broker can be used by either (optionally) to reserve and connect to +// new multiplexed streams. This is useful for complex args and return values, +// or anything else you might need a data stream for. +type MuxBroker struct { + nextId uint32 + session *yamux.Session + streams map[uint32]*muxBrokerPending + + sync.Mutex +} + +type muxBrokerPending struct { + ch chan net.Conn + doneCh chan struct{} +} + +func newMuxBroker(s *yamux.Session) *MuxBroker { + return &MuxBroker{ + session: s, + streams: make(map[uint32]*muxBrokerPending), + } +} + +// Accept accepts a connection by ID. 
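A side note on the mtls.go hunk above: generateCert is unexported and internal to go-plugin, so the following is only an illustrative sketch (standard-library crypto/tls and crypto/x509 only, nothing from this diff) of how a PEM certificate/key pair like the one it returns is typically turned into a TLS config that presents the certificate and trusts the same self-signed CA.

package example

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
)

// tlsConfigFromPEM builds a TLS config from a PEM-encoded certificate and
// private key, trusting the (self-signed) certificate as its own CA.
func tlsConfigFromPEM(certPEM, keyPEM []byte) (*tls.Config, error) {
	cert, err := tls.X509KeyPair(certPEM, keyPEM)
	if err != nil {
		return nil, fmt.Errorf("parse key pair: %w", err)
	}

	// Trust the same self-signed certificate when verifying the peer.
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(certPEM) {
		return nil, fmt.Errorf("append certificate to pool")
	}

	return &tls.Config{
		Certificates: []tls.Certificate{cert},
		RootCAs:      pool,
		MinVersion:   tls.VersionTLS12,
	}, nil
}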
+// +// This should not be called multiple times with the same ID at one time. +func (m *MuxBroker) Accept(id uint32) (net.Conn, error) { + var c net.Conn + p := m.getStream(id) + select { + case c = <-p.ch: + close(p.doneCh) + case <-time.After(5 * time.Second): + m.Lock() + defer m.Unlock() + delete(m.streams, id) + + return nil, fmt.Errorf("timeout waiting for accept") + } + + // Ack our connection + if err := binary.Write(c, binary.LittleEndian, id); err != nil { + c.Close() + return nil, err + } + + return c, nil +} + +// AcceptAndServe is used to accept a specific stream ID and immediately +// serve an RPC server on that stream ID. This is used to easily serve +// complex arguments. +// +// The served interface is always registered to the "Plugin" name. +func (m *MuxBroker) AcceptAndServe(id uint32, v interface{}) { + conn, err := m.Accept(id) + if err != nil { + log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) + return + } + + serve(conn, "Plugin", v) +} + +// Close closes the connection and all sub-connections. +func (m *MuxBroker) Close() error { + return m.session.Close() +} + +// Dial opens a connection by ID. +func (m *MuxBroker) Dial(id uint32) (net.Conn, error) { + // Open the stream + stream, err := m.session.OpenStream() + if err != nil { + return nil, err + } + + // Write the stream ID onto the wire. + if err := binary.Write(stream, binary.LittleEndian, id); err != nil { + stream.Close() + return nil, err + } + + // Read the ack that we connected. Then we're off! + var ack uint32 + if err := binary.Read(stream, binary.LittleEndian, &ack); err != nil { + stream.Close() + return nil, err + } + if ack != id { + stream.Close() + return nil, fmt.Errorf("bad ack: %d (expected %d)", ack, id) + } + + return stream, nil +} + +// NextId returns a unique ID to use next. +// +// It is possible for very long-running plugin hosts to wrap this value, +// though it would require a very large amount of RPC calls. In practice +// we've never seen it happen. +func (m *MuxBroker) NextId() uint32 { + return atomic.AddUint32(&m.nextId, 1) +} + +// Run starts the brokering and should be executed in a goroutine, since it +// blocks forever, or until the session closes. +// +// Uses of MuxBroker never need to call this. It is called internally by +// the plugin host/client. +func (m *MuxBroker) Run() { + for { + stream, err := m.session.AcceptStream() + if err != nil { + // Once we receive an error, just exit + break + } + + // Read the stream ID from the stream + var id uint32 + if err := binary.Read(stream, binary.LittleEndian, &id); err != nil { + stream.Close() + continue + } + + // Initialize the waiter + p := m.getStream(id) + select { + case p.ch <- stream: + default: + } + + // Wait for a timeout + go m.timeoutWait(id, p) + } +} + +func (m *MuxBroker) getStream(id uint32) *muxBrokerPending { + m.Lock() + defer m.Unlock() + + p, ok := m.streams[id] + if ok { + return p + } + + m.streams[id] = &muxBrokerPending{ + ch: make(chan net.Conn, 1), + doneCh: make(chan struct{}), + } + return m.streams[id] +} + +func (m *MuxBroker) timeoutWait(id uint32, p *muxBrokerPending) { + // Wait for the stream to either be picked up and connected, or + // for a timeout. + timeout := false + select { + case <-p.doneCh: + case <-time.After(5 * time.Second): + timeout = true + } + + m.Lock() + defer m.Unlock() + + // Delete the stream so no one else can grab it + delete(m.streams, id) + + // If we timed out, then check if we have a channel in the buffer, + // and if so, close it. 
+ if timeout { + select { + case s := <-p.ch: + s.Close() + } + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go new file mode 100644 index 000000000..184749b96 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/plugin.go @@ -0,0 +1,61 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// The plugin package exposes functions and helpers for communicating to +// plugins which are implemented as standalone binary applications. +// +// plugin.Client fully manages the lifecycle of executing the application, +// connecting to it, and returning the RPC client for dispensing plugins. +// +// plugin.Serve fully manages listeners to expose an RPC server from a binary +// that plugin.Client can connect to. +package plugin + +import ( + "context" + "errors" + "net/rpc" + + "google.golang.org/grpc" +) + +// Plugin is the interface that is implemented to serve/connect to an +// inteface implementation. +type Plugin interface { + // Server should return the RPC server compatible struct to serve + // the methods that the Client calls over net/rpc. + Server(*MuxBroker) (interface{}, error) + + // Client returns an interface implementation for the plugin you're + // serving that communicates to the server end of the plugin. + Client(*MuxBroker, *rpc.Client) (interface{}, error) +} + +// GRPCPlugin is the interface that is implemented to serve/connect to +// a plugin over gRPC. +type GRPCPlugin interface { + // GRPCServer should register this plugin for serving with the + // given GRPCServer. Unlike Plugin.Server, this is only called once + // since gRPC plugins serve singletons. + GRPCServer(*GRPCBroker, *grpc.Server) error + + // GRPCClient should return the interface implementation for the plugin + // you're serving via gRPC. The provided context will be canceled by + // go-plugin in the event of the plugin process exiting. + GRPCClient(context.Context, *GRPCBroker, *grpc.ClientConn) (interface{}, error) +} + +// NetRPCUnsupportedPlugin implements Plugin but returns errors for the +// Server and Client functions. This will effectively disable support for +// net/rpc based plugins. +// +// This struct can be embedded in your struct. +type NetRPCUnsupportedPlugin struct{} + +func (p NetRPCUnsupportedPlugin) Server(*MuxBroker) (interface{}, error) { + return nil, errors.New("net/rpc plugin protocol not supported") +} + +func (p NetRPCUnsupportedPlugin) Client(*MuxBroker, *rpc.Client) (interface{}, error) { + return nil, errors.New("net/rpc plugin protocol not supported") +} diff --git a/vendor/github.com/hashicorp/go-plugin/process.go b/vendor/github.com/hashicorp/go-plugin/process.go new file mode 100644 index 000000000..b88446361 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process.go @@ -0,0 +1,4 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin diff --git a/vendor/github.com/hashicorp/go-plugin/protocol.go b/vendor/github.com/hashicorp/go-plugin/protocol.go new file mode 100644 index 000000000..e4b7be383 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/protocol.go @@ -0,0 +1,48 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "io" + "net" +) + +// Protocol is an enum representing the types of protocols. 
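For orientation on the plugin.go hunk above: a minimal, hypothetical sketch of how a plugin author satisfies the GRPCPlugin interface while opting out of net/rpc by embedding NetRPCUnsupportedPlugin. The Greeter interface and the generated-code registration calls mentioned in the comments are assumptions for illustration, not part of this diff.

package example

import (
	"context"

	plugin "github.com/hashicorp/go-plugin"
	"google.golang.org/grpc"
)

// Greeter is a hypothetical plugin-side interface; in a real plugin it would
// be backed by your own .proto definitions and generated code.
type Greeter interface {
	Greet(name string) (string, error)
}

// GreeterPlugin adapts a Greeter implementation to go-plugin's GRPCPlugin
// interface. Embedding NetRPCUnsupportedPlugin disables the legacy net/rpc
// protocol for this plugin type.
type GreeterPlugin struct {
	plugin.NetRPCUnsupportedPlugin
	Impl Greeter
}

func (p *GreeterPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error {
	// In a real plugin this registers the generated gRPC service, e.g.
	// proto.RegisterGreeterServer(s, &grpcServer{impl: p.Impl}).
	return nil
}

func (p *GreeterPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {
	// In a real plugin this wraps the generated client, e.g.
	// return &grpcClient{client: proto.NewGreeterClient(c)}, nil.
	return nil, nil
}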
+type Protocol string + +const ( + ProtocolInvalid Protocol = "" + ProtocolNetRPC Protocol = "netrpc" + ProtocolGRPC Protocol = "grpc" +) + +// ServerProtocol is an interface that must be implemented for new plugin +// protocols to be servers. +type ServerProtocol interface { + // Init is called once to configure and initialize the protocol, but + // not start listening. This is the point at which all validation should + // be done and errors returned. + Init() error + + // Config is extra configuration to be outputted to stdout. This will + // be automatically base64 encoded to ensure it can be parsed properly. + // This can be an empty string if additional configuration is not needed. + Config() string + + // Serve is called to serve connections on the given listener. This should + // continue until the listener is closed. + Serve(net.Listener) +} + +// ClientProtocol is an interface that must be implemented for new plugin +// protocols to be clients. +type ClientProtocol interface { + io.Closer + + // Dispense dispenses a new instance of the plugin with the given name. + Dispense(string) (interface{}, error) + + // Ping checks that the client connection is still healthy. + Ping() error +} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/vendor/github.com/hashicorp/go-plugin/rpc_client.go new file mode 100644 index 000000000..142454df8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/rpc_client.go @@ -0,0 +1,173 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "crypto/tls" + "fmt" + "io" + "net" + "net/rpc" + + "github.com/hashicorp/yamux" +) + +// RPCClient connects to an RPCServer over net/rpc to dispense plugin types. +type RPCClient struct { + broker *MuxBroker + control *rpc.Client + plugins map[string]Plugin + + // These are the streams used for the various stdout/err overrides + stdout, stderr net.Conn +} + +// newRPCClient creates a new RPCClient. The Client argument is expected +// to be successfully started already with a lock held. +func newRPCClient(c *Client) (*RPCClient, error) { + // Connect to the client + conn, err := net.Dial(c.address.Network(), c.address.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + tcpConn.SetKeepAlive(true) + } + + if c.config.TLSConfig != nil { + conn = tls.Client(conn, c.config.TLSConfig) + } + + // Create the actual RPC client + result, err := NewRPCClient(conn, c.config.Plugins) + if err != nil { + conn.Close() + return nil, err + } + + // Begin the stream syncing so that stdin, out, err work properly + err = result.SyncStreams( + c.config.SyncStdout, + c.config.SyncStderr) + if err != nil { + result.Close() + return nil, err + } + + return result, nil +} + +// NewRPCClient creates a client from an already-open connection-like value. +// Dial is typically used instead. +func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) { + // Create the yamux client so we can multiplex + mux, err := yamux.Client(conn, nil) + if err != nil { + conn.Close() + return nil, err + } + + // Connect to the control stream. 
+ control, err := mux.Open() + if err != nil { + mux.Close() + return nil, err + } + + // Connect stdout, stderr streams + stdstream := make([]net.Conn, 2) + for i, _ := range stdstream { + stdstream[i], err = mux.Open() + if err != nil { + mux.Close() + return nil, err + } + } + + // Create the broker and start it up + broker := newMuxBroker(mux) + go broker.Run() + + // Build the client using our broker and control channel. + return &RPCClient{ + broker: broker, + control: rpc.NewClient(control), + plugins: plugins, + stdout: stdstream[0], + stderr: stdstream[1], + }, nil +} + +// SyncStreams should be called to enable syncing of stdout, +// stderr with the plugin. +// +// This will return immediately and the syncing will continue to happen +// in the background. You do not need to launch this in a goroutine itself. +// +// This should never be called multiple times. +func (c *RPCClient) SyncStreams(stdout io.Writer, stderr io.Writer) error { + go copyStream("stdout", stdout, c.stdout) + go copyStream("stderr", stderr, c.stderr) + return nil +} + +// Close closes the connection. The client is no longer usable after this +// is called. +func (c *RPCClient) Close() error { + // Call the control channel and ask it to gracefully exit. If this + // errors, then we save it so that we always return an error but we + // want to try to close the other channels anyways. + var empty struct{} + returnErr := c.control.Call("Control.Quit", true, &empty) + + // Close the other streams we have + if err := c.control.Close(); err != nil { + return err + } + if err := c.stdout.Close(); err != nil { + return err + } + if err := c.stderr.Close(); err != nil { + return err + } + if err := c.broker.Close(); err != nil { + return err + } + + // Return back the error we got from Control.Quit. This is very important + // since we MUST return non-nil error if this fails so that Client.Kill + // will properly try a process.Kill. + return returnErr +} + +func (c *RPCClient) Dispense(name string) (interface{}, error) { + p, ok := c.plugins[name] + if !ok { + return nil, fmt.Errorf("unknown plugin type: %s", name) + } + + var id uint32 + if err := c.control.Call( + "Dispenser.Dispense", name, &id); err != nil { + return nil, err + } + + conn, err := c.broker.Dial(id) + if err != nil { + return nil, err + } + + return p.Client(c.broker, rpc.NewClient(conn)) +} + +// Ping pings the connection to ensure it is still alive. +// +// The error from the RPC call is returned exactly if you want to inspect +// it for further error analysis. Any error returned from here would indicate +// that the connection to the plugin is not healthy. +func (c *RPCClient) Ping() error { + var empty struct{} + return c.control.Call("Control.Ping", true, &empty) +} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go new file mode 100644 index 000000000..cec0a3d93 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/rpc_server.go @@ -0,0 +1,209 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "errors" + "fmt" + "io" + "log" + "net" + "net/rpc" + "sync" + + "github.com/hashicorp/yamux" +) + +// RPCServer listens for network connections and then dispenses interface +// implementations over net/rpc. +// +// After setting the fields below, they shouldn't be read again directly +// from the structure which may be reading/writing them concurrently. 
+type RPCServer struct { + Plugins map[string]Plugin + + // Stdout, Stderr are what this server will use instead of the + // normal stdin/out/err. This is because due to the multi-process nature + // of our plugin system, we can't use the normal process values so we + // make our own custom one we pipe across. + Stdout io.Reader + Stderr io.Reader + + // DoneCh should be set to a non-nil channel that will be closed + // when the control requests the RPC server to end. + DoneCh chan<- struct{} + + lock sync.Mutex +} + +// ServerProtocol impl. +func (s *RPCServer) Init() error { return nil } + +// ServerProtocol impl. +func (s *RPCServer) Config() string { return "" } + +// ServerProtocol impl. +func (s *RPCServer) Serve(lis net.Listener) { + defer s.done() + + for { + conn, err := lis.Accept() + if err != nil { + severity := "ERR" + if errors.Is(err, net.ErrClosed) { + severity = "DEBUG" + } + log.Printf("[%s] plugin: plugin server: %s", severity, err) + return + } + + go s.ServeConn(conn) + } +} + +// ServeConn runs a single connection. +// +// ServeConn blocks, serving the connection until the client hangs up. +func (s *RPCServer) ServeConn(conn io.ReadWriteCloser) { + // First create the yamux server to wrap this connection + mux, err := yamux.Server(conn, nil) + if err != nil { + conn.Close() + log.Printf("[ERR] plugin: error creating yamux server: %s", err) + return + } + + // Accept the control connection + control, err := mux.Accept() + if err != nil { + mux.Close() + if err != io.EOF { + log.Printf("[ERR] plugin: error accepting control connection: %s", err) + } + + return + } + + // Connect the stdstreams (in, out, err) + stdstream := make([]net.Conn, 2) + for i := range stdstream { + stdstream[i], err = mux.Accept() + if err != nil { + mux.Close() + log.Printf("[ERR] plugin: accepting stream %d: %s", i, err) + return + } + } + + // Copy std streams out to the proper place + go copyStream("stdout", stdstream[0], s.Stdout) + go copyStream("stderr", stdstream[1], s.Stderr) + + // Create the broker and start it up + broker := newMuxBroker(mux) + go broker.Run() + + // Use the control connection to build the dispenser and serve the + // connection. + server := rpc.NewServer() + server.RegisterName("Control", &controlServer{ + server: s, + }) + server.RegisterName("Dispenser", &dispenseServer{ + broker: broker, + plugins: s.Plugins, + }) + server.ServeConn(control) +} + +// done is called internally by the control server to trigger the +// doneCh to close which is listened to by the main process to cleanly +// exit. +func (s *RPCServer) done() { + s.lock.Lock() + defer s.lock.Unlock() + + if s.DoneCh != nil { + close(s.DoneCh) + s.DoneCh = nil + } +} + +// dispenseServer dispenses variousinterface implementations for Terraform. +type controlServer struct { + server *RPCServer +} + +// Ping can be called to verify the connection (and likely the binary) +// is still alive to a plugin. +func (c *controlServer) Ping( + null bool, response *struct{}, +) error { + *response = struct{}{} + return nil +} + +func (c *controlServer) Quit( + null bool, response *struct{}, +) error { + // End the server + c.server.done() + + // Always return true + *response = struct{}{} + + return nil +} + +// dispenseServer dispenses variousinterface implementations for Terraform. 
+type dispenseServer struct { + broker *MuxBroker + plugins map[string]Plugin +} + +func (d *dispenseServer) Dispense( + name string, response *uint32, +) error { + // Find the function to create this implementation + p, ok := d.plugins[name] + if !ok { + return fmt.Errorf("unknown plugin type: %s", name) + } + + // Create the implementation first so we know if there is an error. + impl, err := p.Server(d.broker) + if err != nil { + // We turn the error into an errors error so that it works across RPC + return errors.New(err.Error()) + } + + // Reserve an ID for our implementation + id := d.broker.NextId() + *response = id + + // Run the rest in a goroutine since it can only happen once this RPC + // call returns. We wait for a connection for the plugin implementation + // and serve it. + go func() { + conn, err := d.broker.Accept(id) + if err != nil { + log.Printf("[ERR] go-plugin: plugin dispense error: %s: %s", name, err) + return + } + + serve(conn, "Plugin", impl) + }() + + return nil +} + +func serve(conn io.ReadWriteCloser, name string, v interface{}) { + server := rpc.NewServer() + if err := server.RegisterName(name, v); err != nil { + log.Printf("[ERR] go-plugin: plugin dispense error: %s", err) + return + } + + server.ServeConn(conn) +} diff --git a/vendor/github.com/hashicorp/go-plugin/runner/runner.go b/vendor/github.com/hashicorp/go-plugin/runner/runner.go new file mode 100644 index 000000000..e638ae5f8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/runner/runner.go @@ -0,0 +1,72 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package runner + +import ( + "context" + "io" +) + +// Runner defines the interface required by go-plugin to manage the lifecycle of +// of a plugin and attempt to negotiate a connection with it. Note that this +// is orthogonal to the protocol and transport used, which is negotiated over stdout. +type Runner interface { + // Start should start the plugin and ensure any work required for servicing + // other interface methods is done. If the context is cancelled, it should + // only abort any attempts to _start_ the plugin. Waiting and shutdown are + // handled separately. + Start(ctx context.Context) error + + // Diagnose makes a best-effort attempt to return any debug information that + // might help users understand why a plugin failed to start and negotiate a + // connection. + Diagnose(ctx context.Context) string + + // Stdout is used to negotiate the go-plugin protocol. + Stdout() io.ReadCloser + + // Stderr is used for forwarding plugin logs to the host process logger. + Stderr() io.ReadCloser + + // Name is a human-friendly name for the plugin, such as the path to the + // executable. It does not have to be unique. + Name() string + + AttachedRunner +} + +// AttachedRunner defines a limited subset of Runner's interface to represent the +// reduced responsibility for plugin lifecycle when attaching to an already running +// plugin. +type AttachedRunner interface { + // Wait should wait until the plugin stops running, whether in response to + // an out of band signal or in response to calling Kill(). + Wait(ctx context.Context) error + + // Kill should stop the plugin and perform any cleanup required. + Kill(ctx context.Context) error + + // ID is a unique identifier to represent the running plugin. e.g. pid or + // container ID. + ID() string + + AddrTranslator +} + +// AddrTranslator translates addresses between the execution context of the host +// process and the plugin. 
For example, if the plugin is in a container, the file +// path for a Unix socket may be different between the host and the container. +// +// It is only intended to be used by the host process. +type AddrTranslator interface { + // Called before connecting on any addresses received back from the plugin. + PluginToHost(pluginNet, pluginAddr string) (hostNet string, hostAddr string, err error) + + // Called on any host process addresses before they are sent to the plugin. + HostToPlugin(hostNet, hostAddr string) (pluginNet string, pluginAddr string, err error) +} + +// ReattachFunc can be passed to a client's reattach config to reattach to an +// already running plugin instead of starting it ourselves. +type ReattachFunc func() (AttachedRunner, error) diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go new file mode 100644 index 000000000..e741bc7fa --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/server.go @@ -0,0 +1,665 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "io" + "net" + "os" + "os/signal" + "os/user" + "runtime" + "sort" + "strconv" + "strings" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/grpcmux" + "google.golang.org/grpc" +) + +// CoreProtocolVersion is the ProtocolVersion of the plugin system itself. +// We will increment this whenever we change any protocol behavior. This +// will invalidate any prior plugins but will at least allow us to iterate +// on the core in a safe way. We will do our best to do this very +// infrequently. +const CoreProtocolVersion = 1 + +// HandshakeConfig is the configuration used by client and servers to +// handshake before starting a plugin connection. This is embedded by +// both ServeConfig and ClientConfig. +// +// In practice, the plugin host creates a HandshakeConfig that is exported +// and plugins then can easily consume it. +type HandshakeConfig struct { + // ProtocolVersion is the version that clients must match on to + // agree they can communicate. This should match the ProtocolVersion + // set on ClientConfig when using a plugin. + // This field is not required if VersionedPlugins are being used in the + // Client or Server configurations. + ProtocolVersion uint + + // MagicCookieKey and value are used as a very basic verification + // that a plugin is intended to be launched. This is not a security + // measure, just a UX feature. If the magic cookie doesn't match, + // we show human-friendly output. + MagicCookieKey string + MagicCookieValue string +} + +// PluginSet is a set of plugins provided to be registered in the plugin +// server. +type PluginSet map[string]Plugin + +// ServeConfig configures what sorts of plugins are served. +type ServeConfig struct { + // HandshakeConfig is the configuration that must match clients. + HandshakeConfig + + // TLSProvider is a function that returns a configured tls.Config. + TLSProvider func() (*tls.Config, error) + + // Plugins are the plugins that are served. + // The implied version of this PluginSet is the Handshake.ProtocolVersion. + Plugins PluginSet + + // VersionedPlugins is a map of PluginSets for specific protocol versions. + // These can be used to negotiate a compatible version between client and + // server. If this is set, Handshake.ProtocolVersion is not required. 
+ VersionedPlugins map[int]PluginSet + + // GRPCServer should be non-nil to enable serving the plugins over + // gRPC. This is a function to create the server when needed with the + // given server options. The server options populated by go-plugin will + // be for TLS if set. You may modify the input slice. + // + // Note that the grpc.Server will automatically be registered with + // the gRPC health checking service. This is not optional since go-plugin + // relies on this to implement Ping(). + GRPCServer func([]grpc.ServerOption) *grpc.Server + + // Logger is used to pass a logger into the server. If none is provided the + // server will create a default logger. + Logger hclog.Logger + + // Test, if non-nil, will put plugin serving into "test mode". This is + // meant to be used as part of `go test` within a plugin's codebase to + // launch the plugin in-process and output a ReattachConfig. + // + // This changes the behavior of the server in a number of ways to + // accomodate the expectation of running in-process: + // + // * The handshake cookie is not validated. + // * Stdout/stderr will receive plugin reads and writes + // * Connection information will not be sent to stdout + // + Test *ServeTestConfig +} + +// ServeTestConfig configures plugin serving for test mode. See ServeConfig.Test. +type ServeTestConfig struct { + // Context, if set, will force the plugin serving to end when cancelled. + // This is only a test configuration because the non-test configuration + // expects to take over the process and therefore end on an interrupt or + // kill signal. For tests, we need to kill the plugin serving routinely + // and this provides a way to do so. + // + // If you want to wait for the plugin process to close before moving on, + // you can wait on CloseCh. + Context context.Context + + // If this channel is non-nil, we will send the ReattachConfig via + // this channel. This can be encoded (via JSON recommended) to the + // plugin client to attach to this plugin. + ReattachConfigCh chan<- *ReattachConfig + + // CloseCh, if non-nil, will be closed when serving exits. This can be + // used along with Context to determine when the server is fully shut down. + // If this is not set, you can still use Context on its own, but note there + // may be a period of time between canceling the context and the plugin + // server being shut down. + CloseCh chan<- struct{} + + // SyncStdio, if true, will enable the client side "SyncStdout/Stderr" + // functionality to work. This defaults to false because the implementation + // of making this work within test environments is particularly messy + // and SyncStdio functionality is fairly rare, so we default to the simple + // scenario. + SyncStdio bool +} + +func unixSocketConfigFromEnv() UnixSocketConfig { + return UnixSocketConfig{ + Group: os.Getenv(EnvUnixSocketGroup), + socketDir: os.Getenv(EnvUnixSocketDir), + } +} + +// protocolVersion determines the protocol version and plugin set to be used by +// the server. In the event that there is no suitable version, the last version +// in the config is returned leaving the client to report the incompatibility. 
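
Taken together, HandshakeConfig, PluginSet and ServeConfig are all a plugin binary needs to hand to Serve (defined further below). A minimal, hypothetical net/rpc plugin might look like the following sketch; the `Greeter` service, the `greeterPlugin` adapter and the magic-cookie values are invented for illustration and must match whatever the host process expects.

```go
package main

import (
	"net/rpc"

	plugin "github.com/hashicorp/go-plugin"
)

// handshake must mirror the HandshakeConfig compiled into the host process;
// the cookie is only the UX check described above, not a security measure.
var handshake = plugin.HandshakeConfig{
	ProtocolVersion:  1,
	MagicCookieKey:   "EXAMPLE_PLUGIN",
	MagicCookieValue: "hello",
}

// Greeter is a hypothetical service exposed over net/rpc.
type Greeter struct{}

func (g *Greeter) Greet(name string, reply *string) error {
	*reply = "Hello, " + name
	return nil
}

// greeterPlugin adapts Greeter to the Plugin interface for the net/rpc protocol.
type greeterPlugin struct{}

func (greeterPlugin) Server(*plugin.MuxBroker) (interface{}, error) {
	return &Greeter{}, nil
}

func (greeterPlugin) Client(_ *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
	return c, nil // a real plugin would wrap c in a typed client
}

func main() {
	// Serve blocks until the host asks the plugin to exit.
	plugin.Serve(&plugin.ServeConfig{
		HandshakeConfig: handshake,
		Plugins: plugin.PluginSet{
			"greeter": greeterPlugin{},
		},
	})
}
```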
+func protocolVersion(opts *ServeConfig) (int, Protocol, PluginSet) { + protoVersion := int(opts.ProtocolVersion) + pluginSet := opts.Plugins + protoType := ProtocolNetRPC + // Check if the client sent a list of acceptable versions + var clientVersions []int + if vs := os.Getenv("PLUGIN_PROTOCOL_VERSIONS"); vs != "" { + for _, s := range strings.Split(vs, ",") { + v, err := strconv.Atoi(s) + if err != nil { + fmt.Fprintf(os.Stderr, "server sent invalid plugin version %q", s) + continue + } + clientVersions = append(clientVersions, v) + } + } + + // We want to iterate in reverse order, to ensure we match the newest + // compatible plugin version. + sort.Sort(sort.Reverse(sort.IntSlice(clientVersions))) + + // set the old un-versioned fields as if they were versioned plugins + if opts.VersionedPlugins == nil { + opts.VersionedPlugins = make(map[int]PluginSet) + } + + if pluginSet != nil { + opts.VersionedPlugins[protoVersion] = pluginSet + } + + // Sort the version to make sure we match the latest first + var versions []int + for v := range opts.VersionedPlugins { + versions = append(versions, v) + } + + sort.Sort(sort.Reverse(sort.IntSlice(versions))) + + // See if we have multiple versions of Plugins to choose from + for _, version := range versions { + // Record each version, since we guarantee that this returns valid + // values even if they are not a protocol match. + protoVersion = version + pluginSet = opts.VersionedPlugins[version] + + // If we have a configured gRPC server we should select a protocol + if opts.GRPCServer != nil { + // All plugins in a set must use the same transport, so check the first + // for the protocol type + for _, p := range pluginSet { + switch p.(type) { + case GRPCPlugin: + protoType = ProtocolGRPC + default: + protoType = ProtocolNetRPC + } + break + } + } + + for _, clientVersion := range clientVersions { + if clientVersion == protoVersion { + return protoVersion, protoType, pluginSet + } + } + } + + // Return the lowest version as the fallback. + // Since we iterated over all the versions in reverse order above, these + // values are from the lowest version number plugins (which may be from + // a combination of the Handshake.ProtocolVersion and ServeConfig.Plugins + // fields). This allows serving the oldest version of our plugins to a + // legacy client that did not send a PLUGIN_PROTOCOL_VERSIONS list. + return protoVersion, protoType, pluginSet +} + +// Serve serves the plugins given by ServeConfig. +// +// Serve doesn't return until the plugin is done being executed. Any +// fixable errors will be output to os.Stderr and the process will +// exit with a status code of 1. Serve will panic for unexpected +// conditions where a user's fix is unknown. +// +// This is the method that plugins should call in their main() functions. +func Serve(opts *ServeConfig) { + exitCode := -1 + // We use this to trigger an `os.Exit` so that we can execute our other + // deferred functions. In test mode, we just output the err to stderr + // and return. + defer func() { + if opts.Test == nil && exitCode >= 0 { + os.Exit(exitCode) + } + + if opts.Test != nil && opts.Test.CloseCh != nil { + close(opts.Test.CloseCh) + } + }() + + if opts.Test == nil { + // Validate the handshake config + if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" { + fmt.Fprintf(os.Stderr, + "Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+ + "key or value was set. 
Please notify the plugin author and report\n"+ + "this as a bug.\n") + exitCode = 1 + return + } + + // First check the cookie + if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue { + fmt.Fprintf(os.Stderr, + "This binary is a plugin. These are not meant to be executed directly.\n"+ + "Please execute the program that consumes these plugins, which will\n"+ + "load any plugins automatically\n") + exitCode = 1 + return + } + } + + // negotiate the version and plugins + // start with default version in the handshake config + protoVersion, protoType, pluginSet := protocolVersion(opts) + + logger := opts.Logger + if logger == nil { + // internal logger to os.Stderr + logger = hclog.New(&hclog.LoggerOptions{ + Level: hclog.Trace, + Output: os.Stderr, + JSONFormat: true, + }) + } + + // Register a listener so we can accept a connection + listener, err := serverListener(unixSocketConfigFromEnv()) + if err != nil { + logger.Error("plugin init error", "error", err) + return + } + + // Close the listener on return. We wrap this in a func() on purpose + // because the "listener" reference may change to TLS. + defer func() { + listener.Close() + }() + + var tlsConfig *tls.Config + if opts.TLSProvider != nil { + tlsConfig, err = opts.TLSProvider() + if err != nil { + logger.Error("plugin tls init", "error", err) + return + } + } + + var serverCert string + clientCert := os.Getenv("PLUGIN_CLIENT_CERT") + // If the client is configured using AutoMTLS, the certificate will be here, + // and we need to generate our own in response. + if tlsConfig == nil && clientCert != "" { + logger.Info("configuring server automatic mTLS") + clientCertPool := x509.NewCertPool() + if !clientCertPool.AppendCertsFromPEM([]byte(clientCert)) { + logger.Error("client cert provided but failed to parse", "cert", clientCert) + } + + certPEM, keyPEM, err := generateCert() + if err != nil { + logger.Error("failed to generate server certificate", "error", err) + panic(err) + } + + cert, err := tls.X509KeyPair(certPEM, keyPEM) + if err != nil { + logger.Error("failed to parse server certificate", "error", err) + panic(err) + } + + tlsConfig = &tls.Config{ + Certificates: []tls.Certificate{cert}, + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: clientCertPool, + MinVersion: tls.VersionTLS12, + RootCAs: clientCertPool, + ServerName: "localhost", + } + + // We send back the raw leaf cert data for the client rather than the + // PEM, since the protocol can't handle newlines. + serverCert = base64.RawStdEncoding.EncodeToString(cert.Certificate[0]) + } + + // Create the channel to tell us when we're done + doneCh := make(chan struct{}) + + // Create our new stdout, stderr files. These will override our built-in + // stdout/stderr so that it works across the stream boundary. + var stdout_r, stderr_r io.Reader + stdout_r, stdout_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + stderr_r, stderr_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + + // If we're in test mode, we tee off the reader and write the data + // as-is to our normal Stdout and Stderr so that they continue working + // while stdio works. This is because in test mode, we assume we're running + // in `go test` or some equivalent and we want output to go to standard + // locations. + if opts.Test != nil { + // TODO(mitchellh): This isn't super ideal because a TeeReader + // only works if the reader side is actively read. 
If we never + // connect via a plugin client, the output still gets swallowed. + stdout_r = io.TeeReader(stdout_r, os.Stdout) + stderr_r = io.TeeReader(stderr_r, os.Stderr) + } + + // Build the server type + var server ServerProtocol + switch protoType { + case ProtocolNetRPC: + // If we have a TLS configuration then we wrap the listener + // ourselves and do it at that level. + if tlsConfig != nil { + listener = tls.NewListener(listener, tlsConfig) + } + + // Create the RPC server to dispense + server = &RPCServer{ + Plugins: pluginSet, + Stdout: stdout_r, + Stderr: stderr_r, + DoneCh: doneCh, + } + + case ProtocolGRPC: + var muxer *grpcmux.GRPCServerMuxer + if multiplex, _ := strconv.ParseBool(os.Getenv(envMultiplexGRPC)); multiplex { + muxer = grpcmux.NewGRPCServerMuxer(logger, listener) + listener = muxer + } + + // Create the gRPC server + server = &GRPCServer{ + Plugins: pluginSet, + Server: opts.GRPCServer, + TLS: tlsConfig, + Stdout: stdout_r, + Stderr: stderr_r, + DoneCh: doneCh, + logger: logger, + muxer: muxer, + } + + default: + panic("unknown server protocol: " + protoType) + } + + // Initialize the servers + if err := server.Init(); err != nil { + logger.Error("protocol init", "error", err) + return + } + + logger.Debug("plugin address", "network", listener.Addr().Network(), "address", listener.Addr().String()) + + // Output the address and service name to stdout so that the client can + // bring it up. In test mode, we don't do this because clients will + // attach via a reattach config. + if opts.Test == nil { + const grpcBrokerMultiplexingSupported = true + protocolLine := fmt.Sprintf("%d|%d|%s|%s|%s|%s", + CoreProtocolVersion, + protoVersion, + listener.Addr().Network(), + listener.Addr().String(), + protoType, + serverCert) + + // Old clients will error with new plugins if we blindly append the + // seventh segment for gRPC broker multiplexing support, because old + // client code uses strings.SplitN(line, "|", 6), which means a seventh + // segment will get appended to the sixth segment as "sixthpart|true". + // + // If the environment variable is set, we assume the client is new enough + // to handle a seventh segment, as it should now use + // strings.Split(line, "|") and always handle each segment individually. + if os.Getenv(envMultiplexGRPC) != "" { + protocolLine += fmt.Sprintf("|%v", grpcBrokerMultiplexingSupported) + } + fmt.Printf("%s\n", protocolLine) + os.Stdout.Sync() + } else if ch := opts.Test.ReattachConfigCh; ch != nil { + // Send back the reattach config that can be used. This isn't + // quite ready if they connect immediately but the client should + // retry a few times. + ch <- &ReattachConfig{ + Protocol: protoType, + ProtocolVersion: protoVersion, + Addr: listener.Addr(), + Pid: os.Getpid(), + Test: true, + } + } + + // Eat the interrupts. In test mode we disable this so that go test + // can be cancelled properly. + if opts.Test == nil { + ch := make(chan os.Signal, 1) + signal.Notify(ch, os.Interrupt) + go func() { + count := 0 + for { + <-ch + count++ + logger.Trace("plugin received interrupt signal, ignoring", "count", count) + } + }() + } + + // Set our stdout, stderr to the stdio stream that clients can retrieve + // using ClientConfig.SyncStdout/err. We only do this for non-test mode + // or if the test mode explicitly requests it. + // + // In test mode, we use a multiwriter so that the data continues going + // to the normal stdout/stderr so output can show up in test logs. 
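
The `|`-separated announcement line built above is the only thing a non-test plugin writes to stdout before handing its output over to the stdio streams. A hedged, host-side sketch of reading it back could look like this, splitting on every `|` so the optional seventh multiplexing segment is handled on its own; `handshakeInfo` and `parseHandshake` are illustrative names, not the library's actual parser.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// handshakeInfo is a hypothetical holder for the announcement line's fields:
// core-version|app-version|network|address|protocol|cert and, optionally, a
// seventh gRPC-broker-multiplexing segment.
type handshakeInfo struct {
	CoreVersion int
	AppVersion  int
	Network     string
	Addr        string
	Protocol    string
	ServerCert  string
	Multiplex   bool
}

// parseHandshake splits on every "|" so the optional seventh segment is
// handled individually, as the comment in Serve recommends for newer clients.
func parseHandshake(line string) (handshakeInfo, error) {
	parts := strings.Split(strings.TrimSpace(line), "|")
	if len(parts) < 6 {
		return handshakeInfo{}, fmt.Errorf("unexpected handshake line: %q", line)
	}
	core, err := strconv.Atoi(parts[0])
	if err != nil {
		return handshakeInfo{}, fmt.Errorf("bad core protocol version: %w", err)
	}
	app, err := strconv.Atoi(parts[1])
	if err != nil {
		return handshakeInfo{}, fmt.Errorf("bad app protocol version: %w", err)
	}
	info := handshakeInfo{
		CoreVersion: core,
		AppVersion:  app,
		Network:     parts[2],
		Addr:        parts[3],
		Protocol:    parts[4],
		ServerCert:  parts[5],
	}
	if len(parts) > 6 {
		info.Multiplex, _ = strconv.ParseBool(parts[6])
	}
	return info, nil
}

func main() {
	info, err := parseHandshake("1|1|unix|/tmp/plugin123|grpc||true")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", info)
}
```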
We + // also send to the stdio stream so that clients can continue working + // if they depend on that. + if opts.Test == nil || opts.Test.SyncStdio { + if opts.Test != nil { + // In test mode we need to maintain the original values so we can + // reset it. + defer func(out, err *os.File) { + os.Stdout = out + os.Stderr = err + }(os.Stdout, os.Stderr) + } + os.Stdout = stdout_w + os.Stderr = stderr_w + } + + // Accept connections and wait for completion + go server.Serve(listener) + + ctx := context.Background() + if opts.Test != nil && opts.Test.Context != nil { + ctx = opts.Test.Context + } + select { + case <-ctx.Done(): + // Cancellation. We can stop the server by closing the listener. + // This isn't graceful at all but this is currently only used by + // tests and its our only way to stop. + listener.Close() + + // If this is a grpc server, then we also ask the server itself to + // end which will kill all connections. There isn't an easy way to do + // this for net/rpc currently but net/rpc is more and more unused. + if s, ok := server.(*GRPCServer); ok { + s.Stop() + } + + // Wait for the server itself to shut down + <-doneCh + + case <-doneCh: + // Note that given the documentation of Serve we should probably be + // setting exitCode = 0 and using os.Exit here. That's how it used to + // work before extracting this library. However, for years we've done + // this so we'll keep this functionality. + } +} + +func serverListener(unixSocketCfg UnixSocketConfig) (net.Listener, error) { + if runtime.GOOS == "windows" { + return serverListener_tcp() + } + + return serverListener_unix(unixSocketCfg) +} + +func serverListener_tcp() (net.Listener, error) { + envMinPort := os.Getenv("PLUGIN_MIN_PORT") + envMaxPort := os.Getenv("PLUGIN_MAX_PORT") + + var minPort, maxPort int64 + var err error + + switch { + case len(envMinPort) == 0: + minPort = 0 + default: + minPort, err = strconv.ParseInt(envMinPort, 10, 32) + if err != nil { + return nil, fmt.Errorf("Couldn't get value from PLUGIN_MIN_PORT: %v", err) + } + } + + switch { + case len(envMaxPort) == 0: + maxPort = 0 + default: + maxPort, err = strconv.ParseInt(envMaxPort, 10, 32) + if err != nil { + return nil, fmt.Errorf("Couldn't get value from PLUGIN_MAX_PORT: %v", err) + } + } + + if minPort > maxPort { + return nil, fmt.Errorf("PLUGIN_MIN_PORT value of %d is greater than PLUGIN_MAX_PORT value of %d", minPort, maxPort) + } + + for port := minPort; port <= maxPort; port++ { + address := fmt.Sprintf("127.0.0.1:%d", port) + listener, err := net.Listen("tcp", address) + if err == nil { + return listener, nil + } + } + + return nil, errors.New("Couldn't bind plugin TCP listener") +} + +func serverListener_unix(unixSocketCfg UnixSocketConfig) (net.Listener, error) { + tf, err := os.CreateTemp(unixSocketCfg.socketDir, "plugin") + if err != nil { + return nil, err + } + path := tf.Name() + + // Close the file and remove it because it has to not exist for + // the domain socket. + if err := tf.Close(); err != nil { + return nil, err + } + if err := os.Remove(path); err != nil { + return nil, err + } + + l, err := net.Listen("unix", path) + if err != nil { + return nil, err + } + + // By default, unix sockets are only writable by the owner. Set up a custom + // group owner and group write permissions if configured. + if unixSocketCfg.Group != "" { + err = setGroupWritable(path, unixSocketCfg.Group, 0o660) + if err != nil { + return nil, err + } + } + + // Wrap the listener in rmListener so that the Unix domain socket file + // is removed on close. 
+ return newDeleteFileListener(l, path), nil +} + +func setGroupWritable(path, groupString string, mode os.FileMode) error { + groupID, err := strconv.Atoi(groupString) + if err != nil { + group, err := user.LookupGroup(groupString) + if err != nil { + return fmt.Errorf("failed to find gid from %q: %w", groupString, err) + } + groupID, err = strconv.Atoi(group.Gid) + if err != nil { + return fmt.Errorf("failed to parse %q group's gid as an integer: %w", groupString, err) + } + } + + err = os.Chown(path, -1, groupID) + if err != nil { + return err + } + + err = os.Chmod(path, mode) + if err != nil { + return err + } + + return nil +} + +// rmListener is an implementation of net.Listener that forwards most +// calls to the listener but also calls an additional close function. We +// use this to cleanup the unix domain socket on close, as well as clean +// up multiplexed listeners. +type rmListener struct { + net.Listener + close func() error +} + +func newDeleteFileListener(ln net.Listener, path string) *rmListener { + return &rmListener{ + Listener: ln, + close: func() error { + return os.Remove(path) + }, + } +} + +func (l *rmListener) Close() error { + // Close the listener itself + if err := l.Listener.Close(); err != nil { + return err + } + + // Remove the file + return l.close() +} diff --git a/vendor/github.com/hashicorp/go-plugin/server_mux.go b/vendor/github.com/hashicorp/go-plugin/server_mux.go new file mode 100644 index 000000000..6b14b0c29 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/server_mux.go @@ -0,0 +1,34 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "fmt" + "os" +) + +// ServeMuxMap is the type that is used to configure ServeMux +type ServeMuxMap map[string]*ServeConfig + +// ServeMux is like Serve, but serves multiple types of plugins determined +// by the argument given on the command-line. +// +// This command doesn't return until the plugin is done being executed. Any +// errors are logged or output to stderr. +func ServeMux(m ServeMuxMap) { + if len(os.Args) != 2 { + fmt.Fprintf(os.Stderr, + "Invoked improperly. This is an internal command that shouldn't\n"+ + "be manually invoked.\n") + os.Exit(1) + } + + opts, ok := m[os.Args[1]] + if !ok { + fmt.Fprintf(os.Stderr, "Unknown plugin: %s\n", os.Args[1]) + os.Exit(1) + } + + Serve(opts) +} diff --git a/vendor/github.com/hashicorp/go-plugin/stream.go b/vendor/github.com/hashicorp/go-plugin/stream.go new file mode 100644 index 000000000..a2348642d --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/stream.go @@ -0,0 +1,21 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "io" + "log" +) + +func copyStream(name string, dst io.Writer, src io.Reader) { + if src == nil { + panic(name + ": src is nil") + } + if dst == nil { + panic(name + ": dst is nil") + } + if _, err := io.Copy(dst, src); err != nil && err != io.EOF { + log.Printf("[ERR] plugin: stream copy '%s' error: %s", name, err) + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go new file mode 100644 index 000000000..a8735dfc8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/testing.go @@ -0,0 +1,185 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "bytes" + "context" + "io" + "net" + "net/rpc" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/grpcmux" + "github.com/mitchellh/go-testing-interface" + "google.golang.org/grpc" +) + +// TestOptions allows specifying options that can affect the behavior of the +// test functions +type TestOptions struct { + //ServerStdout causes the given value to be used in place of a blank buffer + //for RPCServer's Stdout + ServerStdout io.ReadCloser + + //ServerStderr causes the given value to be used in place of a blank buffer + //for RPCServer's Stderr + ServerStderr io.ReadCloser +} + +// The testing file contains test helpers that you can use outside of +// this package for making it easier to test plugins themselves. + +// TestConn is a helper function for returning a client and server +// net.Conn connected to each other. +func TestConn(t testing.T) (net.Conn, net.Conn) { + // Listen to any local port. This listener will be closed + // after a single connection is established. + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + // Start a goroutine to accept our client connection + var serverConn net.Conn + doneCh := make(chan struct{}) + go func() { + defer close(doneCh) + defer l.Close() + var err error + serverConn, err = l.Accept() + if err != nil { + t.Fatalf("err: %s", err) + } + }() + + // Connect to the server + clientConn, err := net.Dial("tcp", l.Addr().String()) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Wait for the server side to acknowledge it has connected + <-doneCh + + return clientConn, serverConn +} + +// TestRPCConn returns a rpc client and server connected to each other. +func TestRPCConn(t testing.T) (*rpc.Client, *rpc.Server) { + clientConn, serverConn := TestConn(t) + + server := rpc.NewServer() + go server.ServeConn(serverConn) + + client := rpc.NewClient(clientConn) + return client, server +} + +// TestPluginRPCConn returns a plugin RPC client and server that are connected +// together and configured. +func TestPluginRPCConn(t testing.T, ps map[string]Plugin, opts *TestOptions) (*RPCClient, *RPCServer) { + // Create two net.Conns we can use to shuttle our control connection + clientConn, serverConn := TestConn(t) + + // Start up the server + server := &RPCServer{Plugins: ps, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer)} + if opts != nil { + if opts.ServerStdout != nil { + server.Stdout = opts.ServerStdout + } + if opts.ServerStderr != nil { + server.Stderr = opts.ServerStderr + } + } + go server.ServeConn(serverConn) + + // Connect the client to the server + client, err := NewRPCClient(clientConn, ps) + if err != nil { + t.Fatalf("err: %s", err) + } + + return client, server +} + +// TestGRPCConn returns a gRPC client conn and grpc server that are connected +// together and configured. The register function is used to register services +// prior to the Serve call. This is used to test gRPC connections. 
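
As a rough illustration of the helpers above, a test can use TestConn to get a pre-connected pair of net.Conns without launching a plugin process. The sketch assumes that the go-testing-interface `testing.T` the helper expects is satisfied by the standard `*testing.T`, which is the purpose of that shim; `TestEcho` itself is hypothetical.

```go
package plugin_test

import (
	"io"
	"testing"

	plugin "github.com/hashicorp/go-plugin"
)

// TestEcho is a hypothetical test showing TestConn: it hands back two already
// connected net.Conns, so connection-oriented code can be exercised in-process.
func TestEcho(t *testing.T) {
	clientConn, serverConn := plugin.TestConn(t)
	defer clientConn.Close()
	defer serverConn.Close()

	// Send a small payload from the client side.
	go func() {
		_, _ = clientConn.Write([]byte("ping"))
	}()

	// Read it back on the server side.
	buf := make([]byte, 4)
	if _, err := io.ReadFull(serverConn, buf); err != nil {
		t.Fatalf("read: %s", err)
	}
	if string(buf) != "ping" {
		t.Fatalf("unexpected payload: %q", buf)
	}
}
```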
+func TestGRPCConn(t testing.T, register func(*grpc.Server)) (*grpc.ClientConn, *grpc.Server) { + // Create a listener + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + server := grpc.NewServer() + register(server) + go server.Serve(l) + + // Connect to the server + conn, err := grpc.Dial( + l.Addr().String(), + grpc.WithBlock(), + grpc.WithInsecure()) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Connection successful, close the listener + l.Close() + + return conn, server +} + +// TestPluginGRPCConn returns a plugin gRPC client and server that are connected +// together and configured. This is used to test gRPC connections. +func TestPluginGRPCConn(t testing.T, multiplex bool, ps map[string]Plugin) (*GRPCClient, *GRPCServer) { + // Create a listener + ln, err := serverListener(UnixSocketConfig{}) + if err != nil { + t.Fatal(err) + } + + logger := hclog.New(&hclog.LoggerOptions{ + Level: hclog.Debug, + }) + + // Start up the server + var muxer *grpcmux.GRPCServerMuxer + if multiplex { + muxer = grpcmux.NewGRPCServerMuxer(logger, ln) + ln = muxer + } + server := &GRPCServer{ + Plugins: ps, + DoneCh: make(chan struct{}), + Server: DefaultGRPCServer, + Stdout: new(bytes.Buffer), + Stderr: new(bytes.Buffer), + logger: logger, + muxer: muxer, + } + if err := server.Init(); err != nil { + t.Fatalf("err: %s", err) + } + go server.Serve(ln) + + client := &Client{ + address: ln.Addr(), + protocol: ProtocolGRPC, + config: &ClientConfig{ + Plugins: ps, + GRPCBrokerMultiplex: multiplex, + }, + logger: logger, + } + + grpcClient, err := newGRPCClient(context.Background(), client) + if err != nil { + t.Fatal(err) + } + + return grpcClient, server +} diff --git a/vendor/github.com/hashicorp/yamux/.gitignore b/vendor/github.com/hashicorp/yamux/.gitignore new file mode 100644 index 000000000..836562412 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/hashicorp/yamux/LICENSE b/vendor/github.com/hashicorp/yamux/LICENSE new file mode 100644 index 000000000..f0e5c79e1 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. 
"Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. 
under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. 
Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. 
This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. 
+ +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. \ No newline at end of file diff --git a/vendor/github.com/hashicorp/yamux/README.md b/vendor/github.com/hashicorp/yamux/README.md new file mode 100644 index 000000000..d4db7fc99 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/README.md @@ -0,0 +1,86 @@ +# Yamux + +Yamux (Yet another Multiplexer) is a multiplexing library for Golang. +It relies on an underlying connection to provide reliability +and ordering, such as TCP or Unix domain sockets, and provides +stream-oriented multiplexing. It is inspired by SPDY but is not +interoperable with it. + +Yamux features include: + +* Bi-directional streams + * Streams can be opened by either client or server + * Useful for NAT traversal + * Server-side push support +* Flow control + * Avoid starvation + * Back-pressure to prevent overwhelming a receiver +* Keep Alives + * Enables persistent connections over a load balancer +* Efficient + * Enables thousands of logical streams with low overhead + +## Documentation + +For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/yamux). + +## Specification + +The full specification for Yamux is provided in the `spec.md` file. +It can be used as a guide to implementors of interoperable libraries. + +## Usage + +Using Yamux is remarkably simple: + +```go + +func client() { + // Get a TCP connection + conn, err := net.Dial(...) + if err != nil { + panic(err) + } + + // Setup client side of yamux + session, err := yamux.Client(conn, nil) + if err != nil { + panic(err) + } + + // Open a new stream + stream, err := session.Open() + if err != nil { + panic(err) + } + + // Stream implements net.Conn + stream.Write([]byte("ping")) +} + +func server() { + // Accept a TCP connection + conn, err := listener.Accept() + if err != nil { + panic(err) + } + + // Setup server side of yamux + session, err := yamux.Server(conn, nil) + if err != nil { + panic(err) + } + + // Accept a stream + stream, err := session.Accept() + if err != nil { + panic(err) + } + + // Listen for a message + buf := make([]byte, 4) + stream.Read(buf) +} + +``` + diff --git a/vendor/github.com/hashicorp/yamux/addr.go b/vendor/github.com/hashicorp/yamux/addr.go new file mode 100644 index 000000000..f6a00199c --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/addr.go @@ -0,0 +1,60 @@ +package yamux + +import ( + "fmt" + "net" +) + +// hasAddr is used to get the address from the underlying connection +type hasAddr interface { + LocalAddr() net.Addr + RemoteAddr() net.Addr +} + +// yamuxAddr is used when we cannot get the underlying address +type yamuxAddr struct { + Addr string +} + +func (*yamuxAddr) Network() string { + return "yamux" +} + +func (y *yamuxAddr) String() string { + return fmt.Sprintf("yamux:%s", y.Addr) +} + +// Addr is used to get the address of the listener. +func (s *Session) Addr() net.Addr { + return s.LocalAddr() +} + +// LocalAddr is used to get the local address of the +// underlying connection. 
+func (s *Session) LocalAddr() net.Addr { + addr, ok := s.conn.(hasAddr) + if !ok { + return &yamuxAddr{"local"} + } + return addr.LocalAddr() +} + +// RemoteAddr is used to get the address of remote end +// of the underlying connection +func (s *Session) RemoteAddr() net.Addr { + addr, ok := s.conn.(hasAddr) + if !ok { + return &yamuxAddr{"remote"} + } + return addr.RemoteAddr() +} + +// LocalAddr returns the local address +func (s *Stream) LocalAddr() net.Addr { + return s.session.LocalAddr() +} + +// RemoteAddr returns the remote address +func (s *Stream) RemoteAddr() net.Addr { + return s.session.RemoteAddr() +} diff --git a/vendor/github.com/hashicorp/yamux/const.go b/vendor/github.com/hashicorp/yamux/const.go new file mode 100644 index 000000000..2fdbf844a --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/const.go @@ -0,0 +1,182 @@ +package yamux + +import ( + "encoding/binary" + "fmt" +) + +// NetError implements net.Error +type NetError struct { + err error + timeout bool + temporary bool +} + +func (e *NetError) Error() string { + return e.err.Error() +} + +func (e *NetError) Timeout() bool { + return e.timeout +} + +func (e *NetError) Temporary() bool { + return e.temporary +} + +var ( + // ErrInvalidVersion means we received a frame with an + // invalid version + ErrInvalidVersion = fmt.Errorf("invalid protocol version") + + // ErrInvalidMsgType means we received a frame with an + // invalid message type + ErrInvalidMsgType = fmt.Errorf("invalid msg type") + + // ErrSessionShutdown is used if there is a shutdown during + // an operation + ErrSessionShutdown = fmt.Errorf("session shutdown") + + // ErrStreamsExhausted is returned if we have no more + // stream ids to issue + ErrStreamsExhausted = fmt.Errorf("streams exhausted") + + // ErrDuplicateStream is used if a duplicate stream is + // opened inbound + ErrDuplicateStream = fmt.Errorf("duplicate stream initiated") + + // ErrReceiveWindowExceeded indicates the window was exceeded + ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded") + + // ErrTimeout is used when we reach an IO deadline + ErrTimeout = &NetError{ + err: fmt.Errorf("i/o deadline reached"), + + // Error should meet net.Error interface for timeouts for compatability + // with standard library expectations, such as http servers. + timeout: true, + } + + // ErrStreamClosed is returned when using a closed stream + ErrStreamClosed = fmt.Errorf("stream closed") + + // ErrUnexpectedFlag is set when we get an unexpected flag + ErrUnexpectedFlag = fmt.Errorf("unexpected flag") + + // ErrRemoteGoAway is used when we get a go away from the other side + ErrRemoteGoAway = fmt.Errorf("remote end is not accepting connections") + + // ErrConnectionReset is sent if a stream is reset. This can happen + // if the backlog is exceeded, or if there was a remote GoAway. + ErrConnectionReset = fmt.Errorf("connection reset") + + // ErrConnectionWriteTimeout indicates that we hit the "safety valve" + // timeout writing to the underlying stream connection. + ErrConnectionWriteTimeout = fmt.Errorf("connection write timeout") + + // ErrKeepAliveTimeout is sent if a missed keepalive caused the stream close + ErrKeepAliveTimeout = fmt.Errorf("keepalive timeout") +) + +const ( + // protoVersion is the only version we support + protoVersion uint8 = 0 +) + +const ( + // Data is used for data frames. They are followed + // by length bytes worth of payload. + typeData uint8 = iota + + // WindowUpdate is used to change the window of + // a given stream. 
The length indicates the delta + // update to the window. + typeWindowUpdate + + // Ping is sent as a keep-alive or to measure + // the RTT. The StreamID and Length value are echoed + // back in the response. + typePing + + // GoAway is sent to terminate a session. The StreamID + // should be 0 and the length is an error code. + typeGoAway +) + +const ( + // SYN is sent to signal a new stream. May + // be sent with a data payload + flagSYN uint16 = 1 << iota + + // ACK is sent to acknowledge a new stream. May + // be sent with a data payload + flagACK + + // FIN is sent to half-close the given stream. + // May be sent with a data payload. + flagFIN + + // RST is used to hard close a given stream. + flagRST +) + +const ( + // initialStreamWindow is the initial stream window size + initialStreamWindow uint32 = 256 * 1024 +) + +const ( + // goAwayNormal is sent on a normal termination + goAwayNormal uint32 = iota + + // goAwayProtoErr sent on a protocol error + goAwayProtoErr + + // goAwayInternalErr sent on an internal error + goAwayInternalErr +) + +const ( + sizeOfVersion = 1 + sizeOfType = 1 + sizeOfFlags = 2 + sizeOfStreamID = 4 + sizeOfLength = 4 + headerSize = sizeOfVersion + sizeOfType + sizeOfFlags + + sizeOfStreamID + sizeOfLength +) + +type header []byte + +func (h header) Version() uint8 { + return h[0] +} + +func (h header) MsgType() uint8 { + return h[1] +} + +func (h header) Flags() uint16 { + return binary.BigEndian.Uint16(h[2:4]) +} + +func (h header) StreamID() uint32 { + return binary.BigEndian.Uint32(h[4:8]) +} + +func (h header) Length() uint32 { + return binary.BigEndian.Uint32(h[8:12]) +} + +func (h header) String() string { + return fmt.Sprintf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d", + h.Version(), h.MsgType(), h.Flags(), h.StreamID(), h.Length()) +} + +func (h header) encode(msgType uint8, flags uint16, streamID uint32, length uint32) { + h[0] = protoVersion + h[1] = msgType + binary.BigEndian.PutUint16(h[2:4], flags) + binary.BigEndian.PutUint32(h[4:8], streamID) + binary.BigEndian.PutUint32(h[8:12], length) +} diff --git a/vendor/github.com/hashicorp/yamux/mux.go b/vendor/github.com/hashicorp/yamux/mux.go new file mode 100644 index 000000000..0c3e67b02 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/mux.go @@ -0,0 +1,114 @@ +package yamux + +import ( + "fmt" + "io" + "log" + "os" + "time" +) + +// Config is used to tune the Yamux session +type Config struct { + // AcceptBacklog is used to limit how many streams may be + // waiting an accept. + AcceptBacklog int + + // EnableKeepalive is used to do a period keep alive + // messages using a ping. + EnableKeepAlive bool + + // KeepAliveInterval is how often to perform the keep alive + KeepAliveInterval time.Duration + + // ConnectionWriteTimeout is meant to be a "safety valve" timeout after + // we which will suspect a problem with the underlying connection and + // close it. This is only applied to writes, where's there's generally + // an expectation that things will move along quickly. + ConnectionWriteTimeout time.Duration + + // MaxStreamWindowSize is used to control the maximum + // window size that we allow for a stream. + MaxStreamWindowSize uint32 + + // StreamOpenTimeout is the maximum amount of time that a stream will + // be allowed to remain in pending state while waiting for an ack from the peer. + // Once the timeout is reached the session will be gracefully closed. + // A zero value disables the StreamOpenTimeout allowing unbounded + // blocking on OpenStream calls. 
+ StreamOpenTimeout time.Duration + + // StreamCloseTimeout is the maximum time that a stream will allowed to + // be in a half-closed state when `Close` is called before forcibly + // closing the connection. Forcibly closed connections will empty the + // receive buffer, drop any future packets received for that stream, + // and send a RST to the remote side. + StreamCloseTimeout time.Duration + + // LogOutput is used to control the log destination. Either Logger or + // LogOutput can be set, not both. + LogOutput io.Writer + + // Logger is used to pass in the logger to be used. Either Logger or + // LogOutput can be set, not both. + Logger *log.Logger +} + +// DefaultConfig is used to return a default configuration +func DefaultConfig() *Config { + return &Config{ + AcceptBacklog: 256, + EnableKeepAlive: true, + KeepAliveInterval: 30 * time.Second, + ConnectionWriteTimeout: 10 * time.Second, + MaxStreamWindowSize: initialStreamWindow, + StreamCloseTimeout: 5 * time.Minute, + StreamOpenTimeout: 75 * time.Second, + LogOutput: os.Stderr, + } +} + +// VerifyConfig is used to verify the sanity of configuration +func VerifyConfig(config *Config) error { + if config.AcceptBacklog <= 0 { + return fmt.Errorf("backlog must be positive") + } + if config.KeepAliveInterval == 0 { + return fmt.Errorf("keep-alive interval must be positive") + } + if config.MaxStreamWindowSize < initialStreamWindow { + return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow) + } + if config.LogOutput != nil && config.Logger != nil { + return fmt.Errorf("both Logger and LogOutput may not be set, select one") + } else if config.LogOutput == nil && config.Logger == nil { + return fmt.Errorf("one of Logger or LogOutput must be set, select one") + } + return nil +} + +// Server is used to initialize a new server-side connection. +// There must be at most one server-side connection. If a nil config is +// provided, the DefaultConfiguration will be used. +func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) { + if config == nil { + config = DefaultConfig() + } + if err := VerifyConfig(config); err != nil { + return nil, err + } + return newSession(config, conn, false), nil +} + +// Client is used to initialize a new client-side connection. +// There must be at most one client-side connection. +func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) { + if config == nil { + config = DefaultConfig() + } + + if err := VerifyConfig(config); err != nil { + return nil, err + } + return newSession(config, conn, true), nil +} diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go new file mode 100644 index 000000000..38fe3ed1f --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/session.go @@ -0,0 +1,732 @@ +package yamux + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "math" + "net" + "strings" + "sync" + "sync/atomic" + "time" +) + +// Session is used to wrap a reliable ordered connection and to +// multiplex it into multiple streams. +type Session struct { + // remoteGoAway indicates the remote side does + // not want futher connections. Must be first for alignment. + remoteGoAway int32 + + // localGoAway indicates that we should stop + // accepting futher connections. Must be first for alignment. + localGoAway int32 + + // nextStreamID is the next stream we should + // send. This depends if we are a client/server. 
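
VerifyConfig (just above) accepts exactly one of Logger or LogOutput, and DefaultConfig pre-populates LogOutput, so a caller that wants a structured logger has to clear LogOutput first. A small sketch, with `newYamuxSession` as an invented helper name:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/hashicorp/yamux"
)

// newYamuxSession is an illustrative sketch: it starts from DefaultConfig and
// swaps in a custom *log.Logger. Because VerifyConfig requires exactly one of
// Logger or LogOutput, the LogOutput set by DefaultConfig is cleared first.
func newYamuxSession(conn io.ReadWriteCloser, client bool) (*yamux.Session, error) {
	cfg := yamux.DefaultConfig()
	cfg.LogOutput = nil
	cfg.Logger = log.New(os.Stderr, "yamux: ", log.LstdFlags)
	cfg.EnableKeepAlive = false // e.g. when an outer layer already performs keepalives

	if client {
		return yamux.Client(conn, cfg)
	}
	return yamux.Server(conn, cfg)
}
```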
+ nextStreamID uint32 + + // config holds our configuration + config *Config + + // logger is used for our logs + logger *log.Logger + + // conn is the underlying connection + conn io.ReadWriteCloser + + // bufRead is a buffered reader + bufRead *bufio.Reader + + // pings is used to track inflight pings + pings map[uint32]chan struct{} + pingID uint32 + pingLock sync.Mutex + + // streams maps a stream id to a stream, and inflight has an entry + // for any outgoing stream that has not yet been established. Both are + // protected by streamLock. + streams map[uint32]*Stream + inflight map[uint32]struct{} + streamLock sync.Mutex + + // synCh acts like a semaphore. It is sized to the AcceptBacklog which + // is assumed to be symmetric between the client and server. This allows + // the client to avoid exceeding the backlog and instead blocks the open. + synCh chan struct{} + + // acceptCh is used to pass ready streams to the client + acceptCh chan *Stream + + // sendCh is used to mark a stream as ready to send, + // or to send a header out directly. + sendCh chan *sendReady + + // recvDoneCh is closed when recv() exits to avoid a race + // between stream registration and stream shutdown + recvDoneCh chan struct{} + sendDoneCh chan struct{} + + // shutdown is used to safely close a session + shutdown bool + shutdownErr error + shutdownCh chan struct{} + shutdownLock sync.Mutex + shutdownErrLock sync.Mutex +} + +// sendReady is used to either mark a stream as ready +// or to directly send a header +type sendReady struct { + Hdr []byte + mu sync.Mutex // Protects Body from unsafe reads. + Body []byte + Err chan error +} + +// newSession is used to construct a new session +func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session { + logger := config.Logger + if logger == nil { + logger = log.New(config.LogOutput, "", log.LstdFlags) + } + + s := &Session{ + config: config, + logger: logger, + conn: conn, + bufRead: bufio.NewReader(conn), + pings: make(map[uint32]chan struct{}), + streams: make(map[uint32]*Stream), + inflight: make(map[uint32]struct{}), + synCh: make(chan struct{}, config.AcceptBacklog), + acceptCh: make(chan *Stream, config.AcceptBacklog), + sendCh: make(chan *sendReady, 64), + recvDoneCh: make(chan struct{}), + sendDoneCh: make(chan struct{}), + shutdownCh: make(chan struct{}), + } + if client { + s.nextStreamID = 1 + } else { + s.nextStreamID = 2 + } + go s.recv() + go s.send() + if config.EnableKeepAlive { + go s.keepalive() + } + return s +} + +// IsClosed does a safe check to see if we have shutdown +func (s *Session) IsClosed() bool { + select { + case <-s.shutdownCh: + return true + default: + return false + } +} + +// CloseChan returns a read-only channel which is closed as +// soon as the session is closed. 
+func (s *Session) CloseChan() <-chan struct{} { + return s.shutdownCh +} + +// NumStreams returns the number of currently open streams +func (s *Session) NumStreams() int { + s.streamLock.Lock() + num := len(s.streams) + s.streamLock.Unlock() + return num +} + +// Open is used to create a new stream as a net.Conn +func (s *Session) Open() (net.Conn, error) { + conn, err := s.OpenStream() + if err != nil { + return nil, err + } + return conn, nil +} + +// OpenStream is used to create a new stream +func (s *Session) OpenStream() (*Stream, error) { + if s.IsClosed() { + return nil, ErrSessionShutdown + } + if atomic.LoadInt32(&s.remoteGoAway) == 1 { + return nil, ErrRemoteGoAway + } + + // Block if we have too many inflight SYNs + select { + case s.synCh <- struct{}{}: + case <-s.shutdownCh: + return nil, ErrSessionShutdown + } + +GET_ID: + // Get an ID, and check for stream exhaustion + id := atomic.LoadUint32(&s.nextStreamID) + if id >= math.MaxUint32-1 { + return nil, ErrStreamsExhausted + } + if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) { + goto GET_ID + } + + // Register the stream + stream := newStream(s, id, streamInit) + s.streamLock.Lock() + s.streams[id] = stream + s.inflight[id] = struct{}{} + s.streamLock.Unlock() + + if s.config.StreamOpenTimeout > 0 { + go s.setOpenTimeout(stream) + } + + // Send the window update to create + if err := stream.sendWindowUpdate(); err != nil { + select { + case <-s.synCh: + default: + s.logger.Printf("[ERR] yamux: aborted stream open without inflight syn semaphore") + } + return nil, err + } + return stream, nil +} + +// setOpenTimeout implements a timeout for streams that are opened but not established. +// If the StreamOpenTimeout is exceeded we assume the peer is unable to ACK, +// and close the session. +// The number of running timers is bounded by the capacity of the synCh. +func (s *Session) setOpenTimeout(stream *Stream) { + timer := time.NewTimer(s.config.StreamOpenTimeout) + defer timer.Stop() + + select { + case <-stream.establishCh: + return + case <-s.shutdownCh: + return + case <-timer.C: + // Timeout reached while waiting for ACK. + // Close the session to force connection re-establishment. + s.logger.Printf("[ERR] yamux: aborted stream open (destination=%s): %v", s.RemoteAddr().String(), ErrTimeout.err) + s.Close() + } +} + +// Accept is used to block until the next available stream +// is ready to be accepted. +func (s *Session) Accept() (net.Conn, error) { + conn, err := s.AcceptStream() + if err != nil { + return nil, err + } + return conn, err +} + +// AcceptStream is used to block until the next available stream +// is ready to be accepted. +func (s *Session) AcceptStream() (*Stream, error) { + select { + case stream := <-s.acceptCh: + if err := stream.sendWindowUpdate(); err != nil { + return nil, err + } + return stream, nil + case <-s.shutdownCh: + return nil, s.shutdownErr + } +} + +// Close is used to close the session and all streams. +// Attempts to send a GoAway before closing the connection. 
+func (s *Session) Close() error { + s.shutdownLock.Lock() + defer s.shutdownLock.Unlock() + + if s.shutdown { + return nil + } + s.shutdown = true + + s.shutdownErrLock.Lock() + if s.shutdownErr == nil { + s.shutdownErr = ErrSessionShutdown + } + s.shutdownErrLock.Unlock() + + close(s.shutdownCh) + + s.conn.Close() + <-s.recvDoneCh + + s.streamLock.Lock() + defer s.streamLock.Unlock() + for _, stream := range s.streams { + stream.forceClose() + } + <-s.sendDoneCh + return nil +} + +// exitErr is used to handle an error that is causing the +// session to terminate. +func (s *Session) exitErr(err error) { + s.shutdownErrLock.Lock() + if s.shutdownErr == nil { + s.shutdownErr = err + } + s.shutdownErrLock.Unlock() + s.Close() +} + +// GoAway can be used to prevent accepting further +// connections. It does not close the underlying conn. +func (s *Session) GoAway() error { + return s.waitForSend(s.goAway(goAwayNormal), nil) +} + +// goAway is used to send a goAway message +func (s *Session) goAway(reason uint32) header { + atomic.SwapInt32(&s.localGoAway, 1) + hdr := header(make([]byte, headerSize)) + hdr.encode(typeGoAway, 0, 0, reason) + return hdr +} + +// Ping is used to measure the RTT response time +func (s *Session) Ping() (time.Duration, error) { + // Get a channel for the ping + ch := make(chan struct{}) + + // Get a new ping id, mark as pending + s.pingLock.Lock() + id := s.pingID + s.pingID++ + s.pings[id] = ch + s.pingLock.Unlock() + + // Send the ping request + hdr := header(make([]byte, headerSize)) + hdr.encode(typePing, flagSYN, 0, id) + if err := s.waitForSend(hdr, nil); err != nil { + return 0, err + } + + // Wait for a response + start := time.Now() + select { + case <-ch: + case <-time.After(s.config.ConnectionWriteTimeout): + s.pingLock.Lock() + delete(s.pings, id) // Ignore it if a response comes later. + s.pingLock.Unlock() + return 0, ErrTimeout + case <-s.shutdownCh: + return 0, ErrSessionShutdown + } + + // Compute the RTT + return time.Now().Sub(start), nil +} + +// keepalive is a long running goroutine that periodically does +// a ping to keep the connection alive. +func (s *Session) keepalive() { + for { + select { + case <-time.After(s.config.KeepAliveInterval): + _, err := s.Ping() + if err != nil { + if err != ErrSessionShutdown { + s.logger.Printf("[ERR] yamux: keepalive failed: %v", err) + s.exitErr(ErrKeepAliveTimeout) + } + return + } + case <-s.shutdownCh: + return + } + } +} + +// waitForSendErr waits to send a header, checking for a potential shutdown +func (s *Session) waitForSend(hdr header, body []byte) error { + errCh := make(chan error, 1) + return s.waitForSendErr(hdr, body, errCh) +} + +// waitForSendErr waits to send a header with optional data, checking for a +// potential shutdown. Since there's the expectation that sends can happen +// in a timely manner, we enforce the connection write timeout here. +func (s *Session) waitForSendErr(hdr header, body []byte, errCh chan error) error { + t := timerPool.Get() + timer := t.(*time.Timer) + timer.Reset(s.config.ConnectionWriteTimeout) + defer func() { + timer.Stop() + select { + case <-timer.C: + default: + } + timerPool.Put(t) + }() + + ready := &sendReady{Hdr: hdr, Body: body, Err: errCh} + select { + case s.sendCh <- ready: + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } + + bodyCopy := func() { + if body == nil { + return // A nil body is ignored. 
+ } + + // In the event of session shutdown or connection write timeout, + // we need to prevent `send` from reading the body buffer after + // returning from this function since the caller may re-use the + // underlying array. + ready.mu.Lock() + defer ready.mu.Unlock() + + if ready.Body == nil { + return // Body was already copied in `send`. + } + newBody := make([]byte, len(body)) + copy(newBody, body) + ready.Body = newBody + } + + select { + case err := <-errCh: + return err + case <-s.shutdownCh: + bodyCopy() + return ErrSessionShutdown + case <-timer.C: + bodyCopy() + return ErrConnectionWriteTimeout + } +} + +// sendNoWait does a send without waiting. Since there's the expectation that +// the send happens right here, we enforce the connection write timeout if we +// can't queue the header to be sent. +func (s *Session) sendNoWait(hdr header) error { + t := timerPool.Get() + timer := t.(*time.Timer) + timer.Reset(s.config.ConnectionWriteTimeout) + defer func() { + timer.Stop() + select { + case <-timer.C: + default: + } + timerPool.Put(t) + }() + + select { + case s.sendCh <- &sendReady{Hdr: hdr}: + return nil + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } +} + +// send is a long running goroutine that sends data +func (s *Session) send() { + if err := s.sendLoop(); err != nil { + s.exitErr(err) + } +} + +func (s *Session) sendLoop() error { + defer close(s.sendDoneCh) + var bodyBuf bytes.Buffer + for { + bodyBuf.Reset() + + select { + case ready := <-s.sendCh: + // Send a header if ready + if ready.Hdr != nil { + _, err := s.conn.Write(ready.Hdr) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write header: %v", err) + asyncSendErr(ready.Err, err) + return err + } + } + + ready.mu.Lock() + if ready.Body != nil { + // Copy the body into the buffer to avoid + // holding a mutex lock during the write. 
+ _, err := bodyBuf.Write(ready.Body) + if err != nil { + ready.Body = nil + ready.mu.Unlock() + s.logger.Printf("[ERR] yamux: Failed to copy body into buffer: %v", err) + asyncSendErr(ready.Err, err) + return err + } + ready.Body = nil + } + ready.mu.Unlock() + + if bodyBuf.Len() > 0 { + // Send data from a body if given + _, err := s.conn.Write(bodyBuf.Bytes()) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write body: %v", err) + asyncSendErr(ready.Err, err) + return err + } + } + + // No error, successful send + asyncSendErr(ready.Err, nil) + case <-s.shutdownCh: + return nil + } + } +} + +// recv is a long running goroutine that accepts new data +func (s *Session) recv() { + if err := s.recvLoop(); err != nil { + s.exitErr(err) + } +} + +// Ensure that the index of the handler (typeData/typeWindowUpdate/etc) matches the message type +var ( + handlers = []func(*Session, header) error{ + typeData: (*Session).handleStreamMessage, + typeWindowUpdate: (*Session).handleStreamMessage, + typePing: (*Session).handlePing, + typeGoAway: (*Session).handleGoAway, + } +) + +// recvLoop continues to receive data until a fatal error is encountered +func (s *Session) recvLoop() error { + defer close(s.recvDoneCh) + hdr := header(make([]byte, headerSize)) + for { + // Read the header + if _, err := io.ReadFull(s.bufRead, hdr); err != nil { + if err != io.EOF && !strings.Contains(err.Error(), "closed") && !strings.Contains(err.Error(), "reset by peer") { + s.logger.Printf("[ERR] yamux: Failed to read header: %v", err) + } + return err + } + + // Verify the version + if hdr.Version() != protoVersion { + s.logger.Printf("[ERR] yamux: Invalid protocol version: %d", hdr.Version()) + return ErrInvalidVersion + } + + mt := hdr.MsgType() + if mt < typeData || mt > typeGoAway { + return ErrInvalidMsgType + } + + if err := handlers[mt](s, hdr); err != nil { + return err + } + } +} + +// handleStreamMessage handles either a data or window update frame +func (s *Session) handleStreamMessage(hdr header) error { + // Check for a new stream creation + id := hdr.StreamID() + flags := hdr.Flags() + if flags&flagSYN == flagSYN { + if err := s.incomingStream(id); err != nil { + return err + } + } + + // Get the stream + s.streamLock.Lock() + stream := s.streams[id] + s.streamLock.Unlock() + + // If we do not have a stream, likely we sent a RST + if stream == nil { + // Drain any data on the wire + if hdr.MsgType() == typeData && hdr.Length() > 0 { + s.logger.Printf("[WARN] yamux: Discarding data for stream: %d", id) + if _, err := io.CopyN(ioutil.Discard, s.bufRead, int64(hdr.Length())); err != nil { + s.logger.Printf("[ERR] yamux: Failed to discard data: %v", err) + return nil + } + } else { + s.logger.Printf("[WARN] yamux: frame for missing stream: %v", hdr) + } + return nil + } + + // Check if this is a window update + if hdr.MsgType() == typeWindowUpdate { + if err := stream.incrSendWindow(hdr, flags); err != nil { + if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { + s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) + } + return err + } + return nil + } + + // Read the new data + if err := stream.readData(hdr, flags, s.bufRead); err != nil { + if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { + s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) + } + return err + } + return nil +} + +// handlePing is invokde for a typePing frame +func (s *Session) handlePing(hdr header) error { + flags := hdr.Flags() + pingID := hdr.Length() 
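+	// Per the framing rules described in spec.md, the Length field of a ping
+	// frame carries an opaque ping ID rather than a payload length; the same
+	// value is echoed back in the ACK reply built below.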
+ + // Check if this is a query, respond back in a separate context so we + // don't interfere with the receiving thread blocking for the write. + if flags&flagSYN == flagSYN { + go func() { + hdr := header(make([]byte, headerSize)) + hdr.encode(typePing, flagACK, 0, pingID) + if err := s.sendNoWait(hdr); err != nil { + s.logger.Printf("[WARN] yamux: failed to send ping reply: %v", err) + } + }() + return nil + } + + // Handle a response + s.pingLock.Lock() + ch := s.pings[pingID] + if ch != nil { + delete(s.pings, pingID) + close(ch) + } + s.pingLock.Unlock() + return nil +} + +// handleGoAway is invokde for a typeGoAway frame +func (s *Session) handleGoAway(hdr header) error { + code := hdr.Length() + switch code { + case goAwayNormal: + atomic.SwapInt32(&s.remoteGoAway, 1) + case goAwayProtoErr: + s.logger.Printf("[ERR] yamux: received protocol error go away") + return fmt.Errorf("yamux protocol error") + case goAwayInternalErr: + s.logger.Printf("[ERR] yamux: received internal error go away") + return fmt.Errorf("remote yamux internal error") + default: + s.logger.Printf("[ERR] yamux: received unexpected go away") + return fmt.Errorf("unexpected go away received") + } + return nil +} + +// incomingStream is used to create a new incoming stream +func (s *Session) incomingStream(id uint32) error { + // Reject immediately if we are doing a go away + if atomic.LoadInt32(&s.localGoAway) == 1 { + hdr := header(make([]byte, headerSize)) + hdr.encode(typeWindowUpdate, flagRST, id, 0) + return s.sendNoWait(hdr) + } + + // Allocate a new stream + stream := newStream(s, id, streamSYNReceived) + + s.streamLock.Lock() + defer s.streamLock.Unlock() + + // Check if stream already exists + if _, ok := s.streams[id]; ok { + s.logger.Printf("[ERR] yamux: duplicate stream declared") + if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { + s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) + } + return ErrDuplicateStream + } + + // Register the stream + s.streams[id] = stream + + // Check if we've exceeded the backlog + select { + case s.acceptCh <- stream: + return nil + default: + // Backlog exceeded! RST the stream + s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset") + delete(s.streams, id) + hdr := header(make([]byte, headerSize)) + hdr.encode(typeWindowUpdate, flagRST, id, 0) + return s.sendNoWait(hdr) + } +} + +// closeStream is used to close a stream once both sides have +// issued a close. If there was an in-flight SYN and the stream +// was not yet established, then this will give the credit back. +func (s *Session) closeStream(id uint32) { + s.streamLock.Lock() + if _, ok := s.inflight[id]; ok { + select { + case <-s.synCh: + default: + s.logger.Printf("[ERR] yamux: SYN tracking out of sync") + } + } + delete(s.streams, id) + s.streamLock.Unlock() +} + +// establishStream is used to mark a stream that was in the +// SYN Sent state as established. 
+func (s *Session) establishStream(id uint32) { + s.streamLock.Lock() + if _, ok := s.inflight[id]; ok { + delete(s.inflight, id) + } else { + s.logger.Printf("[ERR] yamux: established stream without inflight SYN (no tracking entry)") + } + select { + case <-s.synCh: + default: + s.logger.Printf("[ERR] yamux: established stream without inflight SYN (didn't have semaphore)") + } + s.streamLock.Unlock() +} diff --git a/vendor/github.com/hashicorp/yamux/spec.md b/vendor/github.com/hashicorp/yamux/spec.md new file mode 100644 index 000000000..183d797bd --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/spec.md @@ -0,0 +1,140 @@ +# Specification + +We use this document to detail the internal specification of Yamux. +This is used both as a guide for implementing Yamux, but also for +alternative interoperable libraries to be built. + +# Framing + +Yamux uses a streaming connection underneath, but imposes a message +framing so that it can be shared between many logical streams. Each +frame contains a header like: + +* Version (8 bits) +* Type (8 bits) +* Flags (16 bits) +* StreamID (32 bits) +* Length (32 bits) + +This means that each header has a 12 byte overhead. +All fields are encoded in network order (big endian). +Each field is described below: + +## Version Field + +The version field is used for future backward compatibility. At the +current time, the field is always set to 0, to indicate the initial +version. + +## Type Field + +The type field is used to switch the frame message type. The following +message types are supported: + +* 0x0 Data - Used to transmit data. May transmit zero length payloads + depending on the flags. + +* 0x1 Window Update - Used to updated the senders receive window size. + This is used to implement per-session flow control. + +* 0x2 Ping - Used to measure RTT. It can also be used to heart-beat + and do keep-alives over TCP. + +* 0x3 Go Away - Used to close a session. + +## Flag Field + +The flags field is used to provide additional information related +to the message type. The following flags are supported: + +* 0x1 SYN - Signals the start of a new stream. May be sent with a data or + window update message. Also sent with a ping to indicate outbound. + +* 0x2 ACK - Acknowledges the start of a new stream. May be sent with a data + or window update message. Also sent with a ping to indicate response. + +* 0x4 FIN - Performs a half-close of a stream. May be sent with a data + message or window update. + +* 0x8 RST - Reset a stream immediately. May be sent with a data or + window update message. + +## StreamID Field + +The StreamID field is used to identify the logical stream the frame +is addressing. The client side should use odd ID's, and the server even. +This prevents any collisions. Additionally, the 0 ID is reserved to represent +the session. + +Both Ping and Go Away messages should always use the 0 StreamID. + +## Length Field + +The meaning of the length field depends on the message type: + +* Data - provides the length of bytes following the header +* Window update - provides a delta update to the window size +* Ping - Contains an opaque value, echoed back +* Go Away - Contains an error code + +# Message Flow + +There is no explicit connection setup, as Yamux relies on an underlying +transport to be provided. However, there is a distinction between client +and server side of the connection. + +## Opening a stream + +To open a stream, an initial data or window update frame is sent +with a new StreamID. The SYN flag should be set to signal a new stream. 
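For illustration, here is a minimal client-side sketch of what opening a stream looks like with this package's Go API (`Client`, `OpenStream`); the address and payload are placeholders, and error handling is kept deliberately simple:

```go
package main

import (
	"log"
	"net"

	"github.com/hashicorp/yamux"
)

func main() {
	// Dial the underlying reliable transport (address is a placeholder).
	conn, err := net.Dial("tcp", "127.0.0.1:4000")
	if err != nil {
		log.Fatal(err)
	}

	// Wrap the connection as the client side of a yamux session.
	// Passing nil selects DefaultConfig().
	session, err := yamux.Client(conn, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	// OpenStream sends the initial window update with the SYN flag set;
	// data may be written immediately without waiting for the peer's ACK.
	stream, err := session.OpenStream()
	if err != nil {
		log.Fatal(err)
	}
	defer stream.Close()

	if _, err := stream.Write([]byte("hello")); err != nil {
		log.Fatal(err)
	}
}
```

On the accepting side, the peer would call `yamux.Server(conn, nil)` and then `session.AcceptStream()`, whose first window update carries the ACK flag that establishes the stream.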
+ +The receiver must then reply with either a data or window update frame +with the StreamID along with the ACK flag to accept the stream or with +the RST flag to reject the stream. + +Because we are relying on the reliable stream underneath, a connection +can begin sending data once the SYN flag is sent. The corresponding +ACK does not need to be received. This is particularly well suited +for an RPC system where a client wants to open a stream and immediately +fire a request without waiting for the RTT of the ACK. + +This does introduce the possibility of a connection being rejected +after data has been sent already. This is a slight semantic difference +from TCP, where the conection cannot be refused after it is opened. +Clients should be prepared to handle this by checking for an error +that indicates a RST was received. + +## Closing a stream + +To close a stream, either side sends a data or window update frame +along with the FIN flag. This does a half-close indicating the sender +will send no further data. + +Once both sides have closed the connection, the stream is closed. + +Alternatively, if an error occurs, the RST flag can be used to +hard close a stream immediately. + +## Flow Control + +When Yamux is initially starts each stream with a 256KB window size. +There is no window size for the session. + +To prevent the streams from stalling, window update frames should be +sent regularly. Yamux can be configured to provide a larger limit for +windows sizes. Both sides assume the initial 256KB window, but can +immediately send a window update as part of the SYN/ACK indicating a +larger window. + +Both sides should track the number of bytes sent in Data frames +only, as only they are tracked as part of the window size. + +## Session termination + +When a session is being terminated, the Go Away message should +be sent. The Length should be set to one of the following to +provide an error code: + +* 0x0 Normal termination +* 0x1 Protocol error +* 0x2 Internal error diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go new file mode 100644 index 000000000..23d08fcc8 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/stream.go @@ -0,0 +1,544 @@ +package yamux + +import ( + "bytes" + "errors" + "io" + "sync" + "sync/atomic" + "time" +) + +type streamState int + +const ( + streamInit streamState = iota + streamSYNSent + streamSYNReceived + streamEstablished + streamLocalClose + streamRemoteClose + streamClosed + streamReset +) + +// Stream is used to represent a logical stream +// within a session. +type Stream struct { + recvWindow uint32 + sendWindow uint32 + + id uint32 + session *Session + + state streamState + stateLock sync.Mutex + + recvBuf *bytes.Buffer + recvLock sync.Mutex + + controlHdr header + controlErr chan error + controlHdrLock sync.Mutex + + sendHdr header + sendErr chan error + sendLock sync.Mutex + + recvNotifyCh chan struct{} + sendNotifyCh chan struct{} + + readDeadline atomic.Value // time.Time + writeDeadline atomic.Value // time.Time + + // establishCh is notified if the stream is established or being closed. + establishCh chan struct{} + + // closeTimer is set with stateLock held to honor the StreamCloseTimeout + // setting on Session. 
+ closeTimer *time.Timer +} + +// newStream is used to construct a new stream within +// a given session for an ID +func newStream(session *Session, id uint32, state streamState) *Stream { + s := &Stream{ + id: id, + session: session, + state: state, + controlHdr: header(make([]byte, headerSize)), + controlErr: make(chan error, 1), + sendHdr: header(make([]byte, headerSize)), + sendErr: make(chan error, 1), + recvWindow: initialStreamWindow, + sendWindow: initialStreamWindow, + recvNotifyCh: make(chan struct{}, 1), + sendNotifyCh: make(chan struct{}, 1), + establishCh: make(chan struct{}, 1), + } + s.readDeadline.Store(time.Time{}) + s.writeDeadline.Store(time.Time{}) + return s +} + +// Session returns the associated stream session +func (s *Stream) Session() *Session { + return s.session +} + +// StreamID returns the ID of this stream +func (s *Stream) StreamID() uint32 { + return s.id +} + +// Read is used to read from the stream +func (s *Stream) Read(b []byte) (n int, err error) { + defer asyncNotify(s.recvNotifyCh) +START: + s.stateLock.Lock() + switch s.state { + case streamLocalClose: + fallthrough + case streamRemoteClose: + fallthrough + case streamClosed: + s.recvLock.Lock() + if s.recvBuf == nil || s.recvBuf.Len() == 0 { + s.recvLock.Unlock() + s.stateLock.Unlock() + return 0, io.EOF + } + s.recvLock.Unlock() + case streamReset: + s.stateLock.Unlock() + return 0, ErrConnectionReset + } + s.stateLock.Unlock() + + // If there is no data available, block + s.recvLock.Lock() + if s.recvBuf == nil || s.recvBuf.Len() == 0 { + s.recvLock.Unlock() + goto WAIT + } + + // Read any bytes + n, _ = s.recvBuf.Read(b) + s.recvLock.Unlock() + + // Send a window update potentially + err = s.sendWindowUpdate() + if err == ErrSessionShutdown { + err = nil + } + return n, err + +WAIT: + var timeout <-chan time.Time + var timer *time.Timer + readDeadline := s.readDeadline.Load().(time.Time) + if !readDeadline.IsZero() { + delay := readDeadline.Sub(time.Now()) + timer = time.NewTimer(delay) + timeout = timer.C + } + select { + case <-s.recvNotifyCh: + if timer != nil { + timer.Stop() + } + goto START + case <-timeout: + return 0, ErrTimeout + } +} + +// Write is used to write to the stream +func (s *Stream) Write(b []byte) (n int, err error) { + s.sendLock.Lock() + defer s.sendLock.Unlock() + total := 0 + for total < len(b) { + n, err := s.write(b[total:]) + total += n + if err != nil { + return total, err + } + } + return total, nil +} + +// write is used to write to the stream, may return on +// a short write. +func (s *Stream) write(b []byte) (n int, err error) { + var flags uint16 + var max uint32 + var body []byte +START: + s.stateLock.Lock() + switch s.state { + case streamLocalClose: + fallthrough + case streamClosed: + s.stateLock.Unlock() + return 0, ErrStreamClosed + case streamReset: + s.stateLock.Unlock() + return 0, ErrConnectionReset + } + s.stateLock.Unlock() + + // If there is no data available, block + window := atomic.LoadUint32(&s.sendWindow) + if window == 0 { + goto WAIT + } + + // Determine the flags if any + flags = s.sendFlags() + + // Send up to our send window + max = min(window, uint32(len(b))) + body = b[:max] + + // Send the header + s.sendHdr.encode(typeData, flags, s.id, max) + if err = s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil { + if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) { + // Message left in ready queue, header re-use is unsafe. 
+ s.sendHdr = header(make([]byte, headerSize)) + } + return 0, err + } + + // Reduce our send window + atomic.AddUint32(&s.sendWindow, ^uint32(max-1)) + + // Unlock + return int(max), err + +WAIT: + var timeout <-chan time.Time + writeDeadline := s.writeDeadline.Load().(time.Time) + if !writeDeadline.IsZero() { + delay := writeDeadline.Sub(time.Now()) + timeout = time.After(delay) + } + select { + case <-s.sendNotifyCh: + goto START + case <-timeout: + return 0, ErrTimeout + } + return 0, nil +} + +// sendFlags determines any flags that are appropriate +// based on the current stream state +func (s *Stream) sendFlags() uint16 { + s.stateLock.Lock() + defer s.stateLock.Unlock() + var flags uint16 + switch s.state { + case streamInit: + flags |= flagSYN + s.state = streamSYNSent + case streamSYNReceived: + flags |= flagACK + s.state = streamEstablished + } + return flags +} + +// sendWindowUpdate potentially sends a window update enabling +// further writes to take place. Must be invoked with the lock. +func (s *Stream) sendWindowUpdate() error { + s.controlHdrLock.Lock() + defer s.controlHdrLock.Unlock() + + // Determine the delta update + max := s.session.config.MaxStreamWindowSize + var bufLen uint32 + s.recvLock.Lock() + if s.recvBuf != nil { + bufLen = uint32(s.recvBuf.Len()) + } + delta := (max - bufLen) - s.recvWindow + + // Determine the flags if any + flags := s.sendFlags() + + // Check if we can omit the update + if delta < (max/2) && flags == 0 { + s.recvLock.Unlock() + return nil + } + + // Update our window + s.recvWindow += delta + s.recvLock.Unlock() + + // Send the header + s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta) + if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) { + // Message left in ready queue, header re-use is unsafe. + s.controlHdr = header(make([]byte, headerSize)) + } + return err + } + return nil +} + +// sendClose is used to send a FIN +func (s *Stream) sendClose() error { + s.controlHdrLock.Lock() + defer s.controlHdrLock.Unlock() + + flags := s.sendFlags() + flags |= flagFIN + s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0) + if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) { + // Message left in ready queue, header re-use is unsafe. + s.controlHdr = header(make([]byte, headerSize)) + } + return err + } + return nil +} + +// Close is used to close the stream +func (s *Stream) Close() error { + closeStream := false + s.stateLock.Lock() + switch s.state { + // Opened means we need to signal a close + case streamSYNSent: + fallthrough + case streamSYNReceived: + fallthrough + case streamEstablished: + s.state = streamLocalClose + goto SEND_CLOSE + + case streamLocalClose: + case streamRemoteClose: + s.state = streamClosed + closeStream = true + goto SEND_CLOSE + + case streamClosed: + case streamReset: + default: + panic("unhandled state") + } + s.stateLock.Unlock() + return nil +SEND_CLOSE: + // This shouldn't happen (the more realistic scenario to cancel the + // timer is via processFlags) but just in case this ever happens, we + // cancel the timer to prevent dangling timers. + if s.closeTimer != nil { + s.closeTimer.Stop() + s.closeTimer = nil + } + + // If we have a StreamCloseTimeout set we start the timeout timer. 
+ // We do this only if we're not already closing the stream since that + // means this was a graceful close. + // + // This prevents memory leaks if one side (this side) closes and the + // remote side poorly behaves and never responds with a FIN to complete + // the close. After the specified timeout, we clean our resources up no + // matter what. + if !closeStream && s.session.config.StreamCloseTimeout > 0 { + s.closeTimer = time.AfterFunc( + s.session.config.StreamCloseTimeout, s.closeTimeout) + } + + s.stateLock.Unlock() + s.sendClose() + s.notifyWaiting() + if closeStream { + s.session.closeStream(s.id) + } + return nil +} + +// closeTimeout is called after StreamCloseTimeout during a close to +// close this stream. +func (s *Stream) closeTimeout() { + // Close our side forcibly + s.forceClose() + + // Free the stream from the session map + s.session.closeStream(s.id) + + // Send a RST so the remote side closes too. + s.sendLock.Lock() + defer s.sendLock.Unlock() + hdr := header(make([]byte, headerSize)) + hdr.encode(typeWindowUpdate, flagRST, s.id, 0) + s.session.sendNoWait(hdr) +} + +// forceClose is used for when the session is exiting +func (s *Stream) forceClose() { + s.stateLock.Lock() + s.state = streamClosed + s.stateLock.Unlock() + s.notifyWaiting() +} + +// processFlags is used to update the state of the stream +// based on set flags, if any. Lock must be held +func (s *Stream) processFlags(flags uint16) error { + s.stateLock.Lock() + defer s.stateLock.Unlock() + + // Close the stream without holding the state lock + closeStream := false + defer func() { + if closeStream { + if s.closeTimer != nil { + // Stop our close timeout timer since we gracefully closed + s.closeTimer.Stop() + } + + s.session.closeStream(s.id) + } + }() + + if flags&flagACK == flagACK { + if s.state == streamSYNSent { + s.state = streamEstablished + } + asyncNotify(s.establishCh) + s.session.establishStream(s.id) + } + if flags&flagFIN == flagFIN { + switch s.state { + case streamSYNSent: + fallthrough + case streamSYNReceived: + fallthrough + case streamEstablished: + s.state = streamRemoteClose + s.notifyWaiting() + case streamLocalClose: + s.state = streamClosed + closeStream = true + s.notifyWaiting() + default: + s.session.logger.Printf("[ERR] yamux: unexpected FIN flag in state %d", s.state) + return ErrUnexpectedFlag + } + } + if flags&flagRST == flagRST { + s.state = streamReset + closeStream = true + s.notifyWaiting() + } + return nil +} + +// notifyWaiting notifies all the waiting channels +func (s *Stream) notifyWaiting() { + asyncNotify(s.recvNotifyCh) + asyncNotify(s.sendNotifyCh) + asyncNotify(s.establishCh) +} + +// incrSendWindow updates the size of our send window +func (s *Stream) incrSendWindow(hdr header, flags uint16) error { + if err := s.processFlags(flags); err != nil { + return err + } + + // Increase window, unblock a sender + atomic.AddUint32(&s.sendWindow, hdr.Length()) + asyncNotify(s.sendNotifyCh) + return nil +} + +// readData is used to handle a data frame +func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error { + if err := s.processFlags(flags); err != nil { + return err + } + + // Check that our recv window is not exceeded + length := hdr.Length() + if length == 0 { + return nil + } + + // Wrap in a limited reader + conn = &io.LimitedReader{R: conn, N: int64(length)} + + // Copy into buffer + s.recvLock.Lock() + + if length > s.recvWindow { + s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, 
s.recvWindow, length) + s.recvLock.Unlock() + return ErrRecvWindowExceeded + } + + if s.recvBuf == nil { + // Allocate the receive buffer just-in-time to fit the full data frame. + // This way we can read in the whole packet without further allocations. + s.recvBuf = bytes.NewBuffer(make([]byte, 0, length)) + } + copiedLength, err := io.Copy(s.recvBuf, conn) + if err != nil { + s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err) + s.recvLock.Unlock() + return err + } + + // Decrement the receive window + s.recvWindow -= uint32(copiedLength) + s.recvLock.Unlock() + + // Unblock any readers + asyncNotify(s.recvNotifyCh) + return nil +} + +// SetDeadline sets the read and write deadlines +func (s *Stream) SetDeadline(t time.Time) error { + if err := s.SetReadDeadline(t); err != nil { + return err + } + if err := s.SetWriteDeadline(t); err != nil { + return err + } + return nil +} + +// SetReadDeadline sets the deadline for blocked and future Read calls. +func (s *Stream) SetReadDeadline(t time.Time) error { + s.readDeadline.Store(t) + asyncNotify(s.recvNotifyCh) + return nil +} + +// SetWriteDeadline sets the deadline for blocked and future Write calls +func (s *Stream) SetWriteDeadline(t time.Time) error { + s.writeDeadline.Store(t) + asyncNotify(s.sendNotifyCh) + return nil +} + +// Shrink is used to compact the amount of buffers utilized +// This is useful when using Yamux in a connection pool to reduce +// the idle memory utilization. +func (s *Stream) Shrink() { + s.recvLock.Lock() + if s.recvBuf != nil && s.recvBuf.Len() == 0 { + s.recvBuf = nil + } + s.recvLock.Unlock() +} diff --git a/vendor/github.com/hashicorp/yamux/util.go b/vendor/github.com/hashicorp/yamux/util.go new file mode 100644 index 000000000..8a73e9249 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/util.go @@ -0,0 +1,43 @@ +package yamux + +import ( + "sync" + "time" +) + +var ( + timerPool = &sync.Pool{ + New: func() interface{} { + timer := time.NewTimer(time.Hour * 1e6) + timer.Stop() + return timer + }, + } +) + +// asyncSendErr is used to try an async send of an error +func asyncSendErr(ch chan error, err error) { + if ch == nil { + return + } + select { + case ch <- err: + default: + } +} + +// asyncNotify is used to signal a waiting goroutine +func asyncNotify(ch chan struct{}) { + select { + case ch <- struct{}{}: + default: + } +} + +// min computes the minimum of two values +func min(a, b uint32) uint32 { + if a < b { + return a + } + return b +} diff --git a/vendor/github.com/mitchellh/go-testing-interface/.travis.yml b/vendor/github.com/mitchellh/go-testing-interface/.travis.yml new file mode 100644 index 000000000..928d000ec --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.8 + - 1.x + - tip + +script: + - go test + +matrix: + allow_failures: + - go: tip diff --git a/vendor/github.com/mitchellh/go-testing-interface/LICENSE b/vendor/github.com/mitchellh/go-testing-interface/LICENSE new file mode 100644 index 000000000..a3866a291 --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the 
Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-testing-interface/README.md b/vendor/github.com/mitchellh/go-testing-interface/README.md new file mode 100644 index 000000000..26781bbae --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/README.md @@ -0,0 +1,52 @@ +# go-testing-interface + +go-testing-interface is a Go library that exports an interface that +`*testing.T` implements as well as a runtime version you can use in its +place. + +The purpose of this library is so that you can export test helpers as a +public API without depending on the "testing" package, since you can't +create a `*testing.T` struct manually. This lets you, for example, use the +public testing APIs to generate mock data at runtime, rather than just at +test time. + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/go-testing-interface). + +Given a test helper written using `go-testing-interface` like this: + + import "github.com/mitchellh/go-testing-interface" + + func TestHelper(t testing.T) { + t.Fatal("I failed") + } + +You can call the test helper in a real test easily: + + import "testing" + + func TestThing(t *testing.T) { + TestHelper(t) + } + +You can also call the test helper at runtime if needed: + + import "github.com/mitchellh/go-testing-interface" + + func main() { + TestHelper(&testing.RuntimeT{}) + } + +## Why?! + +**Why would I call a test helper that takes a *testing.T at runtime?** + +You probably shouldn't. The only use case I've seen (and I've had) for this +is to implement a "dev mode" for a service where the test helpers are used +to populate mock data, create a mock DB, perhaps run service dependencies +in-memory, etc. + +Outside of a "dev mode", I've never seen a use case for this and I think +there shouldn't be one since the point of the `testing.T` interface is that +you can fail immediately. diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing.go b/vendor/github.com/mitchellh/go-testing-interface/testing.go new file mode 100644 index 000000000..204afb420 --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/testing.go @@ -0,0 +1,84 @@ +// +build !go1.9 + +package testing + +import ( + "fmt" + "log" +) + +// T is the interface that mimics the standard library *testing.T. +// +// In unit tests you can just pass a *testing.T struct. At runtime, outside +// of tests, you can pass in a RuntimeT struct from this package. 
+type T interface { + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fail() + FailNow() + Failed() bool + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Name() string + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool +} + +// RuntimeT implements T and can be instantiated and run at runtime to +// mimic *testing.T behavior. Unlike *testing.T, this will simply panic +// for calls to Fatal. For calls to Error, you'll have to check the errors +// list to determine whether to exit yourself. Name and Skip methods are +// unimplemented noops. +type RuntimeT struct { + failed bool +} + +func (t *RuntimeT) Error(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.Fail() +} + +func (t *RuntimeT) Errorf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) + t.Fail() +} + +func (t *RuntimeT) Fatal(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.FailNow() +} + +func (t *RuntimeT) Fatalf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) + t.FailNow() +} + +func (t *RuntimeT) Fail() { + t.failed = true +} + +func (t *RuntimeT) FailNow() { + panic("testing.T failed, see logs for output (if any)") +} + +func (t *RuntimeT) Failed() bool { + return t.failed +} + +func (t *RuntimeT) Log(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) +} + +func (t *RuntimeT) Logf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) +} + +func (t *RuntimeT) Name() string { return "" } +func (t *RuntimeT) Skip(args ...interface{}) {} +func (t *RuntimeT) SkipNow() {} +func (t *RuntimeT) Skipf(format string, args ...interface{}) {} +func (t *RuntimeT) Skipped() bool { return false } diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go new file mode 100644 index 000000000..31b42cadf --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go @@ -0,0 +1,108 @@ +// +build go1.9 + +// NOTE: This is a temporary copy of testing.go for Go 1.9 with the addition +// of "Helper" to the T interface. Go 1.9 at the time of typing is in RC +// and is set for release shortly. We'll support this on master as the default +// as soon as 1.9 is released. + +package testing + +import ( + "fmt" + "log" +) + +// T is the interface that mimics the standard library *testing.T. +// +// In unit tests you can just pass a *testing.T struct. At runtime, outside +// of tests, you can pass in a RuntimeT struct from this package. +type T interface { + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fail() + FailNow() + Failed() bool + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Name() string + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool + Helper() +} + +// RuntimeT implements T and can be instantiated and run at runtime to +// mimic *testing.T behavior. Unlike *testing.T, this will simply panic +// for calls to Fatal. For calls to Error, you'll have to check the errors +// list to determine whether to exit yourself. 
+type RuntimeT struct { + skipped bool + failed bool +} + +func (t *RuntimeT) Error(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.Fail() +} + +func (t *RuntimeT) Errorf(format string, args ...interface{}) { + log.Printf(format, args...) + t.Fail() +} + +func (t *RuntimeT) Fail() { + t.failed = true +} + +func (t *RuntimeT) FailNow() { + panic("testing.T failed, see logs for output (if any)") +} + +func (t *RuntimeT) Failed() bool { + return t.failed +} + +func (t *RuntimeT) Fatal(args ...interface{}) { + log.Print(args...) + t.FailNow() +} + +func (t *RuntimeT) Fatalf(format string, args ...interface{}) { + log.Printf(format, args...) + t.FailNow() +} + +func (t *RuntimeT) Log(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) +} + +func (t *RuntimeT) Logf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) +} + +func (t *RuntimeT) Name() string { + return "" +} + +func (t *RuntimeT) Skip(args ...interface{}) { + log.Print(args...) + t.SkipNow() +} + +func (t *RuntimeT) SkipNow() { + t.skipped = true +} + +func (t *RuntimeT) Skipf(format string, args ...interface{}) { + log.Printf(format, args...) + t.SkipNow() +} + +func (t *RuntimeT) Skipped() bool { + return t.skipped +} + +func (t *RuntimeT) Helper() {} diff --git a/vendor/github.com/oklog/run/.gitignore b/vendor/github.com/oklog/run/.gitignore new file mode 100644 index 000000000..a1338d685 --- /dev/null +++ b/vendor/github.com/oklog/run/.gitignore @@ -0,0 +1,14 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ diff --git a/vendor/github.com/oklog/run/.travis.yml b/vendor/github.com/oklog/run/.travis.yml new file mode 100644 index 000000000..362bdd41c --- /dev/null +++ b/vendor/github.com/oklog/run/.travis.yml @@ -0,0 +1,12 @@ +language: go +sudo: false +go: + - 1.x + - tip +install: + - go get -v github.com/golang/lint/golint + - go build ./... +script: + - go vet ./... + - $HOME/gopath/bin/golint . + - go test -v -race ./... diff --git a/vendor/github.com/oklog/run/LICENSE b/vendor/github.com/oklog/run/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/oklog/run/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/oklog/run/README.md b/vendor/github.com/oklog/run/README.md new file mode 100644 index 000000000..a7228cd9a --- /dev/null +++ b/vendor/github.com/oklog/run/README.md @@ -0,0 +1,73 @@ +# run + +[![GoDoc](https://godoc.org/github.com/oklog/run?status.svg)](https://godoc.org/github.com/oklog/run) +[![Build Status](https://travis-ci.org/oklog/run.svg?branch=master)](https://travis-ci.org/oklog/run) +[![Go Report Card](https://goreportcard.com/badge/github.com/oklog/run)](https://goreportcard.com/report/github.com/oklog/run) +[![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/run/master/LICENSE) + +run.Group is a universal mechanism to manage goroutine lifecycles. + +Create a zero-value run.Group, and then add actors to it. Actors are defined as +a pair of functions: an **execute** function, which should run synchronously; +and an **interrupt** function, which, when invoked, should cause the execute +function to return. Finally, invoke Run, which blocks until the first actor +returns. This general-purpose API allows callers to model pretty much any +runnable task, and achieve well-defined lifecycle semantics for the group. + +run.Group was written to manage component lifecycles in func main for +[OK Log](https://github.com/oklog/oklog). +But it's useful in any circumstance where you need to orchestrate multiple +goroutines as a unit whole. +[Click here](https://www.youtube.com/watch?v=LHe1Cb_Ud_M&t=15m45s) to see a +video of a talk where run.Group is described. + +## Examples + +### context.Context + +```go +ctx, cancel := context.WithCancel(context.Background()) +g.Add(func() error { + return myProcess(ctx, ...) +}, func(error) { + cancel() +}) +``` + +### net.Listener + +```go +ln, _ := net.Listen("tcp", ":8080") +g.Add(func() error { + return http.Serve(ln, nil) +}, func(error) { + ln.Close() +}) +``` + +### io.ReadCloser + +```go +var conn io.ReadCloser = ... +g.Add(func() error { + s := bufio.NewScanner(conn) + for s.Scan() { + println(s.Text()) + } + return s.Err() +}, func(error) { + conn.Close() +}) +``` + +## Comparisons + +Package run is somewhat similar to package +[errgroup](https://godoc.org/golang.org/x/sync/errgroup), +except it doesn't require actor goroutines to understand context semantics. + +It's somewhat similar to package +[tomb.v1](https://godoc.org/gopkg.in/tomb.v1) or +[tomb.v2](https://godoc.org/gopkg.in/tomb.v2), +except it has a much smaller API surface, delegating e.g. staged shutdown of +goroutines to the caller. diff --git a/vendor/github.com/oklog/run/group.go b/vendor/github.com/oklog/run/group.go new file mode 100644 index 000000000..832d47dd1 --- /dev/null +++ b/vendor/github.com/oklog/run/group.go @@ -0,0 +1,62 @@ +// Package run implements an actor-runner with deterministic teardown. It is +// somewhat similar to package errgroup, except it does not require actor +// goroutines to understand context semantics. This makes it suitable for use in +// more circumstances; for example, goroutines which are handling connections +// from net.Listeners, or scanning input from a closable io.Reader. +package run + +// Group collects actors (functions) and runs them concurrently. +// When one actor (function) returns, all actors are interrupted. +// The zero value of a Group is useful. +type Group struct { + actors []actor +} + +// Add an actor (function) to the group. Each actor must be pre-emptable by an +// interrupt function. 
That is, if interrupt is invoked, execute should return. +// Also, it must be safe to call interrupt even after execute has returned. +// +// The first actor (function) to return interrupts all running actors. +// The error is passed to the interrupt functions, and is returned by Run. +func (g *Group) Add(execute func() error, interrupt func(error)) { + g.actors = append(g.actors, actor{execute, interrupt}) +} + +// Run all actors (functions) concurrently. +// When the first actor returns, all others are interrupted. +// Run only returns when all actors have exited. +// Run returns the error returned by the first exiting actor. +func (g *Group) Run() error { + if len(g.actors) == 0 { + return nil + } + + // Run each actor. + errors := make(chan error, len(g.actors)) + for _, a := range g.actors { + go func(a actor) { + errors <- a.execute() + }(a) + } + + // Wait for the first actor to stop. + err := <-errors + + // Signal all actors to stop. + for _, a := range g.actors { + a.interrupt(err) + } + + // Wait for all actors to stop. + for i := 1; i < cap(errors); i++ { + <-errors + } + + // Return the original error. + return err +} + +type actor struct { + execute func() error + interrupt func(error) +} diff --git a/vendor/google.golang.org/grpc/health/client.go b/vendor/google.golang.org/grpc/health/client.go new file mode 100644 index 000000000..740745c45 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/client.go @@ -0,0 +1,117 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package health + +import ( + "context" + "fmt" + "io" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/status" +) + +var ( + backoffStrategy = backoff.DefaultExponential + backoffFunc = func(ctx context.Context, retries int) bool { + d := backoffStrategy.Backoff(retries) + timer := time.NewTimer(d) + select { + case <-timer.C: + return true + case <-ctx.Done(): + timer.Stop() + return false + } + } +) + +func init() { + internal.HealthCheckFunc = clientHealthCheck +} + +const healthCheckMethod = "/grpc.health.v1.Health/Watch" + +// This function implements the protocol defined at: +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md +func clientHealthCheck(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), service string) error { + tryCnt := 0 + +retryConnection: + for { + // Backs off if the connection has failed in some way without receiving a message in the previous retry. 
+ if tryCnt > 0 && !backoffFunc(ctx, tryCnt-1) { + return nil + } + tryCnt++ + + if ctx.Err() != nil { + return nil + } + setConnectivityState(connectivity.Connecting, nil) + rawS, err := newStream(healthCheckMethod) + if err != nil { + continue retryConnection + } + + s, ok := rawS.(grpc.ClientStream) + // Ideally, this should never happen. But if it happens, the server is marked as healthy for LBing purposes. + if !ok { + setConnectivityState(connectivity.Ready, nil) + return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS) + } + + if err = s.SendMsg(&healthpb.HealthCheckRequest{Service: service}); err != nil && err != io.EOF { + // Stream should have been closed, so we can safely continue to create a new stream. + continue retryConnection + } + s.CloseSend() + + resp := new(healthpb.HealthCheckResponse) + for { + err = s.RecvMsg(resp) + + // Reports healthy for the LBing purposes if health check is not implemented in the server. + if status.Code(err) == codes.Unimplemented { + setConnectivityState(connectivity.Ready, nil) + return err + } + + // Reports unhealthy if server's Watch method gives an error other than UNIMPLEMENTED. + if err != nil { + setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but received health check RPC error: %v", err)) + continue retryConnection + } + + // As a message has been received, removes the need for backoff for the next retry by resetting the try count. + tryCnt = 0 + if resp.Status == healthpb.HealthCheckResponse_SERVING { + setConnectivityState(connectivity.Ready, nil) + } else { + setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but health check failed. status=%s", resp.Status)) + } + } + } +} diff --git a/vendor/google.golang.org/grpc/health/logging.go b/vendor/google.golang.org/grpc/health/logging.go new file mode 100644 index 000000000..83c6acf55 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/logging.go @@ -0,0 +1,23 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package health + +import "google.golang.org/grpc/grpclog" + +var logger = grpclog.Component("health_service") diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go new file mode 100644 index 000000000..cce6312d7 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/server.go @@ -0,0 +1,163 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package health provides a service that exposes server's health and it must be +// imported to enable support for client-side health checks. +package health + +import ( + "context" + "sync" + + "google.golang.org/grpc/codes" + healthgrpc "google.golang.org/grpc/health/grpc_health_v1" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/status" +) + +// Server implements `service Health`. +type Server struct { + healthgrpc.UnimplementedHealthServer + mu sync.RWMutex + // If shutdown is true, it's expected all serving status is NOT_SERVING, and + // will stay in NOT_SERVING. + shutdown bool + // statusMap stores the serving status of the services this Server monitors. + statusMap map[string]healthpb.HealthCheckResponse_ServingStatus + updates map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus +} + +// NewServer returns a new Server. +func NewServer() *Server { + return &Server{ + statusMap: map[string]healthpb.HealthCheckResponse_ServingStatus{"": healthpb.HealthCheckResponse_SERVING}, + updates: make(map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus), + } +} + +// Check implements `service Health`. +func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { + s.mu.RLock() + defer s.mu.RUnlock() + if servingStatus, ok := s.statusMap[in.Service]; ok { + return &healthpb.HealthCheckResponse{ + Status: servingStatus, + }, nil + } + return nil, status.Error(codes.NotFound, "unknown service") +} + +// Watch implements `service Health`. +func (s *Server) Watch(in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { + service := in.Service + // update channel is used for getting service status updates. + update := make(chan healthpb.HealthCheckResponse_ServingStatus, 1) + s.mu.Lock() + // Puts the initial status to the channel. + if servingStatus, ok := s.statusMap[service]; ok { + update <- servingStatus + } else { + update <- healthpb.HealthCheckResponse_SERVICE_UNKNOWN + } + + // Registers the update channel to the correct place in the updates map. + if _, ok := s.updates[service]; !ok { + s.updates[service] = make(map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus) + } + s.updates[service][stream] = update + defer func() { + s.mu.Lock() + delete(s.updates[service], stream) + s.mu.Unlock() + }() + s.mu.Unlock() + + var lastSentStatus healthpb.HealthCheckResponse_ServingStatus = -1 + for { + select { + // Status updated. Sends the up-to-date status to the client. + case servingStatus := <-update: + if lastSentStatus == servingStatus { + continue + } + lastSentStatus = servingStatus + err := stream.Send(&healthpb.HealthCheckResponse{Status: servingStatus}) + if err != nil { + return status.Error(codes.Canceled, "Stream has ended.") + } + // Context done. Removes the update channel from the updates map. + case <-stream.Context().Done(): + return status.Error(codes.Canceled, "Stream has ended.") + } + } +} + +// SetServingStatus is called when need to reset the serving status of a service +// or insert a new service entry into the statusMap. 
+func (s *Server) SetServingStatus(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) { + s.mu.Lock() + defer s.mu.Unlock() + if s.shutdown { + logger.Infof("health: status changing for %s to %v is ignored because health service is shutdown", service, servingStatus) + return + } + + s.setServingStatusLocked(service, servingStatus) +} + +func (s *Server) setServingStatusLocked(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) { + s.statusMap[service] = servingStatus + for _, update := range s.updates[service] { + // Clears previous updates, that are not sent to the client, from the channel. + // This can happen if the client is not reading and the server gets flow control limited. + select { + case <-update: + default: + } + // Puts the most recent update to the channel. + update <- servingStatus + } +} + +// Shutdown sets all serving status to NOT_SERVING, and configures the server to +// ignore all future status changes. +// +// This changes serving status for all services. To set status for a particular +// services, call SetServingStatus(). +func (s *Server) Shutdown() { + s.mu.Lock() + defer s.mu.Unlock() + s.shutdown = true + for service := range s.statusMap { + s.setServingStatusLocked(service, healthpb.HealthCheckResponse_NOT_SERVING) + } +} + +// Resume sets all serving status to SERVING, and configures the server to +// accept all future status changes. +// +// This changes serving status for all services. To set status for a particular +// services, call SetServingStatus(). +func (s *Server) Resume() { + s.mu.Lock() + defer s.mu.Unlock() + s.shutdown = false + for service := range s.statusMap { + s.setServingStatusLocked(service, healthpb.HealthCheckResponse_SERVING) + } +} diff --git a/vendor/google.golang.org/grpc/reflection/README.md b/vendor/google.golang.org/grpc/reflection/README.md new file mode 100644 index 000000000..04b6371af --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/README.md @@ -0,0 +1,18 @@ +# Reflection + +Package reflection implements server reflection service. + +The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. + +To register server reflection on a gRPC server: +```go +import "google.golang.org/grpc/reflection" + +s := grpc.NewServer() +pb.RegisterYourOwnServer(s, &server{}) + +// Register reflection service on gRPC server. +reflection.Register(s) + +s.Serve(lis) +``` diff --git a/vendor/google.golang.org/grpc/reflection/adapt.go b/vendor/google.golang.org/grpc/reflection/adapt.go new file mode 100644 index 000000000..33b907a36 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/adapt.go @@ -0,0 +1,187 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package reflection + +import ( + v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + v1alphareflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" +) + +// asV1Alpha returns an implementation of the v1alpha version of the reflection +// interface that delegates all calls to the given v1 version. +func asV1Alpha(svr v1reflectiongrpc.ServerReflectionServer) v1alphareflectiongrpc.ServerReflectionServer { + return v1AlphaServerImpl{svr: svr} +} + +type v1AlphaServerImpl struct { + svr v1reflectiongrpc.ServerReflectionServer +} + +func (s v1AlphaServerImpl) ServerReflectionInfo(stream v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { + return s.svr.ServerReflectionInfo(v1AlphaServerStreamAdapter{stream}) +} + +type v1AlphaServerStreamAdapter struct { + v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer +} + +func (s v1AlphaServerStreamAdapter) Send(response *v1reflectionpb.ServerReflectionResponse) error { + return s.ServerReflection_ServerReflectionInfoServer.Send(v1ToV1AlphaResponse(response)) +} + +func (s v1AlphaServerStreamAdapter) Recv() (*v1reflectionpb.ServerReflectionRequest, error) { + resp, err := s.ServerReflection_ServerReflectionInfoServer.Recv() + if err != nil { + return nil, err + } + return v1AlphaToV1Request(resp), nil +} + +func v1ToV1AlphaResponse(v1 *v1reflectionpb.ServerReflectionResponse) *v1alphareflectionpb.ServerReflectionResponse { + var v1alpha v1alphareflectionpb.ServerReflectionResponse + v1alpha.ValidHost = v1.ValidHost + if v1.OriginalRequest != nil { + v1alpha.OriginalRequest = v1ToV1AlphaRequest(v1.OriginalRequest) + } + switch mr := v1.MessageResponse.(type) { + case *v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1alphareflectionpb.FileDescriptorResponse{ + FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1alphareflectionpb.ExtensionNumberResponse{ + BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), + ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_ListServicesResponse: + if mr != nil { + svcs := make([]*v1alphareflectionpb.ServiceResponse, len(mr.ListServicesResponse.GetService())) + for i, svc := range mr.ListServicesResponse.GetService() { + svcs[i] = &v1alphareflectionpb.ServiceResponse{ + Name: svc.GetName(), + } + } + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1alphareflectionpb.ListServiceResponse{ + Service: svcs, + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_ErrorResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1alphareflectionpb.ErrorResponse{ + ErrorCode: mr.ErrorResponse.GetErrorCode(), + ErrorMessage: mr.ErrorResponse.GetErrorMessage(), + }, + } + } + default: + // no value set + } + return &v1alpha +} + +func 
v1AlphaToV1Request(v1alpha *v1alphareflectionpb.ServerReflectionRequest) *v1reflectionpb.ServerReflectionRequest { + var v1 v1reflectionpb.ServerReflectionRequest + v1.Host = v1alpha.Host + switch mr := v1alpha.MessageRequest.(type) { + case *v1alphareflectionpb.ServerReflectionRequest_FileByFilename: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + case *v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + case *v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension: + if mr.FileContainingExtension != nil { + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1reflectionpb.ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + case *v1alphareflectionpb.ServerReflectionRequest_ListServices: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + default: + // no value set + } + return &v1 +} + +func v1ToV1AlphaRequest(v1 *v1reflectionpb.ServerReflectionRequest) *v1alphareflectionpb.ServerReflectionRequest { + var v1alpha v1alphareflectionpb.ServerReflectionRequest + v1alpha.Host = v1.Host + switch mr := v1.MessageRequest.(type) { + case *v1reflectionpb.ServerReflectionRequest_FileByFilename: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1alphareflectionpb.ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + } + case *v1reflectionpb.ServerReflectionRequest_ListServices: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + } + default: + // no value set + } + return &v1alpha +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go new file mode 100644 index 000000000..6f5c786b2 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go @@ -0,0 +1,953 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); 
+// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection. A more complete description of how +// server reflection works can be found at +// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.22.0 +// source: grpc/reflection/v1/reflection.proto + +package grpc_reflection_v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The message sent by the client when calling ServerReflectionInfo method. +type ServerReflectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. + // + // Types that are assignable to MessageRequest: + // + // *ServerReflectionRequest_FileByFilename + // *ServerReflectionRequest_FileContainingSymbol + // *ServerReflectionRequest_FileContainingExtension + // *ServerReflectionRequest_AllExtensionNumbersOfType + // *ServerReflectionRequest_ListServices + MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` +} + +func (x *ServerReflectionRequest) Reset() { + *x = ServerReflectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionRequest) ProtoMessage() {} + +func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead. 
+func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{0} +} + +func (x *ServerReflectionRequest) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { + if m != nil { + return m.MessageRequest + } + return nil +} + +func (x *ServerReflectionRequest) GetFileByFilename() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { + return x.FileByFilename + } + return "" +} + +func (x *ServerReflectionRequest) GetFileContainingSymbol() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { + return x.FileContainingSymbol + } + return "" +} + +func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { + return x.FileContainingExtension + } + return nil +} + +func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { + return x.AllExtensionNumbersOfType + } + return "" +} + +func (x *ServerReflectionRequest) GetListServices() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { + return x.ListServices + } + return "" +} + +type isServerReflectionRequest_MessageRequest interface { + isServerReflectionRequest_MessageRequest() +} + +type ServerReflectionRequest_FileByFilename struct { + // Find a proto file by the file name. + FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingSymbol struct { + // Find the proto file that declares the given fully-qualified symbol name. + // This field should be a fully-qualified symbol name + // (e.g. <package>.<service>[.<method>] or <package>.<type>). + FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingExtension struct { + // Find the proto file which defines an extension extending the given + // message type with the given field number. + FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"` +} + +type ServerReflectionRequest_AllExtensionNumbersOfType struct { + // Finds the tag numbers used by all known extensions of the given message + // type, and appends them to ExtensionNumberResponse in an undefined order. + // Its corresponding method is best-effort: it's not guaranteed that the + // reflection service will implement this method, and it's not guaranteed + // that this method will provide all extensions. Returns + // StatusCode::UNIMPLEMENTED if it's not implemented. + // This field should be a fully-qualified type name. The format is + // <package>.<type> + AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"` +} + +type ServerReflectionRequest_ListServices struct { + // List the full names of registered services. The content will not be + // checked.
+ ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"` +} + +func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() { +} + +func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +type ExtensionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Fully-qualified type name. The format should be <package>.<type> + ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` + ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionRequest) Reset() { + *x = ExtensionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRequest) ProtoMessage() {} + +func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead. +func (*ExtensionRequest) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{1} +} + +func (x *ExtensionRequest) GetContainingType() string { + if x != nil { + return x.ContainingType + } + return "" +} + +func (x *ExtensionRequest) GetExtensionNumber() int32 { + if x != nil { + return x.ExtensionNumber + } + return 0 +} + +// The message sent by the server to answer ServerReflectionInfo method. +type ServerReflectionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` + OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` + // The server sets one of the following fields according to the message_request +// in the request.
+ // + // Types that are assignable to MessageResponse: + // + // *ServerReflectionResponse_FileDescriptorResponse + // *ServerReflectionResponse_AllExtensionNumbersResponse + // *ServerReflectionResponse_ListServicesResponse + // *ServerReflectionResponse_ErrorResponse + MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` +} + +func (x *ServerReflectionResponse) Reset() { + *x = ServerReflectionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionResponse) ProtoMessage() {} + +func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. +func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{2} +} + +func (x *ServerReflectionResponse) GetValidHost() string { + if x != nil { + return x.ValidHost + } + return "" +} + +func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { + if x != nil { + return x.OriginalRequest + } + return nil +} + +func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { + if m != nil { + return m.MessageResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { + return x.FileDescriptorResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { + return x.AllExtensionNumbersResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { + return x.ListServicesResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { + return x.ErrorResponse + } + return nil +} + +type isServerReflectionResponse_MessageResponse interface { + isServerReflectionResponse_MessageResponse() +} + +type ServerReflectionResponse_FileDescriptorResponse struct { + // This message is used to answer file_by_filename, file_containing_symbol, + // file_containing_extension requests with transitive dependencies. + // As the repeated label is not allowed in oneof fields, we use a + // FileDescriptorResponse message to encapsulate the repeated fields. + // The reflection service is allowed to avoid sending FileDescriptorProtos + // that were previously sent in response to earlier requests in the stream. 
+ FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"` +} + +type ServerReflectionResponse_AllExtensionNumbersResponse struct { + // This message is used to answer all_extension_numbers_of_type requests. + AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ListServicesResponse struct { + // This message is used to answer list_services requests. + ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ErrorResponse struct { + // This message is used when an error occurs. + ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` +} + +func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {} + +func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. +type FileDescriptorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` +} + +func (x *FileDescriptorResponse) Reset() { + *x = FileDescriptorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorResponse) ProtoMessage() {} + +func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead. +func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{3} +} + +func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { + if x != nil { + return x.FileDescriptorProto + } + return nil +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. +type ExtensionNumberResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of the base type, including the package name. The format + // is <package>.<type>
+ BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` + ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionNumberResponse) Reset() { + *x = ExtensionNumberResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionNumberResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionNumberResponse) ProtoMessage() {} + +func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead. +func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{4} +} + +func (x *ExtensionNumberResponse) GetBaseTypeName() string { + if x != nil { + return x.BaseTypeName + } + return "" +} + +func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { + if x != nil { + return x.ExtensionNumber + } + return nil +} + +// A list of ServiceResponse sent by the server answering list_services request. +type ListServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. + Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` +} + +func (x *ListServiceResponse) Reset() { + *x = ListServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServiceResponse) ProtoMessage() {} + +func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead. +func (*ListServiceResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{5} +} + +func (x *ListServiceResponse) GetService() []*ServiceResponse { + if x != nil { + return x.Service + } + return nil +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request. +type ServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of a registered service, including its package name. The format + // is <package>.<service>
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *ServiceResponse) Reset() { + *x = ServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceResponse) ProtoMessage() {} + +func (x *ServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead. +func (*ServiceResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{6} +} + +func (x *ServiceResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The error code and error message sent by the server when an error occurs. +type ErrorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This field uses the error codes defined in grpc::StatusCode. + ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *ErrorResponse) Reset() { + *x = ErrorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorResponse) ProtoMessage() {} + +func (x *ErrorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead. 
+func (*ErrorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{7} +} + +func (x *ErrorResponse) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +func (x *ErrorResponse) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_grpc_reflection_v1_reflection_proto protoreflect.FileDescriptor + +var file_grpc_reflection_v1_reflection_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xf3, 0x02, 0x0a, 0x17, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x62, 0x0a, + 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, + 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, + 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x66, 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, + 0x65, 0x78, 0x74, 0x65, 
0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xae, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, + 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, + 0x6f, 0x73, 0x74, 0x12, 0x56, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, + 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x66, 0x0a, 0x18, 0x66, + 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x66, 0x69, 0x6c, + 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x1e, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0e, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x12, 0x0a, 0x10, 
0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x22, 0x54, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x07, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, + 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, + 0x53, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x32, 0x89, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x75, 0x0a, 0x14, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x2b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, + 0x42, 0x66, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x15, 0x53, 
0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_reflection_v1_reflection_proto_rawDescOnce sync.Once + file_grpc_reflection_v1_reflection_proto_rawDescData = file_grpc_reflection_v1_reflection_proto_rawDesc +) + +func file_grpc_reflection_v1_reflection_proto_rawDescGZIP() []byte { + file_grpc_reflection_v1_reflection_proto_rawDescOnce.Do(func() { + file_grpc_reflection_v1_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1_reflection_proto_rawDescData) + }) + return file_grpc_reflection_v1_reflection_proto_rawDescData +} + +var file_grpc_reflection_v1_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_reflection_v1_reflection_proto_goTypes = []interface{}{ + (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1.ServerReflectionRequest + (*ExtensionRequest)(nil), // 1: grpc.reflection.v1.ExtensionRequest + (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1.ServerReflectionResponse + (*FileDescriptorResponse)(nil), // 3: grpc.reflection.v1.FileDescriptorResponse + (*ExtensionNumberResponse)(nil), // 4: grpc.reflection.v1.ExtensionNumberResponse + (*ListServiceResponse)(nil), // 5: grpc.reflection.v1.ListServiceResponse + (*ServiceResponse)(nil), // 6: grpc.reflection.v1.ServiceResponse + (*ErrorResponse)(nil), // 7: grpc.reflection.v1.ErrorResponse +} +var file_grpc_reflection_v1_reflection_proto_depIdxs = []int32{ + 1, // 0: grpc.reflection.v1.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1.ExtensionRequest + 0, // 1: grpc.reflection.v1.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1.ServerReflectionRequest + 3, // 2: grpc.reflection.v1.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1.FileDescriptorResponse + 4, // 3: grpc.reflection.v1.ServerReflectionResponse.all_extension_numbers_response:type_name -> grpc.reflection.v1.ExtensionNumberResponse + 5, // 4: grpc.reflection.v1.ServerReflectionResponse.list_services_response:type_name -> grpc.reflection.v1.ListServiceResponse + 7, // 5: grpc.reflection.v1.ServerReflectionResponse.error_response:type_name -> grpc.reflection.v1.ErrorResponse + 6, // 6: grpc.reflection.v1.ListServiceResponse.service:type_name -> grpc.reflection.v1.ServiceResponse + 0, // 7: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:input_type -> grpc.reflection.v1.ServerReflectionRequest + 2, // 8: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:output_type -> grpc.reflection.v1.ServerReflectionResponse + 8, // [8:9] is the sub-list for method output_type + 7, // [7:8] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_grpc_reflection_v1_reflection_proto_init() } +func file_grpc_reflection_v1_reflection_proto_init() { + if File_grpc_reflection_v1_reflection_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + 
file_grpc_reflection_v1_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionNumberResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*ServerReflectionRequest_FileByFilename)(nil), + (*ServerReflectionRequest_FileContainingSymbol)(nil), + (*ServerReflectionRequest_FileContainingExtension)(nil), + (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), + (*ServerReflectionRequest_ListServices)(nil), + } + file_grpc_reflection_v1_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*ServerReflectionResponse_FileDescriptorResponse)(nil), + (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), + (*ServerReflectionResponse_ListServicesResponse)(nil), + (*ServerReflectionResponse_ErrorResponse)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_reflection_v1_reflection_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_reflection_v1_reflection_proto_goTypes, + DependencyIndexes: file_grpc_reflection_v1_reflection_proto_depIdxs, + MessageInfos: file_grpc_reflection_v1_reflection_proto_msgTypes, + }.Build() + File_grpc_reflection_v1_reflection_proto = out.File + file_grpc_reflection_v1_reflection_proto_rawDesc = nil + 
file_grpc_reflection_v1_reflection_proto_goTypes = nil + file_grpc_reflection_v1_reflection_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go new file mode 100644 index 000000000..62b56a8be --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go @@ -0,0 +1,164 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection. A more complete description of how +// server reflection works can be found at +// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 +// source: grpc/reflection/v1/reflection.proto + +package grpc_reflection_v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1.ServerReflection/ServerReflectionInfo" +) + +// ServerReflectionClient is the client API for ServerReflection service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ServerReflectionClient interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) +} + +type serverReflectionClient struct { + cc grpc.ClientConnInterface +} + +func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient { + return &serverReflectionClient{cc} +} + +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { + stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, opts...) 
+ if err != nil { + return nil, err + } + x := &serverReflectionServerReflectionInfoClient{stream} + return x, nil +} + +type ServerReflection_ServerReflectionInfoClient interface { + Send(*ServerReflectionRequest) error + Recv() (*ServerReflectionResponse, error) + grpc.ClientStream +} + +type serverReflectionServerReflectionInfoClient struct { + grpc.ClientStream +} + +func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { + m := new(ServerReflectionResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflectionServer is the server API for ServerReflection service. +// All implementations should embed UnimplementedServerReflectionServer +// for forward compatibility +type ServerReflectionServer interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error +} + +// UnimplementedServerReflectionServer should be embedded to have forward compatible implementations. +type UnimplementedServerReflectionServer struct { +} + +func (UnimplementedServerReflectionServer) ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error { + return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") +} + +// UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ServerReflectionServer will +// result in compilation errors. +type UnsafeServerReflectionServer interface { + mustEmbedUnimplementedServerReflectionServer() +} + +func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) { + s.RegisterService(&ServerReflection_ServiceDesc, srv) +} + +func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream}) +} + +type ServerReflection_ServerReflectionInfoServer interface { + Send(*ServerReflectionResponse) error + Recv() (*ServerReflectionRequest, error) + grpc.ServerStream +} + +type serverReflectionServerReflectionInfoServer struct { + grpc.ServerStream +} + +func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { + m := new(ServerReflectionRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ServerReflection_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.reflection.v1.ServerReflection", + HandlerType: (*ServerReflectionServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ServerReflectionInfo", + Handler: _ServerReflection_ServerReflectionInfo_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/reflection/v1/reflection.proto", +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go new file mode 100644 index 000000000..69fbfb621 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -0,0 +1,1028 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Service exported by server reflection + +// Warning: this entire file is deprecated. Use this instead: +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.22.0 +// grpc/reflection/v1alpha/reflection.proto is a deprecated file. + +package grpc_reflection_v1alpha + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The message sent by the client when calling ServerReflectionInfo method. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type ServerReflectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. 
+ // + // Types that are assignable to MessageRequest: + // + // *ServerReflectionRequest_FileByFilename + // *ServerReflectionRequest_FileContainingSymbol + // *ServerReflectionRequest_FileContainingExtension + // *ServerReflectionRequest_AllExtensionNumbersOfType + // *ServerReflectionRequest_ListServices + MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` +} + +func (x *ServerReflectionRequest) Reset() { + *x = ServerReflectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionRequest) ProtoMessage() {} + +func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead. +func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{0} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionRequest) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { + if m != nil { + return m.MessageRequest + } + return nil +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionRequest) GetFileByFilename() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { + return x.FileByFilename + } + return "" +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionRequest) GetFileContainingSymbol() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { + return x.FileContainingSymbol + } + return "" +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { + return x.FileContainingExtension + } + return nil +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { + return x.AllExtensionNumbersOfType + } + return "" +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
+func (x *ServerReflectionRequest) GetListServices() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { + return x.ListServices + } + return "" +} + +type isServerReflectionRequest_MessageRequest interface { + isServerReflectionRequest_MessageRequest() +} + +type ServerReflectionRequest_FileByFilename struct { + // Find a proto file by the file name. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingSymbol struct { + // Find the proto file that declares the given fully-qualified symbol name. + // This field should be a fully-qualified symbol name + // (e.g. .[.] or .). + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingExtension struct { + // Find the proto file which defines an extension extending the given + // message type with the given field number. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"` +} + +type ServerReflectionRequest_AllExtensionNumbersOfType struct { + // Finds the tag numbers used by all known extensions of extendee_type, and + // appends them to ExtensionNumberResponse in an undefined order. + // Its corresponding method is best-effort: it's not guaranteed that the + // reflection service will implement this method, and it's not guaranteed + // that this method will provide all extensions. Returns + // StatusCode::UNIMPLEMENTED if it's not implemented. + // This field should be a fully-qualified type name. The format is + // . + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"` +} + +type ServerReflectionRequest_ListServices struct { + // List the full names of registered services. The content will not be + // checked. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"` +} + +func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() { +} + +func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type ExtensionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Fully-qualified type name. 
The format should be . + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionRequest) Reset() { + *x = ExtensionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRequest) ProtoMessage() {} + +func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead. +func (*ExtensionRequest) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{1} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ExtensionRequest) GetContainingType() string { + if x != nil { + return x.ContainingType + } + return "" +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ExtensionRequest) GetExtensionNumber() int32 { + if x != nil { + return x.ExtensionNumber + } + return 0 +} + +// The message sent by the server to answer ServerReflectionInfo method. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type ServerReflectionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` + // The server set one of the following fields according to the message_request + // in the request. 
+ // + // Types that are assignable to MessageResponse: + // + // *ServerReflectionResponse_FileDescriptorResponse + // *ServerReflectionResponse_AllExtensionNumbersResponse + // *ServerReflectionResponse_ListServicesResponse + // *ServerReflectionResponse_ErrorResponse + MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` +} + +func (x *ServerReflectionResponse) Reset() { + *x = ServerReflectionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionResponse) ProtoMessage() {} + +func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. +func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{2} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionResponse) GetValidHost() string { + if x != nil { + return x.ValidHost + } + return "" +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { + if x != nil { + return x.OriginalRequest + } + return nil +} + +func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { + if m != nil { + return m.MessageResponse + } + return nil +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { + return x.FileDescriptorResponse + } + return nil +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { + return x.AllExtensionNumbersResponse + } + return nil +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { + return x.ListServicesResponse + } + return nil +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
+func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { + return x.ErrorResponse + } + return nil +} + +type isServerReflectionResponse_MessageResponse interface { + isServerReflectionResponse_MessageResponse() +} + +type ServerReflectionResponse_FileDescriptorResponse struct { + // This message is used to answer file_by_filename, file_containing_symbol, + // file_containing_extension requests with transitive dependencies. As + // the repeated label is not allowed in oneof fields, we use a + // FileDescriptorResponse message to encapsulate the repeated fields. + // The reflection service is allowed to avoid sending FileDescriptorProtos + // that were previously sent in response to earlier requests in the stream. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"` +} + +type ServerReflectionResponse_AllExtensionNumbersResponse struct { + // This message is used to answer all_extension_numbers_of_type requst. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ListServicesResponse struct { + // This message is used to answer list_services request. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ErrorResponse struct { + // This message is used when an error occurs. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` +} + +func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {} + +func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type FileDescriptorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
+ FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` +} + +func (x *FileDescriptorResponse) Reset() { + *x = FileDescriptorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorResponse) ProtoMessage() {} + +func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead. +func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{3} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { + if x != nil { + return x.FileDescriptorProto + } + return nil +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type ExtensionNumberResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of the base type, including the package name. The format + // is . + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionNumberResponse) Reset() { + *x = ExtensionNumberResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionNumberResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionNumberResponse) ProtoMessage() {} + +func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead. +func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{4} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
+func (x *ExtensionNumberResponse) GetBaseTypeName() string { + if x != nil { + return x.BaseTypeName + } + return "" +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { + if x != nil { + return x.ExtensionNumber + } + return nil +} + +// A list of ServiceResponse sent by the server answering list_services request. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type ListServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` +} + +func (x *ListServiceResponse) Reset() { + *x = ListServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServiceResponse) ProtoMessage() {} + +func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead. +func (*ListServiceResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{5} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ListServiceResponse) GetService() []*ServiceResponse { + if x != nil { + return x.Service + } + return nil +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type ServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of a registered service, including its package name. The format + // is . + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *ServiceResponse) Reset() { + *x = ServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceResponse) ProtoMessage() {} + +func (x *ServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead. +func (*ServiceResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{6} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServiceResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The error code and error message sent by the server when an error occurs. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type ErrorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This field uses the error codes defined in grpc::StatusCode. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *ErrorResponse) Reset() { + *x = ErrorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorResponse) ProtoMessage() {} + +func (x *ErrorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead. +func (*ErrorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{7} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ErrorResponse) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
+func (x *ErrorResponse) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_grpc_reflection_v1alpha_reflection_proto protoreflect.FileDescriptor + +var file_grpc_reflection_v1alpha_reflection_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x22, 0xf8, 0x02, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, + 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, + 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x36, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, + 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x67, 0x0a, 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, 0x66, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x6c, + 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x66, + 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 
0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xc7, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, 0x73, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x6f, + 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, + 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x6b, 0x0a, 0x18, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x1e, + 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x64, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x0e, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 
0x12, 0x0a, 0x10, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, + 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, + 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x59, 0x0a, 0x13, 0x4c, 0x69, + 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x42, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, 0x0d, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, + 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x32, 0x93, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7f, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x30, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x73, 0x0a, 0x1a, 0x69, 0x6f, 0x2e, 0x67, 0x72, + 0x70, 
0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x42, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, + 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xb8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce sync.Once + file_grpc_reflection_v1alpha_reflection_proto_rawDescData = file_grpc_reflection_v1alpha_reflection_proto_rawDesc +) + +func file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP() []byte { + file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce.Do(func() { + file_grpc_reflection_v1alpha_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1alpha_reflection_proto_rawDescData) + }) + return file_grpc_reflection_v1alpha_reflection_proto_rawDescData +} + +var file_grpc_reflection_v1alpha_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_reflection_v1alpha_reflection_proto_goTypes = []interface{}{ + (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1alpha.ServerReflectionRequest + (*ExtensionRequest)(nil), // 1: grpc.reflection.v1alpha.ExtensionRequest + (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1alpha.ServerReflectionResponse + (*FileDescriptorResponse)(nil), // 3: grpc.reflection.v1alpha.FileDescriptorResponse + (*ExtensionNumberResponse)(nil), // 4: grpc.reflection.v1alpha.ExtensionNumberResponse + (*ListServiceResponse)(nil), // 5: grpc.reflection.v1alpha.ListServiceResponse + (*ServiceResponse)(nil), // 6: grpc.reflection.v1alpha.ServiceResponse + (*ErrorResponse)(nil), // 7: grpc.reflection.v1alpha.ErrorResponse +} +var file_grpc_reflection_v1alpha_reflection_proto_depIdxs = []int32{ + 1, // 0: grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1alpha.ExtensionRequest + 0, // 1: grpc.reflection.v1alpha.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1alpha.ServerReflectionRequest + 3, // 2: grpc.reflection.v1alpha.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1alpha.FileDescriptorResponse + 4, // 3: grpc.reflection.v1alpha.ServerReflectionResponse.all_extension_numbers_response:type_name -> grpc.reflection.v1alpha.ExtensionNumberResponse + 5, // 4: grpc.reflection.v1alpha.ServerReflectionResponse.list_services_response:type_name -> grpc.reflection.v1alpha.ListServiceResponse + 7, // 5: grpc.reflection.v1alpha.ServerReflectionResponse.error_response:type_name -> grpc.reflection.v1alpha.ErrorResponse + 6, // 6: grpc.reflection.v1alpha.ListServiceResponse.service:type_name -> grpc.reflection.v1alpha.ServiceResponse + 0, // 7: grpc.reflection.v1alpha.ServerReflection.ServerReflectionInfo:input_type -> grpc.reflection.v1alpha.ServerReflectionRequest + 2, // 8: grpc.reflection.v1alpha.ServerReflection.ServerReflectionInfo:output_type -> grpc.reflection.v1alpha.ServerReflectionResponse + 8, // [8:9] is the sub-list for method output_type + 7, // [7:8] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension 
type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_grpc_reflection_v1alpha_reflection_proto_init() } +func file_grpc_reflection_v1alpha_reflection_proto_init() { + if File_grpc_reflection_v1alpha_reflection_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionNumberResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*ServerReflectionRequest_FileByFilename)(nil), + (*ServerReflectionRequest_FileContainingSymbol)(nil), + (*ServerReflectionRequest_FileContainingExtension)(nil), + (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), + (*ServerReflectionRequest_ListServices)(nil), + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*ServerReflectionResponse_FileDescriptorResponse)(nil), + (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), + (*ServerReflectionResponse_ListServicesResponse)(nil), + (*ServerReflectionResponse_ErrorResponse)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_reflection_v1alpha_reflection_proto_rawDesc, + NumEnums: 0, + 
NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_reflection_v1alpha_reflection_proto_goTypes, + DependencyIndexes: file_grpc_reflection_v1alpha_reflection_proto_depIdxs, + MessageInfos: file_grpc_reflection_v1alpha_reflection_proto_msgTypes, + }.Build() + File_grpc_reflection_v1alpha_reflection_proto = out.File + file_grpc_reflection_v1alpha_reflection_proto_rawDesc = nil + file_grpc_reflection_v1alpha_reflection_proto_goTypes = nil + file_grpc_reflection_v1alpha_reflection_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go new file mode 100644 index 000000000..367a029be --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -0,0 +1,161 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Service exported by server reflection + +// Warning: this entire file is deprecated. Use this instead: +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 +// grpc/reflection/v1alpha/reflection.proto is a deprecated file. + +package grpc_reflection_v1alpha + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo" +) + +// ServerReflectionClient is the client API for ServerReflection service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ServerReflectionClient interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) +} + +type serverReflectionClient struct { + cc grpc.ClientConnInterface +} + +func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient { + return &serverReflectionClient{cc} +} + +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { + stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, opts...) 
+ if err != nil { + return nil, err + } + x := &serverReflectionServerReflectionInfoClient{stream} + return x, nil +} + +type ServerReflection_ServerReflectionInfoClient interface { + Send(*ServerReflectionRequest) error + Recv() (*ServerReflectionResponse, error) + grpc.ClientStream +} + +type serverReflectionServerReflectionInfoClient struct { + grpc.ClientStream +} + +func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { + m := new(ServerReflectionResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflectionServer is the server API for ServerReflection service. +// All implementations should embed UnimplementedServerReflectionServer +// for forward compatibility +type ServerReflectionServer interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error +} + +// UnimplementedServerReflectionServer should be embedded to have forward compatible implementations. +type UnimplementedServerReflectionServer struct { +} + +func (UnimplementedServerReflectionServer) ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error { + return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") +} + +// UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ServerReflectionServer will +// result in compilation errors. +type UnsafeServerReflectionServer interface { + mustEmbedUnimplementedServerReflectionServer() +} + +func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) { + s.RegisterService(&ServerReflection_ServiceDesc, srv) +} + +func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream}) +} + +type ServerReflection_ServerReflectionInfoServer interface { + Send(*ServerReflectionResponse) error + Recv() (*ServerReflectionRequest, error) + grpc.ServerStream +} + +type serverReflectionServerReflectionInfoServer struct { + grpc.ServerStream +} + +func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { + m := new(ServerReflectionRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ServerReflection_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.reflection.v1alpha.ServerReflection", + HandlerType: (*ServerReflectionServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ServerReflectionInfo", + Handler: _ServerReflection_ServerReflectionInfo_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/reflection/v1alpha/reflection.proto", +} diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go new file mode 100644 index 000000000..76dae09d8 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -0,0 +1,351 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* +Package reflection implements server reflection service. + +The service implemented is defined in: +https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. + +To register server reflection on a gRPC server: + + import "google.golang.org/grpc/reflection" + + s := grpc.NewServer() + pb.RegisterYourOwnServer(s, &server{}) + + // Register reflection service on gRPC server. + reflection.Register(s) + + s.Serve(lis) +*/ +package reflection // import "google.golang.org/grpc/reflection" + +import ( + "io" + "sort" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" +) + +// GRPCServer is the interface provided by a gRPC server. It is implemented by +// *grpc.Server, but could also be implemented by other concrete types. It acts +// as a registry, for accumulating the services exposed by the server. +type GRPCServer interface { + grpc.ServiceRegistrar + ServiceInfoProvider +} + +var _ GRPCServer = (*grpc.Server)(nil) + +// Register registers the server reflection service on the given gRPC server. +// Both the v1 and v1alpha versions are registered. +func Register(s GRPCServer) { + svr := NewServerV1(ServerOptions{Services: s}) + v1alphareflectiongrpc.RegisterServerReflectionServer(s, asV1Alpha(svr)) + v1reflectiongrpc.RegisterServerReflectionServer(s, svr) +} + +// RegisterV1 registers only the v1 version of the server reflection service +// on the given gRPC server. Many clients may only support v1alpha so most +// users should use Register instead, at least until clients have upgraded. 
+func RegisterV1(s GRPCServer) { + svr := NewServerV1(ServerOptions{Services: s}) + v1reflectiongrpc.RegisterServerReflectionServer(s, svr) +} + +// ServiceInfoProvider is an interface used to retrieve metadata about the +// services to expose. +// +// The reflection service is only interested in the service names, but the +// signature is this way so that *grpc.Server implements it. So it is okay +// for a custom implementation to return zero values for the +// grpc.ServiceInfo values in the map. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServiceInfoProvider interface { + GetServiceInfo() map[string]grpc.ServiceInfo +} + +// ExtensionResolver is the interface used to query details about extensions. +// This interface is satisfied by protoregistry.GlobalTypes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ExtensionResolver interface { + protoregistry.ExtensionTypeResolver + RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) +} + +// ServerOptions represents the options used to construct a reflection server. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServerOptions struct { + // The source of advertised RPC services. If not specified, the reflection + // server will report an empty list when asked to list services. + // + // This value will typically be a *grpc.Server. But the set of advertised + // services can be customized by wrapping a *grpc.Server or using an + // alternate implementation that returns a custom set of service names. + Services ServiceInfoProvider + // Optional resolver used to load descriptors. If not specified, + // protoregistry.GlobalFiles will be used. + DescriptorResolver protodesc.Resolver + // Optional resolver used to query for known extensions. If not specified, + // protoregistry.GlobalTypes will be used. + ExtensionResolver ExtensionResolver +} + +// NewServer returns a reflection server implementation using the given options. +// This can be used to customize behavior of the reflection service. Most usages +// should prefer to use Register instead. For backwards compatibility reasons, +// this returns the v1alpha version of the reflection server. For a v1 version +// of the reflection server, see NewServerV1. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewServer(opts ServerOptions) v1alphareflectiongrpc.ServerReflectionServer { + return asV1Alpha(NewServerV1(opts)) +} + +// NewServerV1 returns a reflection server implementation using the given options. +// This can be used to customize behavior of the reflection service. Most usages +// should prefer to use Register instead. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func NewServerV1(opts ServerOptions) v1reflectiongrpc.ServerReflectionServer { + if opts.DescriptorResolver == nil { + opts.DescriptorResolver = protoregistry.GlobalFiles + } + if opts.ExtensionResolver == nil { + opts.ExtensionResolver = protoregistry.GlobalTypes + } + return &serverReflectionServer{ + s: opts.Services, + descResolver: opts.DescriptorResolver, + extResolver: opts.ExtensionResolver, + } +} + +type serverReflectionServer struct { + v1alphareflectiongrpc.UnimplementedServerReflectionServer + s ServiceInfoProvider + descResolver protodesc.Resolver + extResolver ExtensionResolver +} + +// fileDescWithDependencies returns a slice of serialized fileDescriptors in +// wire format ([]byte). The fileDescriptors will include fd and all the +// transitive dependencies of fd with names not in sentFileDescriptors. +func (s *serverReflectionServer) fileDescWithDependencies(fd protoreflect.FileDescriptor, sentFileDescriptors map[string]bool) ([][]byte, error) { + var r [][]byte + queue := []protoreflect.FileDescriptor{fd} + for len(queue) > 0 { + currentfd := queue[0] + queue = queue[1:] + if sent := sentFileDescriptors[currentfd.Path()]; len(r) == 0 || !sent { + sentFileDescriptors[currentfd.Path()] = true + fdProto := protodesc.ToFileDescriptorProto(currentfd) + currentfdEncoded, err := proto.Marshal(fdProto) + if err != nil { + return nil, err + } + r = append(r, currentfdEncoded) + } + for i := 0; i < currentfd.Imports().Len(); i++ { + queue = append(queue, currentfd.Imports().Get(i)) + } + } + return r, nil +} + +// fileDescEncodingContainingSymbol finds the file descriptor containing the +// given symbol, finds all of its previously unsent transitive dependencies, +// does marshalling on them, and returns the marshalled result. The given symbol +// can be a type, a service or a method. +func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string, sentFileDescriptors map[string]bool) ([][]byte, error) { + d, err := s.descResolver.FindDescriptorByName(protoreflect.FullName(name)) + if err != nil { + return nil, err + } + return s.fileDescWithDependencies(d.ParentFile(), sentFileDescriptors) +} + +// fileDescEncodingContainingExtension finds the file descriptor containing +// given extension, finds all of its previously unsent transitive dependencies, +// does marshalling on them, and returns the marshalled result. +func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName string, extNum int32, sentFileDescriptors map[string]bool) ([][]byte, error) { + xt, err := s.extResolver.FindExtensionByNumber(protoreflect.FullName(typeName), protoreflect.FieldNumber(extNum)) + if err != nil { + return nil, err + } + return s.fileDescWithDependencies(xt.TypeDescriptor().ParentFile(), sentFileDescriptors) +} + +// allExtensionNumbersForTypeName returns all extension numbers for the given type. 
+func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([]int32, error) { + var numbers []int32 + s.extResolver.RangeExtensionsByMessage(protoreflect.FullName(name), func(xt protoreflect.ExtensionType) bool { + numbers = append(numbers, int32(xt.TypeDescriptor().Number())) + return true + }) + sort.Slice(numbers, func(i, j int) bool { + return numbers[i] < numbers[j] + }) + if len(numbers) == 0 { + // maybe return an error if given type name is not known + if _, err := s.descResolver.FindDescriptorByName(protoreflect.FullName(name)); err != nil { + return nil, err + } + } + return numbers, nil +} + +// listServices returns the names of services this server exposes. +func (s *serverReflectionServer) listServices() []*v1reflectionpb.ServiceResponse { + serviceInfo := s.s.GetServiceInfo() + resp := make([]*v1reflectionpb.ServiceResponse, 0, len(serviceInfo)) + for svc := range serviceInfo { + resp = append(resp, &v1reflectionpb.ServiceResponse{Name: svc}) + } + sort.Slice(resp, func(i, j int) bool { + return resp[i].Name < resp[j].Name + }) + return resp +} + +// ServerReflectionInfo is the reflection service handler. +func (s *serverReflectionServer) ServerReflectionInfo(stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { + sentFileDescriptors := make(map[string]bool) + for { + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + out := &v1reflectionpb.ServerReflectionResponse{ + ValidHost: in.Host, + OriginalRequest: in, + } + switch req := in.MessageRequest.(type) { + case *v1reflectionpb.ServerReflectionRequest_FileByFilename: + var b [][]byte + fd, err := s.descResolver.FindFileByPath(req.FileByFilename) + if err == nil { + b, err = s.fileDescWithDependencies(fd, sentFileDescriptors) + } + if err != nil { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: + b, err := s.fileDescEncodingContainingSymbol(req.FileContainingSymbol, sentFileDescriptors) + if err != nil { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: + typeName := req.FileContainingExtension.ContainingType + extNum := req.FileContainingExtension.ExtensionNumber + b, err := s.fileDescEncodingContainingExtension(typeName, extNum, sentFileDescriptors) + if err != nil { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, + } + } + case 
*v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + extNums, err := s.allExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) + if err != nil { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1reflectionpb.ExtensionNumberResponse{ + BaseTypeName: req.AllExtensionNumbersOfType, + ExtensionNumber: extNums, + }, + } + } + case *v1reflectionpb.ServerReflectionRequest_ListServices: + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1reflectionpb.ListServiceResponse{ + Service: s.listServices(), + }, + } + default: + return status.Errorf(codes.InvalidArgument, "invalid MessageRequest: %v", in.MessageRequest) + } + + if err := stream.Send(out); err != nil { + return err + } + } +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 3ad3a5eed..d4afe67cd 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -128,6 +128,9 @@ github.com/exponent-io/jsonpath # github.com/fatih/camelcase v1.0.0 ## explicit github.com/fatih/camelcase +# github.com/fatih/color v1.12.0 +## explicit; go 1.13 +github.com/fatih/color # github.com/felixge/httpsnoop v1.0.4 ## explicit; go 1.13 github.com/felixge/httpsnoop @@ -201,6 +204,7 @@ github.com/golang/protobuf/proto github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration +github.com/golang/protobuf/ptypes/empty github.com/golang/protobuf/ptypes/timestamp # github.com/google/btree v1.1.2 ## explicit; go 1.18 @@ -275,10 +279,23 @@ github.com/grpc-ecosystem/go-grpc-prometheus github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities +# github.com/hashicorp/go-hclog v0.14.1 +## explicit; go 1.13 +github.com/hashicorp/go-hclog +# github.com/hashicorp/go-plugin v1.6.0 +## explicit; go 1.17 +github.com/hashicorp/go-plugin +github.com/hashicorp/go-plugin/internal/cmdrunner +github.com/hashicorp/go-plugin/internal/grpcmux +github.com/hashicorp/go-plugin/internal/plugin +github.com/hashicorp/go-plugin/runner # github.com/hashicorp/golang-lru/v2 v2.0.2 ## explicit; go 1.18 github.com/hashicorp/golang-lru/v2 github.com/hashicorp/golang-lru/v2/simplelru +# github.com/hashicorp/yamux v0.1.1 +## explicit; go 1.15 +github.com/hashicorp/yamux # github.com/imdario/mergo v0.3.16 ## explicit; go 1.13 github.com/imdario/mergo @@ -437,6 +454,9 @@ github.com/mgutz/ansi # github.com/mitchellh/go-homedir v1.1.0 ## explicit github.com/mitchellh/go-homedir +# github.com/mitchellh/go-testing-interface v1.0.0 +## explicit +github.com/mitchellh/go-testing-interface # github.com/mitchellh/go-wordwrap v1.0.1 ## explicit; go 1.14 github.com/mitchellh/go-wordwrap @@ -466,6 +486,9 @@ github.com/munnerz/goautoneg # github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f ## explicit github.com/mxk/go-flowrate/flowrate +# github.com/oklog/run v1.0.0 +## explicit +github.com/oklog/run # github.com/oklog/ulid v1.3.1 ## explicit github.com/oklog/ulid @@ -839,6 +862,7 @@ google.golang.org/grpc/encoding google.golang.org/grpc/encoding/gzip google.golang.org/grpc/encoding/proto google.golang.org/grpc/grpclog +google.golang.org/grpc/health 
google.golang.org/grpc/health/grpc_health_v1 google.golang.org/grpc/internal google.golang.org/grpc/internal/backoff @@ -868,6 +892,9 @@ google.golang.org/grpc/internal/transport/networktype google.golang.org/grpc/keepalive google.golang.org/grpc/metadata google.golang.org/grpc/peer +google.golang.org/grpc/reflection +google.golang.org/grpc/reflection/grpc_reflection_v1 +google.golang.org/grpc/reflection/grpc_reflection_v1alpha google.golang.org/grpc/resolver google.golang.org/grpc/resolver/manual google.golang.org/grpc/serviceconfig
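For context on how the newly vendored packages fit together, the following is a minimal usage sketch and not part of this diff: it registers the reflection and health services on a plain grpc.Server, mirroring the example shown in the reflection package documentation vendored above. The package main wrapper and listener address are illustrative assumptions, not taken from vcluster's actual plugin server code.

// Hypothetical sketch: wiring the newly vendored reflection and health
// services into a gRPC server so tools such as grpcurl can discover and
// probe the exposed services. Illustrative only; not vcluster code.
package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
	"google.golang.org/grpc/reflection"
)

func main() {
	// Illustrative listener; a real plugin server would obtain its listener
	// from the surrounding plugin framework.
	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}

	s := grpc.NewServer()

	// Health checking, backed by the vendored google.golang.org/grpc/health.
	healthpb.RegisterHealthServer(s, health.NewServer())

	// Server reflection from the vendored google.golang.org/grpc/reflection;
	// Register wires up both the v1 and v1alpha service variants, as described
	// in the package documentation above.
	reflection.Register(s)

	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}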