From 632ee2478822a3f8eadbc90c4d203dda22218c7c Mon Sep 17 00:00:00 2001 From: Vasil Averyanau Date: Tue, 17 Dec 2024 17:15:29 +0100 Subject: [PATCH 1/6] chore: updates scylla-manager module to the latest version of `v3/swagger`. This updates scylla-manager module to the latest version of `v3/swagger` package. --- go.mod | 8 +- go.sum | 14 +- .../v3/swagger/gen/agent/models/node_info.go | 6 + .../v1/client/operations/operations_client.go | 74 +++++ .../task_manager_ttl_post_parameters.go | 2 +- .../task_manager_user_ttl_get_parameters.go | 113 +++++++ .../task_manager_user_ttl_get_responses.go | 114 +++++++ .../task_manager_user_ttl_post_parameters.go | 141 +++++++++ .../task_manager_user_ttl_post_responses.go | 114 +++++++ vendor/golang.org/x/net/http2/frame.go | 4 +- vendor/golang.org/x/net/http2/http2.go | 42 ++- vendor/golang.org/x/net/http2/server.go | 35 ++- vendor/golang.org/x/net/http2/transport.go | 137 +++++++-- .../x/tools/go/gcexportdata/gcexportdata.go | 22 +- .../x/tools/internal/gcimporter/exportdata.go | 71 +++-- .../x/tools/internal/gcimporter/gcimporter.go | 80 +++-- .../x/tools/internal/imports/source.go | 2 +- .../x/tools/internal/imports/source_env.go | 8 +- .../tools/internal/imports/source_modindex.go | 103 +++++++ .../x/tools/internal/modindex/directories.go | 135 +++++++++ .../x/tools/internal/modindex/index.go | 262 ++++++++++++++++ .../x/tools/internal/modindex/lookup.go | 145 +++++++++ .../x/tools/internal/modindex/modindex.go | 164 ++++++++++ .../x/tools/internal/modindex/symbols.go | 189 ++++++++++++ .../x/tools/internal/modindex/types.go | 25 ++ .../tools/internal/typesinternal/zerovalue.go | 282 ++++++++++++++++++ .../x/tools/internal/versions/constraint.go | 13 - .../internal/versions/constraint_go121.go | 14 - vendor/modules.txt | 9 +- 29 files changed, 2141 insertions(+), 187 deletions(-) create mode 100644 vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/task_manager_user_ttl_get_parameters.go create mode 100644 vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/task_manager_user_ttl_get_responses.go create mode 100644 vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/task_manager_user_ttl_post_parameters.go create mode 100644 vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/task_manager_user_ttl_post_responses.go create mode 100644 vendor/golang.org/x/tools/internal/imports/source_modindex.go create mode 100644 vendor/golang.org/x/tools/internal/modindex/directories.go create mode 100644 vendor/golang.org/x/tools/internal/modindex/index.go create mode 100644 vendor/golang.org/x/tools/internal/modindex/lookup.go create mode 100644 vendor/golang.org/x/tools/internal/modindex/modindex.go create mode 100644 vendor/golang.org/x/tools/internal/modindex/symbols.go create mode 100644 vendor/golang.org/x/tools/internal/modindex/types.go create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/constraint.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/constraint_go121.go diff --git a/go.mod b/go.mod index 9b8b95ee5d..964e08ae4f 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module github.com/scylladb/scylla-manager/v3 go 1.23.2 require ( + cloud.google.com/go/compute/metadata v0.3.0 github.com/aws/aws-sdk-go v1.35.17 github.com/cenkalti/backoff/v4 v4.3.0 github.com/cespare/xxhash/v2 v2.3.0 @@ -32,7 +33,7 @@ require ( 
github.com/scylladb/gocqlx/v2 v2.8.0 github.com/scylladb/scylla-manager/v3/pkg/managerclient v0.0.0-20241104134613-aba35605c28b github.com/scylladb/scylla-manager/v3/pkg/util v0.0.0-20241104134613-aba35605c28b - github.com/scylladb/scylla-manager/v3/swagger v0.0.0-20241112131737-4fc93b5355fd + github.com/scylladb/scylla-manager/v3/swagger v0.0.0-20241217161122-cafa851a39fc github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 github.com/stoewer/go-strcase v1.3.0 @@ -48,7 +49,6 @@ require ( ) require ( - cloud.google.com/go/compute/metadata v0.3.0 // indirect github.com/Azure/azure-pipeline-go v0.2.3 // indirect github.com/Azure/azure-storage-blob-go v0.13.0 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect @@ -112,12 +112,12 @@ require ( go.opentelemetry.io/otel/trace v1.24.0 // indirect go.uber.org/config v1.4.0 // indirect golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect - golang.org/x/net v0.31.0 // indirect + golang.org/x/net v0.32.0 // indirect golang.org/x/oauth2 v0.21.0 // indirect golang.org/x/term v0.27.0 // indirect golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.27.0 // indirect + golang.org/x/tools v0.28.0 // indirect google.golang.org/api v0.114.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect diff --git a/go.sum b/go.sum index 32dbe91583..f62351d8d5 100644 --- a/go.sum +++ b/go.sum @@ -1066,8 +1066,10 @@ github.com/scylladb/scylla-manager/v3/pkg/managerclient v0.0.0-20241104134613-ab github.com/scylladb/scylla-manager/v3/pkg/managerclient v0.0.0-20241104134613-aba35605c28b/go.mod h1:Tss7a99vrgds+B70w8ZFG3Skxfr9Br3kAzrKP2b3CmQ= github.com/scylladb/scylla-manager/v3/pkg/util v0.0.0-20241104134613-aba35605c28b h1:7CHNmPrQqSdApaEh5nkRL+D52KFHaOHVBBVDvytHEOY= github.com/scylladb/scylla-manager/v3/pkg/util v0.0.0-20241104134613-aba35605c28b/go.mod h1:+sPCx2oaOXmMpy/ODNNEDGJ7vCghBeKP4S7xEfMI+eA= -github.com/scylladb/scylla-manager/v3/swagger v0.0.0-20241112131737-4fc93b5355fd h1:NNkXlN5SnutcpictWx3sc4jaOz2QMeGmzxES2XdC9RQ= -github.com/scylladb/scylla-manager/v3/swagger v0.0.0-20241112131737-4fc93b5355fd/go.mod h1:Oxfuz1XcXi9iV4ggSGfQdn+p6gPz6djPOegRMMe/6/s= +github.com/scylladb/scylla-manager/v3/swagger v0.0.0-20241217153220-2fde88815361 h1:JFDmfDNdCaKNfJfrikWNOOEnt8upW9Ah1lRwoJ+bgbY= +github.com/scylladb/scylla-manager/v3/swagger v0.0.0-20241217153220-2fde88815361/go.mod h1:nCN5P0jiWL0W7jbcZ9p0ndtZAPoyEWXefddx/nbyFes= +github.com/scylladb/scylla-manager/v3/swagger v0.0.0-20241217161122-cafa851a39fc h1:0Ous1SELrmSuO44z+3XcAGZnDNafjd4IQxoQ2yuFn4k= +github.com/scylladb/scylla-manager/v3/swagger v0.0.0-20241217161122-cafa851a39fc/go.mod h1:nCN5P0jiWL0W7jbcZ9p0ndtZAPoyEWXefddx/nbyFes= github.com/scylladb/termtables v0.0.0-20191203121021-c4c0b6d42ff4 h1:8qmTC5ByIXO3GP/IzBkxcZ/99VITvnIETDhdFz/om7A= github.com/scylladb/termtables v0.0.0-20191203121021-c4c0b6d42ff4/go.mod h1:C1a7PQSMz9NShzorzCiG2fk9+xuCgLkPeCvMHYR2OWg= github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= @@ -1296,8 +1298,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= 
-golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= +golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= +golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1528,8 +1530,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= -golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= -golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/node_info.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/node_info.go index 2a07069885..c2ed167dbd 100644 --- a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/node_info.go +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/node_info.go @@ -59,6 +59,9 @@ type NodeInfo struct { // Whether CQL requires password authentication. CqlPasswordProtected bool `json:"cql_password_protected,omitempty"` + // First entry from `data_file_directories` list from scylla config file. + DataDirectory string `json:"data_directory,omitempty"` + // Whether tablets are enabled. EnableTablets bool `json:"enable_tablets,omitempty"` @@ -92,6 +95,9 @@ type NodeInfo struct { // Whether Scylla supports uuid-like sstable naming. SstableUUIDFormat bool `json:"sstable_uuid_format,omitempty"` + // Total disk size of the first entry from `data_file_directories` list from scylla config file. In bytes. + StorageSize uint64 `json:"storage_size,omitempty"` + // Uptime in seconds. 
Uptime int64 `json:"uptime,omitempty"` } diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/operations_client.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/operations_client.go index cea2dd1b85..bf05935842 100644 --- a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/operations_client.go +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/operations_client.go @@ -841,6 +841,10 @@ type ClientService interface { TaskManagerTTLPost(params *TaskManagerTTLPostParams) (*TaskManagerTTLPostOK, error) + TaskManagerUserTTLGet(params *TaskManagerUserTTLGetParams) (*TaskManagerUserTTLGetOK, error) + + TaskManagerUserTTLPost(params *TaskManagerUserTTLPostParams) (*TaskManagerUserTTLPostOK, error) + TaskManagerWaitTaskTaskIDGet(params *TaskManagerWaitTaskTaskIDGetParams) (*TaskManagerWaitTaskTaskIDGetOK, error) SetTransport(transport runtime.ClientTransport) @@ -15116,6 +15120,76 @@ func (a *Client) TaskManagerTTLPost(params *TaskManagerTTLPostParams) (*TaskMana return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) } +/* +TaskManagerUserTTLGet gets user ttl + +Get current user task ttl value +*/ +func (a *Client) TaskManagerUserTTLGet(params *TaskManagerUserTTLGetParams) (*TaskManagerUserTTLGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewTaskManagerUserTTLGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "TaskManagerUserTtlGet", + Method: "GET", + PathPattern: "/task_manager/user_ttl", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &TaskManagerUserTTLGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*TaskManagerUserTTLGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*TaskManagerUserTTLGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +TaskManagerUserTTLPost gets and update user ttl + +Set user task ttl in seconds and get last value +*/ +func (a *Client) TaskManagerUserTTLPost(params *TaskManagerUserTTLPostParams) (*TaskManagerUserTTLPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewTaskManagerUserTTLPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "TaskManagerUserTtlPost", + Method: "POST", + PathPattern: "/task_manager/user_ttl", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &TaskManagerUserTTLPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*TaskManagerUserTTLPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*TaskManagerUserTTLPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) 
+} + /* TaskManagerWaitTaskTaskIDGet waits task diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/task_manager_ttl_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/task_manager_ttl_post_parameters.go index fa6b2326a2..1cc0c2323d 100644 --- a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/task_manager_ttl_post_parameters.go +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/task_manager_ttl_post_parameters.go @@ -63,7 +63,7 @@ for the task manager Ttl post operation typically these are written to a http.Re type TaskManagerTTLPostParams struct { /*TTL - The number of seconds for which the tasks will be kept in memory after it finishes + The number of seconds for which the task started internally will be kept in memory after it finishes */ TTL int64 diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/task_manager_user_ttl_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/task_manager_user_ttl_get_parameters.go new file mode 100644 index 0000000000..e7cee5769f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/task_manager_user_ttl_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewTaskManagerUserTTLGetParams creates a new TaskManagerUserTTLGetParams object +// with the default values initialized. 
+func NewTaskManagerUserTTLGetParams() *TaskManagerUserTTLGetParams { + + return &TaskManagerUserTTLGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewTaskManagerUserTTLGetParamsWithTimeout creates a new TaskManagerUserTTLGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewTaskManagerUserTTLGetParamsWithTimeout(timeout time.Duration) *TaskManagerUserTTLGetParams { + + return &TaskManagerUserTTLGetParams{ + + timeout: timeout, + } +} + +// NewTaskManagerUserTTLGetParamsWithContext creates a new TaskManagerUserTTLGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewTaskManagerUserTTLGetParamsWithContext(ctx context.Context) *TaskManagerUserTTLGetParams { + + return &TaskManagerUserTTLGetParams{ + + Context: ctx, + } +} + +// NewTaskManagerUserTTLGetParamsWithHTTPClient creates a new TaskManagerUserTTLGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewTaskManagerUserTTLGetParamsWithHTTPClient(client *http.Client) *TaskManagerUserTTLGetParams { + + return &TaskManagerUserTTLGetParams{ + HTTPClient: client, + } +} + +/* +TaskManagerUserTTLGetParams contains all the parameters to send to the API endpoint +for the task manager user Ttl get operation typically these are written to a http.Request +*/ +type TaskManagerUserTTLGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the task manager user Ttl get params +func (o *TaskManagerUserTTLGetParams) WithTimeout(timeout time.Duration) *TaskManagerUserTTLGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the task manager user Ttl get params +func (o *TaskManagerUserTTLGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the task manager user Ttl get params +func (o *TaskManagerUserTTLGetParams) WithContext(ctx context.Context) *TaskManagerUserTTLGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the task manager user Ttl get params +func (o *TaskManagerUserTTLGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the task manager user Ttl get params +func (o *TaskManagerUserTTLGetParams) WithHTTPClient(client *http.Client) *TaskManagerUserTTLGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the task manager user Ttl get params +func (o *TaskManagerUserTTLGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *TaskManagerUserTTLGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/task_manager_user_ttl_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/task_manager_user_ttl_get_responses.go new file mode 100644 index 0000000000..0a18a610a4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/task_manager_user_ttl_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// TaskManagerUserTTLGetReader is a Reader for the TaskManagerUserTTLGet structure. +type TaskManagerUserTTLGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *TaskManagerUserTTLGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewTaskManagerUserTTLGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewTaskManagerUserTTLGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewTaskManagerUserTTLGetOK creates a TaskManagerUserTTLGetOK with default headers values +func NewTaskManagerUserTTLGetOK() *TaskManagerUserTTLGetOK { + return &TaskManagerUserTTLGetOK{} +} + +/* +TaskManagerUserTTLGetOK handles this case with default header values. + +Success +*/ +type TaskManagerUserTTLGetOK struct { + Payload int64 +} + +func (o *TaskManagerUserTTLGetOK) GetPayload() int64 { + return o.Payload +} + +func (o *TaskManagerUserTTLGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewTaskManagerUserTTLGetDefault creates a TaskManagerUserTTLGetDefault with default headers values +func NewTaskManagerUserTTLGetDefault(code int) *TaskManagerUserTTLGetDefault { + return &TaskManagerUserTTLGetDefault{ + _statusCode: code, + } +} + +/* +TaskManagerUserTTLGetDefault handles this case with default header values. 
+ +internal server error +*/ +type TaskManagerUserTTLGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the task manager user Ttl get default response +func (o *TaskManagerUserTTLGetDefault) Code() int { + return o._statusCode +} + +func (o *TaskManagerUserTTLGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *TaskManagerUserTTLGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *TaskManagerUserTTLGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/task_manager_user_ttl_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/task_manager_user_ttl_post_parameters.go new file mode 100644 index 0000000000..bb1fe813bd --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/task_manager_user_ttl_post_parameters.go @@ -0,0 +1,141 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewTaskManagerUserTTLPostParams creates a new TaskManagerUserTTLPostParams object +// with the default values initialized. 
+func NewTaskManagerUserTTLPostParams() *TaskManagerUserTTLPostParams { + var () + return &TaskManagerUserTTLPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewTaskManagerUserTTLPostParamsWithTimeout creates a new TaskManagerUserTTLPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewTaskManagerUserTTLPostParamsWithTimeout(timeout time.Duration) *TaskManagerUserTTLPostParams { + var () + return &TaskManagerUserTTLPostParams{ + + timeout: timeout, + } +} + +// NewTaskManagerUserTTLPostParamsWithContext creates a new TaskManagerUserTTLPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewTaskManagerUserTTLPostParamsWithContext(ctx context.Context) *TaskManagerUserTTLPostParams { + var () + return &TaskManagerUserTTLPostParams{ + + Context: ctx, + } +} + +// NewTaskManagerUserTTLPostParamsWithHTTPClient creates a new TaskManagerUserTTLPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewTaskManagerUserTTLPostParamsWithHTTPClient(client *http.Client) *TaskManagerUserTTLPostParams { + var () + return &TaskManagerUserTTLPostParams{ + HTTPClient: client, + } +} + +/* +TaskManagerUserTTLPostParams contains all the parameters to send to the API endpoint +for the task manager user Ttl post operation typically these are written to a http.Request +*/ +type TaskManagerUserTTLPostParams struct { + + /*UserTTL + The number of seconds for which the task started by user will be kept in memory after it finishes + + */ + UserTTL int64 + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the task manager user Ttl post params +func (o *TaskManagerUserTTLPostParams) WithTimeout(timeout time.Duration) *TaskManagerUserTTLPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the task manager user Ttl post params +func (o *TaskManagerUserTTLPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the task manager user Ttl post params +func (o *TaskManagerUserTTLPostParams) WithContext(ctx context.Context) *TaskManagerUserTTLPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the task manager user Ttl post params +func (o *TaskManagerUserTTLPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the task manager user Ttl post params +func (o *TaskManagerUserTTLPostParams) WithHTTPClient(client *http.Client) *TaskManagerUserTTLPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the task manager user Ttl post params +func (o *TaskManagerUserTTLPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithUserTTL adds the userTTL to the task manager user Ttl post params +func (o *TaskManagerUserTTLPostParams) WithUserTTL(userTTL int64) *TaskManagerUserTTLPostParams { + o.SetUserTTL(userTTL) + return o +} + +// SetUserTTL adds the userTtl to the task manager user Ttl post params +func (o *TaskManagerUserTTLPostParams) SetUserTTL(userTTL int64) { + o.UserTTL = userTTL +} + +// WriteToRequest writes these params to a swagger request +func (o *TaskManagerUserTTLPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res 
[]error + + // query param user_ttl + qrUserTTL := o.UserTTL + qUserTTL := swag.FormatInt64(qrUserTTL) + if qUserTTL != "" { + if err := r.SetQueryParam("user_ttl", qUserTTL); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/task_manager_user_ttl_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/task_manager_user_ttl_post_responses.go new file mode 100644 index 0000000000..3b902ad8d2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/task_manager_user_ttl_post_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// TaskManagerUserTTLPostReader is a Reader for the TaskManagerUserTTLPost structure. +type TaskManagerUserTTLPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *TaskManagerUserTTLPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewTaskManagerUserTTLPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewTaskManagerUserTTLPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewTaskManagerUserTTLPostOK creates a TaskManagerUserTTLPostOK with default headers values +func NewTaskManagerUserTTLPostOK() *TaskManagerUserTTLPostOK { + return &TaskManagerUserTTLPostOK{} +} + +/* +TaskManagerUserTTLPostOK handles this case with default header values. + +Success +*/ +type TaskManagerUserTTLPostOK struct { + Payload int64 +} + +func (o *TaskManagerUserTTLPostOK) GetPayload() int64 { + return o.Payload +} + +func (o *TaskManagerUserTTLPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewTaskManagerUserTTLPostDefault creates a TaskManagerUserTTLPostDefault with default headers values +func NewTaskManagerUserTTLPostDefault(code int) *TaskManagerUserTTLPostDefault { + return &TaskManagerUserTTLPostDefault{ + _statusCode: code, + } +} + +/* +TaskManagerUserTTLPostDefault handles this case with default header values. 
+ +internal server error +*/ +type TaskManagerUserTTLPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the task manager user Ttl post default response +func (o *TaskManagerUserTTLPostDefault) Code() int { + return o._statusCode +} + +func (o *TaskManagerUserTTLPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *TaskManagerUserTTLPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *TaskManagerUserTTLPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 105c3b279c..81faec7e75 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -1490,7 +1490,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error { pf := mh.PseudoFields() for i, hf := range pf { switch hf.Name { - case ":method", ":path", ":scheme", ":authority": + case ":method", ":path", ":scheme", ":authority", ":protocol": isRequest = true case ":status": isResponse = true @@ -1498,7 +1498,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error { return pseudoHeaderError(hf.Name) } // Check for duplicates. - // This would be a bad algorithm, but N is 4. + // This would be a bad algorithm, but N is 5. // And this doesn't allocate. for _, hf2 := range pf[:i] { if hf.Name == hf2.Name { diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 7688c356b7..c7601c909f 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -34,10 +34,11 @@ import ( ) var ( - VerboseLogs bool - logFrameWrites bool - logFrameReads bool - inTests bool + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool + disableExtendedConnectProtocol bool ) func init() { @@ -50,6 +51,9 @@ func init() { logFrameWrites = true logFrameReads = true } + if strings.Contains(e, "http2xconnect=0") { + disableExtendedConnectProtocol = true + } } const ( @@ -141,6 +145,10 @@ func (s Setting) Valid() error { if s.Val < 16384 || s.Val > 1<<24-1 { return ConnectionError(ErrCodeProtocol) } + case SettingEnableConnectProtocol: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } } return nil } @@ -150,21 +158,23 @@ func (s Setting) Valid() error { type SettingID uint16 const ( - SettingHeaderTableSize SettingID = 0x1 - SettingEnablePush SettingID = 0x2 - SettingMaxConcurrentStreams SettingID = 0x3 - SettingInitialWindowSize SettingID = 0x4 - SettingMaxFrameSize SettingID = 0x5 - SettingMaxHeaderListSize SettingID = 0x6 + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 + SettingEnableConnectProtocol SettingID = 0x8 ) var settingName = map[SettingID]string{ - SettingHeaderTableSize: "HEADER_TABLE_SIZE", - SettingEnablePush: "ENABLE_PUSH", - SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", - SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", - SettingMaxFrameSize: "MAX_FRAME_SIZE", - 
SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingEnableConnectProtocol: "ENABLE_CONNECT_PROTOCOL", } func (s SettingID) String() string { diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 832414b450..b55547aec6 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -932,14 +932,18 @@ func (sc *serverConn) serve(conf http2Config) { sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) } + settings := writeSettings{ + {SettingMaxFrameSize, conf.MaxReadFrameSize}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, + {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, + } + if !disableExtendedConnectProtocol { + settings = append(settings, Setting{SettingEnableConnectProtocol, 1}) + } sc.writeFrame(FrameWriteRequest{ - write: writeSettings{ - {SettingMaxFrameSize, conf.MaxReadFrameSize}, - {SettingMaxConcurrentStreams, sc.advMaxStreams}, - {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, - {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, - }, + write: settings, }) sc.unackedSettings++ @@ -1801,6 +1805,9 @@ func (sc *serverConn) processSetting(s Setting) error { sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 case SettingMaxHeaderListSize: sc.peerMaxHeaderListSize = s.Val + case SettingEnableConnectProtocol: + // Receipt of this parameter by a server does not + // have any impact default: // Unknown setting: "An endpoint that receives a SETTINGS // frame with any unknown or unsupported identifier MUST @@ -2231,11 +2238,17 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res scheme: f.PseudoValue("scheme"), authority: f.PseudoValue("authority"), path: f.PseudoValue("path"), + protocol: f.PseudoValue("protocol"), + } + + // extended connect is disabled, so we should not see :protocol + if disableExtendedConnectProtocol && rp.protocol != "" { + return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } isConnect := rp.method == "CONNECT" if isConnect { - if rp.path != "" || rp.scheme != "" || rp.authority == "" { + if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { @@ -2259,6 +2272,9 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res if rp.authority == "" { rp.authority = rp.header.Get("Host") } + if rp.protocol != "" { + rp.header.Set(":protocol", rp.protocol) + } rw, req, err := sc.newWriterAndRequestNoBody(st, rp) if err != nil { @@ -2285,6 +2301,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res type requestParam struct { method string scheme, authority, path string + protocol string header http.Header } @@ -2326,7 +2343,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r var url_ 
*url.URL var requestURI string - if rp.method == "CONNECT" { + if rp.method == "CONNECT" && rp.protocol == "" { url_ = &url.URL{Host: rp.authority} requestURI = rp.authority // mimic HTTP/1 server behavior } else { diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index f5968f4407..090d0e1bdb 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -368,25 +368,26 @@ type ClientConn struct { idleTimeout time.Duration // or 0 for never idleTimer timer - mu sync.Mutex // guards following - cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow outflow // our conn-level flow control quota (cs.outflow is per stream) - inflow inflow // peer's conn-level flow control - doNotReuse bool // whether conn is marked to not be reused for any future requests - closing bool - closed bool - seenSettings bool // true if we've seen a settings frame, false otherwise - wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - goAwayDebug string // goAway frame's debug data, retained as a string - streams map[uint32]*clientStream // client-initiated - streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip - nextStreamID uint32 - pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams - pings map[[8]byte]chan struct{} // in flight ping data to notification channel - br *bufio.Reader - lastActive time.Time - lastIdle time.Time // time last idle + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow outflow // our conn-level flow control quota (cs.outflow is per stream) + inflow inflow // peer's conn-level flow control + doNotReuse bool // whether conn is marked to not be reused for any future requests + closing bool + closed bool + seenSettings bool // true if we've seen a settings frame, false otherwise + seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip + nextStreamID uint32 + pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + br *bufio.Reader + lastActive time.Time + lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) maxFrameSize uint32 maxConcurrentStreams uint32 @@ -396,6 +397,17 @@ type ClientConn struct { initialStreamRecvWindowSize int32 readIdleTimeout time.Duration pingTimeout time.Duration + extendedConnectAllowed bool + + // rstStreamPingsBlocked works around an unfortunate gRPC behavior. + // gRPC strictly limits the number of PING frames that it will receive. + // The default is two pings per two hours, but the limit resets every time + // the gRPC endpoint sends a HEADERS or DATA frame. See golang/go#70575. + // + // rstStreamPingsBlocked is set after receiving a response to a PING frame + // bundled with an RST_STREAM (see pendingResets below), and cleared after + // receiving a HEADERS or DATA frame. 
+ rstStreamPingsBlocked bool // pendingResets is the number of RST_STREAM frames we have sent to the peer, // without confirming that the peer has received them. When we send a RST_STREAM, @@ -819,6 +831,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. streams: make(map[uint32]*clientStream), singleUse: singleUse, + seenSettingsChan: make(chan struct{}), wantSettingsAck: true, readIdleTimeout: conf.SendPingTimeout, pingTimeout: conf.PingTimeout, @@ -1466,6 +1479,8 @@ func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream) cs.cleanupWriteRequest(err) } +var errExtendedConnectNotSupported = errors.New("net/http: extended connect not supported by peer") + // writeRequest sends a request. // // It returns nil after the request is written, the response read, @@ -1481,12 +1496,31 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre return err } + // wait for setting frames to be received, a server can change this value later, + // but we just wait for the first settings frame + var isExtendedConnect bool + if req.Method == "CONNECT" && req.Header.Get(":protocol") != "" { + isExtendedConnect = true + } + // Acquire the new-request lock by writing to reqHeaderMu. // This lock guards the critical section covering allocating a new stream ID // (requires mu) and creating the stream (requires wmu). if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } + if isExtendedConnect { + select { + case <-cs.reqCancel: + return errRequestCanceled + case <-ctx.Done(): + return ctx.Err() + case <-cc.seenSettingsChan: + if !cc.extendedConnectAllowed { + return errExtendedConnectNotSupported + } + } + } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1714,10 +1748,14 @@ func (cs *clientStream) cleanupWriteRequest(err error) { ping := false if !closeOnIdle { cc.mu.Lock() - if cc.pendingResets == 0 { - ping = true + // rstStreamPingsBlocked works around a gRPC behavior: + // see comment on the field for details. + if !cc.rstStreamPingsBlocked { + if cc.pendingResets == 0 { + ping = true + } + cc.pendingResets++ } - cc.pendingResets++ cc.mu.Unlock() } cc.writeStreamReset(cs.ID, ErrCodeCancel, ping, err) @@ -2030,7 +2068,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) func validateHeaders(hdrs http.Header) string { for k, vv := range hdrs { - if !httpguts.ValidHeaderFieldName(k) { + if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { return fmt.Sprintf("name %q", k) } for _, v := range vv { @@ -2046,6 +2084,10 @@ func validateHeaders(hdrs http.Header) string { var errNilRequestURL = errors.New("http2: Request.URI is nil") +func isNormalConnect(req *http.Request) bool { + return req.Method == "CONNECT" && req.Header.Get(":protocol") == "" +} + // requires cc.wmu be held. 
func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { cc.hbuf.Reset() @@ -2066,7 +2108,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } var path string - if req.Method != "CONNECT" { + if !isNormalConnect(req) { path = req.URL.RequestURI() if !validPseudoPath(path) { orig := path @@ -2103,7 +2145,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail m = http.MethodGet } f(":method", m) - if req.Method != "CONNECT" { + if !isNormalConnect(req) { f(":path", path) f(":scheme", req.URL.Scheme) } @@ -2461,7 +2503,7 @@ func (rl *clientConnReadLoop) run() error { cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } if se, ok := err.(StreamError); ok { - if cs := rl.streamByID(se.StreamID); cs != nil { + if cs := rl.streamByID(se.StreamID, notHeaderOrDataFrame); cs != nil { if se.Cause == nil { se.Cause = cc.fr.errDetail } @@ -2507,13 +2549,16 @@ func (rl *clientConnReadLoop) run() error { if VerboseLogs { cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) } + if !cc.seenSettings { + close(cc.seenSettingsChan) + } return err } } } func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) if cs == nil { // We'd get here if we canceled a request while the // server had its response still in flight. So if this @@ -2842,7 +2887,7 @@ func (b transportResponseBody) Close() error { func (rl *clientConnReadLoop) processData(f *DataFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) data := f.Data() if cs == nil { cc.mu.Lock() @@ -2977,9 +3022,22 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { cs.abortStream(err) } -func (rl *clientConnReadLoop) streamByID(id uint32) *clientStream { +// Constants passed to streamByID for documentation purposes. +const ( + headerOrDataFrame = true + notHeaderOrDataFrame = false +) + +// streamByID returns the stream with the given id, or nil if no stream has that id. +// If headerOrData is true, it clears rst.StreamPingsBlocked. +func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientStream { rl.cc.mu.Lock() defer rl.cc.mu.Unlock() + if headerOrData { + // Work around an unfortunate gRPC behavior. + // See comment on ClientConn.rstStreamPingsBlocked for details. + rl.cc.rstStreamPingsBlocked = false + } cs := rl.cc.streams[id] if cs != nil && !cs.readAborted { return cs @@ -3073,6 +3131,21 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { case SettingHeaderTableSize: cc.henc.SetMaxDynamicTableSize(s.Val) cc.peerMaxHeaderTableSize = s.Val + case SettingEnableConnectProtocol: + if err := s.Valid(); err != nil { + return err + } + // If the peer wants to send us SETTINGS_ENABLE_CONNECT_PROTOCOL, + // we require that it do so in the first SETTINGS frame. + // + // When we attempt to use extended CONNECT, we wait for the first + // SETTINGS frame to see if the server supports it. If we let the + // server enable the feature with a later SETTINGS frame, then + // users will see inconsistent results depending on whether we've + // seen that frame or not. 
+ if !cc.seenSettings { + cc.extendedConnectAllowed = s.Val == 1 + } default: cc.vlogf("Unhandled Setting: %v", s) } @@ -3090,6 +3163,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { // connection can establish to our default. cc.maxConcurrentStreams = defaultMaxConcurrentStreams } + close(cc.seenSettingsChan) cc.seenSettings = true } @@ -3098,7 +3172,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if f.StreamID != 0 && cs == nil { return nil } @@ -3127,7 +3201,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { } func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if cs == nil { // TODO: return error if server tries to RST_STREAM an idle stream return nil @@ -3205,6 +3279,7 @@ func (rl *clientConnReadLoop) processPing(f *PingFrame) error { if cc.pendingResets > 0 { // See clientStream.cleanupWriteRequest. cc.pendingResets = 0 + cc.rstStreamPingsBlocked = true cc.cond.Broadcast() } return nil diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index f3ab0a2e12..65fe2628e9 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -106,24 +106,18 @@ func Find(importPath, srcDir string) (filename, path string) { // additional trailing data beyond the end of the export data. func NewReader(r io.Reader) (io.Reader, error) { buf := bufio.NewReader(r) - _, size, err := gcimporter.FindExportData(buf) + size, err := gcimporter.FindExportData(buf) if err != nil { return nil, err } - if size >= 0 { - // We were given an archive and found the __.PKGDEF in it. - // This tells us the size of the export data, and we don't - // need to return the entire file. - return &io.LimitedReader{ - R: buf, - N: size, - }, nil - } else { - // We were given an object file. As such, we don't know how large - // the export data is and must return the entire file. - return buf, nil - } + // We were given an archive and found the __.PKGDEF in it. + // This tells us the size of the export data, and we don't + // need to return the entire file. + return &io.LimitedReader{ + R: buf, + N: size, + }, nil } // readAll works the same way as io.ReadAll, but avoids allocations and copies diff --git a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go index f6437feb1c..6f5d8a2139 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go @@ -39,12 +39,15 @@ func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) { } // FindExportData positions the reader r at the beginning of the -// export data section of an underlying GC-created object/archive +// export data section of an underlying cmd/compile created archive // file by reading from it. The reader must be positioned at the -// start of the file before calling this function. The hdr result -// is the string before the export data, either "$$" or "$$B". -// The size result is the length of the export data in bytes, or -1 if not known. 
-func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) { +// start of the file before calling this function. +// The size result is the length of the export data in bytes. +// +// This function is needed by [gcexportdata.Read], which must +// accept inputs produced by the last two releases of cmd/compile, +// plus tip. +func FindExportData(r *bufio.Reader) (size int64, err error) { // Read first line to make sure this is an object file. line, err := r.ReadSlice('\n') if err != nil { @@ -52,27 +55,32 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) { return } - if string(line) == "!\n" { - // Archive file. Scan to __.PKGDEF. - var name string - if name, size, err = readGopackHeader(r); err != nil { - return - } + // Is the first line an archive file signature? + if string(line) != "!\n" { + err = fmt.Errorf("not the start of an archive file (%q)", line) + return + } - // First entry should be __.PKGDEF. - if name != "__.PKGDEF" { - err = fmt.Errorf("go archive is missing __.PKGDEF") - return - } + // Archive file. Scan to __.PKGDEF. + var name string + if name, size, err = readGopackHeader(r); err != nil { + return + } + arsize := size - // Read first line of __.PKGDEF data, so that line - // is once again the first line of the input. - if line, err = r.ReadSlice('\n'); err != nil { - err = fmt.Errorf("can't find export data (%v)", err) - return - } - size -= int64(len(line)) + // First entry should be __.PKGDEF. + if name != "__.PKGDEF" { + err = fmt.Errorf("go archive is missing __.PKGDEF") + return + } + + // Read first line of __.PKGDEF data, so that line + // is once again the first line of the input. + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return } + size -= int64(len(line)) // Now at __.PKGDEF in archive or still at beginning of file. // Either way, line should begin with "go object ". @@ -81,8 +89,8 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) { return } - // Skip over object header to export data. - // Begins after first line starting with $$. + // Skip over object headers to get to the export data section header "$$B\n". + // Object headers are lines that do not start with '$'. for line[0] != '$' { if line, err = r.ReadSlice('\n'); err != nil { err = fmt.Errorf("can't find export data (%v)", err) @@ -90,9 +98,18 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) { } size -= int64(len(line)) } - hdr = string(line) + + // Check for the binary export data section header "$$B\n". + hdr := string(line) + if hdr != "$$B\n" { + err = fmt.Errorf("unknown export data header: %q", hdr) + return + } + // TODO(taking): Remove end-of-section marker "\n$$\n" from size. + if size < 0 { - size = -1 + err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size) + return } return diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index e6c5d51f8e..dbbca86043 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -161,6 +161,8 @@ func FindPkg(path, srcDir string) (filename, id string) { // Import imports a gc-generated package given its import path and srcDir, adds // the corresponding package object to the packages map, and returns the object. 
// The packages map must contain all packages already imported. +// +// TODO(taking): Import is only used in tests. Move to gcimporter_test. func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { var rc io.ReadCloser var filename, id string @@ -210,58 +212,50 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func } defer rc.Close() - var hdr string var size int64 buf := bufio.NewReader(rc) - if hdr, size, err = FindExportData(buf); err != nil { + if size, err = FindExportData(buf); err != nil { return } - switch hdr { - case "$$B\n": - var data []byte - data, err = io.ReadAll(buf) - if err != nil { - break - } + var data []byte + data, err = io.ReadAll(buf) + if err != nil { + return + } + if len(data) == 0 { + return nil, fmt.Errorf("no data to load a package from for path %s", id) + } - // TODO(gri): allow clients of go/importer to provide a FileSet. - // Or, define a new standard go/types/gcexportdata package. - fset := token.NewFileSet() - - // Select appropriate importer. - if len(data) > 0 { - switch data[0] { - case 'v', 'c', 'd': - // binary: emitted by cmd/compile till go1.10; obsolete. - return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - - case 'i': - // indexed: emitted by cmd/compile till go1.19; - // now used only for serializing go/types. - // See https://github.com/golang/go/issues/69491. - _, pkg, err := IImportData(fset, packages, data[1:], id) - return pkg, err - - case 'u': - // unified: emitted by cmd/compile since go1.20. - _, pkg, err := UImportData(fset, packages, data[1:size], id) - return pkg, err - - default: - l := len(data) - if l > 10 { - l = 10 - } - return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) - } - } + // TODO(gri): allow clients of go/importer to provide a FileSet. + // Or, define a new standard go/types/gcexportdata package. + fset := token.NewFileSet() + + // Select appropriate importer. + switch data[0] { + case 'v', 'c', 'd': + // binary: emitted by cmd/compile till go1.10; obsolete. + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) + + case 'i': + // indexed: emitted by cmd/compile till go1.19; + // now used only for serializing go/types. + // See https://github.com/golang/go/issues/69491. + _, pkg, err := IImportData(fset, packages, data[1:], id) + return pkg, err + + case 'u': + // unified: emitted by cmd/compile since go1.20. + _, pkg, err := UImportData(fset, packages, data[1:size], id) + return pkg, err default: - err = fmt.Errorf("unknown export data header: %q", hdr) + l := len(data) + if l > 10 { + l = 10 + } + return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) } - - return } type byPath []*types.Package diff --git a/vendor/golang.org/x/tools/internal/imports/source.go b/vendor/golang.org/x/tools/internal/imports/source.go index 5d2aeeebc9..cbe4f3c5ba 100644 --- a/vendor/golang.org/x/tools/internal/imports/source.go +++ b/vendor/golang.org/x/tools/internal/imports/source.go @@ -59,5 +59,5 @@ type Source interface { // candidates satisfy all missing references for that package name. It is up // to each data source to select the best result for each entry in the // missing map. 
- ResolveReferences(ctx context.Context, filename string, missing References) (map[PackageName]*Result, error) + ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) } diff --git a/vendor/golang.org/x/tools/internal/imports/source_env.go b/vendor/golang.org/x/tools/internal/imports/source_env.go index ff9555d287..d14abaa319 100644 --- a/vendor/golang.org/x/tools/internal/imports/source_env.go +++ b/vendor/golang.org/x/tools/internal/imports/source_env.go @@ -48,7 +48,7 @@ func (s *ProcessEnvSource) LoadPackageNames(ctx context.Context, srcDir string, return r.loadPackageNames(unknown, srcDir) } -func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename string, refs map[string]map[string]bool) (map[string]*Result, error) { +func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename string, refs map[string]map[string]bool) ([]*Result, error) { var mu sync.Mutex found := make(map[string][]pkgDistance) callback := &scanCallback{ @@ -121,5 +121,9 @@ func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename strin if err := g.Wait(); err != nil { return nil, err } - return results, nil + var ans []*Result + for _, x := range results { + ans = append(ans, x) + } + return ans, nil } diff --git a/vendor/golang.org/x/tools/internal/imports/source_modindex.go b/vendor/golang.org/x/tools/internal/imports/source_modindex.go new file mode 100644 index 0000000000..05229f06ce --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/source_modindex.go @@ -0,0 +1,103 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import ( + "context" + "sync" + "time" + + "golang.org/x/tools/internal/modindex" +) + +// This code is here rather than in the modindex package +// to avoid import loops + +// implements Source using modindex, so only for module cache. +// +// this is perhaps over-engineered. A new Index is read at first use. +// And then Update is called after every 15 minutes, and a new Index +// is read if the index changed. It is not clear the Mutex is needed. +type IndexSource struct { + modcachedir string + mutex sync.Mutex + ix *modindex.Index + expires time.Time +} + +// create a new Source. Called from NewView in cache/session.go. +func NewIndexSource(cachedir string) *IndexSource { + return &IndexSource{modcachedir: cachedir} +} + +func (s *IndexSource) LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error) { + /// This is used by goimports to resolve the package names of imports of the + // current package, which is irrelevant for the module cache. + return nil, nil +} + +func (s *IndexSource) ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) { + if err := s.maybeReadIndex(); err != nil { + return nil, err + } + var cs []modindex.Candidate + for pkg, nms := range missing { + for nm := range nms { + x := s.ix.Lookup(pkg, nm, false) + cs = append(cs, x...) 
+ } + } + found := make(map[string]*Result) + for _, c := range cs { + var x *Result + if x = found[c.ImportPath]; x == nil { + x = &Result{ + Import: &ImportInfo{ + ImportPath: c.ImportPath, + Name: "", + }, + Package: &PackageInfo{ + Name: c.PkgName, + Exports: make(map[string]bool), + }, + } + found[c.ImportPath] = x + } + x.Package.Exports[c.Name] = true + } + var ans []*Result + for _, x := range found { + ans = append(ans, x) + } + return ans, nil +} + +func (s *IndexSource) maybeReadIndex() error { + s.mutex.Lock() + defer s.mutex.Unlock() + + var readIndex bool + if time.Now().After(s.expires) { + ok, err := modindex.Update(s.modcachedir) + if err != nil { + return err + } + if ok { + readIndex = true + } + } + + if readIndex || s.ix == nil { + ix, err := modindex.ReadIndex(s.modcachedir) + if err != nil { + return err + } + s.ix = ix + // for now refresh every 15 minutes + s.expires = time.Now().Add(time.Minute * 15) + } + + return nil +} diff --git a/vendor/golang.org/x/tools/internal/modindex/directories.go b/vendor/golang.org/x/tools/internal/modindex/directories.go new file mode 100644 index 0000000000..1e1a02f239 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/modindex/directories.go @@ -0,0 +1,135 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modindex + +import ( + "fmt" + "log" + "os" + "path/filepath" + "regexp" + "slices" + "strings" + "sync" + "time" + + "golang.org/x/mod/semver" + "golang.org/x/tools/internal/gopathwalk" +) + +type directory struct { + path Relpath + importPath string + version string // semantic version + syms []symbol +} + +// filterDirs groups the directories by import path, +// sorting the ones with the same import path by semantic version, +// most recent first. +func byImportPath(dirs []Relpath) (map[string][]*directory, error) { + ans := make(map[string][]*directory) // key is import path + for _, d := range dirs { + ip, sv, err := DirToImportPathVersion(d) + if err != nil { + return nil, err + } + ans[ip] = append(ans[ip], &directory{ + path: d, + importPath: ip, + version: sv, + }) + } + for k, v := range ans { + semanticSort(v) + ans[k] = v + } + return ans, nil +} + +// sort the directories by semantic version, latest first +func semanticSort(v []*directory) { + slices.SortFunc(v, func(l, r *directory) int { + if n := semver.Compare(l.version, r.version); n != 0 { + return -n // latest first + } + return strings.Compare(string(l.path), string(r.path)) + }) +} + +// modCacheRegexp splits a relpathpath into module, module version, and package. +var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) + +// DirToImportPathVersion computes import path and semantic version +func DirToImportPathVersion(dir Relpath) (string, string, error) { + m := modCacheRegexp.FindStringSubmatch(string(dir)) + // m[1] is the module path + // m[2] is the version major.minor.patch(-
= 4 {
+					sig := strings.Split(flds[3], " ")
+					for i := 0; i < len(sig); i++ {
+						// $ cannot otherwise occur. removing the spaces
+						// almost works, but for chan struct{}, e.g.
+						sig[i] = strings.Replace(sig[i], "$", " ", -1)
+					}
+					px.Sig = toFields(sig)
+				}
+			}
+			ans = append(ans, px)
+		}
+	}
+	return ans
+}
+
+func toFields(sig []string) []Field {
+	ans := make([]Field, len(sig)/2)
+	for i := 0; i < len(ans); i++ {
+		ans[i] = Field{Arg: sig[2*i], Type: sig[2*i+1]}
+	}
+	return ans
+}
+
+// benchmarks show this is measurably better than strings.Split
+func fastSplit(x string) []string {
+	ans := make([]string, 0, 4)
+	nxt := 0
+	start := 0
+	for i := 0; i < len(x); i++ {
+		if x[i] != ' ' {
+			continue
+		}
+		ans = append(ans, x[start:i])
+		nxt++
+		start = i + 1
+		if nxt >= 3 {
+			break
+		}
+	}
+	ans = append(ans, x[start:])
+	return ans
+}
+
+func asLexType(c byte) LexType {
+	switch c {
+	case 'C':
+		return Const
+	case 'V':
+		return Var
+	case 'T':
+		return Type
+	case 'F':
+		return Func
+	}
+	return -1
+}
diff --git a/vendor/golang.org/x/tools/internal/modindex/modindex.go b/vendor/golang.org/x/tools/internal/modindex/modindex.go
new file mode 100644
index 0000000000..355a53e71a
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/modindex/modindex.go
@@ -0,0 +1,164 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package modindex contains code for building and searching an index to
+// the Go module cache. The directory containing the index, returned by
+// IndexDir(), contains a file index-name-<ver> that contains the name
+// of the current index. We believe writing that short file is atomic.
+// ReadIndex reads that file to get the file name of the index.
+// WriteIndex writes an index with a unique name and then
+// writes that name into a new version of index-name-<ver>.
+// (<ver> stands for the CurrentVersion of the index format.)
+package modindex
+
+import (
+	"path/filepath"
+	"slices"
+	"strings"
+	"time"
+
+	"golang.org/x/mod/semver"
+)
+
+// Create always creates a new index for the go module cache that is in cachedir.
+func Create(cachedir string) error {
+	_, err := indexModCache(cachedir, true)
+	return err
+}
+
+// Update the index for the go module cache that is in cachedir.
+// If there is no existing index it will build one.
+// If there are changed directories since the last index, it will
+// write a new one and return true. Otherwise it returns false.
+func Update(cachedir string) (bool, error) {
+	return indexModCache(cachedir, false)
+}
+
+// indexModCache writes an index current as of when it is called.
+// If clear is true the index is constructed from all of GOMODCACHE
+// otherwise the index is constructed from the last previous index
+// and the updates to the cache. It returns true if it wrote an index,
+// false otherwise.
+func indexModCache(cachedir string, clear bool) (bool, error) {
+	cachedir, err := filepath.Abs(cachedir)
+	if err != nil {
+		return false, err
+	}
+	cd := Abspath(cachedir)
+	future := time.Now().Add(24 * time.Hour) // safely in the future
+	ok, err := modindexTimed(future, cd, clear)
+	if err != nil {
+		return false, err
+	}
+	return ok, nil
+}
+
+// modindexTimed writes an index current as of onlyBefore.
+// If clear is true the index is constructed from all of GOMODCACHE
+// otherwise the index is constructed from the last previous index
+// and all the updates to the cache before onlyBefore.
+// It returns true if it wrote a new index, false if it wrote nothing.
+func modindexTimed(onlyBefore time.Time, cachedir Abspath, clear bool) (bool, error) {
+	var curIndex *Index
+	if !clear {
+		var err error
+		curIndex, err = ReadIndex(string(cachedir))
+		if clear && err != nil {
+			return false, err
+		}
+		// TODO(pjw): check that most of those directories still exist
+	}
+	cfg := &work{
+		onlyBefore: onlyBefore,
+		oldIndex:   curIndex,
+		cacheDir:   cachedir,
+	}
+	if curIndex != nil {
+		cfg.onlyAfter = curIndex.Changed
+	}
+	if err := cfg.buildIndex(); err != nil {
+		return false, err
+	}
+	if len(cfg.newIndex.Entries) == 0 && curIndex != nil {
+		// no changes from existing curIndex, don't write a new index
+		return false, nil
+	}
+	if err := cfg.writeIndex(); err != nil {
+		return false, err
+	}
+	return true, nil
+}
+
+type work struct {
+	onlyBefore time.Time // do not use directories later than this
+	onlyAfter  time.Time // only interested in directories after this
+	// directories from before onlyAfter come from oldIndex
+	oldIndex *Index
+	newIndex *Index
+	cacheDir Abspath
+}
+
+func (w *work) buildIndex() error {
+	// The effective date of the new index should be at least
+	// slightly earlier than when the directories are scanned
+	// so set it now.
+	w.newIndex = &Index{Changed: time.Now(), Cachedir: w.cacheDir}
+	dirs := findDirs(string(w.cacheDir), w.onlyAfter, w.onlyBefore)
+	if len(dirs) == 0 {
+		return nil
+	}
+	newdirs, err := byImportPath(dirs)
+	if err != nil {
+		return err
+	}
+	// for each import path it might occur only in newdirs,
+	// only in w.oldIndex, or in both.
+	// If it occurs in both, use the semantically later one
+	if w.oldIndex != nil {
+		for _, e := range w.oldIndex.Entries {
+			found, ok := newdirs[e.ImportPath]
+			if !ok {
+				w.newIndex.Entries = append(w.newIndex.Entries, e)
+				continue // use this one, there is no new one
+			}
+			if semver.Compare(found[0].version, e.Version) > 0 {
+				// use the new one
+			} else {
+				// use the old one, forget the new one
+				w.newIndex.Entries = append(w.newIndex.Entries, e)
+				delete(newdirs, e.ImportPath)
+			}
+		}
+	}
+	// get symbol information for all the new directories
+	getSymbols(w.cacheDir, newdirs)
+	// assemble the new index entries
+	for k, v := range newdirs {
+		d := v[0]
+		pkg, names := processSyms(d.syms)
+		if pkg == "" {
+			continue // PJW: does this ever happen?
+		}
+		entry := Entry{
+			PkgName:    pkg,
+			Dir:        d.path,
+			ImportPath: k,
+			Version:    d.version,
+			Names:      names,
+		}
+		w.newIndex.Entries = append(w.newIndex.Entries, entry)
+	}
+	// sort the entries in the new index
+	slices.SortFunc(w.newIndex.Entries, func(l, r Entry) int {
+		if n := strings.Compare(l.PkgName, r.PkgName); n != 0 {
+			return n
+		}
+		return strings.Compare(l.ImportPath, r.ImportPath)
+	})
+	return nil
+}
+
+func (w *work) writeIndex() error {
+	return writeIndex(w.cacheDir, w.newIndex)
+}
diff --git a/vendor/golang.org/x/tools/internal/modindex/symbols.go b/vendor/golang.org/x/tools/internal/modindex/symbols.go
new file mode 100644
index 0000000000..2e285ed996
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/modindex/symbols.go
@@ -0,0 +1,189 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modindex
+
+import (
+	"fmt"
+	"go/ast"
+	"go/parser"
+	"go/token"
+	"go/types"
+	"os"
+	"path/filepath"
+	"slices"
+	"strings"
+
+	"golang.org/x/sync/errgroup"
+)
+
+// The name of a symbol contains information about the symbol:
+//  T for types
+//  C for consts
+//  V for vars
+// and for funcs:  F <number of results> (<param name> <param type>)*
+// any spaces in <param type> are replaced by $s so that the fields
+// of the name are space separated
+type symbol struct {
+	pkg  string // name of the symbol's package
+	name string // declared name
+	kind string // T, C, V, or F
+	sig  string // signature information, for F
+}
+
+// find the symbols for the best directories
+func getSymbols(cd Abspath, dirs map[string][]*directory) {
+	var g errgroup.Group
+	g.SetLimit(-1) // maybe throttle this some day
+	for _, vv := range dirs {
+		// throttling some day?
+		d := vv[0]
+		g.Go(func() error {
+			thedir := filepath.Join(string(cd), string(d.path))
+			mode := parser.SkipObjectResolution
+
+			fi, err := os.ReadDir(thedir)
+			if err != nil {
+				return nil // log this someday?
+			}
+			for _, fx := range fi {
+				if !strings.HasSuffix(fx.Name(), ".go") || strings.HasSuffix(fx.Name(), "_test.go") {
+					continue
+				}
+				fname := filepath.Join(thedir, fx.Name())
+				tr, err := parser.ParseFile(token.NewFileSet(), fname, nil, mode)
+				if err != nil {
+					continue // ignore errors, someday log them?
+				}
+				d.syms = append(d.syms, getFileExports(tr)...)
+			}
+			return nil
+		})
+	}
+	g.Wait()
+}
+
+func getFileExports(f *ast.File) []symbol {
+	pkg := f.Name.Name
+	if pkg == "main" {
+		return nil
+	}
+	var ans []symbol
+	// should we look for //go:build ignore?
+	for _, decl := range f.Decls {
+		switch decl := decl.(type) {
+		case *ast.FuncDecl:
+			if decl.Recv != nil {
+				// ignore methods, as we are completing package selections
+				continue
+			}
+			name := decl.Name.Name
+			dtype := decl.Type
+			// not looking at dtype.TypeParams. That is, treating
+			// generic functions just like non-generic ones.
+			sig := dtype.Params
+			kind := "F"
+			result := []string{fmt.Sprintf("%d", dtype.Results.NumFields())}
+			for _, x := range sig.List {
+				// This code creates a string representing the type.
+				// TODO(pjw): it may be fragile:
+				// 1. x.Type could be nil, perhaps in ill-formed code
+				// 2. ExprString might someday change incompatibly to
+				//    include struct tags, which can be arbitrary strings
+				if x.Type == nil {
+					// Can this happen without a parse error? (Files with parse
+					// errors are ignored in getSymbols)
+					continue // maybe report this someday
+				}
+				tp := types.ExprString(x.Type)
+				if len(tp) == 0 {
+					// Can this happen?
+					continue // maybe report this someday
+				}
+				// This is only safe if ExprString never returns anything with a $
+				// The only place a $ can occur seems to be in a struct tag, which
+				// can be an arbitrary string literal, and ExprString does not presently
+				// print struct tags. So for this to happen the type of a formal parameter
+				// has to be an explicit struct, e.g. foo(x struct{a int "$"}) and ExprString
+				// would have to show the struct tag. Even testing for this case seems
+				// a waste of effort, but let's not ignore such pathologies
+				if strings.Contains(tp, "$") {
+					continue
+				}
+				tp = strings.Replace(tp, " ", "$", -1)
+				if len(x.Names) == 0 {
+					result = append(result, "_")
+					result = append(result, tp)
+				} else {
+					for _, y := range x.Names {
+						result = append(result, y.Name)
+						result = append(result, tp)
+					}
+				}
+			}
+			sigs := strings.Join(result, " ")
+			if s := newsym(pkg, name, kind, sigs); s != nil {
+				ans = append(ans, *s)
+			}
+		case *ast.GenDecl:
+			switch decl.Tok {
+			case token.CONST, token.VAR:
+				tp := "V"
+				if decl.Tok == token.CONST {
+					tp = "C"
+				}
+				for _, sp := range decl.Specs {
+					for _, x := range sp.(*ast.ValueSpec).Names {
+						if s := newsym(pkg, x.Name, tp, ""); s != nil {
+							ans = append(ans, *s)
+						}
+					}
+				}
+			case token.TYPE:
+				for _, sp := range decl.Specs {
+					if s := newsym(pkg, sp.(*ast.TypeSpec).Name.Name, "T", ""); s != nil {
+						ans = append(ans, *s)
+					}
+				}
+			}
+		}
+	}
+	return ans
+}
+
+func newsym(pkg, name, kind, sig string) *symbol {
+	if len(name) == 0 || !ast.IsExported(name) {
+		return nil
+	}
+	sym := symbol{pkg: pkg, name: name, kind: kind, sig: sig}
+	return &sym
+}
+
+// return the package name and the value for the symbols.
+// if there are multiple packages, choose one arbitrarily
+// the returned slice is sorted lexicographically
+func processSyms(syms []symbol) (string, []string) {
+	if len(syms) == 0 {
+		return "", nil
+	}
+	slices.SortFunc(syms, func(l, r symbol) int {
+		return strings.Compare(l.name, r.name)
+	})
+	pkg := syms[0].pkg
+	var names []string
+	for _, s := range syms {
+		var nx string
+		if s.pkg == pkg {
+			if s.sig != "" {
+				nx = fmt.Sprintf("%s %s %s", s.name, s.kind, s.sig)
+			} else {
+				nx = fmt.Sprintf("%s %s", s.name, s.kind)
+			}
+			names = append(names, nx)
+		} else {
+			continue // PJW: do we want to keep track of these?
+		}
+	}
+	return pkg, names
+}
diff --git a/vendor/golang.org/x/tools/internal/modindex/types.go b/vendor/golang.org/x/tools/internal/modindex/types.go
new file mode 100644
index 0000000000..ece4488630
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/modindex/types.go
@@ -0,0 +1,25 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modindex
+
+import (
+	"strings"
+)
+
+// some special types to avoid confusions
+
+// distinguish various types of directory names. It's easy to get confused.
+type Abspath string // absolute paths
+type Relpath string // paths with GOMODCACHE prefix removed
+
+func toRelpath(cachedir Abspath, s string) Relpath {
+	if strings.HasPrefix(s, string(cachedir)) {
+		if s == string(cachedir) {
+			return Relpath("")
+		}
+		return Relpath(s[len(cachedir)+1:])
+	}
+	return Relpath(s)
+}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
new file mode 100644
index 0000000000..1066980649
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
@@ -0,0 +1,282 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+	"go/types"
+	"strconv"
+	"strings"
+)
+
+// ZeroString returns the string representation of the "zero" value of the type t.
+// This string can be used on the right-hand side of an assignment where the
+// left-hand side has that explicit type.
+// Exception: This does not apply to tuples. Their string representation is
+// informational only and cannot be used in an assignment.
+// When assigning to a wider type (such as 'any'), it's the caller's
+// responsibility to handle any necessary type conversions.
+// See [ZeroExpr] for a variant that returns an [ast.Expr].
+func ZeroString(t types.Type, qf types.Qualifier) string {
+	switch t := t.(type) {
+	case *types.Basic:
+		switch {
+		case t.Info()&types.IsBoolean != 0:
+			return "false"
+		case t.Info()&types.IsNumeric != 0:
+			return "0"
+		case t.Info()&types.IsString != 0:
+			return `""`
+		case t.Kind() == types.UnsafePointer:
+			fallthrough
+		case t.Kind() == types.UntypedNil:
+			return "nil"
+		default:
+			panic(fmt.Sprint("ZeroString for unexpected type:", t))
+		}
+
+	case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
+		return "nil"
+
+	case *types.Named, *types.Alias:
+		switch under := t.Underlying().(type) {
+		case *types.Struct, *types.Array:
+			return types.TypeString(t, qf) + "{}"
+		default:
+			return ZeroString(under, qf)
+		}
+
+	case *types.Array, *types.Struct:
+		return types.TypeString(t, qf) + "{}"
+
+	case *types.TypeParam:
+		// Assumes func new is not shadowed.
+		return "*new(" + types.TypeString(t, qf) + ")"
+
+	case *types.Tuple:
+		// Tuples are not normal values.
+		// We currently format this as "(t[0], ..., t[n])". Could be something else.
+		components := make([]string, t.Len())
+		for i := 0; i < t.Len(); i++ {
+			components[i] = ZeroString(t.At(i).Type(), qf)
+		}
+		return "(" + strings.Join(components, ", ") + ")"
+
+	case *types.Union:
+		// Variables of these types cannot be created, so it makes
+		// no sense to ask for their zero value.
+		panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+	default:
+		panic(t) // unreachable.
+	}
+}
+
+// ZeroExpr returns the ast.Expr representation of the "zero" value of the type t.
+// ZeroExpr is defined for types that are suitable for variables.
+// It may panic for other types such as Tuple or Union.
+// See [ZeroString] for a variant that returns a string.
+func ZeroExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
+	switch t := typ.(type) {
+	case *types.Basic:
+		switch {
+		case t.Info()&types.IsBoolean != 0:
+			return &ast.Ident{Name: "false"}
+		case t.Info()&types.IsNumeric != 0:
+			return &ast.BasicLit{Kind: token.INT, Value: "0"}
+		case t.Info()&types.IsString != 0:
+			return &ast.BasicLit{Kind: token.STRING, Value: `""`}
+		case t.Kind() == types.UnsafePointer:
+			fallthrough
+		case t.Kind() == types.UntypedNil:
+			return ast.NewIdent("nil")
+		default:
+			panic(fmt.Sprint("ZeroExpr for unexpected type:", t))
+		}
+
+	case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
+		return ast.NewIdent("nil")
+
+	case *types.Named, *types.Alias:
+		switch under := t.Underlying().(type) {
+		case *types.Struct, *types.Array:
+			return &ast.CompositeLit{
+				Type: TypeExpr(f, pkg, typ),
+			}
+		default:
+			return ZeroExpr(f, pkg, under)
+		}
+
+	case *types.Array, *types.Struct:
+		return &ast.CompositeLit{
+			Type: TypeExpr(f, pkg, typ),
+		}
+
+	case *types.TypeParam:
+		return &ast.StarExpr{ // *new(T)
+			X: &ast.CallExpr{
+				// Assumes func new is not shadowed.
+				Fun: ast.NewIdent("new"),
+				Args: []ast.Expr{
+					ast.NewIdent(t.Obj().Name()),
+				},
+			},
+		}
+
+	case *types.Tuple:
+		// Unlike ZeroString, there is no ast.Expr that can express a tuple as
+		// "(t[0], ..., t[n])".
+		panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+	case *types.Union:
+		// Variables of these types cannot be created, so it makes
+		// no sense to ask for their zero value.
+		panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+	default:
+		panic(t) // unreachable.
+	}
+}
+
+// IsZeroExpr uses simple syntactic heuristics to report whether expr
+// is an obvious zero value, such as 0, "", nil, or false.
+// It cannot do better without type information.
+func IsZeroExpr(expr ast.Expr) bool {
+	switch e := expr.(type) {
+	case *ast.BasicLit:
+		return e.Value == "0" || e.Value == `""`
+	case *ast.Ident:
+		return e.Name == "nil" || e.Name == "false"
+	default:
+		return false
+	}
+}
+
+// TypeExpr returns syntax for the specified type. References to named types
+// from packages other than pkg are qualified by an appropriate package name, as
+// defined by the import environment of file.
+// It may panic for types such as Tuple or Union.
+func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
+	switch t := typ.(type) {
+	case *types.Basic:
+		switch t.Kind() {
+		case types.UnsafePointer:
+			// TODO(hxjiang): replace the implementation with types.Qualifier.
+			return &ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")}
+		default:
+			return ast.NewIdent(t.Name())
+		}
+
+	case *types.Pointer:
+		return &ast.UnaryExpr{
+			Op: token.MUL,
+			X:  TypeExpr(f, pkg, t.Elem()),
+		}
+
+	case *types.Array:
+		return &ast.ArrayType{
+			Len: &ast.BasicLit{
+				Kind:  token.INT,
+				Value: fmt.Sprintf("%d", t.Len()),
+			},
+			Elt: TypeExpr(f, pkg, t.Elem()),
+		}
+
+	case *types.Slice:
+		return &ast.ArrayType{
+			Elt: TypeExpr(f, pkg, t.Elem()),
+		}
+
+	case *types.Map:
+		return &ast.MapType{
+			Key:   TypeExpr(f, pkg, t.Key()),
+			Value: TypeExpr(f, pkg, t.Elem()),
+		}
+
+	case *types.Chan:
+		dir := ast.ChanDir(t.Dir())
+		if t.Dir() == types.SendRecv {
+			dir = ast.SEND | ast.RECV
+		}
+		return &ast.ChanType{
+			Dir:   dir,
+			Value: TypeExpr(f, pkg, t.Elem()),
+		}
+
+	case *types.Signature:
+		var params []*ast.Field
+		for i := 0; i < t.Params().Len(); i++ {
+			params = append(params, &ast.Field{
+				Type: TypeExpr(f, pkg, t.Params().At(i).Type()),
+				Names: []*ast.Ident{
+					{
+						Name: t.Params().At(i).Name(),
+					},
+				},
+			})
+		}
+		if t.Variadic() {
+			last := params[len(params)-1]
+			last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt}
+		}
+		var returns []*ast.Field
+		for i := 0; i < t.Results().Len(); i++ {
+			returns = append(returns, &ast.Field{
+				Type: TypeExpr(f, pkg, t.Results().At(i).Type()),
+			})
+		}
+		return &ast.FuncType{
+			Params: &ast.FieldList{
+				List: params,
+			},
+			Results: &ast.FieldList{
+				List: returns,
+			},
+		}
+
+	case interface{ Obj() *types.TypeName }: // *types.{Alias,Named,TypeParam}
+		switch t.Obj().Pkg() {
+		case pkg, nil:
+			return ast.NewIdent(t.Obj().Name())
+		}
+		pkgName := t.Obj().Pkg().Name()
+
+		// TODO(hxjiang): replace the implementation with types.Qualifier.
+		// If the file already imports the package under another name, use that.
+		for _, cand := range f.Imports {
+			if path, _ := strconv.Unquote(cand.Path.Value); path == t.Obj().Pkg().Path() {
+				if cand.Name != nil && cand.Name.Name != "" {
+					pkgName = cand.Name.Name
+				}
+			}
+		}
+		if pkgName == "." {
+			return ast.NewIdent(t.Obj().Name())
+		}
+		return &ast.SelectorExpr{
+			X:   ast.NewIdent(pkgName),
+			Sel: ast.NewIdent(t.Obj().Name()),
+		}
+
+	case *types.Struct:
+		return ast.NewIdent(t.String())
+
+	case *types.Interface:
+		return ast.NewIdent(t.String())
+
+	case *types.Union:
+		// TODO(hxjiang): handle the union through syntax (~A | ... | ~Z).
+		// Remove nil check when calling typesinternal.TypeExpr.
+		return nil
+
+	case *types.Tuple:
+		panic("invalid input type types.Tuple")
+
+	default:
+		panic("unreachable")
+	}
+}
diff --git a/vendor/golang.org/x/tools/internal/versions/constraint.go b/vendor/golang.org/x/tools/internal/versions/constraint.go
deleted file mode 100644
index 179063d484..0000000000
--- a/vendor/golang.org/x/tools/internal/versions/constraint.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package versions
-
-import "go/build/constraint"
-
-// ConstraintGoVersion is constraint.GoVersion (if built with go1.21+).
-// Otherwise nil.
-//
-// Deprecate once x/tools is after go1.21.
-var ConstraintGoVersion func(x constraint.Expr) string
diff --git a/vendor/golang.org/x/tools/internal/versions/constraint_go121.go b/vendor/golang.org/x/tools/internal/versions/constraint_go121.go
deleted file mode 100644
index 38011407d5..0000000000
--- a/vendor/golang.org/x/tools/internal/versions/constraint_go121.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.21
-// +build go1.21
-
-package versions
-
-import "go/build/constraint"
-
-func init() {
-	ConstraintGoVersion = constraint.GoVersion
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 18e6a425f1..567fe92ab6 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -437,8 +437,8 @@ github.com/scylladb/scylla-manager/v3/pkg/util/timeutc
 github.com/scylladb/scylla-manager/v3/pkg/util/uuid
 github.com/scylladb/scylla-manager/v3/pkg/util/version
 github.com/scylladb/scylla-manager/v3/pkg/util/workerpool
-# github.com/scylladb/scylla-manager/v3/swagger v0.0.0-20241112131737-4fc93b5355fd
-## explicit; go 1.21.1
+# github.com/scylladb/scylla-manager/v3/swagger v0.0.0-20241217161122-cafa851a39fc
+## explicit; go 1.23.2
 github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client
 github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations
 github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models
@@ -571,7 +571,7 @@ golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/modfile
 golang.org/x/mod/module
 golang.org/x/mod/semver
-# golang.org/x/net v0.31.0
+# golang.org/x/net v0.32.0
 ## explicit; go 1.18
 golang.org/x/net/context
 golang.org/x/net/http/httpguts
@@ -616,7 +616,7 @@ golang.org/x/text/unicode/norm
 # golang.org/x/time v0.3.0
 ## explicit
 golang.org/x/time/rate
-# golang.org/x/tools v0.27.0
+# golang.org/x/tools v0.28.0
 ## explicit; go 1.22.0
 golang.org/x/tools/go/ast/astutil
 golang.org/x/tools/go/gcexportdata
@@ -632,6 +632,7 @@ golang.org/x/tools/internal/gcimporter
 golang.org/x/tools/internal/gocommand
 golang.org/x/tools/internal/gopathwalk
 golang.org/x/tools/internal/imports
+golang.org/x/tools/internal/modindex
 golang.org/x/tools/internal/pkgbits
 golang.org/x/tools/internal/stdlib
 golang.org/x/tools/internal/typeparams

From a6fecd9c3aa1ac7c9c8342559fc666acd999d3ad Mon Sep 17 00:00:00 2001
From: Vasil Averyanau 
Date: Tue, 17 Dec 2024 17:27:20 +0100
Subject: [PATCH 2/6] feat(agent): extends `/node_info` response with
 storage_size.

This extends the agent `/node_info` response with `storage_size` and
`data_directory` fields.
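
For reference, a minimal sketch (not the agent code itself) of how the total
size of the filesystem backing the data directory can be derived with
unix.Statfs, which is the approach the change below takes; the path here is
illustrative:

    // Minimal, Linux-only sketch: report the capacity in bytes of the
    // filesystem that backs a data directory.
    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func storageSize(dataDir string) (uint64, error) {
        var statfs unix.Statfs_t
        if err := unix.Statfs(dataDir, &statfs); err != nil {
            return 0, err
        }
        // Total blocks times the filesystem block size gives the capacity in bytes.
        return statfs.Blocks * uint64(statfs.Bsize), nil
    }

    func main() {
        size, err := storageSize("/var/lib/scylla/data") // illustrative path
        if err != nil {
            fmt.Println("statfs:", err)
            return
        }
        fmt.Println("storage_size:", size)
    }
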
---
 pkg/cmd/agent/nodeinfo_linux.go   | 8 ++++++++
 pkg/cmd/agent/nodeinfo_others.go  | 1 +
 pkg/scyllaclient/config_client.go | 1 +
 3 files changed, 10 insertions(+)

diff --git a/pkg/cmd/agent/nodeinfo_linux.go b/pkg/cmd/agent/nodeinfo_linux.go
index fd8ccb998d..b44e457d14 100644
--- a/pkg/cmd/agent/nodeinfo_linux.go
+++ b/pkg/cmd/agent/nodeinfo_linux.go
@@ -7,6 +7,7 @@ package main
 import (
 	"runtime"
 
+	"github.com/pkg/errors"
 	"github.com/scylladb/scylla-manager/v3/pkg/scyllaclient"
 	"golang.org/x/sys/unix"
 )
@@ -17,9 +18,16 @@ func (h *nodeInfoHandler) sysInfo(info *scyllaclient.NodeInfo) error {
 		return err
 	}
 
+	var statfs unix.Statfs_t
+	if err := unix.Statfs(info.DataDirectory, &statfs); err != nil {
+		return errors.Wrap(err, "statfs")
+	}
+	total := statfs.Blocks * uint64(statfs.Bsize)
+
 	info.MemoryTotal = int64(si.Totalram)
 	info.CPUCount = int64(runtime.NumCPU())
 	info.Uptime = si.Uptime
+	info.StorageSize = total
 
 	return nil
 }
diff --git a/pkg/cmd/agent/nodeinfo_others.go b/pkg/cmd/agent/nodeinfo_others.go
index 784b10368f..a2f04bf127 100644
--- a/pkg/cmd/agent/nodeinfo_others.go
+++ b/pkg/cmd/agent/nodeinfo_others.go
@@ -15,5 +15,6 @@ func (h *nodeInfoHandler) sysInfo(info *scyllaclient.NodeInfo) error {
 	info.MemoryTotal = 0
 	info.CPUCount = 0
 	info.Uptime = 0
+	info.StorageSize = 0
 	return nil
 }
diff --git a/pkg/scyllaclient/config_client.go b/pkg/scyllaclient/config_client.go
index 47ced8fa52..3ccac7d963 100644
--- a/pkg/scyllaclient/config_client.go
+++ b/pkg/scyllaclient/config_client.go
@@ -286,6 +286,7 @@ func (c *ConfigClient) NodeInfo(ctx context.Context) (*NodeInfo, error) {
 		{Field: &ni.AlternatorAddress, Fetcher: c.AlternatorAddress},
 		{Field: &ni.AlternatorPort, Fetcher: c.AlternatorPort},
 		{Field: &ni.AlternatorHTTPSPort, Fetcher: c.AlternatorHTTPSPort},
+		{Field: &ni.DataDirectory, Fetcher: c.DataDirectory},
 	}
 
 	for i, ff := range ffs {

From f03311afcff1fbc12199de6cc9fad9e3cc2680a7 Mon Sep 17 00:00:00 2001
From: Vasil Averyanau 
Date: Tue, 17 Dec 2024 17:40:54 +0100
Subject: [PATCH 3/6] feat(backup): extends manifest with info needed for
 1-to-1 restore.

This adds the following data to the backup manifest (an illustrative
sketch of the new content follows below):
General:
  cluster_id: uuid of the cluster
  dc: data center name
  rack: rack from the scylla configuration
  node_id: id of the scylla node (equal to the host id)
Instance Details:
  shard_count: number of shards in the scylla node
  storage_size: total size of the disk in bytes
  cloud_provider: aws|gcp|azure or empty in case of on-premise
  instance_type: instance type, e.g. t2.nano or empty when on-premise

Fixes: #4130
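
For illustration only, a small self-contained sketch that mirrors the new
fields with local types (reusing the json tags added by this patch) and
prints the resulting shape of the manifest extension; all values are made up:

    // Illustrative sketch: local mirror of the fields added to the manifest.
    package main

    import (
        "encoding/json"
        "fmt"
    )

    type instanceDetails struct {
        CloudProvider string `json:"cloud_provider,omitempty"`
        InstanceType  string `json:"instance_type,omitempty"`
        ShardCount    int    `json:"shard_count"`
        StorageSize   uint64 `json:"storage_size"`
    }

    type manifestExtras struct {
        ClusterID       string          `json:"cluster_id"`
        DC              string          `json:"dc"`
        Rack            string          `json:"rack"`
        NodeID          string          `json:"node_id"`
        InstanceDetails instanceDetails `json:"instance_details"`
    }

    func main() {
        m := manifestExtras{
            ClusterID: "10e43bbd-0000-0000-0000-000000000000", // made-up uuid
            DC:        "dc1",
            Rack:      "rack1",
            NodeID:    "a1b2c3d4-0000-0000-0000-000000000000", // equals host id
            InstanceDetails: instanceDetails{
                CloudProvider: "aws",
                InstanceType:  "t2.nano",
                ShardCount:    4,
                StorageSize:   1 << 40, // bytes
            },
        }
        out, _ := json.MarshalIndent(m, "", "  ")
        fmt.Println(string(out))
    }
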
---
 pkg/service/backup/backupspec/manifest.go | 27 ++++++++---
 pkg/service/backup/worker_manifest.go     | 58 ++++++++++++++++++++++-
 2 files changed, 77 insertions(+), 8 deletions(-)

diff --git a/pkg/service/backup/backupspec/manifest.go b/pkg/service/backup/backupspec/manifest.go
index 00aed1906f..9f7b0596eb 100644
--- a/pkg/service/backup/backupspec/manifest.go
+++ b/pkg/service/backup/backupspec/manifest.go
@@ -115,12 +115,27 @@ func (m *ManifestInfo) fileNameParser(v string) error {
 
 // ManifestContent is structure containing information about the backup.
 type ManifestContent struct {
-	Version     string  `json:"version"`
-	ClusterName string  `json:"cluster_name"`
-	IP          string  `json:"ip"`
-	Size        int64   `json:"size"`
-	Tokens      []int64 `json:"tokens"`
-	Schema      string  `json:"schema"`
+	Version         string          `json:"version"`
+	ClusterID       uuid.UUID       `json:"cluster_id"`
+	ClusterName     string          `json:"cluster_name"`
+	NodeID          string          `json:"node_id"`
+	DC              string          `json:"dc"`
+	IP              string          `json:"ip"`
+	Size            int64           `json:"size"`
+	Tokens          []int64         `json:"tokens"`
+	Schema          string          `json:"schema"`
+	Rack            string          `json:"rack"`
+	InstanceDetails InstanceDetails `json:"instance_details"`
+}
+
+// InstanceDetails extends backup manifest with additional instance details.
+// Mainly needed for 1-to-1 restore.
+type InstanceDetails struct {
+	CloudProvider string `json:"cloud_provider,omitempty"`
+	InstanceType  string `json:"instance_type,omitempty"`
+
+	ShardCount  int    `json:"shard_count"`
+	StorageSize uint64 `json:"storage_size"`
 }
 
 // ManifestContentWithIndex is structure containing information about the backup
diff --git a/pkg/service/backup/worker_manifest.go b/pkg/service/backup/worker_manifest.go
index 1e121a4b17..0d5a2946f4 100644
--- a/pkg/service/backup/worker_manifest.go
+++ b/pkg/service/backup/worker_manifest.go
@@ -8,6 +8,7 @@ import (
 	"net/http"
 
 	"github.com/pkg/errors"
+	"github.com/scylladb/scylla-manager/v3/pkg/cloudmeta"
 	"github.com/scylladb/scylla-manager/v3/pkg/scyllaclient"
 	. "github.com/scylladb/scylla-manager/v3/pkg/service/backup/backupspec"
 	"github.com/scylladb/scylla-manager/v3/pkg/util/parallel"
@@ -45,11 +46,14 @@ func (w *worker) createAndUploadHostManifest(ctx context.Context, h hostInfo) er
 		return err
 	}
 
-	m := w.createTemporaryManifest(h, tokens)
+	m, err := w.createTemporaryManifest(ctx, h, tokens)
+	if err != nil {
+		return errors.Wrap(err, "create temp manifest")
+	}
 	return w.uploadHostManifest(ctx, h, m)
 }
 
-func (w *worker) createTemporaryManifest(h hostInfo, tokens []int64) ManifestInfoWithContent {
+func (w *worker) createTemporaryManifest(ctx context.Context, h hostInfo, tokens []int64) (ManifestInfoWithContent, error) {
 	m := &ManifestInfo{
 		Location:    h.Location,
 		DC:          h.DC,
@@ -88,10 +92,60 @@ func (w *worker) createTemporaryManifest(h hostInfo, tokens []int64) ManifestInf
 		c.Size += d.Progress.Size
 	}
 
+	c.ClusterID = w.ClusterID
+	c.NodeID = h.ID
+	c.DC = h.DC
+
+	rack, err := w.Client.HostRack(ctx, h.IP)
+	if err != nil {
+		return ManifestInfoWithContent{}, errors.Wrap(err, "client.HostRack")
+	}
+	c.Rack = rack
+
+	instanceDetails, err := w.manifestInstanceDetails(ctx, h)
+	if err != nil {
+		return ManifestInfoWithContent{}, errors.Wrap(err, "manifest instance details")
+	}
+	c.InstanceDetails = instanceDetails
+
 	return ManifestInfoWithContent{
 		ManifestInfo:             m,
 		ManifestContentWithIndex: c,
+	}, nil
+}
+
+// manifestInstanceDetails collects node/instance specific information that's needed for 1-to-1 restore.
+func (w *worker) manifestInstanceDetails(ctx context.Context, host hostInfo) (InstanceDetails, error) {
+	var result InstanceDetails
+
+	shardCount, err := w.Client.ShardCount(ctx, host.IP)
+	if err != nil {
+		return InstanceDetails{}, errors.Wrap(err, "client.ShardCount")
+	}
+	result.ShardCount = int(shardCount)
+
+	nodeInfo, err := w.Client.NodeInfo(ctx, host.IP)
+	if err != nil {
+		return InstanceDetails{}, errors.Wrap(err, "client.NodeInfo")
 	}
+	result.StorageSize = nodeInfo.StorageSize
+
+	metaSvc, err := cloudmeta.NewCloudMeta(w.Logger)
+	if err != nil {
+		return InstanceDetails{}, errors.Wrap(err, "new cloud meta svc")
+	}
+
+	instanceMeta, err := metaSvc.GetInstanceMetadata(ctx)
+	if err != nil {
+		// Metadata may not be available for several reasons:
+		// 1. running on-premise 2. disabled 3. smth went wrong with metadata server.
+		// As we cannot distiguish between this cases we can only log err and continue with backup.
+		w.Logger.Error(ctx, "Get instance metadata", "err", err)
+	}
+	result.CloudProvider = string(instanceMeta.CloudProvider)
+	result.InstanceType = instanceMeta.InstanceType
+
+	return result, nil
 }
 
 func (w *worker) uploadHostManifest(ctx context.Context, h hostInfo, m ManifestInfoWithContent) error {

From 3afd1cb1056aa26960a3828cb595cb6f9e68f1ba Mon Sep 17 00:00:00 2001
From: Vasil Averyanau 
Date: Wed, 18 Dec 2024 09:05:02 +0100
Subject: [PATCH 4/6] fix(unit-tests): adds `data_directory` to node_info
 golden files.

---
 .../testdata/scylla_api/v2_config_node_info.golden.json      | 5 +++--
 .../v2_config_node_info_alternator_disabled.golden.json      | 5 +++--
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/pkg/scyllaclient/testdata/scylla_api/v2_config_node_info.golden.json b/pkg/scyllaclient/testdata/scylla_api/v2_config_node_info.golden.json
index bb23b229b4..5118cc942e 100644
--- a/pkg/scyllaclient/testdata/scylla_api/v2_config_node_info.golden.json
+++ b/pkg/scyllaclient/testdata/scylla_api/v2_config_node_info.golden.json
@@ -18,5 +18,6 @@
   "rpc_port":"9160",
   "sstable_uuid_format":true,
   "consistent_cluster_management":true,
-  "enable_tablets":true
-}
\ No newline at end of file
+  "enable_tablets":true,
+  "data_directory": "/var/lib/scylla/data"
+}
diff --git a/pkg/scyllaclient/testdata/scylla_api/v2_config_node_info_alternator_disabled.golden.json b/pkg/scyllaclient/testdata/scylla_api/v2_config_node_info_alternator_disabled.golden.json
index fa9484d0b1..6eb3b53f49 100644
--- a/pkg/scyllaclient/testdata/scylla_api/v2_config_node_info_alternator_disabled.golden.json
+++ b/pkg/scyllaclient/testdata/scylla_api/v2_config_node_info_alternator_disabled.golden.json
@@ -17,5 +17,6 @@
   "rpc_address":"192.168.100.101",
   "rpc_port":"9160",
   "sstable_uuid_format":false,
-  "consistent_cluster_management":false
-}
\ No newline at end of file
+  "consistent_cluster_management":false,
+  "data_directory": "/var/lib/scylla/data"
+}

From acf312c7b813d9eec3fecb401e20722e470c8253 Mon Sep 17 00:00:00 2001
From: Vasil Averyanau 
Date: Wed, 18 Dec 2024 13:45:15 +0100
Subject: [PATCH 5/6] fix(cloudmeta): fixes handling of context cancellation.

This fixes an issue where the context passed to GetInstanceMetadata is
canceled before any of the providers' functions return.
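
For reference, a minimal self-contained sketch of the pattern applied by this
fix: select on both the results channel and ctx.Done() so that a canceled
context is honored immediately; the names and types below are illustrative,
not the cloudmeta code:

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"
    )

    type result struct {
        value string
        err   error
    }

    // firstResult returns the first successful result or the combined provider
    // errors, but gives priority to context cancellation so callers are never
    // stuck waiting on slow providers once ctx is canceled.
    func firstResult(ctx context.Context, results <-chan result, n int) (string, error) {
        var errs []error
        for i := 0; i < n; i++ {
            select {
            case <-ctx.Done():
                return "", ctx.Err()
            case r := <-results:
                if r.err != nil {
                    errs = append(errs, r.err)
                    continue
                }
                return r.value, nil
            }
        }
        return "", errors.Join(errs...)
    }

    func main() {
        results := make(chan result)
        go func() {
            time.Sleep(time.Second) // simulate a slow metadata provider
            results <- result{value: "aws"}
        }()

        ctx, cancel := context.WithCancel(context.Background())
        cancel() // already canceled, as in the reported issue

        _, err := firstResult(ctx, results, 1)
        fmt.Println(err) // prints "context canceled"
    }
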
---
 pkg/cloudmeta/metadata.go      | 14 +++++++++-----
 pkg/cloudmeta/metadata_test.go | 30 ++++++++++++++++++++++++++++--
 2 files changed, 37 insertions(+), 7 deletions(-)

diff --git a/pkg/cloudmeta/metadata.go b/pkg/cloudmeta/metadata.go
index 15aee9a174..785d8ecade 100644
--- a/pkg/cloudmeta/metadata.go
+++ b/pkg/cloudmeta/metadata.go
@@ -98,12 +98,16 @@ func (cloud *CloudMeta) GetInstanceMetadata(ctx context.Context) (InstanceMetada
 	// Return the first non error result or wait until all providers return err.
 	var mErr error
 	for range len(cloud.providers) {
-		res := <-results
-		if res.err != nil {
-			mErr = multierr.Append(mErr, res.err)
-			continue
+		select {
+		case <-ctx.Done():
+			return InstanceMetadata{}, ctx.Err()
+		case res := <-results:
+			if res.err != nil {
+				mErr = multierr.Append(mErr, res.err)
+				continue
+			}
+			return res.meta, nil
 		}
-		return res.meta, nil
 	}
 	return InstanceMetadata{}, mErr
 }
diff --git a/pkg/cloudmeta/metadata_test.go b/pkg/cloudmeta/metadata_test.go
index a2d5b782d9..21a85adcdd 100644
--- a/pkg/cloudmeta/metadata_test.go
+++ b/pkg/cloudmeta/metadata_test.go
@@ -77,7 +77,8 @@ func TestGetInstanceMetadata(t *testing.T) {
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
 			cloudmeta := &CloudMeta{
-				providers: tc.providers,
+				providers:       tc.providers,
+				providerTimeout: 1 * time.Second,
 			}
 
 			meta, err := cloudmeta.GetInstanceMetadata(context.Background())
@@ -101,6 +102,31 @@ func TestGetInstanceMetadata(t *testing.T) {
 	}
 }
 
+func TestGetInstanceMetadataWithCancelledContext(t *testing.T) {
+	cloudmeta := &CloudMeta{
+		providers: []CloudMetadataProvider{
+			newTestProvider(t, "test_provider_1", "x-test-1", 1*time.Second, nil),
+		},
+		providerTimeout: 100 * time.Millisecond,
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	cancel()
+
+	meta, err := cloudmeta.GetInstanceMetadata(ctx)
+	if !errors.Is(err, context.Canceled) {
+		t.Fatalf("expected context.Canceled, got %v", err)
+	}
+
+	if meta.CloudProvider != "" {
+		t.Fatalf("meta.CloudProvider should be empty, got %s", meta.CloudProvider)
+	}
+
+	if meta.InstanceType != "" {
+		t.Fatalf("meta.InstanceType should be empty, got %s", meta.InstanceType)
+	}
+}
+
 func newTestProvider(t *testing.T, providerName, instanceType string, latency time.Duration, err error) *testProvider {
 	t.Helper()
 
@@ -128,5 +154,5 @@ func (tp testProvider) Metadata(ctx context.Context) (InstanceMetadata, error) {
 	return InstanceMetadata{
 		CloudProvider: tp.name,
 		InstanceType:  tp.instanceType,
-	}, nil
+	}, ctx.Err()
 }

From 164ff2ce065cd106253fed40d2f6ffe883e73cc6 Mon Sep 17 00:00:00 2001
From: Vasil Averyanau 
Date: Thu, 19 Dec 2024 09:40:17 +0100
Subject: [PATCH 6/6] feat(backup): extends backup manifest with snapshot_tag
 and task_id.

This extends the backup manifest with snapshot_tag and task_id, plus a small
refactoring.
---
 pkg/service/backup/backupspec/manifest.go |  2 ++
 pkg/service/backup/worker_manifest.go     | 11 ++++++-----
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/pkg/service/backup/backupspec/manifest.go b/pkg/service/backup/backupspec/manifest.go
index 9f7b0596eb..41c30895cd 100644
--- a/pkg/service/backup/backupspec/manifest.go
+++ b/pkg/service/backup/backupspec/manifest.go
@@ -120,6 +120,8 @@ type ManifestContent struct {
 	ClusterName     string          `json:"cluster_name"`
 	NodeID          string          `json:"node_id"`
 	DC              string          `json:"dc"`
+	TaskID          uuid.UUID       `json:"task_id"`
+	SnapshotTag     string          `json:"snapshot_tag"`
 	IP              string          `json:"ip"`
 	Size            int64           `json:"size"`
 	Tokens          []int64         `json:"tokens"`
diff --git a/pkg/service/backup/worker_manifest.go b/pkg/service/backup/worker_manifest.go
index 0d5a2946f4..80beed154e 100644
--- a/pkg/service/backup/worker_manifest.go
+++ b/pkg/service/backup/worker_manifest.go
@@ -70,6 +70,11 @@ func (w *worker) createTemporaryManifest(ctx context.Context, h hostInfo, tokens
 		ManifestContent: ManifestContent{
 			Version:     "v2",
 			ClusterName: w.ClusterName,
+			ClusterID:   w.ClusterID,
+			NodeID:      h.ID,
+			DC:          h.DC,
+			SnapshotTag: w.SnapshotTag,
+			TaskID:      w.TaskID,
 			IP:          h.IP,
 			Tokens:      tokens,
 		},
@@ -92,10 +97,6 @@ func (w *worker) createTemporaryManifest(ctx context.Context, h hostInfo, tokens
 		c.Size += d.Progress.Size
 	}
 
-	c.ClusterID = w.ClusterID
-	c.NodeID = h.ID
-	c.DC = h.DC
-
 	rack, err := w.Client.HostRack(ctx, h.IP)
 	if err != nil {
 		return ManifestInfoWithContent{}, errors.Wrap(err, "client.HostRack")
@@ -139,7 +140,7 @@ func (w *worker) manifestInstanceDetails(ctx context.Context, host hostInfo) (In
 	if err != nil {
 		// Metadata may not be available for several reasons:
 		// 1. running on-premise 2. disabled 3. smth went wrong with metadata server.
-		// As we cannot distiguish between this cases we can only log err and continue with backup.
+		// As we cannot distinguish between these cases, we can only log err and continue with the backup.
 		w.Logger.Error(ctx, "Get instance metadata", "err", err)
 	}
 	result.CloudProvider = string(instanceMeta.CloudProvider)