diff --git a/pkg/keyspace/keyspace_test.go b/pkg/keyspace/keyspace_test.go index 3eee9e13a65..3c259649cd3 100644 --- a/pkg/keyspace/keyspace_test.go +++ b/pkg/keyspace/keyspace_test.go @@ -80,7 +80,7 @@ func (suite *keyspaceTestSuite) SetupTest() { suite.ctx, suite.cancel = context.WithCancel(context.Background()) store := endpoint.NewStorageEndpoint(kv.NewMemoryKV(), nil) allocator := mockid.NewIDAllocator() - kgm := NewKeyspaceGroupManager(suite.ctx, store, nil, 0) + kgm := NewKeyspaceGroupManager(suite.ctx, store, nil) suite.manager = NewKeyspaceManager(suite.ctx, store, nil, allocator, &mockConfig{}, kgm) re.NoError(kgm.Bootstrap(suite.ctx)) re.NoError(suite.manager.Bootstrap()) diff --git a/pkg/keyspace/tso_keyspace_group.go b/pkg/keyspace/tso_keyspace_group.go index 68409bd471c..81804e38c2a 100644 --- a/pkg/keyspace/tso_keyspace_group.go +++ b/pkg/keyspace/tso_keyspace_group.go @@ -57,11 +57,10 @@ const ( // GroupManager is the manager of keyspace group related data. type GroupManager struct { - ctx context.Context - cancel context.CancelFunc - wg sync.WaitGroup - client *clientv3.Client - clusterID uint64 + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + client *clientv3.Client syncutil.RWMutex // groups is the cache of keyspace group related information. @@ -86,7 +85,6 @@ func NewKeyspaceGroupManager( ctx context.Context, store endpoint.KeyspaceGroupStorage, client *clientv3.Client, - clusterID uint64, ) *GroupManager { ctx, cancel := context.WithCancel(ctx) groups := make(map[endpoint.UserKind]*indexedHeap) @@ -99,7 +97,6 @@ func NewKeyspaceGroupManager( store: store, groups: groups, client: client, - clusterID: clusterID, nodesBalancer: balancer.GenByPolicy[string](defaultBalancerPolicy), serviceRegistryMap: make(map[string]string), } @@ -107,7 +104,7 @@ func NewKeyspaceGroupManager( // If the etcd client is not nil, start the watch loop for the registered tso servers. // The PD(TSO) Client relies on this info to discover tso servers. 
if m.client != nil { - m.initTSONodesWatcher(m.client, m.clusterID) + m.initTSONodesWatcher(m.client) m.tsoNodesWatcher.StartWatchLoop() } return m @@ -218,8 +215,8 @@ func (m *GroupManager) allocNodesToAllKeyspaceGroups(ctx context.Context) { } } -func (m *GroupManager) initTSONodesWatcher(client *clientv3.Client, clusterID uint64) { - tsoServiceKey := discovery.TSOPath(clusterID) +func (m *GroupManager) initTSONodesWatcher(client *clientv3.Client) { + tsoServiceKey := keypath.TSOPath() putFn := func(kv *mvccpb.KeyValue) error { s := &discovery.ServiceRegistryEntry{} @@ -1154,7 +1151,7 @@ func (m *GroupManager) GetKeyspaceGroupPrimaryByID(id uint32) (string, error) { return "", ErrKeyspaceGroupNotExists(id) } - rootPath := keypath.TSOSvcRootPath(m.clusterID) + rootPath := keypath.TSOSvcRootPath() primaryPath := keypath.KeyspaceGroupPrimaryPath(rootPath, id) leader := &tsopb.Participant{} ok, _, err := etcdutil.GetProtoMsgWithModRev(m.client, primaryPath, leader) diff --git a/pkg/keyspace/tso_keyspace_group_test.go b/pkg/keyspace/tso_keyspace_group_test.go index 2661cde9e7a..5878d7d907f 100644 --- a/pkg/keyspace/tso_keyspace_group_test.go +++ b/pkg/keyspace/tso_keyspace_group_test.go @@ -47,7 +47,7 @@ func (suite *keyspaceGroupTestSuite) SetupTest() { re := suite.Require() suite.ctx, suite.cancel = context.WithCancel(context.Background()) store := endpoint.NewStorageEndpoint(kv.NewMemoryKV(), nil) - suite.kgm = NewKeyspaceGroupManager(suite.ctx, store, nil, 0) + suite.kgm = NewKeyspaceGroupManager(suite.ctx, store, nil) idAllocator := mockid.NewIDAllocator() cluster := mockcluster.NewCluster(suite.ctx, mockconfig.NewTestOptions()) suite.kg = NewKeyspaceManager(suite.ctx, store, cluster, idAllocator, &mockConfig{}, suite.kgm) diff --git a/pkg/mcs/discovery/discover.go b/pkg/mcs/discovery/discover.go index 119ce7957d9..083059780b4 100644 --- a/pkg/mcs/discovery/discover.go +++ b/pkg/mcs/discovery/discover.go @@ -15,21 +15,20 @@ package discovery import ( - "strconv" - "github.com/pingcap/errors" "github.com/pingcap/log" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/storage/kv" "github.com/tikv/pd/pkg/utils/etcdutil" + "github.com/tikv/pd/pkg/utils/keypath" clientv3 "go.etcd.io/etcd/client/v3" "go.uber.org/zap" ) // Discover is used to get all the service instances of the specified service name. 
-func Discover(cli *clientv3.Client, clusterID, serviceName string) ([]string, error) { - key := ServicePath(clusterID, serviceName) +func Discover(cli *clientv3.Client, serviceName string) ([]string, error) { + key := keypath.ServicePath(serviceName) endKey := clientv3.GetPrefixRangeEnd(key) withRange := clientv3.WithRange(endKey) @@ -48,11 +47,7 @@ func Discover(cli *clientv3.Client, clusterID, serviceName string) ([]string, er func GetMSMembers(serviceName string, client *clientv3.Client) ([]ServiceRegistryEntry, error) { switch serviceName { case constant.TSOServiceName, constant.SchedulingServiceName, constant.ResourceManagerServiceName: - clusterID, err := etcdutil.GetClusterID(client, constant.ClusterIDPath) - if err != nil { - return nil, err - } - servicePath := ServicePath(strconv.FormatUint(clusterID, 10), serviceName) + servicePath := keypath.ServicePath(serviceName) resps, err := kv.NewSlowLogTxn(client).Then(clientv3.OpGet(servicePath, clientv3.WithPrefix())).Commit() if err != nil { return nil, errs.ErrEtcdKVGet.Wrap(err).GenWithStackByCause() diff --git a/pkg/mcs/discovery/discover_test.go b/pkg/mcs/discovery/discover_test.go index 2894dfa8d2d..fd66ddcad18 100644 --- a/pkg/mcs/discovery/discover_test.go +++ b/pkg/mcs/discovery/discover_test.go @@ -27,14 +27,14 @@ func TestDiscover(t *testing.T) { re := require.New(t) _, client, clean := etcdutil.NewTestEtcdCluster(t, 1) defer clean() - sr1 := NewServiceRegister(context.Background(), client, "12345", "test_service", "127.0.0.1:1", "127.0.0.1:1", 1) + sr1 := NewServiceRegister(context.Background(), client, "test_service", "127.0.0.1:1", "127.0.0.1:1", 1) err := sr1.Register() re.NoError(err) - sr2 := NewServiceRegister(context.Background(), client, "12345", "test_service", "127.0.0.1:2", "127.0.0.1:2", 1) + sr2 := NewServiceRegister(context.Background(), client, "test_service", "127.0.0.1:2", "127.0.0.1:2", 1) err = sr2.Register() re.NoError(err) - endpoints, err := Discover(client, "12345", "test_service") + endpoints, err := Discover(client, "test_service") re.NoError(err) re.Len(endpoints, 2) re.Equal("127.0.0.1:1", endpoints[0]) @@ -43,7 +43,7 @@ func TestDiscover(t *testing.T) { sr1.cancel() sr2.cancel() time.Sleep(3 * time.Second) - endpoints, err = Discover(client, "12345", "test_service") + endpoints, err = Discover(client, "test_service") re.NoError(err) re.Empty(endpoints) } @@ -55,17 +55,17 @@ func TestServiceRegistryEntry(t *testing.T) { entry1 := &ServiceRegistryEntry{ServiceAddr: "127.0.0.1:1"} s1, err := entry1.Serialize() re.NoError(err) - sr1 := NewServiceRegister(context.Background(), client, "12345", "test_service", "127.0.0.1:1", s1, 1) + sr1 := NewServiceRegister(context.Background(), client, "test_service", "127.0.0.1:1", s1, 1) err = sr1.Register() re.NoError(err) entry2 := &ServiceRegistryEntry{ServiceAddr: "127.0.0.1:2"} s2, err := entry2.Serialize() re.NoError(err) - sr2 := NewServiceRegister(context.Background(), client, "12345", "test_service", "127.0.0.1:2", s2, 1) + sr2 := NewServiceRegister(context.Background(), client, "test_service", "127.0.0.1:2", s2, 1) err = sr2.Register() re.NoError(err) - endpoints, err := Discover(client, "12345", "test_service") + endpoints, err := Discover(client, "test_service") re.NoError(err) re.Len(endpoints, 2) returnedEntry1 := &ServiceRegistryEntry{} @@ -78,7 +78,7 @@ func TestServiceRegistryEntry(t *testing.T) { sr1.cancel() sr2.cancel() time.Sleep(3 * time.Second) - endpoints, err = Discover(client, "12345", "test_service") + endpoints, err = Discover(client, 
"test_service") re.NoError(err) re.Empty(endpoints) } diff --git a/pkg/mcs/discovery/key_path.go b/pkg/mcs/discovery/key_path.go deleted file mode 100644 index 76ca387d4b1..00000000000 --- a/pkg/mcs/discovery/key_path.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2023 TiKV Project Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package discovery - -import ( - "strconv" - "strings" - - "github.com/tikv/pd/pkg/mcs/utils/constant" -) - -const ( - registryKey = "registry" -) - -// RegistryPath returns the full path to store microservice addresses. -func RegistryPath(clusterID, serviceName, serviceAddr string) string { - return strings.Join([]string{constant.MicroserviceRootPath, clusterID, serviceName, registryKey, serviceAddr}, "/") -} - -// ServicePath returns the path to store microservice addresses. -func ServicePath(clusterID, serviceName string) string { - return strings.Join([]string{constant.MicroserviceRootPath, clusterID, serviceName, registryKey, ""}, "/") -} - -// TSOPath returns the path to store TSO addresses. -func TSOPath(clusterID uint64) string { - return ServicePath(strconv.FormatUint(clusterID, 10), "tso") -} diff --git a/pkg/mcs/discovery/register.go b/pkg/mcs/discovery/register.go index ab27387ab91..5ab0ceabfce 100644 --- a/pkg/mcs/discovery/register.go +++ b/pkg/mcs/discovery/register.go @@ -21,6 +21,7 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/utils/etcdutil" + "github.com/tikv/pd/pkg/utils/keypath" "github.com/tikv/pd/pkg/utils/logutil" clientv3 "go.etcd.io/etcd/client/v3" "go.uber.org/zap" @@ -40,9 +41,9 @@ type ServiceRegister struct { } // NewServiceRegister creates a new ServiceRegister. -func NewServiceRegister(ctx context.Context, cli *clientv3.Client, clusterID, serviceName, serviceAddr, serializedValue string, ttl int64) *ServiceRegister { +func NewServiceRegister(ctx context.Context, cli *clientv3.Client, serviceName, serviceAddr, serializedValue string, ttl int64) *ServiceRegister { cctx, cancel := context.WithCancel(ctx) - serviceKey := RegistryPath(clusterID, serviceName, serviceAddr) + serviceKey := keypath.RegistryPath(serviceName, serviceAddr) return &ServiceRegister{ ctx: cctx, cancel: cancel, diff --git a/pkg/mcs/discovery/register_test.go b/pkg/mcs/discovery/register_test.go index bf35393a814..bdaf7e379a4 100644 --- a/pkg/mcs/discovery/register_test.go +++ b/pkg/mcs/discovery/register_test.go @@ -35,10 +35,10 @@ func TestRegister(t *testing.T) { etcd, cfg := servers[0], servers[0].Config() // Test register with http prefix. 
- sr := NewServiceRegister(context.Background(), client, "12345", "test_service", "http://127.0.0.1:1", "http://127.0.0.1:1", 10) + sr := NewServiceRegister(context.Background(), client, "test_service", "http://127.0.0.1:1", "http://127.0.0.1:1", 10) err := sr.Register() re.NoError(err) - re.Equal("/ms/12345/test_service/registry/http://127.0.0.1:1", sr.key) + re.Equal("/ms/0/test_service/registry/http://127.0.0.1:1", sr.key) resp, err := client.Get(context.Background(), sr.key) re.NoError(err) re.Equal("http://127.0.0.1:1", string(resp.Kvs[0].Value)) @@ -51,14 +51,14 @@ func TestRegister(t *testing.T) { re.Empty(resp.Kvs) // Test the case that ctx is canceled. - sr = NewServiceRegister(context.Background(), client, "12345", "test_service", "127.0.0.1:2", "127.0.0.1:2", 1) + sr = NewServiceRegister(context.Background(), client, "test_service", "127.0.0.1:2", "127.0.0.1:2", 1) err = sr.Register() re.NoError(err) sr.cancel() re.Empty(getKeyAfterLeaseExpired(re, client, sr.key)) // Test the case that keepalive is failed when the etcd is restarted. - sr = NewServiceRegister(context.Background(), client, "12345", "test_service", "127.0.0.1:2", "127.0.0.1:2", 1) + sr = NewServiceRegister(context.Background(), client, "test_service", "127.0.0.1:2", "127.0.0.1:2", 1) err = sr.Register() re.NoError(err) fname := testutil.InitTempFileLogger("info") diff --git a/pkg/mcs/metastorage/server/grpc_service.go b/pkg/mcs/metastorage/server/grpc_service.go index c0037353437..32b3788906d 100644 --- a/pkg/mcs/metastorage/server/grpc_service.go +++ b/pkg/mcs/metastorage/server/grpc_service.go @@ -24,6 +24,7 @@ import ( bs "github.com/tikv/pd/pkg/basicserver" "github.com/tikv/pd/pkg/mcs/registry" "github.com/tikv/pd/pkg/utils/apiutil" + "github.com/tikv/pd/pkg/utils/keypath" clientv3 "go.etcd.io/etcd/client/v3" "go.uber.org/zap" "google.golang.org/grpc" @@ -58,10 +59,10 @@ type Service struct { } // NewService creates a new meta storage service. -func NewService[T ClusterIDProvider](svr bs.Server) registry.RegistrableService { +func NewService(svr bs.Server) registry.RegistrableService { return &Service{ ctx: svr.Context(), - manager: NewManager[T](svr), + manager: NewManager(svr), } } @@ -115,11 +116,11 @@ func (s *Service) Watch(req *meta_storagepb.WatchRequest, server meta_storagepb. if res.Err() != nil { var resp meta_storagepb.WatchResponse if startRevision < res.CompactRevision { - resp.Header = s.wrapErrorAndRevision(res.Header.GetRevision(), meta_storagepb.ErrorType_DATA_COMPACTED, + resp.Header = wrapErrorAndRevision(res.Header.GetRevision(), meta_storagepb.ErrorType_DATA_COMPACTED, fmt.Sprintf("required watch revision: %d is smaller than current compact/min revision %d.", startRevision, res.CompactRevision)) resp.CompactRevision = res.CompactRevision } else { - resp.Header = s.wrapErrorAndRevision(res.Header.GetRevision(), meta_storagepb.ErrorType_UNKNOWN, + resp.Header = wrapErrorAndRevision(res.Header.GetRevision(), meta_storagepb.ErrorType_UNKNOWN, fmt.Sprintf("watch channel meet other error %s.", res.Err().Error())) } if err := server.Send(&resp); err != nil { @@ -146,7 +147,7 @@ func (s *Service) Watch(req *meta_storagepb.WatchRequest, server meta_storagepb. 
} if len(events) > 0 { if err := server.Send(&meta_storagepb.WatchResponse{ - Header: &meta_storagepb.ResponseHeader{ClusterId: s.manager.ClusterID(), Revision: res.Header.GetRevision()}, + Header: &meta_storagepb.ResponseHeader{ClusterId: keypath.ClusterID(), Revision: res.Header.GetRevision()}, Events: events, CompactRevision: res.CompactRevision}); err != nil { return err } @@ -180,10 +181,10 @@ func (s *Service) Get(ctx context.Context, req *meta_storagepb.GetRequest) (*met revision = res.Header.GetRevision() } if err != nil { - return &meta_storagepb.GetResponse{Header: s.wrapErrorAndRevision(revision, meta_storagepb.ErrorType_UNKNOWN, err.Error())}, nil + return &meta_storagepb.GetResponse{Header: wrapErrorAndRevision(revision, meta_storagepb.ErrorType_UNKNOWN, err.Error())}, nil } resp := &meta_storagepb.GetResponse{ - Header: &meta_storagepb.ResponseHeader{ClusterId: s.manager.ClusterID(), Revision: revision}, + Header: &meta_storagepb.ResponseHeader{ClusterId: keypath.ClusterID(), Revision: revision}, Count: res.Count, More: res.More, } @@ -219,11 +220,11 @@ func (s *Service) Put(ctx context.Context, req *meta_storagepb.PutRequest) (*met revision = res.Header.GetRevision() } if err != nil { - return &meta_storagepb.PutResponse{Header: s.wrapErrorAndRevision(revision, meta_storagepb.ErrorType_UNKNOWN, err.Error())}, nil + return &meta_storagepb.PutResponse{Header: wrapErrorAndRevision(revision, meta_storagepb.ErrorType_UNKNOWN, err.Error())}, nil } resp := &meta_storagepb.PutResponse{ - Header: &meta_storagepb.ResponseHeader{ClusterId: s.manager.ClusterID(), Revision: revision}, + Header: &meta_storagepb.ResponseHeader{ClusterId: keypath.ClusterID(), Revision: revision}, } if res.PrevKv != nil { resp.PrevKv = &meta_storagepb.KeyValue{Key: res.PrevKv.Key, Value: res.PrevKv.Value} @@ -251,11 +252,11 @@ func (s *Service) Delete(ctx context.Context, req *meta_storagepb.DeleteRequest) revision = res.Header.GetRevision() } if err != nil { - return &meta_storagepb.DeleteResponse{Header: s.wrapErrorAndRevision(revision, meta_storagepb.ErrorType_UNKNOWN, err.Error())}, nil + return &meta_storagepb.DeleteResponse{Header: wrapErrorAndRevision(revision, meta_storagepb.ErrorType_UNKNOWN, err.Error())}, nil } resp := &meta_storagepb.DeleteResponse{ - Header: &meta_storagepb.ResponseHeader{ClusterId: s.manager.ClusterID(), Revision: revision}, + Header: &meta_storagepb.ResponseHeader{ClusterId: keypath.ClusterID(), Revision: revision}, } resp.PrevKvs = make([]*meta_storagepb.KeyValue, len(res.PrevKvs)) for i, kv := range res.PrevKvs { @@ -264,16 +265,16 @@ func (s *Service) Delete(ctx context.Context, req *meta_storagepb.DeleteRequest) return resp, nil } -func (s *Service) wrapErrorAndRevision(revision int64, errorType meta_storagepb.ErrorType, message string) *meta_storagepb.ResponseHeader { - return s.errorHeader(revision, &meta_storagepb.Error{ +func wrapErrorAndRevision(revision int64, errorType meta_storagepb.ErrorType, message string) *meta_storagepb.ResponseHeader { + return errorHeader(revision, &meta_storagepb.Error{ Type: errorType, Message: message, }) } -func (s *Service) errorHeader(revision int64, err *meta_storagepb.Error) *meta_storagepb.ResponseHeader { +func errorHeader(revision int64, err *meta_storagepb.Error) *meta_storagepb.ResponseHeader { return &meta_storagepb.ResponseHeader{ - ClusterId: s.manager.ClusterID(), + ClusterId: keypath.ClusterID(), Revision: revision, Error: err, } diff --git a/pkg/mcs/metastorage/server/install/install.go 
b/pkg/mcs/metastorage/server/install/install.go index defc24d5f99..e0bd38baa0f 100644 --- a/pkg/mcs/metastorage/server/install/install.go +++ b/pkg/mcs/metastorage/server/install/install.go @@ -25,5 +25,5 @@ func init() { // Install registers the API group and grpc service. func Install(register *registry.ServiceRegistry) { - register.RegisterService("MetaStorage", ms_server.NewService[ms_server.ClusterIDProvider]) + register.RegisterService("MetaStorage", ms_server.NewService) } diff --git a/pkg/mcs/metastorage/server/manager.go b/pkg/mcs/metastorage/server/manager.go index 17fee85c08b..49fc58c6b7d 100644 --- a/pkg/mcs/metastorage/server/manager.go +++ b/pkg/mcs/metastorage/server/manager.go @@ -25,19 +25,13 @@ import ( // Manager is the manager of resource group. type Manager struct { - srv bs.Server - clusterID uint64 - client *clientv3.Client - storage *endpoint.StorageEndpoint -} - -// ClusterIDProvider is used to get cluster ID from the given `bs.server` -type ClusterIDProvider interface { - ClusterID() uint64 + srv bs.Server + client *clientv3.Client + storage *endpoint.StorageEndpoint } // NewManager returns a new Manager. -func NewManager[T ClusterIDProvider](srv bs.Server) *Manager { +func NewManager(srv bs.Server) *Manager { m := &Manager{} // The first initialization after the server is started. srv.AddStartCallback(func() { @@ -48,7 +42,6 @@ func NewManager[T ClusterIDProvider](srv bs.Server) *Manager { ) m.client = srv.GetClient() m.srv = srv - m.clusterID = srv.(T).ClusterID() }) return m } @@ -57,8 +50,3 @@ func NewManager[T ClusterIDProvider](srv bs.Server) *Manager { func (m *Manager) GetClient() *clientv3.Client { return m.client } - -// ClusterID returns the cluster ID. -func (m *Manager) ClusterID() uint64 { - return m.clusterID -} diff --git a/pkg/mcs/resourcemanager/server/server.go b/pkg/mcs/resourcemanager/server/server.go index ccac4d5486a..05b01094801 100644 --- a/pkg/mcs/resourcemanager/server/server.go +++ b/pkg/mcs/resourcemanager/server/server.go @@ -65,8 +65,7 @@ type Server struct { serverLoopCancel func() serverLoopWg sync.WaitGroup - cfg *Config - clusterID uint64 + cfg *Config // for the primary election of resource manager participant *member.Participant @@ -113,7 +112,7 @@ func (s *Server) Run() (err error) { return err } - if s.clusterID, s.serviceID, s.serviceRegister, err = utils.Register(s, constant.ResourceManagerServiceName); err != nil { + if s.serviceID, s.serviceRegister, err = utils.Register(s, constant.ResourceManagerServiceName); err != nil { return err } @@ -310,7 +309,7 @@ func (s *Server) startServer() (err error) { Id: uniqueID, // id is unique among all participants ListenUrls: []string{s.cfg.GetAdvertiseListenAddr()}, } - s.participant.InitInfo(p, keypath.ResourceManagerSvcRootPath(s.clusterID), constant.PrimaryKey, "primary election") + s.participant.InitInfo(p, keypath.ResourceManagerSvcRootPath(), constant.PrimaryKey, "primary election") s.service = &Service{ ctx: s.Context(), diff --git a/pkg/mcs/scheduling/server/cluster.go b/pkg/mcs/scheduling/server/cluster.go index 03b133e0488..e4949446da9 100644 --- a/pkg/mcs/scheduling/server/cluster.go +++ b/pkg/mcs/scheduling/server/cluster.go @@ -33,6 +33,7 @@ import ( "github.com/tikv/pd/pkg/statistics/buckets" "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/pkg/storage" + "github.com/tikv/pd/pkg/utils/keypath" "github.com/tikv/pd/pkg/utils/logutil" "go.uber.org/zap" ) @@ -53,7 +54,6 @@ type Cluster struct { coordinator *schedule.Coordinator checkMembershipCh chan struct{} 
apiServerLeader atomic.Value - clusterID uint64 running atomic.Bool // heartbeatRunner is used to process the subtree update task asynchronously. @@ -78,7 +78,14 @@ const ( var syncRunner = ratelimit.NewSyncRunner() // NewCluster creates a new cluster. -func NewCluster(parentCtx context.Context, persistConfig *config.PersistConfig, storage storage.Storage, basicCluster *core.BasicCluster, hbStreams *hbstream.HeartbeatStreams, clusterID uint64, checkMembershipCh chan struct{}) (*Cluster, error) { +func NewCluster( + parentCtx context.Context, + persistConfig *config.PersistConfig, + storage storage.Storage, + basicCluster *core.BasicCluster, + hbStreams *hbstream.HeartbeatStreams, + checkMembershipCh chan struct{}, +) (*Cluster, error) { ctx, cancel := context.WithCancel(parentCtx) labelerManager, err := labeler.NewRegionLabeler(ctx, storage, regionLabelGCInterval) if err != nil { @@ -97,7 +104,6 @@ func NewCluster(parentCtx context.Context, persistConfig *config.PersistConfig, labelStats: statistics.NewLabelStatistics(), regionStats: statistics.NewRegionStatistics(basicCluster, persistConfig, ruleManager), storage: storage, - clusterID: clusterID, checkMembershipCh: checkMembershipCh, heartbeatRunner: ratelimit.NewConcurrentRunner(heartbeatTaskRunner, ratelimit.NewConcurrencyLimiter(uint64(runtime.NumCPU()*2)), time.Minute), @@ -225,7 +231,7 @@ func (c *Cluster) AllocID() (uint64, error) { } ctx, cancel := context.WithTimeout(c.ctx, requestTimeout) defer cancel() - resp, err := client.AllocID(ctx, &pdpb.AllocIDRequest{Header: &pdpb.RequestHeader{ClusterId: c.clusterID}}) + resp, err := client.AllocID(ctx, &pdpb.AllocIDRequest{Header: &pdpb.RequestHeader{ClusterId: keypath.ClusterID()}}) if err != nil { c.triggerMembershipCheck() return 0, err diff --git a/pkg/mcs/scheduling/server/config/watcher.go b/pkg/mcs/scheduling/server/config/watcher.go index 18a568087e7..f0af61c9dd2 100644 --- a/pkg/mcs/scheduling/server/config/watcher.go +++ b/pkg/mcs/scheduling/server/config/watcher.go @@ -78,7 +78,6 @@ type persistedConfig struct { func NewWatcher( ctx context.Context, etcdClient *clientv3.Client, - clusterID uint64, persistConfig *PersistConfig, storage storage.Storage, ) (*Watcher, error) { @@ -86,9 +85,9 @@ func NewWatcher( cw := &Watcher{ ctx: ctx, cancel: cancel, - configPath: keypath.ConfigPath(clusterID), + configPath: keypath.ConfigPath(), ttlConfigPrefix: sc.TTLConfigPrefix, - schedulerConfigPathPrefix: keypath.SchedulerConfigPathPrefix(clusterID), + schedulerConfigPathPrefix: keypath.SchedulerConfigPathPrefix(), etcdClient: etcdClient, PersistConfig: persistConfig, storage: storage, diff --git a/pkg/mcs/scheduling/server/grpc_service.go b/pkg/mcs/scheduling/server/grpc_service.go index 7eb2554f7f2..f4fe606b403 100644 --- a/pkg/mcs/scheduling/server/grpc_service.go +++ b/pkg/mcs/scheduling/server/grpc_service.go @@ -30,6 +30,7 @@ import ( "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/mcs/registry" "github.com/tikv/pd/pkg/utils/apiutil" + "github.com/tikv/pd/pkg/utils/keypath" "github.com/tikv/pd/pkg/utils/logutil" "github.com/tikv/pd/pkg/versioninfo" "go.uber.org/zap" @@ -145,7 +146,7 @@ func (s *Service) RegionHeartbeat(stream schedulingpb.Scheduling_RegionHeartbeat c := s.GetCluster() if c == nil { - resp := &schedulingpb.RegionHeartbeatResponse{Header: s.notBootstrappedHeader()} + resp := &schedulingpb.RegionHeartbeatResponse{Header: notBootstrappedHeader()} err := server.Send(resp) return errors.WithStack(err) } @@ -177,7 +178,7 @@ func (s *Service) StoreHeartbeat(_ 
context.Context, request *schedulingpb.StoreH if c == nil { // TODO: add metrics log.Info("cluster isn't initialized") - return &schedulingpb.StoreHeartbeatResponse{Header: s.notBootstrappedHeader()}, nil + return &schedulingpb.StoreHeartbeatResponse{Header: notBootstrappedHeader()}, nil } if c.GetStore(request.GetStats().GetStoreId()) == nil { @@ -188,18 +189,18 @@ func (s *Service) StoreHeartbeat(_ context.Context, request *schedulingpb.StoreH if err := c.HandleStoreHeartbeat(request); err != nil { log.Error("handle store heartbeat failed", zap.Error(err)) } - return &schedulingpb.StoreHeartbeatResponse{Header: &schedulingpb.ResponseHeader{ClusterId: s.clusterID}}, nil + return &schedulingpb.StoreHeartbeatResponse{Header: wrapHeader()}, nil } // SplitRegions split regions by the given split keys func (s *Service) SplitRegions(ctx context.Context, request *schedulingpb.SplitRegionsRequest) (*schedulingpb.SplitRegionsResponse, error) { c := s.GetCluster() if c == nil { - return &schedulingpb.SplitRegionsResponse{Header: s.notBootstrappedHeader()}, nil + return &schedulingpb.SplitRegionsResponse{Header: notBootstrappedHeader()}, nil } finishedPercentage, newRegionIDs := c.GetRegionSplitter().SplitRegions(ctx, request.GetSplitKeys(), int(request.GetRetryLimit())) return &schedulingpb.SplitRegionsResponse{ - Header: s.header(), + Header: wrapHeader(), RegionsId: newRegionIDs, FinishedPercentage: uint64(finishedPercentage), }, nil @@ -209,12 +210,12 @@ func (s *Service) SplitRegions(ctx context.Context, request *schedulingpb.SplitR func (s *Service) ScatterRegions(_ context.Context, request *schedulingpb.ScatterRegionsRequest) (*schedulingpb.ScatterRegionsResponse, error) { c := s.GetCluster() if c == nil { - return &schedulingpb.ScatterRegionsResponse{Header: s.notBootstrappedHeader()}, nil + return &schedulingpb.ScatterRegionsResponse{Header: notBootstrappedHeader()}, nil } opsCount, failures, err := c.GetRegionScatterer().ScatterRegionsByID(request.GetRegionsId(), request.GetGroup(), int(request.GetRetryLimit()), request.GetSkipStoreLimit()) if err != nil { - header := s.errorHeader(&schedulingpb.Error{ + header := errorHeader(&schedulingpb.Error{ Type: schedulingpb.ErrorType_UNKNOWN, Message: err.Error(), }) @@ -232,7 +233,7 @@ func (s *Service) ScatterRegions(_ context.Context, request *schedulingpb.Scatte }())) } return &schedulingpb.ScatterRegionsResponse{ - Header: s.header(), + Header: wrapHeader(), FinishedPercentage: uint64(percentage), }, nil } @@ -241,14 +242,14 @@ func (s *Service) ScatterRegions(_ context.Context, request *schedulingpb.Scatte func (s *Service) GetOperator(_ context.Context, request *schedulingpb.GetOperatorRequest) (*schedulingpb.GetOperatorResponse, error) { c := s.GetCluster() if c == nil { - return &schedulingpb.GetOperatorResponse{Header: s.notBootstrappedHeader()}, nil + return &schedulingpb.GetOperatorResponse{Header: notBootstrappedHeader()}, nil } opController := c.GetCoordinator().GetOperatorController() requestID := request.GetRegionId() r := opController.GetOperatorStatus(requestID) if r == nil { - header := s.errorHeader(&schedulingpb.Error{ + header := errorHeader(&schedulingpb.Error{ Type: schedulingpb.ErrorType_UNKNOWN, Message: "region not found", }) @@ -256,7 +257,7 @@ func (s *Service) GetOperator(_ context.Context, request *schedulingpb.GetOperat } return &schedulingpb.GetOperatorResponse{ - Header: s.header(), + Header: wrapHeader(), RegionId: requestID, Desc: []byte(r.Desc()), Kind: []byte(r.Kind().String()), @@ -268,12 +269,12 @@ func (s 
*Service) GetOperator(_ context.Context, request *schedulingpb.GetOperat func (s *Service) AskBatchSplit(_ context.Context, request *schedulingpb.AskBatchSplitRequest) (*schedulingpb.AskBatchSplitResponse, error) { c := s.GetCluster() if c == nil { - return &schedulingpb.AskBatchSplitResponse{Header: s.notBootstrappedHeader()}, nil + return &schedulingpb.AskBatchSplitResponse{Header: notBootstrappedHeader()}, nil } if request.GetRegion() == nil { return &schedulingpb.AskBatchSplitResponse{ - Header: s.wrapErrorToHeader(schedulingpb.ErrorType_UNKNOWN, + Header: wrapErrorToHeader(schedulingpb.ErrorType_UNKNOWN, "missing region for split"), }, nil } @@ -327,7 +328,7 @@ func (s *Service) AskBatchSplit(_ context.Context, request *schedulingpb.AskBatc c.GetCoordinator().GetCheckerController().AddPendingProcessedRegions(false, recordRegions...) return &schedulingpb.AskBatchSplitResponse{ - Header: s.header(), + Header: wrapHeader(), Ids: splitIDs, }, nil } @@ -343,28 +344,28 @@ func (s *Service) RegisterRESTHandler(userDefineHandlers map[string]http.Handler return apiutil.RegisterUserDefinedHandlers(userDefineHandlers, &group, handler) } -func (s *Service) errorHeader(err *schedulingpb.Error) *schedulingpb.ResponseHeader { +func errorHeader(err *schedulingpb.Error) *schedulingpb.ResponseHeader { return &schedulingpb.ResponseHeader{ - ClusterId: s.clusterID, + ClusterId: keypath.ClusterID(), Error: err, } } -func (s *Service) notBootstrappedHeader() *schedulingpb.ResponseHeader { - return s.errorHeader(&schedulingpb.Error{ +func notBootstrappedHeader() *schedulingpb.ResponseHeader { + return errorHeader(&schedulingpb.Error{ Type: schedulingpb.ErrorType_NOT_BOOTSTRAPPED, Message: "cluster is not initialized", }) } -func (s *Service) header() *schedulingpb.ResponseHeader { - if s.clusterID == 0 { - return s.wrapErrorToHeader(schedulingpb.ErrorType_NOT_BOOTSTRAPPED, "cluster id is not ready") +func wrapHeader() *schedulingpb.ResponseHeader { + if keypath.ClusterID() == 0 { + return wrapErrorToHeader(schedulingpb.ErrorType_NOT_BOOTSTRAPPED, "cluster id is not ready") } - return &schedulingpb.ResponseHeader{ClusterId: s.clusterID} + return &schedulingpb.ResponseHeader{ClusterId: keypath.ClusterID()} } -func (s *Service) wrapErrorToHeader( +func wrapErrorToHeader( errorType schedulingpb.ErrorType, message string) *schedulingpb.ResponseHeader { - return s.errorHeader(&schedulingpb.Error{Type: errorType, Message: message}) + return errorHeader(&schedulingpb.Error{Type: errorType, Message: message}) } diff --git a/pkg/mcs/scheduling/server/meta/watcher.go b/pkg/mcs/scheduling/server/meta/watcher.go index 40f8c9a4894..9d54a636b9e 100644 --- a/pkg/mcs/scheduling/server/meta/watcher.go +++ b/pkg/mcs/scheduling/server/meta/watcher.go @@ -33,10 +33,9 @@ import ( // Watcher is used to watch the PD API server for any meta changes. type Watcher struct { - wg sync.WaitGroup - ctx context.Context - cancel context.CancelFunc - clusterID uint64 + wg sync.WaitGroup + ctx context.Context + cancel context.CancelFunc // storePathPrefix is the path of the store in etcd: // - Key: /pd/{cluster_id}/raft/s/ // - Value: meta store proto. 
@@ -51,15 +50,13 @@ type Watcher struct { func NewWatcher( ctx context.Context, etcdClient *clientv3.Client, - clusterID uint64, basicCluster *core.BasicCluster, ) (*Watcher, error) { ctx, cancel := context.WithCancel(ctx) w := &Watcher{ ctx: ctx, cancel: cancel, - clusterID: clusterID, - storePathPrefix: keypath.StorePathPrefix(clusterID), + storePathPrefix: keypath.StorePathPrefix(), etcdClient: etcdClient, basicCluster: basicCluster, } @@ -95,7 +92,7 @@ func (w *Watcher) initializeStoreWatcher() error { } deleteFn := func(kv *mvccpb.KeyValue) error { key := string(kv.Key) - storeID, err := keypath.ExtractStoreIDFromPath(w.clusterID, key) + storeID, err := keypath.ExtractStoreIDFromPath(key) if err != nil { return err } diff --git a/pkg/mcs/scheduling/server/rule/watcher.go b/pkg/mcs/scheduling/server/rule/watcher.go index 49a9dcea85a..790dd9c2f81 100644 --- a/pkg/mcs/scheduling/server/rule/watcher.go +++ b/pkg/mcs/scheduling/server/rule/watcher.go @@ -76,7 +76,6 @@ type Watcher struct { func NewWatcher( ctx context.Context, etcdClient *clientv3.Client, - clusterID uint64, ruleStorage endpoint.RuleStorage, checkerController *checker.Controller, ruleManager *placement.RuleManager, @@ -86,10 +85,10 @@ func NewWatcher( rw := &Watcher{ ctx: ctx, cancel: cancel, - rulesPathPrefix: keypath.RulesPathPrefix(clusterID), - ruleCommonPathPrefix: keypath.RuleCommonPathPrefix(clusterID), - ruleGroupPathPrefix: keypath.RuleGroupPathPrefix(clusterID), - regionLabelPathPrefix: keypath.RegionLabelPathPrefix(clusterID), + rulesPathPrefix: keypath.RulesPathPrefix(), + ruleCommonPathPrefix: keypath.RuleCommonPathPrefix(), + ruleGroupPathPrefix: keypath.RuleGroupPathPrefix(), + regionLabelPathPrefix: keypath.RegionLabelPathPrefix(), etcdClient: etcdClient, ruleStorage: ruleStorage, checkerController: checkerController, diff --git a/pkg/mcs/scheduling/server/rule/watcher_test.go b/pkg/mcs/scheduling/server/rule/watcher_test.go index d3d20888c2d..40469eef2a8 100644 --- a/pkg/mcs/scheduling/server/rule/watcher_test.go +++ b/pkg/mcs/scheduling/server/rule/watcher_test.go @@ -66,10 +66,10 @@ func runWatcherLoadLabelRule(ctx context.Context, re *require.Assertions, client rw := &Watcher{ ctx: ctx, cancel: cancel, - rulesPathPrefix: keypath.RulesPathPrefix(clusterID), - ruleCommonPathPrefix: keypath.RuleCommonPathPrefix(clusterID), - ruleGroupPathPrefix: keypath.RuleGroupPathPrefix(clusterID), - regionLabelPathPrefix: keypath.RegionLabelPathPrefix(clusterID), + rulesPathPrefix: keypath.RulesPathPrefix(), + ruleCommonPathPrefix: keypath.RuleCommonPathPrefix(), + ruleGroupPathPrefix: keypath.RuleGroupPathPrefix(), + regionLabelPathPrefix: keypath.RegionLabelPathPrefix(), etcdClient: client, ruleStorage: storage, regionLabeler: labelerManager, @@ -101,7 +101,7 @@ func prepare(t require.TestingT) (context.Context, *clientv3.Client, func()) { } value, err := json.Marshal(rule) re.NoError(err) - key := keypath.RegionLabelPathPrefix(clusterID) + "/" + rule.ID + key := keypath.RegionLabelPathPrefix() + "/" + rule.ID _, err = clientv3.NewKV(client).Put(ctx, key, string(value)) re.NoError(err) } diff --git a/pkg/mcs/scheduling/server/server.go b/pkg/mcs/scheduling/server/server.go index c4f424aedcb..a613b54b82d 100644 --- a/pkg/mcs/scheduling/server/server.go +++ b/pkg/mcs/scheduling/server/server.go @@ -87,7 +87,6 @@ type Server struct { serverLoopWg sync.WaitGroup cfg *config.Config - clusterID uint64 persistConfig *config.PersistConfig basicCluster *core.BasicCluster @@ -157,7 +156,7 @@ func (s *Server) Run() (err error) 
{ return err } - if s.clusterID, s.serviceID, s.serviceRegister, err = utils.Register(s, constant.SchedulingServiceName); err != nil { + if s.serviceID, s.serviceRegister, err = utils.Register(s, constant.SchedulingServiceName); err != nil { return err } @@ -459,7 +458,7 @@ func (s *Server) startServer() (err error) { Id: uniqueID, // id is unique among all participants ListenUrls: []string{s.cfg.GetAdvertiseListenAddr()}, } - s.participant.InitInfo(p, keypath.SchedulingSvcRootPath(s.clusterID), constant.PrimaryKey, "primary election") + s.participant.InitInfo(p, keypath.SchedulingSvcRootPath(), constant.PrimaryKey, "primary election") s.service = &Service{Server: s} s.AddServiceReadyCallback(s.startCluster) @@ -493,8 +492,8 @@ func (s *Server) startCluster(context.Context) error { if err != nil { return err } - s.hbStreams = hbstream.NewHeartbeatStreams(s.Context(), s.clusterID, constant.SchedulingServiceName, s.basicCluster) - s.cluster, err = NewCluster(s.Context(), s.persistConfig, s.storage, s.basicCluster, s.hbStreams, s.clusterID, s.checkMembershipCh) + s.hbStreams = hbstream.NewHeartbeatStreams(s.Context(), constant.SchedulingServiceName, s.basicCluster) + s.cluster, err = NewCluster(s.Context(), s.persistConfig, s.storage, s.basicCluster, s.hbStreams, s.checkMembershipCh) if err != nil { return err } @@ -515,11 +514,11 @@ func (s *Server) stopCluster() { } func (s *Server) startMetaConfWatcher() (err error) { - s.metaWatcher, err = meta.NewWatcher(s.Context(), s.GetClient(), s.clusterID, s.basicCluster) + s.metaWatcher, err = meta.NewWatcher(s.Context(), s.GetClient(), s.basicCluster) if err != nil { return err } - s.configWatcher, err = config.NewWatcher(s.Context(), s.GetClient(), s.clusterID, s.persistConfig, s.storage) + s.configWatcher, err = config.NewWatcher(s.Context(), s.GetClient(), s.persistConfig, s.storage) if err != nil { return err } @@ -527,7 +526,7 @@ func (s *Server) startMetaConfWatcher() (err error) { } func (s *Server) startRuleWatcher() (err error) { - s.ruleWatcher, err = rule.NewWatcher(s.Context(), s.GetClient(), s.clusterID, s.storage, + s.ruleWatcher, err = rule.NewWatcher(s.Context(), s.GetClient(), s.storage, s.cluster.GetCoordinator().GetCheckerController(), s.cluster.GetRuleManager(), s.cluster.GetRegionLabeler()) return err } diff --git a/pkg/mcs/tso/server/grpc_service.go b/pkg/mcs/tso/server/grpc_service.go index 44083eb9a63..33d158fd785 100644 --- a/pkg/mcs/tso/server/grpc_service.go +++ b/pkg/mcs/tso/server/grpc_service.go @@ -27,6 +27,7 @@ import ( bs "github.com/tikv/pd/pkg/basicserver" "github.com/tikv/pd/pkg/mcs/registry" "github.com/tikv/pd/pkg/utils/apiutil" + "github.com/tikv/pd/pkg/utils/keypath" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -103,10 +104,10 @@ func (s *Service) Tso(stream tsopb.TSO_TsoServer) error { } header := request.GetHeader() clusterID := header.GetClusterId() - if clusterID != s.clusterID { + if clusterID != keypath.ClusterID() { return status.Errorf( codes.FailedPrecondition, "mismatch cluster id, need %d but got %d", - s.clusterID, clusterID) + keypath.ClusterID(), clusterID) } keyspaceID := header.GetKeyspaceId() keyspaceGroupID := header.GetKeyspaceGroupId() @@ -122,7 +123,7 @@ func (s *Service) Tso(stream tsopb.TSO_TsoServer) error { keyspaceGroupIDStr := strconv.FormatUint(uint64(keyspaceGroupID), 10) tsoHandleDuration.WithLabelValues(keyspaceGroupIDStr).Observe(time.Since(start).Seconds()) response := &tsopb.TsoResponse{ - Header: s.header(keyspaceGroupBelongTo), 
+ Header: wrapHeader(keyspaceGroupBelongTo), Timestamp: &ts, Count: count, } @@ -139,7 +140,7 @@ func (s *Service) FindGroupByKeyspaceID( respKeyspaceGroup := request.GetHeader().GetKeyspaceGroupId() if errorType, err := s.validRequest(request.GetHeader()); err != nil { return &tsopb.FindGroupByKeyspaceIDResponse{ - Header: s.wrapErrorToHeader(errorType, err.Error(), respKeyspaceGroup), + Header: wrapErrorToHeader(errorType, err.Error(), respKeyspaceGroup), }, nil } @@ -147,12 +148,12 @@ func (s *Service) FindGroupByKeyspaceID( am, keyspaceGroup, keyspaceGroupID, err := s.keyspaceGroupManager.FindGroupByKeyspaceID(keyspaceID) if err != nil { return &tsopb.FindGroupByKeyspaceIDResponse{ - Header: s.wrapErrorToHeader(tsopb.ErrorType_UNKNOWN, err.Error(), keyspaceGroupID), + Header: wrapErrorToHeader(tsopb.ErrorType_UNKNOWN, err.Error(), keyspaceGroupID), }, nil } if keyspaceGroup == nil { return &tsopb.FindGroupByKeyspaceIDResponse{ - Header: s.wrapErrorToHeader( + Header: wrapErrorToHeader( tsopb.ErrorType_UNKNOWN, "keyspace group not found", keyspaceGroupID), }, nil } @@ -175,7 +176,7 @@ func (s *Service) FindGroupByKeyspaceID( } return &tsopb.FindGroupByKeyspaceIDResponse{ - Header: s.header(keyspaceGroupID), + Header: wrapHeader(keyspaceGroupID), KeyspaceGroup: &tsopb.KeyspaceGroup{ Id: keyspaceGroupID, UserKind: keyspaceGroup.UserKind, @@ -193,14 +194,14 @@ func (s *Service) GetMinTS( respKeyspaceGroup := request.GetHeader().GetKeyspaceGroupId() if errorType, err := s.validRequest(request.GetHeader()); err != nil { return &tsopb.GetMinTSResponse{ - Header: s.wrapErrorToHeader(errorType, err.Error(), respKeyspaceGroup), + Header: wrapErrorToHeader(errorType, err.Error(), respKeyspaceGroup), }, nil } minTS, kgAskedCount, kgTotalCount, err := s.keyspaceGroupManager.GetMinTS(request.GetDcLocation()) if err != nil { return &tsopb.GetMinTSResponse{ - Header: s.wrapErrorToHeader( + Header: wrapErrorToHeader( tsopb.ErrorType_UNKNOWN, err.Error(), respKeyspaceGroup), Timestamp: &minTS, KeyspaceGroupsServing: kgAskedCount, @@ -209,7 +210,7 @@ func (s *Service) GetMinTS( } return &tsopb.GetMinTSResponse{ - Header: s.header(respKeyspaceGroup), + Header: wrapHeader(respKeyspaceGroup), Timestamp: &minTS, KeyspaceGroupsServing: kgAskedCount, KeyspaceGroupsTotal: kgTotalCount, @@ -220,29 +221,29 @@ func (s *Service) validRequest(header *tsopb.RequestHeader) (tsopb.ErrorType, er if s.IsClosed() || s.keyspaceGroupManager == nil { return tsopb.ErrorType_NOT_BOOTSTRAPPED, ErrNotStarted } - if header == nil || header.GetClusterId() != s.clusterID { + if header == nil || header.GetClusterId() != keypath.ClusterID() { return tsopb.ErrorType_CLUSTER_MISMATCHED, ErrClusterMismatched } return tsopb.ErrorType_OK, nil } -func (s *Service) header(keyspaceGroupBelongTo uint32) *tsopb.ResponseHeader { - if s.clusterID == 0 { - return s.wrapErrorToHeader( +func wrapHeader(keyspaceGroupBelongTo uint32) *tsopb.ResponseHeader { + if keypath.ClusterID() == 0 { + return wrapErrorToHeader( tsopb.ErrorType_NOT_BOOTSTRAPPED, "cluster id is not ready", keyspaceGroupBelongTo) } - return &tsopb.ResponseHeader{ClusterId: s.clusterID, KeyspaceGroupId: keyspaceGroupBelongTo} + return &tsopb.ResponseHeader{ClusterId: keypath.ClusterID(), KeyspaceGroupId: keyspaceGroupBelongTo} } -func (s *Service) wrapErrorToHeader( +func wrapErrorToHeader( errorType tsopb.ErrorType, message string, keyspaceGroupBelongTo uint32, ) *tsopb.ResponseHeader { - return s.errorHeader(&tsopb.Error{Type: errorType, Message: message}, keyspaceGroupBelongTo) + 
return errorHeader(&tsopb.Error{Type: errorType, Message: message}, keyspaceGroupBelongTo) } -func (s *Service) errorHeader(err *tsopb.Error, keyspaceGroupBelongTo uint32) *tsopb.ResponseHeader { +func errorHeader(err *tsopb.Error, keyspaceGroupBelongTo uint32) *tsopb.ResponseHeader { return &tsopb.ResponseHeader{ - ClusterId: s.clusterID, + ClusterId: keypath.ClusterID(), Error: err, KeyspaceGroupId: keyspaceGroupBelongTo, } diff --git a/pkg/mcs/tso/server/server.go b/pkg/mcs/tso/server/server.go index 002ac3db91b..270cfbe13e9 100644 --- a/pkg/mcs/tso/server/server.go +++ b/pkg/mcs/tso/server/server.go @@ -72,8 +72,7 @@ type Server struct { serverLoopCancel func() serverLoopWg sync.WaitGroup - cfg *Config - clusterID uint64 + cfg *Config service *Service keyspaceGroupManager *tso.KeyspaceGroupManager @@ -156,7 +155,7 @@ func (s *Server) Run() (err error) { return err } - if s.clusterID, s.serviceID, s.serviceRegister, err = utils.Register(s, constant.TSOServiceName); err != nil { + if s.serviceID, s.serviceRegister, err = utils.Register(s, constant.TSOServiceName); err != nil { return err } @@ -258,11 +257,6 @@ func (*Server) AddServiceReadyCallback(...func(context.Context) error) { // Implement the other methods -// ClusterID returns the cluster ID of this server. -func (s *Server) ClusterID() uint64 { - return s.clusterID -} - // IsClosed checks if the server loop is closed func (s *Server) IsClosed() bool { return atomic.LoadInt64(&s.isRunning) == 0 @@ -303,8 +297,9 @@ func (s *Server) ValidateRequest(header *tsopb.RequestHeader) error { if s.IsClosed() { return ErrNotStarted } - if header.GetClusterId() != s.clusterID { - return status.Errorf(codes.FailedPrecondition, "mismatch cluster id, need %d but got %d", s.clusterID, header.GetClusterId()) + if header.GetClusterId() != keypath.ClusterID() { + return status.Errorf(codes.FailedPrecondition, "mismatch cluster id, need %d but got %d", + keypath.ClusterID(), header.GetClusterId()) } return nil } @@ -354,8 +349,9 @@ func (s *Server) GetTLSConfig() *grpcutil.TLSConfig { } func (s *Server) startServer() (err error) { + clusterID := keypath.ClusterID() // It may lose accuracy if use float64 to store uint64. So we store the cluster id in label. - metaDataGauge.WithLabelValues(fmt.Sprintf("cluster%d", s.clusterID)).Set(0) + metaDataGauge.WithLabelValues(fmt.Sprintf("cluster%d", clusterID)).Set(0) // The independent TSO service still reuses PD version info since PD and TSO are just // different service modes provided by the same pd-server binary bs.ServerInfoGauge.WithLabelValues(versioninfo.PDReleaseVersion, versioninfo.PDGitHash).Set(float64(time.Now().Unix())) @@ -363,11 +359,11 @@ func (s *Server) startServer() (err error) { // Initialize the TSO service. 
s.serverLoopCtx, s.serverLoopCancel = context.WithCancel(s.Context()) - legacySvcRootPath := keypath.LegacyRootPath(s.clusterID) - tsoSvcRootPath := keypath.TSOSvcRootPath(s.clusterID) + legacySvcRootPath := keypath.LegacyRootPath() + tsoSvcRootPath := keypath.TSOSvcRootPath() s.keyspaceGroupManager = tso.NewKeyspaceGroupManager( - s.serverLoopCtx, s.serviceID, s.GetClient(), s.GetHTTPClient(), s.cfg.AdvertiseListenAddr, - s.clusterID, legacySvcRootPath, tsoSvcRootPath, s.cfg) + s.serverLoopCtx, s.serviceID, s.GetClient(), s.GetHTTPClient(), + s.cfg.AdvertiseListenAddr, legacySvcRootPath, tsoSvcRootPath, s.cfg) if err := s.keyspaceGroupManager.Initialize(); err != nil { return err } diff --git a/pkg/mcs/utils/constant/constant.go b/pkg/mcs/utils/constant/constant.go index cd01c94f3e0..e8700ffbbc6 100644 --- a/pkg/mcs/utils/constant/constant.go +++ b/pkg/mcs/utils/constant/constant.go @@ -17,8 +17,6 @@ package constant import "time" const ( - // ClusterIDPath is the path to store cluster id - ClusterIDPath = "/pd/cluster_id" // RetryInterval is the interval to retry. // Note: the interval must be less than the timeout of tidb and tikv, which is 2s by default in tikv. RetryInterval = 500 * time.Millisecond diff --git a/pkg/mcs/utils/expected_primary.go b/pkg/mcs/utils/expected_primary.go index bb6d3b0fc37..448344cf08d 100644 --- a/pkg/mcs/utils/expected_primary.go +++ b/pkg/mcs/utils/expected_primary.go @@ -154,11 +154,6 @@ func TransferPrimary(client *clientv3.Client, lease *election.Lease, serviceName r := rand.New(rand.NewSource(time.Now().UnixNano())) nextPrimaryID := r.Intn(len(primaryIDs)) - clusterID, err := etcdutil.GetClusterID(client, constant.ClusterIDPath) - if err != nil { - return errors.Errorf("failed to get cluster ID: %v", err) - } - // update expected primary flag grantResp, err := client.Grant(client.Ctx(), constant.DefaultLeaderLease) if err != nil { @@ -173,9 +168,9 @@ func TransferPrimary(client *clientv3.Client, lease *election.Lease, serviceName var primaryPath string switch serviceName { case constant.SchedulingServiceName: - primaryPath = keypath.SchedulingPrimaryPath(clusterID) + primaryPath = keypath.SchedulingPrimaryPath() case constant.TSOServiceName: - tsoRootPath := keypath.TSOSvcRootPath(clusterID) + tsoRootPath := keypath.TSOSvcRootPath() primaryPath = keypath.KeyspaceGroupPrimaryPath(tsoRootPath, keyspaceGroupID) } _, err = markExpectedPrimaryFlag(client, primaryPath, primaryIDs[nextPrimaryID], grantResp.ID) diff --git a/pkg/mcs/utils/util.go b/pkg/mcs/utils/util.go index dfcfc0e312d..253c846d167 100644 --- a/pkg/mcs/utils/util.go +++ b/pkg/mcs/utils/util.go @@ -20,7 +20,6 @@ import ( "net/http" "os" "path/filepath" - "strconv" "strings" "sync" "time" @@ -34,6 +33,7 @@ import ( "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/mcs/discovery" "github.com/tikv/pd/pkg/mcs/utils/constant" + "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/apiutil/multiservicesapi" "github.com/tikv/pd/pkg/utils/etcdutil" @@ -47,28 +47,6 @@ import ( "google.golang.org/grpc/keepalive" ) -// InitClusterID initializes the cluster ID. 
-func InitClusterID(ctx context.Context, client *clientv3.Client) (id uint64, err error) { - ticker := time.NewTicker(constant.RetryInterval) - defer ticker.Stop() - retryTimes := 0 - for { - if clusterID, err := etcdutil.GetClusterID(client, constant.ClusterIDPath); err == nil && clusterID != 0 { - return clusterID, nil - } - select { - case <-ctx.Done(): - return 0, err - case <-ticker.C: - retryTimes++ - if retryTimes/500 > 0 { - log.Warn("etcd is not ready, retrying", errs.ZapError(err)) - retryTimes /= 500 - } - } - } -} - // PromHandler is a handler to get prometheus metrics. func PromHandler() gin.HandlerFunc { return func(c *gin.Context) { @@ -279,15 +257,10 @@ func StopGRPCServer(s server) { } // Register registers the service. -func Register(s server, serviceName string) (uint64, *discovery.ServiceRegistryEntry, *discovery.ServiceRegister, error) { - var ( - clusterID uint64 - err error - ) - if clusterID, err = InitClusterID(s.Context(), s.GetEtcdClient()); err != nil { - return 0, nil, nil, err +func Register(s server, serviceName string) (*discovery.ServiceRegistryEntry, *discovery.ServiceRegister, error) { + if err := endpoint.InitClusterIDForMs(s.Context(), s.GetEtcdClient()); err != nil { + return nil, nil, err } - log.Info("init cluster id", zap.Uint64("cluster-id", clusterID)) execPath, err := os.Executable() deployPath := filepath.Dir(execPath) if err != nil { @@ -303,15 +276,16 @@ func Register(s server, serviceName string) (uint64, *discovery.ServiceRegistryE } serializedEntry, err := serviceID.Serialize() if err != nil { - return 0, nil, nil, err + return nil, nil, err } - serviceRegister := discovery.NewServiceRegister(s.Context(), s.GetEtcdClient(), strconv.FormatUint(clusterID, 10), - serviceName, s.GetAdvertiseListenAddr(), serializedEntry, discovery.DefaultLeaseInSeconds) + serviceRegister := discovery.NewServiceRegister(s.Context(), s.GetEtcdClient(), + serviceName, s.GetAdvertiseListenAddr(), serializedEntry, + discovery.DefaultLeaseInSeconds) if err := serviceRegister.Register(); err != nil { log.Error("failed to register the service", zap.String("service-name", serviceName), errs.ZapError(err)) - return 0, nil, nil, err + return nil, nil, err } - return clusterID, serviceID, serviceRegister, nil + return serviceID, serviceRegister, nil } // Exit exits the program with the given code. diff --git a/pkg/mock/mockcluster/mockcluster.go b/pkg/mock/mockcluster/mockcluster.go index 1deed77b8dd..2200b34aec7 100644 --- a/pkg/mock/mockcluster/mockcluster.go +++ b/pkg/mock/mockcluster/mockcluster.go @@ -55,7 +55,6 @@ type Cluster struct { *labeler.RegionLabeler *statistics.HotStat *config.PersistOptions - ID uint64 pendingProcessedRegions map[uint64]struct{} *buckets.HotBucketCache storage.Storage diff --git a/pkg/mock/mockhbstream/mockhbstream_test.go b/pkg/mock/mockhbstream/mockhbstream_test.go index aa1ca85279b..87a39b028b9 100644 --- a/pkg/mock/mockhbstream/mockhbstream_test.go +++ b/pkg/mock/mockhbstream/mockhbstream_test.go @@ -38,7 +38,7 @@ func TestActivity(t *testing.T) { cluster.AddRegionStore(2, 0) cluster.AddLeaderRegion(1, 1) region := cluster.GetRegion(1) - hbs := hbstream.NewTestHeartbeatStreams(ctx, cluster.ID, cluster, true) + hbs := hbstream.NewTestHeartbeatStreams(ctx, cluster, true) stream1, stream2 := NewHeartbeatStream(), NewHeartbeatStream() // Active stream is stream1. 
diff --git a/pkg/schedule/checker/merge_checker_test.go b/pkg/schedule/checker/merge_checker_test.go index 03b3a5f83a3..00aaafa2cfd 100644 --- a/pkg/schedule/checker/merge_checker_test.go +++ b/pkg/schedule/checker/merge_checker_test.go @@ -481,7 +481,7 @@ func (suite *mergeCheckerTestSuite) TestStoreLimitWithMerge() { } mc := NewMergeChecker(suite.ctx, tc, tc.GetCheckerConfig()) - stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) + stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc, false /* no need to run */) oc := operator.NewController(suite.ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) regions[2] = regions[2].Clone( diff --git a/pkg/schedule/hbstream/heartbeat_streams.go b/pkg/schedule/hbstream/heartbeat_streams.go index 01beacab449..ef0440a9f77 100644 --- a/pkg/schedule/hbstream/heartbeat_streams.go +++ b/pkg/schedule/hbstream/heartbeat_streams.go @@ -28,6 +28,7 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/mcs/utils/constant" + "github.com/tikv/pd/pkg/utils/keypath" "github.com/tikv/pd/pkg/utils/logutil" "go.uber.org/zap" ) @@ -74,22 +75,22 @@ type HeartbeatStreams struct { } // NewHeartbeatStreams creates a new HeartbeatStreams which enable background running by default. -func NewHeartbeatStreams(ctx context.Context, clusterID uint64, typ string, storeInformer core.StoreSetInformer) *HeartbeatStreams { - return newHbStreams(ctx, clusterID, typ, storeInformer, true) +func NewHeartbeatStreams(ctx context.Context, typ string, storeInformer core.StoreSetInformer) *HeartbeatStreams { + return newHbStreams(ctx, typ, storeInformer, true) } // NewTestHeartbeatStreams creates a new HeartbeatStreams for test purpose only. // Please use NewHeartbeatStreams for other usage. 
-func NewTestHeartbeatStreams(ctx context.Context, clusterID uint64, storeInformer core.StoreSetInformer, needRun bool) *HeartbeatStreams { - return newHbStreams(ctx, clusterID, "", storeInformer, needRun) +func NewTestHeartbeatStreams(ctx context.Context, storeInformer core.StoreSetInformer, needRun bool) *HeartbeatStreams { + return newHbStreams(ctx, "", storeInformer, needRun) } -func newHbStreams(ctx context.Context, clusterID uint64, typ string, storeInformer core.StoreSetInformer, needRun bool) *HeartbeatStreams { +func newHbStreams(ctx context.Context, typ string, storeInformer core.StoreSetInformer, needRun bool) *HeartbeatStreams { hbStreamCtx, hbStreamCancel := context.WithCancel(ctx) hs := &HeartbeatStreams{ hbStreamCtx: hbStreamCtx, hbStreamCancel: hbStreamCancel, - clusterID: clusterID, + clusterID: keypath.ClusterID(), streams: make(map[uint64]HeartbeatStream), msgCh: make(chan core.RegionHeartbeatResponse, heartbeatChanCapacity), streamCh: make(chan streamUpdate, 1), diff --git a/pkg/schedule/operator/operator_controller_test.go b/pkg/schedule/operator/operator_controller_test.go index 6d26613f640..69600f80536 100644 --- a/pkg/schedule/operator/operator_controller_test.go +++ b/pkg/schedule/operator/operator_controller_test.go @@ -136,7 +136,7 @@ func (suite *operatorControllerTestSuite) TestOperatorStatus() { re := suite.Require() opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) - stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) + stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc, false /* no need to run */) oc := NewController(suite.ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) tc.AddLeaderStore(1, 2) tc.AddLeaderStore(2, 0) @@ -172,7 +172,7 @@ func (suite *operatorControllerTestSuite) TestFastFailOperator() { re := suite.Require() opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) - stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) + stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc, false /* no need to run */) oc := NewController(suite.ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) tc.AddLeaderStore(1, 2) tc.AddLeaderStore(2, 0) @@ -207,7 +207,7 @@ func (suite *operatorControllerTestSuite) TestFastFailWithUnhealthyStore() { re := suite.Require() opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) - stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) + stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc, false /* no need to run */) oc := NewController(suite.ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) tc.AddLeaderStore(1, 2) tc.AddLeaderStore(2, 0) @@ -228,7 +228,7 @@ func (suite *operatorControllerTestSuite) TestCheckAddUnexpectedStatus() { opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) - stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) + stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc, false /* no need to run */) oc := NewController(suite.ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) tc.AddLeaderStore(1, 0) tc.AddLeaderStore(2, 1) @@ -294,7 +294,7 @@ func (suite *operatorControllerTestSuite) TestConcurrentRemoveOperator() { re := suite.Require() opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) - stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to 
run */) + stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc, false /* no need to run */) oc := NewController(suite.ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) tc.AddLeaderStore(1, 0) tc.AddLeaderStore(2, 1) @@ -336,7 +336,7 @@ func (suite *operatorControllerTestSuite) TestPollDispatchRegion() { re := suite.Require() opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) - stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) + stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc, false /* no need to run */) oc := NewController(suite.ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) tc.AddLeaderStore(1, 2) tc.AddLeaderStore(2, 1) @@ -411,7 +411,7 @@ func (suite *operatorControllerTestSuite) TestPollDispatchRegionForMergeRegion() re := suite.Require() opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(suite.ctx, opts) - stream := hbstream.NewTestHeartbeatStreams(suite.ctx, cluster.ID, cluster, false /* no need to run */) + stream := hbstream.NewTestHeartbeatStreams(suite.ctx, cluster, false /* no need to run */) controller := NewController(suite.ctx, cluster.GetBasicCluster(), cluster.GetSharedConfig(), stream) cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) cluster.AddLabelsStore(2, 1, map[string]string{"host": "host2"}) @@ -495,7 +495,7 @@ func (suite *operatorControllerTestSuite) TestCheckOperatorLightly() { re := suite.Require() opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(suite.ctx, opts) - stream := hbstream.NewTestHeartbeatStreams(suite.ctx, cluster.ID, cluster, false /* no need to run */) + stream := hbstream.NewTestHeartbeatStreams(suite.ctx, cluster, false /* no need to run */) controller := NewController(suite.ctx, cluster.GetBasicCluster(), cluster.GetSharedConfig(), stream) cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) cluster.AddLabelsStore(2, 1, map[string]string{"host": "host2"}) @@ -535,7 +535,7 @@ func (suite *operatorControllerTestSuite) TestStoreLimit() { re := suite.Require() opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) - stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) + stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc, false /* no need to run */) oc := NewController(suite.ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) tc.AddLeaderStore(1, 0) tc.UpdateLeaderCount(1, 1000) @@ -603,7 +603,7 @@ func (suite *operatorControllerTestSuite) TestStoreLimit() { func (suite *operatorControllerTestSuite) TestDispatchOutdatedRegion() { re := suite.Require() cluster := mockcluster.NewCluster(suite.ctx, mockconfig.NewTestOptions()) - stream := hbstream.NewTestHeartbeatStreams(suite.ctx, cluster.ID, cluster, false /* no need to run */) + stream := hbstream.NewTestHeartbeatStreams(suite.ctx, cluster, false /* no need to run */) controller := NewController(suite.ctx, cluster.GetBasicCluster(), cluster.GetSharedConfig(), stream) cluster.AddLeaderStore(1, 2) @@ -654,7 +654,7 @@ func (suite *operatorControllerTestSuite) TestDispatchOutdatedRegion() { func (suite *operatorControllerTestSuite) TestCalcInfluence() { re := suite.Require() cluster := mockcluster.NewCluster(suite.ctx, mockconfig.NewTestOptions()) - stream := hbstream.NewTestHeartbeatStreams(suite.ctx, cluster.ID, cluster, false /* no need to run */) + stream := hbstream.NewTestHeartbeatStreams(suite.ctx, cluster, false /* no need to run */) controller := 
NewController(suite.ctx, cluster.GetBasicCluster(), cluster.GetSharedConfig(), stream) epoch := &metapb.RegionEpoch{ConfVer: 0, Version: 0} @@ -732,7 +732,7 @@ func (suite *operatorControllerTestSuite) TestCalcInfluence() { func (suite *operatorControllerTestSuite) TestDispatchUnfinishedStep() { re := suite.Require() cluster := mockcluster.NewCluster(suite.ctx, mockconfig.NewTestOptions()) - stream := hbstream.NewTestHeartbeatStreams(suite.ctx, cluster.ID, cluster, false /* no need to run */) + stream := hbstream.NewTestHeartbeatStreams(suite.ctx, cluster, false /* no need to run */) controller := NewController(suite.ctx, cluster.GetBasicCluster(), cluster.GetSharedConfig(), stream) // Create a new region with epoch(0, 0) @@ -870,7 +870,7 @@ func (suite *operatorControllerTestSuite) TestAddWaitingOperator() { re := suite.Require() opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(suite.ctx, opts) - stream := hbstream.NewTestHeartbeatStreams(suite.ctx, cluster.ID, cluster, false /* no need to run */) + stream := hbstream.NewTestHeartbeatStreams(suite.ctx, cluster, false /* no need to run */) controller := NewController(suite.ctx, cluster.GetBasicCluster(), cluster.GetSharedConfig(), stream) cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) cluster.AddLabelsStore(2, 1, map[string]string{"host": "host2"}) @@ -940,7 +940,7 @@ func (suite *operatorControllerTestSuite) TestInvalidStoreId() { re := suite.Require() opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) - stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) + stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc, false /* no need to run */) oc := NewController(suite.ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) // If PD and store 3 are gone, PD will not have info of store 3 after recreating it. 
tc.AddRegionStore(1, 1) @@ -962,7 +962,7 @@ func TestConcurrentAddOperatorAndSetStoreLimit(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() tc := mockcluster.NewCluster(ctx, opt) - stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false /* no need to run */) + stream := hbstream.NewTestHeartbeatStreams(ctx, tc, false /* no need to run */) oc := NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) regionNum := 1000 diff --git a/pkg/schedule/scatter/region_scatterer_test.go b/pkg/schedule/scatter/region_scatterer_test.go index 4dbe60d764e..fdaed888309 100644 --- a/pkg/schedule/scatter/region_scatterer_test.go +++ b/pkg/schedule/scatter/region_scatterer_test.go @@ -91,7 +91,7 @@ func scatter(re *require.Assertions, numStores, numRegions uint64, useRules bool defer cancel() opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) - stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) + stream := hbstream.NewTestHeartbeatStreams(ctx, tc, false) oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) @@ -171,7 +171,7 @@ func scatterSpecial(re *require.Assertions, numOrdinaryStores, numSpecialStores, defer cancel() opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) - stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) + stream := hbstream.NewTestHeartbeatStreams(ctx, tc, false) oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) @@ -249,7 +249,7 @@ func TestStoreLimit(t *testing.T) { defer cancel() opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) - stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) + stream := hbstream.NewTestHeartbeatStreams(ctx, tc, false) oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) // Add stores 1~6. @@ -281,7 +281,7 @@ func TestScatterCheck(t *testing.T) { defer cancel() opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) - stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) + stream := hbstream.NewTestHeartbeatStreams(ctx, tc, false) oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) // Add 5 stores. for i := uint64(1); i <= 5; i++ { @@ -330,7 +330,7 @@ func TestSomeStoresFilteredScatterGroupInConcurrency(t *testing.T) { defer cancel() opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) - stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) + stream := hbstream.NewTestHeartbeatStreams(ctx, tc, false) oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) // Add 5 connected stores. for i := uint64(1); i <= 5; i++ { @@ -375,7 +375,7 @@ func TestScatterGroupInConcurrency(t *testing.T) { defer cancel() opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) - stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) + stream := hbstream.NewTestHeartbeatStreams(ctx, tc, false) oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) // Add 5 stores. 
for i := uint64(1); i <= 5; i++ { @@ -447,7 +447,7 @@ func TestScatterForManyRegion(t *testing.T) { defer cancel() opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) - stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) + stream := hbstream.NewTestHeartbeatStreams(ctx, tc, false) oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) // Add 60 stores. for i := uint64(1); i <= 60; i++ { @@ -475,7 +475,7 @@ func TestScattersGroup(t *testing.T) { defer cancel() opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) - stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) + stream := hbstream.NewTestHeartbeatStreams(ctx, tc, false) oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) // Add 5 stores. for i := uint64(1); i <= 5; i++ { @@ -563,7 +563,7 @@ func TestRegionHasLearner(t *testing.T) { group := "group" opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) - stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) + stream := hbstream.NewTestHeartbeatStreams(ctx, tc, false) oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) // Add 8 stores. voterCount := uint64(6) @@ -651,7 +651,7 @@ func TestSelectedStoresTooFewPeers(t *testing.T) { defer cancel() opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) - stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) + stream := hbstream.NewTestHeartbeatStreams(ctx, tc, false) oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) // Add 4 stores. for i := uint64(1); i <= 4; i++ { @@ -692,7 +692,7 @@ func TestSelectedStoresTooManyPeers(t *testing.T) { defer cancel() opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) - stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) + stream := hbstream.NewTestHeartbeatStreams(ctx, tc, false) oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) // Add 4 stores. for i := uint64(1); i <= 5; i++ { @@ -729,7 +729,7 @@ func TestBalanceLeader(t *testing.T) { defer cancel() opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) - stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) + stream := hbstream.NewTestHeartbeatStreams(ctx, tc, false) oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) // Add 3 stores for i := uint64(2); i <= 4; i++ { @@ -760,7 +760,7 @@ func TestBalanceRegion(t *testing.T) { opt := mockconfig.NewTestOptions() opt.SetLocationLabels([]string{"host"}) tc := mockcluster.NewCluster(ctx, opt) - stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) + stream := hbstream.NewTestHeartbeatStreams(ctx, tc, false) oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) // Add 6 stores in 3 hosts. for i := uint64(2); i <= 7; i++ { @@ -810,7 +810,7 @@ func TestRemoveStoreLimit(t *testing.T) { defer cancel() opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) - stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) + stream := hbstream.NewTestHeartbeatStreams(ctx, tc, false) oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSharedConfig(), stream) // Add stores 1~6. 
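Illustrative sketch (not part of the patch): the test call sites above now build heartbeat streams without threading a cluster ID through, since newHbStreams reads keypath.ClusterID() internally. A test that needs a specific cluster ID can pin the process-global value with keypath.SetClusterID and restore it via keypath.ResetClusterID (both added later in this patch). The test name and import paths below are assumed from the PD repo layout.

package hbstream_test

import (
	"context"
	"testing"

	"github.com/tikv/pd/pkg/mock/mockcluster"
	"github.com/tikv/pd/pkg/mock/mockconfig"
	"github.com/tikv/pd/pkg/schedule/hbstream"
	"github.com/tikv/pd/pkg/utils/keypath"
)

// TestClusterIDViaKeypath is a hypothetical example, not part of this patch.
func TestClusterIDViaKeypath(_ *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Pin the process-global cluster ID for this test, then restore it.
	keypath.SetClusterID(42)
	defer keypath.ResetClusterID()

	opt := mockconfig.NewTestOptions()
	tc := mockcluster.NewCluster(ctx, opt)
	// No cluster ID argument: the stream picks up keypath.ClusterID() itself.
	stream := hbstream.NewTestHeartbeatStreams(ctx, tc, false /* no need to run */)
	_ = stream
}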
diff --git a/pkg/schedule/schedulers/scheduler_test.go b/pkg/schedule/schedulers/scheduler_test.go index 03e38d14da6..0e06b9333e9 100644 --- a/pkg/schedule/schedulers/scheduler_test.go +++ b/pkg/schedule/schedulers/scheduler_test.go @@ -53,7 +53,7 @@ func prepareSchedulersTest(needToRunStream ...bool) (func(), config.SchedulerCon if len(needToRunStream) == 0 { stream = nil } else { - stream = hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, needToRunStream[0]) + stream = hbstream.NewTestHeartbeatStreams(ctx, tc, needToRunStream[0]) } oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSchedulerConfig(), stream) tc.SetHotRegionCacheHitsThreshold(1) diff --git a/pkg/storage/endpoint/cluster_id.go b/pkg/storage/endpoint/cluster_id.go new file mode 100644 index 00000000000..974b65d6c6c --- /dev/null +++ b/pkg/storage/endpoint/cluster_id.go @@ -0,0 +1,147 @@ +// Copyright 2024 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package endpoint + +import ( + "context" + "math/rand" + "time" + + "github.com/pingcap/log" + "github.com/tikv/pd/pkg/errs" + "github.com/tikv/pd/pkg/mcs/utils/constant" + "github.com/tikv/pd/pkg/utils/etcdutil" + "github.com/tikv/pd/pkg/utils/keypath" + "github.com/tikv/pd/pkg/utils/typeutil" + clientv3 "go.etcd.io/etcd/client/v3" + "go.uber.org/zap" +) + +// InitClusterID creates a cluster ID if it hasn't existed. +// This function assumes the cluster ID has already existed and always use a +// cheaper read to retrieve it; if it doesn't exist, invoke the more expensive +// operation initOrGetClusterID(). +func InitClusterID(c *clientv3.Client) (uint64, error) { + clusterID, err := getClusterIDFromEtcd(c) + if err != nil { + return 0, err + } + + if clusterID != 0 { + log.Info("existed cluster id", zap.Uint64("cluster-id", clusterID)) + return clusterID, nil + } + + // If no key exist, generate a random cluster ID. + clusterID, err = initOrGetClusterID(c) + if err != nil { + return 0, err + } + keypath.SetClusterID(clusterID) + log.Info("init cluster id", zap.Uint64("cluster-id", clusterID)) + return clusterID, nil +} + +// getClusterIDFromEtcd gets the cluster ID from etcd if local cache is not set. +func getClusterIDFromEtcd(c *clientv3.Client) (clusterID uint64, err error) { + if id := keypath.ClusterID(); id != 0 { + return id, nil + } + // Get any cluster key to parse the cluster ID. + resp, err := etcdutil.EtcdKVGet(c, keypath.ClusterIDPath) + if err != nil { + return 0, err + } + // If no key exist, generate a random cluster ID. + if len(resp.Kvs) == 0 { + return 0, nil + } + id, err := typeutil.BytesToUint64(resp.Kvs[0].Value) + if err != nil { + return 0, err + } + keypath.SetClusterID(id) + return id, nil +} + +// initOrGetClusterID creates a cluster ID with a CAS operation, +// if the cluster ID doesn't exist. +func initOrGetClusterID(c *clientv3.Client) (uint64, error) { + ctx, cancel := context.WithTimeout(c.Ctx(), etcdutil.DefaultRequestTimeout) + defer cancel() + + var ( + // Generate a random cluster ID. 
+ r = rand.New(rand.NewSource(time.Now().UnixNano())) + ts = uint64(time.Now().Unix()) + clusterID = (ts << 32) + uint64(r.Uint32()) + value = typeutil.Uint64ToBytes(clusterID) + key = keypath.ClusterIDPath + ) + + // Multiple servers may try to init the cluster ID at the same time. + // Only one server can commit this transaction, then other servers + // can get the committed cluster ID. + resp, err := c.Txn(ctx). + If(clientv3.Compare(clientv3.CreateRevision(key), "=", 0)). + Then(clientv3.OpPut(key, string(value))). + Else(clientv3.OpGet(key)). + Commit() + if err != nil { + return 0, errs.ErrEtcdTxnInternal.Wrap(err).GenWithStackByCause() + } + + // Txn commits ok, return the generated cluster ID. + if resp.Succeeded { + return clusterID, nil + } + + // Otherwise, parse the committed cluster ID. + if len(resp.Responses) == 0 { + return 0, errs.ErrEtcdTxnConflict.FastGenByArgs() + } + + response := resp.Responses[0].GetResponseRange() + if response == nil || len(response.Kvs) != 1 { + return 0, errs.ErrEtcdTxnConflict.FastGenByArgs() + } + + return typeutil.BytesToUint64(response.Kvs[0].Value) +} + +// InitClusterIDForMs initializes the cluster ID for microservice. +func InitClusterIDForMs(ctx context.Context, client *clientv3.Client) (err error) { + ticker := time.NewTicker(constant.RetryInterval) + defer ticker.Stop() + retryTimes := 0 + for { + // Microservice should not generate cluster ID by itself. + if clusterID, err := getClusterIDFromEtcd(client); err == nil && clusterID != 0 { + keypath.SetClusterID(clusterID) + log.Info("init cluster id", zap.Uint64("cluster-id", clusterID)) + return nil + } + select { + case <-ctx.Done(): + return err + case <-ticker.C: + retryTimes++ + if retryTimes/500 > 0 { + log.Warn("etcd is not ready, retrying", errs.ZapError(err)) + retryTimes /= 500 + } + } + } +} diff --git a/pkg/storage/endpoint/cluster_id_test.go b/pkg/storage/endpoint/cluster_id_test.go new file mode 100644 index 00000000000..5ce1600044d --- /dev/null +++ b/pkg/storage/endpoint/cluster_id_test.go @@ -0,0 +1,48 @@ +// Copyright 2024 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package endpoint + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/tikv/pd/pkg/utils/etcdutil" + "github.com/tikv/pd/pkg/utils/keypath" +) + +func TestInitClusterID(t *testing.T) { + re := require.New(t) + _, client, clean := etcdutil.NewTestEtcdCluster(t, 1) + defer clean() + + id, err := getClusterIDFromEtcd(client) + re.NoError(err) + re.Equal(uint64(0), id) + re.Equal(uint64(0), keypath.ClusterID()) + + clusterID, err := InitClusterID(client) + re.NoError(err) + re.NotZero(clusterID) + re.Equal(clusterID, keypath.ClusterID()) + + clusterID1, err := InitClusterID(client) + re.NoError(err) + re.Equal(clusterID, clusterID1) + + id, err = getClusterIDFromEtcd(client) + re.NoError(err) + re.Equal(clusterID, id) + re.Equal(clusterID, keypath.ClusterID()) +} diff --git a/pkg/syncer/client.go b/pkg/syncer/client.go index bf7f91cfc60..dd6edbd8d73 100644 --- a/pkg/syncer/client.go +++ b/pkg/syncer/client.go @@ -29,6 +29,7 @@ import ( "github.com/tikv/pd/pkg/ratelimit" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/utils/grpcutil" + "github.com/tikv/pd/pkg/utils/keypath" "github.com/tikv/pd/pkg/utils/logutil" "go.uber.org/zap" "google.golang.org/grpc" @@ -67,7 +68,7 @@ func (s *RegionSyncer) syncRegion(ctx context.Context, conn *grpc.ClientConn) (C return nil, err } err = syncStream.Send(&pdpb.SyncRegionRequest{ - Header: &pdpb.RequestHeader{ClusterId: s.server.ClusterID()}, + Header: &pdpb.RequestHeader{ClusterId: keypath.ClusterID()}, Member: s.server.GetMemberInfo(), StartIndex: s.history.getNextIndex(), }) diff --git a/pkg/syncer/server.go b/pkg/syncer/server.go index 2cdc01053f6..6009bba1d7d 100644 --- a/pkg/syncer/server.go +++ b/pkg/syncer/server.go @@ -33,6 +33,7 @@ import ( "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/storage/kv" "github.com/tikv/pd/pkg/utils/grpcutil" + "github.com/tikv/pd/pkg/utils/keypath" "github.com/tikv/pd/pkg/utils/syncutil" "go.uber.org/zap" "google.golang.org/grpc/codes" @@ -61,7 +62,6 @@ type ServerStream interface { // Server is the abstraction of the syncer storage server. 
type Server interface { LoopContext() context.Context - ClusterID() uint64 GetMemberInfo() *pdpb.Member GetLeader() *pdpb.Member GetStorage() storage.Storage @@ -153,7 +153,7 @@ func (s *RegionSyncer) RunServer(ctx context.Context, regionNotifier <-chan *cor s.history.record(region) } regions := &pdpb.SyncRegionResponse{ - Header: &pdpb.ResponseHeader{ClusterId: s.server.ClusterID()}, + Header: &pdpb.ResponseHeader{ClusterId: keypath.ClusterID()}, Regions: requests, StartIndex: startIndex, RegionStats: stats, @@ -163,7 +163,7 @@ func (s *RegionSyncer) RunServer(ctx context.Context, regionNotifier <-chan *cor s.broadcast(regions) case <-ticker.C: alive := &pdpb.SyncRegionResponse{ - Header: &pdpb.ResponseHeader{ClusterId: s.server.ClusterID()}, + Header: &pdpb.ResponseHeader{ClusterId: keypath.ClusterID()}, StartIndex: s.history.getNextIndex(), } s.broadcast(alive) @@ -205,8 +205,8 @@ func (s *RegionSyncer) Sync(ctx context.Context, stream pdpb.PD_SyncRegionsServe return errors.WithStack(err) } clusterID := request.GetHeader().GetClusterId() - if clusterID != s.server.ClusterID() { - return status.Errorf(codes.FailedPrecondition, "mismatch cluster id, need %d but got %d", s.server.ClusterID(), clusterID) + if clusterID != keypath.ClusterID() { + return status.Errorf(codes.FailedPrecondition, "mismatch cluster id, need %d but got %d", keypath.ClusterID(), clusterID) } log.Info("establish sync region stream", zap.String("requested-server", request.GetMember().GetName()), @@ -230,7 +230,7 @@ func (s *RegionSyncer) syncHistoryRegion(ctx context.Context, request *pdpb.Sync zap.String("requested-server", name), zap.String("server", s.server.Name()), zap.Uint64("last-index", startIndex)) // still send a response to follower to show the history region sync. 
resp := &pdpb.SyncRegionResponse{ - Header: &pdpb.ResponseHeader{ClusterId: s.server.ClusterID()}, + Header: &pdpb.ResponseHeader{ClusterId: keypath.ClusterID()}, Regions: nil, StartIndex: startIndex, RegionStats: nil, @@ -275,7 +275,7 @@ func (s *RegionSyncer) syncHistoryRegion(ctx context.Context, request *pdpb.Sync continue } resp := &pdpb.SyncRegionResponse{ - Header: &pdpb.ResponseHeader{ClusterId: s.server.ClusterID()}, + Header: &pdpb.ResponseHeader{ClusterId: keypath.ClusterID()}, Regions: metas, StartIndex: uint64(lastIndex), RegionStats: stats, @@ -327,7 +327,7 @@ func (s *RegionSyncer) syncHistoryRegion(ctx context.Context, request *pdpb.Sync } } resp := &pdpb.SyncRegionResponse{ - Header: &pdpb.ResponseHeader{ClusterId: s.server.ClusterID()}, + Header: &pdpb.ResponseHeader{ClusterId: keypath.ClusterID()}, Regions: regions, StartIndex: startIndex, RegionStats: stats, diff --git a/pkg/tso/keyspace_group_manager.go b/pkg/tso/keyspace_group_manager.go index 9288e70d968..4c49153814d 100644 --- a/pkg/tso/keyspace_group_manager.go +++ b/pkg/tso/keyspace_group_manager.go @@ -400,7 +400,6 @@ func NewKeyspaceGroupManager( etcdClient *clientv3.Client, httpClient *http.Client, electionNamePrefix string, - clusterID uint64, legacySvcRootPath string, tsoSvcRootPath string, cfg ServiceConfig, @@ -419,7 +418,7 @@ func NewKeyspaceGroupManager( etcdClient: etcdClient, httpClient: httpClient, electionNamePrefix: electionNamePrefix, - tsoServiceKey: discovery.TSOPath(clusterID), + tsoServiceKey: keypath.TSOPath(), legacySvcRootPath: legacySvcRootPath, tsoSvcRootPath: tsoSvcRootPath, primaryPriorityCheckInterval: defaultPrimaryPriorityCheckInterval, diff --git a/pkg/tso/keyspace_group_manager_test.go b/pkg/tso/keyspace_group_manager_test.go index 3e80ca609b3..71d65a80785 100644 --- a/pkg/tso/keyspace_group_manager_test.go +++ b/pkg/tso/keyspace_group_manager_test.go @@ -153,8 +153,10 @@ func (suite *keyspaceGroupManagerTestSuite) TestNewKeyspaceGroupManager() { re := suite.Require() tsoServiceID := &discovery.ServiceRegistryEntry{ServiceAddr: suite.cfg.AdvertiseListenAddr} - clusterID := rand.Uint64() + clusterID, err := endpoint.InitClusterID(suite.etcdClient) + re.NoError(err) clusterIDStr := strconv.FormatUint(clusterID, 10) + keypath.SetClusterID(clusterID) legacySvcRootPath := path.Join("/pd", clusterIDStr) tsoSvcRootPath := path.Join(constant.MicroserviceRootPath, clusterIDStr, "tso") @@ -162,10 +164,9 @@ func (suite *keyspaceGroupManagerTestSuite) TestNewKeyspaceGroupManager() { kgm := NewKeyspaceGroupManager( suite.ctx, tsoServiceID, suite.etcdClient, nil, electionNamePrefix, - clusterID, legacySvcRootPath, tsoSvcRootPath, suite.cfg) + legacySvcRootPath, tsoSvcRootPath, suite.cfg) defer kgm.Close() - err := kgm.Initialize() - re.NoError(err) + re.NoError(kgm.Initialize()) re.Equal(tsoServiceID, kgm.tsoServiceID) re.Equal(suite.etcdClient, kgm.etcdClient) @@ -805,7 +806,7 @@ func (suite *keyspaceGroupManagerTestSuite) newKeyspaceGroupManager( kgm := NewKeyspaceGroupManager( suite.ctx, tsoServiceID, suite.etcdClient, nil, electionNamePrefix, - clusterID, legacySvcRootPath, tsoSvcRootPath, cfg) + legacySvcRootPath, tsoSvcRootPath, cfg) if loadKeyspaceGroupsBatchSize != 0 { kgm.loadKeyspaceGroupsBatchSize = loadKeyspaceGroupsBatchSize } @@ -1045,7 +1046,7 @@ func (suite *keyspaceGroupManagerTestSuite) TestPrimaryPriorityChange() { var err error defaultPriority := constant.DefaultKeyspaceGroupReplicaPriority - clusterID, err := etcdutil.InitOrGetClusterID(suite.etcdClient, "/pd/cluster_id") + 
clusterID, err := endpoint.InitClusterID(suite.etcdClient) re.NoError(err) clusterIDStr := strconv.FormatUint(clusterID, 10) rootPath := path.Join("/pd", clusterIDStr) @@ -1056,10 +1057,10 @@ func (suite *keyspaceGroupManagerTestSuite) TestPrimaryPriorityChange() { // Register TSO server 1 cfg1.Name = "tso1" - err = suite.registerTSOServer(re, clusterIDStr, svcAddr1, cfg1) + err = suite.registerTSOServer(re, svcAddr1, cfg1) re.NoError(err) defer func() { - re.NoError(suite.deregisterTSOServer(clusterIDStr, svcAddr1)) + re.NoError(suite.deregisterTSOServer(svcAddr1)) }() // Create three keyspace groups on two TSO servers with default replica priority. @@ -1105,7 +1106,7 @@ func (suite *keyspaceGroupManagerTestSuite) TestPrimaryPriorityChange() { // Create the Second TSO server. cfg2.Name = "tso2" - err = suite.registerTSOServer(re, clusterIDStr, svcAddr2, cfg2) + err = suite.registerTSOServer(re, svcAddr2, cfg2) re.NoError(err) mgr2 := suite.newKeyspaceGroupManager(1, clusterID, cfg2) re.NotNil(mgr2) @@ -1116,15 +1117,15 @@ func (suite *keyspaceGroupManagerTestSuite) TestPrimaryPriorityChange() { // Shutdown the second TSO server. mgr2.Close() - re.NoError(suite.deregisterTSOServer(clusterIDStr, svcAddr2)) + re.NoError(suite.deregisterTSOServer(svcAddr2)) // The primaries should move back to the first TSO server. waitForPrimariesServing(re, []*KeyspaceGroupManager{mgr1, mgr1, mgr1}, ids) // Restart the Second TSO server. - err = suite.registerTSOServer(re, clusterIDStr, svcAddr2, cfg2) + err = suite.registerTSOServer(re, svcAddr2, cfg2) re.NoError(err) defer func() { - re.NoError(suite.deregisterTSOServer(clusterIDStr, svcAddr2)) + re.NoError(suite.deregisterTSOServer(svcAddr2)) }() mgr2 = suite.newKeyspaceGroupManager(1, clusterID, cfg2) re.NotNil(mgr2) @@ -1151,19 +1152,19 @@ func (suite *keyspaceGroupManagerTestSuite) TestPrimaryPriorityChange() { // Register TSO server. func (suite *keyspaceGroupManagerTestSuite) registerTSOServer( - re *require.Assertions, clusterID, svcAddr string, cfg *TestServiceConfig, + re *require.Assertions, svcAddr string, cfg *TestServiceConfig, ) error { serviceID := &discovery.ServiceRegistryEntry{ServiceAddr: cfg.GetAdvertiseListenAddr(), Name: cfg.Name} serializedEntry, err := serviceID.Serialize() re.NoError(err) - serviceKey := discovery.RegistryPath(clusterID, constant.TSOServiceName, svcAddr) + serviceKey := keypath.RegistryPath(constant.TSOServiceName, svcAddr) _, err = suite.etcdClient.Put(suite.ctx, serviceKey, serializedEntry) return err } // Deregister TSO server. 
-func (suite *keyspaceGroupManagerTestSuite) deregisterTSOServer(clusterID, svcAddr string) error { - serviceKey := discovery.RegistryPath(clusterID, constant.TSOServiceName, svcAddr) +func (suite *keyspaceGroupManagerTestSuite) deregisterTSOServer(svcAddr string) error { + serviceKey := keypath.RegistryPath(constant.TSOServiceName, svcAddr) if _, err := suite.etcdClient.Delete(suite.ctx, serviceKey); err != nil { return err } diff --git a/pkg/tso/util_test.go b/pkg/tso/util_test.go index df8a7854e11..f31f8781ded 100644 --- a/pkg/tso/util_test.go +++ b/pkg/tso/util_test.go @@ -74,15 +74,15 @@ func TestExtractKeyspaceGroupIDFromKeyspaceGroupMembershipPath(t *testing.T) { func TestExtractKeyspaceGroupIDFromKeyspaceGroupPrimaryPath(t *testing.T) { re := require.New(t) - compiledRegexp := keypath.GetCompiledNonDefaultIDRegexp(uint64(111)) + compiledRegexp := keypath.GetCompiledNonDefaultIDRegexp() rightCases := []struct { path string id uint32 }{ - {path: "/ms/111/tso/keyspace_groups/election/00001/primary", id: 1}, - {path: "/ms/111/tso/keyspace_groups/election/12345/primary", id: 12345}, - {path: "/ms/111/tso/keyspace_groups/election/99999/primary", id: 99999}, + {path: "/ms/0/tso/keyspace_groups/election/00001/primary", id: 1}, + {path: "/ms/0/tso/keyspace_groups/election/12345/primary", id: 12345}, + {path: "/ms/0/tso/keyspace_groups/election/99999/primary", id: 99999}, } for _, tt := range rightCases { diff --git a/pkg/unsaferecovery/unsafe_recovery_controller_test.go b/pkg/unsaferecovery/unsafe_recovery_controller_test.go index 5284b372758..feaf5ba7430 100644 --- a/pkg/unsaferecovery/unsafe_recovery_controller_test.go +++ b/pkg/unsaferecovery/unsafe_recovery_controller_test.go @@ -194,7 +194,7 @@ func TestFinished(t *testing.T) { opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(ctx, opts) - coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.ID, cluster, true)) + coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster, true)) coordinator.Run() for _, store := range newTestStores(3, "6.0.0") { cluster.PutStore(store) @@ -274,7 +274,7 @@ func TestFailed(t *testing.T) { opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(ctx, opts) - coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.ID, cluster, true)) + coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster, true)) coordinator.Run() for _, store := range newTestStores(3, "6.0.0") { cluster.PutStore(store) @@ -367,7 +367,7 @@ func TestForceLeaderFail(t *testing.T) { opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(ctx, opts) - coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.ID, cluster, true)) + coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster, true)) coordinator.Run() for _, store := range newTestStores(4, "6.0.0") { cluster.PutStore(store) @@ -447,7 +447,7 @@ func TestAffectedTableID(t *testing.T) { opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(ctx, opts) - coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.ID, cluster, true)) + coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster, true)) coordinator.Run() for _, store := range newTestStores(3, "6.0.0") { cluster.PutStore(store) @@ 
-488,7 +488,7 @@ func TestForceLeaderForCommitMerge(t *testing.T) { opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(ctx, opts) - coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.ID, cluster, true)) + coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster, true)) coordinator.Run() for _, store := range newTestStores(3, "6.0.0") { cluster.PutStore(store) @@ -564,7 +564,7 @@ func TestAutoDetectMode(t *testing.T) { opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(ctx, opts) - coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.ID, cluster, true)) + coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster, true)) coordinator.Run() for _, store := range newTestStores(1, "6.0.0") { cluster.PutStore(store) @@ -617,7 +617,7 @@ func TestAutoDetectWithOneLearner(t *testing.T) { opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(ctx, opts) - coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.ID, cluster, true)) + coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster, true)) coordinator.Run() for _, store := range newTestStores(1, "6.0.0") { cluster.PutStore(store) @@ -658,7 +658,7 @@ func TestOneLearner(t *testing.T) { opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(ctx, opts) - coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.ID, cluster, true)) + coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster, true)) coordinator.Run() for _, store := range newTestStores(3, "6.0.0") { cluster.PutStore(store) @@ -713,7 +713,7 @@ func TestTiflashLearnerPeer(t *testing.T) { opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(ctx, opts) - coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.ID, cluster, true)) + coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster, true)) coordinator.Run() for _, store := range newTestStores(5, "6.0.0") { if store.GetID() == 3 { @@ -888,7 +888,7 @@ func TestUninitializedPeer(t *testing.T) { opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(ctx, opts) - coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.ID, cluster, true)) + coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster, true)) coordinator.Run() for _, store := range newTestStores(3, "6.0.0") { cluster.PutStore(store) @@ -944,7 +944,7 @@ func TestJointState(t *testing.T) { opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(ctx, opts) - coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.ID, cluster, true)) + coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster, true)) coordinator.Run() for _, store := range newTestStores(5, "6.0.0") { cluster.PutStore(store) @@ -1137,7 +1137,7 @@ func TestExecutionTimeout(t *testing.T) { opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(ctx, opts) - coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.ID, cluster, true)) + coordinator := 
schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster, true)) coordinator.Run() for _, store := range newTestStores(3, "6.0.0") { cluster.PutStore(store) @@ -1169,7 +1169,7 @@ func TestNoHeartbeatTimeout(t *testing.T) { opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(ctx, opts) - coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.ID, cluster, true)) + coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster, true)) coordinator.Run() for _, store := range newTestStores(3, "6.0.0") { cluster.PutStore(store) @@ -1192,7 +1192,7 @@ func TestExitForceLeader(t *testing.T) { opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(ctx, opts) - coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.ID, cluster, true)) + coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster, true)) coordinator.Run() for _, store := range newTestStores(3, "6.0.0") { cluster.PutStore(store) @@ -1270,7 +1270,7 @@ func TestStep(t *testing.T) { opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(ctx, opts) - coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.ID, cluster, true)) + coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster, true)) coordinator.Run() for _, store := range newTestStores(3, "6.0.0") { cluster.PutStore(store) @@ -1325,7 +1325,7 @@ func TestOnHealthyRegions(t *testing.T) { opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(ctx, opts) - coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.ID, cluster, true)) + coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster, true)) coordinator.Run() for _, store := range newTestStores(5, "6.0.0") { cluster.PutStore(store) @@ -1401,7 +1401,7 @@ func TestCreateEmptyRegion(t *testing.T) { opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(ctx, opts) - coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.ID, cluster, true)) + coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster, true)) coordinator.Run() for _, store := range newTestStores(3, "6.0.0") { cluster.PutStore(store) @@ -1510,7 +1510,7 @@ func TestRangeOverlap1(t *testing.T) { opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(ctx, opts) - coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.ID, cluster, true)) + coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster, true)) coordinator.Run() for _, store := range newTestStores(5, "6.0.0") { cluster.PutStore(store) @@ -1605,7 +1605,7 @@ func TestRangeOverlap2(t *testing.T) { opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(ctx, opts) - coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.ID, cluster, true)) + coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster, true)) coordinator.Run() for _, store := range newTestStores(5, "6.0.0") { cluster.PutStore(store) @@ -1699,7 +1699,7 @@ func TestRemoveFailedStores(t *testing.T) { opts := mockconfig.NewTestOptions() cluster := 
mockcluster.NewCluster(ctx, opts) - coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.ID, cluster, true)) + coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster, true)) coordinator.Run() stores := newTestStores(2, "5.3.0") stores[1] = stores[1].Clone(core.SetLastHeartbeatTS(time.Now())) @@ -1740,7 +1740,7 @@ func TestRunning(t *testing.T) { opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(ctx, opts) - coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.ID, cluster, true)) + coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster, true)) coordinator.Run() stores := newTestStores(2, "5.3.0") stores[1] = stores[1].Clone(core.SetLastHeartbeatTS(time.Now())) @@ -1762,7 +1762,7 @@ func TestEpochComparison(t *testing.T) { opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(ctx, opts) - coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.ID, cluster, true)) + coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster, true)) coordinator.Run() for _, store := range newTestStores(3, "6.0.0") { cluster.PutStore(store) @@ -1864,7 +1864,7 @@ func TestSelectLeader(t *testing.T) { opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(ctx, opts) - coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.ID, cluster, true)) + coordinator := schedule.NewCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster, true)) coordinator.Run() stores := newTestStores(6, "6.0.0") labels := []*metapb.StoreLabel{ diff --git a/pkg/utils/etcdutil/etcdutil.go b/pkg/utils/etcdutil/etcdutil.go index b63d5b5d0d9..70beae943f8 100644 --- a/pkg/utils/etcdutil/etcdutil.go +++ b/pkg/utils/etcdutil/etcdutil.go @@ -17,7 +17,6 @@ package etcdutil import ( "context" "crypto/tls" - "math/rand" "net/http" "net/url" "strings" @@ -32,7 +31,6 @@ import ( "github.com/tikv/pd/pkg/utils/grpcutil" "github.com/tikv/pd/pkg/utils/logutil" "github.com/tikv/pd/pkg/utils/syncutil" - "github.com/tikv/pd/pkg/utils/typeutil" "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" @@ -299,79 +297,6 @@ func CreateHTTPClient(tlsConfig *tls.Config) *http.Client { return cli } -// InitClusterID creates a cluster ID for the given key if it hasn't existed. -// This function assumes the cluster ID has already existed and always use a -// cheaper read to retrieve it; if it doesn't exist, invoke the more expensive -// operation InitOrGetClusterID(). -func InitClusterID(c *clientv3.Client, key string) (clusterID uint64, err error) { - // Get any cluster key to parse the cluster ID. - resp, err := EtcdKVGet(c, key) - if err != nil { - return 0, err - } - // If no key exist, generate a random cluster ID. - if len(resp.Kvs) == 0 { - return InitOrGetClusterID(c, key) - } - return typeutil.BytesToUint64(resp.Kvs[0].Value) -} - -// GetClusterID gets the cluster ID for the given key. -func GetClusterID(c *clientv3.Client, key string) (clusterID uint64, err error) { - // Get any cluster key to parse the cluster ID. - resp, err := EtcdKVGet(c, key) - if err != nil { - return 0, err - } - // If no key exist, generate a random cluster ID. 
- if len(resp.Kvs) == 0 { - return 0, nil - } - return typeutil.BytesToUint64(resp.Kvs[0].Value) -} - -// InitOrGetClusterID creates a cluster ID for the given key with a CAS operation, -// if the cluster ID doesn't exist. -func InitOrGetClusterID(c *clientv3.Client, key string) (uint64, error) { - ctx, cancel := context.WithTimeout(c.Ctx(), DefaultRequestTimeout) - defer cancel() - - // Generate a random cluster ID. - r := rand.New(rand.NewSource(time.Now().UnixNano())) - ts := uint64(time.Now().Unix()) - clusterID := (ts << 32) + uint64(r.Uint32()) - value := typeutil.Uint64ToBytes(clusterID) - - // Multiple servers may try to init the cluster ID at the same time. - // Only one server can commit this transaction, then other servers - // can get the committed cluster ID. - resp, err := c.Txn(ctx). - If(clientv3.Compare(clientv3.CreateRevision(key), "=", 0)). - Then(clientv3.OpPut(key, string(value))). - Else(clientv3.OpGet(key)). - Commit() - if err != nil { - return 0, errs.ErrEtcdTxnInternal.Wrap(err).GenWithStackByCause() - } - - // Txn commits ok, return the generated cluster ID. - if resp.Succeeded { - return clusterID, nil - } - - // Otherwise, parse the committed cluster ID. - if len(resp.Responses) == 0 { - return 0, errs.ErrEtcdTxnConflict.FastGenByArgs() - } - - response := resp.Responses[0].GetResponseRange() - if response == nil || len(response.Kvs) != 1 { - return 0, errs.ErrEtcdTxnConflict.FastGenByArgs() - } - - return typeutil.BytesToUint64(response.Kvs[0].Value) -} - const ( defaultEtcdRetryInterval = time.Second defaultLoadFromEtcdRetryTimes = 3 diff --git a/pkg/utils/etcdutil/etcdutil_test.go b/pkg/utils/etcdutil/etcdutil_test.go index 623da37aa98..92ec8967d03 100644 --- a/pkg/utils/etcdutil/etcdutil_test.go +++ b/pkg/utils/etcdutil/etcdutil_test.go @@ -156,25 +156,6 @@ func TestEtcdKVPutWithTTL(t *testing.T) { re.Equal(int64(0), resp.Count) } -func TestInitClusterID(t *testing.T) { - re := require.New(t) - _, client, clean := NewTestEtcdCluster(t, 1) - defer clean() - pdClusterIDPath := "test/TestInitClusterID/pd/cluster_id" - // Get any cluster key to parse the cluster ID. - resp, err := EtcdKVGet(client, pdClusterIDPath) - re.NoError(err) - re.Empty(resp.Kvs) - - clusterID, err := InitClusterID(client, pdClusterIDPath) - re.NoError(err) - re.NotZero(clusterID) - - clusterID1, err := InitClusterID(client, pdClusterIDPath) - re.NoError(err) - re.Equal(clusterID, clusterID1) -} - func TestEtcdClientSync(t *testing.T) { re := require.New(t) re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/utils/etcdutil/fastTick", "return(true)")) diff --git a/pkg/utils/keypath/cluster_id.go b/pkg/utils/keypath/cluster_id.go new file mode 100644 index 00000000000..e1117f15738 --- /dev/null +++ b/pkg/utils/keypath/cluster_id.go @@ -0,0 +1,40 @@ +// Copyright 2024 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package keypath + +import "sync/atomic" + +// clusterID is the unique ID for the cluster. 
We put it in this package is +// because it is always used with key path. +var clusterID atomic.Value + +// ClusterID returns the cluster ID. +func ClusterID() uint64 { + id := clusterID.Load() + if id == nil { + return 0 + } + return id.(uint64) +} + +// SetClusterID sets the cluster ID. +func SetClusterID(id uint64) { + clusterID.Store(id) +} + +// ResetClusterID resets the cluster ID to 0. It's only used in tests. +func ResetClusterID() { + clusterID.Store(uint64(0)) +} diff --git a/pkg/utils/keypath/key_path.go b/pkg/utils/keypath/key_path.go index 3696a35c4d3..5f3aafeca36 100644 --- a/pkg/utils/keypath/key_path.go +++ b/pkg/utils/keypath/key_path.go @@ -76,11 +76,14 @@ const ( // we use uint64 to represent ID, the max length of uint64 is 20. keyLen = 20 + + // ClusterIDPath is the path to store cluster id + ClusterIDPath = "/pd/cluster_id" ) // PDRootPath returns the PD root path. -func PDRootPath(clusterID uint64) string { - return path.Join(pdRootPath, strconv.FormatUint(clusterID, 10)) +func PDRootPath() string { + return path.Join(pdRootPath, strconv.FormatUint(ClusterID(), 10)) } // AppendToRootPath appends the given key to the rootPath. @@ -99,33 +102,33 @@ func ClusterBootstrapTimeKey() string { } // ConfigPath returns the path to save the PD config. -func ConfigPath(clusterID uint64) string { - return path.Join(PDRootPath(clusterID), Config) +func ConfigPath() string { + return path.Join(PDRootPath(), Config) } // SchedulerConfigPathPrefix returns the path prefix to save the scheduler config. -func SchedulerConfigPathPrefix(clusterID uint64) string { - return path.Join(PDRootPath(clusterID), CustomSchedulerConfigPath) +func SchedulerConfigPathPrefix() string { + return path.Join(PDRootPath(), CustomSchedulerConfigPath) } // RulesPathPrefix returns the path prefix to save the placement rules. -func RulesPathPrefix(clusterID uint64) string { - return path.Join(PDRootPath(clusterID), RulesPath) +func RulesPathPrefix() string { + return path.Join(PDRootPath(), RulesPath) } // RuleCommonPathPrefix returns the path prefix to save the placement rule common config. -func RuleCommonPathPrefix(clusterID uint64) string { - return path.Join(PDRootPath(clusterID), ruleCommonPath) +func RuleCommonPathPrefix() string { + return path.Join(PDRootPath(), ruleCommonPath) } // RuleGroupPathPrefix returns the path prefix to save the placement rule groups. -func RuleGroupPathPrefix(clusterID uint64) string { - return path.Join(PDRootPath(clusterID), RuleGroupPath) +func RuleGroupPathPrefix() string { + return path.Join(PDRootPath(), RuleGroupPath) } // RegionLabelPathPrefix returns the path prefix to save the region label. -func RegionLabelPathPrefix(clusterID uint64) string { - return path.Join(PDRootPath(clusterID), RegionLabelPath) +func RegionLabelPathPrefix() string { + return path.Join(PDRootPath(), RegionLabelPath) } // SchedulerConfigPath returns the path to save the scheduler config. @@ -139,13 +142,13 @@ func StorePath(storeID uint64) string { } // StorePathPrefix returns the store meta info key path prefix. -func StorePathPrefix(clusterID uint64) string { - return path.Join(PDRootPath(clusterID), ClusterPath, "s") + "/" +func StorePathPrefix() string { + return path.Join(PDRootPath(), ClusterPath, "s") + "/" } // ExtractStoreIDFromPath extracts the store ID from the given path. 
-func ExtractStoreIDFromPath(clusterID uint64, path string) (uint64, error) { - idStr := strings.TrimLeft(strings.TrimPrefix(path, StorePathPrefix(clusterID)), "0") +func ExtractStoreIDFromPath(path string) (uint64, error) { + idStr := strings.TrimLeft(strings.TrimPrefix(path, StorePathPrefix()), "0") return strconv.ParseUint(idStr, 10, 64) } @@ -316,31 +319,31 @@ func GetCompiledKeyspaceGroupIDRegexp() *regexp.Regexp { // ResourceManagerSvcRootPath returns the root path of resource manager service. // Path: /ms/{cluster_id}/resource_manager -func ResourceManagerSvcRootPath(clusterID uint64) string { - return svcRootPath(clusterID, constant.ResourceManagerServiceName) +func ResourceManagerSvcRootPath() string { + return svcRootPath(constant.ResourceManagerServiceName) } // SchedulingSvcRootPath returns the root path of scheduling service. // Path: /ms/{cluster_id}/scheduling -func SchedulingSvcRootPath(clusterID uint64) string { - return svcRootPath(clusterID, constant.SchedulingServiceName) +func SchedulingSvcRootPath() string { + return svcRootPath(constant.SchedulingServiceName) } // TSOSvcRootPath returns the root path of tso service. // Path: /ms/{cluster_id}/tso -func TSOSvcRootPath(clusterID uint64) string { - return svcRootPath(clusterID, constant.TSOServiceName) +func TSOSvcRootPath() string { + return svcRootPath(constant.TSOServiceName) } -func svcRootPath(clusterID uint64, svcName string) string { - c := strconv.FormatUint(clusterID, 10) +func svcRootPath(svcName string) string { + c := strconv.FormatUint(ClusterID(), 10) return path.Join(constant.MicroserviceRootPath, c, svcName) } // LegacyRootPath returns the root path of legacy pd service. // Path: /pd/{cluster_id} -func LegacyRootPath(clusterID uint64) string { - return path.Join(pdRootPath, strconv.FormatUint(clusterID, 10)) +func LegacyRootPath() string { + return path.Join(pdRootPath, strconv.FormatUint(ClusterID(), 10)) } // KeyspaceGroupPrimaryPath returns the path of keyspace group primary. @@ -353,8 +356,8 @@ func KeyspaceGroupPrimaryPath(rootPath string, keyspaceGroupID uint32) string { // SchedulingPrimaryPath returns the path of scheduling primary. // Path: /ms/{cluster_id}/scheduling/primary -func SchedulingPrimaryPath(clusterID uint64) string { - return path.Join(SchedulingSvcRootPath(clusterID), constant.PrimaryKey) +func SchedulingPrimaryPath() string { + return path.Join(SchedulingSvcRootPath(), constant.PrimaryKey) } // KeyspaceGroupsElectionPath returns the path of keyspace groups election. @@ -368,8 +371,8 @@ func KeyspaceGroupsElectionPath(rootPath string, keyspaceGroupID uint32) string } // GetCompiledNonDefaultIDRegexp returns the compiled regular expression for matching non-default keyspace group id. -func GetCompiledNonDefaultIDRegexp(clusterID uint64) *regexp.Regexp { - rootPath := TSOSvcRootPath(clusterID) +func GetCompiledNonDefaultIDRegexp() *regexp.Regexp { + rootPath := TSOSvcRootPath() pattern := strings.Join([]string{rootPath, constant.KeyspaceGroupsKey, keyspaceGroupsElectionKey, `(\d{5})`, constant.PrimaryKey + `$`}, "/") return regexp.MustCompile(pattern) } @@ -427,11 +430,32 @@ func TimestampPath(tsPath string) string { // /pd/{cluster_id}/timestamp // 2. 
for the non-default keyspace groups: // /ms/{cluster_id}/tso/{group}/gta/timestamp -func FullTimestampPath(clusterID uint64, groupID uint32) string { - rootPath := TSOSvcRootPath(clusterID) +func FullTimestampPath(groupID uint32) string { + rootPath := TSOSvcRootPath() tsPath := TimestampPath(KeyspaceGroupGlobalTSPath(groupID)) if groupID == constant.DefaultKeyspaceGroupID { - rootPath = LegacyRootPath(clusterID) + rootPath = LegacyRootPath() } return path.Join(rootPath, tsPath) } + +const ( + registryKey = "registry" +) + +// RegistryPath returns the full path to store microservice addresses. +func RegistryPath(serviceName, serviceAddr string) string { + return strings.Join([]string{constant.MicroserviceRootPath, + strconv.FormatUint(ClusterID(), 10), serviceName, registryKey, serviceAddr}, "/") +} + +// ServicePath returns the path to store microservice addresses. +func ServicePath(serviceName string) string { + return strings.Join([]string{constant.MicroserviceRootPath, + strconv.FormatUint(ClusterID(), 10), serviceName, registryKey, ""}, "/") +} + +// TSOPath returns the path to store TSO addresses. +func TSOPath() string { + return ServicePath("tso") +} diff --git a/server/api/label_test.go b/server/api/label_test.go index b8191a83753..70e4ab80066 100644 --- a/server/api/label_test.go +++ b/server/api/label_test.go @@ -24,6 +24,7 @@ import ( "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/response" + "github.com/tikv/pd/pkg/utils/keypath" tu "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/server" "github.com/tikv/pd/server/config" @@ -306,7 +307,7 @@ func (suite *strictlyLabelsStoreTestSuite) TestStoreMatch() { for _, testCase := range testCases { resp, err := suite.grpcSvr.PutStore(context.Background(), &pdpb.PutStoreRequest{ - Header: &pdpb.RequestHeader{ClusterId: suite.svr.ClusterID()}, + Header: &pdpb.RequestHeader{ClusterId: keypath.ClusterID()}, Store: &metapb.Store{ Id: testCase.store.Id, Address: testCase.store.Address, @@ -335,7 +336,7 @@ func (suite *strictlyLabelsStoreTestSuite) TestStoreMatch() { tu.StatusOK(re))) for _, testCase := range testCases { resp, err := suite.grpcSvr.PutStore(context.Background(), &pdpb.PutStoreRequest{ - Header: &pdpb.RequestHeader{ClusterId: suite.svr.ClusterID()}, + Header: &pdpb.RequestHeader{ClusterId: keypath.ClusterID()}, Store: &metapb.Store{ Id: testCase.store.Id, Address: testCase.store.Address, diff --git a/server/api/member.go b/server/api/member.go index 10b7a06e121..02cae4bca1e 100644 --- a/server/api/member.go +++ b/server/api/member.go @@ -28,6 +28,7 @@ import ( "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/etcdutil" + "github.com/tikv/pd/pkg/utils/keypath" "github.com/tikv/pd/server" "github.com/unrolled/render" "go.uber.org/zap" @@ -61,7 +62,7 @@ func (h *memberHandler) GetMembers(w http.ResponseWriter, _ *http.Request) { } func getMembers(svr *server.Server) (*pdpb.GetMembersResponse, error) { - req := &pdpb.GetMembersRequest{Header: &pdpb.RequestHeader{ClusterId: svr.ClusterID()}} + req := &pdpb.GetMembersRequest{Header: &pdpb.RequestHeader{ClusterId: keypath.ClusterID()}} grpcServer := &server.GrpcServer{Server: svr} members, err := grpcServer.GetMembers(context.Background(), req) if err != nil { diff --git a/server/api/server_test.go b/server/api/server_test.go index 96c793df527..f2ff4ffb452 100644 --- a/server/api/server_test.go +++ b/server/api/server_test.go @@ -31,6 +31,7 @@ import ( "github.com/tikv/pd/pkg/core" 
"github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/assertutil" + "github.com/tikv/pd/pkg/utils/keypath" "github.com/tikv/pd/pkg/utils/logutil" "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/pkg/versioninfo" @@ -128,7 +129,7 @@ func mustNewCluster(re *require.Assertions, num int, opts ...func(cfg *config.Co func mustBootstrapCluster(re *require.Assertions, s *server.Server) { grpcPDClient := testutil.MustNewGrpcClient(re, s.GetAddr()) req := &pdpb.BootstrapRequest{ - Header: testutil.NewRequestHeader(s.ClusterID()), + Header: testutil.NewRequestHeader(keypath.ClusterID()), Store: store, Region: region, } @@ -158,7 +159,7 @@ func mustPutRegion(re *require.Assertions, svr *server.Server, regionID, storeID func mustPutStore(re *require.Assertions, svr *server.Server, id uint64, state metapb.StoreState, nodeState metapb.NodeState, labels []*metapb.StoreLabel) { s := &server.GrpcServer{Server: svr} _, err := s.PutStore(context.Background(), &pdpb.PutStoreRequest{ - Header: &pdpb.RequestHeader{ClusterId: svr.ClusterID()}, + Header: &pdpb.RequestHeader{ClusterId: keypath.ClusterID()}, Store: &metapb.Store{ Id: id, Address: fmt.Sprintf("tikv%d", id), @@ -171,7 +172,7 @@ func mustPutStore(re *require.Assertions, svr *server.Server, id uint64, state m re.NoError(err) if state == metapb.StoreState_Up { _, err = s.StoreHeartbeat(context.Background(), &pdpb.StoreHeartbeatRequest{ - Header: &pdpb.RequestHeader{ClusterId: svr.ClusterID()}, + Header: &pdpb.RequestHeader{ClusterId: keypath.ClusterID()}, Stats: &pdpb.StoreStats{StoreId: id}, }) re.NoError(err) diff --git a/server/api/store_test.go b/server/api/store_test.go index 05689333395..47c7045ae02 100644 --- a/server/api/store_test.go +++ b/server/api/store_test.go @@ -31,6 +31,7 @@ import ( "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/response" + "github.com/tikv/pd/pkg/utils/keypath" tu "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/pkg/utils/typeutil" "github.com/tikv/pd/pkg/versioninfo" @@ -176,7 +177,7 @@ func (suite *storeTestSuite) TestStoresList() { LastHeartbeat: time.Now().UnixNano() - int64(1*time.Hour), } _, err = s.PutStore(context.Background(), &pdpb.PutStoreRequest{ - Header: &pdpb.RequestHeader{ClusterId: suite.svr.ClusterID()}, + Header: &pdpb.RequestHeader{ClusterId: keypath.ClusterID()}, Store: store, }) re.NoError(err) @@ -190,7 +191,7 @@ func (suite *storeTestSuite) TestStoresList() { // disconnect store store.LastHeartbeat = time.Now().UnixNano() - int64(1*time.Minute) _, err = s.PutStore(context.Background(), &pdpb.PutStoreRequest{ - Header: &pdpb.RequestHeader{ClusterId: suite.svr.ClusterID()}, + Header: &pdpb.RequestHeader{ClusterId: keypath.ClusterID()}, Store: store, }) re.NoError(err) @@ -207,7 +208,7 @@ func (suite *storeTestSuite) TestStoreGet() { url := fmt.Sprintf("%s/store/1", suite.urlPrefix) suite.grpcSvr.StoreHeartbeat( context.Background(), &pdpb.StoreHeartbeatRequest{ - Header: &pdpb.RequestHeader{ClusterId: suite.svr.ClusterID()}, + Header: &pdpb.RequestHeader{ClusterId: keypath.ClusterID()}, Stats: &pdpb.StoreStats{ StoreId: 1, Capacity: 1798985089024, diff --git a/server/cluster/cluster.go b/server/cluster/cluster.go index ce9f06c9ba0..dcf91f71b59 100644 --- a/server/cluster/cluster.go +++ b/server/cluster/cluster.go @@ -164,10 +164,9 @@ type RaftCluster struct { prevStoreLimit map[uint64]map[storelimit.Type]float64 // This below fields are all read-only, we cannot update itself after the raft cluster starts. 
- clusterID uint64 - id id.Allocator - opt *config.PersistOptions - limiter *StoreLimiter + id id.Allocator + opt *config.PersistOptions + limiter *StoreLimiter *schedulingController ruleManager *placement.RuleManager regionLabeler *labeler.RegionLabeler @@ -200,11 +199,18 @@ type Status struct { } // NewRaftCluster create a new cluster. -func NewRaftCluster(ctx context.Context, clusterID uint64, member *member.EmbeddedEtcdMember, basicCluster *core.BasicCluster, storage storage.Storage, regionSyncer *syncer.RegionSyncer, etcdClient *clientv3.Client, - httpClient *http.Client, tsoAllocator *tso.AllocatorManager) *RaftCluster { +func NewRaftCluster( + ctx context.Context, + member *member.EmbeddedEtcdMember, + basicCluster *core.BasicCluster, + storage storage.Storage, + regionSyncer *syncer.RegionSyncer, + etcdClient *clientv3.Client, + httpClient *http.Client, + tsoAllocator *tso.AllocatorManager, +) *RaftCluster { return &RaftCluster{ serverCtx: ctx, - clusterID: clusterID, member: member, regionSyncer: regionSyncer, httpClient: httpClient, @@ -382,7 +388,7 @@ func (c *RaftCluster) Start(s Server) error { func (c *RaftCluster) checkSchedulingService() { if c.isAPIServiceMode { - servers, err := discovery.Discover(c.etcdClient, strconv.FormatUint(c.clusterID, 10), constant.SchedulingServiceName) + servers, err := discovery.Discover(c.etcdClient, constant.SchedulingServiceName) if c.opt.GetMicroServiceConfig().IsSchedulingFallbackEnabled() && (err != nil || len(servers) == 0) { c.startSchedulingJobs(c, c.hbstreams) c.UnsetServiceIndependent(constant.SchedulingServiceName) @@ -2103,8 +2109,8 @@ func (c *RaftCluster) GetMetaCluster() *metapb.Cluster { func (c *RaftCluster) PutMetaCluster(meta *metapb.Cluster) error { c.Lock() defer c.Unlock() - if meta.GetId() != c.clusterID { - return errors.Errorf("invalid cluster %v, mismatch cluster id %d", meta, c.clusterID) + if meta.GetId() != keypath.ClusterID() { + return errors.Errorf("invalid cluster %v, mismatch cluster id %d", meta, keypath.ClusterID()) } return c.putMetaLocked(typeutil.DeepClone(meta, core.ClusterFactory)) } diff --git a/server/cluster/cluster_test.go b/server/cluster/cluster_test.go index a382651cc31..ac7bf5f1443 100644 --- a/server/cluster/cluster_test.go +++ b/server/cluster/cluster_test.go @@ -2619,7 +2619,7 @@ func prepare(setCfg func(*sc.ScheduleConfig), setTc func(*testCluster), run func setCfg(cfg) } tc := newTestCluster(ctx, opt) - hbStreams := hbstream.NewTestHeartbeatStreams(ctx, tc.meta.GetId(), tc, true /* need to run */) + hbStreams := hbstream.NewTestHeartbeatStreams(ctx, tc, true /* need to run */) if setTc != nil { setTc(tc) } diff --git a/server/forward.go b/server/forward.go index 26e3869806d..48f8f84aaa4 100644 --- a/server/forward.go +++ b/server/forward.go @@ -30,6 +30,7 @@ import ( "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/tso" "github.com/tikv/pd/pkg/utils/grpcutil" + "github.com/tikv/pd/pkg/utils/keypath" "github.com/tikv/pd/pkg/utils/logutil" "github.com/tikv/pd/pkg/utils/tsoutil" "github.com/tikv/pd/server/cluster" @@ -423,7 +424,7 @@ func (s *GrpcServer) getGlobalTSO(ctx context.Context) (pdpb.Timestamp, error) { } request := &tsopb.TsoRequest{ Header: &tsopb.RequestHeader{ - ClusterId: s.ClusterID(), + ClusterId: keypath.ClusterID(), KeyspaceId: constant.DefaultKeyspaceID, KeyspaceGroupId: constant.DefaultKeyspaceGroupID, }, diff --git a/server/gc_service.go b/server/gc_service.go index d88dc8488d6..114482fdd39 100644 --- a/server/gc_service.go +++ b/server/gc_service.go @@ 
-49,12 +49,12 @@ func (s *GrpcServer) GetGCSafePointV2(ctx context.Context, request *pdpb.GetGCSa if err != nil { return &pdpb.GetGCSafePointV2Response{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, err } return &pdpb.GetGCSafePointV2Response{ - Header: s.header(), + Header: wrapHeader(), SafePoint: safePoint.SafePoint, }, nil } @@ -91,7 +91,7 @@ func (s *GrpcServer) UpdateGCSafePointV2(ctx context.Context, request *pdpb.Upda } return &pdpb.UpdateGCSafePointV2Response{ - Header: s.header(), + Header: wrapHeader(), NewSafePoint: newSafePoint, }, nil } @@ -133,7 +133,7 @@ func (s *GrpcServer) UpdateServiceSafePointV2(ctx context.Context, request *pdpb return nil, err } return &pdpb.UpdateServiceSafePointV2Response{ - Header: s.header(), + Header: wrapHeader(), ServiceId: []byte(minServiceSafePoint.ServiceID), Ttl: minServiceSafePoint.ExpiredAt - now.Unix(), MinSafePoint: minServiceSafePoint.SafePoint, @@ -158,10 +158,10 @@ func (s *GrpcServer) WatchGCSafePointV2(request *pdpb.WatchGCSafePointV2Request, if res.Err() != nil { var resp pdpb.WatchGCSafePointV2Response if revision < res.CompactRevision { - resp.Header = s.wrapErrorToHeader(pdpb.ErrorType_DATA_COMPACTED, + resp.Header = wrapErrorToHeader(pdpb.ErrorType_DATA_COMPACTED, fmt.Sprintf("required watch revision: %d is smaller than current compact/min revision %d.", revision, res.CompactRevision)) } else { - resp.Header = s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, + resp.Header = wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, fmt.Sprintf("watch channel meet other error %s.", res.Err().Error())) } if err := stream.Send(&resp); err != nil { @@ -185,7 +185,7 @@ func (s *GrpcServer) WatchGCSafePointV2(request *pdpb.WatchGCSafePointV2Request, }) } if len(safePointEvents) > 0 { - if err := stream.Send(&pdpb.WatchGCSafePointV2Response{Header: s.header(), Events: safePointEvents, Revision: res.Header.GetRevision()}); err != nil { + if err := stream.Send(&pdpb.WatchGCSafePointV2Response{Header: wrapHeader(), Events: safePointEvents, Revision: res.Header.GetRevision()}); err != nil { return err } } @@ -226,12 +226,12 @@ func (s *GrpcServer) GetAllGCSafePointV2(ctx context.Context, request *pdpb.GetA if err != nil { return &pdpb.GetAllGCSafePointV2Response{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, err } return &pdpb.GetAllGCSafePointV2Response{ - Header: s.header(), + Header: wrapHeader(), GcSafePoints: gcSafePoints, Revision: revision, }, nil diff --git a/server/grpc_service.go b/server/grpc_service.go index ec03819ccaf..25d5d3ed8e7 100644 --- a/server/grpc_service.go +++ b/server/grpc_service.go @@ -42,6 +42,7 @@ import ( "github.com/tikv/pd/pkg/storage/kv" "github.com/tikv/pd/pkg/tso" "github.com/tikv/pd/pkg/utils/grpcutil" + "github.com/tikv/pd/pkg/utils/keypath" "github.com/tikv/pd/pkg/utils/logutil" "github.com/tikv/pd/pkg/utils/syncutil" "github.com/tikv/pd/pkg/utils/tsoutil" @@ -268,7 +269,7 @@ func (s *GrpcServer) GetClusterInfo(context.Context, *pdpb.GetClusterInfoRequest // at startup and needs to get the cluster ID with the first request (i.e. GetMembers). 
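The gc_service.go and grpc_service.go hunks in this stretch are one mechanical substitution: the response-header builders (header, wrapErrorToHeader, errorHeader, notBootstrappedHeader, invalidValue, regionNotFound) drop their *GrpcServer receiver and become package-level functions, since the cluster ID they embed is no longer a server field. A minimal self-contained sketch of that shape, using toy stand-ins rather than the real pdpb or keypath types:

package main

import (
	"fmt"
	"sync/atomic"
)

// Toy stand-in; the real code uses pdpb.ResponseHeader and keypath.ClusterID().
type ResponseHeader struct {
	ClusterID uint64
	ErrMsg    string
}

// Assumed to model the process-global ID behind keypath.ClusterID().
var clusterID atomic.Uint64

// Before: func (s *GrpcServer) header() read s.ClusterID().
// After: a plain function reads the global, so any handler can call it without a receiver.
func wrapHeader() *ResponseHeader {
	return &ResponseHeader{ClusterID: clusterID.Load()}
}

func wrapErrorToHeader(msg string) *ResponseHeader {
	h := wrapHeader()
	h.ErrMsg = msg
	return h
}

func main() {
	clusterID.Store(42)
	fmt.Println(wrapHeader().ClusterID)                       // 42
	fmt.Println(wrapErrorToHeader("not bootstrapped").ErrMsg) // "not bootstrapped"
}

Handlers then change only at the call site, e.g. Header: s.header() becomes Header: wrapHeader(), which is exactly what the surrounding hunks show.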
if s.IsClosed() { return &pdpb.GetClusterInfoResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, errs.ErrServerNotStarted.FastGenByArgs().Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, errs.ErrServerNotStarted.FastGenByArgs().Error()), }, nil } @@ -282,7 +283,7 @@ func (s *GrpcServer) GetClusterInfo(context.Context, *pdpb.GetClusterInfoRequest } return &pdpb.GetClusterInfoResponse{ - Header: s.header(), + Header: wrapHeader(), ServiceModes: svcModes, TsoUrls: tsoServiceAddrs, }, nil @@ -301,7 +302,7 @@ func (s *GrpcServer) GetMinTS( defer done() } else { return &pdpb.GetMinTSResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -330,13 +331,13 @@ func (s *GrpcServer) GetMinTS( } if err != nil { return &pdpb.GetMinTSResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), Timestamp: minTS, }, nil } return &pdpb.GetMinTSResponse{ - Header: s.header(), + Header: wrapHeader(), Timestamp: minTS, }, nil } @@ -426,7 +427,7 @@ func (s *GrpcServer) getMinTSFromSingleServer( resp, err := tsopb.NewTSOClient(cc).GetMinTS( cctx, &tsopb.GetMinTSRequest{ Header: &tsopb.RequestHeader{ - ClusterId: s.ClusterID(), + ClusterId: keypath.ClusterID(), }, DcLocation: dcLocation, }) @@ -458,7 +459,7 @@ func (s *GrpcServer) GetMembers(context.Context, *pdpb.GetMembersRequest) (*pdpb defer done() } else { return &pdpb.GetMembersResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -466,13 +467,13 @@ func (s *GrpcServer) GetMembers(context.Context, *pdpb.GetMembersRequest) (*pdpb // at startup and needs to get the cluster ID with the first request (i.e. GetMembers). 
if s.IsClosed() { return &pdpb.GetMembersResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, errs.ErrServerNotStarted.FastGenByArgs().Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, errs.ErrServerNotStarted.FastGenByArgs().Error()), }, nil } members, err := cluster.GetMembers(s.GetClient()) if err != nil { return &pdpb.GetMembersResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } @@ -492,7 +493,7 @@ func (s *GrpcServer) GetMembers(context.Context, *pdpb.GetMembersRequest) (*pdpb } if err != nil { return &pdpb.GetMembersResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } @@ -505,7 +506,7 @@ func (s *GrpcServer) GetMembers(context.Context, *pdpb.GetMembersRequest) (*pdpb } return &pdpb.GetMembersResponse{ - Header: s.header(), + Header: wrapHeader(), Members: members, Leader: pdLeader, EtcdLeader: etcdLeader, @@ -574,7 +575,7 @@ func (s *GrpcServer) Tso(stream pdpb.PD_TsoServer) error { if s.IsClosed() { return status.Errorf(codes.Unknown, "server not started") } - if clusterID := s.ClusterID(); request.GetHeader().GetClusterId() != clusterID { + if clusterID := keypath.ClusterID(); request.GetHeader().GetClusterId() != clusterID { return status.Errorf(codes.FailedPrecondition, "mismatch cluster id, need %d but got %d", clusterID, request.GetHeader().GetClusterId()) } @@ -587,7 +588,7 @@ func (s *GrpcServer) Tso(stream pdpb.PD_TsoServer) error { return status.Error(codes.Unknown, err.Error()) } response := &pdpb.TsoResponse{ - Header: s.header(), + Header: wrapHeader(), Timestamp: &ts, Count: count, } @@ -606,7 +607,7 @@ func (s *GrpcServer) Bootstrap(ctx context.Context, request *pdpb.BootstrapReque defer done() } else { return &pdpb.BootstrapResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -626,18 +627,18 @@ func (s *GrpcServer) Bootstrap(ctx context.Context, request *pdpb.BootstrapReque Message: "cluster is already bootstrapped", } return &pdpb.BootstrapResponse{ - Header: s.errorHeader(err), + Header: errorHeader(err), }, nil } res, err := s.bootstrapCluster(request) if err != nil { return &pdpb.BootstrapResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } - res.Header = s.header() + res.Header = wrapHeader() return res, nil } @@ -650,7 +651,7 @@ func (s *GrpcServer) IsBootstrapped(ctx context.Context, request *pdpb.IsBootstr defer done() } else { return &pdpb.IsBootstrappedResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -665,7 +666,7 @@ func (s *GrpcServer) IsBootstrapped(ctx context.Context, request *pdpb.IsBootstr rc := s.GetRaftCluster() return &pdpb.IsBootstrappedResponse{ - Header: s.header(), + Header: wrapHeader(), Bootstrapped: rc != nil, }, nil } @@ -679,7 +680,7 @@ func (s *GrpcServer) AllocID(ctx context.Context, request *pdpb.AllocIDRequest) defer done() } else { return &pdpb.AllocIDResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -696,12 +697,12 @@ func (s *GrpcServer) AllocID(ctx context.Context, request *pdpb.AllocIDRequest) id, 
err := s.idAllocator.Alloc() if err != nil { return &pdpb.AllocIDResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } return &pdpb.AllocIDResponse{ - Header: s.header(), + Header: wrapHeader(), Id: id, }, nil } @@ -715,7 +716,7 @@ func (s *GrpcServer) IsSnapshotRecovering(ctx context.Context, _ *pdpb.IsSnapsho defer done() } else { return &pdpb.IsSnapshotRecoveringResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -723,11 +724,11 @@ func (s *GrpcServer) IsSnapshotRecovering(ctx context.Context, _ *pdpb.IsSnapsho marked, err := s.Server.IsSnapshotRecovering(ctx) if err != nil { return &pdpb.IsSnapshotRecoveringResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } return &pdpb.IsSnapshotRecoveringResponse{ - Header: s.header(), + Header: wrapHeader(), Marked: marked, }, nil } @@ -741,7 +742,7 @@ func (s *GrpcServer) GetStore(ctx context.Context, request *pdpb.GetStoreRequest defer done() } else { return &pdpb.GetStoreResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -755,19 +756,19 @@ func (s *GrpcServer) GetStore(ctx context.Context, request *pdpb.GetStoreRequest } rc := s.GetRaftCluster() if rc == nil { - return &pdpb.GetStoreResponse{Header: s.notBootstrappedHeader()}, nil + return &pdpb.GetStoreResponse{Header: notBootstrappedHeader()}, nil } storeID := request.GetStoreId() store := rc.GetStore(storeID) if store == nil { return &pdpb.GetStoreResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, fmt.Sprintf("invalid store ID %d, not found", storeID)), }, nil } return &pdpb.GetStoreResponse{ - Header: s.header(), + Header: wrapHeader(), Store: store.GetMeta(), Stats: store.GetStoreStats(), }, nil @@ -797,7 +798,7 @@ func (s *GrpcServer) PutStore(ctx context.Context, request *pdpb.PutStoreRequest defer done() } else { return &pdpb.PutStoreResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -812,27 +813,27 @@ func (s *GrpcServer) PutStore(ctx context.Context, request *pdpb.PutStoreRequest rc := s.GetRaftCluster() if rc == nil { - return &pdpb.PutStoreResponse{Header: s.notBootstrappedHeader()}, nil + return &pdpb.PutStoreResponse{Header: notBootstrappedHeader()}, nil } store := request.GetStore() if pberr := checkStore(rc, store.GetId()); pberr != nil { return &pdpb.PutStoreResponse{ - Header: s.errorHeader(pberr), + Header: errorHeader(pberr), }, nil } // NOTE: can be removed when placement rules feature is enabled by default. 
if !s.GetConfig().Replication.EnablePlacementRules && core.IsStoreContainLabel(store, core.EngineKey, core.EngineTiFlash) { return &pdpb.PutStoreResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, "placement rules is disabled"), }, nil } if err := rc.PutMetaStore(store); err != nil { return &pdpb.PutStoreResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } @@ -840,7 +841,7 @@ func (s *GrpcServer) PutStore(ctx context.Context, request *pdpb.PutStoreRequest CheckPDVersionWithClusterVersion(s.persistOptions) return &pdpb.PutStoreResponse{ - Header: s.header(), + Header: wrapHeader(), ReplicationStatus: rc.GetReplicationMode().GetReplicationStatus(), }, nil } @@ -854,7 +855,7 @@ func (s *GrpcServer) GetAllStores(ctx context.Context, request *pdpb.GetAllStore defer done() } else { return &pdpb.GetAllStoresResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -869,7 +870,7 @@ func (s *GrpcServer) GetAllStores(ctx context.Context, request *pdpb.GetAllStore rc := s.GetRaftCluster() if rc == nil { - return &pdpb.GetAllStoresResponse{Header: s.notBootstrappedHeader()}, nil + return &pdpb.GetAllStoresResponse{Header: notBootstrappedHeader()}, nil } // Don't return tombstone stores. @@ -885,7 +886,7 @@ func (s *GrpcServer) GetAllStores(ctx context.Context, request *pdpb.GetAllStore } return &pdpb.GetAllStoresResponse{ - Header: s.header(), + Header: wrapHeader(), Stores: stores, }, nil } @@ -899,7 +900,7 @@ func (s *GrpcServer) StoreHeartbeat(ctx context.Context, request *pdpb.StoreHear defer done() } else { return &pdpb.StoreHeartbeatResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, errs.ErrRateLimitExceeded.FastGenByArgs().Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, errs.ErrRateLimitExceeded.FastGenByArgs().Error()), }, nil } } @@ -917,24 +918,24 @@ func (s *GrpcServer) StoreHeartbeat(ctx context.Context, request *pdpb.StoreHear } rc := s.GetRaftCluster() if rc == nil { - return &pdpb.StoreHeartbeatResponse{Header: s.notBootstrappedHeader()}, nil + return &pdpb.StoreHeartbeatResponse{Header: notBootstrappedHeader()}, nil } if pberr := checkStore(rc, request.GetStats().GetStoreId()); pberr != nil { return &pdpb.StoreHeartbeatResponse{ - Header: s.errorHeader(pberr), + Header: errorHeader(pberr), }, nil } storeID := request.GetStats().GetStoreId() store := rc.GetStore(storeID) if store == nil { return &pdpb.StoreHeartbeatResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, fmt.Sprintf("store %v not found", storeID)), }, nil } - resp := &pdpb.StoreHeartbeatResponse{Header: s.header()} + resp := &pdpb.StoreHeartbeatResponse{Header: wrapHeader()} // Bypass stats handling if the store report for unsafe recover is not empty. 
if request.GetStoreReport() == nil { storeAddress := store.GetAddress() @@ -944,7 +945,7 @@ func (s *GrpcServer) StoreHeartbeat(ctx context.Context, request *pdpb.StoreHear err := rc.HandleStoreHeartbeat(request, resp) if err != nil { return &pdpb.StoreHeartbeatResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } @@ -1130,7 +1131,7 @@ func (s *GrpcServer) ReportBuckets(stream pdpb.PD_ReportBucketsServer) error { rc := s.GetRaftCluster() if rc == nil { resp := &pdpb.ReportBucketsResponse{ - Header: s.notBootstrappedHeader(), + Header: notBootstrappedHeader(), } err := server.send(resp) return errors.WithStack(err) @@ -1243,7 +1244,7 @@ func (s *GrpcServer) RegionHeartbeat(stream pdpb.PD_RegionHeartbeatServer) error rc := s.GetRaftCluster() if rc == nil { resp := &pdpb.RegionHeartbeatResponse{ - Header: s.notBootstrappedHeader(), + Header: notBootstrappedHeader(), } err := server.Send(resp) return errors.WithStack(err) @@ -1401,7 +1402,7 @@ func (s *GrpcServer) GetRegion(ctx context.Context, request *pdpb.GetRegionReque defer done() } else { return &pdpb.GetRegionResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -1422,22 +1423,22 @@ func (s *GrpcServer) GetRegion(ctx context.Context, request *pdpb.GetRegionReque if *followerHandle { rc = s.cluster if !rc.GetRegionSyncer().IsRunning() { - return &pdpb.GetRegionResponse{Header: s.regionNotFound()}, nil + return &pdpb.GetRegionResponse{Header: regionNotFound()}, nil } region = rc.GetRegionByKey(request.GetRegionKey()) if region == nil { log.Warn("follower get region nil", zap.String("key", string(request.GetRegionKey()))) - return &pdpb.GetRegionResponse{Header: s.regionNotFound()}, nil + return &pdpb.GetRegionResponse{Header: regionNotFound()}, nil } } else { rc = s.GetRaftCluster() if rc == nil { - return &pdpb.GetRegionResponse{Header: s.notBootstrappedHeader()}, nil + return &pdpb.GetRegionResponse{Header: notBootstrappedHeader()}, nil } region = rc.GetRegionByKey(request.GetRegionKey()) if region == nil { log.Warn("leader get region nil", zap.String("key", string(request.GetRegionKey()))) - return &pdpb.GetRegionResponse{Header: s.header()}, nil + return &pdpb.GetRegionResponse{Header: wrapHeader()}, nil } } @@ -1447,7 +1448,7 @@ func (s *GrpcServer) GetRegion(ctx context.Context, request *pdpb.GetRegionReque buckets = region.GetBuckets() } return &pdpb.GetRegionResponse{ - Header: s.header(), + Header: wrapHeader(), Region: region.GetMeta(), Leader: region.GetLeader(), DownPeers: region.GetDownPeers(), @@ -1465,7 +1466,7 @@ func (s *GrpcServer) GetPrevRegion(ctx context.Context, request *pdpb.GetRegionR defer done() } else { return &pdpb.GetRegionResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -1484,21 +1485,21 @@ func (s *GrpcServer) GetPrevRegion(ctx context.Context, request *pdpb.GetRegionR // no need to check running status rc = s.cluster if !rc.GetRegionSyncer().IsRunning() { - return &pdpb.GetRegionResponse{Header: s.regionNotFound()}, nil + return &pdpb.GetRegionResponse{Header: regionNotFound()}, nil } } else { rc = s.GetRaftCluster() if rc == nil { - return &pdpb.GetRegionResponse{Header: s.notBootstrappedHeader()}, nil + return &pdpb.GetRegionResponse{Header: notBootstrappedHeader()}, nil } } region := 
rc.GetPrevRegionByKey(request.GetRegionKey()) if region == nil { if *followerHandle { - return &pdpb.GetRegionResponse{Header: s.regionNotFound()}, nil + return &pdpb.GetRegionResponse{Header: regionNotFound()}, nil } - return &pdpb.GetRegionResponse{Header: s.header()}, nil + return &pdpb.GetRegionResponse{Header: wrapHeader()}, nil } var buckets *metapb.Buckets // FIXME: If the bucket is disabled dynamically, the bucket information is returned unexpectedly @@ -1506,7 +1507,7 @@ func (s *GrpcServer) GetPrevRegion(ctx context.Context, request *pdpb.GetRegionR buckets = region.GetBuckets() } return &pdpb.GetRegionResponse{ - Header: s.header(), + Header: wrapHeader(), Region: region.GetMeta(), Leader: region.GetLeader(), DownPeers: region.GetDownPeers(), @@ -1524,7 +1525,7 @@ func (s *GrpcServer) GetRegionByID(ctx context.Context, request *pdpb.GetRegionB defer done() } else { return &pdpb.GetRegionResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -1542,12 +1543,12 @@ func (s *GrpcServer) GetRegionByID(ctx context.Context, request *pdpb.GetRegionB if *followerHandle { rc = s.cluster if !rc.GetRegionSyncer().IsRunning() { - return &pdpb.GetRegionResponse{Header: s.regionNotFound()}, nil + return &pdpb.GetRegionResponse{Header: regionNotFound()}, nil } } else { rc = s.GetRaftCluster() if rc == nil { - return &pdpb.GetRegionResponse{Header: s.regionNotFound()}, nil + return &pdpb.GetRegionResponse{Header: regionNotFound()}, nil } } region := rc.GetRegion(request.GetRegionId()) @@ -1558,16 +1559,16 @@ func (s *GrpcServer) GetRegionByID(ctx context.Context, request *pdpb.GetRegionB }) if region == nil { if *followerHandle { - return &pdpb.GetRegionResponse{Header: s.regionNotFound()}, nil + return &pdpb.GetRegionResponse{Header: regionNotFound()}, nil } - return &pdpb.GetRegionResponse{Header: s.header()}, nil + return &pdpb.GetRegionResponse{Header: wrapHeader()}, nil } var buckets *metapb.Buckets if !*followerHandle && rc.GetStoreConfig().IsEnableRegionBucket() && request.GetNeedBuckets() { buckets = region.GetBuckets() } return &pdpb.GetRegionResponse{ - Header: s.header(), + Header: wrapHeader(), Region: region.GetMeta(), Leader: region.GetLeader(), DownPeers: region.GetDownPeers(), @@ -1586,7 +1587,7 @@ func (s *GrpcServer) ScanRegions(ctx context.Context, request *pdpb.ScanRegionsR defer done() } else { return &pdpb.ScanRegionsResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -1604,19 +1605,19 @@ func (s *GrpcServer) ScanRegions(ctx context.Context, request *pdpb.ScanRegionsR if *followerHandle { rc = s.cluster if !rc.GetRegionSyncer().IsRunning() { - return &pdpb.ScanRegionsResponse{Header: s.regionNotFound()}, nil + return &pdpb.ScanRegionsResponse{Header: regionNotFound()}, nil } } else { rc = s.GetRaftCluster() if rc == nil { - return &pdpb.ScanRegionsResponse{Header: s.notBootstrappedHeader()}, nil + return &pdpb.ScanRegionsResponse{Header: notBootstrappedHeader()}, nil } } regions := rc.ScanRegions(request.GetStartKey(), request.GetEndKey(), int(request.GetLimit())) if *followerHandle && len(regions) == 0 { - return &pdpb.ScanRegionsResponse{Header: s.regionNotFound()}, nil + return &pdpb.ScanRegionsResponse{Header: regionNotFound()}, nil } - resp := &pdpb.ScanRegionsResponse{Header: s.header()} + resp := &pdpb.ScanRegionsResponse{Header: wrapHeader()} for _, r := range 
regions { leader := r.GetLeader() if leader == nil { @@ -1644,7 +1645,7 @@ func (s *GrpcServer) BatchScanRegions(ctx context.Context, request *pdpb.BatchSc defer done() } else { return &pdpb.BatchScanRegionsResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -1662,12 +1663,12 @@ func (s *GrpcServer) BatchScanRegions(ctx context.Context, request *pdpb.BatchSc if *followerHandle { rc = s.cluster if !rc.GetRegionSyncer().IsRunning() { - return &pdpb.BatchScanRegionsResponse{Header: s.regionNotFound()}, nil + return &pdpb.BatchScanRegionsResponse{Header: regionNotFound()}, nil } } else { rc = s.GetRaftCluster() if rc == nil { - return &pdpb.BatchScanRegionsResponse{Header: s.notBootstrappedHeader()}, nil + return &pdpb.BatchScanRegionsResponse{Header: notBootstrappedHeader()}, nil } } needBucket := request.GetNeedBuckets() && !*followerHandle && rc.GetStoreConfig().IsEnableRegionBucket() @@ -1678,11 +1679,11 @@ func (s *GrpcServer) BatchScanRegions(ctx context.Context, request *pdpb.BatchSc for i, reqRange := range reqRanges { if i > 0 { if bytes.Compare(reqRange.StartKey, reqRanges[i-1].EndKey) < 0 { - return &pdpb.BatchScanRegionsResponse{Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, "invalid key range, ranges overlapped")}, nil + return &pdpb.BatchScanRegionsResponse{Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, "invalid key range, ranges overlapped")}, nil } } if len(reqRange.EndKey) > 0 && bytes.Compare(reqRange.StartKey, reqRange.EndKey) > 0 { - return &pdpb.BatchScanRegionsResponse{Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, "invalid key range, start key > end key")}, nil + return &pdpb.BatchScanRegionsResponse{Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, "invalid key range, start key > end key")}, nil } keyRanges.Append(reqRange.StartKey, reqRange.EndKey) } @@ -1695,11 +1696,11 @@ func (s *GrpcServer) BatchScanRegions(ctx context.Context, request *pdpb.BatchSc if err != nil { if errs.ErrRegionNotAdjacent.Equal(multierr.Errors(err)[0]) { return &pdpb.BatchScanRegionsResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_REGIONS_NOT_CONTAIN_ALL_KEY_RANGE, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_REGIONS_NOT_CONTAIN_ALL_KEY_RANGE, err.Error()), }, nil } return &pdpb.BatchScanRegionsResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } regions := make([]*pdpb.Region, 0, len(res)) @@ -1721,9 +1722,9 @@ func (s *GrpcServer) BatchScanRegions(ctx context.Context, request *pdpb.BatchSc }) } if *followerHandle && len(regions) == 0 { - return &pdpb.BatchScanRegionsResponse{Header: s.regionNotFound()}, nil + return &pdpb.BatchScanRegionsResponse{Header: regionNotFound()}, nil } - resp := &pdpb.BatchScanRegionsResponse{Header: s.header(), Regions: regions} + resp := &pdpb.BatchScanRegionsResponse{Header: wrapHeader(), Regions: regions} return resp, nil } @@ -1736,7 +1737,7 @@ func (s *GrpcServer) AskSplit(ctx context.Context, request *pdpb.AskSplitRequest defer done() } else { return &pdpb.AskSplitResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -1751,23 +1752,23 @@ func (s *GrpcServer) AskSplit(ctx context.Context, request *pdpb.AskSplitRequest rc := s.GetRaftCluster() if rc == nil { - return &pdpb.AskSplitResponse{Header: s.notBootstrappedHeader()}, 
nil + return &pdpb.AskSplitResponse{Header: notBootstrappedHeader()}, nil } if request.GetRegion() == nil { return &pdpb.AskSplitResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_REGION_NOT_FOUND, + Header: wrapErrorToHeader(pdpb.ErrorType_REGION_NOT_FOUND, "missing region for split"), }, nil } split, err := rc.HandleAskSplit(request) if err != nil { return &pdpb.AskSplitResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } return &pdpb.AskSplitResponse{ - Header: s.header(), + Header: wrapHeader(), NewRegionId: split.NewRegionId, NewPeerIds: split.NewPeerIds, }, nil @@ -1782,21 +1783,21 @@ func (s *GrpcServer) AskBatchSplit(ctx context.Context, request *pdpb.AskBatchSp defer done() } else { return &pdpb.AskBatchSplitResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } rc := s.GetRaftCluster() if rc == nil { - return &pdpb.AskBatchSplitResponse{Header: s.notBootstrappedHeader()}, nil + return &pdpb.AskBatchSplitResponse{Header: notBootstrappedHeader()}, nil } if rc.IsServiceIndependent(constant.SchedulingServiceName) { forwardCli, err := s.updateSchedulingClient(ctx) if err != nil { return &pdpb.AskBatchSplitResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } cli := forwardCli.getClient() @@ -1832,19 +1833,19 @@ func (s *GrpcServer) AskBatchSplit(ctx context.Context, request *pdpb.AskBatchSp } if request.GetRegion() == nil { return &pdpb.AskBatchSplitResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_REGION_NOT_FOUND, + Header: wrapErrorToHeader(pdpb.ErrorType_REGION_NOT_FOUND, "missing region for split"), }, nil } split, err := rc.HandleAskBatchSplit(request) if err != nil { return &pdpb.AskBatchSplitResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } return &pdpb.AskBatchSplitResponse{ - Header: s.header(), + Header: wrapHeader(), Ids: split.Ids, }, nil } @@ -1858,7 +1859,7 @@ func (s *GrpcServer) ReportSplit(ctx context.Context, request *pdpb.ReportSplitR defer done() } else { return &pdpb.ReportSplitResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -1873,17 +1874,17 @@ func (s *GrpcServer) ReportSplit(ctx context.Context, request *pdpb.ReportSplitR rc := s.GetRaftCluster() if rc == nil { - return &pdpb.ReportSplitResponse{Header: s.notBootstrappedHeader()}, nil + return &pdpb.ReportSplitResponse{Header: notBootstrappedHeader()}, nil } _, err := rc.HandleReportSplit(request) if err != nil { return &pdpb.ReportSplitResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } return &pdpb.ReportSplitResponse{ - Header: s.header(), + Header: wrapHeader(), }, nil } @@ -1896,7 +1897,7 @@ func (s *GrpcServer) ReportBatchSplit(ctx context.Context, request *pdpb.ReportB defer done() } else { return &pdpb.ReportBatchSplitResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -1911,18 +1912,18 @@ func (s *GrpcServer) ReportBatchSplit(ctx context.Context, request *pdpb.ReportB rc := 
s.GetRaftCluster() if rc == nil { - return &pdpb.ReportBatchSplitResponse{Header: s.notBootstrappedHeader()}, nil + return &pdpb.ReportBatchSplitResponse{Header: notBootstrappedHeader()}, nil } _, err := rc.HandleBatchReportSplit(request) if err != nil { return &pdpb.ReportBatchSplitResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } return &pdpb.ReportBatchSplitResponse{ - Header: s.header(), + Header: wrapHeader(), }, nil } @@ -1935,7 +1936,7 @@ func (s *GrpcServer) GetClusterConfig(ctx context.Context, request *pdpb.GetClus defer done() } else { return &pdpb.GetClusterConfigResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -1950,10 +1951,10 @@ func (s *GrpcServer) GetClusterConfig(ctx context.Context, request *pdpb.GetClus rc := s.GetRaftCluster() if rc == nil { - return &pdpb.GetClusterConfigResponse{Header: s.notBootstrappedHeader()}, nil + return &pdpb.GetClusterConfigResponse{Header: notBootstrappedHeader()}, nil } return &pdpb.GetClusterConfigResponse{ - Header: s.header(), + Header: wrapHeader(), Cluster: rc.GetMetaCluster(), }, nil } @@ -1967,7 +1968,7 @@ func (s *GrpcServer) PutClusterConfig(ctx context.Context, request *pdpb.PutClus defer done() } else { return &pdpb.PutClusterConfigResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -1982,12 +1983,12 @@ func (s *GrpcServer) PutClusterConfig(ctx context.Context, request *pdpb.PutClus rc := s.GetRaftCluster() if rc == nil { - return &pdpb.PutClusterConfigResponse{Header: s.notBootstrappedHeader()}, nil + return &pdpb.PutClusterConfigResponse{Header: notBootstrappedHeader()}, nil } conf := request.GetCluster() if err := rc.PutMetaCluster(conf); err != nil { return &pdpb.PutClusterConfigResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } @@ -1995,7 +1996,7 @@ func (s *GrpcServer) PutClusterConfig(ctx context.Context, request *pdpb.PutClus log.Info("put cluster config ok", zap.Reflect("config", conf)) return &pdpb.PutClusterConfigResponse{ - Header: s.header(), + Header: wrapHeader(), }, nil } @@ -2008,21 +2009,21 @@ func (s *GrpcServer) ScatterRegion(ctx context.Context, request *pdpb.ScatterReg defer done() } else { return &pdpb.ScatterRegionResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } rc := s.GetRaftCluster() if rc == nil { - return &pdpb.ScatterRegionResponse{Header: s.notBootstrappedHeader()}, nil + return &pdpb.ScatterRegionResponse{Header: notBootstrappedHeader()}, nil } if rc.IsServiceIndependent(constant.SchedulingServiceName) { forwardCli, err := s.updateSchedulingClient(ctx) if err != nil { return &pdpb.ScatterRegionResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } cli := forwardCli.getClient() @@ -2037,7 +2038,7 @@ func (s *GrpcServer) ScatterRegion(ctx context.Context, request *pdpb.ScatterReg } if len(regionsID) == 0 { return &pdpb.ScatterRegionResponse{ - Header: s.invalidValue("regions id is required"), + Header: invalidValue("regions id is required"), }, nil } req := &schedulingpb.ScatterRegionsRequest{ @@ -2076,7 +2077,7 @@ 
func (s *GrpcServer) ScatterRegion(ctx context.Context, request *pdpb.ScatterReg return nil, err } return &pdpb.ScatterRegionResponse{ - Header: s.header(), + Header: wrapHeader(), FinishedPercentage: uint64(percentage), }, nil } @@ -2086,7 +2087,7 @@ func (s *GrpcServer) ScatterRegion(ctx context.Context, request *pdpb.ScatterReg if region == nil { if request.GetRegion() == nil { return &pdpb.ScatterRegionResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_REGION_NOT_FOUND, + Header: wrapErrorToHeader(pdpb.ErrorType_REGION_NOT_FOUND, "region %d not found"), }, nil } @@ -2101,14 +2102,14 @@ func (s *GrpcServer) ScatterRegion(ctx context.Context, request *pdpb.ScatterReg if op != nil { if !rc.GetOperatorController().AddOperator(op) { return &pdpb.ScatterRegionResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, "operator canceled because cannot add an operator to the execute queue"), }, nil } } return &pdpb.ScatterRegionResponse{ - Header: s.header(), + Header: wrapHeader(), FinishedPercentage: 100, }, nil } @@ -2122,7 +2123,7 @@ func (s *GrpcServer) GetGCSafePoint(ctx context.Context, request *pdpb.GetGCSafe defer done() } else { return &pdpb.GetGCSafePointResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -2137,7 +2138,7 @@ func (s *GrpcServer) GetGCSafePoint(ctx context.Context, request *pdpb.GetGCSafe rc := s.GetRaftCluster() if rc == nil { - return &pdpb.GetGCSafePointResponse{Header: s.notBootstrappedHeader()}, nil + return &pdpb.GetGCSafePointResponse{Header: notBootstrappedHeader()}, nil } safePoint, err := s.gcSafePointManager.LoadGCSafePoint() @@ -2146,7 +2147,7 @@ func (s *GrpcServer) GetGCSafePoint(ctx context.Context, request *pdpb.GetGCSafe } return &pdpb.GetGCSafePointResponse{ - Header: s.header(), + Header: wrapHeader(), SafePoint: safePoint, }, nil } @@ -2181,7 +2182,7 @@ func (s *GrpcServer) UpdateGCSafePoint(ctx context.Context, request *pdpb.Update defer done() } else { return &pdpb.UpdateGCSafePointResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -2196,7 +2197,7 @@ func (s *GrpcServer) UpdateGCSafePoint(ctx context.Context, request *pdpb.Update rc := s.GetRaftCluster() if rc == nil { - return &pdpb.UpdateGCSafePointResponse{Header: s.notBootstrappedHeader()}, nil + return &pdpb.UpdateGCSafePointResponse{Header: notBootstrappedHeader()}, nil } newSafePoint := request.GetSafePoint() @@ -2216,7 +2217,7 @@ func (s *GrpcServer) UpdateGCSafePoint(ctx context.Context, request *pdpb.Update } return &pdpb.UpdateGCSafePointResponse{ - Header: s.header(), + Header: wrapHeader(), NewSafePoint: newSafePoint, }, nil } @@ -2230,7 +2231,7 @@ func (s *GrpcServer) UpdateServiceGCSafePoint(ctx context.Context, request *pdpb defer done() } else { return &pdpb.UpdateServiceGCSafePointResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -2245,7 +2246,7 @@ func (s *GrpcServer) UpdateServiceGCSafePoint(ctx context.Context, request *pdpb rc := s.GetRaftCluster() if rc == nil { - return &pdpb.UpdateServiceGCSafePointResponse{Header: s.notBootstrappedHeader()}, nil + return &pdpb.UpdateServiceGCSafePointResponse{Header: notBootstrappedHeader()}, nil } var storage endpoint.GCSafePointStorage = s.storage if 
request.TTL <= 0 { @@ -2270,7 +2271,7 @@ func (s *GrpcServer) UpdateServiceGCSafePoint(ctx context.Context, request *pdpb zap.Uint64("safepoint", request.GetSafePoint())) } return &pdpb.UpdateServiceGCSafePointResponse{ - Header: s.header(), + Header: wrapHeader(), ServiceId: []byte(min.ServiceID), TTL: min.ExpiredAt - now.Unix(), MinSafePoint: min.SafePoint, @@ -2286,21 +2287,21 @@ func (s *GrpcServer) GetOperator(ctx context.Context, request *pdpb.GetOperatorR defer done() } else { return &pdpb.GetOperatorResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } rc := s.GetRaftCluster() if rc == nil { - return &pdpb.GetOperatorResponse{Header: s.notBootstrappedHeader()}, nil + return &pdpb.GetOperatorResponse{Header: notBootstrappedHeader()}, nil } if rc.IsServiceIndependent(constant.SchedulingServiceName) { forwardCli, err := s.updateSchedulingClient(ctx) if err != nil { return &pdpb.GetOperatorResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } cli := forwardCli.getClient() @@ -2335,7 +2336,7 @@ func (s *GrpcServer) GetOperator(ctx context.Context, request *pdpb.GetOperatorR requestID := request.GetRegionId() r := opController.GetOperatorStatus(requestID) if r == nil { - header := s.errorHeader(&pdpb.Error{ + header := errorHeader(&pdpb.Error{ Type: pdpb.ErrorType_REGION_NOT_FOUND, Message: "Not Found", }) @@ -2343,7 +2344,7 @@ func (s *GrpcServer) GetOperator(ctx context.Context, request *pdpb.GetOperatorR } return &pdpb.GetOperatorResponse{ - Header: s.header(), + Header: wrapHeader(), RegionId: requestID, Desc: []byte(r.Desc()), Kind: []byte(r.Kind().String()), @@ -2372,36 +2373,36 @@ func (s *GrpcServer) validateRoleInRequest(ctx context.Context, header *pdpb.Req } *allowFollower = true } - if clusterID := s.ClusterID(); header.GetClusterId() != clusterID { + if clusterID := keypath.ClusterID(); header.GetClusterId() != clusterID { return status.Errorf(codes.FailedPrecondition, "mismatch cluster id, need %d but got %d", clusterID, header.GetClusterId()) } return nil } -func (s *GrpcServer) header() *pdpb.ResponseHeader { - clusterID := s.ClusterID() +func wrapHeader() *pdpb.ResponseHeader { + clusterID := keypath.ClusterID() if clusterID == 0 { - return s.wrapErrorToHeader(pdpb.ErrorType_NOT_BOOTSTRAPPED, "cluster id is not ready") + return wrapErrorToHeader(pdpb.ErrorType_NOT_BOOTSTRAPPED, "cluster id is not ready") } return &pdpb.ResponseHeader{ClusterId: clusterID} } -func (s *GrpcServer) wrapErrorToHeader(errorType pdpb.ErrorType, message string) *pdpb.ResponseHeader { - return s.errorHeader(&pdpb.Error{ +func wrapErrorToHeader(errorType pdpb.ErrorType, message string) *pdpb.ResponseHeader { + return errorHeader(&pdpb.Error{ Type: errorType, Message: message, }) } -func (s *GrpcServer) errorHeader(err *pdpb.Error) *pdpb.ResponseHeader { +func errorHeader(err *pdpb.Error) *pdpb.ResponseHeader { return &pdpb.ResponseHeader{ - ClusterId: s.ClusterID(), + ClusterId: keypath.ClusterID(), Error: err, } } -func (s *GrpcServer) notBootstrappedHeader() *pdpb.ResponseHeader { - return s.errorHeader(&pdpb.Error{ +func notBootstrappedHeader() *pdpb.ResponseHeader { + return errorHeader(&pdpb.Error{ Type: pdpb.ErrorType_NOT_BOOTSTRAPPED, Message: "cluster is not bootstrapped", }) @@ -2409,21 +2410,21 @@ func (s *GrpcServer) notBootstrappedHeader() *pdpb.ResponseHeader { func (s *GrpcServer) 
incompatibleVersion(tag string) *pdpb.ResponseHeader { msg := fmt.Sprintf("%s incompatible with current cluster version %s", tag, s.persistOptions.GetClusterVersion()) - return s.errorHeader(&pdpb.Error{ + return errorHeader(&pdpb.Error{ Type: pdpb.ErrorType_INCOMPATIBLE_VERSION, Message: msg, }) } -func (s *GrpcServer) invalidValue(msg string) *pdpb.ResponseHeader { - return s.errorHeader(&pdpb.Error{ +func invalidValue(msg string) *pdpb.ResponseHeader { + return errorHeader(&pdpb.Error{ Type: pdpb.ErrorType_INVALID_VALUE, Message: msg, }) } -func (s *GrpcServer) regionNotFound() *pdpb.ResponseHeader { - return s.errorHeader(&pdpb.Error{ +func regionNotFound() *pdpb.ResponseHeader { + return errorHeader(&pdpb.Error{ Type: pdpb.ErrorType_REGION_NOT_FOUND, Message: "region not found", }) @@ -2501,7 +2502,7 @@ func (s *GrpcServer) SyncMaxTS(_ context.Context, request *pdpb.SyncMaxTSRequest defer done() } else { return &pdpb.SyncMaxTSResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -2509,7 +2510,7 @@ func (s *GrpcServer) SyncMaxTS(_ context.Context, request *pdpb.SyncMaxTSRequest // There is no dc-location found in this server, return err. if tsoAllocatorManager.GetClusterDCLocationsNumber() == 0 { return &pdpb.SyncMaxTSResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, "empty cluster dc-location found, checker may not work properly"), }, nil } @@ -2517,7 +2518,7 @@ func (s *GrpcServer) SyncMaxTS(_ context.Context, request *pdpb.SyncMaxTSRequest allocatorLeaders, err := tsoAllocatorManager.GetHoldingLocalAllocatorLeaders() if err != nil { return &pdpb.SyncMaxTSResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } if !request.GetSkipCheck() { @@ -2532,7 +2533,7 @@ func (s *GrpcServer) SyncMaxTS(_ context.Context, request *pdpb.SyncMaxTSRequest currentLocalTSO, err := allocator.GetCurrentTSO() if err != nil { return &pdpb.SyncMaxTSResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } if tsoutil.CompareTimestamp(currentLocalTSO, maxLocalTS) > 0 { @@ -2551,13 +2552,13 @@ func (s *GrpcServer) SyncMaxTS(_ context.Context, request *pdpb.SyncMaxTSRequest if maxLocalTS == nil { return &pdpb.SyncMaxTSResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, "local tso allocator leaders have changed during the sync, should retry"), }, nil } if request.GetMaxTs() == nil { return &pdpb.SyncMaxTSResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, "empty maxTS in the request, should retry"), }, nil } @@ -2573,7 +2574,7 @@ func (s *GrpcServer) SyncMaxTS(_ context.Context, request *pdpb.SyncMaxTSRequest maxLocalTS.Logical += 1 } return &pdpb.SyncMaxTSResponse{ - Header: s.header(), + Header: wrapHeader(), MaxLocalTs: maxLocalTS, SyncedDcs: syncedDCs, }, nil @@ -2586,13 +2587,13 @@ func (s *GrpcServer) SyncMaxTS(_ context.Context, request *pdpb.SyncMaxTSRequest } if err := allocator.WriteTSO(request.GetMaxTs()); err != nil { return &pdpb.SyncMaxTSResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } syncedDCs = append(syncedDCs, 
allocator.GetDCLocation()) } return &pdpb.SyncMaxTSResponse{ - Header: s.header(), + Header: wrapHeader(), SyncedDcs: syncedDCs, }, nil } @@ -2606,21 +2607,21 @@ func (s *GrpcServer) SplitRegions(ctx context.Context, request *pdpb.SplitRegion defer done() } else { return &pdpb.SplitRegionsResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } rc := s.GetRaftCluster() if rc == nil { - return &pdpb.SplitRegionsResponse{Header: s.notBootstrappedHeader()}, nil + return &pdpb.SplitRegionsResponse{Header: notBootstrappedHeader()}, nil } if rc.IsServiceIndependent(constant.SchedulingServiceName) { forwardCli, err := s.updateSchedulingClient(ctx) if err != nil { return &pdpb.SplitRegionsResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } cli := forwardCli.getClient() @@ -2655,7 +2656,7 @@ func (s *GrpcServer) SplitRegions(ctx context.Context, request *pdpb.SplitRegion finishedPercentage, newRegionIDs := rc.GetRegionSplitter().SplitRegions(ctx, request.GetSplitKeys(), int(request.GetRetryLimit())) return &pdpb.SplitRegionsResponse{ - Header: s.header(), + Header: wrapHeader(), RegionsId: newRegionIDs, FinishedPercentage: uint64(finishedPercentage), }, nil @@ -2672,7 +2673,7 @@ func (s *GrpcServer) SplitAndScatterRegions(ctx context.Context, request *pdpb.S defer done() } else { return &pdpb.SplitAndScatterRegionsResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -2686,7 +2687,7 @@ func (s *GrpcServer) SplitAndScatterRegions(ctx context.Context, request *pdpb.S } rc := s.GetRaftCluster() if rc == nil { - return &pdpb.SplitAndScatterRegionsResponse{Header: s.notBootstrappedHeader()}, nil + return &pdpb.SplitAndScatterRegionsResponse{Header: notBootstrappedHeader()}, nil } splitFinishedPercentage, newRegionIDs := rc.GetRegionSplitter().SplitRegions(ctx, request.GetSplitKeys(), int(request.GetRetryLimit())) scatterFinishedPercentage, err := scatterRegions(rc, newRegionIDs, request.GetGroup(), int(request.GetRetryLimit()), false) @@ -2694,7 +2695,7 @@ func (s *GrpcServer) SplitAndScatterRegions(ctx context.Context, request *pdpb.S return nil, err } return &pdpb.SplitAndScatterRegionsResponse{ - Header: s.header(), + Header: wrapHeader(), RegionsId: newRegionIDs, SplitFinishedPercentage: uint64(splitFinishedPercentage), ScatterFinishedPercentage: uint64(scatterFinishedPercentage), @@ -2738,7 +2739,7 @@ func (s *GrpcServer) GetDCLocationInfo(ctx context.Context, request *pdpb.GetDCL defer done() } else { return &pdpb.GetDCLocationInfoResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -2747,12 +2748,12 @@ func (s *GrpcServer) GetDCLocationInfo(ctx context.Context, request *pdpb.GetDCL if !ok { am.ClusterDCLocationChecker() return &pdpb.GetDCLocationInfoResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, fmt.Sprintf("dc-location %s is not found", request.GetDcLocation())), }, nil } resp := &pdpb.GetDCLocationInfoResponse{ - Header: s.header(), + Header: wrapHeader(), Suffix: info.Suffix, } // Because the number of suffix bits is changing dynamically according to the dc-location number, @@ -2765,7 +2766,7 @@ func (s *GrpcServer) 
GetDCLocationInfo(ctx context.Context, request *pdpb.GetDCL // Please take a look at https://github.com/tikv/pd/issues/3260 for more details. if resp.MaxTs, err = am.GetMaxLocalTSO(ctx); err != nil { return &pdpb.GetDCLocationInfoResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } return resp, nil @@ -2927,10 +2928,10 @@ func (s *GrpcServer) WatchGlobalConfig(req *pdpb.WatchGlobalConfigRequest, serve if res.Err() != nil { var resp pdpb.WatchGlobalConfigResponse if revision < res.CompactRevision { - resp.Header = s.wrapErrorToHeader(pdpb.ErrorType_DATA_COMPACTED, + resp.Header = wrapErrorToHeader(pdpb.ErrorType_DATA_COMPACTED, fmt.Sprintf("required watch revision: %d is smaller than current compact/min revision %d.", revision, res.CompactRevision)) } else { - resp.Header = s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, + resp.Header = wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, fmt.Sprintf("watch channel meet other error %s.", res.Err().Error())) } if err := server.Send(&resp); err != nil { @@ -3000,7 +3001,7 @@ func (s *GrpcServer) ReportMinResolvedTS(ctx context.Context, request *pdpb.Repo defer done() } else { return &pdpb.ReportMinResolvedTsResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -3015,7 +3016,7 @@ func (s *GrpcServer) ReportMinResolvedTS(ctx context.Context, request *pdpb.Repo rc := s.GetRaftCluster() if rc == nil { - return &pdpb.ReportMinResolvedTsResponse{Header: s.notBootstrappedHeader()}, nil + return &pdpb.ReportMinResolvedTsResponse{Header: notBootstrappedHeader()}, nil } storeID := request.GetStoreId() @@ -3027,7 +3028,7 @@ func (s *GrpcServer) ReportMinResolvedTS(ctx context.Context, request *pdpb.Repo zap.Uint64("store", storeID), zap.Uint64("min resolved-ts", minResolvedTS)) return &pdpb.ReportMinResolvedTsResponse{ - Header: s.header(), + Header: wrapHeader(), }, nil } @@ -3040,7 +3041,7 @@ func (s *GrpcServer) SetExternalTimestamp(ctx context.Context, request *pdpb.Set defer done() } else { return &pdpb.SetExternalTimestampResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -3062,10 +3063,10 @@ func (s *GrpcServer) SetExternalTimestamp(ctx context.Context, request *pdpb.Set log.Debug("try to set external timestamp", zap.Uint64("external-ts", externalTS), zap.Uint64("global-ts", globalTS)) if err := s.SetExternalTS(externalTS, globalTS); err != nil { - return &pdpb.SetExternalTimestampResponse{Header: s.invalidValue(err.Error())}, nil + return &pdpb.SetExternalTimestampResponse{Header: invalidValue(err.Error())}, nil } return &pdpb.SetExternalTimestampResponse{ - Header: s.header(), + Header: wrapHeader(), }, nil } @@ -3078,7 +3079,7 @@ func (s *GrpcServer) GetExternalTimestamp(ctx context.Context, request *pdpb.Get defer done() } else { return &pdpb.GetExternalTimestampResponse{ - Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), + Header: wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), }, nil } } @@ -3093,7 +3094,7 @@ func (s *GrpcServer) GetExternalTimestamp(ctx context.Context, request *pdpb.Get timestamp := s.GetExternalTS() return &pdpb.GetExternalTimestampResponse{ - Header: s.header(), + Header: wrapHeader(), Timestamp: timestamp, }, nil } diff --git a/server/keyspace_service.go b/server/keyspace_service.go index 
8a0b3a7b1f0..967457198a9 100644 --- a/server/keyspace_service.go +++ b/server/keyspace_service.go @@ -35,14 +35,14 @@ type KeyspaceServer struct { } // getErrorHeader returns corresponding ResponseHeader based on err. -func (s *KeyspaceServer) getErrorHeader(err error) *pdpb.ResponseHeader { +func getErrorHeader(err error) *pdpb.ResponseHeader { switch err { case keyspace.ErrKeyspaceExists: - return s.wrapErrorToHeader(pdpb.ErrorType_DUPLICATED_ENTRY, err.Error()) + return wrapErrorToHeader(pdpb.ErrorType_DUPLICATED_ENTRY, err.Error()) case keyspace.ErrKeyspaceNotFound: - return s.wrapErrorToHeader(pdpb.ErrorType_ENTRY_NOT_FOUND, err.Error()) + return wrapErrorToHeader(pdpb.ErrorType_ENTRY_NOT_FOUND, err.Error()) default: - return s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()) + return wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()) } } @@ -58,10 +58,10 @@ func (s *KeyspaceServer) LoadKeyspace(_ context.Context, request *keyspacepb.Loa manager := s.GetKeyspaceManager() meta, err := manager.LoadKeyspace(request.GetName()) if err != nil { - return &keyspacepb.LoadKeyspaceResponse{Header: s.getErrorHeader(err)}, nil + return &keyspacepb.LoadKeyspaceResponse{Header: getErrorHeader(err)}, nil } return &keyspacepb.LoadKeyspaceResponse{ - Header: s.header(), + Header: wrapHeader(), Keyspace: meta, }, nil } @@ -97,7 +97,7 @@ func (s *KeyspaceServer) WatchKeyspaces(request *keyspacepb.WatchKeyspacesReques keyspaces = keyspaces[:0] }() err := stream.Send(&keyspacepb.WatchKeyspacesResponse{ - Header: s.header(), + Header: wrapHeader(), Keyspaces: keyspaces}) if err != nil { defer cancel() // cancel context to stop watcher @@ -137,10 +137,10 @@ func (s *KeyspaceServer) UpdateKeyspaceState(_ context.Context, request *keyspac manager := s.GetKeyspaceManager() meta, err := manager.UpdateKeyspaceStateByID(request.GetId(), request.GetState(), time.Now().Unix()) if err != nil { - return &keyspacepb.UpdateKeyspaceStateResponse{Header: s.getErrorHeader(err)}, nil + return &keyspacepb.UpdateKeyspaceStateResponse{Header: getErrorHeader(err)}, nil } return &keyspacepb.UpdateKeyspaceStateResponse{ - Header: s.header(), + Header: wrapHeader(), Keyspace: meta, }, nil } @@ -154,11 +154,11 @@ func (s *KeyspaceServer) GetAllKeyspaces(_ context.Context, request *keyspacepb. manager := s.GetKeyspaceManager() keyspaces, err := manager.LoadRangeKeyspace(request.StartId, int(request.Limit)) if err != nil { - return &keyspacepb.GetAllKeyspacesResponse{Header: s.getErrorHeader(err)}, nil + return &keyspacepb.GetAllKeyspacesResponse{Header: getErrorHeader(err)}, nil } return &keyspacepb.GetAllKeyspacesResponse{ - Header: s.header(), + Header: wrapHeader(), Keyspaces: keyspaces, }, nil } diff --git a/server/server.go b/server/server.go index cc3270de950..760b185a6ff 100644 --- a/server/server.go +++ b/server/server.go @@ -63,6 +63,7 @@ import ( "github.com/tikv/pd/pkg/schedule/hbstream" "github.com/tikv/pd/pkg/schedule/placement" "github.com/tikv/pd/pkg/storage" + "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/storage/kv" "github.com/tikv/pd/pkg/syncer" "github.com/tikv/pd/pkg/systimemon" @@ -90,9 +91,8 @@ import ( const ( serverMetricsInterval = time.Minute // pdRootPath for all pd servers. - pdRootPath = "/pd" - pdAPIPrefix = "/pd/" - pdClusterIDPath = "/pd/cluster_id" + pdRootPath = "/pd" + pdAPIPrefix = "/pd/" // idAllocPath for idAllocator to save persistent window's end. 
idAllocPath = "alloc_id" idAllocLabel = "idalloc" @@ -155,9 +155,7 @@ type Server struct { electionClient *clientv3.Client // http client httpClient *http.Client - // PD cluster ID. - clusterID atomic.Uint64 - rootPath string + rootPath string // Server services. // for id allocator, we can use one allocator for @@ -300,7 +298,7 @@ func CreateServer(ctx context.Context, cfg *config.Config, services []string, le failpoint.Inject("useGlobalRegistry", func() { s.registry = registry.ServerServiceRegistry }) - s.registry.RegisterService("MetaStorage", ms_server.NewService[*Server]) + s.registry.RegisterService("MetaStorage", ms_server.NewService) s.registry.RegisterService("ResourceManager", rm_server.NewService[*Server]) // Register the micro services REST path. s.registry.InstallAllRESTHandler(s, etcdCfg.UserHandlers) @@ -427,18 +425,16 @@ func (s *Server) AddStartCallback(callbacks ...func()) { } func (s *Server) startServer(ctx context.Context) error { - clusterID, err := etcdutil.InitClusterID(s.client, pdClusterIDPath) + clusterID, err := endpoint.InitClusterID(s.client) if err != nil { log.Error("failed to init cluster id", errs.ZapError(err)) return err } - s.clusterID.Store(clusterID) - log.Info("init cluster id", zap.Uint64("cluster-id", clusterID)) // It may lose accuracy if use float64 to store uint64. So we store the cluster id in label. metadataGauge.WithLabelValues(fmt.Sprintf("cluster%d", clusterID)).Set(0) bs.ServerInfoGauge.WithLabelValues(versioninfo.PDReleaseVersion, versioninfo.PDGitHash).Set(float64(time.Now().Unix())) - s.rootPath = keypath.PDRootPath(clusterID) + s.rootPath = keypath.PDRootPath() s.member.InitMemberInfo(s.cfg.AdvertiseClientUrls, s.cfg.AdvertisePeerUrls, s.Name(), s.rootPath) if err := s.member.SetMemberDeployPath(s.member.ID()); err != nil { return err @@ -489,7 +485,7 @@ func (s *Server) startServer(ctx context.Context) error { s.gcSafePointManager = gc.NewSafePointManager(s.storage, s.cfg.PDServerCfg) s.basicCluster = core.NewBasicCluster() - s.cluster = cluster.NewRaftCluster(ctx, clusterID, s.GetMember(), s.GetBasicCluster(), s.GetStorage(), syncer.NewRegionSyncer(s), s.client, s.httpClient, s.tsoAllocatorManager) + s.cluster = cluster.NewRaftCluster(ctx, s.GetMember(), s.GetBasicCluster(), s.GetStorage(), syncer.NewRegionSyncer(s), s.client, s.httpClient, s.tsoAllocatorManager) keyspaceIDAllocator := id.NewAllocator(&id.AllocatorParams{ Client: s.client, RootPath: s.rootPath, @@ -499,11 +495,11 @@ func (s *Server) startServer(ctx context.Context) error { Step: keyspace.AllocStep, }) if s.IsAPIServiceMode() { - s.keyspaceGroupManager = keyspace.NewKeyspaceGroupManager(s.ctx, s.storage, s.client, clusterID) + s.keyspaceGroupManager = keyspace.NewKeyspaceGroupManager(s.ctx, s.storage, s.client) } s.keyspaceManager = keyspace.NewKeyspaceManager(s.ctx, s.storage, s.cluster, keyspaceIDAllocator, &s.cfg.Keyspace, s.keyspaceGroupManager) s.safePointV2Manager = gc.NewSafePointManagerV2(s.ctx, s.storage, s.storage, s.storage) - s.hbStreams = hbstream.NewHeartbeatStreams(ctx, clusterID, "", s.cluster) + s.hbStreams = hbstream.NewHeartbeatStreams(ctx, "", s.cluster) // initial hot_region_storage in here. 
s.hotRegionStorage, err = storage.NewHotRegionsStorage( @@ -708,13 +704,13 @@ func (s *Server) collectEtcdStateMetrics() { } func (s *Server) bootstrapCluster(req *pdpb.BootstrapRequest) (*pdpb.BootstrapResponse, error) { - clusterID := s.ClusterID() + clusterID := keypath.ClusterID() log.Info("try to bootstrap raft cluster", zap.Uint64("cluster-id", clusterID), zap.String("request", fmt.Sprintf("%v", req))) - if err := checkBootstrapRequest(clusterID, req); err != nil { + if err := checkBootstrapRequest(req); err != nil { return nil, err } @@ -937,11 +933,6 @@ func (s *Server) Name() string { return s.cfg.Name } -// ClusterID returns the cluster ID of this server. -func (s *Server) ClusterID() uint64 { - return s.clusterID.Load() -} - // StartTimestamp returns the start timestamp of this server func (s *Server) StartTimestamp() int64 { return s.startTimestamp @@ -1438,7 +1429,7 @@ func (s *Server) DirectlyGetRaftCluster() *cluster.RaftCluster { // GetCluster gets cluster. func (s *Server) GetCluster() *metapb.Cluster { return &metapb.Cluster{ - Id: s.ClusterID(), + Id: keypath.ClusterID(), MaxPeerCount: uint32(s.persistOptions.GetMaxReplicas()), } } @@ -2007,7 +1998,7 @@ func (s *Server) SetServicePrimaryAddr(serviceName, addr string) { func (s *Server) initTSOPrimaryWatcher() { serviceName := constant.TSOServiceName - tsoRootPath := keypath.TSOSvcRootPath(s.ClusterID()) + tsoRootPath := keypath.TSOSvcRootPath() tsoServicePrimaryKey := keypath.KeyspaceGroupPrimaryPath(tsoRootPath, constant.DefaultKeyspaceGroupID) s.tsoPrimaryWatcher = s.initServicePrimaryWatcher(serviceName, tsoServicePrimaryKey) s.tsoPrimaryWatcher.StartWatchLoop() @@ -2015,7 +2006,7 @@ func (s *Server) initTSOPrimaryWatcher() { func (s *Server) initSchedulingPrimaryWatcher() { serviceName := constant.SchedulingServiceName - primaryKey := keypath.SchedulingPrimaryPath(s.ClusterID()) + primaryKey := keypath.SchedulingPrimaryPath() s.schedulingPrimaryWatcher = s.initServicePrimaryWatcher(serviceName, primaryKey) s.schedulingPrimaryWatcher.StartWatchLoop() } diff --git a/server/util.go b/server/util.go index b80a07ab28a..0b396b0f5df 100644 --- a/server/util.go +++ b/server/util.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/utils/apiutil" + "github.com/tikv/pd/pkg/utils/keypath" "github.com/tikv/pd/pkg/versioninfo" "github.com/tikv/pd/server/config" "github.com/urfave/negroni" @@ -56,7 +57,8 @@ func CheckPDVersionWithClusterVersion(opt *config.PersistOptions) { } } -func checkBootstrapRequest(clusterID uint64, req *pdpb.BootstrapRequest) error { +func checkBootstrapRequest(req *pdpb.BootstrapRequest) error { + clusterID := keypath.ClusterID() // TODO: do more check for request fields validation. storeMeta := req.GetStore() diff --git a/tests/cluster.go b/tests/cluster.go index 652db045d34..6ad15e3291f 100644 --- a/tests/cluster.go +++ b/tests/cluster.go @@ -38,6 +38,7 @@ import ( "github.com/tikv/pd/pkg/schedule/schedulers" "github.com/tikv/pd/pkg/swaggerserver" "github.com/tikv/pd/pkg/tso" + "github.com/tikv/pd/pkg/utils/keypath" "github.com/tikv/pd/pkg/utils/logutil" "github.com/tikv/pd/pkg/utils/syncutil" "github.com/tikv/pd/pkg/utils/testutil" @@ -220,10 +221,8 @@ func (s *TestServer) GetServer() *server.Server { } // GetClusterID returns the cluster ID. 
-func (s *TestServer) GetClusterID() uint64 {
-	s.RLock()
-	defer s.RUnlock()
-	return s.server.ClusterID()
+func (*TestServer) GetClusterID() uint64 {
+	return keypath.ClusterID()
 }
 
 // GetLeader returns current leader of PD cluster.
@@ -307,7 +306,7 @@ func (s *TestServer) IsAllocatorLeader(dcLocation string) bool {
 func (s *TestServer) GetEtcdLeader() (string, error) {
 	s.RLock()
 	defer s.RUnlock()
-	req := &pdpb.GetMembersRequest{Header: &pdpb.RequestHeader{ClusterId: s.server.ClusterID()}}
+	req := &pdpb.GetMembersRequest{Header: &pdpb.RequestHeader{ClusterId: keypath.ClusterID()}}
 	members, _ := s.grpcServer.GetMembers(context.TODO(), req)
 	if members.Header.GetError() != nil {
 		return "", errors.WithStack(errors.New(members.Header.GetError().String()))
@@ -319,7 +318,7 @@ func (s *TestServer) GetEtcdLeader() (string, error) {
 func (s *TestServer) GetEtcdLeaderID() (uint64, error) {
 	s.RLock()
 	defer s.RUnlock()
-	req := &pdpb.GetMembersRequest{Header: &pdpb.RequestHeader{ClusterId: s.server.ClusterID()}}
+	req := &pdpb.GetMembersRequest{Header: &pdpb.RequestHeader{ClusterId: keypath.ClusterID()}}
 	members, err := s.grpcServer.GetMembers(context.TODO(), req)
 	if err != nil {
 		return 0, errors.WithStack(err)
@@ -412,7 +411,7 @@ func (s *TestServer) GetStoreRegions(storeID uint64) []*core.RegionInfo {
 // BootstrapCluster is used to bootstrap the cluster.
 func (s *TestServer) BootstrapCluster() error {
 	bootstrapReq := &pdpb.BootstrapRequest{
-		Header: &pdpb.RequestHeader{ClusterId: s.GetClusterID()},
+		Header: &pdpb.RequestHeader{ClusterId: keypath.ClusterID()},
 		Store:  &metapb.Store{Id: 1, Address: "mock://1", LastHeartbeat: time.Now().UnixNano()},
 		Region: &metapb.Region{Id: 2, Peers: []*metapb.Peer{{Id: 3, StoreId: 1, Role: metapb.PeerRole_Voter}}},
 	}
diff --git a/tests/integrations/client/client_test.go b/tests/integrations/client/client_test.go
index dfa7c15120e..bcd9059682d 100644
--- a/tests/integrations/client/client_test.go
+++ b/tests/integrations/client/client_test.go
@@ -49,6 +49,7 @@ import (
 	"github.com/tikv/pd/pkg/storage/endpoint"
 	"github.com/tikv/pd/pkg/tso"
 	"github.com/tikv/pd/pkg/utils/assertutil"
+	"github.com/tikv/pd/pkg/utils/keypath"
 	"github.com/tikv/pd/pkg/utils/testutil"
 	"github.com/tikv/pd/pkg/utils/tsoutil"
 	"github.com/tikv/pd/pkg/utils/typeutil"
@@ -69,39 +70,6 @@ func TestMain(m *testing.M) {
 	goleak.VerifyTestMain(m, testutil.LeakOptions...)
 }
 
-func TestClientClusterIDCheck(t *testing.T) {
-	re := require.New(t)
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-	// Create the cluster #1.
-	cluster1, err := tests.NewTestCluster(ctx, 3)
-	re.NoError(err)
-	defer cluster1.Destroy()
-	endpoints1 := runServer(re, cluster1)
-	// Create the cluster #2.
-	cluster2, err := tests.NewTestCluster(ctx, 3)
-	re.NoError(err)
-	defer cluster2.Destroy()
-	endpoints2 := runServer(re, cluster2)
-	// Try to create a client with the mixed endpoints.
-	_, err = pd.NewClientWithContext(
-		ctx, append(endpoints1, endpoints2...),
-		pd.SecurityOption{}, pd.WithMaxErrorRetry(1),
-	)
-	re.Error(err)
-	re.Contains(err.Error(), "unmatched cluster id")
-	// updateMember should fail due to unmatched cluster ID found.
-	re.NoError(failpoint.Enable("github.com/tikv/pd/client/skipClusterIDCheck", `return(true)`))
-	re.NoError(failpoint.Enable("github.com/tikv/pd/client/skipFirstUpdateMember", `return(true)`))
-	_, err = pd.NewClientWithContext(ctx, []string{endpoints1[0], endpoints2[0]},
-		pd.SecurityOption{}, pd.WithMaxErrorRetry(1),
-	)
-	re.Error(err)
-	re.Contains(err.Error(), "ErrClientGetMember")
-	re.NoError(failpoint.Disable("github.com/tikv/pd/client/skipFirstUpdateMember"))
-	re.NoError(failpoint.Disable("github.com/tikv/pd/client/skipClusterIDCheck"))
-}
-
 func TestClientLeaderChange(t *testing.T) {
 	re := require.New(t)
 	ctx, cancel := context.WithCancel(context.Background())
@@ -706,7 +674,7 @@ func (suite *followerForwardAndHandleTestSuite) SetupSuite() {
 		Peers: peers,
 	}
 	req := &pdpb.RegionHeartbeatRequest{
-		Header: newHeader(leader.GetServer()),
+		Header: newHeader(),
 		Region: region,
 		Leader: peers[0],
 	}
@@ -1240,7 +1208,7 @@ func (suite *clientTestSuite) SetupSuite() {
 	suite.grpcSvr = &server.GrpcServer{Server: suite.srv}
 
 	server.MustWaitLeader(re, []*server.Server{suite.srv})
-	bootstrapServer(re, newHeader(suite.srv), suite.grpcPDClient)
+	bootstrapServer(re, newHeader(), suite.grpcPDClient)
 
 	suite.ctx, suite.clean = context.WithCancel(context.Background())
 	suite.client = setupCli(suite.ctx, re, suite.srv.GetEndpoints())
@@ -1254,7 +1222,7 @@ func (suite *clientTestSuite) SetupSuite() {
 	now := time.Now().UnixNano()
 	for _, store := range stores {
 		suite.grpcSvr.PutStore(context.Background(), &pdpb.PutStoreRequest{
-			Header: newHeader(suite.srv),
+			Header: newHeader(),
 			Store: &metapb.Store{
 				Id:            store.Id,
 				Address:       store.Address,
@@ -1279,9 +1247,9 @@ func (suite *clientTestSuite) TearDownSuite() {
 	suite.cleanup()
 }
 
-func newHeader(srv *server.Server) *pdpb.RequestHeader {
+func newHeader() *pdpb.RequestHeader {
 	return &pdpb.RequestHeader{
-		ClusterId: srv.ClusterID(),
+		ClusterId: keypath.ClusterID(),
 	}
 }
 
@@ -1317,7 +1285,7 @@ func (suite *clientTestSuite) TestGetRegion() {
 		Peers: peers,
 	}
 	req := &pdpb.RegionHeartbeatRequest{
-		Header: newHeader(suite.srv),
+		Header: newHeader(),
 		Region: region,
 		Leader: peers[0],
 	}
@@ -1334,7 +1302,7 @@ func (suite *clientTestSuite) TestGetRegion() {
 			r.Buckets == nil
 	})
 	breq := &pdpb.ReportBucketsRequest{
-		Header: newHeader(suite.srv),
+		Header: newHeader(),
 		Buckets: &metapb.Buckets{
 			RegionId: regionID,
 			Version:  1,
@@ -1397,7 +1365,7 @@ func (suite *clientTestSuite) TestGetPrevRegion() {
 		}
 		regions = append(regions, r)
 		req := &pdpb.RegionHeartbeatRequest{
-			Header: newHeader(suite.srv),
+			Header: newHeader(),
 			Region: r,
 			Leader: peers[0],
 		}
@@ -1436,7 +1404,7 @@ func (suite *clientTestSuite) TestScanRegions() {
 		}
 		regions = append(regions, r)
 		req := &pdpb.RegionHeartbeatRequest{
-			Header: newHeader(suite.srv),
+			Header: newHeader(),
 			Region: r,
 			Leader: peers[0],
 		}
@@ -1507,7 +1475,7 @@ func (suite *clientTestSuite) TestGetRegionByID() {
 		Peers: peers,
 	}
 	req := &pdpb.RegionHeartbeatRequest{
-		Header: newHeader(suite.srv),
+		Header: newHeader(),
 		Region: region,
 		Leader: peers[0],
 	}
@@ -1604,7 +1572,7 @@ func (suite *clientTestSuite) TestGetStore() {
 
 func (suite *clientTestSuite) checkGCSafePoint(re *require.Assertions, expectedSafePoint uint64) {
 	req := &pdpb.GetGCSafePointRequest{
-		Header: newHeader(suite.srv),
+		Header: newHeader(),
 	}
 	resp, err := suite.grpcSvr.GetGCSafePoint(context.Background(), req)
 	re.NoError(err)
@@ -1794,7 +1762,7 @@ func (suite *clientTestSuite) TestScatterRegion() {
 		EndKey:   []byte("ggg"),
 	}
 	req := &pdpb.RegionHeartbeatRequest{
-		Header: newHeader(suite.srv),
+		Header: newHeader(),
 		Region: region,
 		Leader: peers[0],
 	}
@@ -2056,7 +2024,7 @@ func (suite *clientTestSuite) TestBatchScanRegions() {
 		}
 		regions = append(regions, r)
 		req := &pdpb.RegionHeartbeatRequest{
-			Header: newHeader(suite.srv),
+			Header: newHeader(),
 			Region: r,
 			Leader: peers[0],
 		}
@@ -2186,7 +2154,7 @@ func (suite *clientTestSuite) TestBatchScanRegions() {
 	)
 	re.ErrorContains(err, "found a hole region in the last")
 	req := &pdpb.RegionHeartbeatRequest{
-		Header: newHeader(suite.srv),
+		Header: newHeader(),
 		Region: &metapb.Region{
 			Id: 100,
 			RegionEpoch: &metapb.RegionEpoch{
diff --git a/tests/integrations/client/gc_client_test.go b/tests/integrations/client/gc_client_test.go
index ba1c5a58a4b..27912a09550 100644
--- a/tests/integrations/client/gc_client_test.go
+++ b/tests/integrations/client/gc_client_test.go
@@ -79,7 +79,7 @@ func (suite *gcClientTestSuite) SetupSuite() {
 	addr := suite.server.GetAddr()
 	suite.client, err = pd.NewClientWithContext(suite.server.Context(), []string{addr}, pd.SecurityOption{})
 	re.NoError(err)
-	rootPath := path.Join("/pd", strconv.FormatUint(suite.server.ClusterID(), 10))
+	rootPath := path.Join("/pd", strconv.FormatUint(keypath.ClusterID(), 10))
 	suite.gcSafePointV2Prefix = path.Join(rootPath, keypath.GCSafePointV2Prefix())
 	// Enable the fail-point to skip checking keyspace validity.
 	re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/gc/checkKeyspace", "return(true)"))
diff --git a/tests/integrations/mcs/discovery/register_test.go b/tests/integrations/mcs/discovery/register_test.go
index 7251531eb39..63a40a1fb5b 100644
--- a/tests/integrations/mcs/discovery/register_test.go
+++ b/tests/integrations/mcs/discovery/register_test.go
@@ -85,7 +85,7 @@ func (suite *serverRegisterTestSuite) checkServerRegister(serviceName string) {
 	client := suite.pdLeader.GetEtcdClient()
 	// test API server discovery
-	endpoints, err := discovery.Discover(client, suite.clusterID, serviceName)
+	endpoints, err := discovery.Discover(client, serviceName)
 	re.NoError(err)
 	returnedEntry := &discovery.ServiceRegistryEntry{}
 	returnedEntry.Deserialize([]byte(endpoints[0]))
@@ -99,7 +99,7 @@
 	// test API server discovery after unregister
 	cleanup()
-	endpoints, err = discovery.Discover(client, suite.clusterID, serviceName)
+	endpoints, err = discovery.Discover(client, serviceName)
 	re.NoError(err)
 	re.Empty(endpoints)
 	testutil.Eventually(re, func() bool {
@@ -141,7 +141,7 @@ func (suite *serverRegisterTestSuite) checkServerPrimaryChange(serviceName strin
 	expectedPrimary = tests.WaitForPrimaryServing(re, serverMap)
 	// test API server discovery
 	client := suite.pdLeader.GetEtcdClient()
-	endpoints, err := discovery.Discover(client, suite.clusterID, serviceName)
+	endpoints, err := discovery.Discover(client, serviceName)
 	re.NoError(err)
 	re.Len(endpoints, serverNum-1)
diff --git a/tests/integrations/mcs/scheduling/config_test.go b/tests/integrations/mcs/scheduling/config_test.go
index fcc9a78b0f3..6a41ad0823e 100644
--- a/tests/integrations/mcs/scheduling/config_test.go
+++ b/tests/integrations/mcs/scheduling/config_test.go
@@ -86,7 +86,6 @@ func (suite *configTestSuite) TestConfigWatch() {
 	watcher, err := config.NewWatcher(
 		suite.ctx,
 		suite.pdLeaderServer.GetEtcdClient(),
-		suite.cluster.GetCluster().GetId(),
 		config.NewPersistConfig(config.NewConfig(), cache.NewStringTTL(suite.ctx, sc.DefaultGCInterval, sc.DefaultTTL)),
 		endpoint.NewStorageEndpoint(kv.NewMemoryKV(), nil),
 	)
@@ -146,7 +145,6 @@ func (suite *configTestSuite) TestSchedulerConfigWatch() {
 	watcher, err := config.NewWatcher(
 		suite.ctx,
 		suite.pdLeaderServer.GetEtcdClient(),
-		suite.cluster.GetCluster().GetId(),
 		config.NewPersistConfig(config.NewConfig(), cache.NewStringTTL(suite.ctx, sc.DefaultGCInterval, sc.DefaultTTL)),
 		storage,
 	)
diff --git a/tests/integrations/mcs/scheduling/meta_test.go b/tests/integrations/mcs/scheduling/meta_test.go
index e55f0281d72..0798e9f129e 100644
--- a/tests/integrations/mcs/scheduling/meta_test.go
+++ b/tests/integrations/mcs/scheduling/meta_test.go
@@ -76,7 +76,6 @@ func (suite *metaTestSuite) TestStoreWatch() {
 	_, err := meta.NewWatcher(
 		suite.ctx,
 		suite.pdLeaderServer.GetEtcdClient(),
-		suite.cluster.GetCluster().GetId(),
 		cluster,
 	)
 	re.NoError(err)
diff --git a/tests/integrations/mcs/tso/keyspace_group_manager_test.go b/tests/integrations/mcs/tso/keyspace_group_manager_test.go
index 60ec4843130..f266a76cb16 100644
--- a/tests/integrations/mcs/tso/keyspace_group_manager_test.go
+++ b/tests/integrations/mcs/tso/keyspace_group_manager_test.go
@@ -219,10 +219,9 @@ func (suite *tsoKeyspaceGroupManagerTestSuite) TestKeyspacesServedByNonDefaultKe
 	// Make sure every keyspace group is using the right timestamp path
 	// for loading/saving timestamp from/to etcd and the right primary path
 	// for primary election.
-	clusterID := suite.pdLeaderServer.GetClusterID()
-	rootPath := keypath.TSOSvcRootPath(clusterID)
+	rootPath := keypath.TSOSvcRootPath()
 	primaryPath := keypath.KeyspaceGroupPrimaryPath(rootPath, param.keyspaceGroupID)
-	timestampPath := keypath.FullTimestampPath(clusterID, param.keyspaceGroupID)
+	timestampPath := keypath.FullTimestampPath(param.keyspaceGroupID)
 	re.Equal(timestampPath, am.GetTimestampPath(tsopkg.GlobalDCLocation))
 	re.Equal(primaryPath, am.GetMember().GetLeaderPath())
diff --git a/tests/integrations/mcs/tso/server_test.go b/tests/integrations/mcs/tso/server_test.go
index e4d1ff319db..aa767ecfbef 100644
--- a/tests/integrations/mcs/tso/server_test.go
+++ b/tests/integrations/mcs/tso/server_test.go
@@ -20,7 +20,6 @@ import (
 	"fmt"
 	"io"
 	"net/http"
-	"strconv"
 	"strings"
 	"testing"
 	"time"
@@ -252,11 +251,10 @@ func (suite *APIServerForward) ShutDown() {
 	re := suite.re
 
 	etcdClient := suite.pdLeader.GetEtcdClient()
-	clusterID := strconv.FormatUint(suite.pdLeader.GetClusterID(), 10)
-	endpoints, err := discovery.Discover(etcdClient, clusterID, constant.TSOServiceName)
+	endpoints, err := discovery.Discover(etcdClient, constant.TSOServiceName)
 	re.NoError(err)
 	if len(endpoints) != 0 {
-		endpoints, err = discovery.Discover(etcdClient, clusterID, constant.TSOServiceName)
+		endpoints, err = discovery.Discover(etcdClient, constant.TSOServiceName)
 		re.NoError(err)
 		re.Empty(endpoints)
 	}
@@ -524,11 +522,10 @@ func (suite *CommonTestSuite) TearDownSuite() {
 	re := suite.Require()
 	suite.tsoCluster.Destroy()
 	etcdClient := suite.pdLeader.GetEtcdClient()
-	clusterID := strconv.FormatUint(suite.pdLeader.GetClusterID(), 10)
-	endpoints, err := discovery.Discover(etcdClient, clusterID, constant.TSOServiceName)
+	endpoints, err := discovery.Discover(etcdClient, constant.TSOServiceName)
 	re.NoError(err)
 	if len(endpoints) != 0 {
-		endpoints, err = discovery.Discover(etcdClient, clusterID, constant.TSOServiceName)
+		endpoints, err = discovery.Discover(etcdClient, constant.TSOServiceName)
 		re.NoError(err)
 		re.Empty(endpoints)
 	}
diff --git a/tests/integrations/tso/consistency_test.go b/tests/integrations/tso/consistency_test.go
index f9bafb9e71a..2ef2ebe3077 100644
--- a/tests/integrations/tso/consistency_test.go
+++ b/tests/integrations/tso/consistency_test.go
@@ -26,6 +26,7 @@ import (
 	"github.com/stretchr/testify/suite"
 	tso "github.com/tikv/pd/pkg/mcs/tso/server"
 	tsopkg "github.com/tikv/pd/pkg/tso"
+	"github.com/tikv/pd/pkg/utils/keypath"
 	"github.com/tikv/pd/pkg/utils/tempurl"
 	tu "github.com/tikv/pd/pkg/utils/testutil"
 	"github.com/tikv/pd/pkg/utils/tsoutil"
@@ -100,16 +101,9 @@ func (suite *tsoConsistencyTestSuite) TearDownSuite() {
 	suite.cluster.Destroy()
 }
 
-func (suite *tsoConsistencyTestSuite) getClusterID() uint64 {
-	if suite.legacy {
-		return suite.pdLeaderServer.GetServer().ClusterID()
-	}
-	return suite.tsoServer.ClusterID()
-}
-
 func (suite *tsoConsistencyTestSuite) request(ctx context.Context, count uint32) *pdpb.Timestamp {
 	re := suite.Require()
-	clusterID := suite.getClusterID()
+	clusterID := keypath.ClusterID()
 	if suite.legacy {
 		req := &pdpb.TsoRequest{
 			Header: &pdpb.RequestHeader{ClusterId: clusterID},
diff --git a/tests/integrations/tso/server_test.go b/tests/integrations/tso/server_test.go
index 828518d72c0..c0d71050964 100644
--- a/tests/integrations/tso/server_test.go
+++ b/tests/integrations/tso/server_test.go
@@ -25,6 +25,7 @@ import (
 	"github.com/stretchr/testify/suite"
 	tso "github.com/tikv/pd/pkg/mcs/tso/server"
 	tsopkg "github.com/tikv/pd/pkg/tso"
+	"github.com/tikv/pd/pkg/utils/keypath"
 	"github.com/tikv/pd/pkg/utils/tempurl"
 	tu "github.com/tikv/pd/pkg/utils/testutil"
 	"github.com/tikv/pd/tests"
@@ -98,13 +99,6 @@ func (suite *tsoServerTestSuite) TearDownSuite() {
 	suite.cluster.Destroy()
 }
 
-func (suite *tsoServerTestSuite) getClusterID() uint64 {
-	if suite.legacy {
-		return suite.pdLeaderServer.GetServer().ClusterID()
-	}
-	return suite.tsoServer.ClusterID()
-}
-
 func (suite *tsoServerTestSuite) resetTS(ts uint64, ignoreSmaller, skipUpperBoundCheck bool) {
 	var err error
 	if suite.legacy {
@@ -120,7 +114,7 @@ func (suite *tsoServerTestSuite) resetTS(ts uint64, ignoreSmaller, skipUpperBoun
 func (suite *tsoServerTestSuite) request(ctx context.Context, count uint32) (err error) {
 	re := suite.Require()
-	clusterID := suite.getClusterID()
+	clusterID := keypath.ClusterID()
 	if suite.legacy {
 		req := &pdpb.TsoRequest{
 			Header: &pdpb.RequestHeader{ClusterId: clusterID},
diff --git a/tests/server/cluster/cluster_test.go b/tests/server/cluster/cluster_test.go
index 82c8a5766d3..c0143760bdd 100644
--- a/tests/server/cluster/cluster_test.go
+++ b/tests/server/cluster/cluster_test.go
@@ -912,7 +912,7 @@ func TestLoadClusterInfo(t *testing.T) {
 	tc.WaitLeader()
 	leaderServer := tc.GetLeaderServer()
 	svr := leaderServer.GetServer()
-	rc := cluster.NewRaftCluster(ctx, svr.ClusterID(), svr.GetMember(), svr.GetBasicCluster(), svr.GetStorage(), syncer.NewRegionSyncer(svr), svr.GetClient(), svr.GetHTTPClient(), svr.GetTSOAllocatorManager())
+	rc := cluster.NewRaftCluster(ctx, svr.GetMember(), svr.GetBasicCluster(), svr.GetStorage(), syncer.NewRegionSyncer(svr), svr.GetClient(), svr.GetHTTPClient(), svr.GetTSOAllocatorManager())
 	// Cluster is not bootstrapped.
 	rc.InitCluster(svr.GetAllocator(), svr.GetPersistOptions(), svr.GetHBStreams(), svr.GetKeyspaceGroupManager())
@@ -952,7 +952,8 @@ func TestLoadClusterInfo(t *testing.T) {
 	}
 	re.NoError(testStorage.Flush())
 
-	raftCluster = cluster.NewRaftCluster(ctx, svr.ClusterID(), svr.GetMember(), basicCluster, testStorage, syncer.NewRegionSyncer(svr), svr.GetClient(), svr.GetHTTPClient(), svr.GetTSOAllocatorManager())
+	raftCluster = cluster.NewRaftCluster(ctx, svr.GetMember(), basicCluster,
+		testStorage, syncer.NewRegionSyncer(svr), svr.GetClient(), svr.GetHTTPClient(), svr.GetTSOAllocatorManager())
 	raftCluster.InitCluster(mockid.NewIDAllocator(), svr.GetPersistOptions(), svr.GetHBStreams(), svr.GetKeyspaceGroupManager())
 	raftCluster, err = raftCluster.LoadClusterInfo()
 	re.NoError(err)
@@ -1666,7 +1667,9 @@ func TestTransferLeaderBack(t *testing.T) {
 	tc.WaitLeader()
 	leaderServer := tc.GetLeaderServer()
 	svr := leaderServer.GetServer()
-	rc := cluster.NewRaftCluster(ctx, svr.ClusterID(), svr.GetMember(), svr.GetBasicCluster(), svr.GetStorage(), syncer.NewRegionSyncer(svr), svr.GetClient(), svr.GetHTTPClient(), svr.GetTSOAllocatorManager())
+	rc := cluster.NewRaftCluster(ctx, svr.GetMember(), svr.GetBasicCluster(),
+		svr.GetStorage(), syncer.NewRegionSyncer(svr), svr.GetClient(),
+		svr.GetHTTPClient(), svr.GetTSOAllocatorManager())
 	rc.InitCluster(svr.GetAllocator(), svr.GetPersistOptions(), svr.GetHBStreams(), svr.GetKeyspaceGroupManager())
 	storage := rc.GetStorage()
 	meta := &metapb.Cluster{Id: 123}
diff --git a/tests/server/join/join_test.go b/tests/server/join/join_test.go
index 5d087caf5e4..ea5eaaa35f4 100644
--- a/tests/server/join/join_test.go
+++ b/tests/server/join/join_test.go
@@ -62,7 +62,6 @@ func TestSimpleJoin(t *testing.T) {
 	members, err = etcdutil.ListEtcdMembers(ctx, client)
 	re.NoError(err)
 	re.Len(members.Members, 2)
-	re.Equal(pd1.GetClusterID(), pd2.GetClusterID())
 
 	// Wait for all nodes becoming healthy.
 	time.Sleep(time.Second * 5)
@@ -78,7 +77,6 @@
 	members, err = etcdutil.ListEtcdMembers(ctx, client)
 	re.NoError(err)
 	re.Len(members.Members, 3)
-	re.Equal(pd1.GetClusterID(), pd3.GetClusterID())
 }
 
 // A failed PD tries to join the previous cluster but it has been deleted
diff --git a/tests/server/server_test.go b/tests/server/server_test.go
index 3f1769a97f8..fc562825460 100644
--- a/tests/server/server_test.go
+++ b/tests/server/server_test.go
@@ -24,6 +24,7 @@ import (
 	"github.com/pingcap/failpoint"
 	"github.com/pingcap/kvproto/pkg/pdpb"
 	"github.com/stretchr/testify/require"
+	"github.com/tikv/pd/pkg/utils/keypath"
 	"github.com/tikv/pd/pkg/utils/tempurl"
 	"github.com/tikv/pd/pkg/utils/testutil"
 	"github.com/tikv/pd/server/config"
@@ -87,29 +88,23 @@ func TestClusterID(t *testing.T) {
 	err = cluster.RunInitialServers()
 	re.NoError(err)
 
-	clusterID := cluster.GetServer("pd1").GetClusterID()
-	for _, s := range cluster.GetServers() {
-		re.Equal(clusterID, s.GetClusterID())
-	}
+	clusterID := keypath.ClusterID()
+	keypath.ResetClusterID()
 
 	// Restart all PDs.
-	err = cluster.StopAll()
-	re.NoError(err)
-	err = cluster.RunInitialServers()
-	re.NoError(err)
+	re.NoError(cluster.StopAll())
+	re.NoError(cluster.RunInitialServers())
 
-	// All PDs should have the same cluster ID as before.
-	for _, s := range cluster.GetServers() {
-		re.Equal(clusterID, s.GetClusterID())
-	}
+	// PD should have the same cluster ID as before.
+	re.Equal(clusterID, keypath.ClusterID())
+	keypath.ResetClusterID()
 
 	cluster2, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, _ string) { conf.InitialClusterToken = "foobar" })
 	defer cluster2.Destroy()
 	re.NoError(err)
 	err = cluster2.RunInitialServers()
 	re.NoError(err)
-	clusterID2 := cluster2.GetServer("pd1").GetClusterID()
-	re.NotEqual(clusterID, clusterID2)
+	re.NotEqual(clusterID, keypath.ClusterID())
 }
 
 func TestLeader(t *testing.T) {
diff --git a/tests/testutil.go b/tests/testutil.go
index 03ec7bbe805..b5ea0a9f53a 100644
--- a/tests/testutil.go
+++ b/tests/testutil.go
@@ -42,6 +42,7 @@ import (
 	tso "github.com/tikv/pd/pkg/mcs/tso/server"
 	"github.com/tikv/pd/pkg/mcs/utils/constant"
 	"github.com/tikv/pd/pkg/mock/mockid"
+	"github.com/tikv/pd/pkg/utils/keypath"
 	"github.com/tikv/pd/pkg/utils/logutil"
 	"github.com/tikv/pd/pkg/utils/testutil"
 	"github.com/tikv/pd/pkg/versioninfo"
@@ -218,7 +219,7 @@ func MustPutStore(re *require.Assertions, cluster *TestCluster, store *metapb.St
 	svr := cluster.GetLeaderServer().GetServer()
 	grpcServer := &server.GrpcServer{Server: svr}
 	_, err := grpcServer.PutStore(context.Background(), &pdpb.PutStoreRequest{
-		Header: &pdpb.RequestHeader{ClusterId: keypath.ClusterID()},
+		Header: &pdpb.RequestHeader{ClusterId: keypath.ClusterID()},
 		Store:  store,
 	})
 	re.NoError(err)
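
Editorial note (not part of the patch): every hunk above replaces a threaded clusterID value, a per-server clusterID field, or a ClusterID() getter with reads from the process-global keypath package, and tests clear that state between clusters via keypath.ResetClusterID(). The keypath internals are not included in this diff, so the Go sketch below is only an illustration of that global-accessor pattern under assumed names: ClusterID() and ResetClusterID() do appear in the hunks above, while SetClusterID() is a hypothetical write-side helper added here for completeness.

// Minimal sketch of a process-global cluster ID holder, assuming an atomic
// variable. This is NOT the actual pkg/utils/keypath implementation.
package keypath

import "sync/atomic"

var clusterID atomic.Uint64

// ClusterID returns the cluster ID cached for this process (0 if unset).
func ClusterID() uint64 {
	return clusterID.Load()
}

// SetClusterID records the cluster ID once it has been initialized
// (hypothetical helper; only the read side is visible in this diff).
func SetClusterID(id uint64) {
	clusterID.Store(id)
}

// ResetClusterID clears the cached value, e.g. between test clusters.
func ResetClusterID() {
	clusterID.Store(0)
}

With this shape, a request header can be built as &pdpb.RequestHeader{ClusterId: keypath.ClusterID()}, which is how the updated test call sites above now construct their headers instead of asking a *server.Server instance for its cluster ID.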