From 6c2fd517eeadfba4257bee46a06720e0e4359f17 Mon Sep 17 00:00:00 2001
From: Chunzhu Li
Date: Tue, 17 Oct 2023 16:21:29 +0800
Subject: [PATCH 01/15] dm: support dm worker incremental validator cutover switch (#9867)

close pingcap/tiflow#9868
---
 dm/ctl/master/update_validation.go |  89 +++
 dm/ctl/master/validation_cmd.go    |   1 +
 dm/master/server.go                |  69 +++
 dm/master/workerrpc/interface.go   |   3 +
 dm/master/workerrpc/rawgrpc.go     |   2 +
 dm/pb/dmmaster.pb.go               | 938 ++++++++++++++++++++++++-----
 dm/pb/dmworker.pb.go               | 885 +++++++++++++++++++++------
 dm/pbmock/dmmaster.go              |  35 ++
 dm/pbmock/dmworker.go              |  35 ++
 dm/proto/dmmaster.proto            |  14 +
 dm/proto/dmworker.proto            |   9 +
 dm/syncer/data_validator.go        |  38 +-
 dm/worker/server.go                |  25 +
 dm/worker/source_worker.go         |   8 +
 dm/worker/subtask.go               |   8 +
 15 files changed, 1826 insertions(+), 333 deletions(-)
 create mode 100644 dm/ctl/master/update_validation.go

diff --git a/dm/ctl/master/update_validation.go b/dm/ctl/master/update_validation.go
new file mode 100644
index 00000000000..77c45b368a0
--- /dev/null
+++ b/dm/ctl/master/update_validation.go
@@ -0,0 +1,89 @@
+// Copyright 2023 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package master
+
+import (
+	"context"
+
+	"github.com/pingcap/tiflow/dm/ctl/common"
+	"github.com/pingcap/tiflow/dm/pb"
+	"github.com/spf13/cobra"
+)
+
+const (
+	UpdateValidationOp = "update"
+)
+
+type validationUpdateArgs struct {
+	sources  []string
+	taskName string
+
+	cutoverBinlogPos  string
+	cutoverBinlogGTID string
+}
+
+func NewUpdateValidationCmd() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "update [task-name]",
+		Short: "update validation config of the completeness of the data",
+		RunE:  updateValidation,
+	}
+	cmd.Flags().String("cutover-binlog-pos", "", "specify the cutover binlog position for validation, should include binlog name and pos in brackets, e.g. '(mysql-bin.000001, 5989)'")
+	cmd.Flags().String("cutover-binlog-gtid", "", "specify the cutover binlog gtid for validation, only valid when source config's gtid is enabled, e.g. '1642618e-cf65-11ec-9e3d-0242ac110002:1-30'")
+	return cmd
+}
+
+func updateValidation(cmd *cobra.Command, _ []string) error {
+	args, msg, ok := parseValidationUpdateArgs(cmd)
+	if !ok {
+		return printUsageAndFailWithMessage(cmd, msg)
+	}
+	req := &pb.UpdateValidationRequest{
+		TaskName:   args.taskName,
+		Sources:    args.sources,
+		BinlogPos:  args.cutoverBinlogPos,
+		BinlogGTID: args.cutoverBinlogGTID,
+	}
+
+	resp := &pb.UpdateValidationResponse{}
+	err := common.SendRequest(context.Background(), "UpdateValidation", req, &resp)
+	if err != nil {
+		return err
+	}
+	common.PrettyPrintResponse(resp)
+	return nil
+}
+
+func parseValidationUpdateArgs(cmd *cobra.Command) (validationUpdateArgs, string, bool) {
+	var err error
+	args := validationUpdateArgs{}
+	if args.sources, err = common.GetSourceArgs(cmd); err != nil {
+		return args, err.Error(), false
+	}
+	if args.cutoverBinlogPos, err = cmd.Flags().GetString("cutover-binlog-pos"); err != nil {
+		return args, err.Error(), false
+	}
+	if args.cutoverBinlogGTID, err = cmd.Flags().GetString("cutover-binlog-gtid"); err != nil {
+		return args, err.Error(), false
+	}
+
+	if len(cmd.Flags().Args()) != 1 {
+		return args, "`task-name` should be set", false
+	}
+	args.taskName = cmd.Flags().Arg(0)
+	if len(args.taskName) == 0 {
+		return args, "`task-name` should be set", false
+	}
+	return args, "", true
+}
diff --git a/dm/ctl/master/validation_cmd.go b/dm/ctl/master/validation_cmd.go
index ff8cec5c60b..7ed028fc248 100644
--- a/dm/ctl/master/validation_cmd.go
+++ b/dm/ctl/master/validation_cmd.go
@@ -23,6 +23,7 @@ func NewValidationCmd() *cobra.Command {
 	cmd.AddCommand(
 		NewStartValidationCmd(),
 		NewStopValidationCmd(),
+		NewUpdateValidationCmd(),
 		NewQueryValidationErrorCmd(),
 		NewQueryValidationStatusCmd(),
 		NewIgnoreValidationErrorCmd(),
diff --git a/dm/master/server.go b/dm/master/server.go
index fd331c14c3e..684a23e053d 100644
--- a/dm/master/server.go
+++ b/dm/master/server.go
@@ -3218,3 +3218,72 @@ func genValidationWorkerErrorResp(req *workerrpc.Request, err error, logMsg, wor
 		return nil
 	}
 }
+
+func (s *Server) UpdateValidation(ctx context.Context, req *pb.UpdateValidationRequest) (*pb.UpdateValidationResponse, error) {
+	var (
+		resp2 *pb.UpdateValidationResponse
+		err   error
+	)
+	shouldRet := s.sharedLogic(ctx, req, &resp2, &err)
+	if shouldRet {
+		return resp2, err
+	}
+	sources := req.Sources
+	if len(sources) == 0 {
+		sources = s.getTaskSourceNameList(req.TaskName)
+		if len(sources) == 0 {
+			return &pb.UpdateValidationResponse{
+				Result: false,
+				Msg:    fmt.Sprintf("task %s has no source or not exist, please check the task name and status", req.TaskName),
+			}, nil
+		}
+	}
+
+	workerReq := workerrpc.Request{
+		Type: workerrpc.CmdUpdateValidation,
+		UpdateValidation: &pb.UpdateValidationWorkerRequest{
+			TaskName:   req.TaskName,
+			BinlogPos:  req.BinlogPos,
+			BinlogGTID: req.BinlogGTID,
+		},
+	}
+
+	workerRespCh := make(chan *pb.CommonWorkerResponse, len(sources))
+	var wg sync.WaitGroup
+	for _, source := range sources {
+		wg.Add(1)
+		go func(source string) {
+			defer wg.Done()
+			worker := s.scheduler.GetWorkerBySource(source)
+			if worker == nil {
+				workerRespCh <- errorCommonWorkerResponse(fmt.Sprintf("source %s relevant worker-client not found", source), source, "")
+				return
+			}
+			var workerResp *pb.CommonWorkerResponse
+			resp, err := worker.SendRequest(ctx, &workerReq, s.cfg.RPCTimeout)
+			if err != nil {
+				workerResp = errorCommonWorkerResponse(err.Error(), source, worker.BaseInfo().Name)
+			} else {
+				workerResp = resp.UpdateValidation
+			}
workerResp.Source = source + workerRespCh <- workerResp + }(source) + } + wg.Wait() + + workerResps := make([]*pb.CommonWorkerResponse, 0, len(sources)) + for len(workerRespCh) > 0 { + workerResp := <-workerRespCh + workerResps = append(workerResps, workerResp) + } + + sort.Slice(workerResps, func(i, j int) bool { + return workerResps[i].Source < workerResps[j].Source + }) + + return &pb.UpdateValidationResponse{ + Result: true, + Sources: workerResps, + }, nil +} diff --git a/dm/master/workerrpc/interface.go b/dm/master/workerrpc/interface.go index 41b39d15d95..06b229e135e 100644 --- a/dm/master/workerrpc/interface.go +++ b/dm/master/workerrpc/interface.go @@ -52,6 +52,7 @@ const ( CmdGetValidationStatus CmdGetValidationError CmdOperateValidationError + CmdUpdateValidation ) // Request wraps all dm-worker rpc requests. @@ -73,6 +74,7 @@ type Request struct { GetValidationStatus *pb.GetValidationStatusRequest GetValidationError *pb.GetValidationErrorRequest OperateValidationError *pb.OperateValidationErrorRequest + UpdateValidation *pb.UpdateValidationWorkerRequest } // Response wraps all dm-worker rpc responses. @@ -94,6 +96,7 @@ type Response struct { GetValidationStatus *pb.GetValidationStatusResponse GetValidationError *pb.GetValidationErrorResponse OperateValidationError *pb.OperateValidationErrorResponse + UpdateValidation *pb.CommonWorkerResponse } // Client is a client that sends RPC. diff --git a/dm/master/workerrpc/rawgrpc.go b/dm/master/workerrpc/rawgrpc.go index 9b6c0cdff24..cb0385368d0 100644 --- a/dm/master/workerrpc/rawgrpc.go +++ b/dm/master/workerrpc/rawgrpc.go @@ -124,6 +124,8 @@ func callRPC(ctx context.Context, client pb.WorkerClient, req *Request) (*Respon resp.GetValidationError, err = client.GetValidatorError(ctx, req.GetValidationError) case CmdOperateValidationError: resp.OperateValidationError, err = client.OperateValidatorError(ctx, req.OperateValidationError) + case CmdUpdateValidation: + resp.UpdateValidation, err = client.UpdateValidator(ctx, req.UpdateValidation) default: return nil, terror.ErrMasterGRPCInvalidReqType.Generate(req.Type) } diff --git a/dm/pb/dmmaster.pb.go b/dm/pb/dmmaster.pb.go index 1917e98f5ff..3e5a5818aad 100644 --- a/dm/pb/dmmaster.pb.go +++ b/dm/pb/dmmaster.pb.go @@ -3527,6 +3527,134 @@ func (m *StopValidationResponse) GetSources() []*CommonWorkerResponse { return nil } +type UpdateValidationRequest struct { + TaskName string `protobuf:"bytes,1,opt,name=taskName,proto3" json:"taskName,omitempty"` + Sources []string `protobuf:"bytes,2,rep,name=sources,proto3" json:"sources,omitempty"` + BinlogPos string `protobuf:"bytes,3,opt,name=binlogPos,proto3" json:"binlogPos,omitempty"` + BinlogGTID string `protobuf:"bytes,4,opt,name=binlogGTID,proto3" json:"binlogGTID,omitempty"` +} + +func (m *UpdateValidationRequest) Reset() { *m = UpdateValidationRequest{} } +func (m *UpdateValidationRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateValidationRequest) ProtoMessage() {} +func (*UpdateValidationRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f9bef11f2a341f03, []int{53} +} +func (m *UpdateValidationRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateValidationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpdateValidationRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m 
*UpdateValidationRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateValidationRequest.Merge(m, src) +} +func (m *UpdateValidationRequest) XXX_Size() int { + return m.Size() +} +func (m *UpdateValidationRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateValidationRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateValidationRequest proto.InternalMessageInfo + +func (m *UpdateValidationRequest) GetTaskName() string { + if m != nil { + return m.TaskName + } + return "" +} + +func (m *UpdateValidationRequest) GetSources() []string { + if m != nil { + return m.Sources + } + return nil +} + +func (m *UpdateValidationRequest) GetBinlogPos() string { + if m != nil { + return m.BinlogPos + } + return "" +} + +func (m *UpdateValidationRequest) GetBinlogGTID() string { + if m != nil { + return m.BinlogGTID + } + return "" +} + +type UpdateValidationResponse struct { + Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` + Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` + Sources []*CommonWorkerResponse `protobuf:"bytes,3,rep,name=sources,proto3" json:"sources,omitempty"` +} + +func (m *UpdateValidationResponse) Reset() { *m = UpdateValidationResponse{} } +func (m *UpdateValidationResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateValidationResponse) ProtoMessage() {} +func (*UpdateValidationResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f9bef11f2a341f03, []int{54} +} +func (m *UpdateValidationResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateValidationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpdateValidationResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UpdateValidationResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateValidationResponse.Merge(m, src) +} +func (m *UpdateValidationResponse) XXX_Size() int { + return m.Size() +} +func (m *UpdateValidationResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateValidationResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateValidationResponse proto.InternalMessageInfo + +func (m *UpdateValidationResponse) GetResult() bool { + if m != nil { + return m.Result + } + return false +} + +func (m *UpdateValidationResponse) GetMsg() string { + if m != nil { + return m.Msg + } + return "" +} + +func (m *UpdateValidationResponse) GetSources() []*CommonWorkerResponse { + if m != nil { + return m.Sources + } + return nil +} + func init() { proto.RegisterEnum("pb.UnlockDDLLockOp", UnlockDDLLockOp_name, UnlockDDLLockOp_value) proto.RegisterEnum("pb.SourceOp", SourceOp_name, SourceOp_value) @@ -3586,160 +3714,166 @@ func init() { proto.RegisterType((*StartValidationResponse)(nil), "pb.StartValidationResponse") proto.RegisterType((*StopValidationRequest)(nil), "pb.StopValidationRequest") proto.RegisterType((*StopValidationResponse)(nil), "pb.StopValidationResponse") + proto.RegisterType((*UpdateValidationRequest)(nil), "pb.UpdateValidationRequest") + proto.RegisterType((*UpdateValidationResponse)(nil), "pb.UpdateValidationResponse") } func init() { proto.RegisterFile("dmmaster.proto", fileDescriptor_f9bef11f2a341f03) } var fileDescriptor_f9bef11f2a341f03 = []byte{ - // 2366 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x3a, 0x4b, 
0x6f, 0x1b, 0xc9, - 0xd1, 0x1c, 0x92, 0xa2, 0xc8, 0xa2, 0x44, 0x53, 0x2d, 0x89, 0x1a, 0x8f, 0x65, 0x5a, 0xee, 0x7d, - 0x40, 0x10, 0x3e, 0x58, 0xb0, 0xbe, 0x9c, 0x16, 0xd8, 0x20, 0x6b, 0xd1, 0x6b, 0x0b, 0x91, 0xd7, - 0x9b, 0x91, 0xed, 0x64, 0x11, 0x20, 0x9b, 0x21, 0xd9, 0xa4, 0x08, 0x0d, 0x67, 0xc6, 0x33, 0x43, - 0x69, 0x0d, 0x63, 0x73, 0xc8, 0x29, 0xa7, 0x3c, 0xb0, 0x41, 0xf6, 0x98, 0x43, 0xfe, 0x40, 0x7e, - 0x46, 0x8e, 0x0b, 0xe4, 0x92, 0x4b, 0x80, 0xc0, 0xce, 0x3d, 0x7f, 0x21, 0xe8, 0xea, 0x9e, 0x99, - 0x9e, 0x07, 0xb9, 0xe1, 0x02, 0x11, 0x72, 0x9b, 0xaa, 0x6a, 0xd6, 0xbb, 0xab, 0xab, 0x0a, 0x84, - 0xd6, 0x70, 0x3a, 0xb5, 0x82, 0x90, 0xf9, 0xf7, 0x3c, 0xdf, 0x0d, 0x5d, 0x52, 0xf6, 0xfa, 0x46, - 0x6b, 0x38, 0xbd, 0x72, 0xfd, 0x8b, 0x08, 0x67, 0xec, 0x8e, 0x5d, 0x77, 0x6c, 0xb3, 0x43, 0xcb, - 0x9b, 0x1c, 0x5a, 0x8e, 0xe3, 0x86, 0x56, 0x38, 0x71, 0x9d, 0x40, 0x50, 0xe9, 0x2f, 0xa0, 0x7d, - 0x16, 0x5a, 0x7e, 0xf8, 0xcc, 0x0a, 0x2e, 0x4c, 0xf6, 0x72, 0xc6, 0x82, 0x90, 0x10, 0xa8, 0x86, - 0x56, 0x70, 0xa1, 0x6b, 0x7b, 0xda, 0x7e, 0xc3, 0xc4, 0x6f, 0xa2, 0xc3, 0x6a, 0xe0, 0xce, 0xfc, - 0x01, 0x0b, 0xf4, 0xf2, 0x5e, 0x65, 0xbf, 0x61, 0x46, 0x20, 0xe9, 0x02, 0xf8, 0x6c, 0xea, 0x5e, - 0xb2, 0x27, 0x2c, 0xb4, 0xf4, 0xca, 0x9e, 0xb6, 0x5f, 0x37, 0x15, 0x0c, 0xd9, 0x85, 0x46, 0x80, - 0x12, 0x26, 0x53, 0xa6, 0x57, 0x91, 0x65, 0x82, 0xa0, 0x5f, 0x69, 0xb0, 0xa1, 0x28, 0x10, 0x78, - 0xae, 0x13, 0x30, 0xd2, 0x81, 0x9a, 0xcf, 0x82, 0x99, 0x1d, 0xa2, 0x0e, 0x75, 0x53, 0x42, 0xa4, - 0x0d, 0x95, 0x69, 0x30, 0xd6, 0xcb, 0xc8, 0x85, 0x7f, 0x92, 0xa3, 0x44, 0xaf, 0xca, 0x5e, 0x65, - 0xbf, 0x79, 0xa4, 0xdf, 0xf3, 0xfa, 0xf7, 0x8e, 0xdd, 0xe9, 0xd4, 0x75, 0x7e, 0x8c, 0x6e, 0x88, - 0x98, 0x26, 0x1a, 0xef, 0x41, 0x73, 0x70, 0xce, 0x06, 0x5c, 0x1c, 0x17, 0x21, 0x74, 0x52, 0x51, - 0xf4, 0x67, 0x40, 0x9e, 0x7a, 0xcc, 0xb7, 0x42, 0xa6, 0xfa, 0xc5, 0x80, 0xb2, 0xeb, 0xa1, 0x46, - 0xad, 0x23, 0xe0, 0x62, 0x38, 0xf1, 0xa9, 0x67, 0x96, 0x5d, 0x8f, 0xfb, 0xcc, 0xb1, 0xa6, 0x4c, - 0xaa, 0x86, 0xdf, 0xaa, 0xcf, 0x2a, 0x29, 0x9f, 0xd1, 0xdf, 0x68, 0xb0, 0x99, 0x12, 0x20, 0xed, - 0x5e, 0x24, 0x21, 0xf1, 0x49, 0xb9, 0xc8, 0x27, 0x95, 0x42, 0x9f, 0x54, 0xff, 0x43, 0x9f, 0xd0, - 0x8f, 0x60, 0xe3, 0xb9, 0x37, 0xcc, 0x18, 0xbc, 0x54, 0x22, 0xd0, 0xdf, 0x6b, 0x40, 0x54, 0x1e, - 0xff, 0x23, 0xb1, 0xfc, 0x18, 0x3a, 0x3f, 0x9a, 0x31, 0xff, 0xd5, 0x59, 0x68, 0x85, 0xb3, 0xe0, - 0x74, 0x12, 0x84, 0x8a, 0x79, 0x18, 0x33, 0xad, 0x38, 0x66, 0x19, 0xf3, 0x2e, 0x61, 0x27, 0xc7, - 0x67, 0x69, 0x13, 0xef, 0x67, 0x4d, 0xdc, 0xe1, 0x26, 0x2a, 0x7c, 0xf3, 0x91, 0x39, 0x86, 0xcd, - 0xb3, 0x73, 0xf7, 0xaa, 0xd7, 0x3b, 0x3d, 0x75, 0x07, 0x17, 0xc1, 0x77, 0x8b, 0xcd, 0x1f, 0x35, - 0x58, 0x95, 0x1c, 0x48, 0x0b, 0xca, 0x27, 0x3d, 0xf9, 0xbb, 0xf2, 0x49, 0x2f, 0xe6, 0x54, 0x56, - 0x38, 0x11, 0xa8, 0x4e, 0xdd, 0x21, 0x93, 0x59, 0x85, 0xdf, 0x64, 0x0b, 0x56, 0xdc, 0x2b, 0x87, - 0xf9, 0xd2, 0xc9, 0x02, 0xe0, 0x27, 0x7b, 0xbd, 0xd3, 0x40, 0x5f, 0x41, 0x81, 0xf8, 0xcd, 0xfd, - 0x11, 0xbc, 0x72, 0x06, 0x6c, 0xa8, 0xd7, 0x10, 0x2b, 0x21, 0x62, 0x40, 0x7d, 0xe6, 0x48, 0xca, - 0x2a, 0x52, 0x62, 0x98, 0x0e, 0x60, 0x2b, 0x6d, 0xe6, 0xd2, 0xbe, 0xbd, 0x0b, 0x2b, 0x36, 0xff, - 0xa9, 0xf4, 0x6c, 0x93, 0x7b, 0x56, 0xb2, 0x33, 0x05, 0x85, 0xfe, 0x5d, 0x83, 0xad, 0xe7, 0x0e, - 0xff, 0x8e, 0x08, 0xd2, 0x9b, 0x59, 0x9f, 0x50, 0x58, 0xf3, 0x99, 0x67, 0x5b, 0x03, 0xf6, 0x14, - 0x4d, 0x16, 0x62, 0x52, 0x38, 0x9e, 0x7a, 0x23, 0xd7, 0x1f, 0x30, 0x13, 0x6b, 0x9d, 0xac, 0x7c, - 0x2a, 0x8a, 0xbc, 0x83, 0xd7, 0xb9, 0x8a, 0xd7, 0x79, 0x93, 0xab, 0x93, 0x92, 0x2d, 0xef, 0xb5, - 0x12, 
0xb4, 0x95, 0x74, 0x65, 0x35, 0xa0, 0x3e, 0xb4, 0x42, 0xab, 0x6f, 0x05, 0x4c, 0xaf, 0xa1, - 0x02, 0x31, 0xcc, 0x83, 0x11, 0x5a, 0x7d, 0x9b, 0xe9, 0xab, 0x22, 0x18, 0x08, 0xd0, 0x8f, 0x60, - 0x3b, 0x63, 0xde, 0xb2, 0x5e, 0xa4, 0x26, 0xdc, 0x94, 0x95, 0x29, 0xba, 0x72, 0xb6, 0xf5, 0x2a, - 0x72, 0xd3, 0x2d, 0xa5, 0x3e, 0xa1, 0x7f, 0x91, 0x9a, 0x37, 0x24, 0x93, 0x7d, 0x5f, 0x6b, 0x60, - 0x14, 0x31, 0x95, 0xca, 0x2d, 0xe4, 0xfa, 0xdf, 0x2d, 0x7b, 0x5f, 0x6b, 0xb0, 0xf3, 0xe9, 0xcc, - 0x1f, 0x17, 0x19, 0xab, 0xd8, 0xa3, 0xe5, 0x02, 0x33, 0x71, 0xac, 0x41, 0x38, 0xb9, 0x64, 0x52, - 0xab, 0x18, 0xc6, 0xdb, 0xc4, 0x5f, 0x3a, 0xae, 0x58, 0xc5, 0xc4, 0x6f, 0x7e, 0x7e, 0x34, 0xb1, - 0x19, 0x16, 0x1b, 0x71, 0x79, 0x62, 0x18, 0xef, 0xca, 0xac, 0xdf, 0x9b, 0xf8, 0xfa, 0x0a, 0x52, - 0x24, 0x44, 0xbf, 0x00, 0x3d, 0xaf, 0xd8, 0x75, 0x94, 0x54, 0x7a, 0x09, 0xed, 0x63, 0x5e, 0x3f, - 0xbf, 0xed, 0x25, 0xe8, 0x40, 0x8d, 0xf9, 0xfe, 0xb1, 0x23, 0x22, 0x53, 0x31, 0x25, 0xc4, 0xfd, - 0x76, 0x65, 0xf9, 0x0e, 0x27, 0x08, 0x27, 0x44, 0xe0, 0xb7, 0xb4, 0x02, 0x1f, 0xc2, 0x86, 0x22, - 0x77, 0xe9, 0xc4, 0xfd, 0x95, 0x06, 0x5b, 0x32, 0xc9, 0xce, 0xd0, 0x92, 0x48, 0xf7, 0x5d, 0x25, - 0xbd, 0xd6, 0xb8, 0xf9, 0x82, 0x9c, 0xe4, 0xd7, 0xc0, 0x75, 0x46, 0x93, 0xb1, 0x4c, 0x5a, 0x09, - 0xf1, 0x98, 0x09, 0x87, 0x9c, 0xf4, 0xe4, 0xeb, 0x1d, 0xc3, 0xbc, 0xe5, 0x11, 0x2d, 0xd6, 0x27, - 0x49, 0x44, 0x15, 0x0c, 0x9d, 0xc1, 0x76, 0x46, 0x93, 0x6b, 0x09, 0xdc, 0x43, 0xd8, 0x36, 0xd9, - 0x78, 0xc2, 0xfb, 0xc1, 0xe8, 0xc8, 0xc2, 0x87, 0xce, 0x1a, 0x0e, 0x7d, 0x16, 0x04, 0x52, 0x6c, - 0x04, 0xd2, 0x07, 0xd0, 0xc9, 0xb2, 0x59, 0x3a, 0x18, 0xdf, 0x87, 0xad, 0xa7, 0xa3, 0x91, 0x3d, - 0x71, 0xd8, 0x13, 0x36, 0xed, 0xa7, 0x34, 0x09, 0x5f, 0x79, 0xb1, 0x26, 0xfc, 0xbb, 0xa8, 0x75, - 0xe2, 0x85, 0x2c, 0xf3, 0xfb, 0xa5, 0x55, 0xf8, 0x5e, 0x9c, 0x0e, 0xa7, 0xcc, 0x1a, 0x26, 0x2a, - 0xe4, 0xd2, 0x41, 0x90, 0x45, 0x3a, 0xa0, 0xe0, 0xf4, 0xaf, 0x96, 0x16, 0xfc, 0x6b, 0x0d, 0xe0, - 0x09, 0x76, 0xe5, 0x27, 0xce, 0xc8, 0x2d, 0x74, 0xbe, 0x01, 0xf5, 0x29, 0xda, 0x75, 0xd2, 0xc3, - 0x5f, 0x56, 0xcd, 0x18, 0xe6, 0x95, 0xdd, 0xb2, 0x27, 0xf1, 0x83, 0x22, 0x00, 0xfe, 0x0b, 0x8f, - 0x31, 0xff, 0xb9, 0x79, 0x2a, 0xaa, 0x5b, 0xc3, 0x8c, 0x61, 0x9e, 0x8e, 0x03, 0x7b, 0xc2, 0x9c, - 0x10, 0xa9, 0xe2, 0x11, 0x51, 0x30, 0xb4, 0x0f, 0x20, 0x02, 0x39, 0x57, 0x1f, 0x02, 0x55, 0x1e, - 0xfd, 0x28, 0x04, 0xfc, 0x9b, 0xeb, 0x11, 0x84, 0xd6, 0x38, 0xea, 0x01, 0x04, 0x80, 0xe5, 0x0a, - 0xd3, 0x4d, 0xa6, 0xbd, 0x84, 0xe8, 0x29, 0xb4, 0x79, 0x4b, 0x24, 0x9c, 0x26, 0x62, 0x16, 0xb9, - 0x46, 0x4b, 0xb2, 0xba, 0xa8, 0x4b, 0x8e, 0x64, 0x57, 0x12, 0xd9, 0xf4, 0x13, 0xc1, 0x4d, 0x78, - 0x71, 0x2e, 0xb7, 0x7d, 0x58, 0x15, 0xd3, 0x8f, 0x78, 0x70, 0x9a, 0x47, 0x2d, 0x1e, 0xce, 0xc4, - 0xf5, 0x66, 0x44, 0x8e, 0xf8, 0x09, 0x2f, 0x2c, 0xe2, 0x27, 0x2e, 0x71, 0x8a, 0x5f, 0xe2, 0x3a, - 0x33, 0x22, 0xd3, 0x3f, 0x69, 0xb0, 0x2a, 0xd8, 0x04, 0xe4, 0x1e, 0xd4, 0x6c, 0xb4, 0x1a, 0x59, - 0x35, 0x8f, 0xb6, 0x30, 0xa7, 0x32, 0xbe, 0x78, 0x5c, 0x32, 0xe5, 0x29, 0x7e, 0x5e, 0xa8, 0x85, - 0x5e, 0x50, 0xce, 0xab, 0xd6, 0xf2, 0xf3, 0xe2, 0x14, 0x3f, 0x2f, 0xc4, 0xa2, 0x87, 0x94, 0xf3, - 0xaa, 0x35, 0xfc, 0xbc, 0x38, 0xf5, 0xa0, 0x0e, 0x35, 0x91, 0x4b, 0xf4, 0x25, 0x6c, 0x20, 0xdf, - 0xd4, 0x0d, 0xec, 0xa4, 0xd4, 0xad, 0xc7, 0x6a, 0x75, 0x52, 0x6a, 0xd5, 0x63, 0xf1, 0x9d, 0x94, - 0xf8, 0x7a, 0x24, 0x86, 0xa7, 0x07, 0x0f, 0x5f, 0x94, 0x8d, 0x02, 0xa0, 0x0c, 0x88, 0x2a, 0x72, - 0xe9, 0xb2, 0xf7, 0x1e, 0xac, 0x0a, 0xe5, 0x53, 0x5d, 0x9c, 0x74, 0xb5, 0x19, 0xd1, 0xe8, 0x1f, - 0xca, 0x49, 0xad, 0x1f, 0x9c, 
0xb3, 0xa9, 0x35, 0xbf, 0xd6, 0x23, 0x39, 0x19, 0xd2, 0x72, 0x9d, - 0xee, 0xdc, 0x21, 0x2d, 0xd5, 0x7e, 0x55, 0xe7, 0xb5, 0x5f, 0x2b, 0x4a, 0xfb, 0x85, 0x97, 0x03, - 0xe5, 0xc9, 0x76, 0x4d, 0x42, 0xfc, 0xf4, 0xc8, 0x9e, 0x05, 0xe7, 0xd8, 0xac, 0xd5, 0x4d, 0x01, - 0x70, 0x6d, 0x78, 0xef, 0xab, 0xd7, 0x11, 0x89, 0xdf, 0xfc, 0x2a, 0x8f, 0x7c, 0x77, 0x2a, 0x9e, - 0x0d, 0xbd, 0x21, 0x86, 0xe9, 0x04, 0x13, 0xd1, 0x9f, 0x59, 0xfe, 0x98, 0x85, 0x3a, 0x24, 0x74, - 0x81, 0x51, 0x5f, 0x1e, 0xe9, 0x97, 0x6b, 0x79, 0x79, 0x0e, 0x60, 0xeb, 0x11, 0x0b, 0xcf, 0x66, - 0x7d, 0xfe, 0x76, 0x1f, 0x8f, 0xc6, 0x0b, 0x1e, 0x1e, 0xfa, 0x1c, 0xb6, 0x33, 0x67, 0x97, 0x56, - 0x91, 0x40, 0x75, 0x30, 0x1a, 0x47, 0x01, 0xc3, 0x6f, 0xda, 0x83, 0xf5, 0x47, 0x2c, 0x54, 0x64, - 0xdf, 0x51, 0x9e, 0x1a, 0xd9, 0x57, 0x1e, 0x8f, 0xc6, 0xcf, 0x5e, 0x79, 0x6c, 0xc1, 0xbb, 0x73, - 0x0a, 0xad, 0x88, 0xcb, 0xd2, 0x5a, 0xb5, 0xa1, 0x32, 0x18, 0xc5, 0x1d, 0xe9, 0x60, 0x34, 0xa6, - 0xdb, 0xb0, 0xf9, 0x88, 0xc9, 0x7b, 0x9d, 0x68, 0x46, 0xf7, 0xd1, 0x5b, 0x0a, 0x5a, 0x8a, 0x92, - 0x0c, 0xb4, 0x84, 0xc1, 0xef, 0x34, 0x20, 0x8f, 0x2d, 0x67, 0x68, 0xb3, 0x87, 0xbe, 0xef, 0xfa, - 0x73, 0xdb, 0x70, 0xa4, 0x7e, 0xa7, 0x24, 0xdf, 0x85, 0x46, 0x7f, 0xe2, 0xd8, 0xee, 0xf8, 0x53, - 0x37, 0x88, 0x5a, 0xb2, 0x18, 0x81, 0x29, 0xfa, 0xd2, 0x8e, 0x87, 0x3b, 0xfe, 0x4d, 0x03, 0xd8, - 0x4c, 0xa9, 0x74, 0x2d, 0x09, 0xf6, 0x08, 0xb6, 0x9f, 0xf9, 0x96, 0x13, 0x8c, 0x98, 0x9f, 0x6e, - 0xee, 0x92, 0xf7, 0x48, 0x53, 0xdf, 0x23, 0xa5, 0x6c, 0x09, 0xc9, 0x12, 0xe2, 0xcd, 0x4d, 0x96, - 0xd1, 0xd2, 0x0f, 0xfc, 0x30, 0x5e, 0xde, 0xa4, 0xe6, 0x85, 0xdb, 0x4a, 0x54, 0xd6, 0x95, 0x31, - 0xe6, 0xc5, 0x51, 0xd4, 0x68, 0x4a, 0x4d, 0xcb, 0x73, 0x34, 0x15, 0xa1, 0x89, 0x34, 0x0d, 0xe3, - 0x12, 0x77, 0x9d, 0xcd, 0xff, 0x9f, 0x35, 0xe8, 0xe0, 0x3e, 0xee, 0x85, 0x65, 0x4f, 0x86, 0xb8, - 0x2a, 0x4c, 0x2e, 0x14, 0x4c, 0xdd, 0x21, 0xfb, 0xfc, 0xd2, 0xb2, 0x67, 0xd2, 0xdd, 0x8f, 0x4b, - 0x66, 0x83, 0xe3, 0x5e, 0x70, 0x14, 0x39, 0x80, 0x36, 0x76, 0xf3, 0x9f, 0xf3, 0xa1, 0x47, 0x1e, - 0x43, 0x75, 0x1e, 0x6b, 0x66, 0x2b, 0xee, 0xf3, 0xc5, 0xd9, 0x85, 0x65, 0x97, 0xe7, 0xac, 0xd2, - 0x5a, 0xc7, 0xf0, 0x83, 0x9a, 0x58, 0x4b, 0x3c, 0x68, 0x2a, 0x83, 0x04, 0xbd, 0x82, 0x9d, 0x9c, - 0xc6, 0xd7, 0xe2, 0xab, 0x27, 0xb0, 0x7d, 0x16, 0xba, 0x5e, 0xde, 0x53, 0x0b, 0x27, 0xc7, 0xd8, - 0xb8, 0x72, 0xda, 0x38, 0x7a, 0xc9, 0x3d, 0x9f, 0x66, 0x77, 0x1d, 0x66, 0x1c, 0xfc, 0x00, 0x6e, - 0x64, 0xf6, 0x12, 0x64, 0x03, 0xd6, 0x4f, 0x9c, 0x4b, 0xae, 0x88, 0x40, 0xb4, 0x4b, 0x64, 0x0d, - 0xea, 0x67, 0x17, 0x13, 0x8f, 0xc3, 0x6d, 0x8d, 0x43, 0x0f, 0xbf, 0x60, 0x03, 0x84, 0xca, 0x07, - 0x7d, 0xa8, 0x47, 0x33, 0x15, 0xd9, 0x84, 0x1b, 0xf2, 0xa7, 0x11, 0xaa, 0x5d, 0x22, 0x37, 0xa0, - 0x89, 0x21, 0x12, 0xa8, 0xb6, 0x46, 0xda, 0xb0, 0x26, 0x56, 0x85, 0x12, 0x53, 0x26, 0x2d, 0x00, - 0x6e, 0xbd, 0x84, 0x2b, 0x08, 0x9f, 0xbb, 0x57, 0x12, 0xae, 0x1e, 0xfc, 0x10, 0xea, 0x51, 0xa3, - 0xae, 0xc8, 0x88, 0x50, 0xed, 0x12, 0xd7, 0xf9, 0xe1, 0xe5, 0x64, 0x10, 0xc6, 0x28, 0x8d, 0xec, - 0xc0, 0xe6, 0xb1, 0xe5, 0x0c, 0x98, 0x9d, 0x26, 0x94, 0x0f, 0x1c, 0x58, 0x95, 0x6f, 0x01, 0x57, - 0x4d, 0xf2, 0xe2, 0xa0, 0x30, 0x94, 0xbf, 0x4c, 0x08, 0x69, 0x5c, 0x0d, 0x51, 0xa8, 0x11, 0x46, - 0x35, 0x85, 0x1f, 0x11, 0x16, 0x6a, 0xa2, 0x8a, 0x08, 0x57, 0xc9, 0x16, 0xb4, 0xf1, 0xd7, 0x6c, - 0xea, 0xd9, 0x56, 0x28, 0xb0, 0x2b, 0x07, 0x3d, 0x68, 0xc4, 0xc5, 0x80, 0x1f, 0x91, 0x12, 0x63, - 0x5c, 0xbb, 0xc4, 0x3d, 0x82, 0x2e, 0x42, 0xdc, 0x8b, 0xa3, 0xb6, 0x26, 0x9c, 0xe6, 0x7a, 0x11, - 0xa2, 0x7c, 0xf4, 0xaf, 0x0d, 0xa8, 0x09, 0x65, 0xc8, 
0x67, 0xd0, 0x88, 0xb7, 0xe6, 0x04, 0x3b, - 0xc2, 0xec, 0x16, 0xdf, 0xd8, 0xce, 0x60, 0x45, 0xd8, 0xe9, 0x9d, 0x5f, 0xfe, 0xf5, 0x9f, 0x5f, - 0x95, 0x6f, 0xd2, 0xad, 0x43, 0xcb, 0x9b, 0x04, 0x87, 0x97, 0xf7, 0x2d, 0xdb, 0x3b, 0xb7, 0xee, - 0x1f, 0xf2, 0x34, 0x0c, 0x3e, 0xd0, 0x0e, 0xc8, 0x08, 0x9a, 0xca, 0x6a, 0x9a, 0x74, 0x38, 0x9b, - 0xfc, 0x32, 0xdc, 0xd8, 0xc9, 0xe1, 0xa5, 0x80, 0xf7, 0x51, 0xc0, 0x9e, 0x71, 0xab, 0x48, 0xc0, - 0xe1, 0x6b, 0xfe, 0xcc, 0x7e, 0xc9, 0xe5, 0x7c, 0x08, 0x90, 0x6c, 0x8b, 0x09, 0x6a, 0x9b, 0xdb, - 0x40, 0x1b, 0x9d, 0x2c, 0x5a, 0x0a, 0x29, 0x11, 0x1b, 0x9a, 0xca, 0xda, 0x94, 0x18, 0x99, 0x3d, - 0xaa, 0xb2, 0xe7, 0x35, 0x6e, 0x15, 0xd2, 0x24, 0xa7, 0x77, 0x51, 0xdd, 0x2e, 0xd9, 0xcd, 0xa8, - 0x1b, 0xe0, 0x51, 0xa9, 0x2f, 0x39, 0x86, 0x35, 0x75, 0x3b, 0x49, 0xd0, 0xfa, 0x82, 0xb5, 0xac, - 0xa1, 0xe7, 0x09, 0xb1, 0xca, 0x1f, 0xc3, 0x7a, 0xea, 0xa2, 0x11, 0x3d, 0xb7, 0x13, 0x8c, 0xd8, - 0xdc, 0x2c, 0xa0, 0xc4, 0x7c, 0x3e, 0x83, 0x4e, 0x7e, 0x9b, 0x86, 0x5e, 0xbc, 0xad, 0x04, 0x25, - 0xbf, 0xd1, 0x32, 0xba, 0xf3, 0xc8, 0x31, 0xeb, 0xa7, 0xd0, 0xce, 0x6e, 0x9d, 0x08, 0xba, 0x6f, - 0xce, 0x92, 0xcc, 0xd8, 0x2d, 0x26, 0xc6, 0x0c, 0x3f, 0x80, 0x46, 0xbc, 0xd4, 0x11, 0x89, 0x9a, - 0xdd, 0x2d, 0x89, 0x44, 0xcd, 0x6d, 0x7e, 0x68, 0x89, 0x8c, 0x61, 0x3d, 0xb5, 0x46, 0x11, 0xfe, - 0x2a, 0xda, 0xf1, 0x08, 0x7f, 0x15, 0xee, 0x5c, 0xe8, 0x5d, 0x0c, 0xf0, 0x2d, 0xa3, 0x93, 0x0d, - 0xb0, 0x28, 0x7f, 0x3c, 0x15, 0x4f, 0xa0, 0x95, 0xde, 0x78, 0x90, 0x9b, 0xe2, 0xfd, 0x2e, 0x58, - 0xa6, 0x18, 0x46, 0x11, 0x29, 0xd6, 0xd9, 0x87, 0xf5, 0xd4, 0xe2, 0x42, 0xea, 0x5c, 0xb0, 0x0b, - 0x91, 0x3a, 0x17, 0x6d, 0x39, 0xe8, 0xff, 0xa1, 0xce, 0xef, 0x1f, 0xbc, 0x9b, 0xd1, 0x59, 0xce, - 0x3f, 0x87, 0xaf, 0x79, 0x03, 0xfb, 0x65, 0x94, 0x9c, 0x17, 0xb1, 0x9f, 0x44, 0x89, 0x4b, 0xf9, - 0x29, 0xb5, 0xfc, 0x48, 0xf9, 0x29, 0xbd, 0xe0, 0xa0, 0xef, 0xa1, 0xcc, 0x3b, 0x86, 0x91, 0x91, - 0x29, 0xe6, 0xc3, 0xc3, 0xd7, 0xae, 0x87, 0xd7, 0xf6, 0xa7, 0x00, 0xc9, 0x84, 0x27, 0xae, 0x6d, - 0x6e, 0xc8, 0x14, 0xd7, 0x36, 0x3f, 0x08, 0xd2, 0x2e, 0xca, 0xd0, 0x49, 0xa7, 0xd8, 0x2e, 0x32, - 0x4a, 0x22, 0x2e, 0x26, 0xa7, 0x54, 0xc4, 0xd5, 0x49, 0x2f, 0x1d, 0xf1, 0xd4, 0xac, 0x43, 0xf7, - 0x50, 0x8a, 0x61, 0x6c, 0x67, 0x23, 0x8e, 0xc7, 0xb8, 0x11, 0x36, 0x0e, 0x0b, 0xc9, 0x0c, 0x22, - 0xe4, 0x14, 0x8d, 0x30, 0x42, 0x4e, 0xe1, 0xc0, 0x12, 0x55, 0x3a, 0xd2, 0xcd, 0xca, 0x99, 0xf5, - 0xd5, 0x62, 0x47, 0x9e, 0x41, 0x4d, 0x0c, 0x15, 0x64, 0x43, 0x32, 0x53, 0xf8, 0x13, 0x15, 0x25, - 0x19, 0xbf, 0x83, 0x8c, 0x6f, 0x93, 0x45, 0x25, 0x94, 0xfc, 0x1c, 0x9a, 0x4a, 0x1f, 0x2e, 0xea, - 0x74, 0x7e, 0x56, 0x10, 0x75, 0xba, 0xa0, 0x61, 0x9f, 0xeb, 0x25, 0xc6, 0x4f, 0xe1, 0xb5, 0x38, - 0x86, 0x35, 0x75, 0x4e, 0x11, 0x45, 0xaf, 0x60, 0xa0, 0x31, 0xf4, 0x3c, 0x21, 0xbe, 0x10, 0x27, - 0xd0, 0x4a, 0x37, 0xdc, 0xe2, 0x6e, 0x15, 0x76, 0xf3, 0xe2, 0x6e, 0x15, 0xf7, 0xe7, 0xb4, 0xc4, - 0xf5, 0x51, 0x3b, 0x62, 0xa2, 0x3e, 0x41, 0xa9, 0xa2, 0xa4, 0xe7, 0x09, 0x31, 0x93, 0x53, 0xb8, - 0x91, 0xe9, 0x16, 0xc5, 0xdb, 0x51, 0xdc, 0xf4, 0x8a, 0xb7, 0x63, 0x4e, 0x7b, 0x29, 0xac, 0x4b, - 0xf7, 0x6c, 0xc2, 0xba, 0xc2, 0xb6, 0xd0, 0x30, 0x8a, 0x48, 0x31, 0xab, 0x9f, 0xe0, 0xb0, 0x98, - 0x90, 0xe4, 0xc3, 0xd6, 0x95, 0xbe, 0xcd, 0x12, 0x22, 0xa6, 0x77, 0xe6, 0xd2, 0x63, 0xce, 0xcf, - 0x81, 0xa4, 0x0e, 0x88, 0x84, 0xb9, 0x9d, 0xfb, 0x61, 0x2a, 0x6f, 0xba, 0xf3, 0xc8, 0x31, 0x5b, - 0x2b, 0x7e, 0x86, 0xb2, 0xac, 0xef, 0x2a, 0xfe, 0x9f, 0xc3, 0x9e, 0x2e, 0x3a, 0x12, 0x89, 0x78, - 0xa0, 0xff, 0xe5, 0x4d, 0x57, 0xfb, 0xe6, 0x4d, 0x57, 0xfb, 0xc7, 0x9b, 0xae, 
0xf6, 0xdb, 0xb7, - 0xdd, 0xd2, 0x37, 0x6f, 0xbb, 0xa5, 0xbf, 0xbd, 0xed, 0x96, 0xfa, 0x35, 0xfc, 0xfb, 0xc2, 0xff, - 0xff, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x85, 0xf5, 0x22, 0xfb, 0x02, 0x21, 0x00, 0x00, + // 2422 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x4d, 0x6f, 0x1b, 0xd7, + 0x91, 0x4b, 0x52, 0x12, 0x35, 0xfa, 0x30, 0xf5, 0x24, 0x51, 0xeb, 0xb5, 0x4c, 0x2b, 0x9b, 0x0f, + 0x08, 0x42, 0x61, 0x21, 0x6a, 0x4f, 0x01, 0x52, 0x34, 0x16, 0x1d, 0x5b, 0xa8, 0x1c, 0xa7, 0x2b, + 0xd9, 0x6d, 0x50, 0xa0, 0xe9, 0x92, 0x7c, 0xa4, 0x08, 0x2d, 0x77, 0x37, 0xbb, 0x4b, 0x29, 0x86, + 0x91, 0x1e, 0x7a, 0xea, 0xa5, 0x5f, 0x48, 0xd1, 0x1c, 0x7b, 0xe8, 0x1f, 0xe8, 0x4f, 0xe8, 0xb1, + 0xc7, 0x00, 0xbd, 0xf4, 0x52, 0xa0, 0xb0, 0xfb, 0x43, 0x8a, 0x37, 0xf3, 0x76, 0xf7, 0xed, 0x07, + 0x99, 0x32, 0x40, 0x85, 0xde, 0x76, 0x66, 0x1e, 0xe7, 0xfb, 0xcd, 0x9b, 0x19, 0x09, 0xd6, 0xfb, + 0xe3, 0xb1, 0x1d, 0x46, 0x3c, 0xb8, 0xef, 0x07, 0x5e, 0xe4, 0xb1, 0xaa, 0xdf, 0x35, 0xd6, 0xfb, + 0xe3, 0x6b, 0x2f, 0xb8, 0x8c, 0x71, 0xc6, 0xee, 0xd0, 0xf3, 0x86, 0x0e, 0x3f, 0xb4, 0xfd, 0xd1, + 0xa1, 0xed, 0xba, 0x5e, 0x64, 0x47, 0x23, 0xcf, 0x0d, 0x89, 0x6a, 0xfe, 0x02, 0x9a, 0x67, 0x91, + 0x1d, 0x44, 0xe7, 0x76, 0x78, 0x69, 0xf1, 0xcf, 0x26, 0x3c, 0x8c, 0x18, 0x83, 0x7a, 0x64, 0x87, + 0x97, 0xba, 0xb6, 0xa7, 0xed, 0x2f, 0x5b, 0xf8, 0xcd, 0x74, 0x58, 0x0a, 0xbd, 0x49, 0xd0, 0xe3, + 0xa1, 0x5e, 0xdd, 0xab, 0xed, 0x2f, 0x5b, 0x31, 0xc8, 0xda, 0x00, 0x01, 0x1f, 0x7b, 0x57, 0xfc, + 0x09, 0x8f, 0x6c, 0xbd, 0xb6, 0xa7, 0xed, 0x37, 0x2c, 0x05, 0xc3, 0x76, 0x61, 0x39, 0x44, 0x09, + 0xa3, 0x31, 0xd7, 0xeb, 0xc8, 0x32, 0x45, 0x98, 0x5f, 0x6a, 0xb0, 0xa1, 0x28, 0x10, 0xfa, 0x9e, + 0x1b, 0x72, 0xd6, 0x82, 0xc5, 0x80, 0x87, 0x13, 0x27, 0x42, 0x1d, 0x1a, 0x96, 0x84, 0x58, 0x13, + 0x6a, 0xe3, 0x70, 0xa8, 0x57, 0x91, 0x8b, 0xf8, 0x64, 0x47, 0xa9, 0x5e, 0xb5, 0xbd, 0xda, 0xfe, + 0xca, 0x91, 0x7e, 0xdf, 0xef, 0xde, 0x3f, 0xf6, 0xc6, 0x63, 0xcf, 0xfd, 0x31, 0xba, 0x21, 0x66, + 0x9a, 0x6a, 0xbc, 0x07, 0x2b, 0xbd, 0x0b, 0xde, 0x13, 0xe2, 0x84, 0x08, 0xd2, 0x49, 0x45, 0x99, + 0x3f, 0x03, 0xf6, 0xd4, 0xe7, 0x81, 0x1d, 0x71, 0xd5, 0x2f, 0x06, 0x54, 0x3d, 0x1f, 0x35, 0x5a, + 0x3f, 0x02, 0x21, 0x46, 0x10, 0x9f, 0xfa, 0x56, 0xd5, 0xf3, 0x85, 0xcf, 0x5c, 0x7b, 0xcc, 0xa5, + 0x6a, 0xf8, 0xad, 0xfa, 0xac, 0x96, 0xf1, 0x99, 0xf9, 0x5b, 0x0d, 0x36, 0x33, 0x02, 0xa4, 0xdd, + 0xb3, 0x24, 0xa4, 0x3e, 0xa9, 0x96, 0xf9, 0xa4, 0x56, 0xea, 0x93, 0xfa, 0x7f, 0xe9, 0x13, 0xf3, + 0x03, 0xd8, 0x78, 0xe6, 0xf7, 0x73, 0x06, 0xcf, 0x95, 0x08, 0xe6, 0x1f, 0x34, 0x60, 0x2a, 0x8f, + 0xff, 0x93, 0x58, 0x7e, 0x08, 0xad, 0x1f, 0x4d, 0x78, 0xf0, 0xe2, 0x2c, 0xb2, 0xa3, 0x49, 0x78, + 0x3a, 0x0a, 0x23, 0xc5, 0x3c, 0x8c, 0x99, 0x56, 0x1e, 0xb3, 0x9c, 0x79, 0x57, 0xb0, 0x53, 0xe0, + 0x33, 0xb7, 0x89, 0xef, 0xe6, 0x4d, 0xdc, 0x11, 0x26, 0x2a, 0x7c, 0x8b, 0x91, 0x39, 0x86, 0xcd, + 0xb3, 0x0b, 0xef, 0xba, 0xd3, 0x39, 0x3d, 0xf5, 0x7a, 0x97, 0xe1, 0xb7, 0x8b, 0xcd, 0x9f, 0x34, + 0x58, 0x92, 0x1c, 0xd8, 0x3a, 0x54, 0x4f, 0x3a, 0xf2, 0x77, 0xd5, 0x93, 0x4e, 0xc2, 0xa9, 0xaa, + 0x70, 0x62, 0x50, 0x1f, 0x7b, 0x7d, 0x2e, 0xb3, 0x0a, 0xbf, 0xd9, 0x16, 0x2c, 0x78, 0xd7, 0x2e, + 0x0f, 0xa4, 0x93, 0x09, 0x10, 0x27, 0x3b, 0x9d, 0xd3, 0x50, 0x5f, 0x40, 0x81, 0xf8, 0x2d, 0xfc, + 0x11, 0xbe, 0x70, 0x7b, 0xbc, 0xaf, 0x2f, 0x22, 0x56, 0x42, 0xcc, 0x80, 0xc6, 0xc4, 0x95, 0x94, + 0x25, 0xa4, 0x24, 0xb0, 0xd9, 0x83, 0xad, 0xac, 0x99, 0x73, 0xfb, 0xf6, 0x0d, 0x58, 0x70, 0xc4, + 0x4f, 0xa5, 0x67, 0x57, 0x84, 0x67, 0x25, 0x3b, 0x8b, 0x28, 0xe6, 
0x3f, 0x35, 0xd8, 0x7a, 0xe6, + 0x8a, 0xef, 0x98, 0x20, 0xbd, 0x99, 0xf7, 0x89, 0x09, 0xab, 0x01, 0xf7, 0x1d, 0xbb, 0xc7, 0x9f, + 0xa2, 0xc9, 0x24, 0x26, 0x83, 0x13, 0xa9, 0x37, 0xf0, 0x82, 0x1e, 0xb7, 0xb0, 0xd6, 0xc9, 0xca, + 0xa7, 0xa2, 0xd8, 0x9b, 0x78, 0x9d, 0xeb, 0x78, 0x9d, 0x37, 0x85, 0x3a, 0x19, 0xd9, 0xf2, 0x5e, + 0x2b, 0x41, 0x5b, 0xc8, 0x56, 0x56, 0x03, 0x1a, 0x7d, 0x3b, 0xb2, 0xbb, 0x76, 0xc8, 0xf5, 0x45, + 0x54, 0x20, 0x81, 0x45, 0x30, 0x22, 0xbb, 0xeb, 0x70, 0x7d, 0x89, 0x82, 0x81, 0x80, 0xf9, 0x01, + 0x6c, 0xe7, 0xcc, 0x9b, 0xd7, 0x8b, 0xa6, 0x05, 0xb7, 0x65, 0x65, 0x8a, 0xaf, 0x9c, 0x63, 0xbf, + 0x88, 0xdd, 0x74, 0x47, 0xa9, 0x4f, 0xe8, 0x5f, 0xa4, 0x16, 0x0d, 0xc9, 0x65, 0xdf, 0x57, 0x1a, + 0x18, 0x65, 0x4c, 0xa5, 0x72, 0x33, 0xb9, 0xfe, 0x6f, 0xcb, 0xde, 0x57, 0x1a, 0xec, 0x7c, 0x3c, + 0x09, 0x86, 0x65, 0xc6, 0x2a, 0xf6, 0x68, 0x85, 0xc0, 0x8c, 0x5c, 0xbb, 0x17, 0x8d, 0xae, 0xb8, + 0xd4, 0x2a, 0x81, 0xf1, 0x36, 0x89, 0x97, 0x4e, 0x28, 0x56, 0xb3, 0xf0, 0x5b, 0x9c, 0x1f, 0x8c, + 0x1c, 0x8e, 0xc5, 0x86, 0x2e, 0x4f, 0x02, 0xe3, 0x5d, 0x99, 0x74, 0x3b, 0xa3, 0x40, 0x5f, 0x40, + 0x8a, 0x84, 0xcc, 0xcf, 0x41, 0x2f, 0x2a, 0x76, 0x13, 0x25, 0xd5, 0xbc, 0x82, 0xe6, 0xb1, 0xa8, + 0x9f, 0xdf, 0xf4, 0x12, 0xb4, 0x60, 0x91, 0x07, 0xc1, 0xb1, 0x4b, 0x91, 0xa9, 0x59, 0x12, 0x12, + 0x7e, 0xbb, 0xb6, 0x03, 0x57, 0x10, 0xc8, 0x09, 0x31, 0xf8, 0x0d, 0xad, 0xc0, 0xfb, 0xb0, 0xa1, + 0xc8, 0x9d, 0x3b, 0x71, 0x7f, 0xa5, 0xc1, 0x96, 0x4c, 0xb2, 0x33, 0xb4, 0x24, 0xd6, 0x7d, 0x57, + 0x49, 0xaf, 0x55, 0x61, 0x3e, 0x91, 0xd3, 0xfc, 0xea, 0x79, 0xee, 0x60, 0x34, 0x94, 0x49, 0x2b, + 0x21, 0x11, 0x33, 0x72, 0xc8, 0x49, 0x47, 0xbe, 0xde, 0x09, 0x2c, 0x5a, 0x1e, 0x6a, 0xb1, 0x3e, + 0x4a, 0x23, 0xaa, 0x60, 0xcc, 0x09, 0x6c, 0xe7, 0x34, 0xb9, 0x91, 0xc0, 0x3d, 0x84, 0x6d, 0x8b, + 0x0f, 0x47, 0xa2, 0x1f, 0x8c, 0x8f, 0xcc, 0x7c, 0xe8, 0xec, 0x7e, 0x3f, 0xe0, 0x61, 0x28, 0xc5, + 0xc6, 0xa0, 0xf9, 0x00, 0x5a, 0x79, 0x36, 0x73, 0x07, 0xe3, 0xfb, 0xb0, 0xf5, 0x74, 0x30, 0x70, + 0x46, 0x2e, 0x7f, 0xc2, 0xc7, 0xdd, 0x8c, 0x26, 0xd1, 0x0b, 0x3f, 0xd1, 0x44, 0x7c, 0x97, 0xb5, + 0x4e, 0xa2, 0x90, 0xe5, 0x7e, 0x3f, 0xb7, 0x0a, 0xdf, 0x4b, 0xd2, 0xe1, 0x94, 0xdb, 0xfd, 0x54, + 0x85, 0x42, 0x3a, 0x10, 0x99, 0xd2, 0x01, 0x05, 0x67, 0x7f, 0x35, 0xb7, 0xe0, 0xdf, 0x68, 0x00, + 0x4f, 0xb0, 0x2b, 0x3f, 0x71, 0x07, 0x5e, 0xa9, 0xf3, 0x0d, 0x68, 0x8c, 0xd1, 0xae, 0x93, 0x0e, + 0xfe, 0xb2, 0x6e, 0x25, 0xb0, 0xa8, 0xec, 0xb6, 0x33, 0x4a, 0x1e, 0x14, 0x02, 0xc4, 0x2f, 0x7c, + 0xce, 0x83, 0x67, 0xd6, 0x29, 0x55, 0xb7, 0x65, 0x2b, 0x81, 0x45, 0x3a, 0xf6, 0x9c, 0x11, 0x77, + 0x23, 0xa4, 0xd2, 0x23, 0xa2, 0x60, 0xcc, 0x2e, 0x00, 0x05, 0x72, 0xaa, 0x3e, 0x0c, 0xea, 0x22, + 0xfa, 0x71, 0x08, 0xc4, 0xb7, 0xd0, 0x23, 0x8c, 0xec, 0x61, 0xdc, 0x03, 0x10, 0x80, 0xe5, 0x0a, + 0xd3, 0x4d, 0xa6, 0xbd, 0x84, 0xcc, 0x53, 0x68, 0x8a, 0x96, 0x88, 0x9c, 0x46, 0x31, 0x8b, 0x5d, + 0xa3, 0xa5, 0x59, 0x5d, 0xd6, 0x25, 0xc7, 0xb2, 0x6b, 0xa9, 0x6c, 0xf3, 0x23, 0xe2, 0x46, 0x5e, + 0x9c, 0xca, 0x6d, 0x1f, 0x96, 0x68, 0xfa, 0xa1, 0x07, 0x67, 0xe5, 0x68, 0x5d, 0x84, 0x33, 0x75, + 0xbd, 0x15, 0x93, 0x63, 0x7e, 0xe4, 0x85, 0x59, 0xfc, 0xe8, 0x12, 0x67, 0xf8, 0xa5, 0xae, 0xb3, + 0x62, 0xb2, 0xf9, 0x67, 0x0d, 0x96, 0x88, 0x4d, 0xc8, 0xee, 0xc3, 0xa2, 0x83, 0x56, 0x23, 0xab, + 0x95, 0xa3, 0x2d, 0xcc, 0xa9, 0x9c, 0x2f, 0x1e, 0x57, 0x2c, 0x79, 0x4a, 0x9c, 0x27, 0xb5, 0xd0, + 0x0b, 0xca, 0x79, 0xd5, 0x5a, 0x71, 0x9e, 0x4e, 0x89, 0xf3, 0x24, 0x16, 0x3d, 0xa4, 0x9c, 0x57, + 0xad, 0x11, 0xe7, 0xe9, 0xd4, 0x83, 0x06, 0x2c, 0x52, 0x2e, 0x99, 0x9f, 0xc1, 0x06, 0xf2, 
0xcd, + 0xdc, 0xc0, 0x56, 0x46, 0xdd, 0x46, 0xa2, 0x56, 0x2b, 0xa3, 0x56, 0x23, 0x11, 0xdf, 0xca, 0x88, + 0x6f, 0xc4, 0x62, 0x44, 0x7a, 0x88, 0xf0, 0xc5, 0xd9, 0x48, 0x80, 0xc9, 0x81, 0xa9, 0x22, 0xe7, + 0x2e, 0x7b, 0x6f, 0xc3, 0x12, 0x29, 0x9f, 0xe9, 0xe2, 0xa4, 0xab, 0xad, 0x98, 0x66, 0xfe, 0xb1, + 0x9a, 0xd6, 0xfa, 0xde, 0x05, 0x1f, 0xdb, 0xd3, 0x6b, 0x3d, 0x92, 0xd3, 0x21, 0xad, 0xd0, 0xe9, + 0x4e, 0x1d, 0xd2, 0x32, 0xed, 0x57, 0x7d, 0x5a, 0xfb, 0xb5, 0xa0, 0xb4, 0x5f, 0x78, 0x39, 0x50, + 0x9e, 0x6c, 0xd7, 0x24, 0x24, 0x4e, 0x0f, 0x9c, 0x49, 0x78, 0x81, 0xcd, 0x5a, 0xc3, 0x22, 0x40, + 0x68, 0x23, 0x7a, 0x5f, 0xbd, 0x81, 0x48, 0xfc, 0x16, 0x57, 0x79, 0x10, 0x78, 0x63, 0x7a, 0x36, + 0xf4, 0x65, 0x1a, 0xa6, 0x53, 0x4c, 0x4c, 0x3f, 0xb7, 0x83, 0x21, 0x8f, 0x74, 0x48, 0xe9, 0x84, + 0x51, 0x5f, 0x1e, 0xe9, 0x97, 0x1b, 0x79, 0x79, 0x0e, 0x60, 0xeb, 0x11, 0x8f, 0xce, 0x26, 0x5d, + 0xf1, 0x76, 0x1f, 0x0f, 0x86, 0x33, 0x1e, 0x1e, 0xf3, 0x19, 0x6c, 0xe7, 0xce, 0xce, 0xad, 0x22, + 0x83, 0x7a, 0x6f, 0x30, 0x8c, 0x03, 0x86, 0xdf, 0x66, 0x07, 0xd6, 0x1e, 0xf1, 0x48, 0x91, 0x7d, + 0x4f, 0x79, 0x6a, 0x64, 0x5f, 0x79, 0x3c, 0x18, 0x9e, 0xbf, 0xf0, 0xf9, 0x8c, 0x77, 0xe7, 0x14, + 0xd6, 0x63, 0x2e, 0x73, 0x6b, 0xd5, 0x84, 0x5a, 0x6f, 0x90, 0x74, 0xa4, 0xbd, 0xc1, 0xd0, 0xdc, + 0x86, 0xcd, 0x47, 0x5c, 0xde, 0xeb, 0x54, 0x33, 0x73, 0x1f, 0xbd, 0xa5, 0xa0, 0xa5, 0x28, 0xc9, + 0x40, 0x4b, 0x19, 0xfc, 0x5e, 0x03, 0xf6, 0xd8, 0x76, 0xfb, 0x0e, 0x7f, 0x18, 0x04, 0x5e, 0x30, + 0xb5, 0x0d, 0x47, 0xea, 0xb7, 0x4a, 0xf2, 0x5d, 0x58, 0xee, 0x8e, 0x5c, 0xc7, 0x1b, 0x7e, 0xec, + 0x85, 0x71, 0x4b, 0x96, 0x20, 0x30, 0x45, 0x3f, 0x73, 0x92, 0xe1, 0x4e, 0x7c, 0x9b, 0x21, 0x6c, + 0x66, 0x54, 0xba, 0x91, 0x04, 0x7b, 0x04, 0xdb, 0xe7, 0x81, 0xed, 0x86, 0x03, 0x1e, 0x64, 0x9b, + 0xbb, 0xf4, 0x3d, 0xd2, 0xd4, 0xf7, 0x48, 0x29, 0x5b, 0x24, 0x59, 0x42, 0xa2, 0xb9, 0xc9, 0x33, + 0x9a, 0xfb, 0x81, 0xef, 0x27, 0xcb, 0x9b, 0xcc, 0xbc, 0x70, 0x57, 0x89, 0xca, 0x9a, 0x32, 0xc6, + 0x3c, 0x3f, 0x8a, 0x1b, 0x4d, 0xa9, 0x69, 0x75, 0x8a, 0xa6, 0x14, 0x9a, 0x58, 0xd3, 0x28, 0x29, + 0x71, 0x37, 0xd9, 0xfc, 0xff, 0x45, 0x83, 0x16, 0xee, 0xe3, 0x9e, 0xdb, 0xce, 0xa8, 0x8f, 0xab, + 0xc2, 0xf4, 0x42, 0xc1, 0xd8, 0xeb, 0xf3, 0x4f, 0xaf, 0x6c, 0x67, 0x22, 0xdd, 0xfd, 0xb8, 0x62, + 0x2d, 0x0b, 0xdc, 0x73, 0x81, 0x62, 0x07, 0xd0, 0xc4, 0x6e, 0xfe, 0x53, 0x31, 0xf4, 0xc8, 0x63, + 0xa8, 0xce, 0x63, 0xcd, 0x5a, 0x4f, 0xfa, 0x7c, 0x3a, 0x3b, 0xb3, 0xec, 0x8a, 0x9c, 0x55, 0x5a, + 0xeb, 0x04, 0x7e, 0xb0, 0x48, 0x6b, 0x89, 0x07, 0x2b, 0xca, 0x20, 0x61, 0x5e, 0xc3, 0x4e, 0x41, + 0xe3, 0x1b, 0xf1, 0xd5, 0x13, 0xd8, 0x3e, 0x8b, 0x3c, 0xbf, 0xe8, 0xa9, 0x99, 0x93, 0x63, 0x62, + 0x5c, 0x35, 0x6b, 0x9c, 0x79, 0x25, 0x3c, 0x9f, 0x65, 0x77, 0x23, 0x66, 0xfc, 0x5a, 0x83, 0x1d, + 0xda, 0xdb, 0x15, 0x2d, 0x51, 0xf5, 0xd5, 0xb2, 0xfa, 0xce, 0x58, 0x09, 0x67, 0x8a, 0x4a, 0x2d, + 0x5f, 0x54, 0xda, 0x00, 0x04, 0x3c, 0x3a, 0x3f, 0xe9, 0xc4, 0xd3, 0x53, 0x8a, 0x11, 0x93, 0x6f, + 0x51, 0x9d, 0x9b, 0xf0, 0xc4, 0xc1, 0x0f, 0xe0, 0x56, 0x6e, 0x43, 0xc3, 0x36, 0x60, 0xed, 0xc4, + 0xbd, 0x12, 0x8a, 0x10, 0xa2, 0x59, 0x61, 0xab, 0xd0, 0x38, 0xbb, 0x1c, 0xf9, 0x02, 0x6e, 0x6a, + 0x02, 0x7a, 0xf8, 0x39, 0xef, 0x21, 0x54, 0x3d, 0xe8, 0x42, 0x23, 0x9e, 0x2e, 0xd9, 0x26, 0xdc, + 0x92, 0x3f, 0x8d, 0x51, 0xcd, 0x0a, 0xbb, 0x05, 0x2b, 0x98, 0xac, 0x84, 0x6a, 0x6a, 0xac, 0x09, + 0xab, 0x64, 0xad, 0xc4, 0x54, 0xd9, 0x3a, 0x80, 0xc8, 0x03, 0x09, 0xd7, 0x10, 0xbe, 0xf0, 0xae, + 0x25, 0x5c, 0x3f, 0xf8, 0x21, 0x34, 0xe2, 0x91, 0x45, 0x91, 0x11, 0xa3, 0x9a, 0x15, 0xa1, 0xf3, + 0xc3, 0xab, 0x51, 
0x2f, 0x4a, 0x50, 0x1a, 0xdb, 0x81, 0xcd, 0x63, 0xdb, 0xed, 0x71, 0x27, 0x4b, + 0xa8, 0x1e, 0xb8, 0xb0, 0x24, 0x5f, 0x45, 0xa1, 0x9a, 0xe4, 0x25, 0x40, 0x32, 0x54, 0xbc, 0xd1, + 0x08, 0x69, 0x42, 0x0d, 0x7a, 0xb2, 0x10, 0x46, 0x35, 0xc9, 0x8f, 0x08, 0x93, 0x9a, 0xa8, 0x22, + 0xc2, 0x75, 0xb6, 0x05, 0x4d, 0xfc, 0x35, 0x1f, 0xfb, 0x8e, 0x1d, 0x11, 0x76, 0xe1, 0xa0, 0x03, + 0xcb, 0x49, 0x59, 0x14, 0x47, 0xa4, 0xc4, 0x04, 0xd7, 0xac, 0x08, 0x8f, 0xa0, 0x8b, 0x10, 0xf7, + 0xfc, 0xa8, 0xa9, 0x91, 0xd3, 0x3c, 0x3f, 0x46, 0x54, 0x8f, 0xfe, 0xca, 0x60, 0x91, 0x94, 0x61, + 0x9f, 0xc0, 0x72, 0xf2, 0xf7, 0x03, 0x86, 0xbd, 0x71, 0xfe, 0xef, 0x19, 0xc6, 0x76, 0x0e, 0x4b, + 0x61, 0x37, 0xef, 0xfd, 0xf2, 0xef, 0xff, 0xfe, 0xb2, 0x7a, 0xdb, 0xdc, 0x3a, 0xb4, 0xfd, 0x51, + 0x78, 0x78, 0xf5, 0xae, 0xed, 0xf8, 0x17, 0xf6, 0xbb, 0x87, 0x22, 0xc1, 0xc3, 0xf7, 0xb4, 0x03, + 0x36, 0x80, 0x15, 0x65, 0x49, 0xcf, 0x5a, 0x82, 0x4d, 0xf1, 0xcf, 0x02, 0xc6, 0x4e, 0x01, 0x2f, + 0x05, 0xbc, 0x83, 0x02, 0xf6, 0x8c, 0x3b, 0x65, 0x02, 0x0e, 0x5f, 0x8a, 0x86, 0xe3, 0x0b, 0x21, + 0xe7, 0x7d, 0x80, 0x74, 0x6f, 0xce, 0x50, 0xdb, 0xc2, 0x2e, 0xde, 0x68, 0xe5, 0xd1, 0x52, 0x48, + 0x85, 0x39, 0xb0, 0xa2, 0x2c, 0x90, 0x99, 0x91, 0xdb, 0x28, 0x2b, 0x1b, 0x6f, 0xe3, 0x4e, 0x29, + 0x4d, 0x72, 0x7a, 0x0b, 0xd5, 0x6d, 0xb3, 0xdd, 0x9c, 0xba, 0x21, 0x1e, 0x95, 0xfa, 0xb2, 0x63, + 0x58, 0x55, 0xf7, 0xb4, 0x0c, 0xad, 0x2f, 0x59, 0x50, 0x1b, 0x7a, 0x91, 0x90, 0xa8, 0xfc, 0x21, + 0xac, 0x65, 0x2e, 0x1a, 0xd3, 0x0b, 0xdb, 0xd1, 0x98, 0xcd, 0xed, 0x12, 0x4a, 0xc2, 0xe7, 0x13, + 0x68, 0x15, 0xf7, 0x8a, 0xe8, 0xc5, 0xbb, 0x4a, 0x50, 0x8a, 0xbb, 0x3d, 0xa3, 0x3d, 0x8d, 0x9c, + 0xb0, 0x7e, 0x0a, 0xcd, 0xfc, 0xfe, 0x8d, 0xa1, 0xfb, 0xa6, 0xac, 0x0b, 0x8d, 0xdd, 0x72, 0x62, + 0xc2, 0xf0, 0x3d, 0x58, 0x4e, 0xd6, 0x5b, 0x94, 0xa8, 0xf9, 0x2d, 0x1b, 0x25, 0x6a, 0x61, 0x07, + 0x66, 0x56, 0xd8, 0x10, 0xd6, 0x32, 0x0b, 0x25, 0xf2, 0x57, 0xd9, 0xb6, 0x8b, 0xfc, 0x55, 0xba, + 0x7d, 0x32, 0xdf, 0xc0, 0x00, 0xdf, 0x31, 0x5a, 0xf9, 0x00, 0x53, 0xf9, 0x13, 0xa9, 0x78, 0x02, + 0xeb, 0xd9, 0xdd, 0x0f, 0xbb, 0x4d, 0x9d, 0x4c, 0xc9, 0x5a, 0xc9, 0x30, 0xca, 0x48, 0x89, 0xce, + 0x01, 0xac, 0x65, 0x56, 0x38, 0x52, 0xe7, 0x92, 0xad, 0x90, 0xd4, 0xb9, 0x6c, 0xdf, 0x63, 0x7e, + 0x07, 0x75, 0x7e, 0xe7, 0xe0, 0xad, 0x9c, 0xce, 0x72, 0x12, 0x3c, 0x7c, 0x29, 0x5a, 0xf9, 0x2f, + 0xe2, 0xe4, 0xbc, 0x4c, 0xfc, 0x44, 0x25, 0x2e, 0xe3, 0xa7, 0xcc, 0x1a, 0x28, 0xe3, 0xa7, 0xec, + 0xaa, 0xc7, 0x7c, 0x1b, 0x65, 0xde, 0x33, 0x8c, 0x9c, 0x4c, 0x9a, 0x94, 0x0f, 0x5f, 0x7a, 0x3e, + 0x5e, 0xdb, 0x9f, 0x02, 0xa4, 0xb3, 0x2e, 0x5d, 0xdb, 0xc2, 0xb8, 0x4d, 0xd7, 0xb6, 0x38, 0x12, + 0x9b, 0x6d, 0x94, 0xa1, 0xb3, 0x56, 0xb9, 0x5d, 0x6c, 0x90, 0x46, 0x9c, 0x66, 0xc8, 0x4c, 0xc4, + 0xd5, 0x99, 0x37, 0x1b, 0xf1, 0xcc, 0xd4, 0x67, 0xee, 0xa1, 0x14, 0xc3, 0xd8, 0xce, 0x47, 0x1c, + 0x8f, 0x09, 0x23, 0x1c, 0x1c, 0x9b, 0xd2, 0x69, 0x8c, 0xe4, 0x94, 0x0d, 0x73, 0x24, 0xa7, 0x74, + 0x74, 0x8b, 0x2b, 0x1d, 0x6b, 0xe7, 0xe5, 0x4c, 0xba, 0x6a, 0xb1, 0x63, 0xe7, 0xb0, 0x48, 0xe3, + 0x15, 0xdb, 0x90, 0xcc, 0x14, 0xfe, 0x4c, 0x45, 0x49, 0xc6, 0x6f, 0x22, 0xe3, 0xbb, 0x6c, 0x56, + 0x09, 0x65, 0x3f, 0x87, 0x15, 0x65, 0x22, 0xa1, 0x3a, 0x5d, 0x9c, 0x9a, 0xa8, 0x4e, 0x97, 0x8c, + 0x2e, 0x53, 0xbd, 0xc4, 0xc5, 0x29, 0xbc, 0x16, 0xc7, 0xb0, 0xaa, 0x4e, 0x6c, 0x54, 0xf4, 0x4a, + 0x46, 0x3b, 0x43, 0x2f, 0x12, 0x92, 0x0b, 0x71, 0x02, 0xeb, 0xd9, 0xd1, 0x83, 0xee, 0x56, 0xe9, + 0x5c, 0x43, 0x77, 0xab, 0x7c, 0x52, 0x31, 0x2b, 0x42, 0x1f, 0x75, 0x36, 0x60, 0xea, 0x13, 0x94, + 0x29, 0x4a, 0x7a, 0x91, 0x90, 0x30, 0x39, 
0x85, 0x5b, 0xb9, 0xbe, 0x99, 0xde, 0x8e, 0xf2, 0xf6, + 0x9f, 0xde, 0x8e, 0x29, 0x8d, 0x36, 0x59, 0x97, 0xed, 0x5e, 0xc9, 0xba, 0xd2, 0x06, 0xd9, 0x30, + 0xca, 0x48, 0x09, 0xab, 0x9f, 0xe0, 0xd8, 0x9c, 0x92, 0xe4, 0xc3, 0xd6, 0x96, 0xbe, 0xcd, 0x13, + 0x62, 0xa6, 0xf7, 0xa6, 0xd2, 0x13, 0xce, 0xcf, 0x80, 0x65, 0x0e, 0x50, 0xc2, 0xdc, 0x2d, 0xfc, + 0x30, 0x93, 0x37, 0xed, 0x69, 0xe4, 0x84, 0xad, 0x9d, 0x3c, 0x43, 0x79, 0xd6, 0x6f, 0x28, 0xfe, + 0x9f, 0xc2, 0xde, 0x9c, 0x75, 0x44, 0x7d, 0x8e, 0xf2, 0x4d, 0x31, 0x3d, 0x47, 0x53, 0x3a, 0x77, + 0x7a, 0x8e, 0xa6, 0xf5, 0xd1, 0x66, 0xe5, 0x81, 0xfe, 0xb7, 0x57, 0x6d, 0xed, 0xeb, 0x57, 0x6d, + 0xed, 0x5f, 0xaf, 0xda, 0xda, 0xef, 0x5e, 0xb7, 0x2b, 0x5f, 0xbf, 0x6e, 0x57, 0xfe, 0xf1, 0xba, + 0x5d, 0xe9, 0x2e, 0xe2, 0x7f, 0x86, 0x7c, 0xf7, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xc9, 0x28, + 0xf0, 0xb8, 0x5d, 0x22, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -3793,6 +3927,7 @@ type MasterClient interface { GetValidationStatus(ctx context.Context, in *GetValidationStatusRequest, opts ...grpc.CallOption) (*GetValidationStatusResponse, error) GetValidationError(ctx context.Context, in *GetValidationErrorRequest, opts ...grpc.CallOption) (*GetValidationErrorResponse, error) OperateValidationError(ctx context.Context, in *OperateValidationErrorRequest, opts ...grpc.CallOption) (*OperateValidationErrorResponse, error) + UpdateValidation(ctx context.Context, in *UpdateValidationRequest, opts ...grpc.CallOption) (*UpdateValidationResponse, error) } type masterClient struct { @@ -4037,6 +4172,15 @@ func (c *masterClient) OperateValidationError(ctx context.Context, in *OperateVa return out, nil } +func (c *masterClient) UpdateValidation(ctx context.Context, in *UpdateValidationRequest, opts ...grpc.CallOption) (*UpdateValidationResponse, error) { + out := new(UpdateValidationResponse) + err := c.cc.Invoke(ctx, "/pb.Master/UpdateValidation", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // MasterServer is the server API for Master service. type MasterServer interface { StartTask(context.Context, *StartTaskRequest) (*StartTaskResponse, error) @@ -4078,6 +4222,7 @@ type MasterServer interface { GetValidationStatus(context.Context, *GetValidationStatusRequest) (*GetValidationStatusResponse, error) GetValidationError(context.Context, *GetValidationErrorRequest) (*GetValidationErrorResponse, error) OperateValidationError(context.Context, *OperateValidationErrorRequest) (*OperateValidationErrorResponse, error) + UpdateValidation(context.Context, *UpdateValidationRequest) (*UpdateValidationResponse, error) } // UnimplementedMasterServer can be embedded to have forward compatible implementations. 
@@ -4162,6 +4307,9 @@ func (*UnimplementedMasterServer) GetValidationError(ctx context.Context, req *G func (*UnimplementedMasterServer) OperateValidationError(ctx context.Context, req *OperateValidationErrorRequest) (*OperateValidationErrorResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method OperateValidationError not implemented") } +func (*UnimplementedMasterServer) UpdateValidation(ctx context.Context, req *UpdateValidationRequest) (*UpdateValidationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateValidation not implemented") +} func RegisterMasterServer(s *grpc.Server, srv MasterServer) { s.RegisterService(&_Master_serviceDesc, srv) @@ -4635,6 +4783,24 @@ func _Master_OperateValidationError_Handler(srv interface{}, ctx context.Context return interceptor(ctx, in, info, handler) } +func _Master_UpdateValidation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateValidationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MasterServer).UpdateValidation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Master/UpdateValidation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MasterServer).UpdateValidation(ctx, req.(*UpdateValidationRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Master_serviceDesc = grpc.ServiceDesc{ ServiceName: "pb.Master", HandlerType: (*MasterServer)(nil), @@ -4743,6 +4909,10 @@ var _Master_serviceDesc = grpc.ServiceDesc{ MethodName: "OperateValidationError", Handler: _Master_OperateValidationError_Handler, }, + { + MethodName: "UpdateValidation", + Handler: _Master_UpdateValidation_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "dmmaster.proto", @@ -7425,6 +7595,113 @@ func (m *StopValidationResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } +func (m *UpdateValidationRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateValidationRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateValidationRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.BinlogGTID) > 0 { + i -= len(m.BinlogGTID) + copy(dAtA[i:], m.BinlogGTID) + i = encodeVarintDmmaster(dAtA, i, uint64(len(m.BinlogGTID))) + i-- + dAtA[i] = 0x22 + } + if len(m.BinlogPos) > 0 { + i -= len(m.BinlogPos) + copy(dAtA[i:], m.BinlogPos) + i = encodeVarintDmmaster(dAtA, i, uint64(len(m.BinlogPos))) + i-- + dAtA[i] = 0x1a + } + if len(m.Sources) > 0 { + for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Sources[iNdEx]) + copy(dAtA[i:], m.Sources[iNdEx]) + i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Sources[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.TaskName) > 0 { + i -= len(m.TaskName) + copy(dAtA[i:], m.TaskName) + i = encodeVarintDmmaster(dAtA, i, uint64(len(m.TaskName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateValidationResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } 
+ return dAtA[:n], nil +} + +func (m *UpdateValidationResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateValidationResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Sources) > 0 { + for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Sources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDmmaster(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Msg) > 0 { + i -= len(m.Msg) + copy(dAtA[i:], m.Msg) + i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) + i-- + dAtA[i] = 0x12 + } + if m.Result { + i-- + if m.Result { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func encodeVarintDmmaster(dAtA []byte, offset int, v uint64) int { offset -= sovDmmaster(v) base := offset @@ -8644,6 +8921,55 @@ func (m *StopValidationResponse) Size() (n int) { return n } +func (m *UpdateValidationRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TaskName) + if l > 0 { + n += 1 + l + sovDmmaster(uint64(l)) + } + if len(m.Sources) > 0 { + for _, s := range m.Sources { + l = len(s) + n += 1 + l + sovDmmaster(uint64(l)) + } + } + l = len(m.BinlogPos) + if l > 0 { + n += 1 + l + sovDmmaster(uint64(l)) + } + l = len(m.BinlogGTID) + if l > 0 { + n += 1 + l + sovDmmaster(uint64(l)) + } + return n +} + +func (m *UpdateValidationResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result { + n += 2 + } + l = len(m.Msg) + if l > 0 { + n += 1 + l + sovDmmaster(uint64(l)) + } + if len(m.Sources) > 0 { + for _, e := range m.Sources { + l = e.Size() + n += 1 + l + sovDmmaster(uint64(l)) + } + } + return n +} + func sovDmmaster(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -15998,6 +16324,320 @@ func (m *StopValidationResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *UpdateValidationRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDmmaster + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateValidationRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateValidationRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDmmaster + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDmmaster + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDmmaster + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Sources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDmmaster + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDmmaster + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDmmaster + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sources = append(m.Sources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BinlogPos", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDmmaster + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDmmaster + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDmmaster + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BinlogPos = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BinlogGTID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDmmaster + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDmmaster + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDmmaster + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BinlogGTID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDmmaster(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDmmaster + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateValidationResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDmmaster + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateValidationResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateValidationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDmmaster + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Result = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) 
+ } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDmmaster + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDmmaster + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDmmaster + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Msg = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDmmaster + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDmmaster + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDmmaster + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sources = append(m.Sources, &CommonWorkerResponse{}) + if err := m.Sources[len(m.Sources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDmmaster(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDmmaster + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipDmmaster(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/dm/pb/dmworker.pb.go b/dm/pb/dmworker.pb.go index 8e8568751ef..f340aabbb3c 100644 --- a/dm/pb/dmworker.pb.go +++ b/dm/pb/dmworker.pb.go @@ -3501,6 +3501,118 @@ func (m *OperateValidationErrorResponse) GetMsg() string { return "" } +type UpdateValidationWorkerRequest struct { + TaskName string `protobuf:"bytes,1,opt,name=taskName,proto3" json:"taskName,omitempty"` + BinlogPos string `protobuf:"bytes,2,opt,name=binlogPos,proto3" json:"binlogPos,omitempty"` + BinlogGTID string `protobuf:"bytes,3,opt,name=binlogGTID,proto3" json:"binlogGTID,omitempty"` +} + +func (m *UpdateValidationWorkerRequest) Reset() { *m = UpdateValidationWorkerRequest{} } +func (m *UpdateValidationWorkerRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateValidationWorkerRequest) ProtoMessage() {} +func (*UpdateValidationWorkerRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_51a1b9e17fd67b10, []int{42} +} +func (m *UpdateValidationWorkerRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateValidationWorkerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpdateValidationWorkerRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UpdateValidationWorkerRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateValidationWorkerRequest.Merge(m, src) +} +func (m *UpdateValidationWorkerRequest) XXX_Size() int { + return m.Size() +} +func (m *UpdateValidationWorkerRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateValidationWorkerRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateValidationWorkerRequest proto.InternalMessageInfo 
+ +func (m *UpdateValidationWorkerRequest) GetTaskName() string { + if m != nil { + return m.TaskName + } + return "" +} + +func (m *UpdateValidationWorkerRequest) GetBinlogPos() string { + if m != nil { + return m.BinlogPos + } + return "" +} + +func (m *UpdateValidationWorkerRequest) GetBinlogGTID() string { + if m != nil { + return m.BinlogGTID + } + return "" +} + +type UpdateValidationWorkerResponse struct { + Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` + Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` +} + +func (m *UpdateValidationWorkerResponse) Reset() { *m = UpdateValidationWorkerResponse{} } +func (m *UpdateValidationWorkerResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateValidationWorkerResponse) ProtoMessage() {} +func (*UpdateValidationWorkerResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_51a1b9e17fd67b10, []int{43} +} +func (m *UpdateValidationWorkerResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateValidationWorkerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpdateValidationWorkerResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UpdateValidationWorkerResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateValidationWorkerResponse.Merge(m, src) +} +func (m *UpdateValidationWorkerResponse) XXX_Size() int { + return m.Size() +} +func (m *UpdateValidationWorkerResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateValidationWorkerResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateValidationWorkerResponse proto.InternalMessageInfo + +func (m *UpdateValidationWorkerResponse) GetResult() bool { + if m != nil { + return m.Result + } + return false +} + +func (m *UpdateValidationWorkerResponse) GetMsg() string { + if m != nil { + return m.Msg + } + return "" +} + func init() { proto.RegisterEnum("pb.TaskOp", TaskOp_name, TaskOp_value) proto.RegisterEnum("pb.Stage", Stage_name, Stage_value) @@ -3555,192 +3667,198 @@ func init() { proto.RegisterType((*GetValidationErrorResponse)(nil), "pb.GetValidationErrorResponse") proto.RegisterType((*OperateValidationErrorRequest)(nil), "pb.OperateValidationErrorRequest") proto.RegisterType((*OperateValidationErrorResponse)(nil), "pb.OperateValidationErrorResponse") + proto.RegisterType((*UpdateValidationWorkerRequest)(nil), "pb.UpdateValidationWorkerRequest") + proto.RegisterType((*UpdateValidationWorkerResponse)(nil), "pb.UpdateValidationWorkerResponse") } func init() { proto.RegisterFile("dmworker.proto", fileDescriptor_51a1b9e17fd67b10) } var fileDescriptor_51a1b9e17fd67b10 = []byte{ - // 2880 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x3a, 0xcd, 0x6f, 0x24, 0x57, - 0xf1, 0xd3, 0xdd, 0xf3, 0x59, 0x33, 0xb6, 0xdb, 0x6f, 0xbd, 0xfb, 0x9b, 0x38, 0xbb, 0x13, 0xa7, - 0x37, 0xca, 0xcf, 0xb1, 0x82, 0x95, 0x98, 0xa0, 0xa0, 0x48, 0x90, 0x64, 0xed, 0x8d, 0x77, 0x83, - 0x37, 0xde, 0x6d, 0x3b, 0xcb, 0x09, 0x89, 0x76, 0xcf, 0xf3, 0xb8, 0x71, 0x4f, 0x77, 0x6f, 0x77, - 0x8f, 0x2d, 0x1f, 0x10, 0x17, 0xc4, 0x15, 0x2e, 0x20, 0x81, 0xb8, 0x80, 0x84, 0xc4, 0x89, 0x03, - 0x7f, 0x00, 0x47, 0xc8, 0x31, 0xe2, 0xc4, 0x11, 0x65, 0xff, 0x06, 0xae, 0x08, 0x55, 0xbd, 0xf7, - 0xba, 0x5f, 0xcf, 0x87, 0x37, 0x8b, 0xc4, 0xad, 0xeb, 0xe3, 0x55, 0xd5, 0xab, 
0xaf, 0x57, 0x35, - 0x36, 0x2c, 0x0f, 0xc7, 0x97, 0x71, 0x7a, 0xce, 0xd3, 0xed, 0x24, 0x8d, 0xf3, 0x98, 0x99, 0xc9, - 0x89, 0xb3, 0x09, 0xec, 0xc9, 0x84, 0xa7, 0x57, 0x47, 0xb9, 0x97, 0x4f, 0x32, 0x97, 0x3f, 0x9b, - 0xf0, 0x2c, 0x67, 0x0c, 0xea, 0x91, 0x37, 0xe6, 0x7d, 0x63, 0xc3, 0xd8, 0xec, 0xb8, 0xf4, 0xed, - 0x24, 0xb0, 0xb6, 0x1b, 0x8f, 0xc7, 0x71, 0xf4, 0x7d, 0x92, 0xe1, 0xf2, 0x2c, 0x89, 0xa3, 0x8c, - 0xb3, 0x5b, 0xd0, 0x4c, 0x79, 0x36, 0x09, 0x73, 0xe2, 0x6e, 0xbb, 0x12, 0x62, 0x36, 0x58, 0xe3, - 0x6c, 0xd4, 0x37, 0x49, 0x04, 0x7e, 0x22, 0x67, 0x16, 0x4f, 0x52, 0x9f, 0xf7, 0x2d, 0x42, 0x4a, - 0x08, 0xf1, 0xc2, 0xae, 0x7e, 0x5d, 0xe0, 0x05, 0xe4, 0xfc, 0xc9, 0x80, 0x1b, 0x15, 0xe3, 0x5e, - 0x5a, 0xe3, 0x7b, 0xd0, 0x13, 0x3a, 0x84, 0x04, 0xd2, 0xdb, 0xdd, 0xb1, 0xb7, 0x93, 0x93, 0xed, - 0x23, 0x0d, 0xef, 0x56, 0xb8, 0xd8, 0xfb, 0xb0, 0x94, 0x4d, 0x4e, 0x8e, 0xbd, 0xec, 0x5c, 0x1e, - 0xab, 0x6f, 0x58, 0x9b, 0xdd, 0x9d, 0x55, 0x3a, 0xa6, 0x13, 0xdc, 0x2a, 0x9f, 0xf3, 0x07, 0x03, - 0xba, 0xbb, 0x67, 0xdc, 0x97, 0x30, 0x1a, 0x9a, 0x78, 0x59, 0xc6, 0x87, 0xca, 0x50, 0x01, 0xb1, - 0x35, 0x68, 0xe4, 0x71, 0xee, 0x85, 0x64, 0x6a, 0xc3, 0x15, 0x00, 0x1b, 0x00, 0x64, 0x13, 0xdf, - 0xe7, 0x59, 0x76, 0x3a, 0x09, 0xc9, 0xd4, 0x86, 0xab, 0x61, 0x50, 0xda, 0xa9, 0x17, 0x84, 0x7c, - 0x48, 0x6e, 0x6a, 0xb8, 0x12, 0x62, 0x7d, 0x68, 0x5d, 0x7a, 0x69, 0x14, 0x44, 0xa3, 0x7e, 0x83, - 0x08, 0x0a, 0xc4, 0x13, 0x43, 0x9e, 0x7b, 0x41, 0xd8, 0x6f, 0x6e, 0x18, 0x9b, 0x3d, 0x57, 0x42, - 0xce, 0xbf, 0x0d, 0x80, 0xbd, 0xc9, 0x38, 0x91, 0x66, 0x6e, 0x40, 0x97, 0x2c, 0x38, 0xf6, 0x4e, - 0x42, 0x9e, 0x91, 0xad, 0x96, 0xab, 0xa3, 0xd8, 0x26, 0xac, 0xf8, 0xf1, 0x38, 0x09, 0x79, 0xce, - 0x87, 0x92, 0x0b, 0x4d, 0x37, 0xdc, 0x69, 0x34, 0x7b, 0x03, 0x96, 0x4e, 0x83, 0x28, 0xc8, 0xce, - 0xf8, 0xf0, 0xde, 0x55, 0xce, 0x85, 0xcb, 0x0d, 0xb7, 0x8a, 0x64, 0x0e, 0xf4, 0x14, 0xc2, 0x8d, - 0x2f, 0x33, 0xba, 0x90, 0xe1, 0x56, 0x70, 0xec, 0x6d, 0x58, 0xe5, 0x59, 0x1e, 0x8c, 0xbd, 0x9c, - 0x1f, 0xa3, 0x29, 0xc4, 0xd8, 0x20, 0xc6, 0x59, 0x02, 0xc6, 0xfe, 0x24, 0xc9, 0xe8, 0x9e, 0x96, - 0x8b, 0x9f, 0x6c, 0x1d, 0xda, 0x49, 0x1a, 0x8f, 0x52, 0x9e, 0x65, 0xfd, 0x16, 0xa5, 0x44, 0x01, - 0x3b, 0x5f, 0x18, 0x00, 0x07, 0xb1, 0x37, 0x94, 0x0e, 0x98, 0x31, 0x5a, 0xb8, 0x60, 0xca, 0xe8, - 0x01, 0x00, 0xf9, 0x44, 0xb0, 0x98, 0xc4, 0xa2, 0x61, 0x2a, 0x0a, 0xad, 0xaa, 0x42, 0x3c, 0x3b, - 0xe6, 0xb9, 0x77, 0x2f, 0x88, 0xc2, 0x78, 0x24, 0xd3, 0x5c, 0xc3, 0xb0, 0x37, 0x61, 0xb9, 0x84, - 0xf6, 0x8f, 0x1f, 0xee, 0xd1, 0x4d, 0x3b, 0xee, 0x14, 0x76, 0xf6, 0x9a, 0xce, 0x2f, 0x0d, 0x58, - 0x3a, 0x3a, 0xf3, 0xd2, 0x61, 0x10, 0x8d, 0xf6, 0xd3, 0x78, 0x92, 0x60, 0xd4, 0x73, 0x2f, 0x1d, - 0xf1, 0x5c, 0x96, 0xaf, 0x84, 0xb0, 0xa8, 0xf7, 0xf6, 0x0e, 0xd0, 0x72, 0x0b, 0x8b, 0x1a, 0xbf, - 0xc5, 0xcd, 0xd3, 0x2c, 0x3f, 0x88, 0x7d, 0x2f, 0x0f, 0xe2, 0x48, 0x1a, 0x5e, 0x45, 0x52, 0xe1, - 0x5e, 0x45, 0x3e, 0x65, 0x9e, 0x45, 0x85, 0x4b, 0x10, 0xde, 0x78, 0x12, 0x49, 0x4a, 0x83, 0x28, - 0x05, 0xec, 0xfc, 0xab, 0x0e, 0x70, 0x74, 0x15, 0xf9, 0x53, 0x39, 0x76, 0xff, 0x82, 0x47, 0x79, - 0x35, 0xc7, 0x04, 0x0a, 0x85, 0x89, 0x94, 0x4b, 0x94, 0x73, 0x0b, 0x98, 0xdd, 0x86, 0x4e, 0xca, - 0x7d, 0x1e, 0xe5, 0x48, 0xb4, 0x88, 0x58, 0x22, 0x30, 0x9b, 0xc6, 0x5e, 0x96, 0xf3, 0xb4, 0xe2, - 0xde, 0x0a, 0x8e, 0x6d, 0x81, 0xad, 0xc3, 0xfb, 0x79, 0x30, 0x94, 0x2e, 0x9e, 0xc1, 0xa3, 0x3c, - 0xba, 0x84, 0x92, 0xd7, 0x14, 0xf2, 0x74, 0x1c, 0xca, 0xd3, 0x61, 0x92, 0x27, 0xb2, 0x6c, 0x06, - 0x8f, 0xf2, 0x4e, 0xc2, 0xd8, 0x3f, 0x0f, 0xa2, 0x11, 0x05, 0xa0, 0x4d, 0xae, 0xaa, 0xe0, 0xd8, - 0x77, 
0xc0, 0x9e, 0x44, 0x29, 0xcf, 0xe2, 0xf0, 0x82, 0x0f, 0x29, 0x8e, 0x59, 0xbf, 0xa3, 0xb5, - 0x1d, 0x3d, 0xc2, 0xee, 0x0c, 0xab, 0x16, 0x21, 0x10, 0x9d, 0x46, 0x46, 0x68, 0x00, 0x70, 0x42, - 0x86, 0x1c, 0x5f, 0x25, 0xbc, 0xdf, 0x15, 0x79, 0x57, 0x62, 0xd8, 0x3b, 0x70, 0x23, 0xe3, 0x7e, - 0x1c, 0x0d, 0xb3, 0x7b, 0xfc, 0x2c, 0x88, 0x86, 0x8f, 0xc8, 0x17, 0xfd, 0x1e, 0xb9, 0x78, 0x1e, - 0x09, 0x33, 0x86, 0x0c, 0xdf, 0xdb, 0x3b, 0x38, 0xbc, 0x8c, 0x78, 0xda, 0x5f, 0x12, 0x19, 0x53, - 0x41, 0x62, 0xb8, 0xfd, 0x38, 0x3a, 0x0d, 0x03, 0x3f, 0x7f, 0x94, 0x8d, 0xfa, 0xcb, 0xc4, 0xa3, - 0xa3, 0x30, 0xa4, 0x79, 0x51, 0xd6, 0x2b, 0x22, 0xa4, 0x05, 0xa2, 0x48, 0x06, 0x37, 0xc9, 0xfa, - 0xb6, 0x96, 0x0c, 0xae, 0x9e, 0x0c, 0x48, 0x5c, 0xd5, 0x93, 0xc1, 0x4d, 0x32, 0xe7, 0xb7, 0x06, - 0xf4, 0xf4, 0xde, 0xae, 0xbd, 0x3a, 0xc6, 0x82, 0x57, 0xc7, 0xd4, 0x5f, 0x1d, 0xf6, 0x56, 0xf1, - 0xba, 0x88, 0xd7, 0x82, 0xfc, 0xff, 0x38, 0x8d, 0xb1, 0x0d, 0xbb, 0x44, 0x28, 0x1e, 0x9c, 0x77, - 0xa1, 0x9b, 0xf2, 0xd0, 0xbb, 0x2a, 0x9e, 0x09, 0xe4, 0x5f, 0x41, 0x7e, 0xb7, 0x44, 0xbb, 0x3a, - 0x8f, 0xf3, 0x37, 0x13, 0xba, 0x1a, 0x71, 0x26, 0x77, 0x8d, 0xaf, 0x99, 0xbb, 0xe6, 0x82, 0xdc, - 0xdd, 0x50, 0x26, 0x4d, 0x4e, 0xf6, 0x82, 0x54, 0x96, 0xb3, 0x8e, 0x2a, 0x38, 0x2a, 0xc5, 0xa2, - 0xa3, 0xb0, 0xdb, 0x6b, 0xa0, 0x56, 0x2a, 0xd3, 0x68, 0xb6, 0x0d, 0x8c, 0x50, 0xbb, 0x5e, 0xee, - 0x9f, 0x7d, 0x9e, 0xc8, 0xec, 0x69, 0x52, 0x0a, 0xce, 0xa1, 0xb0, 0xd7, 0xa0, 0x91, 0xe5, 0xde, - 0x88, 0x53, 0xa9, 0x2c, 0xef, 0x74, 0x28, 0xb5, 0x11, 0xe1, 0x0a, 0xbc, 0xe6, 0xfc, 0xf6, 0x0b, - 0x9c, 0xef, 0xfc, 0xd9, 0x82, 0xa5, 0xca, 0x6b, 0x3c, 0x6f, 0x6a, 0x29, 0x35, 0x9a, 0x0b, 0x34, - 0x6e, 0x40, 0x7d, 0x12, 0x05, 0x22, 0xd8, 0xcb, 0x3b, 0x3d, 0xa4, 0x7f, 0x1e, 0x05, 0x39, 0x56, - 0x87, 0x4b, 0x14, 0xcd, 0xa6, 0xfa, 0x8b, 0x12, 0xe2, 0x1d, 0xb8, 0x51, 0x96, 0xe6, 0xde, 0xde, - 0xc1, 0x41, 0xec, 0x9f, 0x17, 0xbd, 0x7c, 0x1e, 0x89, 0x31, 0x31, 0xb3, 0x50, 0x8b, 0x79, 0x50, - 0x13, 0x53, 0xcb, 0xff, 0x43, 0xc3, 0xc7, 0x29, 0x82, 0xbc, 0x24, 0x13, 0x4a, 0x1b, 0x2b, 0x1e, - 0xd4, 0x5c, 0x41, 0x67, 0x6f, 0x40, 0x7d, 0x38, 0x19, 0x27, 0xd2, 0x57, 0xcb, 0xc8, 0x57, 0x3e, - 0xeb, 0x0f, 0x6a, 0x2e, 0x51, 0x91, 0x2b, 0x8c, 0xbd, 0x61, 0xbf, 0x53, 0x72, 0x95, 0x6f, 0x1f, - 0x72, 0x21, 0x15, 0xb9, 0xb0, 0x67, 0x50, 0xff, 0x90, 0x5c, 0x65, 0xfb, 0x46, 0x2e, 0xa4, 0xb2, - 0xf7, 0x00, 0x2e, 0xbc, 0x30, 0x18, 0x8a, 0xc7, 0xa2, 0x4b, 0xbc, 0x6b, 0xc8, 0xfb, 0xb4, 0xc0, - 0xca, 0xac, 0xd7, 0xf8, 0xee, 0xb5, 0xa1, 0x99, 0x89, 0xf4, 0xff, 0x2e, 0xac, 0x56, 0x62, 0x76, - 0x10, 0x64, 0xe4, 0x60, 0x41, 0xee, 0x1b, 0x8b, 0x06, 0x2d, 0x75, 0x7e, 0x00, 0x40, 0x9e, 0xb8, - 0x9f, 0xa6, 0x71, 0xaa, 0x06, 0x3e, 0xa3, 0x18, 0xf8, 0x9c, 0x3b, 0xd0, 0x41, 0x0f, 0x5c, 0x43, - 0xc6, 0xab, 0x2f, 0x22, 0x27, 0xd0, 0xa3, 0x3b, 0x3f, 0x39, 0x58, 0xc0, 0xc1, 0x76, 0x60, 0x4d, - 0x4c, 0x5d, 0xa2, 0x08, 0x1e, 0xc7, 0x59, 0x40, 0x9e, 0x10, 0xe5, 0x38, 0x97, 0x86, 0xbd, 0x8c, - 0xa3, 0xb8, 0xa3, 0x27, 0x07, 0x6a, 0x2e, 0x50, 0xb0, 0xf3, 0x2d, 0xe8, 0xa0, 0x46, 0xa1, 0x6e, - 0x13, 0x9a, 0x44, 0x50, 0x7e, 0xb0, 0x8b, 0x20, 0x48, 0x83, 0x5c, 0x49, 0x77, 0x7e, 0x6e, 0x40, - 0x57, 0x34, 0x39, 0x71, 0xf2, 0x65, 0x7b, 0xdc, 0x46, 0xe5, 0xb8, 0xea, 0x12, 0xba, 0xc4, 0x6d, - 0x00, 0x6a, 0x53, 0x82, 0xa1, 0x5e, 0x26, 0x45, 0x89, 0x75, 0x35, 0x0e, 0x0c, 0x4c, 0x09, 0xcd, - 0x71, 0xed, 0xaf, 0x4d, 0xe8, 0xc9, 0x90, 0x0a, 0x96, 0xff, 0x51, 0xb1, 0xca, 0x7a, 0xaa, 0xeb, - 0xf5, 0xf4, 0xa6, 0xaa, 0xa7, 0x46, 0x79, 0x8d, 0x32, 0x8b, 0xca, 0x72, 0xba, 0x2b, 0xcb, 0xa9, - 0x49, 0x6c, 0x4b, 0xaa, 0x9c, 
0x14, 0x97, 0xa8, 0xa6, 0xbb, 0xb2, 0x9a, 0x5a, 0x25, 0x53, 0x91, - 0x52, 0x45, 0x31, 0xdd, 0x95, 0xc5, 0xd4, 0x2e, 0x99, 0x8a, 0x30, 0xab, 0x5a, 0xba, 0xd7, 0x82, - 0x06, 0x85, 0xd3, 0xf9, 0x00, 0x6c, 0xdd, 0x35, 0x54, 0x13, 0x6f, 0x4a, 0x62, 0x25, 0x15, 0x34, - 0x26, 0x57, 0x9e, 0x7d, 0x06, 0x4b, 0x95, 0x56, 0x84, 0x2f, 0x7e, 0x90, 0xed, 0x7a, 0x91, 0xcf, - 0xc3, 0x62, 0xef, 0xd0, 0x30, 0x5a, 0x92, 0x99, 0xa5, 0x64, 0x29, 0xa2, 0x92, 0x64, 0xda, 0xf6, - 0x60, 0x55, 0xb6, 0x87, 0xbf, 0x1b, 0xd0, 0xd3, 0x0f, 0xe0, 0x02, 0x72, 0x3f, 0x4d, 0x77, 0xe3, - 0xa1, 0x88, 0x66, 0xc3, 0x55, 0x20, 0xa6, 0x3e, 0x7e, 0x86, 0x5e, 0x96, 0xc9, 0x0c, 0x2c, 0x60, - 0x49, 0x3b, 0xf2, 0xe3, 0x44, 0xed, 0x83, 0x05, 0x2c, 0x69, 0x07, 0xfc, 0x82, 0x87, 0xf2, 0x81, - 0x2a, 0x60, 0xd4, 0xf6, 0x88, 0x67, 0x19, 0xa6, 0x89, 0xe8, 0xab, 0x0a, 0xc4, 0x53, 0xae, 0x77, - 0xb9, 0xeb, 0x4d, 0x32, 0x2e, 0x67, 0xb6, 0x02, 0x46, 0xb7, 0xe0, 0xde, 0xea, 0xa5, 0xf1, 0x24, - 0x52, 0x93, 0x9a, 0x86, 0x71, 0x2e, 0x61, 0xf5, 0xf1, 0x24, 0x1d, 0x71, 0x4a, 0x62, 0xb5, 0x06, - 0xaf, 0x43, 0x3b, 0x88, 0x3c, 0x3f, 0x0f, 0x2e, 0xb8, 0xf4, 0x64, 0x01, 0x63, 0xfe, 0xe6, 0xc1, - 0x98, 0xcb, 0x51, 0x95, 0xbe, 0x91, 0xff, 0x34, 0x08, 0x39, 0xe5, 0xb5, 0xbc, 0x92, 0x82, 0xa9, - 0x44, 0xc5, 0x9b, 0x2c, 0x97, 0x5c, 0x01, 0x39, 0xbf, 0x31, 0x61, 0xfd, 0x30, 0xe1, 0xa9, 0x97, - 0x73, 0xb1, 0x58, 0x1f, 0xf9, 0x67, 0x7c, 0xec, 0x29, 0x13, 0x6e, 0x83, 0x19, 0x27, 0xa4, 0x5c, - 0xe6, 0xbb, 0x20, 0x1f, 0x26, 0xae, 0x19, 0x27, 0x64, 0x84, 0x97, 0x9d, 0x4b, 0xdf, 0xd2, 0xf7, - 0xc2, 0x2d, 0x7b, 0x1d, 0xda, 0x43, 0x2f, 0xf7, 0x4e, 0xbc, 0x8c, 0x2b, 0x9f, 0x2a, 0x98, 0x16, - 0x52, 0xdc, 0xdf, 0xa4, 0x47, 0x05, 0x40, 0x92, 0x48, 0x9b, 0xf4, 0xa6, 0x84, 0x90, 0xfb, 0x34, - 0x9c, 0x64, 0x67, 0xe4, 0xc6, 0xb6, 0x2b, 0x00, 0xb4, 0xa5, 0xc8, 0xf9, 0xb6, 0x7c, 0x2e, 0x06, - 0x00, 0xa7, 0x69, 0x3c, 0x16, 0x8d, 0x85, 0x1e, 0xa0, 0xb6, 0xab, 0x61, 0x14, 0xfd, 0x58, 0xac, - 0x2b, 0x50, 0xd2, 0x05, 0xc6, 0xc9, 0x61, 0xe9, 0xe9, 0xbb, 0x32, 0xed, 0x1f, 0xf1, 0xdc, 0x63, - 0xeb, 0x9a, 0x3b, 0x00, 0xdd, 0x81, 0x14, 0xe9, 0x8c, 0x17, 0x76, 0x0f, 0xd5, 0x72, 0x2c, 0xad, - 0xe5, 0x28, 0x0f, 0xd6, 0x29, 0xc5, 0xe9, 0xdb, 0x79, 0x0f, 0xd6, 0x64, 0x44, 0x9e, 0xbe, 0x8b, - 0x5a, 0x17, 0xc6, 0x42, 0x90, 0x85, 0x7a, 0xe7, 0xaf, 0x06, 0xdc, 0x9c, 0x3a, 0xf6, 0xd2, 0xbf, - 0x57, 0xbc, 0x0f, 0x75, 0x5c, 0xf8, 0xfa, 0x16, 0x95, 0xe6, 0x5d, 0xd4, 0x31, 0x57, 0xe4, 0x36, - 0x02, 0xf7, 0xa3, 0x3c, 0xbd, 0x72, 0xe9, 0xc0, 0xfa, 0xa7, 0xd0, 0x29, 0x50, 0x28, 0xf7, 0x9c, - 0x5f, 0xa9, 0xee, 0x7b, 0xce, 0xaf, 0x70, 0xa2, 0xb8, 0xf0, 0xc2, 0x89, 0x70, 0x8d, 0x7c, 0x60, - 0x2b, 0x8e, 0x75, 0x05, 0xfd, 0x03, 0xf3, 0xdb, 0x86, 0xf3, 0x63, 0xe8, 0x3f, 0xf0, 0xa2, 0x61, - 0x28, 0xf3, 0x51, 0x34, 0x05, 0xe9, 0x82, 0x57, 0x35, 0x17, 0x74, 0x51, 0x0a, 0x51, 0xaf, 0xc9, - 0xc6, 0xdb, 0xd0, 0x39, 0x51, 0xcf, 0xa1, 0x74, 0x7c, 0x89, 0xa0, 0x9c, 0x79, 0x16, 0x66, 0x72, - 0xad, 0xa4, 0x6f, 0xe7, 0x26, 0xdc, 0xd8, 0xe7, 0xb9, 0xd0, 0xbd, 0x7b, 0x3a, 0x92, 0x9a, 0x9d, - 0x4d, 0x58, 0xab, 0xa2, 0xa5, 0x73, 0x6d, 0xb0, 0xfc, 0xd3, 0xe2, 0xa9, 0xf1, 0x4f, 0x47, 0xce, - 0x11, 0xdc, 0x11, 0xd3, 0xd2, 0xe4, 0x04, 0x4d, 0xc0, 0xd6, 0xf7, 0x79, 0x32, 0xf4, 0x72, 0xae, - 0x2e, 0xb1, 0x03, 0x6b, 0x99, 0xa0, 0xed, 0x9e, 0x8e, 0x8e, 0xe3, 0x71, 0x78, 0x94, 0xa7, 0x41, - 0xa4, 0x64, 0xcc, 0xa5, 0x39, 0x07, 0x30, 0x58, 0x24, 0x54, 0x1a, 0xd2, 0x87, 0x96, 0xfc, 0xb1, - 0x46, 0x86, 0x59, 0x81, 0xb3, 0x71, 0x76, 0x46, 0xb0, 0xbe, 0xcf, 0xf3, 0x99, 0x99, 0xa9, 0x6c, - 0x3b, 0xa8, 0xe3, 0xb3, 0xf2, 0x79, 0x2c, 0x60, 0xf6, 
0x0d, 0xe8, 0x9d, 0x06, 0x61, 0xce, 0x53, - 0xb9, 0x73, 0xcc, 0xe4, 0x7a, 0x85, 0xec, 0xfc, 0xd4, 0x02, 0x7b, 0x5a, 0x4d, 0x11, 0x27, 0x63, - 0x6e, 0xd7, 0x30, 0x2b, 0x5d, 0x83, 0x41, 0x7d, 0x8c, 0x8d, 0x5d, 0xd6, 0x0c, 0x7e, 0x97, 0x85, - 0x56, 0x5f, 0x50, 0x68, 0x9b, 0xb0, 0x22, 0xa7, 0xbf, 0x58, 0xed, 0x35, 0x72, 0x81, 0x98, 0x42, - 0xe3, 0xc0, 0x3c, 0x85, 0xa2, 0x75, 0x43, 0xf4, 0x9b, 0x79, 0x24, 0x6d, 0x1a, 0x6f, 0x7d, 0x8d, - 0x69, 0x3c, 0x11, 0x04, 0xf1, 0x93, 0x92, 0x74, 0x59, 0x5b, 0x08, 0x9f, 0x43, 0x62, 0x6f, 0xc3, - 0x6a, 0xc2, 0x23, 0x5c, 0xb4, 0x35, 0xfe, 0x0e, 0xf1, 0xcf, 0x12, 0xf0, 0x9a, 0xf4, 0x54, 0x6a, - 0xbc, 0x20, 0xae, 0x39, 0x85, 0x76, 0x7e, 0x6f, 0xc0, 0xcd, 0x32, 0x0c, 0xf4, 0x53, 0xd9, 0x0b, - 0xb6, 0xd3, 0x75, 0x68, 0x67, 0xa9, 0x4f, 0x9c, 0xea, 0xe5, 0x54, 0x30, 0x75, 0xf2, 0x2c, 0x17, - 0x34, 0xf9, 0xcc, 0x28, 0xf8, 0xc5, 0xb1, 0xe9, 0x43, 0x6b, 0x5c, 0x7d, 0x3e, 0x25, 0xe8, 0xfc, - 0xc5, 0x80, 0x57, 0xe7, 0x66, 0xe5, 0x7f, 0xf1, 0xb3, 0x2b, 0x14, 0xa1, 0xcb, 0x64, 0x33, 0xbb, - 0x7e, 0x4b, 0xc0, 0x79, 0xe3, 0x43, 0x58, 0xca, 0x4b, 0xcf, 0x70, 0xf5, 0xb3, 0xeb, 0x2b, 0xd5, - 0x83, 0x9a, 0xf3, 0xdc, 0x2a, 0xbf, 0x73, 0x0e, 0xaf, 0x54, 0xec, 0xaf, 0x74, 0xae, 0x1d, 0x9a, - 0xc2, 0x91, 0x97, 0xcb, 0xfe, 0x75, 0x4b, 0x13, 0x2c, 0xa6, 0x5e, 0xa2, 0xba, 0x05, 0x5f, 0xa5, - 0x10, 0xcd, 0x6a, 0x21, 0x3a, 0xbf, 0x33, 0x61, 0x65, 0x4a, 0x15, 0x5b, 0x06, 0x33, 0x18, 0xca, - 0x40, 0x9a, 0xc1, 0x70, 0x61, 0x51, 0xe9, 0xc1, 0xb5, 0xa6, 0x82, 0x8b, 0x6d, 0x24, 0xf5, 0xf7, - 0xbc, 0xdc, 0x93, 0xaf, 0xb4, 0x02, 0x2b, 0x61, 0x6f, 0x4c, 0x85, 0xbd, 0x0f, 0xad, 0x61, 0x96, - 0xd3, 0x29, 0x51, 0x3b, 0x0a, 0xc4, 0x06, 0x4c, 0xd9, 0x48, 0x3f, 0x00, 0x89, 0xb9, 0xa7, 0x44, - 0xb0, 0xed, 0x62, 0xf5, 0x6a, 0x5f, 0xeb, 0x13, 0xc9, 0x55, 0x4c, 0x3d, 0x1d, 0xd9, 0x3a, 0x70, - 0xea, 0xd1, 0x32, 0x0a, 0xaa, 0x19, 0xf5, 0x6c, 0xaa, 0xcd, 0xc9, 0x80, 0xbc, 0x74, 0x3e, 0xbd, - 0xa5, 0x86, 0x61, 0x91, 0x4a, 0x37, 0xaa, 0x19, 0x51, 0x99, 0x87, 0x7f, 0x65, 0xc0, 0x1d, 0xf5, - 0x64, 0xce, 0x4f, 0x84, 0xbb, 0xda, 0x13, 0x36, 0x2b, 0x49, 0x3e, 0x65, 0x34, 0x45, 0x7f, 0x1c, - 0x86, 0x62, 0xfd, 0x31, 0xd5, 0x14, 0xad, 0x30, 0x95, 0xcc, 0xb0, 0xa6, 0x5a, 0xf4, 0x1a, 0x59, - 0xfb, 0x50, 0xfc, 0x4c, 0x5f, 0x77, 0x05, 0xe0, 0x7c, 0x0a, 0x83, 0x45, 0x76, 0xbd, 0xac, 0x3f, - 0xb6, 0xce, 0xa1, 0x29, 0xe6, 0x1e, 0xb6, 0x04, 0x9d, 0x87, 0x11, 0xd5, 0xd0, 0x61, 0x62, 0xd7, - 0x58, 0x1b, 0xea, 0x47, 0x79, 0x9c, 0xd8, 0x06, 0xeb, 0x40, 0xe3, 0x31, 0x0e, 0xbe, 0xb6, 0xc9, - 0x00, 0x9a, 0xd8, 0x18, 0xc7, 0xdc, 0xb6, 0x10, 0x7d, 0x94, 0x7b, 0x69, 0x6e, 0xd7, 0x11, 0x2d, - 0x5e, 0x30, 0xbb, 0xc1, 0x96, 0x01, 0x3e, 0x9e, 0xe4, 0xb1, 0x64, 0x6b, 0x22, 0x6d, 0x8f, 0x87, - 0x3c, 0xe7, 0x76, 0x6b, 0xeb, 0x27, 0x74, 0x64, 0x84, 0x2f, 0x6d, 0x4f, 0xea, 0x22, 0xd8, 0xae, - 0xb1, 0x16, 0x58, 0x9f, 0xf1, 0x4b, 0xdb, 0x60, 0x5d, 0x68, 0xb9, 0x93, 0x28, 0x0a, 0xa2, 0x91, - 0xd0, 0x47, 0xaa, 0x87, 0xb6, 0x85, 0x04, 0x34, 0x28, 0xe1, 0x43, 0xbb, 0xce, 0x7a, 0xd0, 0xfe, - 0x44, 0xfe, 0xa2, 0x6e, 0x37, 0x90, 0x84, 0x6c, 0x78, 0xa6, 0x89, 0x24, 0x52, 0x8e, 0x50, 0x0b, - 0x21, 0x3a, 0x85, 0x50, 0x7b, 0xeb, 0x10, 0xda, 0x6a, 0xc9, 0x63, 0x2b, 0xd0, 0x95, 0x36, 0x20, - 0xca, 0xae, 0xe1, 0x85, 0xe8, 0x5d, 0xb6, 0x0d, 0xbc, 0x3c, 0xae, 0x6b, 0xb6, 0x89, 0x5f, 0xb8, - 0x93, 0xd9, 0x16, 0x39, 0xe4, 0x2a, 0xf2, 0xed, 0x3a, 0x32, 0xd2, 0x6c, 0x6f, 0x0f, 0xb7, 0x1e, - 0x41, 0x8b, 0x3e, 0x0f, 0x71, 0x64, 0x59, 0x96, 0xf2, 0x24, 0xc6, 0xae, 0xa1, 0x4f, 0x51, 0xbb, - 0xe0, 0x36, 0xd0, 0x37, 0x74, 0x1d, 0x01, 0x9b, 0x68, 0x82, 0xf0, 0x93, 0x40, 
0x58, 0x5b, 0x3f, - 0x33, 0xa0, 0xad, 0xa6, 0x72, 0x76, 0x03, 0x56, 0x94, 0x93, 0x24, 0x4a, 0x48, 0xdc, 0xe7, 0xb9, - 0x40, 0xd8, 0x06, 0x29, 0x28, 0x40, 0x13, 0xfd, 0xea, 0xf2, 0x71, 0x7c, 0xc1, 0x25, 0xc6, 0x42, - 0x95, 0xb8, 0x04, 0x4a, 0xb8, 0x8e, 0x07, 0x10, 0xa6, 0x52, 0xb7, 0x1b, 0xec, 0x16, 0x30, 0x04, - 0x1f, 0x05, 0x23, 0x4c, 0x27, 0x31, 0x2a, 0x67, 0x76, 0x73, 0xeb, 0x23, 0x68, 0xab, 0x89, 0x54, - 0xb3, 0x43, 0xa1, 0x0a, 0x3b, 0x04, 0xc2, 0x36, 0x4a, 0xc5, 0x12, 0x63, 0x6e, 0x3d, 0xa5, 0x4d, - 0x0e, 0x07, 0x3a, 0xcd, 0x33, 0x12, 0x23, 0xd3, 0xeb, 0x3c, 0x48, 0x64, 0xc0, 0x79, 0x12, 0x7a, - 0x7e, 0x91, 0x60, 0x17, 0x3c, 0xcd, 0x6d, 0x0b, 0xbf, 0x1f, 0x46, 0x3f, 0xe2, 0x3e, 0x66, 0x18, - 0x86, 0x21, 0xc8, 0x72, 0xbb, 0xb1, 0x75, 0x00, 0xdd, 0xa7, 0xaa, 0xd1, 0x1f, 0x26, 0x78, 0x01, - 0x65, 0x5c, 0x89, 0xb5, 0x6b, 0xa8, 0x93, 0xb2, 0xb3, 0xc0, 0xda, 0x06, 0x5b, 0x85, 0x25, 0x8c, - 0x46, 0x89, 0x32, 0xb7, 0x9e, 0x00, 0x9b, 0x6d, 0x51, 0xe8, 0xb4, 0xd2, 0x60, 0xbb, 0x86, 0x96, - 0x7c, 0xc6, 0x2f, 0xf1, 0x9b, 0x62, 0xf8, 0x70, 0x14, 0xc5, 0x29, 0x27, 0x9a, 0x8a, 0x21, 0xfd, - 0x14, 0x87, 0x08, 0x6b, 0xeb, 0xe9, 0x54, 0x33, 0x3f, 0x4c, 0xb4, 0x74, 0x27, 0xd8, 0xae, 0x51, - 0xf2, 0x91, 0x14, 0x81, 0x90, 0x0e, 0x24, 0x31, 0x02, 0x63, 0xa2, 0xa2, 0xdd, 0x90, 0x7b, 0xa9, - 0x80, 0xad, 0x9d, 0x3f, 0x36, 0xa1, 0x29, 0x66, 0x56, 0xf6, 0x11, 0x74, 0xb5, 0x3f, 0x66, 0x32, - 0xea, 0xb4, 0xb3, 0x7f, 0x7a, 0x5d, 0xff, 0xbf, 0x19, 0xbc, 0x68, 0x0f, 0x4e, 0x8d, 0x7d, 0x08, - 0x50, 0xee, 0xa8, 0xec, 0x26, 0x0d, 0x3e, 0xd3, 0x3b, 0xeb, 0x7a, 0x9f, 0x7e, 0xdd, 0x98, 0xf3, - 0x87, 0x5a, 0xa7, 0xc6, 0xbe, 0x07, 0x4b, 0xb2, 0x07, 0x89, 0xd4, 0x62, 0x03, 0x6d, 0xc3, 0x98, - 0xb3, 0x7d, 0x5e, 0x2b, 0xec, 0x93, 0x42, 0x98, 0x48, 0x1f, 0xd6, 0x9f, 0xb3, 0xae, 0x08, 0x31, - 0xaf, 0x2c, 0x5c, 0x64, 0x9c, 0x1a, 0xdb, 0x87, 0xae, 0x58, 0x37, 0x44, 0x67, 0xbd, 0x8d, 0xbc, - 0x8b, 0xf6, 0x8f, 0x6b, 0x0d, 0xda, 0x85, 0x9e, 0xbe, 0x21, 0x30, 0xf2, 0xe4, 0x9c, 0x55, 0x42, - 0x08, 0x99, 0xb7, 0x4c, 0x38, 0x35, 0xe6, 0xc1, 0xad, 0xf9, 0x73, 0x3e, 0x7b, 0xbd, 0xfc, 0x19, - 0x76, 0xc1, 0x62, 0xb1, 0xee, 0x5c, 0xc7, 0x52, 0xa8, 0xf8, 0x01, 0xf4, 0x0b, 0xe5, 0x45, 0x5a, - 0xcb, 0xac, 0x18, 0x48, 0xd3, 0x16, 0xac, 0x06, 0xeb, 0xaf, 0x2d, 0xa4, 0x17, 0xe2, 0x8f, 0x61, - 0xb5, 0x64, 0x88, 0x85, 0xfb, 0xd8, 0x9d, 0x99, 0x73, 0x15, 0xb7, 0x0e, 0x16, 0x91, 0x0b, 0xa9, - 0x3f, 0x2c, 0x97, 0xdb, 0xaa, 0xe4, 0xd7, 0xf5, 0xd8, 0xce, 0x97, 0xee, 0x5c, 0xc7, 0xa2, 0x34, - 0xdc, 0xeb, 0x7f, 0xf1, 0xd5, 0xc0, 0xf8, 0xf2, 0xab, 0x81, 0xf1, 0xcf, 0xaf, 0x06, 0xc6, 0x2f, - 0x9e, 0x0f, 0x6a, 0x5f, 0x3e, 0x1f, 0xd4, 0xfe, 0xf1, 0x7c, 0x50, 0x3b, 0x69, 0xd2, 0xbf, 0x2b, - 0x7c, 0xf3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x83, 0xfc, 0xb2, 0x14, 0xc0, 0x20, 0x00, 0x00, + // 2929 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x3a, 0xcd, 0x6f, 0xe5, 0x56, + 0xf5, 0xcf, 0xf6, 0xfb, 0x3c, 0xef, 0x25, 0x71, 0xee, 0x64, 0xe6, 0xf7, 0x9a, 0xce, 0xbc, 0xa6, + 0x9e, 0xaa, 0xbf, 0x34, 0x2a, 0x51, 0x1b, 0x8a, 0x8a, 0x2a, 0x41, 0xdb, 0x49, 0xa6, 0x99, 0x29, + 0x99, 0x66, 0xc6, 0x49, 0x87, 0x15, 0x12, 0x8e, 0xdf, 0xcd, 0x8b, 0x89, 0x9f, 0xed, 0xb1, 0xfd, + 0x12, 0x65, 0x81, 0xd8, 0x20, 0xb6, 0xb0, 0x01, 0x09, 0xc4, 0x06, 0x24, 0xb6, 0x2c, 0xf8, 0x03, + 0x58, 0x42, 0x97, 0x15, 0x2b, 0x96, 0xa8, 0xf3, 0x37, 0xb0, 0x61, 0x81, 0xd0, 0x39, 0xf7, 0x5e, + 0xfb, 0xfa, 0x7d, 0x64, 0x3a, 0x48, 0xec, 0x7c, 0x3e, 0x7c, 0xee, 0xb9, 0xe7, 0xfb, 0xf8, 0x3d, + 0x58, 0x1e, 0x8e, 0x2f, 0xe3, 0xf4, 0x9c, 0xa7, 0xdb, 
0x49, 0x1a, 0xe7, 0x31, 0x33, 0x93, 0x13, + 0x67, 0x13, 0xd8, 0x93, 0x09, 0x4f, 0xaf, 0x8e, 0x72, 0x2f, 0x9f, 0x64, 0x2e, 0x7f, 0x36, 0xe1, + 0x59, 0xce, 0x18, 0xd4, 0x23, 0x6f, 0xcc, 0xfb, 0xc6, 0x86, 0xb1, 0xd9, 0x71, 0xe9, 0xd9, 0x49, + 0x60, 0x6d, 0x37, 0x1e, 0x8f, 0xe3, 0xe8, 0xfb, 0x24, 0xc3, 0xe5, 0x59, 0x12, 0x47, 0x19, 0x67, + 0xb7, 0xa0, 0x99, 0xf2, 0x6c, 0x12, 0xe6, 0xc4, 0xdd, 0x76, 0x25, 0xc4, 0x6c, 0xb0, 0xc6, 0xd9, + 0xa8, 0x6f, 0x92, 0x08, 0x7c, 0x44, 0xce, 0x2c, 0x9e, 0xa4, 0x3e, 0xef, 0x5b, 0x84, 0x94, 0x10, + 0xe2, 0x85, 0x5e, 0xfd, 0xba, 0xc0, 0x0b, 0xc8, 0xf9, 0xa3, 0x01, 0x37, 0x2a, 0xca, 0xbd, 0xf4, + 0x89, 0xef, 0x41, 0x4f, 0x9c, 0x21, 0x24, 0xd0, 0xb9, 0xdd, 0x1d, 0x7b, 0x3b, 0x39, 0xd9, 0x3e, + 0xd2, 0xf0, 0x6e, 0x85, 0x8b, 0xbd, 0x0f, 0x4b, 0xd9, 0xe4, 0xe4, 0xd8, 0xcb, 0xce, 0xe5, 0x6b, + 0xf5, 0x0d, 0x6b, 0xb3, 0xbb, 0xb3, 0x4a, 0xaf, 0xe9, 0x04, 0xb7, 0xca, 0xe7, 0xfc, 0xc1, 0x80, + 0xee, 0xee, 0x19, 0xf7, 0x25, 0x8c, 0x8a, 0x26, 0x5e, 0x96, 0xf1, 0xa1, 0x52, 0x54, 0x40, 0x6c, + 0x0d, 0x1a, 0x79, 0x9c, 0x7b, 0x21, 0xa9, 0xda, 0x70, 0x05, 0xc0, 0x06, 0x00, 0xd9, 0xc4, 0xf7, + 0x79, 0x96, 0x9d, 0x4e, 0x42, 0x52, 0xb5, 0xe1, 0x6a, 0x18, 0x94, 0x76, 0xea, 0x05, 0x21, 0x1f, + 0x92, 0x99, 0x1a, 0xae, 0x84, 0x58, 0x1f, 0x5a, 0x97, 0x5e, 0x1a, 0x05, 0xd1, 0xa8, 0xdf, 0x20, + 0x82, 0x02, 0xf1, 0x8d, 0x21, 0xcf, 0xbd, 0x20, 0xec, 0x37, 0x37, 0x8c, 0xcd, 0x9e, 0x2b, 0x21, + 0xe7, 0xdf, 0x06, 0xc0, 0xde, 0x64, 0x9c, 0x48, 0x35, 0x37, 0xa0, 0x4b, 0x1a, 0x1c, 0x7b, 0x27, + 0x21, 0xcf, 0x48, 0x57, 0xcb, 0xd5, 0x51, 0x6c, 0x13, 0x56, 0xfc, 0x78, 0x9c, 0x84, 0x3c, 0xe7, + 0x43, 0xc9, 0x85, 0xaa, 0x1b, 0xee, 0x34, 0x9a, 0xbd, 0x01, 0x4b, 0xa7, 0x41, 0x14, 0x64, 0x67, + 0x7c, 0x78, 0xef, 0x2a, 0xe7, 0xc2, 0xe4, 0x86, 0x5b, 0x45, 0x32, 0x07, 0x7a, 0x0a, 0xe1, 0xc6, + 0x97, 0x19, 0x5d, 0xc8, 0x70, 0x2b, 0x38, 0xf6, 0x36, 0xac, 0xf2, 0x2c, 0x0f, 0xc6, 0x5e, 0xce, + 0x8f, 0x51, 0x15, 0x62, 0x6c, 0x10, 0xe3, 0x2c, 0x01, 0x7d, 0x7f, 0x92, 0x64, 0x74, 0x4f, 0xcb, + 0xc5, 0x47, 0xb6, 0x0e, 0xed, 0x24, 0x8d, 0x47, 0x29, 0xcf, 0xb2, 0x7e, 0x8b, 0x42, 0xa2, 0x80, + 0x9d, 0x2f, 0x0c, 0x80, 0x83, 0xd8, 0x1b, 0x4a, 0x03, 0xcc, 0x28, 0x2d, 0x4c, 0x30, 0xa5, 0xf4, + 0x00, 0x80, 0x6c, 0x22, 0x58, 0x4c, 0x62, 0xd1, 0x30, 0x95, 0x03, 0xad, 0xea, 0x81, 0xf8, 0xee, + 0x98, 0xe7, 0xde, 0xbd, 0x20, 0x0a, 0xe3, 0x91, 0x0c, 0x73, 0x0d, 0xc3, 0xde, 0x84, 0xe5, 0x12, + 0xda, 0x3f, 0x7e, 0xb8, 0x47, 0x37, 0xed, 0xb8, 0x53, 0xd8, 0xd9, 0x6b, 0x3a, 0xbf, 0x34, 0x60, + 0xe9, 0xe8, 0xcc, 0x4b, 0x87, 0x41, 0x34, 0xda, 0x4f, 0xe3, 0x49, 0x82, 0x5e, 0xcf, 0xbd, 0x74, + 0xc4, 0x73, 0x99, 0xbe, 0x12, 0xc2, 0xa4, 0xde, 0xdb, 0x3b, 0x40, 0xcd, 0x2d, 0x4c, 0x6a, 0x7c, + 0x16, 0x37, 0x4f, 0xb3, 0xfc, 0x20, 0xf6, 0xbd, 0x3c, 0x88, 0x23, 0xa9, 0x78, 0x15, 0x49, 0x89, + 0x7b, 0x15, 0xf9, 0x14, 0x79, 0x16, 0x25, 0x2e, 0x41, 0x78, 0xe3, 0x49, 0x24, 0x29, 0x0d, 0xa2, + 0x14, 0xb0, 0xf3, 0xcf, 0x3a, 0xc0, 0xd1, 0x55, 0xe4, 0x4f, 0xc5, 0xd8, 0xfd, 0x0b, 0x1e, 0xe5, + 0xd5, 0x18, 0x13, 0x28, 0x14, 0x26, 0x42, 0x2e, 0x51, 0xc6, 0x2d, 0x60, 0x76, 0x1b, 0x3a, 0x29, + 0xf7, 0x79, 0x94, 0x23, 0xd1, 0x22, 0x62, 0x89, 0xc0, 0x68, 0x1a, 0x7b, 0x59, 0xce, 0xd3, 0x8a, + 0x79, 0x2b, 0x38, 0xb6, 0x05, 0xb6, 0x0e, 0xef, 0xe7, 0xc1, 0x50, 0x9a, 0x78, 0x06, 0x8f, 0xf2, + 0xe8, 0x12, 0x4a, 0x5e, 0x53, 0xc8, 0xd3, 0x71, 0x28, 0x4f, 0x87, 0x49, 0x9e, 0x88, 0xb2, 0x19, + 0x3c, 0xca, 0x3b, 0x09, 0x63, 0xff, 0x3c, 0x88, 0x46, 0xe4, 0x80, 0x36, 0x99, 0xaa, 0x82, 0x63, + 0xdf, 0x01, 0x7b, 0x12, 0xa5, 0x3c, 0x8b, 0xc3, 0x0b, 0x3e, 0x24, 0x3f, 0x66, 
0xfd, 0x8e, 0x56, + 0x76, 0x74, 0x0f, 0xbb, 0x33, 0xac, 0x9a, 0x87, 0x40, 0x54, 0x1a, 0xe9, 0xa1, 0x01, 0xc0, 0x09, + 0x29, 0x72, 0x7c, 0x95, 0xf0, 0x7e, 0x57, 0xc4, 0x5d, 0x89, 0x61, 0xef, 0xc0, 0x8d, 0x8c, 0xfb, + 0x71, 0x34, 0xcc, 0xee, 0xf1, 0xb3, 0x20, 0x1a, 0x3e, 0x22, 0x5b, 0xf4, 0x7b, 0x64, 0xe2, 0x79, + 0x24, 0x8c, 0x18, 0x52, 0x7c, 0x6f, 0xef, 0xe0, 0xf0, 0x32, 0xe2, 0x69, 0x7f, 0x49, 0x44, 0x4c, + 0x05, 0x89, 0xee, 0xf6, 0xe3, 0xe8, 0x34, 0x0c, 0xfc, 0xfc, 0x51, 0x36, 0xea, 0x2f, 0x13, 0x8f, + 0x8e, 0x42, 0x97, 0xe6, 0x45, 0x5a, 0xaf, 0x08, 0x97, 0x16, 0x88, 0x22, 0x18, 0xdc, 0x24, 0xeb, + 0xdb, 0x5a, 0x30, 0xb8, 0x7a, 0x30, 0x20, 0x71, 0x55, 0x0f, 0x06, 0x37, 0xc9, 0x9c, 0xdf, 0x1a, + 0xd0, 0xd3, 0x6b, 0xbb, 0xd6, 0x75, 0x8c, 0x05, 0x5d, 0xc7, 0xd4, 0xbb, 0x0e, 0x7b, 0xab, 0xe8, + 0x2e, 0xa2, 0x5b, 0x90, 0xfd, 0x1f, 0xa7, 0x31, 0x96, 0x61, 0x97, 0x08, 0x45, 0xc3, 0x79, 0x17, + 0xba, 0x29, 0x0f, 0xbd, 0xab, 0xa2, 0x4d, 0x20, 0xff, 0x0a, 0xf2, 0xbb, 0x25, 0xda, 0xd5, 0x79, + 0x9c, 0xbf, 0x9a, 0xd0, 0xd5, 0x88, 0x33, 0xb1, 0x6b, 0x7c, 0xcd, 0xd8, 0x35, 0x17, 0xc4, 0xee, + 0x86, 0x52, 0x69, 0x72, 0xb2, 0x17, 0xa4, 0x32, 0x9d, 0x75, 0x54, 0xc1, 0x51, 0x49, 0x16, 0x1d, + 0x85, 0xd5, 0x5e, 0x03, 0xb5, 0x54, 0x99, 0x46, 0xb3, 0x6d, 0x60, 0x84, 0xda, 0xf5, 0x72, 0xff, + 0xec, 0xf3, 0x44, 0x46, 0x4f, 0x93, 0x42, 0x70, 0x0e, 0x85, 0xbd, 0x06, 0x8d, 0x2c, 0xf7, 0x46, + 0x9c, 0x52, 0x65, 0x79, 0xa7, 0x43, 0xa1, 0x8d, 0x08, 0x57, 0xe0, 0x35, 0xe3, 0xb7, 0x5f, 0x60, + 0x7c, 0xe7, 0x4f, 0x16, 0x2c, 0x55, 0xba, 0xf1, 0xbc, 0xa9, 0xa5, 0x3c, 0xd1, 0x5c, 0x70, 0xe2, + 0x06, 0xd4, 0x27, 0x51, 0x20, 0x9c, 0xbd, 0xbc, 0xd3, 0x43, 0xfa, 0xe7, 0x51, 0x90, 0x63, 0x76, + 0xb8, 0x44, 0xd1, 0x74, 0xaa, 0xbf, 0x28, 0x20, 0xde, 0x81, 0x1b, 0x65, 0x6a, 0xee, 0xed, 0x1d, + 0x1c, 0xc4, 0xfe, 0x79, 0x51, 0xcb, 0xe7, 0x91, 0x18, 0x13, 0x33, 0x0b, 0x95, 0x98, 0x07, 0x35, + 0x31, 0xb5, 0xfc, 0x3f, 0x34, 0x7c, 0x9c, 0x22, 0xc8, 0x4a, 0x32, 0xa0, 0xb4, 0xb1, 0xe2, 0x41, + 0xcd, 0x15, 0x74, 0xf6, 0x06, 0xd4, 0x87, 0x93, 0x71, 0x22, 0x6d, 0xb5, 0x8c, 0x7c, 0x65, 0x5b, + 0x7f, 0x50, 0x73, 0x89, 0x8a, 0x5c, 0x61, 0xec, 0x0d, 0xfb, 0x9d, 0x92, 0xab, 0xec, 0x7d, 0xc8, + 0x85, 0x54, 0xe4, 0xc2, 0x9a, 0x41, 0xf5, 0x43, 0x72, 0x95, 0xe5, 0x1b, 0xb9, 0x90, 0xca, 0xde, + 0x03, 0xb8, 0xf0, 0xc2, 0x60, 0x28, 0x9a, 0x45, 0x97, 0x78, 0xd7, 0x90, 0xf7, 0x69, 0x81, 0x95, + 0x51, 0xaf, 0xf1, 0xdd, 0x6b, 0x43, 0x33, 0x13, 0xe1, 0xff, 0x5d, 0x58, 0xad, 0xf8, 0xec, 0x20, + 0xc8, 0xc8, 0xc0, 0x82, 0xdc, 0x37, 0x16, 0x0d, 0x5a, 0xea, 0xfd, 0x01, 0x00, 0x59, 0xe2, 0x7e, + 0x9a, 0xc6, 0xa9, 0x1a, 0xf8, 0x8c, 0x62, 0xe0, 0x73, 0xee, 0x40, 0x07, 0x2d, 0x70, 0x0d, 0x19, + 0xaf, 0xbe, 0x88, 0x9c, 0x40, 0x8f, 0xee, 0xfc, 0xe4, 0x60, 0x01, 0x07, 0xdb, 0x81, 0x35, 0x31, + 0x75, 0x89, 0x24, 0x78, 0x1c, 0x67, 0x01, 0x59, 0x42, 0xa4, 0xe3, 0x5c, 0x1a, 0xd6, 0x32, 0x8e, + 0xe2, 0x8e, 0x9e, 0x1c, 0xa8, 0xb9, 0x40, 0xc1, 0xce, 0xb7, 0xa0, 0x83, 0x27, 0x8a, 0xe3, 0x36, + 0xa1, 0x49, 0x04, 0x65, 0x07, 0xbb, 0x70, 0x82, 0x54, 0xc8, 0x95, 0x74, 0xe7, 0xe7, 0x06, 0x74, + 0x45, 0x91, 0x13, 0x6f, 0xbe, 0x6c, 0x8d, 0xdb, 0xa8, 0xbc, 0xae, 0xaa, 0x84, 0x2e, 0x71, 0x1b, + 0x80, 0xca, 0x94, 0x60, 0xa8, 0x97, 0x41, 0x51, 0x62, 0x5d, 0x8d, 0x03, 0x1d, 0x53, 0x42, 0x73, + 0x4c, 0xfb, 0x6b, 0x13, 0x7a, 0xd2, 0xa5, 0x82, 0xe5, 0x7f, 0x94, 0xac, 0x32, 0x9f, 0xea, 0x7a, + 0x3e, 0xbd, 0xa9, 0xf2, 0xa9, 0x51, 0x5e, 0xa3, 0x8c, 0xa2, 0x32, 0x9d, 0xee, 0xca, 0x74, 0x6a, + 0x12, 0xdb, 0x92, 0x4a, 0x27, 0xc5, 0x25, 0xb2, 0xe9, 0xae, 0xcc, 0xa6, 0x56, 0xc9, 0x54, 0x84, + 0x54, 
0x91, 0x4c, 0x77, 0x65, 0x32, 0xb5, 0x4b, 0xa6, 0xc2, 0xcd, 0x2a, 0x97, 0xee, 0xb5, 0xa0, + 0x41, 0xee, 0x74, 0x3e, 0x00, 0x5b, 0x37, 0x0d, 0xe5, 0xc4, 0x9b, 0x92, 0x58, 0x09, 0x05, 0x8d, + 0xc9, 0x95, 0xef, 0x3e, 0x83, 0xa5, 0x4a, 0x29, 0xc2, 0x8e, 0x1f, 0x64, 0xbb, 0x5e, 0xe4, 0xf3, + 0xb0, 0xd8, 0x3b, 0x34, 0x8c, 0x16, 0x64, 0x66, 0x29, 0x59, 0x8a, 0xa8, 0x04, 0x99, 0xb6, 0x3d, + 0x58, 0x95, 0xed, 0xe1, 0x6f, 0x06, 0xf4, 0xf4, 0x17, 0x70, 0x01, 0xb9, 0x9f, 0xa6, 0xbb, 0xf1, + 0x50, 0x78, 0xb3, 0xe1, 0x2a, 0x10, 0x43, 0x1f, 0x1f, 0x43, 0x2f, 0xcb, 0x64, 0x04, 0x16, 0xb0, + 0xa4, 0x1d, 0xf9, 0x71, 0xa2, 0xf6, 0xc1, 0x02, 0x96, 0xb4, 0x03, 0x7e, 0xc1, 0x43, 0xd9, 0xa0, + 0x0a, 0x18, 0x4f, 0x7b, 0xc4, 0xb3, 0x0c, 0xc3, 0x44, 0xd4, 0x55, 0x05, 0xe2, 0x5b, 0xae, 0x77, + 0xb9, 0xeb, 0x4d, 0x32, 0x2e, 0x67, 0xb6, 0x02, 0x46, 0xb3, 0xe0, 0xde, 0xea, 0xa5, 0xf1, 0x24, + 0x52, 0x93, 0x9a, 0x86, 0x71, 0x2e, 0x61, 0xf5, 0xf1, 0x24, 0x1d, 0x71, 0x0a, 0x62, 0xb5, 0x06, + 0xaf, 0x43, 0x3b, 0x88, 0x3c, 0x3f, 0x0f, 0x2e, 0xb8, 0xb4, 0x64, 0x01, 0x63, 0xfc, 0xe6, 0xc1, + 0x98, 0xcb, 0x51, 0x95, 0x9e, 0x91, 0xff, 0x34, 0x08, 0x39, 0xc5, 0xb5, 0xbc, 0x92, 0x82, 0x29, + 0x45, 0x45, 0x4f, 0x96, 0x4b, 0xae, 0x80, 0x9c, 0xdf, 0x98, 0xb0, 0x7e, 0x98, 0xf0, 0xd4, 0xcb, + 0xb9, 0x58, 0xac, 0x8f, 0xfc, 0x33, 0x3e, 0xf6, 0x94, 0x0a, 0xb7, 0xc1, 0x8c, 0x13, 0x3a, 0x5c, + 0xc6, 0xbb, 0x20, 0x1f, 0x26, 0xae, 0x19, 0x27, 0xa4, 0x84, 0x97, 0x9d, 0x4b, 0xdb, 0xd2, 0xf3, + 0xc2, 0x2d, 0x7b, 0x1d, 0xda, 0x43, 0x2f, 0xf7, 0x4e, 0xbc, 0x8c, 0x2b, 0x9b, 0x2a, 0x98, 0x16, + 0x52, 0xdc, 0xdf, 0xa4, 0x45, 0x05, 0x40, 0x92, 0xe8, 0x34, 0x69, 0x4d, 0x09, 0x21, 0xf7, 0x69, + 0x38, 0xc9, 0xce, 0xc8, 0x8c, 0x6d, 0x57, 0x00, 0xa8, 0x4b, 0x11, 0xf3, 0x6d, 0xd9, 0x2e, 0x06, + 0x00, 0xa7, 0x69, 0x3c, 0x16, 0x85, 0x85, 0x1a, 0x50, 0xdb, 0xd5, 0x30, 0x8a, 0x7e, 0x2c, 0xd6, + 0x15, 0x28, 0xe9, 0x02, 0xe3, 0xe4, 0xb0, 0xf4, 0xf4, 0x5d, 0x19, 0xf6, 0x8f, 0x78, 0xee, 0xb1, + 0x75, 0xcd, 0x1c, 0x80, 0xe6, 0x40, 0x8a, 0x34, 0xc6, 0x0b, 0xab, 0x87, 0x2a, 0x39, 0x96, 0x56, + 0x72, 0x94, 0x05, 0xeb, 0x14, 0xe2, 0xf4, 0xec, 0xbc, 0x07, 0x6b, 0xd2, 0x23, 0x4f, 0xdf, 0xc5, + 0x53, 0x17, 0xfa, 0x42, 0x90, 0xc5, 0xf1, 0xce, 0x5f, 0x0c, 0xb8, 0x39, 0xf5, 0xda, 0x4b, 0x7f, + 0xaf, 0x78, 0x1f, 0xea, 0xb8, 0xf0, 0xf5, 0x2d, 0x4a, 0xcd, 0xbb, 0x78, 0xc6, 0x5c, 0x91, 0xdb, + 0x08, 0xdc, 0x8f, 0xf2, 0xf4, 0xca, 0xa5, 0x17, 0xd6, 0x3f, 0x85, 0x4e, 0x81, 0x42, 0xb9, 0xe7, + 0xfc, 0x4a, 0x55, 0xdf, 0x73, 0x7e, 0x85, 0x13, 0xc5, 0x85, 0x17, 0x4e, 0x84, 0x69, 0x64, 0x83, + 0xad, 0x18, 0xd6, 0x15, 0xf4, 0x0f, 0xcc, 0x6f, 0x1b, 0xce, 0x8f, 0xa1, 0xff, 0xc0, 0x8b, 0x86, + 0xa1, 0x8c, 0x47, 0x51, 0x14, 0xa4, 0x09, 0x5e, 0xd5, 0x4c, 0xd0, 0x45, 0x29, 0x44, 0xbd, 0x26, + 0x1a, 0x6f, 0x43, 0xe7, 0x44, 0xb5, 0x43, 0x69, 0xf8, 0x12, 0x41, 0x31, 0xf3, 0x2c, 0xcc, 0xe4, + 0x5a, 0x49, 0xcf, 0xce, 0x4d, 0xb8, 0xb1, 0xcf, 0x73, 0x71, 0xf6, 0xee, 0xe9, 0x48, 0x9e, 0xec, + 0x6c, 0xc2, 0x5a, 0x15, 0x2d, 0x8d, 0x6b, 0x83, 0xe5, 0x9f, 0x16, 0xad, 0xc6, 0x3f, 0x1d, 0x39, + 0x47, 0x70, 0x47, 0x4c, 0x4b, 0x93, 0x13, 0x54, 0x01, 0x4b, 0xdf, 0xe7, 0xc9, 0xd0, 0xcb, 0xb9, + 0xba, 0xc4, 0x0e, 0xac, 0x65, 0x82, 0xb6, 0x7b, 0x3a, 0x3a, 0x8e, 0xc7, 0xe1, 0x51, 0x9e, 0x06, + 0x91, 0x92, 0x31, 0x97, 0xe6, 0x1c, 0xc0, 0x60, 0x91, 0x50, 0xa9, 0x48, 0x1f, 0x5a, 0xf2, 0x63, + 0x8d, 0x74, 0xb3, 0x02, 0x67, 0xfd, 0xec, 0x8c, 0x60, 0x7d, 0x9f, 0xe7, 0x33, 0x33, 0x53, 0x59, + 0x76, 0xf0, 0x8c, 0xcf, 0xca, 0xf6, 0x58, 0xc0, 0xec, 0x1b, 0xd0, 0x3b, 0x0d, 0xc2, 0x9c, 0xa7, + 0x72, 0xe7, 0x98, 0x89, 0xf5, 
0x0a, 0xd9, 0xf9, 0xa9, 0x05, 0xf6, 0xf4, 0x31, 0x85, 0x9f, 0x8c, + 0xb9, 0x55, 0xc3, 0xac, 0x54, 0x0d, 0x06, 0xf5, 0x31, 0x16, 0x76, 0x99, 0x33, 0xf8, 0x5c, 0x26, + 0x5a, 0x7d, 0x41, 0xa2, 0x6d, 0xc2, 0x8a, 0x9c, 0xfe, 0x62, 0xb5, 0xd7, 0xc8, 0x05, 0x62, 0x0a, + 0x8d, 0x03, 0xf3, 0x14, 0x8a, 0xd6, 0x0d, 0x51, 0x6f, 0xe6, 0x91, 0xb4, 0x69, 0xbc, 0xf5, 0x35, + 0xa6, 0xf1, 0x44, 0x10, 0xc4, 0x27, 0x25, 0x69, 0xb2, 0xb6, 0x10, 0x3e, 0x87, 0xc4, 0xde, 0x86, + 0xd5, 0x84, 0x47, 0xb8, 0x68, 0x6b, 0xfc, 0x1d, 0xe2, 0x9f, 0x25, 0xe0, 0x35, 0xa9, 0x55, 0x6a, + 0xbc, 0x20, 0xae, 0x39, 0x85, 0x76, 0x7e, 0x6f, 0xc0, 0xcd, 0xd2, 0x0d, 0xf4, 0xa9, 0xec, 0x05, + 0xdb, 0xe9, 0x3a, 0xb4, 0xb3, 0xd4, 0x27, 0x4e, 0xd5, 0x39, 0x15, 0x4c, 0x95, 0x3c, 0xcb, 0x05, + 0x4d, 0xb6, 0x19, 0x05, 0xbf, 0xd8, 0x37, 0x7d, 0x68, 0x8d, 0xab, 0xed, 0x53, 0x82, 0xce, 0x9f, + 0x0d, 0x78, 0x75, 0x6e, 0x54, 0xfe, 0x17, 0x9f, 0x5d, 0xa1, 0x70, 0x5d, 0x26, 0x8b, 0xd9, 0xf5, + 0x5b, 0x02, 0xce, 0x1b, 0x1f, 0xc2, 0x52, 0x5e, 0x5a, 0x86, 0xab, 0xcf, 0xae, 0xaf, 0x54, 0x5f, + 0xd4, 0x8c, 0xe7, 0x56, 0xf9, 0x9d, 0x73, 0x78, 0xa5, 0xa2, 0x7f, 0xa5, 0x72, 0xed, 0xd0, 0x14, + 0x8e, 0xbc, 0x5c, 0xd6, 0xaf, 0x5b, 0x9a, 0x60, 0x31, 0xf5, 0x12, 0xd5, 0x2d, 0xf8, 0x2a, 0x89, + 0x68, 0x56, 0x13, 0xd1, 0xf9, 0x9d, 0x09, 0x2b, 0x53, 0x47, 0xb1, 0x65, 0x30, 0x83, 0xa1, 0x74, + 0xa4, 0x19, 0x0c, 0x17, 0x26, 0x95, 0xee, 0x5c, 0x6b, 0xca, 0xb9, 0x58, 0x46, 0x52, 0x7f, 0xcf, + 0xcb, 0x3d, 0xd9, 0xa5, 0x15, 0x58, 0x71, 0x7b, 0x63, 0xca, 0xed, 0x7d, 0x68, 0x0d, 0xb3, 0x9c, + 0xde, 0x12, 0xb9, 0xa3, 0x40, 0x2c, 0xc0, 0x14, 0x8d, 0xf4, 0x01, 0x48, 0xcc, 0x3d, 0x25, 0x82, + 0x6d, 0x17, 0xab, 0x57, 0xfb, 0x5a, 0x9b, 0x48, 0xae, 0x62, 0xea, 0xe9, 0xc8, 0xd2, 0x81, 0x53, + 0x8f, 0x16, 0x51, 0x50, 0x8d, 0xa8, 0x67, 0x53, 0x65, 0x4e, 0x3a, 0xe4, 0xa5, 0xe3, 0xe9, 0x2d, + 0x35, 0x0c, 0x8b, 0x50, 0xba, 0x51, 0x8d, 0x88, 0xca, 0x3c, 0xfc, 0x2b, 0x03, 0xee, 0xa8, 0x96, + 0x39, 0x3f, 0x10, 0xee, 0x6a, 0x2d, 0x6c, 0x56, 0x92, 0x6c, 0x65, 0x34, 0x45, 0x7f, 0x1c, 0x86, + 0x62, 0xfd, 0x31, 0xd5, 0x14, 0xad, 0x30, 0x95, 0xc8, 0xb0, 0xa6, 0x4a, 0xf4, 0x1a, 0x69, 0xfb, + 0x50, 0x7c, 0xa6, 0xaf, 0xbb, 0x02, 0x70, 0x3e, 0x85, 0xc1, 0x22, 0xbd, 0x5e, 0xd6, 0x1e, 0xce, + 0x15, 0xdc, 0x11, 0xcd, 0xa7, 0x14, 0xa5, 0x7e, 0x94, 0x79, 0x71, 0x07, 0xa9, 0x74, 0x64, 0x73, + 0xba, 0x23, 0x17, 0x1f, 0x0c, 0xe9, 0x23, 0xb4, 0xa5, 0x7f, 0x30, 0x44, 0x0c, 0x5e, 0x63, 0xd1, + 0xd1, 0x2f, 0x7b, 0x8d, 0xad, 0x73, 0x68, 0x8a, 0xf1, 0x8d, 0x2d, 0x41, 0xe7, 0x61, 0x44, 0xa5, + 0xe0, 0x30, 0xb1, 0x6b, 0xac, 0x0d, 0xf5, 0xa3, 0x3c, 0x4e, 0x6c, 0x83, 0x75, 0xa0, 0xf1, 0x18, + 0xe7, 0x77, 0xdb, 0x64, 0x00, 0x4d, 0xac, 0xef, 0x63, 0x6e, 0x5b, 0x88, 0x3e, 0xca, 0xbd, 0x34, + 0xb7, 0xeb, 0x88, 0x16, 0x0a, 0xd9, 0x0d, 0xb6, 0x0c, 0xf0, 0xf1, 0x24, 0x8f, 0x25, 0x5b, 0x13, + 0x69, 0x7b, 0x3c, 0xe4, 0x39, 0xb7, 0x5b, 0x5b, 0x3f, 0xa1, 0x57, 0x46, 0x38, 0x30, 0xf4, 0xe4, + 0x59, 0x04, 0xdb, 0x35, 0xd6, 0x02, 0xeb, 0x33, 0x7e, 0x69, 0x1b, 0xac, 0x0b, 0x2d, 0x77, 0x12, + 0x45, 0x41, 0x34, 0x12, 0xe7, 0xd1, 0xd1, 0x43, 0xdb, 0x42, 0x02, 0x2a, 0x94, 0xf0, 0xa1, 0x5d, + 0x67, 0x3d, 0x68, 0x7f, 0x22, 0x7f, 0x18, 0xb0, 0x1b, 0x48, 0x42, 0x36, 0x7c, 0xa7, 0x89, 0x24, + 0x3a, 0x1c, 0xa1, 0x16, 0x42, 0xf4, 0x16, 0x42, 0xed, 0xad, 0x43, 0x68, 0xab, 0x5d, 0x95, 0xad, + 0x40, 0x57, 0xea, 0x80, 0x28, 0xbb, 0x86, 0x17, 0xa2, 0xf1, 0xc2, 0x36, 0xf0, 0xf2, 0xb8, 0x75, + 0xda, 0x26, 0x3e, 0xe1, 0x6a, 0x69, 0x5b, 0x64, 0x90, 0xab, 0xc8, 0xb7, 0xeb, 0xc8, 0x48, 0x2b, + 0x8a, 0x3d, 0xdc, 0x7a, 0x04, 0x2d, 0x7a, 0x3c, 0xc4, 
0xc9, 0x6b, 0x59, 0xca, 0x93, 0x18, 0xbb, + 0x86, 0x36, 0xc5, 0xd3, 0x05, 0xb7, 0x81, 0xb6, 0xa1, 0xeb, 0x08, 0xd8, 0x44, 0x15, 0x84, 0x9d, + 0x04, 0xc2, 0xda, 0xfa, 0x99, 0x01, 0x6d, 0xb5, 0x5c, 0xb0, 0x1b, 0xb0, 0xa2, 0x8c, 0x24, 0x51, + 0x42, 0xe2, 0x3e, 0xcf, 0x05, 0xc2, 0x36, 0xe8, 0x80, 0x02, 0x34, 0xd1, 0xae, 0x2e, 0x1f, 0xc7, + 0x17, 0x5c, 0x62, 0x2c, 0x3c, 0x12, 0x77, 0x59, 0x09, 0xd7, 0xf1, 0x05, 0x84, 0xa9, 0x62, 0xd9, + 0x0d, 0x76, 0x0b, 0x18, 0x82, 0x8f, 0x82, 0x11, 0x66, 0x85, 0x98, 0xf8, 0x33, 0xbb, 0xb9, 0xf5, + 0x11, 0xb4, 0xd5, 0x60, 0xad, 0xe9, 0xa1, 0x50, 0x85, 0x1e, 0x02, 0x61, 0x1b, 0xe5, 0xc1, 0x12, + 0x63, 0x6e, 0x3d, 0xa5, 0x85, 0x14, 0xe7, 0x52, 0xcd, 0x32, 0x12, 0x23, 0xc3, 0xeb, 0x3c, 0x48, + 0xa4, 0xc3, 0x79, 0x12, 0x7a, 0x7e, 0x11, 0x60, 0x17, 0x3c, 0xcd, 0x6d, 0x0b, 0x9f, 0x1f, 0x46, + 0x3f, 0xe2, 0x3e, 0x46, 0x18, 0xba, 0x21, 0xc8, 0x72, 0xbb, 0xb1, 0x75, 0x00, 0xdd, 0xa7, 0xaa, + 0x5f, 0x1d, 0x26, 0x78, 0x01, 0xa5, 0x5c, 0x89, 0xb5, 0x6b, 0x78, 0x26, 0x45, 0x67, 0x81, 0xb5, + 0x0d, 0xb6, 0x0a, 0x4b, 0xe8, 0x8d, 0x12, 0x65, 0x6e, 0x3d, 0x01, 0x36, 0x5b, 0x69, 0xd1, 0x68, + 0xa5, 0xc2, 0x76, 0x0d, 0x35, 0xf9, 0x8c, 0x5f, 0xe2, 0x33, 0xf9, 0xf0, 0xe1, 0x28, 0x8a, 0x53, + 0x4e, 0x34, 0xe5, 0x43, 0xfa, 0xa2, 0x88, 0x08, 0x6b, 0xeb, 0xe9, 0x54, 0x4f, 0x3a, 0x4c, 0xb4, + 0x70, 0x27, 0xd8, 0xae, 0x51, 0xf0, 0x91, 0x14, 0x81, 0x90, 0x06, 0x24, 0x31, 0x02, 0x63, 0xe2, + 0x41, 0xbb, 0x21, 0xf7, 0x52, 0x01, 0x5b, 0x3b, 0xff, 0x6a, 0x42, 0x53, 0xa4, 0x39, 0xfb, 0x08, + 0xba, 0xda, 0x6f, 0xb2, 0x8c, 0x1a, 0xc6, 0xec, 0x2f, 0xc8, 0xeb, 0xff, 0x37, 0x83, 0x17, 0xe5, + 0xc1, 0xa9, 0xb1, 0x0f, 0x01, 0xca, 0x55, 0x9b, 0xdd, 0xa4, 0xf9, 0x6d, 0x7a, 0xf5, 0x5e, 0xef, + 0xd3, 0x47, 0x9a, 0x39, 0xbf, 0x37, 0x3b, 0x35, 0xf6, 0x3d, 0x58, 0x92, 0xa5, 0x54, 0x84, 0x16, + 0x1b, 0x68, 0x8b, 0xd2, 0x9c, 0x25, 0xfa, 0x5a, 0x61, 0x9f, 0x14, 0xc2, 0x44, 0xf8, 0xb0, 0xfe, + 0x9c, 0xad, 0x4b, 0x88, 0x79, 0x65, 0xe1, 0x3e, 0xe6, 0xd4, 0xd8, 0x3e, 0x74, 0xc5, 0xd6, 0x24, + 0x1a, 0xc4, 0x6d, 0xe4, 0x5d, 0xb4, 0x46, 0x5d, 0xab, 0xd0, 0x2e, 0xf4, 0xf4, 0x45, 0x87, 0x91, + 0x25, 0xe7, 0x6c, 0x44, 0x42, 0xc8, 0xbc, 0x9d, 0xc8, 0xa9, 0x31, 0x0f, 0x6e, 0xcd, 0x5f, 0x57, + 0xd8, 0xeb, 0xe5, 0xd7, 0xe4, 0x05, 0xfb, 0xd1, 0xba, 0x73, 0x1d, 0x4b, 0x71, 0xc4, 0x0f, 0xa0, + 0x5f, 0x1c, 0x5e, 0x84, 0xb5, 0x8c, 0x8a, 0x81, 0x54, 0x6d, 0xc1, 0x86, 0xb3, 0xfe, 0xda, 0x42, + 0x7a, 0x21, 0xfe, 0x18, 0x56, 0x4b, 0x86, 0x58, 0x98, 0x8f, 0xdd, 0x99, 0x79, 0xaf, 0x62, 0xd6, + 0xc1, 0x22, 0x72, 0x21, 0xf5, 0x87, 0xe5, 0x8e, 0x5e, 0x95, 0xfc, 0xba, 0xee, 0xdb, 0xf9, 0xd2, + 0x9d, 0xeb, 0x58, 0x8a, 0x13, 0x1e, 0xc3, 0x4a, 0xa5, 0x41, 0x2a, 0xd9, 0xd7, 0x36, 0xec, 0xeb, + 0x02, 0xe2, 0x5e, 0xff, 0x8b, 0xaf, 0x06, 0xc6, 0x97, 0x5f, 0x0d, 0x8c, 0x7f, 0x7c, 0x35, 0x30, + 0x7e, 0xf1, 0x7c, 0x50, 0xfb, 0xf2, 0xf9, 0xa0, 0xf6, 0xf7, 0xe7, 0x83, 0xda, 0x49, 0x93, 0xfe, + 0xc7, 0xf1, 0xcd, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x58, 0x7f, 0xb1, 0xf4, 0xd9, 0x21, 0x00, + 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -3770,6 +3888,7 @@ type WorkerClient interface { GetWorkerValidatorStatus(ctx context.Context, in *GetValidationStatusRequest, opts ...grpc.CallOption) (*GetValidationStatusResponse, error) GetValidatorError(ctx context.Context, in *GetValidationErrorRequest, opts ...grpc.CallOption) (*GetValidationErrorResponse, error) OperateValidatorError(ctx context.Context, in *OperateValidationErrorRequest, opts ...grpc.CallOption) (*OperateValidationErrorResponse, error) + UpdateValidator(ctx context.Context, in *UpdateValidationWorkerRequest, opts ...grpc.CallOption) (*CommonWorkerResponse, error) } type workerClient struct { @@ -3870,6 +3989,15 @@ func (c *workerClient) OperateValidatorError(ctx context.Context, in *OperateVal return out, nil } +func (c *workerClient) UpdateValidator(ctx context.Context, in *UpdateValidationWorkerRequest, opts ...grpc.CallOption) (*CommonWorkerResponse, error) { + out := new(CommonWorkerResponse) + err := c.cc.Invoke(ctx, "/pb.Worker/UpdateValidator", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // WorkerServer is the server API for Worker service. type WorkerServer interface { QueryStatus(context.Context, *QueryStatusRequest) (*QueryStatusResponse, error) @@ -3887,6 +4015,7 @@ type WorkerServer interface { GetWorkerValidatorStatus(context.Context, *GetValidationStatusRequest) (*GetValidationStatusResponse, error) GetValidatorError(context.Context, *GetValidationErrorRequest) (*GetValidationErrorResponse, error) OperateValidatorError(context.Context, *OperateValidationErrorRequest) (*OperateValidationErrorResponse, error) + UpdateValidator(context.Context, *UpdateValidationWorkerRequest) (*CommonWorkerResponse, error) } // UnimplementedWorkerServer can be embedded to have forward compatible implementations. 
@@ -3923,6 +4052,9 @@ func (*UnimplementedWorkerServer) GetValidatorError(ctx context.Context, req *Ge func (*UnimplementedWorkerServer) OperateValidatorError(ctx context.Context, req *OperateValidationErrorRequest) (*OperateValidationErrorResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method OperateValidatorError not implemented") } +func (*UnimplementedWorkerServer) UpdateValidator(ctx context.Context, req *UpdateValidationWorkerRequest) (*CommonWorkerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateValidator not implemented") +} func RegisterWorkerServer(s *grpc.Server, srv WorkerServer) { s.RegisterService(&_Worker_serviceDesc, srv) @@ -4108,6 +4240,24 @@ func _Worker_OperateValidatorError_Handler(srv interface{}, ctx context.Context, return interceptor(ctx, in, info, handler) } +func _Worker_UpdateValidator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateValidationWorkerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkerServer).UpdateValidator(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Worker/UpdateValidator", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkerServer).UpdateValidator(ctx, req.(*UpdateValidationWorkerRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Worker_serviceDesc = grpc.ServiceDesc{ ServiceName: "pb.Worker", HandlerType: (*WorkerServer)(nil), @@ -4152,6 +4302,10 @@ var _Worker_serviceDesc = grpc.ServiceDesc{ MethodName: "OperateValidatorError", Handler: _Worker_OperateValidatorError_Handler, }, + { + MethodName: "UpdateValidator", + Handler: _Worker_UpdateValidator_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "dmworker.proto", @@ -6619,6 +6773,90 @@ func (m *OperateValidationErrorResponse) MarshalToSizedBuffer(dAtA []byte) (int, return len(dAtA) - i, nil } +func (m *UpdateValidationWorkerRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateValidationWorkerRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateValidationWorkerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.BinlogGTID) > 0 { + i -= len(m.BinlogGTID) + copy(dAtA[i:], m.BinlogGTID) + i = encodeVarintDmworker(dAtA, i, uint64(len(m.BinlogGTID))) + i-- + dAtA[i] = 0x1a + } + if len(m.BinlogPos) > 0 { + i -= len(m.BinlogPos) + copy(dAtA[i:], m.BinlogPos) + i = encodeVarintDmworker(dAtA, i, uint64(len(m.BinlogPos))) + i-- + dAtA[i] = 0x12 + } + if len(m.TaskName) > 0 { + i -= len(m.TaskName) + copy(dAtA[i:], m.TaskName) + i = encodeVarintDmworker(dAtA, i, uint64(len(m.TaskName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateValidationWorkerResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateValidationWorkerResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateValidationWorkerResponse) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Msg) > 0 { + i -= len(m.Msg) + copy(dAtA[i:], m.Msg) + i = encodeVarintDmworker(dAtA, i, uint64(len(m.Msg))) + i-- + dAtA[i] = 0x12 + } + if m.Result { + i-- + if m.Result { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func encodeVarintDmworker(dAtA []byte, offset int, v uint64) int { offset -= sovDmworker(v) base := offset @@ -7779,6 +8017,43 @@ func (m *OperateValidationErrorResponse) Size() (n int) { return n } +func (m *UpdateValidationWorkerRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TaskName) + if l > 0 { + n += 1 + l + sovDmworker(uint64(l)) + } + l = len(m.BinlogPos) + if l > 0 { + n += 1 + l + sovDmworker(uint64(l)) + } + l = len(m.BinlogGTID) + if l > 0 { + n += 1 + l + sovDmworker(uint64(l)) + } + return n +} + +func (m *UpdateValidationWorkerResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result { + n += 2 + } + l = len(m.Msg) + if l > 0 { + n += 1 + l + sovDmworker(uint64(l)) + } + return n +} + func sovDmworker(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -14983,6 +15258,254 @@ func (m *OperateValidationErrorResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *UpdateValidationWorkerRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDmworker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateValidationWorkerRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateValidationWorkerRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDmworker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDmworker + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDmworker + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BinlogPos", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDmworker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDmworker + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDmworker + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BinlogPos = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field BinlogGTID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDmworker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDmworker + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDmworker + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BinlogGTID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDmworker(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDmworker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateValidationWorkerResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDmworker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateValidationWorkerResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateValidationWorkerResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDmworker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Result = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDmworker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDmworker + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDmworker + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Msg = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDmworker(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDmworker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipDmworker(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/dm/pbmock/dmmaster.go b/dm/pbmock/dmmaster.go index de9fe3628f2..977b67fb14d 100644 --- a/dm/pbmock/dmmaster.go +++ b/dm/pbmock/dmmaster.go @@ -556,6 +556,26 @@ func (mr *MockMasterClientMockRecorder) UpdateTask(arg0, arg1 interface{}, arg2 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateTask", reflect.TypeOf((*MockMasterClient)(nil).UpdateTask), 
varargs...) } +// UpdateValidation mocks base method. +func (m *MockMasterClient) UpdateValidation(arg0 context.Context, arg1 *pb.UpdateValidationRequest, arg2 ...grpc.CallOption) (*pb.UpdateValidationResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateValidation", varargs...) + ret0, _ := ret[0].(*pb.UpdateValidationResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateValidation indicates an expected call of UpdateValidation. +func (mr *MockMasterClientMockRecorder) UpdateValidation(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateValidation", reflect.TypeOf((*MockMasterClient)(nil).UpdateValidation), varargs...) +} + // MockMasterServer is a mock of MasterServer interface. type MockMasterServer struct { ctrl *gomock.Controller @@ -968,3 +988,18 @@ func (mr *MockMasterServerMockRecorder) UpdateTask(arg0, arg1 interface{}) *gomo mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateTask", reflect.TypeOf((*MockMasterServer)(nil).UpdateTask), arg0, arg1) } + +// UpdateValidation mocks base method. +func (m *MockMasterServer) UpdateValidation(arg0 context.Context, arg1 *pb.UpdateValidationRequest) (*pb.UpdateValidationResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateValidation", arg0, arg1) + ret0, _ := ret[0].(*pb.UpdateValidationResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateValidation indicates an expected call of UpdateValidation. +func (mr *MockMasterServerMockRecorder) UpdateValidation(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateValidation", reflect.TypeOf((*MockMasterServer)(nil).UpdateValidation), arg0, arg1) +} diff --git a/dm/pbmock/dmworker.go b/dm/pbmock/dmworker.go index 188e28cd8aa..20ef362f048 100644 --- a/dm/pbmock/dmworker.go +++ b/dm/pbmock/dmworker.go @@ -236,6 +236,26 @@ func (mr *MockWorkerClientMockRecorder) QueryStatus(arg0, arg1 interface{}, arg2 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "QueryStatus", reflect.TypeOf((*MockWorkerClient)(nil).QueryStatus), varargs...) } +// UpdateValidator mocks base method. +func (m *MockWorkerClient) UpdateValidator(arg0 context.Context, arg1 *pb.UpdateValidationWorkerRequest, arg2 ...grpc.CallOption) (*pb.CommonWorkerResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateValidator", varargs...) + ret0, _ := ret[0].(*pb.CommonWorkerResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateValidator indicates an expected call of UpdateValidator. +func (mr *MockWorkerClientMockRecorder) UpdateValidator(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateValidator", reflect.TypeOf((*MockWorkerClient)(nil).UpdateValidator), varargs...) +} + // MockWorkerServer is a mock of WorkerServer interface. 
type MockWorkerServer struct { ctrl *gomock.Controller @@ -408,3 +428,18 @@ func (mr *MockWorkerServerMockRecorder) QueryStatus(arg0, arg1 interface{}) *gom mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "QueryStatus", reflect.TypeOf((*MockWorkerServer)(nil).QueryStatus), arg0, arg1) } + +// UpdateValidator mocks base method. +func (m *MockWorkerServer) UpdateValidator(arg0 context.Context, arg1 *pb.UpdateValidationWorkerRequest) (*pb.CommonWorkerResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateValidator", arg0, arg1) + ret0, _ := ret[0].(*pb.CommonWorkerResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateValidator indicates an expected call of UpdateValidator. +func (mr *MockWorkerServerMockRecorder) UpdateValidator(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateValidator", reflect.TypeOf((*MockWorkerServer)(nil).UpdateValidator), arg0, arg1) +} diff --git a/dm/proto/dmmaster.proto b/dm/proto/dmmaster.proto index 3f8f08fafcf..8c282e4c46a 100644 --- a/dm/proto/dmmaster.proto +++ b/dm/proto/dmmaster.proto @@ -118,6 +118,8 @@ service Master { rpc GetValidationError(GetValidationErrorRequest) returns(GetValidationErrorResponse) {} rpc OperateValidationError(OperateValidationErrorRequest) returns(OperateValidationErrorResponse) {} + + rpc UpdateValidation(UpdateValidationRequest) returns(UpdateValidationResponse) {} } message StartTaskRequest { @@ -515,3 +517,15 @@ message StopValidationResponse { repeated CommonWorkerResponse sources = 3; } +message UpdateValidationRequest { + string taskName = 1; + repeated string sources = 2; + string binlogPos = 3; // binlog-pos (that's file:pos format) + string binlogGTID = 4; +} + +message UpdateValidationResponse { + bool result = 1; + string msg = 2; + repeated CommonWorkerResponse sources = 3; +} \ No newline at end of file diff --git a/dm/proto/dmworker.proto b/dm/proto/dmworker.proto index 792b7822368..585e330efec 100644 --- a/dm/proto/dmworker.proto +++ b/dm/proto/dmworker.proto @@ -28,6 +28,8 @@ service Worker { rpc GetValidatorError(GetValidationErrorRequest) returns(GetValidationErrorResponse) {} rpc OperateValidatorError(OperateValidationErrorRequest) returns(OperateValidationErrorResponse) {} + + rpc UpdateValidator(UpdateValidationWorkerRequest) returns(CommonWorkerResponse) {} } enum TaskOp { @@ -500,3 +502,10 @@ enum ValidationErrOp { ResolveErrOp = 2; ClearErrOp = 3; } + + +message UpdateValidationWorkerRequest { + string taskName = 1; + string binlogPos = 2; // binlog-pos (that's (file:pos) format) + string binlogGTID = 3; +} diff --git a/dm/syncer/data_validator.go b/dm/syncer/data_validator.go index 95103ba534c..08083bd2b78 100644 --- a/dm/syncer/data_validator.go +++ b/dm/syncer/data_validator.go @@ -22,6 +22,7 @@ import ( "sync" "time" + "github.com/go-mysql-org/go-mysql/mysql" "github.com/go-mysql-org/go-mysql/replication" "github.com/pingcap/errors" "github.com/pingcap/failpoint" @@ -34,6 +35,7 @@ import ( "github.com/pingcap/tiflow/dm/pkg/binlog" "github.com/pingcap/tiflow/dm/pkg/conn" tcontext "github.com/pingcap/tiflow/dm/pkg/context" + "github.com/pingcap/tiflow/dm/pkg/gtid" "github.com/pingcap/tiflow/dm/pkg/log" "github.com/pingcap/tiflow/dm/pkg/schema" "github.com/pingcap/tiflow/dm/pkg/terror" @@ -194,8 +196,10 @@ type DataValidator struct { validateInterval time.Duration checkInterval time.Duration - workers []*validateWorker - workerCnt int + cutOverLocation 
atomic.Pointer[binlog.Location] + + workers []*validateWorker + workerCnt int // whether we start to mark failed rows as error rows // if it's false, we don't mark failed row change as error to reduce false-positive @@ -954,7 +958,12 @@ func (v *DataValidator) processRowsEvent(header *replication.EventHeader, ev *re func (v *DataValidator) checkAndPersistCheckpointAndData(loc binlog.Location) error { metaFlushInterval := v.cfg.ValidatorCfg.MetaFlushInterval.Duration - if time.Since(v.lastFlushTime) > metaFlushInterval { + cutOverLocation := v.cutOverLocation.Load() + needCutOver := cutOverLocation != nil && binlog.CompareLocation(*cutOverLocation, loc, v.cfg.EnableGTID) <= 0 + if time.Since(v.lastFlushTime) > metaFlushInterval || needCutOver { + if needCutOver { + v.cutOverLocation.Store(nil) + } v.lastFlushTime = time.Now() if err := v.persistCheckpointAndData(loc); err != nil { v.L.Warn("failed to flush checkpoint: ", zap.Error(err)) @@ -1309,6 +1318,29 @@ func (v *DataValidator) OperateValidatorError(validateOp pb.ValidationErrOp, err return v.persistHelper.operateError(tctx, toDB, validateOp, errID, isAll) } +func (v *DataValidator) UpdateValidator(req *pb.UpdateValidationWorkerRequest) error { + var ( + pos *mysql.Position + gs mysql.GTIDSet + err error + ) + if len(req.BinlogPos) > 0 { + pos, err = binlog.VerifyBinlogPos(req.BinlogPos) + if err != nil { + return err + } + } + if len(req.BinlogGTID) > 0 { + gs, err = gtid.ParserGTID(v.cfg.Flavor, req.BinlogGTID) + if err != nil { + return err + } + } + cutOverLocation := binlog.NewLocation(*pos, gs) + v.cutOverLocation.Store(&cutOverLocation) + return nil +} + func (v *DataValidator) getErrorRowCount(timeout time.Duration) ([errorStateTypeCount]int64, error) { ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() diff --git a/dm/worker/server.go b/dm/worker/server.go index 67c47b7dbf3..663fd965a22 100644 --- a/dm/worker/server.go +++ b/dm/worker/server.go @@ -1080,3 +1080,28 @@ func (s *Server) OperateValidatorError(ctx context.Context, req *pb.OperateValid //nolint:nilerr return resp, nil } + +func (s *Server) UpdateValidator(ctx context.Context, req *pb.UpdateValidationWorkerRequest) (*pb.CommonWorkerResponse, error) { + log.L().Info("update validation", zap.Stringer("payload", req)) + w := s.getSourceWorker(true) + resp := &pb.CommonWorkerResponse{ + Result: true, + } + if w == nil { + log.L().Warn("fail to update validator, because no mysql source is being handled in the worker") + resp.Result = false + resp.Msg = terror.ErrWorkerNoStart.Error() + return resp, nil + } + err := w.UpdateWorkerValidator(req) + if err != nil { + resp.Result = false + resp.Msg = err.Error() + //nolint:nilerr + return resp, nil + } + resp.Source = w.cfg.SourceID + resp.Worker = s.cfg.Name + //nolint:nilerr + return resp, nil +} diff --git a/dm/worker/source_worker.go b/dm/worker/source_worker.go index d7dd1bc2613..35faf0f547c 100644 --- a/dm/worker/source_worker.go +++ b/dm/worker/source_worker.go @@ -1362,3 +1362,11 @@ func (w *SourceWorker) GetValidatorTableStatus(taskName string, filterStatus pb. 
} return st.GetValidatorTableStatus(filterStatus) } + +func (w *SourceWorker) UpdateWorkerValidator(req *pb.UpdateValidationWorkerRequest) error { + st := w.subTaskHolder.findSubTask(req.TaskName) + if st == nil { + return terror.ErrWorkerSubTaskNotFound.Generate(req.TaskName) + } + return st.UpdateValidator(req) +} diff --git a/dm/worker/subtask.go b/dm/worker/subtask.go index 374f3276632..016709e2711 100644 --- a/dm/worker/subtask.go +++ b/dm/worker/subtask.go @@ -877,6 +877,14 @@ func (st *SubTask) OperateValidatorError(op pb.ValidationErrOp, errID uint64, is return terror.ErrValidatorNotFound.Generate(cfg.Name, cfg.SourceID) } +func (st *SubTask) UpdateValidator(req *pb.UpdateValidationWorkerRequest) error { + if validator := st.getValidator(); validator != nil { + return validator.UpdateValidator(req) + } + cfg := st.getCfg() + return terror.ErrValidatorNotFound.Generate(cfg.Name, cfg.SourceID) +} + func (st *SubTask) getValidator() *syncer.DataValidator { st.RLock() defer st.RUnlock() From 5cfc753df9d86c4fe32e46ac17c96bce09172e0e Mon Sep 17 00:00:00 2001 From: Jianyuan Jiang Date: Tue, 17 Oct 2023 16:58:59 +0800 Subject: [PATCH 02/15] owner(ticdc): decouple changefeed from reactor (#9783) close pingcap/tiflow#9784 --- cdc/owner/changefeed.go | 270 +++++++++++---------------- cdc/owner/changefeed_test.go | 176 +++++++++-------- cdc/owner/feed_state_manager.go | 63 +++++-- cdc/owner/feed_state_manager_test.go | 124 ++++++------ cdc/owner/owner.go | 133 ++++++++++--- cdc/owner/owner_test.go | 40 ++-- 6 files changed, 451 insertions(+), 355 deletions(-) diff --git a/cdc/owner/changefeed.go b/cdc/owner/changefeed.go index bbf636bea20..2370380ebdb 100644 --- a/cdc/owner/changefeed.go +++ b/cdc/owner/changefeed.go @@ -34,7 +34,6 @@ import ( cdcContext "github.com/pingcap/tiflow/pkg/context" cerror "github.com/pingcap/tiflow/pkg/errors" "github.com/pingcap/tiflow/pkg/filter" - "github.com/pingcap/tiflow/pkg/orchestrator" "github.com/pingcap/tiflow/pkg/pdutil" redoCfg "github.com/pingcap/tiflow/pkg/redo" "github.com/pingcap/tiflow/pkg/sink/observer" @@ -47,6 +46,21 @@ import ( "go.uber.org/zap" ) +// Changefeed is the tick logic of changefeed. +type Changefeed interface { + // Tick is called periodically to drive the changefeed's internal logic. + // The main logic of changefeed is in this function, including the calculation of many kinds of ts, + // maintain table components, error handling, etc. + // + // It can be called in etcd ticks, so it should never be blocked. + // Tick Returns: checkpointTs, minTableBarrierTs + Tick(cdcContext.Context, *model.ChangeFeedInfo, + *model.ChangeFeedStatus, + map[model.CaptureID]*model.CaptureInfo) (model.Ts, model.Ts) +} + +var _ Changefeed = (*changefeed)(nil) + // newScheduler creates a new scheduler from context. // This function is factored out to facilitate unit testing. func newScheduler( @@ -64,8 +78,6 @@ func newScheduler( type changefeed struct { id model.ChangeFeedID - // state is read-only during the Tick, should only be updated by patch the etcd. - state *orchestrator.ChangefeedReactorState upstream *upstream.Upstream cfg *config.SchedulerConfig @@ -73,7 +85,7 @@ type changefeed struct { // barriers will be created when a changefeed is initialized // and will be destroyed when a changefeed is closed. 
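The exported Changefeed interface above is the seam that lets the owner drive a changefeed without handing it the orchestrator reactor state. The sketch below shows how a caller might consume the two timestamps that Tick now returns; it is illustrative only, assumes it lives in package owner, and uses updateStatus, the package-level helper the updated tests call (a sketch of it follows the tick() hunk further below). The owner.go side of this refactor is not included in this excerpt.

// tickOne is an illustrative caller, not code from this patch.
func tickOne(
    ctx cdcContext.Context,
    cf Changefeed,
    state *orchestrator.ChangefeedReactorState,
    captures map[model.CaptureID]*model.CaptureInfo,
) {
    // Tick only reads the latest info/status; it no longer patches etcd itself.
    checkpointTs, minTableBarrierTs := cf.Tick(ctx, state.Info, state.Status, captures)
    if checkpointTs == 0 && minTableBarrierTs == 0 {
        // Tick reports (0, 0) when the changefeed is not ready to advance
        // (still initializing, stopped, admin job pending, or an error was
        // handled internally), so there is nothing to persist this round.
        return
    }
    updateStatus(state, checkpointTs, minTableBarrierTs)
}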
barriers *barriers - feedStateManager *feedStateManager + feedStateManager FeedStateManager resolvedTs model.Ts // ddl related fields @@ -143,21 +155,29 @@ type changefeed struct { ) (observer.Observer, error) lastDDLTs uint64 // Timestamp of the last executed DDL. Only used for tests. + + // The latest changefeed info and status from meta storage. they are updated in every Tick. + latestInfo *model.ChangeFeedInfo + latestStatus *model.ChangeFeedStatus } -func newChangefeed( +// NewChangefeed creates a new changefeed. +func NewChangefeed( id model.ChangeFeedID, - state *orchestrator.ChangefeedReactorState, + cfInfo *model.ChangeFeedInfo, + cfStatus *model.ChangeFeedStatus, + feedStateManager FeedStateManager, up *upstream.Upstream, cfg *config.SchedulerConfig, ) *changefeed { c := &changefeed{ - id: id, - state: state, + id: id, + latestInfo: cfInfo, + latestStatus: cfStatus, // The scheduler will be created lazily. scheduler: nil, barriers: newBarriers(), - feedStateManager: newFeedStateManager(up, state.Info.Config), + feedStateManager: feedStateManager, upstream: up, errCh: make(chan error, defaultErrChSize), @@ -174,7 +194,10 @@ func newChangefeed( } func newChangefeed4Test( - id model.ChangeFeedID, state *orchestrator.ChangefeedReactorState, up *upstream.Upstream, + id model.ChangeFeedID, + cfInfo *model.ChangeFeedInfo, + cfStatus *model.ChangeFeedStatus, + cfstateManager FeedStateManager, up *upstream.Upstream, newDDLPuller func(ctx context.Context, replicaConfig *config.ReplicaConfig, up *upstream.Upstream, @@ -198,7 +221,7 @@ func newChangefeed4Test( ) (observer.Observer, error), ) *changefeed { cfg := config.NewDefaultSchedulerConfig() - c := newChangefeed(id, state, up, cfg) + c := NewChangefeed(id, cfInfo, cfStatus, cfstateManager, up, cfg) c.newDDLPuller = newDDLPuller c.newSink = newSink c.newScheduler = newScheduler @@ -206,9 +229,14 @@ func newChangefeed4Test( return c } -func (c *changefeed) Tick(ctx cdcContext.Context, captures map[model.CaptureID]*model.CaptureInfo) { +func (c *changefeed) Tick(ctx cdcContext.Context, + cfInfo *model.ChangeFeedInfo, + cfStatus *model.ChangeFeedStatus, + captures map[model.CaptureID]*model.CaptureInfo, +) (model.Ts, model.Ts) { startTime := time.Now() - + c.latestInfo = cfInfo + c.latestStatus = cfStatus // Handle all internal warnings. 
noMoreWarnings := false for !noMoreWarnings { @@ -224,7 +252,7 @@ func (c *changefeed) Tick(ctx cdcContext.Context, captures map[model.CaptureID]* if err != nil { c.handleErr(ctx, err) } - return + return 0, 0 } ctx = cdcContext.WithErrorHandler(ctx, func(err error) error { @@ -234,8 +262,7 @@ func (c *changefeed) Tick(ctx cdcContext.Context, captures map[model.CaptureID]* } return nil }) - c.state.CheckCaptureAlive(ctx.GlobalVars().CaptureInfo.ID) - err := c.tick(ctx, captures) + checkpointTs, minTableBarrierTs, err := c.tick(ctx, captures) // The tick duration is recorded only if changefeed has completed initialization if c.initialized { @@ -253,6 +280,7 @@ func (c *changefeed) Tick(ctx cdcContext.Context, captures map[model.CaptureID]* log.Error("changefeed tick failed", zap.Error(err)) c.handleErr(ctx, err) } + return checkpointTs, minTableBarrierTs } func (c *changefeed) handleErr(ctx cdcContext.Context, err error) { @@ -265,7 +293,7 @@ func (c *changefeed) handleErr(ctx cdcContext.Context, err error) { } else { code = string(cerror.ErrOwnerUnknown.RFCCode()) } - c.feedStateManager.handleError(&model.RunningError{ + c.feedStateManager.HandleError(&model.RunningError{ Time: time.Now(), Addr: config.GetGlobalServerConfig().AdvertiseAddr, Code: code, @@ -285,7 +313,7 @@ func (c *changefeed) handleWarning(err error) { code = string(cerror.ErrOwnerUnknown.RFCCode()) } - c.feedStateManager.handleWarning(&model.RunningError{ + c.feedStateManager.HandleWarning(&model.RunningError{ Time: time.Now(), Addr: config.GetGlobalServerConfig().AdvertiseAddr, Code: code, @@ -293,8 +321,10 @@ func (c *changefeed) handleWarning(err error) { }) } -func (c *changefeed) checkStaleCheckpointTs(ctx cdcContext.Context, checkpointTs uint64) error { - cfInfo := c.state.Info +func (c *changefeed) checkStaleCheckpointTs(ctx cdcContext.Context, + cfInfo *model.ChangeFeedInfo, + checkpointTs uint64, +) error { if cfInfo.NeedBlockGC() { failpoint.Inject("InjectChangefeedFastFailError", func() error { return cerror.ErrStartTsBeforeGC.FastGen("InjectChangefeedFastFailError") @@ -306,48 +336,48 @@ func (c *changefeed) checkStaleCheckpointTs(ctx cdcContext.Context, checkpointTs return nil } -func (c *changefeed) tick(ctx cdcContext.Context, captures map[model.CaptureID]*model.CaptureInfo) error { - adminJobPending := c.feedStateManager.Tick(c.state, c.resolvedTs) - preCheckpointTs := c.state.Info.GetCheckpointTs(c.state.Status) +// tick is the main logic of changefeed. +// tick returns the checkpointTs and minTableBarrierTs. +func (c *changefeed) tick(ctx cdcContext.Context, + captures map[model.CaptureID]*model.CaptureInfo, +) (model.Ts, model.Ts, error) { + adminJobPending := c.feedStateManager.Tick(c.resolvedTs, c.latestStatus, c.latestInfo) + preCheckpointTs := c.latestInfo.GetCheckpointTs(c.latestStatus) // checkStaleCheckpointTs must be called before `feedStateManager.ShouldRunning()` // to ensure all changefeeds, no matter whether they are running or not, will be checked. 
- if err := c.checkStaleCheckpointTs(ctx, preCheckpointTs); err != nil { - return errors.Trace(err) + if err := c.checkStaleCheckpointTs(ctx, c.latestInfo, preCheckpointTs); err != nil { + return 0, 0, errors.Trace(err) } if !c.feedStateManager.ShouldRunning() { c.isRemoved = c.feedStateManager.ShouldRemoved() c.releaseResources(ctx) - return nil + return 0, 0, nil } if adminJobPending { - return nil - } - - if !c.preflightCheck(captures) { - return nil + return 0, 0, nil } if err := c.initialize(ctx); err != nil { - return errors.Trace(err) + return 0, 0, errors.Trace(err) } select { case err := <-c.errCh: - return errors.Trace(err) + return 0, 0, errors.Trace(err) default: } // TODO: pass table checkpointTs when we support concurrent process ddl allPhysicalTables, barrier, err := c.ddlManager.tick(ctx, preCheckpointTs, nil) if err != nil { - return errors.Trace(err) + return 0, 0, errors.Trace(err) } - err = c.handleBarrier(ctx, barrier) + err = c.handleBarrier(ctx, c.latestInfo, c.latestStatus, barrier) if err != nil { - return errors.Trace(err) + return 0, 0, errors.Trace(err) } log.Debug("owner handles barrier", @@ -363,14 +393,14 @@ func (c *changefeed) tick(ctx cdcContext.Context, captures map[model.CaptureID]* // This condition implies that the DDL resolved-ts has not yet reached checkpointTs, // which implies that it would be premature to schedule tables or to update status. // So we return here. - return nil + return 0, 0, nil } newCheckpointTs, newResolvedTs, err := c.scheduler.Tick( ctx, preCheckpointTs, allPhysicalTables, captures, barrier) if err != nil { - return errors.Trace(err) + return 0, 0, errors.Trace(err) } pdTime := c.upstream.PDClock.CurrentTime() @@ -379,12 +409,12 @@ func (c *changefeed) tick(ctx cdcContext.Context, captures map[model.CaptureID]* // CheckpointCannotProceed implies that not all tables are being replicated normally, // so in that case there is no need to advance the global watermarks. if newCheckpointTs == scheduler.CheckpointCannotProceed { - if c.state.Status != nil { + if c.latestStatus != nil { // We should keep the metrics updated even if the scheduler cannot // advance the watermarks for now. 
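Because tick now returns the watermarks instead of writing them, the etcd patch itself moves to the caller. The helper below reconstructs that step from the (*changefeed).updateStatus method removed later in this file; the updated tests invoke it as a package-level updateStatus(state, checkpointTs, minTableBarrierTs), and its exact home (owner.go, not part of this excerpt) is an assumption here.

// updateStatus mirrors the deleted (*changefeed).updateStatus: patch the
// ChangeFeedStatus only when one of the two watermarks actually changed.
func updateStatus(state *orchestrator.ChangefeedReactorState,
    checkpointTs, minTableBarrierTs model.Ts,
) {
    state.PatchStatus(
        func(status *model.ChangeFeedStatus) (*model.ChangeFeedStatus, bool, error) {
            changed := false
            if status == nil {
                return nil, changed, nil
            }
            if status.CheckpointTs != checkpointTs {
                status.CheckpointTs = checkpointTs
                changed = true
            }
            if status.MinTableBarrierTs != minTableBarrierTs {
                status.MinTableBarrierTs = minTableBarrierTs
                changed = true
            }
            return status, changed, nil
        })
}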
- c.updateMetrics(currentTs, c.state.Status.CheckpointTs, c.resolvedTs) + c.updateMetrics(currentTs, c.latestStatus.CheckpointTs, c.resolvedTs) } - return nil + return 0, 0, nil } log.Debug("owner prepares to update status", @@ -399,31 +429,30 @@ func (c *changefeed) tick(ctx cdcContext.Context, captures map[model.CaptureID]* } // MinTableBarrierTs should never regress - if barrier.MinTableBarrierTs < c.state.Status.MinTableBarrierTs { - barrier.MinTableBarrierTs = c.state.Status.MinTableBarrierTs + if barrier.MinTableBarrierTs < c.latestStatus.MinTableBarrierTs { + barrier.MinTableBarrierTs = c.latestStatus.MinTableBarrierTs } failpoint.Inject("ChangefeedOwnerDontUpdateCheckpoint", func() { - if c.lastDDLTs != 0 && c.state.Status.CheckpointTs >= c.lastDDLTs { + if c.lastDDLTs != 0 && c.latestStatus.CheckpointTs >= c.lastDDLTs { log.Info("owner won't update checkpoint because of failpoint", zap.String("namespace", c.id.Namespace), zap.String("changefeed", c.id.ID), - zap.Uint64("keepCheckpoint", c.state.Status.CheckpointTs), + zap.Uint64("keepCheckpoint", c.latestStatus.CheckpointTs), zap.Uint64("skipCheckpoint", newCheckpointTs)) - newCheckpointTs = c.state.Status.CheckpointTs + newCheckpointTs = c.latestStatus.CheckpointTs } }) - c.updateStatus(newCheckpointTs, barrier.MinTableBarrierTs) c.updateMetrics(currentTs, newCheckpointTs, c.resolvedTs) c.tickDownstreamObserver(ctx) - return nil + return newCheckpointTs, barrier.MinTableBarrierTs, nil } func (c *changefeed) initialize(ctx cdcContext.Context) (err error) { - if c.initialized || c.state.Status == nil { - // If `c.state.Status` is nil it means the changefeed struct is just created, it needs to + if c.initialized || c.latestStatus == nil { + // If `c.latestStatus` is nil it means the changefeed struct is just created, it needs to // 1. use startTs as checkpointTs and resolvedTs, if it's a new created changefeed; or // 2. load checkpointTs and resolvedTs from etcd, if it's an existing changefeed. // And then it can continue to initialize. @@ -451,9 +480,9 @@ LOOP2: } } - checkpointTs := c.state.Status.CheckpointTs + checkpointTs := c.latestStatus.CheckpointTs c.resolvedTs = checkpointTs - minTableBarrierTs := c.state.Status.MinTableBarrierTs + minTableBarrierTs := c.latestStatus.MinTableBarrierTs failpoint.Inject("NewChangefeedNoRetryError", func() { failpoint.Return(cerror.ErrStartTsBeforeGC.GenWithStackByArgs(checkpointTs-300, checkpointTs)) @@ -462,7 +491,7 @@ LOOP2: failpoint.Return(errors.New("failpoint injected retriable error")) }) - if c.state.Info.Config.CheckGCSafePoint { + if c.latestInfo.Config.CheckGCSafePoint { // Check TiDB GC safepoint does not exceed the checkpoint. 
// // We update TTL to 10 minutes, @@ -516,19 +545,19 @@ LOOP2: } c.barriers = newBarriers() - if util.GetOrZero(c.state.Info.Config.EnableSyncPoint) { + if util.GetOrZero(c.latestInfo.Config.EnableSyncPoint) { c.barriers.Update(syncPointBarrier, c.resolvedTs) } - c.barriers.Update(finishBarrier, c.state.Info.GetTargetTs()) + c.barriers.Update(finishBarrier, c.latestInfo.GetTargetTs()) - filter, err := filter.NewFilter(c.state.Info.Config, "") + filter, err := filter.NewFilter(c.latestInfo.Config, "") if err != nil { return errors.Trace(err) } c.schema, err = newSchemaWrap4Owner( c.upstream.KVStorage, ddlStartTs, - c.state.Info.Config, + c.latestInfo.Config, c.id, filter) if err != nil { @@ -542,14 +571,14 @@ LOOP2: if err != nil { return errors.Trace(err) } - c.state.Info.Config.Sink.TiDBSourceID = sourceID + c.latestInfo.Config.Sink.TiDBSourceID = sourceID log.Info("set source id", zap.Uint64("sourceID", sourceID), zap.String("namespace", c.id.Namespace), zap.String("changefeed", c.id.ID), ) - c.ddlSink = c.newSink(c.id, c.state.Info, ctx.Throw, func(err error) { + c.ddlSink = c.newSink(c.id, c.latestInfo, ctx.Throw, func(err error) { select { case <-ctx.Done(): case c.warningCh <- err: @@ -558,7 +587,7 @@ LOOP2: c.ddlSink.run(cancelCtx) c.ddlPuller, err = c.newDDLPuller(cancelCtx, - c.state.Info.Config, + c.latestInfo.Config, c.upstream, ddlStartTs, c.id, c.schema, @@ -573,13 +602,13 @@ LOOP2: ctx.Throw(c.ddlPuller.Run(cancelCtx)) }() - c.downstreamObserver, err = c.newDownstreamObserver(ctx, c.id, c.state.Info.SinkURI, c.state.Info.Config) + c.downstreamObserver, err = c.newDownstreamObserver(ctx, c.id, c.latestInfo.SinkURI, c.latestInfo.Config) if err != nil { return err } c.observerLastTick = atomic.NewTime(time.Time{}) - c.redoDDLMgr, err = redo.NewDDLManager(cancelCtx, c.id, c.state.Info.Config.Consistent, ddlStartTs) + c.redoDDLMgr, err = redo.NewDDLManager(cancelCtx, c.id, c.latestInfo.Config.Consistent, ddlStartTs) failpoint.Inject("ChangefeedNewRedoManagerError", func() { err = errors.New("changefeed new redo manager injected error") }) @@ -596,7 +625,7 @@ LOOP2: c.redoMetaMgr, err = redo.NewMetaManagerWithInit(cancelCtx, c.id, - c.state.Info.Config.Consistent, checkpointTs) + c.latestInfo.Config.Consistent, checkpointTs) if err != nil { return err } @@ -612,7 +641,7 @@ LOOP2: zap.String("namespace", c.id.Namespace), zap.String("changefeed", c.id.ID)) - downstreamType, err := c.state.Info.DownstreamType() + downstreamType, err := c.latestInfo.DownstreamType() if err != nil { return errors.Trace(err) } @@ -620,20 +649,20 @@ LOOP2: c.ddlManager = newDDLManager( c.id, ddlStartTs, - c.state.Status.CheckpointTs, + c.latestStatus.CheckpointTs, c.ddlSink, c.ddlPuller, c.schema, c.redoDDLMgr, c.redoMetaMgr, downstreamType, - util.GetOrZero(c.state.Info.Config.BDRMode), + util.GetOrZero(c.latestInfo.Config.BDRMode), ) // create scheduler cfg := *c.cfg - cfg.ChangefeedSettings = c.state.Info.Config.Scheduler - epoch := c.state.Info.Epoch + cfg.ChangefeedSettings = c.latestInfo.Config.Scheduler + epoch := c.latestInfo.Epoch c.scheduler, err = c.newScheduler(ctx, c.upstream, epoch, &cfg, c.redoMetaMgr) if err != nil { return errors.Trace(err) @@ -643,12 +672,12 @@ LOOP2: c.initialized = true log.Info("changefeed initialized", - zap.String("namespace", c.state.ID.Namespace), - zap.String("changefeed", c.state.ID.ID), + zap.String("namespace", c.id.Namespace), + zap.String("changefeed", c.id.ID), zap.Uint64("changefeedEpoch", epoch), zap.Uint64("checkpointTs", checkpointTs), 
zap.Uint64("resolvedTs", c.resolvedTs), - zap.String("info", c.state.Info.String())) + zap.String("info", c.latestInfo.String())) return nil } @@ -724,8 +753,6 @@ func (c *changefeed) releaseResources(ctx cdcContext.Context) { log.Info("changefeed closed", zap.String("namespace", c.id.Namespace), zap.String("changefeed", c.id.ID), - zap.Any("status", c.state.Status), - zap.String("info", c.state.Info.String()), zap.Bool("isRemoved", c.isRemoved)) } @@ -759,18 +786,19 @@ func (c *changefeed) cleanupMetrics() { // cleanup redo logs if changefeed is removed and redo log is enabled func (c *changefeed) cleanupRedoManager(ctx context.Context) { + cfInfo := c.latestInfo if c.isRemoved { - if c.state == nil || c.state.Info == nil || c.state.Info.Config == nil || - c.state.Info.Config.Consistent == nil { - log.Warn("changefeed is removed, but state is not complete", zap.Any("state", c.state)) + if cfInfo == nil || cfInfo.Config == nil || + cfInfo.Config.Consistent == nil { + log.Warn("changefeed is removed, but state is not complete", zap.Any("info", cfInfo)) return } - if !redoCfg.IsConsistentEnabled(c.state.Info.Config.Consistent.Level) { + if !redoCfg.IsConsistentEnabled(cfInfo.Config.Consistent.Level) { return } // when removing a paused changefeed, the redo manager is nil, create a new one if c.redoMetaMgr == nil { - redoMetaMgr, err := redo.NewMetaManager(ctx, c.id, c.state.Info.Config.Consistent) + redoMetaMgr, err := redo.NewMetaManager(ctx, c.id, cfInfo.Config.Consistent) if err != nil { log.Info("owner creates redo manager for clean fail", zap.String("namespace", c.id.Namespace), @@ -813,79 +841,26 @@ func (c *changefeed) cleanupChangefeedServiceGCSafePoints(ctx cdcContext.Context } } -// preflightCheck makes sure that the metadata in Etcd is complete enough to run the tick. -// If the metadata is not complete, such as when the ChangeFeedStatus is nil, -// this function will reconstruct the lost metadata and skip this tick. -func (c *changefeed) preflightCheck(captures map[model.CaptureID]*model.CaptureInfo) (ok bool) { - ok = true - if c.state.Status == nil { - // complete the changefeed status when it is just created. - c.state.PatchStatus( - func(status *model.ChangeFeedStatus) (*model.ChangeFeedStatus, bool, error) { - if status == nil { - status = &model.ChangeFeedStatus{ - // changefeed status is nil when the changefeed has just created. - CheckpointTs: c.state.Info.StartTs, - MinTableBarrierTs: c.state.Info.StartTs, - AdminJobType: model.AdminNone, - } - return status, true, nil - } - return status, false, nil - }) - ok = false - } else if c.state.Status.MinTableBarrierTs == 0 { - // complete the changefeed status when the TiCDC cluster is - // upgraded from an old version(less than v6.7.0). 
- c.state.PatchStatus( - func(status *model.ChangeFeedStatus) (*model.ChangeFeedStatus, bool, error) { - if status != nil { - if status.MinTableBarrierTs == 0 { - status.MinTableBarrierTs = status.CheckpointTs - } - return status, true, nil - } - return status, false, nil - }) - ok = false - } - - // clean stale capture task positions - for captureID := range c.state.TaskPositions { - if _, exist := captures[captureID]; !exist { - c.state.PatchTaskPosition(captureID, func(position *model.TaskPosition) (*model.TaskPosition, bool, error) { - return nil, position != nil, nil - }) - ok = false - } - } - if !ok { - log.Info("changefeed preflight check failed, will skip this tick", - zap.String("namespace", c.id.Namespace), - zap.String("changefeed", c.id.ID), - zap.Any("status", c.state.Status), zap.Bool("ok", ok), - ) - } - - return -} - // handleBarrier calculates the barrierTs of the changefeed. // barrierTs is used to control the data that can be flush to downstream. -func (c *changefeed) handleBarrier(ctx cdcContext.Context, barrier *schedulepb.BarrierWithMinTs) error { +func (c *changefeed) handleBarrier(ctx cdcContext.Context, + cfInfo *model.ChangeFeedInfo, + cfStatus *model.ChangeFeedStatus, + barrier *schedulepb.BarrierWithMinTs, +) error { barrierTp, barrierTs := c.barriers.Min() c.metricsChangefeedBarrierTsGauge.Set(float64(oracle.ExtractPhysical(barrierTs))) // It means: // 1. All data before the barrierTs was sent to downstream. // 2. No more data after barrierTs was sent to downstream. - checkpointReachBarrier := barrierTs == c.state.Status.CheckpointTs + checkpointReachBarrier := barrierTs == cfStatus.CheckpointTs if checkpointReachBarrier { switch barrierTp { case syncPointBarrier: nextSyncPointTs := oracle.GoTimeToTS( oracle.GetTimeFromTS(barrierTs). 
- Add(util.GetOrZero(c.state.Info.Config.SyncPointInterval)), + Add(util.GetOrZero(cfInfo.Config.SyncPointInterval)), ) if err := c.ddlSink.emitSyncPoint(ctx, barrierTs); err != nil { return errors.Trace(err) @@ -937,25 +912,6 @@ func (c *changefeed) updateMetrics(currentTs int64, checkpointTs, resolvedTs mod c.metricsCurrentPDTsGauge.Set(float64(currentTs)) } -func (c *changefeed) updateStatus(checkpointTs, minTableBarrierTs model.Ts) { - c.state.PatchStatus( - func(status *model.ChangeFeedStatus) (*model.ChangeFeedStatus, bool, error) { - changed := false - if status == nil { - return nil, changed, nil - } - if status.CheckpointTs != checkpointTs { - status.CheckpointTs = checkpointTs - changed = true - } - if status.MinTableBarrierTs != minTableBarrierTs { - status.MinTableBarrierTs = minTableBarrierTs - changed = true - } - return status, changed, nil - }) -} - func (c *changefeed) Close(ctx cdcContext.Context) { startTime := time.Now() c.releaseResources(ctx) diff --git a/cdc/owner/changefeed_test.go b/cdc/owner/changefeed_test.go index b184d306775..ffb4725f0c5 100644 --- a/cdc/owner/changefeed_test.go +++ b/cdc/owner/changefeed_test.go @@ -186,7 +186,7 @@ func (m *mockScheduler) Close(ctx context.Context) {} func createChangefeed4Test(ctx cdcContext.Context, t *testing.T, ) ( - *changefeed, map[model.CaptureID]*model.CaptureInfo, *orchestrator.ReactorStateTester, + *changefeed, map[model.CaptureID]*model.CaptureInfo, *orchestrator.ReactorStateTester, *orchestrator.ChangefeedReactorState, ) { up := upstream.NewUpstream4Test(&gc.MockPDClient{ UpdateServiceGCSafePointFunc: func(ctx context.Context, serviceID string, ttl int64, safePoint uint64) (uint64, error) { @@ -203,7 +203,8 @@ func createChangefeed4Test(ctx cdcContext.Context, t *testing.T, return info, true, nil }) tester.MustApplyPatches() - cf := newChangefeed4Test(ctx.ChangefeedVars().ID, state, up, + cf := newChangefeed4Test(ctx.ChangefeedVars().ID, + state.Info, state.Status, newFeedStateManager(up, state), up, // new ddl puller func(ctx context.Context, replicaConfig *config.ReplicaConfig, @@ -246,62 +247,66 @@ func createChangefeed4Test(ctx cdcContext.Context, t *testing.T, []byte(`{"id":"`+ctx.GlobalVars().CaptureInfo.ID+`","address":"127.0.0.1:8300"}`)) tester.MustApplyPatches() captures := map[model.CaptureID]*model.CaptureInfo{ctx.GlobalVars().CaptureInfo.ID: ctx.GlobalVars().CaptureInfo} - return cf, captures, tester + return cf, captures, tester, state } func TestPreCheck(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(true) - cf, captures, tester := createChangefeed4Test(ctx, t) - cf.Tick(ctx, captures) + _, captures, tester, state := createChangefeed4Test(ctx, t) + state.CheckCaptureAlive(ctx.GlobalVars().CaptureInfo.ID) + preflightCheck(state, captures) tester.MustApplyPatches() - require.NotNil(t, cf.state.Status) + require.NotNil(t, state.Status) // test clean the meta data of offline capture offlineCaputreID := "offline-capture" - cf.state.PatchTaskPosition(offlineCaputreID, func(position *model.TaskPosition) (*model.TaskPosition, bool, error) { + state.PatchTaskPosition(offlineCaputreID, func(position *model.TaskPosition) (*model.TaskPosition, bool, error) { return new(model.TaskPosition), true, nil }) tester.MustApplyPatches() - cf.Tick(ctx, captures) + state.CheckCaptureAlive(ctx.GlobalVars().CaptureInfo.ID) + require.False(t, preflightCheck(state, captures)) tester.MustApplyPatches() - require.NotNil(t, cf.state.Status) - require.NotContains(t, cf.state.TaskPositions, offlineCaputreID) + 
require.NotNil(t, state.Status) + require.NotContains(t, state.TaskPositions, offlineCaputreID) } func TestInitialize(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(true) - cf, captures, tester := createChangefeed4Test(ctx, t) + cf, captures, tester, state := createChangefeed4Test(ctx, t) defer cf.Close(ctx) // pre check - cf.Tick(ctx, captures) + state.CheckCaptureAlive(ctx.GlobalVars().CaptureInfo.ID) + require.False(t, preflightCheck(state, captures)) tester.MustApplyPatches() // initialize ctx.GlobalVars().EtcdClient = &etcd.CDCEtcdClientImpl{} - cf.Tick(ctx, captures) + cf.Tick(ctx, state.Info, state.Status, captures) tester.MustApplyPatches() - require.Equal(t, cf.state.Status.CheckpointTs, ctx.ChangefeedVars().Info.StartTs) + require.Equal(t, state.Status.CheckpointTs, ctx.ChangefeedVars().Info.StartTs) } func TestChangefeedHandleError(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(true) - cf, captures, tester := createChangefeed4Test(ctx, t) + cf, captures, tester, state := createChangefeed4Test(ctx, t) defer cf.Close(ctx) // pre check - cf.Tick(ctx, captures) + state.CheckCaptureAlive(ctx.GlobalVars().CaptureInfo.ID) + require.False(t, preflightCheck(state, captures)) tester.MustApplyPatches() // initialize - cf.Tick(ctx, captures) + cf.Tick(ctx, state.Info, state.Status, captures) tester.MustApplyPatches() cf.errCh <- errors.New("fake error") // handle error - cf.Tick(ctx, captures) + cf.Tick(ctx, state.Info, state.Status, captures) tester.MustApplyPatches() - require.Equal(t, cf.state.Status.CheckpointTs, ctx.ChangefeedVars().Info.StartTs) - require.Equal(t, cf.state.Info.Error.Message, "fake error") + require.Equal(t, state.Status.CheckpointTs, ctx.ChangefeedVars().Info.StartTs) + require.Equal(t, state.Info.Error.Message, "fake error") } func TestExecDDL(t *testing.T) { @@ -316,19 +321,22 @@ func TestExecDDL(t *testing.T) { ctx := cdcContext.NewContext4Test(context.Background(), true) ctx.ChangefeedVars().Info.StartTs = startTs - cf, captures, tester := createChangefeed4Test(ctx, t) + cf, captures, tester, state := createChangefeed4Test(ctx, t) cf.upstream.KVStorage = helper.Storage() defer cf.Close(ctx) - tickThreeTime := func() { - cf.Tick(ctx, captures) - tester.MustApplyPatches() - cf.Tick(ctx, captures) + tickTwoTime := func() { + checkpointTs, minTableBarrierTs := cf.Tick(ctx, state.Info, state.Status, captures) + updateStatus(state, checkpointTs, minTableBarrierTs) tester.MustApplyPatches() - cf.Tick(ctx, captures) + checkpointTs, minTableBarrierTs = cf.Tick(ctx, state.Info, state.Status, captures) + updateStatus(state, checkpointTs, minTableBarrierTs) tester.MustApplyPatches() } // pre check and initialize - tickThreeTime() + state.CheckCaptureAlive(ctx.GlobalVars().CaptureInfo.ID) + require.False(t, preflightCheck(state, captures)) + tester.MustApplyPatches() + tickTwoTime() tableIDs, err := cf.schema.AllPhysicalTables(ctx, startTs-1) require.Nil(t, err) require.Len(t, tableIDs, 1) @@ -341,8 +349,8 @@ func TestExecDDL(t *testing.T) { job.BinlogInfo.FinishedTS = mockDDLPuller.resolvedTs mockDDLPuller.ddlQueue = append(mockDDLPuller.ddlQueue, job) // three tick to make sure all barrier set in initialize is handled - tickThreeTime() - require.Equal(t, cf.state.Status.CheckpointTs, mockDDLPuller.resolvedTs) + tickTwoTime() + require.Equal(t, state.Status.CheckpointTs, mockDDLPuller.resolvedTs) // The ephemeral table should have left no trace in the schema cache tableIDs, err = cf.schema.AllPhysicalTables(ctx, mockDDLPuller.resolvedTs) require.Nil(t, 
err) @@ -351,38 +359,38 @@ func TestExecDDL(t *testing.T) { // executing the ddl finished mockDDLSink.ddlDone = true mockDDLPuller.resolvedTs += 1000 - tickThreeTime() - require.Equal(t, mockDDLPuller.resolvedTs, cf.state.Status.CheckpointTs) + tickTwoTime() + require.Equal(t, mockDDLPuller.resolvedTs, state.Status.CheckpointTs) // handle create database job = helper.DDL2Job("create database test1") mockDDLPuller.resolvedTs += 1000 job.BinlogInfo.FinishedTS = mockDDLPuller.resolvedTs mockDDLPuller.ddlQueue = append(mockDDLPuller.ddlQueue, job) - tickThreeTime() - require.Equal(t, cf.state.Status.CheckpointTs, mockDDLPuller.resolvedTs) + tickTwoTime() + require.Equal(t, state.Status.CheckpointTs, mockDDLPuller.resolvedTs) require.Equal(t, "create database test1", mockDDLSink.ddlExecuting.Query) // executing the ddl finished mockDDLSink.ddlDone = true mockDDLPuller.resolvedTs += 1000 - tickThreeTime() - require.Equal(t, cf.state.Status.CheckpointTs, mockDDLPuller.resolvedTs) + tickTwoTime() + require.Equal(t, state.Status.CheckpointTs, mockDDLPuller.resolvedTs) // handle create table job = helper.DDL2Job("create table test1.test1(id int primary key)") mockDDLPuller.resolvedTs += 1000 job.BinlogInfo.FinishedTS = mockDDLPuller.resolvedTs mockDDLPuller.ddlQueue = append(mockDDLPuller.ddlQueue, job) - tickThreeTime() + tickTwoTime() - require.Equal(t, cf.state.Status.CheckpointTs, mockDDLPuller.resolvedTs) + require.Equal(t, state.Status.CheckpointTs, mockDDLPuller.resolvedTs) require.Equal(t, "create table test1.test1(id int primary key)", mockDDLSink.ddlExecuting.Query) // executing the ddl finished mockDDLSink.ddlDone = true mockDDLPuller.resolvedTs += 1000 - tickThreeTime() + tickTwoTime() require.Contains(t, cf.scheduler.(*mockScheduler).currentTables, job.TableID) } @@ -398,19 +406,25 @@ func TestEmitCheckpointTs(t *testing.T) { ctx := cdcContext.NewContext4Test(context.Background(), true) ctx.ChangefeedVars().Info.StartTs = startTs - cf, captures, tester := createChangefeed4Test(ctx, t) + cf, captures, tester, state := createChangefeed4Test(ctx, t) cf.upstream.KVStorage = helper.Storage() defer cf.Close(ctx) tickThreeTime := func() { - cf.Tick(ctx, captures) + checkpointTs, minTableBarrierTs := cf.Tick(ctx, state.Info, state.Status, captures) + updateStatus(state, checkpointTs, minTableBarrierTs) tester.MustApplyPatches() - cf.Tick(ctx, captures) + checkpointTs, minTableBarrierTs = cf.Tick(ctx, state.Info, state.Status, captures) + updateStatus(state, checkpointTs, minTableBarrierTs) tester.MustApplyPatches() - cf.Tick(ctx, captures) + checkpointTs, minTableBarrierTs = cf.Tick(ctx, state.Info, state.Status, captures) + updateStatus(state, checkpointTs, minTableBarrierTs) tester.MustApplyPatches() } // pre check and initialize + state.CheckCaptureAlive(ctx.GlobalVars().CaptureInfo.ID) + require.False(t, preflightCheck(state, captures)) + tester.MustApplyPatches() tickThreeTime() mockDDLSink := cf.ddlManager.ddlSink.(*mockDDLSink) @@ -427,12 +441,12 @@ func TestEmitCheckpointTs(t *testing.T) { mockDDLPuller := cf.ddlManager.ddlPuller.(*mockDDLPuller) mockDDLPuller.resolvedTs = startTs + 1000 cf.ddlManager.schema.AdvanceResolvedTs(mockDDLPuller.resolvedTs) - cf.state.Status.CheckpointTs = mockDDLPuller.resolvedTs + state.Status.CheckpointTs = mockDDLPuller.resolvedTs job.BinlogInfo.FinishedTS = mockDDLPuller.resolvedTs mockDDLPuller.ddlQueue = append(mockDDLPuller.ddlQueue, job) // three tick to make sure all barrier set in initialize is handled tickThreeTime() - require.Equal(t, 
cf.state.Status.CheckpointTs, mockDDLPuller.resolvedTs) + require.Equal(t, state.Status.CheckpointTs, mockDDLPuller.resolvedTs) tables, err = cf.ddlManager.allTables(ctx) require.Nil(t, err) // The ephemeral table should only be deleted after the ddl is executed. @@ -446,7 +460,7 @@ func TestEmitCheckpointTs(t *testing.T) { mockDDLSink.ddlDone = true mockDDLPuller.resolvedTs += 2000 tickThreeTime() - require.Equal(t, cf.state.Status.CheckpointTs, mockDDLPuller.resolvedTs) + require.Equal(t, state.Status.CheckpointTs, mockDDLPuller.resolvedTs) ts, names = mockDDLSink.getCheckpointTsAndTableNames() require.Equal(t, ts, mockDDLPuller.resolvedTs) require.Len(t, names, 0) @@ -458,15 +472,16 @@ func TestSyncPoint(t *testing.T) { ctx.ChangefeedVars().Info.Config.SyncPointInterval = util.AddressOf(1 * time.Second) // SyncPoint option is only available for MySQL compatible database. ctx.ChangefeedVars().Info.SinkURI = "mysql://" - cf, captures, tester := createChangefeed4Test(ctx, t) + cf, captures, tester, state := createChangefeed4Test(ctx, t) defer cf.Close(ctx) // pre check - cf.Tick(ctx, captures) + state.CheckCaptureAlive(ctx.GlobalVars().CaptureInfo.ID) + require.False(t, preflightCheck(state, captures)) tester.MustApplyPatches() // initialize - cf.Tick(ctx, captures) + cf.Tick(ctx, state.Info, state.Status, captures) tester.MustApplyPatches() mockDDLPuller := cf.ddlManager.ddlPuller.(*mockDDLPuller) @@ -475,7 +490,8 @@ func TestSyncPoint(t *testing.T) { mockDDLPuller.resolvedTs = oracle.GoTimeToTS(oracle.GetTimeFromTS(mockDDLPuller.resolvedTs).Add(5 * time.Second)) // tick 20 times for i := 0; i <= 20; i++ { - cf.Tick(ctx, captures) + checkpointTs, minTableBarrierTs := cf.Tick(ctx, state.Info, state.Status, captures) + updateStatus(state, checkpointTs, minTableBarrierTs) tester.MustApplyPatches() } for i := 1; i < len(mockDDLSink.syncPointHis); i++ { @@ -488,28 +504,30 @@ func TestSyncPoint(t *testing.T) { func TestFinished(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(true) ctx.ChangefeedVars().Info.TargetTs = ctx.ChangefeedVars().Info.StartTs + 1000 - cf, captures, tester := createChangefeed4Test(ctx, t) + cf, captures, tester, state := createChangefeed4Test(ctx, t) defer cf.Close(ctx) // pre check - cf.Tick(ctx, captures) + state.CheckCaptureAlive(ctx.GlobalVars().CaptureInfo.ID) + require.False(t, preflightCheck(state, captures)) tester.MustApplyPatches() // initialize - cf.Tick(ctx, captures) + cf.Tick(ctx, state.Info, state.Status, captures) tester.MustApplyPatches() mockDDLPuller := cf.ddlManager.ddlPuller.(*mockDDLPuller) mockDDLPuller.resolvedTs += 2000 // tick many times to make sure the change feed is stopped for i := 0; i <= 10; i++ { - cf.Tick(ctx, captures) + checkpointTs, minTableBarrierTs := cf.Tick(ctx, state.Info, state.Status, captures) + updateStatus(state, checkpointTs, minTableBarrierTs) tester.MustApplyPatches() } - fmt.Println("checkpoint ts", cf.state.Status.CheckpointTs) - fmt.Println("target ts", cf.state.Info.TargetTs) - require.Equal(t, cf.state.Status.CheckpointTs, cf.state.Info.TargetTs) - require.Equal(t, cf.state.Info.State, model.StateFinished) + fmt.Println("checkpoint ts", state.Status.CheckpointTs) + fmt.Println("target ts", state.Info.TargetTs) + require.Equal(t, state.Status.CheckpointTs, state.Info.TargetTs) + require.Equal(t, state.Info.State, model.StateFinished) } func TestRemoveChangefeed(t *testing.T) { @@ -558,14 +576,16 @@ func testChangefeedReleaseResource( redoLogDir string, expectedInitialized bool, ) { - cf, captures, tester := 
createChangefeed4Test(ctx, t) + var err error + cf, captures, tester, state := createChangefeed4Test(ctx, t) // pre check - cf.Tick(ctx, captures) + state.CheckCaptureAlive(ctx.GlobalVars().CaptureInfo.ID) + require.False(t, preflightCheck(state, captures)) tester.MustApplyPatches() // initialize - cf.Tick(ctx, captures) + cf.Tick(ctx, state.Info, state.Status, captures) tester.MustApplyPatches() require.Equal(t, cf.initialized, expectedInitialized) @@ -576,11 +596,11 @@ func testChangefeedReleaseResource( }) cf.isReleased = false // changefeed tick will release resources - err := cf.tick(ctx, captures) + cf.Tick(ctx, state.Info, state.Status, captures) require.Nil(t, err) cancel() - if cf.state.Info.Config.Consistent.UseFileBackend { + if state.Info.Config.Consistent.UseFileBackend { // check redo log dir is deleted _, err = os.Stat(redoLogDir) require.True(t, os.IsNotExist(err)) @@ -600,59 +620,59 @@ func TestBarrierAdvance(t *testing.T) { } ctx.ChangefeedVars().Info.SinkURI = "mysql://" - cf, captures, tester := createChangefeed4Test(ctx, t) + cf, captures, tester, state := createChangefeed4Test(ctx, t) defer cf.Close(ctx) // The changefeed load the info from etcd. - cf.state.Status = &model.ChangeFeedStatus{ - CheckpointTs: cf.state.Info.StartTs, - MinTableBarrierTs: cf.state.Info.StartTs + 5, + state.Status = &model.ChangeFeedStatus{ + CheckpointTs: state.Info.StartTs, + MinTableBarrierTs: state.Info.StartTs + 5, } // Do the preflightCheck and initialize the changefeed. - cf.Tick(ctx, captures) + cf.Tick(ctx, state.Info, state.Status, captures) tester.MustApplyPatches() if i == 1 { cf.ddlManager.ddlResolvedTs += 10 } - _, barrier, err := cf.ddlManager.tick(ctx, cf.state.Status.CheckpointTs, nil) + _, barrier, err := cf.ddlManager.tick(ctx, state.Status.CheckpointTs, nil) require.Nil(t, err) - err = cf.handleBarrier(ctx, barrier) + err = cf.handleBarrier(ctx, state.Info, state.Status, barrier) require.Nil(t, err) if i == 0 { - require.Equal(t, cf.state.Info.StartTs, barrier.GlobalBarrierTs) + require.Equal(t, state.Info.StartTs, barrier.GlobalBarrierTs) } // sync-point is enabled, sync point barrier is ticked if i == 1 { - require.Equal(t, cf.state.Info.StartTs+10, barrier.GlobalBarrierTs) + require.Equal(t, state.Info.StartTs+10, barrier.GlobalBarrierTs) } // Suppose tableCheckpoint has been advanced. - cf.state.Status.CheckpointTs += 10 + state.Status.CheckpointTs += 10 // Need more 1 tick to advance barrier if sync-point is enabled. if i == 1 { - err = cf.handleBarrier(ctx, barrier) + err = cf.handleBarrier(ctx, state.Info, state.Status, barrier) require.Nil(t, err) - require.Equal(t, cf.state.Info.StartTs+10, barrier.GlobalBarrierTs) + require.Equal(t, state.Info.StartTs+10, barrier.GlobalBarrierTs) // Then the last tick barrier must be advanced correctly. cf.ddlManager.ddlResolvedTs += 1000000000000 - _, barrier, err = cf.ddlManager.tick(ctx, cf.state.Status.CheckpointTs+10, nil) + _, barrier, err = cf.ddlManager.tick(ctx, state.Status.CheckpointTs+10, nil) require.Nil(t, err) - err = cf.handleBarrier(ctx, barrier) + err = cf.handleBarrier(ctx, state.Info, state.Status, barrier) nextSyncPointTs := oracle.GoTimeToTS( - oracle.GetTimeFromTS(cf.state.Status.CheckpointTs + 10). + oracle.GetTimeFromTS(state.Status.CheckpointTs + 10). 
Add(util.GetOrZero(ctx.ChangefeedVars().Info.Config.SyncPointInterval)), ) require.Nil(t, err) require.Equal(t, nextSyncPointTs, barrier.GlobalBarrierTs) - require.Less(t, cf.state.Status.CheckpointTs+10, barrier.GlobalBarrierTs) + require.Less(t, state.Status.CheckpointTs+10, barrier.GlobalBarrierTs) require.Less(t, barrier.GlobalBarrierTs, cf.ddlManager.ddlResolvedTs) } } diff --git a/cdc/owner/feed_state_manager.go b/cdc/owner/feed_state_manager.go index db4a0a32988..3d4bcbc62bb 100644 --- a/cdc/owner/feed_state_manager.go +++ b/cdc/owner/feed_state_manager.go @@ -21,7 +21,6 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/log" "github.com/pingcap/tiflow/cdc/model" - "github.com/pingcap/tiflow/pkg/config" cerrors "github.com/pingcap/tiflow/pkg/errors" "github.com/pingcap/tiflow/pkg/orchestrator" "github.com/pingcap/tiflow/pkg/upstream" @@ -41,6 +40,30 @@ const ( defaultBackoffMultiplier = 2.0 ) +// FeedStateManager manages the life cycle of a changefeed, currently it is responsible for: +// 1. Handle admin jobs +// 2. Handle errors +// 3. Handle warnings +// 4. Control the status of a changefeed +type FeedStateManager interface { + // PushAdminJob pushed an admin job to the admin job queue + PushAdminJob(job *model.AdminJob) + // Tick is the main logic of the FeedStateManager, it will be called periodically + // resolvedTs is the resolvedTs of the changefeed + // returns true if there is a pending admin job, if so changefeed should not run the tick logic + Tick(resolvedTs model.Ts, status *model.ChangeFeedStatus, info *model.ChangeFeedInfo) (adminJobPending bool) + // HandleError is called an error occurs in Changefeed.Tick + HandleError(errs ...*model.RunningError) + // HandleWarning is called a warning occurs in Changefeed.Tick + HandleWarning(warnings ...*model.RunningError) + // ShouldRunning returns if the changefeed should be running + ShouldRunning() bool + // ShouldRemoved returns if the changefeed should be removed + ShouldRemoved() bool + // MarkFinished is call when a changefeed is finished + MarkFinished() +} + // feedStateManager manages the ReactorState of a changefeed // when an error or an admin job occurs, the feedStateManager is responsible for controlling the ReactorState type feedStateManager struct { @@ -71,9 +94,12 @@ type feedStateManager struct { } // newFeedStateManager creates feedStateManager and initialize the exponential backoff -func newFeedStateManager(up *upstream.Upstream, cfg *config.ReplicaConfig) *feedStateManager { +func newFeedStateManager(up *upstream.Upstream, + state *orchestrator.ChangefeedReactorState, +) *feedStateManager { m := new(feedStateManager) m.upstream = up + m.state = state m.errBackoff = backoff.NewExponentialBackOff() m.errBackoff.InitialInterval = defaultBackoffInitInterval @@ -81,8 +107,8 @@ func newFeedStateManager(up *upstream.Upstream, cfg *config.ReplicaConfig) *feed m.errBackoff.Multiplier = defaultBackoffMultiplier m.errBackoff.RandomizationFactor = defaultBackoffRandomizationFactor // backoff will stop once the defaultBackoffMaxElapsedTime has elapsed. 
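Exporting FeedStateManager as an interface (with HandleError and HandleWarning as its methods) is what allows a changefeed to be exercised against a fake manager. The stub below is purely illustrative and not part of this patch; it assumes package owner plus the existing model import, and simply satisfies every method of the interface defined above.

// stubFeedStateManager is a hypothetical test double for FeedStateManager.
type stubFeedStateManager struct {
    running bool
    errs    []*model.RunningError
}

func (s *stubFeedStateManager) PushAdminJob(*model.AdminJob) {}
func (s *stubFeedStateManager) Tick(model.Ts, *model.ChangeFeedStatus, *model.ChangeFeedInfo) bool {
    return false // report no pending admin job
}
func (s *stubFeedStateManager) HandleError(errs ...*model.RunningError) {
    s.errs = append(s.errs, errs...) // record errors instead of backing off
}
func (s *stubFeedStateManager) HandleWarning(...*model.RunningError) {}
func (s *stubFeedStateManager) ShouldRunning() bool                  { return s.running }
func (s *stubFeedStateManager) ShouldRemoved() bool                  { return false }
func (s *stubFeedStateManager) MarkFinished()                        { s.running = false }

With a stub like this, a test can hand NewChangefeed a manager whose behavior is fully controlled, instead of constructing a real feedStateManager with backoff state.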
- m.errBackoff.MaxElapsedTime = *cfg.ChangefeedErrorStuckDuration - m.changefeedErrorStuckDuration = *cfg.ChangefeedErrorStuckDuration + m.errBackoff.MaxElapsedTime = *state.Info.Config.ChangefeedErrorStuckDuration + m.changefeedErrorStuckDuration = *state.Info.Config.ChangefeedErrorStuckDuration m.resetErrRetry() m.isRetrying = false @@ -114,15 +140,14 @@ func (m *feedStateManager) resetErrRetry() { m.lastErrorRetryTime = time.Unix(0, 0) } -func (m *feedStateManager) Tick( - state *orchestrator.ChangefeedReactorState, - resolvedTs model.Ts, +func (m *feedStateManager) Tick(resolvedTs model.Ts, + status *model.ChangeFeedStatus, info *model.ChangeFeedInfo, ) (adminJobPending bool) { - m.checkAndInitLastRetryCheckpointTs(state.Status) + m.checkAndInitLastRetryCheckpointTs(status) - if state.Status != nil { - if m.checkpointTs < state.Status.CheckpointTs { - m.checkpointTs = state.Status.CheckpointTs + if status != nil { + if m.checkpointTs < status.CheckpointTs { + m.checkpointTs = status.CheckpointTs m.checkpointTsAdvanced = time.Now() } if m.resolvedTs < resolvedTs { @@ -132,7 +157,7 @@ func (m *feedStateManager) Tick( m.checkpointTsAdvanced = time.Now() } } - m.state = state + m.shouldBeRunning = true defer func() { if !m.shouldBeRunning { @@ -147,7 +172,7 @@ func (m *feedStateManager) Tick( return } - switch m.state.Info.State { + switch info.State { case model.StateUnInitialized: m.patchState(model.StateNormal) return @@ -180,7 +205,7 @@ func (m *feedStateManager) Tick( // retry the changefeed m.shouldBeRunning = true - if m.state.Status != nil { + if status != nil { m.lastErrorRetryCheckpointTs = m.state.Status.CheckpointTs } m.patchState(model.StateWarning) @@ -193,14 +218,14 @@ func (m *feedStateManager) Tick( case model.StateNormal, model.StateWarning: m.checkAndChangeState() errs := m.errorsReportedByProcessors() - m.handleError(errs...) + m.HandleError(errs...) // only handle warnings when there are no errors // otherwise, the warnings will cover the errors if len(errs) == 0 { // warning are come from processors' sink component // they ere not fatal errors, so we don't need to stop the changefeed warnings := m.warningsReportedByProcessors() - m.handleWarning(warnings...) + m.HandleWarning(warnings...) 
} } return @@ -506,7 +531,7 @@ func (m *feedStateManager) warningsReportedByProcessors() []*model.RunningError return result } -func (m *feedStateManager) handleError(errs ...*model.RunningError) { +func (m *feedStateManager) HandleError(errs ...*model.RunningError) { if len(errs) == 0 { return } @@ -572,7 +597,7 @@ func (m *feedStateManager) handleError(errs ...*model.RunningError) { } } -func (m *feedStateManager) handleWarning(errs ...*model.RunningError) { +func (m *feedStateManager) HandleWarning(errs ...*model.RunningError) { if len(errs) == 0 { return } @@ -591,7 +616,7 @@ func (m *feedStateManager) handleWarning(errs ...*model.RunningError) { zap.Duration("checkpointTime", currTime.Sub(ckptTime)), ) code, _ := cerrors.RFCCode(cerrors.ErrChangefeedUnretryable) - m.handleError(&model.RunningError{ + m.HandleError(&model.RunningError{ Time: lastError.Time, Addr: lastError.Addr, Code: string(code), diff --git a/cdc/owner/feed_state_manager_test.go b/cdc/owner/feed_state_manager_test.go index e81ae7584ac..0ae8e512ac5 100644 --- a/cdc/owner/feed_state_manager_test.go +++ b/cdc/owner/feed_state_manager_test.go @@ -74,6 +74,7 @@ func TestHandleJob(t *testing.T) { manager := newFeedStateManager4Test(200, 1600, 0, 2.0) state := orchestrator.NewChangefeedReactorState(etcd.DefaultCDCClusterID, ctx.ChangefeedVars().ID) + manager.state = state tester := orchestrator.NewReactorStateTester(t, state, nil) state.PatchInfo(func(info *model.ChangeFeedInfo) (*model.ChangeFeedInfo, bool, error) { require.Nil(t, info) @@ -84,7 +85,7 @@ func TestHandleJob(t *testing.T) { return &model.ChangeFeedStatus{}, true, nil }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() require.True(t, manager.ShouldRunning()) @@ -93,7 +94,7 @@ func TestHandleJob(t *testing.T) { CfID: model.DefaultChangeFeedID("fake-changefeed-id"), Type: model.AdminStop, }) - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() require.True(t, manager.ShouldRunning()) @@ -102,7 +103,7 @@ func TestHandleJob(t *testing.T) { CfID: ctx.ChangefeedVars().ID, Type: model.AdminResume, }) - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() require.True(t, manager.ShouldRunning()) @@ -111,7 +112,7 @@ func TestHandleJob(t *testing.T) { CfID: ctx.ChangefeedVars().ID, Type: model.AdminStop, }) - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() require.False(t, manager.ShouldRunning()) @@ -125,7 +126,7 @@ func TestHandleJob(t *testing.T) { CfID: ctx.ChangefeedVars().ID, Type: model.AdminResume, }) - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() require.True(t, manager.ShouldRunning()) require.False(t, manager.ShouldRemoved()) @@ -138,7 +139,7 @@ func TestHandleJob(t *testing.T) { CfID: ctx.ChangefeedVars().ID, Type: model.AdminRemove, }) - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() require.False(t, manager.ShouldRunning()) @@ -161,7 +162,8 @@ func TestResumeChangefeedWithCheckpointTs(t *testing.T) { return &model.ChangeFeedStatus{}, true, nil }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.state = state + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() require.True(t, manager.ShouldRunning()) @@ -170,7 +172,7 @@ func TestResumeChangefeedWithCheckpointTs(t *testing.T) { CfID: ctx.ChangefeedVars().ID, Type: model.AdminStop, }) - 
manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() require.False(t, manager.ShouldRunning()) @@ -185,7 +187,7 @@ func TestResumeChangefeedWithCheckpointTs(t *testing.T) { Type: model.AdminResume, OverwriteCheckpointTs: 100, }) - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() require.True(t, manager.ShouldRunning()) require.False(t, manager.ShouldRemoved()) @@ -203,7 +205,7 @@ func TestResumeChangefeedWithCheckpointTs(t *testing.T) { }}, true, nil }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() require.Equal(t, state.Info.State, model.StateFailed) require.Equal(t, state.Info.AdminJobType, model.AdminStop) @@ -216,7 +218,7 @@ func TestResumeChangefeedWithCheckpointTs(t *testing.T) { Type: model.AdminResume, OverwriteCheckpointTs: 200, }) - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() require.True(t, manager.ShouldRunning()) require.False(t, manager.ShouldRemoved()) @@ -241,12 +243,13 @@ func TestMarkFinished(t *testing.T) { return &model.ChangeFeedStatus{}, true, nil }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.state = state + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() require.True(t, manager.ShouldRunning()) manager.MarkFinished() - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() require.False(t, manager.ShouldRunning()) @@ -275,12 +278,13 @@ func TestCleanUpInfos(t *testing.T) { }) tester.MustApplyPatches() require.Contains(t, state.TaskPositions, ctx.GlobalVars().CaptureInfo.ID) - manager.Tick(state, 0) + manager.state = state + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() require.True(t, manager.ShouldRunning()) manager.MarkFinished() - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() require.False(t, manager.ShouldRunning()) require.Equal(t, state.Info.State, model.StateFinished) @@ -307,7 +311,8 @@ func TestHandleError(t *testing.T) { }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.state = state + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() intervals := []time.Duration{200, 400, 800, 1600, 1600} @@ -326,19 +331,19 @@ func TestHandleError(t *testing.T) { }}, true, nil }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() require.False(t, manager.ShouldRunning()) require.Equal(t, state.Info.State, model.StatePending) require.Equal(t, state.Info.AdminJobType, model.AdminStop) require.Equal(t, state.Status.AdminJobType, model.AdminStop) time.Sleep(d) - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() } // no error tick, state should be transferred from pending to warning - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) require.True(t, manager.ShouldRunning()) require.Equal(t, model.StateWarning, state.Info.State) require.Equal(t, model.AdminNone, state.Info.AdminJobType) @@ -352,7 +357,7 @@ func TestHandleError(t *testing.T) { return status, true, nil }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() require.True(t, manager.ShouldRunning()) state.PatchStatus( @@ -360,7 +365,7 @@ func TestHandleError(t *testing.T) { status.CheckpointTs += 1 return status, true, nil }) - 
manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() require.Equal(t, model.StateNormal, state.Info.State) require.Equal(t, model.AdminNone, state.Info.AdminJobType) @@ -386,7 +391,8 @@ func TestHandleFastFailError(t *testing.T) { }}, true, nil }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.state = state + manager.Tick(0, state.Status, state.Info) // test handling fast failed error with non-nil ChangeFeedInfo tester.MustApplyPatches() // test handling fast failed error with nil ChangeFeedInfo @@ -394,7 +400,7 @@ func TestHandleFastFailError(t *testing.T) { state.PatchInfo(func(info *model.ChangeFeedInfo) (*model.ChangeFeedInfo, bool, error) { return nil, true, nil }) - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) // When the patches are applied, the callback function of PatchInfo in feedStateManager.HandleError will be called. // At that time, the nil pointer will be checked instead of throwing a panic. See issue #3128 for more detail. tester.MustApplyPatches() @@ -413,7 +419,7 @@ func TestHandleErrorWhenChangefeedIsPaused(t *testing.T) { manager.state.Info = &model.ChangeFeedInfo{ State: model.StateStopped, } - manager.handleError(err) + manager.HandleError(err) require.Equal(t, model.StateStopped, manager.state.Info.State) } @@ -474,7 +480,8 @@ func TestChangefeedStatusNotExist(t *testing.T) { etcd.DefaultClusterAndMetaPrefix, ): "d563bfc0-f406-4f34-bc7d-6dc2e35a44e5", }) - manager.Tick(state, 0) + manager.state = state + manager.Tick(0, state.Status, state.Info) require.False(t, manager.ShouldRunning()) require.False(t, manager.ShouldRemoved()) tester.MustApplyPatches() @@ -483,7 +490,7 @@ func TestChangefeedStatusNotExist(t *testing.T) { CfID: ctx.ChangefeedVars().ID, Type: model.AdminRemove, }) - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) require.False(t, manager.ShouldRunning()) require.True(t, manager.ShouldRemoved()) tester.MustApplyPatches() @@ -504,7 +511,8 @@ func TestChangefeedNotRetry(t *testing.T) { return &model.ChangeFeedInfo{SinkURI: "123", Config: &config.ReplicaConfig{}, State: model.StateNormal}, true, nil }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.state = state + manager.Tick(0, state.Status, state.Info) require.True(t, manager.ShouldRunning()) // changefeed in error state but error can be retried @@ -522,7 +530,7 @@ func TestChangefeedNotRetry(t *testing.T) { }, true, nil }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) require.True(t, manager.ShouldRunning()) state.PatchTaskPosition("test", @@ -539,7 +547,7 @@ func TestChangefeedNotRetry(t *testing.T) { return position, true, nil }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) require.False(t, manager.ShouldRunning()) state.PatchTaskPosition("test", @@ -555,7 +563,7 @@ func TestChangefeedNotRetry(t *testing.T) { return position, true, nil }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) // should be false require.False(t, manager.ShouldRunning()) @@ -572,7 +580,7 @@ func TestChangefeedNotRetry(t *testing.T) { return position, true, nil }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) // should be false require.False(t, manager.ShouldRunning()) } @@ -594,7 +602,8 @@ func TestBackoffStopsUnexpectedly(t *testing.T) { }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.state = state + 
manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() for i := 1; i <= 10; i++ { @@ -622,7 +631,7 @@ func TestBackoffStopsUnexpectedly(t *testing.T) { }}, true, nil }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() // If an error occurs, backing off from running the task. require.False(t, manager.ShouldRunning()) @@ -634,7 +643,7 @@ func TestBackoffStopsUnexpectedly(t *testing.T) { // 500ms is the backoff interval, so sleep 500ms and after a manager // tick, the changefeed will turn into normal state time.Sleep(500 * time.Millisecond) - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() } } @@ -656,7 +665,8 @@ func TestBackoffNeverStops(t *testing.T) { }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.state = state + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() for i := 1; i <= 30; i++ { @@ -675,7 +685,7 @@ func TestBackoffNeverStops(t *testing.T) { }}, true, nil }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() require.False(t, manager.ShouldRunning()) require.Equal(t, model.StatePending, state.Info.State) @@ -684,7 +694,7 @@ func TestBackoffNeverStops(t *testing.T) { // 100ms is the backoff interval, so sleep 100ms and after a manager tick, // the changefeed will turn into normal state time.Sleep(100 * time.Millisecond) - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() } } @@ -706,7 +716,8 @@ func TestUpdateChangefeedEpoch(t *testing.T) { }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.state = state + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() require.Equal(t, state.Info.State, model.StateNormal) require.True(t, manager.ShouldRunning()) @@ -726,7 +737,7 @@ func TestUpdateChangefeedEpoch(t *testing.T) { }}, true, nil }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() require.False(t, manager.ShouldRunning()) require.Equal(t, model.StatePending, state.Info.State, i) @@ -762,7 +773,8 @@ func TestHandleWarning(t *testing.T) { }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.state = state + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() require.Equal(t, model.StateNormal, state.Info.State) require.True(t, manager.ShouldRunning()) @@ -778,7 +790,7 @@ func TestHandleWarning(t *testing.T) { }}, true, nil }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) // some patches will be generated when the manager.Tick is called // so we need to apply the patches before we check the state tester.MustApplyPatches() @@ -794,7 +806,7 @@ func TestHandleWarning(t *testing.T) { }, true, nil }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() require.Equal(t, model.StateWarning, state.Info.State) require.True(t, manager.ShouldRunning()) @@ -808,7 +820,7 @@ func TestHandleWarning(t *testing.T) { }, true, nil }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() require.Equal(t, model.StateNormal, state.Info.State) require.True(t, manager.ShouldRunning()) @@ -825,7 +837,7 @@ func TestHandleWarning(t *testing.T) { }}, true, nil }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.Tick(0, 
state.Status, state.Info) // some patches will be generated when the manager.Tick is called // so we need to apply the patches before we check the state tester.MustApplyPatches() @@ -845,7 +857,7 @@ func TestHandleWarning(t *testing.T) { manager.checkpointTsAdvanced = manager. checkpointTsAdvanced.Add(-(manager.changefeedErrorStuckDuration + 1)) // resolveTs = 202 > checkpointTs = 201 - manager.Tick(state, 202) + manager.Tick(202, state.Status, state.Info) // some patches will be generated when the manager.Tick is called // so we need to apply the patches before we check the state tester.MustApplyPatches() @@ -874,7 +886,8 @@ func TestErrorAfterWarning(t *testing.T) { }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.state = state + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() require.Equal(t, model.StateNormal, state.Info.State) require.True(t, manager.ShouldRunning()) @@ -890,7 +903,7 @@ func TestErrorAfterWarning(t *testing.T) { }}, true, nil }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) // some patches will be generated when the manager.Tick is called // so we need to apply the patches before we check the state tester.MustApplyPatches() @@ -906,7 +919,7 @@ func TestErrorAfterWarning(t *testing.T) { }, true, nil }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) tester.MustApplyPatches() require.Equal(t, model.StateWarning, state.Info.State) require.True(t, manager.ShouldRunning()) @@ -925,13 +938,13 @@ func TestErrorAfterWarning(t *testing.T) { }) tester.MustApplyPatches() - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) // some patches will be generated when the manager.Tick is called // so we need to apply the patches before we check the state tester.MustApplyPatches() require.Equal(t, model.StatePending, state.Info.State) require.False(t, manager.ShouldRunning()) - manager.Tick(state, 0) + manager.Tick(0, state.Status, state.Info) // some patches will be generated when the manager.Tick is called // so we need to apply the patches before we check the state @@ -948,6 +961,7 @@ func TestHandleWarningWhileAdvanceResolvedTs(t *testing.T) { manager := newFeedStateManager4Test(200, 1600, maxElapsedTimeInMs, 2.0) state := orchestrator.NewChangefeedReactorState(etcd.DefaultCDCClusterID, ctx.ChangefeedVars().ID) + manager.state = state tester := orchestrator.NewReactorStateTester(t, state, nil) state.PatchInfo(func(info *model.ChangeFeedInfo) (*model.ChangeFeedInfo, bool, error) { require.Nil(t, info) @@ -961,7 +975,7 @@ func TestHandleWarningWhileAdvanceResolvedTs(t *testing.T) { }) tester.MustApplyPatches() - manager.Tick(state, 200) + manager.Tick(200, state.Status, state.Info) tester.MustApplyPatches() require.Equal(t, model.StateNormal, state.Info.State) require.True(t, manager.ShouldRunning()) @@ -977,7 +991,7 @@ func TestHandleWarningWhileAdvanceResolvedTs(t *testing.T) { }}, true, nil }) tester.MustApplyPatches() - manager.Tick(state, 200) + manager.Tick(200, state.Status, state.Info) // some patches will be generated when the manager.Tick is called // so we need to apply the patches before we check the state tester.MustApplyPatches() @@ -1002,7 +1016,7 @@ func TestHandleWarningWhileAdvanceResolvedTs(t *testing.T) { }}, true, nil }) tester.MustApplyPatches() - manager.Tick(state, 200) + manager.Tick(200, state.Status, state.Info) tester.MustApplyPatches() require.Equal(t, model.StateWarning, state.Info.State) require.True(t, 
manager.ShouldRunning()) @@ -1023,7 +1037,7 @@ func TestHandleWarningWhileAdvanceResolvedTs(t *testing.T) { }}, true, nil }) tester.MustApplyPatches() - manager.Tick(state, 400) + manager.Tick(400, state.Status, state.Info) tester.MustApplyPatches() require.Equal(t, model.StateWarning, state.Info.State) require.True(t, manager.ShouldRunning()) @@ -1045,7 +1059,7 @@ func TestHandleWarningWhileAdvanceResolvedTs(t *testing.T) { }}, true, nil }) tester.MustApplyPatches() - manager.Tick(state, 400) + manager.Tick(400, state.Status, state.Info) tester.MustApplyPatches() require.Equal(t, model.StateFailed, state.Info.State) require.False(t, manager.ShouldRunning()) diff --git a/cdc/owner/owner.go b/cdc/owner/owner.go index 9c02435ae2e..de05c6f0aa1 100644 --- a/cdc/owner/owner.go +++ b/cdc/owner/owner.go @@ -123,7 +123,9 @@ type ownerImpl struct { newChangefeed func( id model.ChangeFeedID, - state *orchestrator.ChangefeedReactorState, + cfInfo *model.ChangeFeedInfo, + cfStatus *model.ChangeFeedStatus, + feedStateManager FeedStateManager, up *upstream.Upstream, cfg *config.SchedulerConfig, ) *changefeed @@ -142,7 +144,7 @@ func NewOwner( upstreamManager: upstreamManager, changefeeds: make(map[model.ChangeFeedID]*changefeed), lastTickTime: time.Now(), - newChangefeed: newChangefeed, + newChangefeed: NewChangefeed, logLimiter: rate.NewLimiter(versionInconsistentLogRate, versionInconsistentLogRate), cfg: cfg, etcdClient: etcdClient, @@ -187,13 +189,21 @@ func (o *ownerImpl) Tick(stdCtx context.Context, rawState orchestrator.ReactorSt upstreamInfo := state.Upstreams[changefeedState.Info.UpstreamID] up = o.upstreamManager.AddUpstream(upstreamInfo) } - cfReactor = o.newChangefeed(changefeedID, changefeedState, up, o.cfg) + cfReactor = o.newChangefeed(changefeedID, changefeedState.Info, changefeedState.Status, + newFeedStateManager(up, changefeedState), + up, o.cfg) o.changefeeds[changefeedID] = cfReactor } ctx = cdcContext.WithChangefeedVars(ctx, &cdcContext.ChangefeedVars{ ID: changefeedID, }) - cfReactor.Tick(ctx, o.getChangefeedCaptures(changefeedState, state)) + changefeedState.CheckCaptureAlive(ctx.GlobalVars().CaptureInfo.ID) + captures := o.getChangefeedCaptures(changefeedState, state) + if !preflightCheck(changefeedState, captures) { + continue + } + checkpointTs, minTableBarrierTs := cfReactor.Tick(ctx, changefeedState.Info, changefeedState.Status, captures) + updateStatus(changefeedState, checkpointTs, minTableBarrierTs) } o.changefeedTicked = true @@ -222,6 +232,89 @@ func (o *ownerImpl) Tick(stdCtx context.Context, rawState orchestrator.ReactorSt return state, nil } +// preflightCheck makes sure that the metadata in Etcd is complete enough to run the tick. +// If the metadata is not complete, such as when the ChangeFeedStatus is nil, +// this function will reconstruct the lost metadata and skip this tick. +func preflightCheck(changefeed *orchestrator.ChangefeedReactorState, + captures map[model.CaptureID]*model.CaptureInfo, +) (ok bool) { + ok = true + if changefeed.Status == nil { + // complete the changefeed status when it is just created. + changefeed.PatchStatus( + func(status *model.ChangeFeedStatus) (*model.ChangeFeedStatus, bool, error) { + if status == nil { + status = &model.ChangeFeedStatus{ + // changefeed status is nil when the changefeed has just created. 
+ CheckpointTs: changefeed.Info.StartTs, + MinTableBarrierTs: changefeed.Info.StartTs, + AdminJobType: model.AdminNone, + } + return status, true, nil + } + return status, false, nil + }) + ok = false + } else if changefeed.Status.MinTableBarrierTs == 0 { + // complete the changefeed status when the TiCDC cluster is + // upgraded from an old version(less than v6.7.0). + changefeed.PatchStatus( + func(status *model.ChangeFeedStatus) (*model.ChangeFeedStatus, bool, error) { + if status != nil { + if status.MinTableBarrierTs == 0 { + status.MinTableBarrierTs = status.CheckpointTs + } + return status, true, nil + } + return status, false, nil + }) + ok = false + } + + // clean stale capture task positions + for captureID := range changefeed.TaskPositions { + if _, exist := captures[captureID]; !exist { + changefeed.PatchTaskPosition(captureID, func(position *model.TaskPosition) (*model.TaskPosition, bool, error) { + return nil, position != nil, nil + }) + ok = false + } + } + if !ok { + log.Info("changefeed preflight check failed, will skip this tick", + zap.String("namespace", changefeed.ID.Namespace), + zap.String("changefeed", changefeed.ID.ID), + zap.Any("status", changefeed.Status), zap.Bool("ok", ok), + ) + } + + return +} + +func updateStatus(changefeed *orchestrator.ChangefeedReactorState, + checkpointTs, minTableBarrierTs model.Ts, +) { + if checkpointTs == 0 || minTableBarrierTs == 0 { + return + } + changefeed.PatchStatus( + func(status *model.ChangeFeedStatus) (*model.ChangeFeedStatus, bool, error) { + changed := false + if status == nil { + return nil, changed, nil + } + if status.CheckpointTs != checkpointTs { + status.CheckpointTs = checkpointTs + changed = true + } + if status.MinTableBarrierTs != minTableBarrierTs { + status.MinTableBarrierTs = minTableBarrierTs + changed = true + } + return status, changed, nil + }) +} + // shouldHandleChangefeed returns whether the owner should handle the changefeed. func (o *ownerImpl) shouldHandleChangefeed(_ *orchestrator.ChangefeedReactorState) bool { return true @@ -357,9 +450,9 @@ func (o *ownerImpl) updateMetrics() { o.lastTickTime = now for cfID, cf := range o.changefeeds { - if cf.state != nil && cf.state.Info != nil { + if cf.latestInfo != nil { changefeedStatusGauge.WithLabelValues(cfID.Namespace, cfID.ID). - Set(float64(cf.state.Info.State.ToInt())) + Set(float64(cf.latestInfo.State.ToInt())) } } } @@ -403,7 +496,7 @@ func (o *ownerImpl) handleDrainCaptures(ctx context.Context, query *scheduler.Qu ) for _, changefeed := range o.changefeeds { // Only count normal changefeed. 
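The owner changes above split the tick into three steps: preflightCheck repairs missing metadata and may skip the tick, the changefeed's Tick now receives the info and status directly and returns the new watermarks, and updateStatus writes them back only when they actually moved. A minimal standalone sketch of that sequence, using invented Info/Status stand-ins instead of the real orchestrator state and etcd patches:

package main

import "fmt"

// Simplified stand-ins for the changefeed metadata kept in etcd.
type Info struct{ StartTs uint64 }
type Status struct {
	CheckpointTs      uint64
	MinTableBarrierTs uint64
}

// preflight mirrors the idea of preflightCheck: if the status is missing or
// incomplete, reconstruct it and report that this tick should be skipped.
func preflight(info *Info, status **Status) bool {
	if *status == nil {
		*status = &Status{CheckpointTs: info.StartTs, MinTableBarrierTs: info.StartTs}
		return false
	}
	if (*status).MinTableBarrierTs == 0 {
		(*status).MinTableBarrierTs = (*status).CheckpointTs
		return false
	}
	return true
}

// updateStatus mirrors the idea of only writing back when something changed.
func updateStatus(status *Status, checkpointTs, minTableBarrierTs uint64) (changed bool) {
	if checkpointTs == 0 || minTableBarrierTs == 0 || status == nil {
		return false
	}
	if status.CheckpointTs != checkpointTs {
		status.CheckpointTs = checkpointTs
		changed = true
	}
	if status.MinTableBarrierTs != minTableBarrierTs {
		status.MinTableBarrierTs = minTableBarrierTs
		changed = true
	}
	return changed
}

func main() {
	info := &Info{StartTs: 100}
	var status *Status

	// First tick: status is nil, so preflight rebuilds it and the tick is skipped.
	if !preflight(info, &status) {
		fmt.Printf("skip tick, rebuilt status: %+v\n", *status)
	}

	// Later tick: the changefeed reports new watermarks, and the status is
	// patched only because the values actually advanced.
	changed := updateStatus(status, 120, 115)
	fmt.Printf("changed=%v status=%+v\n", changed, *status)
}

The real preflightCheck additionally cleans up task positions of captures that no longer exist, which is omitted here.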
- state := changefeed.state.Info.State + state := changefeed.latestInfo.State if state != model.StateNormal { log.Info("skip drain changefeed", zap.String("state", string(state)), @@ -496,28 +589,22 @@ func (o *ownerImpl) handleQueries(query *Query) error { ret := map[model.ChangeFeedID]*model.ChangeFeedStatusForAPI{} for cfID, cfReactor := range o.changefeeds { ret[cfID] = &model.ChangeFeedStatusForAPI{} - if cfReactor.state == nil { - continue - } - if cfReactor.state.Status == nil { + if cfReactor.latestStatus == nil { continue } ret[cfID].ResolvedTs = cfReactor.resolvedTs - ret[cfID].CheckpointTs = cfReactor.state.Status.CheckpointTs + ret[cfID].CheckpointTs = cfReactor.latestStatus.CheckpointTs } query.Data = ret case QueryAllChangeFeedInfo: ret := map[model.ChangeFeedID]*model.ChangeFeedInfo{} for cfID, cfReactor := range o.changefeeds { - if cfReactor.state == nil { - continue - } - if cfReactor.state.Info == nil { + if cfReactor.latestInfo == nil { ret[cfID] = &model.ChangeFeedInfo{} continue } var err error - ret[cfID], err = cfReactor.state.Info.Clone() + ret[cfID], err = cfReactor.latestInfo.Clone() if err != nil { return errors.Trace(err) } @@ -528,9 +615,6 @@ func (o *ownerImpl) handleQueries(query *Query) error { if !ok { return cerror.ErrChangeFeedNotExists.GenWithStackByArgs(query.ChangeFeedID) } - if cfReactor.state == nil { - return cerror.ErrChangeFeedNotExists.GenWithStackByArgs(query.ChangeFeedID) - } var ret map[model.CaptureID]*model.TaskStatus provider := cfReactor.GetInfoProvider() @@ -596,17 +680,14 @@ func (o *ownerImpl) isHealthy() bool { return false } for _, changefeed := range o.changefeeds { - if changefeed.state == nil { - log.Warn("isHealthy: changefeed state is nil", - zap.String("namespace", changefeed.id.Namespace), - zap.String("changefeed", changefeed.id.ID)) + if changefeed.latestInfo == nil { continue } - if changefeed.state.Info.State != model.StateNormal { + if changefeed.latestInfo.State != model.StateNormal { log.Warn("isHealthy: changefeed not normal", zap.String("namespace", changefeed.id.Namespace), zap.String("changefeed", changefeed.id.ID), - zap.Any("state", changefeed.state.Info.State)) + zap.Any("state", changefeed.latestInfo.State)) continue } diff --git a/cdc/owner/owner_test.go b/cdc/owner/owner_test.go index 681c8007926..9f9dcd1f198 100644 --- a/cdc/owner/owner_test.go +++ b/cdc/owner/owner_test.go @@ -80,11 +80,13 @@ func newOwner4Test( o := NewOwner(m, config.NewDefaultSchedulerConfig(), etcdClient).(*ownerImpl) o.newChangefeed = func( id model.ChangeFeedID, - state *orchestrator.ChangefeedReactorState, + cfInfo *model.ChangeFeedInfo, + cfStatus *model.ChangeFeedStatus, + cfstateManager FeedStateManager, up *upstream.Upstream, cfg *config.SchedulerConfig, ) *changefeed { - return newChangefeed4Test(id, state, up, newDDLPuller, newSink, + return newChangefeed4Test(id, cfInfo, cfStatus, cfstateManager, up, newDDLPuller, newSink, newScheduler, newDownstreamObserver) } return o @@ -485,11 +487,13 @@ func TestAsyncStop(t *testing.T) { func TestHandleDrainCapturesSchedulerNotReady(t *testing.T) { t.Parallel() + state := &orchestrator.ChangefeedReactorState{ + Info: &model.ChangeFeedInfo{State: model.StateNormal}, + } cf := &changefeed{ - scheduler: nil, // scheduler is not set. - state: &orchestrator.ChangefeedReactorState{ - Info: &model.ChangeFeedInfo{State: model.StateNormal}, - }, + scheduler: nil, // scheduler is not set. 
+ latestStatus: state.Status, + latestInfo: state.Info, } pdClient := &gc.MockPDClient{} @@ -524,7 +528,7 @@ func TestHandleDrainCapturesSchedulerNotReady(t *testing.T) { require.Nil(t, <-done) // Only count changefeed that is normal. - cf.state.Info.State = model.StateStopped + state.Info.State = model.StateStopped query = &scheduler.Query{CaptureID: "test"} done = make(chan error, 1) o.handleDrainCaptures(ctx, query, done) @@ -567,19 +571,19 @@ func TestIsHealthyWithAbnormalChangefeeds(t *testing.T) { require.True(t, query.Data.(bool)) // state is not normal - cf.state = &orchestrator.ChangefeedReactorState{ + state := &orchestrator.ChangefeedReactorState{ Info: &model.ChangeFeedInfo{State: model.StateStopped}, } + cf.latestInfo = state.Info + cf.latestStatus = state.Status err = o.handleQueries(query) require.NoError(t, err) require.True(t, query.Data.(bool)) // 2 changefeeds, another is normal, and scheduler initialized. o.changefeeds[model.ChangeFeedID{ID: "2"}] = &changefeed{ - state: &orchestrator.ChangefeedReactorState{ - Info: &model.ChangeFeedInfo{State: model.StateNormal}, - }, - scheduler: &healthScheduler{init: true}, + latestInfo: &model.ChangeFeedInfo{State: model.StateNormal}, + scheduler: &healthScheduler{init: true}, } err = o.handleQueries(query) require.NoError(t, err) @@ -624,10 +628,8 @@ func TestIsHealthy(t *testing.T) { // changefeed in normal, but the scheduler is not set, Unhealthy. cf := &changefeed{ - state: &orchestrator.ChangefeedReactorState{ - Info: &model.ChangeFeedInfo{State: model.StateNormal}, - }, - scheduler: nil, // scheduler is not set. + latestInfo: &model.ChangeFeedInfo{State: model.StateNormal}, + scheduler: nil, // scheduler is not set. } o.changefeeds[model.ChangeFeedID{ID: "1"}] = cf o.changefeedTicked = true @@ -650,10 +652,8 @@ func TestIsHealthy(t *testing.T) { // Unhealthy, there is another changefeed is not initialized. o.changefeeds[model.ChangeFeedID{ID: "1"}] = &changefeed{ - state: &orchestrator.ChangefeedReactorState{ - Info: &model.ChangeFeedInfo{State: model.StateNormal}, - }, - scheduler: &healthScheduler{init: false}, + latestInfo: &model.ChangeFeedInfo{State: model.StateNormal}, + scheduler: &healthScheduler{init: false}, } o.changefeedTicked = true err = o.handleQueries(query) From 3e784207dd3734c6f4e7115d9ac8668bd7859bd5 Mon Sep 17 00:00:00 2001 From: Ling Jin <7138436+3AceShowHand@users.noreply.github.com> Date: Tue, 17 Oct 2023 03:59:06 -0500 Subject: [PATCH 03/15] kafka(ticdc): support columns dispatcher. 
(#9863) close pingcap/tiflow#9862 --- cdc/api/v2/model.go | 9 ++- cdc/model/schema_storage.go | 52 +++++++++--- cdc/model/schema_storage_test.go | 54 ++++++++++++- .../dmlsink/mq/dispatcher/event_router.go | 33 ++++++-- .../mq/dispatcher/partition/columns.go | 73 +++++++++++++++++ .../mq/dispatcher/partition/columns_test.go | 81 +++++++++++++++++++ docs/swagger/docs.go | 14 ++++ docs/swagger/swagger.json | 14 ++++ docs/swagger/swagger.yaml | 11 +++ pkg/config/sink.go | 7 +- .../mq_sink_dispatcher/conf/changefeed.toml | 4 + .../mq_sink_dispatcher/conf/diff_config.toml | 29 +++++++ .../conf/new_changefeed.toml | 4 + .../mq_sink_dispatcher/run.sh | 71 ++++++++++++++++ tests/integration_tests/run_group.sh | 2 +- 15 files changed, 436 insertions(+), 22 deletions(-) create mode 100644 cdc/sink/dmlsink/mq/dispatcher/partition/columns.go create mode 100644 cdc/sink/dmlsink/mq/dispatcher/partition/columns_test.go create mode 100644 tests/integration_tests/mq_sink_dispatcher/conf/changefeed.toml create mode 100644 tests/integration_tests/mq_sink_dispatcher/conf/diff_config.toml create mode 100644 tests/integration_tests/mq_sink_dispatcher/conf/new_changefeed.toml create mode 100644 tests/integration_tests/mq_sink_dispatcher/run.sh diff --git a/cdc/api/v2/model.go b/cdc/api/v2/model.go index 0880ddc6749..af9887b0240 100644 --- a/cdc/api/v2/model.go +++ b/cdc/api/v2/model.go @@ -275,6 +275,7 @@ func (c *ReplicaConfig) toInternalReplicaConfigWithOriginConfig( DispatcherRule: "", PartitionRule: rule.PartitionRule, IndexName: rule.IndexName, + Columns: rule.Columns, TopicRule: rule.TopicRule, }) } @@ -554,6 +555,7 @@ func ToAPIReplicaConfig(c *config.ReplicaConfig) *ReplicaConfig { Matcher: rule.Matcher, PartitionRule: rule.PartitionRule, IndexName: rule.IndexName, + Columns: rule.Columns, TopicRule: rule.TopicRule, }) } @@ -915,9 +917,10 @@ type LargeMessageHandleConfig struct { // This is a duplicate of config.DispatchRule type DispatchRule struct { Matcher []string `json:"matcher,omitempty"` - PartitionRule string `json:"partition"` - IndexName string `json:"index"` - TopicRule string `json:"topic"` + PartitionRule string `json:"partition,omitempty"` + IndexName string `json:"index,omitempty"` + Columns []string `json:"columns,omitempty"` + TopicRule string `json:"topic,omitempty"` } // ColumnSelector represents a column selector for a table. diff --git a/cdc/model/schema_storage.go b/cdc/model/schema_storage.go index c9a2b21da39..26d904bba0c 100644 --- a/cdc/model/schema_storage.go +++ b/cdc/model/schema_storage.go @@ -316,18 +316,50 @@ func (ti *TableInfo) Clone() *TableInfo { return WrapTableInfo(ti.SchemaID, ti.TableName.Schema, ti.Version, ti.TableInfo.Clone()) } +// GetIndex return the corresponding index by the given name. 
+func (ti *TableInfo) GetIndex(name string) *model.IndexInfo { + for _, index := range ti.Indices { + if index != nil && index.Name.O == name { + return index + } + } + return nil +} + // IndexByName returns the index columns and offsets of the corresponding index by name func (ti *TableInfo) IndexByName(name string) ([]string, []int, bool) { - for _, index := range ti.Indices { - if index.Name.O == name { - names := make([]string, 0, len(index.Columns)) - offset := make([]int, 0, len(index.Columns)) - for _, col := range index.Columns { - names = append(names, col.Name.O) - offset = append(offset, col.Offset) - } - return names, offset, true + index := ti.GetIndex(name) + if index == nil { + return nil, nil, false + } + names := make([]string, 0, len(index.Columns)) + offset := make([]int, 0, len(index.Columns)) + for _, col := range index.Columns { + names = append(names, col.Name.O) + offset = append(offset, col.Offset) + } + return names, offset, true +} + +// ColumnsByNames returns the column offsets of the corresponding columns by names +// If any column does not exist, return false +func (ti *TableInfo) ColumnsByNames(names []string) ([]int, bool) { + // todo: optimize it + columnOffsets := make(map[string]int, len(ti.Columns)) + for _, col := range ti.Columns { + if col != nil { + columnOffsets[col.Name.O] = col.Offset } } - return nil, nil, false + + result := make([]int, 0, len(names)) + for _, col := range names { + offset, ok := columnOffsets[col] + if !ok { + return nil, false + } + result = append(result, offset) + } + + return result, true } diff --git a/cdc/model/schema_storage_test.go b/cdc/model/schema_storage_test.go index 9d4791a4799..0da4e756153 100644 --- a/cdc/model/schema_storage_test.go +++ b/cdc/model/schema_storage_test.go @@ -273,6 +273,16 @@ func TestTableInfoClone(t *testing.T) { func TestIndexByName(t *testing.T) { tableInfo := &TableInfo{ + TableInfo: &timodel.TableInfo{ + Indices: nil, + }, + } + names, offsets, ok := tableInfo.IndexByName("idx1") + require.False(t, ok) + require.Nil(t, names) + require.Nil(t, offsets) + + tableInfo = &TableInfo{ TableInfo: &timodel.TableInfo{ Indices: []*timodel.IndexInfo{ { @@ -291,7 +301,7 @@ func TestIndexByName(t *testing.T) { }, } - names, offsets, ok := tableInfo.IndexByName("idx2") + names, offsets, ok = tableInfo.IndexByName("idx2") require.False(t, ok) require.Nil(t, names) require.Nil(t, offsets) @@ -301,3 +311,45 @@ func TestIndexByName(t *testing.T) { require.Equal(t, []string{"col1"}, names) require.Equal(t, []int{0}, offsets) } + +func TestColumnsByNames(t *testing.T) { + tableInfo := &TableInfo{ + TableInfo: &timodel.TableInfo{ + Columns: []*timodel.ColumnInfo{ + { + Name: timodel.CIStr{ + O: "col2", + }, + Offset: 1, + }, + { + Name: timodel.CIStr{ + O: "col1", + }, + Offset: 0, + }, + { + Name: timodel.CIStr{ + O: "col3", + }, + Offset: 2, + }, + }, + }, + } + + names := []string{"col1", "col2", "col3"} + offsets, ok := tableInfo.ColumnsByNames(names) + require.True(t, ok) + require.Equal(t, []int{0, 1, 2}, offsets) + + names = []string{"col2"} + offsets, ok = tableInfo.ColumnsByNames(names) + require.True(t, ok) + require.Equal(t, []int{1}, offsets) + + names = []string{"col1", "col-not-found"} + offsets, ok = tableInfo.ColumnsByNames(names) + require.False(t, ok) + require.Nil(t, offsets) +} diff --git a/cdc/sink/dmlsink/mq/dispatcher/event_router.go b/cdc/sink/dmlsink/mq/dispatcher/event_router.go index a11013cf7b5..53fc0c7fd57 100644 --- a/cdc/sink/dmlsink/mq/dispatcher/event_router.go +++ 
b/cdc/sink/dmlsink/mq/dispatcher/event_router.go @@ -67,7 +67,9 @@ func NewEventRouter( f = filter.CaseInsensitive(f) } - d := getPartitionDispatcher(ruleConfig.PartitionRule, scheme, ruleConfig.IndexName) + d := getPartitionDispatcher( + ruleConfig.PartitionRule, scheme, ruleConfig.IndexName, ruleConfig.Columns, + ) t, err := getTopicDispatcher(ruleConfig.TopicRule, defaultTopic, protocol, scheme) if err != nil { return nil, err @@ -130,12 +132,27 @@ func (s *EventRouter) GetPartitionForRowChange( func (s *EventRouter) VerifyTables(infos []*model.TableInfo) error { for _, table := range infos { _, partitionDispatcher := s.matchDispatcher(table.TableName.Schema, table.TableName.Table) - if v, ok := partitionDispatcher.(*partition.IndexValueDispatcher); ok { - _, _, ok = table.IndexByName(v.IndexName) - if !ok { + switch v := partitionDispatcher.(type) { + case *partition.IndexValueDispatcher: + index := table.GetIndex(v.IndexName) + if index == nil { return cerror.ErrDispatcherFailed.GenWithStack( "index not found when verify the table, table: %v, index: %s", table.TableName, v.IndexName) } + // only allow the unique index to be set. + // For the non-unique index, if any column belongs to the index is updated, + // the event is not split, it may cause incorrect data consumption. + if !index.Unique { + return cerror.ErrDispatcherFailed.GenWithStack( + "index is not unique when verify the table, table: %v, index: %s", table.TableName, v.IndexName) + } + case *partition.ColumnsDispatcher: + _, ok := table.ColumnsByNames(v.Columns) + if !ok { + return cerror.ErrDispatcherFailed.GenWithStack( + "columns not found when verify the table, table: %v, columns: %v", table.TableName, v.Columns) + } + default: } } return nil @@ -191,7 +208,9 @@ func (s *EventRouter) matchDispatcher( } // getPartitionDispatcher returns the partition dispatcher for a specific partition rule. -func getPartitionDispatcher(rule string, scheme string, indexName string) partition.Dispatcher { +func getPartitionDispatcher( + rule string, scheme string, indexName string, columns []string, +) partition.Dispatcher { switch strings.ToLower(rule) { case "default": return partition.NewDefaultDispatcher() @@ -204,6 +223,8 @@ func getPartitionDispatcher(rule string, scheme string, indexName string) partit case "rowid": log.Warn("rowid is deprecated, index-value is used as the partition dispatcher.") return partition.NewIndexValueDispatcher(indexName) + case "columns": + return partition.NewColumnsDispatcher(columns) default: } @@ -211,7 +232,7 @@ func getPartitionDispatcher(rule string, scheme string, indexName string) partit return partition.NewKeyDispatcher(rule) } - log.Warn("the partition dispatch rule is not default/ts/table/index-value," + + log.Warn("the partition dispatch rule is not default/ts/table/index-value/columns," + " use the default rule instead.") return partition.NewDefaultDispatcher() } diff --git a/cdc/sink/dmlsink/mq/dispatcher/partition/columns.go b/cdc/sink/dmlsink/mq/dispatcher/partition/columns.go new file mode 100644 index 00000000000..e4a14cfcf2f --- /dev/null +++ b/cdc/sink/dmlsink/mq/dispatcher/partition/columns.go @@ -0,0 +1,73 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package partition + +import ( + "strconv" + "sync" + + "github.com/pingcap/log" + "github.com/pingcap/tiflow/cdc/model" + "github.com/pingcap/tiflow/pkg/errors" + "github.com/pingcap/tiflow/pkg/hash" + "go.uber.org/zap" +) + +// ColumnsDispatcher is a partition dispatcher +// which dispatches events based on the given columns. +type ColumnsDispatcher struct { + hasher *hash.PositionInertia + lock sync.Mutex + + Columns []string +} + +// NewColumnsDispatcher creates a ColumnsDispatcher. +func NewColumnsDispatcher(columns []string) *ColumnsDispatcher { + return &ColumnsDispatcher{ + hasher: hash.NewPositionInertia(), + Columns: columns, + } +} + +// DispatchRowChangedEvent returns the target partition to which +// a row changed event should be dispatched. +func (r *ColumnsDispatcher) DispatchRowChangedEvent(row *model.RowChangedEvent, partitionNum int32) (int32, string, error) { + r.lock.Lock() + defer r.lock.Unlock() + r.hasher.Reset() + + r.hasher.Write([]byte(row.Table.Schema), []byte(row.Table.Table)) + + dispatchCols := row.Columns + if len(dispatchCols) == 0 { + dispatchCols = row.PreColumns + } + + offsets, ok := row.TableInfo.ColumnsByNames(r.Columns) + if !ok { + log.Error("columns not found when dispatch event", + zap.Any("tableName", row.Table), + zap.Strings("columns", r.Columns)) + return 0, "", errors.ErrDispatcherFailed.GenWithStack( + "columns not found when dispatch event, table: %v, columns: %v", row.Table, r.Columns) + } + + for idx := 0; idx < len(r.Columns); idx++ { + r.hasher.Write([]byte(r.Columns[idx]), []byte(model.ColumnValueString(dispatchCols[offsets[idx]].Value))) + } + + sum32 := r.hasher.Sum32() + return int32(sum32 % uint32(partitionNum)), strconv.FormatInt(int64(sum32), 10), nil +} diff --git a/cdc/sink/dmlsink/mq/dispatcher/partition/columns_test.go b/cdc/sink/dmlsink/mq/dispatcher/partition/columns_test.go new file mode 100644 index 00000000000..af9ed4939ae --- /dev/null +++ b/cdc/sink/dmlsink/mq/dispatcher/partition/columns_test.go @@ -0,0 +1,81 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
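A short standalone sketch of what the columns dispatcher above computes: the schema, the table and the values of the configured columns are folded into one hash, and the hash modulo the partition count picks the Kafka partition. The hasher here is hash/fnv purely as a stand-in for the PositionInertia hasher in the real code, and the row is a plain map, so the resulting numbers differ from the real dispatcher; only the shape of the computation is the point.

package main

import (
	"fmt"
	"hash/fnv"
)

// pickPartition folds schema, table and the configured column values into one
// hash and maps it onto the partition count.
func pickPartition(schema, table string, row map[string]string, columns []string, partitionNum int32) (int32, error) {
	h := fnv.New32a()
	h.Write([]byte(schema))
	h.Write([]byte(table))
	for _, col := range columns {
		v, ok := row[col]
		if !ok {
			// Mirrors ErrDispatcherFailed: every configured column must exist.
			return 0, fmt.Errorf("column %q not found when dispatching event", col)
		}
		h.Write([]byte(col))
		h.Write([]byte(v))
	}
	return int32(h.Sum32() % uint32(partitionNum)), nil
}

func main() {
	row := map[string]string{"col1": "11", "col2": "22", "col3": "33"}
	// Rows sharing the same values for the configured columns always land in the
	// same partition, which is the property the dispatcher exists to provide.
	p1, _ := pickPartition("test", "t1", row, []string{"col2", "col1"}, 16)
	p2, _ := pickPartition("test", "t1", row, []string{"col2", "col1"}, 16)
	fmt.Println(p1 == p2) // always true: the computation is deterministic
}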
+ +package partition + +import ( + "testing" + + timodel "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tiflow/cdc/model" + "github.com/pingcap/tiflow/pkg/errors" + "github.com/stretchr/testify/require" +) + +func TestColumnsDispatcher(t *testing.T) { + t.Parallel() + + event := &model.RowChangedEvent{ + Table: &model.TableName{ + Schema: "test", + Table: "t1", + }, + TableInfo: &model.TableInfo{ + TableInfo: &timodel.TableInfo{ + Columns: []*timodel.ColumnInfo{ + { + Name: timodel.CIStr{ + O: "col2", + }, + Offset: 1, + }, + { + Name: timodel.CIStr{ + O: "col1", + }, + Offset: 0, + }, + { + Name: timodel.CIStr{ + O: "col3", + }, + Offset: 2, + }, + }, + }, + }, + Columns: []*model.Column{ + { + Name: "col1", + Value: 11, + }, + { + Name: "col2", + Value: 22, + }, + { + Name: "col3", + Value: 33, + }, + }, + } + + p := NewColumnsDispatcher([]string{"col-2", "col-not-found"}) + _, _, err := p.DispatchRowChangedEvent(event, 16) + require.ErrorIs(t, err, errors.ErrDispatcherFailed) + + p = NewColumnsDispatcher([]string{"col2", "col1"}) + index, _, err := p.DispatchRowChangedEvent(event, 16) + require.NoError(t, err) + require.Equal(t, int32(15), index) +} diff --git a/docs/swagger/docs.go b/docs/swagger/docs.go index c1948fb22ec..ca7a3347066 100644 --- a/docs/swagger/docs.go +++ b/docs/swagger/docs.go @@ -1424,11 +1424,19 @@ var doc = `{ "config.DispatchRule": { "type": "object", "properties": { + "columns": { + "description": "Columns are set when using columns dispatcher.", + "type": "array", + "items": { + "type": "string" + } + }, "dispatcher": { "description": "Deprecated, please use PartitionRule.", "type": "string" }, "index": { + "description": "IndexName is set when using index-value dispatcher with specified index.", "type": "string" }, "matcher": { @@ -2380,6 +2388,12 @@ var doc = `{ "v2.DispatchRule": { "type": "object", "properties": { + "columns": { + "type": "array", + "items": { + "type": "string" + } + }, "index": { "type": "string" }, diff --git a/docs/swagger/swagger.json b/docs/swagger/swagger.json index 3bf59aae04b..d748dd19560 100644 --- a/docs/swagger/swagger.json +++ b/docs/swagger/swagger.json @@ -1405,11 +1405,19 @@ "config.DispatchRule": { "type": "object", "properties": { + "columns": { + "description": "Columns are set when using columns dispatcher.", + "type": "array", + "items": { + "type": "string" + } + }, "dispatcher": { "description": "Deprecated, please use PartitionRule.", "type": "string" }, "index": { + "description": "IndexName is set when using index-value dispatcher with specified index.", "type": "string" }, "matcher": { @@ -2361,6 +2369,12 @@ "v2.DispatchRule": { "type": "object", "properties": { + "columns": { + "type": "array", + "items": { + "type": "string" + } + }, "index": { "type": "string" }, diff --git a/docs/swagger/swagger.yaml b/docs/swagger/swagger.yaml index 3481eb99ca2..ce21f335acb 100644 --- a/docs/swagger/swagger.yaml +++ b/docs/swagger/swagger.yaml @@ -54,10 +54,17 @@ definitions: type: object config.DispatchRule: properties: + columns: + description: Columns are set when using columns dispatcher. + items: + type: string + type: array dispatcher: description: Deprecated, please use PartitionRule. type: string index: + description: IndexName is set when using index-value dispatcher with specified + index. 
type: string matcher: items: @@ -734,6 +741,10 @@ definitions: type: object v2.DispatchRule: properties: + columns: + items: + type: string + type: array index: type: string matcher: diff --git a/pkg/config/sink.go b/pkg/config/sink.go index e78ad20a205..44d59590db5 100644 --- a/pkg/config/sink.go +++ b/pkg/config/sink.go @@ -292,7 +292,12 @@ type DispatchRule struct { // PartitionRule is an alias added for DispatcherRule to mitigate confusions. // In the future release, the DispatcherRule is expected to be removed . PartitionRule string `toml:"partition" json:"partition"` - IndexName string `toml:"index" json:"index"` + + // IndexName is set when using index-value dispatcher with specified index. + IndexName string `toml:"index" json:"index"` + + // Columns are set when using columns dispatcher. + Columns []string `toml:"columns" json:"columns"` TopicRule string `toml:"topic" json:"topic"` } diff --git a/tests/integration_tests/mq_sink_dispatcher/conf/changefeed.toml b/tests/integration_tests/mq_sink_dispatcher/conf/changefeed.toml new file mode 100644 index 00000000000..849681cee76 --- /dev/null +++ b/tests/integration_tests/mq_sink_dispatcher/conf/changefeed.toml @@ -0,0 +1,4 @@ +[sink] +dispatchers = [ + {matcher = ['dispatcher.index'], partition = "index-value", index = "idx_a"} +] diff --git a/tests/integration_tests/mq_sink_dispatcher/conf/diff_config.toml b/tests/integration_tests/mq_sink_dispatcher/conf/diff_config.toml new file mode 100644 index 00000000000..d3cb63bac5d --- /dev/null +++ b/tests/integration_tests/mq_sink_dispatcher/conf/diff_config.toml @@ -0,0 +1,29 @@ +# diff Configuration. + +check-thread-count = 4 + +export-fix-sql = true + +check-struct-only = false + +[task] +output-dir = "/tmp/tidb_cdc_test/dispatcher/output" + +source-instances = ["tidb0"] + +target-instance = "mysql1" + +target-check-tables = ["dispatcher.?*"] + +[data-sources] +[data-sources.tidb0] +host = "127.0.0.1" +port = 4000 +user = "root" +password = "" + +[data-sources.mysql1] +host = "127.0.0.1" +port = 3306 +user = "root" +password = "" diff --git a/tests/integration_tests/mq_sink_dispatcher/conf/new_changefeed.toml b/tests/integration_tests/mq_sink_dispatcher/conf/new_changefeed.toml new file mode 100644 index 00000000000..4fbc029095c --- /dev/null +++ b/tests/integration_tests/mq_sink_dispatcher/conf/new_changefeed.toml @@ -0,0 +1,4 @@ +[sink] +dispatchers = [ + {matcher = ['dispatcher.index'], partition = "index-value", index = ""} +] diff --git a/tests/integration_tests/mq_sink_dispatcher/run.sh b/tests/integration_tests/mq_sink_dispatcher/run.sh new file mode 100644 index 00000000000..ae28b63c464 --- /dev/null +++ b/tests/integration_tests/mq_sink_dispatcher/run.sh @@ -0,0 +1,71 @@ +#!/bin/bash + +set -e + +CUR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +source $CUR/../_utils/test_prepare +WORK_DIR=$OUT_DIR/$TEST_NAME +CDC_BINARY=cdc.test +SINK_TYPE=$1 +MAX_RETRIES=10 + +# use kafka-consumer with canal-json decoder to sync data from kafka to mysql +function run() { + if [ "$SINK_TYPE" != "kafka" ]; then + return + fi + + rm -rf $WORK_DIR && mkdir -p $WORK_DIR + + start_tidb_cluster --workdir $WORK_DIR + + cd $WORK_DIR + + TOPIC_NAME="dispatcher-test" + + run_cdc_server --workdir $WORK_DIR --binary $CDC_BINARY --loglevel "info" + + changefeed_id="test" + # record tso before we create tables to skip the system table DDLs + start_ts=$(run_cdc_cli_tso_query ${UP_PD_HOST_1} ${UP_PD_PORT_1}) + SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=canal-json&enable-tidb-extension=true" + 
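This integration test exercises the verification step added to VerifyTables earlier in the patch: a changefeed whose dispatch rule names a missing or non-unique index, or a missing column, fails with ErrDispatcherFailed before any data flows. A compact standalone sketch of those checks, with a hand-rolled table description instead of model.TableInfo; treating an empty index name as nothing to verify is an assumption made only for this sketch, consistent with the new_changefeed.toml fix below.

package main

import (
	"errors"
	"fmt"
)

// Minimal table metadata standing in for model.TableInfo.
type index struct {
	name   string
	unique bool
}
type table struct {
	columns []string
	indexes []index
}

// verifyRule sketches the pre-run checks: index-value needs an existing unique
// index, columns needs every configured column to exist.
func verifyRule(t table, rule, indexName string, columns []string) error {
	switch rule {
	case "index-value":
		if indexName == "" {
			return nil // no specific index requested, nothing to verify
		}
		for _, idx := range t.indexes {
			if idx.name == indexName {
				if !idx.unique {
					return errors.New("dispatcher failed: index is not unique")
				}
				return nil
			}
		}
		return errors.New("dispatcher failed: index not found")
	case "columns":
		existing := make(map[string]bool, len(t.columns))
		for _, c := range t.columns {
			existing[c] = true
		}
		for _, c := range columns {
			if !existing[c] {
				return fmt.Errorf("dispatcher failed: column %q not found", c)
			}
		}
	}
	return nil
}

func main() {
	// dispatcher.index in the test has primary key `a` and no index named idx_a.
	t := table{columns: []string{"a", "b"}, indexes: []index{{name: "PRIMARY", unique: true}}}
	fmt.Println(verifyRule(t, "index-value", "idx_a", nil)) // index not found
	fmt.Println(verifyRule(t, "columns", "", []string{"a"})) // <nil>
}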
run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" -c ${changefeed_id} --config="$CUR/conf/changefeed.toml" + + ensure $MAX_RETRIES check_changefeed_state http://${UP_PD_HOST_1}:${UP_PD_PORT_1} $changefeed_id "normal" "null" "" + + run_sql "DROP DATABASE if exists dispatcher;" ${UP_TIDB_HOST} ${UP_TIDB_PORT} + run_sql "CREATE DATABASE dispatcher;" ${UP_TIDB_HOST} ${UP_TIDB_PORT} + run_sql "CREATE TABLE dispatcher.index (a int primary key, b int);" ${UP_TIDB_HOST} ${UP_TIDB_PORT} + + run_sql "INSERT INTO dispatcher.index values (1, 2);" ${UP_TIDB_HOST} ${UP_TIDB_PORT} + + ensure $MAX_RETRIES check_changefeed_state http://${UP_PD_HOST_1}:${UP_PD_PORT_1} $changefeed_id "failed" "ErrDispatcherFailed" + + run_cdc_cli changefeed update -c ${changefeed_id} --sink-uri="$SINK_URI" --config="$CUR/conf/new_changefeed.toml" --no-confirm + + run_cdc_cli changefeed resume -c ${changefeed_id} + + ensure $MAX_RETRIES check_changefeed_state http://${UP_PD_HOST_1}:${UP_PD_PORT_1} $changefeed_id "normal" "null" "" + + cdc_kafka_consumer --upstream-uri $SINK_URI --downstream-uri="mysql://root@127.0.0.1:3306/?safe-mode=true&batch-dml-enable=false" --upstream-tidb-dsn="root@tcp(${UP_TIDB_HOST}:${UP_TIDB_PORT})/?" --config="$CUR/conf/new_changefeed.toml" 2>&1 & + + run_sql "INSERT INTO dispatcher.index values (2, 3);" ${UP_TIDB_HOST} ${UP_TIDB_PORT} + run_sql "INSERT INTO dispatcher.index values (3, 4);" ${UP_TIDB_HOST} ${UP_TIDB_PORT} + run_sql "INSERT INTO dispatcher.index values (4, 5);" ${UP_TIDB_HOST} ${UP_TIDB_PORT} + run_sql "UPDATE dispatcher.index set b = 5 where a = 1;" ${UP_TIDB_HOST} ${UP_TIDB_PORT} + run_sql "UPDATE dispatcher.index set b = 6 where a = 2;" ${UP_TIDB_HOST} ${UP_TIDB_PORT} + run_sql "DELETE FROM dispatcher.index where a = 3;" ${UP_TIDB_HOST} ${UP_TIDB_PORT} + + run_sql "CREATE TABLE test.finish_mark (a int primary key);" ${UP_TIDB_HOST} ${UP_TIDB_PORT} + + # sync_diff can't check non-exist table, so we check expected tables are created in downstream first + check_table_exists test.finish_mark ${DOWN_TIDB_HOST} ${DOWN_TIDB_PORT} 200 + check_sync_diff $WORK_DIR $CUR/conf/diff_config.toml + + cleanup_process $CDC_BINARY +} + +trap stop_tidb_cluster EXIT +run $* +check_logs $WORK_DIR +echo "[$(date)] <<<<<< run test case $TEST_NAME success! 
>>>>>>" diff --git a/tests/integration_tests/run_group.sh b/tests/integration_tests/run_group.sh index 21fb8d504ab..0364ef6831f 100755 --- a/tests/integration_tests/run_group.sh +++ b/tests/integration_tests/run_group.sh @@ -14,7 +14,7 @@ mysql_only="bdr_mode capture_suicide_while_balance_table syncpoint hang_sink_sui mysql_only_http="http_api http_api_tls api_v2" mysql_only_consistent_replicate="consistent_replicate_ddl consistent_replicate_gbk consistent_replicate_nfs consistent_replicate_storage_file consistent_replicate_storage_file_large_value consistent_replicate_storage_s3 consistent_partition_table" -kafka_only="kafka_big_messages kafka_compression kafka_messages kafka_sink_error_resume mq_sink_lost_callback" +kafka_only="kafka_big_messages kafka_compression kafka_messages kafka_sink_error_resume mq_sink_lost_callback mq_sink_dispatcher" kafka_only_protocol="canal_json_adapter_compatibility canal_json_basic multi_topics avro_basic canal_json_handle_key_only open_protocol_handle_key_only canal_json_claim_check open_protocol_claim_check" kafka_only_v2="kafka_big_txn_v2 kafka_big_messages_v2 multi_tables_ddl_v2 multi_topics_v2" From 9203b1ce27ed7463fffc82e6627dc62bc10676b9 Mon Sep 17 00:00:00 2001 From: qupeng Date: Tue, 17 Oct 2023 17:52:59 +0800 Subject: [PATCH 04/15] kv-client(cdc): add more metrics and logs (#9912) ref pingcap/tiflow#9222 --- cdc/kv/client.go | 7 ++-- cdc/kv/metrics.go | 5 +++ cdc/kv/regionlock/region_range_lock.go | 13 +++---- cdc/kv/regionlock/region_range_lock_test.go | 8 ++-- cdc/kv/shared_client.go | 6 +-- cdc/kv/shared_client_test.go | 2 +- cdc/kv/sharedconn/conn_and_client.go | 43 +++++++++++++-------- cdc/kv/sharedconn/conn_and_client_test.go | 11 +++--- cdc/processor/sourcemanager/manager.go | 2 +- cdc/puller/ddl_puller.go | 2 +- 10 files changed, 57 insertions(+), 42 deletions(-) diff --git a/cdc/kv/client.go b/cdc/kv/client.go index f70ec5e0e97..f500e0c321d 100644 --- a/cdc/kv/client.go +++ b/cdc/kv/client.go @@ -374,9 +374,10 @@ func newEventFeedSession( startTs uint64, eventCh chan<- model.RegionFeedEvent, ) *eventFeedSession { - id := strconv.FormatUint(allocID(), 10) + id := allocID() + idStr := strconv.FormatUint(id, 10) rangeLock := regionlock.NewRegionRangeLock( - totalSpan.StartKey, totalSpan.EndKey, startTs, + id, totalSpan.StartKey, totalSpan.EndKey, startTs, client.changefeed.Namespace+"."+client.changefeed.ID) return &eventFeedSession{ client: client, @@ -389,7 +390,7 @@ func newEventFeedSession( eventCh: eventCh, rangeLock: rangeLock, lockResolver: lockResolver, - id: id, + id: idStr, regionChSizeGauge: clientChannelSize.WithLabelValues("region"), errChSizeGauge: clientChannelSize.WithLabelValues("err"), rangeChSizeGauge: clientChannelSize.WithLabelValues("range"), diff --git a/cdc/kv/metrics.go b/cdc/kv/metrics.go index 6b6127f43c4..fb0d6d90a9b 100644 --- a/cdc/kv/metrics.go +++ b/cdc/kv/metrics.go @@ -145,6 +145,11 @@ var ( []string{"namespace", "changefeed"}) ) +// GetGlobalGrpcMetrics gets the global grpc metrics. 
+func GetGlobalGrpcMetrics() *grpc_prometheus.ClientMetrics { + return grpcMetrics +} + // InitMetrics registers all metrics in the kv package func InitMetrics(registry *prometheus.Registry) { registry.MustRegister(eventFeedErrorCounter) diff --git a/cdc/kv/regionlock/region_range_lock.go b/cdc/kv/regionlock/region_range_lock.go index ac876dd5545..930ba3e960e 100644 --- a/cdc/kv/regionlock/region_range_lock.go +++ b/cdc/kv/regionlock/region_range_lock.go @@ -140,12 +140,6 @@ func (e *rangeLockEntry) String() string { len(e.waiters)) } -var currentID uint64 = 0 - -func allocID() uint64 { - return atomic.AddUint64(¤tID, 1) -} - // RegionRangeLock is specifically used for kv client to manage exclusive region ranges. Acquiring lock will be blocked // if part of its range is already locked. It also manages checkpoint ts of all ranges. The ranges are marked by a // version number, which should comes from the Region's Epoch version. The version is used to compare which range is @@ -166,10 +160,11 @@ type RegionRangeLock struct { // NewRegionRangeLock creates a new RegionRangeLock. func NewRegionRangeLock( + id uint64, startKey, endKey []byte, startTs uint64, changefeedLogInfo string, ) *RegionRangeLock { return &RegionRangeLock{ - id: allocID(), + id: id, totalSpan: tablepb.Span{StartKey: startKey, EndKey: endKey}, changefeedLogInfo: changefeedLogInfo, rangeCheckpointTs: newRangeTsMap(startKey, endKey, startTs), @@ -489,7 +484,9 @@ func (l *RegionRangeLock) CollectLockedRangeAttrs( lastEnd := l.totalSpan.StartKey l.rangeLock.Ascend(func(item *rangeLockEntry) bool { - action(item.regionID, &item.state) + if action != nil { + action(item.regionID, &item.state) + } r.HoleExists = r.HoleExists || spanz.EndCompare(lastEnd, item.startKey) < 0 ckpt := item.state.CheckpointTs.Load() diff --git a/cdc/kv/regionlock/region_range_lock_test.go b/cdc/kv/regionlock/region_range_lock_test.go index 8b1a5690190..af887248164 100644 --- a/cdc/kv/regionlock/region_range_lock_test.go +++ b/cdc/kv/regionlock/region_range_lock_test.go @@ -90,7 +90,7 @@ func TestRegionRangeLock(t *testing.T) { t.Parallel() ctx := context.TODO() - l := NewRegionRangeLock([]byte("a"), []byte("h"), math.MaxUint64, "") + l := NewRegionRangeLock(1, []byte("a"), []byte("h"), math.MaxUint64, "") mustLockRangeSuccess(ctx, t, l, "a", "e", 1, 1, math.MaxUint64) unlockRange(l, "a", "e", 1, 1, 100) @@ -107,7 +107,7 @@ func TestRegionRangeLock(t *testing.T) { func TestRegionRangeLockStale(t *testing.T) { t.Parallel() - l := NewRegionRangeLock([]byte("a"), []byte("z"), math.MaxUint64, "") + l := NewRegionRangeLock(1, []byte("a"), []byte("z"), math.MaxUint64, "") ctx := context.TODO() mustLockRangeSuccess(ctx, t, l, "c", "g", 1, 10, math.MaxUint64) mustLockRangeSuccess(ctx, t, l, "j", "n", 2, 8, math.MaxUint64) @@ -130,7 +130,7 @@ func TestRegionRangeLockLockingRegionID(t *testing.T) { t.Parallel() ctx := context.TODO() - l := NewRegionRangeLock([]byte("a"), []byte("z"), math.MaxUint64, "") + l := NewRegionRangeLock(1, []byte("a"), []byte("z"), math.MaxUint64, "") mustLockRangeSuccess(ctx, t, l, "c", "d", 1, 10, math.MaxUint64) mustLockRangeStale(ctx, t, l, "e", "f", 1, 5, "e", "f") @@ -166,7 +166,7 @@ func TestRegionRangeLockCanBeCancelled(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) - l := NewRegionRangeLock([]byte("a"), []byte("z"), math.MaxUint64, "") + l := NewRegionRangeLock(1, []byte("a"), []byte("z"), math.MaxUint64, "") mustLockRangeSuccess(ctx, t, l, "g", "h", 1, 10, math.MaxUint64) wait := 
mustLockRangeWait(ctx, t, l, "g", "h", 1, 12) cancel() diff --git a/cdc/kv/shared_client.go b/cdc/kv/shared_client.go index d61d5a23a88..d90f46ff242 100644 --- a/cdc/kv/shared_client.go +++ b/cdc/kv/shared_client.go @@ -406,7 +406,7 @@ func (s *SharedClient) createRegionRequest(sri singleRegionInfo) *cdcpb.ChangeDa func (s *SharedClient) appendRequest(r *requestedStore, sri singleRegionInfo) { offset := r.nextStream.Add(1) % uint32(len(r.streams)) - log.Debug("event feed will request a region", + log.Info("event feed will request a region", zap.String("namespace", s.changefeed.Namespace), zap.String("changefeed", s.changefeed.ID), zap.Uint64("streamID", r.streams[offset].streamID), @@ -572,7 +572,7 @@ func (s *SharedClient) handleError(ctx context.Context, errInfo regionErrorInfo) switch eerr := err.(type) { case *eventError: innerErr := eerr.err - log.Debug("cdc error", + log.Info("cdc region error", zap.String("namespace", s.changefeed.Namespace), zap.String("changefeed", s.changefeed.ID), zap.Any("subscriptionID", errInfo.requestedTable.subscriptionID), @@ -694,7 +694,7 @@ func (s *SharedClient) newRequestedTable( eventCh chan<- MultiplexingEvent, ) *requestedTable { cfName := s.changefeed.String() - rangeLock := regionlock.NewRegionRangeLock(span.StartKey, span.EndKey, startTs, cfName) + rangeLock := regionlock.NewRegionRangeLock(uint64(subID), span.StartKey, span.EndKey, startTs, cfName) rt := &requestedTable{ subscriptionID: subID, diff --git a/cdc/kv/shared_client_test.go b/cdc/kv/shared_client_test.go index 18041a19a70..5a28d6e7bef 100644 --- a/cdc/kv/shared_client_test.go +++ b/cdc/kv/shared_client_test.go @@ -108,7 +108,7 @@ func TestConnectToOfflineOrFailedTiKV(t *testing.T) { pdClient = &mockPDClient{Client: pdClient, versionGen: defaultVersionGen} - grpcPool := sharedconn.NewConnAndClientPool(&security.Credential{}) + grpcPool := sharedconn.NewConnAndClientPool(&security.Credential{}, nil) regionCache := tikv.NewRegionCache(pdClient) diff --git a/cdc/kv/sharedconn/conn_and_client.go b/cdc/kv/sharedconn/conn_and_client.go index 79b3f3135e2..c3a96473dec 100644 --- a/cdc/kv/sharedconn/conn_and_client.go +++ b/cdc/kv/sharedconn/conn_and_client.go @@ -44,6 +44,7 @@ func StatusIsEOF(status *grpcstatus.Status) bool { // ConnAndClientPool is a pool of ConnAndClient. type ConnAndClientPool struct { credential *security.Credential + grpcMetrics *grpc_prometheus.ClientMetrics maxStreamsPerConn int sync.Mutex @@ -74,14 +75,23 @@ type connArray struct { } // NewConnAndClientPool creates a new ConnAndClientPool. 
-func NewConnAndClientPool(credential *security.Credential, maxStreamsPerConn ...int) *ConnAndClientPool { - return newConnAndClientPool(credential, 1000) +func NewConnAndClientPool( + credential *security.Credential, + grpcMetrics *grpc_prometheus.ClientMetrics, + maxStreamsPerConn ...int, +) *ConnAndClientPool { + return newConnAndClientPool(credential, grpcMetrics, 1000) } -func newConnAndClientPool(credential *security.Credential, maxStreamsPerConn int) *ConnAndClientPool { +func newConnAndClientPool( + credential *security.Credential, + grpcMetrics *grpc_prometheus.ClientMetrics, + maxStreamsPerConn int, +) *ConnAndClientPool { stores := make(map[string]*connArray, 64) return &ConnAndClientPool{ credential: credential, + grpcMetrics: grpcMetrics, maxStreamsPerConn: maxStreamsPerConn, stores: stores, } @@ -105,7 +115,7 @@ func (c *ConnAndClientPool) Connect(ctx context.Context, addr string) (cc *ConnA conns.Unlock() var conn *Conn - if conn, err = conns.connect(ctx, c.credential); err != nil { + if conn, err = conns.connect(ctx); err != nil { return } if conn != nil { @@ -162,11 +172,11 @@ func (c *ConnAndClient) Release() { } } -func (c *connArray) connect(ctx context.Context, credential *security.Credential) (conn *Conn, err error) { +func (c *connArray) connect(ctx context.Context) (conn *Conn, err error) { if c.inConnecting.CompareAndSwap(false, true) { defer c.inConnecting.Store(false) var clientConn *grpc.ClientConn - if clientConn, err = connect(ctx, credential, c.addr); err != nil { + if clientConn, err = c.pool.connect(ctx, c.addr); err != nil { return } @@ -240,21 +250,17 @@ func (c *connArray) sort(locked bool) { }) } -func connect(ctx context.Context, credential *security.Credential, target string) (*grpc.ClientConn, error) { - grpcTLSOption, err := credential.ToGRPCDialOption() +func (c *ConnAndClientPool) connect(ctx context.Context, target string) (*grpc.ClientConn, error) { + grpcTLSOption, err := c.credential.ToGRPCDialOption() if err != nil { return nil, err } - return grpc.DialContext( - ctx, - target, + dialOptions := []grpc.DialOption{ grpcTLSOption, grpc.WithInitialWindowSize(grpcInitialWindowSize), grpc.WithInitialConnWindowSize(grpcInitialConnWindowSize), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(grpcMaxCallRecvMsgSize)), - grpc.WithUnaryInterceptor(grpcMetrics.UnaryClientInterceptor()), - grpc.WithStreamInterceptor(grpcMetrics.StreamClientInterceptor()), grpc.WithConnectParams(grpc.ConnectParams{ Backoff: backoff.Config{ BaseDelay: time.Second, @@ -269,7 +275,14 @@ func connect(ctx context.Context, credential *security.Credential, target string Timeout: 3 * time.Second, PermitWithoutStream: true, }), - ) + } + + if c.grpcMetrics != nil { + dialOptions = append(dialOptions, grpc.WithUnaryInterceptor(c.grpcMetrics.UnaryClientInterceptor())) + dialOptions = append(dialOptions, grpc.WithStreamInterceptor(c.grpcMetrics.StreamClientInterceptor())) + } + + return grpc.DialContext(ctx, target, dialOptions...) 
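The connect refactor above boils down to making the gRPC client interceptors optional: the metrics collector is now injected through the pool instead of living in a package-level variable, and passing nil simply means no interceptors are installed. A standalone sketch of that pattern; the dial target is a placeholder and the insecure credentials are chosen only to keep the example self-contained.

package main

import (
	"context"
	"time"

	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/prometheus/client_golang/prometheus"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// dial appends the prometheus client interceptors only when a metrics
// collector is actually supplied, so tests can pass nil and skip metrics.
func dial(ctx context.Context, target string, m *grpc_prometheus.ClientMetrics) (*grpc.ClientConn, error) {
	opts := []grpc.DialOption{
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	}
	if m != nil {
		opts = append(opts,
			grpc.WithUnaryInterceptor(m.UnaryClientInterceptor()),
			grpc.WithStreamInterceptor(m.StreamClientInterceptor()),
		)
	}
	return grpc.DialContext(ctx, target, opts...)
}

func main() {
	// One shared ClientMetrics instance, registered once, can be handed to
	// every dialer that should report gRPC client metrics.
	metrics := grpc_prometheus.NewClientMetrics()
	registry := prometheus.NewRegistry()
	registry.MustRegister(metrics)

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if conn, err := dial(ctx, "127.0.0.1:20160", metrics); err == nil {
		defer conn.Close()
	}
}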
} const ( @@ -290,8 +303,6 @@ const ( rpcMetaFeatureStreamMultiplexing string = "stream-multiplexing" ) -var grpcMetrics = grpc_prometheus.NewClientMetrics() - func getContextFromFeatures(ctx context.Context, features []string) context.Context { return metadata.NewOutgoingContext( ctx, diff --git a/cdc/kv/sharedconn/conn_and_client_test.go b/cdc/kv/sharedconn/conn_and_client_test.go index b87589e7071..991ddbb61cc 100644 --- a/cdc/kv/sharedconn/conn_and_client_test.go +++ b/cdc/kv/sharedconn/conn_and_client_test.go @@ -47,7 +47,7 @@ func TestConnAndClientPool(t *testing.T) { require.NotNil(t, svc) defer svc.GracefulStop() - pool := newConnAndClientPool(&security.Credential{}, 2) + pool := newConnAndClientPool(&security.Credential{}, nil, 2) cc1, err := pool.Connect(context.Background(), addr) require.Nil(t, err) require.NotNil(t, cc1) @@ -95,7 +95,7 @@ func TestConnAndClientPoolForV2(t *testing.T) { require.NotNil(t, svc) defer svc.GracefulStop() - pool := newConnAndClientPool(&security.Credential{}, 2) + pool := newConnAndClientPool(&security.Credential{}, nil, 2) cc1, err := pool.Connect(context.Background(), addr) require.Nil(t, err) require.NotNil(t, cc1) @@ -106,11 +106,12 @@ func TestConnAndClientPoolForV2(t *testing.T) { } func TestConnectToUnavailable(t *testing.T) { + pool := newConnAndClientPool(&security.Credential{}, nil, 1) + targets := []string{"127.0.0.1:9999", "2.2.2.2:9999"} for _, target := range targets { ctx := context.Background() - - conn, err := connect(ctx, &security.Credential{}, target) + conn, err := pool.connect(ctx, target) require.NotNil(t, conn) require.Nil(t, err) @@ -136,7 +137,7 @@ func TestConnectToUnavailable(t *testing.T) { require.NotNil(t, svc) defer svc.GracefulStop() - conn, err := connect(context.Background(), &security.Credential{}, addr) + conn, err := pool.connect(context.Background(), addr) require.NotNil(t, conn) require.Nil(t, err) diff --git a/cdc/processor/sourcemanager/manager.go b/cdc/processor/sourcemanager/manager.go index 59a377366dd..3035076993f 100644 --- a/cdc/processor/sourcemanager/manager.go +++ b/cdc/processor/sourcemanager/manager.go @@ -200,7 +200,7 @@ func (m *SourceManager) GetTableSorterStats(span tablepb.Span) engine.TableStats func (m *SourceManager) Run(ctx context.Context, _ ...chan<- error) error { if m.multiplexing { serverConfig := config.GetGlobalServerConfig() - grpcPool := sharedconn.NewConnAndClientPool(m.up.SecurityConfig) + grpcPool := sharedconn.NewConnAndClientPool(m.up.SecurityConfig, kv.GetGlobalGrpcMetrics()) client := kv.NewSharedClient( m.changefeedID, serverConfig, m.bdrMode, m.up.PDClient, grpcPool, m.up.RegionCache, m.up.PDClock, diff --git a/cdc/puller/ddl_puller.go b/cdc/puller/ddl_puller.go index 4777e8645a6..041ebc5dcd4 100644 --- a/cdc/puller/ddl_puller.go +++ b/cdc/puller/ddl_puller.go @@ -597,7 +597,7 @@ func NewDDLJobPuller( rawDDLCh := make(chan *model.RawKVEntry, defaultPullerOutputChanSize) mp.sortedDDLCh = memorysorter.SortOutput(ctx, changefeed, rawDDLCh) - grpcPool := sharedconn.NewConnAndClientPool(up.SecurityConfig) + grpcPool := sharedconn.NewConnAndClientPool(up.SecurityConfig, kv.GetGlobalGrpcMetrics()) client := kv.NewSharedClient( changefeed, cfg, ddlPullerFilterLoop, From c988064e2305b3e4c6b0c2bf6e7d8851c33b9712 Mon Sep 17 00:00:00 2001 From: Ling Jin <7138436+3AceShowHand@users.noreply.github.com> Date: Tue, 17 Oct 2023 23:09:29 -0500 Subject: [PATCH 05/15] codec(ticdc): add simple encoder and decoder interface declaration. 
(#9903) close pingcap/tiflow#9899 --- pkg/sink/codec/simple/decoder.go | 56 +++++++++++++++++++++++++++ pkg/sink/codec/simple/encoder.go | 66 ++++++++++++++++++++++++++++++++ 2 files changed, 122 insertions(+) create mode 100644 pkg/sink/codec/simple/decoder.go create mode 100644 pkg/sink/codec/simple/encoder.go diff --git a/pkg/sink/codec/simple/decoder.go b/pkg/sink/codec/simple/decoder.go new file mode 100644 index 00000000000..ef8e4598bc0 --- /dev/null +++ b/pkg/sink/codec/simple/decoder.go @@ -0,0 +1,56 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package simple + +import ( + "github.com/pingcap/tiflow/cdc/model" +) + +type decoder struct{} + +// NewDecoder returns a new decoder +func NewDecoder() *decoder { + // TODO implement me + panic("implement me") +} + +// AddKeyValue add the received key and values to the decoder, +func (d *decoder) AddKeyValue(key, value []byte) error { + // TODO implement me + panic("implement me") +} + +// HasNext returns whether there is any event need to be consumed +func (d *decoder) HasNext() (model.MessageType, bool, error) { + // TODO implement me + panic("implement me") +} + +// NextResolvedEvent returns the next resolved event if exists +func (d *decoder) NextResolvedEvent() (uint64, error) { + // TODO implement me + panic("implement me") +} + +// NextRowChangedEvent returns the next row changed event if exists +func (d *decoder) NextRowChangedEvent() (*model.RowChangedEvent, error) { + // TODO implement me + panic("implement me") +} + +// NextDDLEvent returns the next DDL event if exists +func (d *decoder) NextDDLEvent() (*model.DDLEvent, error) { + // TODO implement me + panic("implement me") +} diff --git a/pkg/sink/codec/simple/encoder.go b/pkg/sink/codec/simple/encoder.go new file mode 100644 index 00000000000..babf9d8cf20 --- /dev/null +++ b/pkg/sink/codec/simple/encoder.go @@ -0,0 +1,66 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
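The decoder declared above is only a skeleton, every method still panics, but its method set already fixes the consumption protocol: feed a key/value pair in with AddKeyValue, then loop on HasNext and pull the event that matches the reported type. A sketch of that loop against a simplified local interface; the message-type constants and payload types here are invented stand-ins for model.MessageType and the cdc/model event structs, and the payloads are shortened to strings.

package main

import "fmt"

// Local stand-ins for the message kinds a decoder reports.
type messageType int

const (
	typeRow messageType = iota + 1
	typeDDL
	typeResolved
)

// rowDecoder mirrors the method names declared for the simple decoder above,
// with simplified payload types for the sketch.
type rowDecoder interface {
	AddKeyValue(key, value []byte) error
	HasNext() (messageType, bool, error)
	NextResolvedEvent() (uint64, error)
	NextRowChangedEvent() (string, error)
	NextDDLEvent() (string, error)
}

// consume shows the loop a consumer is expected to run once the skeleton is
// implemented: add one key/value pair, then drain whatever events it holds.
func consume(d rowDecoder, key, value []byte) error {
	if err := d.AddKeyValue(key, value); err != nil {
		return err
	}
	for {
		tp, ok, err := d.HasNext()
		if err != nil || !ok {
			return err
		}
		switch tp {
		case typeRow:
			row, err := d.NextRowChangedEvent()
			if err != nil {
				return err
			}
			fmt.Println("row:", row)
		case typeDDL:
			ddl, err := d.NextDDLEvent()
			if err != nil {
				return err
			}
			fmt.Println("ddl:", ddl)
		case typeResolved:
			ts, err := d.NextResolvedEvent()
			if err != nil {
				return err
			}
			fmt.Println("resolved ts:", ts)
		}
	}
}

func main() {
	// No concrete decoder exists yet in this patch; the interface above only
	// makes the expected call sequence explicit.
	_ = consume
}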
+ +package simple + +import ( + "context" + + "github.com/pingcap/tiflow/cdc/model" + "github.com/pingcap/tiflow/pkg/sink/codec/common" +) + +//nolint:unused +type encoder struct{} + +type builder struct{} + +// NewBuilder returns a new builder +func NewBuilder() *builder { + // TODO implement me + panic("implement me") +} + +// AppendRowChangedEvent implement the RowEventEncoder interface +// +//nolint:unused +func (e *encoder) AppendRowChangedEvent( + ctx context.Context, s string, event *model.RowChangedEvent, callback func(), +) error { + // TODO implement me + panic("implement me") +} + +// Build implement the RowEventEncoder interface +// +//nolint:unused +func (e *encoder) Build() []*common.Message { + // TODO implement me + panic("implement me") +} + +// EncodeCheckpointEvent implement the DDLEventBatchEncoder interface +// +//nolint:unused +func (e *encoder) EncodeCheckpointEvent(ts uint64) (*common.Message, error) { + // TODO implement me + panic("implement me") +} + +// EncodeDDLEvent implement the DDLEventBatchEncoder interface +// +//nolint:unused +func (e *encoder) EncodeDDLEvent(event *model.DDLEvent) (*common.Message, error) { + // TODO implement me + panic("implement me") +} From 0a70e3a9ee13f5cc26c8a39a6cc24ffe57878d7b Mon Sep 17 00:00:00 2001 From: Ling Jin <7138436+3AceShowHand@users.noreply.github.com> Date: Tue, 17 Oct 2023 23:09:35 -0500 Subject: [PATCH 06/15] changefeed(ticdc): check changefeed info is nil to prevent panic (#9917) close pingcap/tiflow#9915 --- pkg/upstream/manager.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/upstream/manager.go b/pkg/upstream/manager.go index 4dcc89d4302..6cd90ef92e7 100644 --- a/pkg/upstream/manager.go +++ b/pkg/upstream/manager.go @@ -196,7 +196,9 @@ func (m *Manager) Tick(ctx context.Context, activeUpstreams := make(map[uint64]struct{}) for _, cf := range globalState.Changefeeds { - activeUpstreams[cf.Info.UpstreamID] = struct{}{} + if cf != nil && cf.Info != nil { + activeUpstreams[cf.Info.UpstreamID] = struct{}{} + } } m.mu.Lock() defer m.mu.Unlock() From 230ee2e4d02493a4ed8e8109612e5fbc1a9a9c92 Mon Sep 17 00:00:00 2001 From: Jianyuan Jiang Date: Wed, 18 Oct 2023 22:18:28 +0800 Subject: [PATCH 07/15] server(ticc): add feature flag for cdc v2 (#9914) close pingcap/tiflow#9913 --- cdc/server/server.go | 10 +- cdcv2/capture/capture.go | 254 +++++++++++++++++++++++++++++++++ pkg/cmd/server/server_test.go | 16 +++ pkg/config/cdc_v2.go | 58 ++++++++ pkg/config/cdc_v2_test.go | 40 ++++++ pkg/config/config_test_data.go | 8 +- pkg/config/debug.go | 6 + pkg/config/server_config.go | 1 + 8 files changed, 390 insertions(+), 3 deletions(-) create mode 100644 cdcv2/capture/capture.go create mode 100644 pkg/config/cdc_v2.go create mode 100644 pkg/config/cdc_v2_test.go diff --git a/cdc/server/server.go b/cdc/server/server.go index 3180ee0dc10..f57ab945a11 100644 --- a/cdc/server/server.go +++ b/cdc/server/server.go @@ -31,6 +31,7 @@ import ( "github.com/pingcap/tiflow/cdc/capture" "github.com/pingcap/tiflow/cdc/kv" "github.com/pingcap/tiflow/cdc/processor/sourcemanager/engine/factory" + capturev2 "github.com/pingcap/tiflow/cdcv2/capture" "github.com/pingcap/tiflow/pkg/config" cerror "github.com/pingcap/tiflow/pkg/errors" "github.com/pingcap/tiflow/pkg/etcd" @@ -197,8 +198,13 @@ func (s *server) prepare(ctx context.Context) error { s.createSortEngineFactory() s.setMemoryLimit() - s.capture = capture.NewCapture(s.pdEndpoints, cdcEtcdClient, - s.grpcService, s.sortEngineFactory, s.pdClient) + if 
conf.Debug.CDCV2.Enable { + s.capture = capturev2.NewCapture(s.pdEndpoints, cdcEtcdClient, + s.grpcService, s.sortEngineFactory, s.pdClient) + } else { + s.capture = capture.NewCapture(s.pdEndpoints, cdcEtcdClient, + s.grpcService, s.sortEngineFactory, s.pdClient) + } return nil } diff --git a/cdcv2/capture/capture.go b/cdcv2/capture/capture.go new file mode 100644 index 00000000000..a5bac0fdb98 --- /dev/null +++ b/cdcv2/capture/capture.go @@ -0,0 +1,254 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package capture + +import ( + "context" + "io" + "sync" + + "github.com/pingcap/errors" + "github.com/pingcap/log" + "github.com/pingcap/tiflow/cdc/capture" + "github.com/pingcap/tiflow/cdc/controller" + "github.com/pingcap/tiflow/cdc/model" + "github.com/pingcap/tiflow/cdc/owner" + "github.com/pingcap/tiflow/cdc/processor/sourcemanager/engine/factory" + "github.com/pingcap/tiflow/pkg/config" + cerror "github.com/pingcap/tiflow/pkg/errors" + "github.com/pingcap/tiflow/pkg/etcd" + "github.com/pingcap/tiflow/pkg/p2p" + "github.com/pingcap/tiflow/pkg/upstream" + pd "github.com/tikv/pd/client" + "go.uber.org/zap" + "golang.org/x/time/rate" +) + +// NewCapture returns a new Capture instance +func NewCapture(pdEndpoints []string, + etcdClient etcd.CDCEtcdClient, + grpcService *p2p.ServerWrapper, + sortEngineMangerFactory *factory.SortEngineFactory, + pdClient pd.Client, +) capture.Capture { + return &captureImpl{ + config: config.GetGlobalServerConfig(), + liveness: model.LivenessCaptureAlive, + EtcdClient: etcdClient, + grpcService: grpcService, + cancel: func() {}, + pdEndpoints: pdEndpoints, + info: &model.CaptureInfo{}, + sortEngineFactory: sortEngineMangerFactory, + pdClient: pdClient, + } +} + +type captureImpl struct { + // captureMu is used to protect the capture info and processorManager. + captureMu sync.Mutex + info *model.CaptureInfo + liveness model.Liveness + config *config.ServerConfig + + pdClient pd.Client + pdEndpoints []string + ownerMu sync.Mutex + owner owner.Owner + controller controller.Controller + upstreamManager *upstream.Manager + + EtcdClient etcd.CDCEtcdClient + + sortEngineFactory *factory.SortEngineFactory + + // MessageServer is the receiver of the messages from the other nodes. + // It should be recreated each time the capture is restarted. + MessageServer *p2p.MessageServer + + // MessageRouter manages the clients to send messages to all peers. + MessageRouter p2p.MessageRouter + + // grpcService is a wrapper that can hold a MessageServer. + // The instance should last for the whole life of the server, + // regardless of server restarting. + // This design is to solve the problem that grpc-go cannot gracefully + // unregister a service. 
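The doc comment just above records the design choice behind grpcService: grpc-go offers no way to gracefully unregister a service, so the wrapper is registered once for the whole server lifetime and a capture restart only swaps the handler held inside it (the patch later calls Reset(nil) and Reset(c.MessageServer) for exactly this). A minimal sketch of that pattern, assuming a simplified messageHandler interface rather than the real p2p.ServerWrapper internals:

package sketch

import "sync"

// messageHandler stands in for whatever the wrapper forwards to; it is an
// assumption for illustration, not part of the tiflow p2p package.
type messageHandler interface {
	Handle(msg []byte) error
}

// serverWrapper is registered with the gRPC server exactly once and never
// unregistered; restarting the capture only swaps the inner handler.
type serverWrapper struct {
	mu    sync.RWMutex
	inner messageHandler
}

// Reset installs a new handler; Reset(nil) detaches the current one while the
// capture is being rebuilt.
func (w *serverWrapper) Reset(h messageHandler) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.inner = h
}

func (w *serverWrapper) Handle(msg []byte) error {
	w.mu.RLock()
	defer w.mu.RUnlock()
	if w.inner == nil {
		return nil // no capture attached yet: drop the message
	}
	return w.inner.Handle(msg)
}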
+ grpcService *p2p.ServerWrapper + + cancel context.CancelFunc +} + +func (c *captureImpl) Run(ctx context.Context) error { + defer log.Info("the capture routine has exited") + // Limit the frequency of reset capture to avoid frequent recreating of resources + rl := rate.NewLimiter(0.05, 2) + for { + select { + case <-ctx.Done(): + return nil + default: + } + ctx, cancel := context.WithCancel(ctx) + c.cancel = cancel + err := rl.Wait(ctx) + if err != nil { + if errors.Cause(err) == context.Canceled { + return nil + } + return errors.Trace(err) + } + err = c.run(ctx) + // if capture suicided, reset the capture and run again. + // if the canceled error throw, there are two possible scenarios: + // 1. the internal context canceled, it means some error happened in + // the internal, and the routine is exited, we should restart + // the capture. + // 2. the parent context canceled, it means that the caller of + // the capture hope the capture to exit, and this loop will return + // in the above `select` block. + // if there are some **internal** context deadline exceeded (IO/network + // timeout), reset the capture and run again. + // + // TODO: make sure the internal cancel should return the real error + // instead of context.Canceled. + if cerror.ErrCaptureSuicide.Equal(err) || + context.Canceled == errors.Cause(err) || + context.DeadlineExceeded == errors.Cause(err) { + log.Info("capture recovered", zap.String("captureID", c.info.ID)) + continue + } + return errors.Trace(err) + } +} + +func (c *captureImpl) run(stdCtx context.Context) error { + err := c.reset(stdCtx) + if err != nil { + log.Error("reset capture failed", zap.Error(err)) + return errors.Trace(err) + } + if err != nil { + return errors.Trace(err) + } + // todo: run capture logic + return nil +} + +// reset the capture before run it. +func (c *captureImpl) reset(ctx context.Context) error { + return nil +} + +func (c *captureImpl) Close() { + defer c.cancel() + // Safety: Here we mainly want to stop the owner + // and ignore it if the owner does not exist or is not set. + o, _ := c.GetOwner() + if o != nil { + o.AsyncStop() + log.Info("owner closed", zap.String("captureID", c.info.ID)) + } + + c.captureMu.Lock() + defer c.captureMu.Unlock() + + c.grpcService.Reset(nil) + if c.MessageRouter != nil { + c.MessageRouter.Close() + c.MessageRouter = nil + } + log.Info("message router closed", zap.String("captureID", c.info.ID)) +} + +// Drain does nothing for now. +func (c *captureImpl) Drain() <-chan struct{} { + done := make(chan struct{}) + close(done) + return done +} + +func (c *captureImpl) Liveness() model.Liveness { + return c.liveness +} + +func (c *captureImpl) GetOwner() (owner.Owner, error) { + c.ownerMu.Lock() + defer c.ownerMu.Unlock() + return c.owner, nil +} + +func (c *captureImpl) GetController() (controller.Controller, error) { + c.ownerMu.Lock() + defer c.ownerMu.Unlock() + if c.owner == nil { + return nil, cerror.ErrNotOwner.GenWithStackByArgs() + } + return c.controller, nil +} + +func (c *captureImpl) GetControllerCaptureInfo(ctx context.Context) (*model.CaptureInfo, error) { + panic("implement me") +} + +func (c *captureImpl) IsController() bool { + c.captureMu.Lock() + defer c.captureMu.Unlock() + return c.controller != nil +} + +func (c *captureImpl) Info() (model.CaptureInfo, error) { + c.captureMu.Lock() + defer c.captureMu.Unlock() + // when c.reset has not been called yet, c.info is nil. 
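The restart loop in Run earlier in this file throttles capture resets with rate.NewLimiter(0.05, 2). In golang.org/x/time/rate terms that is one token every 20 seconds with a burst of two, so two back-to-back restarts are allowed and any further restart waits roughly 20 seconds. A small illustration of those semantics (the demo loop itself is not part of the patch):

package sketch

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func demoRestartThrottle() {
	// 0.05 tokens per second means one token every 20s, with a burst of 2.
	lim := rate.NewLimiter(0.05, 2)
	for i := 0; i < 3; i++ {
		start := time.Now()
		_ = lim.Wait(context.Background()) // blocks once the burst is exhausted
		fmt.Printf("restart %d allowed after %v\n", i, time.Since(start))
	}
}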
+ if c.info != nil { + return *c.info, nil + } + return model.CaptureInfo{}, cerror.ErrCaptureNotInitialized.GenWithStackByArgs() +} + +func (c *captureImpl) StatusProvider() owner.StatusProvider { + c.ownerMu.Lock() + defer c.ownerMu.Unlock() + if c.owner == nil { + return nil + } + panic("implement me") +} + +func (c *captureImpl) WriteDebugInfo(ctx context.Context, w io.Writer) { + panic("implement me") +} + +func (c *captureImpl) GetUpstreamManager() (*upstream.Manager, error) { + if c.upstreamManager == nil { + return nil, cerror.ErrUpstreamManagerNotReady + } + return c.upstreamManager, nil +} + +func (c *captureImpl) GetEtcdClient() etcd.CDCEtcdClient { + return c.EtcdClient +} + +func (c *captureImpl) IsReady() bool { + panic("implement me") +} + +func (c *captureImpl) GetUpstreamInfo(ctx context.Context, + id model.UpstreamID, + s string, +) (*model.UpstreamInfo, error) { + panic("implement me") +} diff --git a/pkg/cmd/server/server_test.go b/pkg/cmd/server/server_test.go index 7e67a43cf0c..64413bb4a9b 100644 --- a/pkg/cmd/server/server_test.go +++ b/pkg/cmd/server/server_test.go @@ -199,6 +199,10 @@ func TestParseCfg(t *testing.T) { CheckBalanceInterval: 60000000000, AddTableBatchSize: 50, }, + CDCV2: &config.CDCV2{ + Enable: false, + MetaStoreConfig: config.MetaStoreConfiguration{}, + }, }, ClusterID: "default", MaxMemoryPercentage: config.DisableMemoryLimit, @@ -338,6 +342,10 @@ check-balance-interval = "10s" CheckBalanceInterval: config.TomlDuration(10 * time.Second), AddTableBatchSize: 50, }, + CDCV2: &config.CDCV2{ + Enable: false, + MetaStoreConfig: config.MetaStoreConfiguration{}, + }, }, ClusterID: "default", MaxMemoryPercentage: config.DisableMemoryLimit, @@ -469,6 +477,10 @@ cert-allowed-cn = ["dd","ee"] CheckBalanceInterval: 60000000000, AddTableBatchSize: 50, }, + CDCV2: &config.CDCV2{ + Enable: false, + MetaStoreConfig: config.MetaStoreConfiguration{}, + }, }, ClusterID: "default", MaxMemoryPercentage: config.DisableMemoryLimit, @@ -527,5 +539,9 @@ unknown3 = 3 CheckBalanceInterval: 60000000000, AddTableBatchSize: 50, }, + CDCV2: &config.CDCV2{ + Enable: false, + MetaStoreConfig: config.MetaStoreConfiguration{}, + }, }, o.serverConfig.Debug) } diff --git a/pkg/config/cdc_v2.go b/pkg/config/cdc_v2.go new file mode 100644 index 00000000000..3d464928c45 --- /dev/null +++ b/pkg/config/cdc_v2.go @@ -0,0 +1,58 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "net/url" + + "github.com/pingcap/errors" +) + +// CDCV2 represents config for ticdc v2 +type CDCV2 struct { + // Enable represents if the cdc v2 is enabled or not + Enable bool `toml:"enable" json:"enable"` + // MetaStoreConfig represents config for new meta store configurations + MetaStoreConfig MetaStoreConfiguration `toml:"meta-store" json:"meta-store"` +} + +// MetaStoreConfiguration represents config for new meta store configurations +type MetaStoreConfiguration struct { + // URI is the address of the meta store. 
+ // for example: "mysql://127.0.0.1:3306/test" + URI string `toml:"uri" json:"uri"` +} + +// ValidateAndAdjust validates the meta store configurations +func (c *CDCV2) ValidateAndAdjust() error { + if !c.Enable { + return nil + } + if c.MetaStoreConfig.URI == "" { + return errors.New("missing meta store uri configuration") + } + parsedURI, err := url.Parse(c.MetaStoreConfig.URI) + if err != nil { + return errors.Trace(err) + } + if !isSupportedScheme(parsedURI.Scheme) { + return errors.Errorf("the %s scheme is not supported by meta store", parsedURI.Scheme) + } + return nil +} + +// isSupportedScheme returns true if the scheme is compatible with MySQL. +func isSupportedScheme(scheme string) bool { + return scheme == "mysql" +} diff --git a/pkg/config/cdc_v2_test.go b/pkg/config/cdc_v2_test.go new file mode 100644 index 00000000000..be7726c5783 --- /dev/null +++ b/pkg/config/cdc_v2_test.go @@ -0,0 +1,40 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestDefaultCDCV2Config(t *testing.T) { + defaultCDCV2 := defaultServerConfig.Debug.CDCV2 + require.NotNil(t, defaultCDCV2) + require.False(t, defaultCDCV2.Enable) +} + +func TestCDCV2ValidateAndAdjust(t *testing.T) { + cdcV2 := &CDCV2{ + Enable: false, + MetaStoreConfig: MetaStoreConfiguration{}, + } + require.Nil(t, cdcV2.ValidateAndAdjust()) + cdcV2.Enable = true + require.NotNil(t, cdcV2.ValidateAndAdjust()) + cdcV2.MetaStoreConfig.URI = "http://127.0.0.1" + require.NotNil(t, cdcV2.ValidateAndAdjust()) + cdcV2.MetaStoreConfig.URI = "mysql://127.0.0.1" + require.Nil(t, cdcV2.ValidateAndAdjust()) +} diff --git a/pkg/config/config_test_data.go b/pkg/config/config_test_data.go index 35caa03f18d..4c56fa54461 100644 --- a/pkg/config/config_test_data.go +++ b/pkg/config/config_test_data.go @@ -157,7 +157,13 @@ const ( "check-balance-interval": 60000000000, "add-table-batch-size": 50 }, - "enable-kv-connect-backoff": false + "enable-kv-connect-backoff": false, + "cdc-v2": { + "enable": false, + "meta-store": { + "uri": "" + } + } }, "cluster-id": "default", "max-memory-percentage": 0, diff --git a/pkg/config/debug.go b/pkg/config/debug.go index 3a7815090bc..d5c24cd10d2 100644 --- a/pkg/config/debug.go +++ b/pkg/config/debug.go @@ -28,6 +28,9 @@ type DebugConfig struct { // EnableKVConnectBackOff enables the backoff for kv connect. 
EnableKVConnectBackOff bool `toml:"enable-kv-connect-backoff" json:"enable-kv-connect-backoff"` + + // CDCV2 enables ticdc version 2 implementation with new metastore + CDCV2 *CDCV2 `toml:"cdc-v2" json:"cdc-v2"` } // ValidateAndAdjust validates and adjusts the debug configuration @@ -41,6 +44,9 @@ func (c *DebugConfig) ValidateAndAdjust() error { if err := c.Scheduler.ValidateAndAdjust(); err != nil { return errors.Trace(err) } + if err := c.CDCV2.ValidateAndAdjust(); err != nil { + return errors.Trace(err) + } return nil } diff --git a/pkg/config/server_config.go b/pkg/config/server_config.go index eda9f95c8e5..bac3c1384be 100644 --- a/pkg/config/server_config.go +++ b/pkg/config/server_config.go @@ -134,6 +134,7 @@ var defaultServerConfig = &ServerConfig{ Scheduler: NewDefaultSchedulerConfig(), EnableKVConnectBackOff: false, + CDCV2: &CDCV2{Enable: false}, }, ClusterID: "default", GcTunerMemoryThreshold: DisableMemoryLimit, From 1060be2fcdd356d0735b93e3f95295f128232f8d Mon Sep 17 00:00:00 2001 From: Jianyuan Jiang Date: Wed, 18 Oct 2023 22:18:35 +0800 Subject: [PATCH 08/15] owner(ticdc): refine status provider interface (#9922) close pingcap/tiflow#9921 --- cdc/owner/mock/status_provider_mock.go | 30 ------------ cdc/owner/owner.go | 36 +++++++-------- cdc/owner/owner_test.go | 14 ++++-- cdc/owner/status_provider.go | 64 ++++++++------------------ 4 files changed, 48 insertions(+), 96 deletions(-) diff --git a/cdc/owner/mock/status_provider_mock.go b/cdc/owner/mock/status_provider_mock.go index 22442877668..2cc69039b23 100644 --- a/cdc/owner/mock/status_provider_mock.go +++ b/cdc/owner/mock/status_provider_mock.go @@ -35,36 +35,6 @@ func (m *MockStatusProvider) EXPECT() *MockStatusProviderMockRecorder { return m.recorder } -// GetAllChangeFeedInfo mocks base method. -func (m *MockStatusProvider) GetAllChangeFeedInfo(ctx context.Context) (map[model.ChangeFeedID]*model.ChangeFeedInfo, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAllChangeFeedInfo", ctx) - ret0, _ := ret[0].(map[model.ChangeFeedID]*model.ChangeFeedInfo) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetAllChangeFeedInfo indicates an expected call of GetAllChangeFeedInfo. -func (mr *MockStatusProviderMockRecorder) GetAllChangeFeedInfo(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllChangeFeedInfo", reflect.TypeOf((*MockStatusProvider)(nil).GetAllChangeFeedInfo), ctx) -} - -// GetAllChangeFeedStatuses mocks base method. -func (m *MockStatusProvider) GetAllChangeFeedStatuses(ctx context.Context) (map[model.ChangeFeedID]*model.ChangeFeedStatusForAPI, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAllChangeFeedStatuses", ctx) - ret0, _ := ret[0].(map[model.ChangeFeedID]*model.ChangeFeedStatusForAPI) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetAllChangeFeedStatuses indicates an expected call of GetAllChangeFeedStatuses. -func (mr *MockStatusProviderMockRecorder) GetAllChangeFeedStatuses(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllChangeFeedStatuses", reflect.TypeOf((*MockStatusProvider)(nil).GetAllChangeFeedStatuses), ctx) -} - // GetAllTaskStatuses mocks base method. 
func (m *MockStatusProvider) GetAllTaskStatuses(ctx context.Context, changefeedID model.ChangeFeedID) (map[model.CaptureID]*model.TaskStatus, error) { m.ctrl.T.Helper() diff --git a/cdc/owner/owner.go b/cdc/owner/owner.go index de05c6f0aa1..a7d03f1345d 100644 --- a/cdc/owner/owner.go +++ b/cdc/owner/owner.go @@ -585,31 +585,31 @@ func (o *ownerImpl) handleJobs(ctx context.Context) { func (o *ownerImpl) handleQueries(query *Query) error { switch query.Tp { - case QueryAllChangeFeedStatuses: - ret := map[model.ChangeFeedID]*model.ChangeFeedStatusForAPI{} - for cfID, cfReactor := range o.changefeeds { - ret[cfID] = &model.ChangeFeedStatusForAPI{} - if cfReactor.latestStatus == nil { - continue - } - ret[cfID].ResolvedTs = cfReactor.resolvedTs - ret[cfID].CheckpointTs = cfReactor.latestStatus.CheckpointTs + case QueryChangeFeedStatuses: + cfReactor, ok := o.changefeeds[query.ChangeFeedID] + if !ok { + query.Data = nil + return nil } + ret := &model.ChangeFeedStatusForAPI{} + ret.ResolvedTs = cfReactor.resolvedTs + ret.CheckpointTs = cfReactor.latestStatus.CheckpointTs query.Data = ret - case QueryAllChangeFeedInfo: - ret := map[model.ChangeFeedID]*model.ChangeFeedInfo{} - for cfID, cfReactor := range o.changefeeds { - if cfReactor.latestInfo == nil { - ret[cfID] = &model.ChangeFeedInfo{} - continue - } + case QueryChangefeedInfo: + cfReactor, ok := o.changefeeds[query.ChangeFeedID] + if !ok { + query.Data = nil + return nil + } + if cfReactor.latestInfo == nil { + query.Data = &model.ChangeFeedInfo{} + } else { var err error - ret[cfID], err = cfReactor.latestInfo.Clone() + query.Data, err = cfReactor.latestInfo.Clone() if err != nil { return errors.Trace(err) } } - query.Data = ret case QueryAllTaskStatuses: cfReactor, ok := o.changefeeds[query.ChangeFeedID] if !ok { diff --git a/cdc/owner/owner_test.go b/cdc/owner/owner_test.go index 9f9dcd1f198..0fdae64e1ac 100644 --- a/cdc/owner/owner_test.go +++ b/cdc/owner/owner_test.go @@ -425,11 +425,20 @@ func TestHandleJobsDontBlock(t *testing.T) { ctx1, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() - var errIn error var infos map[model.ChangeFeedID]*model.ChangeFeedInfo done := make(chan struct{}) go func() { - infos, errIn = statusProvider.GetAllChangeFeedInfo(ctx1) + info1, err := statusProvider.GetChangeFeedInfo(ctx1, cf1) + require.Nil(t, err) + info2, err := statusProvider.GetChangeFeedInfo(ctx1, cf2) + require.Nil(t, err) + info3, err := statusProvider.GetChangeFeedInfo(ctx1, cf3) + require.Nil(t, err) + infos = map[model.ChangeFeedID]*model.ChangeFeedInfo{ + cf1: info1, + cf2: info2, + cf3: info3, + } done <- struct{}{} }() @@ -447,7 +456,6 @@ WorkLoop: require.Nil(t, err) } } - require.Nil(t, errIn) require.NotNil(t, infos[cf1]) require.NotNil(t, infos[cf2]) require.NotNil(t, infos[cf3]) diff --git a/cdc/owner/status_provider.go b/cdc/owner/status_provider.go index 9cfdff04ff5..f1b6a057ddd 100644 --- a/cdc/owner/status_provider.go +++ b/cdc/owner/status_provider.go @@ -24,15 +24,9 @@ import ( // StatusProvider provide some func to get meta-information from owner // The interface is thread-safe. type StatusProvider interface { - // GetAllChangeFeedStatuses returns all changefeeds' runtime status. - GetAllChangeFeedStatuses(ctx context.Context) (map[model.ChangeFeedID]*model.ChangeFeedStatusForAPI, error) - // GetChangeFeedStatus returns a changefeeds' runtime status. 
GetChangeFeedStatus(ctx context.Context, changefeedID model.ChangeFeedID) (*model.ChangeFeedStatusForAPI, error) - // GetAllChangeFeedInfo returns all changefeeds' info. - GetAllChangeFeedInfo(ctx context.Context) (map[model.ChangeFeedID]*model.ChangeFeedInfo, error) - // GetChangeFeedInfo returns a changefeeds' info. GetChangeFeedInfo(ctx context.Context, changefeedID model.ChangeFeedID) (*model.ChangeFeedInfo, error) @@ -55,12 +49,8 @@ type StatusProvider interface { type QueryType int32 const ( - // QueryAllChangeFeedStatuses query all changefeed status. - QueryAllChangeFeedStatuses QueryType = iota - // QueryAllChangeFeedInfo is the type of query all changefeed info. - QueryAllChangeFeedInfo // QueryAllTaskStatuses is the type of query all task statuses. - QueryAllTaskStatuses + QueryAllTaskStatuses QueryType = iota // QueryProcessors is the type of query processors. QueryProcessors // QueryCaptures is the type of query captures info. @@ -68,7 +58,11 @@ const ( // QueryHealth is the type of query cluster health info. QueryHealth // QueryOwner is the type of query changefeed owner - QueryOwner = 6 + QueryOwner + // QueryChangefeedInfo is the type of query changefeed info + QueryChangefeedInfo + // QueryChangeFeedStatuses is the type of query changefeed status + QueryChangeFeedStatuses ) // Query wraps query command and return results. @@ -88,56 +82,36 @@ type ownerStatusProvider struct { owner Owner } -func (p *ownerStatusProvider) GetAllChangeFeedStatuses(ctx context.Context) ( - map[model.ChangeFeedID]*model.ChangeFeedStatusForAPI, error, -) { - query := &Query{ - Tp: QueryAllChangeFeedStatuses, - } - if err := p.sendQueryToOwner(ctx, query); err != nil { - return nil, errors.Trace(err) - } - return query.Data.(map[model.ChangeFeedID]*model.ChangeFeedStatusForAPI), nil -} - func (p *ownerStatusProvider) GetChangeFeedStatus(ctx context.Context, changefeedID model.ChangeFeedID, ) (*model.ChangeFeedStatusForAPI, error) { - statuses, err := p.GetAllChangeFeedStatuses(ctx) - if err != nil { - return nil, errors.Trace(err) - } - status, exist := statuses[changefeedID] - if !exist { - return nil, cerror.ErrChangeFeedNotExists.GenWithStackByArgs(changefeedID) - } - return status, nil -} - -func (p *ownerStatusProvider) GetAllChangeFeedInfo(ctx context.Context) ( - map[model.ChangeFeedID]*model.ChangeFeedInfo, error, -) { query := &Query{ - Tp: QueryAllChangeFeedInfo, + Tp: QueryChangeFeedStatuses, + ChangeFeedID: changefeedID, } if err := p.sendQueryToOwner(ctx, query); err != nil { return nil, errors.Trace(err) } - return query.Data.(map[model.ChangeFeedID]*model.ChangeFeedInfo), nil + if query.Data == nil { + return nil, cerror.ErrChangeFeedNotExists.GenWithStackByArgs(changefeedID) + } + return query.Data.(*model.ChangeFeedStatusForAPI), nil } func (p *ownerStatusProvider) GetChangeFeedInfo(ctx context.Context, changefeedID model.ChangeFeedID, ) (*model.ChangeFeedInfo, error) { - infos, err := p.GetAllChangeFeedInfo(ctx) - if err != nil { + query := &Query{ + Tp: QueryChangefeedInfo, + ChangeFeedID: changefeedID, + } + if err := p.sendQueryToOwner(ctx, query); err != nil { return nil, errors.Trace(err) } - info, exist := infos[changefeedID] - if !exist { + if query.Data == nil { return nil, cerror.ErrChangeFeedNotExists.GenWithStackByArgs(changefeedID) } - return info, nil + return query.Data.(*model.ChangeFeedInfo), nil } func (p *ownerStatusProvider) GetAllTaskStatuses(ctx context.Context, From 314c221420bccb687c396cc195ce8a48a63db312 Mon Sep 17 00:00:00 2001 From: Jianyuan Jiang Date: 
Thu, 19 Oct 2023 14:39:29 +0800 Subject: [PATCH 09/15] cdcv2(ticdc): init meta store and controller (#9929) close pingcap/tiflow#9928 --- cdcv2/capture/capture.go | 101 ++++++++++++++++++++++-- cdcv2/controller/controller.go | 140 +++++++++++++++++++++++++++++++++ pkg/config/cdc_v2.go | 75 ++++++++++++++++++ pkg/config/cdc_v2_test.go | 15 ++++ pkg/config/config_test_data.go | 5 +- 5 files changed, 330 insertions(+), 6 deletions(-) create mode 100644 cdcv2/controller/controller.go diff --git a/cdcv2/capture/capture.go b/cdcv2/capture/capture.go index a5bac0fdb98..2c38c075168 100644 --- a/cdcv2/capture/capture.go +++ b/cdcv2/capture/capture.go @@ -15,9 +15,11 @@ package capture import ( "context" + "database/sql" "io" "sync" + "github.com/google/uuid" "github.com/pingcap/errors" "github.com/pingcap/log" "github.com/pingcap/tiflow/cdc/capture" @@ -25,13 +27,19 @@ import ( "github.com/pingcap/tiflow/cdc/model" "github.com/pingcap/tiflow/cdc/owner" "github.com/pingcap/tiflow/cdc/processor/sourcemanager/engine/factory" + controllerv2 "github.com/pingcap/tiflow/cdcv2/controller" + "github.com/pingcap/tiflow/cdcv2/metadata" + msql "github.com/pingcap/tiflow/cdcv2/metadata/sql" "github.com/pingcap/tiflow/pkg/config" + cdcContext "github.com/pingcap/tiflow/pkg/context" cerror "github.com/pingcap/tiflow/pkg/errors" "github.com/pingcap/tiflow/pkg/etcd" "github.com/pingcap/tiflow/pkg/p2p" "github.com/pingcap/tiflow/pkg/upstream" + "github.com/pingcap/tiflow/pkg/version" pd "github.com/tikv/pd/client" "go.uber.org/zap" + "golang.org/x/sync/errgroup" "golang.org/x/time/rate" ) @@ -88,6 +96,10 @@ type captureImpl struct { grpcService *p2p.ServerWrapper cancel context.CancelFunc + + storage *sql.DB + captureObservation metadata.CaptureObservation + controllerObserver metadata.ControllerObservation } func (c *captureImpl) Run(ctx context.Context) error { @@ -139,15 +151,94 @@ func (c *captureImpl) run(stdCtx context.Context) error { log.Error("reset capture failed", zap.Error(err)) return errors.Trace(err) } - if err != nil { - return errors.Trace(err) - } - // todo: run capture logic - return nil + defer func() { + c.Close() + c.grpcService.Reset(nil) + }() + + g, stdCtx := errgroup.WithContext(stdCtx) + + ctx := cdcContext.NewContext(stdCtx, &cdcContext.GlobalVars{ + CaptureInfo: c.info, + EtcdClient: c.EtcdClient, + MessageServer: c.MessageServer, + MessageRouter: c.MessageRouter, + SortEngineFactory: c.sortEngineFactory, + }) + + g.Go(func() error { + return c.MessageServer.Run(ctx, c.MessageRouter.GetLocalChannel()) + }) + + g.Go(func() error { + return c.captureObservation.Run(ctx, + func(ctx context.Context, + controllerObserver metadata.ControllerObservation, + ) error { + c.controllerObserver = controllerObserver + ctrl := controllerv2.NewController( + c.upstreamManager, + c.info, controllerObserver, c.captureObservation) + c.controller = ctrl + return ctrl.Run(ctx) + }) + }) + return errors.Trace(g.Wait()) } // reset the capture before run it. 
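The new run above wires the message server and the capture observation into a single errgroup: the first goroutine to return a non-nil error cancels the shared context, and g.Wait reports that error back to Run, whose restart logic then classifies it. A self-contained sketch of that contract, with placeholder tasks standing in for MessageServer.Run and captureObservation.Run:

package sketch

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func runBoth(ctx context.Context) error {
	g, ctx := errgroup.WithContext(ctx)
	// Placeholder for MessageServer.Run: blocks until the shared ctx is done.
	g.Go(func() error {
		<-ctx.Done()
		return ctx.Err()
	})
	// Placeholder for captureObservation.Run: fails immediately, which cancels
	// ctx and unblocks the goroutine above.
	g.Go(func() error {
		return fmt.Errorf("capture observation stopped")
	})
	// Wait returns the first non-nil error from either goroutine.
	return g.Wait()
}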
func (c *captureImpl) reset(ctx context.Context) error { + c.captureMu.Lock() + defer c.captureMu.Unlock() + c.info = &model.CaptureInfo{ + ID: uuid.New().String(), + AdvertiseAddr: c.config.AdvertiseAddr, + Version: version.ReleaseVersion, + } + + if c.upstreamManager != nil { + c.upstreamManager.Close() + } + c.upstreamManager = upstream.NewManager(ctx, c.EtcdClient.GetGCServiceID()) + _, err := c.upstreamManager.AddDefaultUpstream(c.pdEndpoints, c.config.Security, c.pdClient) + if err != nil { + return errors.Trace(err) + } + + c.grpcService.Reset(nil) + + if c.MessageRouter != nil { + c.MessageRouter.Close() + c.MessageRouter = nil + } + messageServerConfig := c.config.Debug.Messages.ToMessageServerConfig() + c.MessageServer = p2p.NewMessageServer(c.info.ID, messageServerConfig) + c.grpcService.Reset(c.MessageServer) + + messageClientConfig := c.config.Debug.Messages.ToMessageClientConfig() + + // Puts the advertise-addr of the local node to the client config. + // This is for metrics purpose only, so that the receiver knows which + // node the connections are from. + advertiseAddr := c.config.AdvertiseAddr + messageClientConfig.AdvertisedAddr = advertiseAddr + + c.MessageRouter = p2p.NewMessageRouterWithLocalClient(c.info.ID, c.config.Security, messageClientConfig) + + dsnConfig, err := c.config.Debug.CDCV2.MetaStoreConfig.GenDSN() + if err != nil { + return errors.Trace(err) + } + c.storage, err = sql.Open("mysql", dsnConfig.FormatDSN()) + if err != nil { + return errors.Trace(err) + } + captureDB, err := msql.NewCaptureObservation(c.storage, c.info) + c.captureObservation = captureDB + if err != nil { + return errors.Trace(err) + } + log.Info("capture initialized", zap.Any("capture", c.info)) return nil } diff --git a/cdcv2/controller/controller.go b/cdcv2/controller/controller.go new file mode 100644 index 00000000000..6c3b5c10798 --- /dev/null +++ b/cdcv2/controller/controller.go @@ -0,0 +1,140 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +//nolint:unused +package controller + +import ( + "context" + "sync/atomic" + "time" + + "github.com/pingcap/log" + "github.com/pingcap/tiflow/cdc/controller" + "github.com/pingcap/tiflow/cdc/model" + "github.com/pingcap/tiflow/cdcv2/metadata" + cerror "github.com/pingcap/tiflow/pkg/errors" + "github.com/pingcap/tiflow/pkg/orchestrator" + "github.com/pingcap/tiflow/pkg/upstream" + "go.uber.org/zap" +) + +var _ controller.Controller = &controllerImpl{} + +type controllerImpl struct { + captureInfo *model.CaptureInfo + captures map[model.CaptureID]*model.CaptureInfo + upstreamManager *upstream.Manager + + lastTickTime time.Time + // bootstrapped specifies whether the controller has been initialized. + // This will only be done when the controller starts the first Tick. + // NOTICE: Do not use it in a method other than tick unexpectedly, + // as it is not a thread-safe value. 
+ bootstrapped bool + closed int32 + + controllerObservation metadata.ControllerObservation + captureObservation metadata.CaptureObservation +} + +// NewController creates a new Controller +func NewController( + upstreamManager *upstream.Manager, + captureInfo *model.CaptureInfo, + controllerObservation metadata.ControllerObservation, + captureObservation metadata.CaptureObservation, +) *controllerImpl { + return &controllerImpl{ + upstreamManager: upstreamManager, + captures: map[model.CaptureID]*model.CaptureInfo{}, + lastTickTime: time.Now(), + captureInfo: captureInfo, + controllerObservation: controllerObservation, + captureObservation: captureObservation, + } +} + +func (o *controllerImpl) Run(stdCtx context.Context) error { + tick := time.Tick(time.Second * 5) + for { + select { + case <-stdCtx.Done(): + return stdCtx.Err() + case <-tick: + changefeeds, captures, err := o.controllerObservation.ScheduleSnapshot() + if err != nil { + log.Error("failed to get snapshot", zap.Error(err)) + } + log.Info("controller snapshot", + zap.Int("changefeeds", len(changefeeds)), + zap.Int("captures", len(captures))) + + // if closed, exit the etcd worker loop + if atomic.LoadInt32(&o.closed) != 0 { + return cerror.ErrReactorFinished.GenWithStackByArgs() + } + } + } +} + +func (o *controllerImpl) Tick(ctx context.Context, + state orchestrator.ReactorState, +) (nextState orchestrator.ReactorState, err error) { + // TODO implement me + panic("implement me") +} + +func (o *controllerImpl) AsyncStop() { + // TODO implement me + panic("implement me") +} + +func (o *controllerImpl) GetChangefeedOwnerCaptureInfo(id model.ChangeFeedID) *model.CaptureInfo { + // TODO implement me + panic("implement me") +} + +func (o *controllerImpl) GetAllChangeFeedInfo(ctx context.Context) (map[model.ChangeFeedID]*model.ChangeFeedInfo, error) { + // TODO implement me + panic("implement me") +} + +func (o *controllerImpl) GetAllChangeFeedCheckpointTs(ctx context.Context) (map[model.ChangeFeedID]uint64, error) { + // TODO implement me + panic("implement me") +} + +func (o *controllerImpl) GetCaptures(ctx context.Context) ([]*model.CaptureInfo, error) { + // TODO implement me + panic("implement me") +} + +func (o *controllerImpl) GetProcessors(ctx context.Context) ([]*model.ProcInfoSnap, error) { + // TODO implement me + panic("implement me") +} + +func (o *controllerImpl) IsChangefeedExists(ctx context.Context, id model.ChangeFeedID) (bool, error) { + // TODO implement me + panic("implement me") +} + +func (o *controllerImpl) CreateChangefeed( + ctx context.Context, + upstreamInfo *model.UpstreamInfo, + changefeedInfo *model.ChangeFeedInfo, +) error { + // TODO implement me + panic("implement me") +} diff --git a/pkg/config/cdc_v2.go b/pkg/config/cdc_v2.go index 3d464928c45..1869e45d4f0 100644 --- a/pkg/config/cdc_v2.go +++ b/pkg/config/cdc_v2.go @@ -14,9 +14,14 @@ package config import ( + "fmt" + "net" "net/url" + dmysql "github.com/go-sql-driver/mysql" "github.com/pingcap/errors" + cerror "github.com/pingcap/tiflow/pkg/errors" + "github.com/pingcap/tiflow/pkg/security" ) // CDCV2 represents config for ticdc v2 @@ -32,6 +37,10 @@ type MetaStoreConfiguration struct { // URI is the address of the meta store. // for example: "mysql://127.0.0.1:3306/test" URI string `toml:"uri" json:"uri"` + // SSLCA is the path of the CA certificate file. 
+ SSLCa string `toml:"ssl-ca" json:"ssl-ca"` + SSLCert string `toml:"ssl-cert" json:"ssl-cert"` + SSLKey string `toml:"ssl-key" json:"ssl-key"` } // ValidateAndAdjust validates the meta store configurations @@ -52,6 +61,72 @@ func (c *CDCV2) ValidateAndAdjust() error { return nil } +// GenDSN generates a DSN from the given metastore config. +func (cfg *MetaStoreConfiguration) GenDSN() (*dmysql.Config, error) { + endpoint, err := url.Parse(cfg.URI) + if err != nil { + return nil, errors.Trace(err) + } + tls, err := cfg.getSSLParam() + if err != nil { + return nil, errors.Trace(err) + } + username := endpoint.User.Username() + if username == "" { + username = "root" + } + password, _ := endpoint.User.Password() + + hostName := endpoint.Hostname() + port := endpoint.Port() + if port == "" { + port = "3306" + } + + // This will handle the IPv6 address format. + var dsn *dmysql.Config + host := net.JoinHostPort(hostName, port) + // dsn format of the driver: + // [username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN] + dsnStr := fmt.Sprintf("%s:%s@tcp(%s)%s%s", username, password, host, endpoint.Path, tls) + if dsn, err = dmysql.ParseDSN(dsnStr); err != nil { + return nil, errors.Trace(err) + } + + // create test db used for parameter detection + // Refer https://github.com/go-sql-driver/mysql#parameters + if dsn.Params == nil { + dsn.Params = make(map[string]string) + } + // enable parseTime for time.Time type + dsn.Params["parseTime"] = "true" + for key, pa := range endpoint.Query() { + dsn.Params[key] = pa[0] + } + return dsn, nil +} + +func (cfg *MetaStoreConfiguration) getSSLParam() (string, error) { + if len(cfg.SSLCa) == 0 || len(cfg.SSLCert) == 0 || len(cfg.SSLKey) == 0 { + return "", nil + } + credential := security.Credential{ + CAPath: cfg.SSLCa, + CertPath: cfg.SSLCert, + KeyPath: cfg.SSLKey, + } + tlsCfg, err := credential.ToTLSConfig() + if err != nil { + return "", errors.Trace(err) + } + name := "cdc_mysql_tls_meta_store" + err = dmysql.RegisterTLSConfig(name, tlsCfg) + if err != nil { + return "", cerror.ErrMySQLConnectionError.Wrap(err).GenWithStack("fail to open MySQL connection") + } + return "?tls=" + name, nil +} + // isSupportedScheme returns true if the scheme is compatible with MySQL. 
func isSupportedScheme(scheme string) bool { return scheme == "mysql" diff --git a/pkg/config/cdc_v2_test.go b/pkg/config/cdc_v2_test.go index be7726c5783..43e148cde18 100644 --- a/pkg/config/cdc_v2_test.go +++ b/pkg/config/cdc_v2_test.go @@ -38,3 +38,18 @@ func TestCDCV2ValidateAndAdjust(t *testing.T) { cdcV2.MetaStoreConfig.URI = "mysql://127.0.0.1" require.Nil(t, cdcV2.ValidateAndAdjust()) } + +func TestGenDSN(t *testing.T) { + storeConfig := &MetaStoreConfiguration{ + URI: "mysql://root:abcd@127.0.0.1:4000/cdc?a=c&timeout=1m", + } + dsn, err := storeConfig.GenDSN() + require.Nil(t, err) + require.Equal(t, "root", dsn.User) + require.Equal(t, "abcd", dsn.Passwd) + require.Equal(t, "127.0.0.1:4000", dsn.Addr) + require.Equal(t, "cdc", dsn.DBName) + require.Equal(t, "true", dsn.Params["parseTime"]) + require.Equal(t, "1m", dsn.Params["timeout"]) + require.Equal(t, "c", dsn.Params["a"]) +} diff --git a/pkg/config/config_test_data.go b/pkg/config/config_test_data.go index 4c56fa54461..18d803b30f6 100644 --- a/pkg/config/config_test_data.go +++ b/pkg/config/config_test_data.go @@ -161,7 +161,10 @@ const ( "cdc-v2": { "enable": false, "meta-store": { - "uri": "" + "uri": "", + "ssl-ca": "", + "ssl-cert": "", + "ssl-key": "" } } }, From 582c4df29a390bc69dfffece2e5e36ae8e993043 Mon Sep 17 00:00:00 2001 From: CharlesCheung <61726649+CharlesCheung96@users.noreply.github.com> Date: Thu, 19 Oct 2023 20:49:59 +0800 Subject: [PATCH 10/15] metadata(cdc): add unit tests (part 3) (#9843) ref pingcap/tiflow#9196 --- cdc/model/errors_test.go | 50 ++ cdcv2/metadata/model.go | 2 +- cdcv2/metadata/sql/client.go | 4 +- cdcv2/metadata/sql/client_orm.go | 26 +- cdcv2/metadata/sql/client_test.go | 788 +++++++++++++++++++++++++++++- cdcv2/metadata/sql/observation.go | 2 +- pkg/config/replica_config.go | 8 +- 7 files changed, 851 insertions(+), 29 deletions(-) diff --git a/cdc/model/errors_test.go b/cdc/model/errors_test.go index 74559dc1937..64bf21ccd31 100644 --- a/cdc/model/errors_test.go +++ b/cdc/model/errors_test.go @@ -15,12 +15,15 @@ package model import ( "testing" + "time" cerror "github.com/pingcap/tiflow/pkg/errors" "github.com/stretchr/testify/require" ) func TestIsChangefeedNotRetryError(t *testing.T) { + t.Parallel() + cases := []struct { err RunningError result bool @@ -55,3 +58,50 @@ func TestIsChangefeedNotRetryError(t *testing.T) { require.Equal(t, c.result, c.err.ShouldFailChangefeed()) } } + +func TestRunningErrorScan(t *testing.T) { + t.Parallel() + + timeNow := time.Now() + timeNowJSON, err := timeNow.MarshalJSON() + require.Nil(t, err) + + newTime := time.Time{} + err = newTime.UnmarshalJSON(timeNowJSON) + require.Nil(t, err) + // timeNow: 2023-10-13 16:48:08.345614 +0800 CST m=+0.027639459 + // newTime: 2023-10-13 16:48:08.345614 +0800 CST + require.NotEqual(t, timeNow, newTime) + + cases := []struct { + err RunningError + result string + }{ + { + RunningError{ + Time: timeNow, + Addr: "", + Code: string(cerror.ErrAPIGetPDClientFailed.RFCCode()), + Message: cerror.ErrAPIGetPDClientFailed.Error(), + }, + `{"time":` + string(timeNowJSON) + + `,"addr":"","code":"CDC:ErrAPIGetPDClientFailed","message":"` + + cerror.ErrAPIGetPDClientFailed.Error() + `"}`, + }, + } + + for _, c := range cases { + v, err := c.err.Value() + b, ok := v.([]byte) + require.True(t, ok) + require.Nil(t, err) + require.Equal(t, c.result, string(b)) + + var err2 RunningError + err = err2.Scan(b) + require.Nil(t, err) + require.Equal(t, c.err.Addr, err2.Addr) + require.Equal(t, c.err.Code, err2.Code) + require.Equal(t, 
c.err.Message, err2.Message) + } +} diff --git a/cdcv2/metadata/model.go b/cdcv2/metadata/model.go index 10c35cfb632..2a89f2d6491 100644 --- a/cdcv2/metadata/model.go +++ b/cdcv2/metadata/model.go @@ -165,7 +165,7 @@ func (s *SchedState) fromString(str string) error { // Value implements the driver.Valuer interface. func (s SchedState) Value() (driver.Value, error) { - return s.toString(), nil + return []byte(s.toString()), nil } // Scan implements the sql.Scanner interface. diff --git a/cdcv2/metadata/sql/client.go b/cdcv2/metadata/sql/client.go index 885f7bdd8a1..d4f27a7a4c5 100644 --- a/cdcv2/metadata/sql/client.go +++ b/cdcv2/metadata/sql/client.go @@ -171,8 +171,8 @@ type progressClient[T TxnContext] interface { createProgress(tx T, pr *ProgressDO) error deleteProgress(tx T, pr *ProgressDO) error updateProgress(tx T, pr *ProgressDO) error - queryProgresss(tx T) ([]*ProgressDO, error) - queryProgresssByUpdateAt(tx T, lastUpdateAt time.Time) ([]*ProgressDO, error) + queryProgresses(tx T) ([]*ProgressDO, error) + queryProgressesByUpdateAt(tx T, lastUpdateAt time.Time) ([]*ProgressDO, error) queryProgressByCaptureID(tx T, id string) (*ProgressDO, error) queryProgressByCaptureIDsWithLock(tx T, ids []string) ([]*ProgressDO, error) } diff --git a/cdcv2/metadata/sql/client_orm.go b/cdcv2/metadata/sql/client_orm.go index d8643c53242..13bc2dcb3e3 100644 --- a/cdcv2/metadata/sql/client_orm.go +++ b/cdcv2/metadata/sql/client_orm.go @@ -133,7 +133,7 @@ func (c *ormClient) queryUpstreamsByUpdateAt(tx *gorm.DB, lastUpdateAt time.Time //nolint:unused func (c *ormClient) queryUpstreamByID(tx *gorm.DB, id uint64) (*UpstreamDO, error) { up := &UpstreamDO{} - ret := tx.Where("id = ?", id).First(up) + ret := tx.Where("id = ?", id).Limit(1).Find(up) if err := handleSingleOpErr(ret, 1, "QueryUpstreamsByUpdateAt"); err != nil { return nil, errors.Trace(err) } @@ -224,7 +224,7 @@ func (c *ormClient) queryChangefeedInfosByUpdateAt(tx *gorm.DB, lastUpdateAt tim // nolint:unused func (c *ormClient) queryChangefeedInfosByUUIDs(tx *gorm.DB, uuids ...uint64) ([]*ChangefeedInfoDO, error) { infos := []*ChangefeedInfoDO{} - ret := tx.Where("uuid in (?)", uuids).Find(&infos) + ret := tx.Where("uuid IN (?)", uuids).Find(&infos) if err := handleSingleOpErr(ret, int64(len(uuids)), "QueryChangefeedInfosByUUIDs"); err != nil { // TODO: optimize the behavior when some uuids are not found. return infos, errors.Trace(err) @@ -237,7 +237,7 @@ func (c *ormClient) queryChangefeedInfosByUUIDs(tx *gorm.DB, uuids ...uint64) ([ //nolint:unused func (c *ormClient) queryChangefeedInfoByUUID(tx *gorm.DB, uuid uint64) (*ChangefeedInfoDO, error) { info := &ChangefeedInfoDO{} - ret := tx.Where("uuid = ?", uuid).First(info) + ret := tx.Where("uuid = ?", uuid).Limit(1).Find(info) // TODO(CharlesCheung): handle record not found error. 
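Several hunks in this file swap First for Limit(1).Find. The behavioral difference sits exactly where the TODO above points: in GORM, First reports a missing row as gorm.ErrRecordNotFound, while Limit(1).Find leaves Error nil and simply returns RowsAffected == 0, which the surrounding handleSingleOpErr checks can then interpret. A short sketch of the two behaviors, with an illustrative model in place of the real DO structs:

package sketch

import (
	"errors"

	"gorm.io/gorm"
)

// illustrativeRow is a placeholder model; the real queries target the
// changefeed_info, changefeed_state, schedule and progress tables.
type illustrativeRow struct {
	ID uint64
}

func lookupWithFirst(db *gorm.DB, id uint64) (*illustrativeRow, error) {
	row := &illustrativeRow{}
	// A missing row surfaces as gorm.ErrRecordNotFound and must be
	// special-cased by the caller.
	err := db.Where("id = ?", id).First(row).Error
	if errors.Is(err, gorm.ErrRecordNotFound) {
		return nil, nil
	}
	return row, err
}

func lookupWithFind(db *gorm.DB, id uint64) (*illustrativeRow, error) {
	row := &illustrativeRow{}
	// A missing row is just RowsAffected == 0 with a nil Error.
	ret := db.Where("id = ?", id).Limit(1).Find(row)
	if ret.Error != nil || ret.RowsAffected == 0 {
		return nil, ret.Error
	}
	return row, nil
}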
if err := handleSingleOpErr(ret, 1, "QueryChangefeedInfoByUUID"); err != nil { @@ -317,7 +317,7 @@ func (c *ormClient) queryChangefeedStatesByUpdateAt(tx *gorm.DB, lastUpdateAt ti //nolint:unused func (c *ormClient) queryChangefeedStateByUUID(tx *gorm.DB, uuid uint64) (*ChangefeedStateDO, error) { state := &ChangefeedStateDO{} - ret := tx.Where("changefeed_uuid = ?", uuid).First(state) + ret := tx.Where("changefeed_uuid = ?", uuid).Limit(1).Find(state) if err := handleSingleOpErr(ret, 1, "QueryChangefeedStateByUUID"); err != nil { return nil, errors.Trace(err) } @@ -344,7 +344,7 @@ func (c *ormClient) queryChangefeedStateByUUIDWithLock(tx *gorm.DB, uuid uint64) Clauses(clause.Locking{ Strength: "SHARE", Table: clause.Table{Name: clause.CurrentTable}, - }).First(state) + }).Limit(1).Find(state) if err := handleSingleOpErr(ret, 1, "QueryChangefeedStateByUUIDWithLock"); err != nil { return nil, errors.Trace(err) } @@ -462,7 +462,7 @@ func (c *ormClient) querySchedulesByOwnerIDAndUpdateAt(tx *gorm.DB, captureID st //nolint:unused func (c *ormClient) queryScheduleByUUID(tx *gorm.DB, uuid uint64) (*ScheduleDO, error) { schedule := &ScheduleDO{} - ret := tx.Where("changefeed_uuid = ?", uuid).First(schedule) + ret := tx.Where("changefeed_uuid = ?", uuid).Limit(1).Find(schedule) if err := handleSingleOpErr(ret, 1, "QueryScheduleByUUID"); err != nil { return nil, errors.Trace(err) } @@ -513,25 +513,25 @@ func (c *ormClient) updateProgress(tx *gorm.DB, pr *ProgressDO) error { return nil } -// queryProgresss implements the progressClient interface. +// queryProgresses implements the progressClient interface. // //nolint:unused -func (c *ormClient) queryProgresss(tx *gorm.DB) ([]*ProgressDO, error) { +func (c *ormClient) queryProgresses(tx *gorm.DB) ([]*ProgressDO, error) { progresses := []*ProgressDO{} ret := tx.Find(&progresses) - if err := handleSingleOpErr(ret, -1, "QueryProgresss"); err != nil { + if err := handleSingleOpErr(ret, -1, "queryProgresses"); err != nil { return nil, errors.Trace(err) } return progresses, nil } -// queryProgresssByUpdateAt implements the progressClient interface. +// queryProgressesByUpdateAt implements the progressClient interface. 
// //nolint:unused -func (c *ormClient) queryProgresssByUpdateAt(tx *gorm.DB, lastUpdateAt time.Time) ([]*ProgressDO, error) { +func (c *ormClient) queryProgressesByUpdateAt(tx *gorm.DB, lastUpdateAt time.Time) ([]*ProgressDO, error) { progresses := []*ProgressDO{} ret := tx.Where("update_at > ?", lastUpdateAt).Find(&progresses) - if err := handleSingleOpErr(ret, -1, "QueryProgresssByUpdateAt"); err != nil { + if err := handleSingleOpErr(ret, -1, "queryProgressesByUpdateAt"); err != nil { return nil, errors.Trace(err) } return progresses, nil @@ -542,7 +542,7 @@ func (c *ormClient) queryProgresssByUpdateAt(tx *gorm.DB, lastUpdateAt time.Time //nolint:unused func (c *ormClient) queryProgressByCaptureID(tx *gorm.DB, id string) (*ProgressDO, error) { progress := &ProgressDO{} - ret := tx.Where("capture_id = ?", id).First(progress) + ret := tx.Where("capture_id = ?", id).Limit(1).Find(progress) if err := handleSingleOpErr(ret, 1, "QueryProgressByCaptureID"); err != nil { return nil, errors.Trace(err) } diff --git a/cdcv2/metadata/sql/client_test.go b/cdcv2/metadata/sql/client_test.go index 02fde5fd8f6..0812c1fe994 100644 --- a/cdcv2/metadata/sql/client_test.go +++ b/cdcv2/metadata/sql/client_test.go @@ -396,6 +396,44 @@ func TestProgressClientExecSQL(t *testing.T) { // ================================ Test Query ================================= +type queryType int32 + +const ( + queryTypePoint queryType = iota + queryTypeRange + queryTypeFullTable +) + +func runMockQueryTest( + _ *testing.T, mock sqlmock.Sqlmock, + expectedSQL string, args []driver.Value, + columns []string, rows []interface{}, + getValue func(interface{}) []driver.Value, + runQuery func(expectedRowsCnt int, expectedError error), + queryTpye queryType, +) { + // Test normal execution + returnRows := sqlmock.NewRows(columns) + for _, row := range rows { + returnRows.AddRow(getValue(row)...) + } + mock.ExpectQuery(expectedSQL).WithArgs(args...).WillReturnRows(returnRows) + runQuery(len(rows), nil) + + // Test return empty rows + mock.ExpectQuery(expectedSQL).WithArgs(args...).WillReturnRows(sqlmock.NewRows(columns)) + if queryTpye == queryTypePoint { + runQuery(0, errors.ErrMetaRowsAffectedNotMatch) + } else { + runQuery(0, nil) + } + + // Test return error + testErr := errors.New("test error") + mock.ExpectQuery(expectedSQL).WithArgs(args...).WillReturnError(testErr) + runQuery(0, testErr) +} + func TestUpstreamClientQuerySQL(t *testing.T) { t.Parallel() @@ -403,17 +441,745 @@ func TestUpstreamClientQuerySQL(t *testing.T) { defer backendDB.Close() client := NewORMClient("test-upstream-client-query", db) + rows := []*UpstreamDO{ + { + ID: 1, + Endpoints: strings.Join([]string{"endpoint1", "endpoint2"}, ","), + Config: nil, /* test nil */ + Version: 1, + UpdateAt: time.Now(), + }, + { + ID: 2, + Endpoints: strings.Join([]string{"endpoint3", "endpoint4"}, ","), + Config: &security.Credential{}, /* test empty */ + Version: 2, + UpdateAt: time.Now(), + }, + } + // Test queryUpstreams - expectedSQL := "SELECT * FROM `upstream`" - mock.ExpectQuery(expectedSQL).WillReturnRows( - sqlmock.NewRows([]string{"id", "endpoints", "config", "version", "update_at"}). 
- AddRow(1, []byte("endpoint1,endpoint2"), nil, 1, time.Now()), + expectedQueryUpstreams := rows + queryUpstreamsRows := []interface{}{expectedQueryUpstreams[0], expectedQueryUpstreams[1]} + runMockQueryTest(t, mock, + "SELECT * FROM `upstream`", nil, + []string{"id", "endpoints", "config", "version", "update_at"}, + queryUpstreamsRows, + func(r interface{}) []driver.Value { + row, ok := r.(*UpstreamDO) + require.True(t, ok) + return []driver.Value{row.ID, row.Endpoints, row.Config, row.Version, row.UpdateAt} + }, + func(expectedRowsCnt int, expectedError error) { + upstreams, err := client.queryUpstreams(db) + require.ErrorIs(t, err, expectedError) + require.Len(t, upstreams, expectedRowsCnt) + if expectedRowsCnt != 0 { + require.Equal(t, expectedQueryUpstreams, upstreams) + } + }, + queryTypeFullTable, + ) + + // Test queryUpstreamsByUpdateAt + expectedQueryUpstreamsByUpdateAt := rows + queryUpstreamsByUpdateAtRows := []interface{}{expectedQueryUpstreamsByUpdateAt[0], expectedQueryUpstreamsByUpdateAt[1]} + queryAt := time.Now() + runMockQueryTest(t, mock, + "SELECT * FROM `upstream` WHERE update_at > ?", []driver.Value{queryAt}, + []string{"id", "endpoints", "config", "version", "update_at"}, + queryUpstreamsByUpdateAtRows, + func(r interface{}) []driver.Value { + row, ok := r.(*UpstreamDO) + require.True(t, ok) + return []driver.Value{row.ID, row.Endpoints, row.Config, row.Version, row.UpdateAt} + }, + func(expectedRowsCnt int, expectedError error) { + upstreams, err := client.queryUpstreamsByUpdateAt(db, queryAt) + require.ErrorIs(t, err, expectedError) + require.Len(t, upstreams, expectedRowsCnt) + if expectedRowsCnt != 0 { + require.Equal(t, expectedQueryUpstreamsByUpdateAt, upstreams) + } + }, + queryTypeRange, + ) + + // Test queryUpstreamByID + for _, row := range rows { + expectedQueryUpstreamByID := row + queryUpstreamByIDRows := []interface{}{row} + runMockQueryTest(t, mock, + "SELECT * FROM `upstream` WHERE id = ? 
LIMIT 1", + []driver.Value{expectedQueryUpstreamByID.ID}, + []string{"id", "endpoints", "config", "version", "update_at"}, + queryUpstreamByIDRows, + func(r interface{}) []driver.Value { + row, ok := r.(*UpstreamDO) + require.True(t, ok) + return []driver.Value{row.ID, row.Endpoints, row.Config, row.Version, row.UpdateAt} + }, + func(expectedRowsCnt int, expectedError error) { + upstream, err := client.queryUpstreamByID(db, expectedQueryUpstreamByID.ID) + require.ErrorIs(t, err, expectedError) + if expectedRowsCnt != 0 { + require.Equal(t, expectedQueryUpstreamByID, upstream) + } else { + require.Nil(t, upstream) + } + }, + queryTypePoint, + ) + } +} + +func TestChangefeedInfoClientQuerySQL(t *testing.T) { + t.Parallel() + + backendDB, db, mock := newMockDB(t) + defer backendDB.Close() + client := NewORMClient("test-changefeed-info-client-query", db) + + rows := []*ChangefeedInfoDO{ + { + ChangefeedInfo: metadata.ChangefeedInfo{ + ChangefeedIdent: metadata.ChangefeedIdent{ + UUID: 1, + Namespace: "namespace", + ID: "id", + }, + UpstreamID: 1, + SinkURI: "sinkURI", + StartTs: 1, + TargetTs: 1, + Config: nil, /* test nil */ + }, + RemovedAt: nil, /* test nil */ + Version: 1, + UpdateAt: time.Now(), + }, + { + ChangefeedInfo: metadata.ChangefeedInfo{ + ChangefeedIdent: metadata.ChangefeedIdent{ + UUID: 2, + Namespace: "namespace", + ID: "id", + }, + UpstreamID: 2, + SinkURI: "sinkURI", + StartTs: 2, + TargetTs: 2, + Config: &config.ReplicaConfig{}, /* test empty */ + }, + RemovedAt: &time.Time{}, /* test empty */ + Version: 2, + UpdateAt: time.Now(), + }, + } + + // Test queryChangefeedInfos + expectedQueryChangefeedInfos := rows + queryChangefeedInfosRows := []interface{}{expectedQueryChangefeedInfos[0], expectedQueryChangefeedInfos[1]} + runMockQueryTest(t, mock, + "SELECT * FROM `changefeed_info`", nil, + []string{ + "uuid", "namespace", "id", "upstream_id", "sink_uri", + "start_ts", "target_ts", "config", "removed_at", + "version", "update_at", + }, + queryChangefeedInfosRows, + func(r interface{}) []driver.Value { + row, ok := r.(*ChangefeedInfoDO) + require.True(t, ok) + return []driver.Value{ + row.UUID, row.Namespace, row.ID, row.UpstreamID, row.SinkURI, + row.StartTs, row.TargetTs, row.Config, row.RemovedAt, + row.Version, row.UpdateAt, + } + }, + func(expectedRowsCnt int, expectedError error) { + changefeedInfos, err := client.queryChangefeedInfos(db) + require.ErrorIs(t, err, expectedError) + require.Len(t, changefeedInfos, expectedRowsCnt) + if expectedRowsCnt != 0 { + require.Equal(t, expectedQueryChangefeedInfos, changefeedInfos) + } + }, + queryTypeFullTable, + ) + + // Test queryChangefeedInfosByUpdateAt + expectedQueryChangefeedInfosByUpdateAt := rows + queryChangefeedInfosByUpdateAtRows := []interface{}{ + expectedQueryChangefeedInfosByUpdateAt[0], + expectedQueryChangefeedInfosByUpdateAt[1], + } + queryAt := time.Now() + runMockQueryTest(t, mock, + "SELECT * FROM `changefeed_info` WHERE update_at > ?", []driver.Value{queryAt}, + []string{ + "uuid", "namespace", "id", "upstream_id", "sink_uri", + "start_ts", "target_ts", "config", "removed_at", + "version", "update_at", + }, + queryChangefeedInfosByUpdateAtRows, + func(r interface{}) []driver.Value { + row, ok := r.(*ChangefeedInfoDO) + require.True(t, ok) + return []driver.Value{ + row.UUID, row.Namespace, row.ID, row.UpstreamID, row.SinkURI, + row.StartTs, row.TargetTs, row.Config, row.RemovedAt, + row.Version, row.UpdateAt, + } + }, + func(expectedRowsCnt int, expectedError error) { + changefeedInfos, err := 
client.queryChangefeedInfosByUpdateAt(db, queryAt) + require.ErrorIs(t, err, expectedError) + require.Len(t, changefeedInfos, expectedRowsCnt) + if expectedRowsCnt != 0 { + require.Equal(t, expectedQueryChangefeedInfosByUpdateAt, changefeedInfos) + } + }, + queryTypeRange, + ) + + // Test queryChangefeedInfosByUUIDs + expectedQueryChangefeedInfosByUUIDs := rows + queryChangefeedInfosByUUIDsRows := []interface{}{ + expectedQueryChangefeedInfosByUUIDs[0], + expectedQueryChangefeedInfosByUUIDs[1], + } + runMockQueryTest(t, mock, + "SELECT * FROM `changefeed_info` WHERE uuid IN (?,?)", []driver.Value{1, 2}, + []string{ + "uuid", "namespace", "id", "upstream_id", "sink_uri", + "start_ts", "target_ts", "config", "removed_at", + "version", "update_at", + }, + queryChangefeedInfosByUUIDsRows, + func(r interface{}) []driver.Value { + row, ok := r.(*ChangefeedInfoDO) + require.True(t, ok) + return []driver.Value{ + row.UUID, row.Namespace, row.ID, row.UpstreamID, row.SinkURI, + row.StartTs, row.TargetTs, row.Config, row.RemovedAt, + row.Version, row.UpdateAt, + } + }, + func(expectedRowsCnt int, expectedError error) { + changefeedInfos, err := client.queryChangefeedInfosByUUIDs(db, 1, 2) + require.ErrorIs(t, err, expectedError) + require.Len(t, changefeedInfos, expectedRowsCnt) + if expectedRowsCnt != 0 { + require.Equal(t, expectedQueryChangefeedInfosByUUIDs, changefeedInfos) + } + }, + queryTypePoint, + ) + + // Test queryChangefeedInfoByUUID + for _, row := range rows { + expectedQueryChangefeedInfoByUUID := row + queryChangefeedInfoByUUIDRows := []interface{}{row} + runMockQueryTest(t, mock, + "SELECT * FROM `changefeed_info` WHERE uuid = ? LIMIT 1", + []driver.Value{expectedQueryChangefeedInfoByUUID.UUID}, + []string{ + "uuid", "namespace", "id", "upstream_id", "sink_uri", + "start_ts", "target_ts", "config", "removed_at", + "version", "update_at", + }, + queryChangefeedInfoByUUIDRows, + func(r interface{}) []driver.Value { + row, ok := r.(*ChangefeedInfoDO) + require.True(t, ok) + return []driver.Value{ + row.UUID, row.Namespace, row.ID, row.UpstreamID, row.SinkURI, + row.StartTs, row.TargetTs, row.Config, row.RemovedAt, + row.Version, row.UpdateAt, + } + }, + func(expectedRowsCnt int, expectedError error) { + changefeedInfo, err := client.queryChangefeedInfoByUUID(db, expectedQueryChangefeedInfoByUUID.UUID) + require.ErrorIs(t, err, expectedError) + if expectedRowsCnt != 0 { + require.Equal(t, expectedQueryChangefeedInfoByUUID, changefeedInfo) + } else { + require.Nil(t, changefeedInfo) + } + }, + queryTypePoint, + ) + } +} + +func TestChangefeedStateClientQuerySQL(t *testing.T) { + t.Parallel() + + backendDB, db, mock := newMockDB(t) + defer backendDB.Close() + client := NewORMClient("test-changefeed-state-client-query", db) + + rows := []*ChangefeedStateDO{ + { + ChangefeedState: metadata.ChangefeedState{ + ChangefeedUUID: 1, + State: "state", + // Note that warning and error could be nil. 
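In the second fixture below, Time is deliberately left commented out (the // ref: TestRunningErrorScan remarks). As TestRunningErrorScan earlier in this patch shows, a JSON round trip keeps the wall-clock instant but drops Go's monotonic clock reading and the original Location, so a deep-equality assertion over the whole struct would fail even though both values name the same instant. A minimal illustration of that behavior:

package sketch

import (
	"fmt"
	"reflect"
	"time"
)

func demoMonotonicDrop() {
	now := time.Now() // carries a monotonic clock reading
	b, _ := now.MarshalJSON()

	var decoded time.Time
	_ = decoded.UnmarshalJSON(b)

	fmt.Println(now.Equal(decoded))              // true: the instants match
	fmt.Println(reflect.DeepEqual(now, decoded)) // false: monotonic reading and Location differ
}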
+ Warning: nil, /* test nil */ + Error: &model.RunningError{}, /* test empty*/ + }, + Version: 1, + UpdateAt: time.Now(), + }, + { + ChangefeedState: metadata.ChangefeedState{ + ChangefeedUUID: 2, + State: "state", + Warning: &model.RunningError{ + // ref: TestRunningErrorScan + // Time: time.Now(), + Addr: "addr", + Code: "warn", + }, + Error: &model.RunningError{ + // Time: time.Now(), + Addr: "addr", + Code: "error", + }, + }, + Version: 2, + UpdateAt: time.Now(), + }, + } + + // Test queryChangefeedStates + expectedQueryChangefeedStates := rows + queryChangefeedStatesRows := []interface{}{expectedQueryChangefeedStates[0], expectedQueryChangefeedStates[1]} + runMockQueryTest(t, mock, + "SELECT * FROM `changefeed_state`", nil, + []string{"changefeed_uuid", "state", "warning", "error", "version", "update_at"}, + queryChangefeedStatesRows, + func(r interface{}) []driver.Value { + row, ok := r.(*ChangefeedStateDO) + require.True(t, ok) + return []driver.Value{row.ChangefeedUUID, row.State, row.Warning, row.Error, row.Version, row.UpdateAt} + }, + func(expectedRowsCnt int, expectedError error) { + changefeedStates, err := client.queryChangefeedStates(db) + require.ErrorIs(t, err, expectedError) + require.Len(t, changefeedStates, expectedRowsCnt) + if expectedRowsCnt != 0 { + require.Equal(t, expectedQueryChangefeedStates, changefeedStates) + } + }, + queryTypeFullTable, + ) + + // Test queryChangefeedStatesByUpdateAt + expectedQueryChangefeedStatesByUpdateAt := rows + queryChangefeedStatesByUpdateAtRows := []interface{}{ + expectedQueryChangefeedStatesByUpdateAt[0], + expectedQueryChangefeedStatesByUpdateAt[1], + } + queryAt := time.Now() + runMockQueryTest(t, mock, + "SELECT * FROM `changefeed_state` WHERE update_at > ?", []driver.Value{queryAt}, + []string{"changefeed_uuid", "state", "warning", "error", "version", "update_at"}, + queryChangefeedStatesByUpdateAtRows, + func(r interface{}) []driver.Value { + row, ok := r.(*ChangefeedStateDO) + require.True(t, ok) + return []driver.Value{row.ChangefeedUUID, row.State, row.Warning, row.Error, row.Version, row.UpdateAt} + }, + func(expectedRowsCnt int, expectedError error) { + changefeedStates, err := client.queryChangefeedStatesByUpdateAt(db, queryAt) + require.ErrorIs(t, err, expectedError) + require.Len(t, changefeedStates, expectedRowsCnt) + if expectedRowsCnt != 0 { + require.Equal(t, expectedQueryChangefeedStatesByUpdateAt, changefeedStates) + } + }, + queryTypeRange, + ) + + // Test queryChangefeedStateByUUID + for _, row := range rows { + expectedQueryChangefeedStateByUUID := row + queryChangefeedStateByUUIDRows := []interface{}{row} + runMockQueryTest(t, mock, + "SELECT * FROM `changefeed_state` WHERE changefeed_uuid = ? 
LIMIT 1", + []driver.Value{expectedQueryChangefeedStateByUUID.ChangefeedUUID}, + []string{"changefeed_uuid", "state", "warning", "error", "version", "update_at"}, + queryChangefeedStateByUUIDRows, + func(r interface{}) []driver.Value { + row, ok := r.(*ChangefeedStateDO) + require.True(t, ok) + return []driver.Value{row.ChangefeedUUID, row.State, row.Warning, row.Error, row.Version, row.UpdateAt} + }, + func(expectedRowsCnt int, expectedError error) { + changefeedState, err := client.queryChangefeedStateByUUID(db, expectedQueryChangefeedStateByUUID.ChangefeedUUID) + require.ErrorIs(t, err, expectedError) + if expectedRowsCnt != 0 { + require.Equal(t, expectedQueryChangefeedStateByUUID, changefeedState) + } else { + require.Nil(t, changefeedState) + } + }, + queryTypePoint, + ) + } + + // Test queryChangefeedStateByUUIDWithLock + for _, row := range rows { + expectedQueryChangefeedStateByUUIDWithLock := row + queryChangefeedStateByUUIDWithLockRows := []interface{}{row} + runMockQueryTest(t, mock, + "SELECT * FROM `changefeed_state` WHERE changefeed_uuid = ? LIMIT 1 LOCK IN SHARE MODE", + []driver.Value{expectedQueryChangefeedStateByUUIDWithLock.ChangefeedUUID}, + []string{"changefeed_uuid", "state", "warning", "error", "version", "update_at"}, + queryChangefeedStateByUUIDWithLockRows, + func(r interface{}) []driver.Value { + row, ok := r.(*ChangefeedStateDO) + require.True(t, ok) + return []driver.Value{row.ChangefeedUUID, row.State, row.Warning, row.Error, row.Version, row.UpdateAt} + }, + func(expectedRowsCnt int, expectedError error) { + changefeedState, err := client.queryChangefeedStateByUUIDWithLock(db, expectedQueryChangefeedStateByUUIDWithLock.ChangefeedUUID) + require.ErrorIs(t, err, expectedError) + if expectedRowsCnt != 0 { + require.Equal(t, expectedQueryChangefeedStateByUUIDWithLock, changefeedState) + } else { + require.Nil(t, changefeedState) + } + }, + queryTypePoint, + ) + } +} + +func TestScheduleClientQuerySQL(t *testing.T) { + t.Parallel() + + backendDB, db, mock := newMockDB(t) + defer backendDB.Close() + client := NewORMClient("test-schedule-client-query", db) + + ownerCapture := "test-schedule-client-query" + rows := []*ScheduleDO{ + { + ScheduledChangefeed: metadata.ScheduledChangefeed{ + ChangefeedUUID: 1, + Owner: nil, /* test nil */ + OwnerState: metadata.SchedRemoved, + Processors: nil, /* test nil */ + TaskPosition: metadata.ChangefeedProgress{ + CheckpointTs: 1, + }, + }, + Version: 1, + UpdateAt: time.Now(), + }, + { + ScheduledChangefeed: metadata.ScheduledChangefeed{ + ChangefeedUUID: 2, + Owner: &ownerCapture, + OwnerState: metadata.SchedRemoved, + Processors: &ownerCapture, + TaskPosition: metadata.ChangefeedProgress{ + CheckpointTs: 2, + }, + }, + Version: 2, + UpdateAt: time.Now(), + }, + } + + // Test querySchedules + expectedQuerySchedules := rows + querySchedulesRows := []interface{}{expectedQuerySchedules[0], expectedQuerySchedules[1]} + runMockQueryTest(t, mock, + "SELECT * FROM `schedule`", nil, + []string{ + "changefeed_uuid", "owner", "owner_state", "processors", "task_position", + "version", "update_at", + }, + querySchedulesRows, + func(r interface{}) []driver.Value { + row, ok := r.(*ScheduleDO) + require.True(t, ok) + return []driver.Value{ + row.ChangefeedUUID, row.Owner, row.OwnerState, row.Processors, row.TaskPosition, + row.Version, row.UpdateAt, + } + }, + func(expectedRowsCnt int, expectedError error) { + schedules, err := client.querySchedules(db) + require.ErrorIs(t, err, expectedError) + require.Len(t, schedules, expectedRowsCnt) + 
if expectedRowsCnt != 0 { + require.Equal(t, expectedQuerySchedules, schedules) + } + }, + queryTypeFullTable, + ) + + // Test querySchedulesByUpdateAt + expectedQuerySchedulesByUpdateAt := rows + querySchedulesByUpdateAtRows := []interface{}{ + expectedQuerySchedulesByUpdateAt[0], + expectedQuerySchedulesByUpdateAt[1], + } + queryAt := time.Now() + runMockQueryTest(t, mock, + "SELECT * FROM `schedule` WHERE update_at > ?", []driver.Value{queryAt}, + []string{ + "changefeed_uuid", "owner", "owner_state", "processors", "task_position", + "version", "update_at", + }, + querySchedulesByUpdateAtRows, + func(r interface{}) []driver.Value { + row, ok := r.(*ScheduleDO) + require.True(t, ok) + return []driver.Value{ + row.ChangefeedUUID, row.Owner, row.OwnerState, row.Processors, row.TaskPosition, + row.Version, row.UpdateAt, + } + }, + func(expectedRowsCnt int, expectedError error) { + schedules, err := client.querySchedulesByUpdateAt(db, queryAt) + require.ErrorIs(t, err, expectedError) + require.Len(t, schedules, expectedRowsCnt) + if expectedRowsCnt != 0 { + require.Equal(t, expectedQuerySchedulesByUpdateAt, schedules) + } + }, + queryTypeRange, + ) + + // Test querySchedulesByOwnerIDAndUpdateAt + expectedQuerySchedulesByOwnerIDAndUpdateAt := rows + querySchedulesByOwnerIDAndUpdateAtRows := []interface{}{ + expectedQuerySchedulesByOwnerIDAndUpdateAt[0], + expectedQuerySchedulesByOwnerIDAndUpdateAt[1], + } + queryAt = time.Now() + runMockQueryTest(t, mock, + "SELECT * FROM `schedule` WHERE owner = ? and update_at > ?", []driver.Value{ownerCapture, queryAt}, + []string{ + "changefeed_uuid", "owner", "owner_state", "processors", "task_position", + "version", "update_at", + }, + querySchedulesByOwnerIDAndUpdateAtRows, + func(r interface{}) []driver.Value { + row, ok := r.(*ScheduleDO) + require.True(t, ok) + return []driver.Value{ + row.ChangefeedUUID, row.Owner, row.OwnerState, row.Processors, row.TaskPosition, + row.Version, row.UpdateAt, + } + }, + func(expectedRowsCnt int, expectedError error) { + schedules, err := client.querySchedulesByOwnerIDAndUpdateAt(db, ownerCapture, queryAt) + require.ErrorIs(t, err, expectedError) + require.Len(t, schedules, expectedRowsCnt) + if expectedRowsCnt != 0 { + require.Equal(t, expectedQuerySchedulesByOwnerIDAndUpdateAt, schedules) + } + }, + queryTypeRange, + ) + + // Test queryScheduleByUUID + for _, row := range rows { + expectedQueryScheduleByUUID := row + queryScheduleByUUIDRows := []interface{}{row} + runMockQueryTest(t, mock, + "SELECT * FROM `schedule` WHERE changefeed_uuid = ? 
LIMIT 1", + []driver.Value{expectedQueryScheduleByUUID.ChangefeedUUID}, + []string{ + "changefeed_uuid", "owner", "owner_state", "processors", "task_position", + "version", "update_at", + }, + queryScheduleByUUIDRows, + func(r interface{}) []driver.Value { + row, ok := r.(*ScheduleDO) + require.True(t, ok) + return []driver.Value{ + row.ChangefeedUUID, row.Owner, row.OwnerState, row.Processors, row.TaskPosition, + row.Version, row.UpdateAt, + } + }, + func(expectedRowsCnt int, expectedError error) { + schedule, err := client.queryScheduleByUUID(db, expectedQueryScheduleByUUID.ChangefeedUUID) + require.ErrorIs(t, err, expectedError) + if expectedRowsCnt != 0 { + require.Equal(t, expectedQueryScheduleByUUID, schedule) + } else { + require.Nil(t, schedule) + } + }, + queryTypePoint, + ) + } + + // Test querySchedulesUinqueOwnerIDs + expectedQuerySchedulesUinqueOwnerIDs := []string{"owner1", "owner2"} + querySchedulesUinqueOwnerIDsRows := []interface{}{ + expectedQuerySchedulesUinqueOwnerIDs[0], + expectedQuerySchedulesUinqueOwnerIDs[1], + } + runMockQueryTest(t, mock, + "SELECT DISTINCT `owner` FROM `schedule` WHERE owner IS NOT NULL", nil, + []string{"owner"}, + querySchedulesUinqueOwnerIDsRows, + func(r interface{}) []driver.Value { + row, ok := r.(string) + require.True(t, ok) + return []driver.Value{row} + }, + func(expectedRowsCnt int, expectedError error) { + ownerIDs, err := client.querySchedulesUinqueOwnerIDs(db) + require.ErrorIs(t, err, expectedError) + require.Len(t, ownerIDs, expectedRowsCnt) + if expectedRowsCnt != 0 { + require.Equal(t, expectedQuerySchedulesUinqueOwnerIDs, ownerIDs) + } + }, + queryTypeFullTable, + ) +} + +func TestProgressClientQuerySQL(t *testing.T) { + t.Parallel() + + backendDB, db, mock := newMockDB(t) + defer backendDB.Close() + client := NewORMClient("test-progress-client-query", db) + + rows := []*ProgressDO{ + { + CaptureID: "captureID-1", + Progress: &metadata.CaptureProgress{ + 1: { + CheckpointTs: 1, + MinTableBarrierTs: 1, + }, + 2: { + CheckpointTs: 2, + MinTableBarrierTs: 2, + }, + }, + Version: 1, + UpdateAt: time.Now(), + }, + { + CaptureID: "captureID-2", + Progress: &metadata.CaptureProgress{}, + Version: 2, + UpdateAt: time.Now(), + }, + } + + // Test queryProgresses + expectedqueryProgresses := rows + queryProgressesRows := []interface{}{expectedqueryProgresses[0], expectedqueryProgresses[1]} + runMockQueryTest(t, mock, + "SELECT * FROM `progress`", nil, + []string{"capture_id", "progress", "version", "update_at"}, + queryProgressesRows, + func(r interface{}) []driver.Value { + row, ok := r.(*ProgressDO) + require.True(t, ok) + return []driver.Value{row.CaptureID, row.Progress, row.Version, row.UpdateAt} + }, + func(expectedRowsCnt int, expectedError error) { + progresses, err := client.queryProgresses(db) + require.ErrorIs(t, err, expectedError) + require.Len(t, progresses, expectedRowsCnt) + if expectedRowsCnt != 0 { + require.Equal(t, expectedqueryProgresses, progresses) + } + }, + queryTypeFullTable, + ) + + // Test queryProgressesByUpdateAt + expectedqueryProgressesByUpdateAt := rows + queryProgressesByUpdateAtRows := []interface{}{ + expectedqueryProgressesByUpdateAt[0], + expectedqueryProgressesByUpdateAt[1], + } + queryAt := time.Now() + runMockQueryTest(t, mock, + "SELECT * FROM `progress` WHERE update_at > ?", []driver.Value{queryAt}, + []string{"capture_id", "progress", "version", "update_at"}, + queryProgressesByUpdateAtRows, + func(r interface{}) []driver.Value { + row, ok := r.(*ProgressDO) + require.True(t, ok) + return 
[]driver.Value{row.CaptureID, row.Progress, row.Version, row.UpdateAt} + }, + func(expectedRowsCnt int, expectedError error) { + progresses, err := client.queryProgressesByUpdateAt(db, queryAt) + require.ErrorIs(t, err, expectedError) + require.Len(t, progresses, expectedRowsCnt) + if expectedRowsCnt != 0 { + require.Equal(t, expectedqueryProgressesByUpdateAt, progresses) + } + }, + queryTypeRange, + ) + + // Test queryProgressByCaptureID + for _, row := range rows { + expectedqueryProgressByCaptureID := row + queryProgressByCaptureIDRows := []interface{}{row} + runMockQueryTest(t, mock, + "SELECT * FROM `progress` WHERE capture_id = ? LIMIT 1", + []driver.Value{expectedqueryProgressByCaptureID.CaptureID}, + []string{"capture_id", "progress", "version", "update_at"}, + queryProgressByCaptureIDRows, + func(r interface{}) []driver.Value { + row, ok := r.(*ProgressDO) + require.True(t, ok) + return []driver.Value{row.CaptureID, row.Progress, row.Version, row.UpdateAt} + }, + func(expectedRowsCnt int, expectedError error) { + progress, err := client.queryProgressByCaptureID(db, expectedqueryProgressByCaptureID.CaptureID) + require.ErrorIs(t, err, expectedError) + if expectedRowsCnt != 0 { + require.Equal(t, expectedqueryProgressByCaptureID, progress) + } else { + require.Nil(t, progress) + } + }, + queryTypePoint, + ) + } + + // Test queryProgressByCaptureIDsWithLock + expectedqueryProgressByCaptureIDsWithLock := rows + queryProgressByCaptureIDsWithLockRows := []interface{}{rows[0], rows[1]} + captureIDs := []string{expectedqueryProgressByCaptureIDsWithLock[0].CaptureID, expectedqueryProgressByCaptureIDsWithLock[1].CaptureID} + runMockQueryTest(t, mock, + "SELECT * FROM `progress` WHERE capture_id in (?,?) LOCK IN SHARE MODE", + []driver.Value{expectedqueryProgressByCaptureIDsWithLock[0].CaptureID, expectedqueryProgressByCaptureIDsWithLock[1].CaptureID}, + []string{"capture_id", "progress", "version", "update_at"}, + queryProgressByCaptureIDsWithLockRows, + func(r interface{}) []driver.Value { + row, ok := r.(*ProgressDO) + require.True(t, ok) + return []driver.Value{row.CaptureID, row.Progress, row.Version, row.UpdateAt} + }, + func(expectedRowsCnt int, expectedError error) { + progress, err := client.queryProgressByCaptureIDsWithLock(db, captureIDs) + require.ErrorIs(t, err, expectedError) + require.Len(t, progress, expectedRowsCnt) + if expectedRowsCnt != 0 { + require.Equal(t, expectedqueryProgressByCaptureIDsWithLock, progress) + } + }, + queryTypeRange, ) - upstreams, err := client.queryUpstreams(db) - require.NoError(t, err) - require.Len(t, upstreams, 1) - require.Equal(t, uint64(1), upstreams[0].ID) - require.Equal(t, "endpoint1,endpoint2", upstreams[0].Endpoints) - require.Nil(t, upstreams[0].Config) - require.Equal(t, uint64(1), upstreams[0].Version) } diff --git a/cdcv2/metadata/sql/observation.go b/cdcv2/metadata/sql/observation.go index e1cdba99fb3..ac049976129 100644 --- a/cdcv2/metadata/sql/observation.go +++ b/cdcv2/metadata/sql/observation.go @@ -278,7 +278,7 @@ func (c *CaptureOb[T]) GetChangefeedProgress( var prDOs []*ProgressDO var scDOs []*ScheduleDO err = c.client.Txn(c.egCtx, func(tx T) error { - prDOs, err = c.client.queryProgresss(tx) + prDOs, err = c.client.queryProgresses(tx) if err != nil { return err } diff --git a/pkg/config/replica_config.go b/pkg/config/replica_config.go index 2c8f37f6185..dcf193c94a3 100644 --- a/pkg/config/replica_config.go +++ b/pkg/config/replica_config.go @@ -143,7 +143,13 @@ type replicaConfig struct { // Value implements the 
driver.Valuer interface func (c ReplicaConfig) Value() (driver.Value, error) { - return c.Marshal() + cfg, err := c.Marshal() + if err != nil { + return nil, err + } + + // TODO: refactor the meaningless type conversion. + return []byte(cfg), nil } // Scan implements the sql.Scanner interface From 747965d02ee1ae00778de34af41fbfed4b1cc8f7 Mon Sep 17 00:00:00 2001 From: Jianyuan Jiang Date: Fri, 20 Oct 2023 15:09:29 +0800 Subject: [PATCH 11/15] capture(ticdc): remove reactor from owner and controller interface (#9931) close pingcap/tiflow#9930 --- cdc/capture/capture.go | 4 ++-- cdc/controller/controller.go | 6 ++++-- cdc/controller/mock/controller_mock.go | 16 ---------------- cdc/owner/mock/owner_mock.go | 16 ---------------- cdc/owner/owner.go | 6 ++++-- 5 files changed, 10 insertions(+), 38 deletions(-) diff --git a/cdc/capture/capture.go b/cdc/capture/capture.go index 59230dd7674..5a58446ec67 100644 --- a/cdc/capture/capture.go +++ b/cdc/capture/capture.go @@ -517,12 +517,12 @@ func (c *captureImpl) campaignOwner(ctx cdcContext.Context) error { ctx, cancelOwner := context.WithCancel(ctx) ownerCtx := cdcContext.NewContext(ctx, newGlobalVars) g.Go(func() error { - return c.runEtcdWorker(ownerCtx, owner, + return c.runEtcdWorker(ownerCtx, owner.(orchestrator.Reactor), orchestrator.NewGlobalState(c.EtcdClient.GetClusterID(), c.config.CaptureSessionTTL), ownerFlushInterval, util.RoleOwner.String()) }) g.Go(func() error { - er := c.runEtcdWorker(ownerCtx, controller, + er := c.runEtcdWorker(ownerCtx, controller.(orchestrator.Reactor), globalState, // todo: do not use owner flush interval ownerFlushInterval, util.RoleController.String()) diff --git a/cdc/controller/controller.go b/cdc/controller/controller.go index 3ca900588cd..20bbf62f795 100644 --- a/cdc/controller/controller.go +++ b/cdc/controller/controller.go @@ -45,7 +45,6 @@ const versionInconsistentLogRate = 1 // Controller is a manager to schedule changefeeds type Controller interface { - orchestrator.Reactor AsyncStop() GetChangefeedOwnerCaptureInfo(id model.ChangeFeedID) *model.CaptureInfo GetAllChangeFeedInfo(ctx context.Context) ( @@ -63,7 +62,10 @@ type Controller interface { ) error } -var _ Controller = &controllerImpl{} +var ( + _ orchestrator.Reactor = &controllerImpl{} + _ Controller = &controllerImpl{} +) type controllerImpl struct { changefeeds map[model.ChangeFeedID]*orchestrator.ChangefeedReactorState diff --git a/cdc/controller/mock/controller_mock.go b/cdc/controller/mock/controller_mock.go index 91f53eb96e6..1d9b8a423ef 100644 --- a/cdc/controller/mock/controller_mock.go +++ b/cdc/controller/mock/controller_mock.go @@ -10,7 +10,6 @@ import ( gomock "github.com/golang/mock/gomock" model "github.com/pingcap/tiflow/cdc/model" - orchestrator "github.com/pingcap/tiflow/pkg/orchestrator" ) // MockController is a mock of Controller interface. @@ -150,18 +149,3 @@ func (mr *MockControllerMockRecorder) IsChangefeedExists(ctx, id interface{}) *g mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsChangefeedExists", reflect.TypeOf((*MockController)(nil).IsChangefeedExists), ctx, id) } - -// Tick mocks base method. -func (m *MockController) Tick(ctx context.Context, state orchestrator.ReactorState) (orchestrator.ReactorState, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Tick", ctx, state) - ret0, _ := ret[0].(orchestrator.ReactorState) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Tick indicates an expected call of Tick. 
-func (mr *MockControllerMockRecorder) Tick(ctx, state interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Tick", reflect.TypeOf((*MockController)(nil).Tick), ctx, state) -} diff --git a/cdc/owner/mock/owner_mock.go b/cdc/owner/mock/owner_mock.go index 7094bdafce3..06f27cbe98b 100644 --- a/cdc/owner/mock/owner_mock.go +++ b/cdc/owner/mock/owner_mock.go @@ -13,7 +13,6 @@ import ( model "github.com/pingcap/tiflow/cdc/model" owner "github.com/pingcap/tiflow/cdc/owner" scheduler "github.com/pingcap/tiflow/cdc/scheduler" - orchestrator "github.com/pingcap/tiflow/pkg/orchestrator" ) // MockOwner is a mock of Owner interface. @@ -111,21 +110,6 @@ func (mr *MockOwnerMockRecorder) ScheduleTable(cfID, toCapture, tableID, done in return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ScheduleTable", reflect.TypeOf((*MockOwner)(nil).ScheduleTable), cfID, toCapture, tableID, done) } -// Tick mocks base method. -func (m *MockOwner) Tick(ctx context.Context, state orchestrator.ReactorState) (orchestrator.ReactorState, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Tick", ctx, state) - ret0, _ := ret[0].(orchestrator.ReactorState) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Tick indicates an expected call of Tick. -func (mr *MockOwnerMockRecorder) Tick(ctx, state interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Tick", reflect.TypeOf((*MockOwner)(nil).Tick), ctx, state) -} - // UpdateChangefeed mocks base method. func (m *MockOwner) UpdateChangefeed(ctx context.Context, changeFeedInfo *model.ChangeFeedInfo) error { m.ctrl.T.Helper() diff --git a/cdc/owner/owner.go b/cdc/owner/owner.go index a7d03f1345d..966e26da00f 100644 --- a/cdc/owner/owner.go +++ b/cdc/owner/owner.go @@ -81,7 +81,6 @@ type ownerJob struct { // // The interface is thread-safe, except for Tick, it's only used by etcd worker. 
type Owner interface { - orchestrator.Reactor EnqueueJob(adminJob model.AdminJob, done chan<- error) RebalanceTables(cfID model.ChangeFeedID, done chan<- error) ScheduleTable( @@ -132,7 +131,10 @@ type ownerImpl struct { cfg *config.SchedulerConfig } -var _ Owner = &ownerImpl{} +var ( + _ orchestrator.Reactor = &ownerImpl{} + _ Owner = &ownerImpl{} +) // NewOwner creates a new Owner func NewOwner( From a71208a423aeaaaa984b0321695ad6a27a437357 Mon Sep 17 00:00:00 2001 From: qupeng Date: Fri, 20 Oct 2023 16:44:01 +0800 Subject: [PATCH 12/15] kv-client(cdc): log slowest regions and region holes (#9933) ref pingcap/tiflow#9222 --- cdc/kv/regionlock/region_range_lock.go | 11 +++--- cdc/kv/shared_client.go | 50 ++++++++++++++++++++++++++ 2 files changed, 57 insertions(+), 4 deletions(-) diff --git a/cdc/kv/regionlock/region_range_lock.go b/cdc/kv/regionlock/region_range_lock.go index 930ba3e960e..3c375f843df 100644 --- a/cdc/kv/regionlock/region_range_lock.go +++ b/cdc/kv/regionlock/region_range_lock.go @@ -487,8 +487,9 @@ func (l *RegionRangeLock) CollectLockedRangeAttrs( if action != nil { action(item.regionID, &item.state) } - - r.HoleExists = r.HoleExists || spanz.EndCompare(lastEnd, item.startKey) < 0 + if spanz.EndCompare(lastEnd, item.startKey) < 0 { + r.Holes = append(r.Holes, tablepb.Span{StartKey: lastEnd, EndKey: item.startKey}) + } ckpt := item.state.CheckpointTs.Load() if ckpt > r.FastestRegion.CheckpointTs { r.FastestRegion.RegionID = item.regionID @@ -505,13 +506,15 @@ func (l *RegionRangeLock) CollectLockedRangeAttrs( lastEnd = item.endKey return true }) - r.HoleExists = r.HoleExists || spanz.EndCompare(lastEnd, l.totalSpan.EndKey) < 0 + if spanz.EndCompare(lastEnd, l.totalSpan.EndKey) < 0 { + r.Holes = append(r.Holes, tablepb.Span{StartKey: lastEnd, EndKey: l.totalSpan.EndKey}) + } return } // CollectedLockedRangeAttrs returns by `RegionRangeLock.CollectedLockedRangeAttrs`. 
type CollectedLockedRangeAttrs struct { - HoleExists bool + Holes []tablepb.Span FastestRegion LockedRangeAttrs SlowestRegion LockedRangeAttrs } diff --git a/cdc/kv/shared_client.go b/cdc/kv/shared_client.go index d90f46ff242..ebc98bc95c9 100644 --- a/cdc/kv/shared_client.go +++ b/cdc/kv/shared_client.go @@ -16,6 +16,8 @@ package kv import ( "context" "encoding/binary" + "fmt" + "strings" "sync" "sync/atomic" "time" @@ -40,6 +42,7 @@ import ( "github.com/pingcap/tiflow/pkg/version" "github.com/prometheus/client_golang/prometheus" kvclientv2 "github.com/tikv/client-go/v2/kv" + "github.com/tikv/client-go/v2/oracle" "github.com/tikv/client-go/v2/tikv" pd "github.com/tikv/pd/client" "go.uber.org/zap" @@ -246,6 +249,7 @@ func (s *SharedClient) Run(ctx context.Context) error { g.Go(func() error { return s.requestRegionToStore(ctx, g) }) g.Go(func() error { return s.handleErrors(ctx) }) g.Go(func() error { return s.resolveLock(ctx) }) + g.Go(func() error { return s.logSlowRegions(ctx) }) log.Info("event feed started", zap.String("namespace", s.changefeed.Namespace), @@ -689,6 +693,52 @@ func (s *SharedClient) resolveLock(ctx context.Context) error { } } +func (s *SharedClient) logSlowRegions(ctx context.Context) error { + ticker := time.NewTicker(10 * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + } + + currTime := s.pdClock.CurrentTime() + s.totalSpans.RLock() + for subscriptionID, rt := range s.totalSpans.v { + attr := rt.rangeLock.CollectLockedRangeAttrs(nil) + if attr.SlowestRegion.Initialized { + ckptTime := oracle.GetTimeFromTS(attr.SlowestRegion.CheckpointTs) + if currTime.Sub(ckptTime) > 2*resolveLockMinInterval { + log.Info("event feed finds a slow region", + zap.String("namespace", s.changefeed.Namespace), + zap.String("changefeed", s.changefeed.ID), + zap.Any("subscriptionID", subscriptionID), + zap.Any("slowRegion", attr.SlowestRegion)) + } + } else if currTime.Sub(attr.SlowestRegion.Created) > 10*time.Minute { + log.Info("event feed initializes a region too slow", + zap.String("namespace", s.changefeed.Namespace), + zap.String("changefeed", s.changefeed.ID), + zap.Any("subscriptionID", subscriptionID), + zap.Any("slowRegion", attr.SlowestRegion)) + } + if len(attr.Holes) > 0 { + holes := make([]string, 0, len(attr.Holes)) + for _, hole := range attr.Holes { + holes = append(holes, fmt.Sprintf("[%s,%s)", hole.StartKey, hole.EndKey)) + } + log.Info("event feed holes exist", + zap.String("namespace", s.changefeed.Namespace), + zap.String("changefeed", s.changefeed.ID), + zap.Any("subscriptionID", subscriptionID), + zap.String("holes", strings.Join(holes, ", "))) + } + } + s.totalSpans.RUnlock() + } +} + func (s *SharedClient) newRequestedTable( subID SubscriptionID, span tablepb.Span, startTs uint64, eventCh chan<- MultiplexingEvent, From e4ac90bc2fa108e856ea056e2949420d7a7694fe Mon Sep 17 00:00:00 2001 From: Ling Jin <7138436+3AceShowHand@users.noreply.github.com> Date: Fri, 20 Oct 2023 04:57:30 -0500 Subject: [PATCH 13/15] mysql(ticdc): use string join to build multi statement sql. 
(#9936) close pingcap/tiflow#9935 --- cdc/sink/dmlsink/txn/mysql/mysql.go | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/cdc/sink/dmlsink/txn/mysql/mysql.go b/cdc/sink/dmlsink/txn/mysql/mysql.go index f0eb05709a2..447d1a00710 100644 --- a/cdc/sink/dmlsink/txn/mysql/mysql.go +++ b/cdc/sink/dmlsink/txn/mysql/mysql.go @@ -20,6 +20,7 @@ import ( "fmt" "math" "net/url" + "strings" "time" dmysql "github.com/go-sql-driver/mysql" @@ -651,20 +652,17 @@ func (s *mysqlBackend) prepareDMLs() *preparedDMLs { func (s *mysqlBackend) multiStmtExecute( ctx context.Context, dmls *preparedDMLs, tx *sql.Tx, writeTimeout time.Duration, ) error { - start := time.Now() - multiStmtSQL := "" - multiStmtArgs := []any{} - for i, query := range dmls.sqls { - multiStmtSQL += query - if i != len(dmls.sqls)-1 { - multiStmtSQL += ";" - } - multiStmtArgs = append(multiStmtArgs, dmls.values[i]...) + var multiStmtArgs []any + for _, value := range dmls.values { + multiStmtArgs = append(multiStmtArgs, value...) } + multiStmtSQL := strings.Join(dmls.sqls, ";") + log.Debug("exec row", zap.Int("workerID", s.workerID), zap.String("sql", multiStmtSQL), zap.Any("args", multiStmtArgs)) ctx, cancel := context.WithTimeout(ctx, writeTimeout) defer cancel() + start := time.Now() _, execError := tx.ExecContext(ctx, multiStmtSQL, multiStmtArgs...) if execError != nil { err := logDMLTxnErr( From 5ec0b15ee18010bf6ac3c261da98cbf752892fd3 Mon Sep 17 00:00:00 2001 From: feran-morgan-pingcap <127978765+feran-morgan-pingcap@users.noreply.github.com> Date: Tue, 24 Oct 2023 23:18:33 -0700 Subject: [PATCH 14/15] Removed String() call in favor of variable substitution. (#9829) close pingcap/tiflow#9676 --- dm/pkg/binlog/position.go | 13 +++++++++++-- dm/syncer/checkpoint.go | 2 +- dm/syncer/syncer.go | 2 +- 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/dm/pkg/binlog/position.go b/dm/pkg/binlog/position.go index 21eae14c359..77ac1f990f3 100644 --- a/dm/pkg/binlog/position.go +++ b/dm/pkg/binlog/position.go @@ -365,9 +365,18 @@ func IsFreshPosition(location Location, flavor string, cmpGTID bool) bool { // -1, true if gSet1 is less than gSet2 // // but if can't compare gSet1 and gSet2, will returns 0, false. 
+var ( + emptyMySQLGTIDSet, _ = gmysql.ParseMysqlGTIDSet("") + emptyMariaDBGTIDSet, _ = gmysql.ParseMariadbGTIDSet("") +) + +func CheckGTIDSetEmpty(gSet gmysql.GTIDSet) bool { + return gSet == nil || gSet.Equal(emptyMySQLGTIDSet) || gSet.Equal(emptyMariaDBGTIDSet) +} + func CompareGTID(gSet1, gSet2 gmysql.GTIDSet) (int, bool) { - gSetIsEmpty1 := gSet1 == nil || len(gSet1.String()) == 0 - gSetIsEmpty2 := gSet2 == nil || len(gSet2.String()) == 0 + gSetIsEmpty1 := CheckGTIDSetEmpty(gSet1) + gSetIsEmpty2 := CheckGTIDSetEmpty(gSet2) switch { case gSetIsEmpty1 && gSetIsEmpty2: diff --git a/dm/syncer/checkpoint.go b/dm/syncer/checkpoint.go index 62ba79ec4bd..babaf9dd7c3 100644 --- a/dm/syncer/checkpoint.go +++ b/dm/syncer/checkpoint.go @@ -675,7 +675,7 @@ func (cp *RemoteCheckPoint) IsOlderThanTablePoint(table *filter.Table, location } oldLocation := point.MySQLLocation() // if we update enable-gtid = false to true, we need to compare binlog position instead of GTID before we save table point - cmpGTID := cp.cfg.EnableGTID && !(oldLocation.GTIDSetStr() == "" && binlog.ComparePosition(oldLocation.Position, binlog.MinPosition) > 0) + cmpGTID := cp.cfg.EnableGTID && !(binlog.CheckGTIDSetEmpty(oldLocation.GetGTID()) && binlog.ComparePosition(oldLocation.Position, binlog.MinPosition) > 0) cp.logCtx.L().Debug("compare table location whether is newer", zap.Stringer("location", location), zap.Stringer("old location", oldLocation), zap.Bool("cmpGTID", cmpGTID)) return binlog.CompareLocation(location, oldLocation, cmpGTID) <= 0 diff --git a/dm/syncer/syncer.go b/dm/syncer/syncer.go index a160c36210f..5e3e414053b 100644 --- a/dm/syncer/syncer.go +++ b/dm/syncer/syncer.go @@ -3497,7 +3497,7 @@ func (s *Syncer) adjustGlobalPointGTID(tctx *tcontext.Context) (bool, error) { // 2. location already has GTID position // 3. location is totally new, has no position info // 4. 
location is too early thus not a COMMIT location, which happens when it's reset by other logic - if !s.cfg.EnableGTID || location.GTIDSetStr() != "" || location.Position.Name == "" || location.Position.Pos == 4 { + if !s.cfg.EnableGTID || !binlog.CheckGTIDSetEmpty(location.GetGTID()) || location.Position.Name == "" || location.Position.Pos == 4 { return false, nil } // set enableGTID to false for new streamerController From 0c2904081451619ac1f65d3039583c2f336b736b Mon Sep 17 00:00:00 2001 From: CharlesCheung <61726649+CharlesCheung96@users.noreply.github.com> Date: Thu, 26 Oct 2023 16:06:34 +0800 Subject: [PATCH 15/15] scheduler(ticdc): revert 3b8d55 and do not return error when resolvedTs less than checkpoint (#9953) ref pingcap/tiflow#9830, ref pingcap/tiflow#9926 --- cdc/owner/changefeed.go | 6 +- cdc/processor/processor.go | 5 +- cdc/processor/sinkmanager/manager.go | 8 +- cdc/processor/sinkmanager/manager_test.go | 12 +- cdc/scheduler/internal/v3/agent/main_test.go | 24 ---- cdc/scheduler/internal/v3/agent/table.go | 4 +- cdc/scheduler/internal/v3/compat/main_test.go | 24 ---- cdc/scheduler/internal/v3/coordinator.go | 2 +- .../internal/v3/coordinator_bench_test.go | 2 +- .../internal/v3/keyspan/main_test.go | 24 ---- cdc/scheduler/internal/v3/member/main_test.go | 24 ---- .../internal/v3/replication/main_test.go | 24 ---- .../v3/replication/replication_manager.go | 19 ++- .../replication/replication_manager_test.go | 111 ++++-------------- .../v3/replication/replication_set.go | 41 +++---- .../v3/replication/replication_set_test.go | 53 ++++++--- .../internal/v3/scheduler/main_test.go | 24 ---- .../internal/v3/scheduler/scheduler.go | 2 +- .../v3/scheduler/scheduler_balance.go | 2 +- .../v3/scheduler/scheduler_balance_test.go | 15 +-- .../internal/v3/scheduler/scheduler_basic.go | 12 +- .../v3/scheduler/scheduler_basic_test.go | 25 ++-- .../v3/scheduler/scheduler_drain_capture.go | 2 +- .../scheduler/scheduler_drain_capture_test.go | 32 ++--- .../v3/scheduler/scheduler_manager.go | 18 +-- .../v3/scheduler/scheduler_manager_test.go | 11 +- .../v3/scheduler/scheduler_move_table.go | 2 +- .../v3/scheduler/scheduler_move_table_test.go | 18 +-- .../v3/scheduler/scheduler_rebalance.go | 2 +- .../v3/scheduler/scheduler_rebalance_test.go | 18 +-- .../internal/v3/transport/main_test.go | 24 ---- 31 files changed, 170 insertions(+), 420 deletions(-) delete mode 100644 cdc/scheduler/internal/v3/agent/main_test.go delete mode 100644 cdc/scheduler/internal/v3/compat/main_test.go delete mode 100644 cdc/scheduler/internal/v3/keyspan/main_test.go delete mode 100644 cdc/scheduler/internal/v3/member/main_test.go delete mode 100644 cdc/scheduler/internal/v3/replication/main_test.go delete mode 100644 cdc/scheduler/internal/v3/scheduler/main_test.go delete mode 100644 cdc/scheduler/internal/v3/transport/main_test.go diff --git a/cdc/owner/changefeed.go b/cdc/owner/changefeed.go index 2370380ebdb..d7e5e18af1a 100644 --- a/cdc/owner/changefeed.go +++ b/cdc/owner/changefeed.go @@ -481,7 +481,9 @@ LOOP2: } checkpointTs := c.latestStatus.CheckpointTs - c.resolvedTs = checkpointTs + if c.resolvedTs == 0 { + c.resolvedTs = checkpointTs + } minTableBarrierTs := c.latestStatus.MinTableBarrierTs failpoint.Inject("NewChangefeedNoRetryError", func() { @@ -630,7 +632,6 @@ LOOP2: return err } if c.redoMetaMgr.Enabled() { - c.resolvedTs = c.redoMetaMgr.GetFlushedMeta().ResolvedTs c.wg.Add(1) go func() { defer c.wg.Done() @@ -748,7 +749,6 @@ func (c *changefeed) releaseResources(ctx cdcContext.Context) { c.barriers = 
nil c.initialized = false c.isReleased = true - c.resolvedTs = 0 log.Info("changefeed closed", zap.String("namespace", c.id.Namespace), diff --git a/cdc/processor/processor.go b/cdc/processor/processor.go index 5a148240f80..3c279758881 100644 --- a/cdc/processor/processor.go +++ b/cdc/processor/processor.go @@ -166,13 +166,12 @@ func (p *processor) AddTableSpan( // table is `prepared`, and a `isPrepare = false` request indicate that old table should // be stopped on original capture already, it's safe to start replicating data now. if !isPrepare { - redoStartTs := checkpoint.ResolvedTs if p.redo.r.Enabled() { // ResolvedTs is store in external storage when redo log is enabled, so we need to // start table with ResolvedTs in redoDMLManager. - p.redo.r.StartTable(span, redoStartTs) + p.redo.r.StartTable(span, checkpoint.ResolvedTs) } - if err := p.sinkManager.r.StartTable(span, startTs, redoStartTs); err != nil { + if err := p.sinkManager.r.StartTable(span, startTs); err != nil { return false, errors.Trace(err) } } diff --git a/cdc/processor/sinkmanager/manager.go b/cdc/processor/sinkmanager/manager.go index 76cf1685479..74c5513888c 100644 --- a/cdc/processor/sinkmanager/manager.go +++ b/cdc/processor/sinkmanager/manager.go @@ -821,11 +821,7 @@ func (m *SinkManager) AddTable(span tablepb.Span, startTs model.Ts, targetTs mod } // StartTable sets the table(TableSink) state to replicating. -func (m *SinkManager) StartTable( - span tablepb.Span, - startTs model.Ts, - redoStartTs model.Ts, -) error { +func (m *SinkManager) StartTable(span tablepb.Span, startTs model.Ts) error { log.Info("Start table sink", zap.String("namespace", m.changefeedID.Namespace), zap.String("changefeed", m.changefeedID.ID), @@ -852,7 +848,7 @@ func (m *SinkManager) StartTable( if m.redoDMLMgr != nil { m.redoProgressHeap.push(&progress{ span: span, - nextLowerBoundPos: engine.Position{StartTs: 0, CommitTs: redoStartTs + 1}, + nextLowerBoundPos: engine.Position{StartTs: 0, CommitTs: startTs + 1}, version: tableSink.(*tableSinkWrapper).version, }) } diff --git a/cdc/processor/sinkmanager/manager_test.go b/cdc/processor/sinkmanager/manager_test.go index 16806580c02..b70e17fc3ab 100644 --- a/cdc/processor/sinkmanager/manager_test.go +++ b/cdc/processor/sinkmanager/manager_test.go @@ -117,7 +117,7 @@ func TestAddTable(t *testing.T) { require.True(t, ok) require.NotNil(t, tableSink) require.Equal(t, 0, manager.sinkProgressHeap.len(), "Not started table shout not in progress heap") - err := manager.StartTable(span, 1, 1) + err := manager.StartTable(span, 1) require.NoError(t, err) require.Equal(t, uint64(0x7ffffffffffbffff), tableSink.(*tableSinkWrapper).replicateTs) @@ -144,7 +144,7 @@ func TestRemoveTable(t *testing.T) { tableSink, ok := manager.tableSinks.Load(span) require.True(t, ok) require.NotNil(t, tableSink) - err := manager.StartTable(span, 0, 0) + err := manager.StartTable(span, 0) require.NoError(t, err) addTableAndAddEventsToSortEngine(t, e, span) manager.UpdateBarrierTs(4, nil) @@ -191,7 +191,7 @@ func TestGenerateTableSinkTaskWithBarrierTs(t *testing.T) { manager.UpdateBarrierTs(4, nil) manager.UpdateReceivedSorterResolvedTs(span, 5) manager.schemaStorage.AdvanceResolvedTs(5) - err := manager.StartTable(span, 0, 0) + err := manager.StartTable(span, 0) require.NoError(t, err) require.Eventually(t, func() bool { @@ -222,7 +222,7 @@ func TestGenerateTableSinkTaskWithResolvedTs(t *testing.T) { manager.UpdateBarrierTs(4, nil) manager.UpdateReceivedSorterResolvedTs(span, 3) manager.schemaStorage.AdvanceResolvedTs(4) - 
err := manager.StartTable(span, 0, 0) + err := manager.StartTable(span, 0) require.NoError(t, err) require.Eventually(t, func() bool { @@ -252,7 +252,7 @@ func TestGetTableStatsToReleaseMemQuota(t *testing.T) { manager.UpdateBarrierTs(4, nil) manager.UpdateReceivedSorterResolvedTs(span, 5) manager.schemaStorage.AdvanceResolvedTs(5) - err := manager.StartTable(span, 0, 0) + err := manager.StartTable(span, 0) require.NoError(t, err) require.Eventually(t, func() bool { @@ -341,7 +341,7 @@ func TestSinkManagerRunWithErrors(t *testing.T) { source.AddTable(span, "test", 100) manager.AddTable(span, 100, math.MaxUint64) - manager.StartTable(span, 100, 0) + manager.StartTable(span, 100) source.Add(span, model.NewResolvedPolymorphicEvent(0, 101)) manager.UpdateReceivedSorterResolvedTs(span, 101) manager.UpdateBarrierTs(101, nil) diff --git a/cdc/scheduler/internal/v3/agent/main_test.go b/cdc/scheduler/internal/v3/agent/main_test.go deleted file mode 100644 index cb7775cac71..00000000000 --- a/cdc/scheduler/internal/v3/agent/main_test.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2022 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package agent - -import ( - "testing" - - "github.com/pingcap/tiflow/pkg/leakutil" -) - -func TestMain(m *testing.M) { - leakutil.SetUpLeakTest(m) -} diff --git a/cdc/scheduler/internal/v3/agent/table.go b/cdc/scheduler/internal/v3/agent/table.go index be1f7ec929b..506bef29a54 100644 --- a/cdc/scheduler/internal/v3/agent/table.go +++ b/cdc/scheduler/internal/v3/agent/table.go @@ -75,7 +75,7 @@ func (t *tableSpan) getTableSpanStatus(collectStat bool) tablepb.TableStatus { func newAddTableResponseMessage(status tablepb.TableStatus) *schedulepb.Message { if status.Checkpoint.ResolvedTs < status.Checkpoint.CheckpointTs { - log.Panic("schedulerv3: resolved ts should not less than checkpoint ts", + log.Warn("schedulerv3: resolved ts should not less than checkpoint ts", zap.Any("tableStatus", status), zap.Any("checkpoint", status.Checkpoint.CheckpointTs), zap.Any("resolved", status.Checkpoint.ResolvedTs)) @@ -100,7 +100,7 @@ func newRemoveTableResponseMessage(status tablepb.TableStatus) *schedulepb.Messa // Advance resolved ts to checkpoint ts if table is removed. status.Checkpoint.ResolvedTs = status.Checkpoint.CheckpointTs } else { - log.Panic("schedulerv3: resolved ts should not less than checkpoint ts", + log.Warn("schedulerv3: resolved ts should not less than checkpoint ts", zap.Any("tableStatus", status), zap.Any("checkpoint", status.Checkpoint.CheckpointTs), zap.Any("resolved", status.Checkpoint.ResolvedTs)) diff --git a/cdc/scheduler/internal/v3/compat/main_test.go b/cdc/scheduler/internal/v3/compat/main_test.go deleted file mode 100644 index d8ad350ef4d..00000000000 --- a/cdc/scheduler/internal/v3/compat/main_test.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2022 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package compat - -import ( - "testing" - - "github.com/pingcap/tiflow/pkg/leakutil" -) - -func TestMain(m *testing.M) { - leakutil.SetUpLeakTest(m) -} diff --git a/cdc/scheduler/internal/v3/coordinator.go b/cdc/scheduler/internal/v3/coordinator.go index 2d536eb23d5..e27bdf49c97 100644 --- a/cdc/scheduler/internal/v3/coordinator.go +++ b/cdc/scheduler/internal/v3/coordinator.go @@ -334,7 +334,7 @@ func (c *coordinator) poll( currentSpans := c.reconciler.Reconcile( ctx, &c.tableRanges, replications, c.captureM.Captures, c.compat) allTasks := c.schedulerM.Schedule( - checkpointTs, currentSpans, c.captureM.Captures, replications, runningTasks, c.redoMetaManager) + checkpointTs, currentSpans, c.captureM.Captures, replications, runningTasks) // Handle generated schedule tasks. msgs, err = c.replicationM.HandleTasks(allTasks) diff --git a/cdc/scheduler/internal/v3/coordinator_bench_test.go b/cdc/scheduler/internal/v3/coordinator_bench_test.go index cdf3adc86d3..75094ba1d91 100644 --- a/cdc/scheduler/internal/v3/coordinator_bench_test.go +++ b/cdc/scheduler/internal/v3/coordinator_bench_test.go @@ -149,7 +149,7 @@ func BenchmarkCoordinatorHeartbeatResponse(b *testing.B) { captureID := fmt.Sprint(i % captureCount) span := tablepb.Span{TableID: tableID} rep, err := replication.NewReplicationSet( - span, tablepb.Checkpoint{}, map[string]*tablepb.TableStatus{ + span, 0, map[string]*tablepb.TableStatus{ captureID: { Span: tablepb.Span{TableID: tableID}, State: tablepb.TableStateReplicating, diff --git a/cdc/scheduler/internal/v3/keyspan/main_test.go b/cdc/scheduler/internal/v3/keyspan/main_test.go deleted file mode 100644 index da122358394..00000000000 --- a/cdc/scheduler/internal/v3/keyspan/main_test.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2022 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package keyspan - -import ( - "testing" - - "github.com/pingcap/tiflow/pkg/leakutil" -) - -func TestMain(m *testing.M) { - leakutil.SetUpLeakTest(m) -} diff --git a/cdc/scheduler/internal/v3/member/main_test.go b/cdc/scheduler/internal/v3/member/main_test.go deleted file mode 100644 index 02b77a22186..00000000000 --- a/cdc/scheduler/internal/v3/member/main_test.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2022 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package member - -import ( - "testing" - - "github.com/pingcap/tiflow/pkg/leakutil" -) - -func TestMain(m *testing.M) { - leakutil.SetUpLeakTest(m) -} diff --git a/cdc/scheduler/internal/v3/replication/main_test.go b/cdc/scheduler/internal/v3/replication/main_test.go deleted file mode 100644 index cefa385ed62..00000000000 --- a/cdc/scheduler/internal/v3/replication/main_test.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2022 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package replication - -import ( - "testing" - - "github.com/pingcap/tiflow/pkg/leakutil" -) - -func TestMain(m *testing.M) { - leakutil.SetUpLeakTest(m) -} diff --git a/cdc/scheduler/internal/v3/replication/replication_manager.go b/cdc/scheduler/internal/v3/replication/replication_manager.go index 416aaf4b29d..0ec7c7a671b 100644 --- a/cdc/scheduler/internal/v3/replication/replication_manager.go +++ b/cdc/scheduler/internal/v3/replication/replication_manager.go @@ -78,14 +78,14 @@ func (t MoveTable) String() string { // AddTable is a schedule task for adding a table. type AddTable struct { - Span tablepb.Span - CaptureID model.CaptureID - Checkpoint tablepb.Checkpoint + Span tablepb.Span + CaptureID model.CaptureID + CheckpointTs model.Ts } func (t AddTable) String() string { - return fmt.Sprintf("AddTable, span: %s, capture: %s, checkpointTs: %d, resolvedTs: %d", - t.Span.String(), t.CaptureID, t.Checkpoint.CheckpointTs, t.Checkpoint.ResolvedTs) + return fmt.Sprintf("AddTable, span: %s, capture: %s, checkpointTs: %d", + t.Span.String(), t.CaptureID, t.CheckpointTs) } // RemoveTable is a schedule task for removing a table. @@ -200,12 +200,7 @@ func (r *Manager) HandleCaptureChanges( } var err error spanStatusMap.Ascend(func(span tablepb.Span, status map[string]*tablepb.TableStatus) bool { - checkpoint := tablepb.Checkpoint{ - CheckpointTs: checkpointTs, - // Note that the real resolved ts is stored in the status. 
- ResolvedTs: checkpointTs, - } - table, err1 := NewReplicationSet(span, checkpoint, status, r.changefeedID) + table, err1 := NewReplicationSet(span, checkpointTs, status, r.changefeedID) if err1 != nil { err = errors.Trace(err1) return false @@ -442,7 +437,7 @@ func (r *Manager) handleAddTableTask( var err error table, ok := r.spans.Get(task.Span) if !ok { - table, err = NewReplicationSet(task.Span, task.Checkpoint, nil, r.changefeedID) + table, err = NewReplicationSet(task.Span, task.CheckpointTs, nil, r.changefeedID) if err != nil { return nil, errors.Trace(err) } diff --git a/cdc/scheduler/internal/v3/replication/replication_manager_test.go b/cdc/scheduler/internal/v3/replication/replication_manager_test.go index 6cbfab08396..0584e62972c 100644 --- a/cdc/scheduler/internal/v3/replication/replication_manager_test.go +++ b/cdc/scheduler/internal/v3/replication/replication_manager_test.go @@ -35,12 +35,7 @@ func TestReplicationManagerHandleAddTableTask(t *testing.T) { // Absent -> Prepare msgs, err := r.HandleTasks([]*ScheduleTask{{ AddTable: &AddTable{ - Span: spanz.TableIDToComparableSpan(1), - CaptureID: "1", - Checkpoint: tablepb.Checkpoint{ - CheckpointTs: 1, - ResolvedTs: 1, - }, + Span: spanz.TableIDToComparableSpan(1), CaptureID: "1", CheckpointTs: 1, }, Accept: func() { addTableCh <- 1 @@ -155,15 +150,9 @@ func TestReplicationManagerRemoveTable(t *testing.T) { // Add the table. span := spanz.TableIDToComparableSpan(1) - tbl, err := NewReplicationSet(span, - tablepb.Checkpoint{ - CheckpointTs: 0, - ResolvedTs: 0, - }, - map[string]*tablepb.TableStatus{ - "1": {Span: span, State: tablepb.TableStateReplicating}, - }, - model.ChangeFeedID{}) + tbl, err := NewReplicationSet(span, 0, map[string]*tablepb.TableStatus{ + "1": {Span: span, State: tablepb.TableStateReplicating}, + }, model.ChangeFeedID{}) require.Nil(t, err) require.Equal(t, ReplicationSetStateReplicating, tbl.State) r.spans.ReplaceOrInsert(spanz.TableIDToComparableSpan(1), tbl) @@ -257,15 +246,9 @@ func TestReplicationManagerMoveTable(t *testing.T) { // Add the table. span := spanz.TableIDToComparableSpan(1) - tbl, err := NewReplicationSet(span, - tablepb.Checkpoint{ - CheckpointTs: 0, - ResolvedTs: 0, - }, - map[string]*tablepb.TableStatus{ - source: {Span: span, State: tablepb.TableStateReplicating}, - }, - model.ChangeFeedID{}) + tbl, err := NewReplicationSet(span, 0, map[string]*tablepb.TableStatus{ + source: {Span: span, State: tablepb.TableStateReplicating}, + }, model.ChangeFeedID{}) require.Nil(t, err) require.Equal(t, ReplicationSetStateReplicating, tbl.State) r.spans.ReplaceOrInsert(spanz.TableIDToComparableSpan(1), tbl) @@ -394,23 +377,19 @@ func TestReplicationManagerBurstBalance(t *testing.T) { r := NewReplicationManager(1, model.ChangeFeedID{}) balanceTableCh := make(chan int, 1) - checkpoint := tablepb.Checkpoint{ - CheckpointTs: 1, - ResolvedTs: 1, - } // Burst balance is not limited by maxTaskConcurrency. 
msgs, err := r.HandleTasks([]*ScheduleTask{{ AddTable: &AddTable{ - Span: spanz.TableIDToComparableSpan(1), CaptureID: "0", Checkpoint: checkpoint, + Span: spanz.TableIDToComparableSpan(1), CaptureID: "0", CheckpointTs: 1, }, }, { BurstBalance: &BurstBalance{ AddTables: []AddTable{{ - Span: spanz.TableIDToComparableSpan(1), CaptureID: "1", Checkpoint: checkpoint, + Span: spanz.TableIDToComparableSpan(1), CaptureID: "1", CheckpointTs: 1, }, { - Span: spanz.TableIDToComparableSpan(2), CaptureID: "2", Checkpoint: checkpoint, + Span: spanz.TableIDToComparableSpan(2), CaptureID: "2", CheckpointTs: 1, }, { - Span: spanz.TableIDToComparableSpan(3), CaptureID: "3", Checkpoint: checkpoint, + Span: spanz.TableIDToComparableSpan(3), CaptureID: "3", CheckpointTs: 1, }}, }, Accept: func() { @@ -445,25 +424,19 @@ func TestReplicationManagerBurstBalance(t *testing.T) { // Add a new table. span := spanz.TableIDToComparableSpan(5) - table5, err := NewReplicationSet(span, - tablepb.Checkpoint{}, - map[string]*tablepb.TableStatus{ - "5": {Span: span, State: tablepb.TableStateReplicating}, - }, model.ChangeFeedID{}) + table5, err := NewReplicationSet(span, 0, map[string]*tablepb.TableStatus{ + "5": {Span: span, State: tablepb.TableStateReplicating}, + }, model.ChangeFeedID{}) require.Nil(t, err) r.spans.ReplaceOrInsert(span, table5) - checkpoint = tablepb.Checkpoint{ - CheckpointTs: 2, - ResolvedTs: 2, - } // More burst balance is still allowed. msgs, err = r.HandleTasks([]*ScheduleTask{{ BurstBalance: &BurstBalance{ AddTables: []AddTable{{ - Span: spanz.TableIDToComparableSpan(4), CaptureID: "4", Checkpoint: checkpoint, + Span: spanz.TableIDToComparableSpan(4), CaptureID: "4", CheckpointTs: 2, }, { - Span: spanz.TableIDToComparableSpan(1), CaptureID: "0", Checkpoint: checkpoint, + Span: spanz.TableIDToComparableSpan(1), CaptureID: "0", CheckpointTs: 2, }}, RemoveTables: []RemoveTable{{ Span: spanz.TableIDToComparableSpan(5), CaptureID: "5", @@ -516,13 +489,13 @@ func TestReplicationManagerBurstBalanceMoveTables(t *testing.T) { var err error // Two tables in "1". 
span := spanz.TableIDToComparableSpan(1) - table, err := NewReplicationSet(span, tablepb.Checkpoint{}, map[string]*tablepb.TableStatus{ + table, err := NewReplicationSet(span, 0, map[string]*tablepb.TableStatus{ "1": {Span: span, State: tablepb.TableStateReplicating}, }, model.ChangeFeedID{}) require.Nil(t, err) r.spans.ReplaceOrInsert(span, table) span2 := spanz.TableIDToComparableSpan(2) - table2, err := NewReplicationSet(span2, tablepb.Checkpoint{}, map[string]*tablepb.TableStatus{ + table2, err := NewReplicationSet(span2, 0, map[string]*tablepb.TableStatus{ "1": { Span: span2, State: tablepb.TableStateReplicating, Checkpoint: tablepb.Checkpoint{CheckpointTs: 1, ResolvedTs: 1}, @@ -635,11 +608,7 @@ func TestReplicationManagerAdvanceCheckpoint(t *testing.T) { t.Parallel() r := NewReplicationManager(1, model.ChangeFeedID{}) span := spanz.TableIDToComparableSpan(1) - rs, err := NewReplicationSet(span, - tablepb.Checkpoint{ - CheckpointTs: 10, - ResolvedTs: 10, - }, + rs, err := NewReplicationSet(span, model.Ts(10), map[model.CaptureID]*tablepb.TableStatus{ "1": { Span: spanz.TableIDToComparableSpan(1), @@ -654,11 +623,7 @@ func TestReplicationManagerAdvanceCheckpoint(t *testing.T) { r.spans.ReplaceOrInsert(span, rs) span2 := spanz.TableIDToComparableSpan(2) - rs, err = NewReplicationSet(span2, - tablepb.Checkpoint{ - CheckpointTs: 15, - ResolvedTs: 15, - }, + rs, err = NewReplicationSet(span2, model.Ts(15), map[model.CaptureID]*tablepb.TableStatus{ "2": { Span: spanz.TableIDToComparableSpan(2), @@ -694,11 +659,7 @@ func TestReplicationManagerAdvanceCheckpoint(t *testing.T) { require.Equal(t, checkpointCannotProceed, resolved) span3 := spanz.TableIDToComparableSpan(3) - rs, err = NewReplicationSet(span3, - tablepb.Checkpoint{ - CheckpointTs: 5, - ResolvedTs: 5, - }, + rs, err = NewReplicationSet(span3, model.Ts(5), map[model.CaptureID]*tablepb.TableStatus{ "1": { Span: spanz.TableIDToComparableSpan(3), @@ -725,11 +686,7 @@ func TestReplicationManagerAdvanceCheckpoint(t *testing.T) { currentTables.UpdateTables([]model.TableID{1, 2, 3, 4}) span4 := spanz.TableIDToComparableSpan(4) - rs, err = NewReplicationSet(span4, - tablepb.Checkpoint{ - CheckpointTs: 3, - ResolvedTs: 3, - }, + rs, err = NewReplicationSet(span4, model.Ts(3), map[model.CaptureID]*tablepb.TableStatus{ "1": { Span: spanz.TableIDToComparableSpan(4), @@ -753,11 +710,7 @@ func TestReplicationManagerAdvanceCheckpoint(t *testing.T) { span5_2 := spanz.TableIDToComparableSpan(5) span5_2.StartKey = append(span5_2.StartKey, 0) for _, span := range []tablepb.Span{span5_1, span5_2} { - rs, err = NewReplicationSet(span, - tablepb.Checkpoint{ - CheckpointTs: 3, - ResolvedTs: 3, - }, + rs, err = NewReplicationSet(span, model.Ts(3), map[model.CaptureID]*tablepb.TableStatus{ "1": { Span: span, @@ -791,11 +744,7 @@ func TestReplicationManagerAdvanceCheckpoint(t *testing.T) { // redo is enabled currentTables.UpdateTables([]model.TableID{4}) spanRedo := spanz.TableIDToComparableSpan(4) - rs, err = NewReplicationSet(spanRedo, - tablepb.Checkpoint{ - CheckpointTs: 3, - ResolvedTs: 3, - }, + rs, err = NewReplicationSet(spanRedo, model.Ts(3), map[model.CaptureID]*tablepb.TableStatus{ "1": { Span: spanz.TableIDToComparableSpan(4), @@ -821,11 +770,7 @@ func TestReplicationManagerAdvanceCheckpointWithRedoEnabled(t *testing.T) { t.Parallel() r := NewReplicationManager(1, model.ChangeFeedID{}) span := spanz.TableIDToComparableSpan(1) - rs, err := NewReplicationSet(span, - tablepb.Checkpoint{ - CheckpointTs: 10, - ResolvedTs: 10, - }, + rs, err := 
NewReplicationSet(span, model.Ts(10), map[model.CaptureID]*tablepb.TableStatus{ "1": { Span: spanz.TableIDToComparableSpan(1), @@ -840,11 +785,7 @@ func TestReplicationManagerAdvanceCheckpointWithRedoEnabled(t *testing.T) { r.spans.ReplaceOrInsert(span, rs) span2 := spanz.TableIDToComparableSpan(2) - rs, err = NewReplicationSet(span2, - tablepb.Checkpoint{ - CheckpointTs: 15, - ResolvedTs: 15, - }, + rs, err = NewReplicationSet(span2, model.Ts(15), map[model.CaptureID]*tablepb.TableStatus{ "2": { Span: spanz.TableIDToComparableSpan(2), diff --git a/cdc/scheduler/internal/v3/replication/replication_set.go b/cdc/scheduler/internal/v3/replication/replication_set.go index f62752f27f2..fec432499a8 100644 --- a/cdc/scheduler/internal/v3/replication/replication_set.go +++ b/cdc/scheduler/internal/v3/replication/replication_set.go @@ -141,7 +141,7 @@ type ReplicationSet struct { //nolint:revive // NewReplicationSet returns a new replication set. func NewReplicationSet( span tablepb.Span, - checkpoint tablepb.Checkpoint, + checkpoint model.Ts, tableStatus map[model.CaptureID]*tablepb.TableStatus, changefeed model.ChangeFeedID, ) (*ReplicationSet, error) { @@ -149,7 +149,10 @@ func NewReplicationSet( Changefeed: changefeed, Span: span, Captures: make(map[string]Role), - Checkpoint: checkpoint, + Checkpoint: tablepb.Checkpoint{ + CheckpointTs: checkpoint, + ResolvedTs: checkpoint, + }, } // Count of captures that is in Stopping states. stoppingCount := 0 @@ -159,9 +162,7 @@ func NewReplicationSet( return nil, r.inconsistentError(table, captureID, "schedulerv3: table id inconsistent") } - if err := r.updateCheckpointAndStats(table.Checkpoint, table.Stats); err != nil { - return nil, errors.Trace(err) - } + r.updateCheckpointAndStats(table.Checkpoint, table.Stats) switch table.State { case tablepb.TableStateReplicating: @@ -497,8 +498,8 @@ func (r *ReplicationSet) pollOnPrepare( } case tablepb.TableStateReplicating: if r.Primary == captureID { - err := r.updateCheckpointAndStats(input.Checkpoint, input.Stats) - return nil, false, err + r.updateCheckpointAndStats(input.Checkpoint, input.Stats) + return nil, false, nil } case tablepb.TableStateStopping, tablepb.TableStateStopped: if r.Primary == captureID { @@ -611,9 +612,7 @@ func (r *ReplicationSet) pollOnCommit( case tablepb.TableStateStopped, tablepb.TableStateAbsent: if r.Primary == captureID { - if err := r.updateCheckpointAndStats(input.Checkpoint, input.Stats); err != nil { - return nil, false, errors.Trace(err) - } + r.updateCheckpointAndStats(input.Checkpoint, input.Stats) original := r.Primary r.clearPrimary() if !r.hasRole(RoleSecondary) { @@ -685,9 +684,7 @@ func (r *ReplicationSet) pollOnCommit( case tablepb.TableStateReplicating: if r.Primary == captureID { - if err := r.updateCheckpointAndStats(input.Checkpoint, input.Stats); err != nil { - return nil, false, errors.Trace(err) - } + r.updateCheckpointAndStats(input.Checkpoint, input.Stats) if r.hasRole(RoleSecondary) { // Original primary is not stopped, ask for stopping. 
return &schedulepb.Message{ @@ -722,8 +719,8 @@ func (r *ReplicationSet) pollOnCommit( case tablepb.TableStateStopping: if r.Primary == captureID && r.hasRole(RoleSecondary) { - err := r.updateCheckpointAndStats(input.Checkpoint, input.Stats) - return nil, false, err + r.updateCheckpointAndStats(input.Checkpoint, input.Stats) + return nil, false, nil } else if r.isInRole(captureID, RoleUndetermined) { log.Info("schedulerv3: capture is stopping during Commit", zap.String("namespace", r.Changefeed.Namespace), @@ -752,8 +749,8 @@ func (r *ReplicationSet) pollOnReplicating( switch input.State { case tablepb.TableStateReplicating: if r.Primary == captureID { - err := r.updateCheckpointAndStats(input.Checkpoint, input.Stats) - return nil, false, err + r.updateCheckpointAndStats(input.Checkpoint, input.Stats) + return nil, false, nil } return nil, false, r.multiplePrimaryError( input, captureID, "schedulerv3: multiple primary") @@ -764,10 +761,7 @@ func (r *ReplicationSet) pollOnReplicating( case tablepb.TableStateStopping: case tablepb.TableStateStopped: if r.Primary == captureID { - if err := r.updateCheckpointAndStats(input.Checkpoint, input.Stats); err != nil { - return nil, false, errors.Trace(err) - } - + r.updateCheckpointAndStats(input.Checkpoint, input.Stats) // Primary is stopped, but we still has secondary. // Clear primary and promote secondary when it's prepared. log.Info("schedulerv3: primary is stopped during Replicating", @@ -997,7 +991,7 @@ func (r *ReplicationSet) handleCaptureShutdown( func (r *ReplicationSet) updateCheckpointAndStats( checkpoint tablepb.Checkpoint, stats tablepb.Stats, -) error { +) { if checkpoint.ResolvedTs < checkpoint.CheckpointTs { log.Warn("schedulerv3: resolved ts should not less than checkpoint ts", zap.String("namespace", r.Changefeed.Namespace), @@ -1025,11 +1019,8 @@ func (r *ReplicationSet) updateCheckpointAndStats( zap.Any("replicationSet", r), zap.Any("checkpointTs", r.Checkpoint.CheckpointTs), zap.Any("resolvedTs", r.Checkpoint.ResolvedTs)) - return errors.ErrInvalidCheckpointTs.GenWithStackByArgs(r.Checkpoint.CheckpointTs, - r.Checkpoint.ResolvedTs) } r.Stats = stats - return nil } // SetHeap is a max-heap, it implements heap.Interface. 
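The hunks above change the shape of NewReplicationSet (a single model.Ts now seeds both CheckpointTs and ResolvedTs) and relax updateCheckpointAndStats so that an inconsistent checkpoint reported by a capture is only logged instead of being surfaced as ErrInvalidCheckpointTs. A rough, self-contained sketch of the resulting behavior follows; the Ts and Checkpoint types are local stand-ins for model.Ts and tablepb.Checkpoint, and the update logic is simplified for illustration, not copied from the real implementation.

package main

import "fmt"

// Stand-ins for model.Ts and tablepb.Checkpoint; illustration only.
type Ts uint64

type Checkpoint struct {
	CheckpointTs Ts
	ResolvedTs   Ts
}

type ReplicationSet struct {
	Checkpoint Checkpoint
}

// newReplicationSet mirrors the new constructor contract: one timestamp
// initializes both checkpoint fields.
func newReplicationSet(checkpoint Ts) *ReplicationSet {
	return &ReplicationSet{Checkpoint: Checkpoint{
		CheckpointTs: checkpoint,
		ResolvedTs:   checkpoint,
	}}
}

// updateCheckpoint mirrors the new no-error contract: a report whose
// resolved ts lags its checkpoint ts is only warned about, and the stored
// checkpoint advances monotonically in any case.
func (r *ReplicationSet) updateCheckpoint(c Checkpoint) {
	if c.ResolvedTs < c.CheckpointTs {
		fmt.Println("warn: resolved ts is less than checkpoint ts") // real code uses log.Warn
	}
	if r.Checkpoint.CheckpointTs < c.CheckpointTs {
		r.Checkpoint.CheckpointTs = c.CheckpointTs
	}
	if r.Checkpoint.ResolvedTs < c.ResolvedTs {
		r.Checkpoint.ResolvedTs = c.ResolvedTs
	}
}

func main() {
	r := newReplicationSet(10)
	r.updateCheckpoint(Checkpoint{CheckpointTs: 12, ResolvedTs: 15})
	r.updateCheckpoint(Checkpoint{CheckpointTs: 2, ResolvedTs: 1}) // stale/reversed: warned, never an error
	fmt.Printf("%+v\n", r.Checkpoint)                              // {CheckpointTs:12 ResolvedTs:15}
}

This lines up with the new TestUpdateCheckpointAndStats below, which exercises both a well-formed and a reversed checkpoint and expects no error or panic in either case.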
diff --git a/cdc/scheduler/internal/v3/replication/replication_set_test.go b/cdc/scheduler/internal/v3/replication/replication_set_test.go index 1da88a55753..da417438bc0 100644 --- a/cdc/scheduler/internal/v3/replication/replication_set_test.go +++ b/cdc/scheduler/internal/v3/replication/replication_set_test.go @@ -220,10 +220,7 @@ func TestNewReplicationSet(t *testing.T) { for id, tc := range testcases { set := tc.set status := tc.tableStatus - checkpoint := tablepb.Checkpoint{ - CheckpointTs: tc.checkpoint, - ResolvedTs: tc.checkpoint, - } + checkpoint := tc.checkpoint span := tablepb.Span{TableID: 0} output, err := NewReplicationSet(span, checkpoint, status, model.ChangeFeedID{}) @@ -271,7 +268,7 @@ func TestReplicationSetPoll(t *testing.T) { } } span := tablepb.Span{TableID: 1} - r, _ := NewReplicationSet(span, tablepb.Checkpoint{}, status, model.ChangeFeedID{}) + r, _ := NewReplicationSet(span, 0, status, model.ChangeFeedID{}) var tableStates []int for state := range tablepb.TableState_name { tableStates = append(tableStates, int(state)) @@ -303,7 +300,7 @@ func TestReplicationSetPollUnknownCapture(t *testing.T) { tableID := model.TableID(1) span := tablepb.Span{TableID: tableID} - r, err := NewReplicationSet(span, tablepb.Checkpoint{}, map[model.CaptureID]*tablepb.TableStatus{ + r, err := NewReplicationSet(span, 0, map[model.CaptureID]*tablepb.TableStatus{ "1": { Span: tablepb.Span{TableID: tableID}, State: tablepb.TableStateReplicating, @@ -340,7 +337,7 @@ func TestReplicationSetAddTable(t *testing.T) { from := "1" tableID := model.TableID(1) span := tablepb.Span{TableID: tableID} - r, err := NewReplicationSet(span, tablepb.Checkpoint{}, nil, model.ChangeFeedID{}) + r, err := NewReplicationSet(span, 0, nil, model.ChangeFeedID{}) require.Nil(t, err) // Absent -> Prepare @@ -485,7 +482,7 @@ func TestReplicationSetRemoveTable(t *testing.T) { from := "1" tableID := model.TableID(1) span := tablepb.Span{TableID: tableID} - r, err := NewReplicationSet(span, tablepb.Checkpoint{}, nil, model.ChangeFeedID{}) + r, err := NewReplicationSet(span, 0, nil, model.ChangeFeedID{}) require.Nil(t, err) // Ignore removing table if it's not in replicating. 
@@ -566,7 +563,7 @@ func TestReplicationSetMoveTable(t *testing.T) { tableID := model.TableID(1) span := tablepb.Span{TableID: tableID} - r, err := NewReplicationSet(span, tablepb.Checkpoint{}, nil, model.ChangeFeedID{}) + r, err := NewReplicationSet(span, 0, nil, model.ChangeFeedID{}) require.Nil(t, err) source := "1" @@ -798,7 +795,7 @@ func TestReplicationSetCaptureShutdown(t *testing.T) { from := "1" tableID := model.TableID(1) span := tablepb.Span{TableID: tableID} - r, err := NewReplicationSet(span, tablepb.Checkpoint{}, nil, model.ChangeFeedID{}) + r, err := NewReplicationSet(span, 0, nil, model.ChangeFeedID{}) require.Nil(t, err) // Add table, Absent -> Prepare @@ -1104,7 +1101,7 @@ func TestReplicationSetCaptureShutdownAfterReconstructCommitState(t *testing.T) from: {Span: tablepb.Span{TableID: tableID}, State: tablepb.TableStatePrepared}, } span := tablepb.Span{TableID: tableID} - r, err := NewReplicationSet(span, tablepb.Checkpoint{}, tableStatus, model.ChangeFeedID{}) + r, err := NewReplicationSet(span, 0, tableStatus, model.ChangeFeedID{}) require.Nil(t, err) require.Equal(t, ReplicationSetStateCommit, r.State) require.Equal(t, "", r.Primary) @@ -1125,7 +1122,7 @@ func TestReplicationSetMoveTableWithHeartbeatResponse(t *testing.T) { tableID := model.TableID(1) span := tablepb.Span{TableID: tableID} - r, err := NewReplicationSet(span, tablepb.Checkpoint{}, nil, model.ChangeFeedID{}) + r, err := NewReplicationSet(span, 0, nil, model.ChangeFeedID{}) require.Nil(t, err) source := "1" @@ -1213,7 +1210,7 @@ func TestReplicationSetMoveTableSameDestCapture(t *testing.T) { tableID := model.TableID(1) span := tablepb.Span{TableID: tableID} - r, err := NewReplicationSet(span, tablepb.Checkpoint{}, nil, model.ChangeFeedID{}) + r, err := NewReplicationSet(span, 0, nil, model.ChangeFeedID{}) require.Nil(t, err) source := "1" @@ -1246,7 +1243,7 @@ func TestReplicationSetCommitRestart(t *testing.T) { }, } span := tablepb.Span{TableID: 0} - r, err := NewReplicationSet(span, tablepb.Checkpoint{}, tableStatus, model.ChangeFeedID{}) + r, err := NewReplicationSet(span, 0, tableStatus, model.ChangeFeedID{}) require.Nil(t, err) require.Equal(t, ReplicationSetStateCommit, r.State) require.EqualValues(t, RoleSecondary, r.Captures["1"]) @@ -1329,7 +1326,7 @@ func TestReplicationSetRemoveRestart(t *testing.T) { }, } span := tablepb.Span{TableID: 0} - r, err := NewReplicationSet(span, tablepb.Checkpoint{}, tableStatus, model.ChangeFeedID{}) + r, err := NewReplicationSet(span, 0, tableStatus, model.ChangeFeedID{}) require.Nil(t, err) require.Equal(t, ReplicationSetStateRemoving, r.State) require.False(t, r.hasRole(RoleSecondary)) @@ -1458,3 +1455,29 @@ func TestReplicationSetHeap_MinK(t *testing.T) { require.Equal(t, expectedTables, tables) require.Equal(t, 0, h.Len()) } + +func TestUpdateCheckpointAndStats(t *testing.T) { + cases := []struct { + checkpoint tablepb.Checkpoint + stats tablepb.Stats + }{ + { + checkpoint: tablepb.Checkpoint{ + CheckpointTs: 1, + ResolvedTs: 2, + }, + stats: tablepb.Stats{}, + }, + { + checkpoint: tablepb.Checkpoint{ + CheckpointTs: 2, + ResolvedTs: 1, + }, + stats: tablepb.Stats{}, + }, + } + r := &ReplicationSet{} + for _, c := range cases { + r.updateCheckpointAndStats(c.checkpoint, c.stats) + } +} diff --git a/cdc/scheduler/internal/v3/scheduler/main_test.go b/cdc/scheduler/internal/v3/scheduler/main_test.go deleted file mode 100644 index 47da658bfe6..00000000000 --- a/cdc/scheduler/internal/v3/scheduler/main_test.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2022 
PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package scheduler - -import ( - "testing" - - "github.com/pingcap/tiflow/pkg/leakutil" -) - -func TestMain(m *testing.M) { - leakutil.SetUpLeakTest(m) -} diff --git a/cdc/scheduler/internal/v3/scheduler/scheduler.go b/cdc/scheduler/internal/v3/scheduler/scheduler.go index fd117849bc3..96088ec52ff 100644 --- a/cdc/scheduler/internal/v3/scheduler/scheduler.go +++ b/cdc/scheduler/internal/v3/scheduler/scheduler.go @@ -24,7 +24,7 @@ import ( type scheduler interface { Name() string Schedule( - checkpoint tablepb.Checkpoint, + checkpointTs model.Ts, currentSpans []tablepb.Span, aliveCaptures map[model.CaptureID]*member.CaptureStatus, replications *spanz.BtreeMap[*replication.ReplicationSet], diff --git a/cdc/scheduler/internal/v3/scheduler/scheduler_balance.go b/cdc/scheduler/internal/v3/scheduler/scheduler_balance.go index 26dce7da249..21c642a9178 100644 --- a/cdc/scheduler/internal/v3/scheduler/scheduler_balance.go +++ b/cdc/scheduler/internal/v3/scheduler/scheduler_balance.go @@ -56,7 +56,7 @@ func (b *balanceScheduler) Name() string { } func (b *balanceScheduler) Schedule( - _ tablepb.Checkpoint, + _ model.Ts, _ []tablepb.Span, captures map[model.CaptureID]*member.CaptureStatus, replications *spanz.BtreeMap[*replication.ReplicationSet], diff --git a/cdc/scheduler/internal/v3/scheduler/scheduler_balance_test.go b/cdc/scheduler/internal/v3/scheduler/scheduler_balance_test.go index a66b52b328e..a8efb07ad35 100644 --- a/cdc/scheduler/internal/v3/scheduler/scheduler_balance_test.go +++ b/cdc/scheduler/internal/v3/scheduler/scheduler_balance_test.go @@ -18,7 +18,6 @@ import ( "time" "github.com/pingcap/tiflow/cdc/model" - "github.com/pingcap/tiflow/cdc/processor/tablepb" "github.com/pingcap/tiflow/cdc/scheduler/internal/v3/member" "github.com/pingcap/tiflow/cdc/scheduler/internal/v3/replication" "github.com/pingcap/tiflow/pkg/spanz" @@ -28,7 +27,6 @@ import ( func TestSchedulerBalanceCaptureOnline(t *testing.T) { t.Parallel() - var checkpoint tablepb.Checkpoint sched := newBalanceScheduler(time.Duration(0), 3) sched.random = nil @@ -39,14 +37,14 @@ func TestSchedulerBalanceCaptureOnline(t *testing.T) { 1: {State: replication.ReplicationSetStateReplicating, Primary: "a"}, 2: {State: replication.ReplicationSetStateReplicating, Primary: "a"}, }) - tasks := sched.Schedule(checkpoint, currentTables, captures, replications) + tasks := sched.Schedule(0, currentTables, captures, replications) require.Len(t, tasks, 1) require.NotNil(t, tasks[0].MoveTable) require.Equal(t, tasks[0].MoveTable.Span.TableID, model.TableID(1)) // New capture "b" online, but this time has capture is stopping captures["a"].State = member.CaptureStateStopping - tasks = sched.Schedule(checkpoint, currentTables, captures, replications) + tasks = sched.Schedule(0, currentTables, captures, replications) require.Len(t, tasks, 0) // New capture "b" online, it keeps balancing, even though it has not pass @@ -58,7 +56,7 @@ func TestSchedulerBalanceCaptureOnline(t *testing.T) { 1: {State: replication.ReplicationSetStateReplicating, Primary: 
"a"}, 2: {State: replication.ReplicationSetStateReplicating, Primary: "a"}, }) - tasks = sched.Schedule(checkpoint, currentTables, captures, replications) + tasks = sched.Schedule(0, currentTables, captures, replications) require.Len(t, tasks, 1) // New capture "b" online, but this time it not pass check balance interval. @@ -70,14 +68,13 @@ func TestSchedulerBalanceCaptureOnline(t *testing.T) { 1: {State: replication.ReplicationSetStateReplicating, Primary: "a"}, 2: {State: replication.ReplicationSetStateReplicating, Primary: "a"}, }) - tasks = sched.Schedule(checkpoint, currentTables, captures, replications) + tasks = sched.Schedule(0, currentTables, captures, replications) require.Len(t, tasks, 0) } func TestSchedulerBalanceTaskLimit(t *testing.T) { t.Parallel() - var checkpoint tablepb.Checkpoint sched := newBalanceScheduler(time.Duration(0), 2) sched.random = nil @@ -90,10 +87,10 @@ func TestSchedulerBalanceTaskLimit(t *testing.T) { 3: {State: replication.ReplicationSetStateReplicating, Primary: "a"}, 4: {State: replication.ReplicationSetStateReplicating, Primary: "a"}, }) - tasks := sched.Schedule(checkpoint, currentTables, captures, replications) + tasks := sched.Schedule(0, currentTables, captures, replications) require.Len(t, tasks, 2) sched = newBalanceScheduler(time.Duration(0), 1) - tasks = sched.Schedule(checkpoint, currentTables, captures, replications) + tasks = sched.Schedule(0, currentTables, captures, replications) require.Len(t, tasks, 1) } diff --git a/cdc/scheduler/internal/v3/scheduler/scheduler_basic.go b/cdc/scheduler/internal/v3/scheduler/scheduler_basic.go index bb6e613f9de..2efffdbda7a 100644 --- a/cdc/scheduler/internal/v3/scheduler/scheduler_basic.go +++ b/cdc/scheduler/internal/v3/scheduler/scheduler_basic.go @@ -49,7 +49,7 @@ func (b *basicScheduler) Name() string { } func (b *basicScheduler) Schedule( - checkpoint tablepb.Checkpoint, + checkpointTs model.Ts, currentSpans []tablepb.Span, captures map[model.CaptureID]*member.CaptureStatus, replications *spanz.BtreeMap[*replication.ReplicationSet], @@ -102,7 +102,7 @@ func (b *basicScheduler) Schedule( return tasks } tasks = append( - tasks, newBurstAddTables(b.changefeedID, checkpoint, newSpans, captureIDs)) + tasks, newBurstAddTables(b.changefeedID, checkpointTs, newSpans, captureIDs)) } // Build remove table tasks. @@ -140,16 +140,16 @@ func (b *basicScheduler) Schedule( // newBurstAddTables add each new table to captures in a round-robin way. 
func newBurstAddTables( changefeedID model.ChangeFeedID, - checkpoint tablepb.Checkpoint, newSpans []tablepb.Span, captureIDs []model.CaptureID, + checkpointTs model.Ts, newSpans []tablepb.Span, captureIDs []model.CaptureID, ) *replication.ScheduleTask { idx := 0 tables := make([]replication.AddTable, 0, len(newSpans)) for _, span := range newSpans { targetCapture := captureIDs[idx] tables = append(tables, replication.AddTable{ - Span: span, - CaptureID: targetCapture, - Checkpoint: checkpoint, + Span: span, + CaptureID: targetCapture, + CheckpointTs: checkpointTs, }) log.Info("schedulerv3: burst add table", zap.String("namespace", changefeedID.Namespace), diff --git a/cdc/scheduler/internal/v3/scheduler/scheduler_basic_test.go b/cdc/scheduler/internal/v3/scheduler/scheduler_basic_test.go index ad8ecd63a3d..83d00ed36b0 100644 --- a/cdc/scheduler/internal/v3/scheduler/scheduler_basic_test.go +++ b/cdc/scheduler/internal/v3/scheduler/scheduler_basic_test.go @@ -36,7 +36,6 @@ func mapToSpanMap[T any](in map[model.TableID]T) *spanz.BtreeMap[T] { func TestSchedulerBasic(t *testing.T) { t.Parallel() - var checkpoint tablepb.Checkpoint captures := map[model.CaptureID]*member.CaptureStatus{"a": {}, "b": {}} currentTables := spanz.ArrayToSpan([]model.TableID{1, 2, 3, 4}) @@ -47,7 +46,7 @@ func TestSchedulerBasic(t *testing.T) { // one capture stopping, another one is initialized captures["a"].State = member.CaptureStateStopping - tasks := b.Schedule(checkpoint, currentTables, captures, replications) + tasks := b.Schedule(0, currentTables, captures, replications) require.Len(t, tasks, 1) require.Len(t, tasks[0].BurstBalance.AddTables, 2) require.Equal(t, tasks[0].BurstBalance.AddTables[0].CaptureID, "b") @@ -55,12 +54,12 @@ func TestSchedulerBasic(t *testing.T) { // all capture's stopping, cannot add table captures["b"].State = member.CaptureStateStopping - tasks = b.Schedule(checkpoint, currentTables, captures, replications) + tasks = b.Schedule(0, currentTables, captures, replications) require.Len(t, tasks, 0) captures["a"].State = member.CaptureStateInitialized captures["b"].State = member.CaptureStateInitialized - tasks = b.Schedule(checkpoint, currentTables, captures, replications) + tasks = b.Schedule(0, currentTables, captures, replications) require.Len(t, tasks, 1) require.Len(t, tasks[0].BurstBalance.AddTables, 2) require.Equal(t, tasks[0].BurstBalance.AddTables[0].Span.TableID, model.TableID(1)) @@ -89,11 +88,10 @@ func TestSchedulerBasic(t *testing.T) { }, 4: {State: replication.ReplicationSetStateAbsent}, }) - checkpoint1 := tablepb.Checkpoint{CheckpointTs: 1, ResolvedTs: 1} - tasks = b.Schedule(checkpoint1, currentTables, captures, replications) + tasks = b.Schedule(1, currentTables, captures, replications) require.Len(t, tasks, 1) require.Equal(t, tasks[0].BurstBalance.AddTables[0].Span.TableID, model.TableID(4)) - require.Equal(t, tasks[0].BurstBalance.AddTables[0].Checkpoint, checkpoint1) + require.Equal(t, tasks[0].BurstBalance.AddTables[0].CheckpointTs, model.Ts(1)) // DDL CREATE/DROP/TRUNCATE TABLE. // AddTable 4, and RemoveTable 5. 
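With AddTable now carrying a plain CheckpointTs instead of a full tablepb.Checkpoint, building a burst-add task reduces to fanning the new spans out over captures in round-robin order and stamping every entry with the same timestamp. A simplified sketch of that shape, using local stand-in types rather than the real replication package:

package main

import "fmt"

// Stand-ins for the scheduler's types; illustration only.
type Ts uint64

type Span struct{ TableID int64 }

type AddTable struct {
	Span         Span
	CaptureID    string
	CheckpointTs Ts
}

// burstAddTables approximates newBurstAddTables: each new span is assigned
// to a capture round-robin and tagged with the shared checkpoint ts.
func burstAddTables(checkpointTs Ts, newSpans []Span, captureIDs []string) []AddTable {
	idx := 0
	tables := make([]AddTable, 0, len(newSpans))
	for _, span := range newSpans {
		tables = append(tables, AddTable{
			Span:         span,
			CaptureID:    captureIDs[idx],
			CheckpointTs: checkpointTs,
		})
		idx = (idx + 1) % len(captureIDs)
	}
	return tables
}

func main() {
	spans := []Span{{TableID: 4}, {TableID: 5}, {TableID: 6}}
	fmt.Println(burstAddTables(2, spans, []string{"a", "b"}))
}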
@@ -123,16 +121,15 @@ func TestSchedulerBasic(t *testing.T) { }, }, }) - checkpoint2 := tablepb.Checkpoint{CheckpointTs: 2, ResolvedTs: 2} - tasks = b.Schedule(checkpoint2, currentTables, captures, replications) + tasks = b.Schedule(2, currentTables, captures, replications) require.Len(t, tasks, 2) if tasks[0].BurstBalance.AddTables != nil { require.Equal(t, tasks[0].BurstBalance.AddTables[0].Span.TableID, model.TableID(4)) - require.Equal(t, tasks[0].BurstBalance.AddTables[0].Checkpoint, checkpoint2) + require.Equal(t, tasks[0].BurstBalance.AddTables[0].CheckpointTs, model.Ts(2)) require.Equal(t, tasks[1].BurstBalance.RemoveTables[0].Span.TableID, model.TableID(5)) } else { require.Equal(t, tasks[1].BurstBalance.AddTables[0].Span.TableID, model.TableID(4)) - require.Equal(t, tasks[0].BurstBalance.AddTables[0].Checkpoint, checkpoint2) + require.Equal(t, tasks[0].BurstBalance.AddTables[0].CheckpointTs, model.Ts(2)) require.Equal(t, tasks[0].BurstBalance.RemoveTables[0].Span.TableID, model.TableID(5)) } @@ -169,8 +166,7 @@ func TestSchedulerBasic(t *testing.T) { }, }, }) - checkpoint3 := tablepb.Checkpoint{CheckpointTs: 3, ResolvedTs: 3} - tasks = b.Schedule(checkpoint3, currentTables, captures, replications) + tasks = b.Schedule(3, currentTables, captures, replications) require.Len(t, tasks, 1) require.Equal(t, tasks[0].BurstBalance.RemoveTables[0].Span.TableID, model.TableID(5)) } @@ -197,13 +193,12 @@ func benchmarkSchedulerBalance( ), ) { size := 16384 - var checkpoint tablepb.Checkpoint for total := 1; total <= size; total *= 2 { name, currentTables, captures, replications, sched := factory(total) b.ResetTimer() b.Run(name, func(b *testing.B) { for i := 0; i < b.N; i++ { - sched.Schedule(checkpoint, currentTables, captures, replications) + sched.Schedule(0, currentTables, captures, replications) } }) b.StopTimer() diff --git a/cdc/scheduler/internal/v3/scheduler/scheduler_drain_capture.go b/cdc/scheduler/internal/v3/scheduler/scheduler_drain_capture.go index a1995a58506..66de141b7a4 100644 --- a/cdc/scheduler/internal/v3/scheduler/scheduler_drain_capture.go +++ b/cdc/scheduler/internal/v3/scheduler/scheduler_drain_capture.go @@ -71,7 +71,7 @@ func (d *drainCaptureScheduler) setTarget(target model.CaptureID) bool { } func (d *drainCaptureScheduler) Schedule( - _ tablepb.Checkpoint, + _ model.Ts, _ []tablepb.Span, captures map[model.CaptureID]*member.CaptureStatus, replications *spanz.BtreeMap[*replication.ReplicationSet], diff --git a/cdc/scheduler/internal/v3/scheduler/scheduler_drain_capture_test.go b/cdc/scheduler/internal/v3/scheduler/scheduler_drain_capture_test.go index 81af850e20a..03f1a99197e 100644 --- a/cdc/scheduler/internal/v3/scheduler/scheduler_drain_capture_test.go +++ b/cdc/scheduler/internal/v3/scheduler/scheduler_drain_capture_test.go @@ -30,18 +30,18 @@ func TestDrainCapture(t *testing.T) { scheduler := newDrainCaptureScheduler(10, model.ChangeFeedID{}) require.Equal(t, "drain-capture-scheduler", scheduler.Name()) - var checkpoint tablepb.Checkpoint + var checkpointTs model.Ts captures := make(map[model.CaptureID]*member.CaptureStatus) currentTables := make([]tablepb.Span, 0) replications := mapToSpanMap(make(map[model.TableID]*replication.ReplicationSet)) - tasks := scheduler.Schedule(checkpoint, currentTables, captures, replications) + tasks := scheduler.Schedule(checkpointTs, currentTables, captures, replications) require.Len(t, tasks, 0) ok := scheduler.setTarget("a") require.True(t, ok) - tasks = scheduler.Schedule(checkpoint, currentTables, captures, 
replications) + tasks = scheduler.Schedule(checkpointTs, currentTables, captures, replications) require.Len(t, tasks, 0) // the target capture has no table at the beginning, so reset the target require.Equal(t, captureIDNotDraining, scheduler.target) @@ -50,7 +50,7 @@ func TestDrainCapture(t *testing.T) { ok = scheduler.setTarget("b") require.True(t, ok) - tasks = scheduler.Schedule(checkpoint, currentTables, captures, replications) + tasks = scheduler.Schedule(checkpointTs, currentTables, captures, replications) require.Len(t, tasks, 0) // the target capture cannot be found in the latest captures require.Equal(t, captureIDNotDraining, scheduler.target) @@ -105,7 +105,7 @@ func TestDrainCapture(t *testing.T) { ok = scheduler.setTarget("a") require.True(t, ok) // not all table is replicating, skip this tick. - tasks = scheduler.Schedule(checkpoint, currentTables, captures, replications) + tasks = scheduler.Schedule(checkpointTs, currentTables, captures, replications) require.Equal(t, "a", scheduler.target) require.Len(t, tasks, 0) @@ -118,13 +118,13 @@ func TestDrainCapture(t *testing.T) { 7: {State: replication.ReplicationSetStateReplicating, Primary: "b"}, }) - tasks = scheduler.Schedule(checkpoint, currentTables, captures, replications) + tasks = scheduler.Schedule(checkpointTs, currentTables, captures, replications) require.Equal(t, "a", scheduler.target) require.Len(t, tasks, 3) scheduler = newDrainCaptureScheduler(1, model.ChangeFeedID{}) require.True(t, scheduler.setTarget("a")) - tasks = scheduler.Schedule(checkpoint, currentTables, captures, replications) + tasks = scheduler.Schedule(checkpointTs, currentTables, captures, replications) require.Equal(t, "a", scheduler.target) require.Len(t, tasks, 1) } @@ -132,13 +132,13 @@ func TestDrainCapture(t *testing.T) { func TestDrainStoppingCapture(t *testing.T) { t.Parallel() - var checkpoint tablepb.Checkpoint + var checkpointTs model.Ts captures := make(map[model.CaptureID]*member.CaptureStatus) currentTables := make([]tablepb.Span, 0) replications := mapToSpanMap(make(map[model.TableID]*replication.ReplicationSet)) scheduler := newDrainCaptureScheduler(10, model.ChangeFeedID{}) - tasks := scheduler.Schedule(checkpoint, currentTables, captures, replications) + tasks := scheduler.Schedule(checkpointTs, currentTables, captures, replications) require.Empty(t, tasks) captures["a"] = &member.CaptureStatus{} @@ -147,7 +147,7 @@ func TestDrainStoppingCapture(t *testing.T) { 1: {State: replication.ReplicationSetStateReplicating, Primary: "a"}, 2: {State: replication.ReplicationSetStateReplicating, Primary: "b"}, }) - tasks = scheduler.Schedule(checkpoint, currentTables, captures, replications) + tasks = scheduler.Schedule(checkpointTs, currentTables, captures, replications) require.Len(t, tasks, 1) require.EqualValues(t, 2, tasks[0].MoveTable.Span.TableID) require.EqualValues(t, "a", tasks[0].MoveTable.DestCapture) @@ -157,7 +157,7 @@ func TestDrainStoppingCapture(t *testing.T) { func TestDrainSkipOwner(t *testing.T) { t.Parallel() - var checkpoint tablepb.Checkpoint + var checkpointTs model.Ts currentTables := make([]tablepb.Span, 0) captures := map[model.CaptureID]*member.CaptureStatus{ "a": {}, @@ -168,7 +168,7 @@ func TestDrainSkipOwner(t *testing.T) { 2: {State: replication.ReplicationSetStateReplicating, Primary: "b"}, }) scheduler := newDrainCaptureScheduler(10, model.ChangeFeedID{}) - tasks := scheduler.Schedule(checkpoint, currentTables, captures, replications) + tasks := scheduler.Schedule(checkpointTs, currentTables, captures, 
replications) require.Len(t, tasks, 0) require.EqualValues(t, captureIDNotDraining, scheduler.getTarget()) } @@ -176,7 +176,7 @@ func TestDrainSkipOwner(t *testing.T) { func TestDrainImbalanceCluster(t *testing.T) { t.Parallel() - var checkpoint tablepb.Checkpoint + var checkpointTs model.Ts currentTables := make([]tablepb.Span, 0) captures := map[model.CaptureID]*member.CaptureStatus{ "a": {State: member.CaptureStateInitialized}, @@ -188,7 +188,7 @@ func TestDrainImbalanceCluster(t *testing.T) { }) scheduler := newDrainCaptureScheduler(10, model.ChangeFeedID{}) scheduler.setTarget("a") - tasks := scheduler.Schedule(checkpoint, currentTables, captures, replications) + tasks := scheduler.Schedule(checkpointTs, currentTables, captures, replications) require.Len(t, tasks, 2) require.EqualValues(t, "a", scheduler.getTarget()) } @@ -196,7 +196,7 @@ func TestDrainImbalanceCluster(t *testing.T) { func TestDrainEvenlyDistributedTables(t *testing.T) { t.Parallel() - var checkpoint tablepb.Checkpoint + var checkpointTs model.Ts currentTables := make([]tablepb.Span, 0) captures := map[model.CaptureID]*member.CaptureStatus{ "a": {State: member.CaptureStateInitialized}, @@ -211,7 +211,7 @@ func TestDrainEvenlyDistributedTables(t *testing.T) { }) scheduler := newDrainCaptureScheduler(10, model.ChangeFeedID{}) scheduler.setTarget("a") - tasks := scheduler.Schedule(checkpoint, currentTables, captures, replications) + tasks := scheduler.Schedule(checkpointTs, currentTables, captures, replications) require.Len(t, tasks, 3) taskMap := make(map[model.CaptureID]int) for _, t := range tasks { diff --git a/cdc/scheduler/internal/v3/scheduler/scheduler_manager.go b/cdc/scheduler/internal/v3/scheduler/scheduler_manager.go index 21ab13b3ef5..76817b344f6 100644 --- a/cdc/scheduler/internal/v3/scheduler/scheduler_manager.go +++ b/cdc/scheduler/internal/v3/scheduler/scheduler_manager.go @@ -20,7 +20,6 @@ import ( "github.com/pingcap/log" "github.com/pingcap/tiflow/cdc/model" "github.com/pingcap/tiflow/cdc/processor/tablepb" - "github.com/pingcap/tiflow/cdc/redo" "github.com/pingcap/tiflow/cdc/scheduler/internal/v3/member" "github.com/pingcap/tiflow/cdc/scheduler/internal/v3/replication" "github.com/pingcap/tiflow/pkg/config" @@ -70,22 +69,7 @@ func (sm *Manager) Schedule( aliveCaptures map[model.CaptureID]*member.CaptureStatus, replications *spanz.BtreeMap[*replication.ReplicationSet], runTasking *spanz.BtreeMap[*replication.ScheduleTask], - redoMetaManager redo.MetaManager, ) []*replication.ScheduleTask { - checkpoint := tablepb.Checkpoint{ - CheckpointTs: checkpointTs, - ResolvedTs: checkpointTs, - } - if redoMetaManager != nil && redoMetaManager.Enabled() { - flushedMeta := redoMetaManager.GetFlushedMeta() - if flushedMeta.ResolvedTs < checkpointTs { - log.Panic("schedulerv3: flushed resolved ts is less than checkpoint ts", - zap.Uint64("checkpointTs", checkpointTs), - zap.Any("flushedMeta", flushedMeta)) - } - checkpoint.ResolvedTs = flushedMeta.ResolvedTs - } - for sid, scheduler := range sm.schedulers { // Basic scheduler bypasses max task check, because it handles the most // critical scheduling, e.g. add table via CREATE TABLE DDL. 
@@ -96,7 +80,7 @@ func (sm *Manager) Schedule( return nil } } - tasks := scheduler.Schedule(checkpoint, currentSpans, aliveCaptures, replications) + tasks := scheduler.Schedule(checkpointTs, currentSpans, aliveCaptures, replications) for _, t := range tasks { name := struct { scheduler, task string diff --git a/cdc/scheduler/internal/v3/scheduler/scheduler_manager_test.go b/cdc/scheduler/internal/v3/scheduler/scheduler_manager_test.go index 613273b81c6..e7b3fb83a64 100644 --- a/cdc/scheduler/internal/v3/scheduler/scheduler_manager_test.go +++ b/cdc/scheduler/internal/v3/scheduler/scheduler_manager_test.go @@ -18,7 +18,6 @@ import ( "github.com/pingcap/tiflow/cdc/model" "github.com/pingcap/tiflow/cdc/processor/tablepb" - "github.com/pingcap/tiflow/cdc/redo" "github.com/pingcap/tiflow/cdc/scheduler/internal/v3/member" "github.com/pingcap/tiflow/cdc/scheduler/internal/v3/replication" "github.com/pingcap/tiflow/pkg/config" @@ -54,16 +53,14 @@ func TestSchedulerManagerScheduler(t *testing.T) { // schedulerPriorityBasic bypasses task check. replications := mapToSpanMap(map[model.TableID]*replication.ReplicationSet{}) runningTasks := mapToSpanMap(map[model.TableID]*replication.ScheduleTask{1: {}}) - - redoMetaManager := redo.NewDisabledMetaManager() - tasks := m.Schedule(0, currentSpans, captures, replications, runningTasks, redoMetaManager) + tasks := m.Schedule(0, currentSpans, captures, replications, runningTasks) require.Len(t, tasks, 1) // No more task. replications = mapToSpanMap(map[model.TableID]*replication.ReplicationSet{ 1: {State: replication.ReplicationSetStateReplicating, Primary: "a"}, }) - tasks = m.Schedule(0, currentSpans, captures, replications, runningTasks, redoMetaManager) + tasks = m.Schedule(0, currentSpans, captures, replications, runningTasks) require.Len(t, tasks, 0) // Move table is dropped because of running tasks. @@ -71,7 +68,7 @@ func TestSchedulerManagerScheduler(t *testing.T) { replications = mapToSpanMap(map[model.TableID]*replication.ReplicationSet{ 1: {State: replication.ReplicationSetStateReplicating, Primary: "a"}, }) - tasks = m.Schedule(0, currentSpans, captures, replications, runningTasks, redoMetaManager) + tasks = m.Schedule(0, currentSpans, captures, replications, runningTasks) require.Len(t, tasks, 0) // Move table can proceed after clean up tasks. 
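The scheduler manager change above removes the redo-aware checkpoint construction: instead of building a tablepb.Checkpoint whose ResolvedTs is taken from the redo meta manager's flushed meta, Manager.Schedule now forwards the caller's checkpointTs to every registered scheduler unchanged. Roughly, the dispatch loop collapses to something like the sketch below (stand-in types; details such as the basic scheduler's exemption from the max-task check are deliberately elided):

package main

import "fmt"

// Stand-ins for model.Ts and the scheduler interface; illustration only.
type Ts uint64

type scheduler interface {
	Name() string
	Schedule(checkpointTs Ts) []string // task payload elided
}

type manager struct {
	schedulers []scheduler
	maxTasks   int
}

// Schedule forwards the checkpoint ts as-is; no redo meta lookup any more.
func (m *manager) Schedule(checkpointTs Ts, runningTasks int) []string {
	var tasks []string
	for _, s := range m.schedulers {
		if runningTasks+len(tasks) >= m.maxTasks {
			fmt.Println("skip scheduling: too many running tasks")
			return tasks
		}
		tasks = append(tasks, s.Schedule(checkpointTs)...)
	}
	return tasks
}

func main() {
	m := &manager{maxTasks: 10}
	_ = m.Schedule(42, 0)
}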
@@ -80,6 +77,6 @@ func TestSchedulerManagerScheduler(t *testing.T) { 1: {State: replication.ReplicationSetStateReplicating, Primary: "a"}, }) runningTasks = spanz.NewBtreeMap[*replication.ScheduleTask]() - tasks = m.Schedule(0, currentSpans, captures, replications, runningTasks, redoMetaManager) + tasks = m.Schedule(0, currentSpans, captures, replications, runningTasks) require.Len(t, tasks, 1) } diff --git a/cdc/scheduler/internal/v3/scheduler/scheduler_move_table.go b/cdc/scheduler/internal/v3/scheduler/scheduler_move_table.go index bf9ef491f57..10b44de77e5 100644 --- a/cdc/scheduler/internal/v3/scheduler/scheduler_move_table.go +++ b/cdc/scheduler/internal/v3/scheduler/scheduler_move_table.go @@ -67,7 +67,7 @@ func (m *moveTableScheduler) addTask(span tablepb.Span, target model.CaptureID) } func (m *moveTableScheduler) Schedule( - _ tablepb.Checkpoint, + _ model.Ts, currentSpans []tablepb.Span, captures map[model.CaptureID]*member.CaptureStatus, replications *spanz.BtreeMap[*replication.ReplicationSet], diff --git a/cdc/scheduler/internal/v3/scheduler/scheduler_move_table_test.go b/cdc/scheduler/internal/v3/scheduler/scheduler_move_table_test.go index 3adc047bfbc..e485db9ebed 100644 --- a/cdc/scheduler/internal/v3/scheduler/scheduler_move_table_test.go +++ b/cdc/scheduler/internal/v3/scheduler/scheduler_move_table_test.go @@ -27,7 +27,7 @@ import ( func TestSchedulerMoveTable(t *testing.T) { t.Parallel() - var checkpoint tablepb.Checkpoint + var checkpointTs model.Ts captures := map[model.CaptureID]*member.CaptureStatus{"a": { State: member.CaptureStateInitialized, }, "b": { @@ -43,38 +43,38 @@ func TestSchedulerMoveTable(t *testing.T) { require.Equal(t, "move-table-scheduler", scheduler.Name()) tasks := scheduler.Schedule( - checkpoint, currentTables, map[model.CaptureID]*member.CaptureStatus{}, replications) + checkpointTs, currentTables, map[model.CaptureID]*member.CaptureStatus{}, replications) require.Len(t, tasks, 0) scheduler.addTask(tablepb.Span{TableID: 0}, "a") tasks = scheduler.Schedule( - checkpoint, currentTables, map[model.CaptureID]*member.CaptureStatus{}, replications) + checkpointTs, currentTables, map[model.CaptureID]*member.CaptureStatus{}, replications) require.Len(t, tasks, 0) // move a not exist table scheduler.addTask(tablepb.Span{TableID: 0}, "a") - tasks = scheduler.Schedule(checkpoint, currentTables, captures, replications) + tasks = scheduler.Schedule(checkpointTs, currentTables, captures, replications) require.Len(t, tasks, 0) // move table to a not exist capture scheduler.addTask(tablepb.Span{TableID: 1}, "c") - tasks = scheduler.Schedule(checkpoint, currentTables, captures, replications) + tasks = scheduler.Schedule(checkpointTs, currentTables, captures, replications) require.Len(t, tasks, 0) // move table not replicating scheduler.addTask(tablepb.Span{TableID: 1}, "b") tasks = scheduler.Schedule( - checkpoint, currentTables, captures, spanz.NewBtreeMap[*replication.ReplicationSet]()) + checkpointTs, currentTables, captures, spanz.NewBtreeMap[*replication.ReplicationSet]()) require.Len(t, tasks, 0) scheduler.addTask(tablepb.Span{TableID: 1}, "b") replications.GetV(tablepb.Span{TableID: 1}).State = replication.ReplicationSetStatePrepare - tasks = scheduler.Schedule(checkpoint, currentTables, captures, replications) + tasks = scheduler.Schedule(checkpointTs, currentTables, captures, replications) require.Len(t, tasks, 0) scheduler.addTask(tablepb.Span{TableID: 1}, "b") replications.GetV(tablepb.Span{TableID: 1}).State = 
replication.ReplicationSetStateReplicating - tasks = scheduler.Schedule(checkpoint, currentTables, captures, replications) + tasks = scheduler.Schedule(checkpointTs, currentTables, captures, replications) require.Len(t, tasks, 1) require.Equal(t, model.TableID(1), tasks[0].MoveTable.Span.TableID) require.Equal(t, "b", tasks[0].MoveTable.DestCapture) @@ -83,7 +83,7 @@ func TestSchedulerMoveTable(t *testing.T) { // the target capture is stopping scheduler.addTask(tablepb.Span{TableID: 1}, "b") captures["b"].State = member.CaptureStateStopping - tasks = scheduler.Schedule(checkpoint, currentTables, captures, replications) + tasks = scheduler.Schedule(checkpointTs, currentTables, captures, replications) require.Len(t, tasks, 0) require.False(t, scheduler.tasks.Has(tablepb.Span{TableID: 1})) } diff --git a/cdc/scheduler/internal/v3/scheduler/scheduler_rebalance.go b/cdc/scheduler/internal/v3/scheduler/scheduler_rebalance.go index 2bb4a4ecbda..ecc02d37c20 100644 --- a/cdc/scheduler/internal/v3/scheduler/scheduler_rebalance.go +++ b/cdc/scheduler/internal/v3/scheduler/scheduler_rebalance.go @@ -51,7 +51,7 @@ func (r *rebalanceScheduler) Name() string { } func (r *rebalanceScheduler) Schedule( - _ tablepb.Checkpoint, + _ model.Ts, currentSpans []tablepb.Span, captures map[model.CaptureID]*member.CaptureStatus, replications *spanz.BtreeMap[*replication.ReplicationSet], diff --git a/cdc/scheduler/internal/v3/scheduler/scheduler_rebalance_test.go b/cdc/scheduler/internal/v3/scheduler/scheduler_rebalance_test.go index c2b3ea9c878..e6b1ff53d83 100644 --- a/cdc/scheduler/internal/v3/scheduler/scheduler_rebalance_test.go +++ b/cdc/scheduler/internal/v3/scheduler/scheduler_rebalance_test.go @@ -28,7 +28,7 @@ import ( func TestSchedulerRebalance(t *testing.T) { t.Parallel() - var checkpoint tablepb.Checkpoint + var checkpointTs model.Ts captures := map[model.CaptureID]*member.CaptureStatus{"a": {}, "b": {}} currentTables := spanz.ArrayToSpan([]model.TableID{1, 2, 3, 4}) @@ -57,22 +57,22 @@ func TestSchedulerRebalance(t *testing.T) { scheduler := newRebalanceScheduler(model.ChangeFeedID{}) require.Equal(t, "rebalance-scheduler", scheduler.Name()) // rebalance is not triggered - tasks := scheduler.Schedule(checkpoint, currentTables, captures, replications) + tasks := scheduler.Schedule(checkpointTs, currentTables, captures, replications) require.Len(t, tasks, 0) atomic.StoreInt32(&scheduler.rebalance, 1) // no captures tasks = scheduler.Schedule( - checkpoint, currentTables, map[model.CaptureID]*member.CaptureStatus{}, replications) + checkpointTs, currentTables, map[model.CaptureID]*member.CaptureStatus{}, replications) require.Len(t, tasks, 0) // table not in the replication set, tasks = scheduler.Schedule( - checkpoint, spanz.ArrayToSpan([]model.TableID{0}), captures, replications) + checkpointTs, spanz.ArrayToSpan([]model.TableID{0}), captures, replications) require.Len(t, tasks, 0) // not all tables are replicating, - tasks = scheduler.Schedule(checkpoint, currentTables, captures, replications) + tasks = scheduler.Schedule(checkpointTs, currentTables, captures, replications) require.Len(t, tasks, 0) // table distribution is balanced, should have no task. 
@@ -82,7 +82,7 @@ func TestSchedulerRebalance(t *testing.T) { 3: {State: replication.ReplicationSetStateReplicating, Primary: "b"}, 4: {State: replication.ReplicationSetStateReplicating, Primary: "b"}, }) - tasks = scheduler.Schedule(checkpoint, currentTables, captures, replications) + tasks = scheduler.Schedule(checkpointTs, currentTables, captures, replications) require.Len(t, tasks, 0) // Imbalance. @@ -97,14 +97,14 @@ func TestSchedulerRebalance(t *testing.T) { // capture is stopping, ignore the request captures["a"].State = member.CaptureStateStopping - tasks = scheduler.Schedule(checkpoint, currentTables, captures, replications) + tasks = scheduler.Schedule(checkpointTs, currentTables, captures, replications) require.Len(t, tasks, 0) require.Equal(t, atomic.LoadInt32(&scheduler.rebalance), int32(0)) captures["a"].State = member.CaptureStateInitialized atomic.StoreInt32(&scheduler.rebalance, 1) scheduler.random = nil // disable random to make test easier. - tasks = scheduler.Schedule(checkpoint, currentTables, captures, replications) + tasks = scheduler.Schedule(checkpointTs, currentTables, captures, replications) require.Len(t, tasks, 1) require.Contains(t, tasks[0].BurstBalance.MoveTables, replication.MoveTable{ Span: tablepb.Span{TableID: 1}, DestCapture: "b", @@ -115,6 +115,6 @@ func TestSchedulerRebalance(t *testing.T) { require.EqualValues(t, 0, atomic.LoadInt32(&scheduler.rebalance)) // pending task is not consumed yet, this turn should have no tasks. - tasks = scheduler.Schedule(checkpoint, currentTables, captures, replications) + tasks = scheduler.Schedule(checkpointTs, currentTables, captures, replications) require.Len(t, tasks, 0) } diff --git a/cdc/scheduler/internal/v3/transport/main_test.go b/cdc/scheduler/internal/v3/transport/main_test.go deleted file mode 100644 index bdb388e4cbc..00000000000 --- a/cdc/scheduler/internal/v3/transport/main_test.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2022 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "testing" - - "github.com/pingcap/tiflow/pkg/leakutil" -) - -func TestMain(m *testing.M) { - leakutil.SetUpLeakTest(m) -}
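Both deleted main_test.go files used the repo's leakutil helper to install a goroutine-leak check for their packages. For context, the equivalent pattern written directly against go.uber.org/goleak, which leakutil is assumed to be a thin wrapper around, looks like this:

package transport

import (
	"testing"

	"go.uber.org/goleak"
)

// TestMain fails the package's tests if any unexpected goroutine is still
// running after all tests finish.
func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m)
}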