From 150db5c7c8fae10dd5cd1f3d14a87fdb31769441 Mon Sep 17 00:00:00 2001
From: 3AceShowHand
Date: Thu, 23 Nov 2023 16:50:57 +0800
Subject: [PATCH 01/11] add testkit.

---
 .../testkit/testkit.go                        | 47 +++++++++----------
 1 file changed, 23 insertions(+), 24 deletions(-)
 rename cdc/entry/schema_test_helper.go => pkg/testkit/testkit.go (76%)

diff --git a/cdc/entry/schema_test_helper.go b/pkg/testkit/testkit.go
similarity index 76%
rename from cdc/entry/schema_test_helper.go
rename to pkg/testkit/testkit.go
index f438ae31239..97b2b87b647 100644
--- a/cdc/entry/schema_test_helper.go
+++ b/pkg/testkit/testkit.go
@@ -1,4 +1,4 @@
-// Copyright 2021 PingCAP, Inc.
+// Copyright 2023 PingCAP, Inc.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -11,7 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package entry
+package testkit
 
 import (
 	"encoding/json"
@@ -22,8 +22,8 @@ import (
 	tiddl "github.com/pingcap/tidb/ddl"
 	"github.com/pingcap/tidb/domain"
 	"github.com/pingcap/tidb/kv"
-	timeta "github.com/pingcap/tidb/meta"
-	timodel "github.com/pingcap/tidb/parser/model"
+	"github.com/pingcap/tidb/meta"
+	"github.com/pingcap/tidb/parser/model"
 	"github.com/pingcap/tidb/session"
 	"github.com/pingcap/tidb/store/mockstore"
 	"github.com/pingcap/tidb/testkit"
@@ -31,28 +31,27 @@ import (
 	"github.com/tikv/client-go/v2/oracle"
)
 
-// SchemaTestHelper is a test helper for schema which creates an internal tidb instance to generate DDL jobs with meta information
-type SchemaTestHelper struct {
+type TestKit struct {
 	t       *testing.T
 	tk      *testkit.TestKit
 	storage kv.Storage
 	domain  *domain.Domain
 }
 
-// NewSchemaTestHelper creates a SchemaTestHelper
-func NewSchemaTestHelper(t *testing.T) *SchemaTestHelper {
+// New returns a new TestKit
+func New(t *testing.T) *TestKit {
 	store, err := mockstore.NewMockStore()
-	require.Nil(t, err)
+	require.NoError(t, err)
 	ticonfig.UpdateGlobal(func(conf *ticonfig.Config) {
 		conf.AlterPrimaryKey = true
 	})
 	session.SetSchemaLease(0)
 	session.DisableStats4Test()
 	domain, err := session.BootstrapSession(store)
-	require.Nil(t, err)
+	require.NoError(t, err)
 	domain.SetStatsUpdating(true)
 	tk := testkit.NewTestKit(t, store)
-	return &SchemaTestHelper{
+	return &TestKit{
 		t:       t,
 		tk:      tk,
 		storage: store,
@@ -61,7 +60,7 @@ func NewSchemaTestHelper(t *testing.T) *SchemaTestHelper {
 }
 
 // DDL2Job executes the DDL stmt and returns the DDL job
-func (s *SchemaTestHelper) DDL2Job(ddl string) *timodel.Job {
+func (s *TestKit) DDL2Job(ddl string) *model.Job {
 	s.tk.MustExec(ddl)
 	jobs, err := tiddl.GetLastNHistoryDDLJobs(s.GetCurrentMeta(), 1)
 	require.Nil(s.t, err)
 	require.Len(s.t, jobs, 1)
 	// Set State from Synced to Done.
 	// Because jobs are put to history queue after TiDB alter its state from
 	// Done to Synced.
-	jobs[0].State = timodel.JobStateDone
+	jobs[0].State = model.JobStateDone
 	res := jobs[0]
-	if res.Type != timodel.ActionRenameTables {
+	if res.Type != model.ActionRenameTables {
 		return res
 	}
 
@@ -89,13 +88,13 @@ func (s *SchemaTestHelper) DDL2Job(ddl string) *timodel.Job {
 	for i := 0; i < tableNum; i++ {
 		oldTableIDs[i] = res.BinlogInfo.MultipleTableInfos[i].ID
 	}
-	newTableNames := make([]timodel.CIStr, tableNum)
+	newTableNames := make([]model.CIStr, tableNum)
 	for i := 0; i < tableNum; i++ {
 		newTableNames[i] = res.BinlogInfo.MultipleTableInfos[i].Name
 	}
-	oldSchemaNames := make([]timodel.CIStr, tableNum)
+	oldSchemaNames := make([]model.CIStr, tableNum)
 	for i := 0; i < tableNum; i++ {
-		oldSchemaNames[i] = timodel.NewCIStr(schema)
+		oldSchemaNames[i] = model.NewCIStr(schema)
 	}
 	newSchemaIDs := oldSchemaIDs
 
@@ -113,7 +112,7 @@
 // It is mainly used for "DROP TABLE" and "DROP VIEW" statement because
 // multiple jobs will be generated after executing these two types of
 // DDL statements.
-func (s *SchemaTestHelper) DDL2Jobs(ddl string, jobCnt int) []*timodel.Job {
+func (s *TestKit) DDL2Jobs(ddl string, jobCnt int) []*model.Job {
 	s.tk.MustExec(ddl)
 	jobs, err := tiddl.GetLastNHistoryDDLJobs(s.GetCurrentMeta(), jobCnt)
 	require.Nil(s.t, err)
@@ -122,30 +121,30 @@
 	// Because jobs are put to history queue after TiDB alter its state from
 	// Done to Synced.
 	for i := range jobs {
-		jobs[i].State = timodel.JobStateDone
+		jobs[i].State = model.JobStateDone
 	}
 	return jobs
 }
 
 // Storage returns the tikv storage
-func (s *SchemaTestHelper) Storage() kv.Storage {
+func (s *TestKit) Storage() kv.Storage {
 	return s.storage
 }
 
 // Tk returns the TestKit
-func (s *SchemaTestHelper) Tk() *testkit.TestKit {
+func (s *TestKit) Tk() *testkit.TestKit {
 	return s.tk
 }
 
 // GetCurrentMeta return the current meta snapshot
-func (s *SchemaTestHelper) GetCurrentMeta() *timeta.Meta {
+func (s *TestKit) GetCurrentMeta() *meta.Meta {
 	ver, err := s.storage.CurrentVersion(oracle.GlobalTxnScope)
 	require.Nil(s.t, err)
-	return timeta.NewSnapshotMeta(s.storage.GetSnapshot(ver))
+	return meta.NewSnapshotMeta(s.storage.GetSnapshot(ver))
 }
 
 // Close closes the helper
-func (s *SchemaTestHelper) Close() {
+func (s *TestKit) Close() {
 	s.domain.Close()
 	s.storage.Close() //nolint:errcheck
 }

From dcfea518676aa73da4e0b2c419c90a5f9cb00160 Mon Sep 17 00:00:00 2001
From: 3AceShowHand
Date: Thu, 23 Nov 2023 17:09:22 +0800
Subject: [PATCH 02/11] fix mounter unit test.

---
 cdc/entry/mounter_test.go | 107 ++++++++++++++++----------------
 pkg/testkit/testkit.go    |  13 ++---
 2 files changed, 48 insertions(+), 72 deletions(-)

diff --git a/cdc/entry/mounter_test.go b/cdc/entry/mounter_test.go
index e1df5b2a473..cbfd62ad8d9 100644
--- a/cdc/entry/mounter_test.go
+++ b/cdc/entry/mounter_test.go
@@ -11,9 +11,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//go:build intest
-// +build intest
-
 package entry
 
 import (
@@ -25,7 +22,6 @@ import (
 	"time"
 
 	"github.com/pingcap/log"
-	ticonfig "github.com/pingcap/tidb/config"
 	"github.com/pingcap/tidb/ddl"
 	"github.com/pingcap/tidb/executor"
 	tidbkv "github.com/pingcap/tidb/kv"
@@ -34,9 +30,6 @@ import (
 	"github.com/pingcap/tidb/parser/ast"
 	timodel "github.com/pingcap/tidb/parser/model"
 	"github.com/pingcap/tidb/parser/mysql"
-	"github.com/pingcap/tidb/session"
-	"github.com/pingcap/tidb/store/mockstore"
-	"github.com/pingcap/tidb/testkit"
 	"github.com/pingcap/tidb/types"
 	"github.com/pingcap/tidb/util/mock"
 	"github.com/pingcap/tiflow/cdc/model"
@@ -48,6 +41,7 @@ import (
 	codecCommon "github.com/pingcap/tiflow/pkg/sink/codec/common"
 	"github.com/pingcap/tiflow/pkg/spanz"
 	"github.com/pingcap/tiflow/pkg/sqlmodel"
+	"github.com/pingcap/tiflow/pkg/testkit"
 	"github.com/pingcap/tiflow/pkg/util"
 	"github.com/stretchr/testify/require"
 	"github.com/tikv/client-go/v2/oracle"
@@ -267,19 +261,9 @@ func testMounterDisableOldValue(t *testing.T, tc struct {
 	delApproximateBytes [][]int
 },
) {
-	store, err := mockstore.NewMockStore()
-	require.Nil(t, err)
-	defer store.Close() //nolint:errcheck
-	ticonfig.UpdateGlobal(func(conf *ticonfig.Config) {
-		// we can update the tidb config here
-	})
-	session.SetSchemaLease(0)
-	session.DisableStats4Test()
-	domain, err := session.BootstrapSession(store)
-	require.Nil(t, err)
-	defer domain.Close()
-	domain.SetStatsUpdating(true)
-	tk := testkit.NewTestKit(t, store)
+	tk := testkit.New(t)
+	defer tk.Close()
+
 	tk.MustExec("set @@tidb_enable_clustered_index=1;")
 	tk.MustExec("use test;")
 
@@ -287,7 +271,7 @@
 	f, err := filter.NewFilter(config.GetDefaultReplicaConfig(), "")
 	require.Nil(t, err)
-	jobs, err := getAllHistoryDDLJob(store, f)
+	jobs, err := getAllHistoryDDLJob(tk.Storage(), f)
 	require.Nil(t, err)
 
 	scheamStorage, err := NewSchemaStorage(nil, 0, false, dummyChangeFeedID, util.RoleTester, f)
@@ -309,7 +293,7 @@
 		tk.MustExec(insertSQL, params...)
 	}
 
-	ver, err := store.CurrentVersion(oracle.GlobalTxnScope)
+	ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope)
 	require.Nil(t, err)
 	scheamStorage.AdvanceResolvedTs(ver.Ver)
 	config := config.GetDefaultReplicaConfig()
@@ -323,7 +307,7 @@
 	// [TODO] check size and readd rowBytes
 	mountAndCheckRowInTable := func(tableID int64, _ []int, f func(key []byte, value []byte) *model.RawKVEntry) int {
 		var rows int
-		walkTableSpanInStore(t, store, tableID, func(key []byte, value []byte) {
+		walkTableSpanInStore(t, tk.Storage(), tableID, func(key []byte, value []byte) {
 			rawKV := f(key, value)
 			row, err := mounter.unmarshalAndMountRowChanged(ctx, rawKV)
 			require.Nil(t, err)
@@ -1020,13 +1004,12 @@ func TestGetDefaultZeroValue(t *testing.T) {
 }
 
 func TestE2ERowLevelChecksum(t *testing.T) {
-	helper := NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
-	tk := helper.Tk()
 	// upstream TiDB enable checksum functionality
 	tk.MustExec("set global tidb_enable_row_level_checksum = 1")
-	helper.Tk().MustExec("use test")
+	tk.MustExec("use test")
 
 	// changefeed enable checksum functionality
 	replicaConfig := config.GetDefaultReplicaConfig()
@@ -1034,11 +1017,11 @@
 	filter, err := filter.NewFilter(replicaConfig, "")
 	require.NoError(t, err)
 
-	ver, err := helper.Storage().CurrentVersion(oracle.GlobalTxnScope)
+	ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope)
 	require.NoError(t, err)
 
 	changefeed := model.DefaultChangeFeedID("changefeed-test-decode-row")
-	schemaStorage, err := NewSchemaStorage(helper.GetCurrentMeta(),
+	schemaStorage, err := NewSchemaStorage(tk.GetCurrentMeta(),
 		ver.Ver, false, changefeed, util.RoleTester, filter)
 	require.NoError(t, err)
 	require.NotNil(t, schemaStorage)
@@ -1101,7 +1084,7 @@
 	description text CHARACTER SET gbk,
 	image tinyblob
);`
-	job := helper.DDL2Job(createTableSQL)
+	job := tk.DDL2Job(createTableSQL)
 	err = schemaStorage.HandleDDLJob(job)
 	require.NoError(t, err)
@@ -1137,7 +1120,7 @@
	);`
 	tk.MustExec(insertDataSQL)
 
-	key, value := getLastKeyValueInStore(t, helper.Storage(), tableInfo.ID)
+	key, value := getLastKeyValueInStore(t, tk.Storage(), tableInfo.ID)
 	rawKV := &model.RawKVEntry{
 		OpType: model.OpTypePut,
 		Key:    key,
@@ -1193,30 +1176,28 @@
 }
 
 func TestDecodeRowEnableChecksum(t *testing.T) {
-	helper := NewSchemaTestHelper(t)
-	defer helper.Close()
-
-	tk := helper.Tk()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	tk.MustExec("set global tidb_enable_row_level_checksum = 1")
-	helper.Tk().MustExec("use test")
+	tk.MustExec("use test")
 
 	replicaConfig := config.GetDefaultReplicaConfig()
 	replicaConfig.Integrity.IntegrityCheckLevel = integrity.CheckLevelCorrectness
 	filter, err := filter.NewFilter(replicaConfig, "")
 	require.NoError(t, err)
 
-	ver, err := helper.Storage().CurrentVersion(oracle.GlobalTxnScope)
+	ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope)
 	require.NoError(t, err)
 
 	changefeed := model.DefaultChangeFeedID("changefeed-test-decode-row")
-	schemaStorage, err := NewSchemaStorage(helper.GetCurrentMeta(),
+	schemaStorage, err := NewSchemaStorage(tk.GetCurrentMeta(),
 		ver.Ver, false, changefeed, util.RoleTester, filter)
 	require.NoError(t, err)
 	require.NotNil(t, schemaStorage)
 
 	createTableDDL := "create table t (id int primary key, a int)"
-	job := helper.DDL2Job(createTableDDL)
+	job := tk.DDL2Job(createTableDDL)
 	err = schemaStorage.HandleDDLJob(job)
 	require.NoError(t, err)
@@ -1234,7 +1215,7 @@
 	tk.Session().GetSessionVars().EnableRowLevelChecksum = false
 	tk.MustExec("insert into t values (1, 10)")
 
-	key, value := getLastKeyValueInStore(t, helper.Storage(), tableInfo.ID)
+	key, value := getLastKeyValueInStore(t, tk.Storage(), tableInfo.ID)
 	rawKV := &model.RawKVEntry{
 		OpType: model.OpTypePut,
 		Key:    key,
@@ -1253,7 +1234,7 @@
 	tk.Session().GetSessionVars().EnableRowLevelChecksum = true
 	tk.MustExec("insert into t values (2, 20)")
 
-	key, value = getLastKeyValueInStore(t, helper.Storage(), tableInfo.ID)
+	key, value = getLastKeyValueInStore(t, tk.Storage(), tableInfo.ID)
 	rawKV = &model.RawKVEntry{
 		OpType: model.OpTypePut,
 		Key:    key,
@@ -1273,11 +1254,11 @@
 	// row with 2 checksum
 	tk.MustExec("insert into t values (3, 30)")
 
-	job = helper.DDL2Job("alter table t change column a a varchar(10)")
+	job = tk.DDL2Job("alter table t change column a a varchar(10)")
 	err = schemaStorage.HandleDDLJob(job)
 	require.NoError(t, err)
 
-	key, value = getLastKeyValueInStore(t, helper.Storage(), tableInfo.ID)
+	key, value = getLastKeyValueInStore(t, tk.Storage(), tableInfo.ID)
 	rawKV = &model.RawKVEntry{
 		OpType: model.OpTypePut,
 		Key:    key,
@@ -1317,21 +1298,21 @@
 	require.Error(t, err)
 	require.ErrorIs(t, err, cerror.ErrCorruptedDataMutation)
 
-	job = helper.DDL2Job("drop table t")
+	job = tk.DDL2Job("drop table t")
 	err = schemaStorage.HandleDDLJob(job)
 	require.NoError(t, err)
 }
 
 func TestDecodeRow(t *testing.T) {
-	helper := NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
-	helper.Tk().MustExec("set @@tidb_enable_clustered_index=1;")
-	helper.Tk().MustExec("use test;")
+	tk.MustExec("set @@tidb_enable_clustered_index=1;")
+	tk.MustExec("use test;")
 
 	changefeed := model.DefaultChangeFeedID("changefeed-test-decode-row")
 
-	ver, err := helper.Storage().CurrentVersion(oracle.GlobalTxnScope)
+	ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope)
 	require.NoError(t, err)
 
 	cfg := config.GetDefaultReplicaConfig()
@@ -1339,13 +1320,13 @@
 	filter, err := filter.NewFilter(cfg, "")
 	require.NoError(t, err)
 
-	schemaStorage, err := NewSchemaStorage(helper.GetCurrentMeta(),
+	schemaStorage, err := NewSchemaStorage(tk.GetCurrentMeta(),
 		ver.Ver, false, changefeed, util.RoleTester, filter)
 	require.NoError(t, err)
 
 	// apply ddl to schemaStorage
 	ddl := "create table test.student(id int primary key, name char(50), age int, gender char(10))"
-	job := helper.DDL2Job(ddl)
+	job := tk.DDL2Job(ddl)
 	err = schemaStorage.HandleDDLJob(job)
 	require.NoError(t, err)
@@ -1355,12 +1336,12 @@
 
 	mounter := NewMounter(schemaStorage, changefeed, time.Local, filter, cfg.Integrity).(*mounter)
 
-	helper.Tk().MustExec(`insert into student values(1, "dongmen", 20, "male")`)
-	helper.Tk().MustExec(`update student set age = 27 where id = 1`)
+	tk.MustExec(`insert into student values(1, "dongmen", 20, "male")`)
+	tk.MustExec(`update student set age = 27 where id = 1`)
 
 	ctx := context.Background()
 	decodeAndCheckRowInTable := func(tableID int64, f func(key []byte, value []byte) *model.RawKVEntry) {
-		walkTableSpanInStore(t, helper.Storage(), tableID, func(key []byte, value []byte) {
+		walkTableSpanInStore(t, tk.Storage(), tableID, func(key []byte, value []byte) {
 			rawKV := f(key, value)
 			row, err := mounter.unmarshalAndMountRowChanged(ctx, rawKV)
@@ -1393,7 +1374,7 @@
 	decodeAndCheckRowInTable(tableInfo.ID, toRawKV)
 	decodeAndCheckRowInTable(tableInfo.ID, toRawKV)
 
-	job = helper.DDL2Job("drop table student")
+	job = tk.DDL2Job("drop table student")
 	err = schemaStorage.HandleDDLJob(job)
 	require.NoError(t, err)
 }
@@ -1401,9 +1382,9 @@
 // TestDecodeEventIgnoreRow tests a PolymorphicEvent.Row is nil
 // if this event should be filter out by filter.
 func TestDecodeEventIgnoreRow(t *testing.T) {
-	helper := NewSchemaTestHelper(t)
-	defer helper.Close()
-	helper.Tk().MustExec("use test;")
+	tk := testkit.New(t)
+	defer tk.Close()
+	tk.MustExec("use test;")
 
 	ddls := []string{
 		"create table test.student(id int primary key, name char(50), age int, gender char(10))",
@@ -1417,15 +1398,15 @@
 	cfg.Filter.Rules = []string{"test.student", "test.computer"}
 	f, err := filter.NewFilter(cfg, "")
 	require.Nil(t, err)
-	ver, err := helper.Storage().CurrentVersion(oracle.GlobalTxnScope)
+	ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope)
 	require.Nil(t, err)
 
-	schemaStorage, err := NewSchemaStorage(helper.GetCurrentMeta(),
+	schemaStorage, err := NewSchemaStorage(tk.GetCurrentMeta(),
 		ver.Ver, false, cfID, util.RoleTester, f)
 	require.Nil(t, err)
 	// apply ddl to schemaStorage
 	for _, ddl := range ddls {
-		job := helper.DDL2Job(ddl)
+		job := tk.DDL2Job(ddl)
 		err = schemaStorage.HandleDDLJob(job)
 		require.Nil(t, err)
 	}
@@ -1475,13 +1456,13 @@
 		} else {
 			tables = append(tables, tc.table)
 		}
-		helper.tk.MustExec(insertSQL, tc.columns...)
+		tk.MustExec(insertSQL, tc.columns...)
 	}
 	ctx := context.Background()
 	decodeAndCheckRowInTable := func(tableID int64, f func(key []byte, value []byte) *model.RawKVEntry) int {
 		var rows int
-		walkTableSpanInStore(t, helper.Storage(), tableID, func(key []byte, value []byte) {
+		walkTableSpanInStore(t, tk.Storage(), tableID, func(key []byte, value []byte) {
 			rawKV := f(key, value)
 			pEvent := model.NewPolymorphicEvent(rawKV)
 			err := mounter.DecodeEvent(ctx, pEvent)

diff --git a/pkg/testkit/testkit.go b/pkg/testkit/testkit.go
index 97b2b87b647..d545407e5b1 100644
--- a/pkg/testkit/testkit.go
+++ b/pkg/testkit/testkit.go
@@ -32,8 +32,8 @@ import (
)
 
 type TestKit struct {
+	*testkit.TestKit
 	t       *testing.T
-	tk      *testkit.TestKit
 	storage kv.Storage
 	domain  *domain.Domain
 }
@@ -53,7 +53,7 @@ func New(t *testing.T) *TestKit {
 	tk := testkit.NewTestKit(t, store)
 	return &TestKit{
 		t:       t,
-		tk:      tk,
+		TestKit: tk,
 		storage: store,
 		domain:  domain,
 	}
@@ -61,7 +61,7 @@
 
 // DDL2Job executes the DDL stmt and returns the DDL job
 func (s *TestKit) DDL2Job(ddl string) *model.Job {
-	s.tk.MustExec(ddl)
+	s.MustExec(ddl)
 	jobs, err := tiddl.GetLastNHistoryDDLJobs(s.GetCurrentMeta(), 1)
 	require.Nil(s.t, err)
 	require.Len(s.t, jobs, 1)
@@ -113,7 +112,7 @@
 // It is mainly used for "DROP TABLE" and "DROP VIEW" statement because
 // multiple jobs will be generated after executing these two types of
 // DDL statements.
 func (s *TestKit) DDL2Jobs(ddl string, jobCnt int) []*model.Job {
-	s.tk.MustExec(ddl)
+	s.MustExec(ddl)
 	jobs, err := tiddl.GetLastNHistoryDDLJobs(s.GetCurrentMeta(), jobCnt)
 	require.Nil(s.t, err)
 	require.Len(s.t, jobs, jobCnt)
@@ -122,30 +121,30 @@
 
 // Storage returns the tikv storage
 func (s *TestKit) Storage() kv.Storage {
 	return s.storage
 }
 
-// Tk returns the TestKit
-func (s *TestKit) Tk() *testkit.TestKit {
-	return s.tk
-}
-
 // GetCurrentMeta return the current meta snapshot
 func (s *TestKit) GetCurrentMeta() *meta.Meta {
 	ver, err := s.storage.CurrentVersion(oracle.GlobalTxnScope)

From 8a9d441c98322f1eb75dd7945b347b1ce815aea0 Mon Sep 17 00:00:00 2001
From: 3AceShowHand
Date: Thu, 23 Nov 2023 17:32:56 +0800
Subject: [PATCH 03/11] update testkit.

---
 cdc/entry/mounter_test.go        |   4 +-
 cdc/entry/schema_storage_test.go | 240 ++++++++++---------------------
 pkg/testkit/testkit.go           |  84 ++++++++---
 3 files changed, 139 insertions(+), 189 deletions(-)

diff --git a/cdc/entry/mounter_test.go b/cdc/entry/mounter_test.go
index cbfd62ad8d9..67ba14675dc 100644
--- a/cdc/entry/mounter_test.go
+++ b/cdc/entry/mounter_test.go
@@ -271,7 +271,7 @@ func testMounterDisableOldValue(t *testing.T, tc struct {
 	f, err := filter.NewFilter(config.GetDefaultReplicaConfig(), "")
 	require.Nil(t, err)
-	jobs, err := getAllHistoryDDLJob(tk.Storage(), f)
+	jobs, err := tk.GetAllHistoryDDLJob(f)
 	require.Nil(t, err)
 
 	scheamStorage, err := NewSchemaStorage(nil, 0, false, dummyChangeFeedID, util.RoleTester, f)
@@ -1713,8 +1713,6 @@ func TestNewDMRowChange(t *testing.T) {
 }
 
 func TestFormatColVal(t *testing.T) {
-	t.Parallel()
-
 	ftTypeFloatNotNull := types.NewFieldType(mysql.TypeFloat)
 	ftTypeFloatNotNull.SetFlag(mysql.NotNullFlag)
 	col := &timodel.ColumnInfo{FieldType: *ftTypeFloatNotNull}

diff --git a/cdc/entry/schema_storage_test.go b/cdc/entry/schema_storage_test.go
index 8cda912ba1b..f2d66c2704a 100644
--- a/cdc/entry/schema_storage_test.go
+++ b/cdc/entry/schema_storage_test.go
@@ -11,9 +11,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//go:build intest
-// +build intest
-
 package entry
 
 import (
@@ -23,36 +20,25 @@ import (
 	"testing"
 
 	"github.com/pingcap/errors"
-	"github.com/pingcap/log"
-	ticonfig "github.com/pingcap/tidb/config"
-	"github.com/pingcap/tidb/ddl"
-	"github.com/pingcap/tidb/domain"
-	tidbkv "github.com/pingcap/tidb/kv"
-	timeta "github.com/pingcap/tidb/meta"
 	timodel "github.com/pingcap/tidb/parser/model"
 	"github.com/pingcap/tidb/parser/mysql"
-	"github.com/pingcap/tidb/session"
-	"github.com/pingcap/tidb/sessionctx"
-	"github.com/pingcap/tidb/store/mockstore"
-	"github.com/pingcap/tidb/testkit"
 	"github.com/pingcap/tidb/types"
 	"github.com/pingcap/tiflow/cdc/entry/schema"
 	"github.com/pingcap/tiflow/cdc/kv"
 	"github.com/pingcap/tiflow/cdc/model"
 	"github.com/pingcap/tiflow/pkg/config"
 	"github.com/pingcap/tiflow/pkg/filter"
+	"github.com/pingcap/tiflow/pkg/testkit"
 	"github.com/pingcap/tiflow/pkg/util"
 	"github.com/stretchr/testify/require"
 	"github.com/tikv/client-go/v2/oracle"
-	"go.uber.org/zap"
)
 
 func TestSchema(t *testing.T) {
-	dbName := timodel.NewCIStr("Test")
 	// db and ignoreDB info
 	dbInfo := &timodel.DBInfo{
 		ID:    1,
-		Name:  dbName,
+		Name:  timodel.NewCIStr("Test"),
 		State: timodel.StatePublic,
 	}
 	// `createSchema` job1
@@ -67,7 +53,7 @@
 	// reconstruct the local schema
 	snap := schema.NewEmptySnapshot(false)
 	err := snap.HandleDDL(job)
-	require.Nil(t, err)
+	require.NoError(t, err)
 	_, exist := snap.SchemaByID(job.SchemaID)
 	require.True(t, exist)
 
@@ -81,7 +67,7 @@
 		Query:      "drop database test",
 	}
 	err = snap.HandleDDL(job)
-	require.Nil(t, err)
+	require.NoError(t, err)
 	_, exist = snap.SchemaByID(job.SchemaID)
 	require.False(t, exist)
 
@@ -95,7 +81,7 @@
 	}
 
 	err = snap.HandleDDL(job)
-	require.Nil(t, err)
+	require.NoError(t, err)
 	err = snap.HandleDDL(job)
 	require.True(t, errors.IsAlreadyExists(err))
 
@@ -109,7 +95,7 @@
 		Query:      "drop database test",
 	}
 	err = snap.HandleDDL(job)
-	require.Nil(t, err)
+	require.NoError(t, err)
 	err = snap.HandleDDL(job)
 	require.True(t, errors.IsNotFound(err))
 }
@@ -210,7 +196,7 @@ func TestTable(t *testing.T) {
 	snap := schema.NewEmptySnapshot(false)
 	for _, job := range jobs {
 		err := snap.HandleDDL(job)
-		require.Nil(t, err)
+		require.NoError(t, err)
 	}
 
 	// check the historical db that constructed above whether in the schema list of local schema
@@ -241,12 +227,12 @@
 		Query:      "truncate table " + tbName.O,
 	}
 	preTableInfo, err := snap.PreTableInfo(job)
-	require.Nil(t, err)
+	require.NoError(t, err)
 	require.Equal(t, preTableInfo.TableName, model.TableName{Schema: "Test", Table: "T", TableID: 2})
 	require.Equal(t, preTableInfo.ID, int64(2))
 
 	err = snap.HandleDDL(job)
-	require.Nil(t, err)
+	require.NoError(t, err)
 
 	_, ok = snap.PhysicalTableByID(tblInfo1.ID)
 	require.True(t, ok)
@@ -268,12 +254,12 @@
 		Query:      "drop table " + tbName.O,
 	}
 	preTableInfo, err = snap.PreTableInfo(job)
-	require.Nil(t, err)
+	require.NoError(t, err)
 	require.Equal(t, preTableInfo.TableName, model.TableName{Schema: "Test", Table: "T", TableID: 9})
 	require.Equal(t, preTableInfo.ID, int64(9))
 
 	err = snap.HandleDDL(job)
-	require.Nil(t, err)
+	require.NoError(t, err)
 
 	_, ok = snap.PhysicalTableByID(tblInfo.ID)
 	require.False(t, ok)
@@ -291,7 +277,7 @@
 		Query:      "drop table " + dbName.O,
 	}
 	err = snap.DoHandleDDL(job)
-	require.Nil(t, err)
+	require.NoError(t, err)
 }
 
 func TestHandleDDL(t *testing.T) {
@@ -413,7 +399,7 @@ func TestHandleRenameTables(t *testing.T) {
 			Query:      fmt.Sprintf("create database %s", dbInfo.Name.O),
 		}
 		err := snap.HandleDDL(job)
-		require.Nil(t, err)
+		require.NoError(t, err)
 	}
 	for i = 1; i < 3; i++ {
 		tblInfo := &timodel.TableInfo{
@@ -431,7 +417,7 @@
 			Query:      "create table " + tblInfo.Name.O,
 		}
 		err := snap.HandleDDL(job)
-		require.Nil(t, err)
+		require.NoError(t, err)
 	}
 
 	// rename table db1.table_1 to db2.x, db2.table_2 to db1.y
@@ -442,7 +428,7 @@
 	oldSchemaNames := []timodel.CIStr{timodel.NewCIStr("db_1"), timodel.NewCIStr("db_2")}
 	args := []interface{}{oldSchemaIDs, newSchemaIDs, newTableNames, oldTableIDs, oldSchemaNames}
 	rawArgs, err := json.Marshal(args)
-	require.Nil(t, err)
+	require.NoError(t, err)
 	var job *timodel.Job = &timodel.Job{
 		Type:    timodel.ActionRenameTables,
 		RawArgs: rawArgs,
@@ -548,12 +534,12 @@ func TestMultiVersionStorage(t *testing.T) {
 	jobs = append(jobs, job)
 
 	f, err := filter.NewFilter(config.GetDefaultReplicaConfig(), "")
-	require.Nil(t, err)
+	require.NoError(t, err)
 	storage, err := NewSchemaStorage(nil, 0, false, model.DefaultChangeFeedID("dummy"), util.RoleTester, f)
-	require.Nil(t, err)
+	require.NoError(t, err)
 	for _, job := range jobs {
 		err := storage.HandleDDLJob(job)
-		require.Nil(t, err)
+		require.NoError(t, err)
 	}
 
 	// `dropTable` job
@@ -567,7 +553,7 @@
 	}
 
 	err = storage.HandleDDLJob(job)
-	require.Nil(t, err)
+	require.NoError(t, err)
 
 	// `dropSchema` job
 	job = &timodel.Job{
@@ -579,11 +565,11 @@
 	}
 
 	err = storage.HandleDDLJob(job)
-	require.Nil(t, err)
+	require.NoError(t, err)
 	require.Equal(t, storage.(*schemaStorageImpl).resolvedTs, uint64(140))
 
 	snap, err := storage.GetSnapshot(ctx, 100)
-	require.Nil(t, err)
+	require.NoError(t, err)
 	_, exist := snap.SchemaByID(11)
 	require.True(t, exist)
 	_, exist = snap.PhysicalTableByID(12)
@@ -592,7 +578,7 @@
 	require.False(t, exist)
 
 	snap, err = storage.GetSnapshot(ctx, 115)
-	require.Nil(t, err)
+	require.NoError(t, err)
 	_, exist = snap.SchemaByID(11)
 	require.True(t, exist)
 	_, exist = snap.PhysicalTableByID(12)
@@ -601,7 +587,7 @@
 	require.False(t, exist)
 
 	snap, err = storage.GetSnapshot(ctx, 125)
-	require.Nil(t, err)
+	require.NoError(t, err)
 	_, exist = snap.SchemaByID(11)
 	require.True(t, exist)
 	_, exist = snap.PhysicalTableByID(12)
@@ -610,7 +596,7 @@
 	require.True(t, exist)
 
 	snap, err = storage.GetSnapshot(ctx, 135)
-	require.Nil(t, err)
+	require.NoError(t, err)
 	_, exist = snap.SchemaByID(11)
 	require.True(t, exist)
 	_, exist = snap.PhysicalTableByID(12)
@@ -619,7 +605,7 @@
 	require.True(t, exist)
 
 	snap, err = storage.GetSnapshot(ctx, 140)
-	require.Nil(t, err)
+	require.NoError(t, err)
 	_, exist = snap.SchemaByID(11)
 	require.False(t, exist)
 	_, exist = snap.PhysicalTableByID(12)
@@ -631,7 +617,7 @@
 	require.Equal(t, uint64(0), lastSchemaTs)
 
 	snap, err = storage.GetSnapshot(ctx, 100)
-	require.Nil(t, err)
+	require.NoError(t, err)
 	_, exist = snap.SchemaByID(11)
 	require.True(t, exist)
 	_, exist = snap.PhysicalTableByID(12)
@@ -642,7 +628,7 @@
 	_, err = storage.GetSnapshot(ctx, 100)
 	require.NotNil(t, err)
 	snap, err = storage.GetSnapshot(ctx, 115)
-	require.Nil(t, err)
+	require.NoError(t, err)
 	_, exist = snap.SchemaByID(11)
 	require.True(t, exist)
 	_, exist = snap.PhysicalTableByID(12)
@@ -656,7 +642,7 @@
 	storage.AdvanceResolvedTs(185)
 
 	snap, err = storage.GetSnapshot(ctx, 180)
-	require.Nil(t, err)
+	require.NoError(t, err)
 	_, exist = snap.SchemaByID(11)
 	require.False(t, exist)
 	_, exist = snap.PhysicalTableByID(12)
@@ -672,31 +658,23 @@
 }
 
 func TestCreateSnapFromMeta(t *testing.T) {
-	store, err := mockstore.NewMockStore()
-	require.Nil(t, err)
-	defer store.Close() //nolint:errcheck
-
-	session.SetSchemaLease(0)
-	session.DisableStats4Test()
-	domain, err := session.BootstrapSession(store)
-	require.Nil(t, err)
-	defer domain.Close()
-	domain.SetStatsUpdating(true)
-	tk := testkit.NewTestKit(t, store)
+	tk := testkit.New(t)
+	defer tk.Close()
+
 	tk.MustExec("create database test2")
 	tk.MustExec("create table test.simple_test1 (id bigint primary key)")
 	tk.MustExec("create table test.simple_test2 (id bigint primary key)")
 	tk.MustExec("create table test2.simple_test3 (id bigint primary key)")
 	tk.MustExec("create table test2.simple_test4 (id bigint primary key)")
 	tk.MustExec("create table test2.simple_test5 (a bigint)")
-	ver, err := store.CurrentVersion(oracle.GlobalTxnScope)
-	require.Nil(t, err)
-	meta, err := kv.GetSnapshotMeta(store, ver.Ver)
-	require.Nil(t, err)
+	ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope)
+	require.NoError(t, err)
+	meta, err := kv.GetSnapshotMeta(tk.Storage(), ver.Ver)
+	require.NoError(t, err)
 	f, err := filter.NewFilter(config.GetDefaultReplicaConfig(), "")
-	require.Nil(t, err)
+	require.NoError(t, err)
 	snap, err := schema.NewSnapshotFromMeta(meta, ver.Ver, false, f)
-	require.Nil(t, err)
+	require.NoError(t, err)
 	_, ok := snap.TableByName("test", "simple_test1")
 	require.True(t, ok)
 	tableID, ok := snap.TableIDByName("test2", "simple_test5")
@@ -708,39 +686,30 @@
 }
 
 func TestExplicitTables(t *testing.T) {
-	store, err := mockstore.NewMockStore()
-	require.Nil(t, err)
-	defer store.Close() //nolint:errcheck
-
-	session.SetSchemaLease(0)
-	session.DisableStats4Test()
-	domain, err := session.BootstrapSession(store)
-	require.Nil(t, err)
-	defer domain.Close()
-	domain.SetStatsUpdating(true)
-	tk := testkit.NewTestKit(t, store)
-	ver1, err := store.CurrentVersion(oracle.GlobalTxnScope)
-	require.Nil(t, err)
+	tk := testkit.New(t)
+	defer tk.Close()
+
+	ver1, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope)
+	require.NoError(t, err)
 	tk.MustExec("create database test2")
 	tk.MustExec("create table test.simple_test1 (id bigint primary key)")
 	tk.MustExec("create table test.simple_test2 (id bigint unique key)")
 	tk.MustExec("create table test2.simple_test3 (a bigint)")
 	tk.MustExec("create table test2.simple_test4 (a varchar(20) unique key)")
 	tk.MustExec("create table test2.simple_test5 (a varchar(20))")
-	ver2, err := store.CurrentVersion(oracle.GlobalTxnScope)
-	require.Nil(t, err)
-	meta1, err := kv.GetSnapshotMeta(store, ver1.Ver)
-	require.Nil(t, err)
+	ver2, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope)
+	require.NoError(t, err)
+	meta1, err := kv.GetSnapshotMeta(tk.Storage(), ver1.Ver)
+	require.NoError(t, err)
 	f, err := filter.NewFilter(config.GetDefaultReplicaConfig(), "")
-	require.Nil(t, err)
+	require.NoError(t, err)
 	snap1, err := schema.NewSnapshotFromMeta(meta1, ver1.Ver, true /* forceReplicate */, f)
-	require.Nil(t, err)
-	meta2, err := kv.GetSnapshotMeta(store, ver2.Ver)
-	require.Nil(t, err)
+	require.NoError(t, err)
+	meta2, err := kv.GetSnapshotMeta(tk.Storage(), ver2.Ver)
+	require.NoError(t, err)
 	snap2, err := schema.NewSnapshotFromMeta(meta2, ver2.Ver, false /* forceReplicate */, f)
-	require.Nil(t, err)
+	require.NoError(t, err)
 	snap3, err := schema.NewSnapshotFromMeta(meta2, ver2.Ver, true /* forceReplicate */, f)
-	require.Nil(t, err)
+	require.NoError(t, err)
 
 	// we don't need to count system tables since TiCDC
 	// don't replicate them and TiDB change them frequently,
@@ -859,45 +828,34 @@ func TestSchemaStorage(t *testing.T) {
	}}
 
 	testOneGroup := func(tc []string) {
-		store, err := mockstore.NewMockStore()
-		require.Nil(t, err)
-		defer store.Close() //nolint:errcheck
-		ticonfig.UpdateGlobal(func(conf *ticonfig.Config) {
-			conf.AlterPrimaryKey = true
-		})
-		session.SetSchemaLease(0)
-		session.DisableStats4Test()
-		domain, err := session.BootstrapSession(store)
-		require.Nil(t, err)
-		defer domain.Close()
-		domain.SetStatsUpdating(true)
-		tk := testkit.NewTestKit(t, store)
+		tk := testkit.New(t)
+		defer tk.Close()
 		tk.MustExec("set global tidb_enable_clustered_index = 'int_only';")
 		for _, ddlSQL := range tc {
 			tk.MustExec(ddlSQL)
 		}
 
 		f, err := filter.NewFilter(config.GetDefaultReplicaConfig(), "")
-		require.Nil(t, err)
+		require.NoError(t, err)
 
-		jobs, err := getAllHistoryDDLJob(store, f)
-		require.Nil(t, err)
+		jobs, err := tk.GetAllHistoryDDLJob(f)
+		require.NoError(t, err)
 
 		schemaStorage, err := NewSchemaStorage(nil, 0, false, model.DefaultChangeFeedID("dummy"), util.RoleTester, f)
-		require.Nil(t, err)
+		require.NoError(t, err)
 		for _, job := range jobs {
 			err := schemaStorage.HandleDDLJob(job)
-			require.Nil(t, err)
+			require.NoError(t, err)
 		}
 
 		for _, job := range jobs {
 			ts := job.BinlogInfo.FinishedTS
-			meta, err := kv.GetSnapshotMeta(store, ts)
-			require.Nil(t, err)
+			meta, err := kv.GetSnapshotMeta(tk.Storage(), ts)
+			require.NoError(t, err)
 			snapFromMeta, err := schema.NewSnapshotFromMeta(meta, ts, false, f)
-			require.Nil(t, err)
+			require.NoError(t, err)
 			snapFromSchemaStore, err := schemaStorage.GetSnapshot(ctx, ts)
-			require.Nil(t, err)
+			require.NoError(t, err)
 
 			s1 := snapFromMeta.DumpToString()
 			s2 := snapFromSchemaStore.DumpToString()
@@ -910,75 +868,27 @@
 	}
 }
 
-func getAllHistoryDDLJob(storage tidbkv.Storage, f filter.Filter) ([]*timodel.Job, error) {
-	s, err := session.CreateSession(storage)
-	if err != nil {
-		return nil, errors.Trace(err)
-	}
-
-	if s != nil {
-		defer s.Close()
-	}
-
-	store := domain.GetDomain(s.(sessionctx.Context)).Store()
-	txn, err := store.Begin()
-	if err != nil {
-		return nil, errors.Trace(err)
-	}
-	defer txn.Rollback() //nolint:errcheck
-	txnMeta := timeta.NewMeta(txn)
-
-	jobs, err := ddl.GetAllHistoryDDLJobs(txnMeta)
-	res := make([]*timodel.Job, 0)
-	if err != nil {
-		return nil, errors.Trace(err)
-	}
-	for i, job := range jobs {
-		ignoreSchema := f.ShouldIgnoreSchema(job.SchemaName)
-		ignoreTable := f.ShouldIgnoreTable(job.SchemaName, job.TableName)
-		if ignoreSchema || ignoreTable {
-			log.Info("Ignore ddl job", zap.Stringer("job", job))
-			continue
-		}
-		// Set State from Synced to Done.
-		// Because jobs are put to history queue after TiDB alter its state from
-		// Done to Synced.
-		jobs[i].State = timodel.JobStateDone
-		res = append(res, job)
-	}
-	return jobs, nil
-}
-
 // This test is used to show how the schemaStorage choose a handleKey of a table.
 // The handleKey is chosen by the following rules:
 // 1. If the table has a primary key, the handleKey is the first column of the primary key.
 // 2. If the table has not null unique key, the handleKey is the first column of the unique key.
 // 3. If the table has no primary key and no not null unique key, it has no handleKey.
 func TestHandleKey(t *testing.T) {
-	store, err := mockstore.NewMockStore()
-	require.Nil(t, err)
-	defer store.Close() //nolint:errcheck
-
-	session.SetSchemaLease(0)
-	session.DisableStats4Test()
-	domain, err := session.BootstrapSession(store)
-	require.Nil(t, err)
-	defer domain.Close()
-	domain.SetStatsUpdating(true)
-	tk := testkit.NewTestKit(t, store)
+	tk := testkit.New(t)
+	defer tk.Close()
 	tk.MustExec("create database test2")
 	tk.MustExec("create table test.simple_test1 (id bigint primary key)")
 	tk.MustExec("create table test.simple_test2 (id bigint, age int NOT NULL, " +
 		"name char NOT NULL, UNIQUE KEY(age, name))")
 	tk.MustExec("create table test.simple_test3 (id bigint, age int)")
-	ver, err := store.CurrentVersion(oracle.GlobalTxnScope)
-	require.Nil(t, err)
-	meta, err := kv.GetSnapshotMeta(store, ver.Ver)
-	require.Nil(t, err)
+	ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope)
+	require.NoError(t, err)
+	meta, err := kv.GetSnapshotMeta(tk.Storage(), ver.Ver)
+	require.NoError(t, err)
 	f, err := filter.NewFilter(config.GetDefaultReplicaConfig(), "")
-	require.Nil(t, err)
+	require.NoError(t, err)
 	snap, err := schema.NewSnapshotFromMeta(meta, ver.Ver, false, f)
-	require.Nil(t, err)
+	require.NoError(t, err)
 	tb1, ok := snap.TableByName("test", "simple_test1")
 	require.True(t, ok)
 	require.Equal(t, int64(-1), tb1.HandleIndexID) // pk is handleKey
@@ -1011,13 +921,11 @@
 }
 
 func TestGetPrimaryKey(t *testing.T) {
-	t.Parallel()
-
-	helper := NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	sql := `create table test.t1(a int primary key, b int)`
-	job := helper.DDL2Job(sql)
+	job := tk.DDL2Job(sql)
 
 	tableInfo := model.WrapTableInfo(0, "test", 0, job.BinlogInfo.TableInfo)
 	names := tableInfo.GetPrimaryKeyColumnNames()
@@ -1025,7 +933,7 @@
 	require.Containsf(t, names, "a", "names: %v", names)
 
 	sql = `create table test.t2(a int, b int, c int, primary key(a, b))`
-	job = helper.DDL2Job(sql)
+	job = tk.DDL2Job(sql)
 
 	tableInfo = model.WrapTableInfo(0, "test", 0, job.BinlogInfo.TableInfo)
 	names = tableInfo.GetPrimaryKeyColumnNames()

diff --git a/pkg/testkit/testkit.go b/pkg/testkit/testkit.go
index d545407e5b1..77f7d17e6eb 100644
--- a/pkg/testkit/testkit.go
+++ b/pkg/testkit/testkit.go
@@ -18,6 +18,7 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/pingcap/log"
 	ticonfig "github.com/pingcap/tidb/config"
 	tiddl "github.com/pingcap/tidb/ddl"
 	"github.com/pingcap/tidb/domain"
@@ -25,10 +26,14 @@ import (
 	"github.com/pingcap/tidb/meta"
 	"github.com/pingcap/tidb/parser/model"
 	"github.com/pingcap/tidb/session"
+	"github.com/pingcap/tidb/sessionctx"
 	"github.com/pingcap/tidb/store/mockstore"
 	"github.com/pingcap/tidb/testkit"
+	"github.com/pingcap/tiflow/pkg/errors"
+	"github.com/pingcap/tiflow/pkg/filter"
 	"github.com/stretchr/testify/require"
 	"github.com/tikv/client-go/v2/oracle"
+	"go.uber.org/zap"
)
 
 type TestKit struct {
@@ -60,11 +65,11 @@
 
 // DDL2Job executes the DDL stmt and returns the DDL job
-func (s *TestKit) DDL2Job(ddl string) *model.Job {
-	s.MustExec(ddl)
-	jobs, err := tiddl.GetLastNHistoryDDLJobs(s.GetCurrentMeta(), 1)
-	require.Nil(s.t, err)
-	require.Len(s.t, jobs, 1)
+func (tk *TestKit) DDL2Job(ddl string) *model.Job {
+	tk.MustExec(ddl)
+	jobs, err := tiddl.GetLastNHistoryDDLJobs(tk.GetCurrentMeta(), 1)
+	require.Nil(tk.t, err)
+	require.Len(tk.t, jobs, 1)
 	// Set State from Synced to Done.
 	// Because jobs are put to history queue after TiDB alter its state from
 	// Done to Synced.
@@ -103,7 +108,7 @@ func (s *TestKit) DDL2Job(ddl string) *model.Job {
 		newTableNames, oldTableIDs, oldSchemaNames,
 	}
 	rawArgs, err := json.Marshal(args)
-	require.NoError(s.t, err)
+	require.NoError(tk.t, err)
 	res.RawArgs = rawArgs
 	return res
 }
@@ -112,11 +117,11 @@
 // It is mainly used for "DROP TABLE" and "DROP VIEW" statement because
 // multiple jobs will be generated after executing these two types of
 // DDL statements.
-func (s *TestKit) DDL2Jobs(ddl string, jobCnt int) []*model.Job {
-	s.MustExec(ddl)
-	jobs, err := tiddl.GetLastNHistoryDDLJobs(s.GetCurrentMeta(), jobCnt)
-	require.Nil(s.t, err)
-	require.Len(s.t, jobs, jobCnt)
+func (tk *TestKit) DDL2Jobs(ddl string, jobCnt int) []*model.Job {
+	tk.MustExec(ddl)
+	jobs, err := tiddl.GetLastNHistoryDDLJobs(tk.GetCurrentMeta(), jobCnt)
+	require.Nil(tk.t, err)
+	require.Len(tk.t, jobs, jobCnt)
 	// Set State from Synced to Done.
 	// Because jobs are put to history queue after TiDB alter its state from
 	// Done to Synced.
@@ -127,19 +132,58 @@
 }
 
 // Storage returns the tikv storage
-func (s *TestKit) Storage() kv.Storage {
-	return s.storage
+func (tk *TestKit) Storage() kv.Storage {
+	return tk.storage
 }
 
 // GetCurrentMeta return the current meta snapshot
-func (s *TestKit) GetCurrentMeta() *meta.Meta {
-	ver, err := s.storage.CurrentVersion(oracle.GlobalTxnScope)
-	require.Nil(s.t, err)
-	return meta.NewSnapshotMeta(s.storage.GetSnapshot(ver))
+func (tk *TestKit) GetCurrentMeta() *meta.Meta {
+	ver, err := tk.storage.CurrentVersion(oracle.GlobalTxnScope)
+	require.Nil(tk.t, err)
+	return meta.NewSnapshotMeta(tk.storage.GetSnapshot(ver))
 }
 
 // Close closes the helper
-func (s *TestKit) Close() {
-	s.domain.Close()
-	s.storage.Close() //nolint:errcheck
+func (tk *TestKit) Close() {
+	tk.domain.Close()
+	tk.storage.Close() //nolint:errcheck
+}
+
+func (tk *TestKit) GetAllHistoryDDLJob(f filter.Filter) ([]*model.Job, error) {
+	s, err := session.CreateSession(tk.storage)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	if s != nil {
+		defer s.Close()
+	}
+
+	store := domain.GetDomain(s.(sessionctx.Context)).Store()
+	txn, err := store.Begin()
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	defer txn.Rollback() //nolint:errcheck
+	txnMeta := meta.NewMeta(txn)
+
+	jobs, err := tiddl.GetAllHistoryDDLJobs(txnMeta)
+	res := make([]*model.Job, 0)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	for i, job := range jobs {
+		ignoreSchema := f.ShouldIgnoreSchema(job.SchemaName)
+		ignoreTable := f.ShouldIgnoreTable(job.SchemaName, job.TableName)
+		if ignoreSchema || ignoreTable {
+			log.Info("Ignore ddl job", zap.Stringer("job", job))
+			continue
+		}
+		// Set State from Synced to Done.
+		// Because jobs are put to history queue after TiDB alter its state from
+		// Done to Synced.
+		jobs[i].State = model.JobStateDone
+		res = append(res, job)
+	}
+	return res, nil
+}

From d58e944f2bfeef864ba14e2fbc7240d63ec039d5 Mon Sep 17 00:00:00 2001
From: 3AceShowHand
Date: Thu, 23 Nov 2023 17:38:18 +0800
Subject: [PATCH 04/11] update testkit.
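
Convert the remaining callers from entry.NewSchemaTestHelper (and its
Tk()/Close() accessors) to the new pkg/testkit package. The change is
mechanical. A minimal sketch of the intended test shape after this
patch (the table name and DDL below are illustrative only, not taken
from any one test):

    tk := testkit.New(t)
    defer tk.Close()

    // MustExec is promoted from the embedded tidb testkit.TestKit.
    tk.MustExec("use test;")

    // DDL2Job executes the DDL and returns the finished DDL job.
    job := tk.DDL2Job("create table test.t(a int primary key)")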
---
 cdc/api/v2/api_helpers_test.go                |   5 +--
 cdc/owner/changefeed_test.go                  |   8 ++--
 cdc/owner/ddl_manager_test.go                 |  13 +++---
 cdc/owner/schema_test.go                      |  33 ++++++++-------
 cdc/puller/ddl_puller_test.go                 |   4 +-
 cdc/sink/dmlsink/mq/mq_dml_sink_test.go       |   5 +--
 .../columnselector/column_selector_test.go    |   9 ++--
 cdc/sink/dmlsink/mq/worker_test.go            |  17 ++++----
 pkg/filter/expr_filter_bench_test.go          |   2 +-
 pkg/filter/expr_filter_test.go                |   8 ++--
 pkg/sink/codec/canal/canal_encoder_test.go    |   9 ++--
 pkg/sink/codec/canal/canal_entry_test.go      |  20 ++++-----
 .../canal_json_row_event_encoder_test.go      |   9 ++--
 .../canal_json_txn_event_encoder_test.go      |  14 +++----
 pkg/sink/codec/canal/canal_test_util.go       |   8 ++--
 pkg/sink/codec/canal/type_test.go             |  41 +++++++++----------
 pkg/sink/codec/simple/encoder_test.go         |   9 ++--
 17 files changed, 102 insertions(+), 112 deletions(-)

diff --git a/cdc/api/v2/api_helpers_test.go b/cdc/api/v2/api_helpers_test.go
index 2aa23710761..5448f66d467 100644
--- a/cdc/api/v2/api_helpers_test.go
+++ b/cdc/api/v2/api_helpers_test.go
@@ -20,7 +20,6 @@ import (
 
 	"github.com/golang/mock/gomock"
 	mock_controller "github.com/pingcap/tiflow/cdc/controller/mock"
-	"github.com/pingcap/tiflow/cdc/entry"
 	"github.com/pingcap/tiflow/cdc/model"
 	"github.com/pingcap/tiflow/pkg/config"
 	cerror "github.com/pingcap/tiflow/pkg/errors"
@@ -31,7 +30,7 @@ func TestVerifyCreateChangefeedConfig(t *testing.T) {
 	ctx := context.Background()
 	pdClient := &mockPDClient{}
-	helper := entry.NewSchemaTestHelper(t)
-	helper.Tk().MustExec("use test;")
-	storage := helper.Storage()
+	tk := testkit.New(t)
+	tk.MustExec("use test;")
+	storage := tk.Storage()
 	ctrl := mock_controller.NewMockController(gomock.NewController(t))
@@ -112,7 +111,7 @@ func TestVerifyUpdateChangefeedConfig(t *testing.T) {
 		Config:   config.GetDefaultReplicaConfig(),
 	}
 	oldUpInfo := &model.UpstreamInfo{}
-	helper := entry.NewSchemaTestHelper(t)
-	helper.Tk().MustExec("use test;")
-	storage := helper.Storage()
+	tk := testkit.New(t)
+	tk.MustExec("use test;")
+	storage := tk.Storage()
 	h := &APIV2HelpersImpl{}

diff --git a/cdc/owner/changefeed_test.go b/cdc/owner/changefeed_test.go
index 14c3615a39e..15d05ce4a04 100644
--- a/cdc/owner/changefeed_test.go
+++ b/cdc/owner/changefeed_test.go
@@ -314,8 +314,8 @@ func TestChangefeedHandleError(t *testing.T) {
 }
 
 func TestExecDDL(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 	// Creates a table, which will be deleted at the start-ts of the changefeed.
 	// It is expected that the changefeed DOES NOT replicate this table.
-	helper.DDL2Job("create database test0")
+	tk.DDL2Job("create database test0")
@@ -399,8 +399,8 @@
 }
 
 func TestEmitCheckpointTs(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 	// Creates a table, which will be deleted at the start-ts of the changefeed.
 	// It is expected that the changefeed DOES NOT replicate this table.
-	helper.DDL2Job("create database test0")
+	tk.DDL2Job("create database test0")

diff --git a/cdc/owner/ddl_manager_test.go b/cdc/owner/ddl_manager_test.go
index 3255608be32..80bea1e6ed3 100644
--- a/cdc/owner/ddl_manager_test.go
+++ b/cdc/owner/ddl_manager_test.go
@@ -19,7 +19,6 @@ import (
 	"testing"
 
 	timodel "github.com/pingcap/tidb/parser/model"
-	"github.com/pingcap/tiflow/cdc/entry"
 	"github.com/pingcap/tiflow/cdc/model"
 	"github.com/pingcap/tiflow/cdc/redo"
 	"github.com/pingcap/tiflow/cdc/scheduler/schedulepb"
@@ -159,8 +158,8 @@ func TestGetSnapshotTs(t *testing.T) {
 }
 
 func TestExecRenameTablesDDL(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 	ctx := cdcContext.NewBackendContext4Test(true)
 	dm := createDDLManagerForTest(t)
 	mockDDLSink := dm.ddlSink.(*mockDDLSink)
@@ -259,8 +258,8 @@
 }
 
 func TestExecDropTablesDDL(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 	ctx := cdcContext.NewBackendContext4Test(true)
 	dm := createDDLManagerForTest(t)
 	mockDDLSink := dm.ddlSink.(*mockDDLSink)
@@ -323,8 +322,8 @@
 }
 
 func TestExecDropViewsDDL(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 	ctx := cdcContext.NewBackendContext4Test(true)
 	dm := createDDLManagerForTest(t)
 	mockDDLSink := dm.ddlSink.(*mockDDLSink)

diff --git a/cdc/owner/schema_test.go b/cdc/owner/schema_test.go
index 53148bf20f8..b39a21a2397 100644
--- a/cdc/owner/schema_test.go
+++ b/cdc/owner/schema_test.go
@@ -23,7 +23,6 @@ import (
 	timodel "github.com/pingcap/tidb/parser/model"
 	"github.com/pingcap/tidb/parser/mysql"
 	"github.com/pingcap/tidb/parser/types"
-	"github.com/pingcap/tiflow/cdc/entry"
 	"github.com/pingcap/tiflow/cdc/model"
 	"github.com/pingcap/tiflow/pkg/config"
 	"github.com/pingcap/tiflow/pkg/filter"
@@ -34,8 +33,8 @@ var dummyChangeFeedID = model.DefaultChangeFeedID("dummy_changefeed")
 
 func TestAllPhysicalTables(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
-	ver, err := helper.Storage().CurrentVersion(oracle.GlobalTxnScope)
+	tk := testkit.New(t)
+	defer tk.Close()
+	ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope)
 	require.Nil(t, err)
 	f, err := filter.NewFilter(config.GetDefaultReplicaConfig(), "")
@@ -92,8 +91,8 @@
 }
 
 func TestAllTables(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
-	ver, err := helper.Storage().CurrentVersion(oracle.GlobalTxnScope)
+	tk := testkit.New(t)
+	defer tk.Close()
+	ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope)
 	require.Nil(t, err)
 	f, err := filter.NewFilter(config.GetDefaultReplicaConfig(), "")
@@ -131,8 +130,8 @@
 }
 
 func TestIsIneligibleTableID(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
-	ver, err := helper.Storage().CurrentVersion(oracle.GlobalTxnScope)
+	tk := testkit.New(t)
+	defer tk.Close()
+	ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope)
 	require.Nil(t, err)
 	f, err := filter.NewFilter(config.GetDefaultReplicaConfig(), "")
@@ -189,8 +188,8 @@ func compareEvents(t *testing.T, e1, e2 *model.DDLEvent) {
 }
 
 func TestBuildDDLEventsFromSingleTableDDL(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
-	ver, err := helper.Storage().CurrentVersion(oracle.GlobalTxnScope)
+	tk := testkit.New(t)
+	defer tk.Close()
+	ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope)
 	require.Nil(t, err)
 	f, err := filter.NewFilter(config.GetDefaultReplicaConfig(), "")
@@ -264,8 +263,8 @@
 }
 
 func TestBuildDDLEventsFromRenameTablesDDL(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
-	ver, err := helper.Storage().CurrentVersion(oracle.GlobalTxnScope)
+	ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope)
 	require.Nil(t, err)
@@ -392,8 +391,8 @@
 }
 
 func TestBuildDDLEventsFromDropTablesDDL(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
-	ver, err := helper.Storage().CurrentVersion(oracle.GlobalTxnScope)
+	ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope)
 	require.Nil(t, err)
@@ -495,8 +494,8 @@
 }
 
 func TestBuildDDLEventsFromDropViewsDDL(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
-	ver, err := helper.Storage().CurrentVersion(oracle.GlobalTxnScope)
+	ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope)
 	require.Nil(t, err)
@@ -616,8 +615,8 @@
 }
 
 func TestBuildIgnoredDDLJob(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
-	ver, err := helper.Storage().CurrentVersion(oracle.GlobalTxnScope)
+	ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope)
 	require.Nil(t, err)

diff --git a/cdc/puller/ddl_puller_test.go b/cdc/puller/ddl_puller_test.go
index 8b6a7dcd3fa..95f836c591f 100644
--- a/cdc/puller/ddl_puller_test.go
+++ b/cdc/puller/ddl_puller_test.go
@@ -154,7 +154,7 @@ func TestHandleRenameTable(t *testing.T) {
 	startTs := uint64(10)
 	mockPuller := newMockPuller(t, startTs)
-	ddlJobPuller, helper := newMockDDLJobPuller(t, mockPuller, true)
-	defer helper.Close()
+	ddlJobPuller, tk := newMockDDLJobPuller(t, mockPuller, true)
+	defer tk.Close()
 	ddlJobPullerImpl := ddlJobPuller.(*ddlJobPullerImpl)
 
 	cfg := config.GetDefaultReplicaConfig()
@@ -380,7 +380,7 @@ func TestHandleJob(t *testing.T) {
 	startTs := uint64(10)
 	mockPuller := newMockPuller(t, startTs)
-	ddlJobPuller, helper := newMockDDLJobPuller(t, mockPuller, true)
-	defer helper.Close()
+	ddlJobPuller, tk := newMockDDLJobPuller(t, mockPuller, true)
+	defer tk.Close()
 	ddlJobPullerImpl := ddlJobPuller.(*ddlJobPullerImpl)
 
 	cfg := config.GetDefaultReplicaConfig()

diff --git a/cdc/sink/dmlsink/mq/mq_dml_sink_test.go b/cdc/sink/dmlsink/mq/mq_dml_sink_test.go
index c297a3079cd..bd27c61a7ff 100644
--- a/cdc/sink/dmlsink/mq/mq_dml_sink_test.go
+++ b/cdc/sink/dmlsink/mq/mq_dml_sink_test.go
@@ -21,7 +21,6 @@ import (
 	"time"
 
 	"github.com/pingcap/tidb/parser/mysql"
-	"github.com/pingcap/tiflow/cdc/entry"
 	"github.com/pingcap/tiflow/cdc/model"
 	"github.com/pingcap/tiflow/cdc/sink/dmlsink"
 	"github.com/pingcap/tiflow/cdc/sink/dmlsink/mq/dmlproducer"
@@ -81,8 +80,8 @@ func TestWriteEvents(t *testing.T) {
 	require.NotNil(t, s)
 	defer s.Close()
 
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	sql := `create table test.t(a varchar(255) primary key)`
-	job := helper.DDL2Job(sql)
+	job := tk.DDL2Job(sql)

diff --git a/cdc/sink/dmlsink/mq/transformer/columnselector/column_selector_test.go b/cdc/sink/dmlsink/mq/transformer/columnselector/column_selector_test.go
index 30e5aa56307..feb49a24e7e 100644
--- a/cdc/sink/dmlsink/mq/transformer/columnselector/column_selector_test.go
+++ b/cdc/sink/dmlsink/mq/transformer/columnselector/column_selector_test.go
@@ -19,7 +19,6 @@ package columnselector
 import (
 	"testing"
 
-	"github.com/pingcap/tiflow/cdc/entry"
 	"github.com/pingcap/tiflow/cdc/model"
 	"github.com/pingcap/tiflow/cdc/sink/dmlsink/mq/dispatcher"
 	"github.com/pingcap/tiflow/pkg/config"
@@ -93,8 +92,8 @@ func TestNewColumnSelector(t *testing.T) {
 }
 
 func TestVerifyTables(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	sql := `create table test.t1(
 		a int primary key,
@@ -263,8 +262,8 @@
 	eventRouter, err := dispatcher.NewEventRouter(replicaConfig, config.ProtocolDefault, "default", "default")
 	require.NoError(t, err)
 
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	sql := `create table test.t1(a int primary key, b int, c int)`
-	job := helper.DDL2Job(sql)
+	job := tk.DDL2Job(sql)

diff --git a/cdc/sink/dmlsink/mq/worker_test.go b/cdc/sink/dmlsink/mq/worker_test.go
index c31db5d074d..d3f7c6b3b32 100644
--- a/cdc/sink/dmlsink/mq/worker_test.go
+++ b/cdc/sink/dmlsink/mq/worker_test.go
@@ -20,7 +20,6 @@ import (
 	"time"
 
 	"github.com/pingcap/tidb/parser/mysql"
-	"github.com/pingcap/tiflow/cdc/entry"
 	"github.com/pingcap/tiflow/cdc/model"
 	"github.com/pingcap/tiflow/cdc/sink/dmlsink"
 	"github.com/pingcap/tiflow/cdc/sink/dmlsink/mq/dmlproducer"
@@ -63,8 +62,8 @@ func newNonBatchEncodeWorker(ctx context.Context, t *testing.T) (*worker, dmlpro
 }
 
 func TestNonBatchEncode_SendMessages(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	sql := `create table test.t(a varchar(255) primary key)`
-	job := helper.DDL2Job(sql)
+	job := tk.DDL2Job(sql)
@@ -264,8 +263,8 @@ func TestBatchEncode_Group(t *testing.T) {
 }
 
 func TestBatchEncode_GroupWhenTableStopping(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	sql := `create table test.t(a varchar(255) primary key)`
-	job := helper.DDL2Job(sql)
+	job := tk.DDL2Job(sql)
@@ -358,8 +357,8 @@ func TestBatchEncode_SendMessages(t *testing.T) {
 	worker, p := newBatchEncodeWorker(ctx, t)
 	defer worker.close()
 
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	sql := `create table test.t(a varchar(255) primary key)`
-	job := helper.DDL2Job(sql)
+	job := tk.DDL2Job(sql)
@@ -517,8 +516,8 @@ func TestNonBatchEncode_SendMessagesWhenTableStopping(t *testing.T) {
 	replicatingStatus := state.TableSinkSinking
 	stoppedStatus := state.TableSinkStopping
 
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	sql := `create table test.t(a varchar(255) primary key)`
-	job := helper.DDL2Job(sql)
+	job := tk.DDL2Job(sql)

diff --git a/pkg/filter/expr_filter_bench_test.go b/pkg/filter/expr_filter_bench_test.go
index 62263e8e2f8..a0b6d601eec 100644
--- a/pkg/filter/expr_filter_bench_test.go
+++ b/pkg/filter/expr_filter_bench_test.go
@@ -38,7 +38,7 @@ func BenchmarkSkipDML(b *testing.B) {
 
 	t := &testing.T{}
 	helper := newTestHelper(t)
-	defer helper.close()
+	defer tk.Close()
 	helper.getTk().MustExec("use test;")
 	ddl := "create table test.student(id int primary key, name char(50), age int, gender char(10))"
 	tableInfo := helper.execDDL(ddl)

diff --git a/pkg/filter/expr_filter_test.go b/pkg/filter/expr_filter_test.go
index 66fe1f02aa8..119448ada4c 100644
--- a/pkg/filter/expr_filter_test.go
+++ b/pkg/filter/expr_filter_test.go
@@ -27,7 +27,7 @@ func TestShouldSkipDMLBasic(t *testing.T) {
 	helper := newTestHelper(t)
-	defer helper.close()
+	defer tk.Close()
 	helper.getTk().MustExec("use test;")
 
 	type innerCase struct {
@@ -355,7 +355,7 @@
 // are as expected.
 func TestShouldSkipDMLError(t *testing.T) {
 	helper := newTestHelper(t)
-	defer helper.close()
+	defer tk.Close()
 	helper.getTk().MustExec("use test;")
 
 	type innerCase struct {
@@ -472,7 +472,7 @@
 // the filter will works as expected.
 func TestShouldSkipDMLTableUpdated(t *testing.T) {
 	helper := newTestHelper(t)
-	defer helper.close()
+	defer tk.Close()
 	helper.getTk().MustExec("use test;")
 
 	type innerCase struct {
@@ -668,7 +668,7 @@
 
 func TestVerify(t *testing.T) {
 	helper := newTestHelper(t)
-	defer helper.close()
+	defer tk.Close()
 	helper.getTk().MustExec("use test;")
 
 	type testCase struct {

diff --git a/pkg/sink/codec/canal/canal_encoder_test.go b/pkg/sink/codec/canal/canal_encoder_test.go
index b56cbf9a730..955284f33b8 100644
--- a/pkg/sink/codec/canal/canal_encoder_test.go
+++ b/pkg/sink/codec/canal/canal_encoder_test.go
@@ -19,7 +19,6 @@ import (
 
 	"github.com/golang/protobuf/proto"
 	"github.com/pingcap/tidb/parser/mysql"
-	"github.com/pingcap/tiflow/cdc/entry"
 	"github.com/pingcap/tiflow/cdc/model"
 	"github.com/pingcap/tiflow/pkg/config"
 	"github.com/pingcap/tiflow/pkg/sink/codec/common"
@@ -93,8 +92,8 @@ var (
)
 
 func TestCanalBatchEncoder(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	sql := `create table test.t(a varchar(10) primary key)`
-	job := helper.DDL2Job(sql)
+	job := tk.DDL2Job(sql)
@@ -151,8 +150,8 @@
 }
 
 func TestCanalAppendRowChangedEventWithCallback(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	sql := `create table test.t(a varchar(10) primary key)`
-	job := helper.DDL2Job(sql)
+	job := tk.DDL2Job(sql)

diff --git a/pkg/sink/codec/canal/canal_entry_test.go b/pkg/sink/codec/canal/canal_entry_test.go
index 25d833f3c50..49f16b4ac97 100644
--- a/pkg/sink/codec/canal/canal_entry_test.go
+++ b/pkg/sink/codec/canal/canal_entry_test.go
@@ -19,19 +19,19 @@ import (
 	"github.com/golang/protobuf/proto"
 	mm "github.com/pingcap/tidb/parser/model"
 	"github.com/pingcap/tidb/parser/mysql"
-	"github.com/pingcap/tiflow/cdc/entry"
 	"github.com/pingcap/tiflow/cdc/model"
 	"github.com/pingcap/tiflow/pkg/config"
 	"github.com/pingcap/tiflow/pkg/sink/codec/common"
 	"github.com/pingcap/tiflow/pkg/sink/codec/internal"
+	"github.com/pingcap/tiflow/pkg/testkit"
 	canal "github.com/pingcap/tiflow/proto/canal"
 	"github.com/stretchr/testify/require"
 	"golang.org/x/text/encoding/charmap"
)
 
 func TestInsert(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	sql := `create table test.t(
 	id int primary key,
 	name varchar(32),
 	tiny tinyint unsigned,
 	comment text,
 	bb blob)`
-	job := helper.DDL2Job(sql)
+	job := tk.DDL2Job(sql)
 	tableInfo := model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo)
 	_, _, colInfos := tableInfo.GetRowColInfos()
@@ -123,11 +123,11 @@
 }
 
 func TestUpdate(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	sql := `create table test.t(id int primary key, name varchar(32))`
-	job := helper.DDL2Job(sql)
+	job := tk.DDL2Job(sql)
 	tableInfo := model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo)
 	_, _, colInfos := tableInfo.GetRowColInfos()
@@ -212,11 +212,11 @@
 }
 
 func TestDelete(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	sql := `create table test.t(id int primary key)`
-	job := helper.DDL2Job(sql)
+	job := tk.DDL2Job(sql)
 	tableInfo := model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo)
 	_, _, colInfos := tableInfo.GetRowColInfos()

diff --git a/pkg/sink/codec/canal/canal_json_row_event_encoder_test.go b/pkg/sink/codec/canal/canal_json_row_event_encoder_test.go
index ef48961636c..14ab5b482ce 100644
--- a/pkg/sink/codec/canal/canal_json_row_event_encoder_test.go
+++ b/pkg/sink/codec/canal/canal_json_row_event_encoder_test.go
@@ -20,7 +20,6 @@ import (
 	"testing"
 
 	"github.com/pingcap/tidb/parser/mysql"
-	"github.com/pingcap/tiflow/cdc/entry"
 	"github.com/pingcap/tiflow/cdc/model"
 	"github.com/pingcap/tiflow/pkg/compression"
 	"github.com/pingcap/tiflow/pkg/config"
@@ -581,8 +580,8 @@ func TestDDLEventWithExtensionValueMarshal(t *testing.T) {
 }
 
 func TestCanalJSONAppendRowChangedEventWithCallback(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	sql := `create table test.t(a varchar(255) primary key)`
-	job := helper.DDL2Job(sql)
+	job := tk.DDL2Job(sql)
@@ -673,8 +672,8 @@
 }
 
 func TestMaxMessageBytes(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	sql := `create table test.t(a varchar(255) primary key)`
-	job := helper.DDL2Job(sql)
+	job := tk.DDL2Job(sql)

diff --git a/pkg/sink/codec/canal/canal_json_txn_event_encoder_test.go b/pkg/sink/codec/canal/canal_json_txn_event_encoder_test.go
index dfbae44c58e..b74d03853b3 100644
--- a/pkg/sink/codec/canal/canal_json_txn_event_encoder_test.go
+++ b/pkg/sink/codec/canal/canal_json_txn_event_encoder_test.go
@@ -17,10 +17,10 @@ import (
 	"testing"
 
 	"github.com/pingcap/tidb/parser/mysql"
-	"github.com/pingcap/tiflow/cdc/entry"
 	"github.com/pingcap/tiflow/cdc/model"
 	"github.com/pingcap/tiflow/pkg/config"
 	"github.com/pingcap/tiflow/pkg/sink/codec/common"
+	"github.com/pingcap/tiflow/pkg/testkit"
 	"github.com/stretchr/testify/require"
)
 
@@ -35,11 +35,11 @@ func TestBuildCanalJSONTxnEventEncoder(t *testing.T) {
 }
 
 func TestCanalJSONTxnEventEncoderMaxMessageBytes(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	sql := `create table test.t(a varchar(255) primary key)`
-	job := helper.DDL2Job(sql)
+	job := tk.DDL2Job(sql)
 	tableInfo := model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo)
 	_, _, colInfos := tableInfo.GetRowColInfos()
@@ -77,11 +77,11 @@
 }
 
 func TestCanalJSONAppendTxnEventEncoderWithCallback(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	sql := `create table test.t(a varchar(255) primary key)`
-	job := helper.DDL2Job(sql)
+	job := tk.DDL2Job(sql)
 	tableInfo := model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo)
 	_, _, colInfos := tableInfo.GetRowColInfos()

diff --git a/pkg/sink/codec/canal/canal_test_util.go b/pkg/sink/codec/canal/canal_test_util.go
index c7d3d82ba03..85c75fdbacc 100644
--- a/pkg/sink/codec/canal/canal_test_util.go
+++ b/pkg/sink/codec/canal/canal_test_util.go
@@ -18,8 +18,8 @@ import (
 
 	mm "github.com/pingcap/tidb/parser/model"
 	"github.com/pingcap/tidb/parser/mysql"
-	"github.com/pingcap/tiflow/cdc/entry"
 	"github.com/pingcap/tiflow/cdc/model"
+	"github.com/pingcap/tiflow/pkg/testkit"
)
 
 type testColumnTuple struct {
@@ -383,8 +383,8 @@ func collectExpectedDecodedValue(columns []*testColumnTuple) map[string]interfac
 }
 
 func newLargeEvent4Test(t *testing.T) (*model.RowChangedEvent, *model.RowChangedEvent, *model.RowChangedEvent) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	sql := `create table test.t(
 	t tinyint primary key,
@@ -439,7 +439,7 @@
 	setT set('a', 'b', 'c'),
 	bitT bit(4),
 	jsonT json)`
-	job := helper.DDL2Job(sql)
+	job := tk.DDL2Job(sql)
 	tableInfo := model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo)
 	_, _, colInfo := tableInfo.GetRowColInfos()

diff --git a/pkg/sink/codec/canal/type_test.go b/pkg/sink/codec/canal/type_test.go
index 864a6faa84a..e504d91bb87 100644
--- a/pkg/sink/codec/canal/type_test.go
+++ b/pkg/sink/codec/canal/type_test.go
@@ -16,7 +16,6 @@ package canal
 import (
 	"testing"
 
-	"github.com/pingcap/tiflow/cdc/entry"
 	"github.com/pingcap/tiflow/cdc/model"
 	"github.com/pingcap/tiflow/pkg/sink/codec/internal"
 	"github.com/pingcap/tiflow/pkg/sink/codec/utils"
@@ -24,8 +23,8 @@ import (
)
 
 func TestGetMySQLType4IntTypes(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	sql := `create table test.t1 (
 	a int primary key,
@@ -388,8 +387,8 @@
 }
 
 func TestGetMySQLType4FloatType(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	sql := `create table test.t1(
 	a int primary key,
@@ -504,8 +503,8 @@
 }
 
 func TestGetMySQLType4Decimal(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	sql := `create table test.t1(a int primary key, b decimal, c numeric)`
-	job := helper.DDL2Job(sql)
+	job := tk.DDL2Job(sql)
@@ -577,8 +576,8 @@
 }
 
 func TestGetMySQLType4TimeTypes(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	sql := `create table test.t1(a int primary key, b time, c time(3))`
-	job := helper.DDL2Job(sql)
+	job := tk.DDL2Job(sql)
@@ -694,8 +693,8 @@
 }
 
 func TestGetMySQLType4Char(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	sql := `create table test.t(a int primary key, b char, c char(123))`
-	job := helper.DDL2Job(sql)
+	job := tk.DDL2Job(sql)
@@ -740,8 +739,8 @@
 }
 
 func TestGetMySQLType4TextTypes(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	sql := `create table test.t1(a int primary key, b text, c tinytext, d mediumtext, e longtext)`
-	job := helper.DDL2Job(sql)
+	job := tk.DDL2Job(sql)
@@ -795,8 +794,8 @@
 }
 
 func TestGetMySQLType4BinaryType(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	sql := `create table test.t1(a int primary key, b binary, c binary(10))`
-	job := helper.DDL2Job(sql)
+	job := tk.DDL2Job(sql)
@@ -841,8 +840,8 @@
 }
 
 func TestGetMySQLType4BlobType(t *testing.T) {
-	helper := entry.NewSchemaTestHelper(t)
-	defer helper.Close()
+	tk := testkit.New(t)
+	defer tk.Close()
 
 	sql := `create table test.t1(a int primary key, b blob, c tinyblob, d mediumblob, e
longblob)` job := helper.DDL2Job(sql) @@ -896,8 +895,8 @@ func TestGetMySQLType4BlobType(t *testing.T) { } func TestGetMySQLType4EnumAndSet(t *testing.T) { - helper := entry.NewSchemaTestHelper(t) - defer helper.Close() + tk := testkit.New(t) + defer tk.Close() sql := `create table test.t(a int primary key, b enum('a', 'b', 'c'), c set('a', 'b', 'c'))` job := helper.DDL2Job(sql) @@ -933,8 +932,8 @@ func TestGetMySQLType4EnumAndSet(t *testing.T) { } func TestGetMySQLType4JSON(t *testing.T) { - helper := entry.NewSchemaTestHelper(t) - defer helper.Close() + tk := testkit.New(t) + defer tk.Close() sql := `create table test.t(a int primary key, b json)` job := helper.DDL2Job(sql) diff --git a/pkg/sink/codec/simple/encoder_test.go b/pkg/sink/codec/simple/encoder_test.go index 081a087e903..f49e2292e90 100644 --- a/pkg/sink/codec/simple/encoder_test.go +++ b/pkg/sink/codec/simple/encoder_test.go @@ -16,7 +16,6 @@ package simple import ( "testing" - "github.com/pingcap/tiflow/cdc/entry" "github.com/pingcap/tiflow/cdc/model" "github.com/stretchr/testify/require" ) @@ -45,8 +44,8 @@ func TestEncodeCheckpoint(t *testing.T) { } func TestEncodeDDLEvent(t *testing.T) { - helper := entry.NewSchemaTestHelper(t) - defer helper.Close() + tk := testkit.New(t) + defer tk.Close() sql := `create table test.test(id int primary key, name varchar(255) not null, age int, email varchar(255) not null, key idx_name(name), key idx_name_email(name, email))` @@ -85,8 +84,8 @@ func TestEncodeDDLEvent(t *testing.T) { } func TestEncodeBootstrapEvent(t *testing.T) { - helper := entry.NewSchemaTestHelper(t) - defer helper.Close() + tk := testkit.New(t) + defer tk.Close() sql := `create table test.test(id int primary key, name varchar(255) not null, age int, email varchar(255) not null, key idx_name(name), key idx_name_email(name, email))` From 62eacce192db2823ef7f91a18915401cbde49a20 Mon Sep 17 00:00:00 2001 From: 3AceShowHand Date: Thu, 23 Nov 2023 18:17:35 +0800 Subject: [PATCH 05/11] testkit support dml to event. 
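
This gives the kit a DML path next to the DDL one: DDL2Job now also hands the finished job to the kit's schema storage and advances its resolved ts, and the new DML2Event executes a statement, reads back the last key-value pair written to the table's key range, and decodes it into a row changed event through the mounter. A minimal usage sketch (hypothetical test; the table schema and values are invented, the API calls mirror this patch):

    package testkit_test

    import (
        "testing"

        "github.com/pingcap/tiflow/pkg/config"
        "github.com/pingcap/tiflow/pkg/testkit"
        "github.com/stretchr/testify/require"
    )

    func TestDML2EventSketch(t *testing.T) {
        // New now takes the replica config that is used to build the
        // filter, the schema storage and the mounter.
        tk := testkit.New(t, config.GetDefaultReplicaConfig())
        defer tk.Close()

        // Apply the DDL first so the schema storage knows about test.t.
        _ = tk.DDL2Job(`create table test.t(id int primary key, name varchar(32))`)

        // Run a DML and get the mounted row changed event back.
        event := tk.DML2Event(`insert into test.t values (1, "a")`, "test", "t")
        require.NotNil(t, event)
    }

DML2Event assumes the statement produced a single Put, so for now it only fits simple inserts and updates.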
--- pkg/sink/codec/simple/encoder_test.go | 5 +- pkg/testkit/testkit.go | 114 ++++++++++++++++++++++---- 2 files changed, 100 insertions(+), 19 deletions(-) diff --git a/pkg/sink/codec/simple/encoder_test.go b/pkg/sink/codec/simple/encoder_test.go index f49e2292e90..b9d216830c2 100644 --- a/pkg/sink/codec/simple/encoder_test.go +++ b/pkg/sink/codec/simple/encoder_test.go @@ -17,6 +17,7 @@ import ( "testing" "github.com/pingcap/tiflow/cdc/model" + "github.com/pingcap/tiflow/pkg/testkit" "github.com/stretchr/testify/require" ) @@ -49,7 +50,7 @@ func TestEncodeDDLEvent(t *testing.T) { sql := `create table test.test(id int primary key, name varchar(255) not null, age int, email varchar(255) not null, key idx_name(name), key idx_name_email(name, email))` - job := helper.DDL2Job(sql) + job := tk.DDL2Job(sql) tableInfo := model.WrapTableInfo(1, "test", 1, job.BinlogInfo.TableInfo) enc := NewBuilder().Build() ddlEvent := &model.DDLEvent{ @@ -89,7 +90,7 @@ func TestEncodeBootstrapEvent(t *testing.T) { sql := `create table test.test(id int primary key, name varchar(255) not null, age int, email varchar(255) not null, key idx_name(name), key idx_name_email(name, email))` - job := helper.DDL2Job(sql) + job := tk.DDL2Job(sql) tableInfo := model.WrapTableInfo(1, "test", 1, job.BinlogInfo.TableInfo) enc := NewBuilder().Build() ddlEvent := &model.DDLEvent{ diff --git a/pkg/testkit/testkit.go b/pkg/testkit/testkit.go index 77f7d17e6eb..6b3fd8d6999 100644 --- a/pkg/testkit/testkit.go +++ b/pkg/testkit/testkit.go @@ -14,9 +14,11 @@ package testkit import ( + "context" "encoding/json" "strings" "testing" + "time" "github.com/pingcap/log" ticonfig "github.com/pingcap/tidb/config" @@ -24,13 +26,18 @@ import ( "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" - "github.com/pingcap/tidb/parser/model" + timodel "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/testkit" + "github.com/pingcap/tiflow/cdc/entry" + "github.com/pingcap/tiflow/cdc/model" + "github.com/pingcap/tiflow/pkg/config" "github.com/pingcap/tiflow/pkg/errors" "github.com/pingcap/tiflow/pkg/filter" + "github.com/pingcap/tiflow/pkg/spanz" + "github.com/pingcap/tiflow/pkg/util" "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/oracle" "go.uber.org/zap" @@ -41,10 +48,13 @@ type TestKit struct { t *testing.T storage kv.Storage domain *domain.Domain + + schemaStorage entry.SchemaStorage + mounter entry.Mounter } // New return a new testkit -func New(t *testing.T) *TestKit { +func New(t *testing.T, replicaConfig *config.ReplicaConfig) *TestKit { store, err := mockstore.NewMockStore() require.NoError(t, err) ticonfig.UpdateGlobal(func(conf *ticonfig.Config) { @@ -56,16 +66,78 @@ func New(t *testing.T) *TestKit { require.NoError(t, err) domain.SetStatsUpdating(true) tk := testkit.NewTestKit(t, store) + + filter, err := filter.NewFilter(replicaConfig, "") + require.NoError(t, err) + + ver, err := store.CurrentVersion(oracle.GlobalTxnScope) + require.NoError(t, err) + + changefeedID := model.DefaultChangeFeedID("changefeed-testkit") + + meta := meta.NewSnapshotMeta(store.GetSnapshot(ver)) + schemaStorage, err := entry.NewSchemaStorage( + meta, ver.Ver, replicaConfig.ForceReplicate, + changefeedID, util.RoleTester, filter) + require.NoError(t, err) + + mounter := entry.NewMounter(schemaStorage, changefeedID, time.Local, + filter, replicaConfig.Integrity) + return &TestKit{ - 
t: t, - TestKit: tk, - storage: store, - domain: domain, + t: t, + TestKit: tk, + storage: store, + domain: domain, + schemaStorage: schemaStorage, + mounter: mounter, + } +} +// DML2Event executes the given DML statement and returns the row changed event decoded from the last key-value pair written to the table +func (tk *TestKit) DML2Event(dml string, schema, table string) *model.RowChangedEvent { + tk.MustExec(dml) + + tableID, ok := tk.schemaStorage.GetLastSnapshot().TableIDByName(schema, table) + require.True(tk.t, ok) + + key, value := tk.getLastKeyValue(tableID) + + ts := tk.schemaStorage.GetLastSnapshot().CurrentTs() + rawKV := &model.RawKVEntry{ + // TODO: the operation is assumed to be a Put for now; can we infer it from the DML? + OpType: model.OpTypePut, + Key: key, + Value: value, + OldValue: nil, + StartTs: ts - 1, + CRTs: ts + 1, } + polymorphicEvent := model.NewPolymorphicEvent(rawKV) + err := tk.mounter.DecodeEvent(context.Background(), polymorphicEvent) + require.NoError(tk.t, err) + return polymorphicEvent.Row +} +// getLastKeyValue returns the last key-value pair written to the given table's key range +func (tk *TestKit) getLastKeyValue(tableID int64) (key, value []byte) { + txn, err := tk.storage.Begin() + require.NoError(tk.t, err) + defer txn.Rollback() //nolint:errcheck + + start, end := spanz.GetTableRange(tableID) + iter, err := txn.Iter(start, end) + require.NoError(tk.t, err) + defer iter.Close() + for iter.Valid() { + key = iter.Key() + value = iter.Value() + err = iter.Next() + require.NoError(tk.t, err) + } + return key, value } // DDL2Job executes the DDL stmt and returns the DDL job -func (tk *TestKit) DDL2Job(ddl string) *model.Job { +func (tk *TestKit) DDL2Job(ddl string) *timodel.Job { tk.MustExec(ddl) jobs, err := tiddl.GetLastNHistoryDDLJobs(tk.GetCurrentMeta(), 1) require.Nil(tk.t, err) @@ -73,9 +145,9 @@ func (tk *TestKit) DDL2Job(ddl string) *model.Job { // Set State from Synced to Done. // Because jobs are put to history queue after TiDB alter its state from // Done to Synced. - jobs[0].State = model.JobStateDone + jobs[0].State = timodel.JobStateDone res := jobs[0] - if res.Type != model.ActionRenameTables { + if res.Type != timodel.ActionRenameTables { return res } @@ -93,13 +165,13 @@ func (tk *TestKit) DDL2Job(ddl string) *model.Job { for i := 0; i < tableNum; i++ { oldTableIDs[i] = res.BinlogInfo.MultipleTableInfos[i].ID } - newTableNames := make([]model.CIStr, tableNum) + newTableNames := make([]timodel.CIStr, tableNum) for i := 0; i < tableNum; i++ { newTableNames[i] = res.BinlogInfo.MultipleTableInfos[i].Name } - oldSchemaNames := make([]model.CIStr, tableNum) + oldSchemaNames := make([]timodel.CIStr, tableNum) for i := 0; i < tableNum; i++ { - oldSchemaNames[i] = model.NewCIStr(schema) + oldSchemaNames[i] = timodel.NewCIStr(schema) } newSchemaIDs := oldSchemaIDs @@ -110,6 +182,14 @@ func (tk *TestKit) DDL2Job(ddl string) *model.Job { rawArgs, err := json.Marshal(args) require.NoError(tk.t, err) res.RawArgs = rawArgs + + err = tk.schemaStorage.HandleDDLJob(res) + require.NoError(tk.t, err) + + ver, err := tk.storage.CurrentVersion(oracle.GlobalTxnScope) + require.NoError(tk.t, err) + tk.schemaStorage.AdvanceResolvedTs(ver.Ver) + return res } @@ -117,7 +197,7 @@ func (tk *TestKit) DDL2Job(ddl string) *model.Job { // It is mainly used for "DROP TABLE" and "DROP VIEW" statement because // multiple jobs will be generated after executing these two types of // DDL statements.
-func (tk *TestKit) DDL2Jobs(ddl string, jobCnt int) []*model.Job { +func (tk *TestKit) DDL2Jobs(ddl string, jobCnt int) []*timodel.Job { tk.MustExec(ddl) jobs, err := tiddl.GetLastNHistoryDDLJobs(tk.GetCurrentMeta(), jobCnt) require.Nil(tk.t, err) @@ -126,7 +206,7 @@ func (tk *TestKit) DDL2Jobs(ddl string, jobCnt int) []*model.Job { // Because jobs are put to history queue after TiDB alter its state from // Done to Synced. for i := range jobs { - jobs[i].State = model.JobStateDone + jobs[i].State = timodel.JobStateDone } return jobs } @@ -149,7 +229,7 @@ func (tk *TestKit) Close() { tk.storage.Close() //nolint:errcheck } -func (tk *TestKit) GetAllHistoryDDLJob(f filter.Filter) ([]*model.Job, error) { +func (tk *TestKit) GetAllHistoryDDLJob(f filter.Filter) ([]*timodel.Job, error) { s, err := session.CreateSession(tk.storage) if err != nil { return nil, errors.Trace(err) @@ -168,7 +248,7 @@ func (tk *TestKit) GetAllHistoryDDLJob(f filter.Filter) ([]*model.Job, error) { txnMeta := meta.NewMeta(txn) jobs, err := tiddl.GetAllHistoryDDLJobs(txnMeta) - res := make([]*model.Job, 0) + res := make([]*timodel.Job, 0) if err != nil { return nil, errors.Trace(err) } @@ -182,7 +262,7 @@ func (tk *TestKit) GetAllHistoryDDLJob(f filter.Filter) ([]*model.Job, error) { // Set State from Synced to Done. // Because jobs are put to history queue after TiDB alter its state from // Done to Synced. - jobs[i].State = model.JobStateDone + jobs[i].State = timodel.JobStateDone res = append(res, job) } return jobs, nil From d07a9c1c770c2879b3c082c4c1a0c20e67cf5c54 Mon Sep 17 00:00:00 2001 From: 3AceShowHand Date: Thu, 23 Nov 2023 18:23:16 +0800 Subject: [PATCH 06/11] fix some test. --- cdc/entry/mounter_test.go | 43 +++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 20 deletions(-) diff --git a/cdc/entry/mounter_test.go b/cdc/entry/mounter_test.go index 67ba14675dc..e36a5a36d73 100644 --- a/cdc/entry/mounter_test.go +++ b/cdc/entry/mounter_test.go @@ -261,7 +261,8 @@ func testMounterDisableOldValue(t *testing.T, tc struct { delApproximateBytes [][]int }, ) { - tk := testkit.New(t) + replicaConfig := config.GetDefaultReplicaConfig() + tk := testkit.New(t, replicaConfig) defer tk.Close() tk.MustExec("set @@tidb_enable_clustered_index=1;") @@ -269,7 +270,7 @@ func testMounterDisableOldValue(t *testing.T, tc struct { tk.MustExec(tc.createTableDDL) - f, err := filter.NewFilter(config.GetDefaultReplicaConfig(), "") + f, err := filter.NewFilter(replicaConfig, "") require.Nil(t, err) jobs, err := tk.GetAllHistoryDDLJob(f) require.Nil(t, err) @@ -296,11 +297,10 @@ func testMounterDisableOldValue(t *testing.T, tc struct { ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope) require.Nil(t, err) scheamStorage.AdvanceResolvedTs(ver.Ver) - config := config.GetDefaultReplicaConfig() - filter, err := filter.NewFilter(config, "") + filter, err := filter.NewFilter(replicaConfig, "") require.Nil(t, err) mounter := NewMounter(scheamStorage, - model.DefaultChangeFeedID("c1"), time.UTC, filter, config.Integrity).(*mounter) + model.DefaultChangeFeedID("c1"), time.UTC, filter, replicaConfig.Integrity).(*mounter) mounter.tz = time.Local ctx := context.Background() @@ -1004,16 +1004,17 @@ func TestGetDefaultZeroValue(t *testing.T) { } func TestE2ERowLevelChecksum(t *testing.T) { - tk := testkit.New(t) + // changefeed enable checksum functionality + replicaConfig := config.GetDefaultReplicaConfig() + replicaConfig.Integrity.IntegrityCheckLevel = integrity.CheckLevelCorrectness + + tk := testkit.New(t, 
replicaConfig) defer tk.Close() // upstream TiDB enable checksum functionality tk.MustExec("set global tidb_enable_row_level_checksum = 1") tk.MustExec("use test") - // changefeed enable checksum functionality - replicaConfig := config.GetDefaultReplicaConfig() - replicaConfig.Integrity.IntegrityCheckLevel = integrity.CheckLevelCorrectness filter, err := filter.NewFilter(replicaConfig, "") require.NoError(t, err) @@ -1176,14 +1177,15 @@ func TestE2ERowLevelChecksum(t *testing.T) { } func TestDecodeRowEnableChecksum(t *testing.T) { - tk := testkit.New(t) + replicaConfig := config.GetDefaultReplicaConfig() + replicaConfig.Integrity.IntegrityCheckLevel = integrity.CheckLevelCorrectness + + tk := testkit.New(t, replicaConfig) defer tk.Close() tk.MustExec("set global tidb_enable_row_level_checksum = 1") tk.MustExec("use test") - replicaConfig := config.GetDefaultReplicaConfig() - replicaConfig.Integrity.IntegrityCheckLevel = integrity.CheckLevelCorrectness filter, err := filter.NewFilter(replicaConfig, "") require.NoError(t, err) @@ -1304,7 +1306,9 @@ func TestDecodeRowEnableChecksum(t *testing.T) { } func TestDecodeRow(t *testing.T) { - tk := testkit.New(t) + replicaConfig := config.GetDefaultReplicaConfig() + + tk := testkit.New(t, replicaConfig) defer tk.Close() tk.MustExec("set @@tidb_enable_clustered_index=1;") @@ -1315,9 +1319,7 @@ func TestDecodeRow(t *testing.T) { ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope) require.NoError(t, err) - cfg := config.GetDefaultReplicaConfig() - - filter, err := filter.NewFilter(cfg, "") + filter, err := filter.NewFilter(replicaConfig, "") require.NoError(t, err) schemaStorage, err := NewSchemaStorage(tk.GetCurrentMeta(), @@ -1382,7 +1384,10 @@ func TestDecodeRow(t *testing.T) { // TestDecodeEventIgnoreRow tests a PolymorphicEvent.Row is nil // if this event should be filter out by filter. func TestDecodeEventIgnoreRow(t *testing.T) { - tk := testkit.New(t) + replicaConfig := config.GetDefaultReplicaConfig() + replicaConfig.Filter.Rules = []string{"test.student", "test.computer"} + + tk := testkit.New(t, replicaConfig) defer tk.Close() tk.MustExec("use test;") @@ -1394,9 +1399,7 @@ func TestDecodeEventIgnoreRow(t *testing.T) { cfID := model.DefaultChangeFeedID("changefeed-test-ignore-event") - cfg := config.GetDefaultReplicaConfig() - cfg.Filter.Rules = []string{"test.student", "test.computer"} - f, err := filter.NewFilter(cfg, "") + f, err := filter.NewFilter(replicaConfig, "") require.Nil(t, err) ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope) require.Nil(t, err) From d69d58218a8a7aa51aee02104ec7877d27a0515e Mon Sep 17 00:00:00 2001 From: 3AceShowHand Date: Thu, 23 Nov 2023 18:33:37 +0800 Subject: [PATCH 07/11] fix some test. 
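
Move the kit from pkg/testkit into cdc/entry and rename the constructor to NewTestKit. The kit depends on the entry package's SchemaStorage and Mounter while cdc/entry's own tests depend on the kit, so keeping it in a separate package would create an import cycle; inside package entry it can call NewSchemaStorage and NewMounter directly. Out-of-package call sites now look like this (sketch; the test body is hypothetical, the constructor call mirrors the sites updated below):

    package canal

    import (
        "testing"

        "github.com/pingcap/tiflow/cdc/entry"
        "github.com/pingcap/tiflow/pkg/config"
    )

    func TestNewTestKitSketch(t *testing.T) {
        tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig())
        defer tk.Close()

        // DDL2Job returns the job and registers it with the kit's schema
        // storage, so a later DML2Event can resolve the table by name.
        job := tk.DDL2Job(`create table test.t(a varchar(10) primary key)`)
        _ = job.BinlogInfo.TableInfo
    }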
--- cdc/entry/mounter_test.go | 15 ++-- cdc/entry/schema_storage_test.go | 11 ++- {pkg/testkit => cdc/entry}/testkit.go | 25 +++---- pkg/sink/codec/canal/canal_encoder_test.go | 9 ++- pkg/sink/codec/canal/canal_entry_test.go | 8 +- .../canal_json_row_event_encoder_test.go | 9 ++- .../canal_json_txn_event_encoder_test.go | 6 +- pkg/sink/codec/canal/canal_test_util.go | 5 +- pkg/sink/codec/canal/type_test.go | 74 ++++++++++--------- pkg/sink/codec/simple/encoder_test.go | 6 +- 10 files changed, 85 insertions(+), 83 deletions(-) rename {pkg/testkit => cdc/entry}/testkit.go (92%) diff --git a/cdc/entry/mounter_test.go b/cdc/entry/mounter_test.go index e36a5a36d73..07d51eef6a0 100644 --- a/cdc/entry/mounter_test.go +++ b/cdc/entry/mounter_test.go @@ -41,7 +41,6 @@ import ( codecCommon "github.com/pingcap/tiflow/pkg/sink/codec/common" "github.com/pingcap/tiflow/pkg/spanz" "github.com/pingcap/tiflow/pkg/sqlmodel" - "github.com/pingcap/tiflow/pkg/testkit" "github.com/pingcap/tiflow/pkg/util" "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/oracle" @@ -262,7 +261,7 @@ func testMounterDisableOldValue(t *testing.T, tc struct { }, ) { replicaConfig := config.GetDefaultReplicaConfig() - tk := testkit.New(t, replicaConfig) + tk := NewTestKit(t, replicaConfig) defer tk.Close() tk.MustExec("set @@tidb_enable_clustered_index=1;") @@ -1008,7 +1007,7 @@ func TestE2ERowLevelChecksum(t *testing.T) { replicaConfig := config.GetDefaultReplicaConfig() replicaConfig.Integrity.IntegrityCheckLevel = integrity.CheckLevelCorrectness - tk := testkit.New(t, replicaConfig) + tk := NewTestKit(t, replicaConfig) defer tk.Close() // upstream TiDB enable checksum functionality @@ -1180,7 +1179,7 @@ func TestDecodeRowEnableChecksum(t *testing.T) { replicaConfig := config.GetDefaultReplicaConfig() replicaConfig.Integrity.IntegrityCheckLevel = integrity.CheckLevelCorrectness - tk := testkit.New(t, replicaConfig) + tk := NewTestKit(t, replicaConfig) defer tk.Close() tk.MustExec("set global tidb_enable_row_level_checksum = 1") @@ -1308,7 +1307,7 @@ func TestDecodeRowEnableChecksum(t *testing.T) { func TestDecodeRow(t *testing.T) { replicaConfig := config.GetDefaultReplicaConfig() - tk := testkit.New(t, replicaConfig) + tk := NewTestKit(t, replicaConfig) defer tk.Close() tk.MustExec("set @@tidb_enable_clustered_index=1;") @@ -1336,7 +1335,7 @@ func TestDecodeRow(t *testing.T) { schemaStorage.AdvanceResolvedTs(ver.Ver) - mounter := NewMounter(schemaStorage, changefeed, time.Local, filter, cfg.Integrity).(*mounter) + mounter := NewMounter(schemaStorage, changefeed, time.Local, filter, replicaConfig.Integrity).(*mounter) tk.MustExec(`insert into student values(1, "dongmen", 20, "male")`) tk.MustExec(`update student set age = 27 where id = 1`) @@ -1387,7 +1386,7 @@ func TestDecodeEventIgnoreRow(t *testing.T) { replicaConfig := config.GetDefaultReplicaConfig() replicaConfig.Filter.Rules = []string{"test.student", "test.computer"} - tk := testkit.New(t, replicaConfig) + tk := NewTestKit(t, replicaConfig) defer tk.Close() tk.MustExec("use test;") @@ -1416,7 +1415,7 @@ func TestDecodeEventIgnoreRow(t *testing.T) { ts := schemaStorage.GetLastSnapshot().CurrentTs() schemaStorage.AdvanceResolvedTs(ver.Ver) - mounter := NewMounter(schemaStorage, cfID, time.Local, f, cfg.Integrity).(*mounter) + mounter := NewMounter(schemaStorage, cfID, time.Local, f, replicaConfig.Integrity).(*mounter) type testCase struct { schema string diff --git a/cdc/entry/schema_storage_test.go b/cdc/entry/schema_storage_test.go index 
f2d66c2704a..d249617d5ea 100644 --- a/cdc/entry/schema_storage_test.go +++ b/cdc/entry/schema_storage_test.go @@ -28,7 +28,6 @@ import ( "github.com/pingcap/tiflow/cdc/model" "github.com/pingcap/tiflow/pkg/config" "github.com/pingcap/tiflow/pkg/filter" - "github.com/pingcap/tiflow/pkg/testkit" "github.com/pingcap/tiflow/pkg/util" "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/oracle" @@ -658,7 +657,7 @@ func TestMultiVersionStorage(t *testing.T) { } func TestCreateSnapFromMeta(t *testing.T) { - tk := testkit.New(t) + tk := NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() tk.MustExec("create database test2") @@ -686,7 +685,7 @@ func TestCreateSnapFromMeta(t *testing.T) { } func TestExplicitTables(t *testing.T) { - tk := testkit.New(t) + tk := NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() ver1, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope) require.NoError(t, err) @@ -828,7 +827,7 @@ func TestSchemaStorage(t *testing.T) { }} testOneGroup := func(tc []string) { - tk := testkit.New(t) + tk := NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() tk.MustExec("set global tidb_enable_clustered_index = 'int_only';") for _, ddlSQL := range tc { @@ -874,7 +873,7 @@ func TestSchemaStorage(t *testing.T) { // 2. If the table has not null unique key, the handleKey is the first column of the unique key. // 3. If the table has no primary key and no not null unique key, it has no handleKey. func TestHandleKey(t *testing.T) { - tk := testkit.New(t) + tk := NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() tk.MustExec("create database test2") tk.MustExec("create table test.simple_test1 (id bigint primary key)") @@ -921,7 +920,7 @@ func TestHandleKey(t *testing.T) { } func TestGetPrimaryKey(t *testing.T) { - tk := testkit.New(t) + tk := NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t1(a int primary key, b int)` diff --git a/pkg/testkit/testkit.go b/cdc/entry/testkit.go similarity index 92% rename from pkg/testkit/testkit.go rename to cdc/entry/testkit.go index 6b3fd8d6999..e10a7f3f9ed 100644 --- a/pkg/testkit/testkit.go +++ b/cdc/entry/testkit.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package testkit +package entry import ( "context" @@ -25,13 +25,12 @@ import ( tiddl "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" - "github.com/pingcap/tidb/meta" + timeta "github.com/pingcap/tidb/meta" timodel "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/testkit" - "github.com/pingcap/tiflow/cdc/entry" "github.com/pingcap/tiflow/cdc/model" "github.com/pingcap/tiflow/pkg/config" "github.com/pingcap/tiflow/pkg/errors" @@ -49,12 +48,12 @@ type TestKit struct { storage kv.Storage domain *domain.Domain - schemaStorage entry.SchemaStorage - mounter entry.Mounter + schemaStorage SchemaStorage + mounter Mounter } -// New return a new testkit -func New(t *testing.T, replicaConfig *config.ReplicaConfig) *TestKit { +// NewTestKit returns a new TestKit +func NewTestKit(t *testing.T, replicaConfig *config.ReplicaConfig) *TestKit { store, err := mockstore.NewMockStore() require.NoError(t, err) ticonfig.UpdateGlobal(func(conf *ticonfig.Config) { @@ -75,13 +74,13 @@ func New(t *testing.T, replicaConfig *config.ReplicaConfig) *TestKit { changefeedID := model.DefaultChangeFeedID("changefeed-testkit") - meta := meta.NewSnapshotMeta(store.GetSnapshot(ver)) - schemaStorage, err := entry.NewSchemaStorage( + meta := timeta.NewSnapshotMeta(store.GetSnapshot(ver)) + schemaStorage, err := NewSchemaStorage( meta, ver.Ver, replicaConfig.ForceReplicate, changefeedID, util.RoleTester, filter) require.NoError(t, err) - mounter := entry.NewMounter(schemaStorage, changefeedID, time.Local, + mounter := NewMounter(schemaStorage, changefeedID, time.Local, filter, replicaConfig.Integrity) return &TestKit{ @@ -217,10 +216,10 @@ func (tk *TestKit) Storage() kv.Storage { } // GetCurrentMeta return the current meta snapshot -func (tk *TestKit) GetCurrentMeta() *meta.Meta { +func (tk *TestKit) GetCurrentMeta() *timeta.Meta { ver, err := tk.storage.CurrentVersion(oracle.GlobalTxnScope) require.Nil(tk.t, err) - return meta.NewSnapshotMeta(tk.storage.GetSnapshot(ver)) + return timeta.NewSnapshotMeta(tk.storage.GetSnapshot(ver)) } // Close closes the helper @@ -245,7 +244,7 @@ func (tk *TestKit) GetAllHistoryDDLJob(f filter.Filter) ([]*timodel.Job, error) return nil, errors.Trace(err) } defer txn.Rollback() //nolint:errcheck - txnMeta := meta.NewMeta(txn) + txnMeta := timeta.NewMeta(txn) jobs, err := tiddl.GetAllHistoryDDLJobs(txnMeta) res := make([]*timodel.Job, 0) diff --git a/pkg/sink/codec/canal/canal_encoder_test.go b/pkg/sink/codec/canal/canal_encoder_test.go index 955284f33b8..b5f7de82c89 100644 --- a/pkg/sink/codec/canal/canal_encoder_test.go +++ b/pkg/sink/codec/canal/canal_encoder_test.go @@ -19,6 +19,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tiflow/cdc/entry" "github.com/pingcap/tiflow/cdc/model" "github.com/pingcap/tiflow/pkg/config" "github.com/pingcap/tiflow/pkg/sink/codec/common" @@ -92,11 +93,11 @@ var ( ) func TestCanalBatchEncoder(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t(a varchar(10) primary key)` - job := helper.DDL2Job(sql) + job := tk.DDL2Job(sql) tableInfo := model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) for _, cs := range rowCases { @@ -150,11 +151,11 @@ func TestCanalBatchEncoder(t *testing.T) { } func TestCanalAppendRowChangedEventWithCallback(t
*testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t(a varchar(10) primary key)` - job := helper.DDL2Job(sql) + job := tk.DDL2Job(sql) tableInfo := model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfo := tableInfo.GetRowColInfos() diff --git a/pkg/sink/codec/canal/canal_entry_test.go b/pkg/sink/codec/canal/canal_entry_test.go index 49f16b4ac97..8204ce51a51 100644 --- a/pkg/sink/codec/canal/canal_entry_test.go +++ b/pkg/sink/codec/canal/canal_entry_test.go @@ -19,18 +19,18 @@ import ( "github.com/golang/protobuf/proto" mm "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tiflow/cdc/entry" "github.com/pingcap/tiflow/cdc/model" "github.com/pingcap/tiflow/pkg/config" "github.com/pingcap/tiflow/pkg/sink/codec/common" "github.com/pingcap/tiflow/pkg/sink/codec/internal" - "github.com/pingcap/tiflow/pkg/testkit" canal "github.com/pingcap/tiflow/proto/canal" "github.com/stretchr/testify/require" "golang.org/x/text/encoding/charmap" ) func TestInsert(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t( @@ -123,7 +123,7 @@ func TestInsert(t *testing.T) { } func TestUpdate(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t(id int primary key, name varchar(32))` @@ -212,7 +212,7 @@ func TestUpdate(t *testing.T) { } func TestDelete(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t(id int primary key)` diff --git a/pkg/sink/codec/canal/canal_json_row_event_encoder_test.go b/pkg/sink/codec/canal/canal_json_row_event_encoder_test.go index 14ab5b482ce..5b3370a658c 100644 --- a/pkg/sink/codec/canal/canal_json_row_event_encoder_test.go +++ b/pkg/sink/codec/canal/canal_json_row_event_encoder_test.go @@ -20,6 +20,7 @@ import ( "testing" "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tiflow/cdc/entry" "github.com/pingcap/tiflow/cdc/model" "github.com/pingcap/tiflow/pkg/compression" "github.com/pingcap/tiflow/pkg/config" @@ -580,11 +581,11 @@ func TestDDLEventWithExtensionValueMarshal(t *testing.T) { } func TestCanalJSONAppendRowChangedEventWithCallback(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t(a varchar(255) primary key)` - job := helper.DDL2Job(sql) + job := tk.DDL2Job(sql) tableInfo := model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos := tableInfo.GetRowColInfos() @@ -672,11 +673,11 @@ func TestCanalJSONAppendRowChangedEventWithCallback(t *testing.T) { } func TestMaxMessageBytes(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t(a varchar(255) primary key)` - job := helper.DDL2Job(sql) + job := tk.DDL2Job(sql) tableInfo := model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos := tableInfo.GetRowColInfos() diff --git a/pkg/sink/codec/canal/canal_json_txn_event_encoder_test.go b/pkg/sink/codec/canal/canal_json_txn_event_encoder_test.go index b74d03853b3..4331388a9b7 100644 --- a/pkg/sink/codec/canal/canal_json_txn_event_encoder_test.go +++ b/pkg/sink/codec/canal/canal_json_txn_event_encoder_test.go @@ -17,10 +17,10 @@ 
import ( "testing" "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tiflow/cdc/entry" "github.com/pingcap/tiflow/cdc/model" "github.com/pingcap/tiflow/pkg/config" "github.com/pingcap/tiflow/pkg/sink/codec/common" - "github.com/pingcap/tiflow/pkg/testkit" "github.com/stretchr/testify/require" ) @@ -35,7 +35,7 @@ func TestBuildCanalJSONTxnEventEncoder(t *testing.T) { } func TestCanalJSONTxnEventEncoderMaxMessageBytes(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t(a varchar(255) primary key)` @@ -77,7 +77,7 @@ func TestCanalJSONTxnEventEncoderMaxMessageBytes(t *testing.T) { } func TestCanalJSONAppendTxnEventEncoderWithCallback(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t(a varchar(255) primary key)` diff --git a/pkg/sink/codec/canal/canal_test_util.go b/pkg/sink/codec/canal/canal_test_util.go index 85c75fdbacc..388d09f78d0 100644 --- a/pkg/sink/codec/canal/canal_test_util.go +++ b/pkg/sink/codec/canal/canal_test_util.go @@ -18,8 +18,9 @@ import ( mm "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tiflow/cdc/entry" "github.com/pingcap/tiflow/cdc/model" - "github.com/pingcap/tiflow/pkg/testkit" + "github.com/pingcap/tiflow/pkg/config" ) type testColumnTuple struct { @@ -383,7 +384,7 @@ func collectExpectedDecodedValue(columns []*testColumnTuple) map[string]interfac } func newLargeEvent4Test(t *testing.T) (*model.RowChangedEvent, *model.RowChangedEvent, *model.RowChangedEvent) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t( diff --git a/pkg/sink/codec/canal/type_test.go b/pkg/sink/codec/canal/type_test.go index e504d91bb87..bb6cc5ceab7 100644 --- a/pkg/sink/codec/canal/type_test.go +++ b/pkg/sink/codec/canal/type_test.go @@ -16,14 +16,16 @@ package canal import ( "testing" + "github.com/pingcap/tiflow/cdc/entry" "github.com/pingcap/tiflow/cdc/model" + "github.com/pingcap/tiflow/pkg/config" "github.com/pingcap/tiflow/pkg/sink/codec/internal" "github.com/pingcap/tiflow/pkg/sink/codec/utils" "github.com/stretchr/testify/require" ) func TestGetMySQLType4IntTypes(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t1 ( @@ -32,7 +34,7 @@ func TestGetMySQLType4IntTypes(t *testing.T) { c smallint, d mediumint, e bigint)` - job := helper.DDL2Job(sql) + job := tk.DDL2Job(sql) tableInfo := model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos := tableInfo.GetRowColInfos() @@ -103,7 +105,7 @@ func TestGetMySQLType4IntTypes(t *testing.T) { c smallint unsigned, d mediumint unsigned, e bigint unsigned)` - job = helper.DDL2Job(sql) + job = tk.DDL2Job(sql) tableInfo = model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos = tableInfo.GetRowColInfos() @@ -217,7 +219,7 @@ func TestGetMySQLType4IntTypes(t *testing.T) { c smallint(5), d mediumint(8), e bigint(19))` - job = helper.DDL2Job(sql) + job = tk.DDL2Job(sql) tableInfo = model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos = tableInfo.GetRowColInfos() @@ -263,7 +265,7 @@ func TestGetMySQLType4IntTypes(t *testing.T) { c smallint(5) unsigned, d mediumint(8) unsigned, e bigint(19) unsigned)` - job = helper.DDL2Job(sql) + job = tk.DDL2Job(sql) tableInfo = 
model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos = tableInfo.GetRowColInfos() @@ -309,7 +311,7 @@ func TestGetMySQLType4IntTypes(t *testing.T) { c smallint unsigned zerofill, d mediumint zerofill, e bigint zerofill)` - job = helper.DDL2Job(sql) + job = tk.DDL2Job(sql) tableInfo = model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos = tableInfo.GetRowColInfos() @@ -354,7 +356,7 @@ func TestGetMySQLType4IntTypes(t *testing.T) { b bit, c bit(3), d bool)` - job = helper.DDL2Job(sql) + job = tk.DDL2Job(sql) tableInfo = model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos = tableInfo.GetRowColInfos() @@ -387,14 +389,14 @@ func TestGetMySQLType4IntTypes(t *testing.T) { } func TestGetMySQLType4FloatType(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t1( a int primary key, b float, c double)` - job := helper.DDL2Job(sql) + job := tk.DDL2Job(sql) tableInfo := model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos := tableInfo.GetRowColInfos() @@ -422,7 +424,7 @@ func TestGetMySQLType4FloatType(t *testing.T) { require.Equal(t, internal.JavaSQLTypeDOUBLE, javaType) sql = `create table test.t2(a int primary key, b float(10, 3), c float(10))` - job = helper.DDL2Job(sql) + job = tk.DDL2Job(sql) tableInfo = model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos = tableInfo.GetRowColInfos() @@ -442,7 +444,7 @@ func TestGetMySQLType4FloatType(t *testing.T) { require.Equal(t, "float", mysqlType) sql = `create table test.t3(a int primary key, b double(20, 3))` - job = helper.DDL2Job(sql) + job = tk.DDL2Job(sql) tableInfo = model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos = tableInfo.GetRowColInfos() @@ -460,7 +462,7 @@ func TestGetMySQLType4FloatType(t *testing.T) { c double unsigned, d float zerofill, e double zerofill)` - job = helper.DDL2Job(sql) + job = tk.DDL2Job(sql) tableInfo = model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos = tableInfo.GetRowColInfos() @@ -503,11 +505,11 @@ func TestGetMySQLType4FloatType(t *testing.T) { } func TestGetMySQLType4Decimal(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t1(a int primary key, b decimal, c numeric)` - job := helper.DDL2Job(sql) + job := tk.DDL2Job(sql) tableInfo := model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos := tableInfo.GetRowColInfos() @@ -527,7 +529,7 @@ func TestGetMySQLType4Decimal(t *testing.T) { require.Equal(t, "decimal(10,0)", mysqlType) sql = `create table test.t2(a int primary key, b decimal(5), c decimal(5, 2))` - job = helper.DDL2Job(sql) + job = tk.DDL2Job(sql) tableInfo = model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos = tableInfo.GetRowColInfos() @@ -551,7 +553,7 @@ func TestGetMySQLType4Decimal(t *testing.T) { require.Equal(t, internal.JavaSQLTypeDECIMAL, javaType) sql = `create table test.t3(a int primary key, b decimal unsigned, c decimal zerofill)` - job = helper.DDL2Job(sql) + job = tk.DDL2Job(sql) tableInfo = model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos = tableInfo.GetRowColInfos() @@ -576,11 +578,11 @@ func TestGetMySQLType4Decimal(t *testing.T) { } func TestGetMySQLType4TimeTypes(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() 
sql := `create table test.t1(a int primary key, b time, c time(3))` - job := helper.DDL2Job(sql) + job := tk.DDL2Job(sql) tableInfo := model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos := tableInfo.GetRowColInfos() @@ -604,7 +606,7 @@ func TestGetMySQLType4TimeTypes(t *testing.T) { require.Equal(t, javaType, internal.JavaSQLTypeTIME) sql = `create table test.t2(a int primary key, b datetime, c datetime(3))` - job = helper.DDL2Job(sql) + job = tk.DDL2Job(sql) tableInfo = model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos = tableInfo.GetRowColInfos() @@ -628,7 +630,7 @@ func TestGetMySQLType4TimeTypes(t *testing.T) { require.Equal(t, javaType, internal.JavaSQLTypeTIMESTAMP) sql = `create table test.t3(a int primary key, b timestamp, c timestamp(3))` - job = helper.DDL2Job(sql) + job = tk.DDL2Job(sql) tableInfo = model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos = tableInfo.GetRowColInfos() @@ -652,7 +654,7 @@ func TestGetMySQLType4TimeTypes(t *testing.T) { require.Equal(t, javaType, internal.JavaSQLTypeTIMESTAMP) sql = `create table test.t4(a int primary key, b date)` - job = helper.DDL2Job(sql) + job = tk.DDL2Job(sql) tableInfo = model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos = tableInfo.GetRowColInfos() @@ -668,7 +670,7 @@ func TestGetMySQLType4TimeTypes(t *testing.T) { require.Equal(t, javaType, internal.JavaSQLTypeDATE) sql = `create table test.t5(a int primary key, b year, c year(4))` - job = helper.DDL2Job(sql) + job = tk.DDL2Job(sql) tableInfo = model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos = tableInfo.GetRowColInfos() @@ -693,11 +695,11 @@ func TestGetMySQLType4TimeTypes(t *testing.T) { } func TestGetMySQLType4Char(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t(a int primary key, b char, c char(123))` - job := helper.DDL2Job(sql) + job := tk.DDL2Job(sql) tableInfo := model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos := tableInfo.GetRowColInfos() @@ -721,7 +723,7 @@ func TestGetMySQLType4Char(t *testing.T) { require.Equal(t, javaType, internal.JavaSQLTypeCHAR) sql = `create table test.t1(a int primary key, b varchar(123))` - job = helper.DDL2Job(sql) + job = tk.DDL2Job(sql) tableInfo = model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos = tableInfo.GetRowColInfos() @@ -739,11 +741,11 @@ func TestGetMySQLType4Char(t *testing.T) { } func TestGetMySQLType4TextTypes(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t1(a int primary key, b text, c tinytext, d mediumtext, e longtext)` - job := helper.DDL2Job(sql) + job := tk.DDL2Job(sql) tableInfo := model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos := tableInfo.GetRowColInfos() @@ -794,11 +796,11 @@ func TestGetMySQLType4TextTypes(t *testing.T) { } func TestGetMySQLType4BinaryType(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t1(a int primary key, b binary, c binary(10))` - job := helper.DDL2Job(sql) + job := tk.DDL2Job(sql) tableInfo := model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos := tableInfo.GetRowColInfos() @@ -822,7 +824,7 @@ func TestGetMySQLType4BinaryType(t *testing.T) { require.Equal(t, "binary(10)", mysqlType) sql 
= `create table test.t2(a int primary key, b varbinary(23))` - job = helper.DDL2Job(sql) + job = tk.DDL2Job(sql) tableInfo = model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos = tableInfo.GetRowColInfos() @@ -840,11 +842,11 @@ func TestGetMySQLType4BinaryType(t *testing.T) { } func TestGetMySQLType4BlobType(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t1(a int primary key, b blob, c tinyblob, d mediumblob, e longblob)` - job := helper.DDL2Job(sql) + job := tk.DDL2Job(sql) tableInfo := model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos := tableInfo.GetRowColInfos() @@ -895,11 +897,11 @@ func TestGetMySQLType4BlobType(t *testing.T) { } func TestGetMySQLType4EnumAndSet(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t(a int primary key, b enum('a', 'b', 'c'), c set('a', 'b', 'c'))` - job := helper.DDL2Job(sql) + job := tk.DDL2Job(sql) tableInfo := model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos := tableInfo.GetRowColInfos() @@ -932,11 +934,11 @@ func TestGetMySQLType4EnumAndSet(t *testing.T) { } func TestGetMySQLType4JSON(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t(a int primary key, b json)` - job := helper.DDL2Job(sql) + job := tk.DDL2Job(sql) tableInfo := model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) _, _, colInfos := tableInfo.GetRowColInfos() diff --git a/pkg/sink/codec/simple/encoder_test.go b/pkg/sink/codec/simple/encoder_test.go index b9d216830c2..0741ae20c48 100644 --- a/pkg/sink/codec/simple/encoder_test.go +++ b/pkg/sink/codec/simple/encoder_test.go @@ -16,8 +16,9 @@ package simple import ( "testing" + "github.com/pingcap/tiflow/cdc/entry" "github.com/pingcap/tiflow/cdc/model" - "github.com/pingcap/tiflow/pkg/testkit" + "github.com/pingcap/tiflow/pkg/config" "github.com/stretchr/testify/require" ) @@ -45,7 +46,7 @@ func TestEncodeCheckpoint(t *testing.T) { } func TestEncodeDDLEvent(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.test(id int primary key, name varchar(255) not null, age int, email varchar(255) not null, key idx_name(name), key idx_name_email(name, email))` @@ -85,7 +86,7 @@ func TestEncodeDDLEvent(t *testing.T) { } func TestEncodeBootstrapEvent(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.test(id int primary key, name varchar(255) not null, age int, email varchar(255) not null, key idx_name(name), key idx_name_email(name, email))` From d4a04e8c54512214844b1dcca259fd2f1b91a537 Mon Sep 17 00:00:00 2001 From: 3AceShowHand Date: Thu, 23 Nov 2023 18:35:49 +0800 Subject: [PATCH 08/11] fix some tests.
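
Switch the remaining call sites under cdc/api, cdc/owner and cdc/sink to entry.NewTestKit. When a test needs a non-default setup, the pattern used throughout the series is to adjust the replica config before handing it to the kit, for example (hypothetical sketch; the filter rules are the ones TestDecodeEventIgnoreRow uses):

    package owner

    import (
        "testing"

        "github.com/pingcap/tiflow/cdc/entry"
        "github.com/pingcap/tiflow/pkg/config"
    )

    func TestFilteredTestKitSketch(t *testing.T) {
        // Restrict the kit's filter to two tables before building it,
        // so events from other tables are ignored by the mounter.
        replicaConfig := config.GetDefaultReplicaConfig()
        replicaConfig.Filter.Rules = []string{"test.student", "test.computer"}
        tk := entry.NewTestKit(t, replicaConfig)
        defer tk.Close()
    }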
--- cdc/api/v2/api_helpers_test.go | 4 ++-- cdc/owner/changefeed_test.go | 4 ++-- cdc/owner/ddl_manager_test.go | 6 +++--- cdc/owner/schema_test.go | 16 ++++++++-------- cdc/sink/dmlsink/mq/mq_dml_sink_test.go | 2 +- .../columnselector/column_selector_test.go | 4 ++-- cdc/sink/dmlsink/mq/worker_test.go | 8 ++++---- 7 files changed, 22 insertions(+), 22 deletions(-) diff --git a/cdc/api/v2/api_helpers_test.go b/cdc/api/v2/api_helpers_test.go index 5448f66d467..6f06fe52da0 100644 --- a/cdc/api/v2/api_helpers_test.go +++ b/cdc/api/v2/api_helpers_test.go @@ -30,7 +30,7 @@ import ( func TestVerifyCreateChangefeedConfig(t *testing.T) { ctx := context.Background() pdClient := &mockPDClient{} - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) helper.Tk().MustExec("use test;") storage := helper.Storage() ctrl := mock_controller.NewMockController(gomock.NewController(t)) @@ -111,7 +111,7 @@ func TestVerifyUpdateChangefeedConfig(t *testing.T) { Config: config.GetDefaultReplicaConfig(), } oldUpInfo := &model.UpstreamInfo{} - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) helper.Tk().MustExec("use test;") storage := helper.Storage() h := &APIV2HelpersImpl{} diff --git a/cdc/owner/changefeed_test.go b/cdc/owner/changefeed_test.go index 15d05ce4a04..9794fb60e85 100644 --- a/cdc/owner/changefeed_test.go +++ b/cdc/owner/changefeed_test.go @@ -314,7 +314,7 @@ func TestChangefeedHandleError(t *testing.T) { } func TestExecDDL(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() // Creates a table, which will be deleted at the start-ts of the changefeed. // It is expected that the changefeed DOES NOT replicate this table. @@ -399,7 +399,7 @@ func TestExecDDL(t *testing.T) { } func TestEmitCheckpointTs(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() // Creates a table, which will be deleted at the start-ts of the changefeed. // It is expected that the changefeed DOES NOT replicate this table. 
diff --git a/cdc/owner/ddl_manager_test.go b/cdc/owner/ddl_manager_test.go index 80bea1e6ed3..d7a198f4803 100644 --- a/cdc/owner/ddl_manager_test.go +++ b/cdc/owner/ddl_manager_test.go @@ -158,7 +158,7 @@ func TestGetSnapshotTs(t *testing.T) { } func TestExecRenameTablesDDL(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() ctx := cdcContext.NewBackendContext4Test(true) dm := createDDLManagerForTest(t) @@ -258,7 +258,7 @@ func TestExecRenameTablesDDL(t *testing.T) { } func TestExecDropTablesDDL(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() ctx := cdcContext.NewBackendContext4Test(true) dm := createDDLManagerForTest(t) @@ -322,7 +322,7 @@ func TestExecDropTablesDDL(t *testing.T) { } func TestExecDropViewsDDL(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() ctx := cdcContext.NewBackendContext4Test(true) dm := createDDLManagerForTest(t) diff --git a/cdc/owner/schema_test.go b/cdc/owner/schema_test.go index b39a21a2397..fe74898bee2 100644 --- a/cdc/owner/schema_test.go +++ b/cdc/owner/schema_test.go @@ -33,7 +33,7 @@ import ( var dummyChangeFeedID = model.DefaultChangeFeedID("dummy_changefeed") func TestAllPhysicalTables(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() ver, err := helper.Storage().CurrentVersion(oracle.GlobalTxnScope) require.Nil(t, err) @@ -91,7 +91,7 @@ func TestAllPhysicalTables(t *testing.T) { } func TestAllTables(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() ver, err := helper.Storage().CurrentVersion(oracle.GlobalTxnScope) require.Nil(t, err) @@ -130,7 +130,7 @@ func TestAllTables(t *testing.T) { } func TestIsIneligibleTableID(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() ver, err := helper.Storage().CurrentVersion(oracle.GlobalTxnScope) require.Nil(t, err) @@ -188,7 +188,7 @@ func compareEvents(t *testing.T, e1, e2 *model.DDLEvent) { } func TestBuildDDLEventsFromSingleTableDDL(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() ver, err := helper.Storage().CurrentVersion(oracle.GlobalTxnScope) require.Nil(t, err) @@ -263,7 +263,7 @@ func TestBuildDDLEventsFromSingleTableDDL(t *testing.T) { } func TestBuildDDLEventsFromRenameTablesDDL(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() ver, err := helper.Storage().CurrentVersion(oracle.GlobalTxnScope) @@ -391,7 +391,7 @@ func TestBuildDDLEventsFromRenameTablesDDL(t *testing.T) { } func TestBuildDDLEventsFromDropTablesDDL(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() ver, err := helper.Storage().CurrentVersion(oracle.GlobalTxnScope) @@ -494,7 +494,7 @@ func TestBuildDDLEventsFromDropTablesDDL(t *testing.T) { } func TestBuildDDLEventsFromDropViewsDDL(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() ver, err := helper.Storage().CurrentVersion(oracle.GlobalTxnScope) @@ -615,7 +615,7 @@ func TestBuildDDLEventsFromDropViewsDDL(t *testing.T) { } func TestBuildIgnoredDDLJob(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, 
config.GetDefaultReplicaConfig()) defer tk.Close() ver, err := helper.Storage().CurrentVersion(oracle.GlobalTxnScope) diff --git a/cdc/sink/dmlsink/mq/mq_dml_sink_test.go b/cdc/sink/dmlsink/mq/mq_dml_sink_test.go index bd27c61a7ff..3f66d886b5e 100644 --- a/cdc/sink/dmlsink/mq/mq_dml_sink_test.go +++ b/cdc/sink/dmlsink/mq/mq_dml_sink_test.go @@ -80,7 +80,7 @@ func TestWriteEvents(t *testing.T) { require.NotNil(t, s) defer s.Close() - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t(a varchar(255) primary key)` diff --git a/cdc/sink/dmlsink/mq/transformer/columnselector/column_selector_test.go b/cdc/sink/dmlsink/mq/transformer/columnselector/column_selector_test.go index feb49a24e7e..c308caef868 100644 --- a/cdc/sink/dmlsink/mq/transformer/columnselector/column_selector_test.go +++ b/cdc/sink/dmlsink/mq/transformer/columnselector/column_selector_test.go @@ -92,7 +92,7 @@ func TestNewColumnSelector(t *testing.T) { } func TestVerifyTables(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t1( @@ -262,7 +262,7 @@ func TestVerifyTablesColumnFilteredInDispatcher(t *testing.T) { eventRouter, err := dispatcher.NewEventRouter(replicaConfig, config.ProtocolDefault, "default", "default") require.NoError(t, err) - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t1(a int primary key, b int, c int)` diff --git a/cdc/sink/dmlsink/mq/worker_test.go b/cdc/sink/dmlsink/mq/worker_test.go index d3f7c6b3b32..a98177909fa 100644 --- a/cdc/sink/dmlsink/mq/worker_test.go +++ b/cdc/sink/dmlsink/mq/worker_test.go @@ -62,7 +62,7 @@ func newNonBatchEncodeWorker(ctx context.Context, t *testing.T) (*worker, dmlpro } func TestNonBatchEncode_SendMessages(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t(a varchar(255) primary key)` @@ -263,7 +263,7 @@ func TestBatchEncode_Group(t *testing.T) { } func TestBatchEncode_GroupWhenTableStopping(t *testing.T) { - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t(a varchar(255) primary key)` @@ -357,7 +357,7 @@ func TestBatchEncode_SendMessages(t *testing.T) { worker, p := newBatchEncodeWorker(ctx, t) defer worker.close() - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t(a varchar(255) primary key)` @@ -516,7 +516,7 @@ func TestNonBatchEncode_SendMessagesWhenTableStopping(t *testing.T) { replicatingStatus := state.TableSinkSinking stoppedStatus := state.TableSinkStopping - tk := testkit.New(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.t(a varchar(255) primary key)` From 5833505685d73269ec983d596023bc9dc93f4492 Mon Sep 17 00:00:00 2001 From: 3AceShowHand Date: Mon, 27 Nov 2023 14:21:44 +0800 Subject: [PATCH 09/11] fix ut --- cdc/api/v2/api_helpers_test.go | 9 +- cdc/owner/changefeed_test.go | 20 +-- cdc/puller/ddl_puller_test.go | 128 +++++++++--------- .../columnselector/column_selector_test.go | 5 +- cdc/sink/dmlsink/mq/worker_test.go | 9 +- pkg/filter/expr_filter_test.go | 14 +- pkg/sink/codec/simple/encoder_test.go | 5 +- 7 files changed, 97 insertions(+), 93 deletions(-) diff --git a/cdc/api/v2/api_helpers_test.go 
b/cdc/api/v2/api_helpers_test.go
index 6f06fe52da0..d7a0c3f966c 100644
--- a/cdc/api/v2/api_helpers_test.go
+++ b/cdc/api/v2/api_helpers_test.go
@@ -20,6 +20,7 @@ import (
 	"github.com/golang/mock/gomock"
 	mock_controller "github.com/pingcap/tiflow/cdc/controller/mock"
+	"github.com/pingcap/tiflow/cdc/entry"
 	"github.com/pingcap/tiflow/cdc/model"
 	"github.com/pingcap/tiflow/pkg/config"
 	cerror "github.com/pingcap/tiflow/pkg/errors"
@@ -31,8 +32,8 @@ func TestVerifyCreateChangefeedConfig(t *testing.T) {
 	ctx := context.Background()
 	pdClient := &mockPDClient{}
 	tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig())
-	helper.Tk().MustExec("use test;")
-	storage := helper.Storage()
+	tk.MustExec("use test;")
+	storage := tk.Storage()
 	ctrl := mock_controller.NewMockController(gomock.NewController(t))
 	cfg := &ChangefeedConfig{}
 	h := &APIV2HelpersImpl{}
@@ -112,8 +113,8 @@
 	}
 	oldUpInfo := &model.UpstreamInfo{}
 	tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig())
-	helper.Tk().MustExec("use test;")
-	storage := helper.Storage()
+	tk.MustExec("use test;")
+	storage := tk.Storage()
 	h := &APIV2HelpersImpl{}
 	newCfInfo, newUpInfo, err := h.verifyUpdateChangefeedConfig(ctx, cfg, oldInfo, oldUpInfo, storage, 0)
 	require.NotNil(t, err)
diff --git a/cdc/owner/changefeed_test.go b/cdc/owner/changefeed_test.go
index 9794fb60e85..e2d57d2466d 100644
--- a/cdc/owner/changefeed_test.go
+++ b/cdc/owner/changefeed_test.go
@@ -318,15 +318,15 @@ func TestExecDDL(t *testing.T) {
 	defer tk.Close()
 	// Creates a table, which will be deleted at the start-ts of the changefeed.
 	// It is expected that the changefeed DOES NOT replicate this table.
-	helper.DDL2Job("create database test0")
-	job := helper.DDL2Job("create table test0.table0(id int primary key)")
+	tk.DDL2Job("create database test0")
+	job := tk.DDL2Job("create table test0.table0(id int primary key)")
 	startTs := job.BinlogInfo.FinishedTS + 1000

 	ctx := cdcContext.NewContext4Test(context.Background(), true)
 	ctx.ChangefeedVars().Info.StartTs = startTs

 	cf, captures, tester, state := createChangefeed4Test(ctx, t)
-	cf.upstream.KVStorage = helper.Storage()
+	cf.upstream.KVStorage = tk.Storage()
 	defer cf.Close(ctx)
 	tickTwoTime := func() {
 		checkpointTs, minTableBarrierTs := cf.Tick(ctx, state.Info, state.Status, captures)
@@ -345,7 +345,7 @@
 	require.Nil(t, err)
 	require.Len(t, tableIDs, 1)

-	job = helper.DDL2Job("drop table test0.table0")
+	job = tk.DDL2Job("drop table test0.table0")
 	// the ddl puller's resolved ts grows
 	mockDDLPuller := cf.ddlManager.ddlPuller.(*mockDDLPuller)
 	mockDDLPuller.resolvedTs = startTs
@@ -367,7 +367,7 @@
 	require.Equal(t, mockDDLPuller.resolvedTs, state.Status.CheckpointTs)

 	// handle create database
-	job = helper.DDL2Job("create database test1")
+	job = tk.DDL2Job("create database test1")
 	mockDDLPuller.resolvedTs += 1000
 	job.BinlogInfo.FinishedTS = mockDDLPuller.resolvedTs
 	mockDDLPuller.ddlQueue = append(mockDDLPuller.ddlQueue, job)
@@ -382,7 +382,7 @@
 	require.Equal(t, state.Status.CheckpointTs, mockDDLPuller.resolvedTs)

 	// handle create table
-	job = helper.DDL2Job("create table test1.test1(id int primary key)")
+	job = tk.DDL2Job("create table test1.test1(id int primary key)")
 	mockDDLPuller.resolvedTs += 1000
 	job.BinlogInfo.FinishedTS = mockDDLPuller.resolvedTs
 	mockDDLPuller.ddlQueue = append(mockDDLPuller.ddlQueue, job)
@@ -403,15 +403,15 @@ func TestEmitCheckpointTs(t *testing.T) {
 	defer tk.Close()
 	// Creates a table, which will be deleted at the start-ts of the changefeed.
 	// It is expected that the changefeed DOES NOT replicate this table.
-	helper.DDL2Job("create database test0")
-	job := helper.DDL2Job("create table test0.table0(id int primary key)")
+	tk.DDL2Job("create database test0")
+	job := tk.DDL2Job("create table test0.table0(id int primary key)")
 	startTs := job.BinlogInfo.FinishedTS + 1000

 	ctx := cdcContext.NewContext4Test(context.Background(), true)
 	ctx.ChangefeedVars().Info.StartTs = startTs

 	cf, captures, tester, state := createChangefeed4Test(ctx, t)
-	cf.upstream.KVStorage = helper.Storage()
+	cf.upstream.KVStorage = tk.Storage()
 	defer cf.Close(ctx)

 	tickThreeTime := func() {
@@ -440,7 +440,7 @@
 	require.Equal(t, ts, startTs)
 	require.Len(t, names, 1)

-	job = helper.DDL2Job("drop table test0.table0")
+	job = tk.DDL2Job("drop table test0.table0")
 	// the ddl puller's resolved ts grows
 	mockDDLPuller := cf.ddlManager.ddlPuller.(*mockDDLPuller)
 	mockDDLPuller.resolvedTs = startTs + 1000
diff --git a/cdc/puller/ddl_puller_test.go b/cdc/puller/ddl_puller_test.go
index 95f836c591f..8e3b4872af7 100644
--- a/cdc/puller/ddl_puller_test.go
+++ b/cdc/puller/ddl_puller_test.go
@@ -118,7 +118,7 @@ func newMockDDLJobPuller(
 	t *testing.T,
 	puller Puller,
 	needSchemaStorage bool,
-) (DDLJobPuller, *entry.SchemaTestHelper) {
+) (DDLJobPuller, *entry.TestKit) {
 	res := &ddlJobPullerImpl{
 		outputCh: make(
 			chan *model.DDLJobEntry,
@@ -127,11 +127,11 @@
 	res.multiplexing = false
 	res.puller.Puller = puller

-	var helper *entry.SchemaTestHelper
+	var tk *entry.TestKit
 	if needSchemaStorage {
-		helper = entry.NewSchemaTestHelper(t)
-		kvStorage := helper.Storage()
-		ts := helper.GetCurrentMeta().StartTS
+		tk = entry.NewTestKit(t, config.GetDefaultReplicaConfig())
+		kvStorage := tk.Storage()
+		ts := tk.GetCurrentMeta().StartTS
 		meta, err := kv.GetSnapshotMeta(kvStorage, ts)
 		require.Nil(t, err)
 		f, err := filter.NewFilter(config.GetDefaultReplicaConfig(), "")
@@ -147,13 +147,13 @@
 		res.schemaStorage = schemaStorage
 		res.kvStorage = kvStorage
 	}
-	return res, helper
+	return res, tk
 }

 func TestHandleRenameTable(t *testing.T) {
 	startTs := uint64(10)
 	mockPuller := newMockPuller(t, startTs)
-	ddlJobPuller, helper := newMockDDLJobPuller(t, mockPuller, true)
+	ddlJobPuller, tk := newMockDDLJobPuller(t, mockPuller, true)
 	defer tk.Close()

 	ddlJobPullerImpl := ddlJobPuller.(*ddlJobPullerImpl)
@@ -193,43 +193,43 @@
 	// only table t1 remains.
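	// The skip/error contract that the rename cases below exercise can be
	// summarized as follows. This is a sketch inferred from the assertions
	// in this test, not the actual handleRenameTables implementation;
	// oldNameInRule and newNameInRule are hypothetical booleans standing for
	// whether the old/new table name matches the changefeed's filter rule:
	//
	//	switch {
	//	case oldNameInRule: // the old table is already tracked
	//		// replicate the pair: skip == false, err == nil
	//	case newNameInRule: // only the new name matches the filter rule
	//		// skip == true, err asks the user to add the old name to the filter rule
	//	default: // neither name matches
	//		// silently ignored: skip == true, err == nil
	//	}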
 	{
 		remainTables := make([]int64, 1)
-		job := helper.DDL2Job("create database test1")
+		job := tk.DDL2Job("create database test1")
 		mockPuller.appendDDL(job)
 		mockPuller.appendResolvedTs(job.BinlogInfo.FinishedTS + 1)
 		waitResolvedTs(t, ddlJobPuller, job.BinlogInfo.FinishedTS+1)

-		job = helper.DDL2Job("create table test1.t1(id int)")
+		job = tk.DDL2Job("create table test1.t1(id int)")
 		remainTables[0] = job.TableID
 		mockPuller.appendDDL(job)
 		mockPuller.appendResolvedTs(job.BinlogInfo.FinishedTS + 1)
 		waitResolvedTs(t, ddlJobPuller, job.BinlogInfo.FinishedTS+1)

-		job = helper.DDL2Job("create table test1.t2(id int)")
+		job = tk.DDL2Job("create table test1.t2(id int)")
 		mockPuller.appendDDL(job)
 		mockPuller.appendResolvedTs(job.BinlogInfo.FinishedTS + 1)
 		waitResolvedTs(t, ddlJobPuller, job.BinlogInfo.FinishedTS+1)

-		job = helper.DDL2Job("create table test1.t3(id int)")
+		job = tk.DDL2Job("create table test1.t3(id int)")
 		mockPuller.appendDDL(job)
 		mockPuller.appendResolvedTs(job.BinlogInfo.FinishedTS + 1)
 		waitResolvedTs(t, ddlJobPuller, job.BinlogInfo.FinishedTS+1)

-		job = helper.DDL2Job("create table test1.t5(id int)")
+		job = tk.DDL2Job("create table test1.t5(id int)")
 		mockPuller.appendDDL(job)
 		mockPuller.appendResolvedTs(job.BinlogInfo.FinishedTS + 1)
 		waitResolvedTs(t, ddlJobPuller, job.BinlogInfo.FinishedTS+1)

-		job = helper.DDL2Job("create database ignore1")
+		job = tk.DDL2Job("create database ignore1")
 		mockPuller.appendDDL(job)
 		mockPuller.appendResolvedTs(job.BinlogInfo.FinishedTS + 1)
 		waitResolvedTs(t, ddlJobPuller, job.BinlogInfo.FinishedTS+1)

-		job = helper.DDL2Job("create table ignore1.a(id int)")
+		job = tk.DDL2Job("create table ignore1.a(id int)")
 		mockPuller.appendDDL(job)
 		mockPuller.appendResolvedTs(job.BinlogInfo.FinishedTS + 1)
 		waitResolvedTs(t, ddlJobPuller, job.BinlogInfo.FinishedTS+1)

-		job = helper.DDL2Job("rename table test1.t1 to test1.t11, test1.t3 to test1.t33, test1.t5 to test1.t55, ignore1.a to ignore1.b")
+		job = tk.DDL2Job("rename table test1.t1 to test1.t11, test1.t3 to test1.t33, test1.t5 to test1.t55, ignore1.a to ignore1.b")
 		skip, err := ddlJobPullerImpl.handleRenameTables(job)
 		require.NoError(t, err)
@@ -239,8 +239,8 @@
 	}

 	{
-		_ = helper.DDL2Job("create table test1.t6(id int)")
-		job := helper.DDL2Job("rename table test1.t2 to test1.t22, test1.t6 to test1.t66")
+		_ = tk.DDL2Job("create table test1.t6(id int)")
+		job := tk.DDL2Job("rename table test1.t2 to test1.t22, test1.t6 to test1.t66")
 		skip, err := ddlJobPullerImpl.handleRenameTables(job)
 		require.Error(t, err)
 		require.True(t, skip)
@@ -251,27 +251,27 @@
 	// all tables are filtered out
 	{
-		job := helper.DDL2Job("create database test2")
+		job := tk.DDL2Job("create database test2")
 		mockPuller.appendDDL(job)
 		mockPuller.appendResolvedTs(job.BinlogInfo.FinishedTS + 1)
 		waitResolvedTs(t, ddlJobPuller, job.BinlogInfo.FinishedTS+1)

-		job = helper.DDL2Job("create table test2.t1(id int)")
+		job = tk.DDL2Job("create table test2.t1(id int)")
 		mockPuller.appendDDL(job)
 		mockPuller.appendResolvedTs(job.BinlogInfo.FinishedTS + 1)
 		waitResolvedTs(t, ddlJobPuller, job.BinlogInfo.FinishedTS+1)

-		job = helper.DDL2Job("create table test2.t2(id int)")
+		job = tk.DDL2Job("create table test2.t2(id int)")
 		mockPuller.appendDDL(job)
 		mockPuller.appendResolvedTs(job.BinlogInfo.FinishedTS + 1)
 		waitResolvedTs(t, ddlJobPuller, job.BinlogInfo.FinishedTS+1)

-		job = helper.DDL2Job("create table test2.t3(id int)")
+		job = tk.DDL2Job("create table test2.t3(id int)")
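		// Every DDL in these cases goes through the same three-step rhythm,
		// sketched below with a hypothetical table test2.t4 (the sketch simply
		// restates the calls already used throughout this test):
		//
		//	job = tk.DDL2Job("create table test2.t4(id int)")            // run the DDL, fetch its history job
		//	mockPuller.appendDDL(job)                                    // feed the job into the mock puller
		//	mockPuller.appendResolvedTs(job.BinlogInfo.FinishedTS + 1)   // advance the resolved ts past the job
		//	waitResolvedTs(t, ddlJobPuller, job.BinlogInfo.FinishedTS+1) // block until the puller consumes it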
 		mockPuller.appendDDL(job)
 		mockPuller.appendResolvedTs(job.BinlogInfo.FinishedTS + 1)
 		waitResolvedTs(t, ddlJobPuller, job.BinlogInfo.FinishedTS+1)

-		job = helper.DDL2Job("rename table test2.t1 to test2.t11, test2.t2 to test2.t22, test2.t3 to test2.t33")
+		job = tk.DDL2Job("rename table test2.t1 to test2.t11, test2.t2 to test2.t22, test2.t3 to test2.t33")
 		skip, err := ddlJobPullerImpl.handleRenameTables(job)
 		require.NoError(t, err)
 		require.True(t, skip)
@@ -279,23 +279,23 @@
 	// test uppercase db name
 	{
-		job := helper.DDL2Job("create database Test3")
+		job := tk.DDL2Job("create database Test3")
 		mockPuller.appendDDL(job)
 		mockPuller.appendResolvedTs(job.BinlogInfo.FinishedTS + 1)
 		waitResolvedTs(t, ddlJobPuller, job.BinlogInfo.FinishedTS+1)

-		job = helper.DDL2Job("create table Test3.t1(id int)")
+		job = tk.DDL2Job("create table Test3.t1(id int)")
 		mockPuller.appendDDL(job)
 		mockPuller.appendResolvedTs(job.BinlogInfo.FinishedTS + 1)
 		waitResolvedTs(t, ddlJobPuller, job.BinlogInfo.FinishedTS+1)

 		// skip this table
-		job = helper.DDL2Job("create table Test3.t2(id int)")
+		job = tk.DDL2Job("create table Test3.t2(id int)")
 		mockPuller.appendDDL(job)
 		mockPuller.appendResolvedTs(job.BinlogInfo.FinishedTS + 1)
 		waitResolvedTs(t, ddlJobPuller, job.BinlogInfo.FinishedTS+1)

-		job = helper.DDL2Job("rename table Test3.t1 to Test3.t11, Test3.t2 to Test3.t22")
+		job = tk.DDL2Job("rename table Test3.t1 to Test3.t11, Test3.t2 to Test3.t22")
 		skip, err := ddlJobPullerImpl.handleRenameTables(job)
 		require.NoError(t, err)
 		require.False(t, skip)
@@ -306,45 +306,45 @@
 	// test rename table
 	{
-		job := helper.DDL2Job("create table test1.t99 (id int)")
+		job := tk.DDL2Job("create table test1.t99 (id int)")
 		mockPuller.appendDDL(job)
 		mockPuller.appendResolvedTs(job.BinlogInfo.FinishedTS + 1)
 		waitResolvedTs(t, ddlJobPuller, job.BinlogInfo.FinishedTS+1)

 		// this ddl should be skipped
-		job = helper.DDL2Job("create table test1.t1000 (id int)")
+		job = tk.DDL2Job("create table test1.t1000 (id int)")
 		mockPuller.appendDDL(job)
 		mockPuller.appendResolvedTs(job.BinlogInfo.FinishedTS + 1)
 		waitResolvedTs(t, ddlJobPuller, job.BinlogInfo.FinishedTS+1)

 		// this ddl should be skipped
-		job = helper.DDL2Job("create table test1.t888 (id int)")
+		job = tk.DDL2Job("create table test1.t888 (id int)")
 		mockPuller.appendDDL(job)
 		mockPuller.appendResolvedTs(job.BinlogInfo.FinishedTS + 1)
 		waitResolvedTs(t, ddlJobPuller, job.BinlogInfo.FinishedTS+1)

-		job = helper.DDL2Job("create table test1.t20230808 (id int)")
+		job = tk.DDL2Job("create table test1.t20230808 (id int)")
 		mockPuller.appendDDL(job)
 		mockPuller.appendResolvedTs(job.BinlogInfo.FinishedTS + 1)
 		waitResolvedTs(t, ddlJobPuller, job.BinlogInfo.FinishedTS+1)

-		job = helper.DDL2Job("create table test1.t202308081 (id int)")
+		job = tk.DDL2Job("create table test1.t202308081 (id int)")
 		mockPuller.appendDDL(job)
 		mockPuller.appendResolvedTs(job.BinlogInfo.FinishedTS + 1)
 		waitResolvedTs(t, ddlJobPuller, job.BinlogInfo.FinishedTS+1)

-		job = helper.DDL2Job("create table test1.t202308082 (id int)")
+		job = tk.DDL2Job("create table test1.t202308082 (id int)")
 		mockPuller.appendDDL(job)
 		mockPuller.appendResolvedTs(job.BinlogInfo.FinishedTS + 1)
 		waitResolvedTs(t, ddlJobPuller, job.BinlogInfo.FinishedTS+1)

 		// since test1.t99 is in the filter rule, we replicate it
-		job = helper.DDL2Job("rename table test1.t99 to test1.t999")
+		job = tk.DDL2Job("rename table test1.t99 to test1.t999")
 		skip, err := ddlJobPullerImpl.handleJob(job)
 		require.NoError(t, err)
 		require.False(t, skip)

 		// the new name test1.t100 is in the filter rule, but the old name
 		// test1.t1000 is not, so an error is reported and the job is skipped
-		job = helper.DDL2Job("rename table test1.t1000 to test1.t100")
+		job = tk.DDL2Job("rename table test1.t1000 to test1.t100")
 		skip, err = ddlJobPullerImpl.handleJob(job)
 		require.Error(t, err)
 		require.True(t, skip)
@@ -353,14 +353,14 @@
 			"if you want to replicate this table, please add its old name to filter rule.",
 			job.TableID, job.Query))

 		// since test1.t888 and test1.t777 are not in the filter rule, skip it
-		job = helper.DDL2Job("rename table test1.t888 to test1.t777")
+		job = tk.DDL2Job("rename table test1.t888 to test1.t777")
 		skip, err = ddlJobPullerImpl.handleJob(job)
 		require.NoError(t, err)
 		require.True(t, skip)

 		// since test1.t20230808 is in the filter rule, replicate it
 		// ref: https://github.com/pingcap/tiflow/issues/9488
-		job = helper.DDL2Job("rename table test1.t20230808 to ignore1.ignore")
+		job = tk.DDL2Job("rename table test1.t20230808 to ignore1.ignore")
 		skip, err = ddlJobPullerImpl.handleJob(job)
 		require.NoError(t, err)
 		require.False(t, skip)
@@ -368,7 +368,7 @@
 		// FIXME(dongmen): since test1.t202308081 and test1.t202308082 are in the filter rule,
 		// they should be replicated, but for now this throws an error because
 		// schema ignore1 is not in the schemaStorage
 		// ref: https://github.com/pingcap/tiflow/issues/9488
-		job = helper.DDL2Job("rename table test1.t202308081 to ignore1.ignore1, test1.t202308082 to ignore1.dongmen")
+		job = tk.DDL2Job("rename table test1.t202308081 to ignore1.ignore1, test1.t202308082 to ignore1.dongmen")
 		skip, err = ddlJobPullerImpl.handleJob(job)
 		require.NotNil(t, err)
 		require.True(t, skip)
@@ -379,7 +379,7 @@

 func TestHandleJob(t *testing.T) {
 	startTs := uint64(10)
 	mockPuller := newMockPuller(t, startTs)
-	ddlJobPuller, helper := newMockDDLJobPuller(t, mockPuller, true)
+	ddlJobPuller, tk := newMockDDLJobPuller(t, mockPuller, true)
 	defer tk.Close()

 	ddlJobPullerImpl := ddlJobPuller.(*ddlJobPullerImpl)
@@ -405,17 +405,17 @@
 	// test create database
 	{
-		job := helper.DDL2Job("create database test1")
+		job := tk.DDL2Job("create database test1")
 		skip, err := ddlJobPullerImpl.handleJob(job)
 		require.NoError(t, err)
 		require.False(t, skip)

-		job = helper.DDL2Job("create database test2")
+		job = tk.DDL2Job("create database test2")
 		skip, err = ddlJobPullerImpl.handleJob(job)
 		require.NoError(t, err)
 		require.True(t, skip)

-		job = helper.DDL2Job("create database test3")
+		job = tk.DDL2Job("create database test3")
 		skip, err = ddlJobPullerImpl.handleJob(job)
 		require.NoError(t, err)
 		require.True(t, skip)
@@ -423,7 +423,7 @@
 	// test drop databases
 	{
-		job := helper.DDL2Job("drop database test2")
+		job := tk.DDL2Job("drop database test2")
 		skip, err := ddlJobPullerImpl.handleJob(job)
 		require.NoError(t, err)
 		require.True(t, skip)
@@ -431,44 +431,44 @@
 	// test create table
 	{
-		job := helper.DDL2Job("create table test1.t1(id int) partition by range(id) (partition p0 values less than (10))")
+		job := tk.DDL2Job("create table test1.t1(id int) partition by range(id) (partition p0 values less than (10))")
 		skip, err := ddlJobPullerImpl.handleJob(job)
 		require.NoError(t, err)
 		require.False(t, skip)

-		job = helper.DDL2Job("alter table test1.t1 add column c1 int")
+		job = tk.DDL2Job("alter table test1.t1 add column c1 int")
 		skip, err = ddlJobPullerImpl.handleJob(job)
require.NoError(t, err) require.True(t, skip) - job = helper.DDL2Job("create table test1.testStartTs(id int)") + job = tk.DDL2Job("create table test1.testStartTs(id int)") skip, err = ddlJobPullerImpl.handleJob(job) require.NoError(t, err) require.False(t, skip) - job = helper.DDL2Job("alter table test1.testStartTs add column c1 int") + job = tk.DDL2Job("alter table test1.testStartTs add column c1 int") job.StartTS = 1 skip, err = ddlJobPullerImpl.handleJob(job) require.NoError(t, err) require.True(t, skip) - job = helper.DDL2Job("create table test1.t2(id int)") + job = tk.DDL2Job("create table test1.t2(id int)") skip, err = ddlJobPullerImpl.handleJob(job) require.NoError(t, err) require.False(t, skip) - job = helper.DDL2Job("create table test1.t3(id int)") + job = tk.DDL2Job("create table test1.t3(id int)") skip, err = ddlJobPullerImpl.handleJob(job) require.NoError(t, err) require.True(t, skip) - job = helper.DDL2Job("create table test1.t4(id int) partition by range(id) (partition p0 values less than (10))") + job = tk.DDL2Job("create table test1.t4(id int) partition by range(id) (partition p0 values less than (10))") skip, err = ddlJobPullerImpl.handleJob(job) require.NoError(t, err) require.True(t, skip) // make sure no schema not found error - job = helper.DDL2Job("create table test3.t1(id int) partition by range(id) (partition p0 values less than (10))") + job = tk.DDL2Job("create table test3.t1(id int) partition by range(id) (partition p0 values less than (10))") skip, err = ddlJobPullerImpl.handleJob(job) require.NoError(t, err) require.True(t, skip) @@ -476,12 +476,12 @@ func TestHandleJob(t *testing.T) { // test drop table { - job := helper.DDL2Job("drop table test1.t2") + job := tk.DDL2Job("drop table test1.t2") skip, err := ddlJobPullerImpl.handleJob(job) require.NoError(t, err) require.False(t, skip) - job = helper.DDL2Job("drop table test1.t3") + job = tk.DDL2Job("drop table test1.t3") skip, err = ddlJobPullerImpl.handleJob(job) require.NoError(t, err) require.True(t, skip) @@ -489,12 +489,12 @@ func TestHandleJob(t *testing.T) { // test add column and drop column { - job := helper.DDL2Job("alter table test1.t1 add column age int") + job := tk.DDL2Job("alter table test1.t1 add column age int") skip, err := ddlJobPullerImpl.handleJob(job) require.NoError(t, err) require.False(t, skip) - job = helper.DDL2Job("alter table test1.t4 add column age int") + job = tk.DDL2Job("alter table test1.t4 add column age int") skip, err = ddlJobPullerImpl.handleJob(job) require.NoError(t, err) require.True(t, skip) @@ -502,22 +502,22 @@ func TestHandleJob(t *testing.T) { // test add index and drop index { - job := helper.DDL2Job("alter table test1.t1 add index idx_age(age)") + job := tk.DDL2Job("alter table test1.t1 add index idx_age(age)") skip, err := ddlJobPullerImpl.handleJob(job) require.NoError(t, err) require.False(t, skip) - job = helper.DDL2Job("alter table test1.t4 add index idx_age(age)") + job = tk.DDL2Job("alter table test1.t4 add index idx_age(age)") skip, err = ddlJobPullerImpl.handleJob(job) require.NoError(t, err) require.True(t, skip) - job = helper.DDL2Job("alter table test1.t1 drop index idx_age") + job = tk.DDL2Job("alter table test1.t1 drop index idx_age") skip, err = ddlJobPullerImpl.handleJob(job) require.NoError(t, err) require.False(t, skip) - job = helper.DDL2Job("alter table test1.t4 drop index idx_age") + job = tk.DDL2Job("alter table test1.t4 drop index idx_age") skip, err = ddlJobPullerImpl.handleJob(job) require.NoError(t, err) require.True(t, skip) @@ -525,12 
+525,12 @@ func TestHandleJob(t *testing.T) { // test drop column { - job := helper.DDL2Job("alter table test1.t1 drop column age") + job := tk.DDL2Job("alter table test1.t1 drop column age") skip, err := ddlJobPullerImpl.handleJob(job) require.NoError(t, err) require.False(t, skip) - job = helper.DDL2Job("alter table test1.t4 drop column age") + job = tk.DDL2Job("alter table test1.t4 drop column age") skip, err = ddlJobPullerImpl.handleJob(job) require.NoError(t, err) require.True(t, skip) @@ -538,12 +538,12 @@ func TestHandleJob(t *testing.T) { // test truncate table { - job := helper.DDL2Job("truncate table test1.t1") + job := tk.DDL2Job("truncate table test1.t1") skip, err := ddlJobPullerImpl.handleJob(job) require.NoError(t, err) require.False(t, skip) - job = helper.DDL2Job("truncate table test1.t4") + job = tk.DDL2Job("truncate table test1.t4") skip, err = ddlJobPullerImpl.handleJob(job) require.NoError(t, err) require.True(t, skip) @@ -551,12 +551,12 @@ func TestHandleJob(t *testing.T) { // test add table partition { - job := helper.DDL2Job("alter table test1.t1 add partition (partition p1 values less than (100))") + job := tk.DDL2Job("alter table test1.t1 add partition (partition p1 values less than (100))") skip, err := ddlJobPullerImpl.handleJob(job) require.NoError(t, err) require.False(t, skip) - job = helper.DDL2Job("alter table test1.t4 add partition (partition p1 values less than (100))") + job = tk.DDL2Job("alter table test1.t4 add partition (partition p1 values less than (100))") skip, err = ddlJobPullerImpl.handleJob(job) require.NoError(t, err) require.True(t, skip) diff --git a/cdc/sink/dmlsink/mq/transformer/columnselector/column_selector_test.go b/cdc/sink/dmlsink/mq/transformer/columnselector/column_selector_test.go index c308caef868..daa585155cb 100644 --- a/cdc/sink/dmlsink/mq/transformer/columnselector/column_selector_test.go +++ b/cdc/sink/dmlsink/mq/transformer/columnselector/column_selector_test.go @@ -19,6 +19,7 @@ package columnselector import ( "testing" + "github.com/pingcap/tiflow/cdc/entry" "github.com/pingcap/tiflow/cdc/model" "github.com/pingcap/tiflow/cdc/sink/dmlsink/mq/dispatcher" "github.com/pingcap/tiflow/pkg/config" @@ -104,7 +105,7 @@ func TestVerifyTables(t *testing.T) { unique key uk_b_c(b, c), unique key uk_d_e(d, e), key idx_c_d(c, d))` - job := helper.DDL2Job(sql) + job := tk.DDL2Job(sql) tableInfo := model.WrapTableInfo(0, "test", 0, job.BinlogInfo.TableInfo) infos := []*model.TableInfo{tableInfo} @@ -266,7 +267,7 @@ func TestVerifyTablesColumnFilteredInDispatcher(t *testing.T) { defer tk.Close() sql := `create table test.t1(a int primary key, b int, c int)` - job := helper.DDL2Job(sql) + job := tk.DDL2Job(sql) tableInfo := model.WrapTableInfo(0, "test", 0, job.BinlogInfo.TableInfo) infos := []*model.TableInfo{tableInfo} diff --git a/cdc/sink/dmlsink/mq/worker_test.go b/cdc/sink/dmlsink/mq/worker_test.go index a98177909fa..8765e585289 100644 --- a/cdc/sink/dmlsink/mq/worker_test.go +++ b/cdc/sink/dmlsink/mq/worker_test.go @@ -20,6 +20,7 @@ import ( "time" "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tiflow/cdc/entry" "github.com/pingcap/tiflow/cdc/model" "github.com/pingcap/tiflow/cdc/sink/dmlsink" "github.com/pingcap/tiflow/cdc/sink/dmlsink/mq/dmlproducer" @@ -66,7 +67,7 @@ func TestNonBatchEncode_SendMessages(t *testing.T) { defer tk.Close() sql := `create table test.t(a varchar(255) primary key)` - job := helper.DDL2Job(sql) + job := tk.DDL2Job(sql) tableInfo := model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo) 
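	// model.WrapTableInfo wraps TiDB's TableInfo with the schema context CDC
	// needs; annotated here for reference, with the argument meanings assumed
	// from how these tests use the call:
	//
	//	tableInfo := model.WrapTableInfo(
	//		0,                        // schema ID (not relevant in these tests)
	//		"test",                   // schema name
	//		1,                        // schema version
	//		job.BinlogInfo.TableInfo, // the TableInfo produced by the DDL job
	//	)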
 	_, _, colInfo := tableInfo.GetRowColInfos()
@@ -267,7 +268,7 @@ func TestBatchEncode_GroupWhenTableStopping(t *testing.T) {
 	defer tk.Close()

 	sql := `create table test.t(a varchar(255) primary key)`
-	job := helper.DDL2Job(sql)
+	job := tk.DDL2Job(sql)
 	tableInfo := model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo)
 	_, _, colInfo := tableInfo.GetRowColInfos()
@@ -361,7 +362,7 @@ func TestBatchEncode_SendMessages(t *testing.T) {
 	defer tk.Close()

 	sql := `create table test.t(a varchar(255) primary key)`
-	job := helper.DDL2Job(sql)
+	job := tk.DDL2Job(sql)
 	tableInfo := model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo)
 	_, _, colInfo := tableInfo.GetRowColInfos()
@@ -520,7 +521,7 @@ func TestNonBatchEncode_SendMessagesWhenTableStopping(t *testing.T) {
 	defer tk.Close()

 	sql := `create table test.t(a varchar(255) primary key)`
-	job := helper.DDL2Job(sql)
+	job := tk.DDL2Job(sql)
 	tableInfo := model.WrapTableInfo(0, "test", 1, job.BinlogInfo.TableInfo)
 	_, _, colInfo := tableInfo.GetRowColInfos()
diff --git a/pkg/filter/expr_filter_test.go b/pkg/filter/expr_filter_test.go
index 119448ada4c..dfcc61f024c 100644
--- a/pkg/filter/expr_filter_test.go
+++ b/pkg/filter/expr_filter_test.go
@@ -26,9 +26,9 @@ import (
 )

 func TestShouldSkipDMLBasic(t *testing.T) {
-	helper := newTestHelper(t)
-	defer tk.Close()
-	helper.getTk().MustExec("use test;")
+	tk := newTestHelper(t)
+	defer tk.close()
+	tk.getTk().MustExec("use test;")

 	type innerCase struct {
 		schema string
 		table  string
@@ -323,7 +323,7 @@
 	})

 	for _, tc := range testCases {
-		tableInfo := helper.execDDL(tc.ddl)
+		tableInfo := tk.execDDL(tc.ddl)
 		f, err := newExprFilter("", tc.cfg, config.GetDefaultReplicaConfig().SQLMode)
 		require.Nil(t, err)
 		for _, c := range tc.cases {
@@ -355,7 +355,7 @@
 // are as expected.
 func TestShouldSkipDMLError(t *testing.T) {
 	helper := newTestHelper(t)
-	defer tk.Close()
+	defer helper.close()
 	helper.getTk().MustExec("use test;")

 	type innerCase struct {
@@ -472,7 +472,7 @@
 // the filter works as expected.
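// For context, the tc.cfg driving these cases is an event-filter rule that
// carries SQL expressions. A minimal sketch, assuming pkg/config's usual
// EventFilterRule field names (the rule itself is illustrative only):
//
//	cfg := &config.FilterConfig{
//		EventFilters: []*config.EventFilterRule{{
//			Matcher:               []string{"test.student"},
//			IgnoreInsertValueExpr: "age >= 20", // inserted rows matching this are skipped
//		}},
//	}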
func TestShouldSkipDMLTableUpdated(t *testing.T) { helper := newTestHelper(t) - defer tk.Close() + defer helper.close() helper.getTk().MustExec("use test;") type innerCase struct { @@ -668,7 +668,7 @@ func TestShouldSkipDMLTableUpdated(t *testing.T) { func TestVerify(t *testing.T) { helper := newTestHelper(t) - defer tk.Close() + defer helper.close() helper.getTk().MustExec("use test;") type testCase struct { diff --git a/pkg/sink/codec/simple/encoder_test.go b/pkg/sink/codec/simple/encoder_test.go index 0741ae20c48..24a4d8e228d 100644 --- a/pkg/sink/codec/simple/encoder_test.go +++ b/pkg/sink/codec/simple/encoder_test.go @@ -18,6 +18,7 @@ import ( "github.com/pingcap/tiflow/cdc/entry" "github.com/pingcap/tiflow/cdc/model" + "github.com/pingcap/tiflow/pkg/config" "github.com/stretchr/testify/require" ) @@ -45,7 +46,7 @@ func TestEncodeCheckpoint(t *testing.T) { } func TestEncodeDDLEvent(t *testing.T) { - tk := entry.NewTestKit(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.test(id int primary key, name varchar(255) not null, @@ -85,7 +86,7 @@ func TestEncodeDDLEvent(t *testing.T) { } func TestEncodeBootstrapEvent(t *testing.T) { - tk := entry.NewTestKit(t) + tk := entry.NewTestKit(t, config.GetDefaultReplicaConfig()) defer tk.Close() sql := `create table test.test(id int primary key, name varchar(255) not null, From 8bce06c4db8db841d2cdc3339f3c07392589f6a4 Mon Sep 17 00:00:00 2001 From: 3AceShowHand Date: Mon, 27 Nov 2023 15:13:59 +0800 Subject: [PATCH 10/11] fix mounter test --- cdc/entry/mounter_test.go | 2723 ++++++++++++++++++------------------- cdc/entry/testkit.go | 125 +- 2 files changed, 1400 insertions(+), 1448 deletions(-) diff --git a/cdc/entry/mounter_test.go b/cdc/entry/mounter_test.go index 07d51eef6a0..f543cd1e912 100644 --- a/cdc/entry/mounter_test.go +++ b/cdc/entry/mounter_test.go @@ -14,36 +14,16 @@ package entry import ( - "bytes" - "context" - "math" "strings" "testing" - "time" "github.com/pingcap/log" - "github.com/pingcap/tidb/ddl" - "github.com/pingcap/tidb/executor" tidbkv "github.com/pingcap/tidb/kv" - "github.com/pingcap/tidb/meta/autoid" - "github.com/pingcap/tidb/parser" - "github.com/pingcap/tidb/parser/ast" - timodel "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" - "github.com/pingcap/tidb/types" - "github.com/pingcap/tidb/util/mock" "github.com/pingcap/tiflow/cdc/model" "github.com/pingcap/tiflow/pkg/config" - cerror "github.com/pingcap/tiflow/pkg/errors" - "github.com/pingcap/tiflow/pkg/filter" - "github.com/pingcap/tiflow/pkg/integrity" - "github.com/pingcap/tiflow/pkg/sink/codec/avro" - codecCommon "github.com/pingcap/tiflow/pkg/sink/codec/common" "github.com/pingcap/tiflow/pkg/spanz" - "github.com/pingcap/tiflow/pkg/sqlmodel" - "github.com/pingcap/tiflow/pkg/util" "github.com/stretchr/testify/require" - "github.com/tikv/client-go/v2/oracle" "go.uber.org/zap" ) @@ -267,84 +247,55 @@ func testMounterDisableOldValue(t *testing.T, tc struct { tk.MustExec("set @@tidb_enable_clustered_index=1;") tk.MustExec("use test;") - tk.MustExec(tc.createTableDDL) - - f, err := filter.NewFilter(replicaConfig, "") - require.Nil(t, err) - jobs, err := tk.GetAllHistoryDDLJob(f) - require.Nil(t, err) - - scheamStorage, err := NewSchemaStorage(nil, 0, false, dummyChangeFeedID, util.RoleTester, f) - require.Nil(t, err) - for _, job := range jobs { - err := scheamStorage.HandleDDLJob(job) - require.Nil(t, err) - } - tableInfo, ok := scheamStorage.GetLastSnapshot().TableByName("test", 
tc.tableName)
-	require.True(t, ok)
+	tableInfo := tk.DDL2TableInfo(tc.createTableDDL)

 	if tableInfo.IsCommonHandle {
 		// we can check this log to make sure the clustered index is enabled
 		log.Info("this table is enable the clustered index",
 			zap.String("tableName", tableInfo.Name.L))
 	}

+	var count int
 	for _, params := range tc.values {
 		insertSQL := prepareInsertSQL(t, tableInfo, len(params))
-		tk.MustExec(insertSQL, params...)
-	}
-
-	ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope)
-	require.Nil(t, err)
-	scheamStorage.AdvanceResolvedTs(ver.Ver)
-	filter, err := filter.NewFilter(replicaConfig, "")
-	require.Nil(t, err)
-	mounter := NewMounter(scheamStorage,
-		model.DefaultChangeFeedID("c1"), time.UTC, filter, replicaConfig.Integrity).(*mounter)
-	mounter.tz = time.Local
-	ctx := context.Background()
-
-	// [TODO] check size and readd rowBytes
-	mountAndCheckRowInTable := func(tableID int64, _ []int, f func(key []byte, value []byte) *model.RawKVEntry) int {
-		var rows int
-		walkTableSpanInStore(t, tk.Storage(), tableID, func(key []byte, value []byte) {
-			rawKV := f(key, value)
-			row, err := mounter.unmarshalAndMountRowChanged(ctx, rawKV)
-			require.Nil(t, err)
-			if row == nil {
-				return
-			}
-			rows++
-			require.Equal(t, row.Table.Table, tc.tableName)
-			require.Equal(t, row.Table.Schema, "test")
-			// [TODO] check size and reopen this check
-			// require.Equal(t, rowBytes[rows-1], row.ApproximateBytes(), row)
-			t.Log("ApproximateBytes", tc.tableName, rows-1, row.ApproximateBytes())
-			// TODO: test column flag, column type and index columns
-			if len(row.Columns) != 0 {
-				checkSQL, params := prepareCheckSQL(t, tc.tableName, row.Columns)
-				result := tk.MustQuery(checkSQL, params...)
-				result.Check([][]interface{}{{"1"}})
-			}
-			if len(row.PreColumns) != 0 {
-				checkSQL, params := prepareCheckSQL(t, tc.tableName, row.PreColumns)
-				result := tk.MustQuery(checkSQL, params...)
-				result.Check([][]interface{}{{"1"}})
-			}
-		})
-		return rows
-	}
-	mountAndCheckRow := func(rowsBytes [][]int, f func(key []byte, value []byte) *model.RawKVEntry) int {
-		partitionInfo := tableInfo.GetPartitionInfo()
-		if partitionInfo == nil {
-			return mountAndCheckRowInTable(tableInfo.ID, rowsBytes[0], f)
+		event := tk.DML2Event(insertSQL, "test", tc.tableName)
+		if event == nil {
+			continue
 		}
-		var rows int
-		for i, p := range partitionInfo.Definitions {
-			rows += mountAndCheckRowInTable(p.ID, rowsBytes[i], f)
+		count++
+		require.Equal(t, event.Table.Table, tc.tableName)
+		require.Equal(t, event.Table.Schema, "test")
+		t.Log("ApproximateBytes", tc.tableName, count-1, event.ApproximateBytes())
+		if len(event.Columns) != 0 {
+			checkSQL, params := prepareCheckSQL(t, tc.tableName, event.Columns)
+			result := tk.MustQuery(checkSQL, params...)
+			result.Check([][]interface{}{{"1"}})
+		}
+		if len(event.PreColumns) != 0 {
+			checkSQL, params := prepareCheckSQL(t, tc.tableName, event.PreColumns)
+			result := tk.MustQuery(checkSQL, params...)
+ result.Check([][]interface{}{{"1"}}) } - return rows } + //ctx := context.Background() + // [TODO] check size and readd rowBytes + //mountAndCheckRowInTable := func(tableID int64, _ []int, f func(key []byte, value []byte) *model.RawKVEntry) int { + // var rows int + // walkTableSpanInStore(t, tk.Storage(), tableID, func(key []byte, value []byte) {}) + // return rows + //} + //mountAndCheckRow := func(rowsBytes [][]int, f func(key []byte, value []byte) *model.RawKVEntry) int { + // partitionInfo := tableInfo.GetPartitionInfo() + // if partitionInfo == nil { + // return mountAndCheckRowInTable(tableInfo.ID, rowsBytes[0], f) + // } + // var rows int + // for i, p := range partitionInfo.Definitions { + // rows += mountAndCheckRowInTable(p.ID, rowsBytes[i], f) + // } + // return rows + //} + rows := mountAndCheckRow(tc.putApproximateBytes, func(key []byte, value []byte) *model.RawKVEntry { return &model.RawKVEntry{ OpType: model.OpTypePut, @@ -445,1302 +396,1302 @@ func walkTableSpanInStore(t *testing.T, store tidbkv.Storage, tableID int64, f f } } -func getLastKeyValueInStore(t *testing.T, store tidbkv.Storage, tableID int64) (key, value []byte) { - txn, err := store.Begin() - require.NoError(t, err) - defer txn.Rollback() //nolint:errcheck - startKey, endKey := spanz.GetTableRange(tableID) - kvIter, err := txn.Iter(startKey, endKey) - require.NoError(t, err) - defer kvIter.Close() - for kvIter.Valid() { - key = kvIter.Key() - value = kvIter.Value() - err = kvIter.Next() - require.NoError(t, err) - } - return key, value -} - -// We use OriginDefaultValue instead of DefaultValue in the ut, pls ref to -// https://github.com/pingcap/tiflow/issues/4048 -// FIXME: OriginDefaultValue seems always to be string, and test more corner case -// Ref: https://github.com/pingcap/tidb/blob/d2c352980a43bb593db81fd1db996f47af596d91/table/column.go#L489 -func TestGetDefaultZeroValue(t *testing.T) { - // Check following MySQL type, ref to: - // https://github.com/pingcap/tidb/blob/master/parser/mysql/type.go - - // mysql flag null - ftNull := types.NewFieldType(mysql.TypeUnspecified) - - // mysql.TypeTiny + notnull - ftTinyIntNotNull := types.NewFieldType(mysql.TypeTiny) - ftTinyIntNotNull.AddFlag(mysql.NotNullFlag) - - // mysql.TypeTiny + notnull + unsigned - ftTinyIntNotNullUnSigned := types.NewFieldType(mysql.TypeTiny) - ftTinyIntNotNullUnSigned.SetFlag(mysql.NotNullFlag) - ftTinyIntNotNullUnSigned.AddFlag(mysql.UnsignedFlag) - - // mysql.TypeTiny + null - ftTinyIntNull := types.NewFieldType(mysql.TypeTiny) - - // mysql.TypeShort + notnull - ftShortNotNull := types.NewFieldType(mysql.TypeShort) - ftShortNotNull.SetFlag(mysql.NotNullFlag) - - // mysql.TypeLong + notnull - ftLongNotNull := types.NewFieldType(mysql.TypeLong) - ftLongNotNull.SetFlag(mysql.NotNullFlag) - - // mysql.TypeLonglong + notnull - ftLongLongNotNull := types.NewFieldType(mysql.TypeLonglong) - ftLongLongNotNull.SetFlag(mysql.NotNullFlag) - - // mysql.TypeInt24 + notnull - ftInt24NotNull := types.NewFieldType(mysql.TypeInt24) - ftInt24NotNull.SetFlag(mysql.NotNullFlag) - - // mysql.TypeFloat + notnull - ftTypeFloatNotNull := types.NewFieldType(mysql.TypeFloat) - ftTypeFloatNotNull.SetFlag(mysql.NotNullFlag) - - // mysql.TypeFloat + notnull + unsigned - ftTypeFloatNotNullUnSigned := types.NewFieldType(mysql.TypeFloat) - ftTypeFloatNotNullUnSigned.SetFlag(mysql.NotNullFlag | mysql.UnsignedFlag) - - // mysql.TypeFloat + null - ftTypeFloatNull := types.NewFieldType(mysql.TypeFloat) - - // mysql.TypeDouble + notnull - ftTypeDoubleNotNull := 
types.NewFieldType(mysql.TypeDouble) - ftTypeDoubleNotNull.SetFlag(mysql.NotNullFlag) - - // mysql.TypeNewDecimal + notnull - ftTypeNewDecimalNull := types.NewFieldType(mysql.TypeNewDecimal) - ftTypeNewDecimalNull.SetFlen(5) - ftTypeNewDecimalNull.SetDecimal(2) - - // mysql.TypeNewDecimal + notnull - ftTypeNewDecimalNotNull := types.NewFieldType(mysql.TypeNewDecimal) - ftTypeNewDecimalNotNull.SetFlag(mysql.NotNullFlag) - ftTypeNewDecimalNotNull.SetFlen(5) - ftTypeNewDecimalNotNull.SetDecimal(2) - - // mysql.TypeNull - ftTypeNull := types.NewFieldType(mysql.TypeNull) - - // mysql.TypeTimestamp + notnull - ftTypeTimestampNotNull := types.NewFieldType(mysql.TypeTimestamp) - ftTypeTimestampNotNull.SetFlag(mysql.NotNullFlag) - - // mysql.TypeTimestamp + notnull - ftTypeTimestampNull := types.NewFieldType(mysql.TypeTimestamp) - - // mysql.TypeDate + notnull - ftTypeDateNotNull := types.NewFieldType(mysql.TypeDate) - ftTypeDateNotNull.SetFlag(mysql.NotNullFlag) - - // mysql.TypeDuration + notnull - ftTypeDurationNotNull := types.NewFieldType(mysql.TypeDuration) - ftTypeDurationNotNull.SetFlag(mysql.NotNullFlag) - - // mysql.TypeDatetime + notnull - ftTypeDatetimeNotNull := types.NewFieldType(mysql.TypeDatetime) - ftTypeDatetimeNotNull.SetFlag(mysql.NotNullFlag) - - // mysql.TypeYear + notnull - ftTypeYearNotNull := types.NewFieldType(mysql.TypeYear) - ftTypeYearNotNull.SetFlag(mysql.NotNullFlag) - - // mysql.TypeNewDate + notnull - ftTypeNewDateNotNull := types.NewFieldType(mysql.TypeNewDate) - ftTypeNewDateNotNull.SetFlag(mysql.NotNullFlag) - - // mysql.TypeVarchar + notnull - ftTypeVarcharNotNull := types.NewFieldType(mysql.TypeVarchar) - ftTypeVarcharNotNull.SetFlag(mysql.NotNullFlag) - - // mysql.TypeTinyBlob + notnull - ftTypeTinyBlobNotNull := types.NewFieldType(mysql.TypeTinyBlob) - ftTypeTinyBlobNotNull.SetFlag(mysql.NotNullFlag) - - // mysql.TypeMediumBlob + notnull - ftTypeMediumBlobNotNull := types.NewFieldType(mysql.TypeMediumBlob) - ftTypeMediumBlobNotNull.SetFlag(mysql.NotNullFlag) - - // mysql.TypeLongBlob + notnull - ftTypeLongBlobNotNull := types.NewFieldType(mysql.TypeLongBlob) - ftTypeLongBlobNotNull.SetFlag(mysql.NotNullFlag) - - // mysql.TypeBlob + notnull - ftTypeBlobNotNull := types.NewFieldType(mysql.TypeBlob) - ftTypeBlobNotNull.SetFlag(mysql.NotNullFlag) - - // mysql.TypeVarString + notnull - ftTypeVarStringNotNull := types.NewFieldType(mysql.TypeVarString) - ftTypeVarStringNotNull.SetFlag(mysql.NotNullFlag) - - // mysql.TypeString + notnull - ftTypeStringNotNull := types.NewFieldType(mysql.TypeString) - ftTypeStringNotNull.SetFlag(mysql.NotNullFlag) - - // mysql.TypeBit + notnull - ftTypeBitNotNull := types.NewFieldType(mysql.TypeBit) - ftTypeBitNotNull.SetFlag(mysql.NotNullFlag) - - // mysql.TypeJSON + notnull - ftTypeJSONNotNull := types.NewFieldType(mysql.TypeJSON) - ftTypeJSONNotNull.SetFlag(mysql.NotNullFlag) - - // mysql.TypeEnum + notnull + nodefault - ftTypeEnumNotNull := types.NewFieldType(mysql.TypeEnum) - ftTypeEnumNotNull.SetFlag(mysql.NotNullFlag) - ftTypeEnumNotNull.SetElems([]string{"e0", "e1"}) - - // mysql.TypeEnum + null - ftTypeEnumNull := types.NewFieldType(mysql.TypeEnum) - - // mysql.TypeSet + notnull - ftTypeSetNotNull := types.NewFieldType(mysql.TypeSet) - ftTypeSetNotNull.SetFlag(mysql.NotNullFlag) - - // mysql.TypeGeometry + notnull - ftTypeGeometryNotNull := types.NewFieldType(mysql.TypeGeometry) - ftTypeGeometryNotNull.SetFlag(mysql.NotNullFlag) - - testCases := []struct { - Name string - ColInfo timodel.ColumnInfo - Res interface{} - Default 
interface{} - }{ - // mysql flag null - { - Name: "mysql flag null", - ColInfo: timodel.ColumnInfo{FieldType: *ftNull}, - Res: nil, - Default: nil, - }, - // mysql.TypeTiny + notnull + nodefault - { - Name: "mysql.TypeTiny + notnull + nodefault", - ColInfo: timodel.ColumnInfo{FieldType: *ftTinyIntNotNull.Clone()}, - Res: int64(0), - Default: nil, - }, - // mysql.TypeTiny + notnull + default - { - Name: "mysql.TypeTiny + notnull + default", - ColInfo: timodel.ColumnInfo{ - OriginDefaultValue: -1314, - FieldType: *ftTinyIntNotNull, - }, - Res: int64(-1314), - Default: int64(-1314), - }, - // mysql.TypeTiny + notnull + unsigned - { - Name: "mysql.TypeTiny + notnull + default + unsigned", - ColInfo: timodel.ColumnInfo{FieldType: *ftTinyIntNotNullUnSigned}, - Res: uint64(0), - Default: nil, - }, - // mysql.TypeTiny + notnull + default + unsigned - { - Name: "mysql.TypeTiny + notnull + unsigned", - ColInfo: timodel.ColumnInfo{OriginDefaultValue: uint64(1314), FieldType: *ftTinyIntNotNullUnSigned}, - Res: uint64(1314), - Default: uint64(1314), - }, - // mysql.TypeTiny + null + default - { - Name: "mysql.TypeTiny + null + default", - ColInfo: timodel.ColumnInfo{ - OriginDefaultValue: -1314, - FieldType: *ftTinyIntNull, - }, - Res: int64(-1314), - Default: int64(-1314), - }, - // mysql.TypeTiny + null + nodefault - { - Name: "mysql.TypeTiny + null + nodefault", - ColInfo: timodel.ColumnInfo{FieldType: *ftTinyIntNull}, - Res: nil, - Default: nil, - }, - // mysql.TypeShort, others testCases same as tiny - { - Name: "mysql.TypeShort, others testCases same as tiny", - ColInfo: timodel.ColumnInfo{FieldType: *ftShortNotNull}, - Res: int64(0), - Default: nil, - }, - // mysql.TypeLong, others testCases same as tiny - { - Name: "mysql.TypeLong, others testCases same as tiny", - ColInfo: timodel.ColumnInfo{FieldType: *ftLongNotNull}, - Res: int64(0), - Default: nil, - }, - // mysql.TypeLonglong, others testCases same as tiny - { - Name: "mysql.TypeLonglong, others testCases same as tiny", - ColInfo: timodel.ColumnInfo{FieldType: *ftLongLongNotNull}, - Res: int64(0), - Default: nil, - }, - // mysql.TypeInt24, others testCases same as tiny - { - Name: "mysql.TypeInt24, others testCases same as tiny", - ColInfo: timodel.ColumnInfo{FieldType: *ftInt24NotNull}, - Res: int64(0), - Default: nil, - }, - // mysql.TypeFloat + notnull + nodefault - { - Name: "mysql.TypeFloat + notnull + nodefault", - ColInfo: timodel.ColumnInfo{FieldType: *ftTypeFloatNotNull}, - Res: float32(0), - Default: nil, - }, - // mysql.TypeFloat + notnull + default - { - Name: "mysql.TypeFloat + notnull + default", - ColInfo: timodel.ColumnInfo{ - OriginDefaultValue: float32(-3.1415), - FieldType: *ftTypeFloatNotNull, - }, - Res: float32(-3.1415), - Default: float32(-3.1415), - }, - // mysql.TypeFloat + notnull + default + unsigned - { - Name: "mysql.TypeFloat + notnull + default + unsigned", - ColInfo: timodel.ColumnInfo{ - OriginDefaultValue: float32(3.1415), - FieldType: *ftTypeFloatNotNullUnSigned, - }, - Res: float32(3.1415), - Default: float32(3.1415), - }, - // mysql.TypeFloat + notnull + unsigned - { - Name: "mysql.TypeFloat + notnull + unsigned", - ColInfo: timodel.ColumnInfo{ - FieldType: *ftTypeFloatNotNullUnSigned, - }, - Res: float32(0), - Default: nil, - }, - // mysql.TypeFloat + null + default - { - Name: "mysql.TypeFloat + null + default", - ColInfo: timodel.ColumnInfo{ - OriginDefaultValue: float32(-3.1415), - FieldType: *ftTypeFloatNull, - }, - Res: float32(-3.1415), - Default: float32(-3.1415), - }, - // mysql.TypeFloat + 
null + nodefault - { - Name: "mysql.TypeFloat + null + nodefault", - ColInfo: timodel.ColumnInfo{ - FieldType: *ftTypeFloatNull, - }, - Res: nil, - Default: nil, - }, - // mysql.TypeDouble, other testCases same as float - { - Name: "mysql.TypeDouble, other testCases same as float", - ColInfo: timodel.ColumnInfo{FieldType: *ftTypeDoubleNotNull}, - Res: float64(0), - Default: nil, - }, - // mysql.TypeNewDecimal + notnull + nodefault - { - Name: "mysql.TypeNewDecimal + notnull + nodefault", - ColInfo: timodel.ColumnInfo{FieldType: *ftTypeNewDecimalNotNull}, - Res: "0", // related with Flen and Decimal - Default: nil, - }, - // mysql.TypeNewDecimal + null + nodefault - { - Name: "mysql.TypeNewDecimal + null + nodefault", - ColInfo: timodel.ColumnInfo{FieldType: *ftTypeNewDecimalNull}, - Res: nil, - Default: nil, - }, - // mysql.TypeNewDecimal + null + default - { - Name: "mysql.TypeNewDecimal + null + default", - ColInfo: timodel.ColumnInfo{ - OriginDefaultValue: "-3.14", // no float - FieldType: *ftTypeNewDecimalNotNull, - }, - Res: "-3.14", - Default: "-3.14", - }, - // mysql.TypeNull - { - Name: "mysql.TypeNull", - ColInfo: timodel.ColumnInfo{FieldType: *ftTypeNull}, - Res: nil, - Default: nil, - }, - // mysql.TypeTimestamp + notnull + nodefault - { - Name: "mysql.TypeTimestamp + notnull + nodefault", - ColInfo: timodel.ColumnInfo{FieldType: *ftTypeTimestampNotNull}, - Res: "0000-00-00 00:00:00", - Default: nil, - }, - // mysql.TypeTimestamp + notnull + default - { - Name: "mysql.TypeTimestamp + notnull + default", - ColInfo: timodel.ColumnInfo{ - OriginDefaultValue: "2020-11-19 12:12:12", - FieldType: *ftTypeTimestampNotNull, - }, - Res: "2020-11-19 12:12:12", - Default: "2020-11-19 12:12:12", - }, - // mysql.TypeTimestamp + null + default - { - Name: "mysql.TypeTimestamp + null + default", - ColInfo: timodel.ColumnInfo{ - OriginDefaultValue: "2020-11-19 12:12:12", - FieldType: *ftTypeTimestampNull, - }, - Res: "2020-11-19 12:12:12", - Default: "2020-11-19 12:12:12", - }, - // mysql.TypeDate, other testCases same as TypeTimestamp - { - Name: "mysql.TypeDate, other testCases same as TypeTimestamp", - ColInfo: timodel.ColumnInfo{FieldType: *ftTypeDateNotNull}, - Res: "0000-00-00", - Default: nil, - }, - // mysql.TypeDuration, other testCases same as TypeTimestamp - { - Name: "mysql.TypeDuration, other testCases same as TypeTimestamp", - ColInfo: timodel.ColumnInfo{FieldType: *ftTypeDurationNotNull}, - Res: "00:00:00", - Default: nil, - }, - // mysql.TypeDatetime, other testCases same as TypeTimestamp - { - Name: "mysql.TypeDatetime, other testCases same as TypeTimestamp", - ColInfo: timodel.ColumnInfo{FieldType: *ftTypeDatetimeNotNull}, - Res: "0000-00-00 00:00:00", - Default: nil, - }, - // mysql.TypeYear + notnull + nodefault - { - Name: "mysql.TypeYear + notnull + nodefault", - ColInfo: timodel.ColumnInfo{FieldType: *ftTypeYearNotNull}, - Res: int64(0), - Default: nil, - }, - // mysql.TypeYear + notnull + default - { - Name: "mysql.TypeYear + notnull + default", - ColInfo: timodel.ColumnInfo{ - OriginDefaultValue: "2021", - FieldType: *ftTypeYearNotNull, - }, - // TypeYear default value will be a string and then translate to []byte - Res: "2021", - Default: "2021", - }, - // mysql.TypeNewDate - { - Name: "mysql.TypeNewDate", - ColInfo: timodel.ColumnInfo{FieldType: *ftTypeNewDateNotNull}, - Res: nil, // [TODO] seems not support by TiDB, need check - Default: nil, - }, - // mysql.TypeVarchar + notnull + nodefault - { - Name: "mysql.TypeVarchar + notnull + nodefault", - ColInfo: 
timodel.ColumnInfo{FieldType: *ftTypeVarcharNotNull}, - Res: []byte{}, - Default: nil, - }, - // mysql.TypeVarchar + notnull + default - { - Name: "mysql.TypeVarchar + notnull + default", - ColInfo: timodel.ColumnInfo{ - OriginDefaultValue: "e0", - FieldType: *ftTypeVarcharNotNull, - }, - // TypeVarchar default value will be a string and then translate to []byte - Res: "e0", - Default: "e0", - }, - // mysql.TypeTinyBlob - { - Name: "mysql.TypeTinyBlob", - ColInfo: timodel.ColumnInfo{FieldType: *ftTypeTinyBlobNotNull}, - Res: []byte{}, - Default: nil, - }, - // mysql.TypeMediumBlob - { - Name: "mysql.TypeMediumBlob", - ColInfo: timodel.ColumnInfo{FieldType: *ftTypeMediumBlobNotNull}, - Res: []byte{}, - Default: nil, - }, - // mysql.TypeLongBlob - { - Name: "mysql.TypeLongBlob", - ColInfo: timodel.ColumnInfo{FieldType: *ftTypeLongBlobNotNull}, - Res: []byte{}, - Default: nil, - }, - // mysql.TypeBlob - { - Name: "mysql.TypeBlob", - ColInfo: timodel.ColumnInfo{FieldType: *ftTypeBlobNotNull}, - Res: []byte{}, - Default: nil, - }, - // mysql.TypeVarString - { - Name: "mysql.TypeVarString", - ColInfo: timodel.ColumnInfo{FieldType: *ftTypeVarStringNotNull}, - Res: []byte{}, - Default: nil, - }, - // mysql.TypeString - { - Name: "mysql.TypeString", - ColInfo: timodel.ColumnInfo{FieldType: *ftTypeStringNotNull}, - Res: []byte{}, - Default: nil, - }, - // mysql.TypeBit - { - Name: "mysql.TypeBit", - ColInfo: timodel.ColumnInfo{FieldType: *ftTypeBitNotNull}, - Res: uint64(0), - Default: nil, - }, - // BLOB, TEXT, GEOMETRY or JSON column can't have a default value - // mysql.TypeJSON - { - Name: "mysql.TypeJSON", - ColInfo: timodel.ColumnInfo{FieldType: *ftTypeJSONNotNull}, - Res: "null", - Default: nil, - }, - // mysql.TypeEnum + notnull + nodefault - { - Name: "mysql.TypeEnum + notnull + nodefault", - ColInfo: timodel.ColumnInfo{FieldType: *ftTypeEnumNotNull}, - // TypeEnum value will be a string and then translate to []byte - // NotNull && no default will choose first element - Res: uint64(1), - Default: nil, - }, - // mysql.TypeEnum + notnull + default - { - Name: "mysql.TypeEnum + notnull + default", - ColInfo: timodel.ColumnInfo{ - OriginDefaultValue: "e1", - FieldType: *ftTypeEnumNotNull, - }, - // TypeEnum default value will be a string and then translate to []byte - Res: "e1", - Default: "e1", - }, - // mysql.TypeEnum + null - { - Name: "mysql.TypeEnum + null", - ColInfo: timodel.ColumnInfo{ - FieldType: *ftTypeEnumNull, - }, - Res: nil, - }, - // mysql.TypeSet + notnull - { - Name: "mysql.TypeSet + notnull", - ColInfo: timodel.ColumnInfo{FieldType: *ftTypeSetNotNull}, - Res: uint64(0), - Default: nil, - }, - // mysql.TypeSet + notnull + default - { - Name: "mysql.TypeSet + notnull + default", - ColInfo: timodel.ColumnInfo{ - OriginDefaultValue: "1,e", - FieldType: *ftTypeSetNotNull, - }, - // TypeSet default value will be a string and then translate to []byte - Res: "1,e", - Default: "1,e", - }, - // mysql.TypeGeometry - { - Name: "mysql.TypeGeometry", - ColInfo: timodel.ColumnInfo{FieldType: *ftTypeGeometryNotNull}, - Res: nil, // not support yet - Default: nil, - }, - } - - for _, tc := range testCases { - _, val, _, _, _ := getDefaultOrZeroValue(&tc.ColInfo) - require.Equal(t, tc.Res, val, tc.Name) - val = GetDDLDefaultDefinition(&tc.ColInfo) - require.Equal(t, tc.Default, val, tc.Name) - } -} - -func TestE2ERowLevelChecksum(t *testing.T) { - // changefeed enable checksum functionality - replicaConfig := config.GetDefaultReplicaConfig() - replicaConfig.Integrity.IntegrityCheckLevel = 
integrity.CheckLevelCorrectness - - tk := NewTestKit(t, replicaConfig) - defer tk.Close() - - // upstream TiDB enable checksum functionality - tk.MustExec("set global tidb_enable_row_level_checksum = 1") - tk.MustExec("use test") - - filter, err := filter.NewFilter(replicaConfig, "") - require.NoError(t, err) - - ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope) - require.NoError(t, err) - - changefeed := model.DefaultChangeFeedID("changefeed-test-decode-row") - schemaStorage, err := NewSchemaStorage(tk.GetCurrentMeta(), - ver.Ver, false, changefeed, util.RoleTester, filter) - require.NoError(t, err) - require.NotNil(t, schemaStorage) - - createTableSQL := `create table t ( - id int primary key auto_increment, - - c_tinyint tinyint null, - c_smallint smallint null, - c_mediumint mediumint null, - c_int int null, - c_bigint bigint null, - - c_unsigned_tinyint tinyint unsigned null, - c_unsigned_smallint smallint unsigned null, - c_unsigned_mediumint mediumint unsigned null, - c_unsigned_int int unsigned null, - c_unsigned_bigint bigint unsigned null, - - c_float float null, - c_double double null, - c_decimal decimal null, - c_decimal_2 decimal(10, 4) null, - - c_unsigned_float float unsigned null, - c_unsigned_double double unsigned null, - c_unsigned_decimal decimal unsigned null, - c_unsigned_decimal_2 decimal(10, 4) unsigned null, - - c_date date null, - c_datetime datetime null, - c_timestamp timestamp null, - c_time time null, - c_year year null, - - c_tinytext tinytext null, - c_text text null, - c_mediumtext mediumtext null, - c_longtext longtext null, - - c_tinyblob tinyblob null, - c_blob blob null, - c_mediumblob mediumblob null, - c_longblob longblob null, - - c_char char(16) null, - c_varchar varchar(16) null, - c_binary binary(16) null, - c_varbinary varbinary(16) null, - - c_enum enum ('a','b','c') null, - c_set set ('a','b','c') null, - c_bit bit(64) null, - c_json json null, - --- gbk dmls - name varchar(128) CHARACTER SET gbk, - country char(32) CHARACTER SET gbk, - city varchar(64), - description text CHARACTER SET gbk, - image tinyblob -);` - job := tk.DDL2Job(createTableSQL) - err = schemaStorage.HandleDDLJob(job) - require.NoError(t, err) - - ts := schemaStorage.GetLastSnapshot().CurrentTs() - schemaStorage.AdvanceResolvedTs(ver.Ver) - - mounter := NewMounter(schemaStorage, changefeed, time.Local, filter, replicaConfig.Integrity).(*mounter) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - tableInfo, ok := schemaStorage.GetLastSnapshot().TableByName("test", "t") - require.True(t, ok) - - tk.Session().GetSessionVars().EnableRowLevelChecksum = true - - insertDataSQL := `insert into t values ( - 2, - 1, 2, 3, 4, 5, - 1, 2, 3, 4, 5, - 2020.0202, 2020.0303, 2020.0404, 2021.1208, - 3.1415, 2.7182, 8000, 179394.233, - '2020-02-20', '2020-02-20 02:20:20', '2020-02-20 02:20:20', '02:20:20', '2020', - '89504E470D0A1A0A', '89504E470D0A1A0A', '89504E470D0A1A0A', '89504E470D0A1A0A', - x'89504E470D0A1A0A', x'89504E470D0A1A0A', x'89504E470D0A1A0A', x'89504E470D0A1A0A', - '89504E470D0A1A0A', '89504E470D0A1A0A', x'89504E470D0A1A0A', x'89504E470D0A1A0A', - 'b', 'b,c', b'1000001', '{ -"key1": "value1", -"key2": "value2", -"key3": "123" -}', - '测试', "中国", "上海", "你好,世界", 0xC4E3BAC3CAC0BDE7 -);` - tk.MustExec(insertDataSQL) - - key, value := getLastKeyValueInStore(t, tk.Storage(), tableInfo.ID) - rawKV := &model.RawKVEntry{ - OpType: model.OpTypePut, - Key: key, - Value: value, - StartTs: ts - 1, - CRTs: ts + 1, - } - row, err := 
mounter.unmarshalAndMountRowChanged(ctx, rawKV) - require.NoError(t, err) - require.NotNil(t, row) - require.NotNil(t, row.Checksum) - - expected, ok := mounter.decoder.GetChecksum() - require.True(t, ok) - require.Equal(t, expected, row.Checksum.Current) - require.False(t, row.Checksum.Corrupted) - - // avro encoder enable checksum functionality. - codecConfig := codecCommon.NewConfig(config.ProtocolAvro) - codecConfig.EnableTiDBExtension = true - codecConfig.EnableRowChecksum = true - codecConfig.AvroDecimalHandlingMode = "string" - codecConfig.AvroBigintUnsignedHandlingMode = "string" - - avroEncoder, err := avro.SetupEncoderAndSchemaRegistry4Testing(ctx, codecConfig) - defer avro.TeardownEncoderAndSchemaRegistry4Testing() - require.NoError(t, err) - - topic := "test.t" - - err = avroEncoder.AppendRowChangedEvent(ctx, topic, row, func() {}) - require.NoError(t, err) - msg := avroEncoder.Build() - require.Len(t, msg, 1) - - schemaM, err := avro.NewConfluentSchemaManager( - ctx, "http://127.0.0.1:8081", nil) - require.NoError(t, err) - - // decoder enable checksum functionality. - decoder := avro.NewDecoder(codecConfig, schemaM, topic, time.Local) - err = decoder.AddKeyValue(msg[0].Key, msg[0].Value) - require.NoError(t, err) - - messageType, hasNext, err := decoder.HasNext() - require.NoError(t, err) - require.True(t, hasNext) - require.Equal(t, model.MessageTypeRow, messageType) - - row, err = decoder.NextRowChangedEvent() - // no error, checksum verification passed. - require.NoError(t, err) -} - -func TestDecodeRowEnableChecksum(t *testing.T) { - replicaConfig := config.GetDefaultReplicaConfig() - replicaConfig.Integrity.IntegrityCheckLevel = integrity.CheckLevelCorrectness - - tk := NewTestKit(t, replicaConfig) - defer tk.Close() - - tk.MustExec("set global tidb_enable_row_level_checksum = 1") - tk.MustExec("use test") - - filter, err := filter.NewFilter(replicaConfig, "") - require.NoError(t, err) - - ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope) - require.NoError(t, err) - - changefeed := model.DefaultChangeFeedID("changefeed-test-decode-row") - schemaStorage, err := NewSchemaStorage(tk.GetCurrentMeta(), - ver.Ver, false, changefeed, util.RoleTester, filter) - require.NoError(t, err) - require.NotNil(t, schemaStorage) - - createTableDDL := "create table t (id int primary key, a int)" - job := tk.DDL2Job(createTableDDL) - err = schemaStorage.HandleDDLJob(job) - require.NoError(t, err) - - ts := schemaStorage.GetLastSnapshot().CurrentTs() - schemaStorage.AdvanceResolvedTs(ver.Ver) - - mounter := NewMounter(schemaStorage, changefeed, time.Local, filter, replicaConfig.Integrity).(*mounter) - - ctx := context.Background() - - tableInfo, ok := schemaStorage.GetLastSnapshot().TableByName("test", "t") - require.True(t, ok) - - // row without checksum - tk.Session().GetSessionVars().EnableRowLevelChecksum = false - tk.MustExec("insert into t values (1, 10)") - - key, value := getLastKeyValueInStore(t, tk.Storage(), tableInfo.ID) - rawKV := &model.RawKVEntry{ - OpType: model.OpTypePut, - Key: key, - Value: value, - StartTs: ts - 1, - CRTs: ts + 1, - } - - row, err := mounter.unmarshalAndMountRowChanged(ctx, rawKV) - require.NoError(t, err) - require.NotNil(t, row) - // the upstream tidb does not enable checksum, so the checksum is nil - require.Nil(t, row.Checksum) - - // row with one checksum - tk.Session().GetSessionVars().EnableRowLevelChecksum = true - tk.MustExec("insert into t values (2, 20)") - - key, value = getLastKeyValueInStore(t, tk.Storage(), tableInfo.ID) - 
rawKV = &model.RawKVEntry{ - OpType: model.OpTypePut, - Key: key, - Value: value, - StartTs: ts - 1, - CRTs: ts + 1, - } - row, err = mounter.unmarshalAndMountRowChanged(ctx, rawKV) - require.NoError(t, err) - require.NotNil(t, row) - require.NotNil(t, row.Checksum) - - expected, ok := mounter.decoder.GetChecksum() - require.True(t, ok) - require.Equal(t, expected, row.Checksum.Current) - require.False(t, row.Checksum.Corrupted) - - // row with 2 checksum - tk.MustExec("insert into t values (3, 30)") - job = tk.DDL2Job("alter table t change column a a varchar(10)") - err = schemaStorage.HandleDDLJob(job) - require.NoError(t, err) - - key, value = getLastKeyValueInStore(t, tk.Storage(), tableInfo.ID) - rawKV = &model.RawKVEntry{ - OpType: model.OpTypePut, - Key: key, - Value: value, - StartTs: ts - 1, - CRTs: ts + 1, - } - row, err = mounter.unmarshalAndMountRowChanged(ctx, rawKV) - require.NoError(t, err) - require.NotNil(t, row) - require.NotNil(t, row.Checksum) - - first, ok := mounter.decoder.GetChecksum() - require.True(t, ok) - - extra, ok := mounter.decoder.GetExtraChecksum() - require.True(t, ok) - - if row.Checksum.Current != first { - require.Equal(t, extra, row.Checksum.Current) - } else { - require.Equal(t, first, row.Checksum.Current) - } - require.False(t, row.Checksum.Corrupted) - - // hack the table info to make the checksum corrupted - tableInfo.Columns[0].FieldType = *types.NewFieldType(mysql.TypeVarchar) - - // corrupt-handle-level default to warn, so no error, but the checksum is corrupted - row, err = mounter.unmarshalAndMountRowChanged(ctx, rawKV) - require.NoError(t, err) - require.NotNil(t, row.Checksum) - require.True(t, row.Checksum.Corrupted) - - mounter.integrity.CorruptionHandleLevel = integrity.CorruptionHandleLevelError - _, err = mounter.unmarshalAndMountRowChanged(ctx, rawKV) - require.Error(t, err) - require.ErrorIs(t, err, cerror.ErrCorruptedDataMutation) - - job = tk.DDL2Job("drop table t") - err = schemaStorage.HandleDDLJob(job) - require.NoError(t, err) -} - -func TestDecodeRow(t *testing.T) { - replicaConfig := config.GetDefaultReplicaConfig() - - tk := NewTestKit(t, replicaConfig) - defer tk.Close() - - tk.MustExec("set @@tidb_enable_clustered_index=1;") - tk.MustExec("use test;") - - changefeed := model.DefaultChangeFeedID("changefeed-test-decode-row") - - ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope) - require.NoError(t, err) - - filter, err := filter.NewFilter(replicaConfig, "") - require.NoError(t, err) - - schemaStorage, err := NewSchemaStorage(tk.GetCurrentMeta(), - ver.Ver, false, changefeed, util.RoleTester, filter) - require.NoError(t, err) - - // apply ddl to schemaStorage - ddl := "create table test.student(id int primary key, name char(50), age int, gender char(10))" - job := tk.DDL2Job(ddl) - err = schemaStorage.HandleDDLJob(job) - require.NoError(t, err) - - ts := schemaStorage.GetLastSnapshot().CurrentTs() - - schemaStorage.AdvanceResolvedTs(ver.Ver) - - mounter := NewMounter(schemaStorage, changefeed, time.Local, filter, replicaConfig.Integrity).(*mounter) - - tk.MustExec(`insert into student values(1, "dongmen", 20, "male")`) - tk.MustExec(`update student set age = 27 where id = 1`) - - ctx := context.Background() - decodeAndCheckRowInTable := func(tableID int64, f func(key []byte, value []byte) *model.RawKVEntry) { - walkTableSpanInStore(t, tk.Storage(), tableID, func(key []byte, value []byte) { - rawKV := f(key, value) - - row, err := mounter.unmarshalAndMountRowChanged(ctx, rawKV) - require.NoError(t, err) - 
require.NotNil(t, row) - - if row.Columns != nil { - require.NotNil(t, mounter.decoder) - } - - if row.PreColumns != nil { - require.NotNil(t, mounter.preDecoder) - } - }) - } - - toRawKV := func(key []byte, value []byte) *model.RawKVEntry { - return &model.RawKVEntry{ - OpType: model.OpTypePut, - Key: key, - Value: value, - StartTs: ts - 1, - CRTs: ts + 1, - } - } - - tableInfo, ok := schemaStorage.GetLastSnapshot().TableByName("test", "student") - require.True(t, ok) - - decodeAndCheckRowInTable(tableInfo.ID, toRawKV) - decodeAndCheckRowInTable(tableInfo.ID, toRawKV) - - job = tk.DDL2Job("drop table student") - err = schemaStorage.HandleDDLJob(job) - require.NoError(t, err) -} - -// TestDecodeEventIgnoreRow tests a PolymorphicEvent.Row is nil -// if this event should be filter out by filter. -func TestDecodeEventIgnoreRow(t *testing.T) { - replicaConfig := config.GetDefaultReplicaConfig() - replicaConfig.Filter.Rules = []string{"test.student", "test.computer"} - - tk := NewTestKit(t, replicaConfig) - defer tk.Close() - tk.MustExec("use test;") - - ddls := []string{ - "create table test.student(id int primary key, name char(50), age int, gender char(10))", - "create table test.computer(id int primary key, brand char(50), price int)", - "create table test.poet(id int primary key, name char(50), works char(100))", - } - - cfID := model.DefaultChangeFeedID("changefeed-test-ignore-event") - - f, err := filter.NewFilter(replicaConfig, "") - require.Nil(t, err) - ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope) - require.Nil(t, err) - - schemaStorage, err := NewSchemaStorage(tk.GetCurrentMeta(), - ver.Ver, false, cfID, util.RoleTester, f) - require.Nil(t, err) - // apply ddl to schemaStorage - for _, ddl := range ddls { - job := tk.DDL2Job(ddl) - err = schemaStorage.HandleDDLJob(job) - require.Nil(t, err) - } - - ts := schemaStorage.GetLastSnapshot().CurrentTs() - schemaStorage.AdvanceResolvedTs(ver.Ver) - mounter := NewMounter(schemaStorage, cfID, time.Local, f, replicaConfig.Integrity).(*mounter) - - type testCase struct { - schema string - table string - columns []interface{} - ignored bool - } - - testCases := []testCase{ - { - schema: "test", - table: "student", - columns: []interface{}{1, "dongmen", 20, "male"}, - ignored: false, - }, - { - schema: "test", - table: "computer", - columns: []interface{}{1, "apple", 19999}, - ignored: false, - }, - // This case should be ignored by its table name. - { - schema: "test", - table: "poet", - columns: []interface{}{1, "李白", "静夜思"}, - ignored: true, - }, - } - - ignoredTables := make([]string, 0) - tables := make([]string, 0) - for _, tc := range testCases { - tableInfo, ok := schemaStorage.GetLastSnapshot().TableByName(tc.schema, tc.table) - require.True(t, ok) - // TODO: add other dml event type - insertSQL := prepareInsertSQL(t, tableInfo, len(tc.columns)) - if tc.ignored { - ignoredTables = append(ignoredTables, tc.table) - } else { - tables = append(tables, tc.table) - } - tk.MustExec(insertSQL, tc.columns...) 
- } - ctx := context.Background() - - decodeAndCheckRowInTable := func(tableID int64, f func(key []byte, value []byte) *model.RawKVEntry) int { - var rows int - walkTableSpanInStore(t, tk.Storage(), tableID, func(key []byte, value []byte) { - rawKV := f(key, value) - pEvent := model.NewPolymorphicEvent(rawKV) - err := mounter.DecodeEvent(ctx, pEvent) - require.Nil(t, err) - if pEvent.Row == nil { - return - } - row := pEvent.Row - rows++ - require.Equal(t, row.Table.Schema, "test") - // Now we only allow filter dml event by table, so we only check row's table. - require.NotContains(t, ignoredTables, row.Table.Table) - require.Contains(t, tables, row.Table.Table) - }) - return rows - } - - toRawKV := func(key []byte, value []byte) *model.RawKVEntry { - return &model.RawKVEntry{ - OpType: model.OpTypePut, - Key: key, - Value: value, - StartTs: ts - 1, - CRTs: ts + 1, - } - } - - for _, tc := range testCases { - tableInfo, ok := schemaStorage.GetLastSnapshot().TableByName(tc.schema, tc.table) - require.True(t, ok) - decodeAndCheckRowInTable(tableInfo.ID, toRawKV) - } -} - -func TestBuildTableInfo(t *testing.T) { - cases := []struct { - origin string - recovered string - recoveredWithNilCol string - }{ - { - "CREATE TABLE t1 (c INT PRIMARY KEY)", - "CREATE TABLE `BuildTiDBTableInfo` (\n" + - " `c` int(0) NOT NULL,\n" + - " PRIMARY KEY (`c`(0)) /*T![clustered_index] CLUSTERED */\n" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - "CREATE TABLE `BuildTiDBTableInfo` (\n" + - " `c` int(0) NOT NULL,\n" + - " PRIMARY KEY (`c`(0)) /*T![clustered_index] CLUSTERED */\n" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - }, - { - "CREATE TABLE t1 (" + - " c INT UNSIGNED," + - " c2 VARCHAR(10) NOT NULL," + - " c3 BIT(10) NOT NULL," + - " UNIQUE KEY (c2, c3)" + - ")", - // CDC discards field length. - "CREATE TABLE `BuildTiDBTableInfo` (\n" + - " `c` int(0) unsigned DEFAULT NULL,\n" + - " `c2` varchar(0) NOT NULL,\n" + - " `c3` bit(0) NOT NULL,\n" + - " UNIQUE KEY `idx_0` (`c2`(0),`c3`(0))\n" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - "CREATE TABLE `BuildTiDBTableInfo` (\n" + - " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + - " `c2` varchar(0) NOT NULL,\n" + - " `c3` bit(0) NOT NULL,\n" + - " UNIQUE KEY `idx_0` (`c2`(0),`c3`(0))\n" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - }, - { - "CREATE TABLE t1 (" + - " c INT UNSIGNED," + - " gen INT AS (c+1) VIRTUAL," + - " c2 VARCHAR(10) NOT NULL," + - " gen2 INT AS (c+2) STORED," + - " c3 BIT(10) NOT NULL," + - " PRIMARY KEY (c, c2)" + - ")", - // CDC discards virtual generated column, and generating expression of stored generated column. 
- "CREATE TABLE `BuildTiDBTableInfo` (\n" + - " `c` int(0) unsigned NOT NULL,\n" + - " `c2` varchar(0) NOT NULL,\n" + - " `gen2` int(0) GENERATED ALWAYS AS (pass_generated_check) STORED,\n" + - " `c3` bit(0) NOT NULL,\n" + - " PRIMARY KEY (`c`(0),`c2`(0)) /*T![clustered_index] CLUSTERED */\n" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - "CREATE TABLE `BuildTiDBTableInfo` (\n" + - " `c` int(0) unsigned NOT NULL,\n" + - " `c2` varchar(0) NOT NULL,\n" + - " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + - " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + - " PRIMARY KEY (`c`(0),`c2`(0)) /*T![clustered_index] CLUSTERED */\n" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - }, - { - "CREATE TABLE `t1` (" + - " `a` int(11) NOT NULL," + - " `b` int(11) DEFAULT NULL," + - " `c` int(11) DEFAULT NULL," + - " PRIMARY KEY (`a`) /*T![clustered_index] CLUSTERED */," + - " UNIQUE KEY `b` (`b`)" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - "CREATE TABLE `BuildTiDBTableInfo` (\n" + - " `a` int(0) NOT NULL,\n" + - " `b` int(0) DEFAULT NULL,\n" + - " `c` int(0) DEFAULT NULL,\n" + - " PRIMARY KEY (`a`(0)) /*T![clustered_index] CLUSTERED */,\n" + - " UNIQUE KEY `idx_1` (`b`(0))\n" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - "CREATE TABLE `BuildTiDBTableInfo` (\n" + - " `a` int(0) NOT NULL,\n" + - " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + - " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + - " PRIMARY KEY (`a`(0)) /*T![clustered_index] CLUSTERED */\n" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - }, - { // This case is to check the primary key is correctly identified by BuildTiDBTableInfo - "CREATE TABLE your_table (" + - " id INT NOT NULL," + - " name VARCHAR(50) NOT NULL," + - " email VARCHAR(100) NOT NULL," + - " age INT NOT NULL ," + - " address VARCHAR(200) NOT NULL," + - " PRIMARY KEY (id, name)," + - " UNIQUE INDEX idx_unique_1 (id, email, age)," + - " UNIQUE INDEX idx_unique_2 (name, email, address)" + - " );", - "CREATE TABLE `BuildTiDBTableInfo` (\n" + - " `id` int(0) NOT NULL,\n" + - " `name` varchar(0) NOT NULL,\n" + - " `email` varchar(0) NOT NULL,\n" + - " `age` int(0) NOT NULL,\n" + - " `address` varchar(0) NOT NULL,\n" + - " PRIMARY KEY (`id`(0),`name`(0)) /*T![clustered_index] CLUSTERED */,\n" + - " UNIQUE KEY `idx_1` (`id`(0),`email`(0),`age`(0)),\n" + - " UNIQUE KEY `idx_2` (`name`(0),`email`(0),`address`(0))\n" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - "CREATE TABLE `BuildTiDBTableInfo` (\n" + - " `id` int(0) NOT NULL,\n" + - " `name` varchar(0) NOT NULL,\n" + - " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + - " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + - " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + - " PRIMARY KEY (`id`(0),`name`(0)) /*T![clustered_index] CLUSTERED */,\n" + - " UNIQUE KEY `idx_1` (`id`(0),`omitted`(0),`omitted`(0)),\n" + - " UNIQUE KEY `idx_2` (`name`(0),`omitted`(0),`omitted`(0))\n" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - }, - } - p := parser.New() - for i, c := range cases { - stmt, err := p.ParseOneStmt(c.origin, "", "") - require.NoError(t, err) - originTI, err := ddl.BuildTableInfoFromAST(stmt.(*ast.CreateTableStmt)) - require.NoError(t, err) - cdcTableInfo := 
model.WrapTableInfo(0, "test", 0, originTI) - cols, _, _, _, err := datum2Column(cdcTableInfo, map[int64]types.Datum{}) - require.NoError(t, err) - recoveredTI := model.BuildTiDBTableInfo(cols, cdcTableInfo.IndexColumnsOffset) - handle := sqlmodel.GetWhereHandle(recoveredTI, recoveredTI) - require.NotNil(t, handle.UniqueNotNullIdx) - require.Equal(t, c.recovered, showCreateTable(t, recoveredTI)) - // make sure BuildTiDBTableInfo indentify the correct primary key - if i == 5 { - inexes := recoveredTI.Indices - primaryCount := 0 - for i := range inexes { - if inexes[i].Primary { - primaryCount++ - } - } - require.Equal(t, 1, primaryCount) - require.Equal(t, 2, len(handle.UniqueNotNullIdx.Columns)) - } - // mimic the columns are set to nil when old value feature is disabled - for i := range cols { - if !cols[i].Flag.IsHandleKey() { - cols[i] = nil - } - } - recoveredTI = model.BuildTiDBTableInfo(cols, cdcTableInfo.IndexColumnsOffset) - handle = sqlmodel.GetWhereHandle(recoveredTI, recoveredTI) - require.NotNil(t, handle.UniqueNotNullIdx) - require.Equal(t, c.recoveredWithNilCol, showCreateTable(t, recoveredTI)) - } -} - -var tiCtx = mock.NewContext() - -func showCreateTable(t *testing.T, ti *timodel.TableInfo) string { - result := bytes.NewBuffer(make([]byte, 0, 512)) - err := executor.ConstructResultOfShowCreateTable(tiCtx, ti, autoid.Allocators{}, result) - require.NoError(t, err) - return result.String() -} - -func TestNewDMRowChange(t *testing.T) { - cases := []struct { - origin string - recovered string - }{ - { - "CREATE TABLE t1 (id INT," + - " a1 INT NOT NULL," + - " a3 INT NOT NULL," + - " UNIQUE KEY dex1(a1, a3));", - "CREATE TABLE `BuildTiDBTableInfo` (\n" + - " `id` int(0) DEFAULT NULL,\n" + - " `a1` int(0) NOT NULL,\n" + - " `a3` int(0) NOT NULL,\n" + - " UNIQUE KEY `idx_0` (`a1`(0),`a3`(0))\n" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - }, - } - p := parser.New() - for _, c := range cases { - stmt, err := p.ParseOneStmt(c.origin, "", "") - require.NoError(t, err) - originTI, err := ddl.BuildTableInfoFromAST(stmt.(*ast.CreateTableStmt)) - require.NoError(t, err) - cdcTableInfo := model.WrapTableInfo(0, "test", 0, originTI) - cols := []*model.Column{ - { - Name: "id", Type: 3, Charset: "binary", Flag: 65, Value: 1, Default: nil, - }, - { - Name: "a1", Type: 3, Charset: "binary", Flag: 51, Value: 1, Default: nil, - }, - { - Name: "a3", Type: 3, Charset: "binary", Flag: 51, Value: 2, Default: nil, - }, - } - recoveredTI := model.BuildTiDBTableInfo(cols, cdcTableInfo.IndexColumnsOffset) - require.Equal(t, c.recovered, showCreateTable(t, recoveredTI)) - tableName := &model.TableName{Schema: "db", Table: "t1"} - rowChange := sqlmodel.NewRowChange(tableName, nil, []interface{}{1, 1, 2}, nil, recoveredTI, nil, nil) - sqlGot, argsGot := rowChange.GenSQL(sqlmodel.DMLDelete) - require.Equal(t, "DELETE FROM `db`.`t1` WHERE `a1` = ? AND `a3` = ? LIMIT 1", sqlGot) - require.Equal(t, []interface{}{1, 2}, argsGot) - - sqlGot, argsGot = sqlmodel.GenDeleteSQL(rowChange, rowChange) - require.Equal(t, "DELETE FROM `db`.`t1` WHERE (`a1` = ? AND `a3` = ?) OR (`a1` = ? 
AND `a3` = ?)", sqlGot) - require.Equal(t, []interface{}{1, 2, 1, 2}, argsGot) - } -} - -func TestFormatColVal(t *testing.T) { - ftTypeFloatNotNull := types.NewFieldType(mysql.TypeFloat) - ftTypeFloatNotNull.SetFlag(mysql.NotNullFlag) - col := &timodel.ColumnInfo{FieldType: *ftTypeFloatNotNull} - - var datum types.Datum - - datum.SetFloat32(123.99) - value, _, _, err := formatColVal(datum, col) - require.NoError(t, err) - require.EqualValues(t, float32(123.99), value) - - datum.SetFloat32(float32(math.NaN())) - value, _, warn, err := formatColVal(datum, col) - require.NoError(t, err) - require.Equal(t, float32(0), value) - require.NotZero(t, warn) - - datum.SetFloat32(float32(math.Inf(1))) - value, _, warn, err = formatColVal(datum, col) - require.NoError(t, err) - require.Equal(t, float32(0), value) - require.NotZero(t, warn) - - datum.SetFloat32(float32(math.Inf(-1))) - value, _, warn, err = formatColVal(datum, col) - require.NoError(t, err) - require.Equal(t, float32(0), value) - require.NotZero(t, warn) -} +//func getLastKeyValueInStore(t *testing.T, store tidbkv.Storage, tableID int64) (key, value []byte) { +// txn, err := store.Begin() +// require.NoError(t, err) +// defer txn.Rollback() //nolint:errcheck +// startKey, endKey := spanz.GetTableRange(tableID) +// kvIter, err := txn.Iter(startKey, endKey) +// require.NoError(t, err) +// defer kvIter.Close() +// for kvIter.Valid() { +// key = kvIter.Key() +// value = kvIter.Value() +// err = kvIter.Next() +// require.NoError(t, err) +// } +// return key, value +//} +// +//// We use OriginDefaultValue instead of DefaultValue in the ut, pls ref to +//// https://github.com/pingcap/tiflow/issues/4048 +//// FIXME: OriginDefaultValue seems always to be string, and test more corner case +//// Ref: https://github.com/pingcap/tidb/blob/d2c352980a43bb593db81fd1db996f47af596d91/table/column.go#L489 +//func TestGetDefaultZeroValue(t *testing.T) { +// // Check following MySQL type, ref to: +// // https://github.com/pingcap/tidb/blob/master/parser/mysql/type.go +// +// // mysql flag null +// ftNull := types.NewFieldType(mysql.TypeUnspecified) +// +// // mysql.TypeTiny + notnull +// ftTinyIntNotNull := types.NewFieldType(mysql.TypeTiny) +// ftTinyIntNotNull.AddFlag(mysql.NotNullFlag) +// +// // mysql.TypeTiny + notnull + unsigned +// ftTinyIntNotNullUnSigned := types.NewFieldType(mysql.TypeTiny) +// ftTinyIntNotNullUnSigned.SetFlag(mysql.NotNullFlag) +// ftTinyIntNotNullUnSigned.AddFlag(mysql.UnsignedFlag) +// +// // mysql.TypeTiny + null +// ftTinyIntNull := types.NewFieldType(mysql.TypeTiny) +// +// // mysql.TypeShort + notnull +// ftShortNotNull := types.NewFieldType(mysql.TypeShort) +// ftShortNotNull.SetFlag(mysql.NotNullFlag) +// +// // mysql.TypeLong + notnull +// ftLongNotNull := types.NewFieldType(mysql.TypeLong) +// ftLongNotNull.SetFlag(mysql.NotNullFlag) +// +// // mysql.TypeLonglong + notnull +// ftLongLongNotNull := types.NewFieldType(mysql.TypeLonglong) +// ftLongLongNotNull.SetFlag(mysql.NotNullFlag) +// +// // mysql.TypeInt24 + notnull +// ftInt24NotNull := types.NewFieldType(mysql.TypeInt24) +// ftInt24NotNull.SetFlag(mysql.NotNullFlag) +// +// // mysql.TypeFloat + notnull +// ftTypeFloatNotNull := types.NewFieldType(mysql.TypeFloat) +// ftTypeFloatNotNull.SetFlag(mysql.NotNullFlag) +// +// // mysql.TypeFloat + notnull + unsigned +// ftTypeFloatNotNullUnSigned := types.NewFieldType(mysql.TypeFloat) +// ftTypeFloatNotNullUnSigned.SetFlag(mysql.NotNullFlag | mysql.UnsignedFlag) +// +// // mysql.TypeFloat + null +// ftTypeFloatNull := 
types.NewFieldType(mysql.TypeFloat) +// +// // mysql.TypeDouble + notnull +// ftTypeDoubleNotNull := types.NewFieldType(mysql.TypeDouble) +// ftTypeDoubleNotNull.SetFlag(mysql.NotNullFlag) +// +// // mysql.TypeNewDecimal + notnull +// ftTypeNewDecimalNull := types.NewFieldType(mysql.TypeNewDecimal) +// ftTypeNewDecimalNull.SetFlen(5) +// ftTypeNewDecimalNull.SetDecimal(2) +// +// // mysql.TypeNewDecimal + notnull +// ftTypeNewDecimalNotNull := types.NewFieldType(mysql.TypeNewDecimal) +// ftTypeNewDecimalNotNull.SetFlag(mysql.NotNullFlag) +// ftTypeNewDecimalNotNull.SetFlen(5) +// ftTypeNewDecimalNotNull.SetDecimal(2) +// +// // mysql.TypeNull +// ftTypeNull := types.NewFieldType(mysql.TypeNull) +// +// // mysql.TypeTimestamp + notnull +// ftTypeTimestampNotNull := types.NewFieldType(mysql.TypeTimestamp) +// ftTypeTimestampNotNull.SetFlag(mysql.NotNullFlag) +// +// // mysql.TypeTimestamp + notnull +// ftTypeTimestampNull := types.NewFieldType(mysql.TypeTimestamp) +// +// // mysql.TypeDate + notnull +// ftTypeDateNotNull := types.NewFieldType(mysql.TypeDate) +// ftTypeDateNotNull.SetFlag(mysql.NotNullFlag) +// +// // mysql.TypeDuration + notnull +// ftTypeDurationNotNull := types.NewFieldType(mysql.TypeDuration) +// ftTypeDurationNotNull.SetFlag(mysql.NotNullFlag) +// +// // mysql.TypeDatetime + notnull +// ftTypeDatetimeNotNull := types.NewFieldType(mysql.TypeDatetime) +// ftTypeDatetimeNotNull.SetFlag(mysql.NotNullFlag) +// +// // mysql.TypeYear + notnull +// ftTypeYearNotNull := types.NewFieldType(mysql.TypeYear) +// ftTypeYearNotNull.SetFlag(mysql.NotNullFlag) +// +// // mysql.TypeNewDate + notnull +// ftTypeNewDateNotNull := types.NewFieldType(mysql.TypeNewDate) +// ftTypeNewDateNotNull.SetFlag(mysql.NotNullFlag) +// +// // mysql.TypeVarchar + notnull +// ftTypeVarcharNotNull := types.NewFieldType(mysql.TypeVarchar) +// ftTypeVarcharNotNull.SetFlag(mysql.NotNullFlag) +// +// // mysql.TypeTinyBlob + notnull +// ftTypeTinyBlobNotNull := types.NewFieldType(mysql.TypeTinyBlob) +// ftTypeTinyBlobNotNull.SetFlag(mysql.NotNullFlag) +// +// // mysql.TypeMediumBlob + notnull +// ftTypeMediumBlobNotNull := types.NewFieldType(mysql.TypeMediumBlob) +// ftTypeMediumBlobNotNull.SetFlag(mysql.NotNullFlag) +// +// // mysql.TypeLongBlob + notnull +// ftTypeLongBlobNotNull := types.NewFieldType(mysql.TypeLongBlob) +// ftTypeLongBlobNotNull.SetFlag(mysql.NotNullFlag) +// +// // mysql.TypeBlob + notnull +// ftTypeBlobNotNull := types.NewFieldType(mysql.TypeBlob) +// ftTypeBlobNotNull.SetFlag(mysql.NotNullFlag) +// +// // mysql.TypeVarString + notnull +// ftTypeVarStringNotNull := types.NewFieldType(mysql.TypeVarString) +// ftTypeVarStringNotNull.SetFlag(mysql.NotNullFlag) +// +// // mysql.TypeString + notnull +// ftTypeStringNotNull := types.NewFieldType(mysql.TypeString) +// ftTypeStringNotNull.SetFlag(mysql.NotNullFlag) +// +// // mysql.TypeBit + notnull +// ftTypeBitNotNull := types.NewFieldType(mysql.TypeBit) +// ftTypeBitNotNull.SetFlag(mysql.NotNullFlag) +// +// // mysql.TypeJSON + notnull +// ftTypeJSONNotNull := types.NewFieldType(mysql.TypeJSON) +// ftTypeJSONNotNull.SetFlag(mysql.NotNullFlag) +// +// // mysql.TypeEnum + notnull + nodefault +// ftTypeEnumNotNull := types.NewFieldType(mysql.TypeEnum) +// ftTypeEnumNotNull.SetFlag(mysql.NotNullFlag) +// ftTypeEnumNotNull.SetElems([]string{"e0", "e1"}) +// +// // mysql.TypeEnum + null +// ftTypeEnumNull := types.NewFieldType(mysql.TypeEnum) +// +// // mysql.TypeSet + notnull +// ftTypeSetNotNull := types.NewFieldType(mysql.TypeSet) +// 
ftTypeSetNotNull.SetFlag(mysql.NotNullFlag) +// +// // mysql.TypeGeometry + notnull +// ftTypeGeometryNotNull := types.NewFieldType(mysql.TypeGeometry) +// ftTypeGeometryNotNull.SetFlag(mysql.NotNullFlag) +// +// testCases := []struct { +// Name string +// ColInfo timodel.ColumnInfo +// Res interface{} +// Default interface{} +// }{ +// // mysql flag null +// { +// Name: "mysql flag null", +// ColInfo: timodel.ColumnInfo{FieldType: *ftNull}, +// Res: nil, +// Default: nil, +// }, +// // mysql.TypeTiny + notnull + nodefault +// { +// Name: "mysql.TypeTiny + notnull + nodefault", +// ColInfo: timodel.ColumnInfo{FieldType: *ftTinyIntNotNull.Clone()}, +// Res: int64(0), +// Default: nil, +// }, +// // mysql.TypeTiny + notnull + default +// { +// Name: "mysql.TypeTiny + notnull + default", +// ColInfo: timodel.ColumnInfo{ +// OriginDefaultValue: -1314, +// FieldType: *ftTinyIntNotNull, +// }, +// Res: int64(-1314), +// Default: int64(-1314), +// }, +// // mysql.TypeTiny + notnull + unsigned +// { +// Name: "mysql.TypeTiny + notnull + default + unsigned", +// ColInfo: timodel.ColumnInfo{FieldType: *ftTinyIntNotNullUnSigned}, +// Res: uint64(0), +// Default: nil, +// }, +// // mysql.TypeTiny + notnull + default + unsigned +// { +// Name: "mysql.TypeTiny + notnull + unsigned", +// ColInfo: timodel.ColumnInfo{OriginDefaultValue: uint64(1314), FieldType: *ftTinyIntNotNullUnSigned}, +// Res: uint64(1314), +// Default: uint64(1314), +// }, +// // mysql.TypeTiny + null + default +// { +// Name: "mysql.TypeTiny + null + default", +// ColInfo: timodel.ColumnInfo{ +// OriginDefaultValue: -1314, +// FieldType: *ftTinyIntNull, +// }, +// Res: int64(-1314), +// Default: int64(-1314), +// }, +// // mysql.TypeTiny + null + nodefault +// { +// Name: "mysql.TypeTiny + null + nodefault", +// ColInfo: timodel.ColumnInfo{FieldType: *ftTinyIntNull}, +// Res: nil, +// Default: nil, +// }, +// // mysql.TypeShort, others testCases same as tiny +// { +// Name: "mysql.TypeShort, others testCases same as tiny", +// ColInfo: timodel.ColumnInfo{FieldType: *ftShortNotNull}, +// Res: int64(0), +// Default: nil, +// }, +// // mysql.TypeLong, others testCases same as tiny +// { +// Name: "mysql.TypeLong, others testCases same as tiny", +// ColInfo: timodel.ColumnInfo{FieldType: *ftLongNotNull}, +// Res: int64(0), +// Default: nil, +// }, +// // mysql.TypeLonglong, others testCases same as tiny +// { +// Name: "mysql.TypeLonglong, others testCases same as tiny", +// ColInfo: timodel.ColumnInfo{FieldType: *ftLongLongNotNull}, +// Res: int64(0), +// Default: nil, +// }, +// // mysql.TypeInt24, others testCases same as tiny +// { +// Name: "mysql.TypeInt24, others testCases same as tiny", +// ColInfo: timodel.ColumnInfo{FieldType: *ftInt24NotNull}, +// Res: int64(0), +// Default: nil, +// }, +// // mysql.TypeFloat + notnull + nodefault +// { +// Name: "mysql.TypeFloat + notnull + nodefault", +// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeFloatNotNull}, +// Res: float32(0), +// Default: nil, +// }, +// // mysql.TypeFloat + notnull + default +// { +// Name: "mysql.TypeFloat + notnull + default", +// ColInfo: timodel.ColumnInfo{ +// OriginDefaultValue: float32(-3.1415), +// FieldType: *ftTypeFloatNotNull, +// }, +// Res: float32(-3.1415), +// Default: float32(-3.1415), +// }, +// // mysql.TypeFloat + notnull + default + unsigned +// { +// Name: "mysql.TypeFloat + notnull + default + unsigned", +// ColInfo: timodel.ColumnInfo{ +// OriginDefaultValue: float32(3.1415), +// FieldType: *ftTypeFloatNotNullUnSigned, +// }, +// Res: 
float32(3.1415), +// Default: float32(3.1415), +// }, +// // mysql.TypeFloat + notnull + unsigned +// { +// Name: "mysql.TypeFloat + notnull + unsigned", +// ColInfo: timodel.ColumnInfo{ +// FieldType: *ftTypeFloatNotNullUnSigned, +// }, +// Res: float32(0), +// Default: nil, +// }, +// // mysql.TypeFloat + null + default +// { +// Name: "mysql.TypeFloat + null + default", +// ColInfo: timodel.ColumnInfo{ +// OriginDefaultValue: float32(-3.1415), +// FieldType: *ftTypeFloatNull, +// }, +// Res: float32(-3.1415), +// Default: float32(-3.1415), +// }, +// // mysql.TypeFloat + null + nodefault +// { +// Name: "mysql.TypeFloat + null + nodefault", +// ColInfo: timodel.ColumnInfo{ +// FieldType: *ftTypeFloatNull, +// }, +// Res: nil, +// Default: nil, +// }, +// // mysql.TypeDouble, other testCases same as float +// { +// Name: "mysql.TypeDouble, other testCases same as float", +// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeDoubleNotNull}, +// Res: float64(0), +// Default: nil, +// }, +// // mysql.TypeNewDecimal + notnull + nodefault +// { +// Name: "mysql.TypeNewDecimal + notnull + nodefault", +// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeNewDecimalNotNull}, +// Res: "0", // related with Flen and Decimal +// Default: nil, +// }, +// // mysql.TypeNewDecimal + null + nodefault +// { +// Name: "mysql.TypeNewDecimal + null + nodefault", +// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeNewDecimalNull}, +// Res: nil, +// Default: nil, +// }, +// // mysql.TypeNewDecimal + null + default +// { +// Name: "mysql.TypeNewDecimal + null + default", +// ColInfo: timodel.ColumnInfo{ +// OriginDefaultValue: "-3.14", // no float +// FieldType: *ftTypeNewDecimalNotNull, +// }, +// Res: "-3.14", +// Default: "-3.14", +// }, +// // mysql.TypeNull +// { +// Name: "mysql.TypeNull", +// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeNull}, +// Res: nil, +// Default: nil, +// }, +// // mysql.TypeTimestamp + notnull + nodefault +// { +// Name: "mysql.TypeTimestamp + notnull + nodefault", +// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeTimestampNotNull}, +// Res: "0000-00-00 00:00:00", +// Default: nil, +// }, +// // mysql.TypeTimestamp + notnull + default +// { +// Name: "mysql.TypeTimestamp + notnull + default", +// ColInfo: timodel.ColumnInfo{ +// OriginDefaultValue: "2020-11-19 12:12:12", +// FieldType: *ftTypeTimestampNotNull, +// }, +// Res: "2020-11-19 12:12:12", +// Default: "2020-11-19 12:12:12", +// }, +// // mysql.TypeTimestamp + null + default +// { +// Name: "mysql.TypeTimestamp + null + default", +// ColInfo: timodel.ColumnInfo{ +// OriginDefaultValue: "2020-11-19 12:12:12", +// FieldType: *ftTypeTimestampNull, +// }, +// Res: "2020-11-19 12:12:12", +// Default: "2020-11-19 12:12:12", +// }, +// // mysql.TypeDate, other testCases same as TypeTimestamp +// { +// Name: "mysql.TypeDate, other testCases same as TypeTimestamp", +// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeDateNotNull}, +// Res: "0000-00-00", +// Default: nil, +// }, +// // mysql.TypeDuration, other testCases same as TypeTimestamp +// { +// Name: "mysql.TypeDuration, other testCases same as TypeTimestamp", +// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeDurationNotNull}, +// Res: "00:00:00", +// Default: nil, +// }, +// // mysql.TypeDatetime, other testCases same as TypeTimestamp +// { +// Name: "mysql.TypeDatetime, other testCases same as TypeTimestamp", +// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeDatetimeNotNull}, +// Res: "0000-00-00 00:00:00", +// Default: nil, +// }, +// // mysql.TypeYear + notnull + nodefault +// { 
+// Name: "mysql.TypeYear + notnull + nodefault", +// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeYearNotNull}, +// Res: int64(0), +// Default: nil, +// }, +// // mysql.TypeYear + notnull + default +// { +// Name: "mysql.TypeYear + notnull + default", +// ColInfo: timodel.ColumnInfo{ +// OriginDefaultValue: "2021", +// FieldType: *ftTypeYearNotNull, +// }, +// // TypeYear default value will be a string and then translate to []byte +// Res: "2021", +// Default: "2021", +// }, +// // mysql.TypeNewDate +// { +// Name: "mysql.TypeNewDate", +// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeNewDateNotNull}, +// Res: nil, // [TODO] seems not support by TiDB, need check +// Default: nil, +// }, +// // mysql.TypeVarchar + notnull + nodefault +// { +// Name: "mysql.TypeVarchar + notnull + nodefault", +// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeVarcharNotNull}, +// Res: []byte{}, +// Default: nil, +// }, +// // mysql.TypeVarchar + notnull + default +// { +// Name: "mysql.TypeVarchar + notnull + default", +// ColInfo: timodel.ColumnInfo{ +// OriginDefaultValue: "e0", +// FieldType: *ftTypeVarcharNotNull, +// }, +// // TypeVarchar default value will be a string and then translate to []byte +// Res: "e0", +// Default: "e0", +// }, +// // mysql.TypeTinyBlob +// { +// Name: "mysql.TypeTinyBlob", +// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeTinyBlobNotNull}, +// Res: []byte{}, +// Default: nil, +// }, +// // mysql.TypeMediumBlob +// { +// Name: "mysql.TypeMediumBlob", +// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeMediumBlobNotNull}, +// Res: []byte{}, +// Default: nil, +// }, +// // mysql.TypeLongBlob +// { +// Name: "mysql.TypeLongBlob", +// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeLongBlobNotNull}, +// Res: []byte{}, +// Default: nil, +// }, +// // mysql.TypeBlob +// { +// Name: "mysql.TypeBlob", +// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeBlobNotNull}, +// Res: []byte{}, +// Default: nil, +// }, +// // mysql.TypeVarString +// { +// Name: "mysql.TypeVarString", +// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeVarStringNotNull}, +// Res: []byte{}, +// Default: nil, +// }, +// // mysql.TypeString +// { +// Name: "mysql.TypeString", +// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeStringNotNull}, +// Res: []byte{}, +// Default: nil, +// }, +// // mysql.TypeBit +// { +// Name: "mysql.TypeBit", +// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeBitNotNull}, +// Res: uint64(0), +// Default: nil, +// }, +// // BLOB, TEXT, GEOMETRY or JSON column can't have a default value +// // mysql.TypeJSON +// { +// Name: "mysql.TypeJSON", +// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeJSONNotNull}, +// Res: "null", +// Default: nil, +// }, +// // mysql.TypeEnum + notnull + nodefault +// { +// Name: "mysql.TypeEnum + notnull + nodefault", +// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeEnumNotNull}, +// // TypeEnum value will be a string and then translate to []byte +// // NotNull && no default will choose first element +// Res: uint64(1), +// Default: nil, +// }, +// // mysql.TypeEnum + notnull + default +// { +// Name: "mysql.TypeEnum + notnull + default", +// ColInfo: timodel.ColumnInfo{ +// OriginDefaultValue: "e1", +// FieldType: *ftTypeEnumNotNull, +// }, +// // TypeEnum default value will be a string and then translate to []byte +// Res: "e1", +// Default: "e1", +// }, +// // mysql.TypeEnum + null +// { +// Name: "mysql.TypeEnum + null", +// ColInfo: timodel.ColumnInfo{ +// FieldType: *ftTypeEnumNull, +// }, +// Res: nil, +// }, +// // mysql.TypeSet + notnull +// { +// Name: 
"mysql.TypeSet + notnull", +// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeSetNotNull}, +// Res: uint64(0), +// Default: nil, +// }, +// // mysql.TypeSet + notnull + default +// { +// Name: "mysql.TypeSet + notnull + default", +// ColInfo: timodel.ColumnInfo{ +// OriginDefaultValue: "1,e", +// FieldType: *ftTypeSetNotNull, +// }, +// // TypeSet default value will be a string and then translate to []byte +// Res: "1,e", +// Default: "1,e", +// }, +// // mysql.TypeGeometry +// { +// Name: "mysql.TypeGeometry", +// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeGeometryNotNull}, +// Res: nil, // not support yet +// Default: nil, +// }, +// } +// +// for _, tc := range testCases { +// _, val, _, _, _ := getDefaultOrZeroValue(&tc.ColInfo) +// require.Equal(t, tc.Res, val, tc.Name) +// val = GetDDLDefaultDefinition(&tc.ColInfo) +// require.Equal(t, tc.Default, val, tc.Name) +// } +//} +// +//func TestE2ERowLevelChecksum(t *testing.T) { +// // changefeed enable checksum functionality +// replicaConfig := config.GetDefaultReplicaConfig() +// replicaConfig.Integrity.IntegrityCheckLevel = integrity.CheckLevelCorrectness +// +// tk := NewTestKit(t, replicaConfig) +// defer tk.Close() +// +// // upstream TiDB enable checksum functionality +// tk.MustExec("set global tidb_enable_row_level_checksum = 1") +// tk.MustExec("use test") +// +// filter, err := filter.NewFilter(replicaConfig, "") +// require.NoError(t, err) +// +// ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope) +// require.NoError(t, err) +// +// changefeed := model.DefaultChangeFeedID("changefeed-test-decode-row") +// schemaStorage, err := NewSchemaStorage(tk.GetCurrentMeta(), +// ver.Ver, false, changefeed, util.RoleTester, filter) +// require.NoError(t, err) +// require.NotNil(t, schemaStorage) +// +// createTableSQL := `create table t ( +// id int primary key auto_increment, +// +// c_tinyint tinyint null, +// c_smallint smallint null, +// c_mediumint mediumint null, +// c_int int null, +// c_bigint bigint null, +// +// c_unsigned_tinyint tinyint unsigned null, +// c_unsigned_smallint smallint unsigned null, +// c_unsigned_mediumint mediumint unsigned null, +// c_unsigned_int int unsigned null, +// c_unsigned_bigint bigint unsigned null, +// +// c_float float null, +// c_double double null, +// c_decimal decimal null, +// c_decimal_2 decimal(10, 4) null, +// +// c_unsigned_float float unsigned null, +// c_unsigned_double double unsigned null, +// c_unsigned_decimal decimal unsigned null, +// c_unsigned_decimal_2 decimal(10, 4) unsigned null, +// +// c_date date null, +// c_datetime datetime null, +// c_timestamp timestamp null, +// c_time time null, +// c_year year null, +// +// c_tinytext tinytext null, +// c_text text null, +// c_mediumtext mediumtext null, +// c_longtext longtext null, +// +// c_tinyblob tinyblob null, +// c_blob blob null, +// c_mediumblob mediumblob null, +// c_longblob longblob null, +// +// c_char char(16) null, +// c_varchar varchar(16) null, +// c_binary binary(16) null, +// c_varbinary varbinary(16) null, +// +// c_enum enum ('a','b','c') null, +// c_set set ('a','b','c') null, +// c_bit bit(64) null, +// c_json json null, +// +//-- gbk dmls +// name varchar(128) CHARACTER SET gbk, +// country char(32) CHARACTER SET gbk, +// city varchar(64), +// description text CHARACTER SET gbk, +// image tinyblob +//);` +// job := tk.DDL2Job(createTableSQL) +// err = schemaStorage.HandleDDLJob(job) +// require.NoError(t, err) +// +// ts := schemaStorage.GetLastSnapshot().CurrentTs() +// 
schemaStorage.AdvanceResolvedTs(ver.Ver) +// +// mounter := NewMounter(schemaStorage, changefeed, time.Local, filter, replicaConfig.Integrity).(*mounter) +// +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() +// +// tableInfo, ok := schemaStorage.GetLastSnapshot().TableByName("test", "t") +// require.True(t, ok) +// +// tk.Session().GetSessionVars().EnableRowLevelChecksum = true +// +// insertDataSQL := `insert into t values ( +// 2, +// 1, 2, 3, 4, 5, +// 1, 2, 3, 4, 5, +// 2020.0202, 2020.0303, 2020.0404, 2021.1208, +// 3.1415, 2.7182, 8000, 179394.233, +// '2020-02-20', '2020-02-20 02:20:20', '2020-02-20 02:20:20', '02:20:20', '2020', +// '89504E470D0A1A0A', '89504E470D0A1A0A', '89504E470D0A1A0A', '89504E470D0A1A0A', +// x'89504E470D0A1A0A', x'89504E470D0A1A0A', x'89504E470D0A1A0A', x'89504E470D0A1A0A', +// '89504E470D0A1A0A', '89504E470D0A1A0A', x'89504E470D0A1A0A', x'89504E470D0A1A0A', +// 'b', 'b,c', b'1000001', '{ +//"key1": "value1", +//"key2": "value2", +//"key3": "123" +//}', +// '测试', "中国", "上海", "你好,世界", 0xC4E3BAC3CAC0BDE7 +//);` +// tk.MustExec(insertDataSQL) +// +// key, value := getLastKeyValueInStore(t, tk.Storage(), tableInfo.ID) +// rawKV := &model.RawKVEntry{ +// OpType: model.OpTypePut, +// Key: key, +// Value: value, +// StartTs: ts - 1, +// CRTs: ts + 1, +// } +// row, err := mounter.unmarshalAndMountRowChanged(ctx, rawKV) +// require.NoError(t, err) +// require.NotNil(t, row) +// require.NotNil(t, row.Checksum) +// +// expected, ok := mounter.decoder.GetChecksum() +// require.True(t, ok) +// require.Equal(t, expected, row.Checksum.Current) +// require.False(t, row.Checksum.Corrupted) +// +// // avro encoder enable checksum functionality. +// codecConfig := codecCommon.NewConfig(config.ProtocolAvro) +// codecConfig.EnableTiDBExtension = true +// codecConfig.EnableRowChecksum = true +// codecConfig.AvroDecimalHandlingMode = "string" +// codecConfig.AvroBigintUnsignedHandlingMode = "string" +// +// avroEncoder, err := avro.SetupEncoderAndSchemaRegistry4Testing(ctx, codecConfig) +// defer avro.TeardownEncoderAndSchemaRegistry4Testing() +// require.NoError(t, err) +// +// topic := "test.t" +// +// err = avroEncoder.AppendRowChangedEvent(ctx, topic, row, func() {}) +// require.NoError(t, err) +// msg := avroEncoder.Build() +// require.Len(t, msg, 1) +// +// schemaM, err := avro.NewConfluentSchemaManager( +// ctx, "http://127.0.0.1:8081", nil) +// require.NoError(t, err) +// +// // decoder enable checksum functionality. +// decoder := avro.NewDecoder(codecConfig, schemaM, topic, time.Local) +// err = decoder.AddKeyValue(msg[0].Key, msg[0].Value) +// require.NoError(t, err) +// +// messageType, hasNext, err := decoder.HasNext() +// require.NoError(t, err) +// require.True(t, hasNext) +// require.Equal(t, model.MessageTypeRow, messageType) +// +// row, err = decoder.NextRowChangedEvent() +// // no error, checksum verification passed. 
+// require.NoError(t, err) +//} +// +//func TestDecodeRowEnableChecksum(t *testing.T) { +// replicaConfig := config.GetDefaultReplicaConfig() +// replicaConfig.Integrity.IntegrityCheckLevel = integrity.CheckLevelCorrectness +// +// tk := NewTestKit(t, replicaConfig) +// defer tk.Close() +// +// tk.MustExec("set global tidb_enable_row_level_checksum = 1") +// tk.MustExec("use test") +// +// filter, err := filter.NewFilter(replicaConfig, "") +// require.NoError(t, err) +// +// ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope) +// require.NoError(t, err) +// +// changefeed := model.DefaultChangeFeedID("changefeed-test-decode-row") +// schemaStorage, err := NewSchemaStorage(tk.GetCurrentMeta(), +// ver.Ver, false, changefeed, util.RoleTester, filter) +// require.NoError(t, err) +// require.NotNil(t, schemaStorage) +// +// createTableDDL := "create table t (id int primary key, a int)" +// job := tk.DDL2Job(createTableDDL) +// err = schemaStorage.HandleDDLJob(job) +// require.NoError(t, err) +// +// ts := schemaStorage.GetLastSnapshot().CurrentTs() +// schemaStorage.AdvanceResolvedTs(ver.Ver) +// +// mounter := NewMounter(schemaStorage, changefeed, time.Local, filter, replicaConfig.Integrity).(*mounter) +// +// ctx := context.Background() +// +// tableInfo, ok := schemaStorage.GetLastSnapshot().TableByName("test", "t") +// require.True(t, ok) +// +// // row without checksum +// tk.Session().GetSessionVars().EnableRowLevelChecksum = false +// tk.MustExec("insert into t values (1, 10)") +// +// key, value := getLastKeyValueInStore(t, tk.Storage(), tableInfo.ID) +// rawKV := &model.RawKVEntry{ +// OpType: model.OpTypePut, +// Key: key, +// Value: value, +// StartTs: ts - 1, +// CRTs: ts + 1, +// } +// +// row, err := mounter.unmarshalAndMountRowChanged(ctx, rawKV) +// require.NoError(t, err) +// require.NotNil(t, row) +// // the upstream tidb does not enable checksum, so the checksum is nil +// require.Nil(t, row.Checksum) +// +// // row with one checksum +// tk.Session().GetSessionVars().EnableRowLevelChecksum = true +// tk.MustExec("insert into t values (2, 20)") +// +// key, value = getLastKeyValueInStore(t, tk.Storage(), tableInfo.ID) +// rawKV = &model.RawKVEntry{ +// OpType: model.OpTypePut, +// Key: key, +// Value: value, +// StartTs: ts - 1, +// CRTs: ts + 1, +// } +// row, err = mounter.unmarshalAndMountRowChanged(ctx, rawKV) +// require.NoError(t, err) +// require.NotNil(t, row) +// require.NotNil(t, row.Checksum) +// +// expected, ok := mounter.decoder.GetChecksum() +// require.True(t, ok) +// require.Equal(t, expected, row.Checksum.Current) +// require.False(t, row.Checksum.Corrupted) +// +// // row with 2 checksum +// tk.MustExec("insert into t values (3, 30)") +// job = tk.DDL2Job("alter table t change column a a varchar(10)") +// err = schemaStorage.HandleDDLJob(job) +// require.NoError(t, err) +// +// key, value = getLastKeyValueInStore(t, tk.Storage(), tableInfo.ID) +// rawKV = &model.RawKVEntry{ +// OpType: model.OpTypePut, +// Key: key, +// Value: value, +// StartTs: ts - 1, +// CRTs: ts + 1, +// } +// row, err = mounter.unmarshalAndMountRowChanged(ctx, rawKV) +// require.NoError(t, err) +// require.NotNil(t, row) +// require.NotNil(t, row.Checksum) +// +// first, ok := mounter.decoder.GetChecksum() +// require.True(t, ok) +// +// extra, ok := mounter.decoder.GetExtraChecksum() +// require.True(t, ok) +// +// if row.Checksum.Current != first { +// require.Equal(t, extra, row.Checksum.Current) +// } else { +// require.Equal(t, first, row.Checksum.Current) +// } +// 
require.False(t, row.Checksum.Corrupted)
+//
+//	// hack the table info to make the checksum corrupted
+//	tableInfo.Columns[0].FieldType = *types.NewFieldType(mysql.TypeVarchar)
+//
+//	// corrupt-handle-level defaults to warn, so no error, but the checksum is corrupted
+//	row, err = mounter.unmarshalAndMountRowChanged(ctx, rawKV)
+//	require.NoError(t, err)
+//	require.NotNil(t, row.Checksum)
+//	require.True(t, row.Checksum.Corrupted)
+//
+//	mounter.integrity.CorruptionHandleLevel = integrity.CorruptionHandleLevelError
+//	_, err = mounter.unmarshalAndMountRowChanged(ctx, rawKV)
+//	require.Error(t, err)
+//	require.ErrorIs(t, err, cerror.ErrCorruptedDataMutation)
+//
+//	job = tk.DDL2Job("drop table t")
+//	err = schemaStorage.HandleDDLJob(job)
+//	require.NoError(t, err)
+//}
+//
+//func TestDecodeRow(t *testing.T) {
+//	replicaConfig := config.GetDefaultReplicaConfig()
+//
+//	tk := NewTestKit(t, replicaConfig)
+//	defer tk.Close()
+//
+//	tk.MustExec("set @@tidb_enable_clustered_index=1;")
+//	tk.MustExec("use test;")
+//
+//	changefeed := model.DefaultChangeFeedID("changefeed-test-decode-row")
+//
+//	ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope)
+//	require.NoError(t, err)
+//
+//	filter, err := filter.NewFilter(replicaConfig, "")
+//	require.NoError(t, err)
+//
+//	schemaStorage, err := NewSchemaStorage(tk.GetCurrentMeta(),
+//		ver.Ver, false, changefeed, util.RoleTester, filter)
+//	require.NoError(t, err)
+//
+//	// apply ddl to schemaStorage
+//	ddl := "create table test.student(id int primary key, name char(50), age int, gender char(10))"
+//	job := tk.DDL2Job(ddl)
+//	err = schemaStorage.HandleDDLJob(job)
+//	require.NoError(t, err)
+//
+//	ts := schemaStorage.GetLastSnapshot().CurrentTs()
+//
+//	schemaStorage.AdvanceResolvedTs(ver.Ver)
+//
+//	mounter := NewMounter(schemaStorage, changefeed, time.Local, filter, replicaConfig.Integrity).(*mounter)
+//
+//	tk.MustExec(`insert into student values(1, "dongmen", 20, "male")`)
+//	tk.MustExec(`update student set age = 27 where id = 1`)
+//
+//	ctx := context.Background()
+//	decodeAndCheckRowInTable := func(tableID int64, f func(key []byte, value []byte) *model.RawKVEntry) {
+//		walkTableSpanInStore(t, tk.Storage(), tableID, func(key []byte, value []byte) {
+//			rawKV := f(key, value)
+//
+//			row, err := mounter.unmarshalAndMountRowChanged(ctx, rawKV)
+//			require.NoError(t, err)
+//			require.NotNil(t, row)
+//
+//			if row.Columns != nil {
+//				require.NotNil(t, mounter.decoder)
+//			}
+//
+//			if row.PreColumns != nil {
+//				require.NotNil(t, mounter.preDecoder)
+//			}
+//		})
+//	}
+//
+//	toRawKV := func(key []byte, value []byte) *model.RawKVEntry {
+//		return &model.RawKVEntry{
+//			OpType:  model.OpTypePut,
+//			Key:     key,
+//			Value:   value,
+//			StartTs: ts - 1,
+//			CRTs:    ts + 1,
+//		}
+//	}
+//
+//	tableInfo, ok := schemaStorage.GetLastSnapshot().TableByName("test", "student")
+//	require.True(t, ok)
+//
+//	decodeAndCheckRowInTable(tableInfo.ID, toRawKV)
+//	decodeAndCheckRowInTable(tableInfo.ID, toRawKV)
+//
+//	job = tk.DDL2Job("drop table student")
+//	err = schemaStorage.HandleDDLJob(job)
+//	require.NoError(t, err)
+//}
+//
+//// TestDecodeEventIgnoreRow tests that a PolymorphicEvent.Row is nil
+//// if the event should be filtered out by the filter.
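+//// The replica config in this test only lets "test.student" and
+//// "test.computer" through Filter.Rules, so rows written to "test.poet"
+//// are expected to be dropped at mount time, leaving Row nil.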
+//func TestDecodeEventIgnoreRow(t *testing.T) { +// replicaConfig := config.GetDefaultReplicaConfig() +// replicaConfig.Filter.Rules = []string{"test.student", "test.computer"} +// +// tk := NewTestKit(t, replicaConfig) +// defer tk.Close() +// tk.MustExec("use test;") +// +// ddls := []string{ +// "create table test.student(id int primary key, name char(50), age int, gender char(10))", +// "create table test.computer(id int primary key, brand char(50), price int)", +// "create table test.poet(id int primary key, name char(50), works char(100))", +// } +// +// cfID := model.DefaultChangeFeedID("changefeed-test-ignore-event") +// +// f, err := filter.NewFilter(replicaConfig, "") +// require.Nil(t, err) +// ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope) +// require.Nil(t, err) +// +// schemaStorage, err := NewSchemaStorage(tk.GetCurrentMeta(), +// ver.Ver, false, cfID, util.RoleTester, f) +// require.Nil(t, err) +// // apply ddl to schemaStorage +// for _, ddl := range ddls { +// job := tk.DDL2Job(ddl) +// err = schemaStorage.HandleDDLJob(job) +// require.Nil(t, err) +// } +// +// ts := schemaStorage.GetLastSnapshot().CurrentTs() +// schemaStorage.AdvanceResolvedTs(ver.Ver) +// mounter := NewMounter(schemaStorage, cfID, time.Local, f, replicaConfig.Integrity).(*mounter) +// +// type testCase struct { +// schema string +// table string +// columns []interface{} +// ignored bool +// } +// +// testCases := []testCase{ +// { +// schema: "test", +// table: "student", +// columns: []interface{}{1, "dongmen", 20, "male"}, +// ignored: false, +// }, +// { +// schema: "test", +// table: "computer", +// columns: []interface{}{1, "apple", 19999}, +// ignored: false, +// }, +// // This case should be ignored by its table name. +// { +// schema: "test", +// table: "poet", +// columns: []interface{}{1, "李白", "静夜思"}, +// ignored: true, +// }, +// } +// +// ignoredTables := make([]string, 0) +// tables := make([]string, 0) +// for _, tc := range testCases { +// tableInfo, ok := schemaStorage.GetLastSnapshot().TableByName(tc.schema, tc.table) +// require.True(t, ok) +// // TODO: add other dml event type +// insertSQL := prepareInsertSQL(t, tableInfo, len(tc.columns)) +// if tc.ignored { +// ignoredTables = append(ignoredTables, tc.table) +// } else { +// tables = append(tables, tc.table) +// } +// tk.MustExec(insertSQL, tc.columns...) +// } +// ctx := context.Background() +// +// decodeAndCheckRowInTable := func(tableID int64, f func(key []byte, value []byte) *model.RawKVEntry) int { +// var rows int +// walkTableSpanInStore(t, tk.Storage(), tableID, func(key []byte, value []byte) { +// rawKV := f(key, value) +// pEvent := model.NewPolymorphicEvent(rawKV) +// err := mounter.DecodeEvent(ctx, pEvent) +// require.Nil(t, err) +// if pEvent.Row == nil { +// return +// } +// row := pEvent.Row +// rows++ +// require.Equal(t, row.Table.Schema, "test") +// // Now we only allow filter dml event by table, so we only check row's table. 
+// require.NotContains(t, ignoredTables, row.Table.Table) +// require.Contains(t, tables, row.Table.Table) +// }) +// return rows +// } +// +// toRawKV := func(key []byte, value []byte) *model.RawKVEntry { +// return &model.RawKVEntry{ +// OpType: model.OpTypePut, +// Key: key, +// Value: value, +// StartTs: ts - 1, +// CRTs: ts + 1, +// } +// } +// +// for _, tc := range testCases { +// tableInfo, ok := schemaStorage.GetLastSnapshot().TableByName(tc.schema, tc.table) +// require.True(t, ok) +// decodeAndCheckRowInTable(tableInfo.ID, toRawKV) +// } +//} +// +//func TestBuildTableInfo(t *testing.T) { +// cases := []struct { +// origin string +// recovered string +// recoveredWithNilCol string +// }{ +// { +// "CREATE TABLE t1 (c INT PRIMARY KEY)", +// "CREATE TABLE `BuildTiDBTableInfo` (\n" + +// " `c` int(0) NOT NULL,\n" + +// " PRIMARY KEY (`c`(0)) /*T![clustered_index] CLUSTERED */\n" + +// ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", +// "CREATE TABLE `BuildTiDBTableInfo` (\n" + +// " `c` int(0) NOT NULL,\n" + +// " PRIMARY KEY (`c`(0)) /*T![clustered_index] CLUSTERED */\n" + +// ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", +// }, +// { +// "CREATE TABLE t1 (" + +// " c INT UNSIGNED," + +// " c2 VARCHAR(10) NOT NULL," + +// " c3 BIT(10) NOT NULL," + +// " UNIQUE KEY (c2, c3)" + +// ")", +// // CDC discards field length. +// "CREATE TABLE `BuildTiDBTableInfo` (\n" + +// " `c` int(0) unsigned DEFAULT NULL,\n" + +// " `c2` varchar(0) NOT NULL,\n" + +// " `c3` bit(0) NOT NULL,\n" + +// " UNIQUE KEY `idx_0` (`c2`(0),`c3`(0))\n" + +// ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", +// "CREATE TABLE `BuildTiDBTableInfo` (\n" + +// " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + +// " `c2` varchar(0) NOT NULL,\n" + +// " `c3` bit(0) NOT NULL,\n" + +// " UNIQUE KEY `idx_0` (`c2`(0),`c3`(0))\n" + +// ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", +// }, +// { +// "CREATE TABLE t1 (" + +// " c INT UNSIGNED," + +// " gen INT AS (c+1) VIRTUAL," + +// " c2 VARCHAR(10) NOT NULL," + +// " gen2 INT AS (c+2) STORED," + +// " c3 BIT(10) NOT NULL," + +// " PRIMARY KEY (c, c2)" + +// ")", +// // CDC discards virtual generated column, and generating expression of stored generated column. 
+// "CREATE TABLE `BuildTiDBTableInfo` (\n" + +// " `c` int(0) unsigned NOT NULL,\n" + +// " `c2` varchar(0) NOT NULL,\n" + +// " `gen2` int(0) GENERATED ALWAYS AS (pass_generated_check) STORED,\n" + +// " `c3` bit(0) NOT NULL,\n" + +// " PRIMARY KEY (`c`(0),`c2`(0)) /*T![clustered_index] CLUSTERED */\n" + +// ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", +// "CREATE TABLE `BuildTiDBTableInfo` (\n" + +// " `c` int(0) unsigned NOT NULL,\n" + +// " `c2` varchar(0) NOT NULL,\n" + +// " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + +// " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + +// " PRIMARY KEY (`c`(0),`c2`(0)) /*T![clustered_index] CLUSTERED */\n" + +// ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", +// }, +// { +// "CREATE TABLE `t1` (" + +// " `a` int(11) NOT NULL," + +// " `b` int(11) DEFAULT NULL," + +// " `c` int(11) DEFAULT NULL," + +// " PRIMARY KEY (`a`) /*T![clustered_index] CLUSTERED */," + +// " UNIQUE KEY `b` (`b`)" + +// ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", +// "CREATE TABLE `BuildTiDBTableInfo` (\n" + +// " `a` int(0) NOT NULL,\n" + +// " `b` int(0) DEFAULT NULL,\n" + +// " `c` int(0) DEFAULT NULL,\n" + +// " PRIMARY KEY (`a`(0)) /*T![clustered_index] CLUSTERED */,\n" + +// " UNIQUE KEY `idx_1` (`b`(0))\n" + +// ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", +// "CREATE TABLE `BuildTiDBTableInfo` (\n" + +// " `a` int(0) NOT NULL,\n" + +// " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + +// " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + +// " PRIMARY KEY (`a`(0)) /*T![clustered_index] CLUSTERED */\n" + +// ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", +// }, +// { // This case is to check the primary key is correctly identified by BuildTiDBTableInfo +// "CREATE TABLE your_table (" + +// " id INT NOT NULL," + +// " name VARCHAR(50) NOT NULL," + +// " email VARCHAR(100) NOT NULL," + +// " age INT NOT NULL ," + +// " address VARCHAR(200) NOT NULL," + +// " PRIMARY KEY (id, name)," + +// " UNIQUE INDEX idx_unique_1 (id, email, age)," + +// " UNIQUE INDEX idx_unique_2 (name, email, address)" + +// " );", +// "CREATE TABLE `BuildTiDBTableInfo` (\n" + +// " `id` int(0) NOT NULL,\n" + +// " `name` varchar(0) NOT NULL,\n" + +// " `email` varchar(0) NOT NULL,\n" + +// " `age` int(0) NOT NULL,\n" + +// " `address` varchar(0) NOT NULL,\n" + +// " PRIMARY KEY (`id`(0),`name`(0)) /*T![clustered_index] CLUSTERED */,\n" + +// " UNIQUE KEY `idx_1` (`id`(0),`email`(0),`age`(0)),\n" + +// " UNIQUE KEY `idx_2` (`name`(0),`email`(0),`address`(0))\n" + +// ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", +// "CREATE TABLE `BuildTiDBTableInfo` (\n" + +// " `id` int(0) NOT NULL,\n" + +// " `name` varchar(0) NOT NULL,\n" + +// " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + +// " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + +// " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + +// " PRIMARY KEY (`id`(0),`name`(0)) /*T![clustered_index] CLUSTERED */,\n" + +// " UNIQUE KEY `idx_1` (`id`(0),`omitted`(0),`omitted`(0)),\n" + +// " UNIQUE KEY `idx_2` (`name`(0),`omitted`(0),`omitted`(0))\n" + +// ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", +// }, +// } +// p := parser.New() +// for i, c := range cases { +// stmt, err := p.ParseOneStmt(c.origin, "", "") 
+//		require.NoError(t, err)
+//		originTI, err := ddl.BuildTableInfoFromAST(stmt.(*ast.CreateTableStmt))
+//		require.NoError(t, err)
+//		cdcTableInfo := model.WrapTableInfo(0, "test", 0, originTI)
+//		cols, _, _, _, err := datum2Column(cdcTableInfo, map[int64]types.Datum{})
+//		require.NoError(t, err)
+//		recoveredTI := model.BuildTiDBTableInfo(cols, cdcTableInfo.IndexColumnsOffset)
+//		handle := sqlmodel.GetWhereHandle(recoveredTI, recoveredTI)
+//		require.NotNil(t, handle.UniqueNotNullIdx)
+//		require.Equal(t, c.recovered, showCreateTable(t, recoveredTI))
+//		// make sure BuildTiDBTableInfo identifies the correct primary key
+//		if i == 5 {
+//			inexes := recoveredTI.Indices
+//			primaryCount := 0
+//			for i := range inexes {
+//				if inexes[i].Primary {
+//					primaryCount++
+//				}
+//			}
+//			require.Equal(t, 1, primaryCount)
+//			require.Equal(t, 2, len(handle.UniqueNotNullIdx.Columns))
+//		}
+//		// mimic the case where columns are set to nil when the old value feature is disabled
+//		for i := range cols {
+//			if !cols[i].Flag.IsHandleKey() {
+//				cols[i] = nil
+//			}
+//		}
+//		recoveredTI = model.BuildTiDBTableInfo(cols, cdcTableInfo.IndexColumnsOffset)
+//		handle = sqlmodel.GetWhereHandle(recoveredTI, recoveredTI)
+//		require.NotNil(t, handle.UniqueNotNullIdx)
+//		require.Equal(t, c.recoveredWithNilCol, showCreateTable(t, recoveredTI))
+//	}
+//}
+//
+//var tiCtx = mock.NewContext()
+//
+//func showCreateTable(t *testing.T, ti *timodel.TableInfo) string {
+//	result := bytes.NewBuffer(make([]byte, 0, 512))
+//	err := executor.ConstructResultOfShowCreateTable(tiCtx, ti, autoid.Allocators{}, result)
+//	require.NoError(t, err)
+//	return result.String()
+//}
+//
+//func TestNewDMRowChange(t *testing.T) {
+//	cases := []struct {
+//		origin    string
+//		recovered string
+//	}{
+//		{
+//			"CREATE TABLE t1 (id INT," +
+//				" a1 INT NOT NULL," +
+//				" a3 INT NOT NULL," +
+//				" UNIQUE KEY dex1(a1, a3));",
+//			"CREATE TABLE `BuildTiDBTableInfo` (\n" +
+//				"  `id` int(0) DEFAULT NULL,\n" +
+//				"  `a1` int(0) NOT NULL,\n" +
+//				"  `a3` int(0) NOT NULL,\n" +
+//				"  UNIQUE KEY `idx_0` (`a1`(0),`a3`(0))\n" +
+//				") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin",
+//		},
+//	}
+//	p := parser.New()
+//	for _, c := range cases {
+//		stmt, err := p.ParseOneStmt(c.origin, "", "")
+//		require.NoError(t, err)
+//		originTI, err := ddl.BuildTableInfoFromAST(stmt.(*ast.CreateTableStmt))
+//		require.NoError(t, err)
+//		cdcTableInfo := model.WrapTableInfo(0, "test", 0, originTI)
+//		cols := []*model.Column{
+//			{
+//				Name: "id", Type: 3, Charset: "binary", Flag: 65, Value: 1, Default: nil,
+//			},
+//			{
+//				Name: "a1", Type: 3, Charset: "binary", Flag: 51, Value: 1, Default: nil,
+//			},
+//			{
+//				Name: "a3", Type: 3, Charset: "binary", Flag: 51, Value: 2, Default: nil,
+//			},
+//		}
+//		recoveredTI := model.BuildTiDBTableInfo(cols, cdcTableInfo.IndexColumnsOffset)
+//		require.Equal(t, c.recovered, showCreateTable(t, recoveredTI))
+//		tableName := &model.TableName{Schema: "db", Table: "t1"}
+//		rowChange := sqlmodel.NewRowChange(tableName, nil, []interface{}{1, 1, 2}, nil, recoveredTI, nil, nil)
+//		sqlGot, argsGot := rowChange.GenSQL(sqlmodel.DMLDelete)
+//		require.Equal(t, "DELETE FROM `db`.`t1` WHERE `a1` = ? AND `a3` = ? LIMIT 1", sqlGot)
+//		require.Equal(t, []interface{}{1, 2}, argsGot)
+//
+//		sqlGot, argsGot = sqlmodel.GenDeleteSQL(rowChange, rowChange)
+//		require.Equal(t, "DELETE FROM `db`.`t1` WHERE (`a1` = ? AND `a3` = ?) OR (`a1` = ? 
AND `a3` = ?)", sqlGot) +// require.Equal(t, []interface{}{1, 2, 1, 2}, argsGot) +// } +//} +// +//func TestFormatColVal(t *testing.T) { +// ftTypeFloatNotNull := types.NewFieldType(mysql.TypeFloat) +// ftTypeFloatNotNull.SetFlag(mysql.NotNullFlag) +// col := &timodel.ColumnInfo{FieldType: *ftTypeFloatNotNull} +// +// var datum types.Datum +// +// datum.SetFloat32(123.99) +// value, _, _, err := formatColVal(datum, col) +// require.NoError(t, err) +// require.EqualValues(t, float32(123.99), value) +// +// datum.SetFloat32(float32(math.NaN())) +// value, _, warn, err := formatColVal(datum, col) +// require.NoError(t, err) +// require.Equal(t, float32(0), value) +// require.NotZero(t, warn) +// +// datum.SetFloat32(float32(math.Inf(1))) +// value, _, warn, err = formatColVal(datum, col) +// require.NoError(t, err) +// require.Equal(t, float32(0), value) +// require.NotZero(t, warn) +// +// datum.SetFloat32(float32(math.Inf(-1))) +// value, _, warn, err = formatColVal(datum, col) +// require.NoError(t, err) +// require.Equal(t, float32(0), value) +// require.NotZero(t, warn) +//} diff --git a/cdc/entry/testkit.go b/cdc/entry/testkit.go index e10a7f3f9ed..0f6be0bef38 100644 --- a/cdc/entry/testkit.go +++ b/cdc/entry/testkit.go @@ -50,6 +50,7 @@ type TestKit struct { schemaStorage SchemaStorage mounter Mounter + filter filter.Filter } // NewTestKit return a new testkit @@ -88,6 +89,7 @@ func NewTestKit(t *testing.T, replicaConfig *config.ReplicaConfig) *TestKit { TestKit: tk, storage: store, domain: domain, + filter: filter, schemaStorage: schemaStorage, mounter: mounter, } @@ -95,10 +97,8 @@ func NewTestKit(t *testing.T, replicaConfig *config.ReplicaConfig) *TestKit { func (tk *TestKit) DML2Event(dml string, schema, table string) *model.RowChangedEvent { tk.MustExec(dml) - tableID, ok := tk.schemaStorage.GetLastSnapshot().TableIDByName(schema, table) require.True(tk.t, ok) - key, value := tk.getLastKeyValue(tableID) ts := tk.schemaStorage.GetLastSnapshot().CurrentTs() @@ -135,61 +135,62 @@ func (tk *TestKit) getLastKeyValue(tableID int64) (key, value []byte) { return key, value } -// DDL2Job executes the DDL stmt and returns the DDL job -func (tk *TestKit) DDL2Job(ddl string) *timodel.Job { +// DDL2TableInfo executes the DDL stmt and returns the DDL job +func (tk *TestKit) DDL2TableInfo(ddl string) *model.TableInfo { tk.MustExec(ddl) jobs, err := tiddl.GetLastNHistoryDDLJobs(tk.GetCurrentMeta(), 1) - require.Nil(tk.t, err) + require.NoError(tk.t, err) require.Len(tk.t, jobs, 1) // Set State from Synced to Done. // Because jobs are put to history queue after TiDB alter its state from // Done to Synced. jobs[0].State = timodel.JobStateDone res := jobs[0] - if res.Type != timodel.ActionRenameTables { - return res - } + if res.Type == timodel.ActionRenameTables { + // the RawArgs field in job fetched from tidb snapshot meta is incorrent, + // so we manually construct `job.RawArgs` to do the workaround. + // we assume the old schema name is same as the new schema name here. 
+ // for example, "ALTER TABLE RENAME test.t1 TO test.t1, test.t2 to test.t22", schema name is "test" + schema := strings.Split(strings.Split(strings.Split(res.Query, ",")[1], " ")[1], ".")[0] + tableNum := len(res.BinlogInfo.MultipleTableInfos) + oldSchemaIDs := make([]int64, tableNum) + for i := 0; i < tableNum; i++ { + oldSchemaIDs[i] = res.SchemaID + } + oldTableIDs := make([]int64, tableNum) + for i := 0; i < tableNum; i++ { + oldTableIDs[i] = res.BinlogInfo.MultipleTableInfos[i].ID + } + newTableNames := make([]timodel.CIStr, tableNum) + for i := 0; i < tableNum; i++ { + newTableNames[i] = res.BinlogInfo.MultipleTableInfos[i].Name + } + oldSchemaNames := make([]timodel.CIStr, tableNum) + for i := 0; i < tableNum; i++ { + oldSchemaNames[i] = timodel.NewCIStr(schema) + } + newSchemaIDs := oldSchemaIDs - // the RawArgs field in job fetched from tidb snapshot meta is incorrent, - // so we manually construct `job.RawArgs` to do the workaround. - // we assume the old schema name is same as the new schema name here. - // for example, "ALTER TABLE RENAME test.t1 TO test.t1, test.t2 to test.t22", schema name is "test" - schema := strings.Split(strings.Split(strings.Split(res.Query, ",")[1], " ")[1], ".")[0] - tableNum := len(res.BinlogInfo.MultipleTableInfos) - oldSchemaIDs := make([]int64, tableNum) - for i := 0; i < tableNum; i++ { - oldSchemaIDs[i] = res.SchemaID - } - oldTableIDs := make([]int64, tableNum) - for i := 0; i < tableNum; i++ { - oldTableIDs[i] = res.BinlogInfo.MultipleTableInfos[i].ID - } - newTableNames := make([]timodel.CIStr, tableNum) - for i := 0; i < tableNum; i++ { - newTableNames[i] = res.BinlogInfo.MultipleTableInfos[i].Name - } - oldSchemaNames := make([]timodel.CIStr, tableNum) - for i := 0; i < tableNum; i++ { - oldSchemaNames[i] = timodel.NewCIStr(schema) - } - newSchemaIDs := oldSchemaIDs + args := []interface{}{ + oldSchemaIDs, newSchemaIDs, + newTableNames, oldTableIDs, oldSchemaNames, + } + rawArgs, err := json.Marshal(args) + require.NoError(tk.t, err) + res.RawArgs = rawArgs - args := []interface{}{ - oldSchemaIDs, newSchemaIDs, - newTableNames, oldTableIDs, oldSchemaNames, + err = tk.schemaStorage.HandleDDLJob(res) + require.NoError(tk.t, err) } - rawArgs, err := json.Marshal(args) - require.NoError(tk.t, err) - res.RawArgs = rawArgs - - err = tk.schemaStorage.HandleDDLJob(res) - require.NoError(tk.t, err) ver, err := tk.storage.CurrentVersion(oracle.GlobalTxnScope) require.NoError(tk.t, err) tk.schemaStorage.AdvanceResolvedTs(ver.Ver) - return res + tableInfo, ok := tk.schemaStorage.GetLastSnapshot().TableByName(res.SchemaName, res.TableName) + require.True(tk.t, ok) + + return tableInfo } // DDL2Jobs executes the DDL statement and return the corresponding DDL jobs. 
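// As an aside on the rename-tables workaround in DDL2TableInfo above, here is a
// minimal, self-contained sketch of how the schema name falls out of the query
// text, assuming a two-table rename of the shape shown in the comment (the
// variable names below are illustrative only, not part of the patch):
//
//	query := "RENAME TABLE test.t1 TO test.t11, test.t2 TO test.t22"
//	secondClause := strings.Split(query, ",")[1]     // " test.t2 TO test.t22"
//	qualified := strings.Split(secondClause, " ")[1] // "test.t2"
//	schema := strings.Split(qualified, ".")[0]       // "test"
//
// The split chain relies on the statement containing at least one comma and on
// every renamed table living in the same schema, which is exactly what the
// comment in DDL2TableInfo assumes.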
@@ -210,25 +211,7 @@ func (tk *TestKit) DDL2Jobs(ddl string, jobCnt int) []*timodel.Job {
 return jobs
 }
-// Storage returns the tikv storage
-func (tk *TestKit) Storage() kv.Storage {
- return tk.storage
-}
-
-// GetCurrentMeta return the current meta snapshot
-func (tk *TestKit) GetCurrentMeta() *timeta.Meta {
- ver, err := tk.storage.CurrentVersion(oracle.GlobalTxnScope)
- require.Nil(tk.t, err)
- return timeta.NewSnapshotMeta(tk.storage.GetSnapshot(ver))
-}
-
-// Close closes the helper
-func (tk *TestKit) Close() {
- tk.domain.Close()
- tk.storage.Close() //nolint:errcheck
-}
-
-func (tk *TestKit) GetAllHistoryDDLJob(f filter.Filter) ([]*timodel.Job, error) {
+func (tk *TestKit) GetAllHistoryDDLJob() ([]*timodel.Job, error) {
 s, err := session.CreateSession(tk.storage)
 if err != nil {
 return nil, errors.Trace(err)
 }
@@ -252,8 +235,8 @@ func (tk *TestKit) GetAllHistoryDDLJob(f filter.Filter) ([]*timodel.Job, error)
 return nil, errors.Trace(err)
 }
 for i, job := range jobs {
- ignoreSchema := f.ShouldIgnoreSchema(job.SchemaName)
- ignoreTable := f.ShouldIgnoreTable(job.SchemaName, job.TableName)
+ ignoreSchema := tk.filter.ShouldIgnoreSchema(job.SchemaName)
+ ignoreTable := tk.filter.ShouldIgnoreTable(job.SchemaName, job.TableName)
 if ignoreSchema || ignoreTable {
 log.Info("Ignore ddl job", zap.Stringer("job", job))
 continue
@@ -266,3 +249,21 @@ func (tk *TestKit) GetAllHistoryDDLJob(f filter.Filter) ([]*timodel.Job, error)
 }
 return jobs, nil
 }
+
+// Storage returns the tikv storage
+func (tk *TestKit) Storage() kv.Storage {
+ return tk.storage
+}
+
+// GetCurrentMeta returns the current meta snapshot
+func (tk *TestKit) GetCurrentMeta() *timeta.Meta {
+ ver, err := tk.storage.CurrentVersion(oracle.GlobalTxnScope)
+ require.Nil(tk.t, err)
+ return timeta.NewSnapshotMeta(tk.storage.GetSnapshot(ver))
+}
+
+// Close closes the helper
+func (tk *TestKit) Close() {
+ tk.domain.Close()
+ tk.storage.Close() //nolint:errcheck
+}
From e678896d7ef6f249ec51b5dd2dc45629e53dd40d Mon Sep 17 00:00:00 2001
From: 3AceShowHand
Date: Mon, 27 Nov 2023 17:02:51 +0800
Subject: [PATCH 11/11] fix test.
--- cdc/entry/mounter_test.go | 2981 +++++++++++++++--------------- cdc/entry/schema_storage_test.go | 11 +- cdc/entry/testkit.go | 91 +- 3 files changed, 1590 insertions(+), 1493 deletions(-) diff --git a/cdc/entry/mounter_test.go b/cdc/entry/mounter_test.go index f543cd1e912..5294278b2e8 100644 --- a/cdc/entry/mounter_test.go +++ b/cdc/entry/mounter_test.go @@ -14,16 +14,36 @@ package entry import ( + "bytes" + "context" + "math" "strings" "testing" + "time" "github.com/pingcap/log" + "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/executor" tidbkv "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta/autoid" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/ast" + timodel "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/mock" "github.com/pingcap/tiflow/cdc/model" "github.com/pingcap/tiflow/pkg/config" + cerror "github.com/pingcap/tiflow/pkg/errors" + "github.com/pingcap/tiflow/pkg/filter" + "github.com/pingcap/tiflow/pkg/integrity" + "github.com/pingcap/tiflow/pkg/sink/codec/avro" + "github.com/pingcap/tiflow/pkg/sink/codec/common" "github.com/pingcap/tiflow/pkg/spanz" + "github.com/pingcap/tiflow/pkg/sqlmodel" + "github.com/pingcap/tiflow/pkg/util" "github.com/stretchr/testify/require" + "github.com/tikv/client-go/v2/oracle" "go.uber.org/zap" ) @@ -39,33 +59,38 @@ func TestMounterDisableOldValue(t *testing.T) { // []int for approximateBytes of rows. putApproximateBytes [][]int delApproximateBytes [][]int - }{{ - tableName: "simple", - createTableDDL: "create table simple(id int primary key)", - values: [][]interface{}{{1}, {2}, {3}, {4}, {5}}, - putApproximateBytes: [][]int{{346, 346, 346, 346, 346}}, - delApproximateBytes: [][]int{{346, 346, 346, 346, 346}}, - }, { - tableName: "no_pk", - createTableDDL: "create table no_pk(id int not null unique key)", - values: [][]interface{}{{1}, {2}, {3}, {4}, {5}}, - putApproximateBytes: [][]int{{345, 345, 345, 345, 345}}, - delApproximateBytes: [][]int{{217, 217, 217, 217, 217}}, - }, { - tableName: "many_index", - createTableDDL: "create table many_index(id int not null unique key, c1 int unique key, c2 int, INDEX (c2))", - values: [][]interface{}{{1, 1, 1}, {2, 2, 2}, {3, 3, 3}, {4, 4, 4}, {5, 5, 5}}, - putApproximateBytes: [][]int{{638, 638, 638, 638, 638}}, - delApproximateBytes: [][]int{{254, 254, 254, 254, 254}}, - }, { - tableName: "default_value", - createTableDDL: "create table default_value(id int primary key, c1 int, c2 int not null default 5, c3 varchar(20), c4 varchar(20) not null default '666')", - values: [][]interface{}{{1}, {2}, {3}, {4}, {5}}, - putApproximateBytes: [][]int{{676, 676, 676, 676, 676}}, - delApproximateBytes: [][]int{{353, 353, 353, 353, 353}}, - }, { - tableName: "partition_table", - createTableDDL: `CREATE TABLE partition_table ( + }{ + { + tableName: "simple", + createTableDDL: "create table simple(id int primary key)", + values: [][]interface{}{{1}, {2}, {3}, {4}, {5}}, + putApproximateBytes: [][]int{{346, 346, 346, 346, 346}}, + delApproximateBytes: [][]int{{346, 346, 346, 346, 346}}, + }, + { + tableName: "no_pk", + createTableDDL: "create table no_pk(id int not null unique key)", + values: [][]interface{}{{1}, {2}, {3}, {4}, {5}}, + putApproximateBytes: [][]int{{345, 345, 345, 345, 345}}, + delApproximateBytes: [][]int{{217, 217, 217, 217, 217}}, + }, + { + tableName: "many_index", + createTableDDL: "create table many_index(id int not null unique key, c1 int unique key, c2 int, 
INDEX (c2))", + values: [][]interface{}{{1, 1, 1}, {2, 2, 2}, {3, 3, 3}, {4, 4, 4}, {5, 5, 5}}, + putApproximateBytes: [][]int{{638, 638, 638, 638, 638}}, + delApproximateBytes: [][]int{{254, 254, 254, 254, 254}}, + }, + { + tableName: "default_value", + createTableDDL: "create table default_value(id int primary key, c1 int, c2 int not null default 5, c3 varchar(20), c4 varchar(20) not null default '666')", + values: [][]interface{}{{1}, {2}, {3}, {4}, {5}}, + putApproximateBytes: [][]int{{676, 676, 676, 676, 676}}, + delApproximateBytes: [][]int{{353, 353, 353, 353, 353}}, + }, + { + tableName: "partition_table", + createTableDDL: `CREATE TABLE partition_table ( id INT NOT NULL AUTO_INCREMENT UNIQUE KEY, fname VARCHAR(25) NOT NULL, lname VARCHAR(25) NOT NULL, @@ -80,18 +105,19 @@ func TestMounterDisableOldValue(t *testing.T) { PARTITION p2 VALUES LESS THAN (15), PARTITION p3 VALUES LESS THAN (20) )`, - values: [][]interface{}{ - {1, "aa", "bb", 12, 12}, - {6, "aac", "bab", 51, 51}, - {11, "aad", "bsb", 71, 61}, - {18, "aae", "bbf", 21, 14}, - {15, "afa", "bbc", 11, 12}, - }, - putApproximateBytes: [][]int{{775}, {777}, {777}, {777, 777}}, - delApproximateBytes: [][]int{{227}, {227}, {227}, {227, 227}}, - }, { - tableName: "tp_int", - createTableDDL: `create table tp_int + values: [][]interface{}{ + {1, "aa", "bb", 12, 12}, + {6, "aac", "bab", 51, 51}, + {11, "aad", "bsb", 71, 61}, + {18, "aae", "bbf", 21, 14}, + {15, "afa", "bbc", 11, 12}, + }, + putApproximateBytes: [][]int{{775}, {777}, {777}, {777, 777}}, + delApproximateBytes: [][]int{{227}, {227}, {227}, {227, 227}}, + }, + { + tableName: "tp_int", + createTableDDL: `create table tp_int ( id int auto_increment, c_tinyint tinyint null, @@ -102,18 +128,19 @@ func TestMounterDisableOldValue(t *testing.T) { constraint pk primary key (id) );`, - values: [][]interface{}{ - {1, 1, 2, 3, 4, 5}, - {2}, - {3, 3, 4, 5, 6, 7}, - {4, 127, 32767, 8388607, 2147483647, 9223372036854775807}, - {5, -128, -32768, -8388608, -2147483648, -9223372036854775808}, - }, - putApproximateBytes: [][]int{{986, 626, 986, 986, 986}}, - delApproximateBytes: [][]int{{346, 346, 346, 346, 346}}, - }, { - tableName: "tp_text", - createTableDDL: `create table tp_text + values: [][]interface{}{ + {1, 1, 2, 3, 4, 5}, + {2}, + {3, 3, 4, 5, 6, 7}, + {4, 127, 32767, 8388607, 2147483647, 9223372036854775807}, + {5, -128, -32768, -8388608, -2147483648, -9223372036854775808}, + }, + putApproximateBytes: [][]int{{986, 626, 986, 986, 986}}, + delApproximateBytes: [][]int{{346, 346, 346, 346, 346}}, + }, + { + tableName: "tp_text", + createTableDDL: `create table tp_text ( id int auto_increment, c_tinytext tinytext null, @@ -131,31 +158,32 @@ func TestMounterDisableOldValue(t *testing.T) { constraint pk primary key (id) );`, - values: [][]interface{}{ - {1}, - { - 2, "89504E470D0A1A0A", "89504E470D0A1A0A", "89504E470D0A1A0A", "89504E470D0A1A0A", "89504E470D0A1A0A", - "89504E470D0A1A0A", - string([]byte{0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A}), - string([]byte{0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A}), - string([]byte{0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A}), - string([]byte{0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A}), - string([]byte{0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A}), - string([]byte{0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A}), - }, - { - 3, "bug free", "bug free", "bug free", "bug free", "bug free", "bug free", "bug free", "bug free", - "bug free", "bug free", "bug free", "bug free", + values: [][]interface{}{ + {1}, + { + 2, 
"89504E470D0A1A0A", "89504E470D0A1A0A", "89504E470D0A1A0A", "89504E470D0A1A0A", "89504E470D0A1A0A", + "89504E470D0A1A0A", + string([]byte{0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A}), + string([]byte{0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A}), + string([]byte{0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A}), + string([]byte{0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A}), + string([]byte{0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A}), + string([]byte{0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A}), + }, + { + 3, "bug free", "bug free", "bug free", "bug free", "bug free", "bug free", "bug free", "bug free", + "bug free", "bug free", "bug free", "bug free", + }, + {4, "", "", "", "", "", "", "", "", "", "", "", ""}, + {5, "你好", "我好", "大家好", "道路", "千万条", "安全", "第一条", "行车", "不规范", "亲人", "两行泪", "!"}, + {6, "😀", "😃", "😄", "😁", "😆", "😅", "😂", "🤣", "☺️", "😊", "😇", "🙂"}, }, - {4, "", "", "", "", "", "", "", "", "", "", "", ""}, - {5, "你好", "我好", "大家好", "道路", "千万条", "安全", "第一条", "行车", "不规范", "亲人", "两行泪", "!"}, - {6, "😀", "😃", "😄", "😁", "😆", "😅", "😂", "🤣", "☺️", "😊", "😇", "🙂"}, - }, - putApproximateBytes: [][]int{{1019, 1459, 1411, 1323, 1398, 1369}}, - delApproximateBytes: [][]int{{347, 347, 347, 347, 347, 347}}, - }, { - tableName: "tp_time", - createTableDDL: `create table tp_time + putApproximateBytes: [][]int{{1019, 1459, 1411, 1323, 1398, 1369}}, + delApproximateBytes: [][]int{{347, 347, 347, 347, 347, 347}}, + }, + { + tableName: "tp_time", + createTableDDL: `create table tp_time ( id int auto_increment, c_date date null, @@ -166,32 +194,34 @@ func TestMounterDisableOldValue(t *testing.T) { constraint pk primary key (id) );`, - values: [][]interface{}{ - {1}, - {2, "2020-02-20", "2020-02-20 02:20:20", "2020-02-20 02:20:20", "02:20:20", "2020"}, - }, - putApproximateBytes: [][]int{{627, 819}}, - delApproximateBytes: [][]int{{347, 347}}, - }, { - tableName: "tp_real", - createTableDDL: `create table tp_real - ( - id int auto_increment, - c_float float null, - c_double double null, - c_decimal decimal null, - constraint pk - primary key (id) - );`, - values: [][]interface{}{ - {1}, - {2, "2020.0202", "2020.0303", "2020.0404"}, - }, - putApproximateBytes: [][]int{{563, 551}}, - delApproximateBytes: [][]int{{347, 347}}, - }, { - tableName: "tp_other", - createTableDDL: `create table tp_other + values: [][]interface{}{ + {1}, + {2, "2020-02-20", "2020-02-20 02:20:20", "2020-02-20 02:20:20", "02:20:20", "2020"}, + }, + putApproximateBytes: [][]int{{627, 819}}, + delApproximateBytes: [][]int{{347, 347}}, + }, + { + tableName: "tp_real", + createTableDDL: `create table tp_real + ( + id int auto_increment, + c_float float null, + c_double double null, + c_decimal decimal null, + constraint pk + primary key (id) + );`, + values: [][]interface{}{ + {1}, + {2, "2020.0202", "2020.0303", "2020.0404"}, + }, + putApproximateBytes: [][]int{{563, 551}}, + delApproximateBytes: [][]int{{347, 347}}, + }, + { + tableName: "tp_other", + createTableDDL: `create table tp_other ( id int auto_increment, c_enum enum ('a','b','c') null, @@ -201,32 +231,35 @@ func TestMounterDisableOldValue(t *testing.T) { constraint pk primary key (id) );`, - values: [][]interface{}{ - {1}, - {2, "a", "a,c", 888, `{"aa":"bb"}`}, - }, - putApproximateBytes: [][]int{{636, 624}}, - delApproximateBytes: [][]int{{348, 348}}, - }, { - tableName: "clustered_index1", - createTableDDL: "CREATE TABLE clustered_index1 (id VARCHAR(255) PRIMARY KEY, data INT);", - values: [][]interface{}{ - {"hhh"}, - {"你好😘", 666}, - {"世界🤪", 888}, - }, - 
putApproximateBytes: [][]int{{383, 446, 446}}, - delApproximateBytes: [][]int{{311, 318, 318}}, - }, { - tableName: "clustered_index2", - createTableDDL: "CREATE TABLE clustered_index2 (id VARCHAR(255), data INT, ddaa date, PRIMARY KEY (id, data, ddaa), UNIQUE KEY (id, data, ddaa));", - values: [][]interface{}{ - {"你好😘", 666, "2020-11-20"}, - {"世界🤪", 888, "2020-05-12"}, - }, - putApproximateBytes: [][]int{{592, 592}}, - delApproximateBytes: [][]int{{592, 592}}, - }} + values: [][]interface{}{ + {1}, + {2, "a", "a,c", 888, `{"aa":"bb"}`}, + }, + putApproximateBytes: [][]int{{636, 624}}, + delApproximateBytes: [][]int{{348, 348}}, + }, + { + tableName: "clustered_index1", + createTableDDL: "CREATE TABLE clustered_index1 (id VARCHAR(255) PRIMARY KEY, data INT);", + values: [][]interface{}{ + {"hhh"}, + {"你好😘", 666}, + {"世界🤪", 888}, + }, + putApproximateBytes: [][]int{{383, 446, 446}}, + delApproximateBytes: [][]int{{311, 318, 318}}, + }, + { + tableName: "clustered_index2", + createTableDDL: "CREATE TABLE clustered_index2 (id VARCHAR(255), data INT, ddaa date, PRIMARY KEY (id, data, ddaa), UNIQUE KEY (id, data, ddaa));", + values: [][]interface{}{ + {"你好😘", 666, "2020-11-20"}, + {"世界🤪", 888, "2020-05-12"}, + }, + putApproximateBytes: [][]int{{592, 592}}, + delApproximateBytes: [][]int{{592, 592}}, + }, + } for _, tc := range testCases { testMounterDisableOldValue(t, tc) } @@ -253,11 +286,14 @@ func testMounterDisableOldValue(t *testing.T, tc struct { log.Info("this table is enable the clustered index", zap.String("tableName", tableInfo.Name.L)) } - var count int for _, params := range tc.values { insertSQL := prepareInsertSQL(t, tableInfo, len(params)) - //tk.MustExec(insertSQL, params...) - event := tk.DML2Event(insertSQL, "test", tc.tableName) + tk.MustExec(insertSQL, params...) + } + + var count int + events := tk.GetAllEventsByTable("test", tc.tableName) + for _, event := range events { if event == nil { continue } @@ -276,81 +312,92 @@ func testMounterDisableOldValue(t *testing.T, tc struct { result.Check([][]interface{}{{"1"}}) } } - - //ctx := context.Background() - // [TODO] check size and readd rowBytes - //mountAndCheckRowInTable := func(tableID int64, _ []int, f func(key []byte, value []byte) *model.RawKVEntry) int { - // var rows int - // walkTableSpanInStore(t, tk.Storage(), tableID, func(key []byte, value []byte) {}) - // return rows + require.Equal(t, count, len(tc.values)) + // + //for _, params := range tc.values { + // deleteSQL := prepareDeleteSQL(t, tableInfo, len(params)) + // tk.MustExec(deleteSQL, params...) //} - //mountAndCheckRow := func(rowsBytes [][]int, f func(key []byte, value []byte) *model.RawKVEntry) int { - // partitionInfo := tableInfo.GetPartitionInfo() - // if partitionInfo == nil { - // return mountAndCheckRowInTable(tableInfo.ID, rowsBytes[0], f) + + //count = 0 + //events = tk.GetAllEventsByTable("test", tc.tableName) + //for _, event := range events { + // if event == nil { + // continue + // } + // count++ + // require.Equal(t, event.Table.Table, tc.tableName) + // require.Equal(t, event.Table.Schema, "test") + // t.Log("ApproximateBytes", tc.tableName, count-1, event.ApproximateBytes()) + // if len(event.Columns) != 0 { + // checkSQL, params := prepareCheckSQL(t, tc.tableName, event.Columns) + // result := tk.MustQuery(checkSQL, params...) 
+ // result.Check([][]interface{}{{"1"}}) // } - // var rows int - // for i, p := range partitionInfo.Definitions { - // rows += mountAndCheckRowInTable(p.ID, rowsBytes[i], f) + // if len(event.PreColumns) != 0 { + // checkSQL, params := prepareCheckSQL(t, tc.tableName, event.PreColumns) + // result := tk.MustQuery(checkSQL, params...) + // result.Check([][]interface{}{{"1"}}) // } - // return rows //} - - rows := mountAndCheckRow(tc.putApproximateBytes, func(key []byte, value []byte) *model.RawKVEntry { - return &model.RawKVEntry{ - OpType: model.OpTypePut, - Key: key, - Value: value, - StartTs: ver.Ver - 1, - CRTs: ver.Ver, - } - }) - require.Equal(t, rows, len(tc.values)) - - rows = mountAndCheckRow(tc.delApproximateBytes, func(key []byte, value []byte) *model.RawKVEntry { - return &model.RawKVEntry{ - OpType: model.OpTypeDelete, - Key: key, - Value: nil, // delete event doesn't include a value when old-value is disabled - StartTs: ver.Ver - 1, - CRTs: ver.Ver, - } - }) - require.Equal(t, rows, len(tc.values)) + //require.Equal(t, count, len(tc.values)) } +//func prepareDeleteSQL(t *testing.T, tableInfo *model.TableInfo, columnLens int) string { +// quotedTable := tableInfo.TableName.QuoteString() +// var sb strings.Builder +// _, err := sb.WriteString("DELETE FROM " + quotedTable + " WHERE ") +// require.NoError(t, err) +// +// for i := 0; i < columnLens; i++ { +// col := tableInfo.Columns[i] +// if i != 0 { +// _, err = sb.WriteString(" AND ") +// require.NoError(t, err) +// } +// _, err = sb.WriteString(col.Name.O) +// require.NoError(t, err) +// _, err = sb.WriteString("=") +// require.NoError(t, err) +// _, err = sb.WriteString("?") +// require.NoError(t, err) +// } +// require.NoError(t, err) +// return sb.String() +//} + func prepareInsertSQL(t *testing.T, tableInfo *model.TableInfo, columnLens int) string { var sb strings.Builder _, err := sb.WriteString("INSERT INTO " + tableInfo.Name.O + "(") - require.Nil(t, err) + require.NoError(t, err) for i := 0; i < columnLens; i++ { col := tableInfo.Columns[i] if i != 0 { _, err = sb.WriteString(", ") - require.Nil(t, err) + require.NoError(t, err) } _, err = sb.WriteString(col.Name.O) - require.Nil(t, err) + require.NoError(t, err) } _, err = sb.WriteString(") VALUES (") - require.Nil(t, err) + require.NoError(t, err) for i := 0; i < columnLens; i++ { if i != 0 { _, err = sb.WriteString(", ") - require.Nil(t, err) + require.NoError(t, err) } _, err = sb.WriteString("?") - require.Nil(t, err) + require.NoError(t, err) } _, err = sb.WriteString(")") - require.Nil(t, err) + require.NoError(t, err) return sb.String() } func prepareCheckSQL(t *testing.T, tableName string, cols []*model.Column) (string, []interface{}) { var sb strings.Builder _, err := sb.WriteString("SELECT count(1) FROM " + tableName + " WHERE ") - require.Nil(t, err) + require.NoError(t, err) params := make([]interface{}, 0, len(cols)) for i, col := range cols { // Since float type has precision problem, so skip it to avoid compare float number. 
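// For orientation, a sketch of the statements these two builders produce for a
// hypothetical table test.t(id int, name varchar(10)), with name carrying a
// NULL value (outputs reconstructed from the builder logic above, not from a
// captured run):
//
//	prepareInsertSQL -> "INSERT INTO t(id, name) VALUES (?, ?)"
//	prepareCheckSQL  -> "SELECT count(1) FROM t WHERE id = ? AND name IS NULL"
//
// prepareCheckSQL skips float-typed columns to avoid comparing inexact values,
// and binds no placeholder for columns whose value is NULL.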
@@ -359,11 +406,11 @@ func prepareCheckSQL(t *testing.T, tableName string, cols []*model.Column) (stri } if i != 0 { _, err = sb.WriteString(" AND ") - require.Nil(t, err) + require.NoError(t, err) } if col.Value == nil { _, err = sb.WriteString(col.Name + " IS NULL") - require.Nil(t, err) + require.NoError(t, err) continue } // convert types for tk.MustQuery @@ -376,1322 +423,1292 @@ func prepareCheckSQL(t *testing.T, tableName string, cols []*model.Column) (stri } else { _, err = sb.WriteString(col.Name + " = ?") } - require.Nil(t, err) + require.NoError(t, err) } return sb.String(), params } -func walkTableSpanInStore(t *testing.T, store tidbkv.Storage, tableID int64, f func(key []byte, value []byte)) { +func getLastKeyValueInStore(t *testing.T, store tidbkv.Storage, tableID int64) (key, value []byte) { txn, err := store.Begin() - require.Nil(t, err) + require.NoError(t, err) defer txn.Rollback() //nolint:errcheck startKey, endKey := spanz.GetTableRange(tableID) kvIter, err := txn.Iter(startKey, endKey) - require.Nil(t, err) + require.NoError(t, err) defer kvIter.Close() for kvIter.Valid() { - f(kvIter.Key(), kvIter.Value()) + key = kvIter.Key() + value = kvIter.Value() err = kvIter.Next() - require.Nil(t, err) + require.NoError(t, err) } + return key, value } -//func getLastKeyValueInStore(t *testing.T, store tidbkv.Storage, tableID int64) (key, value []byte) { -// txn, err := store.Begin() -// require.NoError(t, err) -// defer txn.Rollback() //nolint:errcheck -// startKey, endKey := spanz.GetTableRange(tableID) -// kvIter, err := txn.Iter(startKey, endKey) -// require.NoError(t, err) -// defer kvIter.Close() -// for kvIter.Valid() { -// key = kvIter.Key() -// value = kvIter.Value() -// err = kvIter.Next() -// require.NoError(t, err) -// } -// return key, value -//} -// -//// We use OriginDefaultValue instead of DefaultValue in the ut, pls ref to -//// https://github.com/pingcap/tiflow/issues/4048 -//// FIXME: OriginDefaultValue seems always to be string, and test more corner case -//// Ref: https://github.com/pingcap/tidb/blob/d2c352980a43bb593db81fd1db996f47af596d91/table/column.go#L489 -//func TestGetDefaultZeroValue(t *testing.T) { -// // Check following MySQL type, ref to: -// // https://github.com/pingcap/tidb/blob/master/parser/mysql/type.go -// -// // mysql flag null -// ftNull := types.NewFieldType(mysql.TypeUnspecified) -// -// // mysql.TypeTiny + notnull -// ftTinyIntNotNull := types.NewFieldType(mysql.TypeTiny) -// ftTinyIntNotNull.AddFlag(mysql.NotNullFlag) -// -// // mysql.TypeTiny + notnull + unsigned -// ftTinyIntNotNullUnSigned := types.NewFieldType(mysql.TypeTiny) -// ftTinyIntNotNullUnSigned.SetFlag(mysql.NotNullFlag) -// ftTinyIntNotNullUnSigned.AddFlag(mysql.UnsignedFlag) -// -// // mysql.TypeTiny + null -// ftTinyIntNull := types.NewFieldType(mysql.TypeTiny) -// -// // mysql.TypeShort + notnull -// ftShortNotNull := types.NewFieldType(mysql.TypeShort) -// ftShortNotNull.SetFlag(mysql.NotNullFlag) -// -// // mysql.TypeLong + notnull -// ftLongNotNull := types.NewFieldType(mysql.TypeLong) -// ftLongNotNull.SetFlag(mysql.NotNullFlag) -// -// // mysql.TypeLonglong + notnull -// ftLongLongNotNull := types.NewFieldType(mysql.TypeLonglong) -// ftLongLongNotNull.SetFlag(mysql.NotNullFlag) -// -// // mysql.TypeInt24 + notnull -// ftInt24NotNull := types.NewFieldType(mysql.TypeInt24) -// ftInt24NotNull.SetFlag(mysql.NotNullFlag) -// -// // mysql.TypeFloat + notnull -// ftTypeFloatNotNull := types.NewFieldType(mysql.TypeFloat) -// 
ftTypeFloatNotNull.SetFlag(mysql.NotNullFlag) -// -// // mysql.TypeFloat + notnull + unsigned -// ftTypeFloatNotNullUnSigned := types.NewFieldType(mysql.TypeFloat) -// ftTypeFloatNotNullUnSigned.SetFlag(mysql.NotNullFlag | mysql.UnsignedFlag) -// -// // mysql.TypeFloat + null -// ftTypeFloatNull := types.NewFieldType(mysql.TypeFloat) -// -// // mysql.TypeDouble + notnull -// ftTypeDoubleNotNull := types.NewFieldType(mysql.TypeDouble) -// ftTypeDoubleNotNull.SetFlag(mysql.NotNullFlag) -// -// // mysql.TypeNewDecimal + notnull -// ftTypeNewDecimalNull := types.NewFieldType(mysql.TypeNewDecimal) -// ftTypeNewDecimalNull.SetFlen(5) -// ftTypeNewDecimalNull.SetDecimal(2) -// -// // mysql.TypeNewDecimal + notnull -// ftTypeNewDecimalNotNull := types.NewFieldType(mysql.TypeNewDecimal) -// ftTypeNewDecimalNotNull.SetFlag(mysql.NotNullFlag) -// ftTypeNewDecimalNotNull.SetFlen(5) -// ftTypeNewDecimalNotNull.SetDecimal(2) -// -// // mysql.TypeNull -// ftTypeNull := types.NewFieldType(mysql.TypeNull) -// -// // mysql.TypeTimestamp + notnull -// ftTypeTimestampNotNull := types.NewFieldType(mysql.TypeTimestamp) -// ftTypeTimestampNotNull.SetFlag(mysql.NotNullFlag) -// -// // mysql.TypeTimestamp + notnull -// ftTypeTimestampNull := types.NewFieldType(mysql.TypeTimestamp) -// -// // mysql.TypeDate + notnull -// ftTypeDateNotNull := types.NewFieldType(mysql.TypeDate) -// ftTypeDateNotNull.SetFlag(mysql.NotNullFlag) -// -// // mysql.TypeDuration + notnull -// ftTypeDurationNotNull := types.NewFieldType(mysql.TypeDuration) -// ftTypeDurationNotNull.SetFlag(mysql.NotNullFlag) -// -// // mysql.TypeDatetime + notnull -// ftTypeDatetimeNotNull := types.NewFieldType(mysql.TypeDatetime) -// ftTypeDatetimeNotNull.SetFlag(mysql.NotNullFlag) -// -// // mysql.TypeYear + notnull -// ftTypeYearNotNull := types.NewFieldType(mysql.TypeYear) -// ftTypeYearNotNull.SetFlag(mysql.NotNullFlag) -// -// // mysql.TypeNewDate + notnull -// ftTypeNewDateNotNull := types.NewFieldType(mysql.TypeNewDate) -// ftTypeNewDateNotNull.SetFlag(mysql.NotNullFlag) -// -// // mysql.TypeVarchar + notnull -// ftTypeVarcharNotNull := types.NewFieldType(mysql.TypeVarchar) -// ftTypeVarcharNotNull.SetFlag(mysql.NotNullFlag) -// -// // mysql.TypeTinyBlob + notnull -// ftTypeTinyBlobNotNull := types.NewFieldType(mysql.TypeTinyBlob) -// ftTypeTinyBlobNotNull.SetFlag(mysql.NotNullFlag) -// -// // mysql.TypeMediumBlob + notnull -// ftTypeMediumBlobNotNull := types.NewFieldType(mysql.TypeMediumBlob) -// ftTypeMediumBlobNotNull.SetFlag(mysql.NotNullFlag) -// -// // mysql.TypeLongBlob + notnull -// ftTypeLongBlobNotNull := types.NewFieldType(mysql.TypeLongBlob) -// ftTypeLongBlobNotNull.SetFlag(mysql.NotNullFlag) -// -// // mysql.TypeBlob + notnull -// ftTypeBlobNotNull := types.NewFieldType(mysql.TypeBlob) -// ftTypeBlobNotNull.SetFlag(mysql.NotNullFlag) -// -// // mysql.TypeVarString + notnull -// ftTypeVarStringNotNull := types.NewFieldType(mysql.TypeVarString) -// ftTypeVarStringNotNull.SetFlag(mysql.NotNullFlag) -// -// // mysql.TypeString + notnull -// ftTypeStringNotNull := types.NewFieldType(mysql.TypeString) -// ftTypeStringNotNull.SetFlag(mysql.NotNullFlag) -// -// // mysql.TypeBit + notnull -// ftTypeBitNotNull := types.NewFieldType(mysql.TypeBit) -// ftTypeBitNotNull.SetFlag(mysql.NotNullFlag) -// -// // mysql.TypeJSON + notnull -// ftTypeJSONNotNull := types.NewFieldType(mysql.TypeJSON) -// ftTypeJSONNotNull.SetFlag(mysql.NotNullFlag) -// -// // mysql.TypeEnum + notnull + nodefault -// ftTypeEnumNotNull := types.NewFieldType(mysql.TypeEnum) -// 
ftTypeEnumNotNull.SetFlag(mysql.NotNullFlag) -// ftTypeEnumNotNull.SetElems([]string{"e0", "e1"}) -// -// // mysql.TypeEnum + null -// ftTypeEnumNull := types.NewFieldType(mysql.TypeEnum) -// -// // mysql.TypeSet + notnull -// ftTypeSetNotNull := types.NewFieldType(mysql.TypeSet) -// ftTypeSetNotNull.SetFlag(mysql.NotNullFlag) -// -// // mysql.TypeGeometry + notnull -// ftTypeGeometryNotNull := types.NewFieldType(mysql.TypeGeometry) -// ftTypeGeometryNotNull.SetFlag(mysql.NotNullFlag) -// -// testCases := []struct { -// Name string -// ColInfo timodel.ColumnInfo -// Res interface{} -// Default interface{} -// }{ -// // mysql flag null -// { -// Name: "mysql flag null", -// ColInfo: timodel.ColumnInfo{FieldType: *ftNull}, -// Res: nil, -// Default: nil, -// }, -// // mysql.TypeTiny + notnull + nodefault -// { -// Name: "mysql.TypeTiny + notnull + nodefault", -// ColInfo: timodel.ColumnInfo{FieldType: *ftTinyIntNotNull.Clone()}, -// Res: int64(0), -// Default: nil, -// }, -// // mysql.TypeTiny + notnull + default -// { -// Name: "mysql.TypeTiny + notnull + default", -// ColInfo: timodel.ColumnInfo{ -// OriginDefaultValue: -1314, -// FieldType: *ftTinyIntNotNull, -// }, -// Res: int64(-1314), -// Default: int64(-1314), -// }, -// // mysql.TypeTiny + notnull + unsigned -// { -// Name: "mysql.TypeTiny + notnull + default + unsigned", -// ColInfo: timodel.ColumnInfo{FieldType: *ftTinyIntNotNullUnSigned}, -// Res: uint64(0), -// Default: nil, -// }, -// // mysql.TypeTiny + notnull + default + unsigned -// { -// Name: "mysql.TypeTiny + notnull + unsigned", -// ColInfo: timodel.ColumnInfo{OriginDefaultValue: uint64(1314), FieldType: *ftTinyIntNotNullUnSigned}, -// Res: uint64(1314), -// Default: uint64(1314), -// }, -// // mysql.TypeTiny + null + default -// { -// Name: "mysql.TypeTiny + null + default", -// ColInfo: timodel.ColumnInfo{ -// OriginDefaultValue: -1314, -// FieldType: *ftTinyIntNull, -// }, -// Res: int64(-1314), -// Default: int64(-1314), -// }, -// // mysql.TypeTiny + null + nodefault -// { -// Name: "mysql.TypeTiny + null + nodefault", -// ColInfo: timodel.ColumnInfo{FieldType: *ftTinyIntNull}, -// Res: nil, -// Default: nil, -// }, -// // mysql.TypeShort, others testCases same as tiny -// { -// Name: "mysql.TypeShort, others testCases same as tiny", -// ColInfo: timodel.ColumnInfo{FieldType: *ftShortNotNull}, -// Res: int64(0), -// Default: nil, -// }, -// // mysql.TypeLong, others testCases same as tiny -// { -// Name: "mysql.TypeLong, others testCases same as tiny", -// ColInfo: timodel.ColumnInfo{FieldType: *ftLongNotNull}, -// Res: int64(0), -// Default: nil, -// }, -// // mysql.TypeLonglong, others testCases same as tiny -// { -// Name: "mysql.TypeLonglong, others testCases same as tiny", -// ColInfo: timodel.ColumnInfo{FieldType: *ftLongLongNotNull}, -// Res: int64(0), -// Default: nil, -// }, -// // mysql.TypeInt24, others testCases same as tiny -// { -// Name: "mysql.TypeInt24, others testCases same as tiny", -// ColInfo: timodel.ColumnInfo{FieldType: *ftInt24NotNull}, -// Res: int64(0), -// Default: nil, -// }, -// // mysql.TypeFloat + notnull + nodefault -// { -// Name: "mysql.TypeFloat + notnull + nodefault", -// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeFloatNotNull}, -// Res: float32(0), -// Default: nil, -// }, -// // mysql.TypeFloat + notnull + default -// { -// Name: "mysql.TypeFloat + notnull + default", -// ColInfo: timodel.ColumnInfo{ -// OriginDefaultValue: float32(-3.1415), -// FieldType: *ftTypeFloatNotNull, -// }, -// Res: float32(-3.1415), -// Default: 
float32(-3.1415), -// }, -// // mysql.TypeFloat + notnull + default + unsigned -// { -// Name: "mysql.TypeFloat + notnull + default + unsigned", -// ColInfo: timodel.ColumnInfo{ -// OriginDefaultValue: float32(3.1415), -// FieldType: *ftTypeFloatNotNullUnSigned, -// }, -// Res: float32(3.1415), -// Default: float32(3.1415), -// }, -// // mysql.TypeFloat + notnull + unsigned -// { -// Name: "mysql.TypeFloat + notnull + unsigned", -// ColInfo: timodel.ColumnInfo{ -// FieldType: *ftTypeFloatNotNullUnSigned, -// }, -// Res: float32(0), -// Default: nil, -// }, -// // mysql.TypeFloat + null + default -// { -// Name: "mysql.TypeFloat + null + default", -// ColInfo: timodel.ColumnInfo{ -// OriginDefaultValue: float32(-3.1415), -// FieldType: *ftTypeFloatNull, -// }, -// Res: float32(-3.1415), -// Default: float32(-3.1415), -// }, -// // mysql.TypeFloat + null + nodefault -// { -// Name: "mysql.TypeFloat + null + nodefault", -// ColInfo: timodel.ColumnInfo{ -// FieldType: *ftTypeFloatNull, -// }, -// Res: nil, -// Default: nil, -// }, -// // mysql.TypeDouble, other testCases same as float -// { -// Name: "mysql.TypeDouble, other testCases same as float", -// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeDoubleNotNull}, -// Res: float64(0), -// Default: nil, -// }, -// // mysql.TypeNewDecimal + notnull + nodefault -// { -// Name: "mysql.TypeNewDecimal + notnull + nodefault", -// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeNewDecimalNotNull}, -// Res: "0", // related with Flen and Decimal -// Default: nil, -// }, -// // mysql.TypeNewDecimal + null + nodefault -// { -// Name: "mysql.TypeNewDecimal + null + nodefault", -// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeNewDecimalNull}, -// Res: nil, -// Default: nil, -// }, -// // mysql.TypeNewDecimal + null + default -// { -// Name: "mysql.TypeNewDecimal + null + default", -// ColInfo: timodel.ColumnInfo{ -// OriginDefaultValue: "-3.14", // no float -// FieldType: *ftTypeNewDecimalNotNull, -// }, -// Res: "-3.14", -// Default: "-3.14", -// }, -// // mysql.TypeNull -// { -// Name: "mysql.TypeNull", -// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeNull}, -// Res: nil, -// Default: nil, -// }, -// // mysql.TypeTimestamp + notnull + nodefault -// { -// Name: "mysql.TypeTimestamp + notnull + nodefault", -// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeTimestampNotNull}, -// Res: "0000-00-00 00:00:00", -// Default: nil, -// }, -// // mysql.TypeTimestamp + notnull + default -// { -// Name: "mysql.TypeTimestamp + notnull + default", -// ColInfo: timodel.ColumnInfo{ -// OriginDefaultValue: "2020-11-19 12:12:12", -// FieldType: *ftTypeTimestampNotNull, -// }, -// Res: "2020-11-19 12:12:12", -// Default: "2020-11-19 12:12:12", -// }, -// // mysql.TypeTimestamp + null + default -// { -// Name: "mysql.TypeTimestamp + null + default", -// ColInfo: timodel.ColumnInfo{ -// OriginDefaultValue: "2020-11-19 12:12:12", -// FieldType: *ftTypeTimestampNull, -// }, -// Res: "2020-11-19 12:12:12", -// Default: "2020-11-19 12:12:12", -// }, -// // mysql.TypeDate, other testCases same as TypeTimestamp -// { -// Name: "mysql.TypeDate, other testCases same as TypeTimestamp", -// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeDateNotNull}, -// Res: "0000-00-00", -// Default: nil, -// }, -// // mysql.TypeDuration, other testCases same as TypeTimestamp -// { -// Name: "mysql.TypeDuration, other testCases same as TypeTimestamp", -// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeDurationNotNull}, -// Res: "00:00:00", -// Default: nil, -// }, -// // mysql.TypeDatetime, other 
testCases same as TypeTimestamp -// { -// Name: "mysql.TypeDatetime, other testCases same as TypeTimestamp", -// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeDatetimeNotNull}, -// Res: "0000-00-00 00:00:00", -// Default: nil, -// }, -// // mysql.TypeYear + notnull + nodefault -// { -// Name: "mysql.TypeYear + notnull + nodefault", -// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeYearNotNull}, -// Res: int64(0), -// Default: nil, -// }, -// // mysql.TypeYear + notnull + default -// { -// Name: "mysql.TypeYear + notnull + default", -// ColInfo: timodel.ColumnInfo{ -// OriginDefaultValue: "2021", -// FieldType: *ftTypeYearNotNull, -// }, -// // TypeYear default value will be a string and then translate to []byte -// Res: "2021", -// Default: "2021", -// }, -// // mysql.TypeNewDate -// { -// Name: "mysql.TypeNewDate", -// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeNewDateNotNull}, -// Res: nil, // [TODO] seems not support by TiDB, need check -// Default: nil, -// }, -// // mysql.TypeVarchar + notnull + nodefault -// { -// Name: "mysql.TypeVarchar + notnull + nodefault", -// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeVarcharNotNull}, -// Res: []byte{}, -// Default: nil, -// }, -// // mysql.TypeVarchar + notnull + default -// { -// Name: "mysql.TypeVarchar + notnull + default", -// ColInfo: timodel.ColumnInfo{ -// OriginDefaultValue: "e0", -// FieldType: *ftTypeVarcharNotNull, -// }, -// // TypeVarchar default value will be a string and then translate to []byte -// Res: "e0", -// Default: "e0", -// }, -// // mysql.TypeTinyBlob -// { -// Name: "mysql.TypeTinyBlob", -// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeTinyBlobNotNull}, -// Res: []byte{}, -// Default: nil, -// }, -// // mysql.TypeMediumBlob -// { -// Name: "mysql.TypeMediumBlob", -// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeMediumBlobNotNull}, -// Res: []byte{}, -// Default: nil, -// }, -// // mysql.TypeLongBlob -// { -// Name: "mysql.TypeLongBlob", -// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeLongBlobNotNull}, -// Res: []byte{}, -// Default: nil, -// }, -// // mysql.TypeBlob -// { -// Name: "mysql.TypeBlob", -// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeBlobNotNull}, -// Res: []byte{}, -// Default: nil, -// }, -// // mysql.TypeVarString -// { -// Name: "mysql.TypeVarString", -// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeVarStringNotNull}, -// Res: []byte{}, -// Default: nil, -// }, -// // mysql.TypeString -// { -// Name: "mysql.TypeString", -// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeStringNotNull}, -// Res: []byte{}, -// Default: nil, -// }, -// // mysql.TypeBit -// { -// Name: "mysql.TypeBit", -// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeBitNotNull}, -// Res: uint64(0), -// Default: nil, -// }, -// // BLOB, TEXT, GEOMETRY or JSON column can't have a default value -// // mysql.TypeJSON -// { -// Name: "mysql.TypeJSON", -// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeJSONNotNull}, -// Res: "null", -// Default: nil, -// }, -// // mysql.TypeEnum + notnull + nodefault -// { -// Name: "mysql.TypeEnum + notnull + nodefault", -// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeEnumNotNull}, -// // TypeEnum value will be a string and then translate to []byte -// // NotNull && no default will choose first element -// Res: uint64(1), -// Default: nil, -// }, -// // mysql.TypeEnum + notnull + default -// { -// Name: "mysql.TypeEnum + notnull + default", -// ColInfo: timodel.ColumnInfo{ -// OriginDefaultValue: "e1", -// FieldType: *ftTypeEnumNotNull, -// }, -// // TypeEnum default value will be a string and 
then translate to []byte -// Res: "e1", -// Default: "e1", -// }, -// // mysql.TypeEnum + null -// { -// Name: "mysql.TypeEnum + null", -// ColInfo: timodel.ColumnInfo{ -// FieldType: *ftTypeEnumNull, -// }, -// Res: nil, -// }, -// // mysql.TypeSet + notnull -// { -// Name: "mysql.TypeSet + notnull", -// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeSetNotNull}, -// Res: uint64(0), -// Default: nil, -// }, -// // mysql.TypeSet + notnull + default -// { -// Name: "mysql.TypeSet + notnull + default", -// ColInfo: timodel.ColumnInfo{ -// OriginDefaultValue: "1,e", -// FieldType: *ftTypeSetNotNull, -// }, -// // TypeSet default value will be a string and then translate to []byte -// Res: "1,e", -// Default: "1,e", -// }, -// // mysql.TypeGeometry -// { -// Name: "mysql.TypeGeometry", -// ColInfo: timodel.ColumnInfo{FieldType: *ftTypeGeometryNotNull}, -// Res: nil, // not support yet -// Default: nil, -// }, -// } -// -// for _, tc := range testCases { -// _, val, _, _, _ := getDefaultOrZeroValue(&tc.ColInfo) -// require.Equal(t, tc.Res, val, tc.Name) -// val = GetDDLDefaultDefinition(&tc.ColInfo) -// require.Equal(t, tc.Default, val, tc.Name) -// } -//} -// -//func TestE2ERowLevelChecksum(t *testing.T) { -// // changefeed enable checksum functionality -// replicaConfig := config.GetDefaultReplicaConfig() -// replicaConfig.Integrity.IntegrityCheckLevel = integrity.CheckLevelCorrectness -// -// tk := NewTestKit(t, replicaConfig) -// defer tk.Close() -// -// // upstream TiDB enable checksum functionality -// tk.MustExec("set global tidb_enable_row_level_checksum = 1") -// tk.MustExec("use test") -// -// filter, err := filter.NewFilter(replicaConfig, "") -// require.NoError(t, err) -// -// ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope) -// require.NoError(t, err) -// -// changefeed := model.DefaultChangeFeedID("changefeed-test-decode-row") -// schemaStorage, err := NewSchemaStorage(tk.GetCurrentMeta(), -// ver.Ver, false, changefeed, util.RoleTester, filter) -// require.NoError(t, err) -// require.NotNil(t, schemaStorage) -// -// createTableSQL := `create table t ( -// id int primary key auto_increment, -// -// c_tinyint tinyint null, -// c_smallint smallint null, -// c_mediumint mediumint null, -// c_int int null, -// c_bigint bigint null, -// -// c_unsigned_tinyint tinyint unsigned null, -// c_unsigned_smallint smallint unsigned null, -// c_unsigned_mediumint mediumint unsigned null, -// c_unsigned_int int unsigned null, -// c_unsigned_bigint bigint unsigned null, -// -// c_float float null, -// c_double double null, -// c_decimal decimal null, -// c_decimal_2 decimal(10, 4) null, -// -// c_unsigned_float float unsigned null, -// c_unsigned_double double unsigned null, -// c_unsigned_decimal decimal unsigned null, -// c_unsigned_decimal_2 decimal(10, 4) unsigned null, -// -// c_date date null, -// c_datetime datetime null, -// c_timestamp timestamp null, -// c_time time null, -// c_year year null, -// -// c_tinytext tinytext null, -// c_text text null, -// c_mediumtext mediumtext null, -// c_longtext longtext null, -// -// c_tinyblob tinyblob null, -// c_blob blob null, -// c_mediumblob mediumblob null, -// c_longblob longblob null, -// -// c_char char(16) null, -// c_varchar varchar(16) null, -// c_binary binary(16) null, -// c_varbinary varbinary(16) null, -// -// c_enum enum ('a','b','c') null, -// c_set set ('a','b','c') null, -// c_bit bit(64) null, -// c_json json null, -// -//-- gbk dmls -// name varchar(128) CHARACTER SET gbk, -// country char(32) CHARACTER SET gbk, -// 
city varchar(64), -// description text CHARACTER SET gbk, -// image tinyblob -//);` -// job := tk.DDL2Job(createTableSQL) -// err = schemaStorage.HandleDDLJob(job) -// require.NoError(t, err) -// -// ts := schemaStorage.GetLastSnapshot().CurrentTs() -// schemaStorage.AdvanceResolvedTs(ver.Ver) -// -// mounter := NewMounter(schemaStorage, changefeed, time.Local, filter, replicaConfig.Integrity).(*mounter) -// -// ctx, cancel := context.WithCancel(context.Background()) -// defer cancel() -// -// tableInfo, ok := schemaStorage.GetLastSnapshot().TableByName("test", "t") -// require.True(t, ok) -// -// tk.Session().GetSessionVars().EnableRowLevelChecksum = true -// -// insertDataSQL := `insert into t values ( -// 2, -// 1, 2, 3, 4, 5, -// 1, 2, 3, 4, 5, -// 2020.0202, 2020.0303, 2020.0404, 2021.1208, -// 3.1415, 2.7182, 8000, 179394.233, -// '2020-02-20', '2020-02-20 02:20:20', '2020-02-20 02:20:20', '02:20:20', '2020', -// '89504E470D0A1A0A', '89504E470D0A1A0A', '89504E470D0A1A0A', '89504E470D0A1A0A', -// x'89504E470D0A1A0A', x'89504E470D0A1A0A', x'89504E470D0A1A0A', x'89504E470D0A1A0A', -// '89504E470D0A1A0A', '89504E470D0A1A0A', x'89504E470D0A1A0A', x'89504E470D0A1A0A', -// 'b', 'b,c', b'1000001', '{ -//"key1": "value1", -//"key2": "value2", -//"key3": "123" -//}', -// '测试', "中国", "上海", "你好,世界", 0xC4E3BAC3CAC0BDE7 -//);` -// tk.MustExec(insertDataSQL) -// -// key, value := getLastKeyValueInStore(t, tk.Storage(), tableInfo.ID) -// rawKV := &model.RawKVEntry{ -// OpType: model.OpTypePut, -// Key: key, -// Value: value, -// StartTs: ts - 1, -// CRTs: ts + 1, -// } -// row, err := mounter.unmarshalAndMountRowChanged(ctx, rawKV) -// require.NoError(t, err) -// require.NotNil(t, row) -// require.NotNil(t, row.Checksum) -// -// expected, ok := mounter.decoder.GetChecksum() -// require.True(t, ok) -// require.Equal(t, expected, row.Checksum.Current) -// require.False(t, row.Checksum.Corrupted) -// -// // avro encoder enable checksum functionality. -// codecConfig := codecCommon.NewConfig(config.ProtocolAvro) -// codecConfig.EnableTiDBExtension = true -// codecConfig.EnableRowChecksum = true -// codecConfig.AvroDecimalHandlingMode = "string" -// codecConfig.AvroBigintUnsignedHandlingMode = "string" -// -// avroEncoder, err := avro.SetupEncoderAndSchemaRegistry4Testing(ctx, codecConfig) -// defer avro.TeardownEncoderAndSchemaRegistry4Testing() -// require.NoError(t, err) -// -// topic := "test.t" -// -// err = avroEncoder.AppendRowChangedEvent(ctx, topic, row, func() {}) -// require.NoError(t, err) -// msg := avroEncoder.Build() -// require.Len(t, msg, 1) -// -// schemaM, err := avro.NewConfluentSchemaManager( -// ctx, "http://127.0.0.1:8081", nil) -// require.NoError(t, err) -// -// // decoder enable checksum functionality. -// decoder := avro.NewDecoder(codecConfig, schemaM, topic, time.Local) -// err = decoder.AddKeyValue(msg[0].Key, msg[0].Value) -// require.NoError(t, err) -// -// messageType, hasNext, err := decoder.HasNext() -// require.NoError(t, err) -// require.True(t, hasNext) -// require.Equal(t, model.MessageTypeRow, messageType) -// -// row, err = decoder.NextRowChangedEvent() -// // no error, checksum verification passed. 
-// require.NoError(t, err) -//} -// -//func TestDecodeRowEnableChecksum(t *testing.T) { -// replicaConfig := config.GetDefaultReplicaConfig() -// replicaConfig.Integrity.IntegrityCheckLevel = integrity.CheckLevelCorrectness -// -// tk := NewTestKit(t, replicaConfig) -// defer tk.Close() -// -// tk.MustExec("set global tidb_enable_row_level_checksum = 1") -// tk.MustExec("use test") -// -// filter, err := filter.NewFilter(replicaConfig, "") -// require.NoError(t, err) -// -// ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope) -// require.NoError(t, err) -// -// changefeed := model.DefaultChangeFeedID("changefeed-test-decode-row") -// schemaStorage, err := NewSchemaStorage(tk.GetCurrentMeta(), -// ver.Ver, false, changefeed, util.RoleTester, filter) -// require.NoError(t, err) -// require.NotNil(t, schemaStorage) -// -// createTableDDL := "create table t (id int primary key, a int)" -// job := tk.DDL2Job(createTableDDL) -// err = schemaStorage.HandleDDLJob(job) -// require.NoError(t, err) -// -// ts := schemaStorage.GetLastSnapshot().CurrentTs() -// schemaStorage.AdvanceResolvedTs(ver.Ver) -// -// mounter := NewMounter(schemaStorage, changefeed, time.Local, filter, replicaConfig.Integrity).(*mounter) -// -// ctx := context.Background() -// -// tableInfo, ok := schemaStorage.GetLastSnapshot().TableByName("test", "t") -// require.True(t, ok) -// -// // row without checksum -// tk.Session().GetSessionVars().EnableRowLevelChecksum = false -// tk.MustExec("insert into t values (1, 10)") -// -// key, value := getLastKeyValueInStore(t, tk.Storage(), tableInfo.ID) -// rawKV := &model.RawKVEntry{ -// OpType: model.OpTypePut, -// Key: key, -// Value: value, -// StartTs: ts - 1, -// CRTs: ts + 1, -// } -// -// row, err := mounter.unmarshalAndMountRowChanged(ctx, rawKV) -// require.NoError(t, err) -// require.NotNil(t, row) -// // the upstream tidb does not enable checksum, so the checksum is nil -// require.Nil(t, row.Checksum) -// -// // row with one checksum -// tk.Session().GetSessionVars().EnableRowLevelChecksum = true -// tk.MustExec("insert into t values (2, 20)") -// -// key, value = getLastKeyValueInStore(t, tk.Storage(), tableInfo.ID) -// rawKV = &model.RawKVEntry{ -// OpType: model.OpTypePut, -// Key: key, -// Value: value, -// StartTs: ts - 1, -// CRTs: ts + 1, -// } -// row, err = mounter.unmarshalAndMountRowChanged(ctx, rawKV) -// require.NoError(t, err) -// require.NotNil(t, row) -// require.NotNil(t, row.Checksum) -// -// expected, ok := mounter.decoder.GetChecksum() -// require.True(t, ok) -// require.Equal(t, expected, row.Checksum.Current) -// require.False(t, row.Checksum.Corrupted) -// -// // row with 2 checksum -// tk.MustExec("insert into t values (3, 30)") -// job = tk.DDL2Job("alter table t change column a a varchar(10)") -// err = schemaStorage.HandleDDLJob(job) -// require.NoError(t, err) -// -// key, value = getLastKeyValueInStore(t, tk.Storage(), tableInfo.ID) -// rawKV = &model.RawKVEntry{ -// OpType: model.OpTypePut, -// Key: key, -// Value: value, -// StartTs: ts - 1, -// CRTs: ts + 1, -// } -// row, err = mounter.unmarshalAndMountRowChanged(ctx, rawKV) -// require.NoError(t, err) -// require.NotNil(t, row) -// require.NotNil(t, row.Checksum) -// -// first, ok := mounter.decoder.GetChecksum() -// require.True(t, ok) -// -// extra, ok := mounter.decoder.GetExtraChecksum() -// require.True(t, ok) -// -// if row.Checksum.Current != first { -// require.Equal(t, extra, row.Checksum.Current) -// } else { -// require.Equal(t, first, row.Checksum.Current) -// } -// 
require.False(t, row.Checksum.Corrupted) -// -// // hack the table info to make the checksum corrupted -// tableInfo.Columns[0].FieldType = *types.NewFieldType(mysql.TypeVarchar) -// -// // corrupt-handle-level default to warn, so no error, but the checksum is corrupted -// row, err = mounter.unmarshalAndMountRowChanged(ctx, rawKV) -// require.NoError(t, err) -// require.NotNil(t, row.Checksum) -// require.True(t, row.Checksum.Corrupted) -// -// mounter.integrity.CorruptionHandleLevel = integrity.CorruptionHandleLevelError -// _, err = mounter.unmarshalAndMountRowChanged(ctx, rawKV) -// require.Error(t, err) -// require.ErrorIs(t, err, cerror.ErrCorruptedDataMutation) -// -// job = tk.DDL2Job("drop table t") -// err = schemaStorage.HandleDDLJob(job) -// require.NoError(t, err) -//} -// -//func TestDecodeRow(t *testing.T) { -// replicaConfig := config.GetDefaultReplicaConfig() -// -// tk := NewTestKit(t, replicaConfig) -// defer tk.Close() -// -// tk.MustExec("set @@tidb_enable_clustered_index=1;") -// tk.MustExec("use test;") -// -// changefeed := model.DefaultChangeFeedID("changefeed-test-decode-row") -// -// ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope) -// require.NoError(t, err) -// -// filter, err := filter.NewFilter(replicaConfig, "") -// require.NoError(t, err) -// -// schemaStorage, err := NewSchemaStorage(tk.GetCurrentMeta(), -// ver.Ver, false, changefeed, util.RoleTester, filter) -// require.NoError(t, err) -// -// // apply ddl to schemaStorage -// ddl := "create table test.student(id int primary key, name char(50), age int, gender char(10))" -// job := tk.DDL2Job(ddl) -// err = schemaStorage.HandleDDLJob(job) -// require.NoError(t, err) -// -// ts := schemaStorage.GetLastSnapshot().CurrentTs() -// -// schemaStorage.AdvanceResolvedTs(ver.Ver) -// -// mounter := NewMounter(schemaStorage, changefeed, time.Local, filter, replicaConfig.Integrity).(*mounter) -// -// tk.MustExec(`insert into student values(1, "dongmen", 20, "male")`) -// tk.MustExec(`update student set age = 27 where id = 1`) -// -// ctx := context.Background() -// decodeAndCheckRowInTable := func(tableID int64, f func(key []byte, value []byte) *model.RawKVEntry) { -// walkTableSpanInStore(t, tk.Storage(), tableID, func(key []byte, value []byte) { -// rawKV := f(key, value) -// -// row, err := mounter.unmarshalAndMountRowChanged(ctx, rawKV) -// require.NoError(t, err) -// require.NotNil(t, row) -// -// if row.Columns != nil { -// require.NotNil(t, mounter.decoder) -// } -// -// if row.PreColumns != nil { -// require.NotNil(t, mounter.preDecoder) -// } -// }) -// } -// -// toRawKV := func(key []byte, value []byte) *model.RawKVEntry { -// return &model.RawKVEntry{ -// OpType: model.OpTypePut, -// Key: key, -// Value: value, -// StartTs: ts - 1, -// CRTs: ts + 1, -// } -// } -// -// tableInfo, ok := schemaStorage.GetLastSnapshot().TableByName("test", "student") -// require.True(t, ok) -// -// decodeAndCheckRowInTable(tableInfo.ID, toRawKV) -// decodeAndCheckRowInTable(tableInfo.ID, toRawKV) -// -// job = tk.DDL2Job("drop table student") -// err = schemaStorage.HandleDDLJob(job) -// require.NoError(t, err) -//} -// -//// TestDecodeEventIgnoreRow tests a PolymorphicEvent.Row is nil -//// if this event should be filter out by filter. 
-//func TestDecodeEventIgnoreRow(t *testing.T) { -// replicaConfig := config.GetDefaultReplicaConfig() -// replicaConfig.Filter.Rules = []string{"test.student", "test.computer"} -// -// tk := NewTestKit(t, replicaConfig) -// defer tk.Close() -// tk.MustExec("use test;") -// -// ddls := []string{ -// "create table test.student(id int primary key, name char(50), age int, gender char(10))", -// "create table test.computer(id int primary key, brand char(50), price int)", -// "create table test.poet(id int primary key, name char(50), works char(100))", -// } -// -// cfID := model.DefaultChangeFeedID("changefeed-test-ignore-event") -// -// f, err := filter.NewFilter(replicaConfig, "") -// require.Nil(t, err) -// ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope) -// require.Nil(t, err) -// -// schemaStorage, err := NewSchemaStorage(tk.GetCurrentMeta(), -// ver.Ver, false, cfID, util.RoleTester, f) -// require.Nil(t, err) -// // apply ddl to schemaStorage -// for _, ddl := range ddls { -// job := tk.DDL2Job(ddl) -// err = schemaStorage.HandleDDLJob(job) -// require.Nil(t, err) -// } -// -// ts := schemaStorage.GetLastSnapshot().CurrentTs() -// schemaStorage.AdvanceResolvedTs(ver.Ver) -// mounter := NewMounter(schemaStorage, cfID, time.Local, f, replicaConfig.Integrity).(*mounter) -// -// type testCase struct { -// schema string -// table string -// columns []interface{} -// ignored bool -// } -// -// testCases := []testCase{ -// { -// schema: "test", -// table: "student", -// columns: []interface{}{1, "dongmen", 20, "male"}, -// ignored: false, -// }, -// { -// schema: "test", -// table: "computer", -// columns: []interface{}{1, "apple", 19999}, -// ignored: false, -// }, -// // This case should be ignored by its table name. -// { -// schema: "test", -// table: "poet", -// columns: []interface{}{1, "李白", "静夜思"}, -// ignored: true, -// }, -// } -// -// ignoredTables := make([]string, 0) -// tables := make([]string, 0) -// for _, tc := range testCases { -// tableInfo, ok := schemaStorage.GetLastSnapshot().TableByName(tc.schema, tc.table) -// require.True(t, ok) -// // TODO: add other dml event type -// insertSQL := prepareInsertSQL(t, tableInfo, len(tc.columns)) -// if tc.ignored { -// ignoredTables = append(ignoredTables, tc.table) -// } else { -// tables = append(tables, tc.table) -// } -// tk.MustExec(insertSQL, tc.columns...) -// } -// ctx := context.Background() -// -// decodeAndCheckRowInTable := func(tableID int64, f func(key []byte, value []byte) *model.RawKVEntry) int { -// var rows int -// walkTableSpanInStore(t, tk.Storage(), tableID, func(key []byte, value []byte) { -// rawKV := f(key, value) -// pEvent := model.NewPolymorphicEvent(rawKV) -// err := mounter.DecodeEvent(ctx, pEvent) -// require.Nil(t, err) -// if pEvent.Row == nil { -// return -// } -// row := pEvent.Row -// rows++ -// require.Equal(t, row.Table.Schema, "test") -// // Now we only allow filter dml event by table, so we only check row's table. 
-// require.NotContains(t, ignoredTables, row.Table.Table) -// require.Contains(t, tables, row.Table.Table) -// }) -// return rows -// } -// -// toRawKV := func(key []byte, value []byte) *model.RawKVEntry { -// return &model.RawKVEntry{ -// OpType: model.OpTypePut, -// Key: key, -// Value: value, -// StartTs: ts - 1, -// CRTs: ts + 1, -// } -// } -// -// for _, tc := range testCases { -// tableInfo, ok := schemaStorage.GetLastSnapshot().TableByName(tc.schema, tc.table) -// require.True(t, ok) -// decodeAndCheckRowInTable(tableInfo.ID, toRawKV) -// } -//} -// -//func TestBuildTableInfo(t *testing.T) { -// cases := []struct { -// origin string -// recovered string -// recoveredWithNilCol string -// }{ -// { -// "CREATE TABLE t1 (c INT PRIMARY KEY)", -// "CREATE TABLE `BuildTiDBTableInfo` (\n" + -// " `c` int(0) NOT NULL,\n" + -// " PRIMARY KEY (`c`(0)) /*T![clustered_index] CLUSTERED */\n" + -// ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", -// "CREATE TABLE `BuildTiDBTableInfo` (\n" + -// " `c` int(0) NOT NULL,\n" + -// " PRIMARY KEY (`c`(0)) /*T![clustered_index] CLUSTERED */\n" + -// ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", -// }, -// { -// "CREATE TABLE t1 (" + -// " c INT UNSIGNED," + -// " c2 VARCHAR(10) NOT NULL," + -// " c3 BIT(10) NOT NULL," + -// " UNIQUE KEY (c2, c3)" + -// ")", -// // CDC discards field length. -// "CREATE TABLE `BuildTiDBTableInfo` (\n" + -// " `c` int(0) unsigned DEFAULT NULL,\n" + -// " `c2` varchar(0) NOT NULL,\n" + -// " `c3` bit(0) NOT NULL,\n" + -// " UNIQUE KEY `idx_0` (`c2`(0),`c3`(0))\n" + -// ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", -// "CREATE TABLE `BuildTiDBTableInfo` (\n" + -// " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + -// " `c2` varchar(0) NOT NULL,\n" + -// " `c3` bit(0) NOT NULL,\n" + -// " UNIQUE KEY `idx_0` (`c2`(0),`c3`(0))\n" + -// ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", -// }, -// { -// "CREATE TABLE t1 (" + -// " c INT UNSIGNED," + -// " gen INT AS (c+1) VIRTUAL," + -// " c2 VARCHAR(10) NOT NULL," + -// " gen2 INT AS (c+2) STORED," + -// " c3 BIT(10) NOT NULL," + -// " PRIMARY KEY (c, c2)" + -// ")", -// // CDC discards virtual generated column, and generating expression of stored generated column. 
-// "CREATE TABLE `BuildTiDBTableInfo` (\n" + -// " `c` int(0) unsigned NOT NULL,\n" + -// " `c2` varchar(0) NOT NULL,\n" + -// " `gen2` int(0) GENERATED ALWAYS AS (pass_generated_check) STORED,\n" + -// " `c3` bit(0) NOT NULL,\n" + -// " PRIMARY KEY (`c`(0),`c2`(0)) /*T![clustered_index] CLUSTERED */\n" + -// ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", -// "CREATE TABLE `BuildTiDBTableInfo` (\n" + -// " `c` int(0) unsigned NOT NULL,\n" + -// " `c2` varchar(0) NOT NULL,\n" + -// " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + -// " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + -// " PRIMARY KEY (`c`(0),`c2`(0)) /*T![clustered_index] CLUSTERED */\n" + -// ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", -// }, -// { -// "CREATE TABLE `t1` (" + -// " `a` int(11) NOT NULL," + -// " `b` int(11) DEFAULT NULL," + -// " `c` int(11) DEFAULT NULL," + -// " PRIMARY KEY (`a`) /*T![clustered_index] CLUSTERED */," + -// " UNIQUE KEY `b` (`b`)" + -// ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", -// "CREATE TABLE `BuildTiDBTableInfo` (\n" + -// " `a` int(0) NOT NULL,\n" + -// " `b` int(0) DEFAULT NULL,\n" + -// " `c` int(0) DEFAULT NULL,\n" + -// " PRIMARY KEY (`a`(0)) /*T![clustered_index] CLUSTERED */,\n" + -// " UNIQUE KEY `idx_1` (`b`(0))\n" + -// ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", -// "CREATE TABLE `BuildTiDBTableInfo` (\n" + -// " `a` int(0) NOT NULL,\n" + -// " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + -// " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + -// " PRIMARY KEY (`a`(0)) /*T![clustered_index] CLUSTERED */\n" + -// ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", -// }, -// { // This case is to check the primary key is correctly identified by BuildTiDBTableInfo -// "CREATE TABLE your_table (" + -// " id INT NOT NULL," + -// " name VARCHAR(50) NOT NULL," + -// " email VARCHAR(100) NOT NULL," + -// " age INT NOT NULL ," + -// " address VARCHAR(200) NOT NULL," + -// " PRIMARY KEY (id, name)," + -// " UNIQUE INDEX idx_unique_1 (id, email, age)," + -// " UNIQUE INDEX idx_unique_2 (name, email, address)" + -// " );", -// "CREATE TABLE `BuildTiDBTableInfo` (\n" + -// " `id` int(0) NOT NULL,\n" + -// " `name` varchar(0) NOT NULL,\n" + -// " `email` varchar(0) NOT NULL,\n" + -// " `age` int(0) NOT NULL,\n" + -// " `address` varchar(0) NOT NULL,\n" + -// " PRIMARY KEY (`id`(0),`name`(0)) /*T![clustered_index] CLUSTERED */,\n" + -// " UNIQUE KEY `idx_1` (`id`(0),`email`(0),`age`(0)),\n" + -// " UNIQUE KEY `idx_2` (`name`(0),`email`(0),`address`(0))\n" + -// ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", -// "CREATE TABLE `BuildTiDBTableInfo` (\n" + -// " `id` int(0) NOT NULL,\n" + -// " `name` varchar(0) NOT NULL,\n" + -// " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + -// " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + -// " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + -// " PRIMARY KEY (`id`(0),`name`(0)) /*T![clustered_index] CLUSTERED */,\n" + -// " UNIQUE KEY `idx_1` (`id`(0),`omitted`(0),`omitted`(0)),\n" + -// " UNIQUE KEY `idx_2` (`name`(0),`omitted`(0),`omitted`(0))\n" + -// ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", -// }, -// } -// p := parser.New() -// for i, c := range cases { -// stmt, err := p.ParseOneStmt(c.origin, "", "") 
-// require.NoError(t, err) -// originTI, err := ddl.BuildTableInfoFromAST(stmt.(*ast.CreateTableStmt)) -// require.NoError(t, err) -// cdcTableInfo := model.WrapTableInfo(0, "test", 0, originTI) -// cols, _, _, _, err := datum2Column(cdcTableInfo, map[int64]types.Datum{}) -// require.NoError(t, err) -// recoveredTI := model.BuildTiDBTableInfo(cols, cdcTableInfo.IndexColumnsOffset) -// handle := sqlmodel.GetWhereHandle(recoveredTI, recoveredTI) -// require.NotNil(t, handle.UniqueNotNullIdx) -// require.Equal(t, c.recovered, showCreateTable(t, recoveredTI)) -// // make sure BuildTiDBTableInfo indentify the correct primary key -// if i == 5 { -// inexes := recoveredTI.Indices -// primaryCount := 0 -// for i := range inexes { -// if inexes[i].Primary { -// primaryCount++ -// } -// } -// require.Equal(t, 1, primaryCount) -// require.Equal(t, 2, len(handle.UniqueNotNullIdx.Columns)) -// } -// // mimic the columns are set to nil when old value feature is disabled -// for i := range cols { -// if !cols[i].Flag.IsHandleKey() { -// cols[i] = nil -// } -// } -// recoveredTI = model.BuildTiDBTableInfo(cols, cdcTableInfo.IndexColumnsOffset) -// handle = sqlmodel.GetWhereHandle(recoveredTI, recoveredTI) -// require.NotNil(t, handle.UniqueNotNullIdx) -// require.Equal(t, c.recoveredWithNilCol, showCreateTable(t, recoveredTI)) -// } -//} -// -//var tiCtx = mock.NewContext() -// -//func showCreateTable(t *testing.T, ti *timodel.TableInfo) string { -// result := bytes.NewBuffer(make([]byte, 0, 512)) -// err := executor.ConstructResultOfShowCreateTable(tiCtx, ti, autoid.Allocators{}, result) -// require.NoError(t, err) -// return result.String() -//} -// -//func TestNewDMRowChange(t *testing.T) { -// cases := []struct { -// origin string -// recovered string -// }{ -// { -// "CREATE TABLE t1 (id INT," + -// " a1 INT NOT NULL," + -// " a3 INT NOT NULL," + -// " UNIQUE KEY dex1(a1, a3));", -// "CREATE TABLE `BuildTiDBTableInfo` (\n" + -// " `id` int(0) DEFAULT NULL,\n" + -// " `a1` int(0) NOT NULL,\n" + -// " `a3` int(0) NOT NULL,\n" + -// " UNIQUE KEY `idx_0` (`a1`(0),`a3`(0))\n" + -// ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", -// }, -// } -// p := parser.New() -// for _, c := range cases { -// stmt, err := p.ParseOneStmt(c.origin, "", "") -// require.NoError(t, err) -// originTI, err := ddl.BuildTableInfoFromAST(stmt.(*ast.CreateTableStmt)) -// require.NoError(t, err) -// cdcTableInfo := model.WrapTableInfo(0, "test", 0, originTI) -// cols := []*model.Column{ -// { -// Name: "id", Type: 3, Charset: "binary", Flag: 65, Value: 1, Default: nil, -// }, -// { -// Name: "a1", Type: 3, Charset: "binary", Flag: 51, Value: 1, Default: nil, -// }, -// { -// Name: "a3", Type: 3, Charset: "binary", Flag: 51, Value: 2, Default: nil, -// }, -// } -// recoveredTI := model.BuildTiDBTableInfo(cols, cdcTableInfo.IndexColumnsOffset) -// require.Equal(t, c.recovered, showCreateTable(t, recoveredTI)) -// tableName := &model.TableName{Schema: "db", Table: "t1"} -// rowChange := sqlmodel.NewRowChange(tableName, nil, []interface{}{1, 1, 2}, nil, recoveredTI, nil, nil) -// sqlGot, argsGot := rowChange.GenSQL(sqlmodel.DMLDelete) -// require.Equal(t, "DELETE FROM `db`.`t1` WHERE `a1` = ? AND `a3` = ? LIMIT 1", sqlGot) -// require.Equal(t, []interface{}{1, 2}, argsGot) -// -// sqlGot, argsGot = sqlmodel.GenDeleteSQL(rowChange, rowChange) -// require.Equal(t, "DELETE FROM `db`.`t1` WHERE (`a1` = ? AND `a3` = ?) OR (`a1` = ? 
AND `a3` = ?)", sqlGot) -// require.Equal(t, []interface{}{1, 2, 1, 2}, argsGot) -// } -//} -// -//func TestFormatColVal(t *testing.T) { -// ftTypeFloatNotNull := types.NewFieldType(mysql.TypeFloat) -// ftTypeFloatNotNull.SetFlag(mysql.NotNullFlag) -// col := &timodel.ColumnInfo{FieldType: *ftTypeFloatNotNull} -// -// var datum types.Datum -// -// datum.SetFloat32(123.99) -// value, _, _, err := formatColVal(datum, col) -// require.NoError(t, err) -// require.EqualValues(t, float32(123.99), value) -// -// datum.SetFloat32(float32(math.NaN())) -// value, _, warn, err := formatColVal(datum, col) -// require.NoError(t, err) -// require.Equal(t, float32(0), value) -// require.NotZero(t, warn) -// -// datum.SetFloat32(float32(math.Inf(1))) -// value, _, warn, err = formatColVal(datum, col) -// require.NoError(t, err) -// require.Equal(t, float32(0), value) -// require.NotZero(t, warn) -// -// datum.SetFloat32(float32(math.Inf(-1))) -// value, _, warn, err = formatColVal(datum, col) -// require.NoError(t, err) -// require.Equal(t, float32(0), value) -// require.NotZero(t, warn) -//} +// We use OriginDefaultValue instead of DefaultValue in the ut, pls ref to +// https://github.com/pingcap/tiflow/issues/4048 +// FIXME: OriginDefaultValue seems always to be string, and test more corner case +// Ref: https://github.com/pingcap/tidb/blob/d2c352980a43bb593db81fd1db996f47af596d91/table/column.go#L489 +func TestGetDefaultZeroValue(t *testing.T) { + // Check following MySQL type, ref to: + // https://github.com/pingcap/tidb/blob/master/parser/mysql/type.go + + // mysql flag null + ftNull := types.NewFieldType(mysql.TypeUnspecified) + + // mysql.TypeTiny + notnull + ftTinyIntNotNull := types.NewFieldType(mysql.TypeTiny) + ftTinyIntNotNull.AddFlag(mysql.NotNullFlag) + + // mysql.TypeTiny + notnull + unsigned + ftTinyIntNotNullUnSigned := types.NewFieldType(mysql.TypeTiny) + ftTinyIntNotNullUnSigned.SetFlag(mysql.NotNullFlag) + ftTinyIntNotNullUnSigned.AddFlag(mysql.UnsignedFlag) + + // mysql.TypeTiny + null + ftTinyIntNull := types.NewFieldType(mysql.TypeTiny) + + // mysql.TypeShort + notnull + ftShortNotNull := types.NewFieldType(mysql.TypeShort) + ftShortNotNull.SetFlag(mysql.NotNullFlag) + + // mysql.TypeLong + notnull + ftLongNotNull := types.NewFieldType(mysql.TypeLong) + ftLongNotNull.SetFlag(mysql.NotNullFlag) + + // mysql.TypeLonglong + notnull + ftLongLongNotNull := types.NewFieldType(mysql.TypeLonglong) + ftLongLongNotNull.SetFlag(mysql.NotNullFlag) + + // mysql.TypeInt24 + notnull + ftInt24NotNull := types.NewFieldType(mysql.TypeInt24) + ftInt24NotNull.SetFlag(mysql.NotNullFlag) + + // mysql.TypeFloat + notnull + ftTypeFloatNotNull := types.NewFieldType(mysql.TypeFloat) + ftTypeFloatNotNull.SetFlag(mysql.NotNullFlag) + + // mysql.TypeFloat + notnull + unsigned + ftTypeFloatNotNullUnSigned := types.NewFieldType(mysql.TypeFloat) + ftTypeFloatNotNullUnSigned.SetFlag(mysql.NotNullFlag | mysql.UnsignedFlag) + + // mysql.TypeFloat + null + ftTypeFloatNull := types.NewFieldType(mysql.TypeFloat) + + // mysql.TypeDouble + notnull + ftTypeDoubleNotNull := types.NewFieldType(mysql.TypeDouble) + ftTypeDoubleNotNull.SetFlag(mysql.NotNullFlag) + + // mysql.TypeNewDecimal + notnull + ftTypeNewDecimalNull := types.NewFieldType(mysql.TypeNewDecimal) + ftTypeNewDecimalNull.SetFlen(5) + ftTypeNewDecimalNull.SetDecimal(2) + + // mysql.TypeNewDecimal + notnull + ftTypeNewDecimalNotNull := types.NewFieldType(mysql.TypeNewDecimal) + ftTypeNewDecimalNotNull.SetFlag(mysql.NotNullFlag) + 
+ ftTypeNewDecimalNotNull.SetFlen(5)
+ ftTypeNewDecimalNotNull.SetDecimal(2)
+
+ // mysql.TypeNull
+ ftTypeNull := types.NewFieldType(mysql.TypeNull)
+
+ // mysql.TypeTimestamp + notnull
+ ftTypeTimestampNotNull := types.NewFieldType(mysql.TypeTimestamp)
+ ftTypeTimestampNotNull.SetFlag(mysql.NotNullFlag)
+
+ // mysql.TypeTimestamp + null
+ ftTypeTimestampNull := types.NewFieldType(mysql.TypeTimestamp)
+
+ // mysql.TypeDate + notnull
+ ftTypeDateNotNull := types.NewFieldType(mysql.TypeDate)
+ ftTypeDateNotNull.SetFlag(mysql.NotNullFlag)
+
+ // mysql.TypeDuration + notnull
+ ftTypeDurationNotNull := types.NewFieldType(mysql.TypeDuration)
+ ftTypeDurationNotNull.SetFlag(mysql.NotNullFlag)
+
+ // mysql.TypeDatetime + notnull
+ ftTypeDatetimeNotNull := types.NewFieldType(mysql.TypeDatetime)
+ ftTypeDatetimeNotNull.SetFlag(mysql.NotNullFlag)
+
+ // mysql.TypeYear + notnull
+ ftTypeYearNotNull := types.NewFieldType(mysql.TypeYear)
+ ftTypeYearNotNull.SetFlag(mysql.NotNullFlag)
+
+ // mysql.TypeNewDate + notnull
+ ftTypeNewDateNotNull := types.NewFieldType(mysql.TypeNewDate)
+ ftTypeNewDateNotNull.SetFlag(mysql.NotNullFlag)
+
+ // mysql.TypeVarchar + notnull
+ ftTypeVarcharNotNull := types.NewFieldType(mysql.TypeVarchar)
+ ftTypeVarcharNotNull.SetFlag(mysql.NotNullFlag)
+
+ // mysql.TypeTinyBlob + notnull
+ ftTypeTinyBlobNotNull := types.NewFieldType(mysql.TypeTinyBlob)
+ ftTypeTinyBlobNotNull.SetFlag(mysql.NotNullFlag)
+
+ // mysql.TypeMediumBlob + notnull
+ ftTypeMediumBlobNotNull := types.NewFieldType(mysql.TypeMediumBlob)
+ ftTypeMediumBlobNotNull.SetFlag(mysql.NotNullFlag)
+
+ // mysql.TypeLongBlob + notnull
+ ftTypeLongBlobNotNull := types.NewFieldType(mysql.TypeLongBlob)
+ ftTypeLongBlobNotNull.SetFlag(mysql.NotNullFlag)
+
+ // mysql.TypeBlob + notnull
+ ftTypeBlobNotNull := types.NewFieldType(mysql.TypeBlob)
+ ftTypeBlobNotNull.SetFlag(mysql.NotNullFlag)
+
+ // mysql.TypeVarString + notnull
+ ftTypeVarStringNotNull := types.NewFieldType(mysql.TypeVarString)
+ ftTypeVarStringNotNull.SetFlag(mysql.NotNullFlag)
+
+ // mysql.TypeString + notnull
+ ftTypeStringNotNull := types.NewFieldType(mysql.TypeString)
+ ftTypeStringNotNull.SetFlag(mysql.NotNullFlag)
+
+ // mysql.TypeBit + notnull
+ ftTypeBitNotNull := types.NewFieldType(mysql.TypeBit)
+ ftTypeBitNotNull.SetFlag(mysql.NotNullFlag)
+
+ // mysql.TypeJSON + notnull
+ ftTypeJSONNotNull := types.NewFieldType(mysql.TypeJSON)
+ ftTypeJSONNotNull.SetFlag(mysql.NotNullFlag)
+
+ // mysql.TypeEnum + notnull + nodefault
+ ftTypeEnumNotNull := types.NewFieldType(mysql.TypeEnum)
+ ftTypeEnumNotNull.SetFlag(mysql.NotNullFlag)
+ ftTypeEnumNotNull.SetElems([]string{"e0", "e1"})
+
+ // mysql.TypeEnum + null
+ ftTypeEnumNull := types.NewFieldType(mysql.TypeEnum)
+
+ // mysql.TypeSet + notnull
+ ftTypeSetNotNull := types.NewFieldType(mysql.TypeSet)
+ ftTypeSetNotNull.SetFlag(mysql.NotNullFlag)
+
+ // mysql.TypeGeometry + notnull
+ ftTypeGeometryNotNull := types.NewFieldType(mysql.TypeGeometry)
+ ftTypeGeometryNotNull.SetFlag(mysql.NotNullFlag)
+
+ testCases := []struct {
+ Name string
+ ColInfo timodel.ColumnInfo
+ Res interface{}
+ Default interface{}
+ }{
+ // mysql flag null
+ {
+ Name: "mysql flag null",
+ ColInfo: timodel.ColumnInfo{FieldType: *ftNull},
+ Res: nil,
+ Default: nil,
+ },
+ // mysql.TypeTiny + notnull + nodefault
+ {
+ Name: "mysql.TypeTiny + notnull + nodefault",
+ ColInfo: timodel.ColumnInfo{FieldType: *ftTinyIntNotNull.Clone()},
+ Res: int64(0),
+ Default: nil,
+ },
+ // mysql.TypeTiny + notnull + default
+ {
+ Name: "mysql.TypeTiny + notnull + default",
notnull + default", + ColInfo: timodel.ColumnInfo{ + OriginDefaultValue: -1314, + FieldType: *ftTinyIntNotNull, + }, + Res: int64(-1314), + Default: int64(-1314), + }, + // mysql.TypeTiny + notnull + unsigned + { + Name: "mysql.TypeTiny + notnull + default + unsigned", + ColInfo: timodel.ColumnInfo{FieldType: *ftTinyIntNotNullUnSigned}, + Res: uint64(0), + Default: nil, + }, + // mysql.TypeTiny + notnull + default + unsigned + { + Name: "mysql.TypeTiny + notnull + unsigned", + ColInfo: timodel.ColumnInfo{OriginDefaultValue: uint64(1314), FieldType: *ftTinyIntNotNullUnSigned}, + Res: uint64(1314), + Default: uint64(1314), + }, + // mysql.TypeTiny + null + default + { + Name: "mysql.TypeTiny + null + default", + ColInfo: timodel.ColumnInfo{ + OriginDefaultValue: -1314, + FieldType: *ftTinyIntNull, + }, + Res: int64(-1314), + Default: int64(-1314), + }, + // mysql.TypeTiny + null + nodefault + { + Name: "mysql.TypeTiny + null + nodefault", + ColInfo: timodel.ColumnInfo{FieldType: *ftTinyIntNull}, + Res: nil, + Default: nil, + }, + // mysql.TypeShort, others testCases same as tiny + { + Name: "mysql.TypeShort, others testCases same as tiny", + ColInfo: timodel.ColumnInfo{FieldType: *ftShortNotNull}, + Res: int64(0), + Default: nil, + }, + // mysql.TypeLong, others testCases same as tiny + { + Name: "mysql.TypeLong, others testCases same as tiny", + ColInfo: timodel.ColumnInfo{FieldType: *ftLongNotNull}, + Res: int64(0), + Default: nil, + }, + // mysql.TypeLonglong, others testCases same as tiny + { + Name: "mysql.TypeLonglong, others testCases same as tiny", + ColInfo: timodel.ColumnInfo{FieldType: *ftLongLongNotNull}, + Res: int64(0), + Default: nil, + }, + // mysql.TypeInt24, others testCases same as tiny + { + Name: "mysql.TypeInt24, others testCases same as tiny", + ColInfo: timodel.ColumnInfo{FieldType: *ftInt24NotNull}, + Res: int64(0), + Default: nil, + }, + // mysql.TypeFloat + notnull + nodefault + { + Name: "mysql.TypeFloat + notnull + nodefault", + ColInfo: timodel.ColumnInfo{FieldType: *ftTypeFloatNotNull}, + Res: float32(0), + Default: nil, + }, + // mysql.TypeFloat + notnull + default + { + Name: "mysql.TypeFloat + notnull + default", + ColInfo: timodel.ColumnInfo{ + OriginDefaultValue: float32(-3.1415), + FieldType: *ftTypeFloatNotNull, + }, + Res: float32(-3.1415), + Default: float32(-3.1415), + }, + // mysql.TypeFloat + notnull + default + unsigned + { + Name: "mysql.TypeFloat + notnull + default + unsigned", + ColInfo: timodel.ColumnInfo{ + OriginDefaultValue: float32(3.1415), + FieldType: *ftTypeFloatNotNullUnSigned, + }, + Res: float32(3.1415), + Default: float32(3.1415), + }, + // mysql.TypeFloat + notnull + unsigned + { + Name: "mysql.TypeFloat + notnull + unsigned", + ColInfo: timodel.ColumnInfo{ + FieldType: *ftTypeFloatNotNullUnSigned, + }, + Res: float32(0), + Default: nil, + }, + // mysql.TypeFloat + null + default + { + Name: "mysql.TypeFloat + null + default", + ColInfo: timodel.ColumnInfo{ + OriginDefaultValue: float32(-3.1415), + FieldType: *ftTypeFloatNull, + }, + Res: float32(-3.1415), + Default: float32(-3.1415), + }, + // mysql.TypeFloat + null + nodefault + { + Name: "mysql.TypeFloat + null + nodefault", + ColInfo: timodel.ColumnInfo{ + FieldType: *ftTypeFloatNull, + }, + Res: nil, + Default: nil, + }, + // mysql.TypeDouble, other testCases same as float + { + Name: "mysql.TypeDouble, other testCases same as float", + ColInfo: timodel.ColumnInfo{FieldType: *ftTypeDoubleNotNull}, + Res: float64(0), + Default: nil, + }, + // mysql.TypeNewDecimal + notnull + 
+ {
+ Name: "mysql.TypeNewDecimal + notnull + nodefault",
+ ColInfo: timodel.ColumnInfo{FieldType: *ftTypeNewDecimalNotNull},
+ Res: "0", // related to Flen and Decimal
+ Default: nil,
+ },
+ // mysql.TypeNewDecimal + null + nodefault
+ {
+ Name: "mysql.TypeNewDecimal + null + nodefault",
+ ColInfo: timodel.ColumnInfo{FieldType: *ftTypeNewDecimalNull},
+ Res: nil,
+ Default: nil,
+ },
+ // mysql.TypeNewDecimal + notnull + default
+ {
+ Name: "mysql.TypeNewDecimal + notnull + default",
+ ColInfo: timodel.ColumnInfo{
+ OriginDefaultValue: "-3.14", // a string, not a float
+ FieldType: *ftTypeNewDecimalNotNull,
+ },
+ Res: "-3.14",
+ Default: "-3.14",
+ },
+ // mysql.TypeNull
+ {
+ Name: "mysql.TypeNull",
+ ColInfo: timodel.ColumnInfo{FieldType: *ftTypeNull},
+ Res: nil,
+ Default: nil,
+ },
+ // mysql.TypeTimestamp + notnull + nodefault
+ {
+ Name: "mysql.TypeTimestamp + notnull + nodefault",
+ ColInfo: timodel.ColumnInfo{FieldType: *ftTypeTimestampNotNull},
+ Res: "0000-00-00 00:00:00",
+ Default: nil,
+ },
+ // mysql.TypeTimestamp + notnull + default
+ {
+ Name: "mysql.TypeTimestamp + notnull + default",
+ ColInfo: timodel.ColumnInfo{
+ OriginDefaultValue: "2020-11-19 12:12:12",
+ FieldType: *ftTypeTimestampNotNull,
+ },
+ Res: "2020-11-19 12:12:12",
+ Default: "2020-11-19 12:12:12",
+ },
+ // mysql.TypeTimestamp + null + default
+ {
+ Name: "mysql.TypeTimestamp + null + default",
+ ColInfo: timodel.ColumnInfo{
+ OriginDefaultValue: "2020-11-19 12:12:12",
+ FieldType: *ftTypeTimestampNull,
+ },
+ Res: "2020-11-19 12:12:12",
+ Default: "2020-11-19 12:12:12",
+ },
+ // mysql.TypeDate, other test cases same as TypeTimestamp
+ {
+ Name: "mysql.TypeDate, other test cases same as TypeTimestamp",
+ ColInfo: timodel.ColumnInfo{FieldType: *ftTypeDateNotNull},
+ Res: "0000-00-00",
+ Default: nil,
+ },
+ // mysql.TypeDuration, other test cases same as TypeTimestamp
+ {
+ Name: "mysql.TypeDuration, other test cases same as TypeTimestamp",
+ ColInfo: timodel.ColumnInfo{FieldType: *ftTypeDurationNotNull},
+ Res: "00:00:00",
+ Default: nil,
+ },
+ // mysql.TypeDatetime, other test cases same as TypeTimestamp
+ {
+ Name: "mysql.TypeDatetime, other test cases same as TypeTimestamp",
+ ColInfo: timodel.ColumnInfo{FieldType: *ftTypeDatetimeNotNull},
+ Res: "0000-00-00 00:00:00",
+ Default: nil,
+ },
+ // mysql.TypeYear + notnull + nodefault
+ {
+ Name: "mysql.TypeYear + notnull + nodefault",
+ ColInfo: timodel.ColumnInfo{FieldType: *ftTypeYearNotNull},
+ Res: int64(0),
+ Default: nil,
+ },
+ // mysql.TypeYear + notnull + default
+ {
+ Name: "mysql.TypeYear + notnull + default",
+ ColInfo: timodel.ColumnInfo{
+ OriginDefaultValue: "2021",
+ FieldType: *ftTypeYearNotNull,
+ },
+ // TypeYear default value will be a string and then translate to []byte
+ Res: "2021",
+ Default: "2021",
+ },
+ // mysql.TypeNewDate
+ {
+ Name: "mysql.TypeNewDate",
+ ColInfo: timodel.ColumnInfo{FieldType: *ftTypeNewDateNotNull},
+ Res: nil, // TODO: seems not supported by TiDB, need to check
+ Default: nil,
+ },
+ // mysql.TypeVarchar + notnull + nodefault
+ {
+ Name: "mysql.TypeVarchar + notnull + nodefault",
+ ColInfo: timodel.ColumnInfo{FieldType: *ftTypeVarcharNotNull},
+ Res: []byte{},
+ Default: nil,
+ },
+ // mysql.TypeVarchar + notnull + default
+ {
+ Name: "mysql.TypeVarchar + notnull + default",
+ ColInfo: timodel.ColumnInfo{
+ OriginDefaultValue: "e0",
+ FieldType: *ftTypeVarcharNotNull,
+ },
+ // TypeVarchar default value will be a string and then translate to []byte
+ Res: "e0",
+ Default: "e0",
+ },
+ // mysql.TypeTinyBlob
+ {
+ Name:
"mysql.TypeTinyBlob", + ColInfo: timodel.ColumnInfo{FieldType: *ftTypeTinyBlobNotNull}, + Res: []byte{}, + Default: nil, + }, + // mysql.TypeMediumBlob + { + Name: "mysql.TypeMediumBlob", + ColInfo: timodel.ColumnInfo{FieldType: *ftTypeMediumBlobNotNull}, + Res: []byte{}, + Default: nil, + }, + // mysql.TypeLongBlob + { + Name: "mysql.TypeLongBlob", + ColInfo: timodel.ColumnInfo{FieldType: *ftTypeLongBlobNotNull}, + Res: []byte{}, + Default: nil, + }, + // mysql.TypeBlob + { + Name: "mysql.TypeBlob", + ColInfo: timodel.ColumnInfo{FieldType: *ftTypeBlobNotNull}, + Res: []byte{}, + Default: nil, + }, + // mysql.TypeVarString + { + Name: "mysql.TypeVarString", + ColInfo: timodel.ColumnInfo{FieldType: *ftTypeVarStringNotNull}, + Res: []byte{}, + Default: nil, + }, + // mysql.TypeString + { + Name: "mysql.TypeString", + ColInfo: timodel.ColumnInfo{FieldType: *ftTypeStringNotNull}, + Res: []byte{}, + Default: nil, + }, + // mysql.TypeBit + { + Name: "mysql.TypeBit", + ColInfo: timodel.ColumnInfo{FieldType: *ftTypeBitNotNull}, + Res: uint64(0), + Default: nil, + }, + // BLOB, TEXT, GEOMETRY or JSON column can't have a default value + // mysql.TypeJSON + { + Name: "mysql.TypeJSON", + ColInfo: timodel.ColumnInfo{FieldType: *ftTypeJSONNotNull}, + Res: "null", + Default: nil, + }, + // mysql.TypeEnum + notnull + nodefault + { + Name: "mysql.TypeEnum + notnull + nodefault", + ColInfo: timodel.ColumnInfo{FieldType: *ftTypeEnumNotNull}, + // TypeEnum value will be a string and then translate to []byte + // NotNull && no default will choose first element + Res: uint64(1), + Default: nil, + }, + // mysql.TypeEnum + notnull + default + { + Name: "mysql.TypeEnum + notnull + default", + ColInfo: timodel.ColumnInfo{ + OriginDefaultValue: "e1", + FieldType: *ftTypeEnumNotNull, + }, + // TypeEnum default value will be a string and then translate to []byte + Res: "e1", + Default: "e1", + }, + // mysql.TypeEnum + null + { + Name: "mysql.TypeEnum + null", + ColInfo: timodel.ColumnInfo{ + FieldType: *ftTypeEnumNull, + }, + Res: nil, + }, + // mysql.TypeSet + notnull + { + Name: "mysql.TypeSet + notnull", + ColInfo: timodel.ColumnInfo{FieldType: *ftTypeSetNotNull}, + Res: uint64(0), + Default: nil, + }, + // mysql.TypeSet + notnull + default + { + Name: "mysql.TypeSet + notnull + default", + ColInfo: timodel.ColumnInfo{ + OriginDefaultValue: "1,e", + FieldType: *ftTypeSetNotNull, + }, + // TypeSet default value will be a string and then translate to []byte + Res: "1,e", + Default: "1,e", + }, + // mysql.TypeGeometry + { + Name: "mysql.TypeGeometry", + ColInfo: timodel.ColumnInfo{FieldType: *ftTypeGeometryNotNull}, + Res: nil, // not support yet + Default: nil, + }, + } + + for _, tc := range testCases { + _, val, _, _, _ := getDefaultOrZeroValue(&tc.ColInfo) + require.Equal(t, tc.Res, val, tc.Name) + val = GetDDLDefaultDefinition(&tc.ColInfo) + require.Equal(t, tc.Default, val, tc.Name) + } +} + +func TestE2ERowLevelChecksum(t *testing.T) { + // changefeed enable checksum functionality + replicaConfig := config.GetDefaultReplicaConfig() + replicaConfig.Integrity.IntegrityCheckLevel = integrity.CheckLevelCorrectness + + tk := NewTestKit(t, replicaConfig) + defer tk.Close() + + // upstream TiDB enable checksum functionality + tk.MustExec("set global tidb_enable_row_level_checksum = 1") + tk.MustExec("use test") + + filter, err := filter.NewFilter(replicaConfig, "") + require.NoError(t, err) + + ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope) + require.NoError(t, err) + + changefeed := 
model.DefaultChangeFeedID("changefeed-test-decode-row") + schemaStorage, err := NewSchemaStorage(tk.GetCurrentMeta(), + ver.Ver, false, changefeed, util.RoleTester, filter) + require.NoError(t, err) + require.NotNil(t, schemaStorage) + + createTableSQL := `create table t ( + id int primary key auto_increment, + + c_tinyint tinyint null, + c_smallint smallint null, + c_mediumint mediumint null, + c_int int null, + c_bigint bigint null, + + c_unsigned_tinyint tinyint unsigned null, + c_unsigned_smallint smallint unsigned null, + c_unsigned_mediumint mediumint unsigned null, + c_unsigned_int int unsigned null, + c_unsigned_bigint bigint unsigned null, + + c_float float null, + c_double double null, + c_decimal decimal null, + c_decimal_2 decimal(10, 4) null, + + c_unsigned_float float unsigned null, + c_unsigned_double double unsigned null, + c_unsigned_decimal decimal unsigned null, + c_unsigned_decimal_2 decimal(10, 4) unsigned null, + + c_date date null, + c_datetime datetime null, + c_timestamp timestamp null, + c_time time null, + c_year year null, + + c_tinytext tinytext null, + c_text text null, + c_mediumtext mediumtext null, + c_longtext longtext null, + + c_tinyblob tinyblob null, + c_blob blob null, + c_mediumblob mediumblob null, + c_longblob longblob null, + + c_char char(16) null, + c_varchar varchar(16) null, + c_binary binary(16) null, + c_varbinary varbinary(16) null, + + c_enum enum ('a','b','c') null, + c_set set ('a','b','c') null, + c_bit bit(64) null, + c_json json null, + +-- gbk dmls + name varchar(128) CHARACTER SET gbk, + country char(32) CHARACTER SET gbk, + city varchar(64), + description text CHARACTER SET gbk, + image tinyblob +);` + tableInfo := tk.DDL2TableInfo(createTableSQL) + + ts := schemaStorage.GetLastSnapshot().CurrentTs() + schemaStorage.AdvanceResolvedTs(ver.Ver) + + mounter := NewMounter(schemaStorage, changefeed, time.Local, filter, replicaConfig.Integrity).(*mounter) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tk.Session().GetSessionVars().EnableRowLevelChecksum = true + insertDataSQL := `insert into t values ( + 2, + 1, 2, 3, 4, 5, + 1, 2, 3, 4, 5, + 2020.0202, 2020.0303, 2020.0404, 2021.1208, + 3.1415, 2.7182, 8000, 179394.233, + '2020-02-20', '2020-02-20 02:20:20', '2020-02-20 02:20:20', '02:20:20', '2020', + '89504E470D0A1A0A', '89504E470D0A1A0A', '89504E470D0A1A0A', '89504E470D0A1A0A', + x'89504E470D0A1A0A', x'89504E470D0A1A0A', x'89504E470D0A1A0A', x'89504E470D0A1A0A', + '89504E470D0A1A0A', '89504E470D0A1A0A', x'89504E470D0A1A0A', x'89504E470D0A1A0A', + 'b', 'b,c', b'1000001', '{ +"key1": "value1", +"key2": "value2", +"key3": "123" +}', + '测试', "中国", "上海", "你好,世界", 0xC4E3BAC3CAC0BDE7 +);` + tk.MustExec(insertDataSQL) + + key, value := getLastKeyValueInStore(t, tk.Storage(), tableInfo.ID) + rawKV := &model.RawKVEntry{ + OpType: model.OpTypePut, + Key: key, + Value: value, + StartTs: ts - 1, + CRTs: ts + 1, + } + row, err := mounter.unmarshalAndMountRowChanged(ctx, rawKV) + require.NoError(t, err) + require.NotNil(t, row) + require.NotNil(t, row.Checksum) + + expected, ok := mounter.decoder.GetChecksum() + require.True(t, ok) + require.Equal(t, expected, row.Checksum.Current) + require.False(t, row.Checksum.Corrupted) + + // avro encoder enable checksum functionality. 
+ codecConfig := common.NewConfig(config.ProtocolAvro) + codecConfig.EnableTiDBExtension = true + codecConfig.EnableRowChecksum = true + codecConfig.AvroDecimalHandlingMode = "string" + codecConfig.AvroBigintUnsignedHandlingMode = "string" + + avroEncoder, err := avro.SetupEncoderAndSchemaRegistry4Testing(ctx, codecConfig) + defer avro.TeardownEncoderAndSchemaRegistry4Testing() + require.NoError(t, err) + + topic := "test.t" + + err = avroEncoder.AppendRowChangedEvent(ctx, topic, row, func() {}) + require.NoError(t, err) + msg := avroEncoder.Build() + require.Len(t, msg, 1) + + schemaM, err := avro.NewConfluentSchemaManager( + ctx, "http://127.0.0.1:8081", nil) + require.NoError(t, err) + + // decoder enable checksum functionality. + decoder := avro.NewDecoder(codecConfig, schemaM, topic, time.Local) + err = decoder.AddKeyValue(msg[0].Key, msg[0].Value) + require.NoError(t, err) + + messageType, hasNext, err := decoder.HasNext() + require.NoError(t, err) + require.True(t, hasNext) + require.Equal(t, model.MessageTypeRow, messageType) + + row, err = decoder.NextRowChangedEvent() + // no error, checksum verification passed. + require.NoError(t, err) +} + +func TestDecodeRowEnableChecksum(t *testing.T) { + replicaConfig := config.GetDefaultReplicaConfig() + replicaConfig.Integrity.IntegrityCheckLevel = integrity.CheckLevelCorrectness + + tk := NewTestKit(t, replicaConfig) + defer tk.Close() + + tk.MustExec("set global tidb_enable_row_level_checksum = 1") + tk.MustExec("use test") + + filter, err := filter.NewFilter(replicaConfig, "") + require.NoError(t, err) + + ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope) + require.NoError(t, err) + + changefeed := model.DefaultChangeFeedID("changefeed-test-decode-row") + schemaStorage, err := NewSchemaStorage(tk.GetCurrentMeta(), + ver.Ver, false, changefeed, util.RoleTester, filter) + require.NoError(t, err) + require.NotNil(t, schemaStorage) + + createTableDDL := "create table t (id int primary key, a int)" + tableInfo := tk.DDL2TableInfo(createTableDDL) + + ts := schemaStorage.GetLastSnapshot().CurrentTs() + schemaStorage.AdvanceResolvedTs(ver.Ver) + + mounter := NewMounter(schemaStorage, changefeed, time.Local, filter, replicaConfig.Integrity).(*mounter) + + ctx := context.Background() + + // row without checksum + tk.Session().GetSessionVars().EnableRowLevelChecksum = false + tk.MustExec("insert into t values (1, 10)") + + key, value := getLastKeyValueInStore(t, tk.Storage(), tableInfo.ID) + rawKV := &model.RawKVEntry{ + OpType: model.OpTypePut, + Key: key, + Value: value, + StartTs: ts - 1, + CRTs: ts + 1, + } + + row, err := mounter.unmarshalAndMountRowChanged(ctx, rawKV) + require.NoError(t, err) + require.NotNil(t, row) + // the upstream tidb does not enable checksum, so the checksum is nil + require.Nil(t, row.Checksum) + + // row with one checksum + tk.Session().GetSessionVars().EnableRowLevelChecksum = true + tk.MustExec("insert into t values (2, 20)") + + key, value = getLastKeyValueInStore(t, tk.Storage(), tableInfo.ID) + rawKV = &model.RawKVEntry{ + OpType: model.OpTypePut, + Key: key, + Value: value, + StartTs: ts - 1, + CRTs: ts + 1, + } + row, err = mounter.unmarshalAndMountRowChanged(ctx, rawKV) + require.NoError(t, err) + require.NotNil(t, row) + require.NotNil(t, row.Checksum) + + expected, ok := mounter.decoder.GetChecksum() + require.True(t, ok) + require.Equal(t, expected, row.Checksum.Current) + require.False(t, row.Checksum.Corrupted) + + // row with 2 checksum + tk.MustExec("insert into t values (3, 30)") + 
tableInfo = tk.DDL2TableInfo("alter table t change column a a varchar(10)") + + key, value = getLastKeyValueInStore(t, tk.Storage(), tableInfo.ID) + rawKV = &model.RawKVEntry{ + OpType: model.OpTypePut, + Key: key, + Value: value, + StartTs: ts - 1, + CRTs: ts + 1, + } + row, err = mounter.unmarshalAndMountRowChanged(ctx, rawKV) + require.NoError(t, err) + require.NotNil(t, row) + require.NotNil(t, row.Checksum) + + first, ok := mounter.decoder.GetChecksum() + require.True(t, ok) + + extra, ok := mounter.decoder.GetExtraChecksum() + require.True(t, ok) + + if row.Checksum.Current != first { + require.Equal(t, extra, row.Checksum.Current) + } else { + require.Equal(t, first, row.Checksum.Current) + } + require.False(t, row.Checksum.Corrupted) + + // hack the table info to make the checksum corrupted + tableInfo.Columns[0].FieldType = *types.NewFieldType(mysql.TypeVarchar) + + // corrupt-handle-level default to warn, so no error, but the checksum is corrupted + row, err = mounter.unmarshalAndMountRowChanged(ctx, rawKV) + require.NoError(t, err) + require.NotNil(t, row.Checksum) + require.True(t, row.Checksum.Corrupted) + + mounter.integrity.CorruptionHandleLevel = integrity.CorruptionHandleLevelError + _, err = mounter.unmarshalAndMountRowChanged(ctx, rawKV) + require.Error(t, err) + require.ErrorIs(t, err, cerror.ErrCorruptedDataMutation) + + _ = tk.DDL2TableInfo("drop table t") +} + +func TestDecodeRow(t *testing.T) { + replicaConfig := config.GetDefaultReplicaConfig() + + tk := NewTestKit(t, replicaConfig) + defer tk.Close() + + tk.MustExec("set @@tidb_enable_clustered_index=1;") + tk.MustExec("use test;") + + changefeed := model.DefaultChangeFeedID("changefeed-test-decode-row") + + ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope) + require.NoError(t, err) + + filter, err := filter.NewFilter(replicaConfig, "") + require.NoError(t, err) + + schemaStorage, err := NewSchemaStorage(tk.GetCurrentMeta(), + ver.Ver, false, changefeed, util.RoleTester, filter) + require.NoError(t, err) + + // apply ddl to schemaStorage + ddl := "create table test.student(id int primary key, name char(50), age int, gender char(10))" + job := tk.DDL2Job(ddl) + err = schemaStorage.HandleDDLJob(job) + require.NoError(t, err) + + ts := schemaStorage.GetLastSnapshot().CurrentTs() + + schemaStorage.AdvanceResolvedTs(ver.Ver) + + mounter := NewMounter(schemaStorage, changefeed, time.Local, filter, replicaConfig.Integrity).(*mounter) + + tk.MustExec(`insert into student values(1, "dongmen", 20, "male")`) + tk.MustExec(`update student set age = 27 where id = 1`) + + ctx := context.Background() + decodeAndCheckRowInTable := func(tableID int64, f func(key []byte, value []byte) *model.RawKVEntry) { + walkTableSpanInStore(t, tk.Storage(), tableID, func(key []byte, value []byte) { + rawKV := f(key, value) + + row, err := mounter.unmarshalAndMountRowChanged(ctx, rawKV) + require.NoError(t, err) + require.NotNil(t, row) + + if row.Columns != nil { + require.NotNil(t, mounter.decoder) + } + + if row.PreColumns != nil { + require.NotNil(t, mounter.preDecoder) + } + }) + } + + toRawKV := func(key []byte, value []byte) *model.RawKVEntry { + return &model.RawKVEntry{ + OpType: model.OpTypePut, + Key: key, + Value: value, + StartTs: ts - 1, + CRTs: ts + 1, + } + } + + tableInfo, ok := schemaStorage.GetLastSnapshot().TableByName("test", "student") + require.True(t, ok) + + decodeAndCheckRowInTable(tableInfo.ID, toRawKV) + decodeAndCheckRowInTable(tableInfo.ID, toRawKV) + + job = tk.DDL2Job("drop table student") + err = 
schemaStorage.HandleDDLJob(job)
+ require.NoError(t, err)
+}
+
+// TestDecodeEventIgnoreRow tests that a PolymorphicEvent.Row is nil
+// if the event should be filtered out by the filter.
+func TestDecodeEventIgnoreRow(t *testing.T) {
+ replicaConfig := config.GetDefaultReplicaConfig()
+ replicaConfig.Filter.Rules = []string{"test.student", "test.computer"}
+
+ tk := NewTestKit(t, replicaConfig)
+ defer tk.Close()
+ tk.MustExec("use test;")
+
+ ddls := []string{
+ "create table test.student(id int primary key, name char(50), age int, gender char(10))",
+ "create table test.computer(id int primary key, brand char(50), price int)",
+ "create table test.poet(id int primary key, name char(50), works char(100))",
+ }
+
+ cfID := model.DefaultChangeFeedID("changefeed-test-ignore-event")
+
+ f, err := filter.NewFilter(replicaConfig, "")
+ require.NoError(t, err)
+ ver, err := tk.Storage().CurrentVersion(oracle.GlobalTxnScope)
+ require.NoError(t, err)
+
+ schemaStorage, err := NewSchemaStorage(tk.GetCurrentMeta(),
+ ver.Ver, false, cfID, util.RoleTester, f)
+ require.NoError(t, err)
+ // apply the DDLs to schemaStorage
+ for _, ddl := range ddls {
+ job := tk.DDL2Job(ddl)
+ err = schemaStorage.HandleDDLJob(job)
+ require.NoError(t, err)
+ }
+
+ ts := schemaStorage.GetLastSnapshot().CurrentTs()
+ schemaStorage.AdvanceResolvedTs(ver.Ver)
+ mounter := NewMounter(schemaStorage, cfID, time.Local, f, replicaConfig.Integrity).(*mounter)
+
+ type testCase struct {
+ schema string
+ table string
+ columns []interface{}
+ ignored bool
+ }
+
+ testCases := []testCase{
+ {
+ schema: "test",
+ table: "student",
+ columns: []interface{}{1, "dongmen", 20, "male"},
+ ignored: false,
+ },
+ {
+ schema: "test",
+ table: "computer",
+ columns: []interface{}{1, "apple", 19999},
+ ignored: false,
+ },
+ // This case should be ignored by its table name.
+ {
+ schema: "test",
+ table: "poet",
+ columns: []interface{}{1, "李白", "静夜思"},
+ ignored: true,
+ },
+ }
+
+ ignoredTables := make([]string, 0)
+ tables := make([]string, 0)
+ for _, tc := range testCases {
+ tableInfo, ok := schemaStorage.GetLastSnapshot().TableByName(tc.schema, tc.table)
+ require.True(t, ok)
+ // TODO: add other DML event types
+ insertSQL := prepareInsertSQL(t, tableInfo, len(tc.columns))
+ if tc.ignored {
+ ignoredTables = append(ignoredTables, tc.table)
+ } else {
+ tables = append(tables, tc.table)
+ }
+ tk.MustExec(insertSQL, tc.columns...)
+ }
+ ctx := context.Background()
+
+ decodeAndCheckRowInTable := func(tableID int64, f func(key []byte, value []byte) *model.RawKVEntry) int {
+ var rows int
+ walkTableSpanInStore(t, tk.Storage(), tableID, func(key []byte, value []byte) {
+ rawKV := f(key, value)
+ pEvent := model.NewPolymorphicEvent(rawKV)
+ err := mounter.DecodeEvent(ctx, pEvent)
+ require.NoError(t, err)
+ if pEvent.Row == nil {
+ return
+ }
+ row := pEvent.Row
+ rows++
+ require.Equal(t, row.Table.Schema, "test")
+ // Now we only allow filtering DML events by table, so we only check the row's table.
+ require.NotContains(t, ignoredTables, row.Table.Table) + require.Contains(t, tables, row.Table.Table) + }) + return rows + } + + toRawKV := func(key []byte, value []byte) *model.RawKVEntry { + return &model.RawKVEntry{ + OpType: model.OpTypePut, + Key: key, + Value: value, + StartTs: ts - 1, + CRTs: ts + 1, + } + } + + for _, tc := range testCases { + tableInfo, ok := schemaStorage.GetLastSnapshot().TableByName(tc.schema, tc.table) + require.True(t, ok) + decodeAndCheckRowInTable(tableInfo.ID, toRawKV) + } +} + +func TestBuildTableInfo(t *testing.T) { + cases := []struct { + origin string + recovered string + recoveredWithNilCol string + }{ + { + "CREATE TABLE t1 (c INT PRIMARY KEY)", + "CREATE TABLE `BuildTiDBTableInfo` (\n" + + " `c` int(0) NOT NULL,\n" + + " PRIMARY KEY (`c`(0)) /*T![clustered_index] CLUSTERED */\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", + "CREATE TABLE `BuildTiDBTableInfo` (\n" + + " `c` int(0) NOT NULL,\n" + + " PRIMARY KEY (`c`(0)) /*T![clustered_index] CLUSTERED */\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", + }, + { + "CREATE TABLE t1 (" + + " c INT UNSIGNED," + + " c2 VARCHAR(10) NOT NULL," + + " c3 BIT(10) NOT NULL," + + " UNIQUE KEY (c2, c3)" + + ")", + // CDC discards field length. + "CREATE TABLE `BuildTiDBTableInfo` (\n" + + " `c` int(0) unsigned DEFAULT NULL,\n" + + " `c2` varchar(0) NOT NULL,\n" + + " `c3` bit(0) NOT NULL,\n" + + " UNIQUE KEY `idx_0` (`c2`(0),`c3`(0))\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", + "CREATE TABLE `BuildTiDBTableInfo` (\n" + + " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + + " `c2` varchar(0) NOT NULL,\n" + + " `c3` bit(0) NOT NULL,\n" + + " UNIQUE KEY `idx_0` (`c2`(0),`c3`(0))\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", + }, + { + "CREATE TABLE t1 (" + + " c INT UNSIGNED," + + " gen INT AS (c+1) VIRTUAL," + + " c2 VARCHAR(10) NOT NULL," + + " gen2 INT AS (c+2) STORED," + + " c3 BIT(10) NOT NULL," + + " PRIMARY KEY (c, c2)" + + ")", + // CDC discards virtual generated column, and generating expression of stored generated column. 
+ "CREATE TABLE `BuildTiDBTableInfo` (\n" + + " `c` int(0) unsigned NOT NULL,\n" + + " `c2` varchar(0) NOT NULL,\n" + + " `gen2` int(0) GENERATED ALWAYS AS (pass_generated_check) STORED,\n" + + " `c3` bit(0) NOT NULL,\n" + + " PRIMARY KEY (`c`(0),`c2`(0)) /*T![clustered_index] CLUSTERED */\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", + "CREATE TABLE `BuildTiDBTableInfo` (\n" + + " `c` int(0) unsigned NOT NULL,\n" + + " `c2` varchar(0) NOT NULL,\n" + + " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + + " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + + " PRIMARY KEY (`c`(0),`c2`(0)) /*T![clustered_index] CLUSTERED */\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", + }, + { + "CREATE TABLE `t1` (" + + " `a` int(11) NOT NULL," + + " `b` int(11) DEFAULT NULL," + + " `c` int(11) DEFAULT NULL," + + " PRIMARY KEY (`a`) /*T![clustered_index] CLUSTERED */," + + " UNIQUE KEY `b` (`b`)" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", + "CREATE TABLE `BuildTiDBTableInfo` (\n" + + " `a` int(0) NOT NULL,\n" + + " `b` int(0) DEFAULT NULL,\n" + + " `c` int(0) DEFAULT NULL,\n" + + " PRIMARY KEY (`a`(0)) /*T![clustered_index] CLUSTERED */,\n" + + " UNIQUE KEY `idx_1` (`b`(0))\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", + "CREATE TABLE `BuildTiDBTableInfo` (\n" + + " `a` int(0) NOT NULL,\n" + + " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + + " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + + " PRIMARY KEY (`a`(0)) /*T![clustered_index] CLUSTERED */\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", + }, + { // This case is to check the primary key is correctly identified by BuildTiDBTableInfo + "CREATE TABLE your_table (" + + " id INT NOT NULL," + + " name VARCHAR(50) NOT NULL," + + " email VARCHAR(100) NOT NULL," + + " age INT NOT NULL ," + + " address VARCHAR(200) NOT NULL," + + " PRIMARY KEY (id, name)," + + " UNIQUE INDEX idx_unique_1 (id, email, age)," + + " UNIQUE INDEX idx_unique_2 (name, email, address)" + + " );", + "CREATE TABLE `BuildTiDBTableInfo` (\n" + + " `id` int(0) NOT NULL,\n" + + " `name` varchar(0) NOT NULL,\n" + + " `email` varchar(0) NOT NULL,\n" + + " `age` int(0) NOT NULL,\n" + + " `address` varchar(0) NOT NULL,\n" + + " PRIMARY KEY (`id`(0),`name`(0)) /*T![clustered_index] CLUSTERED */,\n" + + " UNIQUE KEY `idx_1` (`id`(0),`email`(0),`age`(0)),\n" + + " UNIQUE KEY `idx_2` (`name`(0),`email`(0),`address`(0))\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", + "CREATE TABLE `BuildTiDBTableInfo` (\n" + + " `id` int(0) NOT NULL,\n" + + " `name` varchar(0) NOT NULL,\n" + + " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + + " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + + " `omitted` unspecified GENERATED ALWAYS AS (pass_generated_check) VIRTUAL,\n" + + " PRIMARY KEY (`id`(0),`name`(0)) /*T![clustered_index] CLUSTERED */,\n" + + " UNIQUE KEY `idx_1` (`id`(0),`omitted`(0),`omitted`(0)),\n" + + " UNIQUE KEY `idx_2` (`name`(0),`omitted`(0),`omitted`(0))\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", + }, + } + p := parser.New() + for i, c := range cases { + stmt, err := p.ParseOneStmt(c.origin, "", "") + require.NoError(t, err) + originTI, err := ddl.BuildTableInfoFromAST(stmt.(*ast.CreateTableStmt)) + require.NoError(t, err) + cdcTableInfo := 
model.WrapTableInfo(0, "test", 0, originTI) + cols, _, _, _, err := datum2Column(cdcTableInfo, map[int64]types.Datum{}) + require.NoError(t, err) + recoveredTI := model.BuildTiDBTableInfo(cols, cdcTableInfo.IndexColumnsOffset) + handle := sqlmodel.GetWhereHandle(recoveredTI, recoveredTI) + require.NotNil(t, handle.UniqueNotNullIdx) + require.Equal(t, c.recovered, showCreateTable(t, recoveredTI)) + // make sure BuildTiDBTableInfo indentify the correct primary key + if i == 5 { + inexes := recoveredTI.Indices + primaryCount := 0 + for i := range inexes { + if inexes[i].Primary { + primaryCount++ + } + } + require.Equal(t, 1, primaryCount) + require.Equal(t, 2, len(handle.UniqueNotNullIdx.Columns)) + } + // mimic the columns are set to nil when old value feature is disabled + for i := range cols { + if !cols[i].Flag.IsHandleKey() { + cols[i] = nil + } + } + recoveredTI = model.BuildTiDBTableInfo(cols, cdcTableInfo.IndexColumnsOffset) + handle = sqlmodel.GetWhereHandle(recoveredTI, recoveredTI) + require.NotNil(t, handle.UniqueNotNullIdx) + require.Equal(t, c.recoveredWithNilCol, showCreateTable(t, recoveredTI)) + } +} + +var tiCtx = mock.NewContext() + +func showCreateTable(t *testing.T, ti *timodel.TableInfo) string { + result := bytes.NewBuffer(make([]byte, 0, 512)) + err := executor.ConstructResultOfShowCreateTable(tiCtx, ti, autoid.Allocators{}, result) + require.NoError(t, err) + return result.String() +} + +func TestNewDMRowChange(t *testing.T) { + cases := []struct { + origin string + recovered string + }{ + { + "CREATE TABLE t1 (id INT," + + " a1 INT NOT NULL," + + " a3 INT NOT NULL," + + " UNIQUE KEY dex1(a1, a3));", + "CREATE TABLE `BuildTiDBTableInfo` (\n" + + " `id` int(0) DEFAULT NULL,\n" + + " `a1` int(0) NOT NULL,\n" + + " `a3` int(0) NOT NULL,\n" + + " UNIQUE KEY `idx_0` (`a1`(0),`a3`(0))\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", + }, + } + p := parser.New() + for _, c := range cases { + stmt, err := p.ParseOneStmt(c.origin, "", "") + require.NoError(t, err) + originTI, err := ddl.BuildTableInfoFromAST(stmt.(*ast.CreateTableStmt)) + require.NoError(t, err) + cdcTableInfo := model.WrapTableInfo(0, "test", 0, originTI) + cols := []*model.Column{ + { + Name: "id", Type: 3, Charset: "binary", Flag: 65, Value: 1, Default: nil, + }, + { + Name: "a1", Type: 3, Charset: "binary", Flag: 51, Value: 1, Default: nil, + }, + { + Name: "a3", Type: 3, Charset: "binary", Flag: 51, Value: 2, Default: nil, + }, + } + recoveredTI := model.BuildTiDBTableInfo(cols, cdcTableInfo.IndexColumnsOffset) + require.Equal(t, c.recovered, showCreateTable(t, recoveredTI)) + tableName := &model.TableName{Schema: "db", Table: "t1"} + rowChange := sqlmodel.NewRowChange(tableName, nil, []interface{}{1, 1, 2}, nil, recoveredTI, nil, nil) + sqlGot, argsGot := rowChange.GenSQL(sqlmodel.DMLDelete) + require.Equal(t, "DELETE FROM `db`.`t1` WHERE `a1` = ? AND `a3` = ? LIMIT 1", sqlGot) + require.Equal(t, []interface{}{1, 2}, argsGot) + + sqlGot, argsGot = sqlmodel.GenDeleteSQL(rowChange, rowChange) + require.Equal(t, "DELETE FROM `db`.`t1` WHERE (`a1` = ? AND `a3` = ?) OR (`a1` = ? 
AND `a3` = ?)", sqlGot) + require.Equal(t, []interface{}{1, 2, 1, 2}, argsGot) + } +} + +func TestFormatColVal(t *testing.T) { + ftTypeFloatNotNull := types.NewFieldType(mysql.TypeFloat) + ftTypeFloatNotNull.SetFlag(mysql.NotNullFlag) + col := &timodel.ColumnInfo{FieldType: *ftTypeFloatNotNull} + + var datum types.Datum + + datum.SetFloat32(123.99) + value, _, _, err := formatColVal(datum, col) + require.NoError(t, err) + require.EqualValues(t, float32(123.99), value) + + datum.SetFloat32(float32(math.NaN())) + value, _, warn, err := formatColVal(datum, col) + require.NoError(t, err) + require.Equal(t, float32(0), value) + require.NotZero(t, warn) + + datum.SetFloat32(float32(math.Inf(1))) + value, _, warn, err = formatColVal(datum, col) + require.NoError(t, err) + require.Equal(t, float32(0), value) + require.NotZero(t, warn) + + datum.SetFloat32(float32(math.Inf(-1))) + value, _, warn, err = formatColVal(datum, col) + require.NoError(t, err) + require.Equal(t, float32(0), value) + require.NotZero(t, warn) +} diff --git a/cdc/entry/schema_storage_test.go b/cdc/entry/schema_storage_test.go index d249617d5ea..6d2783ec2d1 100644 --- a/cdc/entry/schema_storage_test.go +++ b/cdc/entry/schema_storage_test.go @@ -837,7 +837,7 @@ func TestSchemaStorage(t *testing.T) { f, err := filter.NewFilter(config.GetDefaultReplicaConfig(), "") require.NoError(t, err) - jobs, err := tk.GetAllHistoryDDLJob(f) + jobs, err := tk.GetAllHistoryDDLJob() require.NoError(t, err) schemaStorage, err := NewSchemaStorage(nil, 0, false, model.DefaultChangeFeedID("dummy"), util.RoleTester, f) @@ -924,16 +924,17 @@ func TestGetPrimaryKey(t *testing.T) { defer tk.Close() sql := `create table test.t1(a int primary key, b int)` - job := tk.DDL2Job(sql) - tableInfo := model.WrapTableInfo(0, "test", 0, job.BinlogInfo.TableInfo) + tableInfo := tk.DDL2TableInfo(sql) + //tableInfo := model.WrapTableInfo(0, "test", 0, job.BinlogInfo.TableInfo) names := tableInfo.GetPrimaryKeyColumnNames() require.Len(t, names, 1) require.Containsf(t, names, "a", "names: %v", names) sql = `create table test.t2(a int, b int, c int, primary key(a, b))` - job = tk.DDL2Job(sql) - tableInfo = model.WrapTableInfo(0, "test", 0, job.BinlogInfo.TableInfo) + //job = tk.DDL2Job(sql) + //tableInfo = model.WrapTableInfo(0, "test", 0, job.BinlogInfo.TableInfo) + tableInfo = tk.DDL2TableInfo(sql) names = tableInfo.GetPrimaryKeyColumnNames() require.Len(t, names, 2) diff --git a/cdc/entry/testkit.go b/cdc/entry/testkit.go index 0f6be0bef38..049b6e4f13a 100644 --- a/cdc/entry/testkit.go +++ b/cdc/entry/testkit.go @@ -95,15 +95,57 @@ func NewTestKit(t *testing.T, replicaConfig *config.ReplicaConfig) *TestKit { } } +func (tk *TestKit) GetAllEventsByTable(schema, table string) []*model.RowChangedEvent { + tableInfo, ok := tk.schemaStorage.GetLastSnapshot().TableByName(schema, table) + require.True(tk.t, ok) + + var tableIDs []int64 + tableIDs = append(tableIDs, tableInfo.ID) + partitionInfo := tableInfo.GetPartitionInfo() + if partitionInfo != nil { + for _, partition := range partitionInfo.Definitions { + tableIDs = append(tableIDs, partition.ID) + } + } + + result := make([]*model.RowChangedEvent, 0) + for _, tableID := range tableIDs { + tk.iterKeyValue(tableID, func(key []byte, value []byte) { + ts := tk.schemaStorage.GetLastSnapshot().CurrentTs() + rawKV := &model.RawKVEntry{ + OpType: model.OpTypePut, + Key: key, + Value: value, + OldValue: nil, + StartTs: ts - 1, + CRTs: ts + 1, + } + polymorphicEvent := model.NewPolymorphicEvent(rawKV) + err := 
+ require.NoError(tk.t, err)
+ result = append(result, polymorphicEvent.Row)
+ })
+ }
+ return result
+}
+
 func (tk *TestKit) DML2Event(dml string, schema, table string) *model.RowChangedEvent {
 tk.MustExec(dml)
- tableID, ok := tk.schemaStorage.GetLastSnapshot().TableIDByName(schema, table)
+
+ tableInfo, ok := tk.schemaStorage.GetLastSnapshot().TableByName(schema, table)
 require.True(tk.t, ok)
- key, value := tk.getLastKeyValue(tableID)
+ key, value := tk.getLastKeyValue(tableInfo.ID)
 ts := tk.schemaStorage.GetLastSnapshot().CurrentTs()
 rawKV := &model.RawKVEntry{
- // todo: assume the operation is put at the moment, can we infer it from the DML ?
 OpType: model.OpTypePut,
 Key: key,
 Value: value,
@@ -117,6 +151,27 @@ func (tk *TestKit) DML2Event(dml string, schema, table string) *model.RowChanged
 return polymorphicEvent.Row
 }
+func (tk *TestKit) DeleteDML2Event(deleteDML string, schema, table string) *model.RowChangedEvent {
+ tk.MustExec(deleteDML)
+ tableID, ok := tk.schemaStorage.GetLastSnapshot().TableIDByName(schema, table)
+ require.True(tk.t, ok)
+ key, value := tk.getLastKeyValue(tableID)
+
+ ts := tk.schemaStorage.GetLastSnapshot().CurrentTs()
+ rawKV := &model.RawKVEntry{
+ OpType: model.OpTypeDelete,
+ Key: key,
+ Value: value,
+ OldValue: nil,
+ StartTs: ts - 1,
+ CRTs: ts,
+ }
+ polymorphicEvent := model.NewPolymorphicEvent(rawKV)
+ err := tk.mounter.DecodeEvent(context.Background(), polymorphicEvent)
+ require.NoError(tk.t, err)
+ return polymorphicEvent.Row
+}
+
 func (tk *TestKit) getLastKeyValue(tableID int64) (key, value []byte) {
 txn, err := tk.storage.Begin()
 require.NoError(tk.t, err)
@@ -135,6 +190,22 @@ func (tk *TestKit) getLastKeyValue(tableID int64) (key, value []byte) {
 return key, value
 }
+func (tk *TestKit) iterKeyValue(tableID int64, f func(key []byte, value []byte)) {
+ txn, err := tk.storage.Begin()
+ require.NoError(tk.t, err)
+ defer txn.Rollback() //nolint:errcheck
+
+ startKey, endKey := spanz.GetTableRange(tableID)
+ iter, err := txn.Iter(startKey, endKey)
+ require.NoError(tk.t, err)
+ defer iter.Close()
+ for iter.Valid() {
+ f(iter.Key(), iter.Value())
+ err = iter.Next()
+ require.NoError(tk.t, err)
+ }
+}
+
 // DDL2TableInfo executes the DDL stmt and returns the new table info
 func (tk *TestKit) DDL2TableInfo(ddl string) *model.TableInfo {
 tk.MustExec(ddl)
@@ -178,11 +249,11 @@ func (tk *TestKit) DDL2TableInfo(ddl string) *model.TableInfo {
 rawArgs, err := json.Marshal(args)
 require.NoError(tk.t, err)
 res.RawArgs = rawArgs
-
- err = tk.schemaStorage.HandleDDLJob(res)
- require.NoError(tk.t, err)
 }
+ err = tk.schemaStorage.HandleDDLJob(res)
+ require.NoError(tk.t, err)
+
 ver, err := tk.storage.CurrentVersion(oracle.GlobalTxnScope)
 require.NoError(tk.t, err)
 tk.schemaStorage.AdvanceResolvedTs(ver.Ver)
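For reference, a minimal sketch of how a test could drive the TestKit helpers added above; the table name, columns, and values are illustrative and not part of the patch:

func TestKitUsageSketch(t *testing.T) {
	// Build a TestKit backed by the mock store and embedded session.
	tk := NewTestKit(t, config.GetDefaultReplicaConfig())
	defer tk.Close()
	tk.MustExec("use test")

	// DDL2TableInfo executes the DDL, feeds the job into the schema storage,
	// advances the resolved ts, and returns the resulting table info.
	tableInfo := tk.DDL2TableInfo("create table test.t(id int primary key, a int)")
	require.NotNil(t, tableInfo)

	// DML2Event executes the DML and mounts the table's last written
	// key/value pair into a RowChangedEvent.
	insertedRow := tk.DML2Event("insert into test.t values (1, 2)", "test", "t")
	require.NotNil(t, insertedRow)

	// GetAllEventsByTable scans the whole table span, including partitions,
	// and returns every mounted event.
	events := tk.GetAllEventsByTable("test", "t")
	require.NotEmpty(t, events)

	// DeleteDML2Event does the same as DML2Event for deletes, using
	// model.OpTypeDelete.
	deletedRow := tk.DeleteDML2Event("delete from test.t where id = 1", "test", "t")
	require.NotNil(t, deletedRow)
}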