From b0a608df42713319c829de93efc245d4f909af95 Mon Sep 17 00:00:00 2001 From: qupeng Date: Sat, 16 Dec 2023 10:09:20 +0800 Subject: [PATCH 1/2] This is an automated cherry-pick of #10139 Signed-off-by: ti-chi-bot --- cdc/api/v2/model.go | 21 ++++++++++++++++++++ cdc/api/v2/model_test.go | 4 ++++ cdc/processor/sinkmanager/manager.go | 24 ++++++++++++++++++----- cdc/processor/sinkmanager/manager_test.go | 5 ++++- dm/tests/lightning_mode/run.sh | 10 +++++----- dm/tests/tls/run.sh | 24 +++++++++++------------ pkg/config/config_test_data.go | 18 ++++++++++++++--- pkg/config/consistent.go | 11 +++++++++++ pkg/config/replica_config.go | 4 ++++ 9 files changed, 95 insertions(+), 26 deletions(-) diff --git a/cdc/api/v2/model.go b/cdc/api/v2/model.go index 7691e0557eb..c8b718e6201 100644 --- a/cdc/api/v2/model.go +++ b/cdc/api/v2/model.go @@ -274,6 +274,12 @@ func (c *ReplicaConfig) toInternalReplicaConfigWithOriginConfig( Compression: c.Consistent.Compression, FlushConcurrency: c.Consistent.FlushConcurrency, } + if c.Consistent.MemoryUsage != nil { + res.Consistent.MemoryUsage = &config.ConsistentMemoryUsage{ + MemoryQuotaPercentage: c.Consistent.MemoryUsage.MemoryQuotaPercentage, + EventCachePercentage: c.Consistent.MemoryUsage.EventCachePercentage, + } + } } if c.Sink != nil { var dispatchRules []*config.DispatchRule @@ -505,7 +511,14 @@ func ToAPIReplicaConfig(c *config.ReplicaConfig) *ReplicaConfig { Compression: cloned.Consistent.Compression, FlushConcurrency: cloned.Consistent.FlushConcurrency, } + if cloned.Consistent.MemoryUsage != nil { + res.Consistent.MemoryUsage = &ConsistentMemoryUsage{ + MemoryQuotaPercentage: cloned.Consistent.MemoryUsage.MemoryQuotaPercentage, + EventCachePercentage: cloned.Consistent.MemoryUsage.EventCachePercentage, + } + } } + if cloned.Mounter != nil { res.Mounter = &MounterConfig{ WorkerNum: cloned.Mounter.WorkerNum, @@ -693,6 +706,14 @@ type ConsistentConfig struct { UseFileBackend bool `json:"use_file_backend"` Compression string `json:"compression,omitempty"` FlushConcurrency int `json:"flush_concurrency,omitempty"` + + MemoryUsage *ConsistentMemoryUsage `json:"memory_usage"` +} + +// ConsistentMemoryUsage represents memory usage of Consistent module. +type ConsistentMemoryUsage struct { + MemoryQuotaPercentage uint64 `json:"memory_quota_percentage"` + EventCachePercentage uint64 `json:"event_cache_percentage"` } // ChangefeedSchedulerConfig is per changefeed scheduler settings. diff --git a/cdc/api/v2/model_test.go b/cdc/api/v2/model_test.go index 0ef9b028e54..e15c483f895 100644 --- a/cdc/api/v2/model_test.go +++ b/cdc/api/v2/model_test.go @@ -64,6 +64,10 @@ var defaultAPIConfig = &ReplicaConfig{ FlushWorkerNum: redo.DefaultFlushWorkerNum, Storage: "", UseFileBackend: false, + MemoryUsage: &ConsistentMemoryUsage{ + MemoryQuotaPercentage: 50, + EventCachePercentage: 0, + }, }, ChangefeedErrorStuckDuration: &JSONDuration{config. 
GetDefaultReplicaConfig().ChangefeedErrorStuckDuration}, diff --git a/cdc/processor/sinkmanager/manager.go b/cdc/processor/sinkmanager/manager.go index 4e308b2775b..2d4a0eb1c6a 100644 --- a/cdc/processor/sinkmanager/manager.go +++ b/cdc/processor/sinkmanager/manager.go @@ -160,6 +160,7 @@ func New( metricsTableSinkTotalRows: metricsTableSinkTotalRows, } + totalQuota := changefeedInfo.Config.MemoryQuota if redoDMLMgr != nil && redoDMLMgr.Enabled() { m.redoDMLMgr = redoDMLMgr m.redoProgressHeap = newTableProgresses() @@ -167,17 +168,27 @@ func New( m.redoTaskChan = make(chan *redoTask) m.redoWorkerAvailable = make(chan struct{}, 1) - // Use 3/4 memory quota as redo quota, and 1/2 again for redo cache. - m.sinkMemQuota = memquota.NewMemQuota(changefeedID, changefeedInfo.Config.MemoryQuota/4*1, "sink") - redoQuota := changefeedInfo.Config.MemoryQuota / 4 * 3 + consistentMemoryUsage := changefeedInfo.Config.Consistent.MemoryUsage + if consistentMemoryUsage == nil { + consistentMemoryUsage = config.GetDefaultReplicaConfig().Consistent.MemoryUsage + } + + redoQuota := totalQuota * consistentMemoryUsage.MemoryQuotaPercentage / 100 + sinkQuota := totalQuota - redoQuota + m.sinkMemQuota = memquota.NewMemQuota(changefeedID, sinkQuota, "sink") m.redoMemQuota = memquota.NewMemQuota(changefeedID, redoQuota, "redo") - m.eventCache = newRedoEventCache(changefeedID, redoQuota/2*1) + + eventCache := redoQuota * consistentMemoryUsage.EventCachePercentage / 100 + if eventCache > 0 { + m.eventCache = newRedoEventCache(changefeedID, eventCache) + } } else { - m.sinkMemQuota = memquota.NewMemQuota(changefeedID, changefeedInfo.Config.MemoryQuota, "sink") + m.sinkMemQuota = memquota.NewMemQuota(changefeedID, totalQuota, "sink") m.redoMemQuota = memquota.NewMemQuota(changefeedID, 0, "redo") } m.ready = make(chan struct{}) +<<<<<<< HEAD m.wg.Add(1) // So `SinkManager.Close` will also wait the subroutine. go func() { if err := m.run(ctx, warnChan); err != nil && errors.Cause(err) != context.Canceled { @@ -189,6 +200,9 @@ func New( }() <-m.ready return m, nil +======= + return m +>>>>>>> 844b41bed4 (redo(cdc): custom redo event cache and disable it by default (#10139)) } func (m *SinkManager) run(ctx context.Context, warnings ...chan<- error) (err error) { diff --git a/cdc/processor/sinkmanager/manager_test.go b/cdc/processor/sinkmanager/manager_test.go index 7c66cb17f99..112b0adcc6a 100644 --- a/cdc/processor/sinkmanager/manager_test.go +++ b/cdc/processor/sinkmanager/manager_test.go @@ -68,10 +68,13 @@ func createManagerWithMemEngine( } func getChangefeedInfo() *model.ChangeFeedInfo { + replicaConfig := config.GetDefaultReplicaConfig() + replicaConfig.Consistent.MemoryUsage.MemoryQuotaPercentage = 75 + replicaConfig.Consistent.MemoryUsage.EventCachePercentage = 50 return &model.ChangeFeedInfo{ Error: nil, SinkURI: "blackhole://", - Config: config.GetDefaultReplicaConfig(), + Config: replicaConfig, } } diff --git a/dm/tests/lightning_mode/run.sh b/dm/tests/lightning_mode/run.sh index 6a9da556f14..60bcd89f925 100755 --- a/dm/tests/lightning_mode/run.sh +++ b/dm/tests/lightning_mode/run.sh @@ -73,10 +73,10 @@ function run() { run_tidb_server 4000 $TIDB_PASSWORD } -cleanup_data lightning_mode -# also cleanup dm processes in case of last run failed -cleanup_process $* -run $* -cleanup_process $* +#cleanup_data lightning_mode +## also cleanup dm processes in case of last run failed +#cleanup_process $* +#run $* +#cleanup_process $* echo "[$(date)] <<<<<< test case $TEST_NAME success! 
>>>>>>" diff --git a/dm/tests/tls/run.sh b/dm/tests/tls/run.sh index c117173188e..7eb9ee360a3 100644 --- a/dm/tests/tls/run.sh +++ b/dm/tests/tls/run.sh @@ -455,17 +455,17 @@ function run() { test_source_and_target_with_empty_tlsconfig } -cleanup_data tls -cleanup_process - -run - -# kill the tidb with tls -pkill -hup tidb-server 2>/dev/null || true -wait_process_exit tidb-server - -run_tidb_server 4000 $TIDB_PASSWORD - -cleanup_process +#cleanup_data tls +#cleanup_process +# +#run +# +## kill the tidb with tls +#pkill -hup tidb-server 2>/dev/null || true +#wait_process_exit tidb-server +# +#run_tidb_server 4000 $TIDB_PASSWORD +# +#cleanup_process echo "[$(date)] <<<<<< test case $TEST_NAME success! >>>>>>" diff --git a/pkg/config/config_test_data.go b/pkg/config/config_test_data.go index 2d388c5d5e2..3e83122fa3a 100644 --- a/pkg/config/config_test_data.go +++ b/pkg/config/config_test_data.go @@ -65,7 +65,11 @@ const ( "encoding-worker-num": 16, "flush-worker-num": 8, "storage": "", - "use-file-backend": false + "use-file-backend": false, + "memory-usage": { + "memory-quota-percentage": 50, + "event-cache-percentage": 0 + } }, "changefeed-error-stuck-duration": 1800000000000, "sql-mode":"ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION" @@ -227,7 +231,11 @@ const ( "encoding-worker-num": 16, "flush-worker-num": 8, "storage": "", - "use-file-backend": false + "use-file-backend": false, + "memory-usage": { + "memory-quota-percentage": 50, + "event-cache-percentage": 0 + } }, "changefeed-error-stuck-duration": 1800000000000, "sql-mode":"ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION" @@ -293,7 +301,11 @@ const ( "encoding-worker-num": 16, "flush-worker-num": 8, "storage": "", - "use-file-backend": false + "use-file-backend": false, + "memory-usage": { + "memory-quota-percentage": 50, + "event-cache-percentage": 0 + } }, "changefeed-error-stuck-duration": 1800000000000, "sql-mode":"ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION" diff --git a/pkg/config/consistent.go b/pkg/config/consistent.go index c8f93c6eac8..3409cf8327b 100644 --- a/pkg/config/consistent.go +++ b/pkg/config/consistent.go @@ -35,6 +35,17 @@ type ConsistentConfig struct { UseFileBackend bool `toml:"use-file-backend" json:"use-file-backend"` Compression string `toml:"compression" json:"compression"` FlushConcurrency int `toml:"flush-concurrency" json:"flush-concurrency,omitempty"` + + MemoryUsage *ConsistentMemoryUsage `toml:"memory-usage" json:"memory-usage"` +} + +// ConsistentMemoryUsage represents memory usage of Consistent module. +type ConsistentMemoryUsage struct { + // ReplicaConfig.MemoryQuota * MemoryQuotaPercentage / 100 will be used for redo events. + MemoryQuotaPercentage uint64 `toml:"memory-quota-percentage" json:"memory-quota-percentage"` + // ReplicaConfig.MemoryQuota * MemoryQuotaPercentage / 100 * EventCachePercentage / 100 + // will be used for redo cache. + EventCachePercentage uint64 `toml:"event-cache-percentage" json:"event-cache-percentage"` } // ValidateAndAdjust validates the consistency config and adjusts it if necessary. 
diff --git a/pkg/config/replica_config.go b/pkg/config/replica_config.go index 520a34f48db..634cb230b35 100644 --- a/pkg/config/replica_config.go +++ b/pkg/config/replica_config.go @@ -82,6 +82,10 @@ var defaultReplicaConfig = &ReplicaConfig{ Storage: "", UseFileBackend: false, Compression: "", + MemoryUsage: &ConsistentMemoryUsage{ + MemoryQuotaPercentage: 50, + EventCachePercentage: 0, + }, }, ChangefeedErrorStuckDuration: time.Minute * 30, SQLMode: defaultSQLMode, From c5377ba6c850612c426eacf19e75cfe0ed89b68b Mon Sep 17 00:00:00 2001 From: qupeng Date: Sun, 17 Dec 2023 13:28:12 +0800 Subject: [PATCH 2/2] fix conflicts Signed-off-by: qupeng --- cdc/processor/sinkmanager/manager.go | 4 ---- dm/tests/lightning_mode/run.sh | 10 +++++----- dm/tests/tls/run.sh | 24 ++++++++++++------------ 3 files changed, 17 insertions(+), 21 deletions(-) diff --git a/cdc/processor/sinkmanager/manager.go b/cdc/processor/sinkmanager/manager.go index 2d4a0eb1c6a..d7101b4b969 100644 --- a/cdc/processor/sinkmanager/manager.go +++ b/cdc/processor/sinkmanager/manager.go @@ -188,7 +188,6 @@ func New( } m.ready = make(chan struct{}) -<<<<<<< HEAD m.wg.Add(1) // So `SinkManager.Close` will also wait the subroutine. go func() { if err := m.run(ctx, warnChan); err != nil && errors.Cause(err) != context.Canceled { @@ -200,9 +199,6 @@ func New( }() <-m.ready return m, nil -======= - return m ->>>>>>> 844b41bed4 (redo(cdc): custom redo event cache and disable it by default (#10139)) } func (m *SinkManager) run(ctx context.Context, warnings ...chan<- error) (err error) { diff --git a/dm/tests/lightning_mode/run.sh b/dm/tests/lightning_mode/run.sh index 60bcd89f925..6a9da556f14 100755 --- a/dm/tests/lightning_mode/run.sh +++ b/dm/tests/lightning_mode/run.sh @@ -73,10 +73,10 @@ function run() { run_tidb_server 4000 $TIDB_PASSWORD } -#cleanup_data lightning_mode -## also cleanup dm processes in case of last run failed -#cleanup_process $* -#run $* -#cleanup_process $* +cleanup_data lightning_mode +# also cleanup dm processes in case of last run failed +cleanup_process $* +run $* +cleanup_process $* echo "[$(date)] <<<<<< test case $TEST_NAME success! >>>>>>" diff --git a/dm/tests/tls/run.sh b/dm/tests/tls/run.sh index 7eb9ee360a3..c117173188e 100644 --- a/dm/tests/tls/run.sh +++ b/dm/tests/tls/run.sh @@ -455,17 +455,17 @@ function run() { test_source_and_target_with_empty_tlsconfig } -#cleanup_data tls -#cleanup_process -# -#run -# -## kill the tidb with tls -#pkill -hup tidb-server 2>/dev/null || true -#wait_process_exit tidb-server -# -#run_tidb_server 4000 $TIDB_PASSWORD -# -#cleanup_process +cleanup_data tls +cleanup_process + +run + +# kill the tidb with tls +pkill -hup tidb-server 2>/dev/null || true +wait_process_exit tidb-server + +run_tidb_server 4000 $TIDB_PASSWORD + +cleanup_process echo "[$(date)] <<<<<< test case $TEST_NAME success! >>>>>>"
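The quota arithmetic introduced in cdc/processor/sinkmanager/manager.go above can be summarized with a minimal, illustrative sketch. It assumes the defaults added by this patch (memory-quota-percentage = 50, event-cache-percentage = 0); splitQuota is a hypothetical helper for illustration, not code from the change:

package main

import "fmt"

// splitQuota mirrors the arithmetic added in sinkmanager.New: the redo quota
// takes MemoryQuotaPercentage of the total changefeed memory quota, the sink
// gets the remainder, and the redo event cache takes EventCachePercentage of
// the redo quota (0 keeps the cache disabled).
func splitQuota(totalQuota, memoryQuotaPercentage, eventCachePercentage uint64) (sinkQuota, redoQuota, eventCache uint64) {
	redoQuota = totalQuota * memoryQuotaPercentage / 100
	sinkQuota = totalQuota - redoQuota
	eventCache = redoQuota * eventCachePercentage / 100
	return
}

func main() {
	// With a 1 GiB changefeed memory quota and the new defaults,
	// sink and redo each get 512 MiB and the event cache stays off.
	sink, redo, cache := splitQuota(1<<30, 50, 0)
	fmt.Println(sink, redo, cache) // 536870912 536870912 0
}

With event-cache-percentage left at 0 by default, no redo event cache is created at all, which matches the intent of the original change (redo(cdc): custom redo event cache and disable it by default, #10139).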