Skip to content

Commit

Permalink
This is an automated cherry-pick of pingcap#10227
Browse files Browse the repository at this point in the history
Signed-off-by: ti-chi-bot <[email protected]>
  • Loading branch information
sdojjy authored and ti-chi-bot committed Dec 2, 2023
1 parent 8c006b3 commit 0271869
Show file tree
Hide file tree
Showing 5 changed files with 52 additions and 2 deletions.
15 changes: 15 additions & 0 deletions cdc/api/v2/model.go
Original file line number Diff line number Diff line change
Expand Up @@ -270,6 +270,11 @@ func (c *ReplicaConfig) toInternalReplicaConfigWithOriginConfig(
FlushWorkerNum: c.Consistent.FlushWorkerNum,
Storage: c.Consistent.Storage,
UseFileBackend: c.Consistent.UseFileBackend,
<<<<<<< HEAD
=======
Compression: c.Consistent.Compression,
FlushConcurrency: c.Consistent.FlushConcurrency,
>>>>>>> 89e57d7a6e (redo(ticdc): use multi part s3 uploader in redo (#10227))
}
}
if c.Sink != nil {
Expand Down Expand Up @@ -755,6 +760,11 @@ func ToAPIReplicaConfig(c *config.ReplicaConfig) *ReplicaConfig {
FlushWorkerNum: c.Consistent.FlushWorkerNum,
Storage: cloned.Consistent.Storage,
UseFileBackend: cloned.Consistent.UseFileBackend,
<<<<<<< HEAD
=======
Compression: cloned.Consistent.Compression,
FlushConcurrency: cloned.Consistent.FlushConcurrency,
>>>>>>> 89e57d7a6e (redo(ticdc): use multi part s3 uploader in redo (#10227))
}
}
if cloned.Mounter != nil {
Expand Down Expand Up @@ -950,6 +960,11 @@ type ConsistentConfig struct {
FlushWorkerNum int `json:"flush_worker_num"`
Storage string `json:"storage,omitempty"`
UseFileBackend bool `json:"use_file_backend"`
<<<<<<< HEAD
=======
Compression string `json:"compression,omitempty"`
FlushConcurrency int `json:"flush_concurrency,omitempty"`
>>>>>>> 89e57d7a6e (redo(ticdc): use multi part s3 uploader in redo (#10227))
}

// ChangefeedSchedulerConfig is per changefeed scheduler settings.
Expand Down
25 changes: 25 additions & 0 deletions cdc/redo/writer/memory/file_worker.go
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,19 @@ func (f *fileWorkerGroup) bgFlushFileCache(egCtx context.Context) error {
return errors.Trace(egCtx.Err())
case file := <-f.flushCh:
start := time.Now()
<<<<<<< HEAD
err := f.extStorage.WriteFile(egCtx, file.filename, file.data)
=======
if err := file.writer.Close(); err != nil {
return errors.Trace(err)
}
var err error
if f.cfg.FlushConcurrency <= 1 {
err = f.extStorage.WriteFile(egCtx, file.filename, file.writer.buf.Bytes())
} else {
err = f.multiPartUpload(egCtx, file)
}
>>>>>>> 89e57d7a6e (redo(ticdc): use multi part s3 uploader in redo (#10227))
f.metricFlushAllDuration.Observe(time.Since(start).Seconds())
if err != nil {
return errors.Trace(err)
Expand All @@ -184,6 +196,19 @@ func (f *fileWorkerGroup) bgFlushFileCache(egCtx context.Context) error {
}
}

// multiPartUpload flushes a cached redo file to external storage through the
// storage writer's concurrent (multi-part) upload path. The number of
// concurrent upload parts comes from the redo config's FlushConcurrency.
func (f *fileWorkerGroup) multiPartUpload(ctx context.Context, file *fileCache) error {
	w, err := f.extStorage.Create(ctx, file.filename, &storage.WriterOption{
		Concurrency: f.cfg.FlushConcurrency,
	})
	if err != nil {
		return errors.Trace(err)
	}
	// Hand the whole buffered file to the writer; it splits the payload
	// into parts internally.
	if _, err = w.Write(ctx, file.writer.buf.Bytes()); err != nil {
		return errors.Trace(err)
	}
	// Close completes the multipart upload and surfaces any final error.
	return errors.Trace(w.Close(ctx))
}

func (f *fileWorkerGroup) bgWriteLogs(
egCtx context.Context, inputCh <-chan *polymorphicRedoEvent,
) (err error) {
Expand Down
5 changes: 5 additions & 0 deletions pkg/config/consistent.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,11 @@ type ConsistentConfig struct {
FlushWorkerNum int `toml:"flush-worker-num" json:"flush-worker-num"`
Storage string `toml:"storage" json:"storage"`
UseFileBackend bool `toml:"use-file-backend" json:"use-file-backend"`
<<<<<<< HEAD
=======
Compression string `toml:"compression" json:"compression"`
FlushConcurrency int `toml:"flush-concurrency" json:"flush-concurrency,omitempty"`
>>>>>>> 89e57d7a6e (redo(ticdc): use multi part s3 uploader in redo (#10227))
}

// ValidateAndAdjust validates the consistency config and adjusts it if necessary.
Expand Down
8 changes: 6 additions & 2 deletions pkg/util/external_storage.go
Original file line number Diff line number Diff line change
Expand Up @@ -196,8 +196,12 @@ func (s *extStorageWithTimeout) WalkDir(
// Create opens an external-storage file writer. Sequential (non-concurrent)
// writes are bounded by the configured timeout. A concurrent (multipart)
// upload spawns background goroutines that outlive this call, so its context
// must not carry the timeout.
func (s *extStorageWithTimeout) Create(
	ctx context.Context, path string, option *storage.WriterOption,
) (storage.ExternalFileWriter, error) {
	// A nil option requests default (sequential) behavior; guard before
	// dereferencing option.Concurrency to avoid a nil-pointer panic.
	if option == nil || option.Concurrency <= 1 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, s.timeout)
		defer cancel()
	}
	// multipart uploading spawns a background goroutine, can't set timeout
	return s.ExternalStorage.Create(ctx, path, option)
}

Expand Down
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
[consistent]
level = "eventual"
storage = "s3://logbucket/test-changefeed?endpoint=http://127.0.0.1:24927/"
flush-concurrency = 2

0 comments on commit 0271869

Please sign in to comment.