pick more to 7.1
Signed-off-by: qupeng <[email protected]>
hicqu committed Nov 8, 2023
1 parent 06c6ae2 commit 9216167
Showing 5 changed files with 194 additions and 103 deletions.
116 changes: 91 additions & 25 deletions cdc/kv/client.go
@@ -19,6 +19,7 @@ import (
"io"
"math/rand"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
@@ -42,6 +43,7 @@ import (
"github.com/pingcap/tiflow/pkg/version"
"github.com/prometheus/client_golang/prometheus"
tidbkv "github.com/tikv/client-go/v2/kv"
"github.com/tikv/client-go/v2/oracle"
"github.com/tikv/client-go/v2/tikv"
pd "github.com/tikv/pd/client"
"go.uber.org/zap"
@@ -271,7 +273,7 @@ func (c *CDCClient) EventFeed(
eventCh chan<- model.RegionFeedEvent,
) error {
s := newEventFeedSession(c, span, lockResolver, ts, eventCh)
return s.eventFeed(ctx, ts)
return s.eventFeed(ctx)
}

// RegionCount returns the number of captured regions.
@@ -365,7 +367,6 @@ type eventFeedSession struct {

type rangeRequestTask struct {
span tablepb.Span
ts uint64
}

func newEventFeedSession(
@@ -376,6 +377,7 @@ func newEventFeedSession(
eventCh chan<- model.RegionFeedEvent,
) *eventFeedSession {
id := allocID()
idStr := strconv.FormatUint(id, 10)
rangeLock := regionlock.NewRegionRangeLock(
id, totalSpan.StartKey, totalSpan.EndKey, startTs,
client.changefeed.Namespace+"."+client.changefeed.ID)
@@ -390,7 +392,7 @@
eventCh: eventCh,
rangeLock: rangeLock,
lockResolver: lockResolver,
id: strconv.FormatUint(id, 10),
id: idStr,
regionChSizeGauge: clientChannelSize.WithLabelValues("region"),
errChSizeGauge: clientChannelSize.WithLabelValues("err"),
rangeChSizeGauge: clientChannelSize.WithLabelValues("range"),
@@ -406,7 +408,7 @@
}
}

func (s *eventFeedSession) eventFeed(ctx context.Context, ts uint64) error {
func (s *eventFeedSession) eventFeed(ctx context.Context) error {
s.requestRangeCh = chann.NewAutoDrainChann[rangeRequestTask]()
s.regionCh = chann.NewAutoDrainChann[singleRegionInfo]()
s.regionRouter = chann.NewAutoDrainChann[singleRegionInfo]()
@@ -423,13 +425,11 @@ func (s *eventFeedSession) eventFeed(ctx context.Context, ts uint64) error {

g, ctx := errgroup.WithContext(ctx)

g.Go(func() error {
return s.dispatchRequest(ctx)
})
g.Go(func() error { return s.dispatchRequest(ctx) })

g.Go(func() error {
return s.requestRegionToStore(ctx, g)
})
g.Go(func() error { return s.requestRegionToStore(ctx, g) })

g.Go(func() error { return s.logSlowRegions(ctx) })

g.Go(func() error {
for {
@@ -447,7 +447,7 @@ func (s *eventFeedSession) eventFeed(ctx context.Context, ts uint64) error {
// Because the count and frequency of range requests are limited,
// we use an ephemeral goroutine instead of a permanent one.
g.Go(func() error {
return s.divideAndSendEventFeedToRegions(ctx, task.span, task.ts)
return s.divideAndSendEventFeedToRegions(ctx, task.span)
})
}
}
@@ -468,15 +468,15 @@ func (s *eventFeedSession) eventFeed(ctx context.Context, ts uint64) error {
}
})

s.requestRangeCh.In() <- rangeRequestTask{span: s.totalSpan, ts: ts}
s.requestRangeCh.In() <- rangeRequestTask{span: s.totalSpan}
s.rangeChSizeGauge.Inc()

log.Info("event feed started",
zap.String("namespace", s.changefeed.Namespace),
zap.String("changefeed", s.changefeed.ID),
zap.Int64("tableID", s.tableID),
zap.String("tableName", s.tableName),
zap.Uint64("startTs", ts),
zap.Uint64("startTs", s.startTs),
zap.Stringer("span", &s.totalSpan))

return g.Wait()
@@ -485,9 +485,9 @@ func (s *eventFeedSession) eventFeed(ctx context.Context, ts uint64) error {
// scheduleDivideRegionAndRequest schedules a range to be divided by regions,
// and these regions will then be scheduled to send ChangeData requests.
func (s *eventFeedSession) scheduleDivideRegionAndRequest(
ctx context.Context, span tablepb.Span, ts uint64,
ctx context.Context, span tablepb.Span,
) {
task := rangeRequestTask{span: span, ts: ts}
task := rangeRequestTask{span: span}
select {
case s.requestRangeCh.In() <- task:
s.rangeChSizeGauge.Inc()
@@ -513,12 +513,11 @@ func (s *eventFeedSession) scheduleRegionRequest(ctx context.Context, sri single
zap.String("changefeed", s.changefeed.ID),
zap.Uint64("regionID", sri.verID.GetID()),
zap.Stringer("span", &sri.span),
zap.Uint64("resolvedTs", sri.resolvedTs()),
zap.Any("retrySpans", res.RetryRanges))
for _, r := range res.RetryRanges {
// This call must be blocking; if it were scheduled in a new
// goroutine, it would not block the caller of `scheduleRegionRequest`.
s.scheduleDivideRegionAndRequest(ctx, r, sri.resolvedTs())
s.scheduleDivideRegionAndRequest(ctx, r)
}
case regionlock.LockRangeStatusCancel:
return
@@ -529,11 +528,12 @@ func (s *eventFeedSession) scheduleRegionRequest(ctx context.Context, sri single

res := s.rangeLock.LockRange(
ctx, sri.span.StartKey, sri.span.EndKey, sri.verID.GetID(), sri.verID.GetVer())

failpoint.Inject("kvClientMockRangeLock", func(val failpoint.Value) {
// Sleep briefly to wait for the region to finish splitting.
time.Sleep(time.Second)
s.rangeLock.UnlockRange(sri.span.StartKey, sri.span.EndKey,
sri.verID.GetID(), sri.verID.GetVer(), sri.resolvedTs())
sri.verID.GetID(), sri.verID.GetVer())
regionNum := val.(int)
retryRanges := make([]tablepb.Span, 0, regionNum)
start := []byte("a")
@@ -689,13 +689,13 @@ func (s *eventFeedSession) requestRegionToStore(
state := newRegionFeedState(sri, requestID)
pendingRegions.setByRequestID(requestID, state)

log.Debug("start new request",
log.Info("start new request",
zap.String("namespace", s.changefeed.Namespace),
zap.String("changefeed", s.changefeed.ID),
zap.Int64("tableID", s.tableID),
zap.String("tableName", s.tableName),
zap.String("addr", storeAddr),
zap.Any("request", req))
zap.Uint64("regionID", sri.verID.GetID()),
zap.String("addr", storeAddr))

err = stream.client.Send(req)

@@ -818,7 +818,7 @@ func (s *eventFeedSession) dispatchRequest(ctx context.Context) error {
// to region boundaries. When region merging happens, it's possible that it
// will produce some overlapping spans.
func (s *eventFeedSession) divideAndSendEventFeedToRegions(
ctx context.Context, span tablepb.Span, ts uint64,
ctx context.Context, span tablepb.Span,
) error {
limit := 20
nextSpan := span
@@ -889,17 +889,24 @@ func (s *eventFeedSession) handleError(ctx context.Context, errInfo regionErrorI
switch eerr := errors.Cause(err).(type) {
case *eventError:
innerErr := eerr.err
log.Info("cdc region error",
zap.String("namespace", s.changefeed.Namespace),
zap.String("changefeed", s.changefeed.ID),
zap.Int64("tableID", s.tableID),
zap.String("tableName", s.tableName),
zap.Stringer("error", innerErr))

if notLeader := innerErr.GetNotLeader(); notLeader != nil {
metricFeedNotLeaderCounter.Inc()
s.client.regionCache.UpdateLeader(errInfo.verID, notLeader.GetLeader(), errInfo.rpcCtx.AccessIdx)
} else if innerErr.GetEpochNotMatch() != nil {
// TODO: If only confver is updated, we don't need to reload the region from region cache.
metricFeedEpochNotMatchCounter.Inc()
s.scheduleDivideRegionAndRequest(ctx, errInfo.span, errInfo.resolvedTs())
s.scheduleDivideRegionAndRequest(ctx, errInfo.span)
return nil
} else if innerErr.GetRegionNotFound() != nil {
metricFeedRegionNotFoundCounter.Inc()
s.scheduleDivideRegionAndRequest(ctx, errInfo.span, errInfo.resolvedTs())
s.scheduleDivideRegionAndRequest(ctx, errInfo.span)
return nil
} else if duplicatedRequest := innerErr.GetDuplicateRequest(); duplicatedRequest != nil {
metricFeedDuplicateRequestCounter.Inc()
@@ -929,7 +936,7 @@
}
case *rpcCtxUnavailableErr:
metricFeedRPCCtxUnavailable.Inc()
s.scheduleDivideRegionAndRequest(ctx, errInfo.span, errInfo.resolvedTs())
s.scheduleDivideRegionAndRequest(ctx, errInfo.span)
return nil
case *connectToStoreErr:
metricConnectToStoreErr.Inc()
@@ -1216,6 +1223,17 @@ func (s *eventFeedSession) sendRegionChangeEvents(
continue
}

switch x := event.Event.(type) {
case *cdcpb.Event_Error:
log.Info("event feed receives a region error",
zap.String("namespace", s.changefeed.Namespace),
zap.String("changefeed", s.changefeed.ID),
zap.Int64("tableID", s.tableID),
zap.String("tableName", s.tableName),
zap.Uint64("regionID", event.RegionId),
zap.Any("error", x.Error))
}

slot := worker.inputCalcSlot(event.RegionId)
statefulEvents[slot] = append(statefulEvents[slot], &regionStatefulEvent{
changeEvent: event,
@@ -1308,6 +1326,54 @@ func (s *eventFeedSession) getStreamCancel(storeAddr string) (cancel context.Can
return
}

func (s *eventFeedSession) logSlowRegions(ctx context.Context) error {
ticker := time.NewTicker(10 * time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return ctx.Err()
case <-ticker.C:
}

currTime, err := s.client.pdClock.CurrentTime()
if err != nil {
continue
}
attr := s.rangeLock.CollectLockedRangeAttrs(nil)
if attr.SlowestRegion.Initialized {
ckptTime := oracle.GetTimeFromTS(attr.SlowestRegion.CheckpointTs)
if currTime.Sub(ckptTime) > 20*time.Second {
log.Info("event feed finds a slow region",
zap.String("namespace", s.changefeed.Namespace),
zap.String("changefeed", s.changefeed.ID),
zap.Int64("tableID", s.tableID),
zap.String("tableName", s.tableName),
zap.Any("slowRegion", attr.SlowestRegion))
}
} else if currTime.Sub(attr.SlowestRegion.Created) > 10*time.Minute {
log.Info("event feed initializes a region too slow",
zap.String("namespace", s.changefeed.Namespace),
zap.String("changefeed", s.changefeed.ID),
zap.Int64("tableID", s.tableID),
zap.String("tableName", s.tableName),
zap.Any("slowRegion", attr.SlowestRegion))
}
if len(attr.Holes) > 0 {
holes := make([]string, 0, len(attr.Holes))
for _, hole := range attr.Holes {
holes = append(holes, fmt.Sprintf("[%s,%s)", hole.StartKey, hole.EndKey))
}
log.Info("event feed holes exist",
zap.String("namespace", s.changefeed.Namespace),
zap.String("changefeed", s.changefeed.ID),
zap.Int64("tableID", s.tableID),
zap.String("tableName", s.tableName),
zap.String("holes", strings.Join(holes, ", ")))
}
}
}

func assembleRowEvent(regionID uint64, entry *cdcpb.Event_Row) (model.RegionFeedEvent, error) {
var opType model.OpType
switch entry.GetOpType() {
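The new logSlowRegions loop above compares the current PD time with the slowest region's checkpoint ts and warns when the gap exceeds 20 seconds (or 10 minutes for a region that never initialized). As a rough, self-contained sketch of that lag check — assuming only the oracle package from client-go; resolvedTsLag is a hypothetical helper, not part of this commit — the TSO-to-wall-clock conversion works like this:

package main

import (
	"fmt"
	"time"

	"github.com/tikv/client-go/v2/oracle"
)

// resolvedTsLag returns how far a checkpoint ts lags behind the given wall-clock
// time. A TSO encodes a physical millisecond timestamp in its high bits, so the
// lag is simply now minus oracle.GetTimeFromTS(checkpointTs).
func resolvedTsLag(now time.Time, checkpointTs uint64) time.Duration {
	return now.Sub(oracle.GetTimeFromTS(checkpointTs))
}

func main() {
	now := time.Now()
	// Fabricate a checkpoint that is 30 seconds behind the current time.
	checkpointTs := oracle.GoTimeToTS(now.Add(-30 * time.Second))
	lag := resolvedTsLag(now, checkpointTs)
	fmt.Println(lag > 20*time.Second) // true: such a region would be logged as slow
}

Both thresholds in the committed code (20 seconds for initialized regions, 10 minutes for uninitialized ones) are hard-coded constants rather than configuration.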
12 changes: 7 additions & 5 deletions cdc/kv/region_state_bench_test.go
@@ -21,6 +21,7 @@ import (
"testing"
"time"

"github.com/pingcap/tiflow/cdc/kv/regionlock"
"github.com/pingcap/tiflow/pkg/spanz"
"github.com/tikv/client-go/v2/tikv"
)
@@ -40,9 +41,9 @@ func TestSyncRegionFeedStateMapConcurrentAccess(t *testing.T) {
return
default:
}
m.setByRequestID(1, &regionFeedState{})
m.setByRequestID(2, &regionFeedState{})
m.setByRequestID(3, &regionFeedState{})
m.setByRequestID(1, &regionFeedState{sri: singleRegionInfo{lockedRange: &regionlock.LockedRange{}}})
m.setByRequestID(2, &regionFeedState{sri: singleRegionInfo{lockedRange: &regionlock.LockedRange{}}})
m.setByRequestID(3, &regionFeedState{sri: singleRegionInfo{lockedRange: &regionlock.LockedRange{}}})
}
}()
wg.Add(1)
@@ -55,7 +56,7 @@
default:
}
m.iter(func(requestID uint64, state *regionFeedState) bool {
_ = state.initialized.Load()
state.isInitialized()
return true
})
}
@@ -118,7 +119,8 @@ func benchmarkGetRegionState(b *testing.B, bench func(b *testing.B, sm regionSta
state := newRegionFeedState(newSingleRegionInfo(
tikv.RegionVerID{},
spanz.ToSpan([]byte{}, spanz.UpperBoundKey),
0, &tikv.RPCContext{}), 0)
&tikv.RPCContext{}), 0)
state.sri.lockedRange = &regionlock.LockedRange{}

regionCount := []int{100, 1000, 10000, 20000, 40000, 80000, 160000, 320000}
for _, count := range regionCount {
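In the benchmark and concurrency test above, every regionFeedState is now built with a regionlock.LockedRange attached, and the iterator calls state.isInitialized() instead of reading an atomic field directly — presumably because the initialized flag now lives on the locked range. A minimal sketch of that shape, using simplified hypothetical types rather than the real tiflow ones, shows why the tests must populate the range before touching the accessor:

package main

import (
	"fmt"
	"sync/atomic"
)

// lockedRange and regionState are simplified stand-ins for regionlock.LockedRange
// and regionFeedState; only the initialized flag is modeled here.
type lockedRange struct {
	initialized atomic.Bool
}

type regionState struct {
	lockedRange *lockedRange
}

// isInitialized reads the flag through the locked range, so the pointer must be set.
func (s *regionState) isInitialized() bool {
	return s.lockedRange.initialized.Load()
}

func main() {
	ok := &regionState{lockedRange: &lockedRange{}}
	fmt.Println(ok.isInitialized()) // false, and safe to call

	// A state constructed without a locked range, as the old test code did,
	// would panic with a nil pointer dereference here:
	// bad := &regionState{}
	// _ = bad.isInitialized()
}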
