go/vt/wrangler: reduce VReplicationExec calls when getting copy state #14375

Merged: 20 commits, Dec 28, 2023
Changes from 10 commits
3,628 changes: 1,819 additions & 1,809 deletions go/vt/proto/vtctldata/vtctldata.pb.go

Large diffs are not rendered by default.

32 changes: 30 additions & 2 deletions go/vt/proto/vtctldata/vtctldata_vtproto.pb.go

Some generated files are not rendered by default.

18 changes: 9 additions & 9 deletions go/vt/vtctl/vtctl_test.go
@@ -200,12 +200,12 @@ func TestMoveTables(t *testing.T) {
expectResults: func() {
env.tmc.setVRResults(
target.tablet,
fmt.Sprintf("select table_name, lastpk from _vt.copy_state where vrepl_id = %d and id in (select max(id) from _vt.copy_state where vrepl_id = %d group by vrepl_id, table_name)",
fmt.Sprintf("select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in (%d) and id in (select max(id) from _vt.copy_state where vrepl_id in (%d) group by vrepl_id, table_name)",
vrID, vrID),
sqltypes.MakeTestResult(sqltypes.MakeTestFields(
"table_name|lastpk",
"varchar|varbinary"),
fmt.Sprintf("%s|", table),
"vrepl_id|table_name|lastpk",
"int64|varchar|varbinary"),
fmt.Sprintf("%d|%s|", vrID, table),
),
)
env.tmc.setDBAResults(
@@ -260,12 +260,12 @@ func TestMoveTables(t *testing.T) {
expectResults: func() {
env.tmc.setVRResults(
target.tablet,
fmt.Sprintf("select table_name, lastpk from _vt.copy_state where vrepl_id = %d and id in (select max(id) from _vt.copy_state where vrepl_id = %d group by vrepl_id, table_name)",
fmt.Sprintf("select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in (%d) and id in (select max(id) from _vt.copy_state where vrepl_id in (%d) group by vrepl_id, table_name)",
vrID, vrID),
sqltypes.MakeTestResult(sqltypes.MakeTestFields(
"table_name|lastpk",
"varchar|varbinary"),
fmt.Sprintf("%s|", table),
"vrepl_id|table_name|lastpk",
"int64|varchar|varbinary"),
fmt.Sprintf("%d|%s|", vrID, table),
),
)
env.tmc.setDBAResults(
@@ -320,7 +320,7 @@ func TestMoveTables(t *testing.T) {
expectResults: func() {
env.tmc.setVRResults(
target.tablet,
fmt.Sprintf("select table_name, lastpk from _vt.copy_state where vrepl_id = %d and id in (select max(id) from _vt.copy_state where vrepl_id = %d group by vrepl_id, table_name)",
fmt.Sprintf("select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in (%d) and id in (select max(id) from _vt.copy_state where vrepl_id in (%d) group by vrepl_id, table_name)",
vrID, vrID),
&sqltypes.Result{},
)
2 changes: 1 addition & 1 deletion go/vt/vtctl/workflow/materializer_test.go
@@ -48,7 +48,7 @@ const mzSelectFrozenQuery = "select 1 from _vt.vreplication where db_name='vt_ta
const mzCheckJournal = "/select val from _vt.resharding_journal where id="
const mzGetWorkflowStatusQuery = "select id, workflow, source, pos, stop_pos, max_replication_lag, state, db_name, time_updated, transaction_timestamp, message, tags, workflow_type, workflow_sub_type, time_heartbeat, defer_secondary_keys, component_throttled, time_throttled, rows_copied from _vt.vreplication where workflow = 'workflow' and db_name = 'vt_targetks'"
const mzGetCopyState = "select distinct table_name from _vt.copy_state cs, _vt.vreplication vr where vr.id = cs.vrepl_id and vr.id = 1"
const mzGetLatestCopyState = "select table_name, lastpk from _vt.copy_state where vrepl_id = 1 and id in (select max(id) from _vt.copy_state where vrepl_id = 1 group by vrepl_id, table_name)"
const mzGetLatestCopyState = "select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in (1) and id in (select max(id) from _vt.copy_state where vrepl_id in (1) group by vrepl_id, table_name)"
const insertPrefix = `/insert into _vt.vreplication\(workflow, source, pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys\) values `
const eol = "$"

104 changes: 89 additions & 15 deletions go/vt/vtctl/workflow/server.go
@@ -413,7 +413,72 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows
return nil, err
}

m := sync.Mutex{} // guards access to the following maps during concurrent calls to scanWorkflow
m := sync.Mutex{} // guards access to the following maps during concurrent calls to fetchCopyStates and scanWorkflow

copyStatesByShardStreamId := make(map[string][]*vtctldatapb.Workflow_Stream_CopyState, len(results))

fetchCopyStates := func(ctx context.Context, tablet *topo.TabletInfo, streamIds []int64) error {
span, ctx := trace.NewSpan(ctx, "workflow.Server.fetchCopyStates")
defer span.Finish()

span.Annotate("keyspace", req.Keyspace)
span.Annotate("shard", tablet.Shard)
span.Annotate("tablet_alias", tablet.AliasString())

copyStates, err := s.getWorkflowCopyStates(ctx, tablet, streamIds)
if err != nil {
return err
}

m.Lock()
defer m.Unlock()

for _, copyState := range copyStates {
shardStreamId := fmt.Sprintf("%s/%d", tablet.Shard, copyState.StreamId)
copyStatesByShardStreamId[shardStreamId] = append(
copyStatesByShardStreamId[shardStreamId],
copyState,
)
}

return nil
}

var (
fetchCopyStatesWg sync.WaitGroup
fetchCopyStatesErrors concurrency.FirstErrorRecorder
)

for tablet, result := range results {
qr := sqltypes.Proto3ToResult(result)

var streamIds []int64
for _, row := range qr.Named().Rows {
streamId, err := row.ToInt64("id")
if err != nil {
return nil, err
}
streamIds = append(streamIds, streamId)
}

if len(streamIds) == 0 {
continue
}

fetchCopyStatesWg.Add(1)
go func(ctx context.Context, tablet *topo.TabletInfo, streamIds []int64) {
defer fetchCopyStatesWg.Done()
if err := fetchCopyStates(ctx, tablet, streamIds); err != nil {
fetchCopyStatesErrors.RecordError(err)
}
}(ctx, tablet, streamIds)
}

fetchCopyStatesWg.Wait()
if fetchCopyStatesErrors.HasErrors() {
return nil, fetchCopyStatesErrors.Error()
}

workflowsMap := make(map[string]*vtctldatapb.Workflow, len(results))
sourceKeyspaceByWorkflow := make(map[string]string, len(results))
sourceShardsByWorkflow := make(map[string]sets.Set[string], len(results))
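The hunk above fans out one copy-state fetch per target tablet and keeps only the first error once every goroutine has finished. Below is a minimal, self-contained sketch of that fan-out pattern; the tablet aliases, the stand-in fetch function, and the firstErrRecorder type are illustrative only, not the Vitess types used in the change.

package main

import (
	"context"
	"fmt"
	"sync"
)

// firstErrRecorder keeps the first error recorded by any goroutine; it is a
// simplified stand-in for the FirstErrorRecorder used in the change above.
type firstErrRecorder struct {
	mu  sync.Mutex
	err error
}

func (r *firstErrRecorder) record(err error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.err == nil {
		r.err = err
	}
}

func main() {
	ctx := context.Background()

	// One batch of stream IDs per tablet (illustrative data): each tablet
	// now gets a single copy-state query covering all of its streams
	// instead of one VReplicationExec call per stream.
	batches := map[string][]int64{
		"zone1-0000000100": {1, 2},
		"zone1-0000000101": {3},
	}

	// Stand-in for getWorkflowCopyStates.
	fetch := func(ctx context.Context, tablet string, streamIds []int64) error {
		fmt.Printf("fetching copy state for %s, streams %v\n", tablet, streamIds)
		return nil
	}

	var (
		wg  sync.WaitGroup
		rec firstErrRecorder
	)
	for tablet, streamIds := range batches {
		wg.Add(1)
		go func(tablet string, streamIds []int64) {
			defer wg.Done()
			if err := fetch(ctx, tablet, streamIds); err != nil {
				rec.record(err)
			}
		}(tablet, streamIds)
	}
	wg.Wait()
	if rec.err != nil {
		fmt.Println("fetch failed:", rec.err)
	}
}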
@@ -541,19 +606,15 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows
},
}

stream.CopyStates, err = s.getWorkflowCopyStates(ctx, tablet, id)
if err != nil {
return err
// Merge in copy states, which we've already fetched.
shardStreamId := fmt.Sprintf("%s/%d", tablet.Shard, id)
if copyState, ok := copyStatesByShardStreamId[shardStreamId]; ok {
stream.CopyStates = copyState
}

span.Annotate("num_copy_states", len(stream.CopyStates))

// At this point, we're going to start modifying the maps defined
// outside this function, as well as fields on the passed-in Workflow
// pointer. Since we're running concurrently, take the lock.
//
// We've already made the remote call to getCopyStates, so synchronizing
// here shouldn't hurt too badly, performance-wise.
m.Lock()
defer m.Unlock()

@@ -1047,16 +1108,24 @@ func (s *Server) getWorkflowState(ctx context.Context, targetKeyspace, workflowN
return ts, state, nil
}

func (s *Server) getWorkflowCopyStates(ctx context.Context, tablet *topo.TabletInfo, id int64) ([]*vtctldatapb.Workflow_Stream_CopyState, error) {
func (s *Server) getWorkflowCopyStates(ctx context.Context, tablet *topo.TabletInfo, streamIds []int64) ([]*vtctldatapb.Workflow_Stream_CopyState, error) {
span, ctx := trace.NewSpan(ctx, "workflow.Server.getWorkflowCopyStates")
defer span.Finish()

span.Annotate("keyspace", tablet.Keyspace)
span.Annotate("shard", tablet.Shard)
span.Annotate("tablet_alias", tablet.AliasString())
span.Annotate("vrepl_id", id)
span.Annotate("num_stream_ids", len(streamIds))

query := fmt.Sprintf("select table_name, lastpk from _vt.copy_state where vrepl_id = %d and id in (select max(id) from _vt.copy_state where vrepl_id = %d group by vrepl_id, table_name)", id, id)
idsBV, err := sqltypes.BuildBindVariable(streamIds)
if err != nil {
return nil, err
}
query, err := sqlparser.ParseAndBind("select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in %a and id in (select max(id) from _vt.copy_state where vrepl_id in %a group by vrepl_id, table_name)",
idsBV, idsBV)
if err != nil {
return nil, err
}
qr, err := s.tmc.VReplicationExec(ctx, tablet.Tablet, query)
if err != nil {
return nil, err
@@ -1069,10 +1138,15 @@ func (s *Server) getWorkflowCopyStates(ctx context.Context, tablet *topo.TabletI

copyStates := make([]*vtctldatapb.Workflow_Stream_CopyState, len(result.Rows))
for i, row := range result.Rows {
// These fields are technically varbinary, but this is close enough.
streamId, err := row[0].ToCastInt64()
if err != nil {
return nil, fmt.Errorf("failed to cast vrepl_id to int64: %v", err)
}
// These string fields are technically varbinary, but this is close enough.
copyStates[i] = &vtctldatapb.Workflow_Stream_CopyState{
Table: row[0].ToString(),
LastPk: row[1].ToString(),
StreamId: streamId,
Table: row[1].ToString(),
LastPk: row[2].ToString(),
}
}

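For reference, the batched query that the new getWorkflowCopyStates sends can be reproduced in isolation with the same sqltypes and sqlparser helpers the change uses. This is a minimal sketch; the stream ID values are illustrative.

package main

import (
	"fmt"
	"log"

	"vitess.io/vitess/go/sqltypes"
	"vitess.io/vitess/go/vt/sqlparser"
)

func main() {
	// Stream IDs collected from one tablet's _vt.vreplication rows
	// (illustrative values).
	streamIds := []int64{1, 2, 3}

	// A single bind variable holds all IDs, so one query covers every stream.
	idsBV, err := sqltypes.BuildBindVariable(streamIds)
	if err != nil {
		log.Fatal(err)
	}

	// The %a placeholders are expanded into a properly quoted IN list.
	query, err := sqlparser.ParseAndBind(
		"select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in %a and id in (select max(id) from _vt.copy_state where vrepl_id in %a group by vrepl_id, table_name)",
		idsBV, idsBV,
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(query)
	// With a single stream ID this is the shape the updated test constants
	// expect, e.g. "... where vrepl_id in (1) and id in (...)".
}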
2 changes: 1 addition & 1 deletion go/vt/vttablet/tabletmanager/rpc_vreplication_test.go
@@ -58,7 +58,7 @@ const (
getWorkflowState = "select pos, stop_pos, max_tps, max_replication_lag, state, workflow_type, workflow, workflow_sub_type, defer_secondary_keys from _vt.vreplication where id=1"
getCopyState = "select distinct table_name from _vt.copy_state cs, _vt.vreplication vr where vr.id = cs.vrepl_id and vr.id = 1"
getNumCopyStateTable = "select count(distinct table_name) from _vt.copy_state where vrepl_id=1"
getLatestCopyState = "select table_name, lastpk from _vt.copy_state where vrepl_id = 1 and id in (select max(id) from _vt.copy_state where vrepl_id = 1 group by vrepl_id, table_name)"
getLatestCopyState = "select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in (1) and id in (select max(id) from _vt.copy_state where vrepl_id in (1) group by vrepl_id, table_name)"
getAutoIncrementStep = "select @@session.auto_increment_increment"
setSessionTZ = "set @@session.time_zone = '+00:00'"
setNames = "set names 'binary'"
34 changes: 27 additions & 7 deletions go/vt/wrangler/traffic_switcher_env_test.go
@@ -20,6 +20,8 @@ import (
"context"
"fmt"
"math/rand"
"strconv"
"strings"
"sync"
"testing"
"time"
@@ -56,7 +58,7 @@ import (
const (
streamInfoQuery = "select id, source, message, cell, tablet_types, workflow_type, workflow_sub_type, defer_secondary_keys from _vt.vreplication where workflow='%s' and db_name='vt_%s'"
streamExtInfoQuery = "select id, source, pos, stop_pos, max_replication_lag, state, db_name, time_updated, transaction_timestamp, time_heartbeat, time_throttled, component_throttled, message, tags, workflow_type, workflow_sub_type, defer_secondary_keys, rows_copied from _vt.vreplication where db_name = 'vt_%s' and workflow = '%s'"
copyStateQuery = "select table_name, lastpk from _vt.copy_state where vrepl_id = %d and id in (select max(id) from _vt.copy_state where vrepl_id = %d group by vrepl_id, table_name)"
copyStateQuery = "select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in (%s) and id in (select max(id) from _vt.copy_state where vrepl_id in (%s) group by vrepl_id, table_name)"
maxValForSequence = "select max(`id`) as maxval from `vt_%s`.`%s`"
)

@@ -298,6 +300,7 @@ func newTestTableMigraterCustom(ctx context.Context, t *testing.T, sourceShards,
for i, targetShard := range targetShards {
var streamInfoRows []string
var streamExtInfoRows []string
var vreplIDs []string
for j, sourceShard := range sourceShards {
bls := &binlogdatapb.BinlogSource{
Keyspace: "ks1",
@@ -314,8 +317,10 @@ }
}
streamInfoRows = append(streamInfoRows, fmt.Sprintf("%d|%v||||1|0|0", j+1, bls))
streamExtInfoRows = append(streamExtInfoRows, fmt.Sprintf("%d|||||Running|vt_ks1|%d|%d|0|0||1||0", j+1, now, now))
tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, j+1, j+1), noResult)
vreplIDs = append(vreplIDs, strconv.FormatInt(int64(j+1), 10))
}
vreplIDsJoined := strings.Join(vreplIDs, ", ")
tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, vreplIDsJoined, vreplIDsJoined), noResult)
tme.dbTargetClients[i].addInvariant(streamInfoKs2, sqltypes.MakeTestResult(sqltypes.MakeTestFields(
"id|source|message|cell|tablet_types|workflow_type|workflow_sub_type|defer_secondary_keys",
"int64|varchar|varchar|varchar|varchar|int64|int64|int64"),
@@ -332,6 +337,7 @@

for i, sourceShard := range sourceShards {
var streamInfoRows []string
var vreplIDs []string
for j, targetShard := range targetShards {
bls := &binlogdatapb.BinlogSource{
Keyspace: "ks2",
@@ -347,8 +353,10 @@ },
},
}
streamInfoRows = append(streamInfoRows, fmt.Sprintf("%d|%v||||1|0|0", j+1, bls))
tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, j+1, j+1), noResult)
vreplIDs = append(vreplIDs, strconv.FormatInt(int64(j+1), 10))
}
vreplIDsJoined := strings.Join(vreplIDs, ", ")
tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, vreplIDsJoined, vreplIDsJoined), noResult)
tme.dbSourceClients[i].addInvariant(reverseStreamInfoKs1, sqltypes.MakeTestResult(sqltypes.MakeTestFields(
"id|source|message|cell|tablet_types|workflow_type|workflow_sub_type|defer_secondary_keys",
"int64|varchar|varchar|varchar|varchar|int64|int64|int64"),
@@ -470,6 +478,7 @@ func newTestTablePartialMigrater(ctx context.Context, t *testing.T, shards, shar
for _, shardToMove := range shardsToMove {
var streamInfoRows []string
var streamExtInfoRows []string
var vreplIDs []string
if shardToMove == shard {
bls := &binlogdatapb.BinlogSource{
Keyspace: "ks1",
@@ -486,8 +495,10 @@ }
}
streamInfoRows = append(streamInfoRows, fmt.Sprintf("%d|%v||||1|0|0", i+1, bls))
streamExtInfoRows = append(streamExtInfoRows, fmt.Sprintf("%d|||||Running|vt_ks1|%d|%d|0|0|||1||0", i+1, now, now))
vreplIDs = append(vreplIDs, strconv.FormatInt(int64(i+1), 10))
}
tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, i+1, i+1), noResult)
vreplIDsJoined := strings.Join(vreplIDs, ", ")
tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, vreplIDsJoined, vreplIDsJoined), noResult)
tme.dbTargetClients[i].addInvariant(streamInfoKs2, sqltypes.MakeTestResult(sqltypes.MakeTestFields(
"id|source|message|cell|tablet_types|workflow_type|workflow_sub_type|defer_secondary_keys",
"int64|varchar|varchar|varchar|varchar|int64|int64|int64"),
@@ -506,6 +517,7 @@ func newTestTablePartialMigrater(ctx context.Context, t *testing.T, shards, shar
for i, shard := range shards {
for _, shardToMove := range shardsToMove {
var streamInfoRows []string
var vreplIDs []string
if shardToMove == shard {
bls := &binlogdatapb.BinlogSource{
Keyspace: "ks2",
@@ -521,8 +533,10 @@ },
},
}
streamInfoRows = append(streamInfoRows, fmt.Sprintf("%d|%v||||1|0|0", i+1, bls))
tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, i+1, i+1), noResult)
vreplIDs = append(vreplIDs, strconv.FormatInt(int64(i+1), 10))
}
vreplIDsJoined := strings.Join(vreplIDs, ", ")
tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, vreplIDsJoined, vreplIDsJoined), noResult)
tme.dbSourceClients[i].addInvariant(reverseStreamInfoKs1, sqltypes.MakeTestResult(sqltypes.MakeTestFields(
"id|source|message|cell|tablet_types|workflow_type|workflow_sub_type|defer_secondary_keys",
"int64|varchar|varchar|varchar|varchar|int64|int64|int64"),
@@ -632,6 +646,7 @@ func newTestShardMigrater(ctx context.Context, t *testing.T, sourceShards, targe
for i, targetShard := range targetShards {
var rows, rowsRdOnly []string
var streamExtInfoRows []string
var vreplIDs []string
for j, sourceShard := range sourceShards {
if !key.KeyRangeIntersect(tme.targetKeyRanges[i], tme.sourceKeyRanges[j]) {
continue
@@ -649,8 +664,10 @@
rows = append(rows, fmt.Sprintf("%d|%v||||1|0|0", j+1, bls))
rowsRdOnly = append(rows, fmt.Sprintf("%d|%v|||RDONLY|1|0|0", j+1, bls))
streamExtInfoRows = append(streamExtInfoRows, fmt.Sprintf("%d|||||Running|vt_ks1|%d|%d|0|0|||", j+1, now, now))
tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, j+1, j+1), noResult)
vreplIDs = append(vreplIDs, strconv.FormatInt(int64(j+1), 10))
}
vreplIDsJoined := strings.Join(vreplIDs, ", ")
tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, vreplIDsJoined, vreplIDsJoined), noResult)
tme.dbTargetClients[i].addInvariant(streamInfoKs, sqltypes.MakeTestResult(sqltypes.MakeTestFields(
"id|source|message|cell|tablet_types|workflow_type|workflow_sub_type|defer_secondary_keys",
"int64|varchar|varchar|varchar|varchar|int64|int64|int64"),
@@ -670,11 +687,14 @@
tme.targetKeyspace = "ks"
for i, dbclient := range tme.dbSourceClients {
var streamExtInfoRows []string
var vreplIDs []string
dbclient.addInvariant(streamInfoKs, &sqltypes.Result{})
for j := range targetShards {
streamExtInfoRows = append(streamExtInfoRows, fmt.Sprintf("%d|||||Running|vt_ks|%d|%d|0|0|||", j+1, now, now))
tme.dbSourceClients[i].addInvariant(fmt.Sprintf(copyStateQuery, j+1, j+1), noResult)
vreplIDs = append(vreplIDs, strconv.FormatInt(int64(j+1), 10))
}
vreplIDsJoined := strings.Join(vreplIDs, ", ")
tme.dbSourceClients[i].addInvariant(fmt.Sprintf(copyStateQuery, vreplIDsJoined, vreplIDsJoined), noResult)
tme.dbSourceClients[i].addInvariant(streamExtInfoKs, sqltypes.MakeTestResult(sqltypes.MakeTestFields(
"id|source|pos|stop_pos|max_replication_lag|state|db_name|time_updated|transaction_timestamp|time_heartbeat|time_throttled|component_throttled|message|tags",
"int64|varchar|int64|int64|int64|varchar|varchar|int64|int64|int64|int64|varchar|varchar|varchar"),