From a43822554ec0e72606ed770d82d36f2bc6ff59eb Mon Sep 17 00:00:00 2001 From: Matt Lord Date: Thu, 16 Oct 2025 14:42:05 +0000 Subject: [PATCH 01/13] Properly handle identifiers in TM sequence RPCs Signed-off-by: Matt Lord --- go/vt/vttablet/tabletmanager/rpc_vreplication.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/go/vt/vttablet/tabletmanager/rpc_vreplication.go b/go/vt/vttablet/tabletmanager/rpc_vreplication.go index 67c38545812..9c8d5aa8e24 100644 --- a/go/vt/vttablet/tabletmanager/rpc_vreplication.go +++ b/go/vt/vttablet/tabletmanager/rpc_vreplication.go @@ -748,9 +748,9 @@ func (tm *TabletManager) GetMaxValueForSequences(ctx context.Context, req *table func (tm *TabletManager) getMaxSequenceValue(ctx context.Context, sm *tabletmanagerdatapb.GetMaxValueForSequencesRequest_SequenceMetadata) (int64, error) { query := sqlparser.BuildParsedQuery(sqlGetMaxSequenceVal, - sm.UsingColEscaped, - sm.UsingTableDbNameEscaped, - sm.UsingTableNameEscaped, + sqlparser.NewColName(sm.UsingColEscaped), + sqlparser.NewIdentifierCI(sm.UsingTableDbNameEscaped), + sqlparser.NewTableName(sm.UsingTableNameEscaped), ) qr, err := tm.ExecuteFetchAsApp(ctx, &tabletmanagerdatapb.ExecuteFetchAsAppRequest{ Query: []byte(query.Query), @@ -804,8 +804,8 @@ func (tm *TabletManager) updateSequenceValue(ctx context.Context, seq *tabletman } log.Infof("Updating sequence %s.%s to %d", seq.BackingTableDbName, seq.BackingTableName, nextVal) initQuery := sqlparser.BuildParsedQuery(sqlInitSequenceTable, - seq.BackingTableDbName, - seq.BackingTableName, + sqlparser.NewIdentifierCI(seq.BackingTableDbName), + sqlparser.NewTableName(seq.BackingTableName), nextVal, nextVal, nextVal, @@ -846,7 +846,7 @@ func (tm *TabletManager) updateSequenceValue(ctx context.Context, seq *tabletman } func (tm *TabletManager) createSequenceTable(ctx context.Context, escapedTableName string) error { - stmt := sqlparser.BuildParsedQuery(sqlCreateSequenceTable, escapedTableName) + stmt := 
sqlparser.BuildParsedQuery(sqlCreateSequenceTable, sqlparser.NewTableName(escapedTableName)) _, err := tm.ApplySchema(ctx, &tmutils.SchemaChange{ SQL: stmt.Query, Force: false, From eeaac51cea896aa4d9a2ea38d01ead9edf4e9164 Mon Sep 17 00:00:00 2001 From: Matt Lord Date: Thu, 16 Oct 2025 15:04:07 +0000 Subject: [PATCH 02/13] Update tests Signed-off-by: Matt Lord --- go/vt/vtctl/workflow/sequences_test.go | 67 +++++++++++++------------- 1 file changed, 34 insertions(+), 33 deletions(-) diff --git a/go/vt/vtctl/workflow/sequences_test.go b/go/vt/vtctl/workflow/sequences_test.go index 1717a3b38b3..970d52b02ab 100644 --- a/go/vt/vtctl/workflow/sequences_test.go +++ b/go/vt/vtctl/workflow/sequences_test.go @@ -28,12 +28,13 @@ import ( "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/mysqlctl/tmutils" - tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" "vitess.io/vitess/go/vt/proto/vschema" - vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" + + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) func TestInitializeTargetSequences(t *testing.T) { @@ -41,10 +42,10 @@ func TestInitializeTargetSequences(t *testing.T) { defer cancel() workflowName := "wf1" - tableName := "t1" - tableName2 := "t2" - sourceKeyspaceName := "sourceks" - targetKeyspaceName := "targetks" + tableName := "tbl-t1" + tableName2 := "tbl-t2" + sourceKeyspaceName := "source-ks" + targetKeyspaceName := "target-ks" schema := map[string]*tabletmanagerdatapb.SchemaDefinition{ tableName: { @@ -88,7 +89,7 @@ func TestInitializeTargetSequences(t *testing.T) { backingTableKeyspace: sourceKeyspaceName, backingTableDBName: fmt.Sprintf("vt_%s", sourceKeyspaceName), usingTableName: tableName, - usingTableDBName: "vt_targetks", + usingTableDBName: "vt_target-ks", 
usingTableDefinition: &vschema.Table{ AutoIncrement: &vschema.AutoIncrement{ Column: "my-col", @@ -101,7 +102,7 @@ func TestInitializeTargetSequences(t *testing.T) { backingTableKeyspace: sourceKeyspaceName, backingTableDBName: fmt.Sprintf("vt_%s", sourceKeyspaceName), usingTableName: tableName2, - usingTableDBName: "vt_targetks", + usingTableDBName: "vt_target-ks", usingTableDefinition: &vschema.Table{ AutoIncrement: &vschema.AutoIncrement{ Column: "my-col-2", @@ -118,13 +119,13 @@ func TestInitializeTargetSequences(t *testing.T) { BackingTableName: "my-seq1", UsingColEscaped: "`my-col`", UsingTableNameEscaped: fmt.Sprintf("`%s`", tableName), - UsingTableDbNameEscaped: "`vt_targetks`", + UsingTableDbNameEscaped: "`vt_target-ks`", }, { BackingTableName: "my-seq2", UsingColEscaped: "`my-col-2`", UsingTableNameEscaped: fmt.Sprintf("`%s`", tableName2), - UsingTableDbNameEscaped: "`vt_targetks`", + UsingTableDbNameEscaped: "`vt_target-ks`", }, }, }, @@ -163,16 +164,16 @@ func TestGetTargetSequenceMetadata(t *testing.T) { defer cancel() cell := "cell1" workflow := "wf1" - table := "`t1`" - tableDDL := "create table t1 (id int not null auto_increment primary key, c1 varchar(10))" - table2 := "t2" - unescapedTable := "t1" + table := "`tbl-t1`" + tableDDL := "create table tbl-t1 (id int not null auto_increment primary key, c1 varchar(10))" + table2 := "tbl-t2" + unescapedTable := "tbl-t1" sourceKeyspace := &testKeyspace{ KeyspaceName: "source-ks", ShardNames: []string{"0"}, } targetKeyspace := &testKeyspace{ - KeyspaceName: "targetks", + KeyspaceName: "target-ks", ShardNames: []string{"-80", "80-"}, } vindexes := map[string]*vschema.Vindex{ @@ -241,7 +242,7 @@ func TestGetTargetSequenceMetadata(t *testing.T) { backingTableKeyspace: "source-ks", backingTableDBName: "vt_source-ks", usingTableName: unescapedTable, - usingTableDBName: "vt_targetks", + usingTableDBName: "vt_target-ks", usingTableDefinition: &vschema.Table{ ColumnVindexes: []*vschema.ColumnVindex{ { @@ -301,7 
+302,7 @@ func TestGetTargetSequenceMetadata(t *testing.T) { backingTableKeyspace: "source-ks", backingTableDBName: "vt_source-ks", usingTableName: unescapedTable, - usingTableDBName: "vt_targetks", + usingTableDBName: "vt_target-ks", usingTableDefinition: &vschema.Table{ ColumnVindexes: []*vschema.ColumnVindex{ { @@ -350,7 +351,7 @@ func TestGetTargetSequenceMetadata(t *testing.T) { backingTableKeyspace: "source-ks", backingTableDBName: "vt_source-ks", usingTableName: unescapedTable, - usingTableDBName: "vt_targetks", + usingTableDBName: "vt_target-ks", usingTableDefinition: &vschema.Table{ ColumnVindexes: []*vschema.ColumnVindex{ { @@ -414,7 +415,7 @@ func TestGetTargetSequenceMetadata(t *testing.T) { backingTableKeyspace: "source-ks", backingTableDBName: "vt_source-ks", usingTableName: unescapedTable, - usingTableDBName: "vt_targetks", + usingTableDBName: "vt_target-ks", usingTableDefinition: &vschema.Table{ ColumnVindexes: []*vschema.ColumnVindex{ { @@ -433,7 +434,7 @@ func TestGetTargetSequenceMetadata(t *testing.T) { backingTableKeyspace: "source-ks", backingTableDBName: "vt_source-ks", usingTableName: table2, - usingTableDBName: "vt_targetks", + usingTableDBName: "vt_target-ks", usingTableDefinition: &vschema.Table{ ColumnVindexes: []*vschema.ColumnVindex{ { @@ -482,7 +483,7 @@ func TestGetTargetSequenceMetadata(t *testing.T) { backingTableKeyspace: "source-ks", backingTableDBName: "vt_source-ks", usingTableName: unescapedTable, - usingTableDBName: "vt_targetks", + usingTableDBName: "vt_target-ks", usingTableDefinition: &vschema.Table{ ColumnVindexes: []*vschema.ColumnVindex{ { @@ -680,9 +681,9 @@ func TestDryRunInitializeTargetSequences(t *testing.T) { defer cancel() workflowName := "wf1" - tableName := "t1" - sourceKeyspaceName := "sourceks" - targetKeyspaceName := "targetks" + tableName := "tbl-t1" + sourceKeyspaceName := "source-ks" + targetKeyspaceName := "target-ks" schema := map[string]*tabletmanagerdatapb.SchemaDefinition{ tableName: { @@ -718,28 
+719,28 @@ func TestDryRunInitializeTargetSequences(t *testing.T) { sm1 := sequenceMetadata{ backingTableName: "seq1", - backingTableKeyspace: "sourceks", + backingTableKeyspace: "source-ks", backingTableDBName: "ks1", - usingTableName: "t1", - usingTableDBName: "targetks", + usingTableName: "tbl-t1", + usingTableDBName: "target-ks", usingTableDefinition: &vschema.Table{ AutoIncrement: &vschema.AutoIncrement{Column: "id", Sequence: "seq1"}, }, } sm2 := sm1 sm2.backingTableName = "seq2" - sm2.usingTableName = "t2" + sm2.usingTableName = "tbl-t2" sm2.usingTableDefinition.AutoIncrement.Sequence = "seq2" sm3 := sm1 - sm3.backingTableName = "seq3" - sm3.usingTableName = "t3" + sm3.backingTableName = "tbl-seq3" + sm3.usingTableName = "tbl-t3" sm3.usingTableDefinition.AutoIncrement.Sequence = "seq3" tables := map[string]*sequenceMetadata{ - "t1": &sm1, - "t2": &sm2, - "t3": &sm3, + "tbl-t1": &sm1, + "tbl-t2": &sm2, + "tbl-t3": &sm3, } for range tables { From 1614b5831938935b162a2d467e50029f27a68364 Mon Sep 17 00:00:00 2001 From: Matt Lord Date: Thu, 16 Oct 2025 15:38:32 +0000 Subject: [PATCH 03/13] More tweaks Signed-off-by: Matt Lord --- .../vttablet/tabletmanager/rpc_vreplication.go | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/go/vt/vttablet/tabletmanager/rpc_vreplication.go b/go/vt/vttablet/tabletmanager/rpc_vreplication.go index 9c8d5aa8e24..2520bb0bf32 100644 --- a/go/vt/vttablet/tabletmanager/rpc_vreplication.go +++ b/go/vt/vttablet/tabletmanager/rpc_vreplication.go @@ -797,6 +797,11 @@ func (tm *TabletManager) updateSequenceValue(ctx context.Context, seq *tabletman if tm.Tablet().DbNameOverride != "" { seq.BackingTableDbName = tm.Tablet().DbNameOverride } + backingTableDbNameEscaped, err := sqlescape.EnsureEscaped(seq.BackingTableDbName) + if err != nil { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid database name %s specified for sequence backing table: %v", + seq.BackingTableDbName, err) + } 
backingTableNameEscaped, err := sqlescape.EnsureEscaped(seq.BackingTableName) if err != nil { return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid table name %s specified for sequence backing table: %v", @@ -804,8 +809,8 @@ func (tm *TabletManager) updateSequenceValue(ctx context.Context, seq *tabletman } log.Infof("Updating sequence %s.%s to %d", seq.BackingTableDbName, seq.BackingTableName, nextVal) initQuery := sqlparser.BuildParsedQuery(sqlInitSequenceTable, - sqlparser.NewIdentifierCI(seq.BackingTableDbName), - sqlparser.NewTableName(seq.BackingTableName), + backingTableDbNameEscaped, + backingTableNameEscaped, nextVal, nextVal, nextVal, @@ -828,7 +833,7 @@ func (tm *TabletManager) updateSequenceValue(ctx context.Context, seq *tabletman return vterrors.Errorf( vtrpcpb.Code_INTERNAL, "failed to initialize the backing sequence table %s.%s: %v", - seq.BackingTableDbName, seq.BackingTableName, err, + backingTableDbNameEscaped, backingTableNameEscaped, err, ) } @@ -842,11 +847,11 @@ func (tm *TabletManager) updateSequenceValue(ctx context.Context, seq *tabletman return vterrors.Errorf( vtrpcpb.Code_INTERNAL, "failed to initialize the backing sequence table %s.%s after retries. 
Last error: %v", - seq.BackingTableDbName, backingTableNameEscaped, err) + backingTableDbNameEscaped, backingTableNameEscaped, err) } -func (tm *TabletManager) createSequenceTable(ctx context.Context, escapedTableName string) error { - stmt := sqlparser.BuildParsedQuery(sqlCreateSequenceTable, sqlparser.NewTableName(escapedTableName)) +func (tm *TabletManager) createSequenceTable(ctx context.Context, tableName string) error { + stmt := sqlparser.BuildParsedQuery(sqlCreateSequenceTable, sqlparser.NewTableName(tableName)) _, err := tm.ApplySchema(ctx, &tmutils.SchemaChange{ SQL: stmt.Query, Force: false, From 15df096f0895e6904b1f1151e748b52f3fd8b40d Mon Sep 17 00:00:00 2001 From: Matt Lord Date: Thu, 16 Oct 2025 17:42:11 +0000 Subject: [PATCH 04/13] Use dash in source and target keyspaces in e2e tests Signed-off-by: Matt Lord --- go/test/endtoend/vreplication/cluster_test.go | 2 +- go/test/endtoend/vreplication/config_test.go | 50 +++--- .../vreplication/initial_data_test.go | 40 ++--- go/test/endtoend/vreplication/migrate_test.go | 46 ++--- .../vreplication/movetables_buffering_test.go | 4 +- .../movetables_mirrortraffic_test.go | 16 +- .../vreplication/partial_movetables_test.go | 46 +++-- .../resharding_workflows_v2_test.go | 128 ++++++-------- .../vreplication/vreplication_test.go | 166 ++++++++---------- .../vreplication_vtctldclient_cli_test.go | 2 +- go/vt/vtctl/workflow/sequences_test.go | 67 ++++--- .../tabletmanager/rpc_vreplication.go | 12 +- .../tabletserver/vstreamer/planbuilder.go | 2 +- 13 files changed, 285 insertions(+), 296 deletions(-) diff --git a/go/test/endtoend/vreplication/cluster_test.go b/go/test/endtoend/vreplication/cluster_test.go index 79eaa4d1735..55c81a48ec6 100644 --- a/go/test/endtoend/vreplication/cluster_test.go +++ b/go/test/endtoend/vreplication/cluster_test.go @@ -360,7 +360,7 @@ func getClusterOptions(opts *clusterOptions) *clusterOptions { opts = &clusterOptions{} } if opts.cells == nil { - opts.cells = []string{"zone1"} + 
opts.cells = []string{defaultCellName} } if opts.clusterConfig == nil { opts.clusterConfig = mainClusterConfig diff --git a/go/test/endtoend/vreplication/config_test.go b/go/test/endtoend/vreplication/config_test.go index 53a861b9fa0..5fe3b801840 100644 --- a/go/test/endtoend/vreplication/config_test.go +++ b/go/test/endtoend/vreplication/config_test.go @@ -21,6 +21,16 @@ import ( "strings" ) +const ( + // Defaults used for all tests. + workflowName = "wf1" + sourceKs = "vitess-product" + targetKs = "vitess-customer" + ksWorkflow = targetKs + "." + workflowName + reverseKsWorkflow = sourceKs + "." + workflowName + "_reverse" + defaultCellName = "zone1" +) + // The product, customer, Lead, Lead-1 tables are used to exercise and test most Workflow variants. // We violate the NO_ZERO_DATES and NO_ZERO_IN_DATE sql_modes that are enabled by default in // MySQL 5.7+ and MariaDB 10.2+ to ensure that vreplication still works everywhere and the @@ -431,44 +441,44 @@ create table ukTable (id1 int not null, id2 int not null, name varchar(20), uniq } } ` - materializeProductSpec = ` + materializeProductSpec = fmt.Sprintf(` { "workflow": "cproduct", "source_keyspace": "product", - "target_keyspace": "customer", + "target_keyspace": "%s", "table_settings": [{ "target_table": "cproduct", "source_expression": "select * from product", "create_ddl": "create table cproduct(pid bigint, description varchar(128), date1 datetime not null default '0000-00-00 00:00:00', date2 datetime not null default '2021-00-01 00:00:00', primary key(pid)) CHARSET=utf8mb4" }] } -` +`, targetKs) - materializeCustomerNameSpec = ` + materializeCustomerNameSpec = fmt.Sprintf(` { "workflow": "customer_name", - "source_keyspace": "customer", - "target_keyspace": "customer", + "source_keyspace": "%s", + "target_keyspace": "%s", "table_settings": [{ "target_table": "customer_name", "source_expression": "select cid, name from customer", "create_ddl": "create table if not exists customer_name (cid bigint not null, 
name varchar(128), primary key(cid), key(name))" }] } -` +`, targetKs, targetKs) - materializeCustomerTypeSpec = ` + materializeCustomerTypeSpec = fmt.Sprintf(` { "workflow": "enterprise_customer", - "source_keyspace": "customer", - "target_keyspace": "customer", + "source_keyspace": "%s", + "target_keyspace": "%s", "table_settings": [{ "target_table": "enterprise_customer", "source_expression": "select cid, name, typ from customer where typ = 'enterprise'", "create_ddl": "create table if not exists enterprise_customer (cid bigint not null, name varchar(128), typ varchar(64), primary key(cid), key(typ))" }] } -` +`, targetKs, targetKs) merchantOrdersVSchema = ` { @@ -512,10 +522,10 @@ create table ukTable (id1 int not null, id2 int not null, name varchar(20), uniq ` // the merchant-type keyspace allows us to test keyspace names with special characters in them (dash) - materializeMerchantOrdersSpec = ` + materializeMerchantOrdersSpec = fmt.Sprintf(` { "workflow": "morders", - "source_keyspace": "customer", + "source_keyspace": "%s", "target_keyspace": "merchant-type", "table_settings": [{ "target_table": "morders", @@ -523,12 +533,12 @@ create table ukTable (id1 int not null, id2 int not null, name varchar(20), uniq "create_ddl": "create table morders(oid int, cid int, mname varchar(128), pid int, price int, qty int, total int, total2 int as (10 * total), primary key(oid)) CHARSET=utf8" }] } -` +`, targetKs) - materializeMerchantSalesSpec = ` + materializeMerchantSalesSpec = fmt.Sprintf(` { "workflow": "msales", - "source_keyspace": "customer", + "source_keyspace": "%s", "target_keyspace": "merchant-type", "table_settings": [{ "target_table": "msales", @@ -536,7 +546,7 @@ create table ukTable (id1 int not null, id2 int not null, name varchar(20), uniq "create_ddl": "create table msales(merchant_name varchar(128), kount int, amount int, primary key(merchant_name)) CHARSET=utf8" }] } -` +`, targetKs) materializeSalesVSchema = ` { @@ -552,10 +562,10 @@ create table 
ukTable (id1 int not null, id2 int not null, name varchar(20), uniq } } ` - materializeSalesSpec = ` + materializeSalesSpec = fmt.Sprintf(` { "workflow": "sales", - "source_keyspace": "customer", + "source_keyspace": "%s", "target_keyspace": "product", "table_settings": [{ "target_Table": "sales", @@ -563,7 +573,7 @@ create table ukTable (id1 int not null, id2 int not null, name varchar(20), uniq "create_ddl": "create table sales(pid int, kount int, amount int, primary key(pid)) CHARSET=utf8" }] } -` +`, targetKs) materializeRollupSpec = ` { "workflow": "rollup", diff --git a/go/test/endtoend/vreplication/initial_data_test.go b/go/test/endtoend/vreplication/initial_data_test.go index ea34ef7fddf..e6318e1760c 100644 --- a/go/test/endtoend/vreplication/initial_data_test.go +++ b/go/test/endtoend/vreplication/initial_data_test.go @@ -31,15 +31,15 @@ func insertInitialData(t *testing.T) { defer closeConn() log.Infof("Inserting initial data") lines, _ := os.ReadFile("unsharded_init_data.sql") - execMultipleQueries(t, vtgateConn, "product:0", string(lines)) - execVtgateQuery(t, vtgateConn, "product:0", "insert into customer_seq(id, next_id, cache) values(0, 100, 100);") - execVtgateQuery(t, vtgateConn, "product:0", "insert into order_seq(id, next_id, cache) values(0, 100, 100);") - execVtgateQuery(t, vtgateConn, "product:0", "insert into customer_seq2(id, next_id, cache) values(0, 100, 100);") + execMultipleQueries(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), string(lines)) + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "insert into customer_seq(id, next_id, cache) values(0, 100, 100);") + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "insert into order_seq(id, next_id, cache) values(0, 100, 100);") + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "insert into customer_seq2(id, next_id, cache) values(0, 100, 100);") log.Infof("Done inserting initial data") - waitForRowCount(t, vtgateConn, "product:0", "product", 2) - 
waitForRowCount(t, vtgateConn, "product:0", "customer", 3) - waitForQueryResult(t, vtgateConn, "product:0", "select * from merchant", + waitForRowCount(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "product", 2) + waitForRowCount(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "customer", 3) + waitForQueryResult(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "select * from merchant", `[[VARCHAR("Monoprice") VARCHAR("eléctronics")] [VARCHAR("newegg") VARCHAR("elec†ronics")]]`) insertJSONValues(t) @@ -52,12 +52,12 @@ func insertJSONValues(t *testing.T) { // insert null value combinations vtgateConn, closeConn := getVTGateConn() defer closeConn() - execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j3) values(1, \"{}\")") - execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j1, j3) values(2, \"{}\", \"{}\")") - execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j2, j3) values(3, \"{}\", \"{}\")") - execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j1, j2, j3) values(4, NULL, 'null', '\"null\"')") - execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j3) values(5, JSON_QUOTE('null'))") - execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j3) values(6, '{}')") + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "insert into json_tbl(id, j3) values(1, \"{}\")") + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "insert into json_tbl(id, j1, j3) values(2, \"{}\", \"{}\")") + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "insert into json_tbl(id, j2, j3) values(3, \"{}\", \"{}\")") + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "insert into json_tbl(id, j1, j2, j3) values(4, NULL, 'null', '\"null\"')") + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "insert into json_tbl(id, j3) values(5, JSON_QUOTE('null'))") + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "insert into 
json_tbl(id, j3) values(6, '{}')") id := 8 // 6 inserted above and one after copy phase is done @@ -68,7 +68,7 @@ func insertJSONValues(t *testing.T) { j1 := rand.IntN(numJsonValues) j2 := rand.IntN(numJsonValues) query := fmt.Sprintf(q, id, jsonValues[j1], jsonValues[j2]) - execVtgateQuery(t, vtgateConn, "product:0", query) + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), query) } } @@ -82,7 +82,7 @@ func insertMoreCustomers(t *testing.T, numCustomers int) { // that we reserved. vtgateConn, closeConn := getVTGateConn() defer closeConn() - maxID := waitForSequenceValue(t, vtgateConn, "product", "customer_seq", numCustomers) + maxID := waitForSequenceValue(t, vtgateConn, sourceKs, "customer_seq", numCustomers) // So we need to calculate the first value we reserved // from the max. cid := maxID - int64(numCustomers) @@ -97,28 +97,28 @@ func insertMoreCustomers(t *testing.T, numCustomers int) { } cid++ } - execVtgateQuery(t, vtgateConn, "customer", sql) + execVtgateQuery(t, vtgateConn, targetKs, sql) } func insertMoreProducts(t *testing.T) { vtgateConn, closeConn := getVTGateConn() defer closeConn() sql := "insert into product(pid, description) values(3, 'cpu'),(4, 'camera'),(5, 'mouse');" - execVtgateQuery(t, vtgateConn, "product", sql) + execVtgateQuery(t, vtgateConn, sourceKs, sql) } func insertMoreProductsForSourceThrottler(t *testing.T) { vtgateConn, closeConn := getVTGateConn() defer closeConn() sql := "insert into product(pid, description) values(103, 'new-cpu'),(104, 'new-camera'),(105, 'new-mouse');" - execVtgateQuery(t, vtgateConn, "product", sql) + execVtgateQuery(t, vtgateConn, sourceKs, sql) } func insertMoreProductsForTargetThrottler(t *testing.T) { vtgateConn, closeConn := getVTGateConn() defer closeConn() sql := "insert into product(pid, description) values(203, 'new-cpu'),(204, 'new-camera'),(205, 'new-mouse');" - execVtgateQuery(t, vtgateConn, "product", sql) + execVtgateQuery(t, vtgateConn, sourceKs, sql) } var blobTableQueries = 
[]string{ @@ -137,6 +137,6 @@ func insertIntoBlobTable(t *testing.T) { vtgateConn, closeConn := getVTGateConn() defer closeConn() for _, query := range blobTableQueries { - execVtgateQuery(t, vtgateConn, "product:0", query) + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), query) } } diff --git a/go/test/endtoend/vreplication/migrate_test.go b/go/test/endtoend/vreplication/migrate_test.go index f654f9129a0..31d23547841 100644 --- a/go/test/endtoend/vreplication/migrate_test.go +++ b/go/test/endtoend/vreplication/migrate_test.go @@ -63,7 +63,7 @@ func TestMigrateUnsharded(t *testing.T) { }() defaultCell := vc.Cells[vc.CellNames[0]] - _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", + _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) require.NoError(t, err, "failed to create product keyspace") @@ -91,7 +91,7 @@ func TestMigrateUnsharded(t *testing.T) { extVtgateConn := getConnection(t, extVc.ClusterConfig.hostname, extVc.ClusterConfig.vtgateMySQLPort) insertInitialDataIntoExternalCluster(t, extVtgateConn) - targetPrimary := vc.getPrimaryTablet(t, "product", "0") + targetPrimary := vc.getPrimaryTablet(t, sourceKs, "0") var output, expected string @@ -115,26 +115,26 @@ func TestMigrateUnsharded(t *testing.T) { require.Equal(t, "/vitess/global", gjson.Get(output, "topo_root").String()) }) - ksWorkflow := "product.e1" + ksWorkflow := fmt.Sprintf("%s.e1", sourceKs) t.Run("migrate from external cluster", func(t *testing.T) { if output, err = vc.VtctldClient.ExecuteCommandWithOutput("Migrate", - "--target-keyspace", "product", "--workflow", "e1", + "--target-keyspace", sourceKs, "--workflow", "e1", "create", "--source-keyspace", "rating", "--mount-name", "ext1", "--all-tables", "--cells=extcell1", "--tablet-types=primary,replica"); err != nil { t.Fatalf("Migrate command failed with %+v : %s\n", err, output) } waitForWorkflowState(t, vc, 
ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) - expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 1) - waitForRowCountInTablet(t, targetPrimary, "product", "rating", 2) - waitForRowCountInTablet(t, targetPrimary, "product", "review", 3) + expectNumberOfStreams(t, vtgateConn, "migrate", "e1", fmt.Sprintf("%s:0", sourceKs), 1) + waitForRowCountInTablet(t, targetPrimary, sourceKs, "rating", 2) + waitForRowCountInTablet(t, targetPrimary, sourceKs, "review", 3) execVtgateQuery(t, extVtgateConn, "rating", "insert into review(rid, pid, review) values(4, 1, 'review4');") execVtgateQuery(t, extVtgateConn, "rating", "insert into rating(gid, pid, rating) values(3, 1, 3);") - waitForRowCountInTablet(t, targetPrimary, "product", "rating", 3) - waitForRowCountInTablet(t, targetPrimary, "product", "review", 4) + waitForRowCountInTablet(t, targetPrimary, sourceKs, "rating", 3) + waitForRowCountInTablet(t, targetPrimary, sourceKs, "review", 4) doVDiff(t, ksWorkflow, "extcell1") output, err = vc.VtctldClient.ExecuteCommandWithOutput("Migrate", - "--target-keyspace", "product", "--workflow", "e1", "show") + "--target-keyspace", sourceKs, "--workflow", "e1", "show") require.NoError(t, err, "Migrate command failed with %s", output) wf := gjson.Get(output, "workflows").Array()[0] @@ -142,32 +142,32 @@ func TestMigrateUnsharded(t *testing.T) { require.Equal(t, "Migrate", wf.Get("workflow_type").String()) output, err = vc.VtctldClient.ExecuteCommandWithOutput("Migrate", - "--target-keyspace", "product", "--workflow", "e1", "status", "--format=json") + "--target-keyspace", sourceKs, "--workflow", "e1", "status", "--format=json") require.NoError(t, err, "Migrate command failed with %s", output) - require.Equal(t, "Running", gjson.Get(output, "shard_streams.product/0.streams.0.status").String()) + require.Equal(t, "Running", gjson.Get(output, fmt.Sprintf("shard_streams.%s/0.streams.0.status", sourceKs)).String()) output, err = 
vc.VtctldClient.ExecuteCommandWithOutput("Migrate", - "--target-keyspace", "product", "--workflow", "e1", "complete") + "--target-keyspace", sourceKs, "--workflow", "e1", "complete") require.NoError(t, err, "Migrate command failed with %s", output) - expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 0) + expectNumberOfStreams(t, vtgateConn, "migrate", "e1", fmt.Sprintf("%s:0", sourceKs), 0) }) t.Run("cancel migrate workflow", func(t *testing.T) { - execVtgateQuery(t, vtgateConn, "product", "drop table review,rating") + execVtgateQuery(t, vtgateConn, sourceKs, "drop table review,rating") output, err = vc.VtctldClient.ExecuteCommandWithOutput("Migrate", - "--target-keyspace", "product", "--workflow", "e1", "Create", "--source-keyspace", "rating", + "--target-keyspace", sourceKs, "--workflow", "e1", "Create", "--source-keyspace", "rating", "--mount-name", "ext1", "--all-tables", "--auto-start=false", "--cells=extcell1") require.NoError(t, err, "Migrate command failed with %s", output) - expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 1, binlogdatapb.VReplicationWorkflowState_Stopped.String()) - waitForRowCountInTablet(t, targetPrimary, "product", "rating", 0) - waitForRowCountInTablet(t, targetPrimary, "product", "review", 0) + expectNumberOfStreams(t, vtgateConn, "migrate", "e1", fmt.Sprintf("%s:0", sourceKs), 1, binlogdatapb.VReplicationWorkflowState_Stopped.String()) + waitForRowCountInTablet(t, targetPrimary, sourceKs, "rating", 0) + waitForRowCountInTablet(t, targetPrimary, sourceKs, "review", 0) output, err = vc.VtctldClient.ExecuteCommandWithOutput("Migrate", - "--target-keyspace", "product", "--workflow", "e1", "cancel") + "--target-keyspace", sourceKs, "--workflow", "e1", "cancel") require.NoError(t, err, "Migrate command failed with %s", output) - expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 0) + expectNumberOfStreams(t, vtgateConn, "migrate", "e1", fmt.Sprintf("%s:0", sourceKs), 0) var found bool 
found, err = checkIfTableExists(t, vc, "zone1-100", "review") require.NoError(t, err) @@ -213,7 +213,7 @@ func TestMigrateSharded(t *testing.T) { vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() - setupCustomerKeyspace(t) + setupTargetKeyspace(t) createMoveTablesWorkflow(t, "customer,Lead,datze,customer2") tstWorkflowSwitchReadsAndWrites(t) tstWorkflowComplete(t) @@ -246,7 +246,7 @@ func TestMigrateSharded(t *testing.T) { ksWorkflow := "rating.e1" if output, err = extVc.VtctldClient.ExecuteCommandWithOutput("Migrate", "--target-keyspace", "rating", "--workflow", "e1", - "create", "--source-keyspace", "customer", "--mount-name", "external", "--all-tables", "--cells=zone1", + "create", "--source-keyspace", targetKs, "--mount-name", "external", "--all-tables", "--cells=zone1", "--tablet-types=primary"); err != nil { require.FailNow(t, "Migrate command failed with %+v : %s\n", err, output) } diff --git a/go/test/endtoend/vreplication/movetables_buffering_test.go b/go/test/endtoend/vreplication/movetables_buffering_test.go index da8b9d1f96b..3b13091f161 100644 --- a/go/test/endtoend/vreplication/movetables_buffering_test.go +++ b/go/test/endtoend/vreplication/movetables_buffering_test.go @@ -24,7 +24,7 @@ func TestMoveTablesBuffering(t *testing.T) { defer vc.TearDown() currentWorkflowType = binlogdatapb.VReplicationWorkflowType_MoveTables - setupMinimalCustomerKeyspace(t) + setupMinimalTargetKeyspace(t) tables := "loadtest" err := tstWorkflowExec(t, defaultCellName, workflowName, sourceKs, targetKs, tables, workflowActionCreate, "", "", "", defaultWorkflowExecOptions) @@ -40,7 +40,7 @@ func TestMoveTablesBuffering(t *testing.T) { catchup(t, targetTab1, workflowName, "MoveTables") catchup(t, targetTab2, workflowName, "MoveTables") vdiff(t, targetKs, workflowName, "", nil) - waitForLowLag(t, "customer", workflowName) + waitForLowLag(t, targetKs, workflowName) for i := 0; i < 10; i++ { 
tstWorkflowSwitchReadsAndWrites(t) time.Sleep(loadTestBufferingWindowDuration + 1*time.Second) diff --git a/go/test/endtoend/vreplication/movetables_mirrortraffic_test.go b/go/test/endtoend/vreplication/movetables_mirrortraffic_test.go index e0e7dbfc148..f7e204ff888 100644 --- a/go/test/endtoend/vreplication/movetables_mirrortraffic_test.go +++ b/go/test/endtoend/vreplication/movetables_mirrortraffic_test.go @@ -36,20 +36,18 @@ func testMoveTablesMirrorTraffic(t *testing.T, flavor workflowFlavor) { vc = setupMinimalCluster(t) defer vc.TearDown() - sourceKeyspace := "product" - targetKeyspace := "customer" workflowName := "wf1" tables := []string{"customer", "loadtest", "customer2"} - _ = setupMinimalCustomerKeyspace(t) + _ = setupMinimalTargetKeyspace(t) mtwf := &moveTablesWorkflow{ workflowInfo: &workflowInfo{ vc: vc, workflowName: workflowName, - targetKeyspace: targetKeyspace, + targetKeyspace: targetKs, }, - sourceKeyspace: sourceKeyspace, + sourceKeyspace: sourceKs, tables: "customer,loadtest,customer2", mirrorFlags: []string{"--percent", "25"}, } @@ -64,7 +62,7 @@ func testMoveTablesMirrorTraffic(t *testing.T, flavor workflowFlavor) { // Mirror rules can be created after a MoveTables workflow is created. 
mt.MirrorTraffic() confirmMirrorRulesExist(t) - expectMirrorRules(t, sourceKeyspace, targetKeyspace, tables, []topodatapb.TabletType{ + expectMirrorRules(t, sourceKs, targetKs, tables, []topodatapb.TabletType{ topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY, @@ -74,7 +72,7 @@ func testMoveTablesMirrorTraffic(t *testing.T, flavor workflowFlavor) { mtwf.mirrorFlags[1] = "50" mt.MirrorTraffic() confirmMirrorRulesExist(t) - expectMirrorRules(t, sourceKeyspace, targetKeyspace, tables, []topodatapb.TabletType{ + expectMirrorRules(t, sourceKs, targetKs, tables, []topodatapb.TabletType{ topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY, @@ -85,7 +83,7 @@ func testMoveTablesMirrorTraffic(t *testing.T, flavor workflowFlavor) { mtwf.mirrorFlags[1] = "75" mt.MirrorTraffic() confirmMirrorRulesExist(t) - expectMirrorRules(t, sourceKeyspace, targetKeyspace, tables, []topodatapb.TabletType{ + expectMirrorRules(t, sourceKs, targetKs, tables, []topodatapb.TabletType{ topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY, @@ -105,7 +103,7 @@ func testMoveTablesMirrorTraffic(t *testing.T, flavor workflowFlavor) { mtwf.mirrorFlags = append(mtwf.mirrorFlags, "--tablet-types", "primary") mt.MirrorTraffic() confirmMirrorRulesExist(t) - expectMirrorRules(t, sourceKeyspace, targetKeyspace, tables, []topodatapb.TabletType{ + expectMirrorRules(t, sourceKs, targetKs, tables, []topodatapb.TabletType{ topodatapb.TabletType_PRIMARY, }, 100) diff --git a/go/test/endtoend/vreplication/partial_movetables_test.go b/go/test/endtoend/vreplication/partial_movetables_test.go index 88047bb0f59..92c55fdbea7 100644 --- a/go/test/endtoend/vreplication/partial_movetables_test.go +++ b/go/test/endtoend/vreplication/partial_movetables_test.go @@ -35,10 +35,10 @@ import ( // Before canceling, we first switch traffic to the target keyspace and then reverse it back to the source keyspace. 
// This tests that artifacts are being properly cleaned up when a MoveTables ia canceled. func testCancel(t *testing.T) { - targetKeyspace := "customer2" - sourceKeyspace := "customer" + targetKs := "customer2" + sourceKs := "customer" workflowName := "partial80DashForCancel" - ksWorkflow := fmt.Sprintf("%s.%s", targetKeyspace, workflowName) + ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflowName) // We use a different table in this MoveTables than the subsequent one, so that setting up of the artifacts // while creating MoveTables do not paper over any issues with cleaning up artifacts when MoveTables is canceled. // Ref: https://github.com/vitessio/vitess/issues/13998 @@ -49,9 +49,9 @@ func testCancel(t *testing.T) { workflowInfo: &workflowInfo{ vc: vc, workflowName: workflowName, - targetKeyspace: targetKeyspace, + targetKeyspace: targetKs, }, - sourceKeyspace: sourceKeyspace, + sourceKeyspace: sourceKs, tables: table, sourceShards: shard, }, workflowFlavorVtctld) @@ -63,22 +63,22 @@ func testCancel(t *testing.T) { waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) - checkDenyList(targetKeyspace, false) - checkDenyList(sourceKeyspace, false) + checkDenyList(targetKs, false) + checkDenyList(sourceKs, false) mt.SwitchReadsAndWrites() - checkDenyList(targetKeyspace, false) - checkDenyList(sourceKeyspace, true) + checkDenyList(targetKs, false) + checkDenyList(sourceKs, true) time.Sleep(loadTestBufferingWindowDuration + 1*time.Second) mt.ReverseReadsAndWrites() - checkDenyList(targetKeyspace, true) - checkDenyList(sourceKeyspace, false) + checkDenyList(targetKs, true) + checkDenyList(sourceKs, false) time.Sleep(loadTestBufferingWindowDuration + 1*time.Second) mt.Cancel() - checkDenyList(targetKeyspace, false) - checkDenyList(sourceKeyspace, false) + checkDenyList(targetKs, false) + checkDenyList(sourceKs, false) } @@ -109,26 +109,24 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) { }() vc = 
setupMinimalCluster(t)
 	defer vc.TearDown()
 
-	sourceKeyspace := "product"
-	targetKeyspace := "customer"
 	workflowName := "wf1"
-	targetTabs := setupMinimalCustomerKeyspace(t)
+	targetTabs := setupMinimalTargetKeyspace(t)
 	targetTab80Dash := targetTabs["80-"]
 	targetTabDash80 := targetTabs["-80"]
 	mt := newMoveTables(vc, &moveTablesWorkflow{
 		workflowInfo: &workflowInfo{
 			vc:             vc,
 			workflowName:   workflowName,
-			targetKeyspace: targetKeyspace,
+			targetKeyspace: targetKs,
 		},
-		sourceKeyspace: sourceKeyspace,
+		sourceKeyspace: sourceKs,
 		tables:         "customer,loadtest,customer2",
 	}, flavor)
 	mt.Create()
-	waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKeyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String())
+	waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKs, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String())
 	catchup(t, targetTab80Dash, workflowName, "MoveTables")
-	vdiff(t, targetKeyspace, workflowName, defaultCellName, nil)
+	vdiff(t, targetKs, workflowName, defaultCellName, nil)
 	mt.SwitchReadsAndWrites()
 	time.Sleep(loadTestBufferingWindowDuration + 1*time.Second)
 	mt.Complete()
@@ -165,8 +163,8 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) {
 	}
 	var err error
 	workflowName = "partial80Dash"
-	sourceKeyspace = "customer"
-	targetKeyspace = "customer2"
+	sourceKeyspace := targetKs
+	targetKeyspace := "customer2"
 	shard := "80-"
 	tables := "customer,loadtest"
 	mt80Dash := newMoveTables(vc, &moveTablesWorkflow{
diff --git a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go
index da25df5b5ed..418e1aca8d9 100644
--- a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go
+++ b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go
@@ -45,15 +45,6 @@ import (
 	vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
 )
 
-const (
-	workflowName      = "wf1"
-	sourceKs          = "product"
-	targetKs          = "customer"
-	ksWorkflow        = targetKs + "." + workflowName
-	reverseKsWorkflow = sourceKs + "." + workflowName + "_reverse"
-	defaultCellName   = "zone1"
-)
-
 const (
 	workflowActionCreate        = "Create"
 	workflowActionMirrorTraffic = "Mirror"
@@ -306,8 +297,8 @@ func validateWritesRouteToSource(t *testing.T) {
 	defer closeConn()
 	insertQuery := "insert into customer(name, cid) values('tempCustomer2', 200)"
 	matchInsertQuery := "insert into customer(`name`, cid) values"
-	assertQueryExecutesOnTablet(t, vtgateConn, sourceTab, "customer", insertQuery, matchInsertQuery)
-	execVtgateQuery(t, vtgateConn, "customer", "delete from customer where cid = 200")
+	assertQueryExecutesOnTablet(t, vtgateConn, sourceTab, targetKs, insertQuery, matchInsertQuery)
+	execVtgateQuery(t, vtgateConn, targetKs, "delete from customer where cid = 200")
 }
 
 func validateWritesRouteToTarget(t *testing.T) {
@@ -315,10 +306,10 @@ func validateWritesRouteToTarget(t *testing.T) {
 	defer closeConn()
 	insertQuery := "insert into customer(name, cid) values('tempCustomer3', 101)"
 	matchInsertQuery := "insert into customer(`name`, cid) values"
-	assertQueryExecutesOnTablet(t, vtgateConn, targetTab2, "customer", insertQuery, matchInsertQuery)
+	assertQueryExecutesOnTablet(t, vtgateConn, targetTab2, targetKs, insertQuery, matchInsertQuery)
 	insertQuery = "insert into customer(name, cid) values('tempCustomer3', 102)"
-
assertQueryExecutesOnTablet(t, vtgateConn, targetTab1, "customer", insertQuery, matchInsertQuery) - execVtgateQuery(t, vtgateConn, "customer", "delete from customer where cid in (101, 102)") + assertQueryExecutesOnTablet(t, vtgateConn, targetTab1, targetKs, insertQuery, matchInsertQuery) + execVtgateQuery(t, vtgateConn, targetKs, "delete from customer where cid in (101, 102)") } func revert(t *testing.T, workflowType string) { @@ -399,8 +390,8 @@ func testVSchemaForSequenceAfterMoveTables(t *testing.T) { "customer2", workflowActionCreate, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) - waitForWorkflowState(t, vc, "customer.wf2", binlogdatapb.VReplicationWorkflowState_Running.String()) - waitForLowLag(t, "customer", "wf2") + waitForWorkflowState(t, vc, fmt.Sprintf("%s.wf2", targetKs), binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForLowLag(t, targetKs, "wf2") err = tstWorkflowExec(t, defaultCellName, "wf2", sourceKs, targetKs, "", workflowActionSwitchTraffic, "", "", "", defaultWorkflowExecOptions) @@ -412,32 +403,32 @@ func testVSchemaForSequenceAfterMoveTables(t *testing.T) { vtgateConn, closeConn := getVTGateConn() defer closeConn() // sanity check - output, err := vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", "product") + output, err := vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", sourceKs) require.NoError(t, err) assert.NotContains(t, output, "customer2\"", "customer2 still found in keyspace product") - waitForRowCount(t, vtgateConn, "customer", "customer2", 3) + waitForRowCount(t, vtgateConn, targetKs, "customer2", 3) // check that customer2 has the sequence tag - output, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", "customer") + output, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", targetKs) require.NoError(t, err) assert.Contains(t, output, "\"sequence\": \"customer_seq2\"", "customer2 sequence missing in keyspace customer") // ensure sequence is available to vtgate num := 5 
for i := 0; i < num; i++ { - execVtgateQuery(t, vtgateConn, "customer", "insert into customer2(name) values('a')") + execVtgateQuery(t, vtgateConn, targetKs, "insert into customer2(name) values('a')") } - waitForRowCount(t, vtgateConn, "customer", "customer2", 3+num) + waitForRowCount(t, vtgateConn, targetKs, "customer2", 3+num) want := fmt.Sprintf("[[INT32(%d)]]", 100+num-1) - waitForQueryResult(t, vtgateConn, "customer", "select max(cid) from customer2", want) + waitForQueryResult(t, vtgateConn, targetKs, "select max(cid) from customer2", want) // use MoveTables to move customer2 back to product. Note that now the table has an associated sequence err = tstWorkflowExec(t, defaultCellName, "wf3", targetKs, sourceKs, "customer2", workflowActionCreate, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) - waitForWorkflowState(t, vc, "product.wf3", binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.wf3", sourceKs), binlogdatapb.VReplicationWorkflowState_Running.String()) - waitForLowLag(t, "product", "wf3") + waitForLowLag(t, sourceKs, "wf3") err = tstWorkflowExec(t, defaultCellName, "wf3", targetKs, sourceKs, "", workflowActionSwitchTraffic, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) @@ -446,21 +437,21 @@ func testVSchemaForSequenceAfterMoveTables(t *testing.T) { require.NoError(t, err) // sanity check - output, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", "product") + output, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", sourceKs) require.NoError(t, err) assert.Contains(t, output, "customer2\"", "customer2 not found in keyspace product ") // check that customer2 still has the sequence tag - output, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", "product") + output, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", sourceKs) require.NoError(t, err) assert.Contains(t, output, "\"sequence\": \"customer_seq2\"", "customer2 still 
found in keyspace product") // ensure sequence is available to vtgate for i := 0; i < num; i++ { - execVtgateQuery(t, vtgateConn, "product", "insert into customer2(name) values('a')") + execVtgateQuery(t, vtgateConn, sourceKs, "insert into customer2(name) values('a')") } - waitForRowCount(t, vtgateConn, "product", "customer2", 3+num+num) - res := execVtgateQuery(t, vtgateConn, "product", "select max(cid) from customer2") + waitForRowCount(t, vtgateConn, sourceKs, "customer2", 3+num+num) + res := execVtgateQuery(t, vtgateConn, sourceKs, "select max(cid) from customer2") cid, err := res.Rows[0][0].ToInt() require.NoError(t, err) require.GreaterOrEqual(t, cid, 100+num+num-1) @@ -514,7 +505,7 @@ func testReshardV2Workflow(t *testing.T) { return default: // Use a random customer type for each record. - _ = execVtgateQuery(t, dataGenConn, "customer", fmt.Sprintf("insert into customer (cid, name, typ) values (%d, 'tempCustomer%d', %s)", + _ = execVtgateQuery(t, dataGenConn, targetKs, fmt.Sprintf("insert into customer (cid, name, typ) values (%d, 'tempCustomer%d', %s)", id, id, customerTypes[rand.IntN(len(customerTypes))])) } time.Sleep(1 * time.Millisecond) @@ -527,7 +518,7 @@ func testReshardV2Workflow(t *testing.T) { execMultipleQueries(t, vtgateConn, targetKs+"/-80", internalSchema) execMultipleQueries(t, vtgateConn, targetKs+"/80-", internalSchema) - createAdditionalCustomerShards(t, "-40,40-80,80-c0,c0-") + createAdditionalTargetShards(t, "-40,40-80,80-c0,c0-") createReshardWorkflow(t, "-80,80-", "-40,40-80,80-c0,c0-") validateReadsRouteToSource(t, "replica") validateWritesRouteToSource(t) @@ -545,23 +536,23 @@ func testReshardV2Workflow(t *testing.T) { // Confirm that we lost no customer related writes during the Reshard. 
dataGenCancel() dataGenWg.Wait() - cres := execVtgateQuery(t, dataGenConn, "customer", "select count(*) from customer") + cres := execVtgateQuery(t, dataGenConn, targetKs, "select count(*) from customer") require.Len(t, cres.Rows, 1) - waitForNoWorkflowLag(t, vc, "customer", "customer_name") - cnres := execVtgateQuery(t, dataGenConn, "customer", "select count(*) from customer_name") + waitForNoWorkflowLag(t, vc, targetKs, "customer_name") + cnres := execVtgateQuery(t, dataGenConn, targetKs, "select count(*) from customer_name") require.Len(t, cnres.Rows, 1) require.EqualValues(t, cres.Rows, cnres.Rows) if debugMode { // We expect the row count to differ in enterprise_customer because it is // using a `where typ='enterprise'` filter. So the count is only for debug // info. - ecres := execVtgateQuery(t, dataGenConn, "customer", "select count(*) from enterprise_customer") + ecres := execVtgateQuery(t, dataGenConn, targetKs, "select count(*) from enterprise_customer") t.Logf("Done inserting customer data. Record counts in customer: %s, customer_name: %s, enterprise_customer: %s", cres.Rows[0][0].ToString(), cnres.Rows[0][0].ToString(), ecres.Rows[0][0].ToString()) } // We also do a vdiff on the materialize workflows for good measure. 
- doVtctldclientVDiff(t, "customer", "customer_name", "", nil) - doVtctldclientVDiff(t, "customer", "enterprise_customer", "", nil) + doVtctldclientVDiff(t, targetKs, "customer_name", "", nil) + doVtctldclientVDiff(t, targetKs, "enterprise_customer", "", nil) } func testMoveTablesV2Workflow(t *testing.T) { @@ -573,13 +564,13 @@ func testMoveTablesV2Workflow(t *testing.T) { if !debugMode { return } - output, err := vc.VtctldClient.ExecuteCommandWithOutput("materialize", "--target-keyspace=customer", "show", "--workflow=customer_name", "--compact", "--include-logs=false") + output, err := vc.VtctldClient.ExecuteCommandWithOutput("materialize", "--target-keyspace", targetKs, "show", "--workflow=customer_name", "--compact", "--include-logs=false") require.NoError(t, err) t.Logf("Materialize show output: %s", output) } // Test basic forward and reverse flows. - setupCustomerKeyspace(t) + setupTargetKeyspace(t) listOutputContainsWorkflow := func(output string, workflow string) bool { workflows := []string{} @@ -598,7 +589,7 @@ func testMoveTablesV2Workflow(t *testing.T) { require.NoError(t, err) return len(workflows) == 0 } - listAllArgs := []string{"workflow", "--keyspace", "customer", "list"} + listAllArgs := []string{"workflow", "--keyspace", targetKs, "list"} output, err := vc.VtctldClient.ExecuteCommandWithOutput(listAllArgs...) 
require.NoError(t, err) @@ -683,9 +674,9 @@ func testPartialSwitches(t *testing.T) { tstWorkflowSwitchWrites(t) checkStates(t, nextState, nextState) // idempotency - keyspace := "product" + keyspace := sourceKs if currentWorkflowType == binlogdatapb.VReplicationWorkflowType_Reshard { - keyspace = "customer" + keyspace = targetKs } waitForLowLag(t, keyspace, "wf1_reverse") tstWorkflowReverseReads(t, "replica,rdonly", "zone1") @@ -712,13 +703,13 @@ func testRestOfWorkflow(t *testing.T) { Threshold: throttlerConfig.Threshold * 5, CustomQuery: throttlerConfig.Query, } - res, err := throttler.UpdateThrottlerTopoConfigRaw(vc.VtctldClient, "customer", req, nil, nil) + res, err := throttler.UpdateThrottlerTopoConfigRaw(vc.VtctldClient, targetKs, req, nil, nil) require.NoError(t, err, res) testPartialSwitches(t) // test basic forward and reverse flows - waitForLowLag(t, "customer", "wf1") + waitForLowLag(t, targetKs, "wf1") tstWorkflowSwitchReads(t, "", "") checkStates(t, wrangler.WorkflowStateNotSwitched, wrangler.WorkflowStateReadsSwitched) validateReadsRouteToTarget(t, "replica,rdonly") @@ -730,9 +721,9 @@ func testRestOfWorkflow(t *testing.T) { validateWritesRouteToTarget(t) // this function is called for both MoveTables and Reshard, so the reverse workflows exist in different keyspaces - keyspace := "product" + keyspace := sourceKs if currentWorkflowType == binlogdatapb.VReplicationWorkflowType_Reshard { - keyspace = "customer" + keyspace = targetKs } waitForLowLag(t, keyspace, "wf1_reverse") tstWorkflowReverseReads(t, "", "") @@ -745,7 +736,7 @@ func testRestOfWorkflow(t *testing.T) { validateReadsRouteToSource(t, "replica,rdonly") validateWritesRouteToSource(t) - waitForLowLag(t, "customer", "wf1") + waitForLowLag(t, targetKs, "wf1") tstWorkflowSwitchWrites(t) checkStates(t, wrangler.WorkflowStateNotSwitched, wrangler.WorkflowStateWritesSwitched) validateReadsRouteToSource(t, "replica,rdonly") @@ -757,7 +748,7 @@ func testRestOfWorkflow(t *testing.T) { 
validateReadsRouteToSource(t, "replica,rdonly") validateWritesRouteToSource(t) - waitForLowLag(t, "customer", "wf1") + waitForLowLag(t, targetKs, "wf1") tstWorkflowSwitchReads(t, "", "") checkStates(t, wrangler.WorkflowStateNotSwitched, wrangler.WorkflowStateReadsSwitched) validateReadsRouteToTarget(t, "replica,rdonly") @@ -784,9 +775,9 @@ func testRestOfWorkflow(t *testing.T) { require.Contains(t, err.Error(), wrangler.ErrWorkflowNotFullySwitched) // fully switch and complete - waitForLowLag(t, "customer", "wf1") - waitForLowLag(t, "customer", "customer_name") - waitForLowLag(t, "customer", "enterprise_customer") + waitForLowLag(t, targetKs, "wf1") + waitForLowLag(t, targetKs, "customer_name") + waitForLowLag(t, targetKs, "enterprise_customer") tstWorkflowSwitchReadsAndWrites(t) validateReadsRouteToTarget(t, "replica,rdonly") validateWritesRouteToTarget(t) @@ -801,30 +792,30 @@ func setupCluster(t *testing.T) *VitessCluster { zone1 := vc.Cells["zone1"] zone2 := vc.Cells["zone2"] - vc.AddKeyspace(t, []*Cell{zone1, zone2}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) + vc.AddKeyspace(t, []*Cell{zone1, zone2}, sourceKs, "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) defer getVTGateConn() verifyClusterHealth(t, vc) insertInitialData(t) defaultCell := vc.Cells[vc.CellNames[0]] - sourceTab = vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet + sourceTab = vc.Cells[defaultCell.Name].Keyspaces[sourceKs].Shards["0"].Tablets["zone1-100"].Vttablet if defaultReplicas > 0 { - sourceReplicaTab = vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-101"].Vttablet + sourceReplicaTab = vc.Cells[defaultCell.Name].Keyspaces[sourceKs].Shards["0"].Tablets["zone1-101"].Vttablet } if defaultRdonly > 0 { - sourceRdonlyTab = vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-102"].Vttablet + 
sourceRdonlyTab = vc.Cells[defaultCell.Name].Keyspaces[sourceKs].Shards["0"].Tablets["zone1-102"].Vttablet } return vc } -func setupCustomerKeyspace(t *testing.T) { - if _, err := vc.AddKeyspace(t, []*Cell{vc.Cells["zone1"], vc.Cells["zone2"]}, "customer", "-80,80-", +func setupTargetKeyspace(t *testing.T) { + if _, err := vc.AddKeyspace(t, []*Cell{vc.Cells["zone1"], vc.Cells["zone2"]}, targetKs, "-80,80-", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, nil); err != nil { t.Fatal(err) } defaultCell := vc.Cells[vc.CellNames[0]] - custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] + custKs := vc.Cells[defaultCell.Name].Keyspaces[targetKs] targetTab1 = custKs.Shards["-80"].Tablets["zone1-200"].Vttablet targetTab2 = custKs.Shards["80-"].Tablets["zone1-300"].Vttablet if defaultReplicas > 0 { @@ -851,24 +842,24 @@ func setupMinimalCluster(t *testing.T) *VitessCluster { zone1 := vc.Cells["zone1"] - vc.AddKeyspace(t, []*Cell{zone1}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) + vc.AddKeyspace(t, []*Cell{zone1}, sourceKs, "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) verifyClusterHealth(t, vc) insertInitialData(t) - sourceTab = vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet + sourceTab = vc.Cells[defaultCell.Name].Keyspaces[sourceKs].Shards["0"].Tablets["zone1-100"].Vttablet return vc } -func setupMinimalCustomerKeyspace(t *testing.T) map[string]*cluster.VttabletProcess { +func setupMinimalTargetKeyspace(t *testing.T) map[string]*cluster.VttabletProcess { tablets := make(map[string]*cluster.VttabletProcess) - if _, err := vc.AddKeyspace(t, []*Cell{vc.Cells["zone1"]}, "customer", "-80,80-", + if _, err := vc.AddKeyspace(t, []*Cell{vc.Cells["zone1"]}, targetKs, "-80,80-", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, nil); err != nil { t.Fatal(err) } defaultCell := 
vc.Cells[vc.CellNames[0]] - custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] + custKs := vc.Cells[defaultCell.Name].Keyspaces[targetKs] targetTab1 = custKs.Shards["-80"].Tablets["zone1-200"].Vttablet targetTab2 = custKs.Shards["80-"].Tablets["zone1-300"].Vttablet tablets["-80"] = targetTab1 @@ -900,11 +891,9 @@ func switchReadsNew(t *testing.T, workflowType, cells, ksWorkflow string, revers func moveCustomerTableSwitchFlows(t *testing.T, cells []*Cell, sourceCellOrAlias string) { workflow := "wf1" - sourceKs := "product" - targetKs := "customer" ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) tables := "customer" - setupCustomerKeyspace(t) + setupTargetKeyspace(t) workflowType := "MoveTables" var moveTablesAndWait = func() { @@ -991,12 +980,11 @@ func moveCustomerTableSwitchFlows(t *testing.T, cells []*Cell, sourceCellOrAlias switchWritesReverseSwitchReadsSwitchWrites() } -func createAdditionalCustomerShards(t *testing.T, shards string) { - ksName := "customer" +func createAdditionalTargetShards(t *testing.T, shards string) { defaultCell := vc.Cells[vc.CellNames[0]] - keyspace := vc.Cells[defaultCell.Name].Keyspaces[ksName] + keyspace := vc.Cells[defaultCell.Name].Keyspaces[targetKs] require.NoError(t, vc.AddShards(t, []*Cell{defaultCell, vc.Cells["zone2"]}, keyspace, shards, defaultReplicas, defaultRdonly, 400, targetKsOpts)) - custKs := vc.Cells[defaultCell.Name].Keyspaces[ksName] + custKs := vc.Cells[defaultCell.Name].Keyspaces[targetKs] targetTab2 = custKs.Shards["80-c0"].Tablets["zone1-600"].Vttablet targetTab1 = custKs.Shards["40-80"].Tablets["zone1-500"].Vttablet targetReplicaTab1 = custKs.Shards["-40"].Tablets["zone1-401"].Vttablet diff --git a/go/test/endtoend/vreplication/vreplication_test.go b/go/test/endtoend/vreplication/vreplication_test.go index 4df6710bba0..c02e7be1332 100644 --- a/go/test/endtoend/vreplication/vreplication_test.go +++ b/go/test/endtoend/vreplication/vreplication_test.go @@ -303,7 +303,7 @@ func 
testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string } defaultCell := vc.Cells[defaultCellName] - vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, sourceKsOpts) + vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, sourceKsOpts) vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() @@ -315,7 +315,7 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string // the Lead and Lead-1 tables tested a specific case with binary sharding keys. Drop it now so that we don't // have to update the rest of the tests - execVtgateQuery(t, vtgateConn, "customer", "drop table `Lead`,`Lead-1`") + execVtgateQuery(t, vtgateConn, targetKs, "drop table `Lead`,`Lead-1`") validateRollupReplicates(t) shardOrders(t) shardMerchant(t) @@ -335,18 +335,18 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string insertMoreCustomers(t, 16) reshardCustomer2to4Split(t, nil, "") - confirmAllStreamsRunning(t, vtgateConn, "customer:-40") - expectNumberOfStreams(t, vtgateConn, "Customer2to4", "sales", "product:0", 4) + confirmAllStreamsRunning(t, vtgateConn, fmt.Sprintf("%s:-40", targetKs)) + expectNumberOfStreams(t, vtgateConn, "Customer2to4", "sales", fmt.Sprintf("%s:0", sourceKs), 4) reshardCustomer3to2SplitMerge(t) - confirmAllStreamsRunning(t, vtgateConn, "customer:-60") - expectNumberOfStreams(t, vtgateConn, "Customer3to2", "sales", "product:0", 3) + confirmAllStreamsRunning(t, vtgateConn, fmt.Sprintf("%s:-60", targetKs)) + expectNumberOfStreams(t, vtgateConn, "Customer3to2", "sales", fmt.Sprintf("%s:0", sourceKs), 3) reshardCustomer3to1Merge(t) - confirmAllStreamsRunning(t, vtgateConn, "customer:0") + confirmAllStreamsRunning(t, vtgateConn, fmt.Sprintf("%s:0", targetKs)) - 
expectNumberOfStreams(t, vtgateConn, "Customer3to1", "sales", "product:0", 1) + expectNumberOfStreams(t, vtgateConn, "Customer3to1", "sales", fmt.Sprintf("%s:0", sourceKs), 1) t.Run("Verify CopyState Is Optimized Afterwards", func(t *testing.T) { - tabletMap := vc.getVttabletsInKeyspace(t, defaultCell, "customer", topodatapb.TabletType_PRIMARY.String()) + tabletMap := vc.getVttabletsInKeyspace(t, defaultCell, targetKs, topodatapb.TabletType_PRIMARY.String()) require.NotNil(t, tabletMap) require.Greater(t, len(tabletMap), 0) for _, tablet := range tabletMap { @@ -359,8 +359,8 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string if strings.ToLower(binlogRowImage) == "noblob" { return } - _, err = vtgateConn.ExecuteFetch("use customer", 1, false) - require.NoError(t, err, "error using customer keyspace: %v", err) + _, err = vtgateConn.ExecuteFetch(fmt.Sprintf("use %s", targetKs), 1, false) + require.NoError(t, err, "error using %s keyspace: %v", targetKs, err) res, err := vtgateConn.ExecuteFetch("select count(*) from customer where name is not null", 1, false) require.NoError(t, err, "error getting current row count in customer: %v", err) require.Equal(t, 1, len(res.Rows), "expected 1 row in count(*) query, got %d", len(res.Rows)) @@ -375,9 +375,9 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string err = vc.VtctldClient.ExecuteCommand("LookupVindex", "--name", vindexName, "--table-keyspace=product", "create", "--keyspace=customer", "--type=consistent_lookup", "--table-owner=customer", "--table-owner-columns=name,cid", "--ignore-nulls", "--tablet-types=PRIMARY") require.NoError(t, err, "error executing LookupVindex create: %v", err) - waitForWorkflowState(t, vc, fmt.Sprintf("product.%s", vindexName), binlogdatapb.VReplicationWorkflowState_Running.String()) - waitForRowCount(t, vtgateConn, "product", vindexName, int(rows)) - customerVSchema, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", 
"customer") + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", sourceKs, vindexName), binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForRowCount(t, vtgateConn, sourceKs, vindexName, int(rows)) + customerVSchema, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", targetKs) require.NoError(t, err, "error executing GetVSchema: %v", err) vdx := gjson.Get(customerVSchema, fmt.Sprintf("vindexes.%s", vindexName)) require.NotNil(t, vdx, "lookup vindex %s not found", vindexName) @@ -385,7 +385,7 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string err = vc.VtctldClient.ExecuteCommand("LookupVindex", "--name", vindexName, "--table-keyspace=product", "externalize", "--keyspace=customer") require.NoError(t, err, "error executing LookupVindex externalize: %v", err) - customerVSchema, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", "customer") + customerVSchema, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", targetKs) require.NoError(t, err, "error executing GetVSchema: %v", err) vdx = gjson.Get(customerVSchema, fmt.Sprintf("vindexes.%s", vindexName)) require.NotNil(t, vdx, "lookup vindex %s not found", vindexName) @@ -393,7 +393,7 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string err = vc.VtctldClient.ExecuteCommand("LookupVindex", "--name", vindexName, "--table-keyspace=product", "internalize", "--keyspace=customer") require.NoError(t, err, "error executing LookupVindex internalize: %v", err) - customerVSchema, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", "customer") + customerVSchema, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", targetKs) require.NoError(t, err, "error executing GetVSchema: %v", err) vdx = gjson.Get(customerVSchema, fmt.Sprintf("vindexes.%s", vindexName)) require.NotNil(t, vdx, "lookup vindex %s not found", vindexName) @@ -628,7 +628,7 @@ func TestMoveTablesIgnoreSourceKeyspace(t *testing.T) { func 
testVStreamCellFlag(t *testing.T) { vgtid := &binlogdatapb.VGtid{ ShardGtids: []*binlogdatapb.ShardGtid{{ - Keyspace: "product", + Keyspace: sourceKs, Shard: "0", Gtid: "", }}} @@ -714,7 +714,7 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) { vc = NewVitessCluster(t, &clusterOptions{cells: cells}) defer vc.TearDown() - keyspace := "product" + keyspace := sourceKs shard := "0" // Run the e2e test with binlog_row_image=NOBLOB and @@ -738,7 +738,7 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) { testVStreamFrom(t, vtgate, keyspace, 2) }) shardCustomer(t, true, []*Cell{cell1, cell2}, "alias", false) - isTableInDenyList(t, vc, "product/0", "customer") + isTableInDenyList(t, vc, fmt.Sprintf("%s/0", sourceKs), "customer") // we tag along this test so as not to create the overhead of creating another cluster testVStreamCellFlag(t) } @@ -810,22 +810,20 @@ func testVStreamFrom(t *testing.T, vtgate *cluster.VtgateProcess, table string, func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAlias string, withOpenTx bool) { t.Run("shardCustomer", func(t *testing.T) { workflow := "p2c" - sourceKs := "product" - targetKs := "customer" ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) - if _, err := vc.AddKeyspace(t, cells, "customer", "-80,80-", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, targetKsOpts); err != nil { + if _, err := vc.AddKeyspace(t, cells, targetKs, "-80,80-", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, targetKsOpts); err != nil { t.Fatal(err) } // Assume we are operating on first cell defaultCell := cells[0] - custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] + custKs := vc.Cells[defaultCell.Name].Keyspaces[targetKs] tables := "customer,loadtest,Lead,Lead-1,db_order_test,geom_tbl,json_tbl,blüb_tbl,vdiff_order,reftable" moveTablesAction(t, "Create", sourceCellOrAlias, workflow, sourceKs, targetKs, tables) customerTab1 := 
custKs.Shards["-80"].Tablets["zone1-200"].Vttablet customerTab2 := custKs.Shards["80-"].Tablets["zone1-300"].Vttablet - productTab := vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet + productTab := vc.Cells[defaultCell.Name].Keyspaces[sourceKs].Shards["0"].Tablets["zone1-100"].Vttablet // Wait to finish the copy phase for all tables workflowType := "MoveTables" @@ -884,7 +882,7 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl totalInserts, totalUpdates, totalInsertQueries, totalUpdateQueries := 0, 0, 0, 0 for _, tab := range []*cluster.VttabletProcess{tablet200, tablet300} { - insertCount, updateCount, insertQueries, updateQueries := getPartialMetrics(t, "product.0.p2c.1", tab) + insertCount, updateCount, insertQueries, updateQueries := getPartialMetrics(t, fmt.Sprintf("%s.0.p2c.1", sourceKs), tab) totalInserts += insertCount totalUpdates += updateCount totalInsertQueries += insertQueries @@ -898,10 +896,10 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl }) query := "select cid from customer" - assertQueryExecutesOnTablet(t, vtgateConn, productTab, "product", query, query) + assertQueryExecutesOnTablet(t, vtgateConn, productTab, sourceKs, query, query) insertQuery1 := "insert into customer(cid, name) values(1001, 'tempCustomer1')" matchInsertQuery1 := "insert into customer(cid, `name`) values (:vtg1 /* INT64 */, :vtg2 /* VARCHAR */)" - assertQueryExecutesOnTablet(t, vtgateConn, productTab, "product", insertQuery1, matchInsertQuery1) + assertQueryExecutesOnTablet(t, vtgateConn, productTab, sourceKs, insertQuery1, matchInsertQuery1) // FIXME for some reason, these inserts fails on mac, need to investigate, some // vreplication bug because of case insensitiveness of table names on mac? 
@@ -914,14 +912,14 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl if err != nil { require.FailNow(t, output) } - execVtgateQuery(t, vtgateConn, "product", fmt.Sprintf("update `%s` set name='xyz'", tbl)) + execVtgateQuery(t, vtgateConn, sourceKs, fmt.Sprintf("update `%s` set name='xyz'", tbl)) } } doVDiff(t, ksWorkflow, "") cellNames := getCellNames(cells) switchReadsDryRun(t, workflowType, cellNames, ksWorkflow, dryRunResultsReadCustomerShard) switchReads(t, workflowType, cellNames, ksWorkflow, false) - assertQueryExecutesOnTablet(t, vtgateConn, productTab, "customer", query, query) + assertQueryExecutesOnTablet(t, vtgateConn, productTab, targetKs, query, query) var commit func(t *testing.T) if withOpenTx { @@ -954,24 +952,24 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl catchup(t, productTab, workflow, "MoveTables") - doVDiff(t, "product.p2c_reverse", "") + doVDiff(t, fmt.Sprintf("%s.p2c_reverse", sourceKs), "") if withOpenTx { execVtgateQuery(t, vtgateConn, "", deleteOpenTxQuery) } - ksShards := []string{"product/0", "customer/-80", "customer/80-"} + ksShards := []string{fmt.Sprintf("%s/0", sourceKs), fmt.Sprintf("%s/-80", targetKs), fmt.Sprintf("%s/80-", targetKs)} printShardPositions(vc, ksShards) insertQuery2 := "insert into customer(name, cid) values('tempCustomer2', 100)" matchInsertQuery2 := "insert into customer(`name`, cid) values (:vtg1 /* VARCHAR */, :_cid_0)" - assertQueryDoesNotExecutesOnTablet(t, vtgateConn, productTab, "customer", insertQuery2, matchInsertQuery2) + assertQueryDoesNotExecutesOnTablet(t, vtgateConn, productTab, targetKs, insertQuery2, matchInsertQuery2) insertQuery2 = "insert into customer(name, cid) values('tempCustomer3', 101)" // ID 101, hence due to reverse_bits in shard 80- - assertQueryExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery2, matchInsertQuery2) + assertQueryExecutesOnTablet(t, vtgateConn, customerTab2, targetKs, 
insertQuery2, matchInsertQuery2) insertQuery2 = "insert into customer(name, cid) values('tempCustomer4', 102)" // ID 102, hence due to reverse_bits in shard -80 - assertQueryExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery2, matchInsertQuery2) + assertQueryExecutesOnTablet(t, vtgateConn, customerTab1, targetKs, insertQuery2, matchInsertQuery2) - execVtgateQuery(t, vtgateConn, "customer", "update customer set meta = convert(x'7b7d' using utf8mb4) where cid = 1") + execVtgateQuery(t, vtgateConn, targetKs, "update customer set meta = convert(x'7b7d' using utf8mb4) where cid = 1") if testReverse { // Reverse Replicate switchReads(t, workflowType, cellNames, ksWorkflow, true) @@ -984,12 +982,12 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl require.Contains(t, output, "'customer.bmd5'") insertQuery1 = "insert into customer(cid, name) values(1002, 'tempCustomer5')" - assertQueryExecutesOnTablet(t, vtgateConn, productTab, "product", insertQuery1, matchInsertQuery1) + assertQueryExecutesOnTablet(t, vtgateConn, productTab, sourceKs, insertQuery1, matchInsertQuery1) // both inserts go into 80-, this tests the edge-case where a stream (-80) has no relevant new events after the previous switch insertQuery1 = "insert into customer(cid, name) values(1003, 'tempCustomer6')" - assertQueryDoesNotExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery1, matchInsertQuery1) + assertQueryDoesNotExecutesOnTablet(t, vtgateConn, customerTab1, targetKs, insertQuery1, matchInsertQuery1) insertQuery1 = "insert into customer(cid, name) values(1004, 'tempCustomer7')" - assertQueryDoesNotExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery1, matchInsertQuery1) + assertQueryDoesNotExecutesOnTablet(t, vtgateConn, customerTab2, targetKs, insertQuery1, matchInsertQuery1) waitForNoWorkflowLag(t, vc, targetKs, workflow) @@ -998,13 +996,13 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, 
sourceCellOrAl switchWrites(t, workflowType, ksWorkflow, false) var exists bool - exists, err = isTableInDenyList(t, vc, "product/0", "customer") + exists, err = isTableInDenyList(t, vc, fmt.Sprintf("%s/0", sourceKs), "customer") require.NoError(t, err, "Error getting denylist for customer:0") require.True(t, exists) moveTablesAction(t, "Complete", cellNames, workflow, sourceKs, targetKs, tables) - exists, err = isTableInDenyList(t, vc, "product/0", "customer") + exists, err = isTableInDenyList(t, vc, fmt.Sprintf("%s/0", sourceKs), "customer") require.NoError(t, err, "Error getting denylist for customer:0") require.False(t, exists) @@ -1012,7 +1010,7 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl expectNumberOfStreams(t, vtgateConn, "shardCustomerTargetStreams", "p2c", "customer:"+shard, 0) } - expectNumberOfStreams(t, vtgateConn, "shardCustomerReverseStreams", "p2c_reverse", "product:0", 0) + expectNumberOfStreams(t, vtgateConn, "shardCustomerReverseStreams", "p2c_reverse", fmt.Sprintf("%s:0", sourceKs), 0) var found bool found, err = checkIfTableExists(t, vc, "zone1-100", "customer") @@ -1024,22 +1022,22 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl require.True(t, found) insertQuery2 = "insert into customer(name, cid) values('tempCustomer8', 103)" // ID 103, hence due to reverse_bits in shard 80- - assertQueryDoesNotExecutesOnTablet(t, vtgateConn, productTab, "customer", insertQuery2, matchInsertQuery2) + assertQueryDoesNotExecutesOnTablet(t, vtgateConn, productTab, targetKs, insertQuery2, matchInsertQuery2) insertQuery2 = "insert into customer(name, cid) values('tempCustomer10', 104)" // ID 105, hence due to reverse_bits in shard -80 - assertQueryExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery2, matchInsertQuery2) + assertQueryExecutesOnTablet(t, vtgateConn, customerTab1, targetKs, insertQuery2, matchInsertQuery2) insertQuery2 = "insert into customer(name, cid) 
values('tempCustomer9', 105)" // ID 104, hence due to reverse_bits in shard 80- - assertQueryExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery2, matchInsertQuery2) + assertQueryExecutesOnTablet(t, vtgateConn, customerTab2, targetKs, insertQuery2, matchInsertQuery2) - execVtgateQuery(t, vtgateConn, "customer", "delete from customer where name like 'tempCustomer%'") - waitForRowCountInTablet(t, customerTab1, "customer", "customer", 1) - waitForRowCountInTablet(t, customerTab2, "customer", "customer", 2) - waitForRowCount(t, vtgateConn, "customer", "customer.customer", 3) + execVtgateQuery(t, vtgateConn, targetKs, "delete from customer where name like 'tempCustomer%'") + waitForRowCountInTablet(t, customerTab1, targetKs, "customer", 1) + waitForRowCountInTablet(t, customerTab2, targetKs, "customer", 2) + waitForRowCount(t, vtgateConn, targetKs, fmt.Sprintf("%s.customer", targetKs), 3) query = "insert into customer (name, cid) values('george', 5)" - execVtgateQuery(t, vtgateConn, "customer", query) - waitForRowCountInTablet(t, customerTab1, "customer", "customer", 1) - waitForRowCountInTablet(t, customerTab2, "customer", "customer", 3) - waitForRowCount(t, vtgateConn, "customer", "customer.customer", 4) + execVtgateQuery(t, vtgateConn, targetKs, query) + waitForRowCountInTablet(t, customerTab1, targetKs, "customer", 1) + waitForRowCountInTablet(t, customerTab2, targetKs, "customer", 3) + waitForRowCount(t, vtgateConn, targetKs, "customer.customer", 4) } }) } @@ -1049,8 +1047,8 @@ func validateRollupReplicates(t *testing.T) { insertMoreProducts(t) vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() - waitForRowCount(t, vtgateConn, "product", "rollup", 1) - waitForQueryResult(t, vtgateConn, "product:0", "select rollupname, kount from rollup", + waitForRowCount(t, vtgateConn, sourceKs, "rollup", 1) + waitForQueryResult(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "select rollupname, 
kount from rollup", `[[VARCHAR("total") INT32(5)]]`) }) } @@ -1059,14 +1057,13 @@ func reshardCustomer2to4Split(t *testing.T, cells []*Cell, sourceCellOrAlias str t.Run("reshardCustomer2to4Split", func(t *testing.T) { vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() - ksName := "customer" counts := map[string]int{"zone1-600": 4, "zone1-700": 5, "zone1-800": 6, "zone1-900": 5} - reshard(t, ksName, "customer", "c2c4", "-80,80-", "-40,40-80,80-c0,c0-", + reshard(t, targetKs, "customer", "c2c4", "-80,80-", "-40,40-80,80-c0,c0-", 600, counts, nil, nil, cells, sourceCellOrAlias, 1) - waitForRowCount(t, vtgateConn, ksName, "customer", 20) + waitForRowCount(t, vtgateConn, targetKs, "customer", 20) query := "insert into customer (name) values('yoko')" - execVtgateQuery(t, vtgateConn, ksName, query) - waitForRowCount(t, vtgateConn, ksName, "customer", 21) + execVtgateQuery(t, vtgateConn, targetKs, query) + waitForRowCount(t, vtgateConn, targetKs, "customer", 21) }) } @@ -1136,18 +1133,16 @@ func reshardMerchant3to1Merge(t *testing.T) { func reshardCustomer3to2SplitMerge(t *testing.T) { // -40,40-80,80-c0 => merge/split, c0- stays the same ending up with 3 t.Run("reshardCustomer3to2SplitMerge", func(t *testing.T) { - ksName := "customer" counts := map[string]int{"zone1-1000": 8, "zone1-1100": 8, "zone1-1200": 5} - reshard(t, ksName, "customer", "c4c3", "-40,40-80,80-c0", "-60,60-c0", + reshard(t, targetKs, "customer", "c4c3", "-40,40-80,80-c0", "-60,60-c0", 1000, counts, nil, nil, nil, "", 1) }) } func reshardCustomer3to1Merge(t *testing.T) { // to unsharded t.Run("reshardCustomer3to1Merge", func(t *testing.T) { - ksName := "customer" counts := map[string]int{"zone1-1500": 21} - reshard(t, ksName, "customer", "c3c1", "-60,60-c0,c0-", "0", + reshard(t, targetKs, "customer", "c3c1", "-60,60-c0,c0-", "0", 1500, counts, nil, nil, nil, "", 3) }) } @@ -1222,14 +1217,12 @@ func shardOrders(t *testing.T) { 
defaultCell := vc.Cells[vc.CellNames[0]] workflow := "o2c" cell := defaultCell.Name - sourceKs := "product" - targetKs := "customer" tables := "orders" ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) applyVSchema(t, ordersVSchema, targetKs) moveTablesAction(t, "Create", cell, workflow, sourceKs, targetKs, tables) - custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] + custKs := vc.Cells[defaultCell.Name].Keyspaces[targetKs] customerTab1 := custKs.Shards["-80"].Tablets["zone1-200"].Vttablet customerTab2 := custKs.Shards["80-"].Tablets["zone1-300"].Vttablet workflowType := "MoveTables" @@ -1239,9 +1232,9 @@ func shardOrders(t *testing.T) { switchReads(t, workflowType, strings.Join(vc.CellNames, ","), ksWorkflow, false) switchWrites(t, workflowType, ksWorkflow, false) moveTablesAction(t, "Complete", cell, workflow, sourceKs, targetKs, tables) - waitForRowCountInTablet(t, customerTab1, "customer", "orders", 1) - waitForRowCountInTablet(t, customerTab2, "customer", "orders", 2) - waitForRowCount(t, vtgateConn, "customer", "orders", 3) + waitForRowCountInTablet(t, customerTab1, targetKs, "orders", 1) + waitForRowCountInTablet(t, customerTab2, targetKs, "orders", 2) + waitForRowCount(t, vtgateConn, targetKs, "orders", 3) }) } @@ -1260,7 +1253,6 @@ func shardMerchant(t *testing.T) { workflow := "p2m" defaultCell := vc.Cells[vc.CellNames[0]] cell := defaultCell.Name - sourceKs := "product" targetKs := merchantKeyspace tables := "merchant" ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) @@ -1329,9 +1321,9 @@ func testMaterializeWithNonExistentTable(t *testing.T) { func materializeProduct(t *testing.T) { t.Run("materializeProduct", func(t *testing.T) { - // Materializing from "product" keyspace to "customer" keyspace. + // Materializing from sourceKs keyspace to targetKs keyspace. 
workflow := "cproduct" - keyspace := "customer" + keyspace := targetKs defaultCell := vc.Cells[vc.CellNames[0]] applyVSchema(t, materializeProductVSchema, keyspace) materialize(t, materializeProductSpec) @@ -1341,10 +1333,10 @@ func materializeProduct(t *testing.T) { waitForRowCountInTablet(t, tab, keyspace, workflow, 5) } - productTablets := vc.getVttabletsInKeyspace(t, defaultCell, "product", "primary") + productTablets := vc.getVttabletsInKeyspace(t, defaultCell, sourceKs, "primary") t.Run("throttle-app-product", func(t *testing.T) { // Now, throttle the source side component (vstreamer), and insert some rows. - err := throttler.ThrottleKeyspaceApp(vc.VtctldClient, "product", sourceThrottlerAppName) + err := throttler.ThrottleKeyspaceApp(vc.VtctldClient, sourceKs, sourceThrottlerAppName) assert.NoError(t, err) for _, tab := range productTablets { status, err := throttler.GetThrottlerStatus(vc.VtctldClient, &cluster.Vttablet{Alias: tab.Name}) @@ -1378,7 +1370,7 @@ func materializeProduct(t *testing.T) { }) t.Run("unthrottle-app-product", func(t *testing.T) { // Unthrottle the vstreamer component, and expect the rows to show up. - err := throttler.UnthrottleKeyspaceApp(vc.VtctldClient, "product", sourceThrottlerAppName) + err := throttler.UnthrottleKeyspaceApp(vc.VtctldClient, sourceKs, sourceThrottlerAppName) assert.NoError(t, err) for _, tab := range productTablets { // Give time for unthrottling to take effect and for targets to fetch data. 
@@ -1452,15 +1444,14 @@ func materializeRollup(t *testing.T) { t.Run("materializeRollup", func(t *testing.T) { vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() - keyspace := "product" workflow := "rollup" - applyVSchema(t, materializeSalesVSchema, keyspace) + applyVSchema(t, materializeSalesVSchema, sourceKs) defaultCell := vc.Cells[vc.CellNames[0]] - productTab := vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet + productTab := vc.Cells[defaultCell.Name].Keyspaces[sourceKs].Shards["0"].Tablets["zone1-100"].Vttablet materialize(t, materializeRollupSpec) catchup(t, productTab, workflow, "Materialize") - waitForRowCount(t, vtgateConn, "product", "rollup", 1) - waitForQueryResult(t, vtgateConn, "product:0", "select rollupname, kount from rollup", + waitForRowCount(t, vtgateConn, sourceKs, "rollup", 1) + waitForQueryResult(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "select rollupname, kount from rollup", `[[VARCHAR("total") INT32(2)]]`) }) } @@ -1469,14 +1460,13 @@ func materializeSales(t *testing.T) { t.Run("materializeSales", func(t *testing.T) { vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() - keyspace := "product" - applyVSchema(t, materializeSalesVSchema, keyspace) + applyVSchema(t, materializeSalesVSchema, sourceKs) materialize(t, materializeSalesSpec) defaultCell := vc.Cells[vc.CellNames[0]] - productTab := vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet + productTab := vc.Cells[defaultCell.Name].Keyspaces[sourceKs].Shards["0"].Tablets["zone1-100"].Vttablet catchup(t, productTab, "sales", "Materialize") - waitForRowCount(t, vtgateConn, "product", "sales", 2) - waitForQueryResult(t, vtgateConn, "product:0", "select kount, amount from sales", + waitForRowCount(t, vtgateConn, sourceKs, "sales", 2) + waitForQueryResult(t, vtgateConn, 
fmt.Sprintf("%s:0", sourceKs), "select kount, amount from sales", `[[INT32(1) INT32(10)] [INT32(2) INT32(35)]]`) }) } @@ -1974,13 +1964,13 @@ func printSwitchWritesExtraDebug(t *testing.T, ksWorkflow, msg string) { debug := true if debug { log.Infof("------------------- START Extra debug info %s Switch writes %s", msg, ksWorkflow) - ksShards := []string{"product/0", "customer/-80", "customer/80-"} + ksShards := []string{fmt.Sprintf("%s/0", sourceKs), fmt.Sprintf("%s/-80", targetKs), fmt.Sprintf("%s/80-", targetKs)} printShardPositions(vc, ksShards) defaultCell := vc.Cells[vc.CellNames[0]] - custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] + custKs := vc.Cells[defaultCell.Name].Keyspaces[targetKs] customerTab1 := custKs.Shards["-80"].Tablets["zone1-200"].Vttablet customerTab2 := custKs.Shards["80-"].Tablets["zone1-300"].Vttablet - productKs := vc.Cells[defaultCell.Name].Keyspaces["product"] + productKs := vc.Cells[defaultCell.Name].Keyspaces[sourceKs] productTab := productKs.Shards["0"].Tablets["zone1-100"].Vttablet tabs := []*cluster.VttabletProcess{productTab, customerTab1, customerTab2} queries := []string{ diff --git a/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go b/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go index 58af882d5b7..a17cb73a50d 100644 --- a/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go +++ b/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go @@ -76,7 +76,7 @@ func TestVtctldclientCLI(t *testing.T) { sourceTab = vc.Cells["zone1"].Keyspaces[sourceKeyspaceName].Shards["0"].Tablets["zone1-100"].Vttablet require.NotNil(t, sourceTab) - targetTabs := setupMinimalCustomerKeyspace(t) + targetTabs := setupMinimalTargetKeyspace(t) targetTab1 = targetTabs["-80"] require.NotNil(t, targetTab1) targetTab2 = targetTabs["80-"] diff --git a/go/vt/vtctl/workflow/sequences_test.go b/go/vt/vtctl/workflow/sequences_test.go index 970d52b02ab..1717a3b38b3 100644 --- 
a/go/vt/vtctl/workflow/sequences_test.go +++ b/go/vt/vtctl/workflow/sequences_test.go @@ -28,13 +28,12 @@ import ( "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/mysqlctl/tmutils" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" "vitess.io/vitess/go/vt/proto/vschema" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" - - tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" - vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) func TestInitializeTargetSequences(t *testing.T) { @@ -42,10 +41,10 @@ func TestInitializeTargetSequences(t *testing.T) { defer cancel() workflowName := "wf1" - tableName := "tbl-t1" - tableName2 := "tbl-t2" - sourceKeyspaceName := "source-ks" - targetKeyspaceName := "target-ks" + tableName := "t1" + tableName2 := "t2" + sourceKeyspaceName := "sourceks" + targetKeyspaceName := "targetks" schema := map[string]*tabletmanagerdatapb.SchemaDefinition{ tableName: { @@ -89,7 +88,7 @@ func TestInitializeTargetSequences(t *testing.T) { backingTableKeyspace: sourceKeyspaceName, backingTableDBName: fmt.Sprintf("vt_%s", sourceKeyspaceName), usingTableName: tableName, - usingTableDBName: "vt_target-ks", + usingTableDBName: "vt_targetks", usingTableDefinition: &vschema.Table{ AutoIncrement: &vschema.AutoIncrement{ Column: "my-col", @@ -102,7 +101,7 @@ func TestInitializeTargetSequences(t *testing.T) { backingTableKeyspace: sourceKeyspaceName, backingTableDBName: fmt.Sprintf("vt_%s", sourceKeyspaceName), usingTableName: tableName2, - usingTableDBName: "vt_target-ks", + usingTableDBName: "vt_targetks", usingTableDefinition: &vschema.Table{ AutoIncrement: &vschema.AutoIncrement{ Column: "my-col-2", @@ -119,13 +118,13 @@ func TestInitializeTargetSequences(t *testing.T) { BackingTableName: "my-seq1", UsingColEscaped: "`my-col`", UsingTableNameEscaped: 
fmt.Sprintf("`%s`", tableName), - UsingTableDbNameEscaped: "`vt_target-ks`", + UsingTableDbNameEscaped: "`vt_targetks`", }, { BackingTableName: "my-seq2", UsingColEscaped: "`my-col-2`", UsingTableNameEscaped: fmt.Sprintf("`%s`", tableName2), - UsingTableDbNameEscaped: "`vt_target-ks`", + UsingTableDbNameEscaped: "`vt_targetks`", }, }, }, @@ -164,16 +163,16 @@ func TestGetTargetSequenceMetadata(t *testing.T) { defer cancel() cell := "cell1" workflow := "wf1" - table := "`tbl-t1`" - tableDDL := "create table tbl-t1 (id int not null auto_increment primary key, c1 varchar(10))" - table2 := "tbl-t2" - unescapedTable := "tbl-t1" + table := "`t1`" + tableDDL := "create table t1 (id int not null auto_increment primary key, c1 varchar(10))" + table2 := "t2" + unescapedTable := "t1" sourceKeyspace := &testKeyspace{ KeyspaceName: "source-ks", ShardNames: []string{"0"}, } targetKeyspace := &testKeyspace{ - KeyspaceName: "target-ks", + KeyspaceName: "targetks", ShardNames: []string{"-80", "80-"}, } vindexes := map[string]*vschema.Vindex{ @@ -242,7 +241,7 @@ func TestGetTargetSequenceMetadata(t *testing.T) { backingTableKeyspace: "source-ks", backingTableDBName: "vt_source-ks", usingTableName: unescapedTable, - usingTableDBName: "vt_target-ks", + usingTableDBName: "vt_targetks", usingTableDefinition: &vschema.Table{ ColumnVindexes: []*vschema.ColumnVindex{ { @@ -302,7 +301,7 @@ func TestGetTargetSequenceMetadata(t *testing.T) { backingTableKeyspace: "source-ks", backingTableDBName: "vt_source-ks", usingTableName: unescapedTable, - usingTableDBName: "vt_target-ks", + usingTableDBName: "vt_targetks", usingTableDefinition: &vschema.Table{ ColumnVindexes: []*vschema.ColumnVindex{ { @@ -351,7 +350,7 @@ func TestGetTargetSequenceMetadata(t *testing.T) { backingTableKeyspace: "source-ks", backingTableDBName: "vt_source-ks", usingTableName: unescapedTable, - usingTableDBName: "vt_target-ks", + usingTableDBName: "vt_targetks", usingTableDefinition: &vschema.Table{ ColumnVindexes: 
[]*vschema.ColumnVindex{ { @@ -415,7 +414,7 @@ func TestGetTargetSequenceMetadata(t *testing.T) { backingTableKeyspace: "source-ks", backingTableDBName: "vt_source-ks", usingTableName: unescapedTable, - usingTableDBName: "vt_target-ks", + usingTableDBName: "vt_targetks", usingTableDefinition: &vschema.Table{ ColumnVindexes: []*vschema.ColumnVindex{ { @@ -434,7 +433,7 @@ func TestGetTargetSequenceMetadata(t *testing.T) { backingTableKeyspace: "source-ks", backingTableDBName: "vt_source-ks", usingTableName: table2, - usingTableDBName: "vt_target-ks", + usingTableDBName: "vt_targetks", usingTableDefinition: &vschema.Table{ ColumnVindexes: []*vschema.ColumnVindex{ { @@ -483,7 +482,7 @@ func TestGetTargetSequenceMetadata(t *testing.T) { backingTableKeyspace: "source-ks", backingTableDBName: "vt_source-ks", usingTableName: unescapedTable, - usingTableDBName: "vt_target-ks", + usingTableDBName: "vt_targetks", usingTableDefinition: &vschema.Table{ ColumnVindexes: []*vschema.ColumnVindex{ { @@ -681,9 +680,9 @@ func TestDryRunInitializeTargetSequences(t *testing.T) { defer cancel() workflowName := "wf1" - tableName := "tbl-t1" - sourceKeyspaceName := "source-ks" - targetKeyspaceName := "target-ks" + tableName := "t1" + sourceKeyspaceName := "sourceks" + targetKeyspaceName := "targetks" schema := map[string]*tabletmanagerdatapb.SchemaDefinition{ tableName: { @@ -719,28 +718,28 @@ func TestDryRunInitializeTargetSequences(t *testing.T) { sm1 := sequenceMetadata{ backingTableName: "seq1", - backingTableKeyspace: "source-ks", + backingTableKeyspace: "sourceks", backingTableDBName: "ks1", - usingTableName: "tbl-t1", - usingTableDBName: "target-ks", + usingTableName: "t1", + usingTableDBName: "targetks", usingTableDefinition: &vschema.Table{ AutoIncrement: &vschema.AutoIncrement{Column: "id", Sequence: "seq1"}, }, } sm2 := sm1 sm2.backingTableName = "seq2" - sm2.usingTableName = "tbl-t2" + sm2.usingTableName = "t2" sm2.usingTableDefinition.AutoIncrement.Sequence = "seq2" sm3 := sm1 
- sm3.backingTableName = "tbl-seq3" - sm3.usingTableName = "tbl-t3" + sm3.backingTableName = "seq3" + sm3.usingTableName = "t3" sm3.usingTableDefinition.AutoIncrement.Sequence = "seq3" tables := map[string]*sequenceMetadata{ - "tbl-t1": &sm1, - "tbl-t2": &sm2, - "tbl-t3": &sm3, + "t1": &sm1, + "t2": &sm2, + "t3": &sm3, } for range tables { diff --git a/go/vt/vttablet/tabletmanager/rpc_vreplication.go b/go/vt/vttablet/tabletmanager/rpc_vreplication.go index 2520bb0bf32..eb4ab0f8257 100644 --- a/go/vt/vttablet/tabletmanager/rpc_vreplication.go +++ b/go/vt/vttablet/tabletmanager/rpc_vreplication.go @@ -747,10 +747,16 @@ func (tm *TabletManager) GetMaxValueForSequences(ctx context.Context, req *table } func (tm *TabletManager) getMaxSequenceValue(ctx context.Context, sm *tabletmanagerdatapb.GetMaxValueForSequencesRequest_SequenceMetadata) (int64, error) { + for _, val := range []string{sm.UsingColEscaped, sm.UsingTableDbNameEscaped, sm.UsingTableNameEscaped} { + if val[0] != '`' { + return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, + "the column (%s), database (%s), and table (%s) names must be escaped", sm.UsingColEscaped, sm.UsingTableDbNameEscaped, sm.UsingTableNameEscaped) + } + } query := sqlparser.BuildParsedQuery(sqlGetMaxSequenceVal, - sqlparser.NewColName(sm.UsingColEscaped), - sqlparser.NewIdentifierCI(sm.UsingTableDbNameEscaped), - sqlparser.NewTableName(sm.UsingTableNameEscaped), + sm.UsingColEscaped, + sm.UsingTableDbNameEscaped, + sm.UsingTableNameEscaped, ) qr, err := tm.ExecuteFetchAsApp(ctx, &tabletmanagerdatapb.ExecuteFetchAsAppRequest{ Query: []byte(query.Query), diff --git a/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go b/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go index fb807eab7c3..18140cd0c5b 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go +++ b/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go @@ -1048,7 +1048,7 @@ func findColumn(ti *Table, name sqlparser.IdentifierCI) (int, error) { } // Let's see if 
the Table only has TableMap event names and if so return a different error. for _, col := range ti.Fields { - if !strings.HasPrefix(col.Name, "@") { + if col.Name[0] != '@' { return 0, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "column %s not found in table %s", sqlparser.String(name), ti.Name) } } From fdd506a43dd8d0e892080f215a06c024871156aa Mon Sep 17 00:00:00 2001 From: Matt Lord Date: Thu, 16 Oct 2025 19:23:05 +0000 Subject: [PATCH 05/13] Test fixes Signed-off-by: Matt Lord --- go/test/endtoend/vreplication/config_test.go | 26 ++----- go/test/endtoend/vreplication/helper_test.go | 8 +- .../vreplication/partial_movetables_test.go | 77 ++++++++++--------- .../endtoend/vreplication/time_zone_test.go | 8 +- go/test/endtoend/vreplication/vdiff2_test.go | 17 ++-- .../vdiff_multiple_movetables_test.go | 11 ++- .../vreplication/vdiff_online_ddl_test.go | 2 +- .../vreplication/vreplication_test.go | 32 ++++---- .../vreplication/vreplication_test_env.go | 40 ++++++---- .../vreplication_vtctldclient_cli_test.go | 68 ++++++++-------- go/test/endtoend/vreplication/vstream_test.go | 48 ++++++------ .../tabletmanager/rpc_vreplication.go | 2 +- .../tabletserver/vstreamer/planbuilder.go | 2 +- 13 files changed, 173 insertions(+), 168 deletions(-) diff --git a/go/test/endtoend/vreplication/config_test.go b/go/test/endtoend/vreplication/config_test.go index 5fe3b801840..c1f38e75d77 100644 --- a/go/test/endtoend/vreplication/config_test.go +++ b/go/test/endtoend/vreplication/config_test.go @@ -21,16 +21,6 @@ import ( "strings" ) -const ( - // Defaults used for all tests. - workflowName = "wf1" - sourceKs = "vitess-product" - targetKs = "vitess-customer" - ksWorkflow = targetKs + "." + workflowName - reverseKsWorkflow = sourceKs + "." + workflowName + "_reverse" - defaultCellName = "zone1" -) - // The product, customer, Lead, Lead-1 tables are used to exercise and test most Workflow variants. 
// We violate the NO_ZERO_DATES and NO_ZERO_IN_DATE sql_modes that are enabled by default in // MySQL 5.7+ and MariaDB 10.2+ to ensure that vreplication still works everywhere and the @@ -444,7 +434,7 @@ create table ukTable (id1 int not null, id2 int not null, name varchar(20), uniq materializeProductSpec = fmt.Sprintf(` { "workflow": "cproduct", - "source_keyspace": "product", + "source_keyspace": "%s", "target_keyspace": "%s", "table_settings": [{ "target_table": "cproduct", @@ -452,7 +442,7 @@ create table ukTable (id1 int not null, id2 int not null, name varchar(20), uniq "create_ddl": "create table cproduct(pid bigint, description varchar(128), date1 datetime not null default '0000-00-00 00:00:00', date2 datetime not null default '2021-00-01 00:00:00', primary key(pid)) CHARSET=utf8mb4" }] } -`, targetKs) +`, sourceKs, targetKs) materializeCustomerNameSpec = fmt.Sprintf(` { @@ -566,26 +556,26 @@ create table ukTable (id1 int not null, id2 int not null, name varchar(20), uniq { "workflow": "sales", "source_keyspace": "%s", - "target_keyspace": "product", + "target_keyspace": "%s", "table_settings": [{ "target_Table": "sales", "source_expression": "select pid, count(*) as kount, sum(price) as amount from orders group by pid", "create_ddl": "create table sales(pid int, kount int, amount int, primary key(pid)) CHARSET=utf8" }] } -`, targetKs) - materializeRollupSpec = ` +`, targetKs, sourceKs) + materializeRollupSpec = fmt.Sprintf(` { "workflow": "rollup", - "source_keyspace": "product", - "target_keyspace": "product", + "source_keyspace": "%s", + "target_keyspace": "%s", "table_settings": [{ "target_table": "rollup", "source_expression": "select 'total' as rollupname, count(*) as kount from product group by rollupname", "create_ddl": "create table rollup(rollupname varchar(100), kount int, primary key (rollupname)) CHARSET=utf8mb4" }] } -` +`, sourceKs, sourceKs) initialExternalSchema = ` create table review(rid int, pid int, review varbinary(128), primary 
key(rid)); create table rating(gid int, pid int, rating int, primary key(gid)); diff --git a/go/test/endtoend/vreplication/helper_test.go b/go/test/endtoend/vreplication/helper_test.go index fe8fb22b60c..444673db5a6 100644 --- a/go/test/endtoend/vreplication/helper_test.go +++ b/go/test/endtoend/vreplication/helper_test.go @@ -288,7 +288,11 @@ func waitForRowCountInTablet(t *testing.T, vttablet *cluster.VttabletProcess, da // Note: you specify the number of values that you want to reserve // and you get back the max value reserved. func waitForSequenceValue(t *testing.T, conn *mysql.Conn, database, sequence string, numVals int) int64 { - query := fmt.Sprintf("select next %d values from %s.%s", numVals, database, sequence) + escapedDB, err := sqlescape.EnsureEscaped(database) + require.NoError(t, err) + escapedSeq, err := sqlescape.EnsureEscaped(sequence) + require.NoError(t, err) + query := fmt.Sprintf("select next %d values from %s.%s", numVals, escapedDB, escapedSeq) timer := time.NewTimer(defaultTimeout) defer timer.Stop() for { @@ -545,7 +549,7 @@ func validateDryRunResults(t *testing.T, output string, want []string) { } if !match { fail = true - require.Fail(t, "invlaid dry run results", "want %s, got %s\n", w, gotDryRun[i]) + require.Fail(t, "invalid dry run results", "want %s, got %s\n", w, gotDryRun[i]) } } if fail { diff --git a/go/test/endtoend/vreplication/partial_movetables_test.go b/go/test/endtoend/vreplication/partial_movetables_test.go index 92c55fdbea7..3cb4c8b33b1 100644 --- a/go/test/endtoend/vreplication/partial_movetables_test.go +++ b/go/test/endtoend/vreplication/partial_movetables_test.go @@ -35,10 +35,10 @@ import ( // Before canceling, we first switch traffic to the target keyspace and then reverse it back to the source keyspace. // This tests that artifacts are being properly cleaned up when a MoveTables ia canceled. 
func testCancel(t *testing.T) { - targetKs := "customer2" - sourceKs := "customer" + sourceKeyspace := targetKs + targetKeyspace := "customer2" workflowName := "partial80DashForCancel" - ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflowName) + ksWorkflow := fmt.Sprintf("%s.%s", targetKeyspace, workflowName) // We use a different table in this MoveTables than the subsequent one, so that setting up of the artifacts // while creating MoveTables do not paper over any issues with cleaning up artifacts when MoveTables is canceled. // Ref: https://github.com/vitessio/vitess/issues/13998 @@ -49,9 +49,9 @@ func testCancel(t *testing.T) { workflowInfo: &workflowInfo{ vc: vc, workflowName: workflowName, - targetKeyspace: targetKs, + targetKeyspace: targetKeyspace, }, - sourceKeyspace: sourceKs, + sourceKeyspace: sourceKeyspace, tables: table, sourceShards: shard, }, workflowFlavorVtctld) @@ -63,22 +63,22 @@ func testCancel(t *testing.T) { waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) - checkDenyList(targetKs, false) - checkDenyList(sourceKs, false) + checkDenyList(targetKeyspace, false) + checkDenyList(sourceKeyspace, false) mt.SwitchReadsAndWrites() - checkDenyList(targetKs, false) - checkDenyList(sourceKs, true) + checkDenyList(targetKeyspace, false) + checkDenyList(sourceKeyspace, true) time.Sleep(loadTestBufferingWindowDuration + 1*time.Second) mt.ReverseReadsAndWrites() - checkDenyList(targetKs, true) - checkDenyList(sourceKs, false) + checkDenyList(targetKeyspace, true) + checkDenyList(sourceKeyspace, false) time.Sleep(loadTestBufferingWindowDuration + 1*time.Second) mt.Cancel() - checkDenyList(targetKs, false) - checkDenyList(sourceKs, false) + checkDenyList(targetKeyspace, false) + checkDenyList(sourceKeyspace, false) } @@ -109,6 +109,8 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) { }() vc = setupMinimalCluster(t) defer vc.TearDown() + sourceKeyspace := sourceKs + targetKeyspace := 
targetKs workflowName := "wf1" targetTabs := setupMinimalTargetKeyspace(t) targetTab80Dash := targetTabs["80-"] @@ -117,27 +119,30 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) { workflowInfo: &workflowInfo{ vc: vc, workflowName: workflowName, - targetKeyspace: targetKs, + targetKeyspace: targetKeyspace, }, - sourceKeyspace: sourceKs, + sourceKeyspace: sourceKeyspace, tables: "customer,loadtest,customer2", }, flavor) mt.Create() - waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKs, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKeyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) catchup(t, targetTab80Dash, workflowName, "MoveTables") - vdiff(t, targetKs, workflowName, defaultCellName, nil) + vdiff(t, targetKeyspace, workflowName, defaultCellName, nil) mt.SwitchReadsAndWrites() time.Sleep(loadTestBufferingWindowDuration + 1*time.Second) mt.Complete() emptyGlobalRoutingRules := "{}\n" + sourceKeyspace = targetKs + targetKeyspace = "customer2" + // These should be listed in shard order emptyShardRoutingRules := `{"rules":[]}` - preCutoverShardRoutingRules := `{"rules":[{"from_keyspace":"customer2","to_keyspace":"customer","shard":"-80"},{"from_keyspace":"customer2","to_keyspace":"customer","shard":"80-"}]}` - halfCutoverShardRoutingRules := `{"rules":[{"from_keyspace":"customer2","to_keyspace":"customer","shard":"-80"},{"from_keyspace":"customer","to_keyspace":"customer2","shard":"80-"}]}` - postCutoverShardRoutingRules := `{"rules":[{"from_keyspace":"customer","to_keyspace":"customer2","shard":"-80"},{"from_keyspace":"customer","to_keyspace":"customer2","shard":"80-"}]}` + preCutoverShardRoutingRules := fmt.Sprintf(`{"rules":[{"from_keyspace":"%s","to_keyspace":"%s","shard":"-80"},{"from_keyspace":"%s","to_keyspace":"%s","shard":"80-"}]}`, targetKeyspace, sourceKeyspace, targetKeyspace, sourceKeyspace) + 
halfCutoverShardRoutingRules := fmt.Sprintf(`{"rules":[{"from_keyspace":"%s","to_keyspace":"%s","shard":"-80"},{"from_keyspace":"%s","to_keyspace":"%s","shard":"80-"}]}`, targetKeyspace, sourceKeyspace, sourceKeyspace, targetKeyspace) + postCutoverShardRoutingRules := fmt.Sprintf(`{"rules":[{"from_keyspace":"%s","to_keyspace":"%s","shard":"-80"},{"from_keyspace":"%s","to_keyspace":"%s","shard":"80-"}]}`, sourceKeyspace, targetKeyspace, sourceKeyspace, targetKeyspace) // Remove any manually applied shard routing rules as these // should be set by SwitchTraffic. @@ -163,8 +168,6 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) { } var err error workflowName = "partial80Dash" - sourceKeyspace := targetKs - targetKeyspace := "customer2" shard := "80-" tables := "customer,loadtest" mt80Dash := newMoveTables(vc, &moveTablesWorkflow{ @@ -187,15 +190,15 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) { }() lg.waitForCount(1000) } - waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKs, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKeyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) catchup(t, targetTab80Dash, workflowName, "MoveTables") - vdiff(t, targetKs, workflowName, defaultCellName, nil) + vdiff(t, targetKeyspace, workflowName, defaultCellName, nil) vtgateConn, closeConn := getVTGateConn() defer closeConn() - waitForRowCount(t, vtgateConn, "customer", "customer", 3) // customer: all shards - waitForRowCount(t, vtgateConn, "customer2", "customer", 3) // customer2: all shards - waitForRowCount(t, vtgateConn, "customer2:80-", "customer", 2) // customer2: 80- + waitForRowCount(t, vtgateConn, sourceKeyspace, "customer", 3) // customer: all shards + waitForRowCount(t, vtgateConn, targetKeyspace, "customer", 3) // customer2: all shards + waitForRowCount(t, vtgateConn, fmt.Sprintf("%s:80-", targetKeyspace), 
"customer", 2) // customer2: 80- confirmGlobalRoutingToSource := func() { output, err := vc.VtctldClient.ExecuteCommandWithOutput("GetRoutingRules", "--compact") @@ -241,14 +244,14 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) { require.NoError(t, err) _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) - require.Contains(t, err.Error(), "target: customer.80-.primary", "Query was routed to the target before any SwitchTraffic") + require.Contains(t, err.Error(), fmt.Sprintf("target: %s.80-.primary", sourceKeyspace), "Query was routed to the target before any SwitchTraffic") log.Infof("Testing reverse route (target->source) for shard NOT being switched") _, err = vtgateConn.ExecuteFetch("use `customer2:-80`", 0, false) require.NoError(t, err) _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) require.Error(t, err) - require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before any SwitchTraffic") + require.Contains(t, err.Error(), fmt.Sprintf("target: %s.-80.primary", sourceKeyspace), "Query was routed to the target before any SwitchTraffic") // Switch all traffic for the shard mt80Dash.SwitchReadsAndWrites() @@ -273,7 +276,7 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) { require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) require.Error(t, err) - require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before partial SwitchTraffic") + require.Contains(t, err.Error(), fmt.Sprintf("target: %s.-80.primary", sourceKeyspace), "Query was routed to the target before partial SwitchTraffic") // Shard targeting _, err = vtgateConn.ExecuteFetch("use `customer2:80-`", 0, false) @@ -281,7 +284,7 @@ func testPartialMoveTablesBasic(t *testing.T, flavor 
workflowFlavor) { _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") - _, err = vtgateConn.ExecuteFetch("use `customer:80-`", 0, false) + _, err = vtgateConn.ExecuteFetch(fmt.Sprintf("use `%s:80-`", sourceKeyspace), 0, false) require.NoError(t, err) _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) @@ -295,21 +298,21 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) { require.Contains(t, err.Error(), "target: customer2.80-.replica", "Query was routed to the source after partial SwitchTraffic") _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) require.Error(t, err) - require.Contains(t, err.Error(), "target: customer.-80.replica", "Query was routed to the target before partial SwitchTraffic") - _, err = vtgateConn.ExecuteFetch("use `customer@replica`", 0, false) + require.Contains(t, err.Error(), fmt.Sprintf("target: %s.-80.replica", sourceKeyspace), "Query was routed to the target before partial SwitchTraffic") + _, err = vtgateConn.ExecuteFetch(fmt.Sprintf("use `%s@replica`", sourceKeyspace), 0, false) require.NoError(t, err) _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.replica", "Query was routed to the source after partial SwitchTraffic") _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) require.Error(t, err) - require.Contains(t, err.Error(), "target: customer.-80.replica", "Query was routed to the target before partial SwitchTraffic") + require.Contains(t, err.Error(), fmt.Sprintf("target: %s.-80.replica", sourceKeyspace), "Query was routed to the target before partial SwitchTraffic") workflowExec := tstWorkflowExec // We cannot Complete a partial move tables at the moment because // it will find that all 
traffic has (obviously) not been switched. - err = workflowExec(t, "", workflowName, "", targetKs, "", workflowActionComplete, "", "", "", workflowExecOptsPartial80Dash) + err = workflowExec(t, "", workflowName, "", targetKeyspace, "", workflowActionComplete, "", "", "", workflowExecOptsPartial80Dash) require.Error(t, err) // Confirm global routing rules: -80 should still be be routed to customer @@ -367,8 +370,8 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) { require.True(t, isEmptyWorkflowShowOutput(output)) // Be sure we've deleted the original workflow. - _, _ = vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", targetKs, "delete", "--workflow", wf, "--shards", opts.shardSubset) - output, err = vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", targetKs, "show", "--workflow", wf, "--shards", opts.shardSubset) + _, _ = vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", targetKeyspace, "delete", "--workflow", wf, "--shards", opts.shardSubset) + output, err = vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", targetKeyspace, "show", "--workflow", wf, "--shards", opts.shardSubset) require.NoError(t, err, output) require.True(t, isEmptyWorkflowShowOutput(output)) } diff --git a/go/test/endtoend/vreplication/time_zone_test.go b/go/test/endtoend/vreplication/time_zone_test.go index 3faa9e76a78..ebeb381b62b 100644 --- a/go/test/endtoend/vreplication/time_zone_test.go +++ b/go/test/endtoend/vreplication/time_zone_test.go @@ -33,8 +33,6 @@ import ( // TestMoveTablesTZ tests the conversion of datetime based on the source timezone passed to the MoveTables workflow func TestMoveTablesTZ(t *testing.T) { workflow := "tz" - sourceKs := "product" - targetKs := "customer" ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) ksReverseWorkflow := fmt.Sprintf("%s.%s_reverse", sourceKs, workflow) @@ -163,7 +161,7 @@ func TestMoveTablesTZ(t *testing.T) { // user should be either running this 
query or have set their location in their driver to map from the time in Vitess/UTC to local query = "select id, convert_tz(dt1, 'UTC', 'US/Pacific') dt1, convert_tz(dt2, 'UTC', 'US/Pacific') dt2, convert_tz(ts1, 'UTC', 'US/Pacific') ts1 from datze" - qrTargetUSPacific, err := customerTab.QueryTablet(query, "customer", true) + qrTargetUSPacific, err := customerTab.QueryTablet(query, targetKs, true) require.NoError(t, err) require.NotNil(t, qrTargetUSPacific) require.Equal(t, len(qrSourceUSPacific.Rows), len(qrTargetUSPacific.Rows)) @@ -189,7 +187,7 @@ func TestMoveTablesTZ(t *testing.T) { } // inserts to test date conversions in reverse replication - execVtgateQuery(t, vtgateConn, "customer", "insert into datze(id, dt2) values (13, '2022-01-01 18:20:30')") - execVtgateQuery(t, vtgateConn, "customer", "insert into datze(id, dt2) values (14, '2022-04-01 12:06:07')") + execVtgateQuery(t, vtgateConn, targetKs, "insert into datze(id, dt2) values (13, '2022-01-01 18:20:30')") + execVtgateQuery(t, vtgateConn, targetKs, "insert into datze(id, dt2) values (14, '2022-04-01 12:06:07')") doVDiff(t, ksReverseWorkflow, "") } diff --git a/go/test/endtoend/vreplication/vdiff2_test.go b/go/test/endtoend/vreplication/vdiff2_test.go index 7f4d55e7757..7e35a6a9894 100644 --- a/go/test/endtoend/vreplication/vdiff2_test.go +++ b/go/test/endtoend/vreplication/vdiff2_test.go @@ -33,6 +33,7 @@ import ( "google.golang.org/protobuf/proto" "vitess.io/vitess/go/ptr" + "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/sqlparser" @@ -73,8 +74,8 @@ var testCases = []*testCase{ name: "MoveTables/unsharded to two shards", workflow: "p1c2", typ: "MoveTables", - sourceKs: "product", - targetKs: "customer", + sourceKs: sourceKs, + targetKs: targetKs, sourceShards: "0", targetShards: "-80,80-", tabletBaseID: 200, @@ -94,8 +95,8 @@ var testCases = []*testCase{ name: "Reshard Merge/split 2 to 3", workflow: "c2c3", typ: 
"Reshard", - sourceKs: "customer", - targetKs: "customer", + sourceKs: targetKs, + targetKs: targetKs, sourceShards: "-80,80-", targetShards: "-40,40-a0,a0-", tabletBaseID: 400, @@ -109,8 +110,8 @@ var testCases = []*testCase{ name: "Reshard/merge 3 to 1", workflow: "c3c1", typ: "Reshard", - sourceKs: "customer", - targetKs: "customer", + sourceKs: targetKs, + targetKs: targetKs, sourceShards: "-40,40-a0,a0-", targetShards: "0", tabletBaseID: 700, @@ -132,9 +133,7 @@ func checkVDiffCountStat(t *testing.T, tablet *cluster.VttabletProcess, expected func TestVDiff2(t *testing.T) { cellNames := "zone5,zone1,zone2,zone3,zone4" - sourceKs := "product" sourceShards := []string{"0"} - targetKs := "customer" targetShards := []string{"-80", "80-"} extraVTTabletArgs = []string{ // This forces us to use multiple vstream packets even with small test tables. @@ -281,7 +280,7 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, require.Equal(t, int64(0), leadRestarts, "expected VDiffRestartedTableDiffsCount stat to be 0 for the Lead table, got %d", leadRestarts) // Cleanup the created customer records so as not to slow down the rest of the test. 
- delstmt := fmt.Sprintf("delete from %s.customer order by cid desc limit %d", sourceKs, chunkSize) + delstmt := fmt.Sprintf("delete from %s.customer order by cid desc limit %d", sqlescape.EscapeID(sourceKs), chunkSize) for i := int64(0); i < totalRowsToCreate; i += chunkSize { _, err := vtgateConn.ExecuteFetch(delstmt, int(chunkSize), false) require.NoError(t, err, "failed to cleanup added customer records: %v", err) diff --git a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go index d668701100e..aee4378c8d5 100644 --- a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go +++ b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go @@ -36,7 +36,7 @@ func TestMultipleConcurrentVDiffs(t *testing.T) { vc = NewVitessCluster(t, nil) defer vc.TearDown() - sourceKeyspace := "product" + sourceKeyspace := sourceKs shardName := "0" cell := vc.Cells[cellName] @@ -45,7 +45,7 @@ func TestMultipleConcurrentVDiffs(t *testing.T) { verifyClusterHealth(t, vc) insertInitialData(t) targetTabletId := 200 - targetKeyspace := "customer" + targetKeyspace := targetKs vc.AddKeyspace(t, []*Cell{cell}, targetKeyspace, shardName, initialProductVSchema, initialProductSchema, 0, 0, targetTabletId, sourceKsOpts) index := 1000 @@ -69,8 +69,7 @@ func TestMultipleConcurrentVDiffs(t *testing.T) { time.Sleep(10 * time.Millisecond) } } - targetKs := vc.Cells[cellName].Keyspaces[targetKeyspace] - targetTab := targetKs.Shards["0"].Tablets[fmt.Sprintf("%s-%d", cellName, targetTabletId)].Vttablet + targetTab := vc.Cells[cellName].Keyspaces[targetKeyspace].Shards["0"].Tablets[fmt.Sprintf("%s-%d", cellName, targetTabletId)].Vttablet require.NotNil(t, targetTab) time.Sleep(15 * time.Second) // wait for some rows to be inserted. @@ -110,7 +109,7 @@ func TestMultipleConcurrentVDiffs(t *testing.T) { loadCancel() // confirm that show all shows the correct workflow and only that workflow. 
- output, err := vc.VtctldClient.ExecuteCommandWithOutput("VDiff", "--format", "json", "--workflow", "wf1", "--target-keyspace", "customer", "show", "all") + output, err := vc.VtctldClient.ExecuteCommandWithOutput("VDiff", "--format", "json", "--workflow", "wf1", "--target-keyspace", targetKs, "show", "all") require.NoError(t, err) log.Infof("VDiff output: %s", output) count := gjson.Get(output, "..#").Int() @@ -118,5 +117,5 @@ func TestMultipleConcurrentVDiffs(t *testing.T) { ksName := gjson.Get(output, "0.Keyspace").String() require.Equal(t, int64(1), count) require.Equal(t, "wf1", wf) - require.Equal(t, "customer", ksName) + require.Equal(t, targetKs, ksName) } diff --git a/go/test/endtoend/vreplication/vdiff_online_ddl_test.go b/go/test/endtoend/vreplication/vdiff_online_ddl_test.go index 76283ac551b..35bb59be37d 100644 --- a/go/test/endtoend/vreplication/vdiff_online_ddl_test.go +++ b/go/test/endtoend/vreplication/vdiff_online_ddl_test.go @@ -32,7 +32,7 @@ func TestOnlineDDLVDiff(t *testing.T) { defaultReplicas = 0 vc = setupMinimalCluster(t) defer vc.TearDown() - keyspace := "product" + keyspace := sourceKs ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/go/test/endtoend/vreplication/vreplication_test.go b/go/test/endtoend/vreplication/vreplication_test.go index c02e7be1332..358a56be806 100644 --- a/go/test/endtoend/vreplication/vreplication_test.go +++ b/go/test/endtoend/vreplication/vreplication_test.go @@ -35,6 +35,7 @@ import ( "google.golang.org/protobuf/encoding/protojson" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/throttler" @@ -97,7 +98,7 @@ func TestVReplicationDDLHandling(t *testing.T) { ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) table := "orders" newColumn := "ddltest" - cell := "zone1" + cell := defaultCellName shard := "0" vc = NewVitessCluster(t, nil) defer vc.TearDown() 
@@ -258,7 +259,7 @@ func TestVreplicationCopyThrottling(t *testing.T) { // because of the InnoDB History List length. moveTablesActionWithTabletTypes(t, "Create", defaultCell.Name, workflow, sourceKs, targetKs, table, "primary", true) // Wait for the copy phase to start - waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKs, workflow), binlogdatapb.VReplicationWorkflowState_Copying.String()) + waitForWorkflowState(t, vc, fmt.Sprintf("`%s`.%s", targetKs, workflow), binlogdatapb.VReplicationWorkflowState_Copying.String()) // The initial copy phase should be blocking on the history list. confirmWorkflowHasCopiedNoData(t, targetKs, workflow) releaseInnoDBRowHistory(t, trxConn) @@ -287,7 +288,6 @@ func testBasicVreplicationWorkflow(t *testing.T, binlogRowImage string) { // If limited == true, we only run a limited set of workflows. func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string) { var err error - defaultCellName := "zone1" vc = NewVitessCluster(t, nil) defer vc.TearDown() // Keep the cluster processes minimal to deal with CI resource constraints @@ -359,7 +359,7 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string if strings.ToLower(binlogRowImage) == "noblob" { return } - _, err = vtgateConn.ExecuteFetch(fmt.Sprintf("use %s", targetKs), 1, false) + _, err = vtgateConn.ExecuteFetch(fmt.Sprintf("use `%s`", targetKs), 1, false) require.NoError(t, err, "error using %s keyspace: %v", targetKs, err) res, err := vtgateConn.ExecuteFetch("select count(*) from customer where name is not null", 1, false) require.NoError(t, err, "error getting current row count in customer: %v", err) @@ -372,10 +372,10 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string require.NoError(t, err, "error executing %q: %v", insert, err) vindexName := "customer_name_keyspace_id" - err = vc.VtctldClient.ExecuteCommand("LookupVindex", "--name", vindexName, "--table-keyspace=product", "create", 
"--keyspace=customer", + err = vc.VtctldClient.ExecuteCommand("LookupVindex", "--name", vindexName, "--table-keyspace", sourceKs, "create", "--keyspace", targetKs, "--type=consistent_lookup", "--table-owner=customer", "--table-owner-columns=name,cid", "--ignore-nulls", "--tablet-types=PRIMARY") require.NoError(t, err, "error executing LookupVindex create: %v", err) - waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", sourceKs, vindexName), binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, fmt.Sprintf("`%s`.%s", sourceKs, vindexName), binlogdatapb.VReplicationWorkflowState_Running.String()) waitForRowCount(t, vtgateConn, sourceKs, vindexName, int(rows)) customerVSchema, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", targetKs) require.NoError(t, err, "error executing GetVSchema: %v", err) @@ -383,7 +383,7 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string require.NotNil(t, vdx, "lookup vindex %s not found", vindexName) require.Equal(t, "true", vdx.Get("params.write_only").String(), "expected write_only parameter to be true") - err = vc.VtctldClient.ExecuteCommand("LookupVindex", "--name", vindexName, "--table-keyspace=product", "externalize", "--keyspace=customer") + err = vc.VtctldClient.ExecuteCommand("LookupVindex", "--name", vindexName, "--table-keyspace", sourceKs, "externalize", "--keyspace", targetKs) require.NoError(t, err, "error executing LookupVindex externalize: %v", err) customerVSchema, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", targetKs) require.NoError(t, err, "error executing GetVSchema: %v", err) @@ -391,7 +391,7 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string require.NotNil(t, vdx, "lookup vindex %s not found", vindexName) require.NotEqual(t, "true", vdx.Get("params.write_only").String(), "did not expect write_only parameter to be true") - err = vc.VtctldClient.ExecuteCommand("LookupVindex", "--name", vindexName, 
"--table-keyspace=product", "internalize", "--keyspace=customer") + err = vc.VtctldClient.ExecuteCommand("LookupVindex", "--name", vindexName, "--table-keyspace", sourceKs, "internalize", "--keyspace", targetKs) require.NoError(t, err, "error executing LookupVindex internalize: %v", err) customerVSchema, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", targetKs) require.NoError(t, err, "error executing GetVSchema: %v", err) @@ -714,8 +714,8 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) { vc = NewVitessCluster(t, &clusterOptions{cells: cells}) defer vc.TearDown() - keyspace := sourceKs shard := "0" + table := "product" // Run the e2e test with binlog_row_image=NOBLOB and // binlog_row_value_options=PARTIAL_JSON. @@ -724,7 +724,7 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) { cell1 := vc.Cells["zone1"] cell2 := vc.Cells["zone2"] - vc.AddKeyspace(t, []*Cell{cell1, cell2}, keyspace, shard, initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, sourceKsOpts) + vc.AddKeyspace(t, []*Cell{cell1, cell2}, sourceKs, shard, initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, sourceKsOpts) // Add cell alias containing only zone2 result, err := vc.VtctldClient.ExecuteCommandWithOutput("AddCellsAlias", "--cells", "zone2", "alias") @@ -735,7 +735,7 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) { vtgate := cell1.Vtgates[0] t.Run("VStreamFrom", func(t *testing.T) { - testVStreamFrom(t, vtgate, keyspace, 2) + testVStreamFrom(t, vtgate, table, 2) }) shardCustomer(t, true, []*Cell{cell1, cell2}, "alias", false) isTableInDenyList(t, vc, fmt.Sprintf("%s/0", sourceKs), "customer") @@ -978,8 +978,8 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl output, err := vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", targetKs, "show", "--workflow", workflow) require.NoError(t, err) - require.Contains(t, output, "'customer.reverse_bits'") - 
require.Contains(t, output, "'customer.bmd5'") + require.Contains(t, output, fmt.Sprintf("'%s.reverse_bits'", targetKs)) + require.Contains(t, output, fmt.Sprintf("'%s.bmd5'", targetKs)) insertQuery1 = "insert into customer(cid, name) values(1002, 'tempCustomer5')" assertQueryExecutesOnTablet(t, vtgateConn, productTab, sourceKs, insertQuery1, matchInsertQuery1) @@ -1007,7 +1007,7 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl require.False(t, exists) for _, shard := range strings.Split("-80,80-", ",") { - expectNumberOfStreams(t, vtgateConn, "shardCustomerTargetStreams", "p2c", "customer:"+shard, 0) + expectNumberOfStreams(t, vtgateConn, "shardCustomerTargetStreams", "p2c", fmt.Sprintf("%s:%s", targetKs, shard), 0) } expectNumberOfStreams(t, vtgateConn, "shardCustomerReverseStreams", "p2c_reverse", fmt.Sprintf("%s:0", sourceKs), 0) @@ -1031,13 +1031,13 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl execVtgateQuery(t, vtgateConn, targetKs, "delete from customer where name like 'tempCustomer%'") waitForRowCountInTablet(t, customerTab1, targetKs, "customer", 1) waitForRowCountInTablet(t, customerTab2, targetKs, "customer", 2) - waitForRowCount(t, vtgateConn, targetKs, fmt.Sprintf("%s.customer", targetKs), 3) + waitForRowCount(t, vtgateConn, targetKs, fmt.Sprintf("%s.customer", sqlescape.EscapeID(targetKs)), 3) query = "insert into customer (name, cid) values('george', 5)" execVtgateQuery(t, vtgateConn, targetKs, query) waitForRowCountInTablet(t, customerTab1, targetKs, "customer", 1) waitForRowCountInTablet(t, customerTab2, targetKs, "customer", 3) - waitForRowCount(t, vtgateConn, targetKs, "customer.customer", 4) + waitForRowCount(t, vtgateConn, targetKs, fmt.Sprintf("%s.customer", sqlescape.EscapeID(targetKs)), 4) } }) } diff --git a/go/test/endtoend/vreplication/vreplication_test_env.go b/go/test/endtoend/vreplication/vreplication_test_env.go index c62d871380d..5362c03b2ca 100644 --- 
a/go/test/endtoend/vreplication/vreplication_test_env.go +++ b/go/test/endtoend/vreplication/vreplication_test_env.go @@ -16,32 +16,44 @@ limitations under the License. package vreplication +import "fmt" + +const ( + // Defaults used for all tests. + workflowName = "wf1" + sourceKs = "test-product" + targetKs = "test-customer" + ksWorkflow = targetKs + "." + workflowName + reverseKsWorkflow = sourceKs + "." + workflowName + "_reverse" + defaultCellName = "zone1" +) + var dryRunResultsSwitchWritesCustomerShard = []string{ - "Lock keyspace product", - "Lock keyspace customer", - "Mirroring 0.00 percent of traffic from keyspace product to keyspace customer for tablet types [PRIMARY]", - "/Stop writes on keyspace product for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order]: [keyspace:product;shard:0;position:", + fmt.Sprintf("Lock keyspace %s", sourceKs), + fmt.Sprintf("Lock keyspace %s", targetKs), + fmt.Sprintf("Mirroring 0.00 percent of traffic from keyspace %s to keyspace %s for tablet types [PRIMARY]", sourceKs, targetKs), + fmt.Sprintf("/Stop writes on keyspace %s for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order]: [keyspace:%s;shard:0;position:", sourceKs, sourceKs), "Wait for vreplication on stopped streams to catchup for up to 30s", "Create reverse vreplication workflow p2c_reverse", "Create journal entries on source databases", - "Enable writes on keyspace customer for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order]", - "Switch routing from keyspace product to keyspace customer", + fmt.Sprintf("Enable writes on keyspace %s for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order]", targetKs), + fmt.Sprintf("Switch routing from keyspace %s to keyspace %s", sourceKs, targetKs), "Routing rules for tables 
[Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order] will be updated", "Switch writes completed, freeze and delete vreplication streams on: [tablet:200,tablet:300]", "Start reverse vreplication streams on: [tablet:100]", - "Mark vreplication streams frozen on: [keyspace:customer;shard:-80;tablet:200;workflow:p2c;dbname:vt_customer,keyspace:customer;shard:80-;tablet:300;workflow:p2c;dbname:vt_customer]", - "Unlock keyspace customer", - "Unlock keyspace product", + fmt.Sprintf("Mark vreplication streams frozen on: [keyspace:%s;shard:-80;tablet:200;workflow:p2c;dbname:vt_%s,keyspace:%s;shard:80-;tablet:300;workflow:p2c;dbname:vt_%s]", targetKs, targetKs, targetKs, targetKs), + fmt.Sprintf("Unlock keyspace %s", targetKs), + fmt.Sprintf("Unlock keyspace %s", sourceKs), "", // Additional empty newline in the output } var dryRunResultsReadCustomerShard = []string{ - "Lock keyspace product", - "Mirroring 0.00 percent of traffic from keyspace product to keyspace customer for tablet types [RDONLY,REPLICA]", - "Switch reads for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order] to keyspace customer for tablet types [RDONLY,REPLICA]", + fmt.Sprintf("Lock keyspace %s", sourceKs), + fmt.Sprintf("Mirroring 0.00 percent of traffic from keyspace %s to keyspace %s for tablet types [RDONLY,REPLICA]", sourceKs, targetKs), + fmt.Sprintf("Switch reads for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order] to keyspace %s for tablet types [RDONLY,REPLICA]", targetKs), "Routing rules for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order] will be updated", - "Serving VSchema will be rebuilt for the customer keyspace", - "Unlock keyspace product", + fmt.Sprintf("Serving VSchema will be rebuilt for the %s keyspace", targetKs), + fmt.Sprintf("Unlock keyspace %s", sourceKs), "", // Additional empty newline 
in the output } diff --git a/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go b/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go index a17cb73a50d..409fd88448a 100644 --- a/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go +++ b/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go @@ -66,8 +66,8 @@ func TestVtctldclientCLI(t *testing.T) { require.NotNil(t, zone2) defer vc.TearDown() - sourceKeyspaceName := "product" - targetKeyspaceName := "customer" + sourceKeyspaceName := sourceKs + targetKeyspaceName := targetKs var mt iMoveTables workflowName := "wf1" @@ -184,7 +184,7 @@ func TestVtctldclientCLI(t *testing.T) { require.NotNil(vc.t, resp) require.NotNil(vc.t, resp.ShardStreams) require.Equal(vc.t, len(resp.ShardStreams), 2) - keyspace := "customer" + keyspace := targetKs for _, shard := range []string{"80-c0", "c0-"} { streams := resp.ShardStreams[fmt.Sprintf("%s/%s", keyspace, shard)] require.Equal(vc.t, 1, len(streams.Streams)) @@ -272,7 +272,7 @@ func testMoveTablesFlags2(t *testing.T, mt *iMoveTables, sourceKeyspace, targetK confirmNoRoutingRules(t) for _, tab := range targetTabs { alias := fmt.Sprintf("zone1-%d", tab.TabletUID) - query := "update _vt.vreplication set source := replace(source, 'stop_after_copy:true', 'stop_after_copy:false') where db_name = 'vt_customer' and workflow = 'wf1'" + query := fmt.Sprintf("update _vt.vreplication set source := replace(source, 'stop_after_copy:true', 'stop_after_copy:false') where db_name = 'vt_%s' and workflow = 'wf1'", targetKeyspace) output, err := vc.VtctldClient.ExecuteCommandWithOutput("ExecuteFetchAsDBA", alias, query) require.NoError(t, err, output) } @@ -289,70 +289,70 @@ func testMoveTablesFlags2(t *testing.T, mt *iMoveTables, sourceKeyspace, targetK (*mt).SwitchReads() validateReadsRouteToTarget(t, "replica") - validateTableRoutingRule(t, "customer", "replica", sourceKs, targetKs) - validateTableRoutingRule(t, "customer", "", targetKs, 
sourceKs) + validateTableRoutingRule(t, "customer", "replica", sourceKeyspace, targetKeyspace) + validateTableRoutingRule(t, "customer", "", targetKeyspace, sourceKeyspace) confirmStates(t, &wf, wrangler.WorkflowStateNotSwitched, wrangler.WorkflowStateReadsSwitched) (*mt).ReverseReads() validateReadsRouteToSource(t, "replica") - validateTableRoutingRule(t, "customer", "replica", targetKs, sourceKs) - validateTableRoutingRule(t, "customer", "", targetKs, sourceKs) + validateTableRoutingRule(t, "customer", "replica", targetKeyspace, sourceKeyspace) + validateTableRoutingRule(t, "customer", "", targetKeyspace, sourceKeyspace) confirmStates(t, &wf, wrangler.WorkflowStateReadsSwitched, wrangler.WorkflowStateNotSwitched) (*mt).SwitchReadsAndWrites() validateReadsRouteToTarget(t, "replica") - validateTableRoutingRule(t, "customer", "replica", sourceKs, targetKs) + validateTableRoutingRule(t, "customer", "replica", sourceKeyspace, targetKeyspace) validateWritesRouteToTarget(t) - validateTableRoutingRule(t, "customer", "", sourceKs, targetKs) + validateTableRoutingRule(t, "customer", "", sourceKeyspace, targetKeyspace) confirmStates(t, &wf, wrangler.WorkflowStateNotSwitched, wrangler.WorkflowStateAllSwitched) (*mt).ReverseReadsAndWrites() validateReadsRouteToSource(t, "replica") - validateTableRoutingRule(t, "customer", "replica", targetKs, sourceKs) + validateTableRoutingRule(t, "customer", "replica", targetKeyspace, sourceKeyspace) validateWritesRouteToSource(t) - validateTableRoutingRule(t, "customer", "", targetKs, sourceKs) + validateTableRoutingRule(t, "customer", "", targetKeyspace, sourceKeyspace) confirmStates(t, &wf, wrangler.WorkflowStateAllSwitched, wrangler.WorkflowStateNotSwitched) (*mt).SwitchReadsAndWrites() validateReadsRouteToTarget(t, "replica") - validateTableRoutingRule(t, "customer", "replica", sourceKs, targetKs) + validateTableRoutingRule(t, "customer", "replica", sourceKeyspace, targetKeyspace) validateWritesRouteToTarget(t) - 
validateTableRoutingRule(t, "customer", "", sourceKs, targetKs) + validateTableRoutingRule(t, "customer", "", sourceKeyspace, targetKeyspace) confirmStates(t, &wf, wrangler.WorkflowStateNotSwitched, wrangler.WorkflowStateAllSwitched) (*mt).ReverseReads() validateReadsRouteToSource(t, "replica") - validateTableRoutingRule(t, "customer", "replica", targetKs, sourceKs) + validateTableRoutingRule(t, "customer", "replica", targetKeyspace, sourceKeyspace) validateWritesRouteToTarget(t) - validateTableRoutingRule(t, "customer", "", sourceKs, targetKs) + validateTableRoutingRule(t, "customer", "", sourceKeyspace, targetKeyspace) confirmStates(t, &wf, wrangler.WorkflowStateAllSwitched, wrangler.WorkflowStateWritesSwitched) (*mt).ReverseWrites() validateReadsRouteToSource(t, "replica") - validateTableRoutingRule(t, "customer", "replica", targetKs, sourceKs) + validateTableRoutingRule(t, "customer", "replica", targetKeyspace, sourceKeyspace) validateWritesRouteToSource(t) - validateTableRoutingRule(t, "customer", "", targetKs, sourceKs) + validateTableRoutingRule(t, "customer", "", targetKeyspace, sourceKeyspace) confirmStates(t, &wf, wrangler.WorkflowStateWritesSwitched, wrangler.WorkflowStateNotSwitched) (*mt).SwitchReadsAndWrites() validateReadsRouteToTarget(t, "replica") - validateTableRoutingRule(t, "customer", "replica", sourceKs, targetKs) + validateTableRoutingRule(t, "customer", "replica", sourceKeyspace, targetKeyspace) validateWritesRouteToTarget(t) - validateTableRoutingRule(t, "customer", "", sourceKs, targetKs) + validateTableRoutingRule(t, "customer", "", sourceKeyspace, targetKeyspace) confirmStates(t, &wf, wrangler.WorkflowStateNotSwitched, wrangler.WorkflowStateAllSwitched) (*mt).ReverseWrites() validateReadsRouteToTarget(t, "replica") - validateTableRoutingRule(t, "customer", "replica", sourceKs, targetKs) + validateTableRoutingRule(t, "customer", "replica", sourceKeyspace, targetKeyspace) validateWritesRouteToSource(t) - validateTableRoutingRule(t, 
"customer", "", targetKs, sourceKs) + validateTableRoutingRule(t, "customer", "", targetKeyspace, sourceKeyspace) confirmStates(t, &wf, wrangler.WorkflowStateAllSwitched, wrangler.WorkflowStateReadsSwitched) (*mt).ReverseReads() validateReadsRouteToSource(t, "replica") - validateTableRoutingRule(t, "customer", "replica", targetKs, sourceKs) + validateTableRoutingRule(t, "customer", "replica", targetKeyspace, sourceKeyspace) validateWritesRouteToSource(t) - validateTableRoutingRule(t, "customer", "", targetKs, sourceKs) + validateTableRoutingRule(t, "customer", "", targetKeyspace, sourceKeyspace) confirmStates(t, &wf, wrangler.WorkflowStateReadsSwitched, wrangler.WorkflowStateNotSwitched) // Confirm that everything is still in sync after our switch fest. @@ -360,9 +360,9 @@ func testMoveTablesFlags2(t *testing.T, mt *iMoveTables, sourceKeyspace, targetK (*mt).SwitchReadsAndWrites() validateReadsRouteToTarget(t, "replica") - validateTableRoutingRule(t, "customer", "replica", sourceKs, targetKs) + validateTableRoutingRule(t, "customer", "replica", sourceKeyspace, targetKeyspace) validateWritesRouteToTarget(t) - validateTableRoutingRule(t, "customer", "", sourceKs, targetKs) + validateTableRoutingRule(t, "customer", "", sourceKeyspace, targetKeyspace) confirmStates(t, &wf, wrangler.WorkflowStateNotSwitched, wrangler.WorkflowStateAllSwitched) (*mt).Complete() @@ -408,8 +408,8 @@ func testMoveTablesFlags3(t *testing.T, sourceKeyspace, targetKeyspace string, t mt.SwitchReads() wf := mt.(iWorkflow) validateReadsRouteToTarget(t, "replica") - validateTableRoutingRule(t, "customer", "replica", sourceKs, targetKs) - validateTableRoutingRule(t, "customer", "", targetKs, sourceKs) + validateTableRoutingRule(t, "customer", "replica", sourceKeyspace, targetKeyspace) + validateTableRoutingRule(t, "customer", "", targetKeyspace, sourceKeyspace) confirmStates(t, &wf, wrangler.WorkflowStateNotSwitched, wrangler.WorkflowStateReadsSwitched) mt.Cancel() confirmNoRoutingRules(t) @@ -584,7 
+584,7 @@ func splitShard(t *testing.T, keyspace, workflowName, sourceShards, targetShards waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Stopped.String()) for _, tab := range targetTabs { alias := fmt.Sprintf("zone1-%d", tab.TabletUID) - query := "update _vt.vreplication set source := replace(source, 'stop_after_copy:true', 'stop_after_copy:false') where db_name = 'vt_customer' and workflow = '" + workflowName + "'" + query := fmt.Sprintf("update _vt.vreplication set source := replace(source, 'stop_after_copy:true', 'stop_after_copy:false') where db_name = 'vt_%s' and workflow = '%s'", keyspace, workflowName) output, err := vc.VtctldClient.ExecuteCommandWithOutput("ExecuteFetchAsDBA", alias, query) require.NoError(t, err, output) } @@ -754,7 +754,7 @@ func validateReshardResponse(rs iReshard) { require.NotNil(vc.t, resp) require.NotNil(vc.t, resp.ShardStreams) require.Equal(vc.t, len(resp.ShardStreams), 2) - keyspace := "customer" + keyspace := targetKs for _, shard := range []string{"-40", "40-80"} { streams := resp.ShardStreams[fmt.Sprintf("%s/%s", keyspace, shard)] require.Equal(vc.t, 1, len(streams.Streams)) @@ -768,9 +768,9 @@ func validateReshardWorkflow(t *testing.T, workflows []*vtctldatapb.Workflow) { require.Equal(t, "reshard", wf.Name) require.Equal(t, binlogdatapb.VReplicationWorkflowType_Reshard.String(), wf.WorkflowType) require.Equal(t, "None", wf.WorkflowSubType) - require.Equal(t, "customer", wf.Target.Keyspace) + require.Equal(t, targetKs, wf.Target.Keyspace) require.Equal(t, 2, len(wf.Target.Shards)) - require.Equal(t, "customer", wf.Source.Keyspace) + require.Equal(t, targetKs, wf.Source.Keyspace) require.Equal(t, 1, len(wf.Source.Shards)) require.False(t, wf.DeferSecondaryKeys) @@ -919,9 +919,9 @@ func validateMoveTablesWorkflow(t *testing.T, workflows []*vtctldatapb.Workflow) require.Equal(t, "wf1", wf.Name) require.Equal(t, binlogdatapb.VReplicationWorkflowType_MoveTables.String(), wf.WorkflowType) 
require.Equal(t, "None", wf.WorkflowSubType) - require.Equal(t, "customer", wf.Target.Keyspace) + require.Equal(t, targetKs, wf.Target.Keyspace) require.Equal(t, 2, len(wf.Target.Shards)) - require.Equal(t, "product", wf.Source.Keyspace) + require.Equal(t, sourceKs, wf.Source.Keyspace) require.Equal(t, 1, len(wf.Source.Shards)) require.False(t, wf.DeferSecondaryKeys) diff --git a/go/test/endtoend/vreplication/vstream_test.go b/go/test/endtoend/vreplication/vstream_test.go index d9153667c13..d03a1c90fd2 100644 --- a/go/test/endtoend/vreplication/vstream_test.go +++ b/go/test/endtoend/vreplication/vstream_test.go @@ -49,7 +49,7 @@ func TestVStreamWithTablesToSkipCopyFlag(t *testing.T) { defaultRdonly = 0 defaultCell := vc.Cells[vc.CellNames[0]] - vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) + vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) verifyClusterHealth(t, vc) ctx := context.Background() @@ -60,7 +60,7 @@ func TestVStreamWithTablesToSkipCopyFlag(t *testing.T) { defer vstreamConn.Close() vgtid := &binlogdatapb.VGtid{ ShardGtids: []*binlogdatapb.ShardGtid{{ - Keyspace: "product", + Keyspace: sourceKs, Shard: "0", Gtid: "", }}} @@ -90,9 +90,9 @@ func TestVStreamWithTablesToSkipCopyFlag(t *testing.T) { // present in the filter before running the VStream. 
for range 10 { id++ - execVtgateQuery(t, vtgateConn, "product", fmt.Sprintf("insert into customer (cid, name) values (%d, 'customer%d')", id+100, id)) - execVtgateQuery(t, vtgateConn, "product", fmt.Sprintf("insert into product (pid, description) values (%d, 'description%d')", id+100, id)) - execVtgateQuery(t, vtgateConn, "product", fmt.Sprintf("insert into merchant (mname, category) values ('mname%d', 'category%d')", id+100, id)) + execVtgateQuery(t, vtgateConn, sourceKs, fmt.Sprintf("insert into customer (cid, name) values (%d, 'customer%d')", id+100, id)) + execVtgateQuery(t, vtgateConn, sourceKs, fmt.Sprintf("insert into product (pid, description) values (%d, 'description%d')", id+100, id)) + execVtgateQuery(t, vtgateConn, sourceKs, fmt.Sprintf("insert into merchant (mname, category) values ('mname%d', 'category%d')", id+100, id)) } // Stream events from the VStream API @@ -157,9 +157,9 @@ func TestVStreamWithTablesToSkipCopyFlag(t *testing.T) { } insertMu.Lock() id++ - execVtgateQuery(t, vtgateConn, "product", fmt.Sprintf("insert into customer (cid, name) values (%d, 'customer%d')", id+100, id)) - execVtgateQuery(t, vtgateConn, "product", fmt.Sprintf("insert into product (pid, description) values (%d, 'description%d')", id+100, id)) - execVtgateQuery(t, vtgateConn, "product", fmt.Sprintf("insert into merchant (mname, category) values ('mname%d', 'category%d')", id+100, id)) + execVtgateQuery(t, vtgateConn, sourceKs, fmt.Sprintf("insert into customer (cid, name) values (%d, 'customer%d')", id+100, id)) + execVtgateQuery(t, vtgateConn, sourceKs, fmt.Sprintf("insert into product (pid, description) values (%d, 'description%d')", id+100, id)) + execVtgateQuery(t, vtgateConn, sourceKs, fmt.Sprintf("insert into merchant (mname, category) values ('mname%d', 'category%d')", id+100, id)) insertMu.Unlock() } }() @@ -169,9 +169,9 @@ func TestVStreamWithTablesToSkipCopyFlag(t *testing.T) { time.Sleep(10 * time.Second) // Give the vstream plenty of time to catchup 
done.Store(true) - qr1 := execVtgateQuery(t, vtgateConn, "product", "select count(*) from customer") - qr2 := execVtgateQuery(t, vtgateConn, "product", "select count(*) from product") - qr3 := execVtgateQuery(t, vtgateConn, "product", "select count(*) from merchant") + qr1 := execVtgateQuery(t, vtgateConn, sourceKs, "select count(*) from customer") + qr2 := execVtgateQuery(t, vtgateConn, sourceKs, "select count(*) from product") + qr3 := execVtgateQuery(t, vtgateConn, sourceKs, "select count(*) from merchant") require.NotNil(t, qr1) require.NotNil(t, qr2) require.NotNil(t, qr3) @@ -213,7 +213,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) { defaultRdonly = 0 defaultCell := vc.Cells[vc.CellNames[0]] - vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) + vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) verifyClusterHealth(t, vc) insertInitialData(t) vtgate := defaultCell.Vtgates[0] @@ -228,7 +228,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) { defer vstreamConn.Close() vgtid := &binlogdatapb.VGtid{ ShardGtids: []*binlogdatapb.ShardGtid{{ - Keyspace: "product", + Keyspace: sourceKs, Shard: "0", Gtid: "", }}} @@ -260,7 +260,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) { } insertMu.Lock() id++ - execVtgateQuery(t, vtgateConn, "product", fmt.Sprintf("insert into customer (cid, name) values (%d, 'customer%d')", id+100, id)) + execVtgateQuery(t, vtgateConn, sourceKs, fmt.Sprintf("insert into customer (cid, name) values (%d, 'customer%d')", id+100, id)) insertMu.Unlock() } }() @@ -305,7 +305,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) { case 1: if failover { insertMu.Lock() - output, err := vc.VtctldClient.ExecuteCommandWithOutput("PlannedReparentShard", "product/0", "--new-primary=zone1-101") + output, err := 
vc.VtctldClient.ExecuteCommandWithOutput("PlannedReparentShard", fmt.Sprintf("%s/0", sourceKs), "--new-primary=zone1-101") insertMu.Unlock() log.Infof("output of first PRS is %s", output) require.NoError(t, err) @@ -313,7 +313,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) { case 2: if failover { insertMu.Lock() - output, err := vc.VtctldClient.ExecuteCommandWithOutput("PlannedReparentShard", "product/0", "--new-primary=zone1-100") + output, err := vc.VtctldClient.ExecuteCommandWithOutput("PlannedReparentShard", fmt.Sprintf("%s/0", sourceKs), "--new-primary=zone1-100") insertMu.Unlock() log.Infof("output of second PRS is %s", output) require.NoError(t, err) @@ -329,7 +329,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) { } } - qr := execVtgateQuery(t, vtgateConn, "product", "select count(*) from customer") + qr := execVtgateQuery(t, vtgateConn, sourceKs, "select count(*) from customer") require.NotNil(t, qr) // total number of row events found by the VStream API should match the rows inserted insertedRows, err := qr.Rows[0][0].ToCastInt64() @@ -654,7 +654,7 @@ func testVStreamCopyMultiKeyspaceReshard(t *testing.T, baseTabletID int) numEven tickCount++ switch tickCount { case 1: - reshard(t, "sharded", "customer", "vstreamCopyMultiKeyspaceReshard", "-80,80-", "-40,40-", baseTabletID+400, nil, nil, nil, nil, defaultCellName, 1) + reshard(t, "sharded", targetKs, "vstreamCopyMultiKeyspaceReshard", "-80,80-", "-40,40-", baseTabletID+400, nil, nil, nil, nil, defaultCellName, 1) reshardDone = true case 60: done = true @@ -1103,7 +1103,7 @@ func TestVStreamStopOnReshardFalse(t *testing.T) { func TestVStreamWithKeyspacesToWatch(t *testing.T) { extraVTGateArgs = append(extraVTGateArgs, []string{ - utils.GetFlagVariantForTests("--keyspaces-to-watch"), "product", + utils.GetFlagVariantForTests("--keyspaces-to-watch"), sourceKs, }...) 
testVStreamWithFailover(t, false) @@ -1142,7 +1142,7 @@ func doVStream(t *testing.T, vc *VitessCluster, flags *vtgatepb.VStreamFlags) (n done := false vgtid := &binlogdatapb.VGtid{ ShardGtids: []*binlogdatapb.ShardGtid{{ - Keyspace: "product", + Keyspace: sourceKs, Shard: "0", Gtid: "", }}} @@ -1167,7 +1167,7 @@ func doVStream(t *testing.T, vc *VitessCluster, flags *vtgatepb.VStreamFlags) (n arr := strings.Split(rowEvent.TableName, ".") require.Equal(t, len(arr), 2) tableName := arr[1] - require.Equal(t, "product", rowEvent.Keyspace) + require.Equal(t, sourceKs, rowEvent.Keyspace) require.Equal(t, "0", rowEvent.Shard) numRowEvents[tableName]++ @@ -1176,7 +1176,7 @@ func doVStream(t *testing.T, vc *VitessCluster, flags *vtgatepb.VStreamFlags) (n arr := strings.Split(fieldEvent.TableName, ".") require.Equal(t, len(arr), 2) tableName := arr[1] - require.Equal(t, "product", fieldEvent.Keyspace) + require.Equal(t, sourceKs, fieldEvent.Keyspace) require.Equal(t, "0", fieldEvent.Shard) numFieldEvents[tableName]++ default: @@ -1215,7 +1215,7 @@ func TestVStreamHeartbeats(t *testing.T) { defaultRdonly = 0 defaultCell := vc.Cells[vc.CellNames[0]] - vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, + vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) verifyClusterHealth(t, vc) insertInitialData(t) @@ -1271,7 +1271,7 @@ func TestVStreamPushdownFilters(t *testing.T) { }) defer vc.TearDown() require.NotNil(t, vc) - ks := "product" + ks := sourceKs shard := "0" defaultCell := vc.Cells[vc.CellNames[0]] diff --git a/go/vt/vttablet/tabletmanager/rpc_vreplication.go b/go/vt/vttablet/tabletmanager/rpc_vreplication.go index eb4ab0f8257..f88a01e7719 100644 --- a/go/vt/vttablet/tabletmanager/rpc_vreplication.go +++ b/go/vt/vttablet/tabletmanager/rpc_vreplication.go @@ -748,7 +748,7 @@ func (tm *TabletManager) GetMaxValueForSequences(ctx 
context.Context, req *table func (tm *TabletManager) getMaxSequenceValue(ctx context.Context, sm *tabletmanagerdatapb.GetMaxValueForSequencesRequest_SequenceMetadata) (int64, error) { for _, val := range []string{sm.UsingColEscaped, sm.UsingTableDbNameEscaped, sm.UsingTableNameEscaped} { - if val[0] != '`' { + if val[0] != '`' || val[len(val)-1] != '`' { return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "the column (%s), database (%s), and table (%s) names must be escaped", sm.UsingColEscaped, sm.UsingTableDbNameEscaped, sm.UsingTableNameEscaped) } diff --git a/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go b/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go index 18140cd0c5b..fb807eab7c3 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go +++ b/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go @@ -1048,7 +1048,7 @@ func findColumn(ti *Table, name sqlparser.IdentifierCI) (int, error) { } // Let's see if the Table only has TableMap event names and if so return a different error. 
for _, col := range ti.Fields { - if col.Name[0] != '@' { + if !strings.HasPrefix(col.Name, "@") { return 0, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "column %s not found in table %s", sqlparser.String(name), ti.Name) } } From 7f26e0a4db6e5f2eae072993d59b290c8d4bdcd8 Mon Sep 17 00:00:00 2001 From: Matt Lord Date: Fri, 17 Oct 2025 03:15:05 +0000 Subject: [PATCH 06/13] Fix LookupVindex Create Signed-off-by: Matt Lord --- .../vreplication/lookupvindex/lookupvindex.go | 25 ++++++++++++++++--- .../vreplication/vreplication_test.go | 4 +-- go/vt/sqlparser/parse_table_test.go | 8 ++++++ 3 files changed, 31 insertions(+), 6 deletions(-) diff --git a/go/cmd/vtctldclient/command/vreplication/lookupvindex/lookupvindex.go b/go/cmd/vtctldclient/command/vreplication/lookupvindex/lookupvindex.go index d8a8ffed101..62f892d0595 100644 --- a/go/cmd/vtctldclient/command/vreplication/lookupvindex/lookupvindex.go +++ b/go/cmd/vtctldclient/command/vreplication/lookupvindex/lookupvindex.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/cmd/vtctldclient/cli" "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + "vitess.io/vitess/go/sqlescape" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" @@ -50,7 +51,7 @@ var ( } baseOptions = struct { - // This is where the lookup table and VReplicaiton workflow + // This is where the lookup table and VReplication workflow // will be created. 
TableKeyspace string // This will be the name of the Lookup Vindex and the name @@ -133,12 +134,20 @@ var ( if !strings.Contains(createOptions.Type, "lookup") { return fmt.Errorf("vindex type must be a lookup vindex") } + escapedTableKeyspace, err := sqlescape.EnsureEscaped(baseOptions.TableKeyspace) + if err != nil { + return fmt.Errorf("invalid table keyspace (%s): %v", baseOptions.TableKeyspace, err) + } + escapedTableName, err := sqlescape.EnsureEscaped(createOptions.TableName) + if err != nil { + return fmt.Errorf("invalid table name (%s): %v", createOptions.TableName, err) + } baseOptions.Vschema = &vschemapb.Keyspace{ Vindexes: map[string]*vschemapb.Vindex{ baseOptions.Name: { Type: createOptions.Type, Params: map[string]string{ - "table": baseOptions.TableKeyspace + "." + createOptions.TableName, + "table": escapedTableKeyspace + "." + escapedTableName, "from": strings.Join(createOptions.TableOwnerColumns, ","), "to": "keyspace_id", "ignore_nulls": fmt.Sprintf("%t", createOptions.IgnoreNulls), @@ -204,15 +213,23 @@ var ( return fmt.Errorf("%s is not a lookup vindex type", vindex.LookupVindexType) } + escapedTableKeyspace, err := sqlescape.EnsureEscaped(baseOptions.TableKeyspace) + if err != nil { + return fmt.Errorf("invalid table keyspace (%s): %v", baseOptions.TableKeyspace, err) + } + escapedTableName, err := sqlescape.EnsureEscaped(vindex.TableName) + if err != nil { + return fmt.Errorf("invalid table name (%s): %v", vindex.TableName, err) + } vindexes[vindexName] = &vschemapb.Vindex{ Type: vindex.LookupVindexType, Params: map[string]string{ - "table": baseOptions.TableKeyspace + "." + vindex.TableName, + "table": escapedTableKeyspace + "."
+ escapedTableName, "from": strings.Join(vindex.TableOwnerColumns, ","), "to": "keyspace_id", "ignore_nulls": fmt.Sprintf("%t", vindex.IgnoreNulls), }, - Owner: vindex.TableOwner, + Owner: createOptions.TableOwner, } targetTableColumnVindex := &vschemapb.ColumnVindex{ diff --git a/go/test/endtoend/vreplication/vreplication_test.go b/go/test/endtoend/vreplication/vreplication_test.go index 358a56be806..6ded63f3eb8 100644 --- a/go/test/endtoend/vreplication/vreplication_test.go +++ b/go/test/endtoend/vreplication/vreplication_test.go @@ -259,7 +259,7 @@ func TestVreplicationCopyThrottling(t *testing.T) { // because of the InnoDB History List length. moveTablesActionWithTabletTypes(t, "Create", defaultCell.Name, workflow, sourceKs, targetKs, table, "primary", true) // Wait for the copy phase to start - waitForWorkflowState(t, vc, fmt.Sprintf("`%s`.%s", targetKs, workflow), binlogdatapb.VReplicationWorkflowState_Copying.String()) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKs, workflow), binlogdatapb.VReplicationWorkflowState_Copying.String()) // The initial copy phase should be blocking on the history list. 
confirmWorkflowHasCopiedNoData(t, targetKs, workflow) releaseInnoDBRowHistory(t, trxConn) @@ -375,7 +375,7 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string err = vc.VtctldClient.ExecuteCommand("LookupVindex", "--name", vindexName, "--table-keyspace", sourceKs, "create", "--keyspace", targetKs, "--type=consistent_lookup", "--table-owner=customer", "--table-owner-columns=name,cid", "--ignore-nulls", "--tablet-types=PRIMARY") require.NoError(t, err, "error executing LookupVindex create: %v", err) - waitForWorkflowState(t, vc, fmt.Sprintf("`%s`.%s", sourceKs, vindexName), binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", sourceKs, vindexName), binlogdatapb.VReplicationWorkflowState_Running.String()) waitForRowCount(t, vtgateConn, sourceKs, vindexName, int(rows)) customerVSchema, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", targetKs) require.NoError(t, err, "error executing GetVSchema: %v", err) diff --git a/go/vt/sqlparser/parse_table_test.go b/go/vt/sqlparser/parse_table_test.go index 5f187cbc6d0..b4770d71f86 100644 --- a/go/vt/sqlparser/parse_table_test.go +++ b/go/vt/sqlparser/parse_table_test.go @@ -55,6 +55,14 @@ func TestParseTable(t *testing.T) { }, { input: "k.t.", err: true, + }, { + input: "`k-t`.t", + keyspace: "k-t", + table: "t", + }, { + input: "`k-t`.`k-t`", + keyspace: "k-t", + table: "k-t", }} parser := NewTestParser() for _, tcase := range testcases { From 629afdb04e258337bae3d591f082de3d766575dc Mon Sep 17 00:00:00 2001 From: Matt Lord Date: Fri, 17 Oct 2025 14:29:19 +0000 Subject: [PATCH 07/13] Improve default keyspace const var names Signed-off-by: Matt Lord --- go/test/endtoend/vreplication/config_test.go | 14 +- go/test/endtoend/vreplication/fk_test.go | 4 +- go/test/endtoend/vreplication/helper_test.go | 4 +- .../vreplication/initial_data_test.go | 40 +- go/test/endtoend/vreplication/migrate_test.go | 44 +- 
.../vreplication/movetables_buffering_test.go | 6 +- .../movetables_mirrortraffic_test.go | 12 +- .../vreplication/partial_movetables_test.go | 8 +- .../resharding_workflows_v2_test.go | 166 ++++---- .../endtoend/vreplication/sidecardb_test.go | 2 +- .../endtoend/vreplication/time_zone_test.go | 32 +- go/test/endtoend/vreplication/vdiff2_test.go | 140 +++---- .../vdiff_multiple_movetables_test.go | 12 +- .../vreplication/vdiff_online_ddl_test.go | 2 +- .../vreplication/vreplication_test.go | 384 +++++++++--------- .../vreplication/vreplication_test_env.go | 36 +- .../vreplication_vtctldclient_cli_test.go | 16 +- .../vreplication/vschema_load_test.go | 2 +- go/test/endtoend/vreplication/vstream_test.go | 52 +-- 19 files changed, 488 insertions(+), 488 deletions(-) diff --git a/go/test/endtoend/vreplication/config_test.go b/go/test/endtoend/vreplication/config_test.go index c1f38e75d77..798842db66c 100644 --- a/go/test/endtoend/vreplication/config_test.go +++ b/go/test/endtoend/vreplication/config_test.go @@ -442,7 +442,7 @@ create table ukTable (id1 int not null, id2 int not null, name varchar(20), uniq "create_ddl": "create table cproduct(pid bigint, description varchar(128), date1 datetime not null default '0000-00-00 00:00:00', date2 datetime not null default '2021-00-01 00:00:00', primary key(pid)) CHARSET=utf8mb4" }] } -`, sourceKs, targetKs) +`, defaultSourceKs, defaultTargetKs) materializeCustomerNameSpec = fmt.Sprintf(` { @@ -455,7 +455,7 @@ create table ukTable (id1 int not null, id2 int not null, name varchar(20), uniq "create_ddl": "create table if not exists customer_name (cid bigint not null, name varchar(128), primary key(cid), key(name))" }] } -`, targetKs, targetKs) +`, defaultTargetKs, defaultTargetKs) materializeCustomerTypeSpec = fmt.Sprintf(` { @@ -468,7 +468,7 @@ create table ukTable (id1 int not null, id2 int not null, name varchar(20), uniq "create_ddl": "create table if not exists enterprise_customer (cid bigint not null, name varchar(128), 
typ varchar(64), primary key(cid), key(typ))" }] } -`, targetKs, targetKs) +`, defaultTargetKs, defaultTargetKs) merchantOrdersVSchema = ` { @@ -523,7 +523,7 @@ create table ukTable (id1 int not null, id2 int not null, name varchar(20), uniq "create_ddl": "create table morders(oid int, cid int, mname varchar(128), pid int, price int, qty int, total int, total2 int as (10 * total), primary key(oid)) CHARSET=utf8" }] } -`, targetKs) +`, defaultTargetKs) materializeMerchantSalesSpec = fmt.Sprintf(` { @@ -536,7 +536,7 @@ create table ukTable (id1 int not null, id2 int not null, name varchar(20), uniq "create_ddl": "create table msales(merchant_name varchar(128), kount int, amount int, primary key(merchant_name)) CHARSET=utf8" }] } -`, targetKs) +`, defaultTargetKs) materializeSalesVSchema = ` { @@ -563,7 +563,7 @@ create table ukTable (id1 int not null, id2 int not null, name varchar(20), uniq "create_ddl": "create table sales(pid int, kount int, amount int, primary key(pid)) CHARSET=utf8" }] } -`, targetKs, sourceKs) +`, defaultTargetKs, defaultSourceKs) materializeRollupSpec = fmt.Sprintf(` { "workflow": "rollup", @@ -575,7 +575,7 @@ create table ukTable (id1 int not null, id2 int not null, name varchar(20), uniq "create_ddl": "create table rollup(rollupname varchar(100), kount int, primary key (rollupname)) CHARSET=utf8mb4" }] } -`, sourceKs, sourceKs) +`, defaultSourceKs, defaultSourceKs) initialExternalSchema = ` create table review(rid int, pid int, review varbinary(128), primary key(rid)); create table rating(gid int, pid int, rating int, primary key(gid)); diff --git a/go/test/endtoend/vreplication/fk_test.go b/go/test/endtoend/vreplication/fk_test.go index 282d7a63c47..9f5fdea0d65 100644 --- a/go/test/endtoend/vreplication/fk_test.go +++ b/go/test/endtoend/vreplication/fk_test.go @@ -58,7 +58,7 @@ func TestFKWorkflow(t *testing.T) { defer vc.TearDown() cell := vc.Cells[cellName] - vc.AddKeyspace(t, []*Cell{cell}, sourceKeyspace, shardName, 
initialFKSourceVSchema, initialFKSchema, 0, 0, 100, sourceKsOpts) + vc.AddKeyspace(t, []*Cell{cell}, sourceKeyspace, shardName, initialFKSourceVSchema, initialFKSchema, 0, 0, 100, defaultSourceKsOpts) verifyClusterHealth(t, vc) insertInitialFKData(t) @@ -82,7 +82,7 @@ func TestFKWorkflow(t *testing.T) { targetKeyspace := "fktarget" targetTabletId := 200 - vc.AddKeyspace(t, []*Cell{cell}, targetKeyspace, shardName, initialFKTargetVSchema, "", 0, 0, targetTabletId, sourceKsOpts) + vc.AddKeyspace(t, []*Cell{cell}, targetKeyspace, shardName, initialFKTargetVSchema, "", 0, 0, targetTabletId, defaultSourceKsOpts) testFKCancel(t, vc) diff --git a/go/test/endtoend/vreplication/helper_test.go b/go/test/endtoend/vreplication/helper_test.go index 444673db5a6..9cde0973b3b 100644 --- a/go/test/endtoend/vreplication/helper_test.go +++ b/go/test/endtoend/vreplication/helper_test.go @@ -650,11 +650,11 @@ func getDebugVar(t *testing.T, port int, varPath []string) (string, error) { return string(val), nil } -func confirmWorkflowHasCopiedNoData(t *testing.T, targetKS, workflow string) { +func confirmWorkflowHasCopiedNoData(t *testing.T, defaultTargetKs, workflow string) { timer := time.NewTimer(defaultTimeout) defer timer.Stop() for { - output, err := vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", targetKs, "show", "--workflow", workflow, "--compact", "--include-logs=false") + output, err := vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", defaultTargetKs, "show", "--workflow", workflow, "--compact", "--include-logs=false") require.NoError(t, err, output) streams := gjson.Get(output, "workflows.0.shard_streams.*.streams") streams.ForEach(func(streamId, stream gjson.Result) bool { // For each stream diff --git a/go/test/endtoend/vreplication/initial_data_test.go b/go/test/endtoend/vreplication/initial_data_test.go index e6318e1760c..2fcb485be4c 100644 --- a/go/test/endtoend/vreplication/initial_data_test.go +++ 
b/go/test/endtoend/vreplication/initial_data_test.go @@ -31,15 +31,15 @@ func insertInitialData(t *testing.T) { defer closeConn() log.Infof("Inserting initial data") lines, _ := os.ReadFile("unsharded_init_data.sql") - execMultipleQueries(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), string(lines)) - execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "insert into customer_seq(id, next_id, cache) values(0, 100, 100);") - execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "insert into order_seq(id, next_id, cache) values(0, 100, 100);") - execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "insert into customer_seq2(id, next_id, cache) values(0, 100, 100);") + execMultipleQueries(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), string(lines)) + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "insert into customer_seq(id, next_id, cache) values(0, 100, 100);") + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "insert into order_seq(id, next_id, cache) values(0, 100, 100);") + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "insert into customer_seq2(id, next_id, cache) values(0, 100, 100);") log.Infof("Done inserting initial data") - waitForRowCount(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "product", 2) - waitForRowCount(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "customer", 3) - waitForQueryResult(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "select * from merchant", + waitForRowCount(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "product", 2) + waitForRowCount(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "customer", 3) + waitForQueryResult(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "select * from merchant", `[[VARCHAR("Monoprice") VARCHAR("eléctronics")] [VARCHAR("newegg") VARCHAR("elec†ronics")]]`) insertJSONValues(t) @@ -52,12 +52,12 @@ func insertJSONValues(t *testing.T) { // insert null value combinations vtgateConn, closeConn := 
getVTGateConn() defer closeConn() - execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "insert into json_tbl(id, j3) values(1, \"{}\")") - execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "insert into json_tbl(id, j1, j3) values(2, \"{}\", \"{}\")") - execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "insert into json_tbl(id, j2, j3) values(3, \"{}\", \"{}\")") - execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "insert into json_tbl(id, j1, j2, j3) values(4, NULL, 'null', '\"null\"')") - execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "insert into json_tbl(id, j3) values(5, JSON_QUOTE('null'))") - execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "insert into json_tbl(id, j3) values(6, '{}')") + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "insert into json_tbl(id, j3) values(1, \"{}\")") + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "insert into json_tbl(id, j1, j3) values(2, \"{}\", \"{}\")") + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "insert into json_tbl(id, j2, j3) values(3, \"{}\", \"{}\")") + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "insert into json_tbl(id, j1, j2, j3) values(4, NULL, 'null', '\"null\"')") + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "insert into json_tbl(id, j3) values(5, JSON_QUOTE('null'))") + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "insert into json_tbl(id, j3) values(6, '{}')") id := 8 // 6 inserted above and one after copy phase is done @@ -68,7 +68,7 @@ func insertJSONValues(t *testing.T) { j1 := rand.IntN(numJsonValues) j2 := rand.IntN(numJsonValues) query := fmt.Sprintf(q, id, jsonValues[j1], jsonValues[j2]) - execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), query) + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), query) } } @@ -82,7 +82,7 @@ func 
insertMoreCustomers(t *testing.T, numCustomers int) { // that we reserved. vtgateConn, closeConn := getVTGateConn() defer closeConn() - maxID := waitForSequenceValue(t, vtgateConn, sourceKs, "customer_seq", numCustomers) + maxID := waitForSequenceValue(t, vtgateConn, defaultSourceKs, "customer_seq", numCustomers) // So we need to calculate the first value we reserved // from the max. cid := maxID - int64(numCustomers) @@ -97,28 +97,28 @@ func insertMoreCustomers(t *testing.T, numCustomers int) { } cid++ } - execVtgateQuery(t, vtgateConn, targetKs, sql) + execVtgateQuery(t, vtgateConn, defaultTargetKs, sql) } func insertMoreProducts(t *testing.T) { vtgateConn, closeConn := getVTGateConn() defer closeConn() sql := "insert into product(pid, description) values(3, 'cpu'),(4, 'camera'),(5, 'mouse');" - execVtgateQuery(t, vtgateConn, sourceKs, sql) + execVtgateQuery(t, vtgateConn, defaultSourceKs, sql) } func insertMoreProductsForSourceThrottler(t *testing.T) { vtgateConn, closeConn := getVTGateConn() defer closeConn() sql := "insert into product(pid, description) values(103, 'new-cpu'),(104, 'new-camera'),(105, 'new-mouse');" - execVtgateQuery(t, vtgateConn, sourceKs, sql) + execVtgateQuery(t, vtgateConn, defaultSourceKs, sql) } func insertMoreProductsForTargetThrottler(t *testing.T) { vtgateConn, closeConn := getVTGateConn() defer closeConn() sql := "insert into product(pid, description) values(203, 'new-cpu'),(204, 'new-camera'),(205, 'new-mouse');" - execVtgateQuery(t, vtgateConn, sourceKs, sql) + execVtgateQuery(t, vtgateConn, defaultSourceKs, sql) } var blobTableQueries = []string{ @@ -137,6 +137,6 @@ func insertIntoBlobTable(t *testing.T) { vtgateConn, closeConn := getVTGateConn() defer closeConn() for _, query := range blobTableQueries { - execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), query) + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), query) } } diff --git a/go/test/endtoend/vreplication/migrate_test.go 
b/go/test/endtoend/vreplication/migrate_test.go index 31d23547841..7a9c42a73bd 100644 --- a/go/test/endtoend/vreplication/migrate_test.go +++ b/go/test/endtoend/vreplication/migrate_test.go @@ -63,7 +63,7 @@ func TestMigrateUnsharded(t *testing.T) { }() defaultCell := vc.Cells[vc.CellNames[0]] - _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, "0", + _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, defaultSourceKs, "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) require.NoError(t, err, "failed to create product keyspace") @@ -91,7 +91,7 @@ func TestMigrateUnsharded(t *testing.T) { extVtgateConn := getConnection(t, extVc.ClusterConfig.hostname, extVc.ClusterConfig.vtgateMySQLPort) insertInitialDataIntoExternalCluster(t, extVtgateConn) - targetPrimary := vc.getPrimaryTablet(t, sourceKs, "0") + targetPrimary := vc.getPrimaryTablet(t, defaultSourceKs, "0") var output, expected string @@ -115,26 +115,26 @@ func TestMigrateUnsharded(t *testing.T) { require.Equal(t, "/vitess/global", gjson.Get(output, "topo_root").String()) }) - ksWorkflow := fmt.Sprintf("%s.e1", sourceKs) + ksWorkflow := fmt.Sprintf("%s.e1", defaultSourceKs) t.Run("migrate from external cluster", func(t *testing.T) { if output, err = vc.VtctldClient.ExecuteCommandWithOutput("Migrate", - "--target-keyspace", sourceKs, "--workflow", "e1", + "--target-keyspace", defaultSourceKs, "--workflow", "e1", "create", "--source-keyspace", "rating", "--mount-name", "ext1", "--all-tables", "--cells=extcell1", "--tablet-types=primary,replica"); err != nil { t.Fatalf("Migrate command failed with %+v : %s\n", err, output) } waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) - expectNumberOfStreams(t, vtgateConn, "migrate", "e1", fmt.Sprintf("%s:0", sourceKs), 1) - waitForRowCountInTablet(t, targetPrimary, sourceKs, "rating", 2) - waitForRowCountInTablet(t, targetPrimary, sourceKs, "review", 3) + expectNumberOfStreams(t, 
vtgateConn, "migrate", "e1", fmt.Sprintf("%s:0", defaultSourceKs), 1) + waitForRowCountInTablet(t, targetPrimary, defaultSourceKs, "rating", 2) + waitForRowCountInTablet(t, targetPrimary, defaultSourceKs, "review", 3) execVtgateQuery(t, extVtgateConn, "rating", "insert into review(rid, pid, review) values(4, 1, 'review4');") execVtgateQuery(t, extVtgateConn, "rating", "insert into rating(gid, pid, rating) values(3, 1, 3);") - waitForRowCountInTablet(t, targetPrimary, sourceKs, "rating", 3) - waitForRowCountInTablet(t, targetPrimary, sourceKs, "review", 4) + waitForRowCountInTablet(t, targetPrimary, defaultSourceKs, "rating", 3) + waitForRowCountInTablet(t, targetPrimary, defaultSourceKs, "review", 4) doVDiff(t, ksWorkflow, "extcell1") output, err = vc.VtctldClient.ExecuteCommandWithOutput("Migrate", - "--target-keyspace", sourceKs, "--workflow", "e1", "show") + "--target-keyspace", defaultSourceKs, "--workflow", "e1", "show") require.NoError(t, err, "Migrate command failed with %s", output) wf := gjson.Get(output, "workflows").Array()[0] @@ -142,32 +142,32 @@ func TestMigrateUnsharded(t *testing.T) { require.Equal(t, "Migrate", wf.Get("workflow_type").String()) output, err = vc.VtctldClient.ExecuteCommandWithOutput("Migrate", - "--target-keyspace", sourceKs, "--workflow", "e1", "status", "--format=json") + "--target-keyspace", defaultSourceKs, "--workflow", "e1", "status", "--format=json") require.NoError(t, err, "Migrate command failed with %s", output) - require.Equal(t, "Running", gjson.Get(output, fmt.Sprintf("shard_streams.%s/0.streams.0.status", sourceKs)).String()) + require.Equal(t, "Running", gjson.Get(output, fmt.Sprintf("shard_streams.%s/0.streams.0.status", defaultSourceKs)).String()) output, err = vc.VtctldClient.ExecuteCommandWithOutput("Migrate", - "--target-keyspace", sourceKs, "--workflow", "e1", "complete") + "--target-keyspace", defaultSourceKs, "--workflow", "e1", "complete") require.NoError(t, err, "Migrate command failed with %s", output) - 
expectNumberOfStreams(t, vtgateConn, "migrate", "e1", fmt.Sprintf("%s:0", sourceKs), 0) + expectNumberOfStreams(t, vtgateConn, "migrate", "e1", fmt.Sprintf("%s:0", defaultSourceKs), 0) }) t.Run("cancel migrate workflow", func(t *testing.T) { - execVtgateQuery(t, vtgateConn, sourceKs, "drop table review,rating") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "drop table review,rating") output, err = vc.VtctldClient.ExecuteCommandWithOutput("Migrate", - "--target-keyspace", sourceKs, "--workflow", "e1", "Create", "--source-keyspace", "rating", + "--target-keyspace", defaultSourceKs, "--workflow", "e1", "Create", "--source-keyspace", "rating", "--mount-name", "ext1", "--all-tables", "--auto-start=false", "--cells=extcell1") require.NoError(t, err, "Migrate command failed with %s", output) - expectNumberOfStreams(t, vtgateConn, "migrate", "e1", fmt.Sprintf("%s:0", sourceKs), 1, binlogdatapb.VReplicationWorkflowState_Stopped.String()) - waitForRowCountInTablet(t, targetPrimary, sourceKs, "rating", 0) - waitForRowCountInTablet(t, targetPrimary, sourceKs, "review", 0) + expectNumberOfStreams(t, vtgateConn, "migrate", "e1", fmt.Sprintf("%s:0", defaultSourceKs), 1, binlogdatapb.VReplicationWorkflowState_Stopped.String()) + waitForRowCountInTablet(t, targetPrimary, defaultSourceKs, "rating", 0) + waitForRowCountInTablet(t, targetPrimary, defaultSourceKs, "review", 0) output, err = vc.VtctldClient.ExecuteCommandWithOutput("Migrate", - "--target-keyspace", sourceKs, "--workflow", "e1", "cancel") + "--target-keyspace", defaultSourceKs, "--workflow", "e1", "cancel") require.NoError(t, err, "Migrate command failed with %s", output) - expectNumberOfStreams(t, vtgateConn, "migrate", "e1", fmt.Sprintf("%s:0", sourceKs), 0) + expectNumberOfStreams(t, vtgateConn, "migrate", "e1", fmt.Sprintf("%s:0", defaultSourceKs), 0) var found bool found, err = checkIfTableExists(t, vc, "zone1-100", "review") require.NoError(t, err) @@ -246,7 +246,7 @@ func TestMigrateSharded(t *testing.T) { 
ksWorkflow := "rating.e1" if output, err = extVc.VtctldClient.ExecuteCommandWithOutput("Migrate", "--target-keyspace", "rating", "--workflow", "e1", - "create", "--source-keyspace", targetKs, "--mount-name", "external", "--all-tables", "--cells=zone1", + "create", "--source-keyspace", defaultTargetKs, "--mount-name", "external", "--all-tables", "--cells=zone1", "--tablet-types=primary"); err != nil { require.FailNow(t, "Migrate command failed with %+v : %s\n", err, output) } diff --git a/go/test/endtoend/vreplication/movetables_buffering_test.go b/go/test/endtoend/vreplication/movetables_buffering_test.go index 3b13091f161..0f1846455f0 100644 --- a/go/test/endtoend/vreplication/movetables_buffering_test.go +++ b/go/test/endtoend/vreplication/movetables_buffering_test.go @@ -26,7 +26,7 @@ func TestMoveTablesBuffering(t *testing.T) { currentWorkflowType = binlogdatapb.VReplicationWorkflowType_MoveTables setupMinimalTargetKeyspace(t) tables := "loadtest" - err := tstWorkflowExec(t, defaultCellName, workflowName, sourceKs, targetKs, + err := tstWorkflowExec(t, defaultCellName, workflowName, defaultSourceKs, defaultTargetKs, tables, workflowActionCreate, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) @@ -39,8 +39,8 @@ func TestMoveTablesBuffering(t *testing.T) { catchup(t, targetTab1, workflowName, "MoveTables") catchup(t, targetTab2, workflowName, "MoveTables") - vdiff(t, targetKs, workflowName, "", nil) - waitForLowLag(t, targetKs, workflowName) + vdiff(t, defaultTargetKs, workflowName, "", nil) + waitForLowLag(t, defaultTargetKs, workflowName) for i := 0; i < 10; i++ { tstWorkflowSwitchReadsAndWrites(t) time.Sleep(loadTestBufferingWindowDuration + 1*time.Second) diff --git a/go/test/endtoend/vreplication/movetables_mirrortraffic_test.go b/go/test/endtoend/vreplication/movetables_mirrortraffic_test.go index f7e204ff888..90d880bb793 100644 --- 
a/go/test/endtoend/vreplication/movetables_mirrortraffic_test.go +++ b/go/test/endtoend/vreplication/movetables_mirrortraffic_test.go @@ -45,9 +45,9 @@ func testMoveTablesMirrorTraffic(t *testing.T, flavor workflowFlavor) { workflowInfo: &workflowInfo{ vc: vc, workflowName: workflowName, - targetKeyspace: targetKs, + targetKeyspace: defaultTargetKs, }, - sourceKeyspace: sourceKs, + sourceKeyspace: defaultSourceKs, tables: "customer,loadtest,customer2", mirrorFlags: []string{"--percent", "25"}, } @@ -62,7 +62,7 @@ func testMoveTablesMirrorTraffic(t *testing.T, flavor workflowFlavor) { // Mirror rules can be created after a MoveTables workflow is created. mt.MirrorTraffic() confirmMirrorRulesExist(t) - expectMirrorRules(t, sourceKs, targetKs, tables, []topodatapb.TabletType{ + expectMirrorRules(t, defaultSourceKs, defaultTargetKs, tables, []topodatapb.TabletType{ topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY, @@ -72,7 +72,7 @@ func testMoveTablesMirrorTraffic(t *testing.T, flavor workflowFlavor) { mtwf.mirrorFlags[1] = "50" mt.MirrorTraffic() confirmMirrorRulesExist(t) - expectMirrorRules(t, sourceKs, targetKs, tables, []topodatapb.TabletType{ + expectMirrorRules(t, defaultSourceKs, defaultTargetKs, tables, []topodatapb.TabletType{ topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY, @@ -83,7 +83,7 @@ func testMoveTablesMirrorTraffic(t *testing.T, flavor workflowFlavor) { mtwf.mirrorFlags[1] = "75" mt.MirrorTraffic() confirmMirrorRulesExist(t) - expectMirrorRules(t, sourceKs, targetKs, tables, []topodatapb.TabletType{ + expectMirrorRules(t, defaultSourceKs, defaultTargetKs, tables, []topodatapb.TabletType{ topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY, @@ -103,7 +103,7 @@ func testMoveTablesMirrorTraffic(t *testing.T, flavor workflowFlavor) { mtwf.mirrorFlags = append(mtwf.mirrorFlags, "--tablet-types", "primary") mt.MirrorTraffic() 
confirmMirrorRulesExist(t) - expectMirrorRules(t, sourceKs, targetKs, tables, []topodatapb.TabletType{ + expectMirrorRules(t, defaultSourceKs, defaultTargetKs, tables, []topodatapb.TabletType{ topodatapb.TabletType_PRIMARY, }, 100) diff --git a/go/test/endtoend/vreplication/partial_movetables_test.go b/go/test/endtoend/vreplication/partial_movetables_test.go index 3cb4c8b33b1..27df9a7ce8f 100644 --- a/go/test/endtoend/vreplication/partial_movetables_test.go +++ b/go/test/endtoend/vreplication/partial_movetables_test.go @@ -35,7 +35,7 @@ import ( // Before canceling, we first switch traffic to the target keyspace and then reverse it back to the source keyspace. // This tests that artifacts are being properly cleaned up when a MoveTables ia canceled. func testCancel(t *testing.T) { - sourceKeyspace := targetKs + sourceKeyspace := defaultTargetKs targetKeyspace := "customer2" workflowName := "partial80DashForCancel" ksWorkflow := fmt.Sprintf("%s.%s", targetKeyspace, workflowName) @@ -109,8 +109,8 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) { }() vc = setupMinimalCluster(t) defer vc.TearDown() - sourceKeyspace := sourceKs - targetKeyspace := targetKs + sourceKeyspace := defaultSourceKs + targetKeyspace := defaultTargetKs workflowName := "wf1" targetTabs := setupMinimalTargetKeyspace(t) targetTab80Dash := targetTabs["80-"] @@ -135,7 +135,7 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) { emptyGlobalRoutingRules := "{}\n" - sourceKeyspace = targetKs + sourceKeyspace = defaultTargetKs targetKeyspace = "customer2" // These should be listed in shard order diff --git a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go index 418e1aca8d9..5c0d6e5695b 100644 --- a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go +++ b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go @@ -74,11 +74,11 @@ var defaultWorkflowExecOptions = 
&workflowExecOptions{ } func createReshardWorkflow(t *testing.T, sourceShards, targetShards string) error { - err := tstWorkflowExec(t, defaultCellName, workflowName, targetKs, targetKs, + err := tstWorkflowExec(t, defaultCellName, workflowName, defaultTargetKs, defaultTargetKs, "", workflowActionCreate, "", sourceShards, targetShards, defaultWorkflowExecOptions) require.NoError(t, err) waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) - confirmTablesHaveSecondaryKeys(t, []*cluster.VttabletProcess{targetTab1}, targetKs, "") + confirmTablesHaveSecondaryKeys(t, []*cluster.VttabletProcess{targetTab1}, defaultTargetKs, "") catchup(t, targetTab1, workflowName, "Reshard") catchup(t, targetTab2, workflowName, "Reshard") doVDiff(t, ksWorkflow, "") @@ -89,24 +89,24 @@ func createMoveTablesWorkflow(t *testing.T, tables string) { if tables == "" { tables = "customer" } - err := tstWorkflowExec(t, defaultCellName, workflowName, sourceKs, targetKs, + err := tstWorkflowExec(t, defaultCellName, workflowName, defaultSourceKs, defaultTargetKs, tables, workflowActionCreate, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) - confirmTablesHaveSecondaryKeys(t, []*cluster.VttabletProcess{targetTab1}, targetKs, tables) + confirmTablesHaveSecondaryKeys(t, []*cluster.VttabletProcess{targetTab1}, defaultTargetKs, tables) catchup(t, targetTab1, workflowName, "MoveTables") catchup(t, targetTab2, workflowName, "MoveTables") doVDiff(t, ksWorkflow, "") } func tstWorkflowAction(t *testing.T, action, tabletTypes, cells string) error { - return tstWorkflowExec(t, cells, workflowName, sourceKs, targetKs, "customer", action, tabletTypes, "", "", defaultWorkflowExecOptions) + return tstWorkflowExec(t, cells, workflowName, defaultSourceKs, defaultTargetKs, "customer", action, tabletTypes, "", "", defaultWorkflowExecOptions) } // tstWorkflowExec 
executes a MoveTables or Reshard workflow command using // vtctldclient. // tstWorkflowExecVtctl instead. -func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables, action, tabletTypes, +func tstWorkflowExec(t *testing.T, cells, workflow, defaultSourceKs, defaultTargetKs, tables, action, tabletTypes, sourceShards, targetShards string, options *workflowExecOptions) error { var args []string @@ -116,12 +116,12 @@ func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables, args = append(args, "Reshard") } - args = append(args, "--workflow", workflow, "--target-keyspace", targetKs, action) + args = append(args, "--workflow", workflow, "--target-keyspace", defaultTargetKs, action) switch action { case workflowActionCreate: if currentWorkflowType == binlogdatapb.VReplicationWorkflowType_MoveTables { - args = append(args, "--source-keyspace", sourceKs) + args = append(args, "--source-keyspace", defaultSourceKs) if tables != "" { args = append(args, "--tables", tables) } else { @@ -220,7 +220,7 @@ func testWorkflowUpdate(t *testing.T) { _, err := vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", "noexist", "update", "--workflow", "noexist", "--tablet-types", tabletTypes) require.Error(t, err) // Change the tablet-types to rdonly. - resp, err := vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", targetKs, "update", "--workflow", workflowName, "--tablet-types", "rdonly") + resp, err := vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", defaultTargetKs, "update", "--workflow", workflowName, "--tablet-types", "rdonly") require.NoError(t, err, err) // Confirm that we changed the workflow. var ures vtctldatapb.WorkflowUpdateResponse @@ -230,7 +230,7 @@ func testWorkflowUpdate(t *testing.T) { require.Greater(t, len(ures.Details), 0) require.True(t, ures.Details[0].Changed) // Change tablet-types back to primary,replica,rdonly. 
- resp, err = vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", targetKs, "update", "--workflow", workflowName, "--tablet-types", tabletTypes) + resp, err = vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", defaultTargetKs, "update", "--workflow", workflowName, "--tablet-types", tabletTypes) require.NoError(t, err, err) // Confirm that we changed the workflow. err = protojson.Unmarshal([]byte(resp), &ures) @@ -238,7 +238,7 @@ func testWorkflowUpdate(t *testing.T) { require.Greater(t, len(ures.Details), 0) require.True(t, ures.Details[0].Changed) // Execute a no-op as tablet-types is already primary,replica,rdonly. - resp, err = vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", targetKs, "update", "--workflow", workflowName, "--tablet-types", tabletTypes) + resp, err = vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", defaultTargetKs, "update", "--workflow", workflowName, "--tablet-types", tabletTypes) require.NoError(t, err, err) // Confirm that we didn't change the workflow. 
err = protojson.Unmarshal([]byte(resp), &ures) @@ -297,8 +297,8 @@ func validateWritesRouteToSource(t *testing.T) { defer closeConn() insertQuery := "insert into customer(name, cid) values('tempCustomer2', 200)" matchInsertQuery := "insert into customer(`name`, cid) values" - assertQueryExecutesOnTablet(t, vtgateConn, sourceTab, targetKs, insertQuery, matchInsertQuery) - execVtgateQuery(t, vtgateConn, targetKs, "delete from customer where cid = 200") + assertQueryExecutesOnTablet(t, vtgateConn, sourceTab, defaultTargetKs, insertQuery, matchInsertQuery) + execVtgateQuery(t, vtgateConn, defaultTargetKs, "delete from customer where cid = 200") } func validateWritesRouteToTarget(t *testing.T) { @@ -306,10 +306,10 @@ func validateWritesRouteToTarget(t *testing.T) { defer closeConn() insertQuery := "insert into customer(name, cid) values('tempCustomer3', 101)" matchInsertQuery := "insert into customer(`name`, cid) values" - assertQueryExecutesOnTablet(t, vtgateConn, targetTab2, targetKs, insertQuery, matchInsertQuery) + assertQueryExecutesOnTablet(t, vtgateConn, targetTab2, defaultTargetKs, insertQuery, matchInsertQuery) insertQuery = "insert into customer(name, cid) values('tempCustomer3', 102)" - assertQueryExecutesOnTablet(t, vtgateConn, targetTab1, targetKs, insertQuery, matchInsertQuery) - execVtgateQuery(t, vtgateConn, targetKs, "delete from customer where cid in (101, 102)") + assertQueryExecutesOnTablet(t, vtgateConn, targetTab1, defaultTargetKs, insertQuery, matchInsertQuery) + execVtgateQuery(t, vtgateConn, defaultTargetKs, "delete from customer where cid in (101, 102)") } func revert(t *testing.T, workflowType string) { @@ -319,7 +319,7 @@ func revert(t *testing.T, workflowType string) { validateReadsRouteToSource(t, "replica") // cancel the workflow to cleanup - _, err := vc.VtctldClient.ExecuteCommandWithOutput(workflowType, "--target-keyspace", targetKs, "--workflow", workflowName, "cancel") + _, err := vc.VtctldClient.ExecuteCommandWithOutput(workflowType, 
"--target-keyspace", defaultTargetKs, "--workflow", workflowName, "cancel") require.NoError(t, err, fmt.Sprintf("%s Cancel error: %v", workflowType, err)) } @@ -360,7 +360,7 @@ func TestBasicV2Workflows(t *testing.T) { // Internal tables like the lifecycle ones for OnlineDDL should be ignored ddlSQL := "ALTER TABLE customer MODIFY cid bigint UNSIGNED" - tstApplySchemaOnlineDDL(t, ddlSQL, sourceKs) + tstApplySchemaOnlineDDL(t, ddlSQL, defaultSourceKs) testMoveTablesV2Workflow(t) testReshardV2Workflow(t) @@ -386,72 +386,72 @@ func testVSchemaForSequenceAfterMoveTables(t *testing.T) { // use MoveTables to move customer2 from product to customer using currentWorkflowType = binlogdatapb.VReplicationWorkflowType_MoveTables - err := tstWorkflowExec(t, defaultCellName, "wf2", sourceKs, targetKs, + err := tstWorkflowExec(t, defaultCellName, "wf2", defaultSourceKs, defaultTargetKs, "customer2", workflowActionCreate, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) - waitForWorkflowState(t, vc, fmt.Sprintf("%s.wf2", targetKs), binlogdatapb.VReplicationWorkflowState_Running.String()) - waitForLowLag(t, targetKs, "wf2") + waitForWorkflowState(t, vc, fmt.Sprintf("%s.wf2", defaultTargetKs), binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForLowLag(t, defaultTargetKs, "wf2") - err = tstWorkflowExec(t, defaultCellName, "wf2", sourceKs, targetKs, + err = tstWorkflowExec(t, defaultCellName, "wf2", defaultSourceKs, defaultTargetKs, "", workflowActionSwitchTraffic, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) - err = tstWorkflowExec(t, defaultCellName, "wf2", sourceKs, targetKs, + err = tstWorkflowExec(t, defaultCellName, "wf2", defaultSourceKs, defaultTargetKs, "", workflowActionComplete, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) vtgateConn, closeConn := getVTGateConn() defer closeConn() // sanity check - output, err := vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", sourceKs) + output, err := 
vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", defaultSourceKs) require.NoError(t, err) assert.NotContains(t, output, "customer2\"", "customer2 still found in keyspace product") - waitForRowCount(t, vtgateConn, targetKs, "customer2", 3) + waitForRowCount(t, vtgateConn, defaultTargetKs, "customer2", 3) // check that customer2 has the sequence tag - output, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", targetKs) + output, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", defaultTargetKs) require.NoError(t, err) assert.Contains(t, output, "\"sequence\": \"customer_seq2\"", "customer2 sequence missing in keyspace customer") // ensure sequence is available to vtgate num := 5 for i := 0; i < num; i++ { - execVtgateQuery(t, vtgateConn, targetKs, "insert into customer2(name) values('a')") + execVtgateQuery(t, vtgateConn, defaultTargetKs, "insert into customer2(name) values('a')") } - waitForRowCount(t, vtgateConn, targetKs, "customer2", 3+num) + waitForRowCount(t, vtgateConn, defaultTargetKs, "customer2", 3+num) want := fmt.Sprintf("[[INT32(%d)]]", 100+num-1) - waitForQueryResult(t, vtgateConn, targetKs, "select max(cid) from customer2", want) + waitForQueryResult(t, vtgateConn, defaultTargetKs, "select max(cid) from customer2", want) // use MoveTables to move customer2 back to product. 
Note that now the table has an associated sequence - err = tstWorkflowExec(t, defaultCellName, "wf3", targetKs, sourceKs, + err = tstWorkflowExec(t, defaultCellName, "wf3", defaultTargetKs, defaultSourceKs, "customer2", workflowActionCreate, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) - waitForWorkflowState(t, vc, fmt.Sprintf("%s.wf3", sourceKs), binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.wf3", defaultSourceKs), binlogdatapb.VReplicationWorkflowState_Running.String()) - waitForLowLag(t, sourceKs, "wf3") - err = tstWorkflowExec(t, defaultCellName, "wf3", targetKs, sourceKs, + waitForLowLag(t, defaultSourceKs, "wf3") + err = tstWorkflowExec(t, defaultCellName, "wf3", defaultTargetKs, defaultSourceKs, "", workflowActionSwitchTraffic, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) - err = tstWorkflowExec(t, defaultCellName, "wf3", targetKs, sourceKs, + err = tstWorkflowExec(t, defaultCellName, "wf3", defaultTargetKs, defaultSourceKs, "", workflowActionComplete, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) // sanity check - output, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", sourceKs) + output, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", defaultSourceKs) require.NoError(t, err) assert.Contains(t, output, "customer2\"", "customer2 not found in keyspace product ") // check that customer2 still has the sequence tag - output, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", sourceKs) + output, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", defaultSourceKs) require.NoError(t, err) assert.Contains(t, output, "\"sequence\": \"customer_seq2\"", "customer2 still found in keyspace product") // ensure sequence is available to vtgate for i := 0; i < num; i++ { - execVtgateQuery(t, vtgateConn, sourceKs, "insert into customer2(name) values('a')") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "insert into 
customer2(name) values('a')") } - waitForRowCount(t, vtgateConn, sourceKs, "customer2", 3+num+num) - res := execVtgateQuery(t, vtgateConn, sourceKs, "select max(cid) from customer2") + waitForRowCount(t, vtgateConn, defaultSourceKs, "customer2", 3+num+num) + res := execVtgateQuery(t, vtgateConn, defaultSourceKs, "select max(cid) from customer2") cid, err := res.Rows[0][0].ToInt() require.NoError(t, err) require.GreaterOrEqual(t, cid, 100+num+num-1) @@ -473,11 +473,11 @@ func testReplicatingWithPKEnumCols(t *testing.T) { // typ is an enum, with soho having a stored and binlogged value of 2 deleteQuery := "delete from customer where cid = 2 and typ = 'soho'" insertQuery := "insert into customer(cid, name, typ, sport, meta) values(2, 'Paül','soho','cricket',convert(x'7b7d' using utf8mb4))" - execVtgateQuery(t, vtgateConn, sourceKs, deleteQuery) - waitForNoWorkflowLag(t, vc, targetKs, workflowName) + execVtgateQuery(t, vtgateConn, defaultSourceKs, deleteQuery) + waitForNoWorkflowLag(t, vc, defaultTargetKs, workflowName) doVDiff(t, ksWorkflow, "") - execVtgateQuery(t, vtgateConn, sourceKs, insertQuery) - waitForNoWorkflowLag(t, vc, targetKs, workflowName) + execVtgateQuery(t, vtgateConn, defaultSourceKs, insertQuery) + waitForNoWorkflowLag(t, vc, defaultTargetKs, workflowName) doVDiff(t, ksWorkflow, "") } @@ -505,7 +505,7 @@ func testReshardV2Workflow(t *testing.T) { return default: // Use a random customer type for each record. 
- _ = execVtgateQuery(t, dataGenConn, targetKs, fmt.Sprintf("insert into customer (cid, name, typ) values (%d, 'tempCustomer%d', %s)", + _ = execVtgateQuery(t, dataGenConn, defaultTargetKs, fmt.Sprintf("insert into customer (cid, name, typ) values (%d, 'tempCustomer%d', %s)", id, id, customerTypes[rand.IntN(len(customerTypes))])) } time.Sleep(1 * time.Millisecond) @@ -515,8 +515,8 @@ func testReshardV2Workflow(t *testing.T) { // create internal tables on the original customer shards that should be // ignored and not show up on the new shards - execMultipleQueries(t, vtgateConn, targetKs+"/-80", internalSchema) - execMultipleQueries(t, vtgateConn, targetKs+"/80-", internalSchema) + execMultipleQueries(t, vtgateConn, defaultTargetKs+"/-80", internalSchema) + execMultipleQueries(t, vtgateConn, defaultTargetKs+"/80-", internalSchema) createAdditionalTargetShards(t, "-40,40-80,80-c0,c0-") createReshardWorkflow(t, "-80,80-", "-40,40-80,80-c0,c0-") @@ -525,8 +525,8 @@ func testReshardV2Workflow(t *testing.T) { // Verify that we've properly ignored any internal operational tables // and that they were not copied to the new target shards - verifyNoInternalTables(t, vtgateConn, targetKs+"/-40") - verifyNoInternalTables(t, vtgateConn, targetKs+"/c0-") + verifyNoInternalTables(t, vtgateConn, defaultTargetKs+"/-40") + verifyNoInternalTables(t, vtgateConn, defaultTargetKs+"/c0-") // Confirm that updating Reshard workflows works. testWorkflowUpdate(t) @@ -536,23 +536,23 @@ func testReshardV2Workflow(t *testing.T) { // Confirm that we lost no customer related writes during the Reshard. 
dataGenCancel() dataGenWg.Wait() - cres := execVtgateQuery(t, dataGenConn, targetKs, "select count(*) from customer") + cres := execVtgateQuery(t, dataGenConn, defaultTargetKs, "select count(*) from customer") require.Len(t, cres.Rows, 1) - waitForNoWorkflowLag(t, vc, targetKs, "customer_name") - cnres := execVtgateQuery(t, dataGenConn, targetKs, "select count(*) from customer_name") + waitForNoWorkflowLag(t, vc, defaultTargetKs, "customer_name") + cnres := execVtgateQuery(t, dataGenConn, defaultTargetKs, "select count(*) from customer_name") require.Len(t, cnres.Rows, 1) require.EqualValues(t, cres.Rows, cnres.Rows) if debugMode { // We expect the row count to differ in enterprise_customer because it is // using a `where typ='enterprise'` filter. So the count is only for debug // info. - ecres := execVtgateQuery(t, dataGenConn, targetKs, "select count(*) from enterprise_customer") + ecres := execVtgateQuery(t, dataGenConn, defaultTargetKs, "select count(*) from enterprise_customer") t.Logf("Done inserting customer data. Record counts in customer: %s, customer_name: %s, enterprise_customer: %s", cres.Rows[0][0].ToString(), cnres.Rows[0][0].ToString(), ecres.Rows[0][0].ToString()) } // We also do a vdiff on the materialize workflows for good measure. 
- doVtctldclientVDiff(t, targetKs, "customer_name", "", nil) - doVtctldclientVDiff(t, targetKs, "enterprise_customer", "", nil) + doVtctldclientVDiff(t, defaultTargetKs, "customer_name", "", nil) + doVtctldclientVDiff(t, defaultTargetKs, "enterprise_customer", "", nil) } func testMoveTablesV2Workflow(t *testing.T) { @@ -564,7 +564,7 @@ func testMoveTablesV2Workflow(t *testing.T) { if !debugMode { return } - output, err := vc.VtctldClient.ExecuteCommandWithOutput("materialize", "--target-keyspace", targetKs, "show", "--workflow=customer_name", "--compact", "--include-logs=false") + output, err := vc.VtctldClient.ExecuteCommandWithOutput("materialize", "--target-keyspace", defaultTargetKs, "show", "--workflow=customer_name", "--compact", "--include-logs=false") require.NoError(t, err) t.Logf("Materialize show output: %s", output) } @@ -589,7 +589,7 @@ func testMoveTablesV2Workflow(t *testing.T) { require.NoError(t, err) return len(workflows) == 0 } - listAllArgs := []string{"workflow", "--keyspace", targetKs, "list"} + listAllArgs := []string{"workflow", "--keyspace", defaultTargetKs, "list"} output, err := vc.VtctldClient.ExecuteCommandWithOutput(listAllArgs...) 
require.NoError(t, err) @@ -604,7 +604,7 @@ func testMoveTablesV2Workflow(t *testing.T) { // Verify that we've properly ignored any internal operational tables // and that they were not copied to the new target keyspace - verifyNoInternalTables(t, vtgateConn, targetKs) + verifyNoInternalTables(t, vtgateConn, defaultTargetKs) testReplicatingWithPKEnumCols(t) @@ -674,9 +674,9 @@ func testPartialSwitches(t *testing.T) { tstWorkflowSwitchWrites(t) checkStates(t, nextState, nextState) // idempotency - keyspace := sourceKs + keyspace := defaultSourceKs if currentWorkflowType == binlogdatapb.VReplicationWorkflowType_Reshard { - keyspace = targetKs + keyspace = defaultTargetKs } waitForLowLag(t, keyspace, "wf1_reverse") tstWorkflowReverseReads(t, "replica,rdonly", "zone1") @@ -703,13 +703,13 @@ func testRestOfWorkflow(t *testing.T) { Threshold: throttlerConfig.Threshold * 5, CustomQuery: throttlerConfig.Query, } - res, err := throttler.UpdateThrottlerTopoConfigRaw(vc.VtctldClient, targetKs, req, nil, nil) + res, err := throttler.UpdateThrottlerTopoConfigRaw(vc.VtctldClient, defaultTargetKs, req, nil, nil) require.NoError(t, err, res) testPartialSwitches(t) // test basic forward and reverse flows - waitForLowLag(t, targetKs, "wf1") + waitForLowLag(t, defaultTargetKs, "wf1") tstWorkflowSwitchReads(t, "", "") checkStates(t, wrangler.WorkflowStateNotSwitched, wrangler.WorkflowStateReadsSwitched) validateReadsRouteToTarget(t, "replica,rdonly") @@ -721,9 +721,9 @@ func testRestOfWorkflow(t *testing.T) { validateWritesRouteToTarget(t) // this function is called for both MoveTables and Reshard, so the reverse workflows exist in different keyspaces - keyspace := sourceKs + keyspace := defaultSourceKs if currentWorkflowType == binlogdatapb.VReplicationWorkflowType_Reshard { - keyspace = targetKs + keyspace = defaultTargetKs } waitForLowLag(t, keyspace, "wf1_reverse") tstWorkflowReverseReads(t, "", "") @@ -736,7 +736,7 @@ func testRestOfWorkflow(t *testing.T) { 
validateReadsRouteToSource(t, "replica,rdonly") validateWritesRouteToSource(t) - waitForLowLag(t, targetKs, "wf1") + waitForLowLag(t, defaultTargetKs, "wf1") tstWorkflowSwitchWrites(t) checkStates(t, wrangler.WorkflowStateNotSwitched, wrangler.WorkflowStateWritesSwitched) validateReadsRouteToSource(t, "replica,rdonly") @@ -748,7 +748,7 @@ func testRestOfWorkflow(t *testing.T) { validateReadsRouteToSource(t, "replica,rdonly") validateWritesRouteToSource(t) - waitForLowLag(t, targetKs, "wf1") + waitForLowLag(t, defaultTargetKs, "wf1") tstWorkflowSwitchReads(t, "", "") checkStates(t, wrangler.WorkflowStateNotSwitched, wrangler.WorkflowStateReadsSwitched) validateReadsRouteToTarget(t, "replica,rdonly") @@ -775,9 +775,9 @@ func testRestOfWorkflow(t *testing.T) { require.Contains(t, err.Error(), wrangler.ErrWorkflowNotFullySwitched) // fully switch and complete - waitForLowLag(t, targetKs, "wf1") - waitForLowLag(t, targetKs, "customer_name") - waitForLowLag(t, targetKs, "enterprise_customer") + waitForLowLag(t, defaultTargetKs, "wf1") + waitForLowLag(t, defaultTargetKs, "customer_name") + waitForLowLag(t, defaultTargetKs, "enterprise_customer") tstWorkflowSwitchReadsAndWrites(t) validateReadsRouteToTarget(t, "replica,rdonly") validateWritesRouteToTarget(t) @@ -792,30 +792,30 @@ func setupCluster(t *testing.T) *VitessCluster { zone1 := vc.Cells["zone1"] zone2 := vc.Cells["zone2"] - vc.AddKeyspace(t, []*Cell{zone1, zone2}, sourceKs, "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) + vc.AddKeyspace(t, []*Cell{zone1, zone2}, defaultSourceKs, "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) defer getVTGateConn() verifyClusterHealth(t, vc) insertInitialData(t) defaultCell := vc.Cells[vc.CellNames[0]] - sourceTab = vc.Cells[defaultCell.Name].Keyspaces[sourceKs].Shards["0"].Tablets["zone1-100"].Vttablet + sourceTab = 
vc.Cells[defaultCell.Name].Keyspaces[defaultSourceKs].Shards["0"].Tablets["zone1-100"].Vttablet if defaultReplicas > 0 { - sourceReplicaTab = vc.Cells[defaultCell.Name].Keyspaces[sourceKs].Shards["0"].Tablets["zone1-101"].Vttablet + sourceReplicaTab = vc.Cells[defaultCell.Name].Keyspaces[defaultSourceKs].Shards["0"].Tablets["zone1-101"].Vttablet } if defaultRdonly > 0 { - sourceRdonlyTab = vc.Cells[defaultCell.Name].Keyspaces[sourceKs].Shards["0"].Tablets["zone1-102"].Vttablet + sourceRdonlyTab = vc.Cells[defaultCell.Name].Keyspaces[defaultSourceKs].Shards["0"].Tablets["zone1-102"].Vttablet } return vc } func setupTargetKeyspace(t *testing.T) { - if _, err := vc.AddKeyspace(t, []*Cell{vc.Cells["zone1"], vc.Cells["zone2"]}, targetKs, "-80,80-", + if _, err := vc.AddKeyspace(t, []*Cell{vc.Cells["zone1"], vc.Cells["zone2"]}, defaultTargetKs, "-80,80-", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, nil); err != nil { t.Fatal(err) } defaultCell := vc.Cells[vc.CellNames[0]] - custKs := vc.Cells[defaultCell.Name].Keyspaces[targetKs] + custKs := vc.Cells[defaultCell.Name].Keyspaces[defaultTargetKs] targetTab1 = custKs.Shards["-80"].Tablets["zone1-200"].Vttablet targetTab2 = custKs.Shards["80-"].Tablets["zone1-300"].Vttablet if defaultReplicas > 0 { @@ -842,24 +842,24 @@ func setupMinimalCluster(t *testing.T) *VitessCluster { zone1 := vc.Cells["zone1"] - vc.AddKeyspace(t, []*Cell{zone1}, sourceKs, "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) + vc.AddKeyspace(t, []*Cell{zone1}, defaultSourceKs, "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) verifyClusterHealth(t, vc) insertInitialData(t) - sourceTab = vc.Cells[defaultCell.Name].Keyspaces[sourceKs].Shards["0"].Tablets["zone1-100"].Vttablet + sourceTab = vc.Cells[defaultCell.Name].Keyspaces[defaultSourceKs].Shards["0"].Tablets["zone1-100"].Vttablet return vc } func setupMinimalTargetKeyspace(t *testing.T) 
map[string]*cluster.VttabletProcess { tablets := make(map[string]*cluster.VttabletProcess) - if _, err := vc.AddKeyspace(t, []*Cell{vc.Cells["zone1"]}, targetKs, "-80,80-", + if _, err := vc.AddKeyspace(t, []*Cell{vc.Cells["zone1"]}, defaultTargetKs, "-80,80-", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, nil); err != nil { t.Fatal(err) } defaultCell := vc.Cells[vc.CellNames[0]] - custKs := vc.Cells[defaultCell.Name].Keyspaces[targetKs] + custKs := vc.Cells[defaultCell.Name].Keyspaces[defaultTargetKs] targetTab1 = custKs.Shards["-80"].Tablets["zone1-200"].Vttablet targetTab2 = custKs.Shards["80-"].Tablets["zone1-300"].Vttablet tablets["-80"] = targetTab1 @@ -891,13 +891,13 @@ func switchReadsNew(t *testing.T, workflowType, cells, ksWorkflow string, revers func moveCustomerTableSwitchFlows(t *testing.T, cells []*Cell, sourceCellOrAlias string) { workflow := "wf1" - ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) + ksWorkflow := fmt.Sprintf("%s.%s", defaultTargetKs, workflow) tables := "customer" setupTargetKeyspace(t) workflowType := "MoveTables" var moveTablesAndWait = func() { - moveTablesAction(t, "Create", sourceCellOrAlias, workflow, sourceKs, targetKs, tables) + moveTablesAction(t, "Create", sourceCellOrAlias, workflow, defaultSourceKs, defaultTargetKs, tables) catchup(t, targetTab1, workflow, workflowType) catchup(t, targetTab2, workflow, workflowType) doVDiff(t, ksWorkflow, "") @@ -982,9 +982,9 @@ func moveCustomerTableSwitchFlows(t *testing.T, cells []*Cell, sourceCellOrAlias func createAdditionalTargetShards(t *testing.T, shards string) { defaultCell := vc.Cells[vc.CellNames[0]] - keyspace := vc.Cells[defaultCell.Name].Keyspaces[targetKs] - require.NoError(t, vc.AddShards(t, []*Cell{defaultCell, vc.Cells["zone2"]}, keyspace, shards, defaultReplicas, defaultRdonly, 400, targetKsOpts)) - custKs := vc.Cells[defaultCell.Name].Keyspaces[targetKs] + keyspace := vc.Cells[defaultCell.Name].Keyspaces[defaultTargetKs] + 
require.NoError(t, vc.AddShards(t, []*Cell{defaultCell, vc.Cells["zone2"]}, keyspace, shards, defaultReplicas, defaultRdonly, 400, defaultTargetKsOpts)) + custKs := vc.Cells[defaultCell.Name].Keyspaces[defaultTargetKs] targetTab2 = custKs.Shards["80-c0"].Tablets["zone1-600"].Vttablet targetTab1 = custKs.Shards["40-80"].Tablets["zone1-500"].Vttablet targetReplicaTab1 = custKs.Shards["-40"].Tablets["zone1-401"].Vttablet diff --git a/go/test/endtoend/vreplication/sidecardb_test.go b/go/test/endtoend/vreplication/sidecardb_test.go index 54c1a10130f..1f31b0f7190 100644 --- a/go/test/endtoend/vreplication/sidecardb_test.go +++ b/go/test/endtoend/vreplication/sidecardb_test.go @@ -67,7 +67,7 @@ func TestSidecarDB(t *testing.T) { cell1 := vc.Cells[defaultCellName] tablet100 := fmt.Sprintf("%s-100", defaultCellName) tablet101 := fmt.Sprintf("%s-101", defaultCellName) - vc.AddKeyspace(t, []*Cell{cell1}, keyspace, "0", initialProductVSchema, initialProductSchema, 1, 0, 100, sourceKsOpts) + vc.AddKeyspace(t, []*Cell{cell1}, keyspace, "0", initialProductVSchema, initialProductSchema, 1, 0, 100, defaultSourceKsOpts) shard0 := vc.Cells[defaultCellName].Keyspaces[keyspace].Shards[shard] tablet100Port := shard0.Tablets[tablet100].Vttablet.Port tablet101Port := shard0.Tablets[tablet101].Vttablet.Port diff --git a/go/test/endtoend/vreplication/time_zone_test.go b/go/test/endtoend/vreplication/time_zone_test.go index ebeb381b62b..ec53f14539f 100644 --- a/go/test/endtoend/vreplication/time_zone_test.go +++ b/go/test/endtoend/vreplication/time_zone_test.go @@ -33,8 +33,8 @@ import ( // TestMoveTablesTZ tests the conversion of datetime based on the source timezone passed to the MoveTables workflow func TestMoveTablesTZ(t *testing.T) { workflow := "tz" - ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) - ksReverseWorkflow := fmt.Sprintf("%s.%s_reverse", sourceKs, workflow) + ksWorkflow := fmt.Sprintf("%s.%s", defaultTargetKs, workflow) + ksReverseWorkflow := 
fmt.Sprintf("%s.%s_reverse", defaultSourceKs, workflow) vc = NewVitessCluster(t, nil) defer vc.TearDown() @@ -42,13 +42,13 @@ func TestMoveTablesTZ(t *testing.T) { cells := []*Cell{defaultCell} cell1 := vc.Cells["zone1"] - vc.AddKeyspace(t, []*Cell{cell1}, sourceKs, "0", initialProductVSchema, initialProductSchema, 0, 0, 100, sourceKsOpts) + vc.AddKeyspace(t, []*Cell{cell1}, defaultSourceKs, "0", initialProductVSchema, initialProductSchema, 0, 0, 100, defaultSourceKsOpts) vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) - productTab := vc.Cells[defaultCell.Name].Keyspaces[sourceKs].Shards["0"].Tablets["zone1-100"].Vttablet + productTab := vc.Cells[defaultCell.Name].Keyspaces[defaultSourceKs].Shards["0"].Tablets["zone1-100"].Vttablet timeZoneSQLBytes, _ := os.ReadFile("tz.sql") timeZoneSQL := string(timeZoneSQLBytes) @@ -75,23 +75,23 @@ func TestMoveTablesTZ(t *testing.T) { insertInitialData(t) - if _, err := vc.AddKeyspace(t, cells, targetKs, "0", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, targetKsOpts); err != nil { + if _, err := vc.AddKeyspace(t, cells, defaultTargetKs, "0", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, defaultTargetKsOpts); err != nil { t.Fatal(err) } - custKs := vc.Cells[defaultCell.Name].Keyspaces[targetKs] + custKs := vc.Cells[defaultCell.Name].Keyspaces[defaultTargetKs] customerTab := custKs.Shards["0"].Tablets["zone1-200"].Vttablet loadTimeZoneInfo(customerTab, timeZoneSQL, "UTC") tables := "datze" - output, err := vc.VtctldClient.ExecuteCommandWithOutput("MoveTables", "--workflow", workflow, "--target-keyspace", targetKs, "Create", - "--source-keyspace", sourceKs, "--tables", tables, "--source-time-zone", "US/Pacifik") + output, err := vc.VtctldClient.ExecuteCommandWithOutput("MoveTables", "--workflow", workflow, "--target-keyspace", defaultTargetKs, "Create", + "--source-keyspace", 
defaultSourceKs, "--tables", tables, "--source-time-zone", "US/Pacifik") require.Error(t, err, output) require.Contains(t, output, "time zone is invalid") - output, err = vc.VtctldClient.ExecuteCommandWithOutput("MoveTables", "--workflow", workflow, "--target-keyspace", targetKs, "Create", - "--source-keyspace", sourceKs, "--tables", tables, "--source-time-zone", "US/Pacific") + output, err = vc.VtctldClient.ExecuteCommandWithOutput("MoveTables", "--workflow", workflow, "--target-keyspace", defaultTargetKs, "Create", + "--source-keyspace", defaultSourceKs, "--tables", tables, "--source-time-zone", "US/Pacific") require.NoError(t, err, output) catchup(t, customerTab, workflow, "MoveTables") @@ -113,11 +113,11 @@ func TestMoveTablesTZ(t *testing.T) { doVDiff(t, ksWorkflow, "") query := "select * from datze" - qrSourceUSPacific, err := productTab.QueryTablet(query, sourceKs, true) + qrSourceUSPacific, err := productTab.QueryTablet(query, defaultSourceKs, true) require.NoError(t, err) require.NotNil(t, qrSourceUSPacific) - qrTargetUTC, err := customerTab.QueryTablet(query, targetKs, true) + qrTargetUTC, err := customerTab.QueryTablet(query, defaultTargetKs, true) require.NoError(t, err) require.NotNil(t, qrTargetUTC) @@ -161,7 +161,7 @@ func TestMoveTablesTZ(t *testing.T) { // user should be either running this query or have set their location in their driver to map from the time in Vitess/UTC to local query = "select id, convert_tz(dt1, 'UTC', 'US/Pacific') dt1, convert_tz(dt2, 'UTC', 'US/Pacific') dt2, convert_tz(ts1, 'UTC', 'US/Pacific') ts1 from datze" - qrTargetUSPacific, err := customerTab.QueryTablet(query, targetKs, true) + qrTargetUSPacific, err := customerTab.QueryTablet(query, defaultTargetKs, true) require.NoError(t, err) require.NotNil(t, qrTargetUSPacific) require.Equal(t, len(qrSourceUSPacific.Rows), len(qrTargetUSPacific.Rows)) @@ -172,7 +172,7 @@ func TestMoveTablesTZ(t *testing.T) { require.Equal(t, row.AsString("dt2", ""), 
qrTargetUSPacific.Named().Rows[i].AsString("dt2", "")) require.Equal(t, row.AsString("ts1", ""), qrTargetUSPacific.Named().Rows[i].AsString("ts1", "")) } - output, err = vc.VtctldClient.ExecuteCommandWithOutput("MoveTables", "--target-keyspace", targetKs, "SwitchTraffic", "--workflow", workflow) + output, err = vc.VtctldClient.ExecuteCommandWithOutput("MoveTables", "--target-keyspace", defaultTargetKs, "SwitchTraffic", "--workflow", workflow) require.NoError(t, err, output) qr, err := productTab.QueryTablet(sqlparser.BuildParsedQuery("select * from %s.vreplication where workflow='%s_reverse'", @@ -187,7 +187,7 @@ func TestMoveTablesTZ(t *testing.T) { } // inserts to test date conversions in reverse replication - execVtgateQuery(t, vtgateConn, targetKs, "insert into datze(id, dt2) values (13, '2022-01-01 18:20:30')") - execVtgateQuery(t, vtgateConn, targetKs, "insert into datze(id, dt2) values (14, '2022-04-01 12:06:07')") + execVtgateQuery(t, vtgateConn, defaultTargetKs, "insert into datze(id, dt2) values (13, '2022-01-01 18:20:30')") + execVtgateQuery(t, vtgateConn, defaultTargetKs, "insert into datze(id, dt2) values (14, '2022-04-01 12:06:07')") doVDiff(t, ksReverseWorkflow, "") } diff --git a/go/test/endtoend/vreplication/vdiff2_test.go b/go/test/endtoend/vreplication/vdiff2_test.go index 7e35a6a9894..862e10a3f4f 100644 --- a/go/test/endtoend/vreplication/vdiff2_test.go +++ b/go/test/endtoend/vreplication/vdiff2_test.go @@ -43,12 +43,12 @@ import ( ) type testCase struct { - name, typ, sourceKs, targetKs string - sourceShards, targetShards string - tables string - workflow string - tabletBaseID int - autoRetryError bool // if true, test auto retry on error against this workflow + name, typ, defaultSourceKs, defaultTargetKs string + sourceShards, targetShards string + tables string + workflow string + tabletBaseID int + autoRetryError bool // if true, test auto retry on error against this workflow // If testing auto retry on error, what new rows should be diff'd. 
These rows must have a PK > all initial rows. retryInsert string resume bool // test resume functionality with this workflow @@ -74,8 +74,8 @@ var testCases = []*testCase{ name: "MoveTables/unsharded to two shards", workflow: "p1c2", typ: "MoveTables", - sourceKs: sourceKs, - targetKs: targetKs, + defaultSourceKs: defaultSourceKs, + defaultTargetKs: defaultTargetKs, sourceShards: "0", targetShards: "-80,80-", tabletBaseID: 200, @@ -92,34 +92,34 @@ var testCases = []*testCase{ }, }, { - name: "Reshard Merge/split 2 to 3", - workflow: "c2c3", - typ: "Reshard", - sourceKs: targetKs, - targetKs: targetKs, - sourceShards: "-80,80-", - targetShards: "-40,40-a0,a0-", - tabletBaseID: 400, - autoRetryError: true, - retryInsert: `insert into customer(cid, name, typ) values(2005149300, 'Testy McTester Jr', 'enterprise'), (2005149350, 'Testy McTester II', 'enterprise')`, - resume: true, - resumeInsert: `insert into customer(cid, name, typ) values(2005149400, 'Testy McTester III', 'enterprise')`, - stop: true, + name: "Reshard Merge/split 2 to 3", + workflow: "c2c3", + typ: "Reshard", + defaultSourceKs: defaultTargetKs, + defaultTargetKs: defaultTargetKs, + sourceShards: "-80,80-", + targetShards: "-40,40-a0,a0-", + tabletBaseID: 400, + autoRetryError: true, + retryInsert: `insert into customer(cid, name, typ) values(2005149300, 'Testy McTester Jr', 'enterprise'), (2005149350, 'Testy McTester II', 'enterprise')`, + resume: true, + resumeInsert: `insert into customer(cid, name, typ) values(2005149400, 'Testy McTester III', 'enterprise')`, + stop: true, }, { - name: "Reshard/merge 3 to 1", - workflow: "c3c1", - typ: "Reshard", - sourceKs: targetKs, - targetKs: targetKs, - sourceShards: "-40,40-a0,a0-", - targetShards: "0", - tabletBaseID: 700, - autoRetryError: true, - retryInsert: `insert into customer(cid, name, typ) values(2005149500, 'Testy McTester IV', 'enterprise')`, - resume: true, - resumeInsert: `insert into customer(cid, name, typ) values(2005149600, 'Testy McTester 
V', 'enterprise'), (2005149650, 'Testy McTester VI', 'enterprise')`, - stop: true, + name: "Reshard/merge 3 to 1", + workflow: "c3c1", + typ: "Reshard", + defaultSourceKs: defaultTargetKs, + defaultTargetKs: defaultTargetKs, + sourceShards: "-40,40-a0,a0-", + targetShards: "0", + tabletBaseID: 700, + autoRetryError: true, + retryInsert: `insert into customer(cid, name, typ) values(2005149500, 'Testy McTester IV', 'enterprise')`, + resume: true, + resumeInsert: `insert into customer(cid, name, typ) values(2005149600, 'Testy McTester V', 'enterprise'), (2005149650, 'Testy McTester VI', 'enterprise')`, + stop: true, }, } @@ -149,7 +149,7 @@ func TestVDiff2(t *testing.T) { // The primary tablet is only added in the first cell. // We ONLY add primary tablets in this test. - _, err := vc.AddKeyspace(t, []*Cell{zone2, zone1, zone3}, sourceKs, strings.Join(sourceShards, ","), initialProductVSchema, initialProductSchema, 0, 0, 100, sourceKsOpts) + _, err := vc.AddKeyspace(t, []*Cell{zone2, zone1, zone3}, defaultSourceKs, strings.Join(sourceShards, ","), initialProductVSchema, initialProductSchema, 0, 0, 100, defaultSourceKsOpts) require.NoError(t, err) vtgateConn := vc.GetVTGateConn(t) @@ -160,17 +160,17 @@ func TestVDiff2(t *testing.T) { // Insert null and empty enum values for testing vdiff comparisons for those values. // If we add this to the initial data list, the counts in several other tests will need to change query := `insert into customer(cid, name, typ, sport) values(1001, null, 'soho','')` - execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:%s", sourceKs, sourceShards[0]), query) + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:%s", defaultSourceKs, sourceShards[0]), query) - generateMoreCustomers(t, sourceKs, 1000) + generateMoreCustomers(t, defaultSourceKs, 1000) // Create rows in the nopk table using the customer names and random ages between 20 and 100. 
query = "insert into nopk(name, age) select name, floor(rand()*80)+20 from customer" - execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:%s", sourceKs, sourceShards[0]), query) + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:%s", defaultSourceKs, sourceShards[0]), query) // The primary tablet is only added in the first cell. // We ONLY add primary tablets in this test. - tks, err := vc.AddKeyspace(t, []*Cell{zone3, zone1, zone2}, targetKs, strings.Join(targetShards, ","), customerVSchema, customerSchema, 0, 0, 200, targetKsOpts) + tks, err := vc.AddKeyspace(t, []*Cell{zone3, zone1, zone2}, defaultTargetKs, strings.Join(targetShards, ","), customerVSchema, customerSchema, 0, 0, 200, defaultTargetKsOpts) require.NoError(t, err) verifyClusterHealth(t, vc) @@ -178,10 +178,10 @@ func TestVDiff2(t *testing.T) { // (cid) vs (cid,typ) on the source. This confirms that we are able to properly // diff the table when the source and target have a different PK definition. // Remove the 0 date restrictions as the customer table uses them in its DEFAULTs. - execVtgateQuery(t, vtgateConn, targetKs, "set @@session.sql_mode='ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'") - execVtgateQuery(t, vtgateConn, targetKs, customerTableModifiedPK) + execVtgateQuery(t, vtgateConn, defaultTargetKs, "set @@session.sql_mode='ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'") + execVtgateQuery(t, vtgateConn, defaultTargetKs, customerTableModifiedPK) // Set the sql_mode back to the default. 
- execVtgateQuery(t, vtgateConn, targetKs, "set @@session.sql_mode='ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'") + execVtgateQuery(t, vtgateConn, defaultTargetKs, "set @@session.sql_mode='ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'") for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { @@ -190,7 +190,7 @@ func TestVDiff2(t *testing.T) { }) } - statsTablet := vc.getPrimaryTablet(t, targetKs, targetShards[0]) + statsTablet := vc.getPrimaryTablet(t, defaultTargetKs, targetShards[0]) // We diffed X rows so confirm that the global total is > 0. countStr, err := getDebugVar(t, statsTablet.Port, []string{"VDiffRowsComparedTotal"}) @@ -211,23 +211,23 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, defer vtgateConn.Close() arrTargetShards := strings.Split(tc.targetShards, ",") if tc.typ == "Reshard" { - require.NoError(t, vc.AddShards(t, cells, tks, tc.targetShards, 0, 0, tc.tabletBaseID, targetKsOpts)) + require.NoError(t, vc.AddShards(t, cells, tks, tc.targetShards, 0, 0, tc.tabletBaseID, defaultTargetKsOpts)) } - ksWorkflow := fmt.Sprintf("%s.%s", tc.targetKs, tc.workflow) + ksWorkflow := fmt.Sprintf("%s.%s", tc.defaultTargetKs, tc.workflow) statsShard := arrTargetShards[0] - statsTablet := vc.getPrimaryTablet(t, tc.targetKs, statsShard) + statsTablet := vc.getPrimaryTablet(t, tc.defaultTargetKs, statsShard) var args []string args = append(args, tc.typ) args = append(args, "--workflow", tc.workflow) - args = append(args, "--target-keyspace", tc.targetKs) + args = append(args, "--target-keyspace", tc.defaultTargetKs) allCellNames := getCellNames(nil) args = append(args, "create") args = append(args, "--cells", allCellNames) if tc.typ == "Reshard" { args = append(args, "--source-shards", tc.sourceShards, "--target-shards", tc.targetShards) } else { - args = append(args, 
"--source-keyspace", tc.sourceKs) + args = append(args, "--source-keyspace", tc.defaultSourceKs) args = append(args, "--tables", tc.tables) } err := vc.VtctldClient.ExecuteCommand(args...) @@ -235,7 +235,7 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, waitForShardsToCatchup := func() { for _, shard := range arrTargetShards { - tab := vc.getPrimaryTablet(t, tc.targetKs, shard) + tab := vc.getPrimaryTablet(t, tc.defaultTargetKs, shard) catchup(t, tab, tc.workflow, tc.typ) } } @@ -261,17 +261,17 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, totalRowsToCreate := seconds * perSecondCount log.Infof("Test host has %d vCPUs. Generating %d rows in the customer table to test --max-diff-duration", runtime.NumCPU(), totalRowsToCreate) for i := int64(0); i < totalRowsToCreate; i += chunkSize { - generateMoreCustomers(t, sourceKs, chunkSize) + generateMoreCustomers(t, defaultSourceKs, chunkSize) } // Wait for the workflow to catch up after all the inserts. waitForShardsToCatchup() // This flag is only implemented in vtctldclient. - doVtctldclientVDiff(t, tc.targetKs, tc.workflow, allCellNames, nil, "--max-diff-duration", diffDuration) + doVtctldclientVDiff(t, tc.defaultTargetKs, tc.workflow, allCellNames, nil, "--max-diff-duration", diffDuration) // Confirm that the customer table diff was restarted but not others. 
- tablet := vc.getPrimaryTablet(t, tc.targetKs, arrTargetShards[0]) + tablet := vc.getPrimaryTablet(t, tc.defaultTargetKs, arrTargetShards[0]) stat, err := getDebugVar(t, tablet.Port, []string{"VDiffRestartedTableDiffsCount"}) require.NoError(t, err, "failed to get VDiffRestartedTableDiffsCount stat: %v", err) customerRestarts := gjson.Parse(stat).Get("customer").Int() @@ -280,7 +280,7 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, require.Equal(t, int64(0), leadRestarts, "expected VDiffRestartedTableDiffsCount stat to be 0 for the Lead table, got %d", leadRestarts) // Cleanup the created customer records so as not to slow down the rest of the test. - delstmt := fmt.Sprintf("delete from %s.customer order by cid desc limit %d", sqlescape.EscapeID(sourceKs), chunkSize) + delstmt := fmt.Sprintf("delete from %s.customer order by cid desc limit %d", sqlescape.EscapeID(defaultSourceKs), chunkSize) for i := int64(0); i < totalRowsToCreate; i += chunkSize { _, err := vtgateConn.ExecuteFetch(delstmt, int(chunkSize), false) require.NoError(t, err, "failed to cleanup added customer records: %v", err) @@ -289,7 +289,7 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, waitForShardsToCatchup() tc.vdiffCount++ // We only did vtctldclient vdiff create } else { - vdiff(t, tc.targetKs, tc.workflow, allCellNames, nil) + vdiff(t, tc.defaultTargetKs, tc.workflow, allCellNames, nil) tc.vdiffCount++ } checkVDiffCountStat(t, statsTablet, tc.vdiffCount) @@ -329,7 +329,7 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, } if tc.testCLIFlagHandling { // This creates and then deletes the vdiff so we don't increment the count. 
- testCLIFlagHandling(t, tc.targetKs, tc.workflow, cells[0]) + testCLIFlagHandling(t, tc.defaultTargetKs, tc.workflow, cells[0]) } checkVDiffCountStat(t, statsTablet, tc.vdiffCount) @@ -345,13 +345,13 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, tc.vdiffCount++ checkVDiffCountStat(t, statsTablet, tc.vdiffCount) - err = vc.VtctldClient.ExecuteCommand(tc.typ, "--workflow", tc.workflow, "--target-keyspace", tc.targetKs, "SwitchTraffic") + err = vc.VtctldClient.ExecuteCommand(tc.typ, "--workflow", tc.workflow, "--target-keyspace", tc.defaultTargetKs, "SwitchTraffic") require.NoError(t, err) - err = vc.VtctldClient.ExecuteCommand(tc.typ, "--workflow", tc.workflow, "--target-keyspace", tc.targetKs, "Complete") + err = vc.VtctldClient.ExecuteCommand(tc.typ, "--workflow", tc.workflow, "--target-keyspace", tc.defaultTargetKs, "Complete") require.NoError(t, err) // Confirm the VDiff data is deleted for the workflow. - testNoOrphanedData(t, tc.targetKs, tc.workflow, arrTargetShards) + testNoOrphanedData(t, tc.defaultTargetKs, tc.workflow, arrTargetShards) tc.vdiffCount = 0 // All vdiffs are deleted, so reset the count and check checkVDiffCountStat(t, statsTablet, tc.vdiffCount) } @@ -376,7 +376,7 @@ func testCLIErrors(t *testing.T, ksWorkflow, cells string) { // testCLIFlagHandling tests that the vtctldclient CLI flags are handled correctly // from vtctldclient->vtctld->vttablet->mysqld. 
-func testCLIFlagHandling(t *testing.T, targetKs, workflowName string, cell *Cell) { +func testCLIFlagHandling(t *testing.T, defaultTargetKs, workflowName string, cell *Cell) { expectedOptions := &tabletmanagerdatapb.VDiffOptions{ CoreOptions: &tabletmanagerdatapb.VDiffCoreOptions{ MaxRows: 999, @@ -400,7 +400,7 @@ func testCLIFlagHandling(t *testing.T, targetKs, workflowName string, cell *Cell } t.Run("Client flag handling", func(t *testing.T) { - res, err := vc.VtctldClient.ExecuteCommandWithOutput("vdiff", "--target-keyspace", targetKs, "--workflow", workflowName, + res, err := vc.VtctldClient.ExecuteCommandWithOutput("vdiff", "--target-keyspace", defaultTargetKs, "--workflow", workflowName, "create", "--limit", fmt.Sprintf("%d", expectedOptions.CoreOptions.MaxRows), "--max-report-sample-rows", fmt.Sprintf("%d", expectedOptions.ReportOptions.MaxSampleRows), @@ -425,10 +425,10 @@ func testCLIFlagHandling(t *testing.T, targetKs, workflowName string, cell *Cell // Confirm that the options were passed through and saved correctly. 
query := sqlparser.BuildParsedQuery("select options from %s.vdiff where vdiff_uuid = %s", sidecarDBIdentifier, encodeString(vduuid.String())).Query - tablets := vc.getVttabletsInKeyspace(t, cell, targetKs, "PRIMARY") - require.Greater(t, len(tablets), 0, "no primary tablets found in keyspace %s", targetKs) + tablets := vc.getVttabletsInKeyspace(t, cell, defaultTargetKs, "PRIMARY") + require.Greater(t, len(tablets), 0, "no primary tablets found in keyspace %s", defaultTargetKs) tablet := maps.Values(tablets)[0] - qres, err := tablet.QueryTablet(query, targetKs, false) + qres, err := tablet.QueryTablet(query, defaultTargetKs, false) require.NoError(t, err, "query %q failed: %v", query, err) require.NotNil(t, qres, "query %q returned nil result", query) // Should never happen require.Equal(t, 1, len(qres.Rows), "query %q returned %d rows, expected 1", query, len(qres.Rows)) @@ -442,7 +442,7 @@ func testCLIFlagHandling(t *testing.T, targetKs, workflowName string, cell *Cell // Delete this vdiff as we used --auto-start=false and thus it never starts and // does not provide the normally expected show --verbose --format=json output. - _, output := performVDiff2Action(t, fmt.Sprintf("%s.%s", targetKs, workflowName), "", "delete", vduuid.String(), false) + _, output := performVDiff2Action(t, fmt.Sprintf("%s.%s", defaultTargetKs, workflowName), "", "delete", vduuid.String(), false) require.Equal(t, "completed", gjson.Get(output, "Status").String()) }) } @@ -509,7 +509,7 @@ func testResume(t *testing.T, tc *testCase, cells string) { t.Run("Resume", func(t *testing.T) { vtgateConn, closeConn := getVTGateConn() defer closeConn() - ksWorkflow := fmt.Sprintf("%s.%s", tc.targetKs, tc.workflow) + ksWorkflow := fmt.Sprintf("%s.%s", tc.defaultTargetKs, tc.workflow) // Confirm the last VDiff is in the expected completed state. 
uuid, output := performVDiff2Action(t, ksWorkflow, cells, "show", "last", false) @@ -521,7 +521,7 @@ func testResume(t *testing.T, tc *testCase, cells string) { expectedNewRows := int64(0) if tc.resumeInsert != "" { - res := execVtgateQuery(t, vtgateConn, tc.sourceKs, tc.resumeInsert) + res := execVtgateQuery(t, vtgateConn, tc.defaultSourceKs, tc.resumeInsert) expectedNewRows = int64(res.RowsAffected) } expectedRows := rowsCompared + expectedNewRows @@ -572,7 +572,7 @@ func testAutoRetryError(t *testing.T, tc *testCase, cells string) { t.Run("Auto retry on error", func(t *testing.T) { vtgateConn, closeConn := getVTGateConn() defer closeConn() - ksWorkflow := fmt.Sprintf("%s.%s", tc.targetKs, tc.workflow) + ksWorkflow := fmt.Sprintf("%s.%s", tc.defaultTargetKs, tc.workflow) // Confirm the last VDiff is in the expected completed state. uuid, output := performVDiff2Action(t, ksWorkflow, cells, "show", "last", false) @@ -586,15 +586,15 @@ func testAutoRetryError(t *testing.T, tc *testCase, cells string) { // compared is cumulative. expectedNewRows := int64(0) if tc.retryInsert != "" { - res := execVtgateQuery(t, vtgateConn, tc.sourceKs, tc.retryInsert) + res := execVtgateQuery(t, vtgateConn, tc.defaultSourceKs, tc.retryInsert) expectedNewRows = int64(res.RowsAffected) } expectedRows := rowsCompared + expectedNewRows // Update the VDiff to simulate an ephemeral error having occurred. 
for _, shard := range strings.Split(tc.targetShards, ",") { - tab := vc.getPrimaryTablet(t, tc.targetKs, shard) - res, err := tab.QueryTabletWithDB(sqlparser.BuildParsedQuery(sqlSimulateError, sidecarDBIdentifier, sidecarDBIdentifier, encodeString(uuid)).Query, "vt_"+tc.targetKs) + tab := vc.getPrimaryTablet(t, tc.defaultTargetKs, shard) + res, err := tab.QueryTabletWithDB(sqlparser.BuildParsedQuery(sqlSimulateError, sidecarDBIdentifier, sidecarDBIdentifier, encodeString(uuid)).Query, "vt_"+tc.defaultTargetKs) require.NoError(t, err) // Should have updated the vdiff record and at least one vdiff_table record. require.GreaterOrEqual(t, int(res.RowsAffected), 2) diff --git a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go index aee4378c8d5..4ca439adc2c 100644 --- a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go +++ b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go @@ -36,17 +36,17 @@ func TestMultipleConcurrentVDiffs(t *testing.T) { vc = NewVitessCluster(t, nil) defer vc.TearDown() - sourceKeyspace := sourceKs + sourceKeyspace := defaultSourceKs shardName := "0" cell := vc.Cells[cellName] - vc.AddKeyspace(t, []*Cell{cell}, sourceKeyspace, shardName, initialProductVSchema, initialProductSchema, 0, 0, 100, sourceKsOpts) + vc.AddKeyspace(t, []*Cell{cell}, sourceKeyspace, shardName, initialProductVSchema, initialProductSchema, 0, 0, 100, defaultSourceKsOpts) verifyClusterHealth(t, vc) insertInitialData(t) targetTabletId := 200 - targetKeyspace := targetKs - vc.AddKeyspace(t, []*Cell{cell}, targetKeyspace, shardName, initialProductVSchema, initialProductSchema, 0, 0, targetTabletId, sourceKsOpts) + targetKeyspace := defaultTargetKs + vc.AddKeyspace(t, []*Cell{cell}, targetKeyspace, shardName, initialProductVSchema, initialProductSchema, 0, 0, targetTabletId, defaultSourceKsOpts) index := 1000 var loadCtx context.Context @@ -109,7 +109,7 @@ func 
TestMultipleConcurrentVDiffs(t *testing.T) { loadCancel() // confirm that show all shows the correct workflow and only that workflow. - output, err := vc.VtctldClient.ExecuteCommandWithOutput("VDiff", "--format", "json", "--workflow", "wf1", "--target-keyspace", targetKs, "show", "all") + output, err := vc.VtctldClient.ExecuteCommandWithOutput("VDiff", "--format", "json", "--workflow", "wf1", "--target-keyspace", defaultTargetKs, "show", "all") require.NoError(t, err) log.Infof("VDiff output: %s", output) count := gjson.Get(output, "..#").Int() @@ -117,5 +117,5 @@ func TestMultipleConcurrentVDiffs(t *testing.T) { ksName := gjson.Get(output, "0.Keyspace").String() require.Equal(t, int64(1), count) require.Equal(t, "wf1", wf) - require.Equal(t, targetKs, ksName) + require.Equal(t, defaultTargetKs, ksName) } diff --git a/go/test/endtoend/vreplication/vdiff_online_ddl_test.go b/go/test/endtoend/vreplication/vdiff_online_ddl_test.go index 35bb59be37d..42002a33f93 100644 --- a/go/test/endtoend/vreplication/vdiff_online_ddl_test.go +++ b/go/test/endtoend/vreplication/vdiff_online_ddl_test.go @@ -32,7 +32,7 @@ func TestOnlineDDLVDiff(t *testing.T) { defaultReplicas = 0 vc = setupMinimalCluster(t) defer vc.TearDown() - keyspace := sourceKs + keyspace := defaultSourceKs ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/go/test/endtoend/vreplication/vreplication_test.go b/go/test/endtoend/vreplication/vreplication_test.go index 6ded63f3eb8..14d193a7980 100644 --- a/go/test/endtoend/vreplication/vreplication_test.go +++ b/go/test/endtoend/vreplication/vreplication_test.go @@ -61,8 +61,8 @@ var ( vc *VitessCluster defaultRdonly int defaultReplicas int - sourceKsOpts = make(map[string]string) - targetKsOpts = make(map[string]string) + defaultSourceKsOpts = make(map[string]string) + defaultTargetKsOpts = make(map[string]string) httpClient = throttlebase.SetupHTTPClient(time.Second) sourceThrottlerAppName = throttlerapp.VStreamerName 
targetThrottlerAppName = throttlerapp.VPlayerName @@ -95,7 +95,7 @@ func init() { func TestVReplicationDDLHandling(t *testing.T) { var err error workflow := "onddl_test" - ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) + ksWorkflow := fmt.Sprintf("%s.%s", defaultTargetKs, workflow) table := "orders" newColumn := "ddltest" cell := defaultCellName @@ -104,10 +104,10 @@ func TestVReplicationDDLHandling(t *testing.T) { defer vc.TearDown() defaultCell := vc.Cells[cell] - if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, shard, initialProductVSchema, initialProductSchema, 0, 0, 100, nil); err != nil { + if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, defaultSourceKs, shard, initialProductVSchema, initialProductSchema, 0, 0, 100, nil); err != nil { t.Fatal(err) } - if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, targetKs, shard, "", "", 0, 0, 200, nil); err != nil { + if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, defaultTargetKs, shard, "", "", 0, 0, 200, nil); err != nil { t.Fatal(err) } vtgate := defaultCell.Vtgates[0] @@ -117,20 +117,20 @@ func TestVReplicationDDLHandling(t *testing.T) { vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() - sourceTab = vc.getPrimaryTablet(t, sourceKs, shard) - targetTab := vc.getPrimaryTablet(t, targetKs, shard) + sourceTab = vc.getPrimaryTablet(t, defaultSourceKs, shard) + targetTab := vc.getPrimaryTablet(t, defaultTargetKs, shard) insertInitialData(t) - _, err = vtgateConn.ExecuteFetch(fmt.Sprintf("use %s", sourceKs), 1, false) + _, err = vtgateConn.ExecuteFetch(fmt.Sprintf("use %s", defaultSourceKs), 1, false) require.NoError(t, err) addColDDL := fmt.Sprintf("alter table %s add column %s varchar(64)", table, newColumn) dropColDDL := fmt.Sprintf("alter table %s drop column %s", table, newColumn) checkColQuerySource := fmt.Sprintf("select count(column_name) from information_schema.columns where table_schema='vt_%s' and 
table_name='%s' and column_name='%s'", - sourceKs, table, newColumn) + defaultSourceKs, table, newColumn) checkColQueryTarget := fmt.Sprintf("select count(column_name) from information_schema.columns where table_schema='vt_%s' and table_name='%s' and column_name='%s'", - targetKs, table, newColumn) + defaultTargetKs, table, newColumn) // expectedAction is the specific action, e.g. ignore, that should have a count of 1. All other // actions should have a count of 0. id is the stream ID to check. @@ -150,7 +150,7 @@ func TestVReplicationDDLHandling(t *testing.T) { } // Test IGNORE behavior - moveTablesAction(t, "Create", defaultCellName, workflow, sourceKs, targetKs, table, "--on-ddl", binlogdatapb.OnDDLAction_IGNORE.String()) + moveTablesAction(t, "Create", defaultCellName, workflow, defaultSourceKs, defaultTargetKs, table, "--on-ddl", binlogdatapb.OnDDLAction_IGNORE.String()) // Wait until we get through the copy phase... catchup(t, targetTab, workflow, "MoveTables") // Add new col on source @@ -159,13 +159,13 @@ func TestVReplicationDDLHandling(t *testing.T) { // Confirm workflow is still running fine waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) // Confirm new col does not exist on target - waitForQueryResult(t, vtgateConn, targetKs, checkColQueryTarget, "[[INT64(0)]]") + waitForQueryResult(t, vtgateConn, defaultTargetKs, checkColQueryTarget, "[[INT64(0)]]") // Confirm new col does exist on source - waitForQueryResult(t, vtgateConn, sourceKs, checkColQuerySource, "[[INT64(1)]]") + waitForQueryResult(t, vtgateConn, defaultSourceKs, checkColQuerySource, "[[INT64(1)]]") // Confirm that we updated the stats on the target tablet as expected. 
checkOnDDLStats(binlogdatapb.OnDDLAction_IGNORE, 1) // Also test Cancel --keep-routing-rules - moveTablesAction(t, "Cancel", defaultCellName, workflow, sourceKs, targetKs, table, "--keep-routing-rules") + moveTablesAction(t, "Cancel", defaultCellName, workflow, defaultSourceKs, defaultTargetKs, table, "--keep-routing-rules") // Confirm that the routing rules were NOT cleared rr, err := vc.VtctldClient.ExecuteCommandWithOutput("GetRoutingRules") require.NoError(t, err) @@ -182,7 +182,7 @@ func TestVReplicationDDLHandling(t *testing.T) { require.NoError(t, err, "error executing %q: %v", dropColDDL, err) // Test STOP behavior (new col now exists nowhere) - moveTablesAction(t, "Create", defaultCellName, workflow, sourceKs, targetKs, table, "--on-ddl", binlogdatapb.OnDDLAction_STOP.String()) + moveTablesAction(t, "Create", defaultCellName, workflow, defaultSourceKs, defaultTargetKs, table, "--on-ddl", binlogdatapb.OnDDLAction_STOP.String()) // Wait until we get through the copy phase... catchup(t, targetTab, workflow, "MoveTables") // Add new col on the source @@ -191,24 +191,24 @@ func TestVReplicationDDLHandling(t *testing.T) { // Confirm that the worfklow stopped because of the DDL waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Stopped.String(), fmt.Sprintf("Message==Stopped at DDL %s", addColDDL)) // Confirm that the target does not have new col - waitForQueryResult(t, vtgateConn, targetKs, checkColQueryTarget, "[[INT64(0)]]") + waitForQueryResult(t, vtgateConn, defaultTargetKs, checkColQueryTarget, "[[INT64(0)]]") // Confirm that we updated the stats on the target tablet as expected. 
checkOnDDLStats(binlogdatapb.OnDDLAction_STOP, 2) - moveTablesAction(t, "Cancel", defaultCellName, workflow, sourceKs, targetKs, table) + moveTablesAction(t, "Cancel", defaultCellName, workflow, defaultSourceKs, defaultTargetKs, table) // Test EXEC behavior (new col now exists on source) - moveTablesAction(t, "Create", defaultCellName, workflow, sourceKs, targetKs, table, "--on-ddl", binlogdatapb.OnDDLAction_EXEC.String()) + moveTablesAction(t, "Create", defaultCellName, workflow, defaultSourceKs, defaultTargetKs, table, "--on-ddl", binlogdatapb.OnDDLAction_EXEC.String()) // Wait until we get through the copy phase... catchup(t, targetTab, workflow, "MoveTables") // Confirm target has new col from copy phase - waitForQueryResult(t, vtgateConn, targetKs, checkColQueryTarget, "[[INT64(1)]]") + waitForQueryResult(t, vtgateConn, defaultTargetKs, checkColQueryTarget, "[[INT64(1)]]") // Drop col on source _, err = vtgateConn.ExecuteFetch(dropColDDL, 1, false) require.NoError(t, err, "error executing %q: %v", dropColDDL, err) // Confirm workflow is still running fine waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) // Confirm new col was dropped on target - waitForQueryResult(t, vtgateConn, targetKs, checkColQueryTarget, "[[INT64(0)]]") + waitForQueryResult(t, vtgateConn, defaultTargetKs, checkColQueryTarget, "[[INT64(0)]]") // Confirm that we updated the stats on the target tablet as expected. 
checkOnDDLStats(binlogdatapb.OnDDLAction_EXEC, 3) } @@ -238,10 +238,10 @@ func TestVreplicationCopyThrottling(t *testing.T) { parallelInsertWorkers, } - if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, shard, initialProductVSchema, initialProductSchema, 0, 0, 100, nil); err != nil { + if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, defaultSourceKs, shard, initialProductVSchema, initialProductSchema, 0, 0, 100, nil); err != nil { t.Fatal(err) } - if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, targetKs, shard, "", "", 0, 0, 200, nil); err != nil { + if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, defaultTargetKs, shard, "", "", 0, 0, 200, nil); err != nil { t.Fatal(err) } vtgate := defaultCell.Vtgates[0] @@ -251,30 +251,30 @@ func TestVreplicationCopyThrottling(t *testing.T) { // have an InnoDB History List length that is less than specified in the tablet's config. // We update rows in a table not part of the MoveTables operation so that we're not blocking // on the LOCK TABLE call but rather the InnoDB History List length. - trxConn := generateInnoDBRowHistory(t, sourceKs, maxSourceTrxHistory) + trxConn := generateInnoDBRowHistory(t, defaultSourceKs, maxSourceTrxHistory) // History should have been generated on the source primary tablet - waitForInnoDBHistoryLength(t, vc.getPrimaryTablet(t, sourceKs, shard), maxSourceTrxHistory) + waitForInnoDBHistoryLength(t, vc.getPrimaryTablet(t, defaultSourceKs, shard), maxSourceTrxHistory) // We need to force primary tablet types as the history list has been increased on the source primary // We use a small timeout and ignore errors as we don't expect the MoveTables to start here // because of the InnoDB History List length. 
- moveTablesActionWithTabletTypes(t, "Create", defaultCell.Name, workflow, sourceKs, targetKs, table, "primary", true) + moveTablesActionWithTabletTypes(t, "Create", defaultCell.Name, workflow, defaultSourceKs, defaultTargetKs, table, "primary", true) // Wait for the copy phase to start - waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKs, workflow), binlogdatapb.VReplicationWorkflowState_Copying.String()) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", defaultTargetKs, workflow), binlogdatapb.VReplicationWorkflowState_Copying.String()) // The initial copy phase should be blocking on the history list. - confirmWorkflowHasCopiedNoData(t, targetKs, workflow) + confirmWorkflowHasCopiedNoData(t, defaultTargetKs, workflow) releaseInnoDBRowHistory(t, trxConn) trxConn.Close() } func TestBasicVreplicationWorkflow(t *testing.T) { - sourceKsOpts["DBTypeVersion"] = "mysql-8.0" - targetKsOpts["DBTypeVersion"] = "mysql-8.0" + defaultSourceKsOpts["DBTypeVersion"] = "mysql-8.0" + defaultTargetKsOpts["DBTypeVersion"] = "mysql-8.0" testBasicVreplicationWorkflow(t, "noblob") } func TestVreplicationCopyParallel(t *testing.T) { - sourceKsOpts["DBTypeVersion"] = "mysql-5.7" - targetKsOpts["DBTypeVersion"] = "mysql-5.7" + defaultSourceKsOpts["DBTypeVersion"] = "mysql-5.7" + defaultTargetKsOpts["DBTypeVersion"] = "mysql-5.7" extraVTTabletArgs = []string{ parallelInsertWorkers, } @@ -303,7 +303,7 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string } defaultCell := vc.Cells[defaultCellName] - vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, sourceKsOpts) + vc.AddKeyspace(t, []*Cell{defaultCell}, defaultSourceKs, "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, defaultSourceKsOpts) vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() @@ -315,7 +315,7 @@ func 
testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string // the Lead and Lead-1 tables tested a specific case with binary sharding keys. Drop it now so that we don't // have to update the rest of the tests - execVtgateQuery(t, vtgateConn, targetKs, "drop table `Lead`,`Lead-1`") + execVtgateQuery(t, vtgateConn, defaultTargetKs, "drop table `Lead`,`Lead-1`") validateRollupReplicates(t) shardOrders(t) shardMerchant(t) @@ -335,18 +335,18 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string insertMoreCustomers(t, 16) reshardCustomer2to4Split(t, nil, "") - confirmAllStreamsRunning(t, vtgateConn, fmt.Sprintf("%s:-40", targetKs)) - expectNumberOfStreams(t, vtgateConn, "Customer2to4", "sales", fmt.Sprintf("%s:0", sourceKs), 4) + confirmAllStreamsRunning(t, vtgateConn, fmt.Sprintf("%s:-40", defaultTargetKs)) + expectNumberOfStreams(t, vtgateConn, "Customer2to4", "sales", fmt.Sprintf("%s:0", defaultSourceKs), 4) reshardCustomer3to2SplitMerge(t) - confirmAllStreamsRunning(t, vtgateConn, fmt.Sprintf("%s:-60", targetKs)) - expectNumberOfStreams(t, vtgateConn, "Customer3to2", "sales", fmt.Sprintf("%s:0", sourceKs), 3) + confirmAllStreamsRunning(t, vtgateConn, fmt.Sprintf("%s:-60", defaultTargetKs)) + expectNumberOfStreams(t, vtgateConn, "Customer3to2", "sales", fmt.Sprintf("%s:0", defaultSourceKs), 3) reshardCustomer3to1Merge(t) - confirmAllStreamsRunning(t, vtgateConn, fmt.Sprintf("%s:0", targetKs)) + confirmAllStreamsRunning(t, vtgateConn, fmt.Sprintf("%s:0", defaultTargetKs)) - expectNumberOfStreams(t, vtgateConn, "Customer3to1", "sales", fmt.Sprintf("%s:0", sourceKs), 1) + expectNumberOfStreams(t, vtgateConn, "Customer3to1", "sales", fmt.Sprintf("%s:0", defaultSourceKs), 1) t.Run("Verify CopyState Is Optimized Afterwards", func(t *testing.T) { - tabletMap := vc.getVttabletsInKeyspace(t, defaultCell, targetKs, topodatapb.TabletType_PRIMARY.String()) + tabletMap := vc.getVttabletsInKeyspace(t, defaultCell, defaultTargetKs, 
topodatapb.TabletType_PRIMARY.String()) require.NotNil(t, tabletMap) require.Greater(t, len(tabletMap), 0) for _, tablet := range tabletMap { @@ -359,8 +359,8 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string if strings.ToLower(binlogRowImage) == "noblob" { return } - _, err = vtgateConn.ExecuteFetch(fmt.Sprintf("use `%s`", targetKs), 1, false) - require.NoError(t, err, "error using %s keyspace: %v", targetKs, err) + _, err = vtgateConn.ExecuteFetch(fmt.Sprintf("use `%s`", defaultTargetKs), 1, false) + require.NoError(t, err, "error using %s keyspace: %v", defaultTargetKs, err) res, err := vtgateConn.ExecuteFetch("select count(*) from customer where name is not null", 1, false) require.NoError(t, err, "error getting current row count in customer: %v", err) require.Equal(t, 1, len(res.Rows), "expected 1 row in count(*) query, got %d", len(res.Rows)) @@ -372,28 +372,28 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string require.NoError(t, err, "error executing %q: %v", insert, err) vindexName := "customer_name_keyspace_id" - err = vc.VtctldClient.ExecuteCommand("LookupVindex", "--name", vindexName, "--table-keyspace", sourceKs, "create", "--keyspace", targetKs, + err = vc.VtctldClient.ExecuteCommand("LookupVindex", "--name", vindexName, "--table-keyspace", defaultSourceKs, "create", "--keyspace", defaultTargetKs, "--type=consistent_lookup", "--table-owner=customer", "--table-owner-columns=name,cid", "--ignore-nulls", "--tablet-types=PRIMARY") require.NoError(t, err, "error executing LookupVindex create: %v", err) - waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", sourceKs, vindexName), binlogdatapb.VReplicationWorkflowState_Running.String()) - waitForRowCount(t, vtgateConn, sourceKs, vindexName, int(rows)) - customerVSchema, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", targetKs) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", defaultSourceKs, vindexName), 
binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForRowCount(t, vtgateConn, defaultSourceKs, vindexName, int(rows)) + customerVSchema, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", defaultTargetKs) require.NoError(t, err, "error executing GetVSchema: %v", err) vdx := gjson.Get(customerVSchema, fmt.Sprintf("vindexes.%s", vindexName)) require.NotNil(t, vdx, "lookup vindex %s not found", vindexName) require.Equal(t, "true", vdx.Get("params.write_only").String(), "expected write_only parameter to be true") - err = vc.VtctldClient.ExecuteCommand("LookupVindex", "--name", vindexName, "--table-keyspace", sourceKs, "externalize", "--keyspace", targetKs) + err = vc.VtctldClient.ExecuteCommand("LookupVindex", "--name", vindexName, "--table-keyspace", defaultSourceKs, "externalize", "--keyspace", defaultTargetKs) require.NoError(t, err, "error executing LookupVindex externalize: %v", err) - customerVSchema, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", targetKs) + customerVSchema, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", defaultTargetKs) require.NoError(t, err, "error executing GetVSchema: %v", err) vdx = gjson.Get(customerVSchema, fmt.Sprintf("vindexes.%s", vindexName)) require.NotNil(t, vdx, "lookup vindex %s not found", vindexName) require.NotEqual(t, "true", vdx.Get("params.write_only").String(), "did not expect write_only parameter to be true") - err = vc.VtctldClient.ExecuteCommand("LookupVindex", "--name", vindexName, "--table-keyspace", sourceKs, "internalize", "--keyspace", targetKs) + err = vc.VtctldClient.ExecuteCommand("LookupVindex", "--name", vindexName, "--table-keyspace", defaultSourceKs, "internalize", "--keyspace", defaultTargetKs) require.NoError(t, err, "error executing LookupVindex internalize: %v", err) - customerVSchema, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", targetKs) + customerVSchema, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", defaultTargetKs) 
require.NoError(t, err, "error executing GetVSchema: %v", err) vdx = gjson.Get(customerVSchema, fmt.Sprintf("vindexes.%s", vindexName)) require.NotNil(t, vdx, "lookup vindex %s not found", vindexName) @@ -402,8 +402,8 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string } func TestV2WorkflowsAcrossDBVersions(t *testing.T) { - sourceKsOpts["DBTypeVersion"] = "mysql-5.7" - targetKsOpts["DBTypeVersion"] = "mysql-8.0" + defaultSourceKsOpts["DBTypeVersion"] = "mysql-5.7" + defaultTargetKsOpts["DBTypeVersion"] = "mysql-8.0" testBasicVreplicationWorkflow(t, "") } @@ -411,8 +411,8 @@ func TestV2WorkflowsAcrossDBVersions(t *testing.T) { // and a MySQL target as while MariaDB is not supported in Vitess v14+ we want // MariaDB users to have a way to migrate into Vitess. func TestMoveTablesMariaDBToMySQL(t *testing.T) { - sourceKsOpts["DBTypeVersion"] = "mariadb-10.10" - targetKsOpts["DBTypeVersion"] = "mysql-8.0" + defaultSourceKsOpts["DBTypeVersion"] = "mariadb-10.10" + defaultTargetKsOpts["DBTypeVersion"] = "mysql-8.0" testVreplicationWorkflows(t, true /* only do MoveTables */, "") } @@ -429,22 +429,22 @@ func TestVStreamFlushBinlog(t *testing.T) { // to deal with CI resource constraints. // This also makes it easier to confirm the behavior as we know exactly // what tablets will be involved. 
- if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, shard, initialProductVSchema, initialProductSchema, 0, 0, 100, nil); err != nil { + if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, defaultSourceKs, shard, initialProductVSchema, initialProductSchema, 0, 0, 100, nil); err != nil { t.Fatal(err) } - if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, targetKs, shard, "", "", 0, 0, 200, nil); err != nil { + if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, defaultTargetKs, shard, "", "", 0, 0, 200, nil); err != nil { t.Fatal(err) } verifyClusterHealth(t, vc) - sourceTab = vc.getPrimaryTablet(t, sourceKs, shard) + sourceTab = vc.getPrimaryTablet(t, defaultSourceKs, shard) insertInitialData(t) tables := "product,customer,merchant,orders" - moveTablesAction(t, "Create", defaultCellName, workflow, sourceKs, targetKs, tables) + moveTablesAction(t, "Create", defaultCellName, workflow, defaultSourceKs, defaultTargetKs, tables) // Wait until we get through the copy phase... - catchup(t, vc.getPrimaryTablet(t, targetKs, shard), workflow, "MoveTables") + catchup(t, vc.getPrimaryTablet(t, defaultTargetKs, shard), workflow, "MoveTables") // So far, we should not have rotated any binlogs flushCount := int64(sourceTab.GetVars()["VStreamerFlushedBinlogs"].(float64)) @@ -464,7 +464,7 @@ func TestVStreamFlushBinlog(t *testing.T) { require.Greater(t, res.RowsAffected, uint64(0)) if i%100 == 0 { - res, err := sourceTab.QueryTablet("show binary logs", sourceKs, false) + res, err := sourceTab.QueryTablet("show binary logs", defaultSourceKs, false) require.NoError(t, err) require.NotNil(t, res) require.Greater(t, len(res.Rows), 0) @@ -480,13 +480,13 @@ func TestVStreamFlushBinlog(t *testing.T) { // Now we should rotate the binary logs ONE time on the source, even // though we're opening up multiple result streams (1 per table). 
runVDiffsSideBySide = false - vdiff(t, targetKs, workflow, defaultCellName, nil) + vdiff(t, defaultTargetKs, workflow, defaultCellName, nil) flushCount = int64(sourceTab.GetVars()["VStreamerFlushedBinlogs"].(float64)) require.Equal(t, flushCount, int64(1), "VStreamerFlushedBinlogs should now be 1") // Now if we do another vdiff, we should NOT rotate the binlogs again // as we haven't been generating a lot of new binlog events. - vdiff(t, targetKs, workflow, defaultCellName, nil) + vdiff(t, defaultTargetKs, workflow, defaultCellName, nil) flushCount = int64(sourceTab.GetVars()["VStreamerFlushedBinlogs"].(float64)) require.Equal(t, flushCount, int64(1), "VStreamerFlushedBinlogs should still be 1") } @@ -497,7 +497,7 @@ func TestVStreamFlushBinlog(t *testing.T) { func TestMoveTablesIgnoreSourceKeyspace(t *testing.T) { defaultCellName := "zone1" workflow := "mtnosource" - ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) + ksWorkflow := fmt.Sprintf("%s.%s", defaultTargetKs, workflow) defaultShard := "0" tables := []string{"customer"} var defaultCell *Cell @@ -542,9 +542,9 @@ func TestMoveTablesIgnoreSourceKeyspace(t *testing.T) { } targetShardNames := strings.Split(targetShards, ",") - _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, sourceShards, sourceVSchema, customerTable, 0, 0, 100, nil) + _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, defaultSourceKs, sourceShards, sourceVSchema, customerTable, 0, 0, 100, nil) require.NoError(t, err) - _, err = vc.AddKeyspace(t, []*Cell{defaultCell}, targetKs, targetShards, targetVSchema, "", 0, 0, 500, nil) + _, err = vc.AddKeyspace(t, []*Cell{defaultCell}, defaultTargetKs, targetShards, targetVSchema, "", 0, 0, 500, nil) require.NoError(t, err) verifyClusterHealth(t, vc) @@ -552,10 +552,10 @@ func TestMoveTablesIgnoreSourceKeyspace(t *testing.T) { insertInitialData(t) } - moveTablesAction(t, "Create", defaultCellName, workflow, sourceKs, targetKs, strings.Join(tables, ","), createArgs...) 
+ moveTablesAction(t, "Create", defaultCellName, workflow, defaultSourceKs, defaultTargetKs, strings.Join(tables, ","), createArgs...) // Wait until we get through the copy phase... for _, targetShard := range targetShardNames { - catchup(t, vc.getPrimaryTablet(t, targetKs, targetShard), workflow, "MoveTables") + catchup(t, vc.getPrimaryTablet(t, defaultTargetKs, targetShard), workflow, "MoveTables") } if switchTraffic { @@ -565,10 +565,10 @@ func TestMoveTablesIgnoreSourceKeyspace(t *testing.T) { // Decommission the source keyspace. require.NotZero(t, len(vc.Cells[defaultCellName].Keyspaces)) - require.NotNil(t, vc.Cells[defaultCellName].Keyspaces[sourceKs]) - err = vc.TearDownKeyspace(vc.Cells[defaultCellName].Keyspaces[sourceKs]) + require.NotNil(t, vc.Cells[defaultCellName].Keyspaces[defaultSourceKs]) + err = vc.TearDownKeyspace(vc.Cells[defaultCellName].Keyspaces[defaultSourceKs]) require.NoError(t, err) - vc.DeleteKeyspace(t, sourceKs) + vc.DeleteKeyspace(t, defaultSourceKs) // The command should fail. out, err := vc.VtctldClient.ExecuteCommandWithOutput(completeArgs...) 
@@ -582,7 +582,7 @@ func TestMoveTablesIgnoreSourceKeyspace(t *testing.T) { confirmNoRoutingRules(t) for _, table := range tables { for _, targetShard := range targetShardNames { - tksShard := fmt.Sprintf("%s/%s", targetKs, targetShard) + tksShard := fmt.Sprintf("%s/%s", defaultTargetKs, targetShard) validateTableInDenyList(t, vc, tksShard, table, false) } } @@ -595,30 +595,30 @@ func TestMoveTablesIgnoreSourceKeyspace(t *testing.T) { require.NoError(t, err) srrMap := topotools.GetShardRoutingRulesMap(&srr) for _, shard := range targetShardNames { - ksShard := fmt.Sprintf("%s.%s", targetKs, shard) - require.NotEqual(t, srrMap[ksShard], targetKs) + ksShard := fmt.Sprintf("%s.%s", defaultTargetKs, shard) + require.NotEqual(t, srrMap[ksShard], defaultTargetKs) } - confirmNoWorkflows(t, targetKs) + confirmNoWorkflows(t, defaultTargetKs) } t.Run("Workflow Delete", func(t *testing.T) { - args := []string{"Workflow", "--keyspace=" + targetKs, "delete", "--workflow=" + workflow} + args := []string{"Workflow", "--keyspace=" + defaultTargetKs, "delete", "--workflow=" + workflow} run(t, defaultShard, defaultShard, nil, args, false) }) t.Run("MoveTables Cancel", func(t *testing.T) { - args := []string{"MoveTables", "--workflow=" + workflow, "--target-keyspace=" + targetKs, "cancel"} + args := []string{"MoveTables", "--workflow=" + workflow, "--target-keyspace=" + defaultTargetKs, "cancel"} run(t, defaultShard, defaultShard, nil, args, false) }) t.Run("MoveTables Partial Cancel", func(t *testing.T) { createArgs := []string{"--source-shards", "-80"} - args := []string{"MoveTables", "--workflow=" + workflow, "--target-keyspace=" + targetKs, "cancel"} + args := []string{"MoveTables", "--workflow=" + workflow, "--target-keyspace=" + defaultTargetKs, "cancel"} run(t, "-80,80-", "-80,80-", createArgs, args, true) }) t.Run("MoveTables Complete", func(t *testing.T) { - args := []string{"MoveTables", "--workflow=" + workflow, "--target-keyspace=" + targetKs, "complete"} + args := 
[]string{"MoveTables", "--workflow=" + workflow, "--target-keyspace=" + defaultTargetKs, "complete"} run(t, defaultShard, defaultShard, nil, args, true) }) // You can't complete a partial MoveTables workflow. Well, only the @@ -628,7 +628,7 @@ func TestMoveTablesIgnoreSourceKeyspace(t *testing.T) { func testVStreamCellFlag(t *testing.T) { vgtid := &binlogdatapb.VGtid{ ShardGtids: []*binlogdatapb.ShardGtid{{ - Keyspace: sourceKs, + Keyspace: defaultSourceKs, Shard: "0", Gtid: "", }}} @@ -724,7 +724,7 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) { cell1 := vc.Cells["zone1"] cell2 := vc.Cells["zone2"] - vc.AddKeyspace(t, []*Cell{cell1, cell2}, sourceKs, shard, initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, sourceKsOpts) + vc.AddKeyspace(t, []*Cell{cell1, cell2}, defaultSourceKs, shard, initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, defaultSourceKsOpts) // Add cell alias containing only zone2 result, err := vc.VtctldClient.ExecuteCommandWithOutput("AddCellsAlias", "--cells", "zone2", "alias") @@ -738,7 +738,7 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) { testVStreamFrom(t, vtgate, table, 2) }) shardCustomer(t, true, []*Cell{cell1, cell2}, "alias", false) - isTableInDenyList(t, vc, fmt.Sprintf("%s/0", sourceKs), "customer") + isTableInDenyList(t, vc, fmt.Sprintf("%s/0", defaultSourceKs), "customer") // we tag along this test so as not to create the overhead of creating another cluster testVStreamCellFlag(t) } @@ -810,20 +810,20 @@ func testVStreamFrom(t *testing.T, vtgate *cluster.VtgateProcess, table string, func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAlias string, withOpenTx bool) { t.Run("shardCustomer", func(t *testing.T) { workflow := "p2c" - ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) - if _, err := vc.AddKeyspace(t, cells, targetKs, "-80,80-", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, targetKsOpts); 
err != nil { + ksWorkflow := fmt.Sprintf("%s.%s", defaultTargetKs, workflow) + if _, err := vc.AddKeyspace(t, cells, defaultTargetKs, "-80,80-", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, defaultTargetKsOpts); err != nil { t.Fatal(err) } // Assume we are operating on first cell defaultCell := cells[0] - custKs := vc.Cells[defaultCell.Name].Keyspaces[targetKs] + custKs := vc.Cells[defaultCell.Name].Keyspaces[defaultTargetKs] tables := "customer,loadtest,Lead,Lead-1,db_order_test,geom_tbl,json_tbl,blüb_tbl,vdiff_order,reftable" - moveTablesAction(t, "Create", sourceCellOrAlias, workflow, sourceKs, targetKs, tables) + moveTablesAction(t, "Create", sourceCellOrAlias, workflow, defaultSourceKs, defaultTargetKs, tables) customerTab1 := custKs.Shards["-80"].Tablets["zone1-200"].Vttablet customerTab2 := custKs.Shards["80-"].Tablets["zone1-300"].Vttablet - productTab := vc.Cells[defaultCell.Name].Keyspaces[sourceKs].Shards["0"].Tablets["zone1-100"].Vttablet + productTab := vc.Cells[defaultCell.Name].Keyspaces[defaultSourceKs].Shards["0"].Tablets["zone1-100"].Vttablet // Wait to finish the copy phase for all tables workflowType := "MoveTables" @@ -836,25 +836,25 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() // Confirm that the 0 scale decimal field, dec80, is replicated correctly - execVtgateQuery(t, vtgateConn, sourceKs, "update customer set dec80 = 0") - execVtgateQuery(t, vtgateConn, sourceKs, "update customer set blb = \"new blob data\" where cid=3") - execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j1 = null, j2 = 'null', j3 = '\"null\"' where id = 5") - execVtgateQuery(t, vtgateConn, sourceKs, "insert into json_tbl(id, j1, j2, j3) values (7, null, 'null', '\"null\"')") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "update customer set dec80 = 0") + execVtgateQuery(t, 
vtgateConn, defaultSourceKs, "update customer set blb = \"new blob data\" where cid=3") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "update json_tbl set j1 = null, j2 = 'null', j3 = '\"null\"' where id = 5") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "insert into json_tbl(id, j1, j2, j3) values (7, null, 'null', '\"null\"')") // Test binlog-row-value-options=PARTIAL_JSON - execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j3 = JSON_SET(j3, '$.role', 'manager')") - execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j3 = JSON_SET(j3, '$.color', 'red')") - execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j3 = JSON_SET(j3, '$.day', 'wednesday')") - execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j3 = JSON_INSERT(JSON_REPLACE(j3, '$.day', 'friday'), '$.favorite_color', 'black')") - execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j3 = JSON_SET(JSON_REMOVE(JSON_REPLACE(j3, '$.day', 'monday'), '$.favorite_color'), '$.hobby', 'skiing') where id = 3") - execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j3 = JSON_SET(JSON_REMOVE(JSON_REPLACE(j3, '$.day', 'tuesday'), '$.favorite_color'), '$.hobby', 'skiing') where id = 4") - execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j3 = JSON_SET(JSON_SET(j3, '$.salary', 110), '$.role', 'IC') where id = 4") - execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j3 = JSON_SET(j3, '$.misc', '{\"address\":\"1012 S Park St\", \"town\":\"Hastings\", \"state\":\"MI\"}') where id = 1") - execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set id=id+1000, j3=JSON_SET(j3, '$.day', 'friday')") - waitForNoWorkflowLag(t, vc, targetKs, workflow) + execVtgateQuery(t, vtgateConn, defaultSourceKs, "update json_tbl set j3 = JSON_SET(j3, '$.role', 'manager')") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "update json_tbl set j3 = JSON_SET(j3, '$.color', 'red')") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "update json_tbl set j3 
= JSON_SET(j3, '$.day', 'wednesday')") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "update json_tbl set j3 = JSON_INSERT(JSON_REPLACE(j3, '$.day', 'friday'), '$.favorite_color', 'black')") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "update json_tbl set j3 = JSON_SET(JSON_REMOVE(JSON_REPLACE(j3, '$.day', 'monday'), '$.favorite_color'), '$.hobby', 'skiing') where id = 3") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "update json_tbl set j3 = JSON_SET(JSON_REMOVE(JSON_REPLACE(j3, '$.day', 'tuesday'), '$.favorite_color'), '$.hobby', 'skiing') where id = 4") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "update json_tbl set j3 = JSON_SET(JSON_SET(j3, '$.salary', 110), '$.role', 'IC') where id = 4") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "update json_tbl set j3 = JSON_SET(j3, '$.misc', '{\"address\":\"1012 S Park St\", \"town\":\"Hastings\", \"state\":\"MI\"}') where id = 1") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "update json_tbl set id=id+1000, j3=JSON_SET(j3, '$.day', 'friday')") + waitForNoWorkflowLag(t, vc, defaultTargetKs, workflow) dec80Replicated := false for _, tablet := range []*cluster.VttabletProcess{customerTab1, customerTab2} { // Query the tablet's mysqld directly as the targets will have denied table entries. - dbc, err := tablet.TabletConn(targetKs, true) + dbc, err := tablet.TabletConn(defaultTargetKs, true) require.NoError(t, err) defer dbc.Close() if res := execQuery(t, dbc, "select cid from customer"); len(res.Rows) > 0 { @@ -867,8 +867,8 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl // Insert multiple rows in the loadtest table and immediately delete them to confirm that bulk delete // works the same way with the vplayer optimization enabled and disabled. Currently this optimization // is disabled by default, but enabled in TestCellAliasVreplicationWorkflow. 
- execVtgateQuery(t, vtgateConn, sourceKs, "insert into loadtest(id, name) values(10001, 'tempCustomer'), (10002, 'tempCustomer2'), (10003, 'tempCustomer3'), (10004, 'tempCustomer4')") - execVtgateQuery(t, vtgateConn, sourceKs, "delete from loadtest where id > 10000") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "insert into loadtest(id, name) values(10001, 'tempCustomer'), (10002, 'tempCustomer2'), (10003, 'tempCustomer3'), (10004, 'tempCustomer4')") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "delete from loadtest where id > 10000") // Confirm that all partial query metrics get updated when we are testing the noblob mode. t.Run("validate partial query counts", func(t *testing.T) { @@ -882,7 +882,7 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl totalInserts, totalUpdates, totalInsertQueries, totalUpdateQueries := 0, 0, 0, 0 for _, tab := range []*cluster.VttabletProcess{tablet200, tablet300} { - insertCount, updateCount, insertQueries, updateQueries := getPartialMetrics(t, fmt.Sprintf("%s.0.p2c.1", sourceKs), tab) + insertCount, updateCount, insertQueries, updateQueries := getPartialMetrics(t, fmt.Sprintf("%s.0.p2c.1", defaultSourceKs), tab) totalInserts += insertCount totalUpdates += updateCount totalInsertQueries += insertQueries @@ -896,10 +896,10 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl }) query := "select cid from customer" - assertQueryExecutesOnTablet(t, vtgateConn, productTab, sourceKs, query, query) + assertQueryExecutesOnTablet(t, vtgateConn, productTab, defaultSourceKs, query, query) insertQuery1 := "insert into customer(cid, name) values(1001, 'tempCustomer1')" matchInsertQuery1 := "insert into customer(cid, `name`) values (:vtg1 /* INT64 */, :vtg2 /* VARCHAR */)" - assertQueryExecutesOnTablet(t, vtgateConn, productTab, sourceKs, insertQuery1, matchInsertQuery1) + assertQueryExecutesOnTablet(t, vtgateConn, productTab, defaultSourceKs, insertQuery1, 
matchInsertQuery1) // FIXME for some reason, these inserts fails on mac, need to investigate, some // vreplication bug because of case insensitiveness of table names on mac? @@ -912,25 +912,25 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl if err != nil { require.FailNow(t, output) } - execVtgateQuery(t, vtgateConn, sourceKs, fmt.Sprintf("update `%s` set name='xyz'", tbl)) + execVtgateQuery(t, vtgateConn, defaultSourceKs, fmt.Sprintf("update `%s` set name='xyz'", tbl)) } } doVDiff(t, ksWorkflow, "") cellNames := getCellNames(cells) switchReadsDryRun(t, workflowType, cellNames, ksWorkflow, dryRunResultsReadCustomerShard) switchReads(t, workflowType, cellNames, ksWorkflow, false) - assertQueryExecutesOnTablet(t, vtgateConn, productTab, targetKs, query, query) + assertQueryExecutesOnTablet(t, vtgateConn, productTab, defaultTargetKs, query, query) var commit func(t *testing.T) if withOpenTx { commit, _ = vc.startQuery(t, openTxQuery) } switchWritesDryRun(t, workflowType, ksWorkflow, dryRunResultsSwitchWritesCustomerShard) - shardNames := make([]string, 0, len(vc.Cells[defaultCell.Name].Keyspaces[sourceKs].Shards)) - for shardName := range maps.Keys(vc.Cells[defaultCell.Name].Keyspaces[sourceKs].Shards) { + shardNames := make([]string, 0, len(vc.Cells[defaultCell.Name].Keyspaces[defaultSourceKs].Shards)) + for shardName := range maps.Keys(vc.Cells[defaultCell.Name].Keyspaces[defaultSourceKs].Shards) { shardNames = append(shardNames, shardName) } - testSwitchTrafficPermissionChecks(t, workflowType, sourceKs, shardNames, targetKs, workflow) + testSwitchTrafficPermissionChecks(t, workflowType, defaultSourceKs, shardNames, defaultTargetKs, workflow) testSwitchWritesErrorHandling(t, []*cluster.VttabletProcess{productTab}, []*cluster.VttabletProcess{customerTab1, customerTab2}, workflow, workflowType) @@ -938,12 +938,12 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl // Now let's confirm that it works as 
expected with an error. switchWrites(t, workflowType, ksWorkflow, false) - checkThatVDiffFails(t, targetKs, workflow) + checkThatVDiffFails(t, defaultTargetKs, workflow) // The original unsharded customer data included an insert with the // vindex column (cid) of 999999, so the backing sequence table should // now have a next_id of 1000000 after SwitchTraffic. - res := execVtgateQuery(t, vtgateConn, sourceKs, "select next_id from customer_seq where id = 0") + res := execVtgateQuery(t, vtgateConn, defaultSourceKs, "select next_id from customer_seq where id = 0") require.Equal(t, "1000000", res.Rows[0][0].ToString()) if withOpenTx && commit != nil { @@ -952,65 +952,65 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl catchup(t, productTab, workflow, "MoveTables") - doVDiff(t, fmt.Sprintf("%s.p2c_reverse", sourceKs), "") + doVDiff(t, fmt.Sprintf("%s.p2c_reverse", defaultSourceKs), "") if withOpenTx { execVtgateQuery(t, vtgateConn, "", deleteOpenTxQuery) } - ksShards := []string{fmt.Sprintf("%s/0", sourceKs), fmt.Sprintf("%s/-80", targetKs), fmt.Sprintf("%s/80-", targetKs)} + ksShards := []string{fmt.Sprintf("%s/0", defaultSourceKs), fmt.Sprintf("%s/-80", defaultTargetKs), fmt.Sprintf("%s/80-", defaultTargetKs)} printShardPositions(vc, ksShards) insertQuery2 := "insert into customer(name, cid) values('tempCustomer2', 100)" matchInsertQuery2 := "insert into customer(`name`, cid) values (:vtg1 /* VARCHAR */, :_cid_0)" - assertQueryDoesNotExecutesOnTablet(t, vtgateConn, productTab, targetKs, insertQuery2, matchInsertQuery2) + assertQueryDoesNotExecutesOnTablet(t, vtgateConn, productTab, defaultTargetKs, insertQuery2, matchInsertQuery2) insertQuery2 = "insert into customer(name, cid) values('tempCustomer3', 101)" // ID 101, hence due to reverse_bits in shard 80- - assertQueryExecutesOnTablet(t, vtgateConn, customerTab2, targetKs, insertQuery2, matchInsertQuery2) + assertQueryExecutesOnTablet(t, vtgateConn, customerTab2, defaultTargetKs, 
insertQuery2, matchInsertQuery2) insertQuery2 = "insert into customer(name, cid) values('tempCustomer4', 102)" // ID 102, hence due to reverse_bits in shard -80 - assertQueryExecutesOnTablet(t, vtgateConn, customerTab1, targetKs, insertQuery2, matchInsertQuery2) + assertQueryExecutesOnTablet(t, vtgateConn, customerTab1, defaultTargetKs, insertQuery2, matchInsertQuery2) - execVtgateQuery(t, vtgateConn, targetKs, "update customer set meta = convert(x'7b7d' using utf8mb4) where cid = 1") + execVtgateQuery(t, vtgateConn, defaultTargetKs, "update customer set meta = convert(x'7b7d' using utf8mb4) where cid = 1") if testReverse { // Reverse Replicate switchReads(t, workflowType, cellNames, ksWorkflow, true) printShardPositions(vc, ksShards) switchWrites(t, workflowType, ksWorkflow, true) - output, err := vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", targetKs, "show", "--workflow", workflow) + output, err := vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", defaultTargetKs, "show", "--workflow", workflow) require.NoError(t, err) - require.Contains(t, output, fmt.Sprintf("'%s.reverse_bits'", targetKs)) - require.Contains(t, output, fmt.Sprintf("'%s.bmd5'", targetKs)) + require.Contains(t, output, fmt.Sprintf("'%s.reverse_bits'", defaultTargetKs)) + require.Contains(t, output, fmt.Sprintf("'%s.bmd5'", defaultTargetKs)) insertQuery1 = "insert into customer(cid, name) values(1002, 'tempCustomer5')" - assertQueryExecutesOnTablet(t, vtgateConn, productTab, sourceKs, insertQuery1, matchInsertQuery1) + assertQueryExecutesOnTablet(t, vtgateConn, productTab, defaultSourceKs, insertQuery1, matchInsertQuery1) // both inserts go into 80-, this tests the edge-case where a stream (-80) has no relevant new events after the previous switch insertQuery1 = "insert into customer(cid, name) values(1003, 'tempCustomer6')" - assertQueryDoesNotExecutesOnTablet(t, vtgateConn, customerTab1, targetKs, insertQuery1, matchInsertQuery1) + 
assertQueryDoesNotExecutesOnTablet(t, vtgateConn, customerTab1, defaultTargetKs, insertQuery1, matchInsertQuery1) insertQuery1 = "insert into customer(cid, name) values(1004, 'tempCustomer7')" - assertQueryDoesNotExecutesOnTablet(t, vtgateConn, customerTab2, targetKs, insertQuery1, matchInsertQuery1) + assertQueryDoesNotExecutesOnTablet(t, vtgateConn, customerTab2, defaultTargetKs, insertQuery1, matchInsertQuery1) - waitForNoWorkflowLag(t, vc, targetKs, workflow) + waitForNoWorkflowLag(t, vc, defaultTargetKs, workflow) // Go forward again switchReads(t, workflowType, cellNames, ksWorkflow, false) switchWrites(t, workflowType, ksWorkflow, false) var exists bool - exists, err = isTableInDenyList(t, vc, fmt.Sprintf("%s/0", sourceKs), "customer") + exists, err = isTableInDenyList(t, vc, fmt.Sprintf("%s/0", defaultSourceKs), "customer") require.NoError(t, err, "Error getting denylist for customer:0") require.True(t, exists) - moveTablesAction(t, "Complete", cellNames, workflow, sourceKs, targetKs, tables) + moveTablesAction(t, "Complete", cellNames, workflow, defaultSourceKs, defaultTargetKs, tables) - exists, err = isTableInDenyList(t, vc, fmt.Sprintf("%s/0", sourceKs), "customer") + exists, err = isTableInDenyList(t, vc, fmt.Sprintf("%s/0", defaultSourceKs), "customer") require.NoError(t, err, "Error getting denylist for customer:0") require.False(t, exists) for _, shard := range strings.Split("-80,80-", ",") { - expectNumberOfStreams(t, vtgateConn, "shardCustomerTargetStreams", "p2c", fmt.Sprintf("%s:%s", targetKs, shard), 0) + expectNumberOfStreams(t, vtgateConn, "shardCustomerTargetStreams", "p2c", fmt.Sprintf("%s:%s", defaultTargetKs, shard), 0) } - expectNumberOfStreams(t, vtgateConn, "shardCustomerReverseStreams", "p2c_reverse", fmt.Sprintf("%s:0", sourceKs), 0) + expectNumberOfStreams(t, vtgateConn, "shardCustomerReverseStreams", "p2c_reverse", fmt.Sprintf("%s:0", defaultSourceKs), 0) var found bool found, err = checkIfTableExists(t, vc, "zone1-100", 
"customer") @@ -1022,22 +1022,22 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl require.True(t, found) insertQuery2 = "insert into customer(name, cid) values('tempCustomer8', 103)" // ID 103, hence due to reverse_bits in shard 80- - assertQueryDoesNotExecutesOnTablet(t, vtgateConn, productTab, targetKs, insertQuery2, matchInsertQuery2) + assertQueryDoesNotExecutesOnTablet(t, vtgateConn, productTab, defaultTargetKs, insertQuery2, matchInsertQuery2) insertQuery2 = "insert into customer(name, cid) values('tempCustomer10', 104)" // ID 105, hence due to reverse_bits in shard -80 - assertQueryExecutesOnTablet(t, vtgateConn, customerTab1, targetKs, insertQuery2, matchInsertQuery2) + assertQueryExecutesOnTablet(t, vtgateConn, customerTab1, defaultTargetKs, insertQuery2, matchInsertQuery2) insertQuery2 = "insert into customer(name, cid) values('tempCustomer9', 105)" // ID 104, hence due to reverse_bits in shard 80- - assertQueryExecutesOnTablet(t, vtgateConn, customerTab2, targetKs, insertQuery2, matchInsertQuery2) + assertQueryExecutesOnTablet(t, vtgateConn, customerTab2, defaultTargetKs, insertQuery2, matchInsertQuery2) - execVtgateQuery(t, vtgateConn, targetKs, "delete from customer where name like 'tempCustomer%'") - waitForRowCountInTablet(t, customerTab1, targetKs, "customer", 1) - waitForRowCountInTablet(t, customerTab2, targetKs, "customer", 2) - waitForRowCount(t, vtgateConn, targetKs, fmt.Sprintf("%s.customer", sqlescape.EscapeID(targetKs)), 3) + execVtgateQuery(t, vtgateConn, defaultTargetKs, "delete from customer where name like 'tempCustomer%'") + waitForRowCountInTablet(t, customerTab1, defaultTargetKs, "customer", 1) + waitForRowCountInTablet(t, customerTab2, defaultTargetKs, "customer", 2) + waitForRowCount(t, vtgateConn, defaultTargetKs, fmt.Sprintf("%s.customer", sqlescape.EscapeID(defaultTargetKs)), 3) query = "insert into customer (name, cid) values('george', 5)" - execVtgateQuery(t, vtgateConn, targetKs, query) - 
waitForRowCountInTablet(t, customerTab1, targetKs, "customer", 1) - waitForRowCountInTablet(t, customerTab2, targetKs, "customer", 3) - waitForRowCount(t, vtgateConn, targetKs, fmt.Sprintf("%s.customer", sqlescape.EscapeID(targetKs)), 4) + execVtgateQuery(t, vtgateConn, defaultTargetKs, query) + waitForRowCountInTablet(t, customerTab1, defaultTargetKs, "customer", 1) + waitForRowCountInTablet(t, customerTab2, defaultTargetKs, "customer", 3) + waitForRowCount(t, vtgateConn, defaultTargetKs, fmt.Sprintf("%s.customer", sqlescape.EscapeID(defaultTargetKs)), 4) } }) } @@ -1047,8 +1047,8 @@ func validateRollupReplicates(t *testing.T) { insertMoreProducts(t) vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() - waitForRowCount(t, vtgateConn, sourceKs, "rollup", 1) - waitForQueryResult(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "select rollupname, kount from rollup", + waitForRowCount(t, vtgateConn, defaultSourceKs, "rollup", 1) + waitForQueryResult(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "select rollupname, kount from rollup", `[[VARCHAR("total") INT32(5)]]`) }) } @@ -1058,12 +1058,12 @@ func reshardCustomer2to4Split(t *testing.T, cells []*Cell, sourceCellOrAlias str vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() counts := map[string]int{"zone1-600": 4, "zone1-700": 5, "zone1-800": 6, "zone1-900": 5} - reshard(t, targetKs, "customer", "c2c4", "-80,80-", "-40,40-80,80-c0,c0-", + reshard(t, defaultTargetKs, "customer", "c2c4", "-80,80-", "-40,40-80,80-c0,c0-", 600, counts, nil, nil, cells, sourceCellOrAlias, 1) - waitForRowCount(t, vtgateConn, targetKs, "customer", 20) + waitForRowCount(t, vtgateConn, defaultTargetKs, "customer", 20) query := "insert into customer (name) values('yoko')" - execVtgateQuery(t, vtgateConn, targetKs, query) - waitForRowCount(t, vtgateConn, targetKs, "customer", 21) + execVtgateQuery(t, vtgateConn, 
defaultTargetKs, query) + waitForRowCount(t, vtgateConn, defaultTargetKs, "customer", 21) }) } @@ -1134,7 +1134,7 @@ func reshardMerchant3to1Merge(t *testing.T) { func reshardCustomer3to2SplitMerge(t *testing.T) { // -40,40-80,80-c0 => merge/split, c0- stays the same ending up with 3 t.Run("reshardCustomer3to2SplitMerge", func(t *testing.T) { counts := map[string]int{"zone1-1000": 8, "zone1-1100": 8, "zone1-1200": 5} - reshard(t, targetKs, "customer", "c4c3", "-40,40-80,80-c0", "-60,60-c0", + reshard(t, defaultTargetKs, "customer", "c4c3", "-40,40-80,80-c0", "-60,60-c0", 1000, counts, nil, nil, nil, "", 1) }) } @@ -1142,7 +1142,7 @@ func reshardCustomer3to2SplitMerge(t *testing.T) { // -40,40-80,80-c0 => merge/s func reshardCustomer3to1Merge(t *testing.T) { // to unsharded t.Run("reshardCustomer3to1Merge", func(t *testing.T) { counts := map[string]int{"zone1-1500": 21} - reshard(t, targetKs, "customer", "c3c1", "-60,60-c0,c0-", "0", + reshard(t, defaultTargetKs, "customer", "c3c1", "-60,60-c0,c0-", "0", 1500, counts, nil, nil, nil, "", 3) }) } @@ -1161,7 +1161,7 @@ func reshard(t *testing.T, ksName string, tableName string, workflow string, sou callNames := getCellNames(cells) ksWorkflow := ksName + "." 
+ workflow keyspace := vc.Cells[defaultCell.Name].Keyspaces[ksName] - require.NoError(t, vc.AddShards(t, cells, keyspace, targetShards, defaultReplicas, defaultRdonly, tabletIDBase, targetKsOpts)) + require.NoError(t, vc.AddShards(t, cells, keyspace, targetShards, defaultReplicas, defaultRdonly, tabletIDBase, defaultTargetKsOpts)) tablets := vc.getVttabletsInKeyspace(t, defaultCell, ksName, "primary") var sourceTablets, targetTablets []*cluster.VttabletProcess @@ -1218,11 +1218,11 @@ func shardOrders(t *testing.T) { workflow := "o2c" cell := defaultCell.Name tables := "orders" - ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) - applyVSchema(t, ordersVSchema, targetKs) - moveTablesAction(t, "Create", cell, workflow, sourceKs, targetKs, tables) + ksWorkflow := fmt.Sprintf("%s.%s", defaultTargetKs, workflow) + applyVSchema(t, ordersVSchema, defaultTargetKs) + moveTablesAction(t, "Create", cell, workflow, defaultSourceKs, defaultTargetKs, tables) - custKs := vc.Cells[defaultCell.Name].Keyspaces[targetKs] + custKs := vc.Cells[defaultCell.Name].Keyspaces[defaultTargetKs] customerTab1 := custKs.Shards["-80"].Tablets["zone1-200"].Vttablet customerTab2 := custKs.Shards["80-"].Tablets["zone1-300"].Vttablet workflowType := "MoveTables" @@ -1231,10 +1231,10 @@ func shardOrders(t *testing.T) { doVDiff(t, ksWorkflow, "") switchReads(t, workflowType, strings.Join(vc.CellNames, ","), ksWorkflow, false) switchWrites(t, workflowType, ksWorkflow, false) - moveTablesAction(t, "Complete", cell, workflow, sourceKs, targetKs, tables) - waitForRowCountInTablet(t, customerTab1, targetKs, "orders", 1) - waitForRowCountInTablet(t, customerTab2, targetKs, "orders", 2) - waitForRowCount(t, vtgateConn, targetKs, "orders", 3) + moveTablesAction(t, "Complete", cell, workflow, defaultSourceKs, defaultTargetKs, tables) + waitForRowCountInTablet(t, customerTab1, defaultTargetKs, "orders", 1) + waitForRowCountInTablet(t, customerTab2, defaultTargetKs, "orders", 2) + waitForRowCount(t, 
vtgateConn, defaultTargetKs, "orders", 3) }) } @@ -1256,10 +1256,10 @@ func shardMerchant(t *testing.T) { targetKs := merchantKeyspace tables := "merchant" ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) - if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, merchantKeyspace, "-80,80-", merchantVSchema, "", defaultReplicas, defaultRdonly, 400, targetKsOpts); err != nil { + if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, merchantKeyspace, "-80,80-", merchantVSchema, "", defaultReplicas, defaultRdonly, 400, defaultTargetKsOpts); err != nil { t.Fatal(err) } - moveTablesAction(t, "Create", cell, workflow, sourceKs, targetKs, tables) + moveTablesAction(t, "Create", cell, workflow, defaultSourceKs, targetKs, tables) merchantKs := vc.Cells[defaultCell.Name].Keyspaces[merchantKeyspace] merchantTab1 := merchantKs.Shards["-80"].Tablets["zone1-400"].Vttablet merchantTab2 := merchantKs.Shards["80-"].Tablets["zone1-500"].Vttablet @@ -1278,7 +1278,7 @@ func shardMerchant(t *testing.T) { if err != nil { require.FailNow(t, output) } - moveTablesAction(t, "Complete", cell, workflow, sourceKs, targetKs, tables) + moveTablesAction(t, "Complete", cell, workflow, defaultSourceKs, targetKs, tables) waitForRowCountInTablet(t, merchantTab1, merchantKeyspace, "merchant", 1) waitForRowCountInTablet(t, merchantTab2, merchantKeyspace, "merchant", 1) @@ -1321,9 +1321,9 @@ func testMaterializeWithNonExistentTable(t *testing.T) { func materializeProduct(t *testing.T) { t.Run("materializeProduct", func(t *testing.T) { - // Materializing from sourceKs keyspace to targetKs keyspace. + // Materializing from defaultSourceKs keyspace to defaultTargetKs keyspace. 
workflow := "cproduct" - keyspace := targetKs + keyspace := defaultTargetKs defaultCell := vc.Cells[vc.CellNames[0]] applyVSchema(t, materializeProductVSchema, keyspace) materialize(t, materializeProductSpec) @@ -1333,10 +1333,10 @@ func materializeProduct(t *testing.T) { waitForRowCountInTablet(t, tab, keyspace, workflow, 5) } - productTablets := vc.getVttabletsInKeyspace(t, defaultCell, sourceKs, "primary") + productTablets := vc.getVttabletsInKeyspace(t, defaultCell, defaultSourceKs, "primary") t.Run("throttle-app-product", func(t *testing.T) { // Now, throttle the source side component (vstreamer), and insert some rows. - err := throttler.ThrottleKeyspaceApp(vc.VtctldClient, sourceKs, sourceThrottlerAppName) + err := throttler.ThrottleKeyspaceApp(vc.VtctldClient, defaultSourceKs, sourceThrottlerAppName) assert.NoError(t, err) for _, tab := range productTablets { status, err := throttler.GetThrottlerStatus(vc.VtctldClient, &cluster.Vttablet{Alias: tab.Name}) @@ -1365,12 +1365,12 @@ func materializeProduct(t *testing.T) { for _, tab := range customerTablets { waitForRowCountInTablet(t, tab, keyspace, workflow, 5) // Confirm that we updated the stats on the target tablets as expected. - confirmVReplicationThrottling(t, tab, sourceKs, workflow, sourceThrottlerAppName) + confirmVReplicationThrottling(t, tab, defaultSourceKs, workflow, sourceThrottlerAppName) } }) t.Run("unthrottle-app-product", func(t *testing.T) { // Unthrottle the vstreamer component, and expect the rows to show up. - err := throttler.UnthrottleKeyspaceApp(vc.VtctldClient, sourceKs, sourceThrottlerAppName) + err := throttler.UnthrottleKeyspaceApp(vc.VtctldClient, defaultSourceKs, sourceThrottlerAppName) assert.NoError(t, err) for _, tab := range productTablets { // Give time for unthrottling to take effect and for targets to fetch data. 
@@ -1419,7 +1419,7 @@ func materializeProduct(t *testing.T) { for _, tab := range customerTablets { waitForRowCountInTablet(t, tab, keyspace, workflow, 8) // Confirm that we updated the stats on the target tablets as expected. - confirmVReplicationThrottling(t, tab, sourceKs, workflow, targetThrottlerAppName) + confirmVReplicationThrottling(t, tab, defaultSourceKs, workflow, targetThrottlerAppName) } }) t.Run("unthrottle-app-customer", func(t *testing.T) { @@ -1445,13 +1445,13 @@ func materializeRollup(t *testing.T) { vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() workflow := "rollup" - applyVSchema(t, materializeSalesVSchema, sourceKs) + applyVSchema(t, materializeSalesVSchema, defaultSourceKs) defaultCell := vc.Cells[vc.CellNames[0]] - productTab := vc.Cells[defaultCell.Name].Keyspaces[sourceKs].Shards["0"].Tablets["zone1-100"].Vttablet + productTab := vc.Cells[defaultCell.Name].Keyspaces[defaultSourceKs].Shards["0"].Tablets["zone1-100"].Vttablet materialize(t, materializeRollupSpec) catchup(t, productTab, workflow, "Materialize") - waitForRowCount(t, vtgateConn, sourceKs, "rollup", 1) - waitForQueryResult(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "select rollupname, kount from rollup", + waitForRowCount(t, vtgateConn, defaultSourceKs, "rollup", 1) + waitForQueryResult(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "select rollupname, kount from rollup", `[[VARCHAR("total") INT32(2)]]`) }) } @@ -1460,13 +1460,13 @@ func materializeSales(t *testing.T) { t.Run("materializeSales", func(t *testing.T) { vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() - applyVSchema(t, materializeSalesVSchema, sourceKs) + applyVSchema(t, materializeSalesVSchema, defaultSourceKs) materialize(t, materializeSalesSpec) defaultCell := vc.Cells[vc.CellNames[0]] - productTab := 
vc.Cells[defaultCell.Name].Keyspaces[sourceKs].Shards["0"].Tablets["zone1-100"].Vttablet + productTab := vc.Cells[defaultCell.Name].Keyspaces[defaultSourceKs].Shards["0"].Tablets["zone1-100"].Vttablet catchup(t, productTab, "sales", "Materialize") - waitForRowCount(t, vtgateConn, sourceKs, "sales", 2) - waitForQueryResult(t, vtgateConn, fmt.Sprintf("%s:0", sourceKs), "select kount, amount from sales", + waitForRowCount(t, vtgateConn, defaultSourceKs, "sales", 2) + waitForQueryResult(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "select kount, amount from sales", `[[INT32(1) INT32(10)] [INT32(2) INT32(35)]]`) }) } @@ -1580,12 +1580,12 @@ func catchup(t *testing.T, vttablet *cluster.VttabletProcess, workflow, info str vttablet.WaitForVReplicationToCatchup(t, workflow, fmt.Sprintf("vt_%s", vttablet.Keyspace), sidecarDBName, maxWait) } -func moveTablesAction(t *testing.T, action, cell, workflow, sourceKs, targetKs, tables string, extraFlags ...string) { +func moveTablesAction(t *testing.T, action, cell, workflow, defaultSourceKs, defaultTargetKs, tables string, extraFlags ...string) { var err error - args := []string{"MoveTables", "--workflow=" + workflow, "--target-keyspace=" + targetKs, action} + args := []string{"MoveTables", "--workflow=" + workflow, "--target-keyspace=" + defaultTargetKs, action} switch strings.ToLower(action) { case strings.ToLower(workflowActionCreate): - extraFlags = append(extraFlags, "--source-keyspace="+sourceKs, "--tables="+tables, "--cells="+cell, "--tablet-types=primary,replica,rdonly") + extraFlags = append(extraFlags, "--source-keyspace="+defaultSourceKs, "--tables="+tables, "--cells="+cell, "--tablet-types=primary,replica,rdonly") case strings.ToLower(workflowActionSwitchTraffic): extraFlags = append(extraFlags, "--initialize-target-sequences") } @@ -1599,9 +1599,9 @@ func moveTablesAction(t *testing.T, action, cell, workflow, sourceKs, targetKs, t.Fatalf("MoveTables %s command failed with %+v\n", action, err) } } -func 
moveTablesActionWithTabletTypes(t *testing.T, action, cell, workflow, sourceKs, targetKs, tables string, tabletTypes string, ignoreErrors bool) { - if err := vc.VtctldClient.ExecuteCommand("MoveTables", "--workflow="+workflow, "--target-keyspace="+targetKs, action, - "--source-keyspace="+sourceKs, "--tables="+tables, "--cells="+cell, "--tablet-types="+tabletTypes); err != nil { +func moveTablesActionWithTabletTypes(t *testing.T, action, cell, workflow, defaultSourceKs, defaultTargetKs, tables string, tabletTypes string, ignoreErrors bool) { + if err := vc.VtctldClient.ExecuteCommand("MoveTables", "--workflow="+workflow, "--target-keyspace="+defaultTargetKs, action, + "--source-keyspace="+defaultSourceKs, "--tables="+tables, "--cells="+cell, "--tablet-types="+tabletTypes); err != nil { if !ignoreErrors { t.Fatalf("MoveTables %s command failed with %+v\n", action, err) } @@ -1715,14 +1715,14 @@ func switchWrites(t *testing.T, workflowType, ksWorkflow string, reverse bool) { } const SwitchWritesTimeout = "91s" // max: 3 tablet picker 30s waits + 1 ensureCanSwitch(t, workflowType, "", ksWorkflow) - targetKs, workflow, found := strings.Cut(ksWorkflow, ".") + defaultTargetKs, workflow, found := strings.Cut(ksWorkflow, ".") require.True(t, found) if workflowType == binlogdatapb.VReplicationWorkflowType_MoveTables.String() { - moveTablesAction(t, command, defaultCellName, workflow, sourceKs, targetKs, "", "--timeout="+SwitchWritesTimeout, "--tablet-types=primary") + moveTablesAction(t, command, defaultCellName, workflow, defaultSourceKs, defaultTargetKs, "", "--timeout="+SwitchWritesTimeout, "--tablet-types=primary") return } output, err := vc.VtctldClient.ExecuteCommandWithOutput(workflowType, "--tablet-types=primary", "--workflow", workflow, - "--target-keyspace", targetKs, command, "--timeout="+SwitchWritesTimeout, "--initialize-target-sequences") + "--target-keyspace", defaultTargetKs, command, "--timeout="+SwitchWritesTimeout, "--initialize-target-sequences") if 
output != "" { fmt.Printf("Output of switching writes with vtctldclient for %s:\n++++++\n%s\n--------\n", ksWorkflow, output) } @@ -1964,13 +1964,13 @@ func printSwitchWritesExtraDebug(t *testing.T, ksWorkflow, msg string) { debug := true if debug { log.Infof("------------------- START Extra debug info %s Switch writes %s", msg, ksWorkflow) - ksShards := []string{fmt.Sprintf("%s/0", sourceKs), fmt.Sprintf("%s/-80", targetKs), fmt.Sprintf("%s/80-", targetKs)} + ksShards := []string{fmt.Sprintf("%s/0", defaultSourceKs), fmt.Sprintf("%s/-80", defaultTargetKs), fmt.Sprintf("%s/80-", defaultTargetKs)} printShardPositions(vc, ksShards) defaultCell := vc.Cells[vc.CellNames[0]] - custKs := vc.Cells[defaultCell.Name].Keyspaces[targetKs] + custKs := vc.Cells[defaultCell.Name].Keyspaces[defaultTargetKs] customerTab1 := custKs.Shards["-80"].Tablets["zone1-200"].Vttablet customerTab2 := custKs.Shards["80-"].Tablets["zone1-300"].Vttablet - productKs := vc.Cells[defaultCell.Name].Keyspaces[sourceKs] + productKs := vc.Cells[defaultCell.Name].Keyspaces[defaultSourceKs] productTab := productKs.Shards["0"].Tablets["zone1-100"].Vttablet tabs := []*cluster.VttabletProcess{productTab, customerTab1, customerTab2} queries := []string{ @@ -1997,11 +1997,11 @@ func printSwitchWritesExtraDebug(t *testing.T, ksWorkflow, msg string) { // // Returns a db connection used for the transaction which you can use for follow-up // work, such as rolling it back directly or using the releaseInnoDBRowHistory call. 
-func generateInnoDBRowHistory(t *testing.T, sourceKS string, neededTrxHistory int64) *mysql.Conn { +func generateInnoDBRowHistory(t *testing.T, defaultSourceKs string, neededTrxHistory int64) *mysql.Conn { dbConn1 := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) dbConn2 := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) - execQuery(t, dbConn1, "use "+sourceKS) - execQuery(t, dbConn2, "use "+sourceKS) + execQuery(t, dbConn1, "use "+defaultSourceKs) + execQuery(t, dbConn2, "use "+defaultSourceKs) offset := int64(1000) limit := int64(neededTrxHistory * 100) insertStmt := strings.Builder{} diff --git a/go/test/endtoend/vreplication/vreplication_test_env.go b/go/test/endtoend/vreplication/vreplication_test_env.go index 5362c03b2ca..2fe71b822ac 100644 --- a/go/test/endtoend/vreplication/vreplication_test_env.go +++ b/go/test/endtoend/vreplication/vreplication_test_env.go @@ -20,40 +20,40 @@ import "fmt" const ( // Defaults used for all tests. + defaultSourceKs = "test-product" + defaultTargetKs = "test-customer" workflowName = "wf1" - sourceKs = "test-product" - targetKs = "test-customer" - ksWorkflow = targetKs + "." + workflowName - reverseKsWorkflow = sourceKs + "." + workflowName + "_reverse" + ksWorkflow = defaultTargetKs + "." + workflowName + reverseKsWorkflow = defaultSourceKs + "." 
+ workflowName + "_reverse" defaultCellName = "zone1" ) var dryRunResultsSwitchWritesCustomerShard = []string{ - fmt.Sprintf("Lock keyspace %s", sourceKs), - fmt.Sprintf("Lock keyspace %s", targetKs), - fmt.Sprintf("Mirroring 0.00 percent of traffic from keyspace %s to keyspace %s for tablet types [PRIMARY]", sourceKs, targetKs), - fmt.Sprintf("/Stop writes on keyspace %s for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order]: [keyspace:%s;shard:0;position:", sourceKs, sourceKs), + fmt.Sprintf("Lock keyspace %s", defaultSourceKs), + fmt.Sprintf("Lock keyspace %s", defaultTargetKs), + fmt.Sprintf("Mirroring 0.00 percent of traffic from keyspace %s to keyspace %s for tablet types [PRIMARY]", defaultSourceKs, defaultTargetKs), + fmt.Sprintf("/Stop writes on keyspace %s for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order]: [keyspace:%s;shard:0;position:", defaultSourceKs, defaultSourceKs), "Wait for vreplication on stopped streams to catchup for up to 30s", "Create reverse vreplication workflow p2c_reverse", "Create journal entries on source databases", - fmt.Sprintf("Enable writes on keyspace %s for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order]", targetKs), - fmt.Sprintf("Switch routing from keyspace %s to keyspace %s", sourceKs, targetKs), + fmt.Sprintf("Enable writes on keyspace %s for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order]", defaultTargetKs), + fmt.Sprintf("Switch routing from keyspace %s to keyspace %s", defaultSourceKs, defaultTargetKs), "Routing rules for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order] will be updated", "Switch writes completed, freeze and delete vreplication streams on: [tablet:200,tablet:300]", "Start reverse vreplication streams on: [tablet:100]", - fmt.Sprintf("Mark 
vreplication streams frozen on: [keyspace:%s;shard:-80;tablet:200;workflow:p2c;dbname:vt_%s,keyspace:%s;shard:80-;tablet:300;workflow:p2c;dbname:vt_%s]", targetKs, targetKs, targetKs, targetKs), - fmt.Sprintf("Unlock keyspace %s", targetKs), - fmt.Sprintf("Unlock keyspace %s", sourceKs), + fmt.Sprintf("Mark vreplication streams frozen on: [keyspace:%s;shard:-80;tablet:200;workflow:p2c;dbname:vt_%s,keyspace:%s;shard:80-;tablet:300;workflow:p2c;dbname:vt_%s]", defaultTargetKs, defaultTargetKs, defaultTargetKs, defaultTargetKs), + fmt.Sprintf("Unlock keyspace %s", defaultTargetKs), + fmt.Sprintf("Unlock keyspace %s", defaultSourceKs), "", // Additional empty newline in the output } var dryRunResultsReadCustomerShard = []string{ - fmt.Sprintf("Lock keyspace %s", sourceKs), - fmt.Sprintf("Mirroring 0.00 percent of traffic from keyspace %s to keyspace %s for tablet types [RDONLY,REPLICA]", sourceKs, targetKs), - fmt.Sprintf("Switch reads for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order] to keyspace %s for tablet types [RDONLY,REPLICA]", targetKs), + fmt.Sprintf("Lock keyspace %s", defaultSourceKs), + fmt.Sprintf("Mirroring 0.00 percent of traffic from keyspace %s to keyspace %s for tablet types [RDONLY,REPLICA]", defaultSourceKs, defaultTargetKs), + fmt.Sprintf("Switch reads for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order] to keyspace %s for tablet types [RDONLY,REPLICA]", defaultTargetKs), "Routing rules for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order] will be updated", - fmt.Sprintf("Serving VSchema will be rebuilt for the %s keyspace", targetKs), - fmt.Sprintf("Unlock keyspace %s", sourceKs), + fmt.Sprintf("Serving VSchema will be rebuilt for the %s keyspace", defaultTargetKs), + fmt.Sprintf("Unlock keyspace %s", defaultSourceKs), "", // Additional empty newline in the output } diff --git 
a/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go b/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go index 409fd88448a..2dd70c65c15 100644 --- a/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go +++ b/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go @@ -66,8 +66,8 @@ func TestVtctldclientCLI(t *testing.T) { require.NotNil(t, zone2) defer vc.TearDown() - sourceKeyspaceName := sourceKs - targetKeyspaceName := targetKs + sourceKeyspaceName := defaultSourceKs + targetKeyspaceName := defaultTargetKs var mt iMoveTables workflowName := "wf1" @@ -184,7 +184,7 @@ func TestVtctldclientCLI(t *testing.T) { require.NotNil(vc.t, resp) require.NotNil(vc.t, resp.ShardStreams) require.Equal(vc.t, len(resp.ShardStreams), 2) - keyspace := targetKs + keyspace := defaultTargetKs for _, shard := range []string{"80-c0", "c0-"} { streams := resp.ShardStreams[fmt.Sprintf("%s/%s", keyspace, shard)] require.Equal(vc.t, 1, len(streams.Streams)) @@ -754,7 +754,7 @@ func validateReshardResponse(rs iReshard) { require.NotNil(vc.t, resp) require.NotNil(vc.t, resp.ShardStreams) require.Equal(vc.t, len(resp.ShardStreams), 2) - keyspace := targetKs + keyspace := defaultTargetKs for _, shard := range []string{"-40", "40-80"} { streams := resp.ShardStreams[fmt.Sprintf("%s/%s", keyspace, shard)] require.Equal(vc.t, 1, len(streams.Streams)) @@ -768,9 +768,9 @@ func validateReshardWorkflow(t *testing.T, workflows []*vtctldatapb.Workflow) { require.Equal(t, "reshard", wf.Name) require.Equal(t, binlogdatapb.VReplicationWorkflowType_Reshard.String(), wf.WorkflowType) require.Equal(t, "None", wf.WorkflowSubType) - require.Equal(t, targetKs, wf.Target.Keyspace) + require.Equal(t, defaultTargetKs, wf.Target.Keyspace) require.Equal(t, 2, len(wf.Target.Shards)) - require.Equal(t, targetKs, wf.Source.Keyspace) + require.Equal(t, defaultTargetKs, wf.Source.Keyspace) require.Equal(t, 1, len(wf.Source.Shards)) require.False(t, 
wf.DeferSecondaryKeys) @@ -919,9 +919,9 @@ func validateMoveTablesWorkflow(t *testing.T, workflows []*vtctldatapb.Workflow) require.Equal(t, "wf1", wf.Name) require.Equal(t, binlogdatapb.VReplicationWorkflowType_MoveTables.String(), wf.WorkflowType) require.Equal(t, "None", wf.WorkflowSubType) - require.Equal(t, targetKs, wf.Target.Keyspace) + require.Equal(t, defaultTargetKs, wf.Target.Keyspace) require.Equal(t, 2, len(wf.Target.Shards)) - require.Equal(t, sourceKs, wf.Source.Keyspace) + require.Equal(t, defaultSourceKs, wf.Source.Keyspace) require.Equal(t, 1, len(wf.Source.Shards)) require.False(t, wf.DeferSecondaryKeys) diff --git a/go/test/endtoend/vreplication/vschema_load_test.go b/go/test/endtoend/vreplication/vschema_load_test.go index e14d3be8720..8b93213b402 100644 --- a/go/test/endtoend/vreplication/vschema_load_test.go +++ b/go/test/endtoend/vreplication/vschema_load_test.go @@ -43,7 +43,7 @@ func TestVSchemaChangesUnderLoad(t *testing.T) { defer vc.TearDown() defaultCell := vc.Cells[vc.CellNames[0]] - vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, 1, 0, 100, sourceKsOpts) + vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, 1, 0, 100, defaultSourceKsOpts) vtgateConn := vc.GetVTGateConn(t) defer vtgateConn.Close() diff --git a/go/test/endtoend/vreplication/vstream_test.go b/go/test/endtoend/vreplication/vstream_test.go index d03a1c90fd2..0a4205a722e 100644 --- a/go/test/endtoend/vreplication/vstream_test.go +++ b/go/test/endtoend/vreplication/vstream_test.go @@ -49,7 +49,7 @@ func TestVStreamWithTablesToSkipCopyFlag(t *testing.T) { defaultRdonly = 0 defaultCell := vc.Cells[vc.CellNames[0]] - vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) + vc.AddKeyspace(t, []*Cell{defaultCell}, defaultSourceKs, "0", initialProductVSchema, initialProductSchema, 
defaultReplicas, defaultRdonly, 100, nil) verifyClusterHealth(t, vc) ctx := context.Background() @@ -60,7 +60,7 @@ func TestVStreamWithTablesToSkipCopyFlag(t *testing.T) { defer vstreamConn.Close() vgtid := &binlogdatapb.VGtid{ ShardGtids: []*binlogdatapb.ShardGtid{{ - Keyspace: sourceKs, + Keyspace: defaultSourceKs, Shard: "0", Gtid: "", }}} @@ -90,9 +90,9 @@ func TestVStreamWithTablesToSkipCopyFlag(t *testing.T) { // present in the filter before running the VStream. for range 10 { id++ - execVtgateQuery(t, vtgateConn, sourceKs, fmt.Sprintf("insert into customer (cid, name) values (%d, 'customer%d')", id+100, id)) - execVtgateQuery(t, vtgateConn, sourceKs, fmt.Sprintf("insert into product (pid, description) values (%d, 'description%d')", id+100, id)) - execVtgateQuery(t, vtgateConn, sourceKs, fmt.Sprintf("insert into merchant (mname, category) values ('mname%d', 'category%d')", id+100, id)) + execVtgateQuery(t, vtgateConn, defaultSourceKs, fmt.Sprintf("insert into customer (cid, name) values (%d, 'customer%d')", id+100, id)) + execVtgateQuery(t, vtgateConn, defaultSourceKs, fmt.Sprintf("insert into product (pid, description) values (%d, 'description%d')", id+100, id)) + execVtgateQuery(t, vtgateConn, defaultSourceKs, fmt.Sprintf("insert into merchant (mname, category) values ('mname%d', 'category%d')", id+100, id)) } // Stream events from the VStream API @@ -157,9 +157,9 @@ func TestVStreamWithTablesToSkipCopyFlag(t *testing.T) { } insertMu.Lock() id++ - execVtgateQuery(t, vtgateConn, sourceKs, fmt.Sprintf("insert into customer (cid, name) values (%d, 'customer%d')", id+100, id)) - execVtgateQuery(t, vtgateConn, sourceKs, fmt.Sprintf("insert into product (pid, description) values (%d, 'description%d')", id+100, id)) - execVtgateQuery(t, vtgateConn, sourceKs, fmt.Sprintf("insert into merchant (mname, category) values ('mname%d', 'category%d')", id+100, id)) + execVtgateQuery(t, vtgateConn, defaultSourceKs, fmt.Sprintf("insert into customer (cid, name) values (%d, 
'customer%d')", id+100, id)) + execVtgateQuery(t, vtgateConn, defaultSourceKs, fmt.Sprintf("insert into product (pid, description) values (%d, 'description%d')", id+100, id)) + execVtgateQuery(t, vtgateConn, defaultSourceKs, fmt.Sprintf("insert into merchant (mname, category) values ('mname%d', 'category%d')", id+100, id)) insertMu.Unlock() } }() @@ -169,9 +169,9 @@ func TestVStreamWithTablesToSkipCopyFlag(t *testing.T) { time.Sleep(10 * time.Second) // Give the vstream plenty of time to catchup done.Store(true) - qr1 := execVtgateQuery(t, vtgateConn, sourceKs, "select count(*) from customer") - qr2 := execVtgateQuery(t, vtgateConn, sourceKs, "select count(*) from product") - qr3 := execVtgateQuery(t, vtgateConn, sourceKs, "select count(*) from merchant") + qr1 := execVtgateQuery(t, vtgateConn, defaultSourceKs, "select count(*) from customer") + qr2 := execVtgateQuery(t, vtgateConn, defaultSourceKs, "select count(*) from product") + qr3 := execVtgateQuery(t, vtgateConn, defaultSourceKs, "select count(*) from merchant") require.NotNil(t, qr1) require.NotNil(t, qr2) require.NotNil(t, qr3) @@ -213,7 +213,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) { defaultRdonly = 0 defaultCell := vc.Cells[vc.CellNames[0]] - vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) + vc.AddKeyspace(t, []*Cell{defaultCell}, defaultSourceKs, "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) verifyClusterHealth(t, vc) insertInitialData(t) vtgate := defaultCell.Vtgates[0] @@ -228,7 +228,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) { defer vstreamConn.Close() vgtid := &binlogdatapb.VGtid{ ShardGtids: []*binlogdatapb.ShardGtid{{ - Keyspace: sourceKs, + Keyspace: defaultSourceKs, Shard: "0", Gtid: "", }}} @@ -260,7 +260,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) { } insertMu.Lock() id++ - execVtgateQuery(t, 
vtgateConn, sourceKs, fmt.Sprintf("insert into customer (cid, name) values (%d, 'customer%d')", id+100, id)) + execVtgateQuery(t, vtgateConn, defaultSourceKs, fmt.Sprintf("insert into customer (cid, name) values (%d, 'customer%d')", id+100, id)) insertMu.Unlock() } }() @@ -305,7 +305,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) { case 1: if failover { insertMu.Lock() - output, err := vc.VtctldClient.ExecuteCommandWithOutput("PlannedReparentShard", fmt.Sprintf("%s/0", sourceKs), "--new-primary=zone1-101") + output, err := vc.VtctldClient.ExecuteCommandWithOutput("PlannedReparentShard", fmt.Sprintf("%s/0", defaultSourceKs), "--new-primary=zone1-101") insertMu.Unlock() log.Infof("output of first PRS is %s", output) require.NoError(t, err) @@ -313,7 +313,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) { case 2: if failover { insertMu.Lock() - output, err := vc.VtctldClient.ExecuteCommandWithOutput("PlannedReparentShard", fmt.Sprintf("%s/0", sourceKs), "--new-primary=zone1-100") + output, err := vc.VtctldClient.ExecuteCommandWithOutput("PlannedReparentShard", fmt.Sprintf("%s/0", defaultSourceKs), "--new-primary=zone1-100") insertMu.Unlock() log.Infof("output of second PRS is %s", output) require.NoError(t, err) @@ -329,7 +329,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) { } } - qr := execVtgateQuery(t, vtgateConn, sourceKs, "select count(*) from customer") + qr := execVtgateQuery(t, vtgateConn, defaultSourceKs, "select count(*) from customer") require.NotNil(t, qr) // total number of row events found by the VStream API should match the rows inserted insertedRows, err := qr.Rows[0][0].ToCastInt64() @@ -654,7 +654,7 @@ func testVStreamCopyMultiKeyspaceReshard(t *testing.T, baseTabletID int) numEven tickCount++ switch tickCount { case 1: - reshard(t, "sharded", targetKs, "vstreamCopyMultiKeyspaceReshard", "-80,80-", "-40,40-", baseTabletID+400, nil, nil, nil, nil, defaultCellName, 1) + reshard(t, "sharded", 
defaultTargetKs, "vstreamCopyMultiKeyspaceReshard", "-80,80-", "-40,40-", baseTabletID+400, nil, nil, nil, nil, defaultCellName, 1) reshardDone = true case 60: done = true @@ -708,7 +708,7 @@ func TestMultiVStreamsKeyspaceReshard(t *testing.T) { require.NoError(t, err) // Add the new shards. - err = vc.AddShards(t, []*Cell{defaultCell}, keyspace, newShards, defaultReplicas, defaultRdonly, baseTabletID+2000, targetKsOpts) + err = vc.AddShards(t, []*Cell{defaultCell}, keyspace, newShards, defaultReplicas, defaultRdonly, baseTabletID+2000, defaultTargetKsOpts) require.NoError(t, err) vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) @@ -904,7 +904,7 @@ func TestMultiVStreamsKeyspaceStopOnReshard(t *testing.T) { require.NoError(t, err) // Add the new shards. - err = vc.AddShards(t, []*Cell{defaultCell}, keyspace, newShards, defaultReplicas, defaultRdonly, baseTabletID+2000, targetKsOpts) + err = vc.AddShards(t, []*Cell{defaultCell}, keyspace, newShards, defaultReplicas, defaultRdonly, baseTabletID+2000, defaultTargetKsOpts) require.NoError(t, err) vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) @@ -1103,7 +1103,7 @@ func TestVStreamStopOnReshardFalse(t *testing.T) { func TestVStreamWithKeyspacesToWatch(t *testing.T) { extraVTGateArgs = append(extraVTGateArgs, []string{ - utils.GetFlagVariantForTests("--keyspaces-to-watch"), sourceKs, + utils.GetFlagVariantForTests("--keyspaces-to-watch"), defaultSourceKs, }...) 
testVStreamWithFailover(t, false) @@ -1142,7 +1142,7 @@ func doVStream(t *testing.T, vc *VitessCluster, flags *vtgatepb.VStreamFlags) (n done := false vgtid := &binlogdatapb.VGtid{ ShardGtids: []*binlogdatapb.ShardGtid{{ - Keyspace: sourceKs, + Keyspace: defaultSourceKs, Shard: "0", Gtid: "", }}} @@ -1167,7 +1167,7 @@ func doVStream(t *testing.T, vc *VitessCluster, flags *vtgatepb.VStreamFlags) (n arr := strings.Split(rowEvent.TableName, ".") require.Equal(t, len(arr), 2) tableName := arr[1] - require.Equal(t, sourceKs, rowEvent.Keyspace) + require.Equal(t, defaultSourceKs, rowEvent.Keyspace) require.Equal(t, "0", rowEvent.Shard) numRowEvents[tableName]++ @@ -1176,7 +1176,7 @@ func doVStream(t *testing.T, vc *VitessCluster, flags *vtgatepb.VStreamFlags) (n arr := strings.Split(fieldEvent.TableName, ".") require.Equal(t, len(arr), 2) tableName := arr[1] - require.Equal(t, sourceKs, fieldEvent.Keyspace) + require.Equal(t, defaultSourceKs, fieldEvent.Keyspace) require.Equal(t, "0", fieldEvent.Shard) numFieldEvents[tableName]++ default: @@ -1215,7 +1215,7 @@ func TestVStreamHeartbeats(t *testing.T) { defaultRdonly = 0 defaultCell := vc.Cells[vc.CellNames[0]] - vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, "0", initialProductVSchema, initialProductSchema, + vc.AddKeyspace(t, []*Cell{defaultCell}, defaultSourceKs, "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) verifyClusterHealth(t, vc) insertInitialData(t) @@ -1271,7 +1271,7 @@ func TestVStreamPushdownFilters(t *testing.T) { }) defer vc.TearDown() require.NotNil(t, vc) - ks := sourceKs + ks := defaultSourceKs shard := "0" defaultCell := vc.Cells[vc.CellNames[0]] From e5676c0c419d11afe58cb2d1f727d9a531a9d7dd Mon Sep 17 00:00:00 2001 From: Matt Lord Date: Fri, 17 Oct 2025 14:40:35 +0000 Subject: [PATCH 08/13] Improve rest of default var names Signed-off-by: Matt Lord --- go/test/endtoend/vreplication/helper_test.go | 2 +- .../vreplication/movetables_buffering_test.go | 12 
+-- .../movetables_mirrortraffic_test.go | 2 +- .../vreplication/multi_tenant_test.go | 4 +- .../resharding_workflows_v2_test.go | 46 ++++----- .../vreplication/vreplication_test_env.go | 12 +-- .../vreplication_vtctldclient_cli_test.go | 95 +++++++++---------- 7 files changed, 86 insertions(+), 87 deletions(-) diff --git a/go/test/endtoend/vreplication/helper_test.go b/go/test/endtoend/vreplication/helper_test.go index 9cde0973b3b..d84f02e03f1 100644 --- a/go/test/endtoend/vreplication/helper_test.go +++ b/go/test/endtoend/vreplication/helper_test.go @@ -666,7 +666,7 @@ func confirmWorkflowHasCopiedNoData(t *testing.T, defaultTargetKs, workflow stri (pos.Exists() && pos.String() != "") { require.FailNowf(t, "Unexpected data copied in workflow", "The MoveTables workflow %q copied data in less than %s when it should have been waiting. Show output: %s", - ksWorkflow, defaultTimeout, output) + defaultKsWorkflow, defaultTimeout, output) } return true }) diff --git a/go/test/endtoend/vreplication/movetables_buffering_test.go b/go/test/endtoend/vreplication/movetables_buffering_test.go index 0f1846455f0..cc00073b493 100644 --- a/go/test/endtoend/vreplication/movetables_buffering_test.go +++ b/go/test/endtoend/vreplication/movetables_buffering_test.go @@ -26,10 +26,10 @@ func TestMoveTablesBuffering(t *testing.T) { currentWorkflowType = binlogdatapb.VReplicationWorkflowType_MoveTables setupMinimalTargetKeyspace(t) tables := "loadtest" - err := tstWorkflowExec(t, defaultCellName, workflowName, defaultSourceKs, defaultTargetKs, + err := tstWorkflowExec(t, defaultCellName, defaultWorkflowName, defaultSourceKs, defaultTargetKs, tables, workflowActionCreate, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) - waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, defaultKsWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) lg := newLoadGenerator(t, vc) go func() { @@ -37,10 
+37,10 @@ func TestMoveTablesBuffering(t *testing.T) { }() lg.waitForCount(1000) - catchup(t, targetTab1, workflowName, "MoveTables") - catchup(t, targetTab2, workflowName, "MoveTables") - vdiff(t, defaultTargetKs, workflowName, "", nil) - waitForLowLag(t, defaultTargetKs, workflowName) + catchup(t, targetTab1, defaultWorkflowName, "MoveTables") + catchup(t, targetTab2, defaultWorkflowName, "MoveTables") + vdiff(t, defaultTargetKs, defaultWorkflowName, "", nil) + waitForLowLag(t, defaultTargetKs, defaultWorkflowName) for i := 0; i < 10; i++ { tstWorkflowSwitchReadsAndWrites(t) time.Sleep(loadTestBufferingWindowDuration + 1*time.Second) diff --git a/go/test/endtoend/vreplication/movetables_mirrortraffic_test.go b/go/test/endtoend/vreplication/movetables_mirrortraffic_test.go index 90d880bb793..8d6ea97d2bb 100644 --- a/go/test/endtoend/vreplication/movetables_mirrortraffic_test.go +++ b/go/test/endtoend/vreplication/movetables_mirrortraffic_test.go @@ -57,7 +57,7 @@ func testMoveTablesMirrorTraffic(t *testing.T, flavor workflowFlavor) { mt.Create() confirmNoMirrorRules(t) - waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, defaultKsWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) // Mirror rules can be created after a MoveTables workflow is created. mt.MirrorTraffic() diff --git a/go/test/endtoend/vreplication/multi_tenant_test.go b/go/test/endtoend/vreplication/multi_tenant_test.go index c941c13d664..e2c3d2952c7 100644 --- a/go/test/endtoend/vreplication/multi_tenant_test.go +++ b/go/test/endtoend/vreplication/multi_tenant_test.go @@ -236,7 +236,7 @@ func TestMultiTenantSimple(t *testing.T) { // Create again and run it to completion. 
createFunc() - vdiff(t, targetKeyspace, workflowName, defaultCellName, nil) + vdiff(t, targetKeyspace, defaultWorkflowName, defaultCellName, nil) mt.SwitchReads() confirmOnlyReadsSwitched(t) @@ -396,7 +396,7 @@ func TestMultiTenantSharded(t *testing.T) { // Note: we cannot insert into the target keyspace since that is never routed to the source keyspace. lastIndex = insertRows(lastIndex, sourceKeyspace) waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKeyspace, mt.workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) - vdiff(t, targetKeyspace, workflowName, defaultCellName, nil) + vdiff(t, targetKeyspace, defaultWorkflowName, defaultCellName, nil) mt.SwitchReadsAndWrites() // Note: here we have already switched, and we can insert into the target keyspace, and it should get reverse // replicated to the source keyspace. The source keyspace is routed to the target keyspace at this point. diff --git a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go index 5c0d6e5695b..0010bd4fd35 100644 --- a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go +++ b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go @@ -74,14 +74,14 @@ var defaultWorkflowExecOptions = &workflowExecOptions{ } func createReshardWorkflow(t *testing.T, sourceShards, targetShards string) error { - err := tstWorkflowExec(t, defaultCellName, workflowName, defaultTargetKs, defaultTargetKs, + err := tstWorkflowExec(t, defaultCellName, defaultWorkflowName, defaultTargetKs, defaultTargetKs, "", workflowActionCreate, "", sourceShards, targetShards, defaultWorkflowExecOptions) require.NoError(t, err) - waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, defaultKsWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) confirmTablesHaveSecondaryKeys(t, []*cluster.VttabletProcess{targetTab1}, defaultTargetKs, "") 
- catchup(t, targetTab1, workflowName, "Reshard") - catchup(t, targetTab2, workflowName, "Reshard") - doVDiff(t, ksWorkflow, "") + catchup(t, targetTab1, defaultWorkflowName, "Reshard") + catchup(t, targetTab2, defaultWorkflowName, "Reshard") + doVDiff(t, defaultKsWorkflow, "") return nil } @@ -89,18 +89,18 @@ func createMoveTablesWorkflow(t *testing.T, tables string) { if tables == "" { tables = "customer" } - err := tstWorkflowExec(t, defaultCellName, workflowName, defaultSourceKs, defaultTargetKs, + err := tstWorkflowExec(t, defaultCellName, defaultWorkflowName, defaultSourceKs, defaultTargetKs, tables, workflowActionCreate, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) - waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, defaultKsWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) confirmTablesHaveSecondaryKeys(t, []*cluster.VttabletProcess{targetTab1}, defaultTargetKs, tables) - catchup(t, targetTab1, workflowName, "MoveTables") - catchup(t, targetTab2, workflowName, "MoveTables") - doVDiff(t, ksWorkflow, "") + catchup(t, targetTab1, defaultWorkflowName, "MoveTables") + catchup(t, targetTab2, defaultWorkflowName, "MoveTables") + doVDiff(t, defaultKsWorkflow, "") } func tstWorkflowAction(t *testing.T, action, tabletTypes, cells string) error { - return tstWorkflowExec(t, cells, workflowName, defaultSourceKs, defaultTargetKs, "customer", action, tabletTypes, "", "", defaultWorkflowExecOptions) + return tstWorkflowExec(t, cells, defaultWorkflowName, defaultSourceKs, defaultTargetKs, "customer", action, tabletTypes, "", "", defaultWorkflowExecOptions) } // tstWorkflowExec executes a MoveTables or Reshard workflow command using @@ -220,7 +220,7 @@ func testWorkflowUpdate(t *testing.T) { _, err := vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", "noexist", "update", "--workflow", "noexist", "--tablet-types", tabletTypes) require.Error(t, 
err) // Change the tablet-types to rdonly. - resp, err := vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", defaultTargetKs, "update", "--workflow", workflowName, "--tablet-types", "rdonly") + resp, err := vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", defaultTargetKs, "update", "--workflow", defaultWorkflowName, "--tablet-types", "rdonly") require.NoError(t, err, err) // Confirm that we changed the workflow. var ures vtctldatapb.WorkflowUpdateResponse @@ -230,7 +230,7 @@ func testWorkflowUpdate(t *testing.T) { require.Greater(t, len(ures.Details), 0) require.True(t, ures.Details[0].Changed) // Change tablet-types back to primary,replica,rdonly. - resp, err = vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", defaultTargetKs, "update", "--workflow", workflowName, "--tablet-types", tabletTypes) + resp, err = vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", defaultTargetKs, "update", "--workflow", defaultWorkflowName, "--tablet-types", tabletTypes) require.NoError(t, err, err) // Confirm that we changed the workflow. err = protojson.Unmarshal([]byte(resp), &ures) @@ -238,7 +238,7 @@ func testWorkflowUpdate(t *testing.T) { require.Greater(t, len(ures.Details), 0) require.True(t, ures.Details[0].Changed) // Execute a no-op as tablet-types is already primary,replica,rdonly. - resp, err = vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", defaultTargetKs, "update", "--workflow", workflowName, "--tablet-types", tabletTypes) + resp, err = vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", defaultTargetKs, "update", "--workflow", defaultWorkflowName, "--tablet-types", tabletTypes) require.NoError(t, err, err) // Confirm that we didn't change the workflow. 
err = protojson.Unmarshal([]byte(resp), &ures) @@ -313,13 +313,13 @@ func validateWritesRouteToTarget(t *testing.T) { } func revert(t *testing.T, workflowType string) { - switchWrites(t, workflowType, ksWorkflow, true) + switchWrites(t, workflowType, defaultKsWorkflow, true) validateWritesRouteToSource(t) - switchReadsNew(t, workflowType, getCellNames(nil), ksWorkflow, true) + switchReadsNew(t, workflowType, getCellNames(nil), defaultKsWorkflow, true) validateReadsRouteToSource(t, "replica") // cancel the workflow to cleanup - _, err := vc.VtctldClient.ExecuteCommandWithOutput(workflowType, "--target-keyspace", defaultTargetKs, "--workflow", workflowName, "cancel") + _, err := vc.VtctldClient.ExecuteCommandWithOutput(workflowType, "--target-keyspace", defaultTargetKs, "--workflow", defaultWorkflowName, "cancel") require.NoError(t, err, fmt.Sprintf("%s Cancel error: %v", workflowType, err)) } @@ -474,11 +474,11 @@ func testReplicatingWithPKEnumCols(t *testing.T) { deleteQuery := "delete from customer where cid = 2 and typ = 'soho'" insertQuery := "insert into customer(cid, name, typ, sport, meta) values(2, 'Paül','soho','cricket',convert(x'7b7d' using utf8mb4))" execVtgateQuery(t, vtgateConn, defaultSourceKs, deleteQuery) - waitForNoWorkflowLag(t, vc, defaultTargetKs, workflowName) - doVDiff(t, ksWorkflow, "") + waitForNoWorkflowLag(t, vc, defaultTargetKs, defaultWorkflowName) + doVDiff(t, defaultKsWorkflow, "") execVtgateQuery(t, vtgateConn, defaultSourceKs, insertQuery) - waitForNoWorkflowLag(t, vc, defaultTargetKs, workflowName) - doVDiff(t, ksWorkflow, "") + waitForNoWorkflowLag(t, vc, defaultTargetKs, defaultWorkflowName) + doVDiff(t, defaultKsWorkflow, "") } func testReshardV2Workflow(t *testing.T) { @@ -598,7 +598,7 @@ func testMoveTablesV2Workflow(t *testing.T) { // The purge table should get skipped/ignored // If it's not then we'll get an error as the table doesn't exist in the vschema createMoveTablesWorkflow(t, 
"customer,loadtest,vdiff_order,reftable,_vt_PURGE_4f9194b43b2011eb8a0104ed332e05c2_20221210194431") - waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, defaultKsWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) validateReadsRouteToSource(t, "replica,rdonly") validateWritesRouteToSource(t) @@ -960,7 +960,7 @@ func moveCustomerTableSwitchFlows(t *testing.T, cells []*Cell, sourceCellOrAlias switchWrites(t, workflowType, ksWorkflow, false) validateWritesRouteToTarget(t) - switchWrites(t, workflowType, reverseKsWorkflow, true) + switchWrites(t, workflowType, defaultReverseKsWorkflow, true) validateWritesRouteToSource(t) validateReadsRouteToSource(t, "replica") diff --git a/go/test/endtoend/vreplication/vreplication_test_env.go b/go/test/endtoend/vreplication/vreplication_test_env.go index 2fe71b822ac..d34c9d0e0ed 100644 --- a/go/test/endtoend/vreplication/vreplication_test_env.go +++ b/go/test/endtoend/vreplication/vreplication_test_env.go @@ -20,12 +20,12 @@ import "fmt" const ( // Defaults used for all tests. - defaultSourceKs = "test-product" - defaultTargetKs = "test-customer" - workflowName = "wf1" - ksWorkflow = defaultTargetKs + "." + workflowName - reverseKsWorkflow = defaultSourceKs + "." + workflowName + "_reverse" - defaultCellName = "zone1" + defaultSourceKs = "test-product" + defaultTargetKs = "test-customer" + defaultWorkflowName = "wf1" + defaultKsWorkflow = defaultTargetKs + "." + defaultWorkflowName + defaultReverseKsWorkflow = defaultSourceKs + "." 
+ defaultWorkflowName + "_reverse" + defaultCellName = "zone1" ) var dryRunResultsSwitchWritesCustomerShard = []string{ diff --git a/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go b/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go index 2dd70c65c15..4c7086a6ca9 100644 --- a/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go +++ b/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go @@ -69,7 +69,6 @@ func TestVtctldclientCLI(t *testing.T) { sourceKeyspaceName := defaultSourceKs targetKeyspaceName := defaultTargetKs var mt iMoveTables - workflowName := "wf1" sourceReplicaTab = vc.Cells["zone1"].Keyspaces[sourceKeyspaceName].Shards["0"].Tablets["zone1-101"].Vttablet require.NotNil(t, sourceReplicaTab) @@ -91,13 +90,13 @@ func TestVtctldclientCLI(t *testing.T) { testWorkflowList(t, sourceKeyspaceName, targetKeyspaceName) }) t.Run("MoveTablesCreateFlags1", func(t *testing.T) { - testMoveTablesFlags1(t, &mt, sourceKeyspaceName, targetKeyspaceName, workflowName, targetTabs) + testMoveTablesFlags1(t, &mt, sourceKeyspaceName, targetKeyspaceName, defaultWorkflowName, targetTabs) }) t.Run("testWorkflowUpdateConfig", func(t *testing.T) { - testWorkflowUpdateConfig(t, &mt, targetTabs, targetKeyspaceName, workflowName) + testWorkflowUpdateConfig(t, &mt, targetTabs, targetKeyspaceName, defaultWorkflowName) }) t.Run("MoveTablesCreateFlags2", func(t *testing.T) { - testMoveTablesFlags2(t, &mt, sourceKeyspaceName, targetKeyspaceName, workflowName, targetTabs) + testMoveTablesFlags2(t, &mt, sourceKeyspaceName, targetKeyspaceName, defaultWorkflowName, targetTabs) }) t.Run("MoveTablesCompleteFlags3", func(t *testing.T) { testMoveTablesFlags3(t, sourceKeyspaceName, targetKeyspaceName, targetTabs) @@ -192,7 +191,7 @@ func TestVtctldclientCLI(t *testing.T) { } rs.Start() - waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", keyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) + 
waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", keyspace, defaultWorkflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) res, err := targetTab1.QueryTablet("show tables", keyspace, true) require.NoError(t, err) @@ -206,8 +205,8 @@ func TestVtctldclientCLI(t *testing.T) { rs.Cancel() - workflowNames := workflowList(keyspace) - require.Empty(t, workflowNames) + defaultWorkflowNames := workflowList(keyspace) + require.Empty(t, defaultWorkflowNames) res, err = targetTab1.QueryTablet("show tables", keyspace, true) require.NoError(t, err) @@ -222,7 +221,7 @@ func TestVtctldclientCLI(t *testing.T) { } // Tests several create flags and some complete flags and validates that some of them are set correctly for the workflow. -func testMoveTablesFlags1(t *testing.T, mt *iMoveTables, sourceKeyspace, targetKeyspace, workflowName string, targetTabs map[string]*cluster.VttabletProcess) { +func testMoveTablesFlags1(t *testing.T, mt *iMoveTables, sourceKeyspace, targetKeyspace, defaultWorkflowName string, targetTabs map[string]*cluster.VttabletProcess) { tables := "customer,customer2" overrides := map[string]string{ "vreplication-net-read-timeout": "6000", @@ -238,10 +237,10 @@ func testMoveTablesFlags1(t *testing.T, mt *iMoveTables, sourceKeyspace, targetK completeFlags := []string{"--keep-routing-rules", "--keep-data"} switchFlags := []string{} // Test one set of MoveTable flags. - *mt = createMoveTables(t, sourceKeyspace, targetKeyspace, workflowName, tables, createFlags, completeFlags, switchFlags) + *mt = createMoveTables(t, sourceKeyspace, targetKeyspace, defaultWorkflowName, tables, createFlags, completeFlags, switchFlags) (*mt).Show() moveTablesResponse := getMoveTablesShowResponse(mt) - workflowResponse := getWorkflow(targetKeyspace, workflowName) + workflowResponse := getWorkflow(targetKeyspace, defaultWorkflowName) // also validates that MoveTables Show and Workflow Show return the same output. 
require.EqualValues(t, moveTablesResponse.CloneVT(), workflowResponse) @@ -264,8 +263,8 @@ func getMoveTablesShowResponse(mt *iMoveTables) *vtctldatapb.GetWorkflowsRespons } // Validates some of the flags created from the previous test. -func testMoveTablesFlags2(t *testing.T, mt *iMoveTables, sourceKeyspace, targetKeyspace, workflowName string, targetTabs map[string]*cluster.VttabletProcess) { - ksWorkflow := fmt.Sprintf("%s.%s", targetKeyspace, workflowName) +func testMoveTablesFlags2(t *testing.T, mt *iMoveTables, sourceKeyspace, targetKeyspace, defaultWorkflowName string, targetTabs map[string]*cluster.VttabletProcess) { + ksWorkflow := fmt.Sprintf("%s.%s", targetKeyspace, defaultWorkflowName) wf := (*mt).(iWorkflow) (*mt).Start() // Need to start because we set auto-start to false. waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Stopped.String()) @@ -284,7 +283,7 @@ func testMoveTablesFlags2(t *testing.T, mt *iMoveTables, sourceKeyspace, targetK (*mt).Start() waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) for _, tab := range targetTabs { - catchup(t, tab, workflowName, "MoveTables") + catchup(t, tab, defaultWorkflowName, "MoveTables") } (*mt).SwitchReads() @@ -356,7 +355,7 @@ func testMoveTablesFlags2(t *testing.T, mt *iMoveTables, sourceKeyspace, targetK confirmStates(t, &wf, wrangler.WorkflowStateReadsSwitched, wrangler.WorkflowStateNotSwitched) // Confirm that everything is still in sync after our switch fest. 
- vdiff(t, targetKeyspace, workflowName, "zone1", nil) + vdiff(t, targetKeyspace, defaultWorkflowName, "zone1", nil) (*mt).SwitchReadsAndWrites() validateReadsRouteToTarget(t, "replica") @@ -382,15 +381,15 @@ func testMoveTablesFlags3(t *testing.T, sourceKeyspace, targetKeyspace string, t completeFlags := []string{"--rename-tables"} tables := "customer2" switchFlags := []string{"--enable-reverse-replication=false"} - mt := createMoveTables(t, sourceKeyspace, targetKeyspace, workflowName, tables, createFlags, completeFlags, switchFlags) + mt := createMoveTables(t, sourceKeyspace, targetKeyspace, defaultWorkflowName, tables, createFlags, completeFlags, switchFlags) mt.Start() // Need to start because we set stop-after-copy to true. - waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, defaultKsWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) mt.Stop() // Test stopping workflow. - waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Stopped.String()) + waitForWorkflowState(t, vc, defaultKsWorkflow, binlogdatapb.VReplicationWorkflowState_Stopped.String()) mt.Start() - waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, defaultKsWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) for _, tab := range targetTabs { - catchup(t, tab, workflowName, "MoveTables") + catchup(t, tab, defaultWorkflowName, "MoveTables") } mt.SwitchReadsAndWrites() mt.Complete() @@ -399,11 +398,11 @@ func testMoveTablesFlags3(t *testing.T, sourceKeyspace, targetKeyspace string, t require.False(t, checkTablesExist(t, "zone1-100", []string{"customer2"})) // Confirm that we can cancel a workflow after ONLY switching read traffic. 
- mt = createMoveTables(t, sourceKeyspace, targetKeyspace, workflowName, "customer", createFlags, nil, nil) + mt = createMoveTables(t, sourceKeyspace, targetKeyspace, defaultWorkflowName, "customer", createFlags, nil, nil) mt.Start() // Need to start because we set stop-after-copy to true. - waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, defaultKsWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) for _, tab := range targetTabs { - catchup(t, tab, workflowName, "MoveTables") + catchup(t, tab, defaultWorkflowName, "MoveTables") } mt.SwitchReads() wf := mt.(iWorkflow) @@ -428,17 +427,17 @@ func testWorkflowList(t *testing.T, sourceKeyspace, targetKeyspace string) { } slices.Sort(wfNames) - workflowNames := workflowList(targetKeyspace) - slices.Sort(workflowNames) - require.EqualValues(t, wfNames, workflowNames) + defaultWorkflowNames := workflowList(targetKeyspace) + slices.Sort(defaultWorkflowNames) + require.EqualValues(t, wfNames, defaultWorkflowNames) workflows := getWorkflows(targetKeyspace) - workflowNames = make([]string, len(workflows.Workflows)) + defaultWorkflowNames = make([]string, len(workflows.Workflows)) for i := range workflows.Workflows { - workflowNames[i] = workflows.Workflows[i].Name + defaultWorkflowNames[i] = workflows.Workflows[i].Name } - slices.Sort(workflowNames) - require.EqualValues(t, wfNames, workflowNames) + slices.Sort(defaultWorkflowNames) + require.EqualValues(t, wfNames, defaultWorkflowNames) } func testWorkflowUpdateConfig(t *testing.T, mt *iMoveTables, targetTabs map[string]*cluster.VttabletProcess, targetKeyspace, workflow string) { @@ -526,12 +525,12 @@ func testWorkflowUpdateConfig(t *testing.T, mt *iMoveTables, targetTabs map[stri } } -func createMoveTables(t *testing.T, sourceKeyspace, targetKeyspace, workflowName, tables string, +func createMoveTables(t *testing.T, sourceKeyspace, targetKeyspace, defaultWorkflowName, tables string, 
createFlags, completeFlags, switchFlags []string) iMoveTables { mt := newMoveTables(vc, &moveTablesWorkflow{ workflowInfo: &workflowInfo{ vc: vc, - workflowName: workflowName, + workflowName: defaultWorkflowName, targetKeyspace: targetKeyspace, }, sourceKeyspace: sourceKeyspace, @@ -546,7 +545,7 @@ func createMoveTables(t *testing.T, sourceKeyspace, targetKeyspace, workflowName // reshard helpers -func splitShard(t *testing.T, keyspace, workflowName, sourceShards, targetShards string, targetTabs map[string]*cluster.VttabletProcess) { +func splitShard(t *testing.T, keyspace, defaultWorkflowName, sourceShards, targetShards string, targetTabs map[string]*cluster.VttabletProcess) { overrides := map[string]string{ "vreplication-copy-phase-duration": "10h11m12s", "vreplication-experimental-flags": "7", @@ -563,37 +562,37 @@ func splitShard(t *testing.T, keyspace, workflowName, sourceShards, targetShards rs := newReshard(vc, &reshardWorkflow{ workflowInfo: &workflowInfo{ vc: vc, - workflowName: workflowName, + workflowName: defaultWorkflowName, targetKeyspace: keyspace, }, sourceShards: sourceShards, targetShards: targetShards, createFlags: createFlags, }, workflowFlavorVtctld) - ksWorkflow := fmt.Sprintf("%s.%s", keyspace, workflowName) + ksWorkflow := fmt.Sprintf("%s.%s", keyspace, defaultWorkflowName) wf := rs.(iWorkflow) rs.Create() validateReshardResponse(rs) validateOverrides(t, targetTabs, overrides) - workflowResponse := getWorkflow(keyspace, workflowName) + workflowResponse := getWorkflow(keyspace, defaultWorkflowName) reshardShowResponse := getReshardShowResponse(&rs) require.EqualValues(t, reshardShowResponse, workflowResponse) validateReshardWorkflow(t, workflowResponse.Workflows) - waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", keyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Stopped.String()) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", keyspace, defaultWorkflowName), binlogdatapb.VReplicationWorkflowState_Stopped.String()) rs.Start() 
waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Stopped.String()) for _, tab := range targetTabs { alias := fmt.Sprintf("zone1-%d", tab.TabletUID) - query := fmt.Sprintf("update _vt.vreplication set source := replace(source, 'stop_after_copy:true', 'stop_after_copy:false') where db_name = 'vt_%s' and workflow = '%s'", keyspace, workflowName) + query := fmt.Sprintf("update _vt.vreplication set source := replace(source, 'stop_after_copy:true', 'stop_after_copy:false') where db_name = 'vt_%s' and workflow = '%s'", keyspace, defaultWorkflowName) output, err := vc.VtctldClient.ExecuteCommandWithOutput("ExecuteFetchAsDBA", alias, query) require.NoError(t, err, output) } rs.Start() - waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", keyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", keyspace, defaultWorkflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) rs.Stop() waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Stopped.String()) rs.Start() - waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", keyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", keyspace, defaultWorkflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) t.Run("Test --shards in workflow start/stop", func(t *testing.T) { // This subtest expects workflow to be running at the start and restarts it at the end. 
@@ -608,18 +607,18 @@ func splitShard(t *testing.T, keyspace, workflowName, sourceShards, targetShards {"-40,40-80", "start", 2}, } for _, tc := range testCases { - output, err := vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", keyspace, tc.action, "--workflow", workflowName, "--shards", tc.shards) + output, err := vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", keyspace, tc.action, "--workflow", defaultWorkflowName, "--shards", tc.shards) require.NoError(t, err, "failed to %s workflow: %v", tc.action, err) cnt := gjson.Get(output, "details.#").Int() require.EqualValuesf(t, tc.expected, cnt, "expected %d shards, got %d for action %s, shards %s", tc.expected, cnt, tc.action, tc.shards) } }) - waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", keyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", keyspace, defaultWorkflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) for _, targetTab := range targetTabs { - catchup(t, targetTab, workflowName, "Reshard") + catchup(t, targetTab, defaultWorkflowName, "Reshard") } - vdiff(t, keyspace, workflowName, "zone1", nil) + vdiff(t, keyspace, defaultWorkflowName, "zone1", nil) shardReadsRouteToSource := func() { require.True(t, getShardRoute(t, keyspace, "-80", "replica")) @@ -638,15 +637,15 @@ func splitShard(t *testing.T, keyspace, workflowName, sourceShards, targetShards } rs.SwitchReadsAndWrites() - waitForLowLag(t, keyspace, workflowName+"_reverse") - vdiff(t, keyspace, workflowName+"_reverse", "zone1", nil) + waitForLowLag(t, keyspace, defaultWorkflowName+"_reverse") + vdiff(t, keyspace, defaultWorkflowName+"_reverse", "zone1", nil) shardReadsRouteToTarget() shardWritesRouteToTarget() confirmStates(t, &wf, wrangler.WorkflowStateNotSwitched, wrangler.WorkflowStateAllSwitched) rs.ReverseReadsAndWrites() - waitForLowLag(t, keyspace, workflowName) - vdiff(t, keyspace, workflowName, "zone1", nil) + 
waitForLowLag(t, keyspace, defaultWorkflowName) + vdiff(t, keyspace, defaultWorkflowName, "zone1", nil) shardReadsRouteToSource() shardWritesRouteToSource() confirmStates(t, &wf, wrangler.WorkflowStateAllSwitched, wrangler.WorkflowStateNotSwitched) @@ -702,7 +701,7 @@ func splitShard(t *testing.T, keyspace, workflowName, sourceShards, targetShards confirmStates(t, &wf, wrangler.WorkflowStateReadsSwitched, wrangler.WorkflowStateNotSwitched) // Confirm that everything is still in sync after our switch fest. - vdiff(t, keyspace, workflowName, "zone1", nil) + vdiff(t, keyspace, defaultWorkflowName, "zone1", nil) rs.SwitchReadsAndWrites() shardReadsRouteToTarget() From 3af99d8fc8ecbdbce0cc5e98e52051f013699a20 Mon Sep 17 00:00:00 2001 From: Matt Lord Date: Fri, 17 Oct 2025 15:28:14 +0000 Subject: [PATCH 09/13] Undo unnecessary change Signed-off-by: Matt Lord --- .../command/vreplication/lookupvindex/lookupvindex.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/cmd/vtctldclient/command/vreplication/lookupvindex/lookupvindex.go b/go/cmd/vtctldclient/command/vreplication/lookupvindex/lookupvindex.go index 62f892d0595..786463e4c6e 100644 --- a/go/cmd/vtctldclient/command/vreplication/lookupvindex/lookupvindex.go +++ b/go/cmd/vtctldclient/command/vreplication/lookupvindex/lookupvindex.go @@ -229,7 +229,7 @@ var ( "to": "keyspace_id", "ignore_nulls": fmt.Sprintf("%t", vindex.IgnoreNulls), }, - Owner: createOptions.TableOwner, + Owner: vindex.TableOwner, } targetTableColumnVindex := &vschemapb.ColumnVindex{ From 8a8a9361d6dee28d27ce0914ce366a0451e8de86 Mon Sep 17 00:00:00 2001 From: Matt Lord Date: Fri, 17 Oct 2025 18:29:44 +0000 Subject: [PATCH 10/13] Minor cleanup Signed-off-by: Matt Lord --- go/test/endtoend/vreplication/vdiff2_test.go | 120 +++++++++---------- 1 file changed, 60 insertions(+), 60 deletions(-) diff --git a/go/test/endtoend/vreplication/vdiff2_test.go b/go/test/endtoend/vreplication/vdiff2_test.go index 862e10a3f4f..67749b76694 100644 
--- a/go/test/endtoend/vreplication/vdiff2_test.go +++ b/go/test/endtoend/vreplication/vdiff2_test.go @@ -43,12 +43,12 @@ import ( ) type testCase struct { - name, typ, defaultSourceKs, defaultTargetKs string - sourceShards, targetShards string - tables string - workflow string - tabletBaseID int - autoRetryError bool // if true, test auto retry on error against this workflow + name, typ, sourceKs, targetKs string + sourceShards, targetShards string + tables string + workflow string + tabletBaseID int + autoRetryError bool // if true, test auto retry on error against this workflow // If testing auto retry on error, what new rows should be diff'd. These rows must have a PK > all initial rows. retryInsert string resume bool // test resume functionality with this workflow @@ -74,8 +74,8 @@ var testCases = []*testCase{ name: "MoveTables/unsharded to two shards", workflow: "p1c2", typ: "MoveTables", - defaultSourceKs: defaultSourceKs, - defaultTargetKs: defaultTargetKs, + sourceKs: defaultSourceKs, + targetKs: defaultTargetKs, sourceShards: "0", targetShards: "-80,80-", tabletBaseID: 200, @@ -92,34 +92,34 @@ var testCases = []*testCase{ }, }, { - name: "Reshard Merge/split 2 to 3", - workflow: "c2c3", - typ: "Reshard", - defaultSourceKs: defaultTargetKs, - defaultTargetKs: defaultTargetKs, - sourceShards: "-80,80-", - targetShards: "-40,40-a0,a0-", - tabletBaseID: 400, - autoRetryError: true, - retryInsert: `insert into customer(cid, name, typ) values(2005149300, 'Testy McTester Jr', 'enterprise'), (2005149350, 'Testy McTester II', 'enterprise')`, - resume: true, - resumeInsert: `insert into customer(cid, name, typ) values(2005149400, 'Testy McTester III', 'enterprise')`, - stop: true, + name: "Reshard Merge/split 2 to 3", + workflow: "c2c3", + typ: "Reshard", + sourceKs: defaultTargetKs, + targetKs: defaultTargetKs, + sourceShards: "-80,80-", + targetShards: "-40,40-a0,a0-", + tabletBaseID: 400, + autoRetryError: true, + retryInsert: `insert into customer(cid, name, 
typ) values(2005149300, 'Testy McTester Jr', 'enterprise'), (2005149350, 'Testy McTester II', 'enterprise')`, + resume: true, + resumeInsert: `insert into customer(cid, name, typ) values(2005149400, 'Testy McTester III', 'enterprise')`, + stop: true, }, { - name: "Reshard/merge 3 to 1", - workflow: "c3c1", - typ: "Reshard", - defaultSourceKs: defaultTargetKs, - defaultTargetKs: defaultTargetKs, - sourceShards: "-40,40-a0,a0-", - targetShards: "0", - tabletBaseID: 700, - autoRetryError: true, - retryInsert: `insert into customer(cid, name, typ) values(2005149500, 'Testy McTester IV', 'enterprise')`, - resume: true, - resumeInsert: `insert into customer(cid, name, typ) values(2005149600, 'Testy McTester V', 'enterprise'), (2005149650, 'Testy McTester VI', 'enterprise')`, - stop: true, + name: "Reshard/merge 3 to 1", + workflow: "c3c1", + typ: "Reshard", + sourceKs: defaultTargetKs, + targetKs: defaultTargetKs, + sourceShards: "-40,40-a0,a0-", + targetShards: "0", + tabletBaseID: 700, + autoRetryError: true, + retryInsert: `insert into customer(cid, name, typ) values(2005149500, 'Testy McTester IV', 'enterprise')`, + resume: true, + resumeInsert: `insert into customer(cid, name, typ) values(2005149600, 'Testy McTester V', 'enterprise'), (2005149650, 'Testy McTester VI', 'enterprise')`, + stop: true, }, } @@ -214,20 +214,20 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, require.NoError(t, vc.AddShards(t, cells, tks, tc.targetShards, 0, 0, tc.tabletBaseID, defaultTargetKsOpts)) } - ksWorkflow := fmt.Sprintf("%s.%s", tc.defaultTargetKs, tc.workflow) + ksWorkflow := fmt.Sprintf("%s.%s", tc.targetKs, tc.workflow) statsShard := arrTargetShards[0] - statsTablet := vc.getPrimaryTablet(t, tc.defaultTargetKs, statsShard) + statsTablet := vc.getPrimaryTablet(t, tc.targetKs, statsShard) var args []string args = append(args, tc.typ) args = append(args, "--workflow", tc.workflow) - args = append(args, "--target-keyspace", tc.defaultTargetKs) + 
args = append(args, "--target-keyspace", tc.targetKs) allCellNames := getCellNames(nil) args = append(args, "create") args = append(args, "--cells", allCellNames) if tc.typ == "Reshard" { args = append(args, "--source-shards", tc.sourceShards, "--target-shards", tc.targetShards) } else { - args = append(args, "--source-keyspace", tc.defaultSourceKs) + args = append(args, "--source-keyspace", tc.sourceKs) args = append(args, "--tables", tc.tables) } err := vc.VtctldClient.ExecuteCommand(args...) @@ -235,7 +235,7 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, waitForShardsToCatchup := func() { for _, shard := range arrTargetShards { - tab := vc.getPrimaryTablet(t, tc.defaultTargetKs, shard) + tab := vc.getPrimaryTablet(t, tc.targetKs, shard) catchup(t, tab, tc.workflow, tc.typ) } } @@ -261,17 +261,17 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, totalRowsToCreate := seconds * perSecondCount log.Infof("Test host has %d vCPUs. Generating %d rows in the customer table to test --max-diff-duration", runtime.NumCPU(), totalRowsToCreate) for i := int64(0); i < totalRowsToCreate; i += chunkSize { - generateMoreCustomers(t, defaultSourceKs, chunkSize) + generateMoreCustomers(t, tc.sourceKs, chunkSize) } // Wait for the workflow to catch up after all the inserts. waitForShardsToCatchup() // This flag is only implemented in vtctldclient. - doVtctldclientVDiff(t, tc.defaultTargetKs, tc.workflow, allCellNames, nil, "--max-diff-duration", diffDuration) + doVtctldclientVDiff(t, tc.targetKs, tc.workflow, allCellNames, nil, "--max-diff-duration", diffDuration) // Confirm that the customer table diff was restarted but not others. 
- tablet := vc.getPrimaryTablet(t, tc.defaultTargetKs, arrTargetShards[0]) + tablet := vc.getPrimaryTablet(t, tc.targetKs, arrTargetShards[0]) stat, err := getDebugVar(t, tablet.Port, []string{"VDiffRestartedTableDiffsCount"}) require.NoError(t, err, "failed to get VDiffRestartedTableDiffsCount stat: %v", err) customerRestarts := gjson.Parse(stat).Get("customer").Int() @@ -280,7 +280,7 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, require.Equal(t, int64(0), leadRestarts, "expected VDiffRestartedTableDiffsCount stat to be 0 for the Lead table, got %d", leadRestarts) // Cleanup the created customer records so as not to slow down the rest of the test. - delstmt := fmt.Sprintf("delete from %s.customer order by cid desc limit %d", sqlescape.EscapeID(defaultSourceKs), chunkSize) + delstmt := fmt.Sprintf("delete from %s.customer order by cid desc limit %d", sqlescape.EscapeID(tc.sourceKs), chunkSize) for i := int64(0); i < totalRowsToCreate; i += chunkSize { _, err := vtgateConn.ExecuteFetch(delstmt, int(chunkSize), false) require.NoError(t, err, "failed to cleanup added customer records: %v", err) @@ -289,7 +289,7 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, waitForShardsToCatchup() tc.vdiffCount++ // We only did vtctldclient vdiff create } else { - vdiff(t, tc.defaultTargetKs, tc.workflow, allCellNames, nil) + vdiff(t, tc.targetKs, tc.workflow, allCellNames, nil) tc.vdiffCount++ } checkVDiffCountStat(t, statsTablet, tc.vdiffCount) @@ -329,7 +329,7 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, } if tc.testCLIFlagHandling { // This creates and then deletes the vdiff so we don't increment the count. 
- testCLIFlagHandling(t, tc.defaultTargetKs, tc.workflow, cells[0]) + testCLIFlagHandling(t, tc.targetKs, tc.workflow, cells[0]) } checkVDiffCountStat(t, statsTablet, tc.vdiffCount) @@ -345,13 +345,13 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, tc.vdiffCount++ checkVDiffCountStat(t, statsTablet, tc.vdiffCount) - err = vc.VtctldClient.ExecuteCommand(tc.typ, "--workflow", tc.workflow, "--target-keyspace", tc.defaultTargetKs, "SwitchTraffic") + err = vc.VtctldClient.ExecuteCommand(tc.typ, "--workflow", tc.workflow, "--target-keyspace", tc.targetKs, "SwitchTraffic") require.NoError(t, err) - err = vc.VtctldClient.ExecuteCommand(tc.typ, "--workflow", tc.workflow, "--target-keyspace", tc.defaultTargetKs, "Complete") + err = vc.VtctldClient.ExecuteCommand(tc.typ, "--workflow", tc.workflow, "--target-keyspace", tc.targetKs, "Complete") require.NoError(t, err) // Confirm the VDiff data is deleted for the workflow. - testNoOrphanedData(t, tc.defaultTargetKs, tc.workflow, arrTargetShards) + testNoOrphanedData(t, tc.targetKs, tc.workflow, arrTargetShards) tc.vdiffCount = 0 // All vdiffs are deleted, so reset the count and check checkVDiffCountStat(t, statsTablet, tc.vdiffCount) } @@ -376,7 +376,7 @@ func testCLIErrors(t *testing.T, ksWorkflow, cells string) { // testCLIFlagHandling tests that the vtctldclient CLI flags are handled correctly // from vtctldclient->vtctld->vttablet->mysqld. 
-func testCLIFlagHandling(t *testing.T, defaultTargetKs, workflowName string, cell *Cell) { +func testCLIFlagHandling(t *testing.T, targetKs, workflowName string, cell *Cell) { expectedOptions := &tabletmanagerdatapb.VDiffOptions{ CoreOptions: &tabletmanagerdatapb.VDiffCoreOptions{ MaxRows: 999, @@ -400,7 +400,7 @@ func testCLIFlagHandling(t *testing.T, defaultTargetKs, workflowName string, cel } t.Run("Client flag handling", func(t *testing.T) { - res, err := vc.VtctldClient.ExecuteCommandWithOutput("vdiff", "--target-keyspace", defaultTargetKs, "--workflow", workflowName, + res, err := vc.VtctldClient.ExecuteCommandWithOutput("vdiff", "--target-keyspace", targetKs, "--workflow", workflowName, "create", "--limit", fmt.Sprintf("%d", expectedOptions.CoreOptions.MaxRows), "--max-report-sample-rows", fmt.Sprintf("%d", expectedOptions.ReportOptions.MaxSampleRows), @@ -425,10 +425,10 @@ func testCLIFlagHandling(t *testing.T, defaultTargetKs, workflowName string, cel // Confirm that the options were passed through and saved correctly. 
query := sqlparser.BuildParsedQuery("select options from %s.vdiff where vdiff_uuid = %s", sidecarDBIdentifier, encodeString(vduuid.String())).Query - tablets := vc.getVttabletsInKeyspace(t, cell, defaultTargetKs, "PRIMARY") - require.Greater(t, len(tablets), 0, "no primary tablets found in keyspace %s", defaultTargetKs) + tablets := vc.getVttabletsInKeyspace(t, cell, targetKs, "PRIMARY") + require.Greater(t, len(tablets), 0, "no primary tablets found in keyspace %s", targetKs) tablet := maps.Values(tablets)[0] - qres, err := tablet.QueryTablet(query, defaultTargetKs, false) + qres, err := tablet.QueryTablet(query, targetKs, false) require.NoError(t, err, "query %q failed: %v", query, err) require.NotNil(t, qres, "query %q returned nil result", query) // Should never happen require.Equal(t, 1, len(qres.Rows), "query %q returned %d rows, expected 1", query, len(qres.Rows)) @@ -442,7 +442,7 @@ func testCLIFlagHandling(t *testing.T, defaultTargetKs, workflowName string, cel // Delete this vdiff as we used --auto-start=false and thus it never starts and // does not provide the normally expected show --verbose --format=json output. - _, output := performVDiff2Action(t, fmt.Sprintf("%s.%s", defaultTargetKs, workflowName), "", "delete", vduuid.String(), false) + _, output := performVDiff2Action(t, fmt.Sprintf("%s.%s", targetKs, workflowName), "", "delete", vduuid.String(), false) require.Equal(t, "completed", gjson.Get(output, "Status").String()) }) } @@ -509,7 +509,7 @@ func testResume(t *testing.T, tc *testCase, cells string) { t.Run("Resume", func(t *testing.T) { vtgateConn, closeConn := getVTGateConn() defer closeConn() - ksWorkflow := fmt.Sprintf("%s.%s", tc.defaultTargetKs, tc.workflow) + ksWorkflow := fmt.Sprintf("%s.%s", tc.targetKs, tc.workflow) // Confirm the last VDiff is in the expected completed state. 
uuid, output := performVDiff2Action(t, ksWorkflow, cells, "show", "last", false) @@ -521,7 +521,7 @@ func testResume(t *testing.T, tc *testCase, cells string) { expectedNewRows := int64(0) if tc.resumeInsert != "" { - res := execVtgateQuery(t, vtgateConn, tc.defaultSourceKs, tc.resumeInsert) + res := execVtgateQuery(t, vtgateConn, tc.sourceKs, tc.resumeInsert) expectedNewRows = int64(res.RowsAffected) } expectedRows := rowsCompared + expectedNewRows @@ -572,7 +572,7 @@ func testAutoRetryError(t *testing.T, tc *testCase, cells string) { t.Run("Auto retry on error", func(t *testing.T) { vtgateConn, closeConn := getVTGateConn() defer closeConn() - ksWorkflow := fmt.Sprintf("%s.%s", tc.defaultTargetKs, tc.workflow) + ksWorkflow := fmt.Sprintf("%s.%s", tc.targetKs, tc.workflow) // Confirm the last VDiff is in the expected completed state. uuid, output := performVDiff2Action(t, ksWorkflow, cells, "show", "last", false) @@ -586,15 +586,15 @@ func testAutoRetryError(t *testing.T, tc *testCase, cells string) { // compared is cumulative. expectedNewRows := int64(0) if tc.retryInsert != "" { - res := execVtgateQuery(t, vtgateConn, tc.defaultSourceKs, tc.retryInsert) + res := execVtgateQuery(t, vtgateConn, tc.sourceKs, tc.retryInsert) expectedNewRows = int64(res.RowsAffected) } expectedRows := rowsCompared + expectedNewRows // Update the VDiff to simulate an ephemeral error having occurred. 
for _, shard := range strings.Split(tc.targetShards, ",") { - tab := vc.getPrimaryTablet(t, tc.defaultTargetKs, shard) - res, err := tab.QueryTabletWithDB(sqlparser.BuildParsedQuery(sqlSimulateError, sidecarDBIdentifier, sidecarDBIdentifier, encodeString(uuid)).Query, "vt_"+tc.defaultTargetKs) + tab := vc.getPrimaryTablet(t, tc.targetKs, shard) + res, err := tab.QueryTabletWithDB(sqlparser.BuildParsedQuery(sqlSimulateError, sidecarDBIdentifier, sidecarDBIdentifier, encodeString(uuid)).Query, "vt_"+tc.targetKs) require.NoError(t, err) // Should have updated the vdiff record and at least one vdiff_table record. require.GreaterOrEqual(t, int(res.RowsAffected), 2) From f84a074e09411fc263c28d9b5ca5940a8456be95 Mon Sep 17 00:00:00 2001 From: Matt Lord Date: Fri, 17 Oct 2025 18:30:02 +0000 Subject: [PATCH 11/13] Fix flakiness in TestOnlineDDLVDiff Signed-off-by: Matt Lord --- .../vreplication/vdiff_online_ddl_test.go | 22 ++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/go/test/endtoend/vreplication/vdiff_online_ddl_test.go b/go/test/endtoend/vreplication/vdiff_online_ddl_test.go index 42002a33f93..2ac404abe3e 100644 --- a/go/test/endtoend/vreplication/vdiff_online_ddl_test.go +++ b/go/test/endtoend/vreplication/vdiff_online_ddl_test.go @@ -14,6 +14,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/proto/vtctldata" + "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/utils" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" @@ -90,8 +91,23 @@ func onlineDDLShow(t *testing.T, keyspace, uuid string) *vtctldata.GetSchemaMigr func execOnlineDDL(t *testing.T, strategy, keyspace, query string) string { output, err := vc.VtctldClient.ExecuteCommandWithOutput("ApplySchema", utils.GetFlagVariantForTests("--ddl-strategy"), strategy, "--sql", query, keyspace) require.NoError(t, err, output) - uuid := strings.TrimSpace(output) + output = strings.TrimSpace(output) if strategy != "direct" { + // We 
expect a UUID as the only output, but when using --ddl_strategy we get a warning mixed into the output: + // Flag --ddl_strategy has been deprecated, use --ddl-strategy instead + // In order to prevent this and other similar future issues, lets hunt for the UUID (which should be on its own line) + // in the returned output. + uuid := "" + lines := strings.Split(output, "\n") + for i := range lines { + line := strings.TrimSpace(lines[i]) + if schema.IsOnlineDDLUUID(line) { + uuid = line + break + } + } + require.NotEmpty(t, uuid, "UUID not returned in ApplySchema command output: %v", output) + output = uuid // return the UUID instead of the original output err = waitForCondition("online ddl to start", func() bool { response := onlineDDLShow(t, keyspace, uuid) if len(response.Migrations) > 0 && @@ -100,13 +116,13 @@ func execOnlineDDL(t *testing.T, strategy, keyspace, query string) string { return true } return false - }, defaultTimeout) + }, workflowStateTimeout) require.NoError(t, err) // The online ddl migration is set to SchemaMigration_RUNNING before it creates the // _vt.vreplication records. Hence wait for the vreplication workflow to be created as well. 
waitForWorkflowToBeCreated(t, vc, fmt.Sprintf("%s.%s", keyspace, uuid)) } - return uuid + return output } func waitForAdditionalRows(t *testing.T, keyspace, table string, count int) { From 5bd5172bcba11953d282017e05fb94b8325c49d2 Mon Sep 17 00:00:00 2001 From: Matt Lord Date: Fri, 17 Oct 2025 18:57:20 +0000 Subject: [PATCH 12/13] Tweak the table handling Signed-off-by: Matt Lord --- go/vt/vttablet/tabletmanager/rpc_vreplication.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/go/vt/vttablet/tabletmanager/rpc_vreplication.go b/go/vt/vttablet/tabletmanager/rpc_vreplication.go index f88a01e7719..20bbf3eecf8 100644 --- a/go/vt/vttablet/tabletmanager/rpc_vreplication.go +++ b/go/vt/vttablet/tabletmanager/rpc_vreplication.go @@ -857,8 +857,12 @@ func (tm *TabletManager) updateSequenceValue(ctx context.Context, seq *tabletman } func (tm *TabletManager) createSequenceTable(ctx context.Context, tableName string) error { - stmt := sqlparser.BuildParsedQuery(sqlCreateSequenceTable, sqlparser.NewTableName(tableName)) - _, err := tm.ApplySchema(ctx, &tmutils.SchemaChange{ + escapedTableName, err := sqlescape.EnsureEscaped(tableName) + if err != nil { + return err + } + stmt := sqlparser.BuildParsedQuery(sqlCreateSequenceTable, escapedTableName) + _, err = tm.ApplySchema(ctx, &tmutils.SchemaChange{ SQL: stmt.Query, Force: false, AllowReplication: true, From 9c5d6116608abeae8d06bd2471afbeeda79550c8 Mon Sep 17 00:00:00 2001 From: Matt Lord Date: Sat, 18 Oct 2025 15:24:23 +0000 Subject: [PATCH 13/13] Nits Signed-off-by: Matt Lord --- go/vt/vttablet/tabletmanager/rpc_vreplication.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/go/vt/vttablet/tabletmanager/rpc_vreplication.go b/go/vt/vttablet/tabletmanager/rpc_vreplication.go index 20bbf3eecf8..c329daddcca 100644 --- a/go/vt/vttablet/tabletmanager/rpc_vreplication.go +++ b/go/vt/vttablet/tabletmanager/rpc_vreplication.go @@ -747,10 +747,11 @@ func (tm *TabletManager) 
GetMaxValueForSequences(ctx context.Context, req *table } func (tm *TabletManager) getMaxSequenceValue(ctx context.Context, sm *tabletmanagerdatapb.GetMaxValueForSequencesRequest_SequenceMetadata) (int64, error) { - for _, val := range []string{sm.UsingColEscaped, sm.UsingTableDbNameEscaped, sm.UsingTableNameEscaped} { - if val[0] != '`' || val[len(val)-1] != '`' { + for _, val := range []string{sm.UsingTableDbNameEscaped, sm.UsingTableNameEscaped, sm.UsingColEscaped} { + lv := len(val) + if lv < 3 || val[0] != '`' || val[lv-1] != '`' { return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, - "the column (%s), database (%s), and table (%s) names must be escaped", sm.UsingColEscaped, sm.UsingTableDbNameEscaped, sm.UsingTableNameEscaped) + "the database (%s), table (%s), and column (%s) names must be non-empty escaped values", sm.UsingTableDbNameEscaped, sm.UsingTableNameEscaped, sm.UsingColEscaped) } } query := sqlparser.BuildParsedQuery(sqlGetMaxSequenceVal,