diff --git a/go/cmd/vtctldclient/command/vreplication/lookupvindex/lookupvindex.go b/go/cmd/vtctldclient/command/vreplication/lookupvindex/lookupvindex.go index d8a8ffed101..786463e4c6e 100644 --- a/go/cmd/vtctldclient/command/vreplication/lookupvindex/lookupvindex.go +++ b/go/cmd/vtctldclient/command/vreplication/lookupvindex/lookupvindex.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/cmd/vtctldclient/cli" "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + "vitess.io/vitess/go/sqlescape" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" @@ -50,7 +51,7 @@ var ( } baseOptions = struct { - // This is where the lookup table and VReplicaiton workflow + // This is where the lookup table and VReplication workflow // will be created. TableKeyspace string // This will be the name of the Lookup Vindex and the name @@ -133,12 +134,20 @@ var ( if !strings.Contains(createOptions.Type, "lookup") { return fmt.Errorf("vindex type must be a lookup vindex") } + escapedTableKeyspace, err := sqlescape.EnsureEscaped(baseOptions.TableKeyspace) + if err != nil { + return fmt.Errorf("invalid table keyspace (%s): %v", baseOptions.TableKeyspace, err) + } + escapedTableName, err := sqlescape.EnsureEscaped(createOptions.TableName) + if err != nil { + return fmt.Errorf("invalid table name (%s): %v", createOptions.TableName, err) + } baseOptions.Vschema = &vschemapb.Keyspace{ Vindexes: map[string]*vschemapb.Vindex{ baseOptions.Name: { Type: createOptions.Type, Params: map[string]string{ - "table": baseOptions.TableKeyspace + "." + createOptions.TableName, + "table": escapedTableKeyspace + "." 
+ escapedTableName, "from": strings.Join(createOptions.TableOwnerColumns, ","), "to": "keyspace_id", "ignore_nulls": fmt.Sprintf("%t", createOptions.IgnoreNulls), @@ -204,10 +213,18 @@ var ( return fmt.Errorf("%s is not a lookup vindex type", vindex.LookupVindexType) } + escapedTableKeyspace, err := sqlescape.EnsureEscaped(baseOptions.TableKeyspace) + if err != nil { + return fmt.Errorf("invalid table keyspace (%s): %v", baseOptions.TableKeyspace, err) + } + escapedTableName, err := sqlescape.EnsureEscaped(vindex.TableName) + if err != nil { + return fmt.Errorf("invalid table name (%s): %v", vindex.TableName, err) + } vindexes[vindexName] = &vschemapb.Vindex{ Type: vindex.LookupVindexType, Params: map[string]string{ - "table": baseOptions.TableKeyspace + "." + vindex.TableName, + "table": escapedTableKeyspace + "." + escapedTableName, "from": strings.Join(vindex.TableOwnerColumns, ","), "to": "keyspace_id", "ignore_nulls": fmt.Sprintf("%t", vindex.IgnoreNulls), diff --git a/go/test/endtoend/vreplication/cluster_test.go b/go/test/endtoend/vreplication/cluster_test.go index 79eaa4d1735..55c81a48ec6 100644 --- a/go/test/endtoend/vreplication/cluster_test.go +++ b/go/test/endtoend/vreplication/cluster_test.go @@ -360,7 +360,7 @@ func getClusterOptions(opts *clusterOptions) *clusterOptions { opts = &clusterOptions{} } if opts.cells == nil { - opts.cells = []string{"zone1"} + opts.cells = []string{defaultCellName} } if opts.clusterConfig == nil { opts.clusterConfig = mainClusterConfig diff --git a/go/test/endtoend/vreplication/config_test.go b/go/test/endtoend/vreplication/config_test.go index 53a861b9fa0..798842db66c 100644 --- a/go/test/endtoend/vreplication/config_test.go +++ b/go/test/endtoend/vreplication/config_test.go @@ -431,44 +431,44 @@ create table ukTable (id1 int not null, id2 int not null, name varchar(20), uniq } } ` - materializeProductSpec = ` + materializeProductSpec = fmt.Sprintf(` { "workflow": "cproduct", - "source_keyspace": "product", - 
"target_keyspace": "customer", + "source_keyspace": "%s", + "target_keyspace": "%s", "table_settings": [{ "target_table": "cproduct", "source_expression": "select * from product", "create_ddl": "create table cproduct(pid bigint, description varchar(128), date1 datetime not null default '0000-00-00 00:00:00', date2 datetime not null default '2021-00-01 00:00:00', primary key(pid)) CHARSET=utf8mb4" }] } -` +`, defaultSourceKs, defaultTargetKs) - materializeCustomerNameSpec = ` + materializeCustomerNameSpec = fmt.Sprintf(` { "workflow": "customer_name", - "source_keyspace": "customer", - "target_keyspace": "customer", + "source_keyspace": "%s", + "target_keyspace": "%s", "table_settings": [{ "target_table": "customer_name", "source_expression": "select cid, name from customer", "create_ddl": "create table if not exists customer_name (cid bigint not null, name varchar(128), primary key(cid), key(name))" }] } -` +`, defaultTargetKs, defaultTargetKs) - materializeCustomerTypeSpec = ` + materializeCustomerTypeSpec = fmt.Sprintf(` { "workflow": "enterprise_customer", - "source_keyspace": "customer", - "target_keyspace": "customer", + "source_keyspace": "%s", + "target_keyspace": "%s", "table_settings": [{ "target_table": "enterprise_customer", "source_expression": "select cid, name, typ from customer where typ = 'enterprise'", "create_ddl": "create table if not exists enterprise_customer (cid bigint not null, name varchar(128), typ varchar(64), primary key(cid), key(typ))" }] } -` +`, defaultTargetKs, defaultTargetKs) merchantOrdersVSchema = ` { @@ -512,10 +512,10 @@ create table ukTable (id1 int not null, id2 int not null, name varchar(20), uniq ` // the merchant-type keyspace allows us to test keyspace names with special characters in them (dash) - materializeMerchantOrdersSpec = ` + materializeMerchantOrdersSpec = fmt.Sprintf(` { "workflow": "morders", - "source_keyspace": "customer", + "source_keyspace": "%s", "target_keyspace": "merchant-type", "table_settings": [{ 
"target_table": "morders", @@ -523,12 +523,12 @@ create table ukTable (id1 int not null, id2 int not null, name varchar(20), uniq "create_ddl": "create table morders(oid int, cid int, mname varchar(128), pid int, price int, qty int, total int, total2 int as (10 * total), primary key(oid)) CHARSET=utf8" }] } -` +`, defaultTargetKs) - materializeMerchantSalesSpec = ` + materializeMerchantSalesSpec = fmt.Sprintf(` { "workflow": "msales", - "source_keyspace": "customer", + "source_keyspace": "%s", "target_keyspace": "merchant-type", "table_settings": [{ "target_table": "msales", @@ -536,7 +536,7 @@ create table ukTable (id1 int not null, id2 int not null, name varchar(20), uniq "create_ddl": "create table msales(merchant_name varchar(128), kount int, amount int, primary key(merchant_name)) CHARSET=utf8" }] } -` +`, defaultTargetKs) materializeSalesVSchema = ` { @@ -552,30 +552,30 @@ create table ukTable (id1 int not null, id2 int not null, name varchar(20), uniq } } ` - materializeSalesSpec = ` + materializeSalesSpec = fmt.Sprintf(` { "workflow": "sales", - "source_keyspace": "customer", - "target_keyspace": "product", + "source_keyspace": "%s", + "target_keyspace": "%s", "table_settings": [{ "target_Table": "sales", "source_expression": "select pid, count(*) as kount, sum(price) as amount from orders group by pid", "create_ddl": "create table sales(pid int, kount int, amount int, primary key(pid)) CHARSET=utf8" }] } -` - materializeRollupSpec = ` +`, defaultTargetKs, defaultSourceKs) + materializeRollupSpec = fmt.Sprintf(` { "workflow": "rollup", - "source_keyspace": "product", - "target_keyspace": "product", + "source_keyspace": "%s", + "target_keyspace": "%s", "table_settings": [{ "target_table": "rollup", "source_expression": "select 'total' as rollupname, count(*) as kount from product group by rollupname", "create_ddl": "create table rollup(rollupname varchar(100), kount int, primary key (rollupname)) CHARSET=utf8mb4" }] } -` +`, defaultSourceKs, defaultSourceKs) 
initialExternalSchema = ` create table review(rid int, pid int, review varbinary(128), primary key(rid)); create table rating(gid int, pid int, rating int, primary key(gid)); diff --git a/go/test/endtoend/vreplication/fk_test.go b/go/test/endtoend/vreplication/fk_test.go index 282d7a63c47..9f5fdea0d65 100644 --- a/go/test/endtoend/vreplication/fk_test.go +++ b/go/test/endtoend/vreplication/fk_test.go @@ -58,7 +58,7 @@ func TestFKWorkflow(t *testing.T) { defer vc.TearDown() cell := vc.Cells[cellName] - vc.AddKeyspace(t, []*Cell{cell}, sourceKeyspace, shardName, initialFKSourceVSchema, initialFKSchema, 0, 0, 100, sourceKsOpts) + vc.AddKeyspace(t, []*Cell{cell}, sourceKeyspace, shardName, initialFKSourceVSchema, initialFKSchema, 0, 0, 100, defaultSourceKsOpts) verifyClusterHealth(t, vc) insertInitialFKData(t) @@ -82,7 +82,7 @@ func TestFKWorkflow(t *testing.T) { targetKeyspace := "fktarget" targetTabletId := 200 - vc.AddKeyspace(t, []*Cell{cell}, targetKeyspace, shardName, initialFKTargetVSchema, "", 0, 0, targetTabletId, sourceKsOpts) + vc.AddKeyspace(t, []*Cell{cell}, targetKeyspace, shardName, initialFKTargetVSchema, "", 0, 0, targetTabletId, defaultSourceKsOpts) testFKCancel(t, vc) diff --git a/go/test/endtoend/vreplication/helper_test.go b/go/test/endtoend/vreplication/helper_test.go index fe8fb22b60c..d84f02e03f1 100644 --- a/go/test/endtoend/vreplication/helper_test.go +++ b/go/test/endtoend/vreplication/helper_test.go @@ -288,7 +288,11 @@ func waitForRowCountInTablet(t *testing.T, vttablet *cluster.VttabletProcess, da // Note: you specify the number of values that you want to reserve // and you get back the max value reserved. 
func waitForSequenceValue(t *testing.T, conn *mysql.Conn, database, sequence string, numVals int) int64 { - query := fmt.Sprintf("select next %d values from %s.%s", numVals, database, sequence) + escapedDB, err := sqlescape.EnsureEscaped(database) + require.NoError(t, err) + escapedSeq, err := sqlescape.EnsureEscaped(sequence) + require.NoError(t, err) + query := fmt.Sprintf("select next %d values from %s.%s", numVals, escapedDB, escapedSeq) timer := time.NewTimer(defaultTimeout) defer timer.Stop() for { @@ -545,7 +549,7 @@ func validateDryRunResults(t *testing.T, output string, want []string) { } if !match { fail = true - require.Fail(t, "invlaid dry run results", "want %s, got %s\n", w, gotDryRun[i]) + require.Fail(t, "invalid dry run results", "want %s, got %s\n", w, gotDryRun[i]) } } if fail { @@ -646,11 +650,11 @@ func getDebugVar(t *testing.T, port int, varPath []string) (string, error) { return string(val), nil } -func confirmWorkflowHasCopiedNoData(t *testing.T, targetKS, workflow string) { +func confirmWorkflowHasCopiedNoData(t *testing.T, defaultTargetKs, workflow string) { timer := time.NewTimer(defaultTimeout) defer timer.Stop() for { - output, err := vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", targetKs, "show", "--workflow", workflow, "--compact", "--include-logs=false") + output, err := vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", defaultTargetKs, "show", "--workflow", workflow, "--compact", "--include-logs=false") require.NoError(t, err, output) streams := gjson.Get(output, "workflows.0.shard_streams.*.streams") streams.ForEach(func(streamId, stream gjson.Result) bool { // For each stream @@ -662,7 +666,7 @@ func confirmWorkflowHasCopiedNoData(t *testing.T, targetKS, workflow string) { (pos.Exists() && pos.String() != "") { require.FailNowf(t, "Unexpected data copied in workflow", "The MoveTables workflow %q copied data in less than %s when it should have been waiting. 
Show output: %s", - ksWorkflow, defaultTimeout, output) + defaultKsWorkflow, defaultTimeout, output) } return true }) diff --git a/go/test/endtoend/vreplication/initial_data_test.go b/go/test/endtoend/vreplication/initial_data_test.go index ea34ef7fddf..2fcb485be4c 100644 --- a/go/test/endtoend/vreplication/initial_data_test.go +++ b/go/test/endtoend/vreplication/initial_data_test.go @@ -31,15 +31,15 @@ func insertInitialData(t *testing.T) { defer closeConn() log.Infof("Inserting initial data") lines, _ := os.ReadFile("unsharded_init_data.sql") - execMultipleQueries(t, vtgateConn, "product:0", string(lines)) - execVtgateQuery(t, vtgateConn, "product:0", "insert into customer_seq(id, next_id, cache) values(0, 100, 100);") - execVtgateQuery(t, vtgateConn, "product:0", "insert into order_seq(id, next_id, cache) values(0, 100, 100);") - execVtgateQuery(t, vtgateConn, "product:0", "insert into customer_seq2(id, next_id, cache) values(0, 100, 100);") + execMultipleQueries(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), string(lines)) + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "insert into customer_seq(id, next_id, cache) values(0, 100, 100);") + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "insert into order_seq(id, next_id, cache) values(0, 100, 100);") + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "insert into customer_seq2(id, next_id, cache) values(0, 100, 100);") log.Infof("Done inserting initial data") - waitForRowCount(t, vtgateConn, "product:0", "product", 2) - waitForRowCount(t, vtgateConn, "product:0", "customer", 3) - waitForQueryResult(t, vtgateConn, "product:0", "select * from merchant", + waitForRowCount(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "product", 2) + waitForRowCount(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "customer", 3) + waitForQueryResult(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "select * from merchant", `[[VARCHAR("Monoprice") 
VARCHAR("eléctronics")] [VARCHAR("newegg") VARCHAR("elec†ronics")]]`) insertJSONValues(t) @@ -52,12 +52,12 @@ func insertJSONValues(t *testing.T) { // insert null value combinations vtgateConn, closeConn := getVTGateConn() defer closeConn() - execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j3) values(1, \"{}\")") - execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j1, j3) values(2, \"{}\", \"{}\")") - execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j2, j3) values(3, \"{}\", \"{}\")") - execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j1, j2, j3) values(4, NULL, 'null', '\"null\"')") - execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j3) values(5, JSON_QUOTE('null'))") - execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j3) values(6, '{}')") + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "insert into json_tbl(id, j3) values(1, \"{}\")") + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "insert into json_tbl(id, j1, j3) values(2, \"{}\", \"{}\")") + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "insert into json_tbl(id, j2, j3) values(3, \"{}\", \"{}\")") + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "insert into json_tbl(id, j1, j2, j3) values(4, NULL, 'null', '\"null\"')") + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "insert into json_tbl(id, j3) values(5, JSON_QUOTE('null'))") + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "insert into json_tbl(id, j3) values(6, '{}')") id := 8 // 6 inserted above and one after copy phase is done @@ -68,7 +68,7 @@ func insertJSONValues(t *testing.T) { j1 := rand.IntN(numJsonValues) j2 := rand.IntN(numJsonValues) query := fmt.Sprintf(q, id, jsonValues[j1], jsonValues[j2]) - execVtgateQuery(t, vtgateConn, "product:0", query) + execVtgateQuery(t, vtgateConn, 
fmt.Sprintf("%s:0", defaultSourceKs), query) } } @@ -82,7 +82,7 @@ func insertMoreCustomers(t *testing.T, numCustomers int) { // that we reserved. vtgateConn, closeConn := getVTGateConn() defer closeConn() - maxID := waitForSequenceValue(t, vtgateConn, "product", "customer_seq", numCustomers) + maxID := waitForSequenceValue(t, vtgateConn, defaultSourceKs, "customer_seq", numCustomers) // So we need to calculate the first value we reserved // from the max. cid := maxID - int64(numCustomers) @@ -97,28 +97,28 @@ func insertMoreCustomers(t *testing.T, numCustomers int) { } cid++ } - execVtgateQuery(t, vtgateConn, "customer", sql) + execVtgateQuery(t, vtgateConn, defaultTargetKs, sql) } func insertMoreProducts(t *testing.T) { vtgateConn, closeConn := getVTGateConn() defer closeConn() sql := "insert into product(pid, description) values(3, 'cpu'),(4, 'camera'),(5, 'mouse');" - execVtgateQuery(t, vtgateConn, "product", sql) + execVtgateQuery(t, vtgateConn, defaultSourceKs, sql) } func insertMoreProductsForSourceThrottler(t *testing.T) { vtgateConn, closeConn := getVTGateConn() defer closeConn() sql := "insert into product(pid, description) values(103, 'new-cpu'),(104, 'new-camera'),(105, 'new-mouse');" - execVtgateQuery(t, vtgateConn, "product", sql) + execVtgateQuery(t, vtgateConn, defaultSourceKs, sql) } func insertMoreProductsForTargetThrottler(t *testing.T) { vtgateConn, closeConn := getVTGateConn() defer closeConn() sql := "insert into product(pid, description) values(203, 'new-cpu'),(204, 'new-camera'),(205, 'new-mouse');" - execVtgateQuery(t, vtgateConn, "product", sql) + execVtgateQuery(t, vtgateConn, defaultSourceKs, sql) } var blobTableQueries = []string{ @@ -137,6 +137,6 @@ func insertIntoBlobTable(t *testing.T) { vtgateConn, closeConn := getVTGateConn() defer closeConn() for _, query := range blobTableQueries { - execVtgateQuery(t, vtgateConn, "product:0", query) + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), query) } } diff --git 
a/go/test/endtoend/vreplication/migrate_test.go b/go/test/endtoend/vreplication/migrate_test.go index f654f9129a0..7a9c42a73bd 100644 --- a/go/test/endtoend/vreplication/migrate_test.go +++ b/go/test/endtoend/vreplication/migrate_test.go @@ -63,7 +63,7 @@ func TestMigrateUnsharded(t *testing.T) { }() defaultCell := vc.Cells[vc.CellNames[0]] - _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", + _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, defaultSourceKs, "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) require.NoError(t, err, "failed to create product keyspace") @@ -91,7 +91,7 @@ func TestMigrateUnsharded(t *testing.T) { extVtgateConn := getConnection(t, extVc.ClusterConfig.hostname, extVc.ClusterConfig.vtgateMySQLPort) insertInitialDataIntoExternalCluster(t, extVtgateConn) - targetPrimary := vc.getPrimaryTablet(t, "product", "0") + targetPrimary := vc.getPrimaryTablet(t, defaultSourceKs, "0") var output, expected string @@ -115,26 +115,26 @@ func TestMigrateUnsharded(t *testing.T) { require.Equal(t, "/vitess/global", gjson.Get(output, "topo_root").String()) }) - ksWorkflow := "product.e1" + ksWorkflow := fmt.Sprintf("%s.e1", defaultSourceKs) t.Run("migrate from external cluster", func(t *testing.T) { if output, err = vc.VtctldClient.ExecuteCommandWithOutput("Migrate", - "--target-keyspace", "product", "--workflow", "e1", + "--target-keyspace", defaultSourceKs, "--workflow", "e1", "create", "--source-keyspace", "rating", "--mount-name", "ext1", "--all-tables", "--cells=extcell1", "--tablet-types=primary,replica"); err != nil { t.Fatalf("Migrate command failed with %+v : %s\n", err, output) } waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) - expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 1) - waitForRowCountInTablet(t, targetPrimary, "product", "rating", 2) - waitForRowCountInTablet(t, targetPrimary, "product", "review", 3) + 
expectNumberOfStreams(t, vtgateConn, "migrate", "e1", fmt.Sprintf("%s:0", defaultSourceKs), 1) + waitForRowCountInTablet(t, targetPrimary, defaultSourceKs, "rating", 2) + waitForRowCountInTablet(t, targetPrimary, defaultSourceKs, "review", 3) execVtgateQuery(t, extVtgateConn, "rating", "insert into review(rid, pid, review) values(4, 1, 'review4');") execVtgateQuery(t, extVtgateConn, "rating", "insert into rating(gid, pid, rating) values(3, 1, 3);") - waitForRowCountInTablet(t, targetPrimary, "product", "rating", 3) - waitForRowCountInTablet(t, targetPrimary, "product", "review", 4) + waitForRowCountInTablet(t, targetPrimary, defaultSourceKs, "rating", 3) + waitForRowCountInTablet(t, targetPrimary, defaultSourceKs, "review", 4) doVDiff(t, ksWorkflow, "extcell1") output, err = vc.VtctldClient.ExecuteCommandWithOutput("Migrate", - "--target-keyspace", "product", "--workflow", "e1", "show") + "--target-keyspace", defaultSourceKs, "--workflow", "e1", "show") require.NoError(t, err, "Migrate command failed with %s", output) wf := gjson.Get(output, "workflows").Array()[0] @@ -142,32 +142,32 @@ func TestMigrateUnsharded(t *testing.T) { require.Equal(t, "Migrate", wf.Get("workflow_type").String()) output, err = vc.VtctldClient.ExecuteCommandWithOutput("Migrate", - "--target-keyspace", "product", "--workflow", "e1", "status", "--format=json") + "--target-keyspace", defaultSourceKs, "--workflow", "e1", "status", "--format=json") require.NoError(t, err, "Migrate command failed with %s", output) - require.Equal(t, "Running", gjson.Get(output, "shard_streams.product/0.streams.0.status").String()) + require.Equal(t, "Running", gjson.Get(output, fmt.Sprintf("shard_streams.%s/0.streams.0.status", defaultSourceKs)).String()) output, err = vc.VtctldClient.ExecuteCommandWithOutput("Migrate", - "--target-keyspace", "product", "--workflow", "e1", "complete") + "--target-keyspace", defaultSourceKs, "--workflow", "e1", "complete") require.NoError(t, err, "Migrate command failed with %s", 
output) - expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 0) + expectNumberOfStreams(t, vtgateConn, "migrate", "e1", fmt.Sprintf("%s:0", defaultSourceKs), 0) }) t.Run("cancel migrate workflow", func(t *testing.T) { - execVtgateQuery(t, vtgateConn, "product", "drop table review,rating") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "drop table review,rating") output, err = vc.VtctldClient.ExecuteCommandWithOutput("Migrate", - "--target-keyspace", "product", "--workflow", "e1", "Create", "--source-keyspace", "rating", + "--target-keyspace", defaultSourceKs, "--workflow", "e1", "Create", "--source-keyspace", "rating", "--mount-name", "ext1", "--all-tables", "--auto-start=false", "--cells=extcell1") require.NoError(t, err, "Migrate command failed with %s", output) - expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 1, binlogdatapb.VReplicationWorkflowState_Stopped.String()) - waitForRowCountInTablet(t, targetPrimary, "product", "rating", 0) - waitForRowCountInTablet(t, targetPrimary, "product", "review", 0) + expectNumberOfStreams(t, vtgateConn, "migrate", "e1", fmt.Sprintf("%s:0", defaultSourceKs), 1, binlogdatapb.VReplicationWorkflowState_Stopped.String()) + waitForRowCountInTablet(t, targetPrimary, defaultSourceKs, "rating", 0) + waitForRowCountInTablet(t, targetPrimary, defaultSourceKs, "review", 0) output, err = vc.VtctldClient.ExecuteCommandWithOutput("Migrate", - "--target-keyspace", "product", "--workflow", "e1", "cancel") + "--target-keyspace", defaultSourceKs, "--workflow", "e1", "cancel") require.NoError(t, err, "Migrate command failed with %s", output) - expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 0) + expectNumberOfStreams(t, vtgateConn, "migrate", "e1", fmt.Sprintf("%s:0", defaultSourceKs), 0) var found bool found, err = checkIfTableExists(t, vc, "zone1-100", "review") require.NoError(t, err) @@ -213,7 +213,7 @@ func TestMigrateSharded(t *testing.T) { vtgateConn := getConnection(t, 
vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() - setupCustomerKeyspace(t) + setupTargetKeyspace(t) createMoveTablesWorkflow(t, "customer,Lead,datze,customer2") tstWorkflowSwitchReadsAndWrites(t) tstWorkflowComplete(t) @@ -246,7 +246,7 @@ func TestMigrateSharded(t *testing.T) { ksWorkflow := "rating.e1" if output, err = extVc.VtctldClient.ExecuteCommandWithOutput("Migrate", "--target-keyspace", "rating", "--workflow", "e1", - "create", "--source-keyspace", "customer", "--mount-name", "external", "--all-tables", "--cells=zone1", + "create", "--source-keyspace", defaultTargetKs, "--mount-name", "external", "--all-tables", "--cells=zone1", "--tablet-types=primary"); err != nil { require.FailNow(t, "Migrate command failed with %+v : %s\n", err, output) } diff --git a/go/test/endtoend/vreplication/movetables_buffering_test.go b/go/test/endtoend/vreplication/movetables_buffering_test.go index da8b9d1f96b..cc00073b493 100644 --- a/go/test/endtoend/vreplication/movetables_buffering_test.go +++ b/go/test/endtoend/vreplication/movetables_buffering_test.go @@ -24,12 +24,12 @@ func TestMoveTablesBuffering(t *testing.T) { defer vc.TearDown() currentWorkflowType = binlogdatapb.VReplicationWorkflowType_MoveTables - setupMinimalCustomerKeyspace(t) + setupMinimalTargetKeyspace(t) tables := "loadtest" - err := tstWorkflowExec(t, defaultCellName, workflowName, sourceKs, targetKs, + err := tstWorkflowExec(t, defaultCellName, defaultWorkflowName, defaultSourceKs, defaultTargetKs, tables, workflowActionCreate, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) - waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, defaultKsWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) lg := newLoadGenerator(t, vc) go func() { @@ -37,10 +37,10 @@ func TestMoveTablesBuffering(t *testing.T) { }() lg.waitForCount(1000) - catchup(t, targetTab1, workflowName, 
"MoveTables") - catchup(t, targetTab2, workflowName, "MoveTables") - vdiff(t, targetKs, workflowName, "", nil) - waitForLowLag(t, "customer", workflowName) + catchup(t, targetTab1, defaultWorkflowName, "MoveTables") + catchup(t, targetTab2, defaultWorkflowName, "MoveTables") + vdiff(t, defaultTargetKs, defaultWorkflowName, "", nil) + waitForLowLag(t, defaultTargetKs, defaultWorkflowName) for i := 0; i < 10; i++ { tstWorkflowSwitchReadsAndWrites(t) time.Sleep(loadTestBufferingWindowDuration + 1*time.Second) diff --git a/go/test/endtoend/vreplication/movetables_mirrortraffic_test.go b/go/test/endtoend/vreplication/movetables_mirrortraffic_test.go index e0e7dbfc148..8d6ea97d2bb 100644 --- a/go/test/endtoend/vreplication/movetables_mirrortraffic_test.go +++ b/go/test/endtoend/vreplication/movetables_mirrortraffic_test.go @@ -36,20 +36,18 @@ func testMoveTablesMirrorTraffic(t *testing.T, flavor workflowFlavor) { vc = setupMinimalCluster(t) defer vc.TearDown() - sourceKeyspace := "product" - targetKeyspace := "customer" workflowName := "wf1" tables := []string{"customer", "loadtest", "customer2"} - _ = setupMinimalCustomerKeyspace(t) + _ = setupMinimalTargetKeyspace(t) mtwf := &moveTablesWorkflow{ workflowInfo: &workflowInfo{ vc: vc, workflowName: workflowName, - targetKeyspace: targetKeyspace, + targetKeyspace: defaultTargetKs, }, - sourceKeyspace: sourceKeyspace, + sourceKeyspace: defaultSourceKs, tables: "customer,loadtest,customer2", mirrorFlags: []string{"--percent", "25"}, } @@ -59,12 +57,12 @@ func testMoveTablesMirrorTraffic(t *testing.T, flavor workflowFlavor) { mt.Create() confirmNoMirrorRules(t) - waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, defaultKsWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) // Mirror rules can be created after a MoveTables workflow is created. 
mt.MirrorTraffic() confirmMirrorRulesExist(t) - expectMirrorRules(t, sourceKeyspace, targetKeyspace, tables, []topodatapb.TabletType{ + expectMirrorRules(t, defaultSourceKs, defaultTargetKs, tables, []topodatapb.TabletType{ topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY, @@ -74,7 +72,7 @@ func testMoveTablesMirrorTraffic(t *testing.T, flavor workflowFlavor) { mtwf.mirrorFlags[1] = "50" mt.MirrorTraffic() confirmMirrorRulesExist(t) - expectMirrorRules(t, sourceKeyspace, targetKeyspace, tables, []topodatapb.TabletType{ + expectMirrorRules(t, defaultSourceKs, defaultTargetKs, tables, []topodatapb.TabletType{ topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY, @@ -85,7 +83,7 @@ func testMoveTablesMirrorTraffic(t *testing.T, flavor workflowFlavor) { mtwf.mirrorFlags[1] = "75" mt.MirrorTraffic() confirmMirrorRulesExist(t) - expectMirrorRules(t, sourceKeyspace, targetKeyspace, tables, []topodatapb.TabletType{ + expectMirrorRules(t, defaultSourceKs, defaultTargetKs, tables, []topodatapb.TabletType{ topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY, @@ -105,7 +103,7 @@ func testMoveTablesMirrorTraffic(t *testing.T, flavor workflowFlavor) { mtwf.mirrorFlags = append(mtwf.mirrorFlags, "--tablet-types", "primary") mt.MirrorTraffic() confirmMirrorRulesExist(t) - expectMirrorRules(t, sourceKeyspace, targetKeyspace, tables, []topodatapb.TabletType{ + expectMirrorRules(t, defaultSourceKs, defaultTargetKs, tables, []topodatapb.TabletType{ topodatapb.TabletType_PRIMARY, }, 100) diff --git a/go/test/endtoend/vreplication/multi_tenant_test.go b/go/test/endtoend/vreplication/multi_tenant_test.go index c941c13d664..e2c3d2952c7 100644 --- a/go/test/endtoend/vreplication/multi_tenant_test.go +++ b/go/test/endtoend/vreplication/multi_tenant_test.go @@ -236,7 +236,7 @@ func TestMultiTenantSimple(t *testing.T) { // Create again and run it to completion. 
createFunc() - vdiff(t, targetKeyspace, workflowName, defaultCellName, nil) + vdiff(t, targetKeyspace, defaultWorkflowName, defaultCellName, nil) mt.SwitchReads() confirmOnlyReadsSwitched(t) @@ -396,7 +396,7 @@ func TestMultiTenantSharded(t *testing.T) { // Note: we cannot insert into the target keyspace since that is never routed to the source keyspace. lastIndex = insertRows(lastIndex, sourceKeyspace) waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKeyspace, mt.workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) - vdiff(t, targetKeyspace, workflowName, defaultCellName, nil) + vdiff(t, targetKeyspace, defaultWorkflowName, defaultCellName, nil) mt.SwitchReadsAndWrites() // Note: here we have already switched, and we can insert into the target keyspace, and it should get reverse // replicated to the source keyspace. The source keyspace is routed to the target keyspace at this point. diff --git a/go/test/endtoend/vreplication/partial_movetables_test.go b/go/test/endtoend/vreplication/partial_movetables_test.go index 88047bb0f59..27df9a7ce8f 100644 --- a/go/test/endtoend/vreplication/partial_movetables_test.go +++ b/go/test/endtoend/vreplication/partial_movetables_test.go @@ -35,8 +35,8 @@ import ( // Before canceling, we first switch traffic to the target keyspace and then reverse it back to the source keyspace. // This tests that artifacts are being properly cleaned up when a MoveTables ia canceled. 
func testCancel(t *testing.T) { + sourceKeyspace := defaultTargetKs targetKeyspace := "customer2" - sourceKeyspace := "customer" workflowName := "partial80DashForCancel" ksWorkflow := fmt.Sprintf("%s.%s", targetKeyspace, workflowName) // We use a different table in this MoveTables than the subsequent one, so that setting up of the artifacts @@ -109,10 +109,10 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) { }() vc = setupMinimalCluster(t) defer vc.TearDown() - sourceKeyspace := "product" - targetKeyspace := "customer" + sourceKeyspace := defaultSourceKs + targetKeyspace := defaultTargetKs workflowName := "wf1" - targetTabs := setupMinimalCustomerKeyspace(t) + targetTabs := setupMinimalTargetKeyspace(t) targetTab80Dash := targetTabs["80-"] targetTabDash80 := targetTabs["-80"] mt := newMoveTables(vc, &moveTablesWorkflow{ @@ -135,11 +135,14 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) { emptyGlobalRoutingRules := "{}\n" + sourceKeyspace = defaultTargetKs + targetKeyspace = "customer2" + // These should be listed in shard order emptyShardRoutingRules := `{"rules":[]}` - preCutoverShardRoutingRules := `{"rules":[{"from_keyspace":"customer2","to_keyspace":"customer","shard":"-80"},{"from_keyspace":"customer2","to_keyspace":"customer","shard":"80-"}]}` - halfCutoverShardRoutingRules := `{"rules":[{"from_keyspace":"customer2","to_keyspace":"customer","shard":"-80"},{"from_keyspace":"customer","to_keyspace":"customer2","shard":"80-"}]}` - postCutoverShardRoutingRules := `{"rules":[{"from_keyspace":"customer","to_keyspace":"customer2","shard":"-80"},{"from_keyspace":"customer","to_keyspace":"customer2","shard":"80-"}]}` + preCutoverShardRoutingRules := fmt.Sprintf(`{"rules":[{"from_keyspace":"%s","to_keyspace":"%s","shard":"-80"},{"from_keyspace":"%s","to_keyspace":"%s","shard":"80-"}]}`, targetKeyspace, sourceKeyspace, targetKeyspace, sourceKeyspace) + halfCutoverShardRoutingRules := 
fmt.Sprintf(`{"rules":[{"from_keyspace":"%s","to_keyspace":"%s","shard":"-80"},{"from_keyspace":"%s","to_keyspace":"%s","shard":"80-"}]}`, targetKeyspace, sourceKeyspace, sourceKeyspace, targetKeyspace) + postCutoverShardRoutingRules := fmt.Sprintf(`{"rules":[{"from_keyspace":"%s","to_keyspace":"%s","shard":"-80"},{"from_keyspace":"%s","to_keyspace":"%s","shard":"80-"}]}`, sourceKeyspace, targetKeyspace, sourceKeyspace, targetKeyspace) // Remove any manually applied shard routing rules as these // should be set by SwitchTraffic. @@ -165,8 +168,6 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) { } var err error workflowName = "partial80Dash" - sourceKeyspace = "customer" - targetKeyspace = "customer2" shard := "80-" tables := "customer,loadtest" mt80Dash := newMoveTables(vc, &moveTablesWorkflow{ @@ -195,9 +196,9 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) { vtgateConn, closeConn := getVTGateConn() defer closeConn() - waitForRowCount(t, vtgateConn, "customer", "customer", 3) // customer: all shards - waitForRowCount(t, vtgateConn, "customer2", "customer", 3) // customer2: all shards - waitForRowCount(t, vtgateConn, "customer2:80-", "customer", 2) // customer2: 80- + waitForRowCount(t, vtgateConn, sourceKeyspace, "customer", 3) // customer: all shards + waitForRowCount(t, vtgateConn, targetKeyspace, "customer", 3) // customer2: all shards + waitForRowCount(t, vtgateConn, fmt.Sprintf("%s:80-", targetKeyspace), "customer", 2) // customer2: 80- confirmGlobalRoutingToSource := func() { output, err := vc.VtctldClient.ExecuteCommandWithOutput("GetRoutingRules", "--compact") @@ -243,14 +244,14 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) { require.NoError(t, err) _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) - require.Contains(t, err.Error(), "target: customer.80-.primary", "Query was routed to the target before any SwitchTraffic") + require.Contains(t, 
err.Error(), fmt.Sprintf("target: %s.80-.primary", sourceKeyspace), "Query was routed to the target before any SwitchTraffic") log.Infof("Testing reverse route (target->source) for shard NOT being switched") _, err = vtgateConn.ExecuteFetch("use `customer2:-80`", 0, false) require.NoError(t, err) _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) require.Error(t, err) - require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before any SwitchTraffic") + require.Contains(t, err.Error(), fmt.Sprintf("target: %s.-80.primary", sourceKeyspace), "Query was routed to the target before any SwitchTraffic") // Switch all traffic for the shard mt80Dash.SwitchReadsAndWrites() @@ -275,7 +276,7 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) { require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) require.Error(t, err) - require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before partial SwitchTraffic") + require.Contains(t, err.Error(), fmt.Sprintf("target: %s.-80.primary", sourceKeyspace), "Query was routed to the target before partial SwitchTraffic") // Shard targeting _, err = vtgateConn.ExecuteFetch("use `customer2:80-`", 0, false) @@ -283,7 +284,7 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) { _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") - _, err = vtgateConn.ExecuteFetch("use `customer:80-`", 0, false) + _, err = vtgateConn.ExecuteFetch(fmt.Sprintf("use `%s:80-`", sourceKeyspace), 0, false) require.NoError(t, err) _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) @@ -297,21 
+298,21 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) { require.Contains(t, err.Error(), "target: customer2.80-.replica", "Query was routed to the source after partial SwitchTraffic") _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) require.Error(t, err) - require.Contains(t, err.Error(), "target: customer.-80.replica", "Query was routed to the target before partial SwitchTraffic") - _, err = vtgateConn.ExecuteFetch("use `customer@replica`", 0, false) + require.Contains(t, err.Error(), fmt.Sprintf("target: %s.-80.replica", sourceKeyspace), "Query was routed to the target before partial SwitchTraffic") + _, err = vtgateConn.ExecuteFetch(fmt.Sprintf("use `%s@replica`", sourceKeyspace), 0, false) require.NoError(t, err) _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.replica", "Query was routed to the source after partial SwitchTraffic") _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) require.Error(t, err) - require.Contains(t, err.Error(), "target: customer.-80.replica", "Query was routed to the target before partial SwitchTraffic") + require.Contains(t, err.Error(), fmt.Sprintf("target: %s.-80.replica", sourceKeyspace), "Query was routed to the target before partial SwitchTraffic") workflowExec := tstWorkflowExec // We cannot Complete a partial move tables at the moment because // it will find that all traffic has (obviously) not been switched. 
- err = workflowExec(t, "", workflowName, "", targetKs, "", workflowActionComplete, "", "", "", workflowExecOptsPartial80Dash) + err = workflowExec(t, "", workflowName, "", targetKeyspace, "", workflowActionComplete, "", "", "", workflowExecOptsPartial80Dash) require.Error(t, err) // Confirm global routing rules: -80 should still be routed to customer @@ -369,8 +370,8 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) { require.True(t, isEmptyWorkflowShowOutput(output)) // Be sure we've deleted the original workflow. - _, _ = vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", targetKs, "delete", "--workflow", wf, "--shards", opts.shardSubset) - output, err = vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", targetKs, "show", "--workflow", wf, "--shards", opts.shardSubset) + _, _ = vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", targetKeyspace, "delete", "--workflow", wf, "--shards", opts.shardSubset) + output, err = vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", targetKeyspace, "show", "--workflow", wf, "--shards", opts.shardSubset) require.NoError(t, err, output) require.True(t, isEmptyWorkflowShowOutput(output)) } diff --git a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go index da25df5b5ed..0010bd4fd35 100644 --- a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go +++ b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go @@ -45,15 +45,6 @@ import ( vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) -const ( - workflowName = "wf1" - sourceKs = "product" - targetKs = "customer" - ksWorkflow = targetKs + "." + workflowName - reverseKsWorkflow = sourceKs + "." 
+ workflowName + "_reverse" - defaultCellName = "zone1" -) - const ( workflowActionCreate = "Create" workflowActionMirrorTraffic = "Mirror" @@ -83,14 +74,14 @@ var defaultWorkflowExecOptions = &workflowExecOptions{ } func createReshardWorkflow(t *testing.T, sourceShards, targetShards string) error { - err := tstWorkflowExec(t, defaultCellName, workflowName, targetKs, targetKs, + err := tstWorkflowExec(t, defaultCellName, defaultWorkflowName, defaultTargetKs, defaultTargetKs, "", workflowActionCreate, "", sourceShards, targetShards, defaultWorkflowExecOptions) require.NoError(t, err) - waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) - confirmTablesHaveSecondaryKeys(t, []*cluster.VttabletProcess{targetTab1}, targetKs, "") - catchup(t, targetTab1, workflowName, "Reshard") - catchup(t, targetTab2, workflowName, "Reshard") - doVDiff(t, ksWorkflow, "") + waitForWorkflowState(t, vc, defaultKsWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + confirmTablesHaveSecondaryKeys(t, []*cluster.VttabletProcess{targetTab1}, defaultTargetKs, "") + catchup(t, targetTab1, defaultWorkflowName, "Reshard") + catchup(t, targetTab2, defaultWorkflowName, "Reshard") + doVDiff(t, defaultKsWorkflow, "") return nil } @@ -98,24 +89,24 @@ func createMoveTablesWorkflow(t *testing.T, tables string) { if tables == "" { tables = "customer" } - err := tstWorkflowExec(t, defaultCellName, workflowName, sourceKs, targetKs, + err := tstWorkflowExec(t, defaultCellName, defaultWorkflowName, defaultSourceKs, defaultTargetKs, tables, workflowActionCreate, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) - waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) - confirmTablesHaveSecondaryKeys(t, []*cluster.VttabletProcess{targetTab1}, targetKs, tables) - catchup(t, targetTab1, workflowName, "MoveTables") - catchup(t, targetTab2, workflowName, "MoveTables") - doVDiff(t, ksWorkflow, "") + 
waitForWorkflowState(t, vc, defaultKsWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + confirmTablesHaveSecondaryKeys(t, []*cluster.VttabletProcess{targetTab1}, defaultTargetKs, tables) + catchup(t, targetTab1, defaultWorkflowName, "MoveTables") + catchup(t, targetTab2, defaultWorkflowName, "MoveTables") + doVDiff(t, defaultKsWorkflow, "") } func tstWorkflowAction(t *testing.T, action, tabletTypes, cells string) error { - return tstWorkflowExec(t, cells, workflowName, sourceKs, targetKs, "customer", action, tabletTypes, "", "", defaultWorkflowExecOptions) + return tstWorkflowExec(t, cells, defaultWorkflowName, defaultSourceKs, defaultTargetKs, "customer", action, tabletTypes, "", "", defaultWorkflowExecOptions) } // tstWorkflowExec executes a MoveTables or Reshard workflow command using // vtctldclient. For the legacy vtctl client based version, use // tstWorkflowExecVtctl instead. -func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables, action, tabletTypes, +func tstWorkflowExec(t *testing.T, cells, workflow, defaultSourceKs, defaultTargetKs, tables, action, tabletTypes, sourceShards, targetShards string, options *workflowExecOptions) error { var args []string @@ -125,12 +116,12 @@ func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables, args = append(args, "Reshard") } - args = append(args, "--workflow", workflow, "--target-keyspace", targetKs, action) + args = append(args, "--workflow", workflow, "--target-keyspace", defaultTargetKs, action) switch action { case workflowActionCreate: if currentWorkflowType == binlogdatapb.VReplicationWorkflowType_MoveTables { - args = append(args, "--source-keyspace", sourceKs) + args = append(args, "--source-keyspace", defaultSourceKs) if tables != "" { args = append(args, "--tables", tables) } else { @@ -229,7 +220,7 @@ func testWorkflowUpdate(t *testing.T) { _, err := vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", "noexist", "update", "--workflow", "noexist", "--tablet-types", 
tabletTypes) require.Error(t, err) // Change the tablet-types to rdonly. - resp, err := vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", targetKs, "update", "--workflow", workflowName, "--tablet-types", "rdonly") + resp, err := vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", defaultTargetKs, "update", "--workflow", defaultWorkflowName, "--tablet-types", "rdonly") require.NoError(t, err, err) // Confirm that we changed the workflow. var ures vtctldatapb.WorkflowUpdateResponse @@ -239,7 +230,7 @@ func testWorkflowUpdate(t *testing.T) { require.Greater(t, len(ures.Details), 0) require.True(t, ures.Details[0].Changed) // Change tablet-types back to primary,replica,rdonly. - resp, err = vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", targetKs, "update", "--workflow", workflowName, "--tablet-types", tabletTypes) + resp, err = vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", defaultTargetKs, "update", "--workflow", defaultWorkflowName, "--tablet-types", tabletTypes) require.NoError(t, err, err) // Confirm that we changed the workflow. err = protojson.Unmarshal([]byte(resp), &ures) @@ -247,7 +238,7 @@ func testWorkflowUpdate(t *testing.T) { require.Greater(t, len(ures.Details), 0) require.True(t, ures.Details[0].Changed) // Execute a no-op as tablet-types is already primary,replica,rdonly. - resp, err = vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", targetKs, "update", "--workflow", workflowName, "--tablet-types", tabletTypes) + resp, err = vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", defaultTargetKs, "update", "--workflow", defaultWorkflowName, "--tablet-types", tabletTypes) require.NoError(t, err, err) // Confirm that we didn't change the workflow. 
err = protojson.Unmarshal([]byte(resp), &ures) @@ -306,8 +297,8 @@ func validateWritesRouteToSource(t *testing.T) { defer closeConn() insertQuery := "insert into customer(name, cid) values('tempCustomer2', 200)" matchInsertQuery := "insert into customer(`name`, cid) values" - assertQueryExecutesOnTablet(t, vtgateConn, sourceTab, "customer", insertQuery, matchInsertQuery) - execVtgateQuery(t, vtgateConn, "customer", "delete from customer where cid = 200") + assertQueryExecutesOnTablet(t, vtgateConn, sourceTab, defaultTargetKs, insertQuery, matchInsertQuery) + execVtgateQuery(t, vtgateConn, defaultTargetKs, "delete from customer where cid = 200") } func validateWritesRouteToTarget(t *testing.T) { @@ -315,20 +306,20 @@ func validateWritesRouteToTarget(t *testing.T) { defer closeConn() insertQuery := "insert into customer(name, cid) values('tempCustomer3', 101)" matchInsertQuery := "insert into customer(`name`, cid) values" - assertQueryExecutesOnTablet(t, vtgateConn, targetTab2, "customer", insertQuery, matchInsertQuery) + assertQueryExecutesOnTablet(t, vtgateConn, targetTab2, defaultTargetKs, insertQuery, matchInsertQuery) insertQuery = "insert into customer(name, cid) values('tempCustomer3', 102)" - assertQueryExecutesOnTablet(t, vtgateConn, targetTab1, "customer", insertQuery, matchInsertQuery) - execVtgateQuery(t, vtgateConn, "customer", "delete from customer where cid in (101, 102)") + assertQueryExecutesOnTablet(t, vtgateConn, targetTab1, defaultTargetKs, insertQuery, matchInsertQuery) + execVtgateQuery(t, vtgateConn, defaultTargetKs, "delete from customer where cid in (101, 102)") } func revert(t *testing.T, workflowType string) { - switchWrites(t, workflowType, ksWorkflow, true) + switchWrites(t, workflowType, defaultKsWorkflow, true) validateWritesRouteToSource(t) - switchReadsNew(t, workflowType, getCellNames(nil), ksWorkflow, true) + switchReadsNew(t, workflowType, getCellNames(nil), defaultKsWorkflow, true) validateReadsRouteToSource(t, "replica") // 
cancel the workflow to cleanup - _, err := vc.VtctldClient.ExecuteCommandWithOutput(workflowType, "--target-keyspace", targetKs, "--workflow", workflowName, "cancel") + _, err := vc.VtctldClient.ExecuteCommandWithOutput(workflowType, "--target-keyspace", defaultTargetKs, "--workflow", defaultWorkflowName, "cancel") require.NoError(t, err, fmt.Sprintf("%s Cancel error: %v", workflowType, err)) } @@ -369,7 +360,7 @@ func TestBasicV2Workflows(t *testing.T) { // Internal tables like the lifecycle ones for OnlineDDL should be ignored ddlSQL := "ALTER TABLE customer MODIFY cid bigint UNSIGNED" - tstApplySchemaOnlineDDL(t, ddlSQL, sourceKs) + tstApplySchemaOnlineDDL(t, ddlSQL, defaultSourceKs) testMoveTablesV2Workflow(t) testReshardV2Workflow(t) @@ -395,72 +386,72 @@ func testVSchemaForSequenceAfterMoveTables(t *testing.T) { // use MoveTables to move customer2 from product to customer using currentWorkflowType = binlogdatapb.VReplicationWorkflowType_MoveTables - err := tstWorkflowExec(t, defaultCellName, "wf2", sourceKs, targetKs, + err := tstWorkflowExec(t, defaultCellName, "wf2", defaultSourceKs, defaultTargetKs, "customer2", workflowActionCreate, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) - waitForWorkflowState(t, vc, "customer.wf2", binlogdatapb.VReplicationWorkflowState_Running.String()) - waitForLowLag(t, "customer", "wf2") + waitForWorkflowState(t, vc, fmt.Sprintf("%s.wf2", defaultTargetKs), binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForLowLag(t, defaultTargetKs, "wf2") - err = tstWorkflowExec(t, defaultCellName, "wf2", sourceKs, targetKs, + err = tstWorkflowExec(t, defaultCellName, "wf2", defaultSourceKs, defaultTargetKs, "", workflowActionSwitchTraffic, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) - err = tstWorkflowExec(t, defaultCellName, "wf2", sourceKs, targetKs, + err = tstWorkflowExec(t, defaultCellName, "wf2", defaultSourceKs, defaultTargetKs, "", workflowActionComplete, "", "", "", 
defaultWorkflowExecOptions) require.NoError(t, err) vtgateConn, closeConn := getVTGateConn() defer closeConn() // sanity check - output, err := vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", "product") + output, err := vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", defaultSourceKs) require.NoError(t, err) assert.NotContains(t, output, "customer2\"", "customer2 still found in keyspace product") - waitForRowCount(t, vtgateConn, "customer", "customer2", 3) + waitForRowCount(t, vtgateConn, defaultTargetKs, "customer2", 3) // check that customer2 has the sequence tag - output, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", "customer") + output, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", defaultTargetKs) require.NoError(t, err) assert.Contains(t, output, "\"sequence\": \"customer_seq2\"", "customer2 sequence missing in keyspace customer") // ensure sequence is available to vtgate num := 5 for i := 0; i < num; i++ { - execVtgateQuery(t, vtgateConn, "customer", "insert into customer2(name) values('a')") + execVtgateQuery(t, vtgateConn, defaultTargetKs, "insert into customer2(name) values('a')") } - waitForRowCount(t, vtgateConn, "customer", "customer2", 3+num) + waitForRowCount(t, vtgateConn, defaultTargetKs, "customer2", 3+num) want := fmt.Sprintf("[[INT32(%d)]]", 100+num-1) - waitForQueryResult(t, vtgateConn, "customer", "select max(cid) from customer2", want) + waitForQueryResult(t, vtgateConn, defaultTargetKs, "select max(cid) from customer2", want) // use MoveTables to move customer2 back to product. 
Note that now the table has an associated sequence - err = tstWorkflowExec(t, defaultCellName, "wf3", targetKs, sourceKs, + err = tstWorkflowExec(t, defaultCellName, "wf3", defaultTargetKs, defaultSourceKs, "customer2", workflowActionCreate, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) - waitForWorkflowState(t, vc, "product.wf3", binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.wf3", defaultSourceKs), binlogdatapb.VReplicationWorkflowState_Running.String()) - waitForLowLag(t, "product", "wf3") - err = tstWorkflowExec(t, defaultCellName, "wf3", targetKs, sourceKs, + waitForLowLag(t, defaultSourceKs, "wf3") + err = tstWorkflowExec(t, defaultCellName, "wf3", defaultTargetKs, defaultSourceKs, "", workflowActionSwitchTraffic, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) - err = tstWorkflowExec(t, defaultCellName, "wf3", targetKs, sourceKs, + err = tstWorkflowExec(t, defaultCellName, "wf3", defaultTargetKs, defaultSourceKs, "", workflowActionComplete, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) // sanity check - output, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", "product") + output, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", defaultSourceKs) require.NoError(t, err) assert.Contains(t, output, "customer2\"", "customer2 not found in keyspace product ") // check that customer2 still has the sequence tag - output, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", "product") + output, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", defaultSourceKs) require.NoError(t, err) assert.Contains(t, output, "\"sequence\": \"customer_seq2\"", "customer2 still found in keyspace product") // ensure sequence is available to vtgate for i := 0; i < num; i++ { - execVtgateQuery(t, vtgateConn, "product", "insert into customer2(name) values('a')") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "insert into customer2(name) 
values('a')") } - waitForRowCount(t, vtgateConn, "product", "customer2", 3+num+num) - res := execVtgateQuery(t, vtgateConn, "product", "select max(cid) from customer2") + waitForRowCount(t, vtgateConn, defaultSourceKs, "customer2", 3+num+num) + res := execVtgateQuery(t, vtgateConn, defaultSourceKs, "select max(cid) from customer2") cid, err := res.Rows[0][0].ToInt() require.NoError(t, err) require.GreaterOrEqual(t, cid, 100+num+num-1) @@ -482,12 +473,12 @@ func testReplicatingWithPKEnumCols(t *testing.T) { // typ is an enum, with soho having a stored and binlogged value of 2 deleteQuery := "delete from customer where cid = 2 and typ = 'soho'" insertQuery := "insert into customer(cid, name, typ, sport, meta) values(2, 'Paül','soho','cricket',convert(x'7b7d' using utf8mb4))" - execVtgateQuery(t, vtgateConn, sourceKs, deleteQuery) - waitForNoWorkflowLag(t, vc, targetKs, workflowName) - doVDiff(t, ksWorkflow, "") - execVtgateQuery(t, vtgateConn, sourceKs, insertQuery) - waitForNoWorkflowLag(t, vc, targetKs, workflowName) - doVDiff(t, ksWorkflow, "") + execVtgateQuery(t, vtgateConn, defaultSourceKs, deleteQuery) + waitForNoWorkflowLag(t, vc, defaultTargetKs, defaultWorkflowName) + doVDiff(t, defaultKsWorkflow, "") + execVtgateQuery(t, vtgateConn, defaultSourceKs, insertQuery) + waitForNoWorkflowLag(t, vc, defaultTargetKs, defaultWorkflowName) + doVDiff(t, defaultKsWorkflow, "") } func testReshardV2Workflow(t *testing.T) { @@ -514,7 +505,7 @@ func testReshardV2Workflow(t *testing.T) { return default: // Use a random customer type for each record. 
- _ = execVtgateQuery(t, dataGenConn, "customer", fmt.Sprintf("insert into customer (cid, name, typ) values (%d, 'tempCustomer%d', %s)", + _ = execVtgateQuery(t, dataGenConn, defaultTargetKs, fmt.Sprintf("insert into customer (cid, name, typ) values (%d, 'tempCustomer%d', %s)", id, id, customerTypes[rand.IntN(len(customerTypes))])) } time.Sleep(1 * time.Millisecond) @@ -524,18 +515,18 @@ func testReshardV2Workflow(t *testing.T) { // create internal tables on the original customer shards that should be // ignored and not show up on the new shards - execMultipleQueries(t, vtgateConn, targetKs+"/-80", internalSchema) - execMultipleQueries(t, vtgateConn, targetKs+"/80-", internalSchema) + execMultipleQueries(t, vtgateConn, defaultTargetKs+"/-80", internalSchema) + execMultipleQueries(t, vtgateConn, defaultTargetKs+"/80-", internalSchema) - createAdditionalCustomerShards(t, "-40,40-80,80-c0,c0-") + createAdditionalTargetShards(t, "-40,40-80,80-c0,c0-") createReshardWorkflow(t, "-80,80-", "-40,40-80,80-c0,c0-") validateReadsRouteToSource(t, "replica") validateWritesRouteToSource(t) // Verify that we've properly ignored any internal operational tables // and that they were not copied to the new target shards - verifyNoInternalTables(t, vtgateConn, targetKs+"/-40") - verifyNoInternalTables(t, vtgateConn, targetKs+"/c0-") + verifyNoInternalTables(t, vtgateConn, defaultTargetKs+"/-40") + verifyNoInternalTables(t, vtgateConn, defaultTargetKs+"/c0-") // Confirm that updating Reshard workflows works. testWorkflowUpdate(t) @@ -545,23 +536,23 @@ func testReshardV2Workflow(t *testing.T) { // Confirm that we lost no customer related writes during the Reshard. 
dataGenCancel() dataGenWg.Wait() - cres := execVtgateQuery(t, dataGenConn, "customer", "select count(*) from customer") + cres := execVtgateQuery(t, dataGenConn, defaultTargetKs, "select count(*) from customer") require.Len(t, cres.Rows, 1) - waitForNoWorkflowLag(t, vc, "customer", "customer_name") - cnres := execVtgateQuery(t, dataGenConn, "customer", "select count(*) from customer_name") + waitForNoWorkflowLag(t, vc, defaultTargetKs, "customer_name") + cnres := execVtgateQuery(t, dataGenConn, defaultTargetKs, "select count(*) from customer_name") require.Len(t, cnres.Rows, 1) require.EqualValues(t, cres.Rows, cnres.Rows) if debugMode { // We expect the row count to differ in enterprise_customer because it is // using a `where typ='enterprise'` filter. So the count is only for debug // info. - ecres := execVtgateQuery(t, dataGenConn, "customer", "select count(*) from enterprise_customer") + ecres := execVtgateQuery(t, dataGenConn, defaultTargetKs, "select count(*) from enterprise_customer") t.Logf("Done inserting customer data. Record counts in customer: %s, customer_name: %s, enterprise_customer: %s", cres.Rows[0][0].ToString(), cnres.Rows[0][0].ToString(), ecres.Rows[0][0].ToString()) } // We also do a vdiff on the materialize workflows for good measure. 
- doVtctldclientVDiff(t, "customer", "customer_name", "", nil) - doVtctldclientVDiff(t, "customer", "enterprise_customer", "", nil) + doVtctldclientVDiff(t, defaultTargetKs, "customer_name", "", nil) + doVtctldclientVDiff(t, defaultTargetKs, "enterprise_customer", "", nil) } func testMoveTablesV2Workflow(t *testing.T) { @@ -573,13 +564,13 @@ func testMoveTablesV2Workflow(t *testing.T) { if !debugMode { return } - output, err := vc.VtctldClient.ExecuteCommandWithOutput("materialize", "--target-keyspace=customer", "show", "--workflow=customer_name", "--compact", "--include-logs=false") + output, err := vc.VtctldClient.ExecuteCommandWithOutput("materialize", "--target-keyspace", defaultTargetKs, "show", "--workflow=customer_name", "--compact", "--include-logs=false") require.NoError(t, err) t.Logf("Materialize show output: %s", output) } // Test basic forward and reverse flows. - setupCustomerKeyspace(t) + setupTargetKeyspace(t) listOutputContainsWorkflow := func(output string, workflow string) bool { workflows := []string{} @@ -598,7 +589,7 @@ func testMoveTablesV2Workflow(t *testing.T) { require.NoError(t, err) return len(workflows) == 0 } - listAllArgs := []string{"workflow", "--keyspace", "customer", "list"} + listAllArgs := []string{"workflow", "--keyspace", defaultTargetKs, "list"} output, err := vc.VtctldClient.ExecuteCommandWithOutput(listAllArgs...) 
require.NoError(t, err) @@ -607,13 +598,13 @@ func testMoveTablesV2Workflow(t *testing.T) { // The purge table should get skipped/ignored // If it's not then we'll get an error as the table doesn't exist in the vschema createMoveTablesWorkflow(t, "customer,loadtest,vdiff_order,reftable,_vt_PURGE_4f9194b43b2011eb8a0104ed332e05c2_20221210194431") - waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, defaultKsWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) validateReadsRouteToSource(t, "replica,rdonly") validateWritesRouteToSource(t) // Verify that we've properly ignored any internal operational tables // and that they were not copied to the new target keyspace - verifyNoInternalTables(t, vtgateConn, targetKs) + verifyNoInternalTables(t, vtgateConn, defaultTargetKs) testReplicatingWithPKEnumCols(t) @@ -683,9 +674,9 @@ func testPartialSwitches(t *testing.T) { tstWorkflowSwitchWrites(t) checkStates(t, nextState, nextState) // idempotency - keyspace := "product" + keyspace := defaultSourceKs if currentWorkflowType == binlogdatapb.VReplicationWorkflowType_Reshard { - keyspace = "customer" + keyspace = defaultTargetKs } waitForLowLag(t, keyspace, "wf1_reverse") tstWorkflowReverseReads(t, "replica,rdonly", "zone1") @@ -712,13 +703,13 @@ func testRestOfWorkflow(t *testing.T) { Threshold: throttlerConfig.Threshold * 5, CustomQuery: throttlerConfig.Query, } - res, err := throttler.UpdateThrottlerTopoConfigRaw(vc.VtctldClient, "customer", req, nil, nil) + res, err := throttler.UpdateThrottlerTopoConfigRaw(vc.VtctldClient, defaultTargetKs, req, nil, nil) require.NoError(t, err, res) testPartialSwitches(t) // test basic forward and reverse flows - waitForLowLag(t, "customer", "wf1") + waitForLowLag(t, defaultTargetKs, "wf1") tstWorkflowSwitchReads(t, "", "") checkStates(t, wrangler.WorkflowStateNotSwitched, wrangler.WorkflowStateReadsSwitched) validateReadsRouteToTarget(t, 
"replica,rdonly") @@ -730,9 +721,9 @@ func testRestOfWorkflow(t *testing.T) { validateWritesRouteToTarget(t) // this function is called for both MoveTables and Reshard, so the reverse workflows exist in different keyspaces - keyspace := "product" + keyspace := defaultSourceKs if currentWorkflowType == binlogdatapb.VReplicationWorkflowType_Reshard { - keyspace = "customer" + keyspace = defaultTargetKs } waitForLowLag(t, keyspace, "wf1_reverse") tstWorkflowReverseReads(t, "", "") @@ -745,7 +736,7 @@ func testRestOfWorkflow(t *testing.T) { validateReadsRouteToSource(t, "replica,rdonly") validateWritesRouteToSource(t) - waitForLowLag(t, "customer", "wf1") + waitForLowLag(t, defaultTargetKs, "wf1") tstWorkflowSwitchWrites(t) checkStates(t, wrangler.WorkflowStateNotSwitched, wrangler.WorkflowStateWritesSwitched) validateReadsRouteToSource(t, "replica,rdonly") @@ -757,7 +748,7 @@ func testRestOfWorkflow(t *testing.T) { validateReadsRouteToSource(t, "replica,rdonly") validateWritesRouteToSource(t) - waitForLowLag(t, "customer", "wf1") + waitForLowLag(t, defaultTargetKs, "wf1") tstWorkflowSwitchReads(t, "", "") checkStates(t, wrangler.WorkflowStateNotSwitched, wrangler.WorkflowStateReadsSwitched) validateReadsRouteToTarget(t, "replica,rdonly") @@ -784,9 +775,9 @@ func testRestOfWorkflow(t *testing.T) { require.Contains(t, err.Error(), wrangler.ErrWorkflowNotFullySwitched) // fully switch and complete - waitForLowLag(t, "customer", "wf1") - waitForLowLag(t, "customer", "customer_name") - waitForLowLag(t, "customer", "enterprise_customer") + waitForLowLag(t, defaultTargetKs, "wf1") + waitForLowLag(t, defaultTargetKs, "customer_name") + waitForLowLag(t, defaultTargetKs, "enterprise_customer") tstWorkflowSwitchReadsAndWrites(t) validateReadsRouteToTarget(t, "replica,rdonly") validateWritesRouteToTarget(t) @@ -801,30 +792,30 @@ func setupCluster(t *testing.T) *VitessCluster { zone1 := vc.Cells["zone1"] zone2 := vc.Cells["zone2"] - vc.AddKeyspace(t, []*Cell{zone1, zone2}, 
"product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) + vc.AddKeyspace(t, []*Cell{zone1, zone2}, defaultSourceKs, "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) defer getVTGateConn() verifyClusterHealth(t, vc) insertInitialData(t) defaultCell := vc.Cells[vc.CellNames[0]] - sourceTab = vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet + sourceTab = vc.Cells[defaultCell.Name].Keyspaces[defaultSourceKs].Shards["0"].Tablets["zone1-100"].Vttablet if defaultReplicas > 0 { - sourceReplicaTab = vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-101"].Vttablet + sourceReplicaTab = vc.Cells[defaultCell.Name].Keyspaces[defaultSourceKs].Shards["0"].Tablets["zone1-101"].Vttablet } if defaultRdonly > 0 { - sourceRdonlyTab = vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-102"].Vttablet + sourceRdonlyTab = vc.Cells[defaultCell.Name].Keyspaces[defaultSourceKs].Shards["0"].Tablets["zone1-102"].Vttablet } return vc } -func setupCustomerKeyspace(t *testing.T) { - if _, err := vc.AddKeyspace(t, []*Cell{vc.Cells["zone1"], vc.Cells["zone2"]}, "customer", "-80,80-", +func setupTargetKeyspace(t *testing.T) { + if _, err := vc.AddKeyspace(t, []*Cell{vc.Cells["zone1"], vc.Cells["zone2"]}, defaultTargetKs, "-80,80-", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, nil); err != nil { t.Fatal(err) } defaultCell := vc.Cells[vc.CellNames[0]] - custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] + custKs := vc.Cells[defaultCell.Name].Keyspaces[defaultTargetKs] targetTab1 = custKs.Shards["-80"].Tablets["zone1-200"].Vttablet targetTab2 = custKs.Shards["80-"].Tablets["zone1-300"].Vttablet if defaultReplicas > 0 { @@ -851,24 +842,24 @@ func setupMinimalCluster(t *testing.T) *VitessCluster { zone1 := vc.Cells["zone1"] - vc.AddKeyspace(t, []*Cell{zone1}, "product", "0", initialProductVSchema, 
initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) + vc.AddKeyspace(t, []*Cell{zone1}, defaultSourceKs, "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) verifyClusterHealth(t, vc) insertInitialData(t) - sourceTab = vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet + sourceTab = vc.Cells[defaultCell.Name].Keyspaces[defaultSourceKs].Shards["0"].Tablets["zone1-100"].Vttablet return vc } -func setupMinimalCustomerKeyspace(t *testing.T) map[string]*cluster.VttabletProcess { +func setupMinimalTargetKeyspace(t *testing.T) map[string]*cluster.VttabletProcess { tablets := make(map[string]*cluster.VttabletProcess) - if _, err := vc.AddKeyspace(t, []*Cell{vc.Cells["zone1"]}, "customer", "-80,80-", + if _, err := vc.AddKeyspace(t, []*Cell{vc.Cells["zone1"]}, defaultTargetKs, "-80,80-", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, nil); err != nil { t.Fatal(err) } defaultCell := vc.Cells[vc.CellNames[0]] - custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] + custKs := vc.Cells[defaultCell.Name].Keyspaces[defaultTargetKs] targetTab1 = custKs.Shards["-80"].Tablets["zone1-200"].Vttablet targetTab2 = custKs.Shards["80-"].Tablets["zone1-300"].Vttablet tablets["-80"] = targetTab1 @@ -900,15 +891,13 @@ func switchReadsNew(t *testing.T, workflowType, cells, ksWorkflow string, revers func moveCustomerTableSwitchFlows(t *testing.T, cells []*Cell, sourceCellOrAlias string) { workflow := "wf1" - sourceKs := "product" - targetKs := "customer" - ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) + ksWorkflow := fmt.Sprintf("%s.%s", defaultTargetKs, workflow) tables := "customer" - setupCustomerKeyspace(t) + setupTargetKeyspace(t) workflowType := "MoveTables" var moveTablesAndWait = func() { - moveTablesAction(t, "Create", sourceCellOrAlias, workflow, sourceKs, targetKs, tables) + moveTablesAction(t, "Create", sourceCellOrAlias, workflow, defaultSourceKs, 
defaultTargetKs, tables) catchup(t, targetTab1, workflow, workflowType) catchup(t, targetTab2, workflow, workflowType) doVDiff(t, ksWorkflow, "") @@ -971,7 +960,7 @@ func moveCustomerTableSwitchFlows(t *testing.T, cells []*Cell, sourceCellOrAlias switchWrites(t, workflowType, ksWorkflow, false) validateWritesRouteToTarget(t) - switchWrites(t, workflowType, reverseKsWorkflow, true) + switchWrites(t, workflowType, defaultReverseKsWorkflow, true) validateWritesRouteToSource(t) validateReadsRouteToSource(t, "replica") @@ -991,12 +980,11 @@ func moveCustomerTableSwitchFlows(t *testing.T, cells []*Cell, sourceCellOrAlias switchWritesReverseSwitchReadsSwitchWrites() } -func createAdditionalCustomerShards(t *testing.T, shards string) { - ksName := "customer" +func createAdditionalTargetShards(t *testing.T, shards string) { defaultCell := vc.Cells[vc.CellNames[0]] - keyspace := vc.Cells[defaultCell.Name].Keyspaces[ksName] - require.NoError(t, vc.AddShards(t, []*Cell{defaultCell, vc.Cells["zone2"]}, keyspace, shards, defaultReplicas, defaultRdonly, 400, targetKsOpts)) - custKs := vc.Cells[defaultCell.Name].Keyspaces[ksName] + keyspace := vc.Cells[defaultCell.Name].Keyspaces[defaultTargetKs] + require.NoError(t, vc.AddShards(t, []*Cell{defaultCell, vc.Cells["zone2"]}, keyspace, shards, defaultReplicas, defaultRdonly, 400, defaultTargetKsOpts)) + custKs := vc.Cells[defaultCell.Name].Keyspaces[defaultTargetKs] targetTab2 = custKs.Shards["80-c0"].Tablets["zone1-600"].Vttablet targetTab1 = custKs.Shards["40-80"].Tablets["zone1-500"].Vttablet targetReplicaTab1 = custKs.Shards["-40"].Tablets["zone1-401"].Vttablet diff --git a/go/test/endtoend/vreplication/sidecardb_test.go b/go/test/endtoend/vreplication/sidecardb_test.go index 54c1a10130f..1f31b0f7190 100644 --- a/go/test/endtoend/vreplication/sidecardb_test.go +++ b/go/test/endtoend/vreplication/sidecardb_test.go @@ -67,7 +67,7 @@ func TestSidecarDB(t *testing.T) { cell1 := vc.Cells[defaultCellName] tablet100 := 
fmt.Sprintf("%s-100", defaultCellName) tablet101 := fmt.Sprintf("%s-101", defaultCellName) - vc.AddKeyspace(t, []*Cell{cell1}, keyspace, "0", initialProductVSchema, initialProductSchema, 1, 0, 100, sourceKsOpts) + vc.AddKeyspace(t, []*Cell{cell1}, keyspace, "0", initialProductVSchema, initialProductSchema, 1, 0, 100, defaultSourceKsOpts) shard0 := vc.Cells[defaultCellName].Keyspaces[keyspace].Shards[shard] tablet100Port := shard0.Tablets[tablet100].Vttablet.Port tablet101Port := shard0.Tablets[tablet101].Vttablet.Port diff --git a/go/test/endtoend/vreplication/time_zone_test.go b/go/test/endtoend/vreplication/time_zone_test.go index 3faa9e76a78..ec53f14539f 100644 --- a/go/test/endtoend/vreplication/time_zone_test.go +++ b/go/test/endtoend/vreplication/time_zone_test.go @@ -33,10 +33,8 @@ import ( // TestMoveTablesTZ tests the conversion of datetime based on the source timezone passed to the MoveTables workflow func TestMoveTablesTZ(t *testing.T) { workflow := "tz" - sourceKs := "product" - targetKs := "customer" - ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) - ksReverseWorkflow := fmt.Sprintf("%s.%s_reverse", sourceKs, workflow) + ksWorkflow := fmt.Sprintf("%s.%s", defaultTargetKs, workflow) + ksReverseWorkflow := fmt.Sprintf("%s.%s_reverse", defaultSourceKs, workflow) vc = NewVitessCluster(t, nil) defer vc.TearDown() @@ -44,13 +42,13 @@ func TestMoveTablesTZ(t *testing.T) { cells := []*Cell{defaultCell} cell1 := vc.Cells["zone1"] - vc.AddKeyspace(t, []*Cell{cell1}, sourceKs, "0", initialProductVSchema, initialProductSchema, 0, 0, 100, sourceKsOpts) + vc.AddKeyspace(t, []*Cell{cell1}, defaultSourceKs, "0", initialProductVSchema, initialProductSchema, 0, 0, 100, defaultSourceKsOpts) vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) - productTab := vc.Cells[defaultCell.Name].Keyspaces[sourceKs].Shards["0"].Tablets["zone1-100"].Vttablet + productTab := 
vc.Cells[defaultCell.Name].Keyspaces[defaultSourceKs].Shards["0"].Tablets["zone1-100"].Vttablet timeZoneSQLBytes, _ := os.ReadFile("tz.sql") timeZoneSQL := string(timeZoneSQLBytes) @@ -77,23 +75,23 @@ func TestMoveTablesTZ(t *testing.T) { insertInitialData(t) - if _, err := vc.AddKeyspace(t, cells, targetKs, "0", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, targetKsOpts); err != nil { + if _, err := vc.AddKeyspace(t, cells, defaultTargetKs, "0", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, defaultTargetKsOpts); err != nil { t.Fatal(err) } - custKs := vc.Cells[defaultCell.Name].Keyspaces[targetKs] + custKs := vc.Cells[defaultCell.Name].Keyspaces[defaultTargetKs] customerTab := custKs.Shards["0"].Tablets["zone1-200"].Vttablet loadTimeZoneInfo(customerTab, timeZoneSQL, "UTC") tables := "datze" - output, err := vc.VtctldClient.ExecuteCommandWithOutput("MoveTables", "--workflow", workflow, "--target-keyspace", targetKs, "Create", - "--source-keyspace", sourceKs, "--tables", tables, "--source-time-zone", "US/Pacifik") + output, err := vc.VtctldClient.ExecuteCommandWithOutput("MoveTables", "--workflow", workflow, "--target-keyspace", defaultTargetKs, "Create", + "--source-keyspace", defaultSourceKs, "--tables", tables, "--source-time-zone", "US/Pacifik") require.Error(t, err, output) require.Contains(t, output, "time zone is invalid") - output, err = vc.VtctldClient.ExecuteCommandWithOutput("MoveTables", "--workflow", workflow, "--target-keyspace", targetKs, "Create", - "--source-keyspace", sourceKs, "--tables", tables, "--source-time-zone", "US/Pacific") + output, err = vc.VtctldClient.ExecuteCommandWithOutput("MoveTables", "--workflow", workflow, "--target-keyspace", defaultTargetKs, "Create", + "--source-keyspace", defaultSourceKs, "--tables", tables, "--source-time-zone", "US/Pacific") require.NoError(t, err, output) catchup(t, customerTab, workflow, "MoveTables") @@ -115,11 +113,11 @@ func TestMoveTablesTZ(t 
*testing.T) { doVDiff(t, ksWorkflow, "") query := "select * from datze" - qrSourceUSPacific, err := productTab.QueryTablet(query, sourceKs, true) + qrSourceUSPacific, err := productTab.QueryTablet(query, defaultSourceKs, true) require.NoError(t, err) require.NotNil(t, qrSourceUSPacific) - qrTargetUTC, err := customerTab.QueryTablet(query, targetKs, true) + qrTargetUTC, err := customerTab.QueryTablet(query, defaultTargetKs, true) require.NoError(t, err) require.NotNil(t, qrTargetUTC) @@ -163,7 +161,7 @@ func TestMoveTablesTZ(t *testing.T) { // user should be either running this query or have set their location in their driver to map from the time in Vitess/UTC to local query = "select id, convert_tz(dt1, 'UTC', 'US/Pacific') dt1, convert_tz(dt2, 'UTC', 'US/Pacific') dt2, convert_tz(ts1, 'UTC', 'US/Pacific') ts1 from datze" - qrTargetUSPacific, err := customerTab.QueryTablet(query, "customer", true) + qrTargetUSPacific, err := customerTab.QueryTablet(query, defaultTargetKs, true) require.NoError(t, err) require.NotNil(t, qrTargetUSPacific) require.Equal(t, len(qrSourceUSPacific.Rows), len(qrTargetUSPacific.Rows)) @@ -174,7 +172,7 @@ func TestMoveTablesTZ(t *testing.T) { require.Equal(t, row.AsString("dt2", ""), qrTargetUSPacific.Named().Rows[i].AsString("dt2", "")) require.Equal(t, row.AsString("ts1", ""), qrTargetUSPacific.Named().Rows[i].AsString("ts1", "")) } - output, err = vc.VtctldClient.ExecuteCommandWithOutput("MoveTables", "--target-keyspace", targetKs, "SwitchTraffic", "--workflow", workflow) + output, err = vc.VtctldClient.ExecuteCommandWithOutput("MoveTables", "--target-keyspace", defaultTargetKs, "SwitchTraffic", "--workflow", workflow) require.NoError(t, err, output) qr, err := productTab.QueryTablet(sqlparser.BuildParsedQuery("select * from %s.vreplication where workflow='%s_reverse'", @@ -189,7 +187,7 @@ func TestMoveTablesTZ(t *testing.T) { } // inserts to test date conversions in reverse replication - execVtgateQuery(t, vtgateConn, "customer", 
"insert into datze(id, dt2) values (13, '2022-01-01 18:20:30')") - execVtgateQuery(t, vtgateConn, "customer", "insert into datze(id, dt2) values (14, '2022-04-01 12:06:07')") + execVtgateQuery(t, vtgateConn, defaultTargetKs, "insert into datze(id, dt2) values (13, '2022-01-01 18:20:30')") + execVtgateQuery(t, vtgateConn, defaultTargetKs, "insert into datze(id, dt2) values (14, '2022-04-01 12:06:07')") doVDiff(t, ksReverseWorkflow, "") } diff --git a/go/test/endtoend/vreplication/vdiff2_test.go b/go/test/endtoend/vreplication/vdiff2_test.go index 7f4d55e7757..67749b76694 100644 --- a/go/test/endtoend/vreplication/vdiff2_test.go +++ b/go/test/endtoend/vreplication/vdiff2_test.go @@ -33,6 +33,7 @@ import ( "google.golang.org/protobuf/proto" "vitess.io/vitess/go/ptr" + "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/sqlparser" @@ -73,8 +74,8 @@ var testCases = []*testCase{ name: "MoveTables/unsharded to two shards", workflow: "p1c2", typ: "MoveTables", - sourceKs: "product", - targetKs: "customer", + sourceKs: defaultSourceKs, + targetKs: defaultTargetKs, sourceShards: "0", targetShards: "-80,80-", tabletBaseID: 200, @@ -94,8 +95,8 @@ var testCases = []*testCase{ name: "Reshard Merge/split 2 to 3", workflow: "c2c3", typ: "Reshard", - sourceKs: "customer", - targetKs: "customer", + sourceKs: defaultTargetKs, + targetKs: defaultTargetKs, sourceShards: "-80,80-", targetShards: "-40,40-a0,a0-", tabletBaseID: 400, @@ -109,8 +110,8 @@ var testCases = []*testCase{ name: "Reshard/merge 3 to 1", workflow: "c3c1", typ: "Reshard", - sourceKs: "customer", - targetKs: "customer", + sourceKs: defaultTargetKs, + targetKs: defaultTargetKs, sourceShards: "-40,40-a0,a0-", targetShards: "0", tabletBaseID: 700, @@ -132,9 +133,7 @@ func checkVDiffCountStat(t *testing.T, tablet *cluster.VttabletProcess, expected func TestVDiff2(t *testing.T) { cellNames := "zone5,zone1,zone2,zone3,zone4" - sourceKs := "product" 
sourceShards := []string{"0"} - targetKs := "customer" targetShards := []string{"-80", "80-"} extraVTTabletArgs = []string{ // This forces us to use multiple vstream packets even with small test tables. @@ -150,7 +149,7 @@ func TestVDiff2(t *testing.T) { // The primary tablet is only added in the first cell. // We ONLY add primary tablets in this test. - _, err := vc.AddKeyspace(t, []*Cell{zone2, zone1, zone3}, sourceKs, strings.Join(sourceShards, ","), initialProductVSchema, initialProductSchema, 0, 0, 100, sourceKsOpts) + _, err := vc.AddKeyspace(t, []*Cell{zone2, zone1, zone3}, defaultSourceKs, strings.Join(sourceShards, ","), initialProductVSchema, initialProductSchema, 0, 0, 100, defaultSourceKsOpts) require.NoError(t, err) vtgateConn := vc.GetVTGateConn(t) @@ -161,17 +160,17 @@ func TestVDiff2(t *testing.T) { // Insert null and empty enum values for testing vdiff comparisons for those values. // If we add this to the initial data list, the counts in several other tests will need to change query := `insert into customer(cid, name, typ, sport) values(1001, null, 'soho','')` - execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:%s", sourceKs, sourceShards[0]), query) + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:%s", defaultSourceKs, sourceShards[0]), query) - generateMoreCustomers(t, sourceKs, 1000) + generateMoreCustomers(t, defaultSourceKs, 1000) // Create rows in the nopk table using the customer names and random ages between 20 and 100. query = "insert into nopk(name, age) select name, floor(rand()*80)+20 from customer" - execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:%s", sourceKs, sourceShards[0]), query) + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:%s", defaultSourceKs, sourceShards[0]), query) // The primary tablet is only added in the first cell. // We ONLY add primary tablets in this test. 
- tks, err := vc.AddKeyspace(t, []*Cell{zone3, zone1, zone2}, targetKs, strings.Join(targetShards, ","), customerVSchema, customerSchema, 0, 0, 200, targetKsOpts) + tks, err := vc.AddKeyspace(t, []*Cell{zone3, zone1, zone2}, defaultTargetKs, strings.Join(targetShards, ","), customerVSchema, customerSchema, 0, 0, 200, defaultTargetKsOpts) require.NoError(t, err) verifyClusterHealth(t, vc) @@ -179,10 +178,10 @@ func TestVDiff2(t *testing.T) { // (cid) vs (cid,typ) on the source. This confirms that we are able to properly // diff the table when the source and target have a different PK definition. // Remove the 0 date restrictions as the customer table uses them in its DEFAULTs. - execVtgateQuery(t, vtgateConn, targetKs, "set @@session.sql_mode='ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'") - execVtgateQuery(t, vtgateConn, targetKs, customerTableModifiedPK) + execVtgateQuery(t, vtgateConn, defaultTargetKs, "set @@session.sql_mode='ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'") + execVtgateQuery(t, vtgateConn, defaultTargetKs, customerTableModifiedPK) // Set the sql_mode back to the default. - execVtgateQuery(t, vtgateConn, targetKs, "set @@session.sql_mode='ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'") + execVtgateQuery(t, vtgateConn, defaultTargetKs, "set @@session.sql_mode='ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'") for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { @@ -191,7 +190,7 @@ func TestVDiff2(t *testing.T) { }) } - statsTablet := vc.getPrimaryTablet(t, targetKs, targetShards[0]) + statsTablet := vc.getPrimaryTablet(t, defaultTargetKs, targetShards[0]) // We diffed X rows so confirm that the global total is > 0. 
countStr, err := getDebugVar(t, statsTablet.Port, []string{"VDiffRowsComparedTotal"}) @@ -212,7 +211,7 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, defer vtgateConn.Close() arrTargetShards := strings.Split(tc.targetShards, ",") if tc.typ == "Reshard" { - require.NoError(t, vc.AddShards(t, cells, tks, tc.targetShards, 0, 0, tc.tabletBaseID, targetKsOpts)) + require.NoError(t, vc.AddShards(t, cells, tks, tc.targetShards, 0, 0, tc.tabletBaseID, defaultTargetKsOpts)) } ksWorkflow := fmt.Sprintf("%s.%s", tc.targetKs, tc.workflow) @@ -262,7 +261,7 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, totalRowsToCreate := seconds * perSecondCount log.Infof("Test host has %d vCPUs. Generating %d rows in the customer table to test --max-diff-duration", runtime.NumCPU(), totalRowsToCreate) for i := int64(0); i < totalRowsToCreate; i += chunkSize { - generateMoreCustomers(t, sourceKs, chunkSize) + generateMoreCustomers(t, tc.sourceKs, chunkSize) } // Wait for the workflow to catch up after all the inserts. @@ -281,7 +280,7 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, require.Equal(t, int64(0), leadRestarts, "expected VDiffRestartedTableDiffsCount stat to be 0 for the Lead table, got %d", leadRestarts) // Cleanup the created customer records so as not to slow down the rest of the test. 
- delstmt := fmt.Sprintf("delete from %s.customer order by cid desc limit %d", sourceKs, chunkSize) + delstmt := fmt.Sprintf("delete from %s.customer order by cid desc limit %d", sqlescape.EscapeID(tc.sourceKs), chunkSize) for i := int64(0); i < totalRowsToCreate; i += chunkSize { _, err := vtgateConn.ExecuteFetch(delstmt, int(chunkSize), false) require.NoError(t, err, "failed to cleanup added customer records: %v", err) diff --git a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go index d668701100e..4ca439adc2c 100644 --- a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go +++ b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go @@ -36,17 +36,17 @@ func TestMultipleConcurrentVDiffs(t *testing.T) { vc = NewVitessCluster(t, nil) defer vc.TearDown() - sourceKeyspace := "product" + sourceKeyspace := defaultSourceKs shardName := "0" cell := vc.Cells[cellName] - vc.AddKeyspace(t, []*Cell{cell}, sourceKeyspace, shardName, initialProductVSchema, initialProductSchema, 0, 0, 100, sourceKsOpts) + vc.AddKeyspace(t, []*Cell{cell}, sourceKeyspace, shardName, initialProductVSchema, initialProductSchema, 0, 0, 100, defaultSourceKsOpts) verifyClusterHealth(t, vc) insertInitialData(t) targetTabletId := 200 - targetKeyspace := "customer" - vc.AddKeyspace(t, []*Cell{cell}, targetKeyspace, shardName, initialProductVSchema, initialProductSchema, 0, 0, targetTabletId, sourceKsOpts) + targetKeyspace := defaultTargetKs + vc.AddKeyspace(t, []*Cell{cell}, targetKeyspace, shardName, initialProductVSchema, initialProductSchema, 0, 0, targetTabletId, defaultSourceKsOpts) index := 1000 var loadCtx context.Context @@ -69,8 +69,7 @@ func TestMultipleConcurrentVDiffs(t *testing.T) { time.Sleep(10 * time.Millisecond) } } - targetKs := vc.Cells[cellName].Keyspaces[targetKeyspace] - targetTab := targetKs.Shards["0"].Tablets[fmt.Sprintf("%s-%d", cellName, targetTabletId)].Vttablet + targetTab := 
vc.Cells[cellName].Keyspaces[targetKeyspace].Shards["0"].Tablets[fmt.Sprintf("%s-%d", cellName, targetTabletId)].Vttablet require.NotNil(t, targetTab) time.Sleep(15 * time.Second) // wait for some rows to be inserted. @@ -110,7 +109,7 @@ func TestMultipleConcurrentVDiffs(t *testing.T) { loadCancel() // confirm that show all shows the correct workflow and only that workflow. - output, err := vc.VtctldClient.ExecuteCommandWithOutput("VDiff", "--format", "json", "--workflow", "wf1", "--target-keyspace", "customer", "show", "all") + output, err := vc.VtctldClient.ExecuteCommandWithOutput("VDiff", "--format", "json", "--workflow", "wf1", "--target-keyspace", defaultTargetKs, "show", "all") require.NoError(t, err) log.Infof("VDiff output: %s", output) count := gjson.Get(output, "..#").Int() @@ -118,5 +117,5 @@ func TestMultipleConcurrentVDiffs(t *testing.T) { ksName := gjson.Get(output, "0.Keyspace").String() require.Equal(t, int64(1), count) require.Equal(t, "wf1", wf) - require.Equal(t, "customer", ksName) + require.Equal(t, defaultTargetKs, ksName) } diff --git a/go/test/endtoend/vreplication/vdiff_online_ddl_test.go b/go/test/endtoend/vreplication/vdiff_online_ddl_test.go index 76283ac551b..2ac404abe3e 100644 --- a/go/test/endtoend/vreplication/vdiff_online_ddl_test.go +++ b/go/test/endtoend/vreplication/vdiff_online_ddl_test.go @@ -14,6 +14,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/proto/vtctldata" + "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/utils" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" @@ -32,7 +33,7 @@ func TestOnlineDDLVDiff(t *testing.T) { defaultReplicas = 0 vc = setupMinimalCluster(t) defer vc.TearDown() - keyspace := "product" + keyspace := defaultSourceKs ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -90,8 +91,23 @@ func onlineDDLShow(t *testing.T, keyspace, uuid string) *vtctldata.GetSchemaMigr func execOnlineDDL(t *testing.T, strategy, keyspace, 
query string) string { output, err := vc.VtctldClient.ExecuteCommandWithOutput("ApplySchema", utils.GetFlagVariantForTests("--ddl-strategy"), strategy, "--sql", query, keyspace) require.NoError(t, err, output) - uuid := strings.TrimSpace(output) + output = strings.TrimSpace(output) if strategy != "direct" { + // We expect a UUID as the only output, but when using --ddl_strategy we get a warning mixed into the output: + // Flag --ddl_strategy has been deprecated, use --ddl-strategy instead + // In order to prevent this and other similar future issues, let's hunt for the UUID (which should be on its own line) + // in the returned output. + uuid := "" + lines := strings.Split(output, "\n") + for i := range lines { + line := strings.TrimSpace(lines[i]) + if schema.IsOnlineDDLUUID(line) { + uuid = line + break + } + } + require.NotEmpty(t, uuid, "UUID not returned in ApplySchema command output: %v", output) + output = uuid // return the UUID instead of the original output err = waitForCondition("online ddl to start", func() bool { response := onlineDDLShow(t, keyspace, uuid) if len(response.Migrations) > 0 && @@ -100,13 +116,13 @@ func execOnlineDDL(t *testing.T, strategy, keyspace, query string) string { return true } return false - }, defaultTimeout) + }, workflowStateTimeout) require.NoError(t, err) // The online ddl migration is set to SchemaMigration_RUNNING before it creates the // _vt.vreplication records. Hence wait for the vreplication workflow to be created as well. 
waitForWorkflowToBeCreated(t, vc, fmt.Sprintf("%s.%s", keyspace, uuid)) } - return uuid + return output } func waitForAdditionalRows(t *testing.T, keyspace, table string, count int) { diff --git a/go/test/endtoend/vreplication/vreplication_test.go b/go/test/endtoend/vreplication/vreplication_test.go index 4df6710bba0..14d193a7980 100644 --- a/go/test/endtoend/vreplication/vreplication_test.go +++ b/go/test/endtoend/vreplication/vreplication_test.go @@ -35,6 +35,7 @@ import ( "google.golang.org/protobuf/encoding/protojson" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/throttler" @@ -60,8 +61,8 @@ var ( vc *VitessCluster defaultRdonly int defaultReplicas int - sourceKsOpts = make(map[string]string) - targetKsOpts = make(map[string]string) + defaultSourceKsOpts = make(map[string]string) + defaultTargetKsOpts = make(map[string]string) httpClient = throttlebase.SetupHTTPClient(time.Second) sourceThrottlerAppName = throttlerapp.VStreamerName targetThrottlerAppName = throttlerapp.VPlayerName @@ -94,19 +95,19 @@ func init() { func TestVReplicationDDLHandling(t *testing.T) { var err error workflow := "onddl_test" - ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) + ksWorkflow := fmt.Sprintf("%s.%s", defaultTargetKs, workflow) table := "orders" newColumn := "ddltest" - cell := "zone1" + cell := defaultCellName shard := "0" vc = NewVitessCluster(t, nil) defer vc.TearDown() defaultCell := vc.Cells[cell] - if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, shard, initialProductVSchema, initialProductSchema, 0, 0, 100, nil); err != nil { + if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, defaultSourceKs, shard, initialProductVSchema, initialProductSchema, 0, 0, 100, nil); err != nil { t.Fatal(err) } - if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, targetKs, shard, "", "", 0, 0, 200, nil); err != nil { + if _, err := 
vc.AddKeyspace(t, []*Cell{defaultCell}, defaultTargetKs, shard, "", "", 0, 0, 200, nil); err != nil { t.Fatal(err) } vtgate := defaultCell.Vtgates[0] @@ -116,20 +117,20 @@ func TestVReplicationDDLHandling(t *testing.T) { vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() - sourceTab = vc.getPrimaryTablet(t, sourceKs, shard) - targetTab := vc.getPrimaryTablet(t, targetKs, shard) + sourceTab = vc.getPrimaryTablet(t, defaultSourceKs, shard) + targetTab := vc.getPrimaryTablet(t, defaultTargetKs, shard) insertInitialData(t) - _, err = vtgateConn.ExecuteFetch(fmt.Sprintf("use %s", sourceKs), 1, false) + _, err = vtgateConn.ExecuteFetch(fmt.Sprintf("use %s", defaultSourceKs), 1, false) require.NoError(t, err) addColDDL := fmt.Sprintf("alter table %s add column %s varchar(64)", table, newColumn) dropColDDL := fmt.Sprintf("alter table %s drop column %s", table, newColumn) checkColQuerySource := fmt.Sprintf("select count(column_name) from information_schema.columns where table_schema='vt_%s' and table_name='%s' and column_name='%s'", - sourceKs, table, newColumn) + defaultSourceKs, table, newColumn) checkColQueryTarget := fmt.Sprintf("select count(column_name) from information_schema.columns where table_schema='vt_%s' and table_name='%s' and column_name='%s'", - targetKs, table, newColumn) + defaultTargetKs, table, newColumn) // expectedAction is the specific action, e.g. ignore, that should have a count of 1. All other // actions should have a count of 0. id is the stream ID to check. 
@@ -149,7 +150,7 @@ func TestVReplicationDDLHandling(t *testing.T) { } // Test IGNORE behavior - moveTablesAction(t, "Create", defaultCellName, workflow, sourceKs, targetKs, table, "--on-ddl", binlogdatapb.OnDDLAction_IGNORE.String()) + moveTablesAction(t, "Create", defaultCellName, workflow, defaultSourceKs, defaultTargetKs, table, "--on-ddl", binlogdatapb.OnDDLAction_IGNORE.String()) // Wait until we get through the copy phase... catchup(t, targetTab, workflow, "MoveTables") // Add new col on source @@ -158,13 +159,13 @@ func TestVReplicationDDLHandling(t *testing.T) { // Confirm workflow is still running fine waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) // Confirm new col does not exist on target - waitForQueryResult(t, vtgateConn, targetKs, checkColQueryTarget, "[[INT64(0)]]") + waitForQueryResult(t, vtgateConn, defaultTargetKs, checkColQueryTarget, "[[INT64(0)]]") // Confirm new col does exist on source - waitForQueryResult(t, vtgateConn, sourceKs, checkColQuerySource, "[[INT64(1)]]") + waitForQueryResult(t, vtgateConn, defaultSourceKs, checkColQuerySource, "[[INT64(1)]]") // Confirm that we updated the stats on the target tablet as expected. 
checkOnDDLStats(binlogdatapb.OnDDLAction_IGNORE, 1) // Also test Cancel --keep-routing-rules - moveTablesAction(t, "Cancel", defaultCellName, workflow, sourceKs, targetKs, table, "--keep-routing-rules") + moveTablesAction(t, "Cancel", defaultCellName, workflow, defaultSourceKs, defaultTargetKs, table, "--keep-routing-rules") // Confirm that the routing rules were NOT cleared rr, err := vc.VtctldClient.ExecuteCommandWithOutput("GetRoutingRules") require.NoError(t, err) @@ -181,7 +182,7 @@ func TestVReplicationDDLHandling(t *testing.T) { require.NoError(t, err, "error executing %q: %v", dropColDDL, err) // Test STOP behavior (new col now exists nowhere) - moveTablesAction(t, "Create", defaultCellName, workflow, sourceKs, targetKs, table, "--on-ddl", binlogdatapb.OnDDLAction_STOP.String()) + moveTablesAction(t, "Create", defaultCellName, workflow, defaultSourceKs, defaultTargetKs, table, "--on-ddl", binlogdatapb.OnDDLAction_STOP.String()) // Wait until we get through the copy phase... catchup(t, targetTab, workflow, "MoveTables") // Add new col on the source @@ -190,24 +191,24 @@ func TestVReplicationDDLHandling(t *testing.T) { // Confirm that the worfklow stopped because of the DDL waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Stopped.String(), fmt.Sprintf("Message==Stopped at DDL %s", addColDDL)) // Confirm that the target does not have new col - waitForQueryResult(t, vtgateConn, targetKs, checkColQueryTarget, "[[INT64(0)]]") + waitForQueryResult(t, vtgateConn, defaultTargetKs, checkColQueryTarget, "[[INT64(0)]]") // Confirm that we updated the stats on the target tablet as expected. 
checkOnDDLStats(binlogdatapb.OnDDLAction_STOP, 2) - moveTablesAction(t, "Cancel", defaultCellName, workflow, sourceKs, targetKs, table) + moveTablesAction(t, "Cancel", defaultCellName, workflow, defaultSourceKs, defaultTargetKs, table) // Test EXEC behavior (new col now exists on source) - moveTablesAction(t, "Create", defaultCellName, workflow, sourceKs, targetKs, table, "--on-ddl", binlogdatapb.OnDDLAction_EXEC.String()) + moveTablesAction(t, "Create", defaultCellName, workflow, defaultSourceKs, defaultTargetKs, table, "--on-ddl", binlogdatapb.OnDDLAction_EXEC.String()) // Wait until we get through the copy phase... catchup(t, targetTab, workflow, "MoveTables") // Confirm target has new col from copy phase - waitForQueryResult(t, vtgateConn, targetKs, checkColQueryTarget, "[[INT64(1)]]") + waitForQueryResult(t, vtgateConn, defaultTargetKs, checkColQueryTarget, "[[INT64(1)]]") // Drop col on source _, err = vtgateConn.ExecuteFetch(dropColDDL, 1, false) require.NoError(t, err, "error executing %q: %v", dropColDDL, err) // Confirm workflow is still running fine waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) // Confirm new col was dropped on target - waitForQueryResult(t, vtgateConn, targetKs, checkColQueryTarget, "[[INT64(0)]]") + waitForQueryResult(t, vtgateConn, defaultTargetKs, checkColQueryTarget, "[[INT64(0)]]") // Confirm that we updated the stats on the target tablet as expected. 
checkOnDDLStats(binlogdatapb.OnDDLAction_EXEC, 3) } @@ -237,10 +238,10 @@ func TestVreplicationCopyThrottling(t *testing.T) { parallelInsertWorkers, } - if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, shard, initialProductVSchema, initialProductSchema, 0, 0, 100, nil); err != nil { + if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, defaultSourceKs, shard, initialProductVSchema, initialProductSchema, 0, 0, 100, nil); err != nil { t.Fatal(err) } - if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, targetKs, shard, "", "", 0, 0, 200, nil); err != nil { + if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, defaultTargetKs, shard, "", "", 0, 0, 200, nil); err != nil { t.Fatal(err) } vtgate := defaultCell.Vtgates[0] @@ -250,30 +251,30 @@ func TestVreplicationCopyThrottling(t *testing.T) { // have an InnoDB History List length that is less than specified in the tablet's config. // We update rows in a table not part of the MoveTables operation so that we're not blocking // on the LOCK TABLE call but rather the InnoDB History List length. - trxConn := generateInnoDBRowHistory(t, sourceKs, maxSourceTrxHistory) + trxConn := generateInnoDBRowHistory(t, defaultSourceKs, maxSourceTrxHistory) // History should have been generated on the source primary tablet - waitForInnoDBHistoryLength(t, vc.getPrimaryTablet(t, sourceKs, shard), maxSourceTrxHistory) + waitForInnoDBHistoryLength(t, vc.getPrimaryTablet(t, defaultSourceKs, shard), maxSourceTrxHistory) // We need to force primary tablet types as the history list has been increased on the source primary // We use a small timeout and ignore errors as we don't expect the MoveTables to start here // because of the InnoDB History List length. 
- moveTablesActionWithTabletTypes(t, "Create", defaultCell.Name, workflow, sourceKs, targetKs, table, "primary", true) + moveTablesActionWithTabletTypes(t, "Create", defaultCell.Name, workflow, defaultSourceKs, defaultTargetKs, table, "primary", true) // Wait for the copy phase to start - waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKs, workflow), binlogdatapb.VReplicationWorkflowState_Copying.String()) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", defaultTargetKs, workflow), binlogdatapb.VReplicationWorkflowState_Copying.String()) // The initial copy phase should be blocking on the history list. - confirmWorkflowHasCopiedNoData(t, targetKs, workflow) + confirmWorkflowHasCopiedNoData(t, defaultTargetKs, workflow) releaseInnoDBRowHistory(t, trxConn) trxConn.Close() } func TestBasicVreplicationWorkflow(t *testing.T) { - sourceKsOpts["DBTypeVersion"] = "mysql-8.0" - targetKsOpts["DBTypeVersion"] = "mysql-8.0" + defaultSourceKsOpts["DBTypeVersion"] = "mysql-8.0" + defaultTargetKsOpts["DBTypeVersion"] = "mysql-8.0" testBasicVreplicationWorkflow(t, "noblob") } func TestVreplicationCopyParallel(t *testing.T) { - sourceKsOpts["DBTypeVersion"] = "mysql-5.7" - targetKsOpts["DBTypeVersion"] = "mysql-5.7" + defaultSourceKsOpts["DBTypeVersion"] = "mysql-5.7" + defaultTargetKsOpts["DBTypeVersion"] = "mysql-5.7" extraVTTabletArgs = []string{ parallelInsertWorkers, } @@ -287,7 +288,6 @@ func testBasicVreplicationWorkflow(t *testing.T, binlogRowImage string) { // If limited == true, we only run a limited set of workflows. 
func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string) { var err error - defaultCellName := "zone1" vc = NewVitessCluster(t, nil) defer vc.TearDown() // Keep the cluster processes minimal to deal with CI resource constraints @@ -303,7 +303,7 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string } defaultCell := vc.Cells[defaultCellName] - vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, sourceKsOpts) + vc.AddKeyspace(t, []*Cell{defaultCell}, defaultSourceKs, "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, defaultSourceKsOpts) vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() @@ -315,7 +315,7 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string // the Lead and Lead-1 tables tested a specific case with binary sharding keys. 
Drop it now so that we don't // have to update the rest of the tests - execVtgateQuery(t, vtgateConn, "customer", "drop table `Lead`,`Lead-1`") + execVtgateQuery(t, vtgateConn, defaultTargetKs, "drop table `Lead`,`Lead-1`") validateRollupReplicates(t) shardOrders(t) shardMerchant(t) @@ -335,18 +335,18 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string insertMoreCustomers(t, 16) reshardCustomer2to4Split(t, nil, "") - confirmAllStreamsRunning(t, vtgateConn, "customer:-40") - expectNumberOfStreams(t, vtgateConn, "Customer2to4", "sales", "product:0", 4) + confirmAllStreamsRunning(t, vtgateConn, fmt.Sprintf("%s:-40", defaultTargetKs)) + expectNumberOfStreams(t, vtgateConn, "Customer2to4", "sales", fmt.Sprintf("%s:0", defaultSourceKs), 4) reshardCustomer3to2SplitMerge(t) - confirmAllStreamsRunning(t, vtgateConn, "customer:-60") - expectNumberOfStreams(t, vtgateConn, "Customer3to2", "sales", "product:0", 3) + confirmAllStreamsRunning(t, vtgateConn, fmt.Sprintf("%s:-60", defaultTargetKs)) + expectNumberOfStreams(t, vtgateConn, "Customer3to2", "sales", fmt.Sprintf("%s:0", defaultSourceKs), 3) reshardCustomer3to1Merge(t) - confirmAllStreamsRunning(t, vtgateConn, "customer:0") + confirmAllStreamsRunning(t, vtgateConn, fmt.Sprintf("%s:0", defaultTargetKs)) - expectNumberOfStreams(t, vtgateConn, "Customer3to1", "sales", "product:0", 1) + expectNumberOfStreams(t, vtgateConn, "Customer3to1", "sales", fmt.Sprintf("%s:0", defaultSourceKs), 1) t.Run("Verify CopyState Is Optimized Afterwards", func(t *testing.T) { - tabletMap := vc.getVttabletsInKeyspace(t, defaultCell, "customer", topodatapb.TabletType_PRIMARY.String()) + tabletMap := vc.getVttabletsInKeyspace(t, defaultCell, defaultTargetKs, topodatapb.TabletType_PRIMARY.String()) require.NotNil(t, tabletMap) require.Greater(t, len(tabletMap), 0) for _, tablet := range tabletMap { @@ -359,8 +359,8 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string if 
strings.ToLower(binlogRowImage) == "noblob" { return } - _, err = vtgateConn.ExecuteFetch("use customer", 1, false) - require.NoError(t, err, "error using customer keyspace: %v", err) + _, err = vtgateConn.ExecuteFetch(fmt.Sprintf("use `%s`", defaultTargetKs), 1, false) + require.NoError(t, err, "error using %s keyspace: %v", defaultTargetKs, err) res, err := vtgateConn.ExecuteFetch("select count(*) from customer where name is not null", 1, false) require.NoError(t, err, "error getting current row count in customer: %v", err) require.Equal(t, 1, len(res.Rows), "expected 1 row in count(*) query, got %d", len(res.Rows)) @@ -372,28 +372,28 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string require.NoError(t, err, "error executing %q: %v", insert, err) vindexName := "customer_name_keyspace_id" - err = vc.VtctldClient.ExecuteCommand("LookupVindex", "--name", vindexName, "--table-keyspace=product", "create", "--keyspace=customer", + err = vc.VtctldClient.ExecuteCommand("LookupVindex", "--name", vindexName, "--table-keyspace", defaultSourceKs, "create", "--keyspace", defaultTargetKs, "--type=consistent_lookup", "--table-owner=customer", "--table-owner-columns=name,cid", "--ignore-nulls", "--tablet-types=PRIMARY") require.NoError(t, err, "error executing LookupVindex create: %v", err) - waitForWorkflowState(t, vc, fmt.Sprintf("product.%s", vindexName), binlogdatapb.VReplicationWorkflowState_Running.String()) - waitForRowCount(t, vtgateConn, "product", vindexName, int(rows)) - customerVSchema, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", "customer") + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", defaultSourceKs, vindexName), binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForRowCount(t, vtgateConn, defaultSourceKs, vindexName, int(rows)) + customerVSchema, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", defaultTargetKs) require.NoError(t, err, "error executing GetVSchema: %v", err) vdx := 
gjson.Get(customerVSchema, fmt.Sprintf("vindexes.%s", vindexName)) require.NotNil(t, vdx, "lookup vindex %s not found", vindexName) require.Equal(t, "true", vdx.Get("params.write_only").String(), "expected write_only parameter to be true") - err = vc.VtctldClient.ExecuteCommand("LookupVindex", "--name", vindexName, "--table-keyspace=product", "externalize", "--keyspace=customer") + err = vc.VtctldClient.ExecuteCommand("LookupVindex", "--name", vindexName, "--table-keyspace", defaultSourceKs, "externalize", "--keyspace", defaultTargetKs) require.NoError(t, err, "error executing LookupVindex externalize: %v", err) - customerVSchema, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", "customer") + customerVSchema, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", defaultTargetKs) require.NoError(t, err, "error executing GetVSchema: %v", err) vdx = gjson.Get(customerVSchema, fmt.Sprintf("vindexes.%s", vindexName)) require.NotNil(t, vdx, "lookup vindex %s not found", vindexName) require.NotEqual(t, "true", vdx.Get("params.write_only").String(), "did not expect write_only parameter to be true") - err = vc.VtctldClient.ExecuteCommand("LookupVindex", "--name", vindexName, "--table-keyspace=product", "internalize", "--keyspace=customer") + err = vc.VtctldClient.ExecuteCommand("LookupVindex", "--name", vindexName, "--table-keyspace", defaultSourceKs, "internalize", "--keyspace", defaultTargetKs) require.NoError(t, err, "error executing LookupVindex internalize: %v", err) - customerVSchema, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", "customer") + customerVSchema, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", defaultTargetKs) require.NoError(t, err, "error executing GetVSchema: %v", err) vdx = gjson.Get(customerVSchema, fmt.Sprintf("vindexes.%s", vindexName)) require.NotNil(t, vdx, "lookup vindex %s not found", vindexName) @@ -402,8 +402,8 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string } 
func TestV2WorkflowsAcrossDBVersions(t *testing.T) { - sourceKsOpts["DBTypeVersion"] = "mysql-5.7" - targetKsOpts["DBTypeVersion"] = "mysql-8.0" + defaultSourceKsOpts["DBTypeVersion"] = "mysql-5.7" + defaultTargetKsOpts["DBTypeVersion"] = "mysql-8.0" testBasicVreplicationWorkflow(t, "") } @@ -411,8 +411,8 @@ func TestV2WorkflowsAcrossDBVersions(t *testing.T) { // and a MySQL target as while MariaDB is not supported in Vitess v14+ we want // MariaDB users to have a way to migrate into Vitess. func TestMoveTablesMariaDBToMySQL(t *testing.T) { - sourceKsOpts["DBTypeVersion"] = "mariadb-10.10" - targetKsOpts["DBTypeVersion"] = "mysql-8.0" + defaultSourceKsOpts["DBTypeVersion"] = "mariadb-10.10" + defaultTargetKsOpts["DBTypeVersion"] = "mysql-8.0" testVreplicationWorkflows(t, true /* only do MoveTables */, "") } @@ -429,22 +429,22 @@ func TestVStreamFlushBinlog(t *testing.T) { // to deal with CI resource constraints. // This also makes it easier to confirm the behavior as we know exactly // what tablets will be involved. 
- if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, shard, initialProductVSchema, initialProductSchema, 0, 0, 100, nil); err != nil { + if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, defaultSourceKs, shard, initialProductVSchema, initialProductSchema, 0, 0, 100, nil); err != nil { t.Fatal(err) } - if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, targetKs, shard, "", "", 0, 0, 200, nil); err != nil { + if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, defaultTargetKs, shard, "", "", 0, 0, 200, nil); err != nil { t.Fatal(err) } verifyClusterHealth(t, vc) - sourceTab = vc.getPrimaryTablet(t, sourceKs, shard) + sourceTab = vc.getPrimaryTablet(t, defaultSourceKs, shard) insertInitialData(t) tables := "product,customer,merchant,orders" - moveTablesAction(t, "Create", defaultCellName, workflow, sourceKs, targetKs, tables) + moveTablesAction(t, "Create", defaultCellName, workflow, defaultSourceKs, defaultTargetKs, tables) // Wait until we get through the copy phase... - catchup(t, vc.getPrimaryTablet(t, targetKs, shard), workflow, "MoveTables") + catchup(t, vc.getPrimaryTablet(t, defaultTargetKs, shard), workflow, "MoveTables") // So far, we should not have rotated any binlogs flushCount := int64(sourceTab.GetVars()["VStreamerFlushedBinlogs"].(float64)) @@ -464,7 +464,7 @@ func TestVStreamFlushBinlog(t *testing.T) { require.Greater(t, res.RowsAffected, uint64(0)) if i%100 == 0 { - res, err := sourceTab.QueryTablet("show binary logs", sourceKs, false) + res, err := sourceTab.QueryTablet("show binary logs", defaultSourceKs, false) require.NoError(t, err) require.NotNil(t, res) require.Greater(t, len(res.Rows), 0) @@ -480,13 +480,13 @@ func TestVStreamFlushBinlog(t *testing.T) { // Now we should rotate the binary logs ONE time on the source, even // though we're opening up multiple result streams (1 per table). 
runVDiffsSideBySide = false - vdiff(t, targetKs, workflow, defaultCellName, nil) + vdiff(t, defaultTargetKs, workflow, defaultCellName, nil) flushCount = int64(sourceTab.GetVars()["VStreamerFlushedBinlogs"].(float64)) require.Equal(t, flushCount, int64(1), "VStreamerFlushedBinlogs should now be 1") // Now if we do another vdiff, we should NOT rotate the binlogs again // as we haven't been generating a lot of new binlog events. - vdiff(t, targetKs, workflow, defaultCellName, nil) + vdiff(t, defaultTargetKs, workflow, defaultCellName, nil) flushCount = int64(sourceTab.GetVars()["VStreamerFlushedBinlogs"].(float64)) require.Equal(t, flushCount, int64(1), "VStreamerFlushedBinlogs should still be 1") } @@ -497,7 +497,7 @@ func TestVStreamFlushBinlog(t *testing.T) { func TestMoveTablesIgnoreSourceKeyspace(t *testing.T) { defaultCellName := "zone1" workflow := "mtnosource" - ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) + ksWorkflow := fmt.Sprintf("%s.%s", defaultTargetKs, workflow) defaultShard := "0" tables := []string{"customer"} var defaultCell *Cell @@ -542,9 +542,9 @@ func TestMoveTablesIgnoreSourceKeyspace(t *testing.T) { } targetShardNames := strings.Split(targetShards, ",") - _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, sourceShards, sourceVSchema, customerTable, 0, 0, 100, nil) + _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, defaultSourceKs, sourceShards, sourceVSchema, customerTable, 0, 0, 100, nil) require.NoError(t, err) - _, err = vc.AddKeyspace(t, []*Cell{defaultCell}, targetKs, targetShards, targetVSchema, "", 0, 0, 500, nil) + _, err = vc.AddKeyspace(t, []*Cell{defaultCell}, defaultTargetKs, targetShards, targetVSchema, "", 0, 0, 500, nil) require.NoError(t, err) verifyClusterHealth(t, vc) @@ -552,10 +552,10 @@ func TestMoveTablesIgnoreSourceKeyspace(t *testing.T) { insertInitialData(t) } - moveTablesAction(t, "Create", defaultCellName, workflow, sourceKs, targetKs, strings.Join(tables, ","), createArgs...) 
+ moveTablesAction(t, "Create", defaultCellName, workflow, defaultSourceKs, defaultTargetKs, strings.Join(tables, ","), createArgs...) // Wait until we get through the copy phase... for _, targetShard := range targetShardNames { - catchup(t, vc.getPrimaryTablet(t, targetKs, targetShard), workflow, "MoveTables") + catchup(t, vc.getPrimaryTablet(t, defaultTargetKs, targetShard), workflow, "MoveTables") } if switchTraffic { @@ -565,10 +565,10 @@ func TestMoveTablesIgnoreSourceKeyspace(t *testing.T) { // Decommission the source keyspace. require.NotZero(t, len(vc.Cells[defaultCellName].Keyspaces)) - require.NotNil(t, vc.Cells[defaultCellName].Keyspaces[sourceKs]) - err = vc.TearDownKeyspace(vc.Cells[defaultCellName].Keyspaces[sourceKs]) + require.NotNil(t, vc.Cells[defaultCellName].Keyspaces[defaultSourceKs]) + err = vc.TearDownKeyspace(vc.Cells[defaultCellName].Keyspaces[defaultSourceKs]) require.NoError(t, err) - vc.DeleteKeyspace(t, sourceKs) + vc.DeleteKeyspace(t, defaultSourceKs) // The command should fail. out, err := vc.VtctldClient.ExecuteCommandWithOutput(completeArgs...) 
@@ -582,7 +582,7 @@ func TestMoveTablesIgnoreSourceKeyspace(t *testing.T) { confirmNoRoutingRules(t) for _, table := range tables { for _, targetShard := range targetShardNames { - tksShard := fmt.Sprintf("%s/%s", targetKs, targetShard) + tksShard := fmt.Sprintf("%s/%s", defaultTargetKs, targetShard) validateTableInDenyList(t, vc, tksShard, table, false) } } @@ -595,30 +595,30 @@ func TestMoveTablesIgnoreSourceKeyspace(t *testing.T) { require.NoError(t, err) srrMap := topotools.GetShardRoutingRulesMap(&srr) for _, shard := range targetShardNames { - ksShard := fmt.Sprintf("%s.%s", targetKs, shard) - require.NotEqual(t, srrMap[ksShard], targetKs) + ksShard := fmt.Sprintf("%s.%s", defaultTargetKs, shard) + require.NotEqual(t, srrMap[ksShard], defaultTargetKs) } - confirmNoWorkflows(t, targetKs) + confirmNoWorkflows(t, defaultTargetKs) } t.Run("Workflow Delete", func(t *testing.T) { - args := []string{"Workflow", "--keyspace=" + targetKs, "delete", "--workflow=" + workflow} + args := []string{"Workflow", "--keyspace=" + defaultTargetKs, "delete", "--workflow=" + workflow} run(t, defaultShard, defaultShard, nil, args, false) }) t.Run("MoveTables Cancel", func(t *testing.T) { - args := []string{"MoveTables", "--workflow=" + workflow, "--target-keyspace=" + targetKs, "cancel"} + args := []string{"MoveTables", "--workflow=" + workflow, "--target-keyspace=" + defaultTargetKs, "cancel"} run(t, defaultShard, defaultShard, nil, args, false) }) t.Run("MoveTables Partial Cancel", func(t *testing.T) { createArgs := []string{"--source-shards", "-80"} - args := []string{"MoveTables", "--workflow=" + workflow, "--target-keyspace=" + targetKs, "cancel"} + args := []string{"MoveTables", "--workflow=" + workflow, "--target-keyspace=" + defaultTargetKs, "cancel"} run(t, "-80,80-", "-80,80-", createArgs, args, true) }) t.Run("MoveTables Complete", func(t *testing.T) { - args := []string{"MoveTables", "--workflow=" + workflow, "--target-keyspace=" + targetKs, "complete"} + args := 
[]string{"MoveTables", "--workflow=" + workflow, "--target-keyspace=" + defaultTargetKs, "complete"} run(t, defaultShard, defaultShard, nil, args, true) }) // You can't complete a partial MoveTables workflow. Well, only the @@ -628,7 +628,7 @@ func TestMoveTablesIgnoreSourceKeyspace(t *testing.T) { func testVStreamCellFlag(t *testing.T) { vgtid := &binlogdatapb.VGtid{ ShardGtids: []*binlogdatapb.ShardGtid{{ - Keyspace: "product", + Keyspace: defaultSourceKs, Shard: "0", Gtid: "", }}} @@ -714,8 +714,8 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) { vc = NewVitessCluster(t, &clusterOptions{cells: cells}) defer vc.TearDown() - keyspace := "product" shard := "0" + table := "product" // Run the e2e test with binlog_row_image=NOBLOB and // binlog_row_value_options=PARTIAL_JSON. @@ -724,7 +724,7 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) { cell1 := vc.Cells["zone1"] cell2 := vc.Cells["zone2"] - vc.AddKeyspace(t, []*Cell{cell1, cell2}, keyspace, shard, initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, sourceKsOpts) + vc.AddKeyspace(t, []*Cell{cell1, cell2}, defaultSourceKs, shard, initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, defaultSourceKsOpts) // Add cell alias containing only zone2 result, err := vc.VtctldClient.ExecuteCommandWithOutput("AddCellsAlias", "--cells", "zone2", "alias") @@ -735,10 +735,10 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) { vtgate := cell1.Vtgates[0] t.Run("VStreamFrom", func(t *testing.T) { - testVStreamFrom(t, vtgate, keyspace, 2) + testVStreamFrom(t, vtgate, table, 2) }) shardCustomer(t, true, []*Cell{cell1, cell2}, "alias", false) - isTableInDenyList(t, vc, "product/0", "customer") + isTableInDenyList(t, vc, fmt.Sprintf("%s/0", defaultSourceKs), "customer") // we tag along this test so as not to create the overhead of creating another cluster testVStreamCellFlag(t) } @@ -810,22 +810,20 @@ func testVStreamFrom(t *testing.T, vtgate 
*cluster.VtgateProcess, table string, func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAlias string, withOpenTx bool) { t.Run("shardCustomer", func(t *testing.T) { workflow := "p2c" - sourceKs := "product" - targetKs := "customer" - ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) - if _, err := vc.AddKeyspace(t, cells, "customer", "-80,80-", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, targetKsOpts); err != nil { + ksWorkflow := fmt.Sprintf("%s.%s", defaultTargetKs, workflow) + if _, err := vc.AddKeyspace(t, cells, defaultTargetKs, "-80,80-", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, defaultTargetKsOpts); err != nil { t.Fatal(err) } // Assume we are operating on first cell defaultCell := cells[0] - custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] + custKs := vc.Cells[defaultCell.Name].Keyspaces[defaultTargetKs] tables := "customer,loadtest,Lead,Lead-1,db_order_test,geom_tbl,json_tbl,blüb_tbl,vdiff_order,reftable" - moveTablesAction(t, "Create", sourceCellOrAlias, workflow, sourceKs, targetKs, tables) + moveTablesAction(t, "Create", sourceCellOrAlias, workflow, defaultSourceKs, defaultTargetKs, tables) customerTab1 := custKs.Shards["-80"].Tablets["zone1-200"].Vttablet customerTab2 := custKs.Shards["80-"].Tablets["zone1-300"].Vttablet - productTab := vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet + productTab := vc.Cells[defaultCell.Name].Keyspaces[defaultSourceKs].Shards["0"].Tablets["zone1-100"].Vttablet // Wait to finish the copy phase for all tables workflowType := "MoveTables" @@ -838,25 +836,25 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() // Confirm that the 0 scale decimal field, dec80, is replicated correctly - execVtgateQuery(t, vtgateConn, sourceKs, "update 
customer set dec80 = 0") - execVtgateQuery(t, vtgateConn, sourceKs, "update customer set blb = \"new blob data\" where cid=3") - execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j1 = null, j2 = 'null', j3 = '\"null\"' where id = 5") - execVtgateQuery(t, vtgateConn, sourceKs, "insert into json_tbl(id, j1, j2, j3) values (7, null, 'null', '\"null\"')") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "update customer set dec80 = 0") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "update customer set blb = \"new blob data\" where cid=3") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "update json_tbl set j1 = null, j2 = 'null', j3 = '\"null\"' where id = 5") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "insert into json_tbl(id, j1, j2, j3) values (7, null, 'null', '\"null\"')") // Test binlog-row-value-options=PARTIAL_JSON - execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j3 = JSON_SET(j3, '$.role', 'manager')") - execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j3 = JSON_SET(j3, '$.color', 'red')") - execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j3 = JSON_SET(j3, '$.day', 'wednesday')") - execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j3 = JSON_INSERT(JSON_REPLACE(j3, '$.day', 'friday'), '$.favorite_color', 'black')") - execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j3 = JSON_SET(JSON_REMOVE(JSON_REPLACE(j3, '$.day', 'monday'), '$.favorite_color'), '$.hobby', 'skiing') where id = 3") - execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j3 = JSON_SET(JSON_REMOVE(JSON_REPLACE(j3, '$.day', 'tuesday'), '$.favorite_color'), '$.hobby', 'skiing') where id = 4") - execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j3 = JSON_SET(JSON_SET(j3, '$.salary', 110), '$.role', 'IC') where id = 4") - execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j3 = JSON_SET(j3, '$.misc', '{\"address\":\"1012 S Park St\", \"town\":\"Hastings\", \"state\":\"MI\"}') 
where id = 1") - execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set id=id+1000, j3=JSON_SET(j3, '$.day', 'friday')") - waitForNoWorkflowLag(t, vc, targetKs, workflow) + execVtgateQuery(t, vtgateConn, defaultSourceKs, "update json_tbl set j3 = JSON_SET(j3, '$.role', 'manager')") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "update json_tbl set j3 = JSON_SET(j3, '$.color', 'red')") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "update json_tbl set j3 = JSON_SET(j3, '$.day', 'wednesday')") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "update json_tbl set j3 = JSON_INSERT(JSON_REPLACE(j3, '$.day', 'friday'), '$.favorite_color', 'black')") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "update json_tbl set j3 = JSON_SET(JSON_REMOVE(JSON_REPLACE(j3, '$.day', 'monday'), '$.favorite_color'), '$.hobby', 'skiing') where id = 3") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "update json_tbl set j3 = JSON_SET(JSON_REMOVE(JSON_REPLACE(j3, '$.day', 'tuesday'), '$.favorite_color'), '$.hobby', 'skiing') where id = 4") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "update json_tbl set j3 = JSON_SET(JSON_SET(j3, '$.salary', 110), '$.role', 'IC') where id = 4") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "update json_tbl set j3 = JSON_SET(j3, '$.misc', '{\"address\":\"1012 S Park St\", \"town\":\"Hastings\", \"state\":\"MI\"}') where id = 1") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "update json_tbl set id=id+1000, j3=JSON_SET(j3, '$.day', 'friday')") + waitForNoWorkflowLag(t, vc, defaultTargetKs, workflow) dec80Replicated := false for _, tablet := range []*cluster.VttabletProcess{customerTab1, customerTab2} { // Query the tablet's mysqld directly as the targets will have denied table entries. 
- dbc, err := tablet.TabletConn(targetKs, true) + dbc, err := tablet.TabletConn(defaultTargetKs, true) require.NoError(t, err) defer dbc.Close() if res := execQuery(t, dbc, "select cid from customer"); len(res.Rows) > 0 { @@ -869,8 +867,8 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl // Insert multiple rows in the loadtest table and immediately delete them to confirm that bulk delete // works the same way with the vplayer optimization enabled and disabled. Currently this optimization // is disabled by default, but enabled in TestCellAliasVreplicationWorkflow. - execVtgateQuery(t, vtgateConn, sourceKs, "insert into loadtest(id, name) values(10001, 'tempCustomer'), (10002, 'tempCustomer2'), (10003, 'tempCustomer3'), (10004, 'tempCustomer4')") - execVtgateQuery(t, vtgateConn, sourceKs, "delete from loadtest where id > 10000") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "insert into loadtest(id, name) values(10001, 'tempCustomer'), (10002, 'tempCustomer2'), (10003, 'tempCustomer3'), (10004, 'tempCustomer4')") + execVtgateQuery(t, vtgateConn, defaultSourceKs, "delete from loadtest where id > 10000") // Confirm that all partial query metrics get updated when we are testing the noblob mode. 
t.Run("validate partial query counts", func(t *testing.T) { @@ -884,7 +882,7 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl totalInserts, totalUpdates, totalInsertQueries, totalUpdateQueries := 0, 0, 0, 0 for _, tab := range []*cluster.VttabletProcess{tablet200, tablet300} { - insertCount, updateCount, insertQueries, updateQueries := getPartialMetrics(t, "product.0.p2c.1", tab) + insertCount, updateCount, insertQueries, updateQueries := getPartialMetrics(t, fmt.Sprintf("%s.0.p2c.1", defaultSourceKs), tab) totalInserts += insertCount totalUpdates += updateCount totalInsertQueries += insertQueries @@ -898,10 +896,10 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl }) query := "select cid from customer" - assertQueryExecutesOnTablet(t, vtgateConn, productTab, "product", query, query) + assertQueryExecutesOnTablet(t, vtgateConn, productTab, defaultSourceKs, query, query) insertQuery1 := "insert into customer(cid, name) values(1001, 'tempCustomer1')" matchInsertQuery1 := "insert into customer(cid, `name`) values (:vtg1 /* INT64 */, :vtg2 /* VARCHAR */)" - assertQueryExecutesOnTablet(t, vtgateConn, productTab, "product", insertQuery1, matchInsertQuery1) + assertQueryExecutesOnTablet(t, vtgateConn, productTab, defaultSourceKs, insertQuery1, matchInsertQuery1) // FIXME for some reason, these inserts fails on mac, need to investigate, some // vreplication bug because of case insensitiveness of table names on mac? 
@@ -914,25 +912,25 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl if err != nil { require.FailNow(t, output) } - execVtgateQuery(t, vtgateConn, "product", fmt.Sprintf("update `%s` set name='xyz'", tbl)) + execVtgateQuery(t, vtgateConn, defaultSourceKs, fmt.Sprintf("update `%s` set name='xyz'", tbl)) } } doVDiff(t, ksWorkflow, "") cellNames := getCellNames(cells) switchReadsDryRun(t, workflowType, cellNames, ksWorkflow, dryRunResultsReadCustomerShard) switchReads(t, workflowType, cellNames, ksWorkflow, false) - assertQueryExecutesOnTablet(t, vtgateConn, productTab, "customer", query, query) + assertQueryExecutesOnTablet(t, vtgateConn, productTab, defaultTargetKs, query, query) var commit func(t *testing.T) if withOpenTx { commit, _ = vc.startQuery(t, openTxQuery) } switchWritesDryRun(t, workflowType, ksWorkflow, dryRunResultsSwitchWritesCustomerShard) - shardNames := make([]string, 0, len(vc.Cells[defaultCell.Name].Keyspaces[sourceKs].Shards)) - for shardName := range maps.Keys(vc.Cells[defaultCell.Name].Keyspaces[sourceKs].Shards) { + shardNames := make([]string, 0, len(vc.Cells[defaultCell.Name].Keyspaces[defaultSourceKs].Shards)) + for shardName := range maps.Keys(vc.Cells[defaultCell.Name].Keyspaces[defaultSourceKs].Shards) { shardNames = append(shardNames, shardName) } - testSwitchTrafficPermissionChecks(t, workflowType, sourceKs, shardNames, targetKs, workflow) + testSwitchTrafficPermissionChecks(t, workflowType, defaultSourceKs, shardNames, defaultTargetKs, workflow) testSwitchWritesErrorHandling(t, []*cluster.VttabletProcess{productTab}, []*cluster.VttabletProcess{customerTab1, customerTab2}, workflow, workflowType) @@ -940,12 +938,12 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl // Now let's confirm that it works as expected with an error. 
switchWrites(t, workflowType, ksWorkflow, false) - checkThatVDiffFails(t, targetKs, workflow) + checkThatVDiffFails(t, defaultTargetKs, workflow) // The original unsharded customer data included an insert with the // vindex column (cid) of 999999, so the backing sequence table should // now have a next_id of 1000000 after SwitchTraffic. - res := execVtgateQuery(t, vtgateConn, sourceKs, "select next_id from customer_seq where id = 0") + res := execVtgateQuery(t, vtgateConn, defaultSourceKs, "select next_id from customer_seq where id = 0") require.Equal(t, "1000000", res.Rows[0][0].ToString()) if withOpenTx && commit != nil { @@ -954,65 +952,65 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl catchup(t, productTab, workflow, "MoveTables") - doVDiff(t, "product.p2c_reverse", "") + doVDiff(t, fmt.Sprintf("%s.p2c_reverse", defaultSourceKs), "") if withOpenTx { execVtgateQuery(t, vtgateConn, "", deleteOpenTxQuery) } - ksShards := []string{"product/0", "customer/-80", "customer/80-"} + ksShards := []string{fmt.Sprintf("%s/0", defaultSourceKs), fmt.Sprintf("%s/-80", defaultTargetKs), fmt.Sprintf("%s/80-", defaultTargetKs)} printShardPositions(vc, ksShards) insertQuery2 := "insert into customer(name, cid) values('tempCustomer2', 100)" matchInsertQuery2 := "insert into customer(`name`, cid) values (:vtg1 /* VARCHAR */, :_cid_0)" - assertQueryDoesNotExecutesOnTablet(t, vtgateConn, productTab, "customer", insertQuery2, matchInsertQuery2) + assertQueryDoesNotExecutesOnTablet(t, vtgateConn, productTab, defaultTargetKs, insertQuery2, matchInsertQuery2) insertQuery2 = "insert into customer(name, cid) values('tempCustomer3', 101)" // ID 101, hence due to reverse_bits in shard 80- - assertQueryExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery2, matchInsertQuery2) + assertQueryExecutesOnTablet(t, vtgateConn, customerTab2, defaultTargetKs, insertQuery2, matchInsertQuery2) insertQuery2 = "insert into customer(name, cid) 
values('tempCustomer4', 102)" // ID 102, hence due to reverse_bits in shard -80 - assertQueryExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery2, matchInsertQuery2) + assertQueryExecutesOnTablet(t, vtgateConn, customerTab1, defaultTargetKs, insertQuery2, matchInsertQuery2) - execVtgateQuery(t, vtgateConn, "customer", "update customer set meta = convert(x'7b7d' using utf8mb4) where cid = 1") + execVtgateQuery(t, vtgateConn, defaultTargetKs, "update customer set meta = convert(x'7b7d' using utf8mb4) where cid = 1") if testReverse { // Reverse Replicate switchReads(t, workflowType, cellNames, ksWorkflow, true) printShardPositions(vc, ksShards) switchWrites(t, workflowType, ksWorkflow, true) - output, err := vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", targetKs, "show", "--workflow", workflow) + output, err := vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", defaultTargetKs, "show", "--workflow", workflow) require.NoError(t, err) - require.Contains(t, output, "'customer.reverse_bits'") - require.Contains(t, output, "'customer.bmd5'") + require.Contains(t, output, fmt.Sprintf("'%s.reverse_bits'", defaultTargetKs)) + require.Contains(t, output, fmt.Sprintf("'%s.bmd5'", defaultTargetKs)) insertQuery1 = "insert into customer(cid, name) values(1002, 'tempCustomer5')" - assertQueryExecutesOnTablet(t, vtgateConn, productTab, "product", insertQuery1, matchInsertQuery1) + assertQueryExecutesOnTablet(t, vtgateConn, productTab, defaultSourceKs, insertQuery1, matchInsertQuery1) // both inserts go into 80-, this tests the edge-case where a stream (-80) has no relevant new events after the previous switch insertQuery1 = "insert into customer(cid, name) values(1003, 'tempCustomer6')" - assertQueryDoesNotExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery1, matchInsertQuery1) + assertQueryDoesNotExecutesOnTablet(t, vtgateConn, customerTab1, defaultTargetKs, insertQuery1, matchInsertQuery1) insertQuery1 = 
"insert into customer(cid, name) values(1004, 'tempCustomer7')" - assertQueryDoesNotExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery1, matchInsertQuery1) + assertQueryDoesNotExecutesOnTablet(t, vtgateConn, customerTab2, defaultTargetKs, insertQuery1, matchInsertQuery1) - waitForNoWorkflowLag(t, vc, targetKs, workflow) + waitForNoWorkflowLag(t, vc, defaultTargetKs, workflow) // Go forward again switchReads(t, workflowType, cellNames, ksWorkflow, false) switchWrites(t, workflowType, ksWorkflow, false) var exists bool - exists, err = isTableInDenyList(t, vc, "product/0", "customer") + exists, err = isTableInDenyList(t, vc, fmt.Sprintf("%s/0", defaultSourceKs), "customer") require.NoError(t, err, "Error getting denylist for customer:0") require.True(t, exists) - moveTablesAction(t, "Complete", cellNames, workflow, sourceKs, targetKs, tables) + moveTablesAction(t, "Complete", cellNames, workflow, defaultSourceKs, defaultTargetKs, tables) - exists, err = isTableInDenyList(t, vc, "product/0", "customer") + exists, err = isTableInDenyList(t, vc, fmt.Sprintf("%s/0", defaultSourceKs), "customer") require.NoError(t, err, "Error getting denylist for customer:0") require.False(t, exists) for _, shard := range strings.Split("-80,80-", ",") { - expectNumberOfStreams(t, vtgateConn, "shardCustomerTargetStreams", "p2c", "customer:"+shard, 0) + expectNumberOfStreams(t, vtgateConn, "shardCustomerTargetStreams", "p2c", fmt.Sprintf("%s:%s", defaultTargetKs, shard), 0) } - expectNumberOfStreams(t, vtgateConn, "shardCustomerReverseStreams", "p2c_reverse", "product:0", 0) + expectNumberOfStreams(t, vtgateConn, "shardCustomerReverseStreams", "p2c_reverse", fmt.Sprintf("%s:0", defaultSourceKs), 0) var found bool found, err = checkIfTableExists(t, vc, "zone1-100", "customer") @@ -1024,22 +1022,22 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl require.True(t, found) insertQuery2 = "insert into customer(name, cid) values('tempCustomer8', 
103)" // ID 103, hence due to reverse_bits in shard 80- - assertQueryDoesNotExecutesOnTablet(t, vtgateConn, productTab, "customer", insertQuery2, matchInsertQuery2) + assertQueryDoesNotExecutesOnTablet(t, vtgateConn, productTab, defaultTargetKs, insertQuery2, matchInsertQuery2) insertQuery2 = "insert into customer(name, cid) values('tempCustomer10', 104)" // ID 105, hence due to reverse_bits in shard -80 - assertQueryExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery2, matchInsertQuery2) + assertQueryExecutesOnTablet(t, vtgateConn, customerTab1, defaultTargetKs, insertQuery2, matchInsertQuery2) insertQuery2 = "insert into customer(name, cid) values('tempCustomer9', 105)" // ID 104, hence due to reverse_bits in shard 80- - assertQueryExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery2, matchInsertQuery2) + assertQueryExecutesOnTablet(t, vtgateConn, customerTab2, defaultTargetKs, insertQuery2, matchInsertQuery2) - execVtgateQuery(t, vtgateConn, "customer", "delete from customer where name like 'tempCustomer%'") - waitForRowCountInTablet(t, customerTab1, "customer", "customer", 1) - waitForRowCountInTablet(t, customerTab2, "customer", "customer", 2) - waitForRowCount(t, vtgateConn, "customer", "customer.customer", 3) + execVtgateQuery(t, vtgateConn, defaultTargetKs, "delete from customer where name like 'tempCustomer%'") + waitForRowCountInTablet(t, customerTab1, defaultTargetKs, "customer", 1) + waitForRowCountInTablet(t, customerTab2, defaultTargetKs, "customer", 2) + waitForRowCount(t, vtgateConn, defaultTargetKs, fmt.Sprintf("%s.customer", sqlescape.EscapeID(defaultTargetKs)), 3) query = "insert into customer (name, cid) values('george', 5)" - execVtgateQuery(t, vtgateConn, "customer", query) - waitForRowCountInTablet(t, customerTab1, "customer", "customer", 1) - waitForRowCountInTablet(t, customerTab2, "customer", "customer", 3) - waitForRowCount(t, vtgateConn, "customer", "customer.customer", 4) + execVtgateQuery(t, 
vtgateConn, defaultTargetKs, query) + waitForRowCountInTablet(t, customerTab1, defaultTargetKs, "customer", 1) + waitForRowCountInTablet(t, customerTab2, defaultTargetKs, "customer", 3) + waitForRowCount(t, vtgateConn, defaultTargetKs, fmt.Sprintf("%s.customer", sqlescape.EscapeID(defaultTargetKs)), 4) } }) } @@ -1049,8 +1047,8 @@ func validateRollupReplicates(t *testing.T) { insertMoreProducts(t) vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() - waitForRowCount(t, vtgateConn, "product", "rollup", 1) - waitForQueryResult(t, vtgateConn, "product:0", "select rollupname, kount from rollup", + waitForRowCount(t, vtgateConn, defaultSourceKs, "rollup", 1) + waitForQueryResult(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "select rollupname, kount from rollup", `[[VARCHAR("total") INT32(5)]]`) }) } @@ -1059,14 +1057,13 @@ func reshardCustomer2to4Split(t *testing.T, cells []*Cell, sourceCellOrAlias str t.Run("reshardCustomer2to4Split", func(t *testing.T) { vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() - ksName := "customer" counts := map[string]int{"zone1-600": 4, "zone1-700": 5, "zone1-800": 6, "zone1-900": 5} - reshard(t, ksName, "customer", "c2c4", "-80,80-", "-40,40-80,80-c0,c0-", + reshard(t, defaultTargetKs, "customer", "c2c4", "-80,80-", "-40,40-80,80-c0,c0-", 600, counts, nil, nil, cells, sourceCellOrAlias, 1) - waitForRowCount(t, vtgateConn, ksName, "customer", 20) + waitForRowCount(t, vtgateConn, defaultTargetKs, "customer", 20) query := "insert into customer (name) values('yoko')" - execVtgateQuery(t, vtgateConn, ksName, query) - waitForRowCount(t, vtgateConn, ksName, "customer", 21) + execVtgateQuery(t, vtgateConn, defaultTargetKs, query) + waitForRowCount(t, vtgateConn, defaultTargetKs, "customer", 21) }) } @@ -1136,18 +1133,16 @@ func reshardMerchant3to1Merge(t *testing.T) { func reshardCustomer3to2SplitMerge(t 
*testing.T) { // -40,40-80,80-c0 => merge/split, c0- stays the same ending up with 3 t.Run("reshardCustomer3to2SplitMerge", func(t *testing.T) { - ksName := "customer" counts := map[string]int{"zone1-1000": 8, "zone1-1100": 8, "zone1-1200": 5} - reshard(t, ksName, "customer", "c4c3", "-40,40-80,80-c0", "-60,60-c0", + reshard(t, defaultTargetKs, "customer", "c4c3", "-40,40-80,80-c0", "-60,60-c0", 1000, counts, nil, nil, nil, "", 1) }) } func reshardCustomer3to1Merge(t *testing.T) { // to unsharded t.Run("reshardCustomer3to1Merge", func(t *testing.T) { - ksName := "customer" counts := map[string]int{"zone1-1500": 21} - reshard(t, ksName, "customer", "c3c1", "-60,60-c0,c0-", "0", + reshard(t, defaultTargetKs, "customer", "c3c1", "-60,60-c0,c0-", "0", 1500, counts, nil, nil, nil, "", 3) }) } @@ -1166,7 +1161,7 @@ func reshard(t *testing.T, ksName string, tableName string, workflow string, sou callNames := getCellNames(cells) ksWorkflow := ksName + "." + workflow keyspace := vc.Cells[defaultCell.Name].Keyspaces[ksName] - require.NoError(t, vc.AddShards(t, cells, keyspace, targetShards, defaultReplicas, defaultRdonly, tabletIDBase, targetKsOpts)) + require.NoError(t, vc.AddShards(t, cells, keyspace, targetShards, defaultReplicas, defaultRdonly, tabletIDBase, defaultTargetKsOpts)) tablets := vc.getVttabletsInKeyspace(t, defaultCell, ksName, "primary") var sourceTablets, targetTablets []*cluster.VttabletProcess @@ -1222,14 +1217,12 @@ func shardOrders(t *testing.T) { defaultCell := vc.Cells[vc.CellNames[0]] workflow := "o2c" cell := defaultCell.Name - sourceKs := "product" - targetKs := "customer" tables := "orders" - ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) - applyVSchema(t, ordersVSchema, targetKs) - moveTablesAction(t, "Create", cell, workflow, sourceKs, targetKs, tables) + ksWorkflow := fmt.Sprintf("%s.%s", defaultTargetKs, workflow) + applyVSchema(t, ordersVSchema, defaultTargetKs) + moveTablesAction(t, "Create", cell, workflow, defaultSourceKs, 
defaultTargetKs, tables) - custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] + custKs := vc.Cells[defaultCell.Name].Keyspaces[defaultTargetKs] customerTab1 := custKs.Shards["-80"].Tablets["zone1-200"].Vttablet customerTab2 := custKs.Shards["80-"].Tablets["zone1-300"].Vttablet workflowType := "MoveTables" @@ -1238,10 +1231,10 @@ func shardOrders(t *testing.T) { doVDiff(t, ksWorkflow, "") switchReads(t, workflowType, strings.Join(vc.CellNames, ","), ksWorkflow, false) switchWrites(t, workflowType, ksWorkflow, false) - moveTablesAction(t, "Complete", cell, workflow, sourceKs, targetKs, tables) - waitForRowCountInTablet(t, customerTab1, "customer", "orders", 1) - waitForRowCountInTablet(t, customerTab2, "customer", "orders", 2) - waitForRowCount(t, vtgateConn, "customer", "orders", 3) + moveTablesAction(t, "Complete", cell, workflow, defaultSourceKs, defaultTargetKs, tables) + waitForRowCountInTablet(t, customerTab1, defaultTargetKs, "orders", 1) + waitForRowCountInTablet(t, customerTab2, defaultTargetKs, "orders", 2) + waitForRowCount(t, vtgateConn, defaultTargetKs, "orders", 3) }) } @@ -1260,14 +1253,13 @@ func shardMerchant(t *testing.T) { workflow := "p2m" defaultCell := vc.Cells[vc.CellNames[0]] cell := defaultCell.Name - sourceKs := "product" targetKs := merchantKeyspace tables := "merchant" ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) - if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, merchantKeyspace, "-80,80-", merchantVSchema, "", defaultReplicas, defaultRdonly, 400, targetKsOpts); err != nil { + if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, merchantKeyspace, "-80,80-", merchantVSchema, "", defaultReplicas, defaultRdonly, 400, defaultTargetKsOpts); err != nil { t.Fatal(err) } - moveTablesAction(t, "Create", cell, workflow, sourceKs, targetKs, tables) + moveTablesAction(t, "Create", cell, workflow, defaultSourceKs, targetKs, tables) merchantKs := vc.Cells[defaultCell.Name].Keyspaces[merchantKeyspace] merchantTab1 := 
merchantKs.Shards["-80"].Tablets["zone1-400"].Vttablet merchantTab2 := merchantKs.Shards["80-"].Tablets["zone1-500"].Vttablet @@ -1286,7 +1278,7 @@ func shardMerchant(t *testing.T) { if err != nil { require.FailNow(t, output) } - moveTablesAction(t, "Complete", cell, workflow, sourceKs, targetKs, tables) + moveTablesAction(t, "Complete", cell, workflow, defaultSourceKs, targetKs, tables) waitForRowCountInTablet(t, merchantTab1, merchantKeyspace, "merchant", 1) waitForRowCountInTablet(t, merchantTab2, merchantKeyspace, "merchant", 1) @@ -1329,9 +1321,9 @@ func testMaterializeWithNonExistentTable(t *testing.T) { func materializeProduct(t *testing.T) { t.Run("materializeProduct", func(t *testing.T) { - // Materializing from "product" keyspace to "customer" keyspace. + // Materializing from defaultSourceKs keyspace to defaultTargetKs keyspace. workflow := "cproduct" - keyspace := "customer" + keyspace := defaultTargetKs defaultCell := vc.Cells[vc.CellNames[0]] applyVSchema(t, materializeProductVSchema, keyspace) materialize(t, materializeProductSpec) @@ -1341,10 +1333,10 @@ func materializeProduct(t *testing.T) { waitForRowCountInTablet(t, tab, keyspace, workflow, 5) } - productTablets := vc.getVttabletsInKeyspace(t, defaultCell, "product", "primary") + productTablets := vc.getVttabletsInKeyspace(t, defaultCell, defaultSourceKs, "primary") t.Run("throttle-app-product", func(t *testing.T) { // Now, throttle the source side component (vstreamer), and insert some rows. 
- err := throttler.ThrottleKeyspaceApp(vc.VtctldClient, "product", sourceThrottlerAppName) + err := throttler.ThrottleKeyspaceApp(vc.VtctldClient, defaultSourceKs, sourceThrottlerAppName) assert.NoError(t, err) for _, tab := range productTablets { status, err := throttler.GetThrottlerStatus(vc.VtctldClient, &cluster.Vttablet{Alias: tab.Name}) @@ -1373,12 +1365,12 @@ func materializeProduct(t *testing.T) { for _, tab := range customerTablets { waitForRowCountInTablet(t, tab, keyspace, workflow, 5) // Confirm that we updated the stats on the target tablets as expected. - confirmVReplicationThrottling(t, tab, sourceKs, workflow, sourceThrottlerAppName) + confirmVReplicationThrottling(t, tab, defaultSourceKs, workflow, sourceThrottlerAppName) } }) t.Run("unthrottle-app-product", func(t *testing.T) { // Unthrottle the vstreamer component, and expect the rows to show up. - err := throttler.UnthrottleKeyspaceApp(vc.VtctldClient, "product", sourceThrottlerAppName) + err := throttler.UnthrottleKeyspaceApp(vc.VtctldClient, defaultSourceKs, sourceThrottlerAppName) assert.NoError(t, err) for _, tab := range productTablets { // Give time for unthrottling to take effect and for targets to fetch data. @@ -1427,7 +1419,7 @@ func materializeProduct(t *testing.T) { for _, tab := range customerTablets { waitForRowCountInTablet(t, tab, keyspace, workflow, 8) // Confirm that we updated the stats on the target tablets as expected. 
- confirmVReplicationThrottling(t, tab, sourceKs, workflow, targetThrottlerAppName) + confirmVReplicationThrottling(t, tab, defaultSourceKs, workflow, targetThrottlerAppName) } }) t.Run("unthrottle-app-customer", func(t *testing.T) { @@ -1452,15 +1444,14 @@ func materializeRollup(t *testing.T) { t.Run("materializeRollup", func(t *testing.T) { vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() - keyspace := "product" workflow := "rollup" - applyVSchema(t, materializeSalesVSchema, keyspace) + applyVSchema(t, materializeSalesVSchema, defaultSourceKs) defaultCell := vc.Cells[vc.CellNames[0]] - productTab := vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet + productTab := vc.Cells[defaultCell.Name].Keyspaces[defaultSourceKs].Shards["0"].Tablets["zone1-100"].Vttablet materialize(t, materializeRollupSpec) catchup(t, productTab, workflow, "Materialize") - waitForRowCount(t, vtgateConn, "product", "rollup", 1) - waitForQueryResult(t, vtgateConn, "product:0", "select rollupname, kount from rollup", + waitForRowCount(t, vtgateConn, defaultSourceKs, "rollup", 1) + waitForQueryResult(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "select rollupname, kount from rollup", `[[VARCHAR("total") INT32(2)]]`) }) } @@ -1469,14 +1460,13 @@ func materializeSales(t *testing.T) { t.Run("materializeSales", func(t *testing.T) { vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() - keyspace := "product" - applyVSchema(t, materializeSalesVSchema, keyspace) + applyVSchema(t, materializeSalesVSchema, defaultSourceKs) materialize(t, materializeSalesSpec) defaultCell := vc.Cells[vc.CellNames[0]] - productTab := vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet + productTab := vc.Cells[defaultCell.Name].Keyspaces[defaultSourceKs].Shards["0"].Tablets["zone1-100"].Vttablet catchup(t, 
productTab, "sales", "Materialize") - waitForRowCount(t, vtgateConn, "product", "sales", 2) - waitForQueryResult(t, vtgateConn, "product:0", "select kount, amount from sales", + waitForRowCount(t, vtgateConn, defaultSourceKs, "sales", 2) + waitForQueryResult(t, vtgateConn, fmt.Sprintf("%s:0", defaultSourceKs), "select kount, amount from sales", `[[INT32(1) INT32(10)] [INT32(2) INT32(35)]]`) }) } @@ -1590,12 +1580,12 @@ func catchup(t *testing.T, vttablet *cluster.VttabletProcess, workflow, info str vttablet.WaitForVReplicationToCatchup(t, workflow, fmt.Sprintf("vt_%s", vttablet.Keyspace), sidecarDBName, maxWait) } -func moveTablesAction(t *testing.T, action, cell, workflow, sourceKs, targetKs, tables string, extraFlags ...string) { +func moveTablesAction(t *testing.T, action, cell, workflow, defaultSourceKs, defaultTargetKs, tables string, extraFlags ...string) { var err error - args := []string{"MoveTables", "--workflow=" + workflow, "--target-keyspace=" + targetKs, action} + args := []string{"MoveTables", "--workflow=" + workflow, "--target-keyspace=" + defaultTargetKs, action} switch strings.ToLower(action) { case strings.ToLower(workflowActionCreate): - extraFlags = append(extraFlags, "--source-keyspace="+sourceKs, "--tables="+tables, "--cells="+cell, "--tablet-types=primary,replica,rdonly") + extraFlags = append(extraFlags, "--source-keyspace="+defaultSourceKs, "--tables="+tables, "--cells="+cell, "--tablet-types=primary,replica,rdonly") case strings.ToLower(workflowActionSwitchTraffic): extraFlags = append(extraFlags, "--initialize-target-sequences") } @@ -1609,9 +1599,9 @@ func moveTablesAction(t *testing.T, action, cell, workflow, sourceKs, targetKs, t.Fatalf("MoveTables %s command failed with %+v\n", action, err) } } -func moveTablesActionWithTabletTypes(t *testing.T, action, cell, workflow, sourceKs, targetKs, tables string, tabletTypes string, ignoreErrors bool) { - if err := vc.VtctldClient.ExecuteCommand("MoveTables", "--workflow="+workflow, 
"--target-keyspace="+targetKs, action, - "--source-keyspace="+sourceKs, "--tables="+tables, "--cells="+cell, "--tablet-types="+tabletTypes); err != nil { +func moveTablesActionWithTabletTypes(t *testing.T, action, cell, workflow, defaultSourceKs, defaultTargetKs, tables string, tabletTypes string, ignoreErrors bool) { + if err := vc.VtctldClient.ExecuteCommand("MoveTables", "--workflow="+workflow, "--target-keyspace="+defaultTargetKs, action, + "--source-keyspace="+defaultSourceKs, "--tables="+tables, "--cells="+cell, "--tablet-types="+tabletTypes); err != nil { if !ignoreErrors { t.Fatalf("MoveTables %s command failed with %+v\n", action, err) } @@ -1725,14 +1715,14 @@ func switchWrites(t *testing.T, workflowType, ksWorkflow string, reverse bool) { } const SwitchWritesTimeout = "91s" // max: 3 tablet picker 30s waits + 1 ensureCanSwitch(t, workflowType, "", ksWorkflow) - targetKs, workflow, found := strings.Cut(ksWorkflow, ".") + defaultTargetKs, workflow, found := strings.Cut(ksWorkflow, ".") require.True(t, found) if workflowType == binlogdatapb.VReplicationWorkflowType_MoveTables.String() { - moveTablesAction(t, command, defaultCellName, workflow, sourceKs, targetKs, "", "--timeout="+SwitchWritesTimeout, "--tablet-types=primary") + moveTablesAction(t, command, defaultCellName, workflow, defaultSourceKs, defaultTargetKs, "", "--timeout="+SwitchWritesTimeout, "--tablet-types=primary") return } output, err := vc.VtctldClient.ExecuteCommandWithOutput(workflowType, "--tablet-types=primary", "--workflow", workflow, - "--target-keyspace", targetKs, command, "--timeout="+SwitchWritesTimeout, "--initialize-target-sequences") + "--target-keyspace", defaultTargetKs, command, "--timeout="+SwitchWritesTimeout, "--initialize-target-sequences") if output != "" { fmt.Printf("Output of switching writes with vtctldclient for %s:\n++++++\n%s\n--------\n", ksWorkflow, output) } @@ -1974,13 +1964,13 @@ func printSwitchWritesExtraDebug(t *testing.T, ksWorkflow, msg string) { debug 
:= true if debug { log.Infof("------------------- START Extra debug info %s Switch writes %s", msg, ksWorkflow) - ksShards := []string{"product/0", "customer/-80", "customer/80-"} + ksShards := []string{fmt.Sprintf("%s/0", defaultSourceKs), fmt.Sprintf("%s/-80", defaultTargetKs), fmt.Sprintf("%s/80-", defaultTargetKs)} printShardPositions(vc, ksShards) defaultCell := vc.Cells[vc.CellNames[0]] - custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] + custKs := vc.Cells[defaultCell.Name].Keyspaces[defaultTargetKs] customerTab1 := custKs.Shards["-80"].Tablets["zone1-200"].Vttablet customerTab2 := custKs.Shards["80-"].Tablets["zone1-300"].Vttablet - productKs := vc.Cells[defaultCell.Name].Keyspaces["product"] + productKs := vc.Cells[defaultCell.Name].Keyspaces[defaultSourceKs] productTab := productKs.Shards["0"].Tablets["zone1-100"].Vttablet tabs := []*cluster.VttabletProcess{productTab, customerTab1, customerTab2} queries := []string{ @@ -2007,11 +1997,11 @@ func printSwitchWritesExtraDebug(t *testing.T, ksWorkflow, msg string) { // // Returns a db connection used for the transaction which you can use for follow-up // work, such as rolling it back directly or using the releaseInnoDBRowHistory call. 
-func generateInnoDBRowHistory(t *testing.T, sourceKS string, neededTrxHistory int64) *mysql.Conn { +func generateInnoDBRowHistory(t *testing.T, defaultSourceKs string, neededTrxHistory int64) *mysql.Conn { dbConn1 := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) dbConn2 := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) - execQuery(t, dbConn1, "use "+sourceKS) - execQuery(t, dbConn2, "use "+sourceKS) + execQuery(t, dbConn1, "use "+defaultSourceKs) + execQuery(t, dbConn2, "use "+defaultSourceKs) offset := int64(1000) limit := int64(neededTrxHistory * 100) insertStmt := strings.Builder{} diff --git a/go/test/endtoend/vreplication/vreplication_test_env.go b/go/test/endtoend/vreplication/vreplication_test_env.go index c62d871380d..d34c9d0e0ed 100644 --- a/go/test/endtoend/vreplication/vreplication_test_env.go +++ b/go/test/endtoend/vreplication/vreplication_test_env.go @@ -16,32 +16,44 @@ limitations under the License. package vreplication +import "fmt" + +const ( + // Defaults used for all tests. + defaultSourceKs = "test-product" + defaultTargetKs = "test-customer" + defaultWorkflowName = "wf1" + defaultKsWorkflow = defaultTargetKs + "." + defaultWorkflowName + defaultReverseKsWorkflow = defaultSourceKs + "." 
+ defaultWorkflowName + "_reverse" + defaultCellName = "zone1" +) + var dryRunResultsSwitchWritesCustomerShard = []string{ - "Lock keyspace product", - "Lock keyspace customer", - "Mirroring 0.00 percent of traffic from keyspace product to keyspace customer for tablet types [PRIMARY]", - "/Stop writes on keyspace product for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order]: [keyspace:product;shard:0;position:", + fmt.Sprintf("Lock keyspace %s", defaultSourceKs), + fmt.Sprintf("Lock keyspace %s", defaultTargetKs), + fmt.Sprintf("Mirroring 0.00 percent of traffic from keyspace %s to keyspace %s for tablet types [PRIMARY]", defaultSourceKs, defaultTargetKs), + fmt.Sprintf("/Stop writes on keyspace %s for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order]: [keyspace:%s;shard:0;position:", defaultSourceKs, defaultSourceKs), "Wait for vreplication on stopped streams to catchup for up to 30s", "Create reverse vreplication workflow p2c_reverse", "Create journal entries on source databases", - "Enable writes on keyspace customer for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order]", - "Switch routing from keyspace product to keyspace customer", + fmt.Sprintf("Enable writes on keyspace %s for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order]", defaultTargetKs), + fmt.Sprintf("Switch routing from keyspace %s to keyspace %s", defaultSourceKs, defaultTargetKs), "Routing rules for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order] will be updated", "Switch writes completed, freeze and delete vreplication streams on: [tablet:200,tablet:300]", "Start reverse vreplication streams on: [tablet:100]", - "Mark vreplication streams frozen on: 
[keyspace:customer;shard:-80;tablet:200;workflow:p2c;dbname:vt_customer,keyspace:customer;shard:80-;tablet:300;workflow:p2c;dbname:vt_customer]", - "Unlock keyspace customer", - "Unlock keyspace product", + fmt.Sprintf("Mark vreplication streams frozen on: [keyspace:%s;shard:-80;tablet:200;workflow:p2c;dbname:vt_%s,keyspace:%s;shard:80-;tablet:300;workflow:p2c;dbname:vt_%s]", defaultTargetKs, defaultTargetKs, defaultTargetKs, defaultTargetKs), + fmt.Sprintf("Unlock keyspace %s", defaultTargetKs), + fmt.Sprintf("Unlock keyspace %s", defaultSourceKs), "", // Additional empty newline in the output } var dryRunResultsReadCustomerShard = []string{ - "Lock keyspace product", - "Mirroring 0.00 percent of traffic from keyspace product to keyspace customer for tablet types [RDONLY,REPLICA]", - "Switch reads for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order] to keyspace customer for tablet types [RDONLY,REPLICA]", + fmt.Sprintf("Lock keyspace %s", defaultSourceKs), + fmt.Sprintf("Mirroring 0.00 percent of traffic from keyspace %s to keyspace %s for tablet types [RDONLY,REPLICA]", defaultSourceKs, defaultTargetKs), + fmt.Sprintf("Switch reads for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order] to keyspace %s for tablet types [RDONLY,REPLICA]", defaultTargetKs), "Routing rules for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order] will be updated", - "Serving VSchema will be rebuilt for the customer keyspace", - "Unlock keyspace product", + fmt.Sprintf("Serving VSchema will be rebuilt for the %s keyspace", defaultTargetKs), + fmt.Sprintf("Unlock keyspace %s", defaultSourceKs), "", // Additional empty newline in the output } diff --git a/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go b/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go index 58af882d5b7..4c7086a6ca9 100644 --- 
a/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go +++ b/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go @@ -66,17 +66,16 @@ func TestVtctldclientCLI(t *testing.T) { require.NotNil(t, zone2) defer vc.TearDown() - sourceKeyspaceName := "product" - targetKeyspaceName := "customer" + sourceKeyspaceName := defaultSourceKs + targetKeyspaceName := defaultTargetKs var mt iMoveTables - workflowName := "wf1" sourceReplicaTab = vc.Cells["zone1"].Keyspaces[sourceKeyspaceName].Shards["0"].Tablets["zone1-101"].Vttablet require.NotNil(t, sourceReplicaTab) sourceTab = vc.Cells["zone1"].Keyspaces[sourceKeyspaceName].Shards["0"].Tablets["zone1-100"].Vttablet require.NotNil(t, sourceTab) - targetTabs := setupMinimalCustomerKeyspace(t) + targetTabs := setupMinimalTargetKeyspace(t) targetTab1 = targetTabs["-80"] require.NotNil(t, targetTab1) targetTab2 = targetTabs["80-"] @@ -91,13 +90,13 @@ func TestVtctldclientCLI(t *testing.T) { testWorkflowList(t, sourceKeyspaceName, targetKeyspaceName) }) t.Run("MoveTablesCreateFlags1", func(t *testing.T) { - testMoveTablesFlags1(t, &mt, sourceKeyspaceName, targetKeyspaceName, workflowName, targetTabs) + testMoveTablesFlags1(t, &mt, sourceKeyspaceName, targetKeyspaceName, defaultWorkflowName, targetTabs) }) t.Run("testWorkflowUpdateConfig", func(t *testing.T) { - testWorkflowUpdateConfig(t, &mt, targetTabs, targetKeyspaceName, workflowName) + testWorkflowUpdateConfig(t, &mt, targetTabs, targetKeyspaceName, defaultWorkflowName) }) t.Run("MoveTablesCreateFlags2", func(t *testing.T) { - testMoveTablesFlags2(t, &mt, sourceKeyspaceName, targetKeyspaceName, workflowName, targetTabs) + testMoveTablesFlags2(t, &mt, sourceKeyspaceName, targetKeyspaceName, defaultWorkflowName, targetTabs) }) t.Run("MoveTablesCompleteFlags3", func(t *testing.T) { testMoveTablesFlags3(t, sourceKeyspaceName, targetKeyspaceName, targetTabs) @@ -184,7 +183,7 @@ func TestVtctldclientCLI(t *testing.T) { require.NotNil(vc.t, resp) 
require.NotNil(vc.t, resp.ShardStreams) require.Equal(vc.t, len(resp.ShardStreams), 2) - keyspace := "customer" + keyspace := defaultTargetKs for _, shard := range []string{"80-c0", "c0-"} { streams := resp.ShardStreams[fmt.Sprintf("%s/%s", keyspace, shard)] require.Equal(vc.t, 1, len(streams.Streams)) @@ -192,7 +191,7 @@ func TestVtctldclientCLI(t *testing.T) { } rs.Start() - waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", keyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", keyspace, defaultWorkflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) res, err := targetTab1.QueryTablet("show tables", keyspace, true) require.NoError(t, err) @@ -206,8 +205,8 @@ func TestVtctldclientCLI(t *testing.T) { rs.Cancel() - workflowNames := workflowList(keyspace) - require.Empty(t, workflowNames) + defaultWorkflowNames := workflowList(keyspace) + require.Empty(t, defaultWorkflowNames) res, err = targetTab1.QueryTablet("show tables", keyspace, true) require.NoError(t, err) @@ -222,7 +221,7 @@ func TestVtctldclientCLI(t *testing.T) { } // Tests several create flags and some complete flags and validates that some of them are set correctly for the workflow. -func testMoveTablesFlags1(t *testing.T, mt *iMoveTables, sourceKeyspace, targetKeyspace, workflowName string, targetTabs map[string]*cluster.VttabletProcess) { +func testMoveTablesFlags1(t *testing.T, mt *iMoveTables, sourceKeyspace, targetKeyspace, defaultWorkflowName string, targetTabs map[string]*cluster.VttabletProcess) { tables := "customer,customer2" overrides := map[string]string{ "vreplication-net-read-timeout": "6000", @@ -238,10 +237,10 @@ func testMoveTablesFlags1(t *testing.T, mt *iMoveTables, sourceKeyspace, targetK completeFlags := []string{"--keep-routing-rules", "--keep-data"} switchFlags := []string{} // Test one set of MoveTable flags. 
- *mt = createMoveTables(t, sourceKeyspace, targetKeyspace, workflowName, tables, createFlags, completeFlags, switchFlags) + *mt = createMoveTables(t, sourceKeyspace, targetKeyspace, defaultWorkflowName, tables, createFlags, completeFlags, switchFlags) (*mt).Show() moveTablesResponse := getMoveTablesShowResponse(mt) - workflowResponse := getWorkflow(targetKeyspace, workflowName) + workflowResponse := getWorkflow(targetKeyspace, defaultWorkflowName) // also validates that MoveTables Show and Workflow Show return the same output. require.EqualValues(t, moveTablesResponse.CloneVT(), workflowResponse) @@ -264,15 +263,15 @@ func getMoveTablesShowResponse(mt *iMoveTables) *vtctldatapb.GetWorkflowsRespons } // Validates some of the flags created from the previous test. -func testMoveTablesFlags2(t *testing.T, mt *iMoveTables, sourceKeyspace, targetKeyspace, workflowName string, targetTabs map[string]*cluster.VttabletProcess) { - ksWorkflow := fmt.Sprintf("%s.%s", targetKeyspace, workflowName) +func testMoveTablesFlags2(t *testing.T, mt *iMoveTables, sourceKeyspace, targetKeyspace, defaultWorkflowName string, targetTabs map[string]*cluster.VttabletProcess) { + ksWorkflow := fmt.Sprintf("%s.%s", targetKeyspace, defaultWorkflowName) wf := (*mt).(iWorkflow) (*mt).Start() // Need to start because we set auto-start to false. 
waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Stopped.String()) confirmNoRoutingRules(t) for _, tab := range targetTabs { alias := fmt.Sprintf("zone1-%d", tab.TabletUID) - query := "update _vt.vreplication set source := replace(source, 'stop_after_copy:true', 'stop_after_copy:false') where db_name = 'vt_customer' and workflow = 'wf1'" + query := fmt.Sprintf("update _vt.vreplication set source := replace(source, 'stop_after_copy:true', 'stop_after_copy:false') where db_name = 'vt_%s' and workflow = 'wf1'", targetKeyspace) output, err := vc.VtctldClient.ExecuteCommandWithOutput("ExecuteFetchAsDBA", alias, query) require.NoError(t, err, output) } @@ -284,85 +283,85 @@ func testMoveTablesFlags2(t *testing.T, mt *iMoveTables, sourceKeyspace, targetK (*mt).Start() waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) for _, tab := range targetTabs { - catchup(t, tab, workflowName, "MoveTables") + catchup(t, tab, defaultWorkflowName, "MoveTables") } (*mt).SwitchReads() validateReadsRouteToTarget(t, "replica") - validateTableRoutingRule(t, "customer", "replica", sourceKs, targetKs) - validateTableRoutingRule(t, "customer", "", targetKs, sourceKs) + validateTableRoutingRule(t, "customer", "replica", sourceKeyspace, targetKeyspace) + validateTableRoutingRule(t, "customer", "", targetKeyspace, sourceKeyspace) confirmStates(t, &wf, wrangler.WorkflowStateNotSwitched, wrangler.WorkflowStateReadsSwitched) (*mt).ReverseReads() validateReadsRouteToSource(t, "replica") - validateTableRoutingRule(t, "customer", "replica", targetKs, sourceKs) - validateTableRoutingRule(t, "customer", "", targetKs, sourceKs) + validateTableRoutingRule(t, "customer", "replica", targetKeyspace, sourceKeyspace) + validateTableRoutingRule(t, "customer", "", targetKeyspace, sourceKeyspace) confirmStates(t, &wf, wrangler.WorkflowStateReadsSwitched, wrangler.WorkflowStateNotSwitched) (*mt).SwitchReadsAndWrites() 
validateReadsRouteToTarget(t, "replica") - validateTableRoutingRule(t, "customer", "replica", sourceKs, targetKs) + validateTableRoutingRule(t, "customer", "replica", sourceKeyspace, targetKeyspace) validateWritesRouteToTarget(t) - validateTableRoutingRule(t, "customer", "", sourceKs, targetKs) + validateTableRoutingRule(t, "customer", "", sourceKeyspace, targetKeyspace) confirmStates(t, &wf, wrangler.WorkflowStateNotSwitched, wrangler.WorkflowStateAllSwitched) (*mt).ReverseReadsAndWrites() validateReadsRouteToSource(t, "replica") - validateTableRoutingRule(t, "customer", "replica", targetKs, sourceKs) + validateTableRoutingRule(t, "customer", "replica", targetKeyspace, sourceKeyspace) validateWritesRouteToSource(t) - validateTableRoutingRule(t, "customer", "", targetKs, sourceKs) + validateTableRoutingRule(t, "customer", "", targetKeyspace, sourceKeyspace) confirmStates(t, &wf, wrangler.WorkflowStateAllSwitched, wrangler.WorkflowStateNotSwitched) (*mt).SwitchReadsAndWrites() validateReadsRouteToTarget(t, "replica") - validateTableRoutingRule(t, "customer", "replica", sourceKs, targetKs) + validateTableRoutingRule(t, "customer", "replica", sourceKeyspace, targetKeyspace) validateWritesRouteToTarget(t) - validateTableRoutingRule(t, "customer", "", sourceKs, targetKs) + validateTableRoutingRule(t, "customer", "", sourceKeyspace, targetKeyspace) confirmStates(t, &wf, wrangler.WorkflowStateNotSwitched, wrangler.WorkflowStateAllSwitched) (*mt).ReverseReads() validateReadsRouteToSource(t, "replica") - validateTableRoutingRule(t, "customer", "replica", targetKs, sourceKs) + validateTableRoutingRule(t, "customer", "replica", targetKeyspace, sourceKeyspace) validateWritesRouteToTarget(t) - validateTableRoutingRule(t, "customer", "", sourceKs, targetKs) + validateTableRoutingRule(t, "customer", "", sourceKeyspace, targetKeyspace) confirmStates(t, &wf, wrangler.WorkflowStateAllSwitched, wrangler.WorkflowStateWritesSwitched) (*mt).ReverseWrites() validateReadsRouteToSource(t, 
"replica") - validateTableRoutingRule(t, "customer", "replica", targetKs, sourceKs) + validateTableRoutingRule(t, "customer", "replica", targetKeyspace, sourceKeyspace) validateWritesRouteToSource(t) - validateTableRoutingRule(t, "customer", "", targetKs, sourceKs) + validateTableRoutingRule(t, "customer", "", targetKeyspace, sourceKeyspace) confirmStates(t, &wf, wrangler.WorkflowStateWritesSwitched, wrangler.WorkflowStateNotSwitched) (*mt).SwitchReadsAndWrites() validateReadsRouteToTarget(t, "replica") - validateTableRoutingRule(t, "customer", "replica", sourceKs, targetKs) + validateTableRoutingRule(t, "customer", "replica", sourceKeyspace, targetKeyspace) validateWritesRouteToTarget(t) - validateTableRoutingRule(t, "customer", "", sourceKs, targetKs) + validateTableRoutingRule(t, "customer", "", sourceKeyspace, targetKeyspace) confirmStates(t, &wf, wrangler.WorkflowStateNotSwitched, wrangler.WorkflowStateAllSwitched) (*mt).ReverseWrites() validateReadsRouteToTarget(t, "replica") - validateTableRoutingRule(t, "customer", "replica", sourceKs, targetKs) + validateTableRoutingRule(t, "customer", "replica", sourceKeyspace, targetKeyspace) validateWritesRouteToSource(t) - validateTableRoutingRule(t, "customer", "", targetKs, sourceKs) + validateTableRoutingRule(t, "customer", "", targetKeyspace, sourceKeyspace) confirmStates(t, &wf, wrangler.WorkflowStateAllSwitched, wrangler.WorkflowStateReadsSwitched) (*mt).ReverseReads() validateReadsRouteToSource(t, "replica") - validateTableRoutingRule(t, "customer", "replica", targetKs, sourceKs) + validateTableRoutingRule(t, "customer", "replica", targetKeyspace, sourceKeyspace) validateWritesRouteToSource(t) - validateTableRoutingRule(t, "customer", "", targetKs, sourceKs) + validateTableRoutingRule(t, "customer", "", targetKeyspace, sourceKeyspace) confirmStates(t, &wf, wrangler.WorkflowStateReadsSwitched, wrangler.WorkflowStateNotSwitched) // Confirm that everything is still in sync after our switch fest. 
- vdiff(t, targetKeyspace, workflowName, "zone1", nil) + vdiff(t, targetKeyspace, defaultWorkflowName, "zone1", nil) (*mt).SwitchReadsAndWrites() validateReadsRouteToTarget(t, "replica") - validateTableRoutingRule(t, "customer", "replica", sourceKs, targetKs) + validateTableRoutingRule(t, "customer", "replica", sourceKeyspace, targetKeyspace) validateWritesRouteToTarget(t) - validateTableRoutingRule(t, "customer", "", sourceKs, targetKs) + validateTableRoutingRule(t, "customer", "", sourceKeyspace, targetKeyspace) confirmStates(t, &wf, wrangler.WorkflowStateNotSwitched, wrangler.WorkflowStateAllSwitched) (*mt).Complete() @@ -382,15 +381,15 @@ func testMoveTablesFlags3(t *testing.T, sourceKeyspace, targetKeyspace string, t completeFlags := []string{"--rename-tables"} tables := "customer2" switchFlags := []string{"--enable-reverse-replication=false"} - mt := createMoveTables(t, sourceKeyspace, targetKeyspace, workflowName, tables, createFlags, completeFlags, switchFlags) + mt := createMoveTables(t, sourceKeyspace, targetKeyspace, defaultWorkflowName, tables, createFlags, completeFlags, switchFlags) mt.Start() // Need to start because we set stop-after-copy to true. - waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, defaultKsWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) mt.Stop() // Test stopping workflow. 
- waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Stopped.String()) + waitForWorkflowState(t, vc, defaultKsWorkflow, binlogdatapb.VReplicationWorkflowState_Stopped.String()) mt.Start() - waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, defaultKsWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) for _, tab := range targetTabs { - catchup(t, tab, workflowName, "MoveTables") + catchup(t, tab, defaultWorkflowName, "MoveTables") } mt.SwitchReadsAndWrites() mt.Complete() @@ -399,17 +398,17 @@ func testMoveTablesFlags3(t *testing.T, sourceKeyspace, targetKeyspace string, t require.False(t, checkTablesExist(t, "zone1-100", []string{"customer2"})) // Confirm that we can cancel a workflow after ONLY switching read traffic. - mt = createMoveTables(t, sourceKeyspace, targetKeyspace, workflowName, "customer", createFlags, nil, nil) + mt = createMoveTables(t, sourceKeyspace, targetKeyspace, defaultWorkflowName, "customer", createFlags, nil, nil) mt.Start() // Need to start because we set stop-after-copy to true. 
- waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, defaultKsWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) for _, tab := range targetTabs { - catchup(t, tab, workflowName, "MoveTables") + catchup(t, tab, defaultWorkflowName, "MoveTables") } mt.SwitchReads() wf := mt.(iWorkflow) validateReadsRouteToTarget(t, "replica") - validateTableRoutingRule(t, "customer", "replica", sourceKs, targetKs) - validateTableRoutingRule(t, "customer", "", targetKs, sourceKs) + validateTableRoutingRule(t, "customer", "replica", sourceKeyspace, targetKeyspace) + validateTableRoutingRule(t, "customer", "", targetKeyspace, sourceKeyspace) confirmStates(t, &wf, wrangler.WorkflowStateNotSwitched, wrangler.WorkflowStateReadsSwitched) mt.Cancel() confirmNoRoutingRules(t) @@ -428,17 +427,17 @@ func testWorkflowList(t *testing.T, sourceKeyspace, targetKeyspace string) { } slices.Sort(wfNames) - workflowNames := workflowList(targetKeyspace) - slices.Sort(workflowNames) - require.EqualValues(t, wfNames, workflowNames) + defaultWorkflowNames := workflowList(targetKeyspace) + slices.Sort(defaultWorkflowNames) + require.EqualValues(t, wfNames, defaultWorkflowNames) workflows := getWorkflows(targetKeyspace) - workflowNames = make([]string, len(workflows.Workflows)) + defaultWorkflowNames = make([]string, len(workflows.Workflows)) for i := range workflows.Workflows { - workflowNames[i] = workflows.Workflows[i].Name + defaultWorkflowNames[i] = workflows.Workflows[i].Name } - slices.Sort(workflowNames) - require.EqualValues(t, wfNames, workflowNames) + slices.Sort(defaultWorkflowNames) + require.EqualValues(t, wfNames, defaultWorkflowNames) } func testWorkflowUpdateConfig(t *testing.T, mt *iMoveTables, targetTabs map[string]*cluster.VttabletProcess, targetKeyspace, workflow string) { @@ -526,12 +525,12 @@ func testWorkflowUpdateConfig(t *testing.T, mt *iMoveTables, targetTabs map[stri } } -func 
createMoveTables(t *testing.T, sourceKeyspace, targetKeyspace, workflowName, tables string, +func createMoveTables(t *testing.T, sourceKeyspace, targetKeyspace, defaultWorkflowName, tables string, createFlags, completeFlags, switchFlags []string) iMoveTables { mt := newMoveTables(vc, &moveTablesWorkflow{ workflowInfo: &workflowInfo{ vc: vc, - workflowName: workflowName, + workflowName: defaultWorkflowName, targetKeyspace: targetKeyspace, }, sourceKeyspace: sourceKeyspace, @@ -546,7 +545,7 @@ func createMoveTables(t *testing.T, sourceKeyspace, targetKeyspace, workflowName // reshard helpers -func splitShard(t *testing.T, keyspace, workflowName, sourceShards, targetShards string, targetTabs map[string]*cluster.VttabletProcess) { +func splitShard(t *testing.T, keyspace, defaultWorkflowName, sourceShards, targetShards string, targetTabs map[string]*cluster.VttabletProcess) { overrides := map[string]string{ "vreplication-copy-phase-duration": "10h11m12s", "vreplication-experimental-flags": "7", @@ -563,37 +562,37 @@ func splitShard(t *testing.T, keyspace, workflowName, sourceShards, targetShards rs := newReshard(vc, &reshardWorkflow{ workflowInfo: &workflowInfo{ vc: vc, - workflowName: workflowName, + workflowName: defaultWorkflowName, targetKeyspace: keyspace, }, sourceShards: sourceShards, targetShards: targetShards, createFlags: createFlags, }, workflowFlavorVtctld) - ksWorkflow := fmt.Sprintf("%s.%s", keyspace, workflowName) + ksWorkflow := fmt.Sprintf("%s.%s", keyspace, defaultWorkflowName) wf := rs.(iWorkflow) rs.Create() validateReshardResponse(rs) validateOverrides(t, targetTabs, overrides) - workflowResponse := getWorkflow(keyspace, workflowName) + workflowResponse := getWorkflow(keyspace, defaultWorkflowName) reshardShowResponse := getReshardShowResponse(&rs) require.EqualValues(t, reshardShowResponse, workflowResponse) validateReshardWorkflow(t, workflowResponse.Workflows) - waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", keyspace, workflowName), 
binlogdatapb.VReplicationWorkflowState_Stopped.String()) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", keyspace, defaultWorkflowName), binlogdatapb.VReplicationWorkflowState_Stopped.String()) rs.Start() waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Stopped.String()) for _, tab := range targetTabs { alias := fmt.Sprintf("zone1-%d", tab.TabletUID) - query := "update _vt.vreplication set source = replace(source, 'stop_after_copy:true', 'stop_after_copy:false') where db_name = 'vt_customer' and workflow = '" + workflowName + "'" + query := fmt.Sprintf("update _vt.vreplication set source = replace(source, 'stop_after_copy:true', 'stop_after_copy:false') where db_name = 'vt_%s' and workflow = '%s'", keyspace, defaultWorkflowName) output, err := vc.VtctldClient.ExecuteCommandWithOutput("ExecuteFetchAsDBA", alias, query) require.NoError(t, err, output) } rs.Start() - waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", keyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", keyspace, defaultWorkflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) rs.Stop() waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Stopped.String()) rs.Start() - waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", keyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", keyspace, defaultWorkflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) t.Run("Test --shards in workflow start/stop", func(t *testing.T) { // This subtest expects workflow to be running at the start and restarts it at the end.
@@ -608,18 +607,18 @@ func splitShard(t *testing.T, keyspace, workflowName, sourceShards, targetShards {"-40,40-80", "start", 2}, } for _, tc := range testCases { - output, err := vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", keyspace, tc.action, "--workflow", workflowName, "--shards", tc.shards) + output, err := vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", keyspace, tc.action, "--workflow", defaultWorkflowName, "--shards", tc.shards) require.NoError(t, err, "failed to %s workflow: %v", tc.action, err) cnt := gjson.Get(output, "details.#").Int() require.EqualValuesf(t, tc.expected, cnt, "expected %d shards, got %d for action %s, shards %s", tc.expected, cnt, tc.action, tc.shards) } }) - waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", keyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", keyspace, defaultWorkflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) for _, targetTab := range targetTabs { - catchup(t, targetTab, workflowName, "Reshard") + catchup(t, targetTab, defaultWorkflowName, "Reshard") } - vdiff(t, keyspace, workflowName, "zone1", nil) + vdiff(t, keyspace, defaultWorkflowName, "zone1", nil) shardReadsRouteToSource := func() { require.True(t, getShardRoute(t, keyspace, "-80", "replica")) @@ -638,15 +637,15 @@ func splitShard(t *testing.T, keyspace, workflowName, sourceShards, targetShards } rs.SwitchReadsAndWrites() - waitForLowLag(t, keyspace, workflowName+"_reverse") - vdiff(t, keyspace, workflowName+"_reverse", "zone1", nil) + waitForLowLag(t, keyspace, defaultWorkflowName+"_reverse") + vdiff(t, keyspace, defaultWorkflowName+"_reverse", "zone1", nil) shardReadsRouteToTarget() shardWritesRouteToTarget() confirmStates(t, &wf, wrangler.WorkflowStateNotSwitched, wrangler.WorkflowStateAllSwitched) rs.ReverseReadsAndWrites() - waitForLowLag(t, keyspace, workflowName) - vdiff(t, keyspace, workflowName, "zone1", nil) + 
waitForLowLag(t, keyspace, defaultWorkflowName) + vdiff(t, keyspace, defaultWorkflowName, "zone1", nil) shardReadsRouteToSource() shardWritesRouteToSource() confirmStates(t, &wf, wrangler.WorkflowStateAllSwitched, wrangler.WorkflowStateNotSwitched) @@ -702,7 +701,7 @@ func splitShard(t *testing.T, keyspace, workflowName, sourceShards, targetShards confirmStates(t, &wf, wrangler.WorkflowStateReadsSwitched, wrangler.WorkflowStateNotSwitched) // Confirm that everything is still in sync after our switch fest. - vdiff(t, keyspace, workflowName, "zone1", nil) + vdiff(t, keyspace, defaultWorkflowName, "zone1", nil) rs.SwitchReadsAndWrites() shardReadsRouteToTarget() @@ -754,7 +753,7 @@ func validateReshardResponse(rs iReshard) { require.NotNil(vc.t, resp) require.NotNil(vc.t, resp.ShardStreams) require.Equal(vc.t, len(resp.ShardStreams), 2) - keyspace := "customer" + keyspace := defaultTargetKs for _, shard := range []string{"-40", "40-80"} { streams := resp.ShardStreams[fmt.Sprintf("%s/%s", keyspace, shard)] require.Equal(vc.t, 1, len(streams.Streams)) @@ -768,9 +767,9 @@ func validateReshardWorkflow(t *testing.T, workflows []*vtctldatapb.Workflow) { require.Equal(t, "reshard", wf.Name) require.Equal(t, binlogdatapb.VReplicationWorkflowType_Reshard.String(), wf.WorkflowType) require.Equal(t, "None", wf.WorkflowSubType) - require.Equal(t, "customer", wf.Target.Keyspace) + require.Equal(t, defaultTargetKs, wf.Target.Keyspace) require.Equal(t, 2, len(wf.Target.Shards)) - require.Equal(t, "customer", wf.Source.Keyspace) + require.Equal(t, defaultTargetKs, wf.Source.Keyspace) require.Equal(t, 1, len(wf.Source.Shards)) require.False(t, wf.DeferSecondaryKeys) @@ -919,9 +918,9 @@ func validateMoveTablesWorkflow(t *testing.T, workflows []*vtctldatapb.Workflow) require.Equal(t, "wf1", wf.Name) require.Equal(t, binlogdatapb.VReplicationWorkflowType_MoveTables.String(), wf.WorkflowType) require.Equal(t, "None", wf.WorkflowSubType) - require.Equal(t, "customer", wf.Target.Keyspace) + 
require.Equal(t, defaultTargetKs, wf.Target.Keyspace) require.Equal(t, 2, len(wf.Target.Shards)) - require.Equal(t, "product", wf.Source.Keyspace) + require.Equal(t, defaultSourceKs, wf.Source.Keyspace) require.Equal(t, 1, len(wf.Source.Shards)) require.False(t, wf.DeferSecondaryKeys) diff --git a/go/test/endtoend/vreplication/vschema_load_test.go b/go/test/endtoend/vreplication/vschema_load_test.go index e14d3be8720..8b93213b402 100644 --- a/go/test/endtoend/vreplication/vschema_load_test.go +++ b/go/test/endtoend/vreplication/vschema_load_test.go @@ -43,7 +43,7 @@ func TestVSchemaChangesUnderLoad(t *testing.T) { defer vc.TearDown() defaultCell := vc.Cells[vc.CellNames[0]] - vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, 1, 0, 100, sourceKsOpts) + vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, 1, 0, 100, defaultSourceKsOpts) vtgateConn := vc.GetVTGateConn(t) defer vtgateConn.Close() diff --git a/go/test/endtoend/vreplication/vstream_test.go b/go/test/endtoend/vreplication/vstream_test.go index d9153667c13..0a4205a722e 100644 --- a/go/test/endtoend/vreplication/vstream_test.go +++ b/go/test/endtoend/vreplication/vstream_test.go @@ -49,7 +49,7 @@ func TestVStreamWithTablesToSkipCopyFlag(t *testing.T) { defaultRdonly = 0 defaultCell := vc.Cells[vc.CellNames[0]] - vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) + vc.AddKeyspace(t, []*Cell{defaultCell}, defaultSourceKs, "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) verifyClusterHealth(t, vc) ctx := context.Background() @@ -60,7 +60,7 @@ func TestVStreamWithTablesToSkipCopyFlag(t *testing.T) { defer vstreamConn.Close() vgtid := &binlogdatapb.VGtid{ ShardGtids: []*binlogdatapb.ShardGtid{{ - Keyspace: "product", + Keyspace: defaultSourceKs, Shard: "0", Gtid: "", }}} @@ -90,9 
+90,9 @@ func TestVStreamWithTablesToSkipCopyFlag(t *testing.T) { // present in the filter before running the VStream. for range 10 { id++ - execVtgateQuery(t, vtgateConn, "product", fmt.Sprintf("insert into customer (cid, name) values (%d, 'customer%d')", id+100, id)) - execVtgateQuery(t, vtgateConn, "product", fmt.Sprintf("insert into product (pid, description) values (%d, 'description%d')", id+100, id)) - execVtgateQuery(t, vtgateConn, "product", fmt.Sprintf("insert into merchant (mname, category) values ('mname%d', 'category%d')", id+100, id)) + execVtgateQuery(t, vtgateConn, defaultSourceKs, fmt.Sprintf("insert into customer (cid, name) values (%d, 'customer%d')", id+100, id)) + execVtgateQuery(t, vtgateConn, defaultSourceKs, fmt.Sprintf("insert into product (pid, description) values (%d, 'description%d')", id+100, id)) + execVtgateQuery(t, vtgateConn, defaultSourceKs, fmt.Sprintf("insert into merchant (mname, category) values ('mname%d', 'category%d')", id+100, id)) } // Stream events from the VStream API @@ -157,9 +157,9 @@ func TestVStreamWithTablesToSkipCopyFlag(t *testing.T) { } insertMu.Lock() id++ - execVtgateQuery(t, vtgateConn, "product", fmt.Sprintf("insert into customer (cid, name) values (%d, 'customer%d')", id+100, id)) - execVtgateQuery(t, vtgateConn, "product", fmt.Sprintf("insert into product (pid, description) values (%d, 'description%d')", id+100, id)) - execVtgateQuery(t, vtgateConn, "product", fmt.Sprintf("insert into merchant (mname, category) values ('mname%d', 'category%d')", id+100, id)) + execVtgateQuery(t, vtgateConn, defaultSourceKs, fmt.Sprintf("insert into customer (cid, name) values (%d, 'customer%d')", id+100, id)) + execVtgateQuery(t, vtgateConn, defaultSourceKs, fmt.Sprintf("insert into product (pid, description) values (%d, 'description%d')", id+100, id)) + execVtgateQuery(t, vtgateConn, defaultSourceKs, fmt.Sprintf("insert into merchant (mname, category) values ('mname%d', 'category%d')", id+100, id)) insertMu.Unlock() } }() 
@@ -169,9 +169,9 @@ func TestVStreamWithTablesToSkipCopyFlag(t *testing.T) { time.Sleep(10 * time.Second) // Give the vstream plenty of time to catchup done.Store(true) - qr1 := execVtgateQuery(t, vtgateConn, "product", "select count(*) from customer") - qr2 := execVtgateQuery(t, vtgateConn, "product", "select count(*) from product") - qr3 := execVtgateQuery(t, vtgateConn, "product", "select count(*) from merchant") + qr1 := execVtgateQuery(t, vtgateConn, defaultSourceKs, "select count(*) from customer") + qr2 := execVtgateQuery(t, vtgateConn, defaultSourceKs, "select count(*) from product") + qr3 := execVtgateQuery(t, vtgateConn, defaultSourceKs, "select count(*) from merchant") require.NotNil(t, qr1) require.NotNil(t, qr2) require.NotNil(t, qr3) @@ -213,7 +213,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) { defaultRdonly = 0 defaultCell := vc.Cells[vc.CellNames[0]] - vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) + vc.AddKeyspace(t, []*Cell{defaultCell}, defaultSourceKs, "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) verifyClusterHealth(t, vc) insertInitialData(t) vtgate := defaultCell.Vtgates[0] @@ -228,7 +228,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) { defer vstreamConn.Close() vgtid := &binlogdatapb.VGtid{ ShardGtids: []*binlogdatapb.ShardGtid{{ - Keyspace: "product", + Keyspace: defaultSourceKs, Shard: "0", Gtid: "", }}} @@ -260,7 +260,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) { } insertMu.Lock() id++ - execVtgateQuery(t, vtgateConn, "product", fmt.Sprintf("insert into customer (cid, name) values (%d, 'customer%d')", id+100, id)) + execVtgateQuery(t, vtgateConn, defaultSourceKs, fmt.Sprintf("insert into customer (cid, name) values (%d, 'customer%d')", id+100, id)) insertMu.Unlock() } }() @@ -305,7 +305,7 @@ func testVStreamWithFailover(t *testing.T, failover 
bool) { case 1: if failover { insertMu.Lock() - output, err := vc.VtctldClient.ExecuteCommandWithOutput("PlannedReparentShard", "product/0", "--new-primary=zone1-101") + output, err := vc.VtctldClient.ExecuteCommandWithOutput("PlannedReparentShard", fmt.Sprintf("%s/0", defaultSourceKs), "--new-primary=zone1-101") insertMu.Unlock() log.Infof("output of first PRS is %s", output) require.NoError(t, err) @@ -313,7 +313,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) { case 2: if failover { insertMu.Lock() - output, err := vc.VtctldClient.ExecuteCommandWithOutput("PlannedReparentShard", "product/0", "--new-primary=zone1-100") + output, err := vc.VtctldClient.ExecuteCommandWithOutput("PlannedReparentShard", fmt.Sprintf("%s/0", defaultSourceKs), "--new-primary=zone1-100") insertMu.Unlock() log.Infof("output of second PRS is %s", output) require.NoError(t, err) @@ -329,7 +329,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) { } } - qr := execVtgateQuery(t, vtgateConn, "product", "select count(*) from customer") + qr := execVtgateQuery(t, vtgateConn, defaultSourceKs, "select count(*) from customer") require.NotNil(t, qr) // total number of row events found by the VStream API should match the rows inserted insertedRows, err := qr.Rows[0][0].ToCastInt64() @@ -654,7 +654,7 @@ func testVStreamCopyMultiKeyspaceReshard(t *testing.T, baseTabletID int) numEven tickCount++ switch tickCount { case 1: - reshard(t, "sharded", "customer", "vstreamCopyMultiKeyspaceReshard", "-80,80-", "-40,40-", baseTabletID+400, nil, nil, nil, nil, defaultCellName, 1) + reshard(t, "sharded", defaultTargetKs, "vstreamCopyMultiKeyspaceReshard", "-80,80-", "-40,40-", baseTabletID+400, nil, nil, nil, nil, defaultCellName, 1) reshardDone = true case 60: done = true @@ -708,7 +708,7 @@ func TestMultiVStreamsKeyspaceReshard(t *testing.T) { require.NoError(t, err) // Add the new shards. 
- err = vc.AddShards(t, []*Cell{defaultCell}, keyspace, newShards, defaultReplicas, defaultRdonly, baseTabletID+2000, targetKsOpts) + err = vc.AddShards(t, []*Cell{defaultCell}, keyspace, newShards, defaultReplicas, defaultRdonly, baseTabletID+2000, defaultTargetKsOpts) require.NoError(t, err) vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) @@ -904,7 +904,7 @@ func TestMultiVStreamsKeyspaceStopOnReshard(t *testing.T) { require.NoError(t, err) // Add the new shards. - err = vc.AddShards(t, []*Cell{defaultCell}, keyspace, newShards, defaultReplicas, defaultRdonly, baseTabletID+2000, targetKsOpts) + err = vc.AddShards(t, []*Cell{defaultCell}, keyspace, newShards, defaultReplicas, defaultRdonly, baseTabletID+2000, defaultTargetKsOpts) require.NoError(t, err) vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) @@ -1103,7 +1103,7 @@ func TestVStreamStopOnReshardFalse(t *testing.T) { func TestVStreamWithKeyspacesToWatch(t *testing.T) { extraVTGateArgs = append(extraVTGateArgs, []string{ - utils.GetFlagVariantForTests("--keyspaces-to-watch"), "product", + utils.GetFlagVariantForTests("--keyspaces-to-watch"), defaultSourceKs, }...) 
testVStreamWithFailover(t, false) @@ -1142,7 +1142,7 @@ func doVStream(t *testing.T, vc *VitessCluster, flags *vtgatepb.VStreamFlags) (n done := false vgtid := &binlogdatapb.VGtid{ ShardGtids: []*binlogdatapb.ShardGtid{{ - Keyspace: "product", + Keyspace: defaultSourceKs, Shard: "0", Gtid: "", }}} @@ -1167,7 +1167,7 @@ func doVStream(t *testing.T, vc *VitessCluster, flags *vtgatepb.VStreamFlags) (n arr := strings.Split(rowEvent.TableName, ".") require.Equal(t, len(arr), 2) tableName := arr[1] - require.Equal(t, "product", rowEvent.Keyspace) + require.Equal(t, defaultSourceKs, rowEvent.Keyspace) require.Equal(t, "0", rowEvent.Shard) numRowEvents[tableName]++ @@ -1176,7 +1176,7 @@ func doVStream(t *testing.T, vc *VitessCluster, flags *vtgatepb.VStreamFlags) (n arr := strings.Split(fieldEvent.TableName, ".") require.Equal(t, len(arr), 2) tableName := arr[1] - require.Equal(t, "product", fieldEvent.Keyspace) + require.Equal(t, defaultSourceKs, fieldEvent.Keyspace) require.Equal(t, "0", fieldEvent.Shard) numFieldEvents[tableName]++ default: @@ -1215,7 +1215,7 @@ func TestVStreamHeartbeats(t *testing.T) { defaultRdonly = 0 defaultCell := vc.Cells[vc.CellNames[0]] - vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, + vc.AddKeyspace(t, []*Cell{defaultCell}, defaultSourceKs, "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) verifyClusterHealth(t, vc) insertInitialData(t) @@ -1271,7 +1271,7 @@ func TestVStreamPushdownFilters(t *testing.T) { }) defer vc.TearDown() require.NotNil(t, vc) - ks := "product" + ks := defaultSourceKs shard := "0" defaultCell := vc.Cells[vc.CellNames[0]] diff --git a/go/vt/sqlparser/parse_table_test.go b/go/vt/sqlparser/parse_table_test.go index 5f187cbc6d0..b4770d71f86 100644 --- a/go/vt/sqlparser/parse_table_test.go +++ b/go/vt/sqlparser/parse_table_test.go @@ -55,6 +55,14 @@ func TestParseTable(t *testing.T) { }, { input: "k.t.", err: true, + }, { + input: 
"`k-t`.t", + keyspace: "k-t", + table: "t", + }, { + input: "`k-t`.`k-t`", + keyspace: "k-t", + table: "k-t", }} parser := NewTestParser() for _, tcase := range testcases { diff --git a/go/vt/vttablet/tabletmanager/rpc_vreplication.go b/go/vt/vttablet/tabletmanager/rpc_vreplication.go index 67c38545812..c329daddcca 100644 --- a/go/vt/vttablet/tabletmanager/rpc_vreplication.go +++ b/go/vt/vttablet/tabletmanager/rpc_vreplication.go @@ -747,6 +747,13 @@ func (tm *TabletManager) GetMaxValueForSequences(ctx context.Context, req *table } func (tm *TabletManager) getMaxSequenceValue(ctx context.Context, sm *tabletmanagerdatapb.GetMaxValueForSequencesRequest_SequenceMetadata) (int64, error) { + for _, val := range []string{sm.UsingTableDbNameEscaped, sm.UsingTableNameEscaped, sm.UsingColEscaped} { + lv := len(val) + if lv < 3 || val[0] != '`' || val[lv-1] != '`' { + return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, + "the database (%s), table (%s), and column (%s) names must be non-empty escaped values", sm.UsingTableDbNameEscaped, sm.UsingTableNameEscaped, sm.UsingColEscaped) + } + } query := sqlparser.BuildParsedQuery(sqlGetMaxSequenceVal, sm.UsingColEscaped, sm.UsingTableDbNameEscaped, @@ -797,6 +804,11 @@ func (tm *TabletManager) updateSequenceValue(ctx context.Context, seq *tabletman if tm.Tablet().DbNameOverride != "" { seq.BackingTableDbName = tm.Tablet().DbNameOverride } + backingTableDbNameEscaped, err := sqlescape.EnsureEscaped(seq.BackingTableDbName) + if err != nil { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid database name %s specified for sequence backing table: %v", + seq.BackingTableDbName, err) + } backingTableNameEscaped, err := sqlescape.EnsureEscaped(seq.BackingTableName) if err != nil { return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid table name %s specified for sequence backing table: %v", @@ -804,8 +816,8 @@ func (tm *TabletManager) updateSequenceValue(ctx context.Context, seq *tabletman } log.Infof("Updating 
sequence %s.%s to %d", seq.BackingTableDbName, seq.BackingTableName, nextVal) initQuery := sqlparser.BuildParsedQuery(sqlInitSequenceTable, - seq.BackingTableDbName, - seq.BackingTableName, + backingTableDbNameEscaped, + backingTableNameEscaped, nextVal, nextVal, nextVal, @@ -828,7 +840,7 @@ func (tm *TabletManager) updateSequenceValue(ctx context.Context, seq *tabletman return vterrors.Errorf( vtrpcpb.Code_INTERNAL, "failed to initialize the backing sequence table %s.%s: %v", - seq.BackingTableDbName, seq.BackingTableName, err, + backingTableDbNameEscaped, backingTableNameEscaped, err, ) } @@ -842,12 +854,16 @@ func (tm *TabletManager) updateSequenceValue(ctx context.Context, seq *tabletman return vterrors.Errorf( vtrpcpb.Code_INTERNAL, "failed to initialize the backing sequence table %s.%s after retries. Last error: %v", - seq.BackingTableDbName, backingTableNameEscaped, err) + backingTableDbNameEscaped, backingTableNameEscaped, err) } -func (tm *TabletManager) createSequenceTable(ctx context.Context, escapedTableName string) error { +func (tm *TabletManager) createSequenceTable(ctx context.Context, tableName string) error { + escapedTableName, err := sqlescape.EnsureEscaped(tableName) + if err != nil { + return err + } stmt := sqlparser.BuildParsedQuery(sqlCreateSequenceTable, escapedTableName) - _, err := tm.ApplySchema(ctx, &tmutils.SchemaChange{ + _, err = tm.ApplySchema(ctx, &tmutils.SchemaChange{ SQL: stmt.Query, Force: false, AllowReplication: true,