diff --git a/doc/Upgrading.md b/doc/Upgrading.md index efc5f59361a..625c61aaafa 100644 --- a/doc/Upgrading.md +++ b/doc/Upgrading.md @@ -2,7 +2,7 @@ This document highlights things to look after when upgrading a Vitess production installation to a newer Vitess release. -Generally speaking, upgrading Vitess is a safe and and easy process because it is explicitly designed for it. This is because in YouTube we follow the practice of releasing new versions often (usually from the tip of the Git master branch). +Generally speaking, upgrading Vitess is a safe and easy process because it is explicitly designed for it. This is because in YouTube we follow the practice of releasing new versions often (usually from the tip of the Git master branch). ## Compatibility diff --git a/doc/V3HighLevelDesign.md b/doc/V3HighLevelDesign.md index 6e87e140043..b599f7e89ba 100644 --- a/doc/V3HighLevelDesign.md +++ b/doc/V3HighLevelDesign.md @@ -1311,7 +1311,7 @@ When two nodes are grouped, the current join condition becomes the root of the n * If it’s a JOIN, the new property is the more restrictive of the two nodes. So, if one of them is a Route, then the new node is also a Route. * For a LEFT JOIN, the new property is the same as the LHS node. -If the grouping conditions are not met, then the node remains a join node. In this case, we have to see if the ON clause conditions can be pushed down into the left and/or right nodes. By the fact that the current join is split into two, the ON clause cannot be be pushed as is. Instead, we use associativity rules to our benefit and merge the ON clause conditions into the WHERE clauses of the underlying nodes. The rules are the same as the ones described for a normal WHERE clause. +If the grouping conditions are not met, then the node remains a join node. In this case, we have to see if the ON clause conditions can be pushed down into the left and/or right nodes. 
By the fact that the current join is split into two, the ON clause cannot be pushed as is. Instead, we use associativity rules to our benefit and merge the ON clause conditions into the WHERE clauses of the underlying nodes. The rules are the same as the ones described for a normal WHERE clause. But left joins are slightly different, because the join condition is applied *to the RHS only*. Also, the condition cannot be further pushed into other nested left joins, because they will change the meaning of the statement. For example: diff --git a/go/vt/automation/wait_for_filtered_replication_task.go b/go/vt/automation/wait_for_filtered_replication_task.go index 63d910f4d5c..c820a75801c 100644 --- a/go/vt/automation/wait_for_filtered_replication_task.go +++ b/go/vt/automation/wait_for_filtered_replication_task.go @@ -23,7 +23,7 @@ import ( ) // WaitForFilteredReplicationTask runs vtctl WaitForFilteredReplication to block until the destination master -// (i.e. the receiving side of the filtered replication) has caught up up to max_delay with the source shard. +// (i.e. the receiving side of the filtered replication) has caught up to max_delay with the source shard. type WaitForFilteredReplicationTask struct { } diff --git a/go/vt/throttler/max_replication_lag_module_test.go b/go/vt/throttler/max_replication_lag_module_test.go index 0d57a68d3fc..e5537e64f24 100644 --- a/go/vt/throttler/max_replication_lag_module_test.go +++ b/go/vt/throttler/max_replication_lag_module_test.go @@ -283,7 +283,7 @@ func TestMaxReplicationLagModule_ReplicaUnderTest_Timeout(t *testing.T) { } // r2 as "replica under test" did not report its lag for too long. - // We'll ignore it from now and and let other replicas trigger rate changes. + // We'll ignore it from now and let other replicas trigger rate changes. // r1 @ 173s, 0s lag // time for r1 must be > 172s (70s + 40s + 62s) which is // (last rate change + test duration + max duration between increases). 
diff --git a/go/vt/vttablet/tabletserver/connpool/dbconn_test.go b/go/vt/vttablet/tabletserver/connpool/dbconn_test.go index ba7f25a2f2d..594f2d80d3a 100644 --- a/go/vt/vttablet/tabletserver/connpool/dbconn_test.go +++ b/go/vt/vttablet/tabletserver/connpool/dbconn_test.go @@ -102,7 +102,7 @@ func TestDBConnExec(t *testing.T) { startCounts = tabletenv.MySQLStats.Counts() - // Set the connection fail flag and and try again. + // Set the connection fail flag and try again. // This time the initial query fails as does the reconnect attempt. db.EnableConnFail() _, err = dbConn.Exec(ctx, sql, 1, false) diff --git a/go/vt/workflow/parallel_runner.go b/go/vt/workflow/parallel_runner.go index 847bfb8b868..da8d0e30479 100644 --- a/go/vt/workflow/parallel_runner.go +++ b/go/vt/workflow/parallel_runner.go @@ -138,7 +138,7 @@ func (p *ParallelRunner) Run() error { } select { case <-p.ctx.Done(): - // Break this run and return early. Do not try to to execute any subsequent tasks. + // Break this run and return early. Do not try to execute any subsequent tasks. log.Infof("Workflow is cancelled, remaining tasks will be aborted") return nil default: diff --git a/test/TestingStrategy.md b/test/TestingStrategy.md index 234fe9bd941..bc742ffcae5 100644 --- a/test/TestingStrategy.md +++ b/test/TestingStrategy.md @@ -36,7 +36,7 @@ Due to its constant nature, this is not an appropriate framework to test cluster These tests run more complicated setups, and take a lot more resources. They are meant to test end-to-end behaviors of the Vitess ecosystem, and complement the unit tests. -For instance, we test each RPC interaction independently (client to vtgate, vtgate to vttablet, vttablet to MySQL, see previous sections). But is is also good to have an end-to-end test that validates everything works together. +For instance, we test each RPC interaction independently (client to vtgate, vtgate to vttablet, vttablet to MySQL, see previous sections). 
But it is also good to have an end-to-end test that validates everything works together. These tests almost always launch a topology service, a few mysqld instances, a few vttablets, a vtctld process, a few vtgates, ... They use the real production processes, and real replication. This setup is mandatory for properly testing re-sharding, cluster operations, ... They all however run on the same machine, so they might be limited by the environment.