Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion go/test/endtoend/vtgate/lookup_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,7 @@ func TestConsistentLookup(t *testing.T) {
mysqlErr := err.(*sqlerror.SQLError)
assert.Equal(t, sqlerror.ERDupEntry, mysqlErr.Num)
assert.Equal(t, "23000", mysqlErr.State)
assert.ErrorContains(t, mysqlErr, "reverted partial DML execution")
assert.ErrorContains(t, mysqlErr, "lookup.Create: target: ks.80-.primary: vttablet: (errno 1062) (sqlstate 23000)")

// Simple delete.
utils.Exec(t, conn, "begin")
Expand Down
98 changes: 91 additions & 7 deletions go/test/endtoend/vtgate/queries/dml/dml_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,92 @@ import (
"github.com/stretchr/testify/require"
)

// TestUniqueLookupDuplicateEntries verifies that DML statements fail when they
// would create a duplicate entry in the unique lookup column, and that a failed
// statement leaves both the main table and the lookup vindex table unchanged.
func TestUniqueLookupDuplicateEntries(t *testing.T) {
	mcmp, closer := start(t)
	defer closer()

	// Seed an initial row and confirm the main table and the lookup table agree.
	utils.Exec(t, mcmp.VtConn, "insert into s_tbl(id, num) values (1,10)")
	utils.AssertMatches(t, mcmp.VtConn, "select id, num from s_tbl order by id", `[[INT64(1) INT64(10)]]`)
	utils.AssertMatches(t, mcmp.VtConn, "select num, hex(keyspace_id) from num_vdx_tbl order by num", `[[INT64(10) VARCHAR("166B40B44ABA4BD6")]]`)

	// Inserting a row that duplicates an existing lookup value fails inside
	// lookup.Create; no rows are added to either table.
	utils.AssertContainsError(t, mcmp.VtConn, "insert into s_tbl(id, num) values (2,10)", "lookup.Create: target: sks.-80.primary: vttablet: "+
		"Duplicate entry '10' for key 'num_vdx_tbl.PRIMARY'")
	utils.AssertMatches(t, mcmp.VtConn, "select id, num from s_tbl order by id", `[[INT64(1) INT64(10)]]`)
	utils.AssertMatches(t, mcmp.VtConn, "select num, hex(keyspace_id) from num_vdx_tbl order by num", `[[INT64(10) VARCHAR("166B40B44ABA4BD6")]]`)

	// Duplicate value inside a multi-row, multi-shard insert: the partially
	// executed DML is rolled back and neither table changes.
	utils.AssertContainsError(t, mcmp.VtConn, "insert into s_tbl(id, num) values (3,20), (4,20),(5,30)",
		"transaction rolled back to reverse changes of partial DML execution: target: sks.80-.primary: vttablet: "+
			"Duplicate entry '20' for key 'num_vdx_tbl.PRIMARY'")
	utils.AssertMatches(t, mcmp.VtConn, "select id, num from s_tbl order by id", `[[INT64(1) INT64(10)]]`)
	utils.AssertMatches(t, mcmp.VtConn, "select num, hex(keyspace_id) from num_vdx_tbl order by num", `[[INT64(10) VARCHAR("166B40B44ABA4BD6")]]`)

	// Duplicate value inside a multi-row insert whose lookup rows land on a
	// single shard: the failure surfaces through lookup.Create and the partial
	// execution is rolled back.
	utils.AssertContainsError(t, mcmp.VtConn, "insert into s_tbl(id, num) values (3,20), (4,20)",
		"transaction rolled back to reverse changes of partial DML execution: lookup.Create: target: sks.80-.primary: vttablet: "+
			"Duplicate entry '20' for key 'num_vdx_tbl.PRIMARY'")
	utils.AssertMatches(t, mcmp.VtConn, "select id, num from s_tbl order by id", `[[INT64(1) INT64(10)]]`)
	utils.AssertMatches(t, mcmp.VtConn, "select num, hex(keyspace_id) from num_vdx_tbl order by num", `[[INT64(10) VARCHAR("166B40B44ABA4BD6")]]`)

	// Insert a second row so the update-with-limit cases below have two rows.
	utils.Exec(t, mcmp.VtConn, "insert into s_tbl(id, num) values (10,100)")
	utils.AssertMatches(t, mcmp.VtConn, "select id, num from s_tbl order by id", `[[INT64(1) INT64(10)] [INT64(10) INT64(100)]]`)
	utils.AssertMatches(t, mcmp.VtConn, "select num, hex(keyspace_id) from num_vdx_tbl order by num", `[[INT64(10) VARCHAR("166B40B44ABA4BD6")] [INT64(100) VARCHAR("594764E1A2B2D98E")]]`)

	// Updating a single row (limit 1) to a fresh value succeeds, and the
	// lookup table is updated accordingly.
	utils.Exec(t, mcmp.VtConn, "update s_tbl set num = 30 order by id limit 1")
	utils.AssertMatches(t, mcmp.VtConn, "select id, num from s_tbl order by id", `[[INT64(1) INT64(30)] [INT64(10) INT64(100)]]`)
	utils.AssertMatches(t, mcmp.VtConn, "select num, hex(keyspace_id) from num_vdx_tbl order by num", `[[INT64(30) VARCHAR("166B40B44ABA4BD6")] [INT64(100) VARCHAR("594764E1A2B2D98E")]]`)

	// Updating multiple rows to the same unique value must fail and roll back.
	utils.AssertContainsError(t, mcmp.VtConn, "update s_tbl set num = 40 limit 2",
		"lookup.Create: transaction rolled back to reverse changes of partial DML execution: target: sks.80-.primary: vttablet: "+
			"rpc error: code = AlreadyExists desc = Duplicate entry '40' for key 'num_vdx_tbl.PRIMARY'")
	utils.AssertMatches(t, mcmp.VtConn, "select id, num from s_tbl order by id", `[[INT64(1) INT64(30)] [INT64(10) INT64(100)]]`)
	utils.AssertMatches(t, mcmp.VtConn, "select num, hex(keyspace_id) from num_vdx_tbl order by num", `[[INT64(30) VARCHAR("166B40B44ABA4BD6")] [INT64(100) VARCHAR("594764E1A2B2D98E")]]`)
}

// TestUniqueLookupDuplicateIgnore tests `insert ignore` behavior on a table
// backed by a unique lookup vindex: duplicates are silently skipped and
// RowsAffected reports only the rows that were actually inserted.
func TestUniqueLookupDuplicateIgnore(t *testing.T) {
	mcmp, closer := start(t)
	defer closer()

	// Seed an initial row and confirm the main table and the lookup table agree.
	utils.Exec(t, mcmp.VtConn, "insert into s_tbl(id, num) values (1,10)")
	utils.AssertMatches(t, mcmp.VtConn, "select id, num from s_tbl order by id", `[[INT64(1) INT64(10)]]`)
	utils.AssertMatches(t, mcmp.VtConn, "select num, hex(keyspace_id) from num_vdx_tbl order by num", `[[INT64(10) VARCHAR("166B40B44ABA4BD6")]]`)

	// `insert ignore` of a duplicate row affects no rows and changes nothing.
	qr := utils.Exec(t, mcmp.VtConn, "insert ignore into s_tbl(id, num) values (2,10)")
	assert.EqualValues(t, 0, qr.RowsAffected)
	utils.AssertMatches(t, mcmp.VtConn, "select id, num from s_tbl order by id", `[[INT64(1) INT64(10)]]`)
	utils.AssertMatches(t, mcmp.VtConn, "select num, hex(keyspace_id) from num_vdx_tbl order by num", `[[INT64(10) VARCHAR("166B40B44ABA4BD6")]]`)

	// Duplicate row in a multi-row insert whose lookup rows land on a single shard.
	// Current behavior does not work as expected — one of the rows should be inserted.
	// The lookup table is updated, but the main table is not. This is a bug in Vitess.
	// The issue occurs because the table has two vindex columns (`num` and `col`), both of which ignore nulls during vindex insertion.
	// In the `INSERT IGNORE` case, after the vindex create API call, a verify call checks if the row exists in the lookup table.
	// - If the row exists, it is inserted into the main table.
	// - If the row does not exist, the main table insertion is skipped.
	// Since the `col` column is null, the row is not inserted into the lookup table, causing the main table insertion to be ignored.
	qr = utils.Exec(t, mcmp.VtConn, "insert ignore into s_tbl(id, num) values (3,20), (4,20)")
	assert.EqualValues(t, 0, qr.RowsAffected)
	utils.AssertMatches(t, mcmp.VtConn, "select id, num from s_tbl order by id", `[[INT64(1) INT64(10)]]`)
	utils.AssertMatches(t, mcmp.VtConn, "select num, hex(keyspace_id) from num_vdx_tbl order by num", `[[INT64(10) VARCHAR("166B40B44ABA4BD6")] [INT64(20) VARCHAR("4EB190C9A2FA169C")]]`)

	// Duplicate row in a multi-row insert where all vindex values are non-null:
	// exactly one of the two conflicting rows is inserted and both lookup
	// tables (num and col) reflect it.
	qr = utils.Exec(t, mcmp.VtConn, "insert ignore into s_tbl(id, num, col) values (3,20, 30), (4,20, 40)")
	assert.EqualValues(t, 1, qr.RowsAffected)
	utils.AssertMatches(t, mcmp.VtConn, "select id, num, col from s_tbl order by id", `[[INT64(1) INT64(10) NULL] [INT64(3) INT64(20) INT64(30)]]`)
	utils.AssertMatches(t, mcmp.VtConn, "select num, hex(keyspace_id) from num_vdx_tbl order by num", `[[INT64(10) VARCHAR("166B40B44ABA4BD6")] [INT64(20) VARCHAR("4EB190C9A2FA169C")]]`)
	utils.AssertMatches(t, mcmp.VtConn, "select col, hex(keyspace_id) from col_vdx_tbl order by col", `[[INT64(30) VARCHAR("4EB190C9A2FA169C")]]`)

}

func TestMultiEqual(t *testing.T) {
if clusterInstance.HasPartialKeyspaces {
t.Skip("test uses multiple keyspaces, test framework only supports partial keyspace testing for a single keyspace")
Expand Down Expand Up @@ -80,7 +166,7 @@ func TestDeleteWithLimit(t *testing.T) {
defer closer()

// initial rows
mcmp.Exec("insert into s_tbl(id, col) values (1,10), (2,10), (3,10), (4,20), (5,5), (6,15), (7,17), (8,80)")
mcmp.Exec("insert into s_tbl(id, col) values (1,10), (4,20), (5,5), (6,15), (7,17), (8,80)")
mcmp.Exec("insert into order_tbl(region_id, oid, cust_no) values (1,1,4), (1,2,2), (2,3,5), (2,4,55)")

// delete with limit
Expand All @@ -92,25 +178,23 @@ func TestDeleteWithLimit(t *testing.T) {

// check rows
mcmp.AssertMatches(`select id, col from s_tbl order by id`,
`[[INT64(3) INT64(10)] [INT64(4) INT64(20)] [INT64(6) INT64(15)] [INT64(7) INT64(17)] [INT64(8) INT64(80)]]`)
`[[INT64(4) INT64(20)] [INT64(7) INT64(17)] [INT64(8) INT64(80)]]`)
// 2 rows matches but limit is 1, so any one of the row can remain in table.
mcmp.AssertMatchesAnyNoCompare(`select region_id, oid, cust_no from order_tbl order by oid`,
`[[INT64(1) INT64(2) INT64(2)] [INT64(2) INT64(3) INT64(5)] [INT64(2) INT64(4) INT64(55)]]`,
`[[INT64(1) INT64(1) INT64(4)] [INT64(2) INT64(3) INT64(5)] [INT64(2) INT64(4) INT64(55)]]`)

// delete with limit
qr = mcmp.Exec(`delete from s_tbl where col < 20 limit 2`)
qr = mcmp.Exec(`delete from s_tbl where col < 25 limit 2`)
require.EqualValues(t, 2, qr.RowsAffected)

qr = mcmp.Exec(`delete from order_tbl limit 5`)
require.EqualValues(t, 3, qr.RowsAffected)

// check rows
// 3 rows matches `col < 20` but limit is 2 so any one of them can remain in the table.
mcmp.AssertMatchesAnyNoCompare(`select id, col from s_tbl order by id`,
`[[INT64(4) INT64(20)] [INT64(7) INT64(17)] [INT64(8) INT64(80)]]`,
`[[INT64(3) INT64(10)] [INT64(4) INT64(20)] [INT64(8) INT64(80)]]`,
`[[INT64(4) INT64(20)] [INT64(6) INT64(15)] [INT64(8) INT64(80)]]`)
mcmp.AssertMatches(`select id, col from s_tbl order by id`,
`[[INT64(8) INT64(80)]]`)
mcmp.AssertMatches(`select region_id, oid, cust_no from order_tbl order by oid`,
`[]`)

Expand Down
28 changes: 17 additions & 11 deletions go/vt/vterrors/constants.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,23 +27,29 @@ const (
// RxOp regex for operation not allowed error
var RxOp = regexp.MustCompile("operation not allowed in state (NOT_SERVING|SHUTTING_DOWN)")

// TxEngineClosed for transaction engine closed error
const TxEngineClosed = "tx engine can't accept new connections in state %v"

// WrongTablet for invalid tablet type error
const WrongTablet = "wrong tablet type"

// RxWrongTablet regex for invalid tablet type error
var RxWrongTablet = regexp.MustCompile("(wrong|invalid) tablet type")

// Constants for error messages
const (
// TxEngineClosed for transaction engine closed error
TxEngineClosed = "tx engine can't accept new connections in state %v"

// PrimaryVindexNotSet is the error message to be used when there is no primary vindex found on a table
PrimaryVindexNotSet = "table '%s' does not have a primary vindex"

// WrongTablet for invalid tablet type error
WrongTablet = "wrong tablet type"

// TxKillerRollback purpose when acquire lock on connection for rolling back transaction.
TxKillerRollback = "in use: for tx killer rollback"

// RevertedPartialExec is the error message to be used when a partial DML execution failure is reverted using savepoint.
RevertedPartialExec = "reverted partial DML execution failure"

// TxRollbackOnPartialExec is the error message to be used when a transaction is rolled back to reverse changes of partial DML execution
TxRollbackOnPartialExec = "transaction rolled back to reverse changes of partial DML execution"
)

// TxKillerRollback purpose when acquire lock on connection for rolling back transaction.
const TxKillerRollback = "in use: for tx killer rollback"
// RxWrongTablet regex for invalid tablet type error
var RxWrongTablet = regexp.MustCompile("(wrong|invalid) tablet type")

// TxClosed regex for connection closed
var TxClosed = regexp.MustCompile("transaction ([a-z0-9:]+) (?:ended|not found|in use: for tx killer rollback)")
65 changes: 65 additions & 0 deletions go/vt/vtgate/executor_dml_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,9 @@ import (
"vitess.io/vitess/go/mysql/sqlerror"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/test/utils"
"vitess.io/vitess/go/vt/discovery"
querypb "vitess.io/vitess/go/vt/proto/query"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
vtgatepb "vitess.io/vitess/go/vt/proto/vtgate"
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
Expand Down Expand Up @@ -3140,3 +3142,66 @@ func TestDeleteMultiTable(t *testing.T) {
// delete from `user` where (`user`.id) in ::dml_vals - 1 shard
testQueryLog(t, executor, logChan, "TestExecute", "DELETE", "delete `user` from `user` join music on `user`.col = music.col where music.user_id = 1", 18)
}

// TestConsistentLookupInsert exercises multi-row inserts through a consistent
// lookup vindex against a two-shard sandbox, checking the surfaced error text
// and the per-shard execution counts when the lookup insert hits a
// duplicate-entry error.
func TestConsistentLookupInsert(t *testing.T) {
	ctx := utils.LeakCheckContext(t)

	// Special setup: a two-shard (-80 / 80-) sandbox keyspace with fake
	// tablets so errors can be injected on a specific shard.
	cell := "zone1"
	ks := "TestExecutor"
	hc := discovery.NewFakeHealthCheck(nil)
	s := createSandbox(ks)
	s.ShardSpec = "-80-"
	s.VSchema = executorVSchema
	serv := newSandboxForCells(ctx, []string{cell})
	resolver := newTestResolver(ctx, hc, serv, cell)
	sbc1 := hc.AddTestTablet(cell, "-80", 1, ks, "-80", topodatapb.TabletType_PRIMARY, true, 1, nil)
	sbc2 := hc.AddTestTablet(cell, "80-", 1, ks, "80-", topodatapb.TabletType_PRIMARY, true, 1, nil)

	executor := createExecutor(ctx, serv, cell, resolver)
	defer executor.Close()

	logChan := executor.queryLogger.Subscribe("Test")
	defer executor.queryLogger.Unsubscribe(logChan)

	session := NewAutocommitSession(&vtgatepb.Session{})

	t.Run("transaction rollback due to partial execution error, no duplicate handling", func(t *testing.T) {
		// The -80 shard (sbc1) fails with a duplicate-entry error while 80-
		// (sbc2) succeeds, so the statement is rolled back as a whole.
		sbc1.EphemeralShardErr = sqlerror.NewSQLError(sqlerror.ERDupEntry, sqlerror.SSConstraintViolation, "Duplicate entry '10' for key 't1_lkp_idx.PRIMARY'")
		sbc2.SetResults([]*sqltypes.Result{{RowsAffected: 1}})
		_, err := executorExecSession(ctx, executor, "insert into t1(id, unq_col) values (1, 10), (4, 10), (50, 4)", nil, session.Session)
		assert.ErrorContains(t, err,
			"lookup.Create: transaction rolled back to reverse changes of partial DML execution: target: TestExecutor.-80.primary: "+
				"Duplicate entry '10' for key 't1_lkp_idx.PRIMARY' (errno 1062) (sqlstate 23000)")

		// The failing shard records no execution; the healthy shard records one.
		assert.EqualValues(t, 0, sbc1.ExecCount.Load())
		assert.EqualValues(t, 1, sbc2.ExecCount.Load())

		testQueryLog(t, executor, logChan, "VindexCreate", "INSERT", "insert into t1_lkp_idx(unq_col, keyspace_id) values (:unq_col_0, :keyspace_id_0), (:unq_col_1, :keyspace_id_1), (:unq_col_2, :keyspace_id_2)", 2)
		testQueryLog(t, executor, logChan, "TestExecute", "INSERT", "insert into t1(id, unq_col) values (1, 10), (4, 10), (50, 4)", 0)
	})

	// Reset tablet counters and the session between the subtests.
	sbc1.ExecCount.Store(0)
	sbc2.ExecCount.Store(0)
	session = NewAutocommitSession(session.Session)

	t.Run("duplicate handling failing on same unique column value", func(t *testing.T) {
		// Both rows carry the same unique lookup value. Duplicate handling
		// runs (the `select ... for update` below) and the retried insert
		// still fails with the duplicate-entry error.
		sbc1.EphemeralShardErr = sqlerror.NewSQLError(sqlerror.ERDupEntry, sqlerror.SSConstraintViolation, "Duplicate entry '10' for key 't1_lkp_idx.PRIMARY'")
		sbc1.SetResults([]*sqltypes.Result{
			sqltypes.MakeTestResult(sqltypes.MakeTestFields("keyspace_id", "varbinary")),
			{RowsAffected: 1},
		})
		_, err := executorExecSession(ctx, executor, "insert into t1(id, unq_col) values (1, 10), (4, 10)", nil, session.Session)
		assert.ErrorContains(t, err,
			"transaction rolled back to reverse changes of partial DML execution: lookup.Create: target: TestExecutor.-80.primary: "+
				"Duplicate entry '10' for key 't1_lkp_idx.PRIMARY' (errno 1062) (sqlstate 23000)")

		assert.EqualValues(t, 2, sbc1.ExecCount.Load())
		assert.EqualValues(t, 0, sbc2.ExecCount.Load())

		testQueryLog(t, executor, logChan, "VindexCreate", "INSERT", "insert into t1_lkp_idx(unq_col, keyspace_id) values (:unq_col_0, :keyspace_id_0), (:unq_col_1, :keyspace_id_1)", 1)
		testQueryLog(t, executor, logChan, "VindexCreate", "SELECT", "select keyspace_id from t1_lkp_idx where unq_col = :unq_col for update", 1)
		testQueryLog(t, executor, logChan, "VindexCreate", "INSERT", "insert into t1_lkp_idx(unq_col, keyspace_id) values (:unq_col, :keyspace_id)", 1)
		testQueryLog(t, executor, logChan, "TestExecute", "INSERT", "insert into t1(id, unq_col) values (1, 10), (4, 10)", 0)
	})
}
5 changes: 3 additions & 2 deletions go/vt/vtgate/plan_execute.go
Original file line number Diff line number Diff line change
Expand Up @@ -367,15 +367,16 @@ func (e *Executor) rollbackPartialExec(ctx context.Context, safeSession *SafeSes
_, _, err = e.execute(ctx, nil, safeSession, rQuery, bindVars, logStats)
// If no error, the revert is successful with the savepoint. Notify the reason as error to the client.
if err == nil {
errMsg.WriteString("reverted partial DML execution failure")
errMsg.WriteString(vterrors.RevertedPartialExec)
return vterrors.New(vtrpcpb.Code_ABORTED, errMsg.String())
}
// not able to rollback changes of the failed query, so have to abort the complete transaction.
}

// abort the transaction.
_ = e.txConn.Rollback(ctx, safeSession)
errMsg.WriteString("transaction rolled back to reverse changes of partial DML execution")

errMsg.WriteString(vterrors.TxRollbackOnPartialExec)
if err != nil {
return vterrors.Wrap(err, errMsg.String())
}
Expand Down
4 changes: 3 additions & 1 deletion go/vt/vtgate/vcursor_impl.go
Original file line number Diff line number Diff line change
Expand Up @@ -544,7 +544,9 @@ func (vc *vcursorImpl) Execute(ctx context.Context, method string, query string,
}

qr, err := vc.executor.Execute(ctx, nil, method, session, vc.marginComments.Leading+query+vc.marginComments.Trailing, bindVars)
vc.setRollbackOnPartialExecIfRequired(err != nil, rollbackOnError)
// If there is no error, it indicates at least one successful execution,
// meaning a rollback should be triggered if a failure occurs later.
vc.setRollbackOnPartialExecIfRequired(err == nil, rollbackOnError)

return qr, err
}
Expand Down
41 changes: 35 additions & 6 deletions go/vt/vtgate/vindexes/consistent_lookup.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,16 +23,17 @@ import (
"fmt"
"strings"

"github.com/cespare/xxhash/v2"

"vitess.io/vitess/go/mysql/sqlerror"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/key"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/evalengine"

querypb "vitess.io/vitess/go/vt/proto/query"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
vtgatepb "vitess.io/vitess/go/vt/proto/vtgate"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/evalengine"
)

const (
Expand Down Expand Up @@ -342,25 +343,53 @@ func (lu *clCommon) Verify(ctx context.Context, vcursor VCursor, ids []sqltypes.

// Create reserves the id by inserting it into the vindex table.
// If the batch insert fails with a duplicate-entry error, it retries row by
// row via handleDup so that rows whose lookup entry already maps to the same
// keyspace id can still be accepted.
func (lu *clCommon) Create(ctx context.Context, vcursor VCursor, rowsColValues [][]sqltypes.Value, ksids [][]byte, ignoreMode bool) error {
	// Attempt to insert values into the lookup vindex table.
	origErr := lu.lkp.createCustom(ctx, vcursor, rowsColValues, ksidsToValues(ksids), ignoreMode, vtgatepb.CommitOrder_PRE)
	if origErr == nil {
		return nil
	}

	// If the transaction is already rolled back, we should not attempt
	// duplicate handling.
	if strings.Contains(origErr.Error(), vterrors.TxRollbackOnPartialExec) {
		return origErr
	}

	// Try to convert the error to a MySQL error.
	sqlErr, isSQLErr := sqlerror.NewSQLErrorFromError(origErr).(*sqlerror.SQLError)

	// If the error is NOT a duplicate entry error, return it immediately.
	if !(isSQLErr && sqlErr != nil && sqlErr.Number() == sqlerror.ERDupEntry) {
		return origErr
	}

	// Track the indexes of all previously seen rows per hash so that an exact
	// duplicate within this statement fails fast. Chaining every index per
	// hash (instead of keeping only the most recent one) keeps the duplicate
	// check correct even when distinct rows collide on the hash: a collision
	// must not hide a later exact duplicate of an earlier row.
	rowHashIndex := make(map[uint64][]int, len(rowsColValues))
	for i, row := range rowsColValues {
		rowKey := hashKeyXXH(row)
		// Compare against every previously seen row with the same hash to
		// rule out hash collisions before declaring a duplicate.
		for _, idx := range rowHashIndex[rowKey] {
			if sqltypes.RowEqual(row, rowsColValues[idx]) {
				// Exact duplicate within the statement: surface the original error.
				return origErr
			}
		}
		rowHashIndex[rowKey] = append(rowHashIndex[rowKey], i)

		// Attempt to handle the duplicate entry for this row.
		if err := lu.handleDup(ctx, vcursor, row, ksids[i], origErr); err != nil {
			return err
		}
	}
	return nil
}

// hashKeyXXH generates a fast 64-bit hash for a row using xxHash.
// Each column value is prefixed with its length so that different column
// splits of the same concatenated bytes (e.g. {"ab","c"} vs {"a","bc"})
// do not hash to the same key; callers still verify exact equality on a
// hash match, so this only reduces needless collisions.
func hashKeyXXH(row []sqltypes.Value) uint64 {
	h := xxhash.New()
	for _, col := range row {
		s := col.String()
		// Errors are ignored: xxhash's Write never fails.
		_, _ = fmt.Fprintf(h, "%d:", len(s))
		_, _ = h.Write([]byte(s))
	}
	return h.Sum64()
}

func (lu *clCommon) handleDup(ctx context.Context, vcursor VCursor, values []sqltypes.Value, ksid []byte, dupError error) error {
bindVars := make(map[string]*querypb.BindVariable, len(values))
for colnum, val := range values {
Expand Down
2 changes: 1 addition & 1 deletion go/vt/vtgate/vindexes/vschema.go
Original file line number Diff line number Diff line change
Expand Up @@ -1394,7 +1394,7 @@ func ChooseVindexForType(typ querypb.Type) (string, error) {

// FindBestColVindex finds the best ColumnVindex for VReplication.
func FindBestColVindex(table *Table) (*ColumnVindex, error) {
if table.ColumnVindexes == nil || len(table.ColumnVindexes) == 0 {
if len(table.ColumnVindexes) == 0 {
return nil, vterrors.Errorf(
vtrpcpb.Code_INVALID_ARGUMENT,
"table %s has no vindex",
Expand Down
Loading