From 3fb294471c991d8e44c44b0c538a9f9bedc1e0f9 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Tue, 2 Jul 2019 10:30:40 -0700 Subject: [PATCH 01/17] migrater: initial cut Signed-off-by: Sugu Sougoumarane --- go/vt/proto/binlogdata/binlogdata.pb.go | 362 +++++++---- .../vreplication/controller_plan.go | 56 +- .../vreplication/controller_plan_test.go | 4 +- .../tabletmanager/vreplication/engine.go | 33 +- .../tabletmanager/vreplication/engine_test.go | 2 + go/vt/wrangler/migrate_writes.go | 569 ++++++++++++++++++ proto/binlogdata.proto | 15 + py/vtproto/binlogdata_pb2.py | 160 ++++- 8 files changed, 1049 insertions(+), 152 deletions(-) create mode 100644 go/vt/wrangler/migrate_writes.go diff --git a/go/vt/proto/binlogdata/binlogdata.pb.go b/go/vt/proto/binlogdata/binlogdata.pb.go index a0b4c05757a..47fa6baf57a 100644 --- a/go/vt/proto/binlogdata/binlogdata.pb.go +++ b/go/vt/proto/binlogdata/binlogdata.pb.go @@ -48,7 +48,7 @@ func (x OnDDLAction) String() string { return proto.EnumName(OnDDLAction_name, int32(x)) } func (OnDDLAction) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{0} + return fileDescriptor_binlogdata_1f081d4c0b940318, []int{0} } // VEventType enumerates the event types. 
@@ -73,6 +73,7 @@ const ( VEventType_FIELD VEventType = 13 VEventType_HEARTBEAT VEventType = 14 VEventType_VGTID VEventType = 15 + VEventType_JOURNAL VEventType = 16 ) var VEventType_name = map[int32]string{ @@ -92,6 +93,7 @@ var VEventType_name = map[int32]string{ 13: "FIELD", 14: "HEARTBEAT", 15: "VGTID", + 16: "JOURNAL", } var VEventType_value = map[string]int32{ "UNKNOWN": 0, @@ -110,13 +112,14 @@ var VEventType_value = map[string]int32{ "FIELD": 13, "HEARTBEAT": 14, "VGTID": 15, + "JOURNAL": 16, } func (x VEventType) String() string { return proto.EnumName(VEventType_name, int32(x)) } func (VEventType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{1} + return fileDescriptor_binlogdata_1f081d4c0b940318, []int{1} } type BinlogTransaction_Statement_Category int32 @@ -164,7 +167,7 @@ func (x BinlogTransaction_Statement_Category) String() string { return proto.EnumName(BinlogTransaction_Statement_Category_name, int32(x)) } func (BinlogTransaction_Statement_Category) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{1, 0, 0} + return fileDescriptor_binlogdata_1f081d4c0b940318, []int{1, 0, 0} } // Charset is the per-statement charset info from a QUERY_EVENT binlog entry. 
@@ -184,7 +187,7 @@ func (m *Charset) Reset() { *m = Charset{} } func (m *Charset) String() string { return proto.CompactTextString(m) } func (*Charset) ProtoMessage() {} func (*Charset) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{0} + return fileDescriptor_binlogdata_1f081d4c0b940318, []int{0} } func (m *Charset) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Charset.Unmarshal(m, b) @@ -241,7 +244,7 @@ func (m *BinlogTransaction) Reset() { *m = BinlogTransaction{} } func (m *BinlogTransaction) String() string { return proto.CompactTextString(m) } func (*BinlogTransaction) ProtoMessage() {} func (*BinlogTransaction) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{1} + return fileDescriptor_binlogdata_1f081d4c0b940318, []int{1} } func (m *BinlogTransaction) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BinlogTransaction.Unmarshal(m, b) @@ -291,7 +294,7 @@ func (m *BinlogTransaction_Statement) Reset() { *m = BinlogTransaction_S func (m *BinlogTransaction_Statement) String() string { return proto.CompactTextString(m) } func (*BinlogTransaction_Statement) ProtoMessage() {} func (*BinlogTransaction_Statement) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{1, 0} + return fileDescriptor_binlogdata_1f081d4c0b940318, []int{1, 0} } func (m *BinlogTransaction_Statement) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BinlogTransaction_Statement.Unmarshal(m, b) @@ -349,7 +352,7 @@ func (m *StreamKeyRangeRequest) Reset() { *m = StreamKeyRangeRequest{} } func (m *StreamKeyRangeRequest) String() string { return proto.CompactTextString(m) } func (*StreamKeyRangeRequest) ProtoMessage() {} func (*StreamKeyRangeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{2} + return fileDescriptor_binlogdata_1f081d4c0b940318, []int{2} } func (m *StreamKeyRangeRequest) XXX_Unmarshal(b []byte) 
error { return xxx_messageInfo_StreamKeyRangeRequest.Unmarshal(m, b) @@ -402,7 +405,7 @@ func (m *StreamKeyRangeResponse) Reset() { *m = StreamKeyRangeResponse{} func (m *StreamKeyRangeResponse) String() string { return proto.CompactTextString(m) } func (*StreamKeyRangeResponse) ProtoMessage() {} func (*StreamKeyRangeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{3} + return fileDescriptor_binlogdata_1f081d4c0b940318, []int{3} } func (m *StreamKeyRangeResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamKeyRangeResponse.Unmarshal(m, b) @@ -446,7 +449,7 @@ func (m *StreamTablesRequest) Reset() { *m = StreamTablesRequest{} } func (m *StreamTablesRequest) String() string { return proto.CompactTextString(m) } func (*StreamTablesRequest) ProtoMessage() {} func (*StreamTablesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{4} + return fileDescriptor_binlogdata_1f081d4c0b940318, []int{4} } func (m *StreamTablesRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamTablesRequest.Unmarshal(m, b) @@ -499,7 +502,7 @@ func (m *StreamTablesResponse) Reset() { *m = StreamTablesResponse{} } func (m *StreamTablesResponse) String() string { return proto.CompactTextString(m) } func (*StreamTablesResponse) ProtoMessage() {} func (*StreamTablesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{5} + return fileDescriptor_binlogdata_1f081d4c0b940318, []int{5} } func (m *StreamTablesResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamTablesResponse.Unmarshal(m, b) @@ -544,7 +547,7 @@ func (m *Rule) Reset() { *m = Rule{} } func (m *Rule) String() string { return proto.CompactTextString(m) } func (*Rule) ProtoMessage() {} func (*Rule) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{6} + return fileDescriptor_binlogdata_1f081d4c0b940318, []int{6} } 
func (m *Rule) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Rule.Unmarshal(m, b) @@ -591,7 +594,7 @@ func (m *Filter) Reset() { *m = Filter{} } func (m *Filter) String() string { return proto.CompactTextString(m) } func (*Filter) ProtoMessage() {} func (*Filter) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{7} + return fileDescriptor_binlogdata_1f081d4c0b940318, []int{7} } func (m *Filter) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Filter.Unmarshal(m, b) @@ -646,7 +649,7 @@ func (m *BinlogSource) Reset() { *m = BinlogSource{} } func (m *BinlogSource) String() string { return proto.CompactTextString(m) } func (*BinlogSource) ProtoMessage() {} func (*BinlogSource) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{8} + return fileDescriptor_binlogdata_1f081d4c0b940318, []int{8} } func (m *BinlogSource) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BinlogSource.Unmarshal(m, b) @@ -728,7 +731,7 @@ func (m *RowChange) Reset() { *m = RowChange{} } func (m *RowChange) String() string { return proto.CompactTextString(m) } func (*RowChange) ProtoMessage() {} func (*RowChange) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{9} + return fileDescriptor_binlogdata_1f081d4c0b940318, []int{9} } func (m *RowChange) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RowChange.Unmarshal(m, b) @@ -775,7 +778,7 @@ func (m *RowEvent) Reset() { *m = RowEvent{} } func (m *RowEvent) String() string { return proto.CompactTextString(m) } func (*RowEvent) ProtoMessage() {} func (*RowEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{10} + return fileDescriptor_binlogdata_1f081d4c0b940318, []int{10} } func (m *RowEvent) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RowEvent.Unmarshal(m, b) @@ -821,7 +824,7 @@ func (m *FieldEvent) Reset() { *m = FieldEvent{} } func (m *FieldEvent) 
String() string { return proto.CompactTextString(m) } func (*FieldEvent) ProtoMessage() {} func (*FieldEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{11} + return fileDescriptor_binlogdata_1f081d4c0b940318, []int{11} } func (m *FieldEvent) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_FieldEvent.Unmarshal(m, b) @@ -868,7 +871,7 @@ func (m *ShardGtid) Reset() { *m = ShardGtid{} } func (m *ShardGtid) String() string { return proto.CompactTextString(m) } func (*ShardGtid) ProtoMessage() {} func (*ShardGtid) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{12} + return fileDescriptor_binlogdata_1f081d4c0b940318, []int{12} } func (m *ShardGtid) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ShardGtid.Unmarshal(m, b) @@ -920,7 +923,7 @@ func (m *VGtid) Reset() { *m = VGtid{} } func (m *VGtid) String() string { return proto.CompactTextString(m) } func (*VGtid) ProtoMessage() {} func (*VGtid) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{13} + return fileDescriptor_binlogdata_1f081d4c0b940318, []int{13} } func (m *VGtid) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VGtid.Unmarshal(m, b) @@ -947,6 +950,122 @@ func (m *VGtid) GetShardGtids() []*ShardGtid { return nil } +type KeyspaceShard struct { + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyspaceShard) Reset() { *m = KeyspaceShard{} } +func (m *KeyspaceShard) String() string { return proto.CompactTextString(m) } +func (*KeyspaceShard) ProtoMessage() {} +func (*KeyspaceShard) Descriptor() ([]byte, []int) { + return fileDescriptor_binlogdata_1f081d4c0b940318, []int{14} +} +func (m *KeyspaceShard) XXX_Unmarshal(b 
[]byte) error { + return xxx_messageInfo_KeyspaceShard.Unmarshal(m, b) +} +func (m *KeyspaceShard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyspaceShard.Marshal(b, m, deterministic) +} +func (dst *KeyspaceShard) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyspaceShard.Merge(dst, src) +} +func (m *KeyspaceShard) XXX_Size() int { + return xxx_messageInfo_KeyspaceShard.Size(m) +} +func (m *KeyspaceShard) XXX_DiscardUnknown() { + xxx_messageInfo_KeyspaceShard.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyspaceShard proto.InternalMessageInfo + +func (m *KeyspaceShard) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *KeyspaceShard) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} + +type Journal struct { + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Tables []string `protobuf:"bytes,2,rep,name=tables,proto3" json:"tables,omitempty"` + LocalPosition string `protobuf:"bytes,3,opt,name=local_position,json=localPosition,proto3" json:"local_position,omitempty"` + ShardGtids []*ShardGtid `protobuf:"bytes,4,rep,name=shard_gtids,json=shardGtids,proto3" json:"shard_gtids,omitempty"` + Participants []*KeyspaceShard `protobuf:"bytes,5,rep,name=participants,proto3" json:"participants,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Journal) Reset() { *m = Journal{} } +func (m *Journal) String() string { return proto.CompactTextString(m) } +func (*Journal) ProtoMessage() {} +func (*Journal) Descriptor() ([]byte, []int) { + return fileDescriptor_binlogdata_1f081d4c0b940318, []int{15} +} +func (m *Journal) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Journal.Unmarshal(m, b) +} +func (m *Journal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Journal.Marshal(b, m, deterministic) +} +func (dst *Journal) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_Journal.Merge(dst, src) +} +func (m *Journal) XXX_Size() int { + return xxx_messageInfo_Journal.Size(m) +} +func (m *Journal) XXX_DiscardUnknown() { + xxx_messageInfo_Journal.DiscardUnknown(m) +} + +var xxx_messageInfo_Journal proto.InternalMessageInfo + +func (m *Journal) GetId() int64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Journal) GetTables() []string { + if m != nil { + return m.Tables + } + return nil +} + +func (m *Journal) GetLocalPosition() string { + if m != nil { + return m.LocalPosition + } + return "" +} + +func (m *Journal) GetShardGtids() []*ShardGtid { + if m != nil { + return m.ShardGtids + } + return nil +} + +func (m *Journal) GetParticipants() []*KeyspaceShard { + if m != nil { + return m.Participants + } + return nil +} + // VEvent represents a vstream event type VEvent struct { Type VEventType `protobuf:"varint,1,opt,name=type,proto3,enum=binlogdata.VEventType" json:"type,omitempty"` @@ -956,6 +1075,7 @@ type VEvent struct { RowEvent *RowEvent `protobuf:"bytes,5,opt,name=row_event,json=rowEvent,proto3" json:"row_event,omitempty"` FieldEvent *FieldEvent `protobuf:"bytes,6,opt,name=field_event,json=fieldEvent,proto3" json:"field_event,omitempty"` Vgtid *VGtid `protobuf:"bytes,7,opt,name=vgtid,proto3" json:"vgtid,omitempty"` + Journal *Journal `protobuf:"bytes,8,opt,name=journal,proto3" json:"journal,omitempty"` // current_time specifies the current time to handle clock skew. 
CurrentTime int64 `protobuf:"varint,20,opt,name=current_time,json=currentTime,proto3" json:"current_time,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -967,7 +1087,7 @@ func (m *VEvent) Reset() { *m = VEvent{} } func (m *VEvent) String() string { return proto.CompactTextString(m) } func (*VEvent) ProtoMessage() {} func (*VEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{14} + return fileDescriptor_binlogdata_1f081d4c0b940318, []int{16} } func (m *VEvent) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VEvent.Unmarshal(m, b) @@ -1036,6 +1156,13 @@ func (m *VEvent) GetVgtid() *VGtid { return nil } +func (m *VEvent) GetJournal() *Journal { + if m != nil { + return m.Journal + } + return nil +} + func (m *VEvent) GetCurrentTime() int64 { if m != nil { return m.CurrentTime @@ -1059,7 +1186,7 @@ func (m *VStreamRequest) Reset() { *m = VStreamRequest{} } func (m *VStreamRequest) String() string { return proto.CompactTextString(m) } func (*VStreamRequest) ProtoMessage() {} func (*VStreamRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{15} + return fileDescriptor_binlogdata_1f081d4c0b940318, []int{17} } func (m *VStreamRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VStreamRequest.Unmarshal(m, b) @@ -1126,7 +1253,7 @@ func (m *VStreamResponse) Reset() { *m = VStreamResponse{} } func (m *VStreamResponse) String() string { return proto.CompactTextString(m) } func (*VStreamResponse) ProtoMessage() {} func (*VStreamResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{16} + return fileDescriptor_binlogdata_1f081d4c0b940318, []int{18} } func (m *VStreamResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VStreamResponse.Unmarshal(m, b) @@ -1169,7 +1296,7 @@ func (m *VStreamRowsRequest) Reset() { *m = VStreamRowsRequest{} } func (m *VStreamRowsRequest) String() string { return 
proto.CompactTextString(m) } func (*VStreamRowsRequest) ProtoMessage() {} func (*VStreamRowsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{17} + return fileDescriptor_binlogdata_1f081d4c0b940318, []int{19} } func (m *VStreamRowsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VStreamRowsRequest.Unmarshal(m, b) @@ -1240,7 +1367,7 @@ func (m *VStreamRowsResponse) Reset() { *m = VStreamRowsResponse{} } func (m *VStreamRowsResponse) String() string { return proto.CompactTextString(m) } func (*VStreamRowsResponse) ProtoMessage() {} func (*VStreamRowsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{18} + return fileDescriptor_binlogdata_1f081d4c0b940318, []int{20} } func (m *VStreamRowsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VStreamRowsResponse.Unmarshal(m, b) @@ -1311,6 +1438,8 @@ func init() { proto.RegisterType((*FieldEvent)(nil), "binlogdata.FieldEvent") proto.RegisterType((*ShardGtid)(nil), "binlogdata.ShardGtid") proto.RegisterType((*VGtid)(nil), "binlogdata.VGtid") + proto.RegisterType((*KeyspaceShard)(nil), "binlogdata.KeyspaceShard") + proto.RegisterType((*Journal)(nil), "binlogdata.Journal") proto.RegisterType((*VEvent)(nil), "binlogdata.VEvent") proto.RegisterType((*VStreamRequest)(nil), "binlogdata.VStreamRequest") proto.RegisterType((*VStreamResponse)(nil), "binlogdata.VStreamResponse") @@ -1321,94 +1450,101 @@ func init() { proto.RegisterEnum("binlogdata.BinlogTransaction_Statement_Category", BinlogTransaction_Statement_Category_name, BinlogTransaction_Statement_Category_value) } -func init() { proto.RegisterFile("binlogdata.proto", fileDescriptor_binlogdata_e3df2e837eaa5305) } - -var fileDescriptor_binlogdata_e3df2e837eaa5305 = []byte{ - // 1372 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x57, 0xdd, 0x72, 0xdb, 0x54, - 0x10, 0xae, 0x6d, 0xf9, 0x6f, 
0x95, 0x26, 0xca, 0xc9, 0x0f, 0x9e, 0x0c, 0x65, 0x82, 0x06, 0x68, - 0xc8, 0x0c, 0x4e, 0x31, 0x50, 0xae, 0xa0, 0xe3, 0x1f, 0xd5, 0x75, 0xab, 0xd8, 0xe9, 0xb1, 0x9a, - 0x32, 0xbd, 0xd1, 0x28, 0xd2, 0x71, 0xa2, 0x89, 0x2c, 0x39, 0xd2, 0xb1, 0x83, 0x1f, 0x80, 0xe1, - 0x01, 0xb8, 0xe5, 0x05, 0xb8, 0xe3, 0x05, 0xb8, 0x63, 0x78, 0x13, 0xde, 0x83, 0x39, 0x3f, 0x92, - 0xed, 0xb4, 0xb4, 0x81, 0x19, 0x2e, 0xb8, 0xc9, 0xec, 0xff, 0xd9, 0xfd, 0x76, 0xbd, 0xda, 0x80, - 0x76, 0xe6, 0x87, 0x41, 0x74, 0xee, 0x39, 0xd4, 0xa9, 0x4f, 0xe2, 0x88, 0x46, 0x08, 0x16, 0x92, - 0x3d, 0x75, 0x46, 0xe3, 0x89, 0x2b, 0x14, 0x7b, 0xea, 0xd5, 0x94, 0xc4, 0x73, 0xc9, 0xac, 0xd3, - 0x68, 0x12, 0x2d, 0xbc, 0xf4, 0x63, 0x28, 0xb7, 0x2f, 0x9c, 0x38, 0x21, 0x14, 0xed, 0x42, 0xc9, - 0x0d, 0x7c, 0x12, 0xd2, 0x5a, 0x6e, 0x3f, 0x77, 0x50, 0xc4, 0x92, 0x43, 0x08, 0x14, 0x37, 0x0a, - 0xc3, 0x5a, 0x9e, 0x4b, 0x39, 0xcd, 0x6c, 0x13, 0x12, 0xcf, 0x48, 0x5c, 0x2b, 0x08, 0x5b, 0xc1, - 0xe9, 0x7f, 0x16, 0x60, 0xb3, 0xc5, 0xf3, 0xb0, 0x62, 0x27, 0x4c, 0x1c, 0x97, 0xfa, 0x51, 0x88, - 0xba, 0x00, 0x09, 0x75, 0x28, 0x19, 0x93, 0x90, 0x26, 0xb5, 0xdc, 0x7e, 0xe1, 0x40, 0x6d, 0xdc, - 0xaf, 0x2f, 0x55, 0xf0, 0x9a, 0x4b, 0x7d, 0x98, 0xda, 0xe3, 0x25, 0x57, 0xd4, 0x00, 0x95, 0xcc, - 0x48, 0x48, 0x6d, 0x1a, 0x5d, 0x92, 0xb0, 0xa6, 0xec, 0xe7, 0x0e, 0xd4, 0xc6, 0x66, 0x5d, 0x14, - 0x68, 0x30, 0x8d, 0xc5, 0x14, 0x18, 0x48, 0x46, 0xef, 0xfd, 0x91, 0x87, 0x6a, 0x16, 0x0d, 0x99, - 0x50, 0x71, 0x1d, 0x4a, 0xce, 0xa3, 0x78, 0xce, 0xcb, 0x5c, 0x6f, 0x3c, 0xb8, 0x65, 0x22, 0xf5, - 0xb6, 0xf4, 0xc3, 0x59, 0x04, 0xf4, 0x19, 0x94, 0x5d, 0x81, 0x1e, 0x47, 0x47, 0x6d, 0x6c, 0x2d, - 0x07, 0x93, 0xc0, 0xe2, 0xd4, 0x06, 0x69, 0x50, 0x48, 0xae, 0x02, 0x0e, 0xd9, 0x1a, 0x66, 0xa4, - 0xfe, 0x4b, 0x0e, 0x2a, 0x69, 0x5c, 0xb4, 0x05, 0x1b, 0x2d, 0xd3, 0x7e, 0xd1, 0xc7, 0x46, 0x7b, - 0xd0, 0xed, 0xf7, 0x5e, 0x19, 0x1d, 0xed, 0x0e, 0x5a, 0x83, 0x4a, 0xcb, 0xb4, 0x5b, 0x46, 0xb7, - 0xd7, 0xd7, 0x72, 0xe8, 0x2e, 0x54, 0x5b, 0xa6, 0xdd, 0x1e, 0x1c, 
0x1f, 0xf7, 0x2c, 0x2d, 0x8f, - 0x36, 0x40, 0x6d, 0x99, 0x36, 0x1e, 0x98, 0x66, 0xab, 0xd9, 0x7e, 0xa6, 0x15, 0xd0, 0x0e, 0x6c, - 0xb6, 0x4c, 0xbb, 0x73, 0x6c, 0xda, 0x1d, 0xe3, 0x04, 0x1b, 0xed, 0xa6, 0x65, 0x74, 0x34, 0x05, - 0x01, 0x94, 0x98, 0xb8, 0x63, 0x6a, 0x45, 0x49, 0x0f, 0x0d, 0x4b, 0x2b, 0xc9, 0x70, 0xbd, 0xfe, - 0xd0, 0xc0, 0x96, 0x56, 0x96, 0xec, 0x8b, 0x93, 0x4e, 0xd3, 0x32, 0xb4, 0x8a, 0x64, 0x3b, 0x86, - 0x69, 0x58, 0x86, 0x56, 0x7d, 0xaa, 0x54, 0xf2, 0x5a, 0xe1, 0xa9, 0x52, 0x29, 0x68, 0x8a, 0xfe, - 0x53, 0x0e, 0x76, 0x86, 0x34, 0x26, 0xce, 0xf8, 0x19, 0x99, 0x63, 0x27, 0x3c, 0x27, 0x98, 0x5c, - 0x4d, 0x49, 0x42, 0xd1, 0x1e, 0x54, 0x26, 0x51, 0xe2, 0x33, 0xec, 0x38, 0xc0, 0x55, 0x9c, 0xf1, - 0xe8, 0x08, 0xaa, 0x97, 0x64, 0x6e, 0xc7, 0xcc, 0x5e, 0x02, 0x86, 0xea, 0xd9, 0x40, 0x66, 0x91, - 0x2a, 0x97, 0x92, 0x5a, 0xc6, 0xb7, 0xf0, 0x6e, 0x7c, 0xf5, 0x11, 0xec, 0xde, 0x4c, 0x2a, 0x99, - 0x44, 0x61, 0x42, 0x90, 0x09, 0x48, 0x38, 0xda, 0x74, 0xd1, 0x5b, 0x9e, 0x9f, 0xda, 0xb8, 0xf7, - 0xd6, 0x01, 0xc0, 0x9b, 0x67, 0x37, 0x45, 0xfa, 0xf7, 0xb0, 0x25, 0xde, 0xb1, 0x9c, 0xb3, 0x80, - 0x24, 0xb7, 0x29, 0x7d, 0x17, 0x4a, 0x94, 0x1b, 0xd7, 0xf2, 0xfb, 0x85, 0x83, 0x2a, 0x96, 0xdc, - 0x3f, 0xad, 0xd0, 0x83, 0xed, 0xd5, 0x97, 0xff, 0x93, 0xfa, 0xbe, 0x04, 0x05, 0x4f, 0x03, 0x82, - 0xb6, 0xa1, 0x38, 0x76, 0xa8, 0x7b, 0x21, 0xab, 0x11, 0x0c, 0x2b, 0x65, 0xe4, 0x07, 0x94, 0xc4, - 0xbc, 0x85, 0x55, 0x2c, 0x39, 0xfd, 0x01, 0x94, 0x1e, 0x73, 0x0a, 0x7d, 0x02, 0xc5, 0x78, 0xca, - 0x6a, 0x15, 0x3f, 0x75, 0x6d, 0x39, 0x01, 0x16, 0x18, 0x0b, 0xb5, 0xfe, 0x73, 0x1e, 0xd6, 0x44, - 0x42, 0xc3, 0x68, 0x1a, 0xbb, 0x84, 0x21, 0x78, 0x49, 0xe6, 0xc9, 0xc4, 0x71, 0x49, 0x8a, 0x60, - 0xca, 0xb3, 0x64, 0x92, 0x0b, 0x27, 0xf6, 0xe4, 0xab, 0x82, 0x41, 0x5f, 0x81, 0xca, 0x91, 0xa4, - 0x36, 0x9d, 0x4f, 0x08, 0xc7, 0x70, 0xbd, 0xb1, 0xbd, 0x18, 0x2a, 0x8e, 0x13, 0xb5, 0xe6, 0x13, - 0x82, 0x81, 0x66, 0xf4, 0xea, 0x24, 0x2a, 0xb7, 0x98, 0xc4, 0x45, 0xff, 0x8a, 0x2b, 0xfd, 0x3b, - 0xcc, 
0xc0, 0x28, 0xc9, 0x28, 0x4b, 0xb5, 0x0a, 0x38, 0x52, 0x80, 0x50, 0x1d, 0x4a, 0x51, 0x68, - 0x7b, 0x5e, 0x50, 0x2b, 0xf3, 0x34, 0xdf, 0x5b, 0xb6, 0x1d, 0x84, 0x9d, 0x8e, 0xd9, 0x14, 0x2d, - 0x29, 0x46, 0x61, 0xc7, 0x0b, 0xf4, 0xe7, 0x50, 0xc5, 0xd1, 0x75, 0xfb, 0x82, 0x27, 0xa0, 0x43, - 0xe9, 0x8c, 0x8c, 0xa2, 0x98, 0xc8, 0xae, 0x82, 0xdc, 0x7a, 0x38, 0xba, 0xc6, 0x52, 0x83, 0xf6, - 0xa1, 0xe8, 0x8c, 0xd2, 0xc6, 0xac, 0x9a, 0x08, 0x85, 0xee, 0x40, 0x05, 0x47, 0xd7, 0x7c, 0x53, - 0xa2, 0x7b, 0x20, 0x10, 0xb1, 0x43, 0x67, 0x9c, 0xc2, 0x5d, 0xe5, 0x92, 0xbe, 0x33, 0x26, 0xe8, - 0x21, 0xa8, 0x71, 0x74, 0x6d, 0xbb, 0xfc, 0x79, 0x31, 0xb6, 0x6a, 0x63, 0x67, 0xa5, 0x95, 0x69, - 0x72, 0x18, 0xe2, 0x94, 0x4c, 0xf4, 0xe7, 0x00, 0x8f, 0x7d, 0x12, 0x78, 0xb7, 0x7a, 0xe4, 0x23, - 0x06, 0x1f, 0x09, 0xbc, 0x34, 0xfe, 0x9a, 0x4c, 0x99, 0x47, 0xc0, 0x52, 0xc7, 0x80, 0x18, 0xb2, - 0x6e, 0x77, 0xa9, 0xef, 0xfd, 0x8b, 0x19, 0x41, 0xa0, 0x9c, 0x53, 0xdf, 0xe3, 0xc3, 0x51, 0xc5, - 0x9c, 0xd6, 0x1f, 0x41, 0xf1, 0x94, 0x87, 0x7b, 0x08, 0x2a, 0xb7, 0xb2, 0x99, 0x38, 0x9d, 0xd8, - 0x95, 0x32, 0xb3, 0xa7, 0x31, 0x24, 0x29, 0x99, 0xe8, 0xbf, 0xe6, 0xa1, 0x74, 0x2a, 0x6a, 0x3c, - 0x04, 0x85, 0x0f, 0x9f, 0xf8, 0x9e, 0xec, 0x2e, 0xfb, 0x0a, 0x0b, 0x3e, 0x7e, 0xdc, 0x06, 0xbd, - 0x0f, 0x55, 0xea, 0x8f, 0x49, 0x42, 0x9d, 0xf1, 0x84, 0x67, 0x59, 0xc0, 0x0b, 0xc1, 0x9b, 0x32, - 0x65, 0x1f, 0x0d, 0x36, 0x32, 0x0a, 0x17, 0x31, 0x12, 0x7d, 0x0e, 0x55, 0xd6, 0x19, 0xfe, 0x8d, - 0xab, 0x15, 0x79, 0xab, 0xb7, 0x6f, 0xf4, 0x85, 0x3f, 0x8b, 0x2b, 0x71, 0xda, 0xeb, 0xaf, 0x41, - 0xe5, 0x58, 0x4a, 0x27, 0x31, 0xab, 0xbb, 0xab, 0xb3, 0x9a, 0xf6, 0x0c, 0xc3, 0x68, 0xd1, 0xbf, - 0xfb, 0x50, 0x9c, 0xf1, 0x94, 0xca, 0xf2, 0x5b, 0xbb, 0x5c, 0x1c, 0x07, 0x45, 0xe8, 0xd1, 0x87, - 0xb0, 0xe6, 0x4e, 0xe3, 0x98, 0x7f, 0x9c, 0xfd, 0x31, 0xa9, 0x6d, 0xf3, 0xda, 0x54, 0x29, 0xb3, - 0xfc, 0x31, 0xd1, 0x7f, 0xcc, 0xc3, 0xfa, 0xa9, 0x58, 0x5f, 0xe9, 0xca, 0x7c, 0x04, 0x5b, 0x64, - 0x34, 0x22, 0x2e, 0xf5, 0x67, 0xc4, 0x76, 
0x9d, 0x20, 0x20, 0xb1, 0xed, 0x7b, 0x72, 0xc4, 0x37, - 0xea, 0xe2, 0x8c, 0x69, 0x73, 0x79, 0xaf, 0x83, 0x37, 0x33, 0x5b, 0x29, 0xf2, 0x90, 0x01, 0x5b, - 0xfe, 0x78, 0x4c, 0x3c, 0xdf, 0xa1, 0xcb, 0x01, 0xc4, 0x0f, 0x60, 0x47, 0x4e, 0xd3, 0xa9, 0xd5, - 0x75, 0x28, 0x59, 0x84, 0xc9, 0x3c, 0xb2, 0x30, 0x1f, 0xb3, 0x9f, 0x77, 0x7c, 0x9e, 0x6d, 0xe1, - 0xbb, 0xd2, 0xd3, 0xe2, 0x42, 0x2c, 0x95, 0x2b, 0x1b, 0x5e, 0xb9, 0xb1, 0xe1, 0x17, 0x9b, 0xa0, - 0xf8, 0xae, 0x4d, 0xa0, 0x7f, 0x03, 0x1b, 0x19, 0x10, 0x72, 0x83, 0x1f, 0x42, 0x89, 0xf7, 0x26, - 0x1d, 0x41, 0xf4, 0xfa, 0x18, 0x61, 0x69, 0xa1, 0xff, 0x90, 0x07, 0x94, 0xfa, 0x47, 0xd7, 0xc9, - 0xff, 0x14, 0xcc, 0x6d, 0x28, 0x72, 0xb9, 0x44, 0x52, 0x30, 0x0c, 0x87, 0xc0, 0x49, 0xe8, 0xe4, - 0x32, 0x83, 0x51, 0x38, 0x3f, 0x67, 0x7f, 0x31, 0x49, 0xa6, 0x01, 0xc5, 0xd2, 0x42, 0xff, 0x2d, - 0x07, 0x5b, 0x2b, 0x38, 0x48, 0x2c, 0x17, 0x5b, 0x25, 0xf7, 0xf7, 0x5b, 0x05, 0x1d, 0x40, 0x65, - 0x72, 0xf9, 0x96, 0xed, 0x93, 0x69, 0xdf, 0xf8, 0xb3, 0xfc, 0x00, 0x94, 0x38, 0xba, 0x4e, 0x6a, - 0x0a, 0xf7, 0x5c, 0x5e, 0xb5, 0x5c, 0xce, 0xf6, 0xf5, 0x4a, 0x1d, 0x2b, 0xfb, 0x5a, 0x68, 0x0e, - 0xbf, 0x05, 0x75, 0x69, 0xed, 0xb3, 0xcb, 0xac, 0xd7, 0xed, 0x0f, 0xb0, 0xa1, 0xdd, 0x41, 0x15, - 0x50, 0x86, 0xd6, 0xe0, 0x44, 0xcb, 0x31, 0xca, 0xf8, 0xce, 0x68, 0x8b, 0x6b, 0x8f, 0x51, 0xb6, - 0x34, 0x2a, 0x1c, 0xfe, 0x9e, 0x03, 0x58, 0x6c, 0x18, 0xa4, 0x42, 0xf9, 0x45, 0xff, 0x59, 0x7f, - 0xf0, 0xb2, 0x2f, 0x02, 0x74, 0xad, 0x5e, 0x47, 0xcb, 0xa1, 0x2a, 0x14, 0xc5, 0xf9, 0x98, 0x67, - 0x2f, 0xc8, 0xdb, 0xb1, 0xc0, 0x0e, 0xcb, 0xec, 0x70, 0x54, 0x50, 0x19, 0x0a, 0xd9, 0x79, 0x28, - 0xef, 0xc1, 0x12, 0x0b, 0x88, 0x8d, 0x13, 0xb3, 0xd9, 0x36, 0xb4, 0x32, 0x53, 0x64, 0x97, 0x21, - 0x40, 0x29, 0x3d, 0x0b, 0x99, 0x27, 0x3b, 0x26, 0x81, 0xbd, 0x33, 0xb0, 0x9e, 0x18, 0x58, 0x53, - 0x99, 0x0c, 0x0f, 0x5e, 0x6a, 0x6b, 0x4c, 0xf6, 0xb8, 0x67, 0x98, 0x1d, 0xed, 0x2e, 0xbb, 0x26, - 0x9f, 0x18, 0x4d, 0x6c, 0xb5, 0x8c, 0xa6, 0xa5, 0xad, 0x33, 0xcd, 0x29, 0x4f, 
0x70, 0xa3, 0xf5, - 0xe9, 0xab, 0xfb, 0x33, 0x9f, 0x92, 0x24, 0xa9, 0xfb, 0xd1, 0x91, 0xa0, 0x8e, 0xce, 0xa3, 0xa3, - 0x19, 0x3d, 0xe2, 0xff, 0xa3, 0x1c, 0x2d, 0x7e, 0x08, 0x67, 0x25, 0x2e, 0xf9, 0xe2, 0xaf, 0x00, - 0x00, 0x00, 0xff, 0xff, 0x59, 0xa0, 0xff, 0x30, 0xff, 0x0c, 0x00, 0x00, +func init() { proto.RegisterFile("binlogdata.proto", fileDescriptor_binlogdata_1f081d4c0b940318) } + +var fileDescriptor_binlogdata_1f081d4c0b940318 = []byte{ + // 1484 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x57, 0xcb, 0x72, 0xe3, 0x54, + 0x13, 0x1e, 0xdb, 0x92, 0x2f, 0xad, 0x5c, 0x94, 0x93, 0xcb, 0xef, 0x3f, 0xc5, 0x50, 0x41, 0xc5, + 0x30, 0x21, 0x55, 0x38, 0x83, 0x81, 0x61, 0x35, 0x4c, 0xf9, 0xa2, 0xc9, 0x38, 0x51, 0xec, 0xcc, + 0x89, 0x92, 0xa1, 0x66, 0xa3, 0x52, 0xa4, 0x93, 0x44, 0x44, 0x96, 0x3c, 0xd2, 0x71, 0x42, 0x1e, + 0x80, 0xe2, 0x01, 0xd8, 0xf2, 0x02, 0x3c, 0x04, 0x5b, 0xb6, 0x14, 0x4f, 0xc0, 0x8a, 0xf7, 0xa0, + 0xce, 0x45, 0xb2, 0x9d, 0xb9, 0x65, 0xa8, 0x62, 0xc1, 0xc6, 0xd5, 0xa7, 0x4f, 0x77, 0xab, 0xfb, + 0xeb, 0x3e, 0xdd, 0x6d, 0xd0, 0x4f, 0x82, 0x28, 0x8c, 0xcf, 0x7c, 0x97, 0xba, 0x8d, 0x51, 0x12, + 0xd3, 0x18, 0xc1, 0x84, 0xb3, 0xae, 0x5d, 0xd2, 0x64, 0xe4, 0x89, 0x8b, 0x75, 0xed, 0xe5, 0x98, + 0x24, 0xd7, 0xf2, 0xb0, 0x40, 0xe3, 0x51, 0x3c, 0xd1, 0x32, 0xf6, 0xa1, 0xd2, 0x39, 0x77, 0x93, + 0x94, 0x50, 0xb4, 0x06, 0x65, 0x2f, 0x0c, 0x48, 0x44, 0xeb, 0x85, 0x8d, 0xc2, 0xa6, 0x8a, 0xe5, + 0x09, 0x21, 0x50, 0xbc, 0x38, 0x8a, 0xea, 0x45, 0xce, 0xe5, 0x34, 0x93, 0x4d, 0x49, 0x72, 0x49, + 0x92, 0x7a, 0x49, 0xc8, 0x8a, 0x93, 0xf1, 0x57, 0x09, 0x96, 0xda, 0xdc, 0x0f, 0x3b, 0x71, 0xa3, + 0xd4, 0xf5, 0x68, 0x10, 0x47, 0x68, 0x07, 0x20, 0xa5, 0x2e, 0x25, 0x43, 0x12, 0xd1, 0xb4, 0x5e, + 0xd8, 0x28, 0x6d, 0x6a, 0xcd, 0xfb, 0x8d, 0xa9, 0x08, 0x5e, 0x51, 0x69, 0x1c, 0x66, 0xf2, 0x78, + 0x4a, 0x15, 0x35, 0x41, 0x23, 0x97, 0x24, 0xa2, 0x0e, 0x8d, 0x2f, 0x48, 0x54, 0x57, 0x36, 0x0a, + 0x9b, 0x5a, 0x73, 0xa9, 0x21, 
0x02, 0x34, 0xd9, 0x8d, 0xcd, 0x2e, 0x30, 0x90, 0x9c, 0x5e, 0xff, + 0xad, 0x08, 0xb5, 0xdc, 0x1a, 0xb2, 0xa0, 0xea, 0xb9, 0x94, 0x9c, 0xc5, 0xc9, 0x35, 0x0f, 0x73, + 0xa1, 0xf9, 0xe0, 0x96, 0x8e, 0x34, 0x3a, 0x52, 0x0f, 0xe7, 0x16, 0xd0, 0x67, 0x50, 0xf1, 0x04, + 0x7a, 0x1c, 0x1d, 0xad, 0xb9, 0x3c, 0x6d, 0x4c, 0x02, 0x8b, 0x33, 0x19, 0xa4, 0x43, 0x29, 0x7d, + 0x19, 0x72, 0xc8, 0xe6, 0x30, 0x23, 0x8d, 0x5f, 0x0a, 0x50, 0xcd, 0xec, 0xa2, 0x65, 0x58, 0x6c, + 0x5b, 0xce, 0x51, 0x1f, 0x9b, 0x9d, 0xc1, 0x4e, 0xbf, 0xf7, 0xc2, 0xec, 0xea, 0x77, 0xd0, 0x1c, + 0x54, 0xdb, 0x96, 0xd3, 0x36, 0x77, 0x7a, 0x7d, 0xbd, 0x80, 0xe6, 0xa1, 0xd6, 0xb6, 0x9c, 0xce, + 0x60, 0x7f, 0xbf, 0x67, 0xeb, 0x45, 0xb4, 0x08, 0x5a, 0xdb, 0x72, 0xf0, 0xc0, 0xb2, 0xda, 0xad, + 0xce, 0x9e, 0x5e, 0x42, 0xab, 0xb0, 0xd4, 0xb6, 0x9c, 0xee, 0xbe, 0xe5, 0x74, 0xcd, 0x03, 0x6c, + 0x76, 0x5a, 0xb6, 0xd9, 0xd5, 0x15, 0x04, 0x50, 0x66, 0xec, 0xae, 0xa5, 0xab, 0x92, 0x3e, 0x34, + 0x6d, 0xbd, 0x2c, 0xcd, 0xf5, 0xfa, 0x87, 0x26, 0xb6, 0xf5, 0x8a, 0x3c, 0x1e, 0x1d, 0x74, 0x5b, + 0xb6, 0xa9, 0x57, 0xe5, 0xb1, 0x6b, 0x5a, 0xa6, 0x6d, 0xea, 0xb5, 0x5d, 0xa5, 0x5a, 0xd4, 0x4b, + 0xbb, 0x4a, 0xb5, 0xa4, 0x2b, 0xc6, 0x4f, 0x05, 0x58, 0x3d, 0xa4, 0x09, 0x71, 0x87, 0x7b, 0xe4, + 0x1a, 0xbb, 0xd1, 0x19, 0xc1, 0xe4, 0xe5, 0x98, 0xa4, 0x14, 0xad, 0x43, 0x75, 0x14, 0xa7, 0x01, + 0xc3, 0x8e, 0x03, 0x5c, 0xc3, 0xf9, 0x19, 0x6d, 0x43, 0xed, 0x82, 0x5c, 0x3b, 0x09, 0x93, 0x97, + 0x80, 0xa1, 0x46, 0x5e, 0x90, 0xb9, 0xa5, 0xea, 0x85, 0xa4, 0xa6, 0xf1, 0x2d, 0xbd, 0x1b, 0x5f, + 0xe3, 0x14, 0xd6, 0x6e, 0x3a, 0x95, 0x8e, 0xe2, 0x28, 0x25, 0xc8, 0x02, 0x24, 0x14, 0x1d, 0x3a, + 0xc9, 0x2d, 0xf7, 0x4f, 0x6b, 0xde, 0x7d, 0x6b, 0x01, 0xe0, 0xa5, 0x93, 0x9b, 0x2c, 0xe3, 0x7b, + 0x58, 0x16, 0xdf, 0xb1, 0xdd, 0x93, 0x90, 0xa4, 0xb7, 0x09, 0x7d, 0x0d, 0xca, 0x94, 0x0b, 0xd7, + 0x8b, 0x1b, 0xa5, 0xcd, 0x1a, 0x96, 0xa7, 0xf7, 0x8d, 0xd0, 0x87, 0x95, 0xd9, 0x2f, 0xff, 0x2b, + 0xf1, 0x7d, 0x09, 0x0a, 0x1e, 0x87, 0x04, 0xad, 0x80, 0x3a, 0x74, 
0xa9, 0x77, 0x2e, 0xa3, 0x11, + 0x07, 0x16, 0xca, 0x69, 0x10, 0x52, 0x92, 0xf0, 0x14, 0xd6, 0xb0, 0x3c, 0x19, 0x0f, 0xa0, 0xfc, + 0x84, 0x53, 0xe8, 0x13, 0x50, 0x93, 0x31, 0x8b, 0x55, 0x3c, 0x75, 0x7d, 0xda, 0x01, 0x66, 0x18, + 0x8b, 0x6b, 0xe3, 0xe7, 0x22, 0xcc, 0x09, 0x87, 0x0e, 0xe3, 0x71, 0xe2, 0x11, 0x86, 0xe0, 0x05, + 0xb9, 0x4e, 0x47, 0xae, 0x47, 0x32, 0x04, 0xb3, 0x33, 0x73, 0x26, 0x3d, 0x77, 0x13, 0x5f, 0x7e, + 0x55, 0x1c, 0xd0, 0x57, 0xa0, 0x71, 0x24, 0xa9, 0x43, 0xaf, 0x47, 0x84, 0x63, 0xb8, 0xd0, 0x5c, + 0x99, 0x14, 0x15, 0xc7, 0x89, 0xda, 0xd7, 0x23, 0x82, 0x81, 0xe6, 0xf4, 0x6c, 0x25, 0x2a, 0xb7, + 0xa8, 0xc4, 0x49, 0xfe, 0xd4, 0x99, 0xfc, 0x6d, 0xe5, 0x60, 0x94, 0xa5, 0x95, 0xa9, 0x58, 0x05, + 0x1c, 0x19, 0x40, 0xa8, 0x01, 0xe5, 0x38, 0x72, 0x7c, 0x3f, 0xac, 0x57, 0xb8, 0x9b, 0xff, 0x9b, + 0x96, 0x1d, 0x44, 0xdd, 0xae, 0xd5, 0x12, 0x29, 0x51, 0xe3, 0xa8, 0xeb, 0x87, 0xc6, 0x33, 0xa8, + 0xe1, 0xf8, 0xaa, 0x73, 0xce, 0x1d, 0x30, 0xa0, 0x7c, 0x42, 0x4e, 0xe3, 0x84, 0xc8, 0xac, 0x82, + 0xec, 0x7a, 0x38, 0xbe, 0xc2, 0xf2, 0x06, 0x6d, 0x80, 0xea, 0x9e, 0x66, 0x89, 0x99, 0x15, 0x11, + 0x17, 0x86, 0x0b, 0x55, 0x1c, 0x5f, 0xf1, 0x4e, 0x89, 0xee, 0x82, 0x40, 0xc4, 0x89, 0xdc, 0x61, + 0x06, 0x77, 0x8d, 0x73, 0xfa, 0xee, 0x90, 0xa0, 0x87, 0xa0, 0x25, 0xf1, 0x95, 0xe3, 0xf1, 0xcf, + 0x8b, 0xb2, 0xd5, 0x9a, 0xab, 0x33, 0xa9, 0xcc, 0x9c, 0xc3, 0x90, 0x64, 0x64, 0x6a, 0x3c, 0x03, + 0x78, 0x12, 0x90, 0xd0, 0xbf, 0xd5, 0x47, 0x3e, 0x66, 0xf0, 0x91, 0xd0, 0xcf, 0xec, 0xcf, 0x49, + 0x97, 0xb9, 0x05, 0x2c, 0xef, 0x18, 0x10, 0x87, 0x2c, 0xdb, 0x3b, 0x34, 0xf0, 0xff, 0x41, 0x8d, + 0x20, 0x50, 0xce, 0x68, 0xe0, 0xf3, 0xe2, 0xa8, 0x61, 0x4e, 0x1b, 0x8f, 0x41, 0x3d, 0xe6, 0xe6, + 0x1e, 0x82, 0xc6, 0xa5, 0x1c, 0xc6, 0xce, 0x2a, 0x76, 0x26, 0xcc, 0xfc, 0xd3, 0x18, 0xd2, 0x8c, + 0x4c, 0x8d, 0x16, 0xcc, 0xef, 0xc9, 0xcf, 0x72, 0x81, 0xf7, 0xf7, 0xcb, 0xf8, 0xbd, 0x00, 0x95, + 0xdd, 0x78, 0x9c, 0x44, 0x6e, 0x88, 0x16, 0xa0, 0x18, 0xf8, 0x5c, 0xaf, 0x84, 0x8b, 0x81, 0xff, + 0xc6, 
0x7e, 0x71, 0x0f, 0x16, 0xc2, 0xd8, 0x73, 0x43, 0x27, 0xef, 0x34, 0x22, 0xaa, 0x79, 0xce, + 0x3d, 0xc8, 0xda, 0xcd, 0x8d, 0xa8, 0x94, 0x5b, 0x46, 0x85, 0x1e, 0xc1, 0xdc, 0xc8, 0x4d, 0x68, + 0xe0, 0x05, 0x23, 0x97, 0xcd, 0x6a, 0x95, 0x2b, 0xfe, 0x7f, 0x5a, 0x71, 0x26, 0x6a, 0x3c, 0x23, + 0x6e, 0xfc, 0x59, 0x84, 0xf2, 0xb1, 0x48, 0xfc, 0x16, 0x28, 0xfc, 0x45, 0x8a, 0x21, 0xbb, 0x36, + 0x6d, 0x41, 0x48, 0xf0, 0x37, 0xc9, 0x65, 0xd0, 0x07, 0x50, 0xa3, 0xc1, 0x90, 0xa4, 0xd4, 0x1d, + 0x8e, 0x38, 0x44, 0x25, 0x3c, 0x61, 0xbc, 0x2e, 0x7d, 0x6c, 0x92, 0xb2, 0x77, 0xa4, 0x70, 0x16, + 0x23, 0xd1, 0xe7, 0x50, 0x63, 0xe5, 0xca, 0x07, 0x7f, 0x5d, 0xe5, 0xf5, 0xbf, 0x72, 0xa3, 0x58, + 0xf9, 0x67, 0x71, 0x35, 0xc9, 0x1e, 0xc0, 0xd7, 0xa0, 0xf1, 0x02, 0x93, 0x4a, 0xe2, 0x01, 0xaf, + 0xcd, 0x3e, 0xe0, 0xac, 0x90, 0x31, 0x9c, 0x4e, 0x8a, 0xfa, 0x3e, 0xa8, 0x97, 0xdc, 0xa5, 0x8a, + 0x5c, 0x40, 0xa6, 0x83, 0xe3, 0x98, 0x8a, 0x7b, 0xd6, 0xdd, 0xbf, 0x13, 0x09, 0xae, 0x57, 0x5f, + 0xed, 0xee, 0x32, 0xf7, 0x38, 0x93, 0x41, 0x1f, 0xc1, 0x9c, 0x37, 0x4e, 0x12, 0xbe, 0xe0, 0x04, + 0x43, 0x52, 0x5f, 0xe1, 0x50, 0x68, 0x92, 0x67, 0x07, 0x43, 0x62, 0xfc, 0x58, 0x84, 0x85, 0x63, + 0x31, 0x02, 0xb2, 0xb1, 0xf3, 0x18, 0x96, 0xc9, 0xe9, 0x29, 0xf1, 0x68, 0x70, 0x49, 0x1c, 0xcf, + 0x0d, 0x43, 0x92, 0x38, 0xb2, 0x96, 0xb4, 0xe6, 0x62, 0x43, 0xac, 0x82, 0x1d, 0xce, 0xef, 0x75, + 0xf1, 0x52, 0x2e, 0x2b, 0x59, 0x3e, 0x32, 0x61, 0x39, 0x18, 0x0e, 0x89, 0x1f, 0xb8, 0x74, 0xda, + 0x80, 0x68, 0x22, 0xab, 0xf2, 0x45, 0x1e, 0xdb, 0x3b, 0x2e, 0x25, 0x13, 0x33, 0xb9, 0x46, 0x6e, + 0xe6, 0x1e, 0x2b, 0xd9, 0xe4, 0x2c, 0x9f, 0x64, 0xf3, 0x52, 0xd3, 0xe6, 0x4c, 0x2c, 0x2f, 0x67, + 0xa6, 0xa4, 0x72, 0x63, 0x4a, 0x4e, 0xba, 0xa9, 0xfa, 0xae, 0x6e, 0x6a, 0x3c, 0x82, 0xc5, 0x1c, + 0x08, 0x39, 0x05, 0xb7, 0xa0, 0xcc, 0x53, 0x99, 0x3d, 0x63, 0xf4, 0x6a, 0xd5, 0x61, 0x29, 0x61, + 0xfc, 0x50, 0x04, 0x94, 0xe9, 0xc7, 0x57, 0xe9, 0x7f, 0x14, 0xcc, 0x15, 0x50, 0x39, 0x5f, 0x22, + 0x29, 0x0e, 0x0c, 0x87, 0xd0, 0x4d, 0xe9, 
0xe8, 0x22, 0x87, 0x51, 0x28, 0x3f, 0x63, 0xbf, 0x98, + 0xa4, 0xe3, 0x90, 0x62, 0x29, 0x61, 0xfc, 0x5a, 0x80, 0xe5, 0x19, 0x1c, 0x24, 0x96, 0x93, 0xce, + 0x5c, 0x78, 0x73, 0x67, 0x46, 0x9b, 0x50, 0x1d, 0x5d, 0xbc, 0xa5, 0x83, 0xe7, 0xb7, 0xaf, 0x7d, + 0xc5, 0x1f, 0x82, 0x92, 0xc4, 0x57, 0x59, 0x7b, 0x9a, 0x1e, 0x57, 0x9c, 0xcf, 0x66, 0xde, 0x4c, + 0x1c, 0x33, 0x33, 0x4f, 0xdc, 0x6c, 0x7d, 0x03, 0xda, 0xd4, 0xe8, 0x64, 0xdb, 0x6d, 0x6f, 0xa7, + 0x3f, 0xc0, 0xa6, 0x7e, 0x07, 0x55, 0x41, 0x39, 0xb4, 0x07, 0x07, 0x7a, 0x81, 0x51, 0xe6, 0xb7, + 0x66, 0x47, 0x6c, 0xcc, 0x8c, 0x72, 0xa4, 0x50, 0x69, 0xeb, 0x8f, 0x02, 0xc0, 0xa4, 0x21, 0x21, + 0x0d, 0x2a, 0x47, 0xfd, 0xbd, 0xfe, 0xe0, 0x79, 0x5f, 0x18, 0xd8, 0xb1, 0x7b, 0x5d, 0xbd, 0x80, + 0x6a, 0xa0, 0x8a, 0x15, 0xbc, 0xc8, 0xbe, 0x20, 0xf7, 0xef, 0x12, 0x5b, 0xce, 0xf3, 0xe5, 0x5b, + 0x41, 0x15, 0x28, 0xe5, 0x2b, 0xb6, 0xdc, 0xa9, 0xcb, 0xcc, 0x20, 0x36, 0x0f, 0xac, 0x56, 0xc7, + 0xd4, 0x2b, 0xec, 0x22, 0xdf, 0xae, 0x01, 0xca, 0xd9, 0x6a, 0xcd, 0x34, 0xd9, 0x42, 0x0e, 0xec, + 0x3b, 0x03, 0xfb, 0xa9, 0x89, 0x75, 0x8d, 0xf1, 0xf0, 0xe0, 0xb9, 0x3e, 0xc7, 0x78, 0x4f, 0x7a, + 0xa6, 0xd5, 0xd5, 0xe7, 0xd9, 0x46, 0xfe, 0xd4, 0x6c, 0x61, 0xbb, 0x6d, 0xb6, 0x6c, 0x7d, 0x81, + 0xdd, 0x1c, 0x73, 0x07, 0x17, 0xd9, 0x67, 0x76, 0x07, 0x47, 0xb8, 0xdf, 0xb2, 0x74, 0xbd, 0xfd, + 0xe9, 0x8b, 0xfb, 0x97, 0x01, 0x25, 0x69, 0xda, 0x08, 0xe2, 0x6d, 0x41, 0x6d, 0x9f, 0xc5, 0xdb, + 0x97, 0x74, 0x9b, 0xff, 0xe9, 0xdb, 0x9e, 0xbc, 0x8a, 0x93, 0x32, 0xe7, 0x7c, 0xf1, 0x77, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x0c, 0x14, 0xe6, 0xb2, 0x50, 0x0e, 0x00, 0x00, } diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go b/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go index 1a964e7b02a..359cc782505 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go @@ -35,6 +35,7 @@ const ( updateQuery deleteQuery selectQuery + reshardingJournalQuery ) 
// buildControllerPlan parses the input query and returns an appropriate plan. @@ -58,15 +59,23 @@ func buildControllerPlan(query string) (*controllerPlan, error) { } func buildInsertPlan(ins *sqlparser.Insert) (*controllerPlan, error) { + switch sqlparser.String(ins.Table) { + case reshardingJournalTableName: + return &controllerPlan{ + opcode: reshardingJournalQuery, + query: sqlparser.String(ins), + }, nil + case vreplicationTableName: + // no-op + default: + return nil, fmt.Errorf("invalid table name: %v", sqlparser.String(ins.Table)) + } if ins.Action != sqlparser.InsertStr { return nil, fmt.Errorf("unsupported construct: %v", sqlparser.String(ins)) } if ins.Ignore != "" { return nil, fmt.Errorf("unsupported construct: %v", sqlparser.String(ins)) } - if sqlparser.String(ins.Table) != "_vt.vreplication" { - return nil, fmt.Errorf("invalid table name: %v", sqlparser.String(ins.Table)) - } if ins.Partitions != nil { return nil, fmt.Errorf("unsupported construct: %v", sqlparser.String(ins)) } @@ -106,7 +115,15 @@ func buildInsertPlan(ins *sqlparser.Insert) (*controllerPlan, error) { } func buildUpdatePlan(upd *sqlparser.Update) (*controllerPlan, error) { - if sqlparser.String(upd.TableExprs) != "_vt.vreplication" { + switch sqlparser.String(upd.TableExprs) { + case reshardingJournalTableName: + return &controllerPlan{ + opcode: reshardingJournalQuery, + query: sqlparser.String(upd), + }, nil + case vreplicationTableName: + // no-op + default: return nil, fmt.Errorf("invalid table name: %v", sqlparser.String(upd.TableExprs)) } if upd.OrderBy != nil || upd.Limit != nil { @@ -131,12 +148,20 @@ func buildUpdatePlan(upd *sqlparser.Update) (*controllerPlan, error) { } func buildDeletePlan(del *sqlparser.Delete) (*controllerPlan, error) { + switch sqlparser.String(del.TableExprs) { + case reshardingJournalTableName: + return &controllerPlan{ + opcode: reshardingJournalQuery, + query: sqlparser.String(del), + }, nil + case vreplicationTableName: + // no-op + default: + 
return nil, fmt.Errorf("invalid table name: %v", sqlparser.String(del.TableExprs)) + } if del.Targets != nil { return nil, fmt.Errorf("unsupported construct: %v", sqlparser.String(del)) } - if sqlparser.String(del.TableExprs) != "_vt.vreplication" { - return nil, fmt.Errorf("invalid table name: %v", sqlparser.String(del.TableExprs)) - } if del.Partitions != nil { return nil, fmt.Errorf("unsupported construct: %v", sqlparser.String(del)) } @@ -157,13 +182,20 @@ func buildDeletePlan(del *sqlparser.Delete) (*controllerPlan, error) { } func buildSelectPlan(sel *sqlparser.Select) (*controllerPlan, error) { - if sqlparser.String(sel.From) != "_vt.vreplication" { + switch sqlparser.String(sel.From) { + case reshardingJournalTableName: + return &controllerPlan{ + opcode: reshardingJournalQuery, + query: sqlparser.String(sel), + }, nil + case vreplicationTableName: + return &controllerPlan{ + opcode: selectQuery, + query: sqlparser.String(sel), + }, nil + default: return nil, fmt.Errorf("invalid table name: %v", sqlparser.String(sel.From)) } - return &controllerPlan{ - opcode: selectQuery, - query: sqlparser.String(sel), - }, nil } func extractID(where *sqlparser.Where) (int, error) { diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go b/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go index 533668a2955..18296c76b8f 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go @@ -124,8 +124,8 @@ func TestControllerPlan(t *testing.T) { in: "delete from a where id = 1", err: "invalid table name: a", }, { - in: "delete a, b from a where id = 1", - err: "unsupported construct: delete a, b from a where id = 1", + in: "delete a, b from _vt.vreplication where id = 1", + err: "unsupported construct: delete a, b from _vt.vreplication where id = 1", }, { in: "delete from _vt.vreplication where id = 1 order by id", err: "unsupported construct: delete 
from _vt.vreplication where id = 1 order by id asc", diff --git a/go/vt/vttablet/tabletmanager/vreplication/engine.go b/go/vt/vttablet/tabletmanager/vreplication/engine.go index bbe760f46d4..76a8ce639fe 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/engine.go +++ b/go/vt/vttablet/tabletmanager/vreplication/engine.go @@ -32,6 +32,17 @@ import ( "vitess.io/vitess/go/vt/topo" ) +const ( + reshardingJournalTableName = "_vt.resharding_journal" + vreplicationTableName = "_vt.vreplication" + CreateReshardingJournalTable = `create table if not exists _vt.resharding_journal( + id bigint, + db_name varbinary(255), + val blob, + primary key (id) +) ENGINE=InnoDB` +) + var tabletTypesStr = flag.String("vreplication_tablet_type", "REPLICA", "comma separated list of tablet types used as a source") // waitRetryTime can be changed to a smaller value for tests. @@ -102,7 +113,7 @@ func (vre *Engine) Open(ctx context.Context) error { // executeFetchMaybeCreateTable calls DBClient.ExecuteFetch and does one retry if // there's a failure due to mysql.ERNoSuchTable or mysql.ERBadDb which can be fixed -// by re-creating the _vt.vreplication table. +// by re-creating the vreplication tables. func (vre *Engine) executeFetchMaybeCreateTable(dbClient binlogplayer.DBClient, query string, maxrows int) (qr *sqltypes.Result, err error) { qr, err = dbClient.ExecuteFetch(query, maxrows) @@ -110,29 +121,33 @@ func (vre *Engine) executeFetchMaybeCreateTable(dbClient binlogplayer.DBClient, return } - // If it's a bad table or db, it could be because _vt.vreplication wasn't created. - // In that case we can try creating it again. + // If it's a bad table or db, it could be because the vreplication tables weren't created. + // In that case we can try creating them again.
merr, isSQLErr := err.(*mysql.SQLError) if !isSQLErr || !(merr.Num == mysql.ERNoSuchTable || merr.Num == mysql.ERBadDb || merr.Num == mysql.ERBadFieldError) { return qr, err } - log.Info("Looks like _vt.vreplication table may not exist. Trying to recreate... ") + log.Info("Looks like the vreplication tables may not exist. Trying to recreate... ") if merr.Num == mysql.ERNoSuchTable || merr.Num == mysql.ERBadDb { for _, query := range binlogplayer.CreateVReplicationTable() { if _, merr := dbClient.ExecuteFetch(query, 0); merr != nil { - log.Warningf("Failed to ensure _vt.vreplication table exists: %v", merr) + log.Warningf("Failed to ensure %s exists: %v", vreplicationTableName, merr) return nil, err } } + if _, merr := dbClient.ExecuteFetch(CreateReshardingJournalTable, 0); merr != nil { + log.Warningf("Failed to ensure %s exists: %v", reshardingJournalTableName, merr) + return nil, err + } } if merr.Num == mysql.ERBadFieldError { - log.Info("Adding column to table _vt.vreplication") + log.Infof("Adding column to table %s", vreplicationTableName) for _, query := range binlogplayer.AlterVReplicationTable() { if _, merr := dbClient.ExecuteFetch(query, 0); merr != nil { merr, isSQLErr := err.(*mysql.SQLError) if !isSQLErr || !(merr.Num == mysql.ERDupFieldName) { - log.Warningf("Failed to alter _vt.vreplication table: %v", merr) + log.Warningf("Failed to alter %s table: %v", vreplicationTableName, merr) return nil, err } } @@ -287,8 +302,8 @@ func (vre *Engine) Exec(query string) (*sqltypes.Result, error) { delete(vre.controllers, plan.id) } return vre.executeFetchMaybeCreateTable(dbClient, plan.query, 1) - case selectQuery: - // select queries are passed through. + case selectQuery, reshardingJournalQuery: + // select and resharding journal queries are passed through.
return vre.executeFetchMaybeCreateTable(dbClient, plan.query, 10000) } panic("unreachable") diff --git a/go/vt/vttablet/tabletmanager/vreplication/engine_test.go b/go/vt/vttablet/tabletmanager/vreplication/engine_test.go index a4ac882dd10..9d644c0a202 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/engine_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/engine_test.go @@ -412,6 +412,7 @@ func TestCreateDBAndTable(t *testing.T) { dbClient.ExpectRequest("CREATE DATABASE IF NOT EXISTS _vt", &sqltypes.Result{}, nil) dbClient.ExpectRequest("DROP TABLE IF EXISTS _vt.blp_checkpoint", &sqltypes.Result{}, nil) dbClient.ExpectRequestRE("CREATE TABLE IF NOT EXISTS _vt.vreplication.*", &sqltypes.Result{}, nil) + dbClient.ExpectRequestRE("create table if not exists _vt.resharding_journal.*", &sqltypes.Result{}, nil) dbClient.ExpectRequest("use _vt", &sqltypes.Result{}, nil) // Non-recoverable error. @@ -425,6 +426,7 @@ func TestCreateDBAndTable(t *testing.T) { dbClient.ExpectRequest("CREATE DATABASE IF NOT EXISTS _vt", &sqltypes.Result{}, nil) dbClient.ExpectRequest("DROP TABLE IF EXISTS _vt.blp_checkpoint", &sqltypes.Result{}, nil) dbClient.ExpectRequestRE("CREATE TABLE IF NOT EXISTS _vt.vreplication.*", &sqltypes.Result{}, nil) + dbClient.ExpectRequestRE("create table if not exists _vt.resharding_journal.*", &sqltypes.Result{}, nil) dbClient.ExpectRequest("insert into _vt.vreplication values (null)", &sqltypes.Result{InsertID: 1}, nil) diff --git a/go/vt/wrangler/migrate_writes.go b/go/vt/wrangler/migrate_writes.go new file mode 100644 index 00000000000..8aa9a86f1c4 --- /dev/null +++ b/go/vt/wrangler/migrate_writes.go @@ -0,0 +1,569 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wrangler + +import ( + "fmt" + "hash/fnv" + "math" + "sort" + "strings" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/sync2" + "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/concurrency" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +// MigrationType specifies the type of migration. +type MigrationType int + +// The following constants define the migration type. +const ( + MigrateTables = MigrationType(iota) + MigrateShards +) + +type accessType int + +const ( + allowWrites = accessType(iota) + disallowWrites +) + +type migrater struct { + migrationType MigrationType + wr *Wrangler + id int64 + sources map[topo.KeyspaceShard]*miSource + targets map[topo.KeyspaceShard]*miTarget + sourceKeyspace string + targetKeyspace string + tables []string +} + +type miTarget struct { + shard *topo.ShardInfo + master *topo.TabletInfo + sources map[uint32]*binlogdatapb.BinlogSource + position string +} + +type miSource struct { + shard *topo.ShardInfo + master *topo.TabletInfo + position string + journaled bool +} + +// MigrateWrites is a generic way of migrating write traffic for a resharding workflow. 
+func (wr *Wrangler) MigrateWrites(ctx context.Context, migrationType MigrationType, streams map[topo.KeyspaceShard][]uint32, filteredReplicationWaitTime time.Duration) error { + mi, err := wr.buildMigrater(ctx, migrationType, streams) + if err != nil { + return err + } + journalsExist, err := mi.checkJournals(ctx) + if err != nil { + return err + } + if !journalsExist { + if err := mi.stopSourceWrites(ctx); err != nil { + mi.cancelMigration(ctx) + return err + } + if err := mi.waitForCatchup(ctx, filteredReplicationWaitTime); err != nil { + mi.cancelMigration(ctx) + return err + } + } + if err := mi.createJournals(ctx); err != nil { + return err + } + if err := mi.createReverseReplication(ctx); err != nil { + return err + } + if err := mi.allowTargetWrites(ctx); err != nil { + return err + } + if err := mi.changeRouting(ctx); err != nil { + return err + } + mi.deleteTargetVReplication(ctx) + return nil +} + +func (wr *Wrangler) buildMigrater(ctx context.Context, migrationType MigrationType, streams map[topo.KeyspaceShard][]uint32) (*migrater, error) { + mi := &migrater{ + migrationType: migrationType, + id: hashStreams(streams), + targets: make(map[topo.KeyspaceShard]*miTarget), + sources: make(map[topo.KeyspaceShard]*miSource), + } + tableMap := make(map[string]bool) + for targetks, uids := range streams { + targetShard, err := mi.wr.ts.GetShard(ctx, targetks.Keyspace, targetks.Shard) + if err != nil { + return nil, err + } + targetMaster, err := mi.wr.ts.GetTablet(ctx, targetShard.MasterAlias) + if err != nil { + return nil, err + } + mi.targets[targetks] = &miTarget{ + shard: targetShard, + master: targetMaster, + sources: make(map[uint32]*binlogdatapb.BinlogSource), + } + if mi.targetKeyspace == "" { + mi.targetKeyspace = targetks.Keyspace + } + for _, uid := range uids { + p3qr, err := mi.wr.tmc.VReplicationExec(ctx, targetMaster.Tablet, fmt.Sprintf("select source from _vt.vreplication where id=%d", uid)) + if err != nil { + return nil, err + } + qr := 
sqltypes.Proto3ToResult(p3qr) + if len(qr.Rows) < 1 || len(qr.Rows[0]) < 1 { + return nil, fmt.Errorf("VReplication stream %d not found for %s:%s", int(uid), targetks.Keyspace, targetks.Shard) + } + str := qr.Rows[0][0].ToString() + var binlogSource binlogdatapb.BinlogSource + if err := proto.UnmarshalText(str, &binlogSource); err != nil { + return nil, err + } + mi.targets[targetks].sources[uid] = &binlogSource + + sourceks := topo.KeyspaceShard{Keyspace: binlogSource.Keyspace, Shard: binlogSource.Shard} + if _, ok := mi.sources[sourceks]; !ok { + sourceShard, err := mi.wr.ts.GetShard(ctx, binlogSource.Keyspace, binlogSource.Shard) + if err != nil { + return nil, err + } + sourceMaster, err := mi.wr.ts.GetTablet(ctx, sourceShard.MasterAlias) + if err != nil { + return nil, err + } + mi.sources[sourceks] = &miSource{ + shard: sourceShard, + master: sourceMaster, + } + + for _, rule := range binlogSource.Filter.Rules { + tableMap[rule.Match] = true + } + + if mi.sourceKeyspace == "" { + mi.sourceKeyspace = sourceks.Keyspace + } + } + } + } + for t := range tableMap { + mi.tables = append(mi.tables, t) + } + return mi, nil +} + +// hashStreams produces a reproducible hash based on the input parameters. +func hashStreams(streams map[topo.KeyspaceShard][]uint32) int64 { + var expanded []string + for ks, uids := range streams { + for _, uid := range uids { + expanded = append(expanded, fmt.Sprintf("%s:%s:%d", ks.Keyspace, ks.Shard, uid)) + } + } + sort.Strings(expanded) + hasher := fnv.New64() + for _, str := range expanded { + hasher.Write([]byte(str)) + } + // Convert to int64 after dropping the highest bit.
+ return int64(hasher.Sum64() & math.MaxInt64) +} + +func (mi *migrater) checkJournals(ctx context.Context) (journalsExist bool, err error) { + var exist sync2.AtomicBool + err = mi.forAllSources(func(sourceks topo.KeyspaceShard, source *miSource) error { + statement := fmt.Sprintf("select 1 from _vt.resharding_journal where id=%v", mi.id) + p3qr, err := mi.wr.tmc.VReplicationExec(ctx, source.master.Tablet, statement) + if err != nil { + return err + } + if len(p3qr.Rows) >= 1 { + exist.Set(true) + source.journaled = true + } + return nil + }) + return exist.Get(), err +} + +func (mi *migrater) stopSourceWrites(ctx context.Context) error { + var err error + if mi.migrationType == MigrateTables { + err = mi.changeTableSourceWrites(ctx, disallowWrites) + } else { + err = mi.changeShardsAccess(ctx, mi.sourceKeyspace, mi.sourceShards(), disallowWrites) + } + if err != nil { + return err + } + return mi.forAllSources(func(sourceks topo.KeyspaceShard, source *miSource) error { + var err error + source.position, err = mi.wr.tmc.MasterPosition(ctx, source.master.Tablet) + return err + }) +} + +func (mi *migrater) changeTableSourceWrites(ctx context.Context, access accessType) error { + return mi.forAllSources(func(sourceks topo.KeyspaceShard, source *miSource) error { + if _, err := mi.wr.ts.UpdateShardFields(ctx, sourceks.Keyspace, sourceks.Shard, func(si *topo.ShardInfo) error { + return si.UpdateSourceBlacklistedTables(ctx, topodatapb.TabletType_MASTER, nil, access == allowWrites /* remove */, mi.tables) + }); err != nil { + return err + } + return mi.wr.tmc.RefreshState(ctx, source.master.Tablet) + }) +} + +func (mi *migrater) waitForCatchup(ctx context.Context, filteredReplicationWaitTime time.Duration) error { + ctx, cancel := context.WithTimeout(ctx, filteredReplicationWaitTime) + defer cancel() + + return mi.forAllUids(func(target *miTarget, uid uint32) error { + bls := target.sources[uid] + source := mi.sources[topo.KeyspaceShard{Keyspace: bls.Keyspace, Shard: 
bls.Shard}] + if err := mi.wr.tmc.VReplicationWaitForPos(ctx, target.master.Tablet, int(uid), source.position); err != nil { + return err + } + if _, err := mi.wr.tmc.VReplicationExec(ctx, target.master.Tablet, binlogplayer.StopVReplication(uid, "stopped for cutover")); err != nil { + return err + } + var err error + target.position, err = mi.wr.tmc.MasterPosition(ctx, target.master.Tablet) + return err + }) +} + +func (mi *migrater) cancelMigration(ctx context.Context) { + var err error + if mi.migrationType == MigrateTables { + err = mi.changeTableSourceWrites(ctx, allowWrites) + } else { + err = mi.changeShardsAccess(ctx, mi.sourceKeyspace, mi.sourceShards(), allowWrites) + } + if err != nil { + mi.wr.Logger().Errorf("Cancel migration failed: %v", err) + } + + err = mi.forAllUids(func(target *miTarget, uid uint32) error { + if _, err := mi.wr.tmc.VReplicationExec(ctx, target.master.Tablet, binlogplayer.StartVReplication(uid)); err != nil { + return err + } + return nil + }) + if err != nil { + mi.wr.Logger().Errorf("Cancel migration failed: could not restart vreplication: %v", err) + return + } +} + +func (mi *migrater) createJournals(ctx context.Context) error { + return mi.forAllSources(func(sourceks topo.KeyspaceShard, source *miSource) error { + if source.journaled { + return nil + } + journal := &binlogdatapb.Journal{ + Id: mi.id, + Tables: mi.tables, + LocalPosition: source.position, + } + participantMap := make(map[topo.KeyspaceShard]bool) + for targetks, target := range mi.targets { + found := false + for _, tsource := range target.sources { + if sourceks == (topo.KeyspaceShard{Keyspace: tsource.Keyspace, Shard: tsource.Shard}) { + found = true + break + } + } + if !found { + continue + } + journal.ShardGtids = append(journal.ShardGtids, &binlogdatapb.ShardGtid{ + Keyspace: targetks.Keyspace, + Shard: targetks.Shard, + Gtid: target.position, + }) + for _, tsource := range target.sources { + participantMap[topo.KeyspaceShard{Keyspace: tsource.Keyspace, Shard: 
tsource.Shard}] = true + } + for ks := range participantMap { + journal.Participants = append(journal.Participants, &binlogdatapb.KeyspaceShard{ + Keyspace: ks.Keyspace, + Shard: ks.Shard, + }) + } + } + statement := fmt.Sprintf("insert into _vt.resharding_journal "+ + "(id, db_name, val) "+ + "values (%v, %v, %v)", + mi.id, encodeString(source.master.DbName()), encodeString(journal.String())) + if _, err := mi.wr.tmc.VReplicationExec(ctx, source.master.Tablet, statement); err != nil { + return err + } + return nil + }) +} + +func (mi *migrater) createReverseReplication(ctx context.Context) error { + vs, err := mi.wr.ts.GetVSchema(ctx, mi.sourceKeyspace) + if err != nil { + return err + } + ksschema, err := vindexes.BuildKeyspaceSchema(vs, mi.sourceKeyspace) + if err != nil { + return err + } + return mi.forAllUids(func(target *miTarget, uid uint32) error { + bls := target.sources[uid] + source := mi.sources[topo.KeyspaceShard{Keyspace: bls.Keyspace, Shard: bls.Shard}] + reverseBls := &binlogdatapb.BinlogSource{ + Keyspace: target.shard.Keyspace(), + Shard: target.shard.ShardName(), + TabletType: bls.TabletType, + Filter: &binlogdatapb.Filter{}, + } + for _, rule := range bls.Filter.Rules { + var filter string + if strings.HasPrefix(rule.Match, "/") { + if ksschema.Keyspace.Sharded { + filter = bls.Shard + } + } else { + var inKeyrange string + if ksschema.Keyspace.Sharded { + vtable, ok := ksschema.Tables[rule.Match] + if !ok { + return fmt.Errorf("table %s not found in vschema", rule.Match) + } + // TODO(sougou): handle degenerate cases like sequence, etc. + // We currently assume the primary vindex is the best way to filter, which may not be true. 
+ inKeyrange = fmt.Sprintf(" where in_keyrange(%s, '%s', '%s')", sqlparser.String(vtable.ColumnVindexes[0].Columns[0]), vs.Vindexes[vtable.ColumnVindexes[0].Name].Type, bls.Shard) + } + filter = fmt.Sprintf("select * from %s%s", rule.Match, inKeyrange) + } + reverseBls.Filter.Rules = append(reverseBls.Filter.Rules, &binlogdatapb.Rule{ + Match: rule.Match, + Filter: filter, + }) + } + + _, err := mi.wr.VReplicationExec(ctx, source.master.Alias, binlogplayer.CreateVReplicationState("ReversedResharding", reverseBls, target.position, binlogplayer.BlpStopped, source.master.DbName())) + return err + }) +} + +func (mi *migrater) allowTargetWrites(ctx context.Context) error { + if mi.migrationType == MigrateTables { + return mi.allowTableTargetWrites(ctx) + } + return mi.changeShardsAccess(ctx, mi.targetKeyspace, mi.targetShards(), allowWrites) +} + +func (mi *migrater) allowTableTargetWrites(ctx context.Context) error { + return mi.forAllTargets(func(targetks topo.KeyspaceShard, target *miTarget) error { + if _, err := mi.wr.ts.UpdateShardFields(ctx, targetks.Keyspace, targetks.Shard, func(si *topo.ShardInfo) error { + return si.UpdateSourceBlacklistedTables(ctx, topodatapb.TabletType_MASTER, nil, true, mi.tables) + }); err != nil { + return err + } + return mi.wr.tmc.RefreshState(ctx, target.master.Tablet) + }) +} + +func (mi *migrater) changeRouting(ctx context.Context) error { + if mi.migrationType == MigrateTables { + return mi.changeTableRouting(ctx) + } + return mi.changeShardRouting(ctx) +} + +func (mi *migrater) changeTableRouting(ctx context.Context) error { + rules, err := mi.wr.getRoutingRules(ctx) + if err != nil { + return err + } + for _, table := range mi.tables { + for _, tabletType := range []topodatapb.TabletType{topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY} { + tt := strings.ToLower(tabletType.String()) + delete(rules, table+"@"+tt) + delete(rules, mi.sourceKeyspace+"."+table+"@"+tt) + delete(rules, mi.targetKeyspace+"."+table+"@"+tt) + } 
+ delete(rules, mi.targetKeyspace+"."+table) + rules[table] = []string{mi.targetKeyspace + "." + table} + rules[mi.sourceKeyspace+"."+table] = []string{mi.targetKeyspace + "." + table} + } + if err := mi.wr.saveRoutingRules(ctx, rules); err != nil { + return err + } + return mi.wr.ts.RebuildSrvVSchema(ctx, nil) +} + +func (mi *migrater) changeShardRouting(ctx context.Context) error { + err := mi.forAllSources(func(sourceks topo.KeyspaceShard, source *miSource) error { + _, err := mi.wr.ts.UpdateShardFields(ctx, source.shard.Keyspace(), source.shard.ShardName(), func(si *topo.ShardInfo) error { + si.IsMasterServing = false + return nil + }) + return err + }) + if err != nil { + return err + } + err = mi.forAllTargets(func(targetks topo.KeyspaceShard, target *miTarget) error { + _, err := mi.wr.ts.UpdateShardFields(ctx, target.shard.Keyspace(), target.shard.ShardName(), func(si *topo.ShardInfo) error { + si.IsMasterServing = true + return nil + }) + return err + }) + if err != nil { + return err + } + return mi.wr.ts.MigrateServedType(ctx, mi.targetKeyspace, mi.targetShards(), mi.sourceShards(), topodatapb.TabletType_MASTER, nil) +} + +func (mi *migrater) deleteTargetVReplication(ctx context.Context) { + _ = mi.forAllUids(func(target *miTarget, uid uint32) error { + if _, err := mi.wr.tmc.VReplicationExec(ctx, target.master.Tablet, binlogplayer.DeleteVReplication(uid)); err != nil { + mi.wr.Logger().Errorf("Final cleanup: could not delete vreplication, please delete stopped streams manually: %v", err) + } + return nil + }) +} + +func (mi *migrater) changeShardsAccess(ctx context.Context, keyspace string, shards []*topo.ShardInfo, access accessType) error { + if err := mi.wr.ts.UpdateDisableQueryService(ctx, mi.sourceKeyspace, shards, topodatapb.TabletType_MASTER, nil, access == disallowWrites /* disable */); err != nil { + return err + } + return mi.wr.refreshMasters(ctx, shards) +} + +func (mi *migrater) forAllSources(f func(topo.KeyspaceShard, *miSource) error) 
error { + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for sourceks, source := range mi.sources { + wg.Add(1) + go func(sourceks topo.KeyspaceShard, source *miSource) { + defer wg.Done() + + if err := f(sourceks, source); err != nil { + allErrors.RecordError(err) + } + }(sourceks, source) + } + wg.Wait() + return allErrors.AggrError(vterrors.Aggregate) +} + +func (mi *migrater) forAllTargets(f func(topo.KeyspaceShard, *miTarget) error) error { + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for targetks, target := range mi.targets { + wg.Add(1) + go func(targetks topo.KeyspaceShard, target *miTarget) { + defer wg.Done() + + if err := f(targetks, target); err != nil { + allErrors.RecordError(err) + } + }(targetks, target) + } + wg.Wait() + return allErrors.AggrError(vterrors.Aggregate) +} + +func (mi *migrater) forAllUids(f func(target *miTarget, uid uint32) error) error { + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for _, target := range mi.targets { + for uid := range target.sources { + wg.Add(1) + go func(target *miTarget, uid uint32) { + defer wg.Done() + + if err := f(target, uid); err != nil { + allErrors.RecordError(err) + } + }(target, uid) + } + } + wg.Wait() + return allErrors.AggrError(vterrors.Aggregate) +} + +func (mi *migrater) sourceShards() []*topo.ShardInfo { + shards := make([]*topo.ShardInfo, 0, len(mi.sources)) + for _, source := range mi.sources { + shards = append(shards, source.shard) + } + return shards +} + +func (mi *migrater) targetShards() []*topo.ShardInfo { + shards := make([]*topo.ShardInfo, 0, len(mi.targets)) + for _, target := range mi.targets { + shards = append(shards, target.shard) + } + return shards +} + +func (wr *Wrangler) getRoutingRules(ctx context.Context) (map[string][]string, error) { + rrs, err := wr.ts.GetRoutingRules(ctx) + if err != nil { + return nil, err + } + rules := make(map[string][]string, len(rrs.Rules)) + for _, rr := range 
rrs.Rules { + rules[rr.FromTable] = rr.ToTables + } + return rules, nil +} + +func (wr *Wrangler) saveRoutingRules(ctx context.Context, rules map[string][]string) error { + rrs := &vschemapb.RoutingRules{Rules: make([]*vschemapb.RoutingRule, 0, len(rules))} + for from, to := range rules { + rrs.Rules = append(rrs.Rules, &vschemapb.RoutingRule{ + FromTable: from, + ToTables: to, + }) + } + return wr.ts.SaveRoutingRules(ctx, rrs) +} diff --git a/proto/binlogdata.proto b/proto/binlogdata.proto index 021f1fbf189..dd4de0f3935 100644 --- a/proto/binlogdata.proto +++ b/proto/binlogdata.proto @@ -187,6 +187,7 @@ enum VEventType { FIELD = 13; HEARTBEAT = 14; VGTID = 15; + JOURNAL = 16; } // RowChange represents one row change @@ -216,6 +217,19 @@ message VGtid { repeated ShardGtid shard_gtids = 1; } +message KeyspaceShard { + string keyspace = 1; + string shard = 2; +} + +message Journal { + int64 id = 1; + repeated string tables = 2; + string local_position = 3; + repeated ShardGtid shard_gtids = 4; + repeated KeyspaceShard participants = 5; +} + // VEvent represents a vstream event message VEvent { VEventType type = 1; @@ -225,6 +239,7 @@ message VEvent { RowEvent row_event = 5; FieldEvent field_event = 6; VGtid vgtid = 7; + Journal journal = 8; // current_time specifies the current time to handle clock skew. 
int64 current_time = 20; } diff --git a/py/vtproto/binlogdata_pb2.py b/py/vtproto/binlogdata_pb2.py index c744f3b7280..dcac54576b0 100644 --- a/py/vtproto/binlogdata_pb2.py +++ b/py/vtproto/binlogdata_pb2.py @@ -23,7 +23,7 @@ package='binlogdata', syntax='proto3', serialized_options=_b('Z\'vitess.io/vitess/go/vt/proto/binlogdata'), - serialized_pb=_b('\n\x10\x62inlogdata.proto\x12\nbinlogdata\x1a\x0bvtrpc.proto\x1a\x0bquery.proto\x1a\x0etopodata.proto\"7\n\x07\x43harset\x12\x0e\n\x06\x63lient\x18\x01 \x01(\x05\x12\x0c\n\x04\x63onn\x18\x02 \x01(\x05\x12\x0e\n\x06server\x18\x03 \x01(\x05\"\xb5\x03\n\x11\x42inlogTransaction\x12;\n\nstatements\x18\x01 \x03(\x0b\x32\'.binlogdata.BinlogTransaction.Statement\x12&\n\x0b\x65vent_token\x18\x04 \x01(\x0b\x32\x11.query.EventToken\x1a\xae\x02\n\tStatement\x12\x42\n\x08\x63\x61tegory\x18\x01 \x01(\x0e\x32\x30.binlogdata.BinlogTransaction.Statement.Category\x12$\n\x07\x63harset\x18\x02 \x01(\x0b\x32\x13.binlogdata.Charset\x12\x0b\n\x03sql\x18\x03 \x01(\x0c\"\xa9\x01\n\x08\x43\x61tegory\x12\x13\n\x0f\x42L_UNRECOGNIZED\x10\x00\x12\x0c\n\x08\x42L_BEGIN\x10\x01\x12\r\n\tBL_COMMIT\x10\x02\x12\x0f\n\x0b\x42L_ROLLBACK\x10\x03\x12\x15\n\x11\x42L_DML_DEPRECATED\x10\x04\x12\n\n\x06\x42L_DDL\x10\x05\x12\n\n\x06\x42L_SET\x10\x06\x12\r\n\tBL_INSERT\x10\x07\x12\r\n\tBL_UPDATE\x10\x08\x12\r\n\tBL_DELETE\x10\tJ\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04\"v\n\x15StreamKeyRangeRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\x12$\n\x07\x63harset\x18\x03 \x01(\x0b\x32\x13.binlogdata.Charset\"S\n\x16StreamKeyRangeResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 \x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"]\n\x13StreamTablesRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12\x0e\n\x06tables\x18\x02 \x03(\t\x12$\n\x07\x63harset\x18\x03 \x01(\x0b\x32\x13.binlogdata.Charset\"Q\n\x14StreamTablesResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 
\x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"%\n\x04Rule\x12\r\n\x05match\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\")\n\x06\x46ilter\x12\x1f\n\x05rules\x18\x01 \x03(\x0b\x32\x10.binlogdata.Rule\"\xde\x01\n\x0c\x42inlogSource\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\x12)\n\x0btablet_type\x18\x03 \x01(\x0e\x32\x14.topodata.TabletType\x12%\n\tkey_range\x18\x04 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x0e\n\x06tables\x18\x05 \x03(\t\x12\"\n\x06\x66ilter\x18\x06 \x01(\x0b\x32\x12.binlogdata.Filter\x12\'\n\x06on_ddl\x18\x07 \x01(\x0e\x32\x17.binlogdata.OnDDLAction\"B\n\tRowChange\x12\x1a\n\x06\x62\x65\x66ore\x18\x01 \x01(\x0b\x32\n.query.Row\x12\x19\n\x05\x61\x66ter\x18\x02 \x01(\x0b\x32\n.query.Row\"J\n\x08RowEvent\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12*\n\x0brow_changes\x18\x02 \x03(\x0b\x32\x15.binlogdata.RowChange\">\n\nFieldEvent\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x1c\n\x06\x66ields\x18\x02 \x03(\x0b\x32\x0c.query.Field\":\n\tShardGtid\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\x12\x0c\n\x04gtid\x18\x03 \x01(\t\"3\n\x05VGtid\x12*\n\x0bshard_gtids\x18\x01 \x03(\x0b\x32\x15.binlogdata.ShardGtid\"\xea\x01\n\x06VEvent\x12$\n\x04type\x18\x01 \x01(\x0e\x32\x16.binlogdata.VEventType\x12\x11\n\ttimestamp\x18\x02 \x01(\x03\x12\x0c\n\x04gtid\x18\x03 \x01(\t\x12\x0b\n\x03\x64\x64l\x18\x04 \x01(\t\x12\'\n\trow_event\x18\x05 \x01(\x0b\x32\x14.binlogdata.RowEvent\x12+\n\x0b\x66ield_event\x18\x06 \x01(\x0b\x32\x16.binlogdata.FieldEvent\x12 \n\x05vgtid\x18\x07 \x01(\x0b\x32\x11.binlogdata.VGtid\x12\x14\n\x0c\x63urrent_time\x18\x14 \x01(\x03\"\xc7\x01\n\x0eVStreamRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x10\n\x08position\x18\x04 \x01(\t\x12\"\n\x06\x66ilter\x18\x05 
\x01(\x0b\x32\x12.binlogdata.Filter\"5\n\x0fVStreamResponse\x12\"\n\x06\x65vents\x18\x01 \x03(\x0b\x32\x12.binlogdata.VEvent\"\xc8\x01\n\x12VStreamRowsRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\r\n\x05query\x18\x04 \x01(\t\x12\"\n\x06lastpk\x18\x05 \x01(\x0b\x32\x12.query.QueryResult\"\x97\x01\n\x13VStreamRowsResponse\x12\x1c\n\x06\x66ields\x18\x01 \x03(\x0b\x32\x0c.query.Field\x12\x1e\n\x08pkfields\x18\x02 \x03(\x0b\x32\x0c.query.Field\x12\x0c\n\x04gtid\x18\x03 \x01(\t\x12\x18\n\x04rows\x18\x04 \x03(\x0b\x32\n.query.Row\x12\x1a\n\x06lastpk\x18\x05 \x01(\x0b\x32\n.query.Row*>\n\x0bOnDDLAction\x12\n\n\x06IGNORE\x10\x00\x12\x08\n\x04STOP\x10\x01\x12\x08\n\x04\x45XEC\x10\x02\x12\x0f\n\x0b\x45XEC_IGNORE\x10\x03*\xc4\x01\n\nVEventType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x08\n\x04GTID\x10\x01\x12\t\n\x05\x42\x45GIN\x10\x02\x12\n\n\x06\x43OMMIT\x10\x03\x12\x0c\n\x08ROLLBACK\x10\x04\x12\x07\n\x03\x44\x44L\x10\x05\x12\n\n\x06INSERT\x10\x06\x12\x0b\n\x07REPLACE\x10\x07\x12\n\n\x06UPDATE\x10\x08\x12\n\n\x06\x44\x45LETE\x10\t\x12\x07\n\x03SET\x10\n\x12\t\n\x05OTHER\x10\x0b\x12\x07\n\x03ROW\x10\x0c\x12\t\n\x05\x46IELD\x10\r\x12\r\n\tHEARTBEAT\x10\x0e\x12\t\n\x05VGTID\x10\x0f\x42)Z\'vitess.io/vitess/go/vt/proto/binlogdatab\x06proto3') + serialized_pb=_b('\n\x10\x62inlogdata.proto\x12\nbinlogdata\x1a\x0bvtrpc.proto\x1a\x0bquery.proto\x1a\x0etopodata.proto\"7\n\x07\x43harset\x12\x0e\n\x06\x63lient\x18\x01 \x01(\x05\x12\x0c\n\x04\x63onn\x18\x02 \x01(\x05\x12\x0e\n\x06server\x18\x03 \x01(\x05\"\xb5\x03\n\x11\x42inlogTransaction\x12;\n\nstatements\x18\x01 \x03(\x0b\x32\'.binlogdata.BinlogTransaction.Statement\x12&\n\x0b\x65vent_token\x18\x04 \x01(\x0b\x32\x11.query.EventToken\x1a\xae\x02\n\tStatement\x12\x42\n\x08\x63\x61tegory\x18\x01 
\x01(\x0e\x32\x30.binlogdata.BinlogTransaction.Statement.Category\x12$\n\x07\x63harset\x18\x02 \x01(\x0b\x32\x13.binlogdata.Charset\x12\x0b\n\x03sql\x18\x03 \x01(\x0c\"\xa9\x01\n\x08\x43\x61tegory\x12\x13\n\x0f\x42L_UNRECOGNIZED\x10\x00\x12\x0c\n\x08\x42L_BEGIN\x10\x01\x12\r\n\tBL_COMMIT\x10\x02\x12\x0f\n\x0b\x42L_ROLLBACK\x10\x03\x12\x15\n\x11\x42L_DML_DEPRECATED\x10\x04\x12\n\n\x06\x42L_DDL\x10\x05\x12\n\n\x06\x42L_SET\x10\x06\x12\r\n\tBL_INSERT\x10\x07\x12\r\n\tBL_UPDATE\x10\x08\x12\r\n\tBL_DELETE\x10\tJ\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04\"v\n\x15StreamKeyRangeRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\x12$\n\x07\x63harset\x18\x03 \x01(\x0b\x32\x13.binlogdata.Charset\"S\n\x16StreamKeyRangeResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 \x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"]\n\x13StreamTablesRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12\x0e\n\x06tables\x18\x02 \x03(\t\x12$\n\x07\x63harset\x18\x03 \x01(\x0b\x32\x13.binlogdata.Charset\"Q\n\x14StreamTablesResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 \x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"%\n\x04Rule\x12\r\n\x05match\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\")\n\x06\x46ilter\x12\x1f\n\x05rules\x18\x01 \x03(\x0b\x32\x10.binlogdata.Rule\"\xde\x01\n\x0c\x42inlogSource\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\x12)\n\x0btablet_type\x18\x03 \x01(\x0e\x32\x14.topodata.TabletType\x12%\n\tkey_range\x18\x04 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x0e\n\x06tables\x18\x05 \x03(\t\x12\"\n\x06\x66ilter\x18\x06 \x01(\x0b\x32\x12.binlogdata.Filter\x12\'\n\x06on_ddl\x18\x07 \x01(\x0e\x32\x17.binlogdata.OnDDLAction\"B\n\tRowChange\x12\x1a\n\x06\x62\x65\x66ore\x18\x01 \x01(\x0b\x32\n.query.Row\x12\x19\n\x05\x61\x66ter\x18\x02 \x01(\x0b\x32\n.query.Row\"J\n\x08RowEvent\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12*\n\x0brow_changes\x18\x02 
\x03(\x0b\x32\x15.binlogdata.RowChange\">\n\nFieldEvent\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x1c\n\x06\x66ields\x18\x02 \x03(\x0b\x32\x0c.query.Field\":\n\tShardGtid\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\x12\x0c\n\x04gtid\x18\x03 \x01(\t\"3\n\x05VGtid\x12*\n\x0bshard_gtids\x18\x01 \x03(\x0b\x32\x15.binlogdata.ShardGtid\"0\n\rKeyspaceShard\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\"\x9a\x01\n\x07Journal\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x0e\n\x06tables\x18\x02 \x03(\t\x12\x16\n\x0elocal_position\x18\x03 \x01(\t\x12*\n\x0bshard_gtids\x18\x04 \x03(\x0b\x32\x15.binlogdata.ShardGtid\x12/\n\x0cparticipants\x18\x05 \x03(\x0b\x32\x19.binlogdata.KeyspaceShard\"\x90\x02\n\x06VEvent\x12$\n\x04type\x18\x01 \x01(\x0e\x32\x16.binlogdata.VEventType\x12\x11\n\ttimestamp\x18\x02 \x01(\x03\x12\x0c\n\x04gtid\x18\x03 \x01(\t\x12\x0b\n\x03\x64\x64l\x18\x04 \x01(\t\x12\'\n\trow_event\x18\x05 \x01(\x0b\x32\x14.binlogdata.RowEvent\x12+\n\x0b\x66ield_event\x18\x06 \x01(\x0b\x32\x16.binlogdata.FieldEvent\x12 \n\x05vgtid\x18\x07 \x01(\x0b\x32\x11.binlogdata.VGtid\x12$\n\x07journal\x18\x08 \x01(\x0b\x32\x13.binlogdata.Journal\x12\x14\n\x0c\x63urrent_time\x18\x14 \x01(\x03\"\xc7\x01\n\x0eVStreamRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x10\n\x08position\x18\x04 \x01(\t\x12\"\n\x06\x66ilter\x18\x05 \x01(\x0b\x32\x12.binlogdata.Filter\"5\n\x0fVStreamResponse\x12\"\n\x06\x65vents\x18\x01 \x03(\x0b\x32\x12.binlogdata.VEvent\"\xc8\x01\n\x12VStreamRowsRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\r\n\x05query\x18\x04 \x01(\t\x12\"\n\x06lastpk\x18\x05 
\x01(\x0b\x32\x12.query.QueryResult\"\x97\x01\n\x13VStreamRowsResponse\x12\x1c\n\x06\x66ields\x18\x01 \x03(\x0b\x32\x0c.query.Field\x12\x1e\n\x08pkfields\x18\x02 \x03(\x0b\x32\x0c.query.Field\x12\x0c\n\x04gtid\x18\x03 \x01(\t\x12\x18\n\x04rows\x18\x04 \x03(\x0b\x32\n.query.Row\x12\x1a\n\x06lastpk\x18\x05 \x01(\x0b\x32\n.query.Row*>\n\x0bOnDDLAction\x12\n\n\x06IGNORE\x10\x00\x12\x08\n\x04STOP\x10\x01\x12\x08\n\x04\x45XEC\x10\x02\x12\x0f\n\x0b\x45XEC_IGNORE\x10\x03*\xd1\x01\n\nVEventType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x08\n\x04GTID\x10\x01\x12\t\n\x05\x42\x45GIN\x10\x02\x12\n\n\x06\x43OMMIT\x10\x03\x12\x0c\n\x08ROLLBACK\x10\x04\x12\x07\n\x03\x44\x44L\x10\x05\x12\n\n\x06INSERT\x10\x06\x12\x0b\n\x07REPLACE\x10\x07\x12\n\n\x06UPDATE\x10\x08\x12\n\n\x06\x44\x45LETE\x10\t\x12\x07\n\x03SET\x10\n\x12\t\n\x05OTHER\x10\x0b\x12\x07\n\x03ROW\x10\x0c\x12\t\n\x05\x46IELD\x10\r\x12\r\n\tHEARTBEAT\x10\x0e\x12\t\n\x05VGTID\x10\x0f\x12\x0b\n\x07JOURNAL\x10\x10\x42)Z\'vitess.io/vitess/go/vt/proto/binlogdatab\x06proto3') , dependencies=[vtrpc__pb2.DESCRIPTOR,query__pb2.DESCRIPTOR,topodata__pb2.DESCRIPTOR,]) @@ -52,8 +52,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=2433, - serialized_end=2495, + serialized_start=2678, + serialized_end=2740, ) _sym_db.RegisterEnumDescriptor(_ONDDLACTION) @@ -128,11 +128,15 @@ name='VGTID', index=15, number=15, serialized_options=None, type=None), + _descriptor.EnumValueDescriptor( + name='JOURNAL', index=16, number=16, + serialized_options=None, + type=None), ], containing_type=None, serialized_options=None, - serialized_start=2498, - serialized_end=2694, + serialized_start=2743, + serialized_end=2952, ) _sym_db.RegisterEnumDescriptor(_VEVENTTYPE) @@ -157,6 +161,7 @@ FIELD = 13 HEARTBEAT = 14 VGTID = 15 +JOURNAL = 16 _BINLOGTRANSACTION_STATEMENT_CATEGORY = _descriptor.EnumDescriptor( @@ -826,6 +831,103 @@ ) +_KEYSPACESHARD = _descriptor.Descriptor( + name='KeyspaceShard', + full_name='binlogdata.KeyspaceShard', + 
filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='keyspace', full_name='binlogdata.KeyspaceShard.keyspace', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='shard', full_name='binlogdata.KeyspaceShard.shard', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1582, + serialized_end=1630, +) + + +_JOURNAL = _descriptor.Descriptor( + name='Journal', + full_name='binlogdata.Journal', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='id', full_name='binlogdata.Journal.id', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='tables', full_name='binlogdata.Journal.tables', index=1, + number=2, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='local_position', full_name='binlogdata.Journal.local_position', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, 
default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='shard_gtids', full_name='binlogdata.Journal.shard_gtids', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='participants', full_name='binlogdata.Journal.participants', index=4, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1633, + serialized_end=1787, +) + + _VEVENT = _descriptor.Descriptor( name='VEvent', full_name='binlogdata.VEvent', @@ -883,7 +985,14 @@ is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='current_time', full_name='binlogdata.VEvent.current_time', index=7, + name='journal', full_name='binlogdata.VEvent.journal', index=7, + number=8, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='current_time', full_name='binlogdata.VEvent.current_time', index=8, number=20, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, @@ -901,8 +1010,8 @@ extension_ranges=[], oneofs=[ ], - 
serialized_start=1583, - serialized_end=1817, + serialized_start=1790, + serialized_end=2062, ) @@ -960,8 +1069,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1820, - serialized_end=2019, + serialized_start=2065, + serialized_end=2264, ) @@ -991,8 +1100,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2021, - serialized_end=2074, + serialized_start=2266, + serialized_end=2319, ) @@ -1050,8 +1159,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2077, - serialized_end=2277, + serialized_start=2322, + serialized_end=2522, ) @@ -1109,8 +1218,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2280, - serialized_end=2431, + serialized_start=2525, + serialized_end=2676, ) _BINLOGTRANSACTION_STATEMENT.fields_by_name['category'].enum_type = _BINLOGTRANSACTION_STATEMENT_CATEGORY @@ -1134,10 +1243,13 @@ _ROWEVENT.fields_by_name['row_changes'].message_type = _ROWCHANGE _FIELDEVENT.fields_by_name['fields'].message_type = query__pb2._FIELD _VGTID.fields_by_name['shard_gtids'].message_type = _SHARDGTID +_JOURNAL.fields_by_name['shard_gtids'].message_type = _SHARDGTID +_JOURNAL.fields_by_name['participants'].message_type = _KEYSPACESHARD _VEVENT.fields_by_name['type'].enum_type = _VEVENTTYPE _VEVENT.fields_by_name['row_event'].message_type = _ROWEVENT _VEVENT.fields_by_name['field_event'].message_type = _FIELDEVENT _VEVENT.fields_by_name['vgtid'].message_type = _VGTID +_VEVENT.fields_by_name['journal'].message_type = _JOURNAL _VSTREAMREQUEST.fields_by_name['effective_caller_id'].message_type = vtrpc__pb2._CALLERID _VSTREAMREQUEST.fields_by_name['immediate_caller_id'].message_type = query__pb2._VTGATECALLERID _VSTREAMREQUEST.fields_by_name['target'].message_type = query__pb2._TARGET @@ -1165,6 +1277,8 @@ DESCRIPTOR.message_types_by_name['FieldEvent'] = _FIELDEVENT DESCRIPTOR.message_types_by_name['ShardGtid'] = _SHARDGTID DESCRIPTOR.message_types_by_name['VGtid'] = _VGTID +DESCRIPTOR.message_types_by_name['KeyspaceShard'] = _KEYSPACESHARD 
+DESCRIPTOR.message_types_by_name['Journal'] = _JOURNAL DESCRIPTOR.message_types_by_name['VEvent'] = _VEVENT DESCRIPTOR.message_types_by_name['VStreamRequest'] = _VSTREAMREQUEST DESCRIPTOR.message_types_by_name['VStreamResponse'] = _VSTREAMRESPONSE @@ -1280,6 +1394,20 @@ )) _sym_db.RegisterMessage(VGtid) +KeyspaceShard = _reflection.GeneratedProtocolMessageType('KeyspaceShard', (_message.Message,), dict( + DESCRIPTOR = _KEYSPACESHARD, + __module__ = 'binlogdata_pb2' + # @@protoc_insertion_point(class_scope:binlogdata.KeyspaceShard) + )) +_sym_db.RegisterMessage(KeyspaceShard) + +Journal = _reflection.GeneratedProtocolMessageType('Journal', (_message.Message,), dict( + DESCRIPTOR = _JOURNAL, + __module__ = 'binlogdata_pb2' + # @@protoc_insertion_point(class_scope:binlogdata.Journal) + )) +_sym_db.RegisterMessage(Journal) + VEvent = _reflection.GeneratedProtocolMessageType('VEvent', (_message.Message,), dict( DESCRIPTOR = _VEVENT, __module__ = 'binlogdata_pb2' From f1d09580332eef8c04bfb5e3e45a12c0fd967acf Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Fri, 5 Jul 2019 13:00:23 -0700 Subject: [PATCH 02/17] migrater: migrate reads and validate Signed-off-by: Sugu Sougoumarane --- .../{migrate_writes.go => migrate.go} | 149 +++++++++++++++++- 1 file changed, 143 insertions(+), 6 deletions(-) rename go/vt/wrangler/{migrate_writes.go => migrate.go} (77%) diff --git a/go/vt/wrangler/migrate_writes.go b/go/vt/wrangler/migrate.go similarity index 77% rename from go/vt/wrangler/migrate_writes.go rename to go/vt/wrangler/migrate.go index 8aa9a86f1c4..e59436ba6ab 100644 --- a/go/vt/wrangler/migrate_writes.go +++ b/go/vt/wrangler/migrate.go @@ -20,6 +20,7 @@ import ( "fmt" "hash/fnv" "math" + "reflect" "sort" "strings" "sync" @@ -49,6 +50,13 @@ const ( MigrateShards ) +type migrateDirection int + +const ( + directionForward = migrateDirection(iota) + directionBackward +) + type accessType int const ( @@ -81,12 +89,30 @@ type miSource struct { journaled bool } +// 
MigrateWrites is a generic way of migrating write traffic for a resharding workflow. +func (wr *Wrangler) MigrateReads(ctx context.Context, migrationType MigrationType, streams map[topo.KeyspaceShard][]uint32, cells []string, servedType topodatapb.TabletType, direction migrateDirection) error { + mi, err := wr.buildMigrater(ctx, migrationType, streams) + if err != nil { + return err + } + if err := mi.validate(ctx); err != nil { + return err + } + if mi.migrationType == MigrateTables { + return mi.migrateTableReads(ctx, cells, servedType, direction) + } + return mi.migrateShardReads(ctx, cells, servedType, direction) +} + // MigrateWrites is a generic way of migrating write traffic for a resharding workflow. func (wr *Wrangler) MigrateWrites(ctx context.Context, migrationType MigrationType, streams map[topo.KeyspaceShard][]uint32, filteredReplicationWaitTime time.Duration) error { mi, err := wr.buildMigrater(ctx, migrationType, streams) if err != nil { return err } + if err := mi.validate(ctx); err != nil { + return err + } journalsExist, err := mi.checkJournals(ctx) if err != nil { return err @@ -120,11 +146,11 @@ func (wr *Wrangler) MigrateWrites(ctx context.Context, migrationType MigrationTy func (wr *Wrangler) buildMigrater(ctx context.Context, migrationType MigrationType, streams map[topo.KeyspaceShard][]uint32) (*migrater, error) { mi := &migrater{ migrationType: migrationType, + wr: wr, id: hashStreams(streams), targets: make(map[topo.KeyspaceShard]*miTarget), sources: make(map[topo.KeyspaceShard]*miSource), } - tableMap := make(map[string]bool) for targetks, uids := range streams { targetShard, err := mi.wr.ts.GetShard(ctx, targetks.Keyspace, targetks.Shard) if err != nil { @@ -134,6 +160,9 @@ func (wr *Wrangler) buildMigrater(ctx context.Context, migrationType MigrationTy if err != nil { return nil, err } + if _, ok := mi.targets[targetks]; ok { + return nil, fmt.Errorf("duplicate targets: %v", targetks) + } mi.targets[targetks] = &miTarget{ shard: 
targetShard, master: targetMaster, @@ -141,6 +170,8 @@ func (wr *Wrangler) buildMigrater(ctx context.Context, migrationType MigrationTy } if mi.targetKeyspace == "" { mi.targetKeyspace = targetks.Keyspace + } else if mi.targetKeyspace != targetks.Keyspace { + return nil, fmt.Errorf("target keyspaces are mismatched across streams: %v vs %v", mi.targetKeyspace, targetks.Keyspace) } for _, uid := range uids { p3qr, err := mi.wr.tmc.VReplicationExec(ctx, targetMaster.Tablet, fmt.Sprintf("select source from _vt.vreplication where id=%d", uid)) @@ -173,19 +204,30 @@ func (wr *Wrangler) buildMigrater(ctx context.Context, migrationType MigrationTy master: sourceMaster, } - for _, rule := range binlogSource.Filter.Rules { - tableMap[rule.Match] = true + if mi.tables == nil { + for _, rule := range binlogSource.Filter.Rules { + mi.tables = append(mi.tables, rule.Match) + } + sort.Strings(mi.tables) + } else { + var tables []string + for _, rule := range binlogSource.Filter.Rules { + tables = append(tables, rule.Match) + } + sort.Strings(tables) + if !reflect.DeepEqual(mi.tables, tables) { + return nil, fmt.Errorf("table lists are mismatched across streams: %v vs %v", mi.tables, tables) + } } if mi.sourceKeyspace == "" { mi.sourceKeyspace = sourceks.Keyspace + } else if mi.sourceKeyspace != sourceks.Keyspace { + return nil, fmt.Errorf("source keyspaces are mismatched across streams: %v vs %v", mi.sourceKeyspace, sourceks.Keyspace) } } } } - for t := range tableMap { - mi.tables = append(mi.tables, t) - } return mi, nil } @@ -206,6 +248,101 @@ func hashStreams(streams map[topo.KeyspaceShard][]uint32) int64 { return int64(hasher.Sum64() & math.MaxInt64) } +func (mi *migrater) validate(ctx context.Context) error { + // Ensure no duplicate sources in each target. 
+ for _, target := range mi.targets { + uniqueSources := make(map[topo.KeyspaceShard]uint32) + for uid, binlogSource := range target.sources { + sourceks := topo.KeyspaceShard{Keyspace: binlogSource.Keyspace, Shard: binlogSource.Shard} + if suid, ok := uniqueSources[sourceks]; ok { + return fmt.Errorf("duplicate sources for uids: %v and %v", suid, uid) + } + uniqueSources[sourceks] = uid + } + } + if mi.migrationType == MigrateTables { + // For table migration, all shards must be present. + if err := mi.compareShards(ctx, mi.sourceKeyspace, mi.sourceShards()); err != nil { + return err + } + if err := mi.compareShards(ctx, mi.targetKeyspace, mi.targetShards()); err != nil { + return err + } + } else { + // For shard migration, source and target keyspace must match, and source and target shards must not match. + if mi.sourceKeyspace != mi.targetKeyspace { + return fmt.Errorf("source and target keyspace must match: %v vs %v", mi.sourceKeyspace, mi.targetKeyspace) + } + for sourceks, _ := range mi.sources { + if _, ok := mi.targets[sourceks]; ok { + return fmt.Errorf("target shard matches a source shard: %v", sourceks) + } + } + } + return nil +} + +func (mi *migrater) compareShards(ctx context.Context, keyspace string, sis []*topo.ShardInfo) error { + var shards []string + for _, si := range sis { + shards = append(shards, si.ShardName()) + } + topoShards, err := mi.wr.ts.GetShardNames(ctx, keyspace) + if err != nil { + return err + } + sort.Strings(topoShards) + sort.Strings(shards) + if !reflect.DeepEqual(topoShards, shards) { + return fmt.Errorf("mismatched shards for keyspace %s: topo: %v vs migrate command: %v", keyspace, topoShards, shards) + } + return nil +} + +func (mi *migrater) migrateTableReads(ctx context.Context, cells []string, servedType topodatapb.TabletType, direction migrateDirection) error { + rules, err := mi.wr.getRoutingRules(ctx) + if err != nil { + return err + } + var fromKeyspace, toKeyspace string + if direction == directionForward { + 
fromKeyspace, toKeyspace = mi.sourceKeyspace, mi.targetKeyspace + } else { + fromKeyspace, toKeyspace = mi.targetKeyspace, mi.sourceKeyspace + } + tt := strings.ToLower(servedType.String()) + for _, table := range mi.tables { + for _, fromt := range []string{ + table + "@" + tt, + fromKeyspace + "." + table + "@" + tt, + toKeyspace + "." + table + "@" + tt, + } { + rules[fromt] = []string{toKeyspace + "." + table} + } + } + if err := mi.wr.saveRoutingRules(ctx, rules); err != nil { + return err + } + return mi.wr.ts.RebuildSrvVSchema(ctx, cells) +} + +func (mi *migrater) migrateShardReads(ctx context.Context, cells []string, servedType topodatapb.TabletType, direction migrateDirection) error { + var fromShards, toShards []*topo.ShardInfo + if direction == directionForward { + fromShards, toShards = mi.sourceShards(), mi.targetShards() + } else { + fromShards, toShards = mi.targetShards(), mi.sourceShards() + } + + if err := mi.wr.updateShardRecords(ctx, mi.sourceKeyspace, fromShards, cells, servedType, true /* isFrom */, false /* clearSourceShards */); err != nil { + return err + } + if err := mi.wr.updateShardRecords(ctx, mi.sourceKeyspace, toShards, cells, servedType, false, false); err != nil { + return err + } + return mi.wr.ts.MigrateServedType(ctx, mi.sourceKeyspace, toShards, fromShards, servedType, cells) +} + func (mi *migrater) checkJournals(ctx context.Context) (journalsExist bool, err error) { var exist sync2.AtomicBool err = mi.forAllSources(func(sourceks topo.KeyspaceShard, source *miSource) error { From bcf5787d7738c9a9d304a3dabe7d66ac1856dc4b Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Sat, 6 Jul 2019 01:38:19 -0700 Subject: [PATCH 03/17] migrater: test framework and initial test Signed-off-by: Sugu Sougoumarane --- go/vt/binlog/binlogplayer/mock_dbclient.go | 31 +- go/vt/wrangler/fake_dbclient.go | 93 ++++++ go/vt/wrangler/fake_tablet_test.go | 228 +++++++++++++ go/vt/wrangler/migrate.go | 208 ++++++++---- go/vt/wrangler/migrate_test.go 
| 369 +++++++++++++++++++++ 5 files changed, 863 insertions(+), 66 deletions(-) create mode 100644 go/vt/wrangler/fake_dbclient.go create mode 100644 go/vt/wrangler/fake_tablet_test.go create mode 100644 go/vt/wrangler/migrate_test.go diff --git a/go/vt/binlog/binlogplayer/mock_dbclient.go b/go/vt/binlog/binlogplayer/mock_dbclient.go index fd606c7d482..ecf1d7a299b 100644 --- a/go/vt/binlog/binlogplayer/mock_dbclient.go +++ b/go/vt/binlog/binlogplayer/mock_dbclient.go @@ -17,6 +17,7 @@ limitations under the License. package binlogplayer import ( + "fmt" "regexp" "testing" "time" @@ -84,12 +85,17 @@ func (dc *MockDBClient) ExpectRequestRE(queryRE string, result *sqltypes.Result, // dc.t.Fatalf is executed on 1 second timeout. Wait should // not be called concurrently with ExpectRequest. func (dc *MockDBClient) Wait() { - dc.t.Helper() + if dc.t != nil { + dc.t.Helper() + } select { case <-dc.done: return case <-time.After(5 * time.Second): - dc.t.Fatalf("timeout waiting for requests, want: %v", dc.expect[dc.currentResult].query) + if dc.t != nil { + dc.t.Fatalf("timeout waiting for requests, want: %v", dc.expect[dc.currentResult].query) + } + panic("timeout") } } @@ -127,19 +133,30 @@ func (dc *MockDBClient) Close() { // ExecuteFetch is part of the DBClient interface func (dc *MockDBClient) ExecuteFetch(query string, maxrows int) (qr *sqltypes.Result, err error) { - dc.t.Helper() - dc.t.Logf("DBClient query: %v", query) + if dc.t != nil { + dc.t.Helper() + dc.t.Logf("DBClient query: %v", query) + } if dc.currentResult >= len(dc.expect) { - dc.t.Fatalf("DBClientMock: query: %s, no more requests are expected", query) + if dc.t != nil { + dc.t.Fatalf("DBClientMock: query: %s, no more requests are expected", query) + } + return nil, fmt.Errorf("DBClientMock: query: %s, no more requests are expected", query) } result := dc.expect[dc.currentResult] if result.re == nil { if query != result.query { - dc.t.Fatalf("DBClientMock: query: %s, want %s", query, result.query) + if 
dc.t != nil { + dc.t.Fatalf("DBClientMock: query: %s, want %s", query, result.query) + } + return nil, fmt.Errorf("DBClientMock: query: %s, want %s", query, result.query) } } else { if !result.re.MatchString(query) { - dc.t.Fatalf("DBClientMock: query: %s, must match %s", query, result.query) + if dc.t != nil { + dc.t.Fatalf("DBClientMock: query: %s, must match %s", query, result.query) + } + return nil, fmt.Errorf("DBClientMock: query: %s, must match %s", query, result.query) } } dc.currentResult++ diff --git a/go/vt/wrangler/fake_dbclient.go b/go/vt/wrangler/fake_dbclient.go new file mode 100644 index 00000000000..9ed2e25719b --- /dev/null +++ b/go/vt/wrangler/fake_dbclient.go @@ -0,0 +1,93 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wrangler + +import ( + "fmt" + "regexp" + + "vitess.io/vitess/go/sqltypes" +) + +type dbResult struct { + result *sqltypes.Result + err error +} + +// fakeDBClient fakes a binlog_player.DBClient. +type fakeDBClient struct { + queries map[string]*dbResult + queriesRE map[*regexp.Regexp]*dbResult +} + +// NewfakeDBClient returns a new DBClientMock. 
+func newFakeDBClient() *fakeDBClient { + return &fakeDBClient{ + queries: make(map[string]*dbResult), + queriesRE: make(map[*regexp.Regexp]*dbResult), + } +} + +func (dc *fakeDBClient) setResult(query string, result *sqltypes.Result, err error) { + dc.queries[query] = &dbResult{result: result, err: err} +} + +func (dc *fakeDBClient) setResultRE(query string, result *sqltypes.Result, err error) { + dc.queriesRE[regexp.MustCompile(query)] = &dbResult{result: result, err: err} +} + +// DBName is part of the DBClient interface +func (dc *fakeDBClient) DBName() string { + return "db" +} + +// Connect is part of the DBClient interface +func (dc *fakeDBClient) Connect() error { + return nil +} + +// Begin is part of the DBClient interface +func (dc *fakeDBClient) Begin() error { + return nil +} + +// Commit is part of the DBClient interface +func (dc *fakeDBClient) Commit() error { + return nil +} + +// Rollback is part of the DBClient interface +func (dc *fakeDBClient) Rollback() error { + return nil +} + +// Close is part of the DBClient interface +func (dc *fakeDBClient) Close() { +} + +// ExecuteFetch is part of the DBClient interface +func (dc *fakeDBClient) ExecuteFetch(query string, maxrows int) (qr *sqltypes.Result, err error) { + if dbr := dc.queries[query]; dbr != nil { + return dbr.result, dbr.err + } + for re, dbr := range dc.queriesRE { + if re.MatchString(query) { + return dbr.result, dbr.err + } + } + return nil, fmt.Errorf("unexpected query: %s", query) +} diff --git a/go/vt/wrangler/fake_tablet_test.go b/go/vt/wrangler/fake_tablet_test.go new file mode 100644 index 00000000000..ba119556685 --- /dev/null +++ b/go/vt/wrangler/fake_tablet_test.go @@ -0,0 +1,228 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wrangler + +import ( + "fmt" + "net" + "net/http" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/vt/mysqlctl/fakemysqldaemon" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vttablet/grpctmserver" + "vitess.io/vitess/go/vt/vttablet/tabletconn" + "vitess.io/vitess/go/vt/vttablet/tabletmanager" + "vitess.io/vitess/go/vt/vttablet/tmclient" + + // import the gRPC client implementation for tablet manager + _ "vitess.io/vitess/go/vt/vttablet/grpctmclient" + + // import the gRPC client implementation for query service + _ "vitess.io/vitess/go/vt/vttablet/grpctabletconn" +) + +// This file was copied from testlib. All tests from testlib should be moved +// to the current directory. In order to move tests from there, we have to +// remove the circular dependency it causes (through vtctl dependence). +// The tests in this diectory call wrangler functions directory. So, there's +// no circular dependency. + +// This file contains utility methods for unit tests. +// We allow the creation of fake tablets, and running their event loop based +// on a FakeMysqlDaemon. + +// FakeTablet keeps track of a fake tablet in memory. 
It has: +// - a Tablet record (used for creating the tablet, kept for user's information) +// - a FakeMysqlDaemon (used by the fake event loop) +// - a 'done' channel (used to terminate the fake event loop) +type FakeTablet struct { + // Tablet and FakeMysqlDaemon are populated at NewFakeTablet time. + // We also create the RPCServer, so users can register more services + // before calling StartActionLoop(). + Tablet *topodatapb.Tablet + FakeMysqlDaemon *fakemysqldaemon.FakeMysqlDaemon + RPCServer *grpc.Server + + // The following fields are created when we start the event loop for + // the tablet, and closed / cleared when we stop it. + // The Listener is used by the gRPC server. + Agent *tabletmanager.ActionAgent + Listener net.Listener + + // These optional fields are used if the tablet also needs to + // listen on the 'vt' port. + StartHTTPServer bool + HTTPListener net.Listener + HTTPServer *http.Server +} + +// TabletOption is an interface for changing tablet parameters. +// It's a way to pass multiple parameters to NewFakeTablet without +// making it too cumbersome. +type TabletOption func(tablet *topodatapb.Tablet) + +// TabletKeyspaceShard is the option to set the tablet keyspace and shard +func TabletKeyspaceShard(t *testing.T, keyspace, shard string) TabletOption { + return func(tablet *topodatapb.Tablet) { + tablet.Keyspace = keyspace + shard, kr, err := topo.ValidateShardName(shard) + if err != nil { + t.Fatalf("cannot ValidateShardName value %v", shard) + } + tablet.Shard = shard + tablet.KeyRange = kr + } +} + +// NewFakeTablet creates the test tablet in the topology. 'uid' +// has to be between 0 and 99. All the tablet info will be derived +// from that. Look at the implementation if you need values. +// Use TabletOption implementations if you need to change values at creation. +// 'db' can be nil if the test doesn't use a database at all. 
+func NewFakeTablet(t *testing.T, wr *Wrangler, cell string, uid uint32, tabletType topodatapb.TabletType, db *fakesqldb.DB, options ...TabletOption) *FakeTablet { + if uid > 99 { + t.Fatalf("uid has to be between 0 and 99: %v", uid) + } + mysqlPort := int32(3300 + uid) + hostname := fmt.Sprintf("%v.%d", cell, uid) + tablet := &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: cell, Uid: uid}, + Hostname: hostname, + MysqlHostname: hostname, + PortMap: map[string]int32{ + "vt": int32(8100 + uid), + "grpc": int32(8200 + uid), + }, + Keyspace: "test_keyspace", + Shard: "0", + Type: tabletType, + } + topoproto.SetMysqlPort(tablet, mysqlPort) + for _, option := range options { + option(tablet) + } + if err := wr.InitTablet(context.Background(), tablet, false /* allowMasterOverride */, true /* createShardAndKeyspace */, false /* allowUpdate */); err != nil { + t.Fatalf("cannot create tablet %v: %v", uid, err) + } + + // create a FakeMysqlDaemon with the right information by default + fakeMysqlDaemon := fakemysqldaemon.NewFakeMysqlDaemon(db) + fakeMysqlDaemon.MysqlPort = mysqlPort + + return &FakeTablet{ + Tablet: tablet, + FakeMysqlDaemon: fakeMysqlDaemon, + RPCServer: grpc.NewServer(), + } +} + +// StartActionLoop will start the action loop for a fake tablet, +// using ft.FakeMysqlDaemon as the backing mysqld. +func (ft *FakeTablet) StartActionLoop(t *testing.T, wr *Wrangler) { + if ft.Agent != nil { + t.Fatalf("Agent for %v is already running", ft.Tablet.Alias) + } + + // Listen on a random port for gRPC. + var err error + ft.Listener, err = net.Listen("tcp", ":0") + if err != nil { + t.Fatalf("Cannot listen: %v", err) + } + gRPCPort := int32(ft.Listener.Addr().(*net.TCPAddr).Port) + + // If needed, listen on a random port for HTTP. 
+ vtPort := ft.Tablet.PortMap["vt"] + if ft.StartHTTPServer { + ft.HTTPListener, err = net.Listen("tcp", ":0") + if err != nil { + t.Fatalf("Cannot listen on http port: %v", err) + } + handler := http.NewServeMux() + ft.HTTPServer = &http.Server{ + Handler: handler, + } + go ft.HTTPServer.Serve(ft.HTTPListener) + vtPort = int32(ft.HTTPListener.Addr().(*net.TCPAddr).Port) + } + + // Create a test agent on that port, and re-read the record + // (it has new ports and IP). + ft.Agent = tabletmanager.NewTestActionAgent(context.Background(), wr.TopoServer(), ft.Tablet.Alias, vtPort, gRPCPort, ft.FakeMysqlDaemon, nil) + ft.Tablet = ft.Agent.Tablet() + + // Register the gRPC server, and starts listening. + grpctmserver.RegisterForTest(ft.RPCServer, ft.Agent) + go ft.RPCServer.Serve(ft.Listener) + + // And wait for it to serve, so we don't start using it before it's + // ready. + timeout := 5 * time.Second + step := 10 * time.Millisecond + c := tmclient.NewTabletManagerClient() + for timeout >= 0 { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + err := c.Ping(ctx, ft.Agent.Tablet()) + cancel() + if err == nil { + break + } + time.Sleep(step) + timeout -= step + } + if timeout < 0 { + panic("StartActionLoop failed.") + } +} + +// StopActionLoop will stop the Action Loop for the given FakeTablet +func (ft *FakeTablet) StopActionLoop(t *testing.T) { + if ft.Agent == nil { + t.Fatalf("Agent for %v is not running", ft.Tablet.Alias) + } + if ft.StartHTTPServer { + ft.HTTPListener.Close() + } + ft.Listener.Close() + ft.Agent.Stop() + ft.Agent = nil + ft.Listener = nil + ft.HTTPListener = nil +} + +// Target returns the keyspace/shard/type info of this tablet as Target. 
+func (ft *FakeTablet) Target() querypb.Target { + return querypb.Target{ + Keyspace: ft.Tablet.Keyspace, + Shard: ft.Tablet.Shard, + TabletType: ft.Tablet.Type, + } +} + +func init() { + // enforce we will use the right protocol (gRPC) in all unit tests + *tmclient.TabletManagerProtocol = "grpc" + *tabletconn.TabletProtocol = "grpc" +} diff --git a/go/vt/wrangler/migrate.go b/go/vt/wrangler/migrate.go index e59436ba6ab..b080a7bcc46 100644 --- a/go/vt/wrangler/migrate.go +++ b/go/vt/wrangler/migrate.go @@ -32,6 +32,7 @@ import ( "vitess.io/vitess/go/sync2" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/key" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" @@ -89,8 +90,11 @@ type miSource struct { journaled bool } -// MigrateWrites is a generic way of migrating write traffic for a resharding workflow. +// MigrateReads is a generic way of migrating read traffic for a resharding workflow. 
func (wr *Wrangler) MigrateReads(ctx context.Context, migrationType MigrationType, streams map[topo.KeyspaceShard][]uint32, cells []string, servedType topodatapb.TabletType, direction migrateDirection) error { + if servedType != topodatapb.TabletType_REPLICA && servedType != topodatapb.TabletType_RDONLY { + return fmt.Errorf("tablet type must be REPLICA or RDONLY: %v", servedType) + } mi, err := wr.buildMigrater(ctx, migrationType, streams) if err != nil { return err @@ -98,6 +102,13 @@ func (wr *Wrangler) MigrateReads(ctx context.Context, migrationType MigrationTyp if err := mi.validate(ctx); err != nil { return err } + + ctx, unlock, lockErr := wr.ts.LockKeyspace(ctx, mi.sourceKeyspace, "MigrateReads") + if lockErr != nil { + return lockErr + } + defer unlock(&err) + if mi.migrationType == MigrateTables { return mi.migrateTableReads(ctx, cells, servedType, direction) } @@ -113,6 +124,16 @@ func (wr *Wrangler) MigrateWrites(ctx context.Context, migrationType MigrationTy if err := mi.validate(ctx); err != nil { return err } + if err := mi.validateForWrite(ctx); err != nil { + return err + } + + ctx, unlock, lockErr := wr.ts.LockKeyspace(ctx, mi.sourceKeyspace, "MigrateWrites") + if lockErr != nil { + return lockErr + } + defer unlock(&err) + journalsExist, err := mi.checkJournals(ctx) if err != nil { return err @@ -182,48 +203,50 @@ func (wr *Wrangler) buildMigrater(ctx context.Context, migrationType MigrationTy if len(qr.Rows) < 1 || len(qr.Rows[0]) < 1 { return nil, fmt.Errorf("VReplication stream %d not found for %s:%s", int(uid), targetks.Keyspace, targetks.Shard) } - str := qr.Rows[0][0].ToString() - var binlogSource binlogdatapb.BinlogSource - if err := proto.UnmarshalText(str, &binlogSource); err != nil { - return nil, err - } - mi.targets[targetks].sources[uid] = &binlogSource - - sourceks := topo.KeyspaceShard{Keyspace: binlogSource.Keyspace, Shard: binlogSource.Shard} - if _, ok := mi.sources[sourceks]; !ok { - sourceShard, err := mi.wr.ts.GetShard(ctx, 
binlogSource.Keyspace, binlogSource.Shard) - if err != nil { - return nil, err - } - sourceMaster, err := mi.wr.ts.GetTablet(ctx, sourceShard.MasterAlias) - if err != nil { + for _, row := range qr.Rows { + str := row[0].ToString() + var binlogSource binlogdatapb.BinlogSource + if err := proto.UnmarshalText(str, &binlogSource); err != nil { return nil, err } - mi.sources[sourceks] = &miSource{ - shard: sourceShard, - master: sourceMaster, - } + mi.targets[targetks].sources[uid] = &binlogSource - if mi.tables == nil { - for _, rule := range binlogSource.Filter.Rules { - mi.tables = append(mi.tables, rule.Match) + sourceks := topo.KeyspaceShard{Keyspace: binlogSource.Keyspace, Shard: binlogSource.Shard} + if _, ok := mi.sources[sourceks]; !ok { + sourceShard, err := mi.wr.ts.GetShard(ctx, binlogSource.Keyspace, binlogSource.Shard) + if err != nil { + return nil, err } - sort.Strings(mi.tables) - } else { - var tables []string - for _, rule := range binlogSource.Filter.Rules { - tables = append(tables, rule.Match) + sourceMaster, err := mi.wr.ts.GetTablet(ctx, sourceShard.MasterAlias) + if err != nil { + return nil, err } - sort.Strings(tables) - if !reflect.DeepEqual(mi.tables, tables) { - return nil, fmt.Errorf("table lists are mismatched across streams: %v vs %v", mi.tables, tables) + mi.sources[sourceks] = &miSource{ + shard: sourceShard, + master: sourceMaster, + } + + if mi.tables == nil { + for _, rule := range binlogSource.Filter.Rules { + mi.tables = append(mi.tables, rule.Match) + } + sort.Strings(mi.tables) + } else { + var tables []string + for _, rule := range binlogSource.Filter.Rules { + tables = append(tables, rule.Match) + } + sort.Strings(tables) + if !reflect.DeepEqual(mi.tables, tables) { + return nil, fmt.Errorf("table lists are mismatched across streams: %v vs %v", mi.tables, tables) + } } - } - if mi.sourceKeyspace == "" { - mi.sourceKeyspace = sourceks.Keyspace - } else if mi.sourceKeyspace != sourceks.Keyspace { - return nil, 
fmt.Errorf("source keyspaces are mismatched across streams: %v vs %v", mi.sourceKeyspace, sourceks.Keyspace) + if mi.sourceKeyspace == "" { + mi.sourceKeyspace = sourceks.Keyspace + } else if mi.sourceKeyspace != sourceks.Keyspace { + return nil, fmt.Errorf("source keyspaces are mismatched across streams: %v vs %v", mi.sourceKeyspace, sourceks.Keyspace) + } } } } @@ -261,19 +284,26 @@ func (mi *migrater) validate(ctx context.Context) error { } } if mi.migrationType == MigrateTables { - // For table migration, all shards must be present. + // All shards must be present. if err := mi.compareShards(ctx, mi.sourceKeyspace, mi.sourceShards()); err != nil { return err } if err := mi.compareShards(ctx, mi.targetKeyspace, mi.targetShards()); err != nil { return err } - } else { - // For shard migration, source and target keyspace must match, and source and target shards must not match. + // Wildcard table names not allowed. + for _, table := range mi.tables { + if strings.HasPrefix(table, "/") { + return fmt.Errorf("cannot migrate streams with wild card table names: %v", table) + } + } + } else { // MigrateShards + // Source and target keyspace must match if mi.sourceKeyspace != mi.targetKeyspace { return fmt.Errorf("source and target keyspace must match: %v vs %v", mi.sourceKeyspace, mi.targetKeyspace) } - for sourceks, _ := range mi.sources { + // Source and target shards must not match. 
+ for sourceks := range mi.sources { if _, ok := mi.targets[sourceks]; ok { return fmt.Errorf("target shard matches a source shard: %v", sourceks) } @@ -282,6 +312,61 @@ func (mi *migrater) validate(ctx context.Context) error { return nil } +func (mi *migrater) validateForWrite(ctx context.Context) error { + if mi.migrationType == MigrateTables { + return mi.validateTableForWrite(ctx) + } + return mi.validateShardForWrite(ctx) +} + +func (mi *migrater) validateTableForWrite(ctx context.Context) error { + rules, err := mi.wr.getRoutingRules(ctx) + if err != nil { + return err + } + for _, table := range mi.tables { + for _, tabletType := range []topodatapb.TabletType{topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY} { + tt := strings.ToLower(tabletType.String()) + if rules[table+"@"+tt] == nil || rules[mi.targetKeyspace+"."+table+"@"+tt] == nil { + return fmt.Errorf("missing tablet type specific routing, read-only traffic must be migrated before migrating writes: %v", table) + } + } + } + return nil +} + +func (mi *migrater) validateShardForWrite(ctx context.Context) error { + srvKeyspaces, err := mi.wr.ts.GetSrvKeyspaceAllCells(ctx, mi.sourceKeyspace) + if err != nil { + return err + } + + // Checking one shard is enough. + var si *topo.ShardInfo + for _, source := range mi.sources { + si = source.shard + break + } + + for _, srvKeyspace := range srvKeyspaces { + var shardServedTypes []string + for _, partition := range srvKeyspace.GetPartitions() { + if partition.GetServedType() == topodatapb.TabletType_MASTER { + continue + } + for _, shardReference := range partition.GetShardReferences() { + if key.KeyRangeEqual(shardReference.GetKeyRange(), si.GetKeyRange()) { + shardServedTypes = append(shardServedTypes, partition.GetServedType().String()) + } + } + } + if len(shardServedTypes) > 0 { + return fmt.Errorf("cannot migrate MASTER away from %v/%v until everything else is migrated. 
Make sure that the following types are migrated first: %v", si.Keyspace(), si.ShardName(), strings.Join(shardServedTypes, ", ")) + } + } + return nil +} + func (mi *migrater) compareShards(ctx context.Context, keyspace string, sis []*topo.ShardInfo) error { var shards []string for _, si := range sis { @@ -304,20 +389,17 @@ func (mi *migrater) migrateTableReads(ctx context.Context, cells []string, serve if err != nil { return err } - var fromKeyspace, toKeyspace string - if direction == directionForward { - fromKeyspace, toKeyspace = mi.sourceKeyspace, mi.targetKeyspace - } else { - fromKeyspace, toKeyspace = mi.targetKeyspace, mi.sourceKeyspace - } + // We assume that the following rules were setup when the targets were created: + // table -> sourceKeyspace.table + // targetKeyspace.table -> sourceKeyspace.table tt := strings.ToLower(servedType.String()) for _, table := range mi.tables { - for _, fromt := range []string{ - table + "@" + tt, - fromKeyspace + "." + table + "@" + tt, - toKeyspace + "." + table + "@" + tt, - } { - rules[fromt] = []string{toKeyspace + "." + table} + if direction == directionForward { + rules[table+"@"+tt] = []string{mi.targetKeyspace + "." + table} + rules[mi.targetKeyspace+"."+table+"@"+tt] = []string{mi.targetKeyspace + "." 
+ table} + } else { + delete(rules, table+"@"+tt) + delete(rules, mi.targetKeyspace+"."+table+"@"+tt) } } if err := mi.wr.saveRoutingRules(ctx, rules); err != nil { @@ -460,12 +542,12 @@ func (mi *migrater) createJournals(ctx context.Context) error { for _, tsource := range target.sources { participantMap[topo.KeyspaceShard{Keyspace: tsource.Keyspace, Shard: tsource.Shard}] = true } - for ks := range participantMap { - journal.Participants = append(journal.Participants, &binlogdatapb.KeyspaceShard{ - Keyspace: ks.Keyspace, - Shard: ks.Shard, - }) - } + } + for ks := range participantMap { + journal.Participants = append(journal.Participants, &binlogdatapb.KeyspaceShard{ + Keyspace: ks.Keyspace, + Shard: ks.Shard, + }) } statement := fmt.Sprintf("insert into _vt.resharding_journal "+ "(id, db_name, val) "+ @@ -556,11 +638,19 @@ func (mi *migrater) changeTableRouting(ctx context.Context) error { if err != nil { return err } + // We assume that the following rules were setup when the targets were created: + // table -> sourceKeyspace.table + // targetKeyspace.table -> sourceKeyspace.table + // Additionally, MigrateReads would have added rules like this: + // table@replica -> targetKeyspace.table + // targetKeyspace.table@replica -> targetKeyspace.table + // After this step, only the following rules will be left: + // table -> targetKeyspace.table + // sourceKeyspace.table -> targetKeyspace.table for _, table := range mi.tables { for _, tabletType := range []topodatapb.TabletType{topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY} { tt := strings.ToLower(tabletType.String()) delete(rules, table+"@"+tt) - delete(rules, mi.sourceKeyspace+"."+table+"@"+tt) delete(rules, mi.targetKeyspace+"."+table+"@"+tt) } delete(rules, mi.targetKeyspace+"."+table) diff --git a/go/vt/wrangler/migrate_test.go b/go/vt/wrangler/migrate_test.go new file mode 100644 index 00000000000..8f11148f1a3 --- /dev/null +++ b/go/vt/wrangler/migrate_test.go @@ -0,0 +1,369 @@ +/* +Copyright 2019 
The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wrangler + +import ( + "fmt" + "strings" + "testing" + "time" + + "golang.org/x/net/context" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/logutil" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" + "vitess.io/vitess/go/vt/vttablet/tmclient" +) + +func TestShardMigrateReads(t *testing.T) { + ctx := context.Background() + ts := memorytopo.NewServer("cell1", "cell2") + wr := New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + + // Create cluster with "ks" as keyspace. -40,40- as serving, -80,80- as non-serving. 
+ source1Master := NewFakeTablet(t, wr, "cell1", 10, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks", "-40")) + source1Replica := NewFakeTablet(t, wr, "cell1", 11, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks", "-40")) + source1Rdonly := NewFakeTablet(t, wr, "cell1", 12, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks", "-40")) + + source2Master := NewFakeTablet(t, wr, "cell1", 20, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks", "40-")) + source2Replica := NewFakeTablet(t, wr, "cell1", 21, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks", "40-")) + source22Rdonly := NewFakeTablet(t, wr, "cell1", 22, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks", "40-")) + + dest1Master := NewFakeTablet(t, wr, "cell1", 30, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks", "-80")) + dest1Replica := NewFakeTablet(t, wr, "cell1", 31, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks", "-80")) + dest1Rdonly := NewFakeTablet(t, wr, "cell1", 32, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks", "-80")) + + dest2Master := NewFakeTablet(t, wr, "cell1", 40, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks", "80-")) + dest2Replica := NewFakeTablet(t, wr, "cell1", 41, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks", "80-")) + dest2Rdonly := NewFakeTablet(t, wr, "cell1", 42, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks", "80-")) + + vs := &vschemapb.Keyspace{Sharded: true} + if err := wr.ts.SaveVSchema(ctx, "ks", vs); err != nil { + t.Fatal(err) + } + if err := wr.ts.RebuildSrvVSchema(ctx, nil); err != nil { + t.Fatal(err) + } + + err := topotools.RebuildKeyspace(ctx, logutil.NewConsoleLogger(), ts, "ks", []string{"cell1"}) + if err != nil { + t.Fatalf("RebuildKeyspaceLocked failed: %v", err) + } + checkShardServedTypes(t, ts, "-40", 3) + checkShardServedTypes(t, ts, "40-", 3) + checkShardServedTypes(t, ts, "-80", 
0) + checkShardServedTypes(t, ts, "80-", 0) + + source1Replica.StartActionLoop(t, wr) + defer source1Replica.StopActionLoop(t) + source1Rdonly.StartActionLoop(t, wr) + defer source1Rdonly.StopActionLoop(t) + source1Master.StartActionLoop(t, wr) + defer source1Master.StopActionLoop(t) + + source2Replica.StartActionLoop(t, wr) + defer source2Replica.StopActionLoop(t) + source22Rdonly.StartActionLoop(t, wr) + defer source22Rdonly.StopActionLoop(t) + source2Master.StartActionLoop(t, wr) + defer source2Master.StopActionLoop(t) + + dest1Replica.StartActionLoop(t, wr) + defer dest1Replica.StopActionLoop(t) + dest1Rdonly.StartActionLoop(t, wr) + defer dest1Rdonly.StopActionLoop(t) + dest1Master.StartActionLoop(t, wr) + defer dest1Master.StopActionLoop(t) + + dest2Replica.StartActionLoop(t, wr) + defer dest2Replica.StopActionLoop(t) + dest2Rdonly.StartActionLoop(t, wr) + defer dest2Rdonly.StopActionLoop(t) + dest2Master.StartActionLoop(t, wr) + defer dest2Master.StopActionLoop(t) + + // Override with a fake VREngine after Agent is initialized in action loop. 
+ dbDest1Client := newFakeDBClient() + dbClientFactory1 := func() binlogplayer.DBClient { return dbDest1Client } + dest1Master.Agent.VREngine = vreplication.NewEngine(ts, "", dest1Master.FakeMysqlDaemon, dbClientFactory1, dbDest1Client.DBName()) + dbDest1Client.setResult("use _vt", &sqltypes.Result{}, nil) + dbDest1Client.setResult("select * from _vt.vreplication where db_name='db'", &sqltypes.Result{}, nil) + if err := dest1Master.Agent.VREngine.Open(ctx); err != nil { + t.Fatal(err) + } + + dbDest2Client := newFakeDBClient() + dbClientFactory2 := func() binlogplayer.DBClient { return dbDest2Client } + dest2Master.Agent.VREngine = vreplication.NewEngine(ts, "", dest2Master.FakeMysqlDaemon, dbClientFactory2, dbDest2Client.DBName()) + dbDest2Client.setResult("use _vt", &sqltypes.Result{}, nil) + dbDest2Client.setResult("select * from _vt.vreplication where db_name='db'", &sqltypes.Result{}, nil) + if err := dest2Master.Agent.VREngine.Open(ctx); err != nil { + t.Fatal(err) + } + + dbSource1Client := newFakeDBClient() + dbClientFactory3 := func() binlogplayer.DBClient { return dbSource1Client } + source1Master.Agent.VREngine = vreplication.NewEngine(ts, "", source1Master.FakeMysqlDaemon, dbClientFactory3, dbSource1Client.DBName()) + dbSource1Client.setResult("use _vt", &sqltypes.Result{}, nil) + dbSource1Client.setResult("select * from _vt.vreplication where db_name='db'", &sqltypes.Result{}, nil) + if err := source1Master.Agent.VREngine.Open(ctx); err != nil { + t.Fatal(err) + } + + dbSource2Client := newFakeDBClient() + dbClientFactory4 := func() binlogplayer.DBClient { return dbSource2Client } + source2Master.Agent.VREngine = vreplication.NewEngine(ts, "", source2Master.FakeMysqlDaemon, dbClientFactory4, dbSource2Client.DBName()) + dbSource2Client.setResult("use _vt", &sqltypes.Result{}, nil) + dbSource2Client.setResult("select * from _vt.vreplication where db_name='db'", &sqltypes.Result{}, nil) + if err := source2Master.Agent.VREngine.Open(ctx); err != nil { + 
t.Fatal(err) + } + + bls1 := &binlogdatapb.BinlogSource{ + Keyspace: "ks", + Shard: "-40", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + Filter: "-80", + }}, + }, + } + dbDest1Client.setResult("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "source", + "varchar", + ), + fmt.Sprintf("%v", bls1), + ), nil) + bls2 := &binlogdatapb.BinlogSource{ + Keyspace: "ks", + Shard: "40-", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + Filter: "-80", + }}, + }, + } + dbDest1Client.setResult("select source from _vt.vreplication where id = 2", sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "source", + "varchar", + ), + fmt.Sprintf("%v", bls2), + ), nil) + bls3 := &binlogdatapb.BinlogSource{ + Keyspace: "ks", + Shard: "-40", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + Filter: "80-", + }}, + }, + } + dbDest2Client.setResult("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "source", + "varchar", + ), + fmt.Sprintf("%v", bls3), + ), nil) + bls4 := &binlogdatapb.BinlogSource{ + Keyspace: "ks", + Shard: "40-", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + Filter: "80-", + }}, + }, + } + dbDest2Client.setResult("select source from _vt.vreplication where id = 2", sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "source", + "varchar", + ), + fmt.Sprintf("%v", bls4), + ), nil) + + streams := map[topo.KeyspaceShard][]uint32{ + {Keyspace: "ks", Shard: "-80"}: {1, 2}, + {Keyspace: "ks", Shard: "80-"}: {1, 2}, + } + + err = wr.MigrateReads(ctx, MigrateShards, streams, nil, topodatapb.TabletType_RDONLY, directionForward) + if err != nil { + t.Fatal(err) + } + checkShardServedTypes(t, ts, "-40", 2) + checkShardServedTypes(t, ts, "40-", 2) + checkShardServedTypes(t, ts, "-80", 1) + checkShardServedTypes(t, ts, "80-", 1) + + err = 
wr.MigrateReads(ctx, MigrateShards, streams, nil, topodatapb.TabletType_REPLICA, directionForward) + if err != nil { + t.Fatal(err) + } + checkShardServedTypes(t, ts, "-40", 1) + checkShardServedTypes(t, ts, "40-", 1) + checkShardServedTypes(t, ts, "-80", 2) + checkShardServedTypes(t, ts, "80-", 2) + + err = wr.MigrateReads(ctx, MigrateShards, streams, nil, topodatapb.TabletType_RDONLY, directionBackward) + if err != nil { + t.Fatal(err) + } + checkShardServedTypes(t, ts, "-40", 2) + checkShardServedTypes(t, ts, "40-", 2) + checkShardServedTypes(t, ts, "-80", 1) + checkShardServedTypes(t, ts, "80-", 1) + + err = wr.MigrateReads(ctx, MigrateShards, streams, nil, topodatapb.TabletType_MASTER, directionForward) + want := "tablet type must be REPLICA or RDONLY: MASTER" + if err == nil || err.Error() != want { + t.Errorf("MigrateReads(master) err: %v, want %v", err, want) + } + + err = wr.MigrateWrites(ctx, MigrateShards, streams, 1*time.Second) + want = "cannot migrate MASTER away" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateWrites err: %v, want %v", err, want) + } + + err = wr.MigrateReads(ctx, MigrateShards, streams, nil, topodatapb.TabletType_RDONLY, directionForward) + if err != nil { + t.Fatal(err) + } + + source1Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 892, + }, + }, + } + source2Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 892, + }, + }, + } + dest1Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 893, + }, + }, + } + dest2Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 893, + }, + }, + } + + 
// Check for journals. + dbSource1Client.setResult("select 1 from _vt.resharding_journal where id = 4076872238118445101", &sqltypes.Result{}, nil) + dbSource2Client.setResult("select 1 from _vt.resharding_journal where id = 4076872238118445101", &sqltypes.Result{}, nil) + + // Wait for position: Reads current state, updates to Stopped, and re-reads. + state := sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "pos|state|message", + "varchar|varchar|varchar"), + "MariaDB/5-456-892|Running|", + ) + dbDest1Client.setResult("select pos, state, message from _vt.vreplication where id=1", state, nil) + dbDest2Client.setResult("select pos, state, message from _vt.vreplication where id=1", state, nil) + dbDest1Client.setResult("select pos, state, message from _vt.vreplication where id=2", state, nil) + dbDest2Client.setResult("select pos, state, message from _vt.vreplication where id=2", state, nil) + dbDest1Client.setResult("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) + dbDest2Client.setResult("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) + dbDest1Client.setResult("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 2", &sqltypes.Result{}, nil) + dbDest2Client.setResult("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 2", &sqltypes.Result{}, nil) + stopped := sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|state", + "int64|varchar"), + "1|Stopped", + ) + dbDest1Client.setResult("select * from _vt.vreplication where id = 1", stopped, nil) + dbDest2Client.setResult("select * from _vt.vreplication where id = 1", stopped, nil) + dbDest1Client.setResult("select * from _vt.vreplication where id = 2", stopped, nil) + dbDest2Client.setResult("select * from _vt.vreplication where id = 2", stopped, nil) + + // Create journals. 
+	journal := "insert into _vt.resharding_journal.*4076872238118445101.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*shard_gtids.*80.*MariaDB/5-456-893.*participants.*40.*40"
+	dbSource1Client.setResultRE(journal, &sqltypes.Result{}, nil)
+	dbSource2Client.setResultRE(journal, &sqltypes.Result{}, nil)
+
+	// Create reverse replications.
+	dbSource1Client.setResultRE("insert into _vt.vreplication.*-80.*-40.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil)
+	dbSource1Client.setResultRE("insert into _vt.vreplication.*80-.*-40.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 2}, nil)
+	dbSource2Client.setResultRE("insert into _vt.vreplication.*-80.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil)
+	dbSource2Client.setResultRE("insert into _vt.vreplication.*80-.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 2}, nil)
+	dbSource1Client.setResult("select * from _vt.vreplication where id = 1", stopped, nil)
+	dbSource2Client.setResult("select * from _vt.vreplication where id = 1", stopped, nil)
+	dbSource1Client.setResult("select * from _vt.vreplication where id = 2", stopped, nil)
+	dbSource2Client.setResult("select * from _vt.vreplication where id = 2", stopped, nil)
+
+	// Delete the target replications.
+ dbDest1Client.setResult("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + dbDest2Client.setResult("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + dbDest1Client.setResult("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) + dbDest2Client.setResult("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) + err = wr.MigrateWrites(ctx, MigrateShards, streams, 1*time.Second) + if err != nil { + t.Fatal(err) + } +} + +func checkShardServedTypes(t *testing.T, ts *topo.Server, shard string, expected int) { + t.Helper() + ctx := context.Background() + si, err := ts.GetShard(ctx, "ks", shard) + if err != nil { + t.Fatalf("GetShard failed: %v", err) + } + + servedTypes, err := ts.GetShardServingTypes(ctx, si) + if err != nil { + t.Fatalf("GetShard failed: %v", err) + } + + if len(servedTypes) != expected { + t.Errorf("shard %v has wrong served types: got: %v, expected: %v", shard, len(servedTypes), expected) + } +} From 82cfaf115fee83385f271bd3e81b02a41004d6e9 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Sat, 6 Jul 2019 09:15:23 -0700 Subject: [PATCH 04/17] migrater: quick cleanup Signed-off-by: Sugu Sougoumarane --- go/vt/binlog/binlogplayer/mock_dbclient.go | 31 +++++-------------- ...fake_dbclient.go => fake_dbclient_test.go} | 0 go/vt/wrangler/{migrate.go => migrater.go} | 0 .../{migrate_test.go => migrater_test.go} | 0 4 files changed, 7 insertions(+), 24 deletions(-) rename go/vt/wrangler/{fake_dbclient.go => fake_dbclient_test.go} (100%) rename go/vt/wrangler/{migrate.go => migrater.go} (100%) rename go/vt/wrangler/{migrate_test.go => migrater_test.go} (100%) diff --git a/go/vt/binlog/binlogplayer/mock_dbclient.go b/go/vt/binlog/binlogplayer/mock_dbclient.go index ecf1d7a299b..fd606c7d482 100644 --- a/go/vt/binlog/binlogplayer/mock_dbclient.go +++ b/go/vt/binlog/binlogplayer/mock_dbclient.go @@ -17,7 +17,6 @@ limitations under the License. 
package binlogplayer import ( - "fmt" "regexp" "testing" "time" @@ -85,17 +84,12 @@ func (dc *MockDBClient) ExpectRequestRE(queryRE string, result *sqltypes.Result, // dc.t.Fatalf is executed on 1 second timeout. Wait should // not be called concurrently with ExpectRequest. func (dc *MockDBClient) Wait() { - if dc.t != nil { - dc.t.Helper() - } + dc.t.Helper() select { case <-dc.done: return case <-time.After(5 * time.Second): - if dc.t != nil { - dc.t.Fatalf("timeout waiting for requests, want: %v", dc.expect[dc.currentResult].query) - } - panic("timeout") + dc.t.Fatalf("timeout waiting for requests, want: %v", dc.expect[dc.currentResult].query) } } @@ -133,30 +127,19 @@ func (dc *MockDBClient) Close() { // ExecuteFetch is part of the DBClient interface func (dc *MockDBClient) ExecuteFetch(query string, maxrows int) (qr *sqltypes.Result, err error) { - if dc.t != nil { - dc.t.Helper() - dc.t.Logf("DBClient query: %v", query) - } + dc.t.Helper() + dc.t.Logf("DBClient query: %v", query) if dc.currentResult >= len(dc.expect) { - if dc.t != nil { - dc.t.Fatalf("DBClientMock: query: %s, no more requests are expected", query) - } - return nil, fmt.Errorf("DBClientMock: query: %s, no more requests are expected", query) + dc.t.Fatalf("DBClientMock: query: %s, no more requests are expected", query) } result := dc.expect[dc.currentResult] if result.re == nil { if query != result.query { - if dc.t != nil { - dc.t.Fatalf("DBClientMock: query: %s, want %s", query, result.query) - } - return nil, fmt.Errorf("DBClientMock: query: %s, want %s", query, result.query) + dc.t.Fatalf("DBClientMock: query: %s, want %s", query, result.query) } } else { if !result.re.MatchString(query) { - if dc.t != nil { - dc.t.Fatalf("DBClientMock: query: %s, must match %s", query, result.query) - } - return nil, fmt.Errorf("DBClientMock: query: %s, must match %s", query, result.query) + dc.t.Fatalf("DBClientMock: query: %s, must match %s", query, result.query) } } dc.currentResult++ diff --git 
a/go/vt/wrangler/fake_dbclient.go b/go/vt/wrangler/fake_dbclient_test.go similarity index 100% rename from go/vt/wrangler/fake_dbclient.go rename to go/vt/wrangler/fake_dbclient_test.go diff --git a/go/vt/wrangler/migrate.go b/go/vt/wrangler/migrater.go similarity index 100% rename from go/vt/wrangler/migrate.go rename to go/vt/wrangler/migrater.go diff --git a/go/vt/wrangler/migrate_test.go b/go/vt/wrangler/migrater_test.go similarity index 100% rename from go/vt/wrangler/migrate_test.go rename to go/vt/wrangler/migrater_test.go From fa295dec75e847a9eb293d6d41ef8b4c83e7cf94 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Sat, 6 Jul 2019 20:31:12 -0700 Subject: [PATCH 05/17] migrater: more tests Signed-off-by: Sugu Sougoumarane --- go/vt/wrangler/migrater.go | 15 +- go/vt/wrangler/migrater_test.go | 680 ++++++++++++++++++++++++++++---- 2 files changed, 620 insertions(+), 75 deletions(-) diff --git a/go/vt/wrangler/migrater.go b/go/vt/wrangler/migrater.go index b080a7bcc46..7878b34cd51 100644 --- a/go/vt/wrangler/migrater.go +++ b/go/vt/wrangler/migrater.go @@ -128,11 +128,19 @@ func (wr *Wrangler) MigrateWrites(ctx context.Context, migrationType MigrationTy return err } - ctx, unlock, lockErr := wr.ts.LockKeyspace(ctx, mi.sourceKeyspace, "MigrateWrites") + ctx, sourceUnlock, lockErr := wr.ts.LockKeyspace(ctx, mi.sourceKeyspace, "MigrateWrites") if lockErr != nil { return lockErr } - defer unlock(&err) + defer sourceUnlock(&err) + if mi.targetKeyspace != mi.sourceKeyspace { + tctx, targetUnlock, lockErr := wr.ts.LockKeyspace(ctx, mi.targetKeyspace, "MigrateWrites") + if lockErr != nil { + return lockErr + } + ctx = tctx + defer targetUnlock(&err) + } journalsExist, err := mi.checkJournals(ctx) if err != nil { @@ -524,7 +532,7 @@ func (mi *migrater) createJournals(ctx context.Context) error { } participantMap := make(map[topo.KeyspaceShard]bool) for targetks, target := range mi.targets { - found := true + found := false for _, tsource := range target.sources { 
if sourceks == (topo.KeyspaceShard{Keyspace: tsource.Keyspace, Shard: tsource.Shard}) { found = true @@ -655,7 +663,6 @@ func (mi *migrater) changeTableRouting(ctx context.Context) error { } delete(rules, mi.targetKeyspace+"."+table) rules[table] = []string{mi.targetKeyspace + "." + table} - rules[mi.sourceKeyspace+"."+table] = []string{mi.targetKeyspace + "." + table} } if err := mi.wr.saveRoutingRules(ctx, rules); err != nil { return err diff --git a/go/vt/wrangler/migrater_test.go b/go/vt/wrangler/migrater_test.go index 8f11148f1a3..392ba27404e 100644 --- a/go/vt/wrangler/migrater_test.go +++ b/go/vt/wrangler/migrater_test.go @@ -18,6 +18,7 @@ package wrangler import ( "fmt" + "reflect" "strings" "testing" "time" @@ -37,7 +38,428 @@ import ( "vitess.io/vitess/go/vt/vttablet/tmclient" ) -func TestShardMigrateReads(t *testing.T) { +func TestTableMigrate(t *testing.T) { + ctx := context.Background() + ts := memorytopo.NewServer("cell1", "cell2") + wr := New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + + // Create cluster: ks1:-40,40- and ks2:-80,80-. 
+ source1Master := NewFakeTablet(t, wr, "cell1", 10, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks1", "-40")) + source1Replica := NewFakeTablet(t, wr, "cell1", 11, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks1", "-40")) + source1Rdonly := NewFakeTablet(t, wr, "cell1", 12, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks1", "-40")) + + source2Master := NewFakeTablet(t, wr, "cell1", 20, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks1", "40-")) + source2Replica := NewFakeTablet(t, wr, "cell1", 21, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks1", "40-")) + source22Rdonly := NewFakeTablet(t, wr, "cell1", 22, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks1", "40-")) + + dest1Master := NewFakeTablet(t, wr, "cell1", 30, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks2", "-80")) + dest1Replica := NewFakeTablet(t, wr, "cell1", 31, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks2", "-80")) + dest1Rdonly := NewFakeTablet(t, wr, "cell1", 32, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks2", "-80")) + + dest2Master := NewFakeTablet(t, wr, "cell1", 40, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks2", "80-")) + dest2Replica := NewFakeTablet(t, wr, "cell1", 41, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks2", "80-")) + dest2Rdonly := NewFakeTablet(t, wr, "cell1", 42, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks2", "80-")) + + vs := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "hash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "hash", + }}, + }, + "t2": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "hash", + }}, + }, + }, + } + if err := wr.ts.SaveVSchema(ctx, "ks1", vs); err != nil { + t.Fatal(err) + } + if err := 
wr.ts.SaveVSchema(ctx, "ks2", vs); err != nil { + t.Fatal(err) + } + if err := wr.ts.RebuildSrvVSchema(ctx, nil); err != nil { + t.Fatal(err) + } + err := topotools.RebuildKeyspace(ctx, logutil.NewConsoleLogger(), ts, "ks1", []string{"cell1"}) + if err != nil { + t.Fatal(err) + } + err = topotools.RebuildKeyspace(ctx, logutil.NewConsoleLogger(), ts, "ks2", []string{"cell1"}) + if err != nil { + t.Fatal(err) + } + checkServedTypes(t, ts, "ks1:-40", 3) + checkServedTypes(t, ts, "ks1:40-", 3) + checkServedTypes(t, ts, "ks2:-80", 3) + checkServedTypes(t, ts, "ks2:80-", 3) + + source1Replica.StartActionLoop(t, wr) + defer source1Replica.StopActionLoop(t) + source1Rdonly.StartActionLoop(t, wr) + defer source1Rdonly.StopActionLoop(t) + source1Master.StartActionLoop(t, wr) + defer source1Master.StopActionLoop(t) + + source2Replica.StartActionLoop(t, wr) + defer source2Replica.StopActionLoop(t) + source22Rdonly.StartActionLoop(t, wr) + defer source22Rdonly.StopActionLoop(t) + source2Master.StartActionLoop(t, wr) + defer source2Master.StopActionLoop(t) + + dest1Replica.StartActionLoop(t, wr) + defer dest1Replica.StopActionLoop(t) + dest1Rdonly.StartActionLoop(t, wr) + defer dest1Rdonly.StopActionLoop(t) + dest1Master.StartActionLoop(t, wr) + defer dest1Master.StopActionLoop(t) + + dest2Replica.StartActionLoop(t, wr) + defer dest2Replica.StopActionLoop(t) + dest2Rdonly.StartActionLoop(t, wr) + defer dest2Rdonly.StopActionLoop(t) + dest2Master.StartActionLoop(t, wr) + defer dest2Master.StopActionLoop(t) + + // Override with a fake VREngine after Agent is initialized in action loop. 
+ dbDest1Client := newFakeDBClient() + dbClientFactory1 := func() binlogplayer.DBClient { return dbDest1Client } + dest1Master.Agent.VREngine = vreplication.NewEngine(ts, "", dest1Master.FakeMysqlDaemon, dbClientFactory1, dbDest1Client.DBName()) + dbDest1Client.setResult("use _vt", &sqltypes.Result{}, nil) + dbDest1Client.setResult("select * from _vt.vreplication where db_name='db'", &sqltypes.Result{}, nil) + if err := dest1Master.Agent.VREngine.Open(ctx); err != nil { + t.Fatal(err) + } + + dbDest2Client := newFakeDBClient() + dbClientFactory2 := func() binlogplayer.DBClient { return dbDest2Client } + dest2Master.Agent.VREngine = vreplication.NewEngine(ts, "", dest2Master.FakeMysqlDaemon, dbClientFactory2, dbDest2Client.DBName()) + dbDest2Client.setResult("use _vt", &sqltypes.Result{}, nil) + dbDest2Client.setResult("select * from _vt.vreplication where db_name='db'", &sqltypes.Result{}, nil) + if err := dest2Master.Agent.VREngine.Open(ctx); err != nil { + t.Fatal(err) + } + + dbSource1Client := newFakeDBClient() + dbClientFactory3 := func() binlogplayer.DBClient { return dbSource1Client } + source1Master.Agent.VREngine = vreplication.NewEngine(ts, "", source1Master.FakeMysqlDaemon, dbClientFactory3, dbSource1Client.DBName()) + dbSource1Client.setResult("use _vt", &sqltypes.Result{}, nil) + dbSource1Client.setResult("select * from _vt.vreplication where db_name='db'", &sqltypes.Result{}, nil) + if err := source1Master.Agent.VREngine.Open(ctx); err != nil { + t.Fatal(err) + } + + dbSource2Client := newFakeDBClient() + dbClientFactory4 := func() binlogplayer.DBClient { return dbSource2Client } + source2Master.Agent.VREngine = vreplication.NewEngine(ts, "", source2Master.FakeMysqlDaemon, dbClientFactory4, dbSource2Client.DBName()) + dbSource2Client.setResult("use _vt", &sqltypes.Result{}, nil) + dbSource2Client.setResult("select * from _vt.vreplication where db_name='db'", &sqltypes.Result{}, nil) + if err := source2Master.Agent.VREngine.Open(ctx); err != nil { + 
t.Fatal(err) + } + + // Emulate the following replication streams (many-to-many table migration): + // -40 -> -80 + // 40- -> -80 + // 40- -> 80- + // -40 will only have one target, and 80- will have only one source. + bls1 := &binlogdatapb.BinlogSource{ + Keyspace: "ks1", + Shard: "-40", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1 where in_keyrange('-80')", + }, { + Match: "t2", + Filter: "select * from t2 where in_keyrange('-80')", + }}, + }, + } + dbDest1Client.setResult("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "source", + "varchar"), + fmt.Sprintf("%v", bls1), + ), nil) + bls2 := &binlogdatapb.BinlogSource{ + Keyspace: "ks1", + Shard: "40-", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1 where in_keyrange('-80')", + }, { + Match: "t2", + Filter: "select * from t2 where in_keyrange('-80')", + }}, + }, + } + dbDest1Client.setResult("select source from _vt.vreplication where id = 2", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "source", + "varchar"), + fmt.Sprintf("%v", bls2), + ), nil) + bls3 := &binlogdatapb.BinlogSource{ + Keyspace: "ks1", + Shard: "40-", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1 where in_keyrange('80-')", + }, { + Match: "t2", + Filter: "select * from t2 where in_keyrange('80-')", + }}, + }, + } + dbDest2Client.setResult("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "source", + "varchar"), + fmt.Sprintf("%v", bls3), + ), nil) + + if err := wr.saveRoutingRules(ctx, map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + }); err != nil { + t.Fatal(err) + } + if err := wr.ts.RebuildSrvVSchema(ctx, nil); err != nil { + t.Fatal(err) + } + + streams := map[topo.KeyspaceShard][]uint32{ + 
{Keyspace: "ks2", Shard: "-80"}: {1, 2}, + {Keyspace: "ks2", Shard: "80-"}: {1}, + } + + err = wr.MigrateReads(ctx, MigrateTables, streams, []string{"cell1"}, topodatapb.TabletType_RDONLY, directionForward) + if err != nil { + t.Fatal(err) + } + checkCellRouting(t, wr, "cell1", map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + "t1@rdonly": {"ks2.t1"}, + "ks2.t1@rdonly": {"ks2.t1"}, + "t2@rdonly": {"ks2.t2"}, + "ks2.t2@rdonly": {"ks2.t2"}, + }) + checkCellRouting(t, wr, "cell2", map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + }) + + // Migrate is additive. cell2 also migrates rdonly. + err = wr.MigrateReads(ctx, MigrateTables, streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionForward) + if err != nil { + t.Fatal(err) + } + checkCellRouting(t, wr, "cell1", map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + "t1@rdonly": {"ks2.t1"}, + "ks2.t1@rdonly": {"ks2.t1"}, + "t2@rdonly": {"ks2.t2"}, + "ks2.t2@rdonly": {"ks2.t2"}, + }) + checkCellRouting(t, wr, "cell2", map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + "t1@rdonly": {"ks2.t1"}, + "ks2.t1@rdonly": {"ks2.t1"}, + "t2@rdonly": {"ks2.t2"}, + "ks2.t2@rdonly": {"ks2.t2"}, + "t1@replica": {"ks2.t1"}, + "ks2.t1@replica": {"ks2.t1"}, + "t2@replica": {"ks2.t2"}, + "ks2.t2@replica": {"ks2.t2"}, + }) + + err = wr.MigrateReads(ctx, MigrateTables, streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionBackward) + if err != nil { + t.Fatal(err) + } + checkRouting(t, wr, map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + "t1@rdonly": {"ks2.t1"}, + "ks2.t1@rdonly": {"ks2.t1"}, + "t2@rdonly": {"ks2.t2"}, + "ks2.t2@rdonly": {"ks2.t2"}, + }) + + err = wr.MigrateReads(ctx, MigrateTables, streams, nil, 
topodatapb.TabletType_REPLICA, directionForward) + if err != nil { + t.Fatal(err) + } + checkRouting(t, wr, map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + "t1@rdonly": {"ks2.t1"}, + "ks2.t1@rdonly": {"ks2.t1"}, + "t2@rdonly": {"ks2.t2"}, + "ks2.t2@rdonly": {"ks2.t2"}, + "t1@replica": {"ks2.t1"}, + "ks2.t1@replica": {"ks2.t1"}, + "t2@replica": {"ks2.t2"}, + "ks2.t2@replica": {"ks2.t2"}, + }) + + err = wr.MigrateReads(ctx, MigrateTables, streams, nil, topodatapb.TabletType_RDONLY, directionBackward) + if err != nil { + t.Fatal(err) + } + checkRouting(t, wr, map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + "t1@replica": {"ks2.t1"}, + "ks2.t1@replica": {"ks2.t1"}, + "t2@replica": {"ks2.t2"}, + "ks2.t2@replica": {"ks2.t2"}, + }) + + err = wr.MigrateReads(ctx, MigrateTables, streams, nil, topodatapb.TabletType_MASTER, directionForward) + want := "tablet type must be REPLICA or RDONLY: MASTER" + if err == nil || err.Error() != want { + t.Errorf("MigrateReads(master) err: %v, want %v", err, want) + } + + err = wr.MigrateWrites(ctx, MigrateTables, streams, 1*time.Second) + want = "missing tablet type specific routing, read-only traffic must be migrated before migrating writes" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateWrites err: %v, want %v", err, want) + } + + err = wr.MigrateReads(ctx, MigrateTables, streams, nil, topodatapb.TabletType_RDONLY, directionForward) + if err != nil { + t.Fatal(err) + } + + source1Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 892, + }, + }, + } + source2Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 892, + }, + }, + } + dest1Master.FakeMysqlDaemon.CurrentMasterPosition 
= mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 893, + }, + }, + } + dest2Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 893, + }, + }, + } + + // Check for journals. + dbSource1Client.setResult("select 1 from _vt.resharding_journal where id = 445516443381867838", &sqltypes.Result{}, nil) + dbSource2Client.setResult("select 1 from _vt.resharding_journal where id = 445516443381867838", &sqltypes.Result{}, nil) + + // Wait for position: Reads current state, updates to Stopped, and re-reads. + state := sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "pos|state|message", + "varchar|varchar|varchar"), + "MariaDB/5-456-892|Running|", + ) + dbDest1Client.setResult("select pos, state, message from _vt.vreplication where id=1", state, nil) + dbDest2Client.setResult("select pos, state, message from _vt.vreplication where id=1", state, nil) + dbDest1Client.setResult("select pos, state, message from _vt.vreplication where id=2", state, nil) + dbDest1Client.setResult("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) + dbDest2Client.setResult("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) + dbDest1Client.setResult("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 2", &sqltypes.Result{}, nil) + stopped := sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|state", + "int64|varchar"), + "1|Stopped", + ) + dbDest1Client.setResult("select * from _vt.vreplication where id = 1", stopped, nil) + dbDest2Client.setResult("select * from _vt.vreplication where id = 1", stopped, nil) + dbDest1Client.setResult("select * from _vt.vreplication where id = 2", stopped, nil) + + // Create journals. 
+ journal1 := "insert into _vt.resharding_journal.*445516443381867838.*tables.*t1.*t2.*local_position.*MariaDB/5-456-892.*shard_gtids.*-80.*MariaDB/5-456-893.*participants.*40.*40" + dbSource1Client.setResultRE(journal1, &sqltypes.Result{}, nil) + journal2 := "insert into _vt.resharding_journal.*445516443381867838.*tables.*t1.*t2.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*80.*participants.*40.*40" + dbSource2Client.setResultRE(journal2, &sqltypes.Result{}, nil) + + // Create reverse replications. + dbSource1Client.setResultRE("insert into _vt.vreplication.*ks2.*-80.*t1.*in_keyrange.*c1.*hash.*-40.*t2.*-40.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) + dbSource2Client.setResultRE("insert into _vt.vreplication.*ks2.*-80.*t1.*in_keyrange.*c1.*hash.*40-.*t2.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) + dbSource2Client.setResultRE("insert into _vt.vreplication.*ks2.*80-.*t1.*in_keyrange.*c1.*hash.*40-.*t2.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) + dbSource1Client.setResult("select * from _vt.vreplication where id = 1", stopped, nil) + dbSource2Client.setResult("select * from _vt.vreplication where id = 1", stopped, nil) + dbSource2Client.setResult("select * from _vt.vreplication where id = 2", stopped, nil) + + // Delete the target replications.
+ dbDest1Client.setResult("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + dbDest2Client.setResult("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + dbDest1Client.setResult("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) + + err = wr.MigrateWrites(ctx, MigrateTables, streams, 1*time.Second) + if err != nil { + t.Fatal(err) + } + + checkRouting(t, wr, map[string][]string{ + "t1": {"ks2.t1"}, + "t2": {"ks2.t2"}, + }) + checkBlacklist(t, ts, "ks1:-40", []string{"t1", "t2"}) + checkBlacklist(t, ts, "ks1:40-", []string{"t1", "t2"}) + checkBlacklist(t, ts, "ks2:-80", nil) + checkBlacklist(t, ts, "ks2:80-", nil) +} + +func TestShardMigrate(t *testing.T) { ctx := context.Background() ts := memorytopo.NewServer("cell1", "cell2") wr := New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) @@ -66,15 +488,14 @@ func TestShardMigrateReads(t *testing.T) { if err := wr.ts.RebuildSrvVSchema(ctx, nil); err != nil { t.Fatal(err) } - - err := topotools.RebuildKeyspace(ctx, logutil.NewConsoleLogger(), ts, "ks", []string{"cell1"}) + err := topotools.RebuildKeyspace(ctx, logutil.NewConsoleLogger(), ts, "ks", nil) if err != nil { - t.Fatalf("RebuildKeyspaceLocked failed: %v", err) + t.Fatal(err) } - checkShardServedTypes(t, ts, "-40", 3) - checkShardServedTypes(t, ts, "40-", 3) - checkShardServedTypes(t, ts, "-80", 0) - checkShardServedTypes(t, ts, "80-", 0) + checkServedTypes(t, ts, "ks:-40", 3) + checkServedTypes(t, ts, "ks:40-", 3) + checkServedTypes(t, ts, "ks:-80", 0) + checkServedTypes(t, ts, "ks:80-", 0) source1Replica.StartActionLoop(t, wr) defer source1Replica.StopActionLoop(t) @@ -141,6 +562,11 @@ func TestShardMigrateReads(t *testing.T) { t.Fatal(err) } + // Emulate the following replication streams (simultaneous split and merge): + // -40 -> -80 + // 40- -> -80 + // 40- -> 80- + // -40 will only have one target, and 80- will have only one source. 
bls1 := &binlogdatapb.BinlogSource{ Keyspace: "ks", Shard: "-40", @@ -151,11 +577,9 @@ func TestShardMigrateReads(t *testing.T) { }}, }, } - dbDest1Client.setResult("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "source", - "varchar", - ), + dbDest1Client.setResult("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "source", + "varchar"), fmt.Sprintf("%v", bls1), ), nil) bls2 := &binlogdatapb.BinlogSource{ @@ -168,31 +592,12 @@ func TestShardMigrateReads(t *testing.T) { }}, }, } - dbDest1Client.setResult("select source from _vt.vreplication where id = 2", sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "source", - "varchar", - ), + dbDest1Client.setResult("select source from _vt.vreplication where id = 2", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "source", + "varchar"), fmt.Sprintf("%v", bls2), ), nil) bls3 := &binlogdatapb.BinlogSource{ - Keyspace: "ks", - Shard: "-40", - Filter: &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "/.*", - Filter: "80-", - }}, - }, - } - dbDest2Client.setResult("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "source", - "varchar", - ), - fmt.Sprintf("%v", bls3), - ), nil) - bls4 := &binlogdatapb.BinlogSource{ Keyspace: "ks", Shard: "40-", Filter: &binlogdatapb.Filter{ @@ -202,45 +607,82 @@ func TestShardMigrateReads(t *testing.T) { }}, }, } - dbDest2Client.setResult("select source from _vt.vreplication where id = 2", sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "source", - "varchar", - ), - fmt.Sprintf("%v", bls4), + dbDest2Client.setResult("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "source", + "varchar"), + fmt.Sprintf("%v", bls3), ), nil) streams := map[topo.KeyspaceShard][]uint32{ {Keyspace: "ks", Shard: "-80"}: {1, 2}, - {Keyspace: "ks", Shard: "80-"}: {1, 2}, + 
{Keyspace: "ks", Shard: "80-"}: {1}, } + err = wr.MigrateReads(ctx, MigrateShards, streams, []string{"cell1"}, topodatapb.TabletType_RDONLY, directionForward) + if err != nil { + t.Fatal(err) + } + checkCellServedTypes(t, ts, "ks:-40", "cell1", 2) + checkCellServedTypes(t, ts, "ks:40-", "cell1", 2) + checkCellServedTypes(t, ts, "ks:-80", "cell1", 1) + checkCellServedTypes(t, ts, "ks:80-", "cell1", 1) + checkCellServedTypes(t, ts, "ks:-40", "cell2", 3) + checkCellServedTypes(t, ts, "ks:40-", "cell2", 3) + checkCellServedTypes(t, ts, "ks:-80", "cell2", 0) + checkCellServedTypes(t, ts, "ks:80-", "cell2", 0) + + err = wr.MigrateReads(ctx, MigrateShards, streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionForward) + if err != nil { + t.Fatal(err) + } + checkCellServedTypes(t, ts, "ks:-40", "cell1", 2) + checkCellServedTypes(t, ts, "ks:40-", "cell1", 2) + checkCellServedTypes(t, ts, "ks:-80", "cell1", 1) + checkCellServedTypes(t, ts, "ks:80-", "cell1", 1) + checkCellServedTypes(t, ts, "ks:-40", "cell2", 2) + checkCellServedTypes(t, ts, "ks:40-", "cell2", 2) + checkCellServedTypes(t, ts, "ks:-80", "cell2", 1) + checkCellServedTypes(t, ts, "ks:80-", "cell2", 1) + + err = wr.MigrateReads(ctx, MigrateShards, streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionBackward) + if err != nil { + t.Fatal(err) + } + checkCellServedTypes(t, ts, "ks:-40", "cell1", 2) + checkCellServedTypes(t, ts, "ks:40-", "cell1", 2) + checkCellServedTypes(t, ts, "ks:-80", "cell1", 1) + checkCellServedTypes(t, ts, "ks:80-", "cell1", 1) + checkCellServedTypes(t, ts, "ks:-40", "cell2", 3) + checkCellServedTypes(t, ts, "ks:40-", "cell2", 3) + checkCellServedTypes(t, ts, "ks:-80", "cell2", 0) + checkCellServedTypes(t, ts, "ks:80-", "cell2", 0) + err = wr.MigrateReads(ctx, MigrateShards, streams, nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } - checkShardServedTypes(t, ts, "-40", 2) - checkShardServedTypes(t, ts, "40-", 2) - 
checkShardServedTypes(t, ts, "-80", 1) - checkShardServedTypes(t, ts, "80-", 1) + checkServedTypes(t, ts, "ks:-40", 2) + checkServedTypes(t, ts, "ks:40-", 2) + checkServedTypes(t, ts, "ks:-80", 1) + checkServedTypes(t, ts, "ks:80-", 1) err = wr.MigrateReads(ctx, MigrateShards, streams, nil, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } - checkShardServedTypes(t, ts, "-40", 1) - checkShardServedTypes(t, ts, "40-", 1) - checkShardServedTypes(t, ts, "-80", 2) - checkShardServedTypes(t, ts, "80-", 2) + checkServedTypes(t, ts, "ks:-40", 1) + checkServedTypes(t, ts, "ks:40-", 1) + checkServedTypes(t, ts, "ks:-80", 2) + checkServedTypes(t, ts, "ks:80-", 2) err = wr.MigrateReads(ctx, MigrateShards, streams, nil, topodatapb.TabletType_RDONLY, directionBackward) if err != nil { t.Fatal(err) } - checkShardServedTypes(t, ts, "-40", 2) - checkShardServedTypes(t, ts, "40-", 2) - checkShardServedTypes(t, ts, "-80", 1) - checkShardServedTypes(t, ts, "80-", 1) + checkServedTypes(t, ts, "ks:-40", 2) + checkServedTypes(t, ts, "ks:40-", 2) + checkServedTypes(t, ts, "ks:-80", 1) + checkServedTypes(t, ts, "ks:80-", 1) err = wr.MigrateReads(ctx, MigrateShards, streams, nil, topodatapb.TabletType_MASTER, directionForward) want := "tablet type must be REPLICA or RDONLY: MASTER" @@ -297,8 +739,8 @@ func TestShardMigrateReads(t *testing.T) { } // Check for journals. - dbSource1Client.setResult("select 1 from _vt.resharding_journal where id = 4076872238118445101", &sqltypes.Result{}, nil) - dbSource2Client.setResult("select 1 from _vt.resharding_journal where id = 4076872238118445101", &sqltypes.Result{}, nil) + dbSource1Client.setResult("select 1 from _vt.resharding_journal where id = 8372031610433464572", &sqltypes.Result{}, nil) + dbSource2Client.setResult("select 1 from _vt.resharding_journal where id = 8372031610433464572", &sqltypes.Result{}, nil) // Wait for position: Reads current state, updates to Stopped, and re-reads. 
state := sqltypes.MakeTestResult(sqltypes.MakeTestFields( @@ -309,11 +751,9 @@ func TestShardMigrateReads(t *testing.T) { dbDest1Client.setResult("select pos, state, message from _vt.vreplication where id=1", state, nil) dbDest2Client.setResult("select pos, state, message from _vt.vreplication where id=1", state, nil) dbDest1Client.setResult("select pos, state, message from _vt.vreplication where id=2", state, nil) - dbDest2Client.setResult("select pos, state, message from _vt.vreplication where id=2", state, nil) dbDest1Client.setResult("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) dbDest2Client.setResult("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) dbDest1Client.setResult("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 2", &sqltypes.Result{}, nil) - dbDest2Client.setResult("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 2", &sqltypes.Result{}, nil) stopped := sqltypes.MakeTestResult(sqltypes.MakeTestFields( "id|state", "int64|varchar"), @@ -322,48 +762,146 @@ func TestShardMigrateReads(t *testing.T) { dbDest1Client.setResult("select * from _vt.vreplication where id = 1", stopped, nil) dbDest2Client.setResult("select * from _vt.vreplication where id = 1", stopped, nil) dbDest1Client.setResult("select * from _vt.vreplication where id = 2", stopped, nil) - dbDest2Client.setResult("select * from _vt.vreplication where id = 2", stopped, nil) // Create journals. 
- journal := "insert into _vt.resharding_journal.*4076872238118445101.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*shard_gtids.*80.*MariaDB/5-456-893.*participants.*40.*40" - dbSource1Client.setResultRE(journal, &sqltypes.Result{}, nil) - dbSource2Client.setResultRE(journal, &sqltypes.Result{}, nil) + journal1 := "insert into _vt.resharding_journal.*8372031610433464572.*local_position.*MariaDB/5-456-892.*shard_gtids.*-80.*MariaDB/5-456-893.*participants.*40.*40" + dbSource1Client.setResultRE(journal1, &sqltypes.Result{}, nil) + journal2 := "insert into _vt.resharding_journal.*8372031610433464572.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*shard_gtids.*80.*MariaDB/5-456-893.*participants.*40.*40" + dbSource2Client.setResultRE(journal2, &sqltypes.Result{}, nil) // Create reverse replicaions. dbSource1Client.setResultRE("insert into _vt.vreplication.*-80.*-40.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) - dbSource1Client.setResultRE("insert into _vt.vreplication.*80-.*-40.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 2}, nil) dbSource2Client.setResultRE("insert into _vt.vreplication.*-80.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) dbSource2Client.setResultRE("insert into _vt.vreplication.*80-.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 2}, nil) dbSource1Client.setResult("select * from _vt.vreplication where id = 1", stopped, nil) dbSource2Client.setResult("select * from _vt.vreplication where id = 1", stopped, nil) - dbSource1Client.setResult("select * from _vt.vreplication where id = 2", stopped, nil) dbSource2Client.setResult("select * from _vt.vreplication where id = 2", stopped, nil) // Delete the target replications. 
dbDest1Client.setResult("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) dbDest2Client.setResult("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) dbDest1Client.setResult("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) - dbDest2Client.setResult("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) + err = wr.MigrateWrites(ctx, MigrateShards, streams, 1*time.Second) if err != nil { t.Fatal(err) } + + checkServedTypes(t, ts, "ks:-40", 0) + checkServedTypes(t, ts, "ks:40-", 0) + checkServedTypes(t, ts, "ks:-80", 3) + checkServedTypes(t, ts, "ks:80-", 3) + + checkIsMasterServing(t, ts, "ks:-40", false) + checkIsMasterServing(t, ts, "ks:40-", false) + checkIsMasterServing(t, ts, "ks:-80", true) + checkIsMasterServing(t, ts, "ks:80-", true) +} + +func checkRouting(t *testing.T, wr *Wrangler, want map[string][]string) { + t.Helper() + ctx := context.Background() + got, err := wr.getRoutingRules(ctx) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("rules:\n%v, want\n%v", got, want) + } + cells, err := wr.ts.GetCellInfoNames(ctx) + if err != nil { + t.Fatal(err) + } + for _, cell := range cells { + checkCellRouting(t, wr, cell, want) + } +} + +func checkCellRouting(t *testing.T, wr *Wrangler, cell string, want map[string][]string) { + t.Helper() + ctx := context.Background() + svs, err := wr.ts.GetSrvVSchema(ctx, cell) + if err != nil { + t.Fatal(err) + } + got := make(map[string][]string) + for _, rr := range svs.RoutingRules.Rules { + got[rr.FromTable] = append(got[rr.FromTable], rr.ToTables...) 
+ } + if !reflect.DeepEqual(got, want) { + t.Errorf("srv rules for cell %s:\n%v, want\n%v", cell, got, want) + } } -func checkShardServedTypes(t *testing.T, ts *topo.Server, shard string, expected int) { +func checkBlacklist(t *testing.T, ts *topo.Server, keyspaceShard string, want []string) { t.Helper() ctx := context.Background() - si, err := ts.GetShard(ctx, "ks", shard) + splits := strings.Split(keyspaceShard, ":") + si, err := ts.GetShard(ctx, splits[0], splits[1]) if err != nil { - t.Fatalf("GetShard failed: %v", err) + t.Fatal(err) + } + tc := si.GetTabletControl(topodatapb.TabletType_MASTER) + var got []string + if tc != nil { + got = tc.BlacklistedTables + } + if !reflect.DeepEqual(got, want) { + t.Errorf("Blacklisted tables for %v: %v, want %v", keyspaceShard, got, want) + } +} + +func checkServedTypes(t *testing.T, ts *topo.Server, keyspaceShard string, want int) { + t.Helper() + ctx := context.Background() + splits := strings.Split(keyspaceShard, ":") + si, err := ts.GetShard(ctx, splits[0], splits[1]) + if err != nil { + t.Fatal(err) } servedTypes, err := ts.GetShardServingTypes(ctx, si) if err != nil { - t.Fatalf("GetShard failed: %v", err) + t.Fatal(err) } - if len(servedTypes) != expected { - t.Errorf("shard %v has wrong served types: got: %v, expected: %v", shard, len(servedTypes), expected) + if len(servedTypes) != want { + t.Errorf("shard %v has wrong served types: got: %v, want: %v", keyspaceShard, len(servedTypes), want) + } +} + +func checkCellServedTypes(t *testing.T, ts *topo.Server, keyspaceShard, cell string, want int) { + t.Helper() + ctx := context.Background() + splits := strings.Split(keyspaceShard, ":") + srvKeyspace, err := ts.GetSrvKeyspace(ctx, cell, splits[0]) + if err != nil { + t.Fatal(err) + } + count := 0 +outer: + for _, partition := range srvKeyspace.GetPartitions() { + for _, ref := range partition.ShardReferences { + if ref.Name == splits[1] { + count++ + continue outer + } + } + } + if count != want { + t.Errorf("serving 
types for keyspaceShard %s, cell %s: %d, want %d", keyspaceShard, cell, count, want) + } +} + +func checkIsMasterServing(t *testing.T, ts *topo.Server, keyspaceShard string, want bool) { + t.Helper() + ctx := context.Background() + splits := strings.Split(keyspaceShard, ":") + si, err := ts.GetShard(ctx, splits[0], splits[1]) + if err != nil { + t.Fatal(err) + } + if want != si.IsMasterServing { + t.Errorf("IsMasterServing(%v): %v, want %v", keyspaceShard, si.IsMasterServing, want) } } From 0614231fac3292e04918a66bfa6fc74f22f254b6 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Sun, 7 Jul 2019 12:44:42 -0700 Subject: [PATCH 06/17] migrater: more tests and cleanup Signed-off-by: Sugu Sougoumarane --- go/vt/wrangler/fake_dbclient_test.go | 41 ++- go/vt/wrangler/migrater.go | 1 - go/vt/wrangler/migrater_test.go | 420 ++++++++++++++++++--------- 3 files changed, 311 insertions(+), 151 deletions(-) diff --git a/go/vt/wrangler/fake_dbclient_test.go b/go/vt/wrangler/fake_dbclient_test.go index 9ed2e25719b..1f2ca1f54a7 100644 --- a/go/vt/wrangler/fake_dbclient_test.go +++ b/go/vt/wrangler/fake_dbclient_test.go @@ -19,35 +19,46 @@ package wrangler import ( "fmt" "regexp" + "testing" "vitess.io/vitess/go/sqltypes" ) +func verifyQueries(t *testing.T, dcs []*fakeDBClient) { + for _, dc := range dcs { + dc.verifyQueries(t) + } +} + type dbResult struct { result *sqltypes.Result err error + called bool } // fakeDBClient fakes a binlog_player.DBClient. type fakeDBClient struct { queries map[string]*dbResult - queriesRE map[*regexp.Regexp]*dbResult + queriesRE map[string]*dbResult } // NewfakeDBClient returns a new DBClientMock. 
func newFakeDBClient() *fakeDBClient { return &fakeDBClient{ - queries: make(map[string]*dbResult), - queriesRE: make(map[*regexp.Regexp]*dbResult), + queries: map[string]*dbResult{ + "use _vt": {result: &sqltypes.Result{}, called: true}, + "select * from _vt.vreplication where db_name='db'": {result: &sqltypes.Result{}}, + }, + queriesRE: make(map[string]*dbResult), } } -func (dc *fakeDBClient) setResult(query string, result *sqltypes.Result, err error) { +func (dc *fakeDBClient) addQuery(query string, result *sqltypes.Result, err error) { dc.queries[query] = &dbResult{result: result, err: err} } -func (dc *fakeDBClient) setResultRE(query string, result *sqltypes.Result, err error) { - dc.queriesRE[regexp.MustCompile(query)] = &dbResult{result: result, err: err} +func (dc *fakeDBClient) addQueryRE(query string, result *sqltypes.Result, err error) { + dc.queriesRE[query] = &dbResult{result: result, err: err} } // DBName is part of the DBClient interface @@ -82,12 +93,28 @@ func (dc *fakeDBClient) Close() { // ExecuteFetch is part of the DBClient interface func (dc *fakeDBClient) ExecuteFetch(query string, maxrows int) (qr *sqltypes.Result, err error) { if dbr := dc.queries[query]; dbr != nil { + dbr.called = true return dbr.result, dbr.err } for re, dbr := range dc.queriesRE { - if re.MatchString(query) { + if regexp.MustCompile(re).MatchString(query) { + dbr.called = true return dbr.result, dbr.err } } return nil, fmt.Errorf("unexpected query: %s", query) } + +func (dc *fakeDBClient) verifyQueries(t *testing.T) { + t.Helper() + for query, dbr := range dc.queries { + if !dbr.called { + t.Errorf("query: %v was not called", query) + } + } + for query, dbr := range dc.queriesRE { + if !dbr.called { + t.Errorf("query: %v was not called", query) + } + } +} diff --git a/go/vt/wrangler/migrater.go b/go/vt/wrangler/migrater.go index 7878b34cd51..cdfb257b1af 100644 --- a/go/vt/wrangler/migrater.go +++ b/go/vt/wrangler/migrater.go @@ -516,7 +516,6 @@ func (mi *migrater) 
cancelMigration(ctx context.Context) { }) if err != nil { mi.wr.Logger().Errorf("Cancel migration failed: could not restart vreplication: %v", err) - return } } diff --git a/go/vt/wrangler/migrater_test.go b/go/vt/wrangler/migrater_test.go index 392ba27404e..92fda1dfddc 100644 --- a/go/vt/wrangler/migrater_test.go +++ b/go/vt/wrangler/migrater_test.go @@ -38,6 +38,8 @@ import ( "vitess.io/vitess/go/vt/vttablet/tmclient" ) +// TestTableMigrate tests table mode migrations. +// This has to be kept in sync with TestShardMigrate. func TestTableMigrate(t *testing.T) { ctx := context.Background() ts := memorytopo.NewServer("cell1", "cell2") @@ -136,8 +138,6 @@ func TestTableMigrate(t *testing.T) { dbDest1Client := newFakeDBClient() dbClientFactory1 := func() binlogplayer.DBClient { return dbDest1Client } dest1Master.Agent.VREngine = vreplication.NewEngine(ts, "", dest1Master.FakeMysqlDaemon, dbClientFactory1, dbDest1Client.DBName()) - dbDest1Client.setResult("use _vt", &sqltypes.Result{}, nil) - dbDest1Client.setResult("select * from _vt.vreplication where db_name='db'", &sqltypes.Result{}, nil) if err := dest1Master.Agent.VREngine.Open(ctx); err != nil { t.Fatal(err) } @@ -145,8 +145,6 @@ func TestTableMigrate(t *testing.T) { dbDest2Client := newFakeDBClient() dbClientFactory2 := func() binlogplayer.DBClient { return dbDest2Client } dest2Master.Agent.VREngine = vreplication.NewEngine(ts, "", dest2Master.FakeMysqlDaemon, dbClientFactory2, dbDest2Client.DBName()) - dbDest2Client.setResult("use _vt", &sqltypes.Result{}, nil) - dbDest2Client.setResult("select * from _vt.vreplication where db_name='db'", &sqltypes.Result{}, nil) if err := dest2Master.Agent.VREngine.Open(ctx); err != nil { t.Fatal(err) } @@ -154,8 +152,6 @@ func TestTableMigrate(t *testing.T) { dbSource1Client := newFakeDBClient() dbClientFactory3 := func() binlogplayer.DBClient { return dbSource1Client } source1Master.Agent.VREngine = vreplication.NewEngine(ts, "", source1Master.FakeMysqlDaemon, 
dbClientFactory3, dbSource1Client.DBName()) - dbSource1Client.setResult("use _vt", &sqltypes.Result{}, nil) - dbSource1Client.setResult("select * from _vt.vreplication where db_name='db'", &sqltypes.Result{}, nil) if err := source1Master.Agent.VREngine.Open(ctx); err != nil { t.Fatal(err) } @@ -163,12 +159,12 @@ func TestTableMigrate(t *testing.T) { dbSource2Client := newFakeDBClient() dbClientFactory4 := func() binlogplayer.DBClient { return dbSource2Client } source2Master.Agent.VREngine = vreplication.NewEngine(ts, "", source2Master.FakeMysqlDaemon, dbClientFactory4, dbSource2Client.DBName()) - dbSource2Client.setResult("use _vt", &sqltypes.Result{}, nil) - dbSource2Client.setResult("select * from _vt.vreplication where db_name='db'", &sqltypes.Result{}, nil) if err := source2Master.Agent.VREngine.Open(ctx); err != nil { t.Fatal(err) } + allDBClients := []*fakeDBClient{dbDest1Client, dbDest2Client, dbSource1Client, dbSource2Client} + // Emulate the following replication streams (many-to-many table migration): // -40 -> -80 // 40- -> -80 @@ -187,7 +183,7 @@ func TestTableMigrate(t *testing.T) { }}, }, } - dbDest1Client.setResult("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + dbDest1Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( "source", "varchar"), fmt.Sprintf("%v", bls1), @@ -205,7 +201,7 @@ func TestTableMigrate(t *testing.T) { }}, }, } - dbDest1Client.setResult("select source from _vt.vreplication where id = 2", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + dbDest1Client.addQuery("select source from _vt.vreplication where id = 2", sqltypes.MakeTestResult(sqltypes.MakeTestFields( "source", "varchar"), fmt.Sprintf("%v", bls2), @@ -223,7 +219,7 @@ func TestTableMigrate(t *testing.T) { }}, }, } - dbDest2Client.setResult("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + 
dbDest2Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( "source", "varchar"), fmt.Sprintf("%v", bls3), @@ -241,11 +237,50 @@ func TestTableMigrate(t *testing.T) { t.Fatal(err) } + source1Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 892, + }, + }, + } + source2Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 892, + }, + }, + } + dest1Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 893, + }, + }, + } + dest2Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 893, + }, + }, + } + streams := map[topo.KeyspaceShard][]uint32{ {Keyspace: "ks2", Shard: "-80"}: {1, 2}, {Keyspace: "ks2", Shard: "80-"}: {1}, } + //------------------------------------------------------------------------------------------------------------------- + // Single cell RDONLY migration. err = wr.MigrateReads(ctx, MigrateTables, streams, []string{"cell1"}, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) @@ -266,8 +301,13 @@ func TestTableMigrate(t *testing.T) { "t2": {"ks1.t2"}, "ks2.t2": {"ks1.t2"}, }) + verifyQueries(t, allDBClients) - // Migrate is additive. cell2 also migrates rdonly. + //------------------------------------------------------------------------------------------------------------------- + // Other cell REPLICA migration. + // The global routing already contains redirections for rdonly. + // So, adding routes for replica and deploying to cell2 will also cause + // cell2 to migrat rdonly. This is a quirk that can be fixed later if necessary. 
err = wr.MigrateReads(ctx, MigrateTables, streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) @@ -296,7 +336,10 @@ func TestTableMigrate(t *testing.T) { "t2@replica": {"ks2.t2"}, "ks2.t2@replica": {"ks2.t2"}, }) + verifyQueries(t, allDBClients) + //------------------------------------------------------------------------------------------------------------------- + // Single cell backward REPLICA migration. err = wr.MigrateReads(ctx, MigrateTables, streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionBackward) if err != nil { t.Fatal(err) @@ -311,7 +354,10 @@ func TestTableMigrate(t *testing.T) { "t2@rdonly": {"ks2.t2"}, "ks2.t2@rdonly": {"ks2.t2"}, }) + verifyQueries(t, allDBClients) + //------------------------------------------------------------------------------------------------------------------- + // Migrate all REPLICA. err = wr.MigrateReads(ctx, MigrateTables, streams, nil, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) @@ -330,7 +376,10 @@ func TestTableMigrate(t *testing.T) { "t2@replica": {"ks2.t2"}, "ks2.t2@replica": {"ks2.t2"}, }) + verifyQueries(t, allDBClients) + //------------------------------------------------------------------------------------------------------------------- + // All cells RDONLY backward migration. err = wr.MigrateReads(ctx, MigrateTables, streams, nil, topodatapb.TabletType_RDONLY, directionBackward) if err != nil { t.Fatal(err) @@ -345,64 +394,52 @@ func TestTableMigrate(t *testing.T) { "t2@replica": {"ks2.t2"}, "ks2.t2@replica": {"ks2.t2"}, }) + verifyQueries(t, allDBClients) + //------------------------------------------------------------------------------------------------------------------- + // Can't migrate master with MigrateReads. 
err = wr.MigrateReads(ctx, MigrateTables, streams, nil, topodatapb.TabletType_MASTER, directionForward) want := "tablet type must be REPLICA or RDONLY: MASTER" if err == nil || err.Error() != want { t.Errorf("MigrateReads(master) err: %v, want %v", err, want) } + verifyQueries(t, allDBClients) + //------------------------------------------------------------------------------------------------------------------- + // Can't migrate writes if REPLICA and RDONLY have not fully migrated yet. err = wr.MigrateWrites(ctx, MigrateTables, streams, 1*time.Second) want = "missing tablet type specific routing, read-only traffic must be migrated before migrating writes" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateWrites err: %v, want %v", err, want) } + verifyQueries(t, allDBClients) + //------------------------------------------------------------------------------------------------------------------- + // Test MigrateWrites cancelation on failure. + + // Migrate all the reads first. 
err = wr.MigrateReads(ctx, MigrateTables, streams, nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } - - source1Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - mysql.MariadbGTID{ - Domain: 5, - Server: 456, - Sequence: 892, - }, - }, - } - source2Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - mysql.MariadbGTID{ - Domain: 5, - Server: 456, - Sequence: 892, - }, - }, - } - dest1Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - mysql.MariadbGTID{ - Domain: 5, - Server: 456, - Sequence: 893, - }, - }, - } - dest2Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - mysql.MariadbGTID{ - Domain: 5, - Server: 456, - Sequence: 893, - }, - }, - } + checkRouting(t, wr, map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + "t1@replica": {"ks2.t1"}, + "ks2.t1@replica": {"ks2.t1"}, + "t2@replica": {"ks2.t2"}, + "ks2.t2@replica": {"ks2.t2"}, + "t1@rdonly": {"ks2.t1"}, + "ks2.t1@rdonly": {"ks2.t1"}, + "t2@rdonly": {"ks2.t2"}, + "ks2.t2@rdonly": {"ks2.t2"}, + }) // Check for journals. - dbSource1Client.setResult("select 1 from _vt.resharding_journal where id = 445516443381867838", &sqltypes.Result{}, nil) - dbSource2Client.setResult("select 1 from _vt.resharding_journal where id = 445516443381867838", &sqltypes.Result{}, nil) + dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 445516443381867838", &sqltypes.Result{}, nil) + dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 445516443381867838", &sqltypes.Result{}, nil) // Wait for position: Reads current state, updates to Stopped, and re-reads. 
state := sqltypes.MakeTestResult(sqltypes.MakeTestFields( @@ -410,39 +447,73 @@ func TestTableMigrate(t *testing.T) { "varchar|varchar|varchar"), "MariaDB/5-456-892|Running|", ) - dbDest1Client.setResult("select pos, state, message from _vt.vreplication where id=1", state, nil) - dbDest2Client.setResult("select pos, state, message from _vt.vreplication where id=1", state, nil) - dbDest1Client.setResult("select pos, state, message from _vt.vreplication where id=2", state, nil) - dbDest1Client.setResult("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) - dbDest2Client.setResult("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) - dbDest1Client.setResult("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 2", &sqltypes.Result{}, nil) + dbDest1Client.addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil) + dbDest2Client.addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil) + dbDest1Client.addQuery("select pos, state, message from _vt.vreplication where id=2", state, nil) + dbDest1Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) + dbDest2Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) + dbDest1Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 2", &sqltypes.Result{}, nil) stopped := sqltypes.MakeTestResult(sqltypes.MakeTestFields( "id|state", "int64|varchar"), "1|Stopped", ) - dbDest1Client.setResult("select * from _vt.vreplication where id = 1", stopped, nil) - dbDest2Client.setResult("select * from _vt.vreplication where id = 1", stopped, nil) - dbDest1Client.setResult("select * from _vt.vreplication where id 
= 2", stopped, nil) + dbDest1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + dbDest2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + dbDest1Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil) + + // Cancel Migration + cancel1 := "update _vt.vreplication set state = 'Running', stop_pos = null where id = 1" + cancel2 := "update _vt.vreplication set state = 'Running', stop_pos = null where id = 2" + dbDest1Client.addQuery(cancel1, &sqltypes.Result{}, nil) + dbDest2Client.addQuery(cancel1, &sqltypes.Result{}, nil) + dbDest1Client.addQuery(cancel2, &sqltypes.Result{}, nil) + + err = wr.MigrateWrites(ctx, MigrateTables, streams, 0*time.Second) + want = "DeadlineExceeded" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateWrites(0 timeout) err: %v, must contain %v", err, want) + } + checkRouting(t, wr, map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + "t1@replica": {"ks2.t1"}, + "ks2.t1@replica": {"ks2.t1"}, + "t2@replica": {"ks2.t2"}, + "ks2.t2@replica": {"ks2.t2"}, + "t1@rdonly": {"ks2.t1"}, + "ks2.t1@rdonly": {"ks2.t1"}, + "t2@rdonly": {"ks2.t2"}, + "ks2.t2@rdonly": {"ks2.t2"}, + }) + checkBlacklist(t, ts, "ks1:-40", nil) + checkBlacklist(t, ts, "ks1:40-", nil) + checkBlacklist(t, ts, "ks2:-80", nil) + checkBlacklist(t, ts, "ks2:80-", nil) + + //------------------------------------------------------------------------------------------------------------------- + // Test successful MigrateWrites. // Create journals. 
journal1 := "insert into _vt.resharding_journal.*445516443381867838.*tables.*t1.*t2.*local_position.*MariaDB/5-456-892.*shard_gtids.*-80.*MariaDB/5-456-893.*participants.*40.*40" - dbSource1Client.setResultRE(journal1, &sqltypes.Result{}, nil) + dbSource1Client.addQueryRE(journal1, &sqltypes.Result{}, nil) journal2 := "insert into _vt.resharding_journal.*445516443381867838.*tables.*t1.*t2.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*80.*participants.*40.*40" - dbSource2Client.setResultRE(journal2, &sqltypes.Result{}, nil) + dbSource2Client.addQueryRE(journal2, &sqltypes.Result{}, nil) - // Create reverse replicaions. - dbSource1Client.setResultRE("insert into _vt.vreplication.*ks2.*-80.*t1.*in_keyrange.*c1.*hash.*-40.*t2.*-40.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) - dbSource2Client.setResultRE("insert into _vt.vreplication.*ks2.*-80.*t1.*in_keyrange.*c1.*hash.*40-.*t2.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) - dbSource2Client.setResultRE("insert into _vt.vreplication.*ks2.*80-.*t1.*in_keyrange.*c1.*hash.*40-.*t2.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) - dbSource1Client.setResult("select * from _vt.vreplication where id = 1", stopped, nil) - dbSource2Client.setResult("select * from _vt.vreplication where id = 1", stopped, nil) - dbSource2Client.setResult("select * from _vt.vreplication where id = 2", stopped, nil) + // Create backward replicaions. 
+ dbSource1Client.addQueryRE("insert into _vt.vreplication.*ks2.*-80.*t1.*in_keyrange.*c1.*hash.*-40.*t2.*-40.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) + dbSource2Client.addQueryRE("insert into _vt.vreplication.*ks2.*-80.*t1.*in_keyrange.*c1.*hash.*40-.*t2.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) + dbSource2Client.addQueryRE("insert into _vt.vreplication.*ks2.*80-.*t1.*in_keyrange.*c1.*hash.*40-.*t2.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 2}, nil) + dbSource1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + dbSource2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + dbSource2Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil) // Delete the target replications. - dbDest1Client.setResult("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) - dbDest2Client.setResult("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) - dbDest1Client.setResult("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) + dbDest1Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) err = wr.MigrateWrites(ctx, MigrateTables, streams, 1*time.Second) if err != nil { @@ -457,8 +528,12 @@ func TestTableMigrate(t *testing.T) { checkBlacklist(t, ts, "ks1:40-", []string{"t1", "t2"}) checkBlacklist(t, ts, "ks2:-80", nil) checkBlacklist(t, ts, "ks2:80-", nil) + + verifyQueries(t, allDBClients) } +// TestShardMigrate tests table mode migrations. +// This has to be kept in sync with TestTableMigrate. 
func TestShardMigrate(t *testing.T) { ctx := context.Background() ts := memorytopo.NewServer("cell1", "cell2") @@ -529,8 +604,6 @@ func TestShardMigrate(t *testing.T) { dbDest1Client := newFakeDBClient() dbClientFactory1 := func() binlogplayer.DBClient { return dbDest1Client } dest1Master.Agent.VREngine = vreplication.NewEngine(ts, "", dest1Master.FakeMysqlDaemon, dbClientFactory1, dbDest1Client.DBName()) - dbDest1Client.setResult("use _vt", &sqltypes.Result{}, nil) - dbDest1Client.setResult("select * from _vt.vreplication where db_name='db'", &sqltypes.Result{}, nil) if err := dest1Master.Agent.VREngine.Open(ctx); err != nil { t.Fatal(err) } @@ -538,8 +611,6 @@ func TestShardMigrate(t *testing.T) { dbDest2Client := newFakeDBClient() dbClientFactory2 := func() binlogplayer.DBClient { return dbDest2Client } dest2Master.Agent.VREngine = vreplication.NewEngine(ts, "", dest2Master.FakeMysqlDaemon, dbClientFactory2, dbDest2Client.DBName()) - dbDest2Client.setResult("use _vt", &sqltypes.Result{}, nil) - dbDest2Client.setResult("select * from _vt.vreplication where db_name='db'", &sqltypes.Result{}, nil) if err := dest2Master.Agent.VREngine.Open(ctx); err != nil { t.Fatal(err) } @@ -547,8 +618,6 @@ func TestShardMigrate(t *testing.T) { dbSource1Client := newFakeDBClient() dbClientFactory3 := func() binlogplayer.DBClient { return dbSource1Client } source1Master.Agent.VREngine = vreplication.NewEngine(ts, "", source1Master.FakeMysqlDaemon, dbClientFactory3, dbSource1Client.DBName()) - dbSource1Client.setResult("use _vt", &sqltypes.Result{}, nil) - dbSource1Client.setResult("select * from _vt.vreplication where db_name='db'", &sqltypes.Result{}, nil) if err := source1Master.Agent.VREngine.Open(ctx); err != nil { t.Fatal(err) } @@ -556,12 +625,12 @@ func TestShardMigrate(t *testing.T) { dbSource2Client := newFakeDBClient() dbClientFactory4 := func() binlogplayer.DBClient { return dbSource2Client } source2Master.Agent.VREngine = vreplication.NewEngine(ts, "", 
source2Master.FakeMysqlDaemon, dbClientFactory4, dbSource2Client.DBName()) - dbSource2Client.setResult("use _vt", &sqltypes.Result{}, nil) - dbSource2Client.setResult("select * from _vt.vreplication where db_name='db'", &sqltypes.Result{}, nil) if err := source2Master.Agent.VREngine.Open(ctx); err != nil { t.Fatal(err) } + allDBClients := []*fakeDBClient{dbDest1Client, dbDest2Client, dbSource1Client, dbSource2Client} + // Emulate the following replication streams (simultaneous split and merge): // -40 -> -80 // 40- -> -80 @@ -577,7 +646,7 @@ func TestShardMigrate(t *testing.T) { }}, }, } - dbDest1Client.setResult("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + dbDest1Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( "source", "varchar"), fmt.Sprintf("%v", bls1), @@ -592,7 +661,7 @@ func TestShardMigrate(t *testing.T) { }}, }, } - dbDest1Client.setResult("select source from _vt.vreplication where id = 2", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + dbDest1Client.addQuery("select source from _vt.vreplication where id = 2", sqltypes.MakeTestResult(sqltypes.MakeTestFields( "source", "varchar"), fmt.Sprintf("%v", bls2), @@ -607,17 +676,56 @@ func TestShardMigrate(t *testing.T) { }}, }, } - dbDest2Client.setResult("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + dbDest2Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( "source", "varchar"), fmt.Sprintf("%v", bls3), ), nil) + source1Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 892, + }, + }, + } + source2Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 892, + }, 
+ }, + } + dest1Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 893, + }, + }, + } + dest2Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 893, + }, + }, + } + streams := map[topo.KeyspaceShard][]uint32{ {Keyspace: "ks", Shard: "-80"}: {1, 2}, {Keyspace: "ks", Shard: "80-"}: {1}, } + //------------------------------------------------------------------------------------------------------------------- + // Single cell RDONLY migration. err = wr.MigrateReads(ctx, MigrateShards, streams, []string{"cell1"}, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) @@ -630,7 +738,10 @@ func TestShardMigrate(t *testing.T) { checkCellServedTypes(t, ts, "ks:40-", "cell2", 3) checkCellServedTypes(t, ts, "ks:-80", "cell2", 0) checkCellServedTypes(t, ts, "ks:80-", "cell2", 0) + verifyQueries(t, allDBClients) + //------------------------------------------------------------------------------------------------------------------- + // Other cell REPLICA migration. err = wr.MigrateReads(ctx, MigrateShards, streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) @@ -643,7 +754,10 @@ func TestShardMigrate(t *testing.T) { checkCellServedTypes(t, ts, "ks:40-", "cell2", 2) checkCellServedTypes(t, ts, "ks:-80", "cell2", 1) checkCellServedTypes(t, ts, "ks:80-", "cell2", 1) + verifyQueries(t, allDBClients) + //------------------------------------------------------------------------------------------------------------------- + // Single cell backward REPLICA migration. 
err = wr.MigrateReads(ctx, MigrateShards, streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionBackward) if err != nil { t.Fatal(err) @@ -656,7 +770,13 @@ func TestShardMigrate(t *testing.T) { checkCellServedTypes(t, ts, "ks:40-", "cell2", 3) checkCellServedTypes(t, ts, "ks:-80", "cell2", 0) checkCellServedTypes(t, ts, "ks:80-", "cell2", 0) + verifyQueries(t, allDBClients) + //------------------------------------------------------------------------------------------------------------------- + // Migrate all RDONLY. + // This is an extra step that does not exist in the tables test. + // The per-cell migration mechanism is different for tables. So, this + // extra step is needed to bring things in sync. err = wr.MigrateReads(ctx, MigrateShards, streams, nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) @@ -665,7 +785,10 @@ func TestShardMigrate(t *testing.T) { checkServedTypes(t, ts, "ks:40-", 2) checkServedTypes(t, ts, "ks:-80", 1) checkServedTypes(t, ts, "ks:80-", 1) + verifyQueries(t, allDBClients) + //------------------------------------------------------------------------------------------------------------------- + // Migrate all REPLICA. err = wr.MigrateReads(ctx, MigrateShards, streams, nil, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) @@ -674,7 +797,10 @@ func TestShardMigrate(t *testing.T) { checkServedTypes(t, ts, "ks:40-", 1) checkServedTypes(t, ts, "ks:-80", 2) checkServedTypes(t, ts, "ks:80-", 2) + verifyQueries(t, allDBClients) + //------------------------------------------------------------------------------------------------------------------- + // All cells RDONLY backward migration. 
err = wr.MigrateReads(ctx, MigrateShards, streams, nil, topodatapb.TabletType_RDONLY, directionBackward) if err != nil { t.Fatal(err) @@ -683,64 +809,46 @@ func TestShardMigrate(t *testing.T) { checkServedTypes(t, ts, "ks:40-", 2) checkServedTypes(t, ts, "ks:-80", 1) checkServedTypes(t, ts, "ks:80-", 1) + verifyQueries(t, allDBClients) + //------------------------------------------------------------------------------------------------------------------- + // Can't migrate master with MigrateReads. err = wr.MigrateReads(ctx, MigrateShards, streams, nil, topodatapb.TabletType_MASTER, directionForward) want := "tablet type must be REPLICA or RDONLY: MASTER" if err == nil || err.Error() != want { t.Errorf("MigrateReads(master) err: %v, want %v", err, want) } + verifyQueries(t, allDBClients) + //------------------------------------------------------------------------------------------------------------------- + // Can't migrate writes if REPLICA and RDONLY have not fully migrated yet. err = wr.MigrateWrites(ctx, MigrateShards, streams, 1*time.Second) want = "cannot migrate MASTER away" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateWrites err: %v, want %v", err, want) } + verifyQueries(t, allDBClients) + + //------------------------------------------------------------------------------------------------------------------- + // Test MigrateWrites cancelation on failure. + // Migrate all the reads first. 
err = wr.MigrateReads(ctx, MigrateShards, streams, nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } - - source1Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - mysql.MariadbGTID{ - Domain: 5, - Server: 456, - Sequence: 892, - }, - }, - } - source2Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - mysql.MariadbGTID{ - Domain: 5, - Server: 456, - Sequence: 892, - }, - }, - } - dest1Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - mysql.MariadbGTID{ - Domain: 5, - Server: 456, - Sequence: 893, - }, - }, - } - dest2Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - mysql.MariadbGTID{ - Domain: 5, - Server: 456, - Sequence: 893, - }, - }, - } + checkServedTypes(t, ts, "ks:-40", 1) + checkServedTypes(t, ts, "ks:40-", 1) + checkServedTypes(t, ts, "ks:-80", 2) + checkServedTypes(t, ts, "ks:80-", 2) + checkIsMasterServing(t, ts, "ks:-40", true) + checkIsMasterServing(t, ts, "ks:40-", true) + checkIsMasterServing(t, ts, "ks:-80", false) + checkIsMasterServing(t, ts, "ks:80-", false) // Check for journals. - dbSource1Client.setResult("select 1 from _vt.resharding_journal where id = 8372031610433464572", &sqltypes.Result{}, nil) - dbSource2Client.setResult("select 1 from _vt.resharding_journal where id = 8372031610433464572", &sqltypes.Result{}, nil) + dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 8372031610433464572", &sqltypes.Result{}, nil) + dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 8372031610433464572", &sqltypes.Result{}, nil) // Wait for position: Reads current state, updates to Stopped, and re-reads. 
state := sqltypes.MakeTestResult(sqltypes.MakeTestFields( @@ -748,39 +856,63 @@ func TestShardMigrate(t *testing.T) { "varchar|varchar|varchar"), "MariaDB/5-456-892|Running|", ) - dbDest1Client.setResult("select pos, state, message from _vt.vreplication where id=1", state, nil) - dbDest2Client.setResult("select pos, state, message from _vt.vreplication where id=1", state, nil) - dbDest1Client.setResult("select pos, state, message from _vt.vreplication where id=2", state, nil) - dbDest1Client.setResult("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) - dbDest2Client.setResult("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) - dbDest1Client.setResult("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 2", &sqltypes.Result{}, nil) + dbDest1Client.addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil) + dbDest2Client.addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil) + dbDest1Client.addQuery("select pos, state, message from _vt.vreplication where id=2", state, nil) + dbDest1Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) + dbDest2Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) + dbDest1Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 2", &sqltypes.Result{}, nil) stopped := sqltypes.MakeTestResult(sqltypes.MakeTestFields( "id|state", "int64|varchar"), "1|Stopped", ) - dbDest1Client.setResult("select * from _vt.vreplication where id = 1", stopped, nil) - dbDest2Client.setResult("select * from _vt.vreplication where id = 1", stopped, nil) - dbDest1Client.setResult("select * from _vt.vreplication where id 
= 2", stopped, nil) + dbDest1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + dbDest2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + dbDest1Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil) + + // Cancel Migration + cancel1 := "update _vt.vreplication set state = 'Running', stop_pos = null where id = 1" + cancel2 := "update _vt.vreplication set state = 'Running', stop_pos = null where id = 2" + dbDest1Client.addQuery(cancel1, &sqltypes.Result{}, nil) + dbDest2Client.addQuery(cancel1, &sqltypes.Result{}, nil) + dbDest1Client.addQuery(cancel2, &sqltypes.Result{}, nil) + + err = wr.MigrateWrites(ctx, MigrateShards, streams, 0*time.Second) + want = "DeadlineExceeded" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateWrites(0 timeout) err: %v, must contain %v", err, want) + } + checkServedTypes(t, ts, "ks:-40", 1) + checkServedTypes(t, ts, "ks:40-", 1) + checkServedTypes(t, ts, "ks:-80", 2) + checkServedTypes(t, ts, "ks:80-", 2) + checkIsMasterServing(t, ts, "ks:-40", true) + checkIsMasterServing(t, ts, "ks:40-", true) + checkIsMasterServing(t, ts, "ks:-80", false) + checkIsMasterServing(t, ts, "ks:80-", false) + + //------------------------------------------------------------------------------------------------------------------- + // Test successful MigrateWrites. // Create journals. 
journal1 := "insert into _vt.resharding_journal.*8372031610433464572.*local_position.*MariaDB/5-456-892.*shard_gtids.*-80.*MariaDB/5-456-893.*participants.*40.*40" - dbSource1Client.setResultRE(journal1, &sqltypes.Result{}, nil) + dbSource1Client.addQueryRE(journal1, &sqltypes.Result{}, nil) journal2 := "insert into _vt.resharding_journal.*8372031610433464572.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*shard_gtids.*80.*MariaDB/5-456-893.*participants.*40.*40" - dbSource2Client.setResultRE(journal2, &sqltypes.Result{}, nil) + dbSource2Client.addQueryRE(journal2, &sqltypes.Result{}, nil) - // Create reverse replicaions. - dbSource1Client.setResultRE("insert into _vt.vreplication.*-80.*-40.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) - dbSource2Client.setResultRE("insert into _vt.vreplication.*-80.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) - dbSource2Client.setResultRE("insert into _vt.vreplication.*80-.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 2}, nil) - dbSource1Client.setResult("select * from _vt.vreplication where id = 1", stopped, nil) - dbSource2Client.setResult("select * from _vt.vreplication where id = 1", stopped, nil) - dbSource2Client.setResult("select * from _vt.vreplication where id = 2", stopped, nil) + // Create backward replications.
+ dbSource1Client.addQueryRE("insert into _vt.vreplication.*-80.*-40.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) + dbSource2Client.addQueryRE("insert into _vt.vreplication.*-80.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) + dbSource2Client.addQueryRE("insert into _vt.vreplication.*80-.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 2}, nil) + dbSource1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + dbSource2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + dbSource2Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil) // Delete the target replications. - dbDest1Client.setResult("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) - dbDest2Client.setResult("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) - dbDest1Client.setResult("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) + dbDest1Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) err = wr.MigrateWrites(ctx, MigrateShards, streams, 1*time.Second) if err != nil { @@ -796,6 +928,8 @@ func TestShardMigrate(t *testing.T) { checkIsMasterServing(t, ts, "ks:40-", false) checkIsMasterServing(t, ts, "ks:-80", true) checkIsMasterServing(t, ts, "ks:80-", true) + + verifyQueries(t, allDBClients) } func checkRouting(t *testing.T, wr *Wrangler, want map[string][]string) { From 12d92d83904ee48294c7f3f1ed5c1decbc6ca0e3 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Sun, 7 Jul 2019 14:10:26 -0700 Subject: [PATCH 07/17] migrater: refactor tests, and add more Signed-off-by: Sugu Sougoumarane --- go/vt/wrangler/fake_tablet_test.go | 16 +- go/vt/wrangler/migater_env_test.go | 397 ++++++++++++++ 
go/vt/wrangler/migrater_test.go | 843 +++++++++-------------------- 3 files changed, 650 insertions(+), 606 deletions(-) create mode 100644 go/vt/wrangler/migater_env_test.go diff --git a/go/vt/wrangler/fake_tablet_test.go b/go/vt/wrangler/fake_tablet_test.go index ba119556685..042533fa8ef 100644 --- a/go/vt/wrangler/fake_tablet_test.go +++ b/go/vt/wrangler/fake_tablet_test.go @@ -53,11 +53,11 @@ import ( // We allow the creation of fake tablets, and running their event loop based // on a FakeMysqlDaemon. -// FakeTablet keeps track of a fake tablet in memory. It has: +// fakeTablet keeps track of a fake tablet in memory. It has: // - a Tablet record (used for creating the tablet, kept for user's information) // - a FakeMysqlDaemon (used by the fake event loop) // - a 'done' channel (used to terminate the fake event loop) -type FakeTablet struct { +type fakeTablet struct { // Tablet and FakeMysqlDaemon are populated at NewFakeTablet time. // We also create the RPCServer, so users can register more services // before calling StartActionLoop(). @@ -96,12 +96,12 @@ func TabletKeyspaceShard(t *testing.T, keyspace, shard string) TabletOption { } } -// NewFakeTablet creates the test tablet in the topology. 'uid' +// newFakeTablet creates the test tablet in the topology. 'uid' // has to be between 0 and 99. All the tablet info will be derived // from that. Look at the implementation if you need values. // Use TabletOption implementations if you need to change values at creation. // 'db' can be nil if the test doesn't use a database at all. 
-func NewFakeTablet(t *testing.T, wr *Wrangler, cell string, uid uint32, tabletType topodatapb.TabletType, db *fakesqldb.DB, options ...TabletOption) *FakeTablet { +func newFakeTablet(t *testing.T, wr *Wrangler, cell string, uid uint32, tabletType topodatapb.TabletType, db *fakesqldb.DB, options ...TabletOption) *fakeTablet { if uid > 99 { t.Fatalf("uid has to be between 0 and 99: %v", uid) } @@ -131,7 +131,7 @@ func NewFakeTablet(t *testing.T, wr *Wrangler, cell string, uid uint32, tabletTy fakeMysqlDaemon := fakemysqldaemon.NewFakeMysqlDaemon(db) fakeMysqlDaemon.MysqlPort = mysqlPort - return &FakeTablet{ + return &fakeTablet{ Tablet: tablet, FakeMysqlDaemon: fakeMysqlDaemon, RPCServer: grpc.NewServer(), @@ -140,7 +140,7 @@ func NewFakeTablet(t *testing.T, wr *Wrangler, cell string, uid uint32, tabletTy // StartActionLoop will start the action loop for a fake tablet, // using ft.FakeMysqlDaemon as the backing mysqld. -func (ft *FakeTablet) StartActionLoop(t *testing.T, wr *Wrangler) { +func (ft *fakeTablet) StartActionLoop(t *testing.T, wr *Wrangler) { if ft.Agent != nil { t.Fatalf("Agent for %v is already running", ft.Tablet.Alias) } @@ -198,7 +198,7 @@ func (ft *FakeTablet) StartActionLoop(t *testing.T, wr *Wrangler) { } // StopActionLoop will stop the Action Loop for the given FakeTablet -func (ft *FakeTablet) StopActionLoop(t *testing.T) { +func (ft *fakeTablet) StopActionLoop(t *testing.T) { if ft.Agent == nil { t.Fatalf("Agent for %v is not running", ft.Tablet.Alias) } @@ -213,7 +213,7 @@ func (ft *FakeTablet) StopActionLoop(t *testing.T) { } // Target returns the keyspace/shard/type info of this tablet as Target. 
-func (ft *FakeTablet) Target() querypb.Target { +func (ft *fakeTablet) Target() querypb.Target { return querypb.Target{ Keyspace: ft.Tablet.Keyspace, Shard: ft.Tablet.Shard, diff --git a/go/vt/wrangler/migater_env_test.go b/go/vt/wrangler/migater_env_test.go new file mode 100644 index 00000000000..a4ff17a0e9b --- /dev/null +++ b/go/vt/wrangler/migater_env_test.go @@ -0,0 +1,397 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wrangler + +import ( + "fmt" + "testing" + + "golang.org/x/net/context" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/logutil" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" + "vitess.io/vitess/go/vt/vttablet/tmclient" +) + +type testMigraterEnv struct { + ts *topo.Server + wr *Wrangler + source1Master, source1Replica, source1Rdonly *fakeTablet + source2Master, source2Replica, source2Rdonly *fakeTablet + dest1Master, dest1Replica, dest1Rdonly *fakeTablet + dest2Master, dest2Replica, dest2Rdonly *fakeTablet + dbSource1Client, dbSource2Client *fakeDBClient + dbDest1Client, dbDest2Client *fakeDBClient + allDBClients []*fakeDBClient + streams 
map[topo.KeyspaceShard][]uint32 +} + +func newTestTableMigrater(ctx context.Context, t *testing.T) *testMigraterEnv { + tme := &testMigraterEnv{} + tme.ts = memorytopo.NewServer("cell1", "cell2") + tme.wr = New(logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient()) + + // Create cluster: ks1:-40,40- and ks2:-80,80-. + tme.source1Master = newFakeTablet(t, tme.wr, "cell1", 10, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks1", "-40")) + tme.source1Replica = newFakeTablet(t, tme.wr, "cell1", 11, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks1", "-40")) + tme.source1Rdonly = newFakeTablet(t, tme.wr, "cell1", 12, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks1", "-40")) + + tme.source2Master = newFakeTablet(t, tme.wr, "cell1", 20, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks1", "40-")) + tme.source2Replica = newFakeTablet(t, tme.wr, "cell1", 21, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks1", "40-")) + tme.source2Rdonly = newFakeTablet(t, tme.wr, "cell1", 22, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks1", "40-")) + + tme.dest1Master = newFakeTablet(t, tme.wr, "cell1", 30, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks2", "-80")) + tme.dest1Replica = newFakeTablet(t, tme.wr, "cell1", 31, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks2", "-80")) + tme.dest1Rdonly = newFakeTablet(t, tme.wr, "cell1", 32, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks2", "-80")) + + tme.dest2Master = newFakeTablet(t, tme.wr, "cell1", 40, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks2", "80-")) + tme.dest2Replica = newFakeTablet(t, tme.wr, "cell1", 41, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks2", "80-")) + tme.dest2Rdonly = newFakeTablet(t, tme.wr, "cell1", 42, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks2", "80-")) + + vs := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: 
map[string]*vschemapb.Vindex{ + "hash": { + Type: "hash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "hash", + }}, + }, + "t2": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "hash", + }}, + }, + }, + } + if err := tme.ts.SaveVSchema(ctx, "ks1", vs); err != nil { + t.Fatal(err) + } + if err := tme.ts.SaveVSchema(ctx, "ks2", vs); err != nil { + t.Fatal(err) + } + if err := tme.ts.RebuildSrvVSchema(ctx, nil); err != nil { + t.Fatal(err) + } + err := topotools.RebuildKeyspace(ctx, logutil.NewConsoleLogger(), tme.ts, "ks1", []string{"cell1"}) + if err != nil { + t.Fatal(err) + } + err = topotools.RebuildKeyspace(ctx, logutil.NewConsoleLogger(), tme.ts, "ks2", []string{"cell1"}) + if err != nil { + t.Fatal(err) + } + + tme.startTablets(t) + tme.createDBClients(ctx, t) + tme.setMasterPositions() + + // Emulate the following replication streams (many-to-many table migration): + // -40 -> -80 + // 40- -> -80 + // 40- -> 80- + // -40 will only have one target, and 80- will have only one source. 
+ bls1 := &binlogdatapb.BinlogSource{ + Keyspace: "ks1", + Shard: "-40", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1 where in_keyrange('-80')", + }, { + Match: "t2", + Filter: "select * from t2 where in_keyrange('-80')", + }}, + }, + } + tme.dbDest1Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "source", + "varchar"), + fmt.Sprintf("%v", bls1), + ), nil) + bls2 := &binlogdatapb.BinlogSource{ + Keyspace: "ks1", + Shard: "40-", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1 where in_keyrange('-80')", + }, { + Match: "t2", + Filter: "select * from t2 where in_keyrange('-80')", + }}, + }, + } + tme.dbDest1Client.addQuery("select source from _vt.vreplication where id = 2", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "source", + "varchar"), + fmt.Sprintf("%v", bls2), + ), nil) + bls3 := &binlogdatapb.BinlogSource{ + Keyspace: "ks1", + Shard: "40-", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1 where in_keyrange('80-')", + }, { + Match: "t2", + Filter: "select * from t2 where in_keyrange('80-')", + }}, + }, + } + tme.dbDest2Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "source", + "varchar"), + fmt.Sprintf("%v", bls3), + ), nil) + + if err := tme.wr.saveRoutingRules(ctx, map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + }); err != nil { + t.Fatal(err) + } + if err := tme.ts.RebuildSrvVSchema(ctx, nil); err != nil { + t.Fatal(err) + } + + tme.streams = map[topo.KeyspaceShard][]uint32{ + {Keyspace: "ks2", Shard: "-80"}: {1, 2}, + {Keyspace: "ks2", Shard: "80-"}: {1}, + } + return tme +} + +func newTestShardMigrater(ctx context.Context, t *testing.T) *testMigraterEnv { + tme := 
&testMigraterEnv{} + tme.ts = memorytopo.NewServer("cell1", "cell2") + tme.wr = New(logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient()) + + // Create cluster with "ks" as keyspace. -40,40- as serving, -80,80- as non-serving. + tme.source1Master = newFakeTablet(t, tme.wr, "cell1", 10, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks", "-40")) + tme.source1Replica = newFakeTablet(t, tme.wr, "cell1", 11, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks", "-40")) + tme.source1Rdonly = newFakeTablet(t, tme.wr, "cell1", 12, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks", "-40")) + + tme.source2Master = newFakeTablet(t, tme.wr, "cell1", 20, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks", "40-")) + tme.source2Replica = newFakeTablet(t, tme.wr, "cell1", 21, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks", "40-")) + tme.source2Rdonly = newFakeTablet(t, tme.wr, "cell1", 22, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks", "40-")) + + tme.dest1Master = newFakeTablet(t, tme.wr, "cell1", 30, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks", "-80")) + tme.dest1Replica = newFakeTablet(t, tme.wr, "cell1", 31, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks", "-80")) + tme.dest1Rdonly = newFakeTablet(t, tme.wr, "cell1", 32, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks", "-80")) + + tme.dest2Master = newFakeTablet(t, tme.wr, "cell1", 40, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks", "80-")) + tme.dest2Replica = newFakeTablet(t, tme.wr, "cell1", 41, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks", "80-")) + tme.dest2Rdonly = newFakeTablet(t, tme.wr, "cell1", 42, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks", "80-")) + + vs := &vschemapb.Keyspace{Sharded: true} + if err := tme.ts.SaveVSchema(ctx, "ks", vs); err != nil { + t.Fatal(err) + } + if err := tme.ts.RebuildSrvVSchema(ctx, 
nil); err != nil { + t.Fatal(err) + } + err := topotools.RebuildKeyspace(ctx, logutil.NewConsoleLogger(), tme.ts, "ks", nil) + if err != nil { + t.Fatal(err) + } + + tme.startTablets(t) + tme.createDBClients(ctx, t) + tme.setMasterPositions() + + // Emulate the following replication streams (simultaneous split and merge): + // -40 -> -80 + // 40- -> -80 + // 40- -> 80- + // -40 will only have one target, and 80- will have only one source. + bls1 := &binlogdatapb.BinlogSource{ + Keyspace: "ks", + Shard: "-40", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + Filter: "-80", + }}, + }, + } + tme.dbDest1Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "source", + "varchar"), + fmt.Sprintf("%v", bls1), + ), nil) + bls2 := &binlogdatapb.BinlogSource{ + Keyspace: "ks", + Shard: "40-", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + Filter: "-80", + }}, + }, + } + tme.dbDest1Client.addQuery("select source from _vt.vreplication where id = 2", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "source", + "varchar"), + fmt.Sprintf("%v", bls2), + ), nil) + bls3 := &binlogdatapb.BinlogSource{ + Keyspace: "ks", + Shard: "40-", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + Filter: "80-", + }}, + }, + } + tme.dbDest2Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "source", + "varchar"), + fmt.Sprintf("%v", bls3), + ), nil) + + tme.streams = map[topo.KeyspaceShard][]uint32{ + {Keyspace: "ks", Shard: "-80"}: {1, 2}, + {Keyspace: "ks", Shard: "80-"}: {1}, + } + return tme +} + +func (tme *testMigraterEnv) startTablets(t *testing.T) { + tme.source1Replica.StartActionLoop(t, tme.wr) + tme.source1Rdonly.StartActionLoop(t, tme.wr) + tme.source1Master.StartActionLoop(t, tme.wr) + + tme.source2Replica.StartActionLoop(t, tme.wr) + 
tme.source2Rdonly.StartActionLoop(t, tme.wr) + tme.source2Master.StartActionLoop(t, tme.wr) + + tme.dest1Replica.StartActionLoop(t, tme.wr) + tme.dest1Rdonly.StartActionLoop(t, tme.wr) + tme.dest1Master.StartActionLoop(t, tme.wr) + + tme.dest2Replica.StartActionLoop(t, tme.wr) + tme.dest2Rdonly.StartActionLoop(t, tme.wr) + tme.dest2Master.StartActionLoop(t, tme.wr) +} + +func (tme *testMigraterEnv) stopTablets(t *testing.T) { + tme.source1Replica.StopActionLoop(t) + tme.source1Rdonly.StopActionLoop(t) + tme.source1Master.StopActionLoop(t) + + tme.source2Replica.StopActionLoop(t) + tme.source2Rdonly.StopActionLoop(t) + tme.source2Master.StopActionLoop(t) + + tme.dest1Replica.StopActionLoop(t) + tme.dest1Rdonly.StopActionLoop(t) + tme.dest1Master.StopActionLoop(t) + + tme.dest2Replica.StopActionLoop(t) + tme.dest2Rdonly.StopActionLoop(t) + tme.dest2Master.StopActionLoop(t) +} + +func (tme *testMigraterEnv) createDBClients(ctx context.Context, t *testing.T) { + tme.dbDest1Client = newFakeDBClient() + dbClientFactory1 := func() binlogplayer.DBClient { return tme.dbDest1Client } + tme.dest1Master.Agent.VREngine = vreplication.NewEngine(tme.ts, "", tme.dest1Master.FakeMysqlDaemon, dbClientFactory1, tme.dbDest1Client.DBName()) + if err := tme.dest1Master.Agent.VREngine.Open(ctx); err != nil { + t.Fatal(err) + } + + tme.dbDest2Client = newFakeDBClient() + dbClientFactory2 := func() binlogplayer.DBClient { return tme.dbDest2Client } + tme.dest2Master.Agent.VREngine = vreplication.NewEngine(tme.ts, "", tme.dest2Master.FakeMysqlDaemon, dbClientFactory2, tme.dbDest2Client.DBName()) + if err := tme.dest2Master.Agent.VREngine.Open(ctx); err != nil { + t.Fatal(err) + } + + tme.dbSource1Client = newFakeDBClient() + dbClientFactory3 := func() binlogplayer.DBClient { return tme.dbSource1Client } + tme.source1Master.Agent.VREngine = vreplication.NewEngine(tme.ts, "", tme.source1Master.FakeMysqlDaemon, dbClientFactory3, tme.dbSource1Client.DBName()) + if err := 
tme.source1Master.Agent.VREngine.Open(ctx); err != nil { + t.Fatal(err) + } + + tme.dbSource2Client = newFakeDBClient() + dbClientFactory4 := func() binlogplayer.DBClient { return tme.dbSource2Client } + tme.source2Master.Agent.VREngine = vreplication.NewEngine(tme.ts, "", tme.source2Master.FakeMysqlDaemon, dbClientFactory4, tme.dbSource2Client.DBName()) + if err := tme.source2Master.Agent.VREngine.Open(ctx); err != nil { + t.Fatal(err) + } + + tme.allDBClients = []*fakeDBClient{tme.dbDest1Client, tme.dbDest2Client, tme.dbSource1Client, tme.dbSource2Client} +} + +func (tme *testMigraterEnv) setMasterPositions() { + tme.source1Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 892, + }, + }, + } + tme.source2Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 892, + }, + }, + } + tme.dest1Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 893, + }, + }, + } + tme.dest2Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 893, + }, + }, + } +} diff --git a/go/vt/wrangler/migrater_test.go b/go/vt/wrangler/migrater_test.go index 92fda1dfddc..61eac28a1d2 100644 --- a/go/vt/wrangler/migrater_test.go +++ b/go/vt/wrangler/migrater_test.go @@ -17,275 +17,32 @@ limitations under the License. 
package wrangler import ( - "fmt" + "errors" "reflect" "strings" "testing" "time" "golang.org/x/net/context" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/binlog/binlogplayer" - "vitess.io/vitess/go/vt/logutil" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" - vschemapb "vitess.io/vitess/go/vt/proto/vschema" "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/topo/memorytopo" - "vitess.io/vitess/go/vt/topotools" - "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" - "vitess.io/vitess/go/vt/vttablet/tmclient" ) // TestTableMigrate tests table mode migrations. // This has to be kept in sync with TestShardMigrate. func TestTableMigrate(t *testing.T) { ctx := context.Background() - ts := memorytopo.NewServer("cell1", "cell2") - wr := New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) - - // Create cluster: ks1:-40,40- and ks2:-80,80-. - source1Master := NewFakeTablet(t, wr, "cell1", 10, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks1", "-40")) - source1Replica := NewFakeTablet(t, wr, "cell1", 11, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks1", "-40")) - source1Rdonly := NewFakeTablet(t, wr, "cell1", 12, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks1", "-40")) - - source2Master := NewFakeTablet(t, wr, "cell1", 20, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks1", "40-")) - source2Replica := NewFakeTablet(t, wr, "cell1", 21, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks1", "40-")) - source22Rdonly := NewFakeTablet(t, wr, "cell1", 22, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks1", "40-")) - - dest1Master := NewFakeTablet(t, wr, "cell1", 30, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks2", "-80")) - dest1Replica := NewFakeTablet(t, wr, "cell1", 31, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks2", "-80")) - 
dest1Rdonly := NewFakeTablet(t, wr, "cell1", 32, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks2", "-80")) - - dest2Master := NewFakeTablet(t, wr, "cell1", 40, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks2", "80-")) - dest2Replica := NewFakeTablet(t, wr, "cell1", 41, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks2", "80-")) - dest2Rdonly := NewFakeTablet(t, wr, "cell1", 42, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks2", "80-")) - - vs := &vschemapb.Keyspace{ - Sharded: true, - Vindexes: map[string]*vschemapb.Vindex{ - "hash": { - Type: "hash", - }, - }, - Tables: map[string]*vschemapb.Table{ - "t1": { - ColumnVindexes: []*vschemapb.ColumnVindex{{ - Column: "c1", - Name: "hash", - }}, - }, - "t2": { - ColumnVindexes: []*vschemapb.ColumnVindex{{ - Column: "c1", - Name: "hash", - }}, - }, - }, - } - if err := wr.ts.SaveVSchema(ctx, "ks1", vs); err != nil { - t.Fatal(err) - } - if err := wr.ts.SaveVSchema(ctx, "ks2", vs); err != nil { - t.Fatal(err) - } - if err := wr.ts.RebuildSrvVSchema(ctx, nil); err != nil { - t.Fatal(err) - } - err := topotools.RebuildKeyspace(ctx, logutil.NewConsoleLogger(), ts, "ks1", []string{"cell1"}) - if err != nil { - t.Fatal(err) - } - err = topotools.RebuildKeyspace(ctx, logutil.NewConsoleLogger(), ts, "ks2", []string{"cell1"}) - if err != nil { - t.Fatal(err) - } - checkServedTypes(t, ts, "ks1:-40", 3) - checkServedTypes(t, ts, "ks1:40-", 3) - checkServedTypes(t, ts, "ks2:-80", 3) - checkServedTypes(t, ts, "ks2:80-", 3) - - source1Replica.StartActionLoop(t, wr) - defer source1Replica.StopActionLoop(t) - source1Rdonly.StartActionLoop(t, wr) - defer source1Rdonly.StopActionLoop(t) - source1Master.StartActionLoop(t, wr) - defer source1Master.StopActionLoop(t) - - source2Replica.StartActionLoop(t, wr) - defer source2Replica.StopActionLoop(t) - source22Rdonly.StartActionLoop(t, wr) - defer source22Rdonly.StopActionLoop(t) - source2Master.StartActionLoop(t, wr) - defer 
source2Master.StopActionLoop(t) - - dest1Replica.StartActionLoop(t, wr) - defer dest1Replica.StopActionLoop(t) - dest1Rdonly.StartActionLoop(t, wr) - defer dest1Rdonly.StopActionLoop(t) - dest1Master.StartActionLoop(t, wr) - defer dest1Master.StopActionLoop(t) - - dest2Replica.StartActionLoop(t, wr) - defer dest2Replica.StopActionLoop(t) - dest2Rdonly.StartActionLoop(t, wr) - defer dest2Rdonly.StopActionLoop(t) - dest2Master.StartActionLoop(t, wr) - defer dest2Master.StopActionLoop(t) - - // Override with a fake VREngine after Agent is initialized in action loop. - dbDest1Client := newFakeDBClient() - dbClientFactory1 := func() binlogplayer.DBClient { return dbDest1Client } - dest1Master.Agent.VREngine = vreplication.NewEngine(ts, "", dest1Master.FakeMysqlDaemon, dbClientFactory1, dbDest1Client.DBName()) - if err := dest1Master.Agent.VREngine.Open(ctx); err != nil { - t.Fatal(err) - } - - dbDest2Client := newFakeDBClient() - dbClientFactory2 := func() binlogplayer.DBClient { return dbDest2Client } - dest2Master.Agent.VREngine = vreplication.NewEngine(ts, "", dest2Master.FakeMysqlDaemon, dbClientFactory2, dbDest2Client.DBName()) - if err := dest2Master.Agent.VREngine.Open(ctx); err != nil { - t.Fatal(err) - } - - dbSource1Client := newFakeDBClient() - dbClientFactory3 := func() binlogplayer.DBClient { return dbSource1Client } - source1Master.Agent.VREngine = vreplication.NewEngine(ts, "", source1Master.FakeMysqlDaemon, dbClientFactory3, dbSource1Client.DBName()) - if err := source1Master.Agent.VREngine.Open(ctx); err != nil { - t.Fatal(err) - } - - dbSource2Client := newFakeDBClient() - dbClientFactory4 := func() binlogplayer.DBClient { return dbSource2Client } - source2Master.Agent.VREngine = vreplication.NewEngine(ts, "", source2Master.FakeMysqlDaemon, dbClientFactory4, dbSource2Client.DBName()) - if err := source2Master.Agent.VREngine.Open(ctx); err != nil { - t.Fatal(err) - } - - allDBClients := []*fakeDBClient{dbDest1Client, dbDest2Client, dbSource1Client, 
dbSource2Client} - - // Emulate the following replication streams (many-to-many table migration): - // -40 -> -80 - // 40- -> -80 - // 40- -> 80- - // -40 will only have one target, and 80- will have only one source. - bls1 := &binlogdatapb.BinlogSource{ - Keyspace: "ks1", - Shard: "-40", - Filter: &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "t1", - Filter: "select * from t1 where in_keyrange('-80')", - }, { - Match: "t2", - Filter: "select * from t2 where in_keyrange('-80')", - }}, - }, - } - dbDest1Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "source", - "varchar"), - fmt.Sprintf("%v", bls1), - ), nil) - bls2 := &binlogdatapb.BinlogSource{ - Keyspace: "ks1", - Shard: "40-", - Filter: &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "t1", - Filter: "select * from t1 where in_keyrange('-80')", - }, { - Match: "t2", - Filter: "select * from t2 where in_keyrange('-80')", - }}, - }, - } - dbDest1Client.addQuery("select source from _vt.vreplication where id = 2", sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "source", - "varchar"), - fmt.Sprintf("%v", bls2), - ), nil) - bls3 := &binlogdatapb.BinlogSource{ - Keyspace: "ks1", - Shard: "40-", - Filter: &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "t1", - Filter: "select * from t1 where in_keyrange('80-')", - }, { - Match: "t2", - Filter: "select * from t2 where in_keyrange('80-')", - }}, - }, - } - dbDest2Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "source", - "varchar"), - fmt.Sprintf("%v", bls3), - ), nil) - - if err := wr.saveRoutingRules(ctx, map[string][]string{ - "t1": {"ks1.t1"}, - "ks2.t1": {"ks1.t1"}, - "t2": {"ks1.t2"}, - "ks2.t2": {"ks1.t2"}, - }); err != nil { - t.Fatal(err) - } - if err := wr.ts.RebuildSrvVSchema(ctx, nil); err != nil { - t.Fatal(err) - } - - 
source1Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - mysql.MariadbGTID{ - Domain: 5, - Server: 456, - Sequence: 892, - }, - }, - } - source2Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - mysql.MariadbGTID{ - Domain: 5, - Server: 456, - Sequence: 892, - }, - }, - } - dest1Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - mysql.MariadbGTID{ - Domain: 5, - Server: 456, - Sequence: 893, - }, - }, - } - dest2Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - mysql.MariadbGTID{ - Domain: 5, - Server: 456, - Sequence: 893, - }, - }, - } - - streams := map[topo.KeyspaceShard][]uint32{ - {Keyspace: "ks2", Shard: "-80"}: {1, 2}, - {Keyspace: "ks2", Shard: "80-"}: {1}, - } + tme := newTestTableMigrater(ctx, t) + defer tme.stopTablets(t) //------------------------------------------------------------------------------------------------------------------- // Single cell RDONLY migration. 
- err = wr.MigrateReads(ctx, MigrateTables, streams, []string{"cell1"}, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, []string{"cell1"}, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } - checkCellRouting(t, wr, "cell1", map[string][]string{ + checkCellRouting(t, tme.wr, "cell1", map[string][]string{ "t1": {"ks1.t1"}, "ks2.t1": {"ks1.t1"}, "t2": {"ks1.t2"}, @@ -295,24 +52,24 @@ func TestTableMigrate(t *testing.T) { "t2@rdonly": {"ks2.t2"}, "ks2.t2@rdonly": {"ks2.t2"}, }) - checkCellRouting(t, wr, "cell2", map[string][]string{ + checkCellRouting(t, tme.wr, "cell2", map[string][]string{ "t1": {"ks1.t1"}, "ks2.t1": {"ks1.t1"}, "t2": {"ks1.t2"}, "ks2.t2": {"ks1.t2"}, }) - verifyQueries(t, allDBClients) + verifyQueries(t, tme.allDBClients) //------------------------------------------------------------------------------------------------------------------- // Other cell REPLICA migration. // The global routing already contains redirections for rdonly. // So, adding routes for replica and deploying to cell2 will also cause // cell2 to migrat rdonly. This is a quirk that can be fixed later if necessary. 
- err = wr.MigrateReads(ctx, MigrateTables, streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } - checkCellRouting(t, wr, "cell1", map[string][]string{ + checkCellRouting(t, tme.wr, "cell1", map[string][]string{ "t1": {"ks1.t1"}, "ks2.t1": {"ks1.t1"}, "t2": {"ks1.t2"}, @@ -322,7 +79,7 @@ func TestTableMigrate(t *testing.T) { "t2@rdonly": {"ks2.t2"}, "ks2.t2@rdonly": {"ks2.t2"}, }) - checkCellRouting(t, wr, "cell2", map[string][]string{ + checkCellRouting(t, tme.wr, "cell2", map[string][]string{ "t1": {"ks1.t1"}, "ks2.t1": {"ks1.t1"}, "t2": {"ks1.t2"}, @@ -336,15 +93,15 @@ func TestTableMigrate(t *testing.T) { "t2@replica": {"ks2.t2"}, "ks2.t2@replica": {"ks2.t2"}, }) - verifyQueries(t, allDBClients) + verifyQueries(t, tme.allDBClients) //------------------------------------------------------------------------------------------------------------------- // Single cell backward REPLICA migration. - err = wr.MigrateReads(ctx, MigrateTables, streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionBackward) + err = tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionBackward) if err != nil { t.Fatal(err) } - checkRouting(t, wr, map[string][]string{ + checkRouting(t, tme.wr, map[string][]string{ "t1": {"ks1.t1"}, "ks2.t1": {"ks1.t1"}, "t2": {"ks1.t2"}, @@ -354,15 +111,15 @@ func TestTableMigrate(t *testing.T) { "t2@rdonly": {"ks2.t2"}, "ks2.t2@rdonly": {"ks2.t2"}, }) - verifyQueries(t, allDBClients) + verifyQueries(t, tme.allDBClients) //------------------------------------------------------------------------------------------------------------------- // Migrate all REPLICA. 
- err = wr.MigrateReads(ctx, MigrateTables, streams, nil, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } - checkRouting(t, wr, map[string][]string{ + checkRouting(t, tme.wr, map[string][]string{ "t1": {"ks1.t1"}, "ks2.t1": {"ks1.t1"}, "t2": {"ks1.t2"}, @@ -376,15 +133,15 @@ func TestTableMigrate(t *testing.T) { "t2@replica": {"ks2.t2"}, "ks2.t2@replica": {"ks2.t2"}, }) - verifyQueries(t, allDBClients) + verifyQueries(t, tme.allDBClients) //------------------------------------------------------------------------------------------------------------------- // All cells RDONLY backward migration. - err = wr.MigrateReads(ctx, MigrateTables, streams, nil, topodatapb.TabletType_RDONLY, directionBackward) + err = tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_RDONLY, directionBackward) if err != nil { t.Fatal(err) } - checkRouting(t, wr, map[string][]string{ + checkRouting(t, tme.wr, map[string][]string{ "t1": {"ks1.t1"}, "ks2.t1": {"ks1.t1"}, "t2": {"ks1.t2"}, @@ -394,35 +151,35 @@ func TestTableMigrate(t *testing.T) { "t2@replica": {"ks2.t2"}, "ks2.t2@replica": {"ks2.t2"}, }) - verifyQueries(t, allDBClients) + verifyQueries(t, tme.allDBClients) //------------------------------------------------------------------------------------------------------------------- // Can't migrate master with MigrateReads. 
- err = wr.MigrateReads(ctx, MigrateTables, streams, nil, topodatapb.TabletType_MASTER, directionForward) + err = tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_MASTER, directionForward) want := "tablet type must be REPLICA or RDONLY: MASTER" if err == nil || err.Error() != want { t.Errorf("MigrateReads(master) err: %v, want %v", err, want) } - verifyQueries(t, allDBClients) + verifyQueries(t, tme.allDBClients) //------------------------------------------------------------------------------------------------------------------- // Can't migrate writes if REPLICA and RDONLY have not fully migrated yet. - err = wr.MigrateWrites(ctx, MigrateTables, streams, 1*time.Second) + err = tme.wr.MigrateWrites(ctx, MigrateTables, tme.streams, 1*time.Second) want = "missing tablet type specific routing, read-only traffic must be migrated before migrating writes" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateWrites err: %v, want %v", err, want) } - verifyQueries(t, allDBClients) + verifyQueries(t, tme.allDBClients) //------------------------------------------------------------------------------------------------------------------- // Test MigrateWrites cancelation on failure. // Migrate all the reads first. - err = wr.MigrateReads(ctx, MigrateTables, streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err = tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } - checkRouting(t, wr, map[string][]string{ + checkRouting(t, tme.wr, map[string][]string{ "t1": {"ks1.t1"}, "ks2.t1": {"ks1.t1"}, "t2": {"ks1.t2"}, @@ -438,8 +195,8 @@ func TestTableMigrate(t *testing.T) { }) // Check for journals. 
- dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 445516443381867838", &sqltypes.Result{}, nil) - dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 445516443381867838", &sqltypes.Result{}, nil) + tme.dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 445516443381867838", &sqltypes.Result{}, nil) + tme.dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 445516443381867838", &sqltypes.Result{}, nil) // Wait for position: Reads current state, updates to Stopped, and re-reads. state := sqltypes.MakeTestResult(sqltypes.MakeTestFields( @@ -447,34 +204,34 @@ func TestTableMigrate(t *testing.T) { "varchar|varchar|varchar"), "MariaDB/5-456-892|Running|", ) - dbDest1Client.addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil) - dbDest2Client.addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil) - dbDest1Client.addQuery("select pos, state, message from _vt.vreplication where id=2", state, nil) - dbDest1Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) - dbDest2Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) - dbDest1Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 2", &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil) + tme.dbDest2Client.addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil) + tme.dbDest1Client.addQuery("select pos, state, message from _vt.vreplication where id=2", state, nil) + tme.dbDest1Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery("update 
_vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 2", &sqltypes.Result{}, nil) stopped := sqltypes.MakeTestResult(sqltypes.MakeTestFields( "id|state", "int64|varchar"), "1|Stopped", ) - dbDest1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) - dbDest2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) - dbDest1Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil) + tme.dbDest1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbDest2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbDest1Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil) // Cancel Migration cancel1 := "update _vt.vreplication set state = 'Running', stop_pos = null where id = 1" cancel2 := "update _vt.vreplication set state = 'Running', stop_pos = null where id = 2" - dbDest1Client.addQuery(cancel1, &sqltypes.Result{}, nil) - dbDest2Client.addQuery(cancel1, &sqltypes.Result{}, nil) - dbDest1Client.addQuery(cancel2, &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery(cancel1, &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery(cancel1, &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery(cancel2, &sqltypes.Result{}, nil) - err = wr.MigrateWrites(ctx, MigrateTables, streams, 0*time.Second) + err = tme.wr.MigrateWrites(ctx, MigrateTables, tme.streams, 0*time.Second) want = "DeadlineExceeded" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateWrites(0 timeout) err: %v, must contain %v", err, want) } - checkRouting(t, wr, map[string][]string{ + checkRouting(t, tme.wr, map[string][]string{ "t1": {"ks1.t1"}, "ks2.t1": {"ks1.t1"}, "t2": {"ks1.t2"}, @@ -488,367 +245,188 @@ func TestTableMigrate(t *testing.T) { "t2@rdonly": {"ks2.t2"}, 
"ks2.t2@rdonly": {"ks2.t2"}, }) - checkBlacklist(t, ts, "ks1:-40", nil) - checkBlacklist(t, ts, "ks1:40-", nil) - checkBlacklist(t, ts, "ks2:-80", nil) - checkBlacklist(t, ts, "ks2:80-", nil) + checkBlacklist(t, tme.ts, "ks1:-40", nil) + checkBlacklist(t, tme.ts, "ks1:40-", nil) + checkBlacklist(t, tme.ts, "ks2:-80", nil) + checkBlacklist(t, tme.ts, "ks2:80-", nil) //------------------------------------------------------------------------------------------------------------------- // Test successful MigrateWrites. // Create journals. journal1 := "insert into _vt.resharding_journal.*445516443381867838.*tables.*t1.*t2.*local_position.*MariaDB/5-456-892.*shard_gtids.*-80.*MariaDB/5-456-893.*participants.*40.*40" - dbSource1Client.addQueryRE(journal1, &sqltypes.Result{}, nil) + tme.dbSource1Client.addQueryRE(journal1, &sqltypes.Result{}, nil) journal2 := "insert into _vt.resharding_journal.*445516443381867838.*tables.*t1.*t2.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*80.*participants.*40.*40" - dbSource2Client.addQueryRE(journal2, &sqltypes.Result{}, nil) + tme.dbSource2Client.addQueryRE(journal2, &sqltypes.Result{}, nil) // Create backward replicaions. 
- dbSource1Client.addQueryRE("insert into _vt.vreplication.*ks2.*-80.*t1.*in_keyrange.*c1.*hash.*-40.*t2.*-40.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) - dbSource2Client.addQueryRE("insert into _vt.vreplication.*ks2.*-80.*t1.*in_keyrange.*c1.*hash.*40-.*t2.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) - dbSource2Client.addQueryRE("insert into _vt.vreplication.*ks2.*80-.*t1.*in_keyrange.*c1.*hash.*40-.*t2.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 2}, nil) - dbSource1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) - dbSource2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) - dbSource2Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil) + tme.dbSource1Client.addQueryRE("insert into _vt.vreplication.*ks2.*-80.*t1.*in_keyrange.*c1.*hash.*-40.*t2.*-40.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) + tme.dbSource2Client.addQueryRE("insert into _vt.vreplication.*ks2.*-80.*t1.*in_keyrange.*c1.*hash.*40-.*t2.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) + tme.dbSource2Client.addQueryRE("insert into _vt.vreplication.*ks2.*80-.*t1.*in_keyrange.*c1.*hash.*40-.*t2.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 2}, nil) + tme.dbSource1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbSource2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbSource2Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil) // Delete the target replications. 
- dbDest1Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) - dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) - dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) - err = wr.MigrateWrites(ctx, MigrateTables, streams, 1*time.Second) + err = tme.wr.MigrateWrites(ctx, MigrateTables, tme.streams, 1*time.Second) if err != nil { t.Fatal(err) } - checkRouting(t, wr, map[string][]string{ + checkRouting(t, tme.wr, map[string][]string{ "t1": {"ks2.t1"}, "t2": {"ks2.t2"}, }) - checkBlacklist(t, ts, "ks1:-40", []string{"t1", "t2"}) - checkBlacklist(t, ts, "ks1:40-", []string{"t1", "t2"}) - checkBlacklist(t, ts, "ks2:-80", nil) - checkBlacklist(t, ts, "ks2:80-", nil) + checkBlacklist(t, tme.ts, "ks1:-40", []string{"t1", "t2"}) + checkBlacklist(t, tme.ts, "ks1:40-", []string{"t1", "t2"}) + checkBlacklist(t, tme.ts, "ks2:-80", nil) + checkBlacklist(t, tme.ts, "ks2:80-", nil) - verifyQueries(t, allDBClients) + verifyQueries(t, tme.allDBClients) } // TestShardMigrate tests table mode migrations. // This has to be kept in sync with TestTableMigrate. func TestShardMigrate(t *testing.T) { ctx := context.Background() - ts := memorytopo.NewServer("cell1", "cell2") - wr := New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) - - // Create cluster with "ks" as keyspace. -40,40- as serving, -80,80- as non-serving. 
- source1Master := NewFakeTablet(t, wr, "cell1", 10, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks", "-40")) - source1Replica := NewFakeTablet(t, wr, "cell1", 11, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks", "-40")) - source1Rdonly := NewFakeTablet(t, wr, "cell1", 12, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks", "-40")) - - source2Master := NewFakeTablet(t, wr, "cell1", 20, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks", "40-")) - source2Replica := NewFakeTablet(t, wr, "cell1", 21, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks", "40-")) - source22Rdonly := NewFakeTablet(t, wr, "cell1", 22, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks", "40-")) - - dest1Master := NewFakeTablet(t, wr, "cell1", 30, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks", "-80")) - dest1Replica := NewFakeTablet(t, wr, "cell1", 31, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks", "-80")) - dest1Rdonly := NewFakeTablet(t, wr, "cell1", 32, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks", "-80")) - - dest2Master := NewFakeTablet(t, wr, "cell1", 40, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks", "80-")) - dest2Replica := NewFakeTablet(t, wr, "cell1", 41, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks", "80-")) - dest2Rdonly := NewFakeTablet(t, wr, "cell1", 42, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks", "80-")) - - vs := &vschemapb.Keyspace{Sharded: true} - if err := wr.ts.SaveVSchema(ctx, "ks", vs); err != nil { - t.Fatal(err) - } - if err := wr.ts.RebuildSrvVSchema(ctx, nil); err != nil { - t.Fatal(err) - } - err := topotools.RebuildKeyspace(ctx, logutil.NewConsoleLogger(), ts, "ks", nil) - if err != nil { - t.Fatal(err) - } - checkServedTypes(t, ts, "ks:-40", 3) - checkServedTypes(t, ts, "ks:40-", 3) - checkServedTypes(t, ts, "ks:-80", 0) - checkServedTypes(t, ts, "ks:80-", 0) - - 
source1Replica.StartActionLoop(t, wr) - defer source1Replica.StopActionLoop(t) - source1Rdonly.StartActionLoop(t, wr) - defer source1Rdonly.StopActionLoop(t) - source1Master.StartActionLoop(t, wr) - defer source1Master.StopActionLoop(t) - - source2Replica.StartActionLoop(t, wr) - defer source2Replica.StopActionLoop(t) - source22Rdonly.StartActionLoop(t, wr) - defer source22Rdonly.StopActionLoop(t) - source2Master.StartActionLoop(t, wr) - defer source2Master.StopActionLoop(t) - - dest1Replica.StartActionLoop(t, wr) - defer dest1Replica.StopActionLoop(t) - dest1Rdonly.StartActionLoop(t, wr) - defer dest1Rdonly.StopActionLoop(t) - dest1Master.StartActionLoop(t, wr) - defer dest1Master.StopActionLoop(t) - - dest2Replica.StartActionLoop(t, wr) - defer dest2Replica.StopActionLoop(t) - dest2Rdonly.StartActionLoop(t, wr) - defer dest2Rdonly.StopActionLoop(t) - dest2Master.StartActionLoop(t, wr) - defer dest2Master.StopActionLoop(t) - - // Override with a fake VREngine after Agent is initialized in action loop. 
- dbDest1Client := newFakeDBClient() - dbClientFactory1 := func() binlogplayer.DBClient { return dbDest1Client } - dest1Master.Agent.VREngine = vreplication.NewEngine(ts, "", dest1Master.FakeMysqlDaemon, dbClientFactory1, dbDest1Client.DBName()) - if err := dest1Master.Agent.VREngine.Open(ctx); err != nil { - t.Fatal(err) - } - - dbDest2Client := newFakeDBClient() - dbClientFactory2 := func() binlogplayer.DBClient { return dbDest2Client } - dest2Master.Agent.VREngine = vreplication.NewEngine(ts, "", dest2Master.FakeMysqlDaemon, dbClientFactory2, dbDest2Client.DBName()) - if err := dest2Master.Agent.VREngine.Open(ctx); err != nil { - t.Fatal(err) - } - - dbSource1Client := newFakeDBClient() - dbClientFactory3 := func() binlogplayer.DBClient { return dbSource1Client } - source1Master.Agent.VREngine = vreplication.NewEngine(ts, "", source1Master.FakeMysqlDaemon, dbClientFactory3, dbSource1Client.DBName()) - if err := source1Master.Agent.VREngine.Open(ctx); err != nil { - t.Fatal(err) - } - - dbSource2Client := newFakeDBClient() - dbClientFactory4 := func() binlogplayer.DBClient { return dbSource2Client } - source2Master.Agent.VREngine = vreplication.NewEngine(ts, "", source2Master.FakeMysqlDaemon, dbClientFactory4, dbSource2Client.DBName()) - if err := source2Master.Agent.VREngine.Open(ctx); err != nil { - t.Fatal(err) - } - - allDBClients := []*fakeDBClient{dbDest1Client, dbDest2Client, dbSource1Client, dbSource2Client} - - // Emulate the following replication streams (simultaneous split and merge): - // -40 -> -80 - // 40- -> -80 - // 40- -> 80- - // -40 will only have one target, and 80- will have only one source. 
- bls1 := &binlogdatapb.BinlogSource{ - Keyspace: "ks", - Shard: "-40", - Filter: &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "/.*", - Filter: "-80", - }}, - }, - } - dbDest1Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "source", - "varchar"), - fmt.Sprintf("%v", bls1), - ), nil) - bls2 := &binlogdatapb.BinlogSource{ - Keyspace: "ks", - Shard: "40-", - Filter: &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "/.*", - Filter: "-80", - }}, - }, - } - dbDest1Client.addQuery("select source from _vt.vreplication where id = 2", sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "source", - "varchar"), - fmt.Sprintf("%v", bls2), - ), nil) - bls3 := &binlogdatapb.BinlogSource{ - Keyspace: "ks", - Shard: "40-", - Filter: &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "/.*", - Filter: "80-", - }}, - }, - } - dbDest2Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "source", - "varchar"), - fmt.Sprintf("%v", bls3), - ), nil) - - source1Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - mysql.MariadbGTID{ - Domain: 5, - Server: 456, - Sequence: 892, - }, - }, - } - source2Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - mysql.MariadbGTID{ - Domain: 5, - Server: 456, - Sequence: 892, - }, - }, - } - dest1Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - mysql.MariadbGTID{ - Domain: 5, - Server: 456, - Sequence: 893, - }, - }, - } - dest2Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - mysql.MariadbGTID{ - Domain: 5, - Server: 456, - Sequence: 893, - }, - }, - } + tme := newTestShardMigrater(ctx, t) + defer tme.stopTablets(t) - streams := map[topo.KeyspaceShard][]uint32{ - {Keyspace: "ks", Shard: 
"-80"}: {1, 2}, - {Keyspace: "ks", Shard: "80-"}: {1}, - } + // Initial check + checkServedTypes(t, tme.ts, "ks:-40", 3) + checkServedTypes(t, tme.ts, "ks:40-", 3) + checkServedTypes(t, tme.ts, "ks:-80", 0) + checkServedTypes(t, tme.ts, "ks:80-", 0) //------------------------------------------------------------------------------------------------------------------- // Single cell RDONLY migration. - err = wr.MigrateReads(ctx, MigrateShards, streams, []string{"cell1"}, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, MigrateShards, tme.streams, []string{"cell1"}, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } - checkCellServedTypes(t, ts, "ks:-40", "cell1", 2) - checkCellServedTypes(t, ts, "ks:40-", "cell1", 2) - checkCellServedTypes(t, ts, "ks:-80", "cell1", 1) - checkCellServedTypes(t, ts, "ks:80-", "cell1", 1) - checkCellServedTypes(t, ts, "ks:-40", "cell2", 3) - checkCellServedTypes(t, ts, "ks:40-", "cell2", 3) - checkCellServedTypes(t, ts, "ks:-80", "cell2", 0) - checkCellServedTypes(t, ts, "ks:80-", "cell2", 0) - verifyQueries(t, allDBClients) + checkCellServedTypes(t, tme.ts, "ks:-40", "cell1", 2) + checkCellServedTypes(t, tme.ts, "ks:40-", "cell1", 2) + checkCellServedTypes(t, tme.ts, "ks:-80", "cell1", 1) + checkCellServedTypes(t, tme.ts, "ks:80-", "cell1", 1) + checkCellServedTypes(t, tme.ts, "ks:-40", "cell2", 3) + checkCellServedTypes(t, tme.ts, "ks:40-", "cell2", 3) + checkCellServedTypes(t, tme.ts, "ks:-80", "cell2", 0) + checkCellServedTypes(t, tme.ts, "ks:80-", "cell2", 0) + verifyQueries(t, tme.allDBClients) //------------------------------------------------------------------------------------------------------------------- // Other cell REPLICA migration. 
- err = wr.MigrateReads(ctx, MigrateShards, streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, MigrateShards, tme.streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } - checkCellServedTypes(t, ts, "ks:-40", "cell1", 2) - checkCellServedTypes(t, ts, "ks:40-", "cell1", 2) - checkCellServedTypes(t, ts, "ks:-80", "cell1", 1) - checkCellServedTypes(t, ts, "ks:80-", "cell1", 1) - checkCellServedTypes(t, ts, "ks:-40", "cell2", 2) - checkCellServedTypes(t, ts, "ks:40-", "cell2", 2) - checkCellServedTypes(t, ts, "ks:-80", "cell2", 1) - checkCellServedTypes(t, ts, "ks:80-", "cell2", 1) - verifyQueries(t, allDBClients) + checkCellServedTypes(t, tme.ts, "ks:-40", "cell1", 2) + checkCellServedTypes(t, tme.ts, "ks:40-", "cell1", 2) + checkCellServedTypes(t, tme.ts, "ks:-80", "cell1", 1) + checkCellServedTypes(t, tme.ts, "ks:80-", "cell1", 1) + checkCellServedTypes(t, tme.ts, "ks:-40", "cell2", 2) + checkCellServedTypes(t, tme.ts, "ks:40-", "cell2", 2) + checkCellServedTypes(t, tme.ts, "ks:-80", "cell2", 1) + checkCellServedTypes(t, tme.ts, "ks:80-", "cell2", 1) + verifyQueries(t, tme.allDBClients) //------------------------------------------------------------------------------------------------------------------- // Single cell backward REPLICA migration. 
- err = wr.MigrateReads(ctx, MigrateShards, streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionBackward) + err = tme.wr.MigrateReads(ctx, MigrateShards, tme.streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionBackward) if err != nil { t.Fatal(err) } - checkCellServedTypes(t, ts, "ks:-40", "cell1", 2) - checkCellServedTypes(t, ts, "ks:40-", "cell1", 2) - checkCellServedTypes(t, ts, "ks:-80", "cell1", 1) - checkCellServedTypes(t, ts, "ks:80-", "cell1", 1) - checkCellServedTypes(t, ts, "ks:-40", "cell2", 3) - checkCellServedTypes(t, ts, "ks:40-", "cell2", 3) - checkCellServedTypes(t, ts, "ks:-80", "cell2", 0) - checkCellServedTypes(t, ts, "ks:80-", "cell2", 0) - verifyQueries(t, allDBClients) + checkCellServedTypes(t, tme.ts, "ks:-40", "cell1", 2) + checkCellServedTypes(t, tme.ts, "ks:40-", "cell1", 2) + checkCellServedTypes(t, tme.ts, "ks:-80", "cell1", 1) + checkCellServedTypes(t, tme.ts, "ks:80-", "cell1", 1) + checkCellServedTypes(t, tme.ts, "ks:-40", "cell2", 3) + checkCellServedTypes(t, tme.ts, "ks:40-", "cell2", 3) + checkCellServedTypes(t, tme.ts, "ks:-80", "cell2", 0) + checkCellServedTypes(t, tme.ts, "ks:80-", "cell2", 0) + verifyQueries(t, tme.allDBClients) //------------------------------------------------------------------------------------------------------------------- // Migrate all RDONLY. // This is an extra step that does not exist in the tables test. // The per-cell migration mechanism is different for tables. So, this // extra step is needed to bring things in sync. 
- err = wr.MigrateReads(ctx, MigrateShards, streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err = tme.wr.MigrateReads(ctx, MigrateShards, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } - checkServedTypes(t, ts, "ks:-40", 2) - checkServedTypes(t, ts, "ks:40-", 2) - checkServedTypes(t, ts, "ks:-80", 1) - checkServedTypes(t, ts, "ks:80-", 1) - verifyQueries(t, allDBClients) + checkServedTypes(t, tme.ts, "ks:-40", 2) + checkServedTypes(t, tme.ts, "ks:40-", 2) + checkServedTypes(t, tme.ts, "ks:-80", 1) + checkServedTypes(t, tme.ts, "ks:80-", 1) + verifyQueries(t, tme.allDBClients) //------------------------------------------------------------------------------------------------------------------- // Migrate all REPLICA. - err = wr.MigrateReads(ctx, MigrateShards, streams, nil, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, MigrateShards, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } - checkServedTypes(t, ts, "ks:-40", 1) - checkServedTypes(t, ts, "ks:40-", 1) - checkServedTypes(t, ts, "ks:-80", 2) - checkServedTypes(t, ts, "ks:80-", 2) - verifyQueries(t, allDBClients) + checkServedTypes(t, tme.ts, "ks:-40", 1) + checkServedTypes(t, tme.ts, "ks:40-", 1) + checkServedTypes(t, tme.ts, "ks:-80", 2) + checkServedTypes(t, tme.ts, "ks:80-", 2) + verifyQueries(t, tme.allDBClients) //------------------------------------------------------------------------------------------------------------------- // All cells RDONLY backward migration. 
- err = wr.MigrateReads(ctx, MigrateShards, streams, nil, topodatapb.TabletType_RDONLY, directionBackward) + err = tme.wr.MigrateReads(ctx, MigrateShards, tme.streams, nil, topodatapb.TabletType_RDONLY, directionBackward) if err != nil { t.Fatal(err) } - checkServedTypes(t, ts, "ks:-40", 2) - checkServedTypes(t, ts, "ks:40-", 2) - checkServedTypes(t, ts, "ks:-80", 1) - checkServedTypes(t, ts, "ks:80-", 1) - verifyQueries(t, allDBClients) + checkServedTypes(t, tme.ts, "ks:-40", 2) + checkServedTypes(t, tme.ts, "ks:40-", 2) + checkServedTypes(t, tme.ts, "ks:-80", 1) + checkServedTypes(t, tme.ts, "ks:80-", 1) + verifyQueries(t, tme.allDBClients) //------------------------------------------------------------------------------------------------------------------- // Can't migrate master with MigrateReads. - err = wr.MigrateReads(ctx, MigrateShards, streams, nil, topodatapb.TabletType_MASTER, directionForward) + err = tme.wr.MigrateReads(ctx, MigrateShards, tme.streams, nil, topodatapb.TabletType_MASTER, directionForward) want := "tablet type must be REPLICA or RDONLY: MASTER" if err == nil || err.Error() != want { t.Errorf("MigrateReads(master) err: %v, want %v", err, want) } - verifyQueries(t, allDBClients) + verifyQueries(t, tme.allDBClients) //------------------------------------------------------------------------------------------------------------------- // Can't migrate writes if REPLICA and RDONLY have not fully migrated yet. - err = wr.MigrateWrites(ctx, MigrateShards, streams, 1*time.Second) + err = tme.wr.MigrateWrites(ctx, MigrateShards, tme.streams, 1*time.Second) want = "cannot migrate MASTER away" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateWrites err: %v, want %v", err, want) } - verifyQueries(t, allDBClients) + verifyQueries(t, tme.allDBClients) //------------------------------------------------------------------------------------------------------------------- // Test MigrateWrites cancelation on failure. 
// Migrate all the reads first. - err = wr.MigrateReads(ctx, MigrateShards, streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err = tme.wr.MigrateReads(ctx, MigrateShards, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } - checkServedTypes(t, ts, "ks:-40", 1) - checkServedTypes(t, ts, "ks:40-", 1) - checkServedTypes(t, ts, "ks:-80", 2) - checkServedTypes(t, ts, "ks:80-", 2) - checkIsMasterServing(t, ts, "ks:-40", true) - checkIsMasterServing(t, ts, "ks:40-", true) - checkIsMasterServing(t, ts, "ks:-80", false) - checkIsMasterServing(t, ts, "ks:80-", false) + checkServedTypes(t, tme.ts, "ks:-40", 1) + checkServedTypes(t, tme.ts, "ks:40-", 1) + checkServedTypes(t, tme.ts, "ks:-80", 2) + checkServedTypes(t, tme.ts, "ks:80-", 2) + checkIsMasterServing(t, tme.ts, "ks:-40", true) + checkIsMasterServing(t, tme.ts, "ks:40-", true) + checkIsMasterServing(t, tme.ts, "ks:-80", false) + checkIsMasterServing(t, tme.ts, "ks:80-", false) // Check for journals. - dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 8372031610433464572", &sqltypes.Result{}, nil) - dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 8372031610433464572", &sqltypes.Result{}, nil) + tme.dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 8372031610433464572", &sqltypes.Result{}, nil) + tme.dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 8372031610433464572", &sqltypes.Result{}, nil) // Wait for position: Reads current state, updates to Stopped, and re-reads. 
state := sqltypes.MakeTestResult(sqltypes.MakeTestFields( @@ -856,80 +434,149 @@ func TestShardMigrate(t *testing.T) { "varchar|varchar|varchar"), "MariaDB/5-456-892|Running|", ) - dbDest1Client.addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil) - dbDest2Client.addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil) - dbDest1Client.addQuery("select pos, state, message from _vt.vreplication where id=2", state, nil) - dbDest1Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) - dbDest2Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) - dbDest1Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 2", &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil) + tme.dbDest2Client.addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil) + tme.dbDest1Client.addQuery("select pos, state, message from _vt.vreplication where id=2", state, nil) + tme.dbDest1Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 2", &sqltypes.Result{}, nil) stopped := sqltypes.MakeTestResult(sqltypes.MakeTestFields( "id|state", "int64|varchar"), "1|Stopped", ) - dbDest1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) - dbDest2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) - dbDest1Client.addQuery("select * from 
_vt.vreplication where id = 2", stopped, nil) + tme.dbDest1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbDest2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbDest1Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil) // Cancel Migration cancel1 := "update _vt.vreplication set state = 'Running', stop_pos = null where id = 1" cancel2 := "update _vt.vreplication set state = 'Running', stop_pos = null where id = 2" - dbDest1Client.addQuery(cancel1, &sqltypes.Result{}, nil) - dbDest2Client.addQuery(cancel1, &sqltypes.Result{}, nil) - dbDest1Client.addQuery(cancel2, &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery(cancel1, &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery(cancel1, &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery(cancel2, &sqltypes.Result{}, nil) - err = wr.MigrateWrites(ctx, MigrateShards, streams, 0*time.Second) + err = tme.wr.MigrateWrites(ctx, MigrateShards, tme.streams, 0*time.Second) want = "DeadlineExceeded" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateWrites(0 timeout) err: %v, must contain %v", err, want) } - checkServedTypes(t, ts, "ks:-40", 1) - checkServedTypes(t, ts, "ks:40-", 1) - checkServedTypes(t, ts, "ks:-80", 2) - checkServedTypes(t, ts, "ks:80-", 2) - checkIsMasterServing(t, ts, "ks:-40", true) - checkIsMasterServing(t, ts, "ks:40-", true) - checkIsMasterServing(t, ts, "ks:-80", false) - checkIsMasterServing(t, ts, "ks:80-", false) + checkServedTypes(t, tme.ts, "ks:-40", 1) + checkServedTypes(t, tme.ts, "ks:40-", 1) + checkServedTypes(t, tme.ts, "ks:-80", 2) + checkServedTypes(t, tme.ts, "ks:80-", 2) + checkIsMasterServing(t, tme.ts, "ks:-40", true) + checkIsMasterServing(t, tme.ts, "ks:40-", true) + checkIsMasterServing(t, tme.ts, "ks:-80", false) + checkIsMasterServing(t, tme.ts, "ks:80-", false) 
//------------------------------------------------------------------------------------------------------------------- // Test successful MigrateWrites. // Create journals. journal1 := "insert into _vt.resharding_journal.*8372031610433464572.*local_position.*MariaDB/5-456-892.*shard_gtids.*-80.*MariaDB/5-456-893.*participants.*40.*40" - dbSource1Client.addQueryRE(journal1, &sqltypes.Result{}, nil) + tme.dbSource1Client.addQueryRE(journal1, &sqltypes.Result{}, nil) journal2 := "insert into _vt.resharding_journal.*8372031610433464572.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*shard_gtids.*80.*MariaDB/5-456-893.*participants.*40.*40" - dbSource2Client.addQueryRE(journal2, &sqltypes.Result{}, nil) + tme.dbSource2Client.addQueryRE(journal2, &sqltypes.Result{}, nil) // Create backward replicaions. - dbSource1Client.addQueryRE("insert into _vt.vreplication.*-80.*-40.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) - dbSource2Client.addQueryRE("insert into _vt.vreplication.*-80.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) - dbSource2Client.addQueryRE("insert into _vt.vreplication.*80-.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 2}, nil) - dbSource1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) - dbSource2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) - dbSource2Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil) + tme.dbSource1Client.addQueryRE("insert into _vt.vreplication.*-80.*-40.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) + tme.dbSource2Client.addQueryRE("insert into _vt.vreplication.*-80.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) + tme.dbSource2Client.addQueryRE("insert into _vt.vreplication.*80-.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 2}, nil) + tme.dbSource1Client.addQuery("select * from _vt.vreplication where id = 1", 
stopped, nil) + tme.dbSource2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbSource2Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil) // Delete the target replications. - dbDest1Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) - dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) - dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) + + err = tme.wr.MigrateWrites(ctx, MigrateShards, tme.streams, 1*time.Second) + if err != nil { + t.Fatal(err) + } + + checkServedTypes(t, tme.ts, "ks:-40", 0) + checkServedTypes(t, tme.ts, "ks:40-", 0) + checkServedTypes(t, tme.ts, "ks:-80", 3) + checkServedTypes(t, tme.ts, "ks:80-", 3) + + checkIsMasterServing(t, tme.ts, "ks:-40", false) + checkIsMasterServing(t, tme.ts, "ks:40-", false) + checkIsMasterServing(t, tme.ts, "ks:-80", true) + checkIsMasterServing(t, tme.ts, "ks:80-", true) + + verifyQueries(t, tme.allDBClients) +} - err = wr.MigrateWrites(ctx, MigrateShards, streams, 1*time.Second) +// TestMigrateFailJournal tests that cancel doesn't get called after point of no return. +func TestMigrateFailJournal(t *testing.T) { + ctx := context.Background() + tme := newTestTableMigrater(ctx, t) + defer tme.stopTablets(t) + + err := tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } + err = tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) + if err != nil { + t.Fatal(err) + } + + // Check for journals. 
+ tme.dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 445516443381867838", &sqltypes.Result{}, nil) + tme.dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 445516443381867838", &sqltypes.Result{}, nil) + + // Wait for position: Reads current state, updates to Stopped, and re-reads. + state := sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "pos|state|message", + "varchar|varchar|varchar"), + "MariaDB/5-456-892|Running|", + ) + tme.dbDest1Client.addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil) + tme.dbDest2Client.addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil) + tme.dbDest1Client.addQuery("select pos, state, message from _vt.vreplication where id=2", state, nil) + tme.dbDest1Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 2", &sqltypes.Result{}, nil) + stopped := sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|state", + "int64|varchar"), + "1|Stopped", + ) + tme.dbDest1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbDest2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbDest1Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil) + + // Cancel Migration: these must not get called. 
+ cancel1 := "update _vt.vreplication set state = 'Running', stop_pos = null where id = 1" + cancel2 := "update _vt.vreplication set state = 'Running', stop_pos = null where id = 2" + tme.dbDest1Client.addQuery(cancel1, &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery(cancel1, &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery(cancel2, &sqltypes.Result{}, nil) - checkServedTypes(t, ts, "ks:-40", 0) - checkServedTypes(t, ts, "ks:40-", 0) - checkServedTypes(t, ts, "ks:-80", 3) - checkServedTypes(t, ts, "ks:80-", 3) + // Make the journal call fail. + tme.dbSource1Client.addQueryRE("insert into _vt.resharding_journal", nil, errors.New("journaling intentionally failed")) + tme.dbSource2Client.addQueryRE("insert into _vt.resharding_journal", nil, errors.New("journaling intentionally failed")) - checkIsMasterServing(t, ts, "ks:-40", false) - checkIsMasterServing(t, ts, "ks:40-", false) - checkIsMasterServing(t, ts, "ks:-80", true) - checkIsMasterServing(t, ts, "ks:80-", true) + err = tme.wr.MigrateWrites(ctx, MigrateTables, tme.streams, 1*time.Second) + want := "journaling intentionally failed" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateWrites(0 timeout) err: %v, must contain %v", err, want) + } - verifyQueries(t, allDBClients) + // Verify that cancel didn't happen. 
+ if tme.dbDest1Client.queries[cancel1].called { + t.Errorf("tme.dbDest1Client.queries[cancel1].called: %v, want false", tme.dbDest1Client.queries[cancel1]) + } + if tme.dbDest2Client.queries[cancel1].called { + t.Errorf("tme.dbDest1Client.queries[cancel1].called: %v, want false", tme.dbDest1Client.queries[cancel1]) + } + if tme.dbDest1Client.queries[cancel2].called { + t.Errorf("tme.dbDest1Client.queries[cancel1].called: %v, want false", tme.dbDest1Client.queries[cancel1]) + } } func checkRouting(t *testing.T, wr *Wrangler, want map[string][]string) { From e359f3e05887a289165a71be3c6f43714e184178 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Sun, 7 Jul 2019 20:10:41 -0700 Subject: [PATCH 08/17] migrater: unit tests done Signed-off-by: Sugu Sougoumarane --- go/vt/wrangler/migrater.go | 32 +- ...gater_env_test.go => migrater_env_test.go} | 0 go/vt/wrangler/migrater_test.go | 373 ++++++++++++++++++ 3 files changed, 402 insertions(+), 3 deletions(-) rename go/vt/wrangler/{migater_env_test.go => migrater_env_test.go} (100%) diff --git a/go/vt/wrangler/migrater.go b/go/vt/wrangler/migrater.go index cdfb257b1af..2fc2ea50ed4 100644 --- a/go/vt/wrangler/migrater.go +++ b/go/vt/wrangler/migrater.go @@ -155,6 +155,11 @@ func (wr *Wrangler) MigrateWrites(ctx context.Context, migrationType MigrationTy mi.cancelMigration(ctx) return err } + } else { + // Need to gather positions in case all journals were not created. 
+ if err := mi.gatherPositions(ctx); err != nil { + return err + } } if err := mi.createJournals(ctx); err != nil { return err @@ -189,9 +194,6 @@ func (wr *Wrangler) buildMigrater(ctx context.Context, migrationType MigrationTy if err != nil { return nil, err } - if _, ok := mi.targets[targetks]; ok { - return nil, fmt.Errorf("duplicate targets: %v", targetks) - } mi.targets[targetks] = &miTarget{ shard: targetShard, master: targetMaster, @@ -482,6 +484,7 @@ func (mi *migrater) waitForCatchup(ctx context.Context, filteredReplicationWaitT ctx, cancel := context.WithTimeout(ctx, filteredReplicationWaitTime) defer cancel() + var mu sync.Mutex return mi.forAllUids(func(target *miTarget, uid uint32) error { bls := target.sources[uid] source := mi.sources[topo.KeyspaceShard{Keyspace: bls.Keyspace, Shard: bls.Shard}] @@ -491,6 +494,13 @@ func (mi *migrater) waitForCatchup(ctx context.Context, filteredReplicationWaitT if _, err := mi.wr.tmc.VReplicationExec(ctx, target.master.Tablet, binlogplayer.StopVReplication(uid, "stopped for cutover")); err != nil { return err } + + // Need lock because a target can have multiple uids. 
+ mu.Lock() + defer mu.Unlock() + if target.position != "" { + return nil + } var err error target.position, err = mi.wr.tmc.MasterPosition(ctx, target.master.Tablet) return err @@ -519,6 +529,22 @@ func (mi *migrater) cancelMigration(ctx context.Context) { } } +func (mi *migrater) gatherPositions(ctx context.Context) error { + err := mi.forAllSources(func(sourceks topo.KeyspaceShard, source *miSource) error { + var err error + source.position, err = mi.wr.tmc.MasterPosition(ctx, source.master.Tablet) + return err + }) + if err != nil { + return err + } + return mi.forAllTargets(func(targetks topo.KeyspaceShard, target *miTarget) error { + var err error + target.position, err = mi.wr.tmc.MasterPosition(ctx, target.master.Tablet) + return err + }) +} + func (mi *migrater) createJournals(ctx context.Context) error { return mi.forAllSources(func(sourceks topo.KeyspaceShard, source *miSource) error { if source.journaled { diff --git a/go/vt/wrangler/migater_env_test.go b/go/vt/wrangler/migrater_env_test.go similarity index 100% rename from go/vt/wrangler/migater_env_test.go rename to go/vt/wrangler/migrater_env_test.go diff --git a/go/vt/wrangler/migrater_test.go b/go/vt/wrangler/migrater_test.go index 61eac28a1d2..8b6c63ca451 100644 --- a/go/vt/wrangler/migrater_test.go +++ b/go/vt/wrangler/migrater_test.go @@ -18,6 +18,7 @@ package wrangler import ( "errors" + "fmt" "reflect" "strings" "testing" @@ -25,6 +26,7 @@ import ( "golang.org/x/net/context" "vitess.io/vitess/go/sqltypes" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" ) @@ -511,6 +513,7 @@ func TestShardMigrate(t *testing.T) { } // TestMigrateFailJournal tests that cancel doesn't get called after point of no return. +// No need to test this for shard migrate because code paths are the same. 
func TestMigrateFailJournal(t *testing.T) { ctx := context.Background() tme := newTestTableMigrater(ctx, t) @@ -579,6 +582,376 @@ func TestMigrateFailJournal(t *testing.T) { } } +func TestTableMigrateJournalExists(t *testing.T) { + ctx := context.Background() + tme := newTestTableMigrater(ctx, t) + defer tme.stopTablets(t) + + err := tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + if err != nil { + t.Fatal(err) + } + err = tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) + if err != nil { + t.Fatal(err) + } + + // Show one journal as created. + tme.dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 445516443381867838", sqltypes.MakeTestResult(sqltypes.MakeTestFields("1", "int64"), "1"), nil) + tme.dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 445516443381867838", &sqltypes.Result{}, nil) + + // Create the missing journal. + journal2 := "insert into _vt.resharding_journal.*445516443381867838.*tables.*t1.*t2.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*80.*participants.*40.*40" + tme.dbSource2Client.addQueryRE(journal2, &sqltypes.Result{}, nil) + + // Create backward replicaions. 
+ tme.dbSource1Client.addQueryRE("insert into _vt.vreplication.*ks2.*-80.*t1.*in_keyrange.*c1.*hash.*-40.*t2.*-40.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) + tme.dbSource2Client.addQueryRE("insert into _vt.vreplication.*ks2.*-80.*t1.*in_keyrange.*c1.*hash.*40-.*t2.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) + tme.dbSource2Client.addQueryRE("insert into _vt.vreplication.*ks2.*80-.*t1.*in_keyrange.*c1.*hash.*40-.*t2.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 2}, nil) + stopped := sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|state", + "int64|varchar"), + "1|Stopped", + ) + tme.dbSource1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbSource2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbSource2Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil) + + // Delete the target replications. + tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) + + err = tme.wr.MigrateWrites(ctx, MigrateTables, tme.streams, 1*time.Second) + if err != nil { + t.Fatal(err) + } + + // Routes will be redone. + checkRouting(t, tme.wr, map[string][]string{ + "t1": {"ks2.t1"}, + "t2": {"ks2.t2"}, + }) + // We're showing that there are no blacklisted tables. But in real life, + // tables on ks1 should be blacklisted from the previous failed attempt. 
+ checkBlacklist(t, tme.ts, "ks1:-40", nil) + checkBlacklist(t, tme.ts, "ks1:40-", nil) + checkBlacklist(t, tme.ts, "ks2:-80", nil) + checkBlacklist(t, tme.ts, "ks2:80-", nil) + + verifyQueries(t, tme.allDBClients) +} + +func TestShardMigrateJournalExists(t *testing.T) { + ctx := context.Background() + tme := newTestShardMigrater(ctx, t) + defer tme.stopTablets(t) + + err := tme.wr.MigrateReads(ctx, MigrateShards, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + if err != nil { + t.Fatal(err) + } + err = tme.wr.MigrateReads(ctx, MigrateShards, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) + if err != nil { + t.Fatal(err) + } + + // Show one journal as created. + tme.dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 8372031610433464572", sqltypes.MakeTestResult(sqltypes.MakeTestFields("1", "int64"), "1"), nil) + tme.dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 8372031610433464572", &sqltypes.Result{}, nil) + + // Create the missing journal. + journal2 := "insert into _vt.resharding_journal.*8372031610433464572.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*shard_gtids.*80.*MariaDB/5-456-893.*participants.*40.*40" + tme.dbSource2Client.addQueryRE(journal2, &sqltypes.Result{}, nil) + + // Create backward replicaions. 
+ tme.dbSource1Client.addQueryRE("insert into _vt.vreplication.*-80.*-40.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) + tme.dbSource2Client.addQueryRE("insert into _vt.vreplication.*-80.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) + tme.dbSource2Client.addQueryRE("insert into _vt.vreplication.*80-.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 2}, nil) + stopped := sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|state", + "int64|varchar"), + "1|Stopped", + ) + tme.dbSource1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbSource2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbSource2Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil) + + // Delete the target replications. + tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) + + err = tme.wr.MigrateWrites(ctx, MigrateShards, tme.streams, 1*time.Second) + if err != nil { + t.Fatal(err) + } + + checkServedTypes(t, tme.ts, "ks:-40", 0) + checkServedTypes(t, tme.ts, "ks:40-", 0) + checkServedTypes(t, tme.ts, "ks:-80", 3) + checkServedTypes(t, tme.ts, "ks:80-", 3) + + checkIsMasterServing(t, tme.ts, "ks:-40", false) + checkIsMasterServing(t, tme.ts, "ks:40-", false) + checkIsMasterServing(t, tme.ts, "ks:-80", true) + checkIsMasterServing(t, tme.ts, "ks:80-", true) + + verifyQueries(t, tme.allDBClients) +} + +func TestMigrateDistinctTargets(t *testing.T) { + ctx := context.Background() + tme := newTestTableMigrater(ctx, t) + defer tme.stopTablets(t) + + bls := &binlogdatapb.BinlogSource{ + Keyspace: "ks1", + Shard: "-40", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * 
from t1 where in_keyrange('-80')", + }, { + Match: "t2", + Filter: "select * from t2 where in_keyrange('-80')", + }}, + }, + } + tme.dbSource1Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "source", + "varchar"), + fmt.Sprintf("%v", bls), + ), nil) + tme.streams[topo.KeyspaceShard{Keyspace: "ks1", Shard: "-40"}] = []uint32{1} + + err := tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + want := "target keyspaces are mismatched across streams" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateReads: %v, must contain %v", err, want) + } +} + +func TestMigrateDistinctSources(t *testing.T) { + ctx := context.Background() + tme := newTestTableMigrater(ctx, t) + defer tme.stopTablets(t) + + bls := &binlogdatapb.BinlogSource{ + Keyspace: "ks2", + Shard: "-80", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1 where in_keyrange('-80')", + }, { + Match: "t2", + Filter: "select * from t2 where in_keyrange('-80')", + }}, + }, + } + tme.dbDest1Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "source", + "varchar"), + fmt.Sprintf("%v", bls), + ), nil) + + err := tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + want := "source keyspaces are mismatched across streams" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateReads: %v, must contain %v", err, want) + } +} + +func TestMigrateVReplicationStreamNotFound(t *testing.T) { + ctx := context.Background() + tme := newTestTableMigrater(ctx, t) + defer tme.stopTablets(t) + + tme.dbDest1Client.addQuery("select source from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + + err := tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, 
topodatapb.TabletType_RDONLY, directionForward) + want := "VReplication stream 1 not found for ks2:-80" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateReads: %v, must contain %v", err, want) + } +} + +func TestMigrateMismatchedTables(t *testing.T) { + ctx := context.Background() + tme := newTestTableMigrater(ctx, t) + defer tme.stopTablets(t) + + bls := &binlogdatapb.BinlogSource{ + Keyspace: "ks1", + Shard: "-40", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1 where in_keyrange('-80')", + }}, + }, + } + tme.dbDest1Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "source", + "varchar"), + fmt.Sprintf("%v", bls), + ), nil) + + err := tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + want := "table lists are mismatched across streams" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateReads: %v, must contain %v", err, want) + } +} + +func TestMigrateDupUidSources(t *testing.T) { + ctx := context.Background() + tme := newTestTableMigrater(ctx, t) + defer tme.stopTablets(t) + + bls := &binlogdatapb.BinlogSource{ + Keyspace: "ks1", + Shard: "40-", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1 where in_keyrange('80-')", + }, { + Match: "t2", + Filter: "select * from t2 where in_keyrange('80-')", + }}, + }, + } + tme.dbDest1Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "source", + "varchar"), + fmt.Sprintf("%v", bls), + ), nil) + + err := tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + want := "duplicate sources for uids" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateReads: %v, must contain %v", err, want) + } 
+} + +func TestTableMigrateAllShardsNotPresent(t *testing.T) { + ctx := context.Background() + tme := newTestTableMigrater(ctx, t) + defer tme.stopTablets(t) + + tme.streams = map[topo.KeyspaceShard][]uint32{ + {Keyspace: "ks2", Shard: "-80"}: {1, 2}, + } + + err := tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + want := "mismatched shards for keyspace" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateReads: %v, must contain %v", err, want) + } +} + +func TestMigrateNoTableWildcards(t *testing.T) { + ctx := context.Background() + tme := newTestTableMigrater(ctx, t) + defer tme.stopTablets(t) + + bls1 := &binlogdatapb.BinlogSource{ + Keyspace: "ks1", + Shard: "-40", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + Filter: "", + }}, + }, + } + tme.dbDest1Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "source", + "varchar"), + fmt.Sprintf("%v", bls1), + ), nil) + bls2 := &binlogdatapb.BinlogSource{ + Keyspace: "ks1", + Shard: "40-", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + Filter: "", + }}, + }, + } + tme.dbDest1Client.addQuery("select source from _vt.vreplication where id = 2", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "source", + "varchar"), + fmt.Sprintf("%v", bls2), + ), nil) + bls3 := &binlogdatapb.BinlogSource{ + Keyspace: "ks1", + Shard: "40-", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + Filter: "", + }}, + }, + } + tme.dbDest2Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "source", + "varchar"), + fmt.Sprintf("%v", bls3), + ), nil) + + err := tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + want := "cannot migrate streams with wild card table names" + if 
err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateReads: %v, must contain %v", err, want) + } +} + +func TestShardMigrateSourceTargetMismatch(t *testing.T) { + ctx := context.Background() + tme := newTestTableMigrater(ctx, t) + defer tme.stopTablets(t) + + err := tme.wr.MigrateReads(ctx, MigrateShards, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + want := "source and target keyspace must match" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateReads: %v, must contain %v", err, want) + } +} + +func TestShardMigrateTargetMatchesSource(t *testing.T) { + ctx := context.Background() + tme := newTestShardMigrater(ctx, t) + defer tme.stopTablets(t) + + bls := &binlogdatapb.BinlogSource{ + Keyspace: "ks", + Shard: "-80", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + Filter: "-80", + }}, + }, + } + tme.dbSource1Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "source", + "varchar"), + fmt.Sprintf("%v", bls), + ), nil) + + tme.streams[topo.KeyspaceShard{Keyspace: "ks", Shard: "-40"}] = []uint32{1} + + err := tme.wr.MigrateReads(ctx, MigrateShards, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + want := "target shard matches a source shard" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateReads: %v, must contain %v", err, want) + } +} + func checkRouting(t *testing.T, wr *Wrangler, want map[string][]string) { t.Helper() ctx := context.Background() From da43f739cc17ec15642862e1a3d89a04e3c9fb14 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Sun, 7 Jul 2019 20:29:26 -0700 Subject: [PATCH 09/17] migrater: make table routing rules symmetrical Signed-off-by: Sugu Sougoumarane --- go/vt/wrangler/migrater.go | 4 ++++ go/vt/wrangler/migrater_test.go | 36 +++++++++++++++++++++++++++++---- 2 files changed, 36 insertions(+), 4 deletions(-) diff --git 
a/go/vt/wrangler/migrater.go b/go/vt/wrangler/migrater.go index 2fc2ea50ed4..58759a1bef5 100644 --- a/go/vt/wrangler/migrater.go +++ b/go/vt/wrangler/migrater.go @@ -407,9 +407,11 @@ func (mi *migrater) migrateTableReads(ctx context.Context, cells []string, serve if direction == directionForward { rules[table+"@"+tt] = []string{mi.targetKeyspace + "." + table} rules[mi.targetKeyspace+"."+table+"@"+tt] = []string{mi.targetKeyspace + "." + table} + rules[mi.sourceKeyspace+"."+table+"@"+tt] = []string{mi.targetKeyspace + "." + table} } else { delete(rules, table+"@"+tt) delete(rules, mi.targetKeyspace+"."+table+"@"+tt) + delete(rules, mi.sourceKeyspace+"."+table+"@"+tt) } } if err := mi.wr.saveRoutingRules(ctx, rules); err != nil { @@ -685,9 +687,11 @@ func (mi *migrater) changeTableRouting(ctx context.Context) error { tt := strings.ToLower(tabletType.String()) delete(rules, table+"@"+tt) delete(rules, mi.targetKeyspace+"."+table+"@"+tt) + delete(rules, mi.sourceKeyspace+"."+table+"@"+tt) } delete(rules, mi.targetKeyspace+"."+table) rules[table] = []string{mi.targetKeyspace + "." + table} + rules[mi.sourceKeyspace+"."+table] = []string{mi.targetKeyspace + "." 
+ table} } if err := mi.wr.saveRoutingRules(ctx, rules); err != nil { return err diff --git a/go/vt/wrangler/migrater_test.go b/go/vt/wrangler/migrater_test.go index 8b6c63ca451..891d58bfb1a 100644 --- a/go/vt/wrangler/migrater_test.go +++ b/go/vt/wrangler/migrater_test.go @@ -51,8 +51,10 @@ func TestTableMigrate(t *testing.T) { "ks2.t2": {"ks1.t2"}, "t1@rdonly": {"ks2.t1"}, "ks2.t1@rdonly": {"ks2.t1"}, + "ks1.t1@rdonly": {"ks2.t1"}, "t2@rdonly": {"ks2.t2"}, "ks2.t2@rdonly": {"ks2.t2"}, + "ks1.t2@rdonly": {"ks2.t2"}, }) checkCellRouting(t, tme.wr, "cell2", map[string][]string{ "t1": {"ks1.t1"}, @@ -78,8 +80,10 @@ func TestTableMigrate(t *testing.T) { "ks2.t2": {"ks1.t2"}, "t1@rdonly": {"ks2.t1"}, "ks2.t1@rdonly": {"ks2.t1"}, + "ks1.t1@rdonly": {"ks2.t1"}, "t2@rdonly": {"ks2.t2"}, "ks2.t2@rdonly": {"ks2.t2"}, + "ks1.t2@rdonly": {"ks2.t2"}, }) checkCellRouting(t, tme.wr, "cell2", map[string][]string{ "t1": {"ks1.t1"}, @@ -88,12 +92,16 @@ func TestTableMigrate(t *testing.T) { "ks2.t2": {"ks1.t2"}, "t1@rdonly": {"ks2.t1"}, "ks2.t1@rdonly": {"ks2.t1"}, + "ks1.t1@rdonly": {"ks2.t1"}, "t2@rdonly": {"ks2.t2"}, "ks2.t2@rdonly": {"ks2.t2"}, + "ks1.t2@rdonly": {"ks2.t2"}, "t1@replica": {"ks2.t1"}, "ks2.t1@replica": {"ks2.t1"}, + "ks1.t1@replica": {"ks2.t1"}, "t2@replica": {"ks2.t2"}, "ks2.t2@replica": {"ks2.t2"}, + "ks1.t2@replica": {"ks2.t2"}, }) verifyQueries(t, tme.allDBClients) @@ -110,8 +118,10 @@ func TestTableMigrate(t *testing.T) { "ks2.t2": {"ks1.t2"}, "t1@rdonly": {"ks2.t1"}, "ks2.t1@rdonly": {"ks2.t1"}, + "ks1.t1@rdonly": {"ks2.t1"}, "t2@rdonly": {"ks2.t2"}, "ks2.t2@rdonly": {"ks2.t2"}, + "ks1.t2@rdonly": {"ks2.t2"}, }) verifyQueries(t, tme.allDBClients) @@ -128,12 +138,16 @@ func TestTableMigrate(t *testing.T) { "ks2.t2": {"ks1.t2"}, "t1@rdonly": {"ks2.t1"}, "ks2.t1@rdonly": {"ks2.t1"}, + "ks1.t1@rdonly": {"ks2.t1"}, "t2@rdonly": {"ks2.t2"}, "ks2.t2@rdonly": {"ks2.t2"}, + "ks1.t2@rdonly": {"ks2.t2"}, "t1@replica": {"ks2.t1"}, "ks2.t1@replica": {"ks2.t1"}, + 
"ks1.t1@replica": {"ks2.t1"}, "t2@replica": {"ks2.t2"}, "ks2.t2@replica": {"ks2.t2"}, + "ks1.t2@replica": {"ks2.t2"}, }) verifyQueries(t, tme.allDBClients) @@ -150,8 +164,10 @@ func TestTableMigrate(t *testing.T) { "ks2.t2": {"ks1.t2"}, "t1@replica": {"ks2.t1"}, "ks2.t1@replica": {"ks2.t1"}, + "ks1.t1@replica": {"ks2.t1"}, "t2@replica": {"ks2.t2"}, "ks2.t2@replica": {"ks2.t2"}, + "ks1.t2@replica": {"ks2.t2"}, }) verifyQueries(t, tme.allDBClients) @@ -188,12 +204,16 @@ func TestTableMigrate(t *testing.T) { "ks2.t2": {"ks1.t2"}, "t1@replica": {"ks2.t1"}, "ks2.t1@replica": {"ks2.t1"}, + "ks1.t1@replica": {"ks2.t1"}, "t2@replica": {"ks2.t2"}, "ks2.t2@replica": {"ks2.t2"}, + "ks1.t2@replica": {"ks2.t2"}, "t1@rdonly": {"ks2.t1"}, "ks2.t1@rdonly": {"ks2.t1"}, + "ks1.t1@rdonly": {"ks2.t1"}, "t2@rdonly": {"ks2.t2"}, "ks2.t2@rdonly": {"ks2.t2"}, + "ks1.t2@rdonly": {"ks2.t2"}, }) // Check for journals. @@ -240,12 +260,16 @@ func TestTableMigrate(t *testing.T) { "ks2.t2": {"ks1.t2"}, "t1@replica": {"ks2.t1"}, "ks2.t1@replica": {"ks2.t1"}, + "ks1.t1@replica": {"ks2.t1"}, "t2@replica": {"ks2.t2"}, "ks2.t2@replica": {"ks2.t2"}, + "ks1.t2@replica": {"ks2.t2"}, "t1@rdonly": {"ks2.t1"}, "ks2.t1@rdonly": {"ks2.t1"}, + "ks1.t1@rdonly": {"ks2.t1"}, "t2@rdonly": {"ks2.t2"}, "ks2.t2@rdonly": {"ks2.t2"}, + "ks1.t2@rdonly": {"ks2.t2"}, }) checkBlacklist(t, tme.ts, "ks1:-40", nil) checkBlacklist(t, tme.ts, "ks1:40-", nil) @@ -280,8 +304,10 @@ func TestTableMigrate(t *testing.T) { } checkRouting(t, tme.wr, map[string][]string{ - "t1": {"ks2.t1"}, - "t2": {"ks2.t2"}, + "t1": {"ks2.t1"}, + "ks1.t1": {"ks2.t1"}, + "t2": {"ks2.t2"}, + "ks1.t2": {"ks2.t2"}, }) checkBlacklist(t, tme.ts, "ks1:-40", []string{"t1", "t2"}) checkBlacklist(t, tme.ts, "ks1:40-", []string{"t1", "t2"}) @@ -629,8 +655,10 @@ func TestTableMigrateJournalExists(t *testing.T) { // Routes will be redone. 
checkRouting(t, tme.wr, map[string][]string{ - "t1": {"ks2.t1"}, - "t2": {"ks2.t2"}, + "t1": {"ks2.t1"}, + "ks1.t1": {"ks2.t1"}, + "t2": {"ks2.t2"}, + "ks1.t2": {"ks2.t2"}, }) // We're showing that there are no blacklisted tables. But in real life, // tables on ks1 should be blacklisted from the previous failed attempt. From 19f23cc041c2e823a7b9c6fc15d3c222a466f3a6 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Sun, 7 Jul 2019 21:23:35 -0700 Subject: [PATCH 10/17] migrater: add more fields to Journal Signed-off-by: Sugu Sougoumarane --- go/vt/proto/binlogdata/binlogdata.pb.go | 298 ++++++++++++++---------- go/vt/wrangler/migrater.go | 35 ++- go/vt/wrangler/migrater_test.go | 84 +++---- proto/binlogdata.proto | 16 +- py/vtproto/binlogdata_pb2.py | 89 +++++-- 5 files changed, 305 insertions(+), 217 deletions(-) diff --git a/go/vt/proto/binlogdata/binlogdata.pb.go b/go/vt/proto/binlogdata/binlogdata.pb.go index 47fa6baf57a..24983945643 100644 --- a/go/vt/proto/binlogdata/binlogdata.pb.go +++ b/go/vt/proto/binlogdata/binlogdata.pb.go @@ -48,7 +48,7 @@ func (x OnDDLAction) String() string { return proto.EnumName(OnDDLAction_name, int32(x)) } func (OnDDLAction) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_1f081d4c0b940318, []int{0} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{0} } // VEventType enumerates the event types. @@ -119,7 +119,31 @@ func (x VEventType) String() string { return proto.EnumName(VEventType_name, int32(x)) } func (VEventType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_1f081d4c0b940318, []int{1} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{1} +} + +// MigrationType specifies the type of migration for the Journal. 
+type MigrationType int32 + +const ( + MigrationType_TABLES MigrationType = 0 + MigrationType_SHARDS MigrationType = 1 +) + +var MigrationType_name = map[int32]string{ + 0: "TABLES", + 1: "SHARDS", +} +var MigrationType_value = map[string]int32{ + "TABLES": 0, + "SHARDS": 1, +} + +func (x MigrationType) String() string { + return proto.EnumName(MigrationType_name, int32(x)) +} +func (MigrationType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{2} } type BinlogTransaction_Statement_Category int32 @@ -167,7 +191,7 @@ func (x BinlogTransaction_Statement_Category) String() string { return proto.EnumName(BinlogTransaction_Statement_Category_name, int32(x)) } func (BinlogTransaction_Statement_Category) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_1f081d4c0b940318, []int{1, 0, 0} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{1, 0, 0} } // Charset is the per-statement charset info from a QUERY_EVENT binlog entry. 
@@ -187,7 +211,7 @@ func (m *Charset) Reset() { *m = Charset{} } func (m *Charset) String() string { return proto.CompactTextString(m) } func (*Charset) ProtoMessage() {} func (*Charset) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_1f081d4c0b940318, []int{0} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{0} } func (m *Charset) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Charset.Unmarshal(m, b) @@ -244,7 +268,7 @@ func (m *BinlogTransaction) Reset() { *m = BinlogTransaction{} } func (m *BinlogTransaction) String() string { return proto.CompactTextString(m) } func (*BinlogTransaction) ProtoMessage() {} func (*BinlogTransaction) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_1f081d4c0b940318, []int{1} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{1} } func (m *BinlogTransaction) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BinlogTransaction.Unmarshal(m, b) @@ -294,7 +318,7 @@ func (m *BinlogTransaction_Statement) Reset() { *m = BinlogTransaction_S func (m *BinlogTransaction_Statement) String() string { return proto.CompactTextString(m) } func (*BinlogTransaction_Statement) ProtoMessage() {} func (*BinlogTransaction_Statement) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_1f081d4c0b940318, []int{1, 0} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{1, 0} } func (m *BinlogTransaction_Statement) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BinlogTransaction_Statement.Unmarshal(m, b) @@ -352,7 +376,7 @@ func (m *StreamKeyRangeRequest) Reset() { *m = StreamKeyRangeRequest{} } func (m *StreamKeyRangeRequest) String() string { return proto.CompactTextString(m) } func (*StreamKeyRangeRequest) ProtoMessage() {} func (*StreamKeyRangeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_1f081d4c0b940318, []int{2} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{2} } func (m *StreamKeyRangeRequest) XXX_Unmarshal(b []byte) 
error { return xxx_messageInfo_StreamKeyRangeRequest.Unmarshal(m, b) @@ -405,7 +429,7 @@ func (m *StreamKeyRangeResponse) Reset() { *m = StreamKeyRangeResponse{} func (m *StreamKeyRangeResponse) String() string { return proto.CompactTextString(m) } func (*StreamKeyRangeResponse) ProtoMessage() {} func (*StreamKeyRangeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_1f081d4c0b940318, []int{3} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{3} } func (m *StreamKeyRangeResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamKeyRangeResponse.Unmarshal(m, b) @@ -449,7 +473,7 @@ func (m *StreamTablesRequest) Reset() { *m = StreamTablesRequest{} } func (m *StreamTablesRequest) String() string { return proto.CompactTextString(m) } func (*StreamTablesRequest) ProtoMessage() {} func (*StreamTablesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_1f081d4c0b940318, []int{4} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{4} } func (m *StreamTablesRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamTablesRequest.Unmarshal(m, b) @@ -502,7 +526,7 @@ func (m *StreamTablesResponse) Reset() { *m = StreamTablesResponse{} } func (m *StreamTablesResponse) String() string { return proto.CompactTextString(m) } func (*StreamTablesResponse) ProtoMessage() {} func (*StreamTablesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_1f081d4c0b940318, []int{5} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{5} } func (m *StreamTablesResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamTablesResponse.Unmarshal(m, b) @@ -547,7 +571,7 @@ func (m *Rule) Reset() { *m = Rule{} } func (m *Rule) String() string { return proto.CompactTextString(m) } func (*Rule) ProtoMessage() {} func (*Rule) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_1f081d4c0b940318, []int{6} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{6} } 
func (m *Rule) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Rule.Unmarshal(m, b) @@ -594,7 +618,7 @@ func (m *Filter) Reset() { *m = Filter{} } func (m *Filter) String() string { return proto.CompactTextString(m) } func (*Filter) ProtoMessage() {} func (*Filter) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_1f081d4c0b940318, []int{7} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{7} } func (m *Filter) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Filter.Unmarshal(m, b) @@ -649,7 +673,7 @@ func (m *BinlogSource) Reset() { *m = BinlogSource{} } func (m *BinlogSource) String() string { return proto.CompactTextString(m) } func (*BinlogSource) ProtoMessage() {} func (*BinlogSource) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_1f081d4c0b940318, []int{8} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{8} } func (m *BinlogSource) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BinlogSource.Unmarshal(m, b) @@ -731,7 +755,7 @@ func (m *RowChange) Reset() { *m = RowChange{} } func (m *RowChange) String() string { return proto.CompactTextString(m) } func (*RowChange) ProtoMessage() {} func (*RowChange) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_1f081d4c0b940318, []int{9} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{9} } func (m *RowChange) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RowChange.Unmarshal(m, b) @@ -778,7 +802,7 @@ func (m *RowEvent) Reset() { *m = RowEvent{} } func (m *RowEvent) String() string { return proto.CompactTextString(m) } func (*RowEvent) ProtoMessage() {} func (*RowEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_1f081d4c0b940318, []int{10} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{10} } func (m *RowEvent) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RowEvent.Unmarshal(m, b) @@ -824,7 +848,7 @@ func (m *FieldEvent) Reset() { *m = FieldEvent{} } func (m *FieldEvent) 
String() string { return proto.CompactTextString(m) } func (*FieldEvent) ProtoMessage() {} func (*FieldEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_1f081d4c0b940318, []int{11} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{11} } func (m *FieldEvent) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_FieldEvent.Unmarshal(m, b) @@ -871,7 +895,7 @@ func (m *ShardGtid) Reset() { *m = ShardGtid{} } func (m *ShardGtid) String() string { return proto.CompactTextString(m) } func (*ShardGtid) ProtoMessage() {} func (*ShardGtid) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_1f081d4c0b940318, []int{12} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{12} } func (m *ShardGtid) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ShardGtid.Unmarshal(m, b) @@ -923,7 +947,7 @@ func (m *VGtid) Reset() { *m = VGtid{} } func (m *VGtid) String() string { return proto.CompactTextString(m) } func (*VGtid) ProtoMessage() {} func (*VGtid) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_1f081d4c0b940318, []int{13} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{13} } func (m *VGtid) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VGtid.Unmarshal(m, b) @@ -962,7 +986,7 @@ func (m *KeyspaceShard) Reset() { *m = KeyspaceShard{} } func (m *KeyspaceShard) String() string { return proto.CompactTextString(m) } func (*KeyspaceShard) ProtoMessage() {} func (*KeyspaceShard) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_1f081d4c0b940318, []int{14} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{14} } func (m *KeyspaceShard) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_KeyspaceShard.Unmarshal(m, b) @@ -998,10 +1022,12 @@ func (m *KeyspaceShard) GetShard() string { type Journal struct { Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - Tables []string `protobuf:"bytes,2,rep,name=tables,proto3" json:"tables,omitempty"` - 
LocalPosition string `protobuf:"bytes,3,opt,name=local_position,json=localPosition,proto3" json:"local_position,omitempty"` - ShardGtids []*ShardGtid `protobuf:"bytes,4,rep,name=shard_gtids,json=shardGtids,proto3" json:"shard_gtids,omitempty"` - Participants []*KeyspaceShard `protobuf:"bytes,5,rep,name=participants,proto3" json:"participants,omitempty"` + MigrationType MigrationType `protobuf:"varint,2,opt,name=migration_type,json=migrationType,proto3,enum=binlogdata.MigrationType" json:"migration_type,omitempty"` + Tables []string `protobuf:"bytes,3,rep,name=tables,proto3" json:"tables,omitempty"` + LocalPosition string `protobuf:"bytes,4,opt,name=local_position,json=localPosition,proto3" json:"local_position,omitempty"` + ShardGtids []*ShardGtid `protobuf:"bytes,5,rep,name=shard_gtids,json=shardGtids,proto3" json:"shard_gtids,omitempty"` + Participants []*KeyspaceShard `protobuf:"bytes,6,rep,name=participants,proto3" json:"participants,omitempty"` + ReversedIds []int64 `protobuf:"varint,7,rep,packed,name=reversed_ids,json=reversedIds,proto3" json:"reversed_ids,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1011,7 +1037,7 @@ func (m *Journal) Reset() { *m = Journal{} } func (m *Journal) String() string { return proto.CompactTextString(m) } func (*Journal) ProtoMessage() {} func (*Journal) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_1f081d4c0b940318, []int{15} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{15} } func (m *Journal) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Journal.Unmarshal(m, b) @@ -1038,6 +1064,13 @@ func (m *Journal) GetId() int64 { return 0 } +func (m *Journal) GetMigrationType() MigrationType { + if m != nil { + return m.MigrationType + } + return MigrationType_TABLES +} + func (m *Journal) GetTables() []string { if m != nil { return m.Tables @@ -1066,6 +1099,13 @@ func (m *Journal) GetParticipants() []*KeyspaceShard { 
return nil } +func (m *Journal) GetReversedIds() []int64 { + if m != nil { + return m.ReversedIds + } + return nil +} + // VEvent represents a vstream event type VEvent struct { Type VEventType `protobuf:"varint,1,opt,name=type,proto3,enum=binlogdata.VEventType" json:"type,omitempty"` @@ -1087,7 +1127,7 @@ func (m *VEvent) Reset() { *m = VEvent{} } func (m *VEvent) String() string { return proto.CompactTextString(m) } func (*VEvent) ProtoMessage() {} func (*VEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_1f081d4c0b940318, []int{16} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{16} } func (m *VEvent) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VEvent.Unmarshal(m, b) @@ -1186,7 +1226,7 @@ func (m *VStreamRequest) Reset() { *m = VStreamRequest{} } func (m *VStreamRequest) String() string { return proto.CompactTextString(m) } func (*VStreamRequest) ProtoMessage() {} func (*VStreamRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_1f081d4c0b940318, []int{17} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{17} } func (m *VStreamRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VStreamRequest.Unmarshal(m, b) @@ -1253,7 +1293,7 @@ func (m *VStreamResponse) Reset() { *m = VStreamResponse{} } func (m *VStreamResponse) String() string { return proto.CompactTextString(m) } func (*VStreamResponse) ProtoMessage() {} func (*VStreamResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_1f081d4c0b940318, []int{18} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{18} } func (m *VStreamResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VStreamResponse.Unmarshal(m, b) @@ -1296,7 +1336,7 @@ func (m *VStreamRowsRequest) Reset() { *m = VStreamRowsRequest{} } func (m *VStreamRowsRequest) String() string { return proto.CompactTextString(m) } func (*VStreamRowsRequest) ProtoMessage() {} func (*VStreamRowsRequest) Descriptor() ([]byte, []int) { - 
return fileDescriptor_binlogdata_1f081d4c0b940318, []int{19} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{19} } func (m *VStreamRowsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VStreamRowsRequest.Unmarshal(m, b) @@ -1367,7 +1407,7 @@ func (m *VStreamRowsResponse) Reset() { *m = VStreamRowsResponse{} } func (m *VStreamRowsResponse) String() string { return proto.CompactTextString(m) } func (*VStreamRowsResponse) ProtoMessage() {} func (*VStreamRowsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_1f081d4c0b940318, []int{20} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{20} } func (m *VStreamRowsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VStreamRowsResponse.Unmarshal(m, b) @@ -1447,104 +1487,110 @@ func init() { proto.RegisterType((*VStreamRowsResponse)(nil), "binlogdata.VStreamRowsResponse") proto.RegisterEnum("binlogdata.OnDDLAction", OnDDLAction_name, OnDDLAction_value) proto.RegisterEnum("binlogdata.VEventType", VEventType_name, VEventType_value) + proto.RegisterEnum("binlogdata.MigrationType", MigrationType_name, MigrationType_value) proto.RegisterEnum("binlogdata.BinlogTransaction_Statement_Category", BinlogTransaction_Statement_Category_name, BinlogTransaction_Statement_Category_value) } -func init() { proto.RegisterFile("binlogdata.proto", fileDescriptor_binlogdata_1f081d4c0b940318) } - -var fileDescriptor_binlogdata_1f081d4c0b940318 = []byte{ - // 1484 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x57, 0xcb, 0x72, 0xe3, 0x54, - 0x13, 0x1e, 0xdb, 0x92, 0x2f, 0xad, 0x5c, 0x94, 0x93, 0xcb, 0xef, 0x3f, 0xc5, 0x50, 0x41, 0xc5, - 0x30, 0x21, 0x55, 0x38, 0x83, 0x81, 0x61, 0x35, 0x4c, 0xf9, 0xa2, 0xc9, 0x38, 0x51, 0xec, 0xcc, - 0x89, 0x92, 0xa1, 0x66, 0xa3, 0x52, 0xa4, 0x93, 0x44, 0x44, 0x96, 0x3c, 0xd2, 0x71, 0x42, 0x1e, - 0x80, 0xe2, 0x01, 0xd8, 0xf2, 0x02, 0x3c, 0x04, 0x5b, 0xb6, 0x14, 0x4f, 0xc0, 0x8a, 0xf7, 
0xa0, - 0xce, 0x45, 0xb2, 0x9d, 0xb9, 0x65, 0xa8, 0x62, 0xc1, 0xc6, 0xd5, 0xa7, 0x4f, 0x77, 0xab, 0xfb, - 0xeb, 0x3e, 0xdd, 0x6d, 0xd0, 0x4f, 0x82, 0x28, 0x8c, 0xcf, 0x7c, 0x97, 0xba, 0x8d, 0x51, 0x12, - 0xd3, 0x18, 0xc1, 0x84, 0xb3, 0xae, 0x5d, 0xd2, 0x64, 0xe4, 0x89, 0x8b, 0x75, 0xed, 0xe5, 0x98, - 0x24, 0xd7, 0xf2, 0xb0, 0x40, 0xe3, 0x51, 0x3c, 0xd1, 0x32, 0xf6, 0xa1, 0xd2, 0x39, 0x77, 0x93, - 0x94, 0x50, 0xb4, 0x06, 0x65, 0x2f, 0x0c, 0x48, 0x44, 0xeb, 0x85, 0x8d, 0xc2, 0xa6, 0x8a, 0xe5, - 0x09, 0x21, 0x50, 0xbc, 0x38, 0x8a, 0xea, 0x45, 0xce, 0xe5, 0x34, 0x93, 0x4d, 0x49, 0x72, 0x49, - 0x92, 0x7a, 0x49, 0xc8, 0x8a, 0x93, 0xf1, 0x57, 0x09, 0x96, 0xda, 0xdc, 0x0f, 0x3b, 0x71, 0xa3, - 0xd4, 0xf5, 0x68, 0x10, 0x47, 0x68, 0x07, 0x20, 0xa5, 0x2e, 0x25, 0x43, 0x12, 0xd1, 0xb4, 0x5e, - 0xd8, 0x28, 0x6d, 0x6a, 0xcd, 0xfb, 0x8d, 0xa9, 0x08, 0x5e, 0x51, 0x69, 0x1c, 0x66, 0xf2, 0x78, - 0x4a, 0x15, 0x35, 0x41, 0x23, 0x97, 0x24, 0xa2, 0x0e, 0x8d, 0x2f, 0x48, 0x54, 0x57, 0x36, 0x0a, - 0x9b, 0x5a, 0x73, 0xa9, 0x21, 0x02, 0x34, 0xd9, 0x8d, 0xcd, 0x2e, 0x30, 0x90, 0x9c, 0x5e, 0xff, - 0xad, 0x08, 0xb5, 0xdc, 0x1a, 0xb2, 0xa0, 0xea, 0xb9, 0x94, 0x9c, 0xc5, 0xc9, 0x35, 0x0f, 0x73, - 0xa1, 0xf9, 0xe0, 0x96, 0x8e, 0x34, 0x3a, 0x52, 0x0f, 0xe7, 0x16, 0xd0, 0x67, 0x50, 0xf1, 0x04, - 0x7a, 0x1c, 0x1d, 0xad, 0xb9, 0x3c, 0x6d, 0x4c, 0x02, 0x8b, 0x33, 0x19, 0xa4, 0x43, 0x29, 0x7d, - 0x19, 0x72, 0xc8, 0xe6, 0x30, 0x23, 0x8d, 0x5f, 0x0a, 0x50, 0xcd, 0xec, 0xa2, 0x65, 0x58, 0x6c, - 0x5b, 0xce, 0x51, 0x1f, 0x9b, 0x9d, 0xc1, 0x4e, 0xbf, 0xf7, 0xc2, 0xec, 0xea, 0x77, 0xd0, 0x1c, - 0x54, 0xdb, 0x96, 0xd3, 0x36, 0x77, 0x7a, 0x7d, 0xbd, 0x80, 0xe6, 0xa1, 0xd6, 0xb6, 0x9c, 0xce, - 0x60, 0x7f, 0xbf, 0x67, 0xeb, 0x45, 0xb4, 0x08, 0x5a, 0xdb, 0x72, 0xf0, 0xc0, 0xb2, 0xda, 0xad, - 0xce, 0x9e, 0x5e, 0x42, 0xab, 0xb0, 0xd4, 0xb6, 0x9c, 0xee, 0xbe, 0xe5, 0x74, 0xcd, 0x03, 0x6c, - 0x76, 0x5a, 0xb6, 0xd9, 0xd5, 0x15, 0x04, 0x50, 0x66, 0xec, 0xae, 0xa5, 0xab, 0x92, 0x3e, 0x34, - 0x6d, 0xbd, 0x2c, 0xcd, 0xf5, 
0xfa, 0x87, 0x26, 0xb6, 0xf5, 0x8a, 0x3c, 0x1e, 0x1d, 0x74, 0x5b, - 0xb6, 0xa9, 0x57, 0xe5, 0xb1, 0x6b, 0x5a, 0xa6, 0x6d, 0xea, 0xb5, 0x5d, 0xa5, 0x5a, 0xd4, 0x4b, - 0xbb, 0x4a, 0xb5, 0xa4, 0x2b, 0xc6, 0x4f, 0x05, 0x58, 0x3d, 0xa4, 0x09, 0x71, 0x87, 0x7b, 0xe4, - 0x1a, 0xbb, 0xd1, 0x19, 0xc1, 0xe4, 0xe5, 0x98, 0xa4, 0x14, 0xad, 0x43, 0x75, 0x14, 0xa7, 0x01, - 0xc3, 0x8e, 0x03, 0x5c, 0xc3, 0xf9, 0x19, 0x6d, 0x43, 0xed, 0x82, 0x5c, 0x3b, 0x09, 0x93, 0x97, - 0x80, 0xa1, 0x46, 0x5e, 0x90, 0xb9, 0xa5, 0xea, 0x85, 0xa4, 0xa6, 0xf1, 0x2d, 0xbd, 0x1b, 0x5f, - 0xe3, 0x14, 0xd6, 0x6e, 0x3a, 0x95, 0x8e, 0xe2, 0x28, 0x25, 0xc8, 0x02, 0x24, 0x14, 0x1d, 0x3a, - 0xc9, 0x2d, 0xf7, 0x4f, 0x6b, 0xde, 0x7d, 0x6b, 0x01, 0xe0, 0xa5, 0x93, 0x9b, 0x2c, 0xe3, 0x7b, - 0x58, 0x16, 0xdf, 0xb1, 0xdd, 0x93, 0x90, 0xa4, 0xb7, 0x09, 0x7d, 0x0d, 0xca, 0x94, 0x0b, 0xd7, - 0x8b, 0x1b, 0xa5, 0xcd, 0x1a, 0x96, 0xa7, 0xf7, 0x8d, 0xd0, 0x87, 0x95, 0xd9, 0x2f, 0xff, 0x2b, - 0xf1, 0x7d, 0x09, 0x0a, 0x1e, 0x87, 0x04, 0xad, 0x80, 0x3a, 0x74, 0xa9, 0x77, 0x2e, 0xa3, 0x11, - 0x07, 0x16, 0xca, 0x69, 0x10, 0x52, 0x92, 0xf0, 0x14, 0xd6, 0xb0, 0x3c, 0x19, 0x0f, 0xa0, 0xfc, - 0x84, 0x53, 0xe8, 0x13, 0x50, 0x93, 0x31, 0x8b, 0x55, 0x3c, 0x75, 0x7d, 0xda, 0x01, 0x66, 0x18, - 0x8b, 0x6b, 0xe3, 0xe7, 0x22, 0xcc, 0x09, 0x87, 0x0e, 0xe3, 0x71, 0xe2, 0x11, 0x86, 0xe0, 0x05, - 0xb9, 0x4e, 0x47, 0xae, 0x47, 0x32, 0x04, 0xb3, 0x33, 0x73, 0x26, 0x3d, 0x77, 0x13, 0x5f, 0x7e, - 0x55, 0x1c, 0xd0, 0x57, 0xa0, 0x71, 0x24, 0xa9, 0x43, 0xaf, 0x47, 0x84, 0x63, 0xb8, 0xd0, 0x5c, - 0x99, 0x14, 0x15, 0xc7, 0x89, 0xda, 0xd7, 0x23, 0x82, 0x81, 0xe6, 0xf4, 0x6c, 0x25, 0x2a, 0xb7, - 0xa8, 0xc4, 0x49, 0xfe, 0xd4, 0x99, 0xfc, 0x6d, 0xe5, 0x60, 0x94, 0xa5, 0x95, 0xa9, 0x58, 0x05, - 0x1c, 0x19, 0x40, 0xa8, 0x01, 0xe5, 0x38, 0x72, 0x7c, 0x3f, 0xac, 0x57, 0xb8, 0x9b, 0xff, 0x9b, - 0x96, 0x1d, 0x44, 0xdd, 0xae, 0xd5, 0x12, 0x29, 0x51, 0xe3, 0xa8, 0xeb, 0x87, 0xc6, 0x33, 0xa8, - 0xe1, 0xf8, 0xaa, 0x73, 0xce, 0x1d, 0x30, 0xa0, 0x7c, 0x42, 0x4e, 
0xe3, 0x84, 0xc8, 0xac, 0x82, - 0xec, 0x7a, 0x38, 0xbe, 0xc2, 0xf2, 0x06, 0x6d, 0x80, 0xea, 0x9e, 0x66, 0x89, 0x99, 0x15, 0x11, - 0x17, 0x86, 0x0b, 0x55, 0x1c, 0x5f, 0xf1, 0x4e, 0x89, 0xee, 0x82, 0x40, 0xc4, 0x89, 0xdc, 0x61, - 0x06, 0x77, 0x8d, 0x73, 0xfa, 0xee, 0x90, 0xa0, 0x87, 0xa0, 0x25, 0xf1, 0x95, 0xe3, 0xf1, 0xcf, - 0x8b, 0xb2, 0xd5, 0x9a, 0xab, 0x33, 0xa9, 0xcc, 0x9c, 0xc3, 0x90, 0x64, 0x64, 0x6a, 0x3c, 0x03, - 0x78, 0x12, 0x90, 0xd0, 0xbf, 0xd5, 0x47, 0x3e, 0x66, 0xf0, 0x91, 0xd0, 0xcf, 0xec, 0xcf, 0x49, - 0x97, 0xb9, 0x05, 0x2c, 0xef, 0x18, 0x10, 0x87, 0x2c, 0xdb, 0x3b, 0x34, 0xf0, 0xff, 0x41, 0x8d, - 0x20, 0x50, 0xce, 0x68, 0xe0, 0xf3, 0xe2, 0xa8, 0x61, 0x4e, 0x1b, 0x8f, 0x41, 0x3d, 0xe6, 0xe6, - 0x1e, 0x82, 0xc6, 0xa5, 0x1c, 0xc6, 0xce, 0x2a, 0x76, 0x26, 0xcc, 0xfc, 0xd3, 0x18, 0xd2, 0x8c, - 0x4c, 0x8d, 0x16, 0xcc, 0xef, 0xc9, 0xcf, 0x72, 0x81, 0xf7, 0xf7, 0xcb, 0xf8, 0xbd, 0x00, 0x95, - 0xdd, 0x78, 0x9c, 0x44, 0x6e, 0x88, 0x16, 0xa0, 0x18, 0xf8, 0x5c, 0xaf, 0x84, 0x8b, 0x81, 0xff, - 0xc6, 0x7e, 0x71, 0x0f, 0x16, 0xc2, 0xd8, 0x73, 0x43, 0x27, 0xef, 0x34, 0x22, 0xaa, 0x79, 0xce, - 0x3d, 0xc8, 0xda, 0xcd, 0x8d, 0xa8, 0x94, 0x5b, 0x46, 0x85, 0x1e, 0xc1, 0xdc, 0xc8, 0x4d, 0x68, - 0xe0, 0x05, 0x23, 0x97, 0xcd, 0x6a, 0x95, 0x2b, 0xfe, 0x7f, 0x5a, 0x71, 0x26, 0x6a, 0x3c, 0x23, - 0x6e, 0xfc, 0x59, 0x84, 0xf2, 0xb1, 0x48, 0xfc, 0x16, 0x28, 0xfc, 0x45, 0x8a, 0x21, 0xbb, 0x36, - 0x6d, 0x41, 0x48, 0xf0, 0x37, 0xc9, 0x65, 0xd0, 0x07, 0x50, 0xa3, 0xc1, 0x90, 0xa4, 0xd4, 0x1d, - 0x8e, 0x38, 0x44, 0x25, 0x3c, 0x61, 0xbc, 0x2e, 0x7d, 0x6c, 0x92, 0xb2, 0x77, 0xa4, 0x70, 0x16, - 0x23, 0xd1, 0xe7, 0x50, 0x63, 0xe5, 0xca, 0x07, 0x7f, 0x5d, 0xe5, 0xf5, 0xbf, 0x72, 0xa3, 0x58, - 0xf9, 0x67, 0x71, 0x35, 0xc9, 0x1e, 0xc0, 0xd7, 0xa0, 0xf1, 0x02, 0x93, 0x4a, 0xe2, 0x01, 0xaf, - 0xcd, 0x3e, 0xe0, 0xac, 0x90, 0x31, 0x9c, 0x4e, 0x8a, 0xfa, 0x3e, 0xa8, 0x97, 0xdc, 0xa5, 0x8a, - 0x5c, 0x40, 0xa6, 0x83, 0xe3, 0x98, 0x8a, 0x7b, 0xd6, 0xdd, 0xbf, 0x13, 0x09, 0xae, 0x57, 0x5f, - 0xed, 
0xee, 0x32, 0xf7, 0x38, 0x93, 0x41, 0x1f, 0xc1, 0x9c, 0x37, 0x4e, 0x12, 0xbe, 0xe0, 0x04, - 0x43, 0x52, 0x5f, 0xe1, 0x50, 0x68, 0x92, 0x67, 0x07, 0x43, 0x62, 0xfc, 0x58, 0x84, 0x85, 0x63, - 0x31, 0x02, 0xb2, 0xb1, 0xf3, 0x18, 0x96, 0xc9, 0xe9, 0x29, 0xf1, 0x68, 0x70, 0x49, 0x1c, 0xcf, - 0x0d, 0x43, 0x92, 0x38, 0xb2, 0x96, 0xb4, 0xe6, 0x62, 0x43, 0xac, 0x82, 0x1d, 0xce, 0xef, 0x75, - 0xf1, 0x52, 0x2e, 0x2b, 0x59, 0x3e, 0x32, 0x61, 0x39, 0x18, 0x0e, 0x89, 0x1f, 0xb8, 0x74, 0xda, - 0x80, 0x68, 0x22, 0xab, 0xf2, 0x45, 0x1e, 0xdb, 0x3b, 0x2e, 0x25, 0x13, 0x33, 0xb9, 0x46, 0x6e, - 0xe6, 0x1e, 0x2b, 0xd9, 0xe4, 0x2c, 0x9f, 0x64, 0xf3, 0x52, 0xd3, 0xe6, 0x4c, 0x2c, 0x2f, 0x67, - 0xa6, 0xa4, 0x72, 0x63, 0x4a, 0x4e, 0xba, 0xa9, 0xfa, 0xae, 0x6e, 0x6a, 0x3c, 0x82, 0xc5, 0x1c, - 0x08, 0x39, 0x05, 0xb7, 0xa0, 0xcc, 0x53, 0x99, 0x3d, 0x63, 0xf4, 0x6a, 0xd5, 0x61, 0x29, 0x61, - 0xfc, 0x50, 0x04, 0x94, 0xe9, 0xc7, 0x57, 0xe9, 0x7f, 0x14, 0xcc, 0x15, 0x50, 0x39, 0x5f, 0x22, - 0x29, 0x0e, 0x0c, 0x87, 0xd0, 0x4d, 0xe9, 0xe8, 0x22, 0x87, 0x51, 0x28, 0x3f, 0x63, 0xbf, 0x98, - 0xa4, 0xe3, 0x90, 0x62, 0x29, 0x61, 0xfc, 0x5a, 0x80, 0xe5, 0x19, 0x1c, 0x24, 0x96, 0x93, 0xce, - 0x5c, 0x78, 0x73, 0x67, 0x46, 0x9b, 0x50, 0x1d, 0x5d, 0xbc, 0xa5, 0x83, 0xe7, 0xb7, 0xaf, 0x7d, - 0xc5, 0x1f, 0x82, 0x92, 0xc4, 0x57, 0x59, 0x7b, 0x9a, 0x1e, 0x57, 0x9c, 0xcf, 0x66, 0xde, 0x4c, - 0x1c, 0x33, 0x33, 0x4f, 0xdc, 0x6c, 0x7d, 0x03, 0xda, 0xd4, 0xe8, 0x64, 0xdb, 0x6d, 0x6f, 0xa7, - 0x3f, 0xc0, 0xa6, 0x7e, 0x07, 0x55, 0x41, 0x39, 0xb4, 0x07, 0x07, 0x7a, 0x81, 0x51, 0xe6, 0xb7, - 0x66, 0x47, 0x6c, 0xcc, 0x8c, 0x72, 0xa4, 0x50, 0x69, 0xeb, 0x8f, 0x02, 0xc0, 0xa4, 0x21, 0x21, - 0x0d, 0x2a, 0x47, 0xfd, 0xbd, 0xfe, 0xe0, 0x79, 0x5f, 0x18, 0xd8, 0xb1, 0x7b, 0x5d, 0xbd, 0x80, - 0x6a, 0xa0, 0x8a, 0x15, 0xbc, 0xc8, 0xbe, 0x20, 0xf7, 0xef, 0x12, 0x5b, 0xce, 0xf3, 0xe5, 0x5b, - 0x41, 0x15, 0x28, 0xe5, 0x2b, 0xb6, 0xdc, 0xa9, 0xcb, 0xcc, 0x20, 0x36, 0x0f, 0xac, 0x56, 0xc7, - 0xd4, 0x2b, 0xec, 0x22, 0xdf, 0xae, 0x01, 
0xca, 0xd9, 0x6a, 0xcd, 0x34, 0xd9, 0x42, 0x0e, 0xec, - 0x3b, 0x03, 0xfb, 0xa9, 0x89, 0x75, 0x8d, 0xf1, 0xf0, 0xe0, 0xb9, 0x3e, 0xc7, 0x78, 0x4f, 0x7a, - 0xa6, 0xd5, 0xd5, 0xe7, 0xd9, 0x46, 0xfe, 0xd4, 0x6c, 0x61, 0xbb, 0x6d, 0xb6, 0x6c, 0x7d, 0x81, - 0xdd, 0x1c, 0x73, 0x07, 0x17, 0xd9, 0x67, 0x76, 0x07, 0x47, 0xb8, 0xdf, 0xb2, 0x74, 0xbd, 0xfd, - 0xe9, 0x8b, 0xfb, 0x97, 0x01, 0x25, 0x69, 0xda, 0x08, 0xe2, 0x6d, 0x41, 0x6d, 0x9f, 0xc5, 0xdb, - 0x97, 0x74, 0x9b, 0xff, 0xe9, 0xdb, 0x9e, 0xbc, 0x8a, 0x93, 0x32, 0xe7, 0x7c, 0xf1, 0x77, 0x00, - 0x00, 0x00, 0xff, 0xff, 0x0c, 0x14, 0xe6, 0xb2, 0x50, 0x0e, 0x00, 0x00, +func init() { proto.RegisterFile("binlogdata.proto", fileDescriptor_binlogdata_db2d20dd0016de21) } + +var fileDescriptor_binlogdata_db2d20dd0016de21 = []byte{ + // 1558 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x57, 0xcb, 0x72, 0xdb, 0xca, + 0x11, 0x35, 0x09, 0xf0, 0xd5, 0x90, 0x28, 0x68, 0xf4, 0x08, 0xa3, 0x8a, 0x53, 0x0a, 0x2a, 0x8e, + 0x14, 0x55, 0x85, 0x72, 0x98, 0xc4, 0x59, 0x39, 0x0e, 0x1f, 0xb0, 0x4c, 0x09, 0x22, 0xe5, 0x21, + 0x24, 0xa7, 0xbc, 0x41, 0x41, 0xc4, 0x48, 0x42, 0x04, 0x02, 0x34, 0x30, 0xa4, 0xa2, 0x0f, 0x48, + 0xe5, 0x03, 0xb2, 0xcd, 0x0f, 0x64, 0x9f, 0x6d, 0xb6, 0xd9, 0xe7, 0x0b, 0xb2, 0xca, 0x7f, 0xdc, + 0x9a, 0x07, 0x40, 0x42, 0xf6, 0xb5, 0xe5, 0x5b, 0x75, 0x17, 0x77, 0xc3, 0xea, 0xe9, 0xe9, 0xe7, + 0x41, 0x4f, 0x77, 0x13, 0xf4, 0x4b, 0x3f, 0x0c, 0xa2, 0x6b, 0xcf, 0xa5, 0x6e, 0x73, 0x1a, 0x47, + 0x34, 0x42, 0xb0, 0xe0, 0xec, 0x68, 0x73, 0x1a, 0x4f, 0xc7, 0xe2, 0x62, 0x47, 0xfb, 0x30, 0x23, + 0xf1, 0xbd, 0x3c, 0xd4, 0x69, 0x34, 0x8d, 0x16, 0x5a, 0xc6, 0x29, 0x54, 0xba, 0x37, 0x6e, 0x9c, + 0x10, 0x8a, 0xb6, 0xa1, 0x3c, 0x0e, 0x7c, 0x12, 0xd2, 0x46, 0x61, 0xb7, 0xb0, 0x5f, 0xc2, 0xf2, + 0x84, 0x10, 0xa8, 0xe3, 0x28, 0x0c, 0x1b, 0x45, 0xce, 0xe5, 0x34, 0x93, 0x4d, 0x48, 0x3c, 0x27, + 0x71, 0x43, 0x11, 0xb2, 0xe2, 0x64, 0xfc, 0x5f, 0x81, 0xf5, 0x0e, 0x8f, 0xc3, 0x8e, 0xdd, 
0x30, + 0x71, 0xc7, 0xd4, 0x8f, 0x42, 0x74, 0x04, 0x90, 0x50, 0x97, 0x92, 0x09, 0x09, 0x69, 0xd2, 0x28, + 0xec, 0x2a, 0xfb, 0x5a, 0x6b, 0xaf, 0xb9, 0x94, 0xc1, 0x47, 0x2a, 0xcd, 0x51, 0x2a, 0x8f, 0x97, + 0x54, 0x51, 0x0b, 0x34, 0x32, 0x27, 0x21, 0x75, 0x68, 0x74, 0x4b, 0xc2, 0x86, 0xba, 0x5b, 0xd8, + 0xd7, 0x5a, 0xeb, 0x4d, 0x91, 0xa0, 0xc9, 0x6e, 0x6c, 0x76, 0x81, 0x81, 0x64, 0xf4, 0xce, 0x7f, + 0x8a, 0x50, 0xcb, 0xac, 0x21, 0x0b, 0xaa, 0x63, 0x97, 0x92, 0xeb, 0x28, 0xbe, 0xe7, 0x69, 0xd6, + 0x5b, 0xcf, 0x1f, 0x19, 0x48, 0xb3, 0x2b, 0xf5, 0x70, 0x66, 0x01, 0xfd, 0x0a, 0x2a, 0x63, 0x81, + 0x1e, 0x47, 0x47, 0x6b, 0x6d, 0x2c, 0x1b, 0x93, 0xc0, 0xe2, 0x54, 0x06, 0xe9, 0xa0, 0x24, 0x1f, + 0x02, 0x0e, 0xd9, 0x0a, 0x66, 0xa4, 0xf1, 0xcf, 0x02, 0x54, 0x53, 0xbb, 0x68, 0x03, 0xd6, 0x3a, + 0x96, 0x73, 0x3e, 0xc0, 0x66, 0x77, 0x78, 0x34, 0xe8, 0xbf, 0x37, 0x7b, 0xfa, 0x13, 0xb4, 0x02, + 0xd5, 0x8e, 0xe5, 0x74, 0xcc, 0xa3, 0xfe, 0x40, 0x2f, 0xa0, 0x55, 0xa8, 0x75, 0x2c, 0xa7, 0x3b, + 0x3c, 0x3d, 0xed, 0xdb, 0x7a, 0x11, 0xad, 0x81, 0xd6, 0xb1, 0x1c, 0x3c, 0xb4, 0xac, 0x4e, 0xbb, + 0x7b, 0xa2, 0x2b, 0x68, 0x0b, 0xd6, 0x3b, 0x96, 0xd3, 0x3b, 0xb5, 0x9c, 0x9e, 0x79, 0x86, 0xcd, + 0x6e, 0xdb, 0x36, 0x7b, 0xba, 0x8a, 0x00, 0xca, 0x8c, 0xdd, 0xb3, 0xf4, 0x92, 0xa4, 0x47, 0xa6, + 0xad, 0x97, 0xa5, 0xb9, 0xfe, 0x60, 0x64, 0x62, 0x5b, 0xaf, 0xc8, 0xe3, 0xf9, 0x59, 0xaf, 0x6d, + 0x9b, 0x7a, 0x55, 0x1e, 0x7b, 0xa6, 0x65, 0xda, 0xa6, 0x5e, 0x3b, 0x56, 0xab, 0x45, 0x5d, 0x39, + 0x56, 0xab, 0x8a, 0xae, 0x1a, 0x7f, 0x2f, 0xc0, 0xd6, 0x88, 0xc6, 0xc4, 0x9d, 0x9c, 0x90, 0x7b, + 0xec, 0x86, 0xd7, 0x04, 0x93, 0x0f, 0x33, 0x92, 0x50, 0xb4, 0x03, 0xd5, 0x69, 0x94, 0xf8, 0x0c, + 0x3b, 0x0e, 0x70, 0x0d, 0x67, 0x67, 0x74, 0x08, 0xb5, 0x5b, 0x72, 0xef, 0xc4, 0x4c, 0x5e, 0x02, + 0x86, 0x9a, 0x59, 0x41, 0x66, 0x96, 0xaa, 0xb7, 0x92, 0x5a, 0xc6, 0x57, 0xf9, 0x32, 0xbe, 0xc6, + 0x15, 0x6c, 0x3f, 0x0c, 0x2a, 0x99, 0x46, 0x61, 0x42, 0x90, 0x05, 0x48, 0x28, 0x3a, 0x74, 0xf1, + 0x6d, 0x79, 0x7c, 0x5a, 0xeb, 
0xe9, 0x67, 0x0b, 0x00, 0xaf, 0x5f, 0x3e, 0x64, 0x19, 0x7f, 0x81, + 0x0d, 0xe1, 0xc7, 0x76, 0x2f, 0x03, 0x92, 0x3c, 0x26, 0xf5, 0x6d, 0x28, 0x53, 0x2e, 0xdc, 0x28, + 0xee, 0x2a, 0xfb, 0x35, 0x2c, 0x4f, 0x5f, 0x9b, 0xa1, 0x07, 0x9b, 0x79, 0xcf, 0xdf, 0x4b, 0x7e, + 0xbf, 0x05, 0x15, 0xcf, 0x02, 0x82, 0x36, 0xa1, 0x34, 0x71, 0xe9, 0xf8, 0x46, 0x66, 0x23, 0x0e, + 0x2c, 0x95, 0x2b, 0x3f, 0xa0, 0x24, 0xe6, 0x9f, 0xb0, 0x86, 0xe5, 0xc9, 0x78, 0x0e, 0xe5, 0xd7, + 0x9c, 0x42, 0xbf, 0x80, 0x52, 0x3c, 0x63, 0xb9, 0x8a, 0xa7, 0xae, 0x2f, 0x07, 0xc0, 0x0c, 0x63, + 0x71, 0x6d, 0xfc, 0xa3, 0x08, 0x2b, 0x22, 0xa0, 0x51, 0x34, 0x8b, 0xc7, 0x84, 0x21, 0x78, 0x4b, + 0xee, 0x93, 0xa9, 0x3b, 0x26, 0x29, 0x82, 0xe9, 0x99, 0x05, 0x93, 0xdc, 0xb8, 0xb1, 0x27, 0xbd, + 0x8a, 0x03, 0xfa, 0x1d, 0x68, 0x1c, 0x49, 0xea, 0xd0, 0xfb, 0x29, 0xe1, 0x18, 0xd6, 0x5b, 0x9b, + 0x8b, 0xa2, 0xe2, 0x38, 0x51, 0xfb, 0x7e, 0x4a, 0x30, 0xd0, 0x8c, 0xce, 0x57, 0xa2, 0xfa, 0x88, + 0x4a, 0x5c, 0x7c, 0xbf, 0x52, 0xee, 0xfb, 0x1d, 0x64, 0x60, 0x94, 0xa5, 0x95, 0xa5, 0x5c, 0x05, + 0x1c, 0x29, 0x40, 0xa8, 0x09, 0xe5, 0x28, 0x74, 0x3c, 0x2f, 0x68, 0x54, 0x78, 0x98, 0x3f, 0x5a, + 0x96, 0x1d, 0x86, 0xbd, 0x9e, 0xd5, 0x16, 0x9f, 0xa4, 0x14, 0x85, 0x3d, 0x2f, 0x30, 0xde, 0x42, + 0x0d, 0x47, 0x77, 0xdd, 0x1b, 0x1e, 0x80, 0x01, 0xe5, 0x4b, 0x72, 0x15, 0xc5, 0x44, 0x7e, 0x55, + 0x90, 0x5d, 0x0f, 0x47, 0x77, 0x58, 0xde, 0xa0, 0x5d, 0x28, 0xb9, 0x57, 0xe9, 0x87, 0xc9, 0x8b, + 0x88, 0x0b, 0xc3, 0x85, 0x2a, 0x8e, 0xee, 0x78, 0xa7, 0x44, 0x4f, 0x41, 0x20, 0xe2, 0x84, 0xee, + 0x24, 0x85, 0xbb, 0xc6, 0x39, 0x03, 0x77, 0x42, 0xd0, 0x0b, 0xd0, 0xe2, 0xe8, 0xce, 0x19, 0x73, + 0xf7, 0xa2, 0x6c, 0xb5, 0xd6, 0x56, 0xee, 0x53, 0xa6, 0xc1, 0x61, 0x88, 0x53, 0x32, 0x31, 0xde, + 0x02, 0xbc, 0xf6, 0x49, 0xe0, 0x3d, 0xca, 0xc9, 0xcf, 0x19, 0x7c, 0x24, 0xf0, 0x52, 0xfb, 0x2b, + 0x32, 0x64, 0x6e, 0x01, 0xcb, 0x3b, 0x06, 0xc4, 0x88, 0x7d, 0xed, 0x23, 0xea, 0x7b, 0xdf, 0xa1, + 0x46, 0x10, 0xa8, 0xd7, 0xd4, 0xf7, 0x78, 0x71, 0xd4, 0x30, 0xa7, 
0x8d, 0x57, 0x50, 0xba, 0xe0, + 0xe6, 0x5e, 0x80, 0xc6, 0xa5, 0x1c, 0xc6, 0x4e, 0x2b, 0x36, 0x97, 0x66, 0xe6, 0x1a, 0x43, 0x92, + 0x92, 0x89, 0xd1, 0x86, 0xd5, 0x13, 0xe9, 0x96, 0x0b, 0x7c, 0x7d, 0x5c, 0xc6, 0xbf, 0x8a, 0x50, + 0x39, 0x8e, 0x66, 0x71, 0xe8, 0x06, 0xa8, 0x0e, 0x45, 0xdf, 0xe3, 0x7a, 0x0a, 0x2e, 0xfa, 0x1e, + 0xfa, 0x23, 0xd4, 0x27, 0xfe, 0x75, 0xec, 0xb2, 0x7a, 0x10, 0xa5, 0x5d, 0xe4, 0x35, 0xf3, 0xe3, + 0xe5, 0xc8, 0x4e, 0x53, 0x09, 0x5e, 0xdf, 0xab, 0x93, 0xe5, 0xe3, 0x52, 0xc5, 0x2a, 0xb9, 0x8a, + 0x7d, 0x06, 0xf5, 0x20, 0x1a, 0xbb, 0x81, 0x93, 0xf5, 0x2a, 0x95, 0x07, 0xb5, 0xca, 0xb9, 0x67, + 0x69, 0xc3, 0x7a, 0x80, 0x4b, 0xe9, 0x91, 0xb8, 0xa0, 0x97, 0xb0, 0x32, 0x75, 0x63, 0xea, 0x8f, + 0xfd, 0xa9, 0xcb, 0xa6, 0x7d, 0x99, 0x2b, 0xe6, 0xc2, 0xce, 0xe1, 0x86, 0x73, 0xe2, 0xe8, 0x67, + 0xb0, 0x12, 0x93, 0x39, 0x89, 0x13, 0xe2, 0x39, 0xcc, 0x6f, 0x65, 0x57, 0xd9, 0x57, 0xb0, 0x96, + 0xf2, 0xfa, 0x5e, 0x62, 0xfc, 0xaf, 0x08, 0xe5, 0x0b, 0x51, 0x5d, 0x07, 0xa0, 0x72, 0x6c, 0xc4, + 0x24, 0xdf, 0x5e, 0x76, 0x22, 0x24, 0x38, 0x30, 0x5c, 0x06, 0xfd, 0x04, 0x6a, 0xd4, 0x9f, 0x90, + 0x84, 0xba, 0x93, 0x29, 0x07, 0x53, 0xc1, 0x0b, 0xc6, 0xa7, 0x6a, 0x84, 0x8d, 0x6b, 0xf6, 0x58, + 0x05, 0x3c, 0x8c, 0x44, 0xbf, 0x86, 0x1a, 0x7b, 0x13, 0x7c, 0xbb, 0x68, 0x94, 0xf8, 0x23, 0xdb, + 0x7c, 0xf0, 0x22, 0xb8, 0x5b, 0x5c, 0x8d, 0xd3, 0x57, 0xf6, 0x7b, 0xd0, 0x78, 0x15, 0x4b, 0x25, + 0xd1, 0x25, 0xb6, 0xf3, 0x5d, 0x22, 0x7d, 0x2d, 0x18, 0xae, 0x16, 0x2f, 0x67, 0x0f, 0x4a, 0x73, + 0x1e, 0x52, 0x45, 0x6e, 0x39, 0xcb, 0xc9, 0x71, 0xd8, 0xc5, 0x3d, 0x1b, 0x21, 0x7f, 0x16, 0x55, + 0xd4, 0xa8, 0x7e, 0x3c, 0x42, 0x64, 0x81, 0xe1, 0x54, 0x86, 0x21, 0x3c, 0x9e, 0xc5, 0x31, 0xdf, + 0xa2, 0xfc, 0x09, 0x69, 0x6c, 0x72, 0x28, 0x34, 0xc9, 0xb3, 0xfd, 0x09, 0x31, 0xfe, 0x56, 0x84, + 0xfa, 0x85, 0x98, 0x33, 0xe9, 0x6c, 0x7b, 0x05, 0x1b, 0xe4, 0xea, 0x8a, 0x8c, 0xa9, 0x3f, 0x27, + 0xce, 0xd8, 0x0d, 0x02, 0x12, 0x3b, 0xb2, 0x60, 0xb5, 0xd6, 0x5a, 0x53, 0xec, 0x9b, 0x5d, 0xce, + 0xef, 
0xf7, 0xf0, 0x7a, 0x26, 0x2b, 0x59, 0x1e, 0x32, 0x61, 0xc3, 0x9f, 0x4c, 0x88, 0xe7, 0xbb, + 0x74, 0xd9, 0x80, 0xe8, 0x54, 0x5b, 0xf2, 0xd9, 0x5f, 0xd8, 0x47, 0x2e, 0x25, 0x0b, 0x33, 0x99, + 0x46, 0x66, 0xe6, 0x19, 0xab, 0xea, 0xf8, 0x3a, 0x1b, 0x97, 0xab, 0x52, 0xd3, 0xe6, 0x4c, 0x2c, + 0x2f, 0x73, 0xa3, 0x58, 0x7d, 0x30, 0x8a, 0x17, 0x2d, 0xbb, 0xf4, 0xa5, 0x96, 0x6d, 0xbc, 0x84, + 0xb5, 0x0c, 0x08, 0x39, 0x6a, 0x0f, 0xa0, 0xcc, 0x3f, 0x65, 0xda, 0x2b, 0xd0, 0xc7, 0x55, 0x87, + 0xa5, 0x84, 0xf1, 0xd7, 0x22, 0xa0, 0x54, 0x3f, 0xba, 0x4b, 0x7e, 0xa0, 0x60, 0x6e, 0x42, 0x89, + 0xf3, 0x25, 0x92, 0xe2, 0xc0, 0x70, 0x08, 0xdc, 0x84, 0x4e, 0x6f, 0x33, 0x18, 0x85, 0xf2, 0x5b, + 0xf6, 0x8b, 0x49, 0x32, 0x0b, 0x28, 0x96, 0x12, 0xc6, 0xbf, 0x0b, 0xb0, 0x91, 0xc3, 0x41, 0x62, + 0xb9, 0x68, 0xff, 0x85, 0x6f, 0x6f, 0xff, 0x68, 0x1f, 0xaa, 0xd3, 0xdb, 0xcf, 0x8c, 0x89, 0xec, + 0xf6, 0x93, 0xaf, 0xf8, 0xa7, 0xa0, 0xc6, 0xd1, 0x5d, 0xd2, 0x50, 0xb9, 0xe6, 0xf2, 0x4c, 0xe4, + 0x7c, 0x36, 0x58, 0x73, 0x79, 0xe4, 0x06, 0xab, 0xb8, 0x39, 0xf8, 0x03, 0x68, 0x4b, 0xf3, 0x99, + 0xad, 0xd0, 0xfd, 0xa3, 0xc1, 0x10, 0x9b, 0xfa, 0x13, 0x54, 0x05, 0x75, 0x64, 0x0f, 0xcf, 0xf4, + 0x02, 0xa3, 0xcc, 0x3f, 0x99, 0x5d, 0xb1, 0x96, 0x33, 0xca, 0x91, 0x42, 0xca, 0xc1, 0x7f, 0x0b, + 0x00, 0x8b, 0x86, 0x84, 0x34, 0xa8, 0x9c, 0x0f, 0x4e, 0x06, 0xc3, 0x77, 0x03, 0x61, 0xe0, 0xc8, + 0xee, 0xf7, 0xf4, 0x02, 0xaa, 0x41, 0x49, 0xec, 0xf9, 0x45, 0xe6, 0x41, 0x2e, 0xf9, 0x0a, 0xfb, + 0x07, 0x90, 0x6d, 0xf8, 0x2a, 0xaa, 0x80, 0x92, 0xed, 0xf1, 0x72, 0x71, 0x2f, 0x33, 0x83, 0xd8, + 0x3c, 0xb3, 0xda, 0x5d, 0x53, 0xaf, 0xb0, 0x8b, 0x6c, 0x85, 0x07, 0x28, 0xa7, 0xfb, 0x3b, 0xd3, + 0x64, 0x5b, 0x3f, 0x30, 0x3f, 0x43, 0xfb, 0x8d, 0x89, 0x75, 0x8d, 0xf1, 0xf0, 0xf0, 0x9d, 0xbe, + 0xc2, 0x78, 0xaf, 0xfb, 0xa6, 0xd5, 0xd3, 0x57, 0xd9, 0xda, 0xff, 0xc6, 0x6c, 0x63, 0xbb, 0x63, + 0xb6, 0x6d, 0xbd, 0xce, 0x6e, 0x2e, 0x78, 0x80, 0x6b, 0xcc, 0xcd, 0xf1, 0xf0, 0x1c, 0x0f, 0xda, + 0x96, 0xae, 0x1f, 0xec, 0xc1, 0x6a, 0x6e, 
0xfe, 0x30, 0x5f, 0x76, 0xbb, 0x63, 0x99, 0x23, 0xfd, + 0x09, 0xa3, 0x47, 0x6f, 0xda, 0xb8, 0x37, 0xd2, 0x0b, 0x9d, 0x5f, 0xbe, 0xdf, 0x9b, 0xfb, 0x94, + 0x24, 0x49, 0xd3, 0x8f, 0x0e, 0x05, 0x75, 0x78, 0x1d, 0x1d, 0xce, 0xe9, 0x21, 0xff, 0x0b, 0x7a, + 0xb8, 0x78, 0x3e, 0x97, 0x65, 0xce, 0xf9, 0xcd, 0x37, 0x01, 0x00, 0x00, 0xff, 0xff, 0x2e, 0xb4, + 0x72, 0xde, 0xde, 0x0e, 0x00, 0x00, } diff --git a/go/vt/wrangler/migrater.go b/go/vt/wrangler/migrater.go index 58759a1bef5..7e4c7a71c2e 100644 --- a/go/vt/wrangler/migrater.go +++ b/go/vt/wrangler/migrater.go @@ -42,15 +42,6 @@ import ( "vitess.io/vitess/go/vt/vtgate/vindexes" ) -// MigrationType specifies the type of migration. -type MigrationType int - -// The following constants define the migration type. -const ( - MigrateTables = MigrationType(iota) - MigrateShards -) - type migrateDirection int const ( @@ -66,7 +57,7 @@ const ( ) type migrater struct { - migrationType MigrationType + migrationType binlogdatapb.MigrationType wr *Wrangler id int64 sources map[topo.KeyspaceShard]*miSource @@ -91,7 +82,7 @@ type miSource struct { } // MigrateReads is a generic way of migrating read traffic for a resharding workflow. 
-func (wr *Wrangler) MigrateReads(ctx context.Context, migrationType MigrationType, streams map[topo.KeyspaceShard][]uint32, cells []string, servedType topodatapb.TabletType, direction migrateDirection) error { +func (wr *Wrangler) MigrateReads(ctx context.Context, migrationType binlogdatapb.MigrationType, streams map[topo.KeyspaceShard][]uint32, cells []string, servedType topodatapb.TabletType, direction migrateDirection) error { if servedType != topodatapb.TabletType_REPLICA && servedType != topodatapb.TabletType_RDONLY { return fmt.Errorf("tablet type must be REPLICA or RDONLY: %v", servedType) } @@ -109,14 +100,14 @@ func (wr *Wrangler) MigrateReads(ctx context.Context, migrationType MigrationTyp } defer unlock(&err) - if mi.migrationType == MigrateTables { + if mi.migrationType == binlogdatapb.MigrationType_TABLES { return mi.migrateTableReads(ctx, cells, servedType, direction) } return mi.migrateShardReads(ctx, cells, servedType, direction) } // MigrateWrites is a generic way of migrating write traffic for a resharding workflow. 
-func (wr *Wrangler) MigrateWrites(ctx context.Context, migrationType MigrationType, streams map[topo.KeyspaceShard][]uint32, filteredReplicationWaitTime time.Duration) error { +func (wr *Wrangler) MigrateWrites(ctx context.Context, migrationType binlogdatapb.MigrationType, streams map[topo.KeyspaceShard][]uint32, filteredReplicationWaitTime time.Duration) error { mi, err := wr.buildMigrater(ctx, migrationType, streams) if err != nil { return err @@ -177,7 +168,7 @@ func (wr *Wrangler) MigrateWrites(ctx context.Context, migrationType MigrationTy return nil } -func (wr *Wrangler) buildMigrater(ctx context.Context, migrationType MigrationType, streams map[topo.KeyspaceShard][]uint32) (*migrater, error) { +func (wr *Wrangler) buildMigrater(ctx context.Context, migrationType binlogdatapb.MigrationType, streams map[topo.KeyspaceShard][]uint32) (*migrater, error) { mi := &migrater{ migrationType: migrationType, wr: wr, @@ -185,6 +176,7 @@ func (wr *Wrangler) buildMigrater(ctx context.Context, migrationType MigrationTy targets: make(map[topo.KeyspaceShard]*miTarget), sources: make(map[topo.KeyspaceShard]*miSource), } + mi.wr.Logger().Infof("Migration ID for streams %v: %d", streams, mi.id) for targetks, uids := range streams { targetShard, err := mi.wr.ts.GetShard(ctx, targetks.Keyspace, targetks.Shard) if err != nil { @@ -293,7 +285,7 @@ func (mi *migrater) validate(ctx context.Context) error { uniqueSources[sourceks] = uid } } - if mi.migrationType == MigrateTables { + if mi.migrationType == binlogdatapb.MigrationType_TABLES { // All shards must be present. 
if err := mi.compareShards(ctx, mi.sourceKeyspace, mi.sourceShards()); err != nil { return err @@ -307,7 +299,7 @@ func (mi *migrater) validate(ctx context.Context) error { return fmt.Errorf("cannot migrate streams with wild card table names: %v", table) } } - } else { // MigrateShards + } else { // binlogdatapb.MigrationType_SHARDS // Source and target keyspace must match if mi.sourceKeyspace != mi.targetKeyspace { return fmt.Errorf("source and target keyspace must match: %v vs %v", mi.sourceKeyspace, mi.targetKeyspace) @@ -323,7 +315,7 @@ func (mi *migrater) validate(ctx context.Context) error { } func (mi *migrater) validateForWrite(ctx context.Context) error { - if mi.migrationType == MigrateTables { + if mi.migrationType == binlogdatapb.MigrationType_TABLES { return mi.validateTableForWrite(ctx) } return mi.validateShardForWrite(ctx) @@ -456,7 +448,7 @@ func (mi *migrater) checkJournals(ctx context.Context) (journalsExist bool, err func (mi *migrater) stopSourceWrites(ctx context.Context) error { var err error - if mi.migrationType == MigrateTables { + if mi.migrationType == binlogdatapb.MigrationType_TABLES { err = mi.changeTableSourceWrites(ctx, disallowWrites) } else { err = mi.changeShardsAccess(ctx, mi.sourceKeyspace, mi.sourceShards(), disallowWrites) @@ -511,7 +503,7 @@ func (mi *migrater) waitForCatchup(ctx context.Context, filteredReplicationWaitT func (mi *migrater) cancelMigration(ctx context.Context) { var err error - if mi.migrationType == MigrateTables { + if mi.migrationType == binlogdatapb.MigrationType_TABLES { err = mi.changeTableSourceWrites(ctx, allowWrites) } else { err = mi.changeShardsAccess(ctx, mi.sourceKeyspace, mi.sourceShards(), allowWrites) @@ -554,6 +546,7 @@ func (mi *migrater) createJournals(ctx context.Context) error { } journal := &binlogdatapb.Journal{ Id: mi.id, + MigrationType: mi.migrationType, Tables: mi.tables, LocalPosition: source.position, } @@ -644,7 +637,7 @@ func (mi *migrater) createReverseReplication(ctx 
context.Context) error { } func (mi *migrater) allowTargetWrites(ctx context.Context) error { - if mi.migrationType == MigrateTables { + if mi.migrationType == binlogdatapb.MigrationType_TABLES { return mi.allowTableTargetWrites(ctx) } return mi.changeShardsAccess(ctx, mi.targetKeyspace, mi.targetShards(), allowWrites) @@ -662,7 +655,7 @@ func (mi *migrater) allowTableTargetWrites(ctx context.Context) error { } func (mi *migrater) changeRouting(ctx context.Context) error { - if mi.migrationType == MigrateTables { + if mi.migrationType == binlogdatapb.MigrationType_TABLES { return mi.changeTableRouting(ctx) } return mi.changeShardRouting(ctx) diff --git a/go/vt/wrangler/migrater_test.go b/go/vt/wrangler/migrater_test.go index 891d58bfb1a..f7edc690af1 100644 --- a/go/vt/wrangler/migrater_test.go +++ b/go/vt/wrangler/migrater_test.go @@ -40,7 +40,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Single cell RDONLY migration. - err := tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, []string{"cell1"}, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, []string{"cell1"}, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } @@ -69,7 +69,7 @@ func TestTableMigrate(t *testing.T) { // The global routing already contains redirections for rdonly. // So, adding routes for replica and deploying to cell2 will also cause // cell2 to migrat rdonly. This is a quirk that can be fixed later if necessary. 
- err = tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } @@ -107,7 +107,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Single cell backward REPLICA migration. - err = tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionBackward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionBackward) if err != nil { t.Fatal(err) } @@ -127,7 +127,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Migrate all REPLICA. - err = tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } @@ -153,7 +153,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // All cells RDONLY backward migration. 
- err = tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_RDONLY, directionBackward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_RDONLY, directionBackward) if err != nil { t.Fatal(err) } @@ -173,7 +173,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Can't migrate master with MigrateReads. - err = tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_MASTER, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_MASTER, directionForward) want := "tablet type must be REPLICA or RDONLY: MASTER" if err == nil || err.Error() != want { t.Errorf("MigrateReads(master) err: %v, want %v", err, want) @@ -182,7 +182,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Can't migrate writes if REPLICA and RDONLY have not fully migrated yet. - err = tme.wr.MigrateWrites(ctx, MigrateTables, tme.streams, 1*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, 1*time.Second) want = "missing tablet type specific routing, read-only traffic must be migrated before migrating writes" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateWrites err: %v, want %v", err, want) @@ -193,7 +193,7 @@ func TestTableMigrate(t *testing.T) { // Test MigrateWrites cancelation on failure. // Migrate all the reads first. 
- err = tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } @@ -248,7 +248,7 @@ func TestTableMigrate(t *testing.T) { tme.dbDest2Client.addQuery(cancel1, &sqltypes.Result{}, nil) tme.dbDest1Client.addQuery(cancel2, &sqltypes.Result{}, nil) - err = tme.wr.MigrateWrites(ctx, MigrateTables, tme.streams, 0*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, 0*time.Second) want = "DeadlineExceeded" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateWrites(0 timeout) err: %v, must contain %v", err, want) @@ -298,7 +298,7 @@ func TestTableMigrate(t *testing.T) { tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) - err = tme.wr.MigrateWrites(ctx, MigrateTables, tme.streams, 1*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, 1*time.Second) if err != nil { t.Fatal(err) } @@ -332,7 +332,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Single cell RDONLY migration. - err := tme.wr.MigrateReads(ctx, MigrateShards, tme.streams, []string{"cell1"}, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, []string{"cell1"}, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } @@ -348,7 +348,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Other cell REPLICA migration. 
- err = tme.wr.MigrateReads(ctx, MigrateShards, tme.streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } @@ -364,7 +364,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Single cell backward REPLICA migration. - err = tme.wr.MigrateReads(ctx, MigrateShards, tme.streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionBackward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionBackward) if err != nil { t.Fatal(err) } @@ -383,7 +383,7 @@ func TestShardMigrate(t *testing.T) { // This is an extra step that does not exist in the tables test. // The per-cell migration mechanism is different for tables. So, this // extra step is needed to bring things in sync. - err = tme.wr.MigrateReads(ctx, MigrateShards, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } @@ -395,7 +395,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Migrate all REPLICA. 
- err = tme.wr.MigrateReads(ctx, MigrateShards, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } @@ -407,7 +407,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // All cells RDONLY backward migration. - err = tme.wr.MigrateReads(ctx, MigrateShards, tme.streams, nil, topodatapb.TabletType_RDONLY, directionBackward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, nil, topodatapb.TabletType_RDONLY, directionBackward) if err != nil { t.Fatal(err) } @@ -419,7 +419,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Can't migrate master with MigrateReads. - err = tme.wr.MigrateReads(ctx, MigrateShards, tme.streams, nil, topodatapb.TabletType_MASTER, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, nil, topodatapb.TabletType_MASTER, directionForward) want := "tablet type must be REPLICA or RDONLY: MASTER" if err == nil || err.Error() != want { t.Errorf("MigrateReads(master) err: %v, want %v", err, want) @@ -428,7 +428,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Can't migrate writes if REPLICA and RDONLY have not fully migrated yet. 
- err = tme.wr.MigrateWrites(ctx, MigrateShards, tme.streams, 1*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, 1*time.Second) want = "cannot migrate MASTER away" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateWrites err: %v, want %v", err, want) @@ -439,7 +439,7 @@ func TestShardMigrate(t *testing.T) { // Test MigrateWrites cancelation on failure. // Migrate all the reads first. - err = tme.wr.MigrateReads(ctx, MigrateShards, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } @@ -484,7 +484,7 @@ func TestShardMigrate(t *testing.T) { tme.dbDest2Client.addQuery(cancel1, &sqltypes.Result{}, nil) tme.dbDest1Client.addQuery(cancel2, &sqltypes.Result{}, nil) - err = tme.wr.MigrateWrites(ctx, MigrateShards, tme.streams, 0*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, 0*time.Second) want = "DeadlineExceeded" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateWrites(0 timeout) err: %v, must contain %v", err, want) @@ -502,9 +502,9 @@ func TestShardMigrate(t *testing.T) { // Test successful MigrateWrites. // Create journals. 
- journal1 := "insert into _vt.resharding_journal.*8372031610433464572.*local_position.*MariaDB/5-456-892.*shard_gtids.*-80.*MariaDB/5-456-893.*participants.*40.*40" + journal1 := "insert into _vt.resharding_journal.*8372031610433464572.*migration_type:SHARDS.*local_position.*MariaDB/5-456-892.*shard_gtids.*-80.*MariaDB/5-456-893.*participants.*40.*40" tme.dbSource1Client.addQueryRE(journal1, &sqltypes.Result{}, nil) - journal2 := "insert into _vt.resharding_journal.*8372031610433464572.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*shard_gtids.*80.*MariaDB/5-456-893.*participants.*40.*40" + journal2 := "insert into _vt.resharding_journal.*8372031610433464572.*migration_type:SHARDS.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*shard_gtids.*80.*MariaDB/5-456-893.*participants.*40.*40" tme.dbSource2Client.addQueryRE(journal2, &sqltypes.Result{}, nil) // Create backward replicaions. @@ -520,7 +520,7 @@ func TestShardMigrate(t *testing.T) { tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) - err = tme.wr.MigrateWrites(ctx, MigrateShards, tme.streams, 1*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, 1*time.Second) if err != nil { t.Fatal(err) } @@ -545,11 +545,11 @@ func TestMigrateFailJournal(t *testing.T) { tme := newTestTableMigrater(ctx, t) defer tme.stopTablets(t) - err := tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } - err = tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, 
tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } @@ -590,7 +590,7 @@ func TestMigrateFailJournal(t *testing.T) { tme.dbSource1Client.addQueryRE("insert into _vt.resharding_journal", nil, errors.New("journaling intentionally failed")) tme.dbSource2Client.addQueryRE("insert into _vt.resharding_journal", nil, errors.New("journaling intentionally failed")) - err = tme.wr.MigrateWrites(ctx, MigrateTables, tme.streams, 1*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, 1*time.Second) want := "journaling intentionally failed" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateWrites(0 timeout) err: %v, must contain %v", err, want) @@ -613,11 +613,11 @@ func TestTableMigrateJournalExists(t *testing.T) { tme := newTestTableMigrater(ctx, t) defer tme.stopTablets(t) - err := tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } - err = tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } @@ -648,7 +648,7 @@ func TestTableMigrateJournalExists(t *testing.T) { tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) - err = tme.wr.MigrateWrites(ctx, MigrateTables, tme.streams, 1*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, 1*time.Second) if err != nil { t.Fatal(err) } @@ -675,11 +675,11 @@ func TestShardMigrateJournalExists(t *testing.T) { tme := 
newTestShardMigrater(ctx, t) defer tme.stopTablets(t) - err := tme.wr.MigrateReads(ctx, MigrateShards, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } - err = tme.wr.MigrateReads(ctx, MigrateShards, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } @@ -689,7 +689,7 @@ func TestShardMigrateJournalExists(t *testing.T) { tme.dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 8372031610433464572", &sqltypes.Result{}, nil) // Create the missing journal. - journal2 := "insert into _vt.resharding_journal.*8372031610433464572.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*shard_gtids.*80.*MariaDB/5-456-893.*participants.*40.*40" + journal2 := "insert into _vt.resharding_journal.*8372031610433464572.*migration_type:SHARDS.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*shard_gtids.*80.*MariaDB/5-456-893.*participants.*40.*40" tme.dbSource2Client.addQueryRE(journal2, &sqltypes.Result{}, nil) // Create backward replicaions. 
@@ -710,7 +710,7 @@ func TestShardMigrateJournalExists(t *testing.T) { tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) - err = tme.wr.MigrateWrites(ctx, MigrateShards, tme.streams, 1*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, 1*time.Second) if err != nil { t.Fatal(err) } @@ -753,7 +753,7 @@ func TestMigrateDistinctTargets(t *testing.T) { ), nil) tme.streams[topo.KeyspaceShard{Keyspace: "ks1", Shard: "-40"}] = []uint32{1} - err := tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) want := "target keyspaces are mismatched across streams" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -784,7 +784,7 @@ func TestMigrateDistinctSources(t *testing.T) { fmt.Sprintf("%v", bls), ), nil) - err := tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) want := "source keyspaces are mismatched across streams" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -798,7 +798,7 @@ func TestMigrateVReplicationStreamNotFound(t *testing.T) { tme.dbDest1Client.addQuery("select source from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) - err := tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) 
want := "VReplication stream 1 not found for ks2:-80" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -826,7 +826,7 @@ func TestMigrateMismatchedTables(t *testing.T) { fmt.Sprintf("%v", bls), ), nil) - err := tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) want := "table lists are mismatched across streams" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -857,7 +857,7 @@ func TestMigrateDupUidSources(t *testing.T) { fmt.Sprintf("%v", bls), ), nil) - err := tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) want := "duplicate sources for uids" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -873,7 +873,7 @@ func TestTableMigrateAllShardsNotPresent(t *testing.T) { {Keyspace: "ks2", Shard: "-80"}: {1, 2}, } - err := tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) want := "mismatched shards for keyspace" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -931,7 +931,7 @@ func TestMigrateNoTableWildcards(t *testing.T) { fmt.Sprintf("%v", bls3), ), nil) - err := tme.wr.MigrateReads(ctx, MigrateTables, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, 
tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) want := "cannot migrate streams with wild card table names" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -943,7 +943,7 @@ func TestShardMigrateSourceTargetMismatch(t *testing.T) { tme := newTestTableMigrater(ctx, t) defer tme.stopTablets(t) - err := tme.wr.MigrateReads(ctx, MigrateShards, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) want := "source and target keyspace must match" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -973,7 +973,7 @@ func TestShardMigrateTargetMatchesSource(t *testing.T) { tme.streams[topo.KeyspaceShard{Keyspace: "ks", Shard: "-40"}] = []uint32{1} - err := tme.wr.MigrateReads(ctx, MigrateShards, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) want := "target shard matches a source shard" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) diff --git a/proto/binlogdata.proto b/proto/binlogdata.proto index dd4de0f3935..b28dbc5356f 100644 --- a/proto/binlogdata.proto +++ b/proto/binlogdata.proto @@ -222,12 +222,20 @@ message KeyspaceShard { string shard = 2; } +// MigrationType specifies the type of migration for the Journal. 
+enum MigrationType { + TABLES = 0; + SHARDS = 1; +} + message Journal { int64 id = 1; - repeated string tables = 2; - string local_position = 3; - repeated ShardGtid shard_gtids = 4; - repeated KeyspaceShard participants = 5; + MigrationType migration_type = 2; + repeated string tables = 3; + string local_position = 4; + repeated ShardGtid shard_gtids = 5; + repeated KeyspaceShard participants = 6; + repeated int64 reversed_ids = 7; } // VEvent represents a vstream event diff --git a/py/vtproto/binlogdata_pb2.py b/py/vtproto/binlogdata_pb2.py index dcac54576b0..53eda7da5b1 100644 --- a/py/vtproto/binlogdata_pb2.py +++ b/py/vtproto/binlogdata_pb2.py @@ -23,7 +23,7 @@ package='binlogdata', syntax='proto3', serialized_options=_b('Z\'vitess.io/vitess/go/vt/proto/binlogdata'), - serialized_pb=_b('\n\x10\x62inlogdata.proto\x12\nbinlogdata\x1a\x0bvtrpc.proto\x1a\x0bquery.proto\x1a\x0etopodata.proto\"7\n\x07\x43harset\x12\x0e\n\x06\x63lient\x18\x01 \x01(\x05\x12\x0c\n\x04\x63onn\x18\x02 \x01(\x05\x12\x0e\n\x06server\x18\x03 \x01(\x05\"\xb5\x03\n\x11\x42inlogTransaction\x12;\n\nstatements\x18\x01 \x03(\x0b\x32\'.binlogdata.BinlogTransaction.Statement\x12&\n\x0b\x65vent_token\x18\x04 \x01(\x0b\x32\x11.query.EventToken\x1a\xae\x02\n\tStatement\x12\x42\n\x08\x63\x61tegory\x18\x01 \x01(\x0e\x32\x30.binlogdata.BinlogTransaction.Statement.Category\x12$\n\x07\x63harset\x18\x02 \x01(\x0b\x32\x13.binlogdata.Charset\x12\x0b\n\x03sql\x18\x03 \x01(\x0c\"\xa9\x01\n\x08\x43\x61tegory\x12\x13\n\x0f\x42L_UNRECOGNIZED\x10\x00\x12\x0c\n\x08\x42L_BEGIN\x10\x01\x12\r\n\tBL_COMMIT\x10\x02\x12\x0f\n\x0b\x42L_ROLLBACK\x10\x03\x12\x15\n\x11\x42L_DML_DEPRECATED\x10\x04\x12\n\n\x06\x42L_DDL\x10\x05\x12\n\n\x06\x42L_SET\x10\x06\x12\r\n\tBL_INSERT\x10\x07\x12\r\n\tBL_UPDATE\x10\x08\x12\r\n\tBL_DELETE\x10\tJ\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04\"v\n\x15StreamKeyRangeRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12%\n\tkey_range\x18\x02 
\x01(\x0b\x32\x12.topodata.KeyRange\x12$\n\x07\x63harset\x18\x03 \x01(\x0b\x32\x13.binlogdata.Charset\"S\n\x16StreamKeyRangeResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 \x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"]\n\x13StreamTablesRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12\x0e\n\x06tables\x18\x02 \x03(\t\x12$\n\x07\x63harset\x18\x03 \x01(\x0b\x32\x13.binlogdata.Charset\"Q\n\x14StreamTablesResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 \x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"%\n\x04Rule\x12\r\n\x05match\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\")\n\x06\x46ilter\x12\x1f\n\x05rules\x18\x01 \x03(\x0b\x32\x10.binlogdata.Rule\"\xde\x01\n\x0c\x42inlogSource\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\x12)\n\x0btablet_type\x18\x03 \x01(\x0e\x32\x14.topodata.TabletType\x12%\n\tkey_range\x18\x04 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x0e\n\x06tables\x18\x05 \x03(\t\x12\"\n\x06\x66ilter\x18\x06 \x01(\x0b\x32\x12.binlogdata.Filter\x12\'\n\x06on_ddl\x18\x07 \x01(\x0e\x32\x17.binlogdata.OnDDLAction\"B\n\tRowChange\x12\x1a\n\x06\x62\x65\x66ore\x18\x01 \x01(\x0b\x32\n.query.Row\x12\x19\n\x05\x61\x66ter\x18\x02 \x01(\x0b\x32\n.query.Row\"J\n\x08RowEvent\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12*\n\x0brow_changes\x18\x02 \x03(\x0b\x32\x15.binlogdata.RowChange\">\n\nFieldEvent\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x1c\n\x06\x66ields\x18\x02 \x03(\x0b\x32\x0c.query.Field\":\n\tShardGtid\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\x12\x0c\n\x04gtid\x18\x03 \x01(\t\"3\n\x05VGtid\x12*\n\x0bshard_gtids\x18\x01 \x03(\x0b\x32\x15.binlogdata.ShardGtid\"0\n\rKeyspaceShard\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\"\x9a\x01\n\x07Journal\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x0e\n\x06tables\x18\x02 \x03(\t\x12\x16\n\x0elocal_position\x18\x03 \x01(\t\x12*\n\x0bshard_gtids\x18\x04 \x03(\x0b\x32\x15.binlogdata.ShardGtid\x12/\n\x0cparticipants\x18\x05 
\x03(\x0b\x32\x19.binlogdata.KeyspaceShard\"\x90\x02\n\x06VEvent\x12$\n\x04type\x18\x01 \x01(\x0e\x32\x16.binlogdata.VEventType\x12\x11\n\ttimestamp\x18\x02 \x01(\x03\x12\x0c\n\x04gtid\x18\x03 \x01(\t\x12\x0b\n\x03\x64\x64l\x18\x04 \x01(\t\x12\'\n\trow_event\x18\x05 \x01(\x0b\x32\x14.binlogdata.RowEvent\x12+\n\x0b\x66ield_event\x18\x06 \x01(\x0b\x32\x16.binlogdata.FieldEvent\x12 \n\x05vgtid\x18\x07 \x01(\x0b\x32\x11.binlogdata.VGtid\x12$\n\x07journal\x18\x08 \x01(\x0b\x32\x13.binlogdata.Journal\x12\x14\n\x0c\x63urrent_time\x18\x14 \x01(\x03\"\xc7\x01\n\x0eVStreamRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x10\n\x08position\x18\x04 \x01(\t\x12\"\n\x06\x66ilter\x18\x05 \x01(\x0b\x32\x12.binlogdata.Filter\"5\n\x0fVStreamResponse\x12\"\n\x06\x65vents\x18\x01 \x03(\x0b\x32\x12.binlogdata.VEvent\"\xc8\x01\n\x12VStreamRowsRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\r\n\x05query\x18\x04 \x01(\t\x12\"\n\x06lastpk\x18\x05 \x01(\x0b\x32\x12.query.QueryResult\"\x97\x01\n\x13VStreamRowsResponse\x12\x1c\n\x06\x66ields\x18\x01 \x03(\x0b\x32\x0c.query.Field\x12\x1e\n\x08pkfields\x18\x02 \x03(\x0b\x32\x0c.query.Field\x12\x0c\n\x04gtid\x18\x03 \x01(\t\x12\x18\n\x04rows\x18\x04 \x03(\x0b\x32\n.query.Row\x12\x1a\n\x06lastpk\x18\x05 
\x01(\x0b\x32\n.query.Row*>\n\x0bOnDDLAction\x12\n\n\x06IGNORE\x10\x00\x12\x08\n\x04STOP\x10\x01\x12\x08\n\x04\x45XEC\x10\x02\x12\x0f\n\x0b\x45XEC_IGNORE\x10\x03*\xd1\x01\n\nVEventType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x08\n\x04GTID\x10\x01\x12\t\n\x05\x42\x45GIN\x10\x02\x12\n\n\x06\x43OMMIT\x10\x03\x12\x0c\n\x08ROLLBACK\x10\x04\x12\x07\n\x03\x44\x44L\x10\x05\x12\n\n\x06INSERT\x10\x06\x12\x0b\n\x07REPLACE\x10\x07\x12\n\n\x06UPDATE\x10\x08\x12\n\n\x06\x44\x45LETE\x10\t\x12\x07\n\x03SET\x10\n\x12\t\n\x05OTHER\x10\x0b\x12\x07\n\x03ROW\x10\x0c\x12\t\n\x05\x46IELD\x10\r\x12\r\n\tHEARTBEAT\x10\x0e\x12\t\n\x05VGTID\x10\x0f\x12\x0b\n\x07JOURNAL\x10\x10\x42)Z\'vitess.io/vitess/go/vt/proto/binlogdatab\x06proto3') + serialized_pb=_b('\n\x10\x62inlogdata.proto\x12\nbinlogdata\x1a\x0bvtrpc.proto\x1a\x0bquery.proto\x1a\x0etopodata.proto\"7\n\x07\x43harset\x12\x0e\n\x06\x63lient\x18\x01 \x01(\x05\x12\x0c\n\x04\x63onn\x18\x02 \x01(\x05\x12\x0e\n\x06server\x18\x03 \x01(\x05\"\xb5\x03\n\x11\x42inlogTransaction\x12;\n\nstatements\x18\x01 \x03(\x0b\x32\'.binlogdata.BinlogTransaction.Statement\x12&\n\x0b\x65vent_token\x18\x04 \x01(\x0b\x32\x11.query.EventToken\x1a\xae\x02\n\tStatement\x12\x42\n\x08\x63\x61tegory\x18\x01 \x01(\x0e\x32\x30.binlogdata.BinlogTransaction.Statement.Category\x12$\n\x07\x63harset\x18\x02 \x01(\x0b\x32\x13.binlogdata.Charset\x12\x0b\n\x03sql\x18\x03 \x01(\x0c\"\xa9\x01\n\x08\x43\x61tegory\x12\x13\n\x0f\x42L_UNRECOGNIZED\x10\x00\x12\x0c\n\x08\x42L_BEGIN\x10\x01\x12\r\n\tBL_COMMIT\x10\x02\x12\x0f\n\x0b\x42L_ROLLBACK\x10\x03\x12\x15\n\x11\x42L_DML_DEPRECATED\x10\x04\x12\n\n\x06\x42L_DDL\x10\x05\x12\n\n\x06\x42L_SET\x10\x06\x12\r\n\tBL_INSERT\x10\x07\x12\r\n\tBL_UPDATE\x10\x08\x12\r\n\tBL_DELETE\x10\tJ\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04\"v\n\x15StreamKeyRangeRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\x12$\n\x07\x63harset\x18\x03 
\x01(\x0b\x32\x13.binlogdata.Charset\"S\n\x16StreamKeyRangeResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 \x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"]\n\x13StreamTablesRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12\x0e\n\x06tables\x18\x02 \x03(\t\x12$\n\x07\x63harset\x18\x03 \x01(\x0b\x32\x13.binlogdata.Charset\"Q\n\x14StreamTablesResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 \x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"%\n\x04Rule\x12\r\n\x05match\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\")\n\x06\x46ilter\x12\x1f\n\x05rules\x18\x01 \x03(\x0b\x32\x10.binlogdata.Rule\"\xde\x01\n\x0c\x42inlogSource\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\x12)\n\x0btablet_type\x18\x03 \x01(\x0e\x32\x14.topodata.TabletType\x12%\n\tkey_range\x18\x04 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x0e\n\x06tables\x18\x05 \x03(\t\x12\"\n\x06\x66ilter\x18\x06 \x01(\x0b\x32\x12.binlogdata.Filter\x12\'\n\x06on_ddl\x18\x07 \x01(\x0e\x32\x17.binlogdata.OnDDLAction\"B\n\tRowChange\x12\x1a\n\x06\x62\x65\x66ore\x18\x01 \x01(\x0b\x32\n.query.Row\x12\x19\n\x05\x61\x66ter\x18\x02 \x01(\x0b\x32\n.query.Row\"J\n\x08RowEvent\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12*\n\x0brow_changes\x18\x02 \x03(\x0b\x32\x15.binlogdata.RowChange\">\n\nFieldEvent\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x1c\n\x06\x66ields\x18\x02 \x03(\x0b\x32\x0c.query.Field\":\n\tShardGtid\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\x12\x0c\n\x04gtid\x18\x03 \x01(\t\"3\n\x05VGtid\x12*\n\x0bshard_gtids\x18\x01 \x03(\x0b\x32\x15.binlogdata.ShardGtid\"0\n\rKeyspaceShard\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\"\xe3\x01\n\x07Journal\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x31\n\x0emigration_type\x18\x02 \x01(\x0e\x32\x19.binlogdata.MigrationType\x12\x0e\n\x06tables\x18\x03 \x03(\t\x12\x16\n\x0elocal_position\x18\x04 \x01(\t\x12*\n\x0bshard_gtids\x18\x05 \x03(\x0b\x32\x15.binlogdata.ShardGtid\x12/\n\x0cparticipants\x18\x06 
\x03(\x0b\x32\x19.binlogdata.KeyspaceShard\x12\x14\n\x0creversed_ids\x18\x07 \x03(\x03\"\x90\x02\n\x06VEvent\x12$\n\x04type\x18\x01 \x01(\x0e\x32\x16.binlogdata.VEventType\x12\x11\n\ttimestamp\x18\x02 \x01(\x03\x12\x0c\n\x04gtid\x18\x03 \x01(\t\x12\x0b\n\x03\x64\x64l\x18\x04 \x01(\t\x12\'\n\trow_event\x18\x05 \x01(\x0b\x32\x14.binlogdata.RowEvent\x12+\n\x0b\x66ield_event\x18\x06 \x01(\x0b\x32\x16.binlogdata.FieldEvent\x12 \n\x05vgtid\x18\x07 \x01(\x0b\x32\x11.binlogdata.VGtid\x12$\n\x07journal\x18\x08 \x01(\x0b\x32\x13.binlogdata.Journal\x12\x14\n\x0c\x63urrent_time\x18\x14 \x01(\x03\"\xc7\x01\n\x0eVStreamRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x10\n\x08position\x18\x04 \x01(\t\x12\"\n\x06\x66ilter\x18\x05 \x01(\x0b\x32\x12.binlogdata.Filter\"5\n\x0fVStreamResponse\x12\"\n\x06\x65vents\x18\x01 \x03(\x0b\x32\x12.binlogdata.VEvent\"\xc8\x01\n\x12VStreamRowsRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\r\n\x05query\x18\x04 \x01(\t\x12\"\n\x06lastpk\x18\x05 \x01(\x0b\x32\x12.query.QueryResult\"\x97\x01\n\x13VStreamRowsResponse\x12\x1c\n\x06\x66ields\x18\x01 \x03(\x0b\x32\x0c.query.Field\x12\x1e\n\x08pkfields\x18\x02 \x03(\x0b\x32\x0c.query.Field\x12\x0c\n\x04gtid\x18\x03 \x01(\t\x12\x18\n\x04rows\x18\x04 \x03(\x0b\x32\n.query.Row\x12\x1a\n\x06lastpk\x18\x05 
\x01(\x0b\x32\n.query.Row*>\n\x0bOnDDLAction\x12\n\n\x06IGNORE\x10\x00\x12\x08\n\x04STOP\x10\x01\x12\x08\n\x04\x45XEC\x10\x02\x12\x0f\n\x0b\x45XEC_IGNORE\x10\x03*\xd1\x01\n\nVEventType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x08\n\x04GTID\x10\x01\x12\t\n\x05\x42\x45GIN\x10\x02\x12\n\n\x06\x43OMMIT\x10\x03\x12\x0c\n\x08ROLLBACK\x10\x04\x12\x07\n\x03\x44\x44L\x10\x05\x12\n\n\x06INSERT\x10\x06\x12\x0b\n\x07REPLACE\x10\x07\x12\n\n\x06UPDATE\x10\x08\x12\n\n\x06\x44\x45LETE\x10\t\x12\x07\n\x03SET\x10\n\x12\t\n\x05OTHER\x10\x0b\x12\x07\n\x03ROW\x10\x0c\x12\t\n\x05\x46IELD\x10\r\x12\r\n\tHEARTBEAT\x10\x0e\x12\t\n\x05VGTID\x10\x0f\x12\x0b\n\x07JOURNAL\x10\x10*\'\n\rMigrationType\x12\n\n\x06TABLES\x10\x00\x12\n\n\x06SHARDS\x10\x01\x42)Z\'vitess.io/vitess/go/vt/proto/binlogdatab\x06proto3') , dependencies=[vtrpc__pb2.DESCRIPTOR,query__pb2.DESCRIPTOR,topodata__pb2.DESCRIPTOR,]) @@ -52,8 +52,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=2678, - serialized_end=2740, + serialized_start=2751, + serialized_end=2813, ) _sym_db.RegisterEnumDescriptor(_ONDDLACTION) @@ -135,12 +135,35 @@ ], containing_type=None, serialized_options=None, - serialized_start=2743, - serialized_end=2952, + serialized_start=2816, + serialized_end=3025, ) _sym_db.RegisterEnumDescriptor(_VEVENTTYPE) VEventType = enum_type_wrapper.EnumTypeWrapper(_VEVENTTYPE) +_MIGRATIONTYPE = _descriptor.EnumDescriptor( + name='MigrationType', + full_name='binlogdata.MigrationType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='TABLES', index=0, number=0, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SHARDS', index=1, number=1, + serialized_options=None, + type=None), + ], + containing_type=None, + serialized_options=None, + serialized_start=3027, + serialized_end=3066, +) +_sym_db.RegisterEnumDescriptor(_MIGRATIONTYPE) + +MigrationType = enum_type_wrapper.EnumTypeWrapper(_MIGRATIONTYPE) IGNORE = 0 STOP = 1 EXEC = 2 @@ 
-162,6 +185,8 @@ HEARTBEAT = 14 VGTID = 15 JOURNAL = 16 +TABLES = 0 +SHARDS = 1 _BINLOGTRANSACTION_STATEMENT_CATEGORY = _descriptor.EnumDescriptor( @@ -884,29 +909,43 @@ is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='tables', full_name='binlogdata.Journal.tables', index=1, - number=2, type=9, cpp_type=9, label=3, + name='migration_type', full_name='binlogdata.Journal.migration_type', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='tables', full_name='binlogdata.Journal.tables', index=2, + number=3, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='local_position', full_name='binlogdata.Journal.local_position', index=2, - number=3, type=9, cpp_type=9, label=1, + name='local_position', full_name='binlogdata.Journal.local_position', index=3, + number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='shard_gtids', full_name='binlogdata.Journal.shard_gtids', index=3, - number=4, type=11, cpp_type=10, label=3, + name='shard_gtids', full_name='binlogdata.Journal.shard_gtids', index=4, + number=5, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - 
name='participants', full_name='binlogdata.Journal.participants', index=4, - number=5, type=11, cpp_type=10, label=3, + name='participants', full_name='binlogdata.Journal.participants', index=5, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='reversed_ids', full_name='binlogdata.Journal.reversed_ids', index=6, + number=7, type=3, cpp_type=2, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, @@ -924,7 +963,7 @@ oneofs=[ ], serialized_start=1633, - serialized_end=1787, + serialized_end=1860, ) @@ -1010,8 +1049,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1790, - serialized_end=2062, + serialized_start=1863, + serialized_end=2135, ) @@ -1069,8 +1108,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2065, - serialized_end=2264, + serialized_start=2138, + serialized_end=2337, ) @@ -1100,8 +1139,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2266, - serialized_end=2319, + serialized_start=2339, + serialized_end=2392, ) @@ -1159,8 +1198,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2322, - serialized_end=2522, + serialized_start=2395, + serialized_end=2595, ) @@ -1218,8 +1257,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2525, - serialized_end=2676, + serialized_start=2598, + serialized_end=2749, ) _BINLOGTRANSACTION_STATEMENT.fields_by_name['category'].enum_type = _BINLOGTRANSACTION_STATEMENT_CATEGORY @@ -1243,6 +1282,7 @@ _ROWEVENT.fields_by_name['row_changes'].message_type = _ROWCHANGE _FIELDEVENT.fields_by_name['fields'].message_type = query__pb2._FIELD _VGTID.fields_by_name['shard_gtids'].message_type = _SHARDGTID +_JOURNAL.fields_by_name['migration_type'].enum_type = _MIGRATIONTYPE 
_JOURNAL.fields_by_name['shard_gtids'].message_type = _SHARDGTID _JOURNAL.fields_by_name['participants'].message_type = _KEYSPACESHARD _VEVENT.fields_by_name['type'].enum_type = _VEVENTTYPE @@ -1286,6 +1326,7 @@ DESCRIPTOR.message_types_by_name['VStreamRowsResponse'] = _VSTREAMROWSRESPONSE DESCRIPTOR.enum_types_by_name['OnDDLAction'] = _ONDDLACTION DESCRIPTOR.enum_types_by_name['VEventType'] = _VEVENTTYPE +DESCRIPTOR.enum_types_by_name['MigrationType'] = _MIGRATIONTYPE _sym_db.RegisterFileDescriptor(DESCRIPTOR) Charset = _reflection.GeneratedProtocolMessageType('Charset', (_message.Message,), dict( From 61af9f7dc574a3f1341ccb388b64309fc1da35fb Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Sun, 7 Jul 2019 22:12:43 -0700 Subject: [PATCH 11/17] migrater: add logs and comments Signed-off-by: Sugu Sougoumarane --- go/vt/wrangler/migrater.go | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/go/vt/wrangler/migrater.go b/go/vt/wrangler/migrater.go index 7e4c7a71c2e..58fb80cb7e7 100644 --- a/go/vt/wrangler/migrater.go +++ b/go/vt/wrangler/migrater.go @@ -42,6 +42,7 @@ import ( "vitess.io/vitess/go/vt/vtgate/vindexes" ) +// migrateDirection specifies the migration direction. type migrateDirection int const ( @@ -49,6 +50,7 @@ const ( directionBackward ) +// accessType specifies the type of access for a shard (allow/disallow writes). type accessType int const ( @@ -56,6 +58,8 @@ const ( disallowWrites ) +// migrater contains the metadata for migrating read and write traffic +// for vreplication streams. type migrater struct { migrationType binlogdatapb.MigrationType wr *Wrangler @@ -67,6 +71,7 @@ type migrater struct { tables []string } +// miTarget contains the metadata for each migration target. type miTarget struct { shard *topo.ShardInfo master *topo.TabletInfo @@ -74,6 +79,7 @@ type miTarget struct { position string } +// miSource contains the metadata for each migration source. 
type miSource struct { shard *topo.ShardInfo master *topo.TabletInfo @@ -94,6 +100,7 @@ func (wr *Wrangler) MigrateReads(ctx context.Context, migrationType binlogdatapb return err } + // For reads, locking the source keysppace is sufficient. ctx, unlock, lockErr := wr.ts.LockKeyspace(ctx, mi.sourceKeyspace, "MigrateReads") if lockErr != nil { return lockErr @@ -112,6 +119,7 @@ func (wr *Wrangler) MigrateWrites(ctx context.Context, migrationType binlogdatap if err != nil { return err } + mi.wr.Logger().Infof("Built migration metadata: %+v", mi) if err := mi.validate(ctx); err != nil { return err } @@ -119,6 +127,7 @@ func (wr *Wrangler) MigrateWrites(ctx context.Context, migrationType binlogdatap return err } + // Need to lock both source and target keyspaces. ctx, sourceUnlock, lockErr := wr.ts.LockKeyspace(ctx, mi.sourceKeyspace, "MigrateWrites") if lockErr != nil { return lockErr @@ -138,6 +147,7 @@ func (wr *Wrangler) MigrateWrites(ctx context.Context, migrationType binlogdatap return err } if !journalsExist { + mi.wr.Logger().Infof("No previous journals were found. Proceeding normally.") if err := mi.stopSourceWrites(ctx); err != nil { mi.cancelMigration(ctx) return err @@ -147,11 +157,14 @@ func (wr *Wrangler) MigrateWrites(ctx context.Context, migrationType binlogdatap return err } } else { + mi.wr.Logger().Infof("Journals were found. Completing the left over steps.") // Need to gather positions in case all journals were not created. if err := mi.gatherPositions(ctx); err != nil { return err } } + // This is the point of no return. Once a journal is created, + // traffic can be redirected to target shards. 
if err := mi.createJournals(ctx); err != nil { return err } @@ -394,6 +407,8 @@ func (mi *migrater) migrateTableReads(ctx context.Context, cells []string, serve // We assume that the following rules were setup when the targets were created: // table -> sourceKeyspace.table // targetKeyspace.table -> sourceKeyspace.table + // For forward migration, we add tablet type specific rules to redirect traffic to the target. + // For backward, we delete them. tt := strings.ToLower(servedType.String()) for _, table := range mi.tables { if direction == directionForward { @@ -459,6 +474,7 @@ func (mi *migrater) stopSourceWrites(ctx context.Context) error { return mi.forAllSources(func(sourceks topo.KeyspaceShard, source *miSource) error { var err error source.position, err = mi.wr.tmc.MasterPosition(ctx, source.master.Tablet) + mi.wr.Logger().Infof("Position for source %v: %v", sourceks, source.position) return err }) } @@ -497,6 +513,7 @@ func (mi *migrater) waitForCatchup(ctx context.Context, filteredReplicationWaitT } var err error target.position, err = mi.wr.tmc.MasterPosition(ctx, target.master.Tablet) + mi.wr.Logger().Infof("Position for uid %v: %v", uid, target.position) return err }) } @@ -527,6 +544,7 @@ func (mi *migrater) gatherPositions(ctx context.Context) error { err := mi.forAllSources(func(sourceks topo.KeyspaceShard, source *miSource) error { var err error source.position, err = mi.wr.tmc.MasterPosition(ctx, source.master.Tablet) + mi.wr.Logger().Infof("Position for source %v: %v", sourceks, source.position) return err }) if err != nil { @@ -535,6 +553,7 @@ func (mi *migrater) gatherPositions(ctx context.Context) error { return mi.forAllTargets(func(targetks topo.KeyspaceShard, target *miTarget) error { var err error target.position, err = mi.wr.tmc.MasterPosition(ctx, target.master.Tablet) + mi.wr.Logger().Infof("Position for target %v: %v", targetks, target.position) return err }) } @@ -577,6 +596,7 @@ func (mi *migrater) createJournals(ctx context.Context) 
error { Shard: ks.Shard, }) } + mi.wr.Logger().Infof("Creating journal: %v", journal) statement := fmt.Sprintf("insert into _vt.resharding_journal "+ "(id, db_name, val) "+ "values (%v, %v, %v)", @@ -681,10 +701,13 @@ func (mi *migrater) changeTableRouting(ctx context.Context) error { delete(rules, table+"@"+tt) delete(rules, mi.targetKeyspace+"."+table+"@"+tt) delete(rules, mi.sourceKeyspace+"."+table+"@"+tt) + mi.wr.Logger().Infof("Delete routing: %v %v %v", table+"@"+tt, mi.targetKeyspace+"."+table+"@"+tt, mi.sourceKeyspace+"."+table+"@"+tt) } delete(rules, mi.targetKeyspace+"."+table) + mi.wr.Logger().Infof("Delete routing: %v", mi.targetKeyspace+"."+table) rules[table] = []string{mi.targetKeyspace + "." + table} rules[mi.sourceKeyspace+"."+table] = []string{mi.targetKeyspace + "." + table} + mi.wr.Logger().Infof("Add routing: %v %v", table, mi.sourceKeyspace+"."+table) } if err := mi.wr.saveRoutingRules(ctx, rules); err != nil { return err From 1a88365c77dceab13066b9d23f40d8d28683f1d1 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Mon, 8 Jul 2019 12:18:33 -0700 Subject: [PATCH 12/17] migrater: simplify parameters Since we don't support multiple source or target keyspaces, the uid parameters can be simplified to only have shards as keys, and there will be a separate targetKeyspace parameter that applies to all shards. 
Signed-off-by: Sugu Sougoumarane --- go/vt/wrangler/migrater.go | 224 ++++++++++++++-------------- go/vt/wrangler/migrater_env_test.go | 17 ++- go/vt/wrangler/migrater_test.go | 146 +++++++----------- 3 files changed, 177 insertions(+), 210 deletions(-) diff --git a/go/vt/wrangler/migrater.go b/go/vt/wrangler/migrater.go index 58fb80cb7e7..05f6e2316bd 100644 --- a/go/vt/wrangler/migrater.go +++ b/go/vt/wrangler/migrater.go @@ -64,8 +64,8 @@ type migrater struct { migrationType binlogdatapb.MigrationType wr *Wrangler id int64 - sources map[topo.KeyspaceShard]*miSource - targets map[topo.KeyspaceShard]*miTarget + sources map[string]*miSource + targets map[string]*miTarget sourceKeyspace string targetKeyspace string tables []string @@ -73,7 +73,7 @@ type migrater struct { // miTarget contains the metadata for each migration target. type miTarget struct { - shard *topo.ShardInfo + si *topo.ShardInfo master *topo.TabletInfo sources map[uint32]*binlogdatapb.BinlogSource position string @@ -81,18 +81,18 @@ type miTarget struct { // miSource contains the metadata for each migration source. type miSource struct { - shard *topo.ShardInfo + si *topo.ShardInfo master *topo.TabletInfo position string journaled bool } // MigrateReads is a generic way of migrating read traffic for a resharding workflow. 
-func (wr *Wrangler) MigrateReads(ctx context.Context, migrationType binlogdatapb.MigrationType, streams map[topo.KeyspaceShard][]uint32, cells []string, servedType topodatapb.TabletType, direction migrateDirection) error { +func (wr *Wrangler) MigrateReads(ctx context.Context, migrationType binlogdatapb.MigrationType, targetKeyspace string, streams map[string][]uint32, cells []string, servedType topodatapb.TabletType, direction migrateDirection) error { if servedType != topodatapb.TabletType_REPLICA && servedType != topodatapb.TabletType_RDONLY { return fmt.Errorf("tablet type must be REPLICA or RDONLY: %v", servedType) } - mi, err := wr.buildMigrater(ctx, migrationType, streams) + mi, err := wr.buildMigrater(ctx, migrationType, targetKeyspace, streams) if err != nil { return err } @@ -114,8 +114,8 @@ func (wr *Wrangler) MigrateReads(ctx context.Context, migrationType binlogdatapb } // MigrateWrites is a generic way of migrating write traffic for a resharding workflow. -func (wr *Wrangler) MigrateWrites(ctx context.Context, migrationType binlogdatapb.MigrationType, streams map[topo.KeyspaceShard][]uint32, filteredReplicationWaitTime time.Duration) error { - mi, err := wr.buildMigrater(ctx, migrationType, streams) +func (wr *Wrangler) MigrateWrites(ctx context.Context, migrationType binlogdatapb.MigrationType, targetKeyspace string, streams map[string][]uint32, filteredReplicationWaitTime time.Duration) error { + mi, err := wr.buildMigrater(ctx, migrationType, targetKeyspace, streams) if err != nil { return err } @@ -181,34 +181,30 @@ func (wr *Wrangler) MigrateWrites(ctx context.Context, migrationType binlogdatap return nil } -func (wr *Wrangler) buildMigrater(ctx context.Context, migrationType binlogdatapb.MigrationType, streams map[topo.KeyspaceShard][]uint32) (*migrater, error) { +func (wr *Wrangler) buildMigrater(ctx context.Context, migrationType binlogdatapb.MigrationType, targetKeyspace string, streams map[string][]uint32) (*migrater, error) { mi := 
&migrater{ - migrationType: migrationType, - wr: wr, - id: hashStreams(streams), - targets: make(map[topo.KeyspaceShard]*miTarget), - sources: make(map[topo.KeyspaceShard]*miSource), + migrationType: migrationType, + wr: wr, + id: hashStreams(targetKeyspace, streams), + targets: make(map[string]*miTarget), + sources: make(map[string]*miSource), + targetKeyspace: targetKeyspace, } mi.wr.Logger().Infof("Migration ID for streams %v: %d", streams, mi.id) - for targetks, uids := range streams { - targetShard, err := mi.wr.ts.GetShard(ctx, targetks.Keyspace, targetks.Shard) + for targetShard, uids := range streams { + targetsi, err := mi.wr.ts.GetShard(ctx, targetKeyspace, targetShard) if err != nil { return nil, err } - targetMaster, err := mi.wr.ts.GetTablet(ctx, targetShard.MasterAlias) + targetMaster, err := mi.wr.ts.GetTablet(ctx, targetsi.MasterAlias) if err != nil { return nil, err } - mi.targets[targetks] = &miTarget{ - shard: targetShard, + mi.targets[targetShard] = &miTarget{ + si: targetsi, master: targetMaster, sources: make(map[uint32]*binlogdatapb.BinlogSource), } - if mi.targetKeyspace == "" { - mi.targetKeyspace = targetks.Keyspace - } else if mi.targetKeyspace != targetks.Keyspace { - return nil, fmt.Errorf("target keyspaces are mismatched across streams: %v vs %v", mi.targetKeyspace, targetks.Keyspace) - } for _, uid := range uids { p3qr, err := mi.wr.tmc.VReplicationExec(ctx, targetMaster.Tablet, fmt.Sprintf("select source from _vt.vreplication where id=%d", uid)) if err != nil { @@ -216,51 +212,51 @@ func (wr *Wrangler) buildMigrater(ctx context.Context, migrationType binlogdatap } qr := sqltypes.Proto3ToResult(p3qr) if len(qr.Rows) < 1 || len(qr.Rows[0]) < 1 { - return nil, fmt.Errorf("VReplication stream %d not found for %s:%s", int(uid), targetks.Keyspace, targetks.Shard) + return nil, fmt.Errorf("VReplication stream %d not found for %s:%s", int(uid), targetKeyspace, targetShard) } for _, row := range qr.Rows { str := row[0].ToString() - var 
binlogSource binlogdatapb.BinlogSource - if err := proto.UnmarshalText(str, &binlogSource); err != nil { + var bls binlogdatapb.BinlogSource + if err := proto.UnmarshalText(str, &bls); err != nil { return nil, err } - mi.targets[targetks].sources[uid] = &binlogSource + mi.targets[targetShard].sources[uid] = &bls - sourceks := topo.KeyspaceShard{Keyspace: binlogSource.Keyspace, Shard: binlogSource.Shard} - if _, ok := mi.sources[sourceks]; !ok { - sourceShard, err := mi.wr.ts.GetShard(ctx, binlogSource.Keyspace, binlogSource.Shard) - if err != nil { - return nil, err - } - sourceMaster, err := mi.wr.ts.GetTablet(ctx, sourceShard.MasterAlias) - if err != nil { - return nil, err - } - mi.sources[sourceks] = &miSource{ - shard: sourceShard, - master: sourceMaster, - } + if mi.sourceKeyspace == "" { + mi.sourceKeyspace = bls.Keyspace + } else if mi.sourceKeyspace != bls.Keyspace { + return nil, fmt.Errorf("source keyspaces are mismatched across streams: %v vs %v", mi.sourceKeyspace, bls.Keyspace) + } + if _, ok := mi.sources[bls.Shard]; ok { + continue + } - if mi.tables == nil { - for _, rule := range binlogSource.Filter.Rules { - mi.tables = append(mi.tables, rule.Match) - } - sort.Strings(mi.tables) - } else { - var tables []string - for _, rule := range binlogSource.Filter.Rules { - tables = append(tables, rule.Match) - } - sort.Strings(tables) - if !reflect.DeepEqual(mi.tables, tables) { - return nil, fmt.Errorf("table lists are mismatched across streams: %v vs %v", mi.tables, tables) - } - } + sourcesi, err := mi.wr.ts.GetShard(ctx, bls.Keyspace, bls.Shard) + if err != nil { + return nil, err + } + sourceMaster, err := mi.wr.ts.GetTablet(ctx, sourcesi.MasterAlias) + if err != nil { + return nil, err + } + mi.sources[bls.Shard] = &miSource{ + si: sourcesi, + master: sourceMaster, + } - if mi.sourceKeyspace == "" { - mi.sourceKeyspace = sourceks.Keyspace - } else if mi.sourceKeyspace != sourceks.Keyspace { - return nil, fmt.Errorf("source keyspaces are mismatched 
across streams: %v vs %v", mi.sourceKeyspace, sourceks.Keyspace) + if mi.tables == nil { + for _, rule := range bls.Filter.Rules { + mi.tables = append(mi.tables, rule.Match) + } + sort.Strings(mi.tables) + } else { + var tables []string + for _, rule := range bls.Filter.Rules { + tables = append(tables, rule.Match) + } + sort.Strings(tables) + if !reflect.DeepEqual(mi.tables, tables) { + return nil, fmt.Errorf("table lists are mismatched across streams: %v vs %v", mi.tables, tables) } } } @@ -270,15 +266,16 @@ func (wr *Wrangler) buildMigrater(ctx context.Context, migrationType binlogdatap } // hashStreams produces a reproduceable hash based on the input parameters. -func hashStreams(streams map[topo.KeyspaceShard][]uint32) int64 { +func hashStreams(targetKeyspace string, streams map[string][]uint32) int64 { var expanded []string - for ks, uids := range streams { + for shard, uids := range streams { for _, uid := range uids { - expanded = append(expanded, fmt.Sprintf("%s:%s:%d", ks.Keyspace, ks.Shard, uid)) + expanded = append(expanded, fmt.Sprintf("%s:%d", shard, uid)) } } sort.Strings(expanded) hasher := fnv.New64() + hasher.Write([]byte(targetKeyspace)) for _, str := range expanded { hasher.Write([]byte(str)) } @@ -289,13 +286,12 @@ func hashStreams(streams map[topo.KeyspaceShard][]uint32) int64 { func (mi *migrater) validate(ctx context.Context) error { // Ensure no duplicate sources in each target. 
for _, target := range mi.targets { - uniqueSources := make(map[topo.KeyspaceShard]uint32) - for uid, binlogSource := range target.sources { - sourceks := topo.KeyspaceShard{Keyspace: binlogSource.Keyspace, Shard: binlogSource.Shard} - if suid, ok := uniqueSources[sourceks]; ok { + uniqueSources := make(map[string]uint32) + for uid, bls := range target.sources { + if suid, ok := uniqueSources[bls.Shard]; ok { return fmt.Errorf("duplicate sources for uids: %v and %v", suid, uid) } - uniqueSources[sourceks] = uid + uniqueSources[bls.Shard] = uid } } if mi.migrationType == binlogdatapb.MigrationType_TABLES { @@ -318,9 +314,9 @@ func (mi *migrater) validate(ctx context.Context) error { return fmt.Errorf("source and target keyspace must match: %v vs %v", mi.sourceKeyspace, mi.targetKeyspace) } // Source and target shards must not match. - for sourceks := range mi.sources { - if _, ok := mi.targets[sourceks]; ok { - return fmt.Errorf("target shard matches a source shard: %v", sourceks) + for sourceShard := range mi.sources { + if _, ok := mi.targets[sourceShard]; ok { + return fmt.Errorf("target shard matches a source shard: %v", sourceShard) } } } @@ -359,7 +355,7 @@ func (mi *migrater) validateShardForWrite(ctx context.Context) error { // Checking one shard is enough. 
var si *topo.ShardInfo for _, source := range mi.sources { - si = source.shard + si = source.si break } @@ -446,7 +442,7 @@ func (mi *migrater) migrateShardReads(ctx context.Context, cells []string, serve func (mi *migrater) checkJournals(ctx context.Context) (journalsExist bool, err error) { var exist sync2.AtomicBool - err = mi.forAllSources(func(sourceks topo.KeyspaceShard, source *miSource) error { + err = mi.forAllSources(func(source *miSource) error { statement := fmt.Sprintf("select 1 from _vt.resharding_journal where id=%v", mi.id) p3qr, err := mi.wr.tmc.VReplicationExec(ctx, source.master.Tablet, statement) if err != nil { @@ -471,17 +467,17 @@ func (mi *migrater) stopSourceWrites(ctx context.Context) error { if err != nil { return err } - return mi.forAllSources(func(sourceks topo.KeyspaceShard, source *miSource) error { + return mi.forAllSources(func(source *miSource) error { var err error source.position, err = mi.wr.tmc.MasterPosition(ctx, source.master.Tablet) - mi.wr.Logger().Infof("Position for source %v: %v", sourceks, source.position) + mi.wr.Logger().Infof("Position for source %v:%v: %v", mi.sourceKeyspace, source.si.ShardName(), source.position) return err }) } func (mi *migrater) changeTableSourceWrites(ctx context.Context, access accessType) error { - return mi.forAllSources(func(sourceks topo.KeyspaceShard, source *miSource) error { - if _, err := mi.wr.ts.UpdateShardFields(ctx, sourceks.Keyspace, sourceks.Shard, func(si *topo.ShardInfo) error { + return mi.forAllSources(func(source *miSource) error { + if _, err := mi.wr.ts.UpdateShardFields(ctx, mi.sourceKeyspace, source.si.ShardName(), func(si *topo.ShardInfo) error { return si.UpdateSourceBlacklistedTables(ctx, topodatapb.TabletType_MASTER, nil, access == allowWrites /* remove */, mi.tables) }); err != nil { return err @@ -497,7 +493,7 @@ func (mi *migrater) waitForCatchup(ctx context.Context, filteredReplicationWaitT var mu sync.Mutex return mi.forAllUids(func(target *miTarget, uid 
uint32) error { bls := target.sources[uid] - source := mi.sources[topo.KeyspaceShard{Keyspace: bls.Keyspace, Shard: bls.Shard}] + source := mi.sources[bls.Shard] if err := mi.wr.tmc.VReplicationWaitForPos(ctx, target.master.Tablet, int(uid), source.position); err != nil { return err } @@ -541,25 +537,25 @@ func (mi *migrater) cancelMigration(ctx context.Context) { } func (mi *migrater) gatherPositions(ctx context.Context) error { - err := mi.forAllSources(func(sourceks topo.KeyspaceShard, source *miSource) error { + err := mi.forAllSources(func(source *miSource) error { var err error source.position, err = mi.wr.tmc.MasterPosition(ctx, source.master.Tablet) - mi.wr.Logger().Infof("Position for source %v: %v", sourceks, source.position) + mi.wr.Logger().Infof("Position for source %v:%v: %v", mi.sourceKeyspace, source.si.ShardName(), source.position) return err }) if err != nil { return err } - return mi.forAllTargets(func(targetks topo.KeyspaceShard, target *miTarget) error { + return mi.forAllTargets(func(target *miTarget) error { var err error target.position, err = mi.wr.tmc.MasterPosition(ctx, target.master.Tablet) - mi.wr.Logger().Infof("Position for target %v: %v", targetks, target.position) + mi.wr.Logger().Infof("Position for target %v:%v: %v", mi.targetKeyspace, target.si.ShardName(), target.position) return err }) } func (mi *migrater) createJournals(ctx context.Context) error { - return mi.forAllSources(func(sourceks topo.KeyspaceShard, source *miSource) error { + return mi.forAllSources(func(source *miSource) error { if source.journaled { return nil } @@ -569,11 +565,11 @@ func (mi *migrater) createJournals(ctx context.Context) error { Tables: mi.tables, LocalPosition: source.position, } - participantMap := make(map[topo.KeyspaceShard]bool) - for targetks, target := range mi.targets { + participantMap := make(map[string]bool) + for targetShard, target := range mi.targets { found := false for _, tsource := range target.sources { - if sourceks == 
(topo.KeyspaceShard{Keyspace: tsource.Keyspace, Shard: tsource.Shard}) { + if source.si.ShardName() == tsource.Shard { found = true break } @@ -582,18 +578,18 @@ func (mi *migrater) createJournals(ctx context.Context) error { continue } journal.ShardGtids = append(journal.ShardGtids, &binlogdatapb.ShardGtid{ - Keyspace: targetks.Keyspace, - Shard: targetks.Shard, + Keyspace: mi.targetKeyspace, + Shard: targetShard, Gtid: target.position, }) for _, tsource := range target.sources { - participantMap[topo.KeyspaceShard{Keyspace: tsource.Keyspace, Shard: tsource.Shard}] = true + participantMap[tsource.Shard] = true } } - for ks := range participantMap { + for shard := range participantMap { journal.Participants = append(journal.Participants, &binlogdatapb.KeyspaceShard{ - Keyspace: ks.Keyspace, - Shard: ks.Shard, + Keyspace: mi.sourceKeyspace, + Shard: shard, }) } mi.wr.Logger().Infof("Creating journal: %v", journal) @@ -619,10 +615,10 @@ func (mi *migrater) createReverseReplication(ctx context.Context) error { } return mi.forAllUids(func(target *miTarget, uid uint32) error { bls := target.sources[uid] - source := mi.sources[topo.KeyspaceShard{Keyspace: bls.Keyspace, Shard: bls.Shard}] + source := mi.sources[bls.Shard] reverseBls := &binlogdatapb.BinlogSource{ - Keyspace: target.shard.Keyspace(), - Shard: target.shard.ShardName(), + Keyspace: mi.targetKeyspace, + Shard: target.si.ShardName(), TabletType: bls.TabletType, Filter: &binlogdatapb.Filter{}, } @@ -664,8 +660,8 @@ func (mi *migrater) allowTargetWrites(ctx context.Context) error { } func (mi *migrater) allowTableTargetWrites(ctx context.Context) error { - return mi.forAllTargets(func(targetks topo.KeyspaceShard, target *miTarget) error { - if _, err := mi.wr.ts.UpdateShardFields(ctx, targetks.Keyspace, targetks.Shard, func(si *topo.ShardInfo) error { + return mi.forAllTargets(func(target *miTarget) error { + if _, err := mi.wr.ts.UpdateShardFields(ctx, mi.targetKeyspace, target.si.ShardName(), func(si 
*topo.ShardInfo) error { return si.UpdateSourceBlacklistedTables(ctx, topodatapb.TabletType_MASTER, nil, true, mi.tables) }); err != nil { return err @@ -716,8 +712,8 @@ func (mi *migrater) changeTableRouting(ctx context.Context) error { } func (mi *migrater) changeShardRouting(ctx context.Context) error { - err := mi.forAllSources(func(sourceks topo.KeyspaceShard, source *miSource) error { - _, err := mi.wr.ts.UpdateShardFields(ctx, source.shard.Keyspace(), source.shard.ShardName(), func(si *topo.ShardInfo) error { + err := mi.forAllSources(func(source *miSource) error { + _, err := mi.wr.ts.UpdateShardFields(ctx, mi.sourceKeyspace, source.si.ShardName(), func(si *topo.ShardInfo) error { si.IsMasterServing = false return nil }) @@ -726,8 +722,8 @@ func (mi *migrater) changeShardRouting(ctx context.Context) error { if err != nil { return err } - err = mi.forAllTargets(func(targetks topo.KeyspaceShard, target *miTarget) error { - _, err := mi.wr.ts.UpdateShardFields(ctx, target.shard.Keyspace(), target.shard.ShardName(), func(si *topo.ShardInfo) error { + err = mi.forAllTargets(func(target *miTarget) error { + _, err := mi.wr.ts.UpdateShardFields(ctx, mi.targetKeyspace, target.si.ShardName(), func(si *topo.ShardInfo) error { si.IsMasterServing = true return nil }) @@ -755,35 +751,35 @@ func (mi *migrater) changeShardsAccess(ctx context.Context, keyspace string, sha return mi.wr.refreshMasters(ctx, shards) } -func (mi *migrater) forAllSources(f func(topo.KeyspaceShard, *miSource) error) error { +func (mi *migrater) forAllSources(f func(*miSource) error) error { var wg sync.WaitGroup allErrors := &concurrency.AllErrorRecorder{} - for sourceks, source := range mi.sources { + for _, source := range mi.sources { wg.Add(1) - go func(sourceks topo.KeyspaceShard, source *miSource) { + go func(source *miSource) { defer wg.Done() - if err := f(sourceks, source); err != nil { + if err := f(source); err != nil { allErrors.RecordError(err) } - }(sourceks, source) + }(source) } 
wg.Wait() return allErrors.AggrError(vterrors.Aggregate) } -func (mi *migrater) forAllTargets(f func(topo.KeyspaceShard, *miTarget) error) error { +func (mi *migrater) forAllTargets(f func(*miTarget) error) error { var wg sync.WaitGroup allErrors := &concurrency.AllErrorRecorder{} - for targetks, target := range mi.targets { + for _, target := range mi.targets { wg.Add(1) - go func(targetks topo.KeyspaceShard, target *miTarget) { + go func(target *miTarget) { defer wg.Done() - if err := f(targetks, target); err != nil { + if err := f(target); err != nil { allErrors.RecordError(err) } - }(targetks, target) + }(target) } wg.Wait() return allErrors.AggrError(vterrors.Aggregate) @@ -811,7 +807,7 @@ func (mi *migrater) forAllUids(f func(target *miTarget, uid uint32) error) error func (mi *migrater) sourceShards() []*topo.ShardInfo { shards := make([]*topo.ShardInfo, 0, len(mi.sources)) for _, source := range mi.sources { - shards = append(shards, source.shard) + shards = append(shards, source.si) } return shards } @@ -819,7 +815,7 @@ func (mi *migrater) sourceShards() []*topo.ShardInfo { func (mi *migrater) targetShards() []*topo.ShardInfo { shards := make([]*topo.ShardInfo, 0, len(mi.targets)) for _, target := range mi.targets { - shards = append(shards, target.shard) + shards = append(shards, target.si) } return shards } diff --git a/go/vt/wrangler/migrater_env_test.go b/go/vt/wrangler/migrater_env_test.go index a4ff17a0e9b..6d7db1f226f 100644 --- a/go/vt/wrangler/migrater_env_test.go +++ b/go/vt/wrangler/migrater_env_test.go @@ -45,7 +45,8 @@ type testMigraterEnv struct { dbSource1Client, dbSource2Client *fakeDBClient dbDest1Client, dbDest2Client *fakeDBClient allDBClients []*fakeDBClient - streams map[topo.KeyspaceShard][]uint32 + targetKeyspace string + streams map[string][]uint32 } func newTestTableMigrater(ctx context.Context, t *testing.T) *testMigraterEnv { @@ -186,9 +187,10 @@ func newTestTableMigrater(ctx context.Context, t *testing.T) *testMigraterEnv { 
t.Fatal(err) } - tme.streams = map[topo.KeyspaceShard][]uint32{ - {Keyspace: "ks2", Shard: "-80"}: {1, 2}, - {Keyspace: "ks2", Shard: "80-"}: {1}, + tme.targetKeyspace = "ks2" + tme.streams = map[string][]uint32{ + "-80": {1, 2}, + "80-": {1}, } return tme } @@ -282,9 +284,10 @@ func newTestShardMigrater(ctx context.Context, t *testing.T) *testMigraterEnv { fmt.Sprintf("%v", bls3), ), nil) - tme.streams = map[topo.KeyspaceShard][]uint32{ - {Keyspace: "ks", Shard: "-80"}: {1, 2}, - {Keyspace: "ks", Shard: "80-"}: {1}, + tme.targetKeyspace = "ks" + tme.streams = map[string][]uint32{ + "-80": {1, 2}, + "80-": {1}, } return tme } diff --git a/go/vt/wrangler/migrater_test.go b/go/vt/wrangler/migrater_test.go index f7edc690af1..08459b50305 100644 --- a/go/vt/wrangler/migrater_test.go +++ b/go/vt/wrangler/migrater_test.go @@ -40,7 +40,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Single cell RDONLY migration. - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, []string{"cell1"}, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, []string{"cell1"}, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } @@ -69,7 +69,7 @@ func TestTableMigrate(t *testing.T) { // The global routing already contains redirections for rdonly. // So, adding routes for replica and deploying to cell2 will also cause // cell2 to migrat rdonly. This is a quirk that can be fixed later if necessary. 
- err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } @@ -107,7 +107,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Single cell backward REPLICA migration. - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionBackward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionBackward) if err != nil { t.Fatal(err) } @@ -127,7 +127,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Migrate all REPLICA. - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } @@ -153,7 +153,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // All cells RDONLY backward migration. 
- err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_RDONLY, directionBackward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionBackward) if err != nil { t.Fatal(err) } @@ -173,7 +173,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Can't migrate master with MigrateReads. - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_MASTER, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_MASTER, directionForward) want := "tablet type must be REPLICA or RDONLY: MASTER" if err == nil || err.Error() != want { t.Errorf("MigrateReads(master) err: %v, want %v", err, want) @@ -182,7 +182,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Can't migrate writes if REPLICA and RDONLY have not fully migrated yet. - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, 1*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, 1*time.Second) want = "missing tablet type specific routing, read-only traffic must be migrated before migrating writes" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateWrites err: %v, want %v", err, want) @@ -193,7 +193,7 @@ func TestTableMigrate(t *testing.T) { // Test MigrateWrites cancelation on failure. // Migrate all the reads first. 
- err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } @@ -217,8 +217,8 @@ func TestTableMigrate(t *testing.T) { }) // Check for journals. - tme.dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 445516443381867838", &sqltypes.Result{}, nil) - tme.dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 445516443381867838", &sqltypes.Result{}, nil) + tme.dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 9113431017721636330", &sqltypes.Result{}, nil) + tme.dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 9113431017721636330", &sqltypes.Result{}, nil) // Wait for position: Reads current state, updates to Stopped, and re-reads. state := sqltypes.MakeTestResult(sqltypes.MakeTestFields( @@ -248,7 +248,7 @@ func TestTableMigrate(t *testing.T) { tme.dbDest2Client.addQuery(cancel1, &sqltypes.Result{}, nil) tme.dbDest1Client.addQuery(cancel2, &sqltypes.Result{}, nil) - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, 0*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, 0*time.Second) want = "DeadlineExceeded" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateWrites(0 timeout) err: %v, must contain %v", err, want) @@ -280,9 +280,9 @@ func TestTableMigrate(t *testing.T) { // Test successful MigrateWrites. // Create journals. 
- journal1 := "insert into _vt.resharding_journal.*445516443381867838.*tables.*t1.*t2.*local_position.*MariaDB/5-456-892.*shard_gtids.*-80.*MariaDB/5-456-893.*participants.*40.*40" + journal1 := "insert into _vt.resharding_journal.*9113431017721636330.*tables.*t1.*t2.*local_position.*MariaDB/5-456-892.*shard_gtids.*-80.*MariaDB/5-456-893.*participants.*40.*40" tme.dbSource1Client.addQueryRE(journal1, &sqltypes.Result{}, nil) - journal2 := "insert into _vt.resharding_journal.*445516443381867838.*tables.*t1.*t2.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*80.*participants.*40.*40" + journal2 := "insert into _vt.resharding_journal.*9113431017721636330.*tables.*t1.*t2.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*80.*participants.*40.*40" tme.dbSource2Client.addQueryRE(journal2, &sqltypes.Result{}, nil) // Create backward replicaions. @@ -298,7 +298,7 @@ func TestTableMigrate(t *testing.T) { tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, 1*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, 1*time.Second) if err != nil { t.Fatal(err) } @@ -332,7 +332,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Single cell RDONLY migration. 
- err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, []string{"cell1"}, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, []string{"cell1"}, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } @@ -348,7 +348,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Other cell REPLICA migration. - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } @@ -364,7 +364,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Single cell backward REPLICA migration. - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionBackward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionBackward) if err != nil { t.Fatal(err) } @@ -383,7 +383,7 @@ func TestShardMigrate(t *testing.T) { // This is an extra step that does not exist in the tables test. // The per-cell migration mechanism is different for tables. So, this // extra step is needed to bring things in sync. 
- err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } @@ -395,7 +395,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Migrate all REPLICA. - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } @@ -407,7 +407,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // All cells RDONLY backward migration. - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, nil, topodatapb.TabletType_RDONLY, directionBackward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionBackward) if err != nil { t.Fatal(err) } @@ -419,7 +419,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Can't migrate master with MigrateReads. 
- err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, nil, topodatapb.TabletType_MASTER, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_MASTER, directionForward) want := "tablet type must be REPLICA or RDONLY: MASTER" if err == nil || err.Error() != want { t.Errorf("MigrateReads(master) err: %v, want %v", err, want) @@ -428,7 +428,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Can't migrate writes if REPLICA and RDONLY have not fully migrated yet. - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, 1*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, 1*time.Second) want = "cannot migrate MASTER away" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateWrites err: %v, want %v", err, want) @@ -439,7 +439,7 @@ func TestShardMigrate(t *testing.T) { // Test MigrateWrites cancelation on failure. // Migrate all the reads first. - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } @@ -453,8 +453,8 @@ func TestShardMigrate(t *testing.T) { checkIsMasterServing(t, tme.ts, "ks:80-", false) // Check for journals. 
- tme.dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 8372031610433464572", &sqltypes.Result{}, nil) - tme.dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 8372031610433464572", &sqltypes.Result{}, nil) + tme.dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 6432976123657117098", &sqltypes.Result{}, nil) + tme.dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 6432976123657117098", &sqltypes.Result{}, nil) // Wait for position: Reads current state, updates to Stopped, and re-reads. state := sqltypes.MakeTestResult(sqltypes.MakeTestFields( @@ -484,7 +484,7 @@ func TestShardMigrate(t *testing.T) { tme.dbDest2Client.addQuery(cancel1, &sqltypes.Result{}, nil) tme.dbDest1Client.addQuery(cancel2, &sqltypes.Result{}, nil) - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, 0*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, 0*time.Second) want = "DeadlineExceeded" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateWrites(0 timeout) err: %v, must contain %v", err, want) @@ -502,9 +502,9 @@ func TestShardMigrate(t *testing.T) { // Test successful MigrateWrites. // Create journals. 
- journal1 := "insert into _vt.resharding_journal.*8372031610433464572.*migration_type:SHARDS.*local_position.*MariaDB/5-456-892.*shard_gtids.*-80.*MariaDB/5-456-893.*participants.*40.*40" + journal1 := "insert into _vt.resharding_journal.*6432976123657117098.*migration_type:SHARDS.*local_position.*MariaDB/5-456-892.*shard_gtids.*-80.*MariaDB/5-456-893.*participants.*40.*40" tme.dbSource1Client.addQueryRE(journal1, &sqltypes.Result{}, nil) - journal2 := "insert into _vt.resharding_journal.*8372031610433464572.*migration_type:SHARDS.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*shard_gtids.*80.*MariaDB/5-456-893.*participants.*40.*40" + journal2 := "insert into _vt.resharding_journal.*6432976123657117098.*migration_type:SHARDS.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*shard_gtids.*80.*MariaDB/5-456-893.*participants.*40.*40" tme.dbSource2Client.addQueryRE(journal2, &sqltypes.Result{}, nil) // Create backward replicaions. @@ -520,7 +520,7 @@ func TestShardMigrate(t *testing.T) { tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, 1*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, 1*time.Second) if err != nil { t.Fatal(err) } @@ -545,18 +545,18 @@ func TestMigrateFailJournal(t *testing.T) { tme := newTestTableMigrater(ctx, t) defer tme.stopTablets(t) - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } - err = tme.wr.MigrateReads(ctx, 
binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } // Check for journals. - tme.dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 445516443381867838", &sqltypes.Result{}, nil) - tme.dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 445516443381867838", &sqltypes.Result{}, nil) + tme.dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 9113431017721636330", &sqltypes.Result{}, nil) + tme.dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 9113431017721636330", &sqltypes.Result{}, nil) // Wait for position: Reads current state, updates to Stopped, and re-reads. state := sqltypes.MakeTestResult(sqltypes.MakeTestFields( @@ -590,7 +590,7 @@ func TestMigrateFailJournal(t *testing.T) { tme.dbSource1Client.addQueryRE("insert into _vt.resharding_journal", nil, errors.New("journaling intentionally failed")) tme.dbSource2Client.addQueryRE("insert into _vt.resharding_journal", nil, errors.New("journaling intentionally failed")) - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, 1*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, 1*time.Second) want := "journaling intentionally failed" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateWrites(0 timeout) err: %v, must contain %v", err, want) @@ -613,21 +613,21 @@ func TestTableMigrateJournalExists(t *testing.T) { tme := newTestTableMigrater(ctx, t) defer tme.stopTablets(t) - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, 
tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } // Show one journal as created. - tme.dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 445516443381867838", sqltypes.MakeTestResult(sqltypes.MakeTestFields("1", "int64"), "1"), nil) - tme.dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 445516443381867838", &sqltypes.Result{}, nil) + tme.dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 9113431017721636330", sqltypes.MakeTestResult(sqltypes.MakeTestFields("1", "int64"), "1"), nil) + tme.dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 9113431017721636330", &sqltypes.Result{}, nil) // Create the missing journal. - journal2 := "insert into _vt.resharding_journal.*445516443381867838.*tables.*t1.*t2.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*80.*participants.*40.*40" + journal2 := "insert into _vt.resharding_journal.*9113431017721636330.*tables.*t1.*t2.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*80.*participants.*40.*40" tme.dbSource2Client.addQueryRE(journal2, &sqltypes.Result{}, nil) // Create backward replicaions. 
@@ -648,7 +648,7 @@ func TestTableMigrateJournalExists(t *testing.T) { tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, 1*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, 1*time.Second) if err != nil { t.Fatal(err) } @@ -675,21 +675,21 @@ func TestShardMigrateJournalExists(t *testing.T) { tme := newTestShardMigrater(ctx, t) defer tme.stopTablets(t) - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } // Show one journal as created. - tme.dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 8372031610433464572", sqltypes.MakeTestResult(sqltypes.MakeTestFields("1", "int64"), "1"), nil) - tme.dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 8372031610433464572", &sqltypes.Result{}, nil) + tme.dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 6432976123657117098", sqltypes.MakeTestResult(sqltypes.MakeTestFields("1", "int64"), "1"), nil) + tme.dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 6432976123657117098", &sqltypes.Result{}, nil) // Create the missing journal. 
- journal2 := "insert into _vt.resharding_journal.*8372031610433464572.*migration_type:SHARDS.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*shard_gtids.*80.*MariaDB/5-456-893.*participants.*40.*40" + journal2 := "insert into _vt.resharding_journal.*6432976123657117098.*migration_type:SHARDS.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*shard_gtids.*80.*MariaDB/5-456-893.*participants.*40.*40" tme.dbSource2Client.addQueryRE(journal2, &sqltypes.Result{}, nil) // Create backward replicaions. @@ -710,7 +710,7 @@ func TestShardMigrateJournalExists(t *testing.T) { tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, 1*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, 1*time.Second) if err != nil { t.Fatal(err) } @@ -728,38 +728,6 @@ func TestShardMigrateJournalExists(t *testing.T) { verifyQueries(t, tme.allDBClients) } -func TestMigrateDistinctTargets(t *testing.T) { - ctx := context.Background() - tme := newTestTableMigrater(ctx, t) - defer tme.stopTablets(t) - - bls := &binlogdatapb.BinlogSource{ - Keyspace: "ks1", - Shard: "-40", - Filter: &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "t1", - Filter: "select * from t1 where in_keyrange('-80')", - }, { - Match: "t2", - Filter: "select * from t2 where in_keyrange('-80')", - }}, - }, - } - tme.dbSource1Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "source", - "varchar"), - fmt.Sprintf("%v", bls), - ), nil) - tme.streams[topo.KeyspaceShard{Keyspace: "ks1", Shard: "-40"}] = []uint32{1} - - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, 
topodatapb.TabletType_RDONLY, directionForward) - want := "target keyspaces are mismatched across streams" - if err == nil || !strings.Contains(err.Error(), want) { - t.Errorf("MigrateReads: %v, must contain %v", err, want) - } -} - func TestMigrateDistinctSources(t *testing.T) { ctx := context.Background() tme := newTestTableMigrater(ctx, t) @@ -784,7 +752,7 @@ func TestMigrateDistinctSources(t *testing.T) { fmt.Sprintf("%v", bls), ), nil) - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) want := "source keyspaces are mismatched across streams" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -798,7 +766,7 @@ func TestMigrateVReplicationStreamNotFound(t *testing.T) { tme.dbDest1Client.addQuery("select source from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) want := "VReplication stream 1 not found for ks2:-80" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -826,7 +794,7 @@ func TestMigrateMismatchedTables(t *testing.T) { fmt.Sprintf("%v", bls), ), nil) - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) want := "table lists are mismatched across streams" if err == 
nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -857,7 +825,7 @@ func TestMigrateDupUidSources(t *testing.T) { fmt.Sprintf("%v", bls), ), nil) - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) want := "duplicate sources for uids" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -869,11 +837,11 @@ func TestTableMigrateAllShardsNotPresent(t *testing.T) { tme := newTestTableMigrater(ctx, t) defer tme.stopTablets(t) - tme.streams = map[topo.KeyspaceShard][]uint32{ - {Keyspace: "ks2", Shard: "-80"}: {1, 2}, + tme.streams = map[string][]uint32{ + "-80": {1, 2}, } - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) want := "mismatched shards for keyspace" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -931,7 +899,7 @@ func TestMigrateNoTableWildcards(t *testing.T) { fmt.Sprintf("%v", bls3), ), nil) - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) want := "cannot migrate streams with wild card table names" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -943,7 +911,7 @@ func 
TestShardMigrateSourceTargetMismatch(t *testing.T) { tme := newTestTableMigrater(ctx, t) defer tme.stopTablets(t) - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) want := "source and target keyspace must match" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -971,9 +939,9 @@ func TestShardMigrateTargetMatchesSource(t *testing.T) { fmt.Sprintf("%v", bls), ), nil) - tme.streams[topo.KeyspaceShard{Keyspace: "ks", Shard: "-40"}] = []uint32{1} + tme.streams["-40"] = []uint32{1} - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) want := "target shard matches a source shard" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) From 40ef5ff329cfbf12f0336fe405f194969ff2573c Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Mon, 8 Jul 2019 16:28:34 -0700 Subject: [PATCH 13/17] migrater: use workflow as input Specifying shards and uids wasn't user-friendly. Specifying workflow names should better. However, it will be the user's responsibilty to keep them unique. 
Signed-off-by: Sugu Sougoumarane --- go/vt/wrangler/migrater.go | 168 +++++++++++++------------ go/vt/wrangler/migrater_env_test.go | 49 ++++---- go/vt/wrangler/migrater_test.go | 184 +++++++++++----------------- 3 files changed, 187 insertions(+), 214 deletions(-) diff --git a/go/vt/wrangler/migrater.go b/go/vt/wrangler/migrater.go index 05f6e2316bd..6db2a1d688c 100644 --- a/go/vt/wrangler/migrater.go +++ b/go/vt/wrangler/migrater.go @@ -88,11 +88,11 @@ type miSource struct { } // MigrateReads is a generic way of migrating read traffic for a resharding workflow. -func (wr *Wrangler) MigrateReads(ctx context.Context, migrationType binlogdatapb.MigrationType, targetKeyspace string, streams map[string][]uint32, cells []string, servedType topodatapb.TabletType, direction migrateDirection) error { +func (wr *Wrangler) MigrateReads(ctx context.Context, migrationType binlogdatapb.MigrationType, targetKeyspace, workflow string, cells []string, servedType topodatapb.TabletType, direction migrateDirection) error { if servedType != topodatapb.TabletType_REPLICA && servedType != topodatapb.TabletType_RDONLY { return fmt.Errorf("tablet type must be REPLICA or RDONLY: %v", servedType) } - mi, err := wr.buildMigrater(ctx, migrationType, targetKeyspace, streams) + mi, err := wr.buildMigrater(ctx, migrationType, targetKeyspace, workflow) if err != nil { return err } @@ -114,8 +114,8 @@ func (wr *Wrangler) MigrateReads(ctx context.Context, migrationType binlogdatapb } // MigrateWrites is a generic way of migrating write traffic for a resharding workflow. 
-func (wr *Wrangler) MigrateWrites(ctx context.Context, migrationType binlogdatapb.MigrationType, targetKeyspace string, streams map[string][]uint32, filteredReplicationWaitTime time.Duration) error { - mi, err := wr.buildMigrater(ctx, migrationType, targetKeyspace, streams) +func (wr *Wrangler) MigrateWrites(ctx context.Context, migrationType binlogdatapb.MigrationType, targetKeyspace, workflow string, filteredReplicationWaitTime time.Duration) error { + mi, err := wr.buildMigrater(ctx, migrationType, targetKeyspace, workflow) if err != nil { return err } @@ -181,95 +181,119 @@ func (wr *Wrangler) MigrateWrites(ctx context.Context, migrationType binlogdatap return nil } -func (wr *Wrangler) buildMigrater(ctx context.Context, migrationType binlogdatapb.MigrationType, targetKeyspace string, streams map[string][]uint32) (*migrater, error) { +func (wr *Wrangler) buildMigrater(ctx context.Context, migrationType binlogdatapb.MigrationType, targetKeyspace, workflow string) (*migrater, error) { + targets, err := wr.buildMigrationTargets(ctx, targetKeyspace, workflow) + if err != nil { + return nil, err + } + mi := &migrater{ migrationType: migrationType, wr: wr, - id: hashStreams(targetKeyspace, streams), - targets: make(map[string]*miTarget), + id: hashStreams(targetKeyspace, targets), + targets: targets, sources: make(map[string]*miSource), targetKeyspace: targetKeyspace, } - mi.wr.Logger().Infof("Migration ID for streams %v: %d", streams, mi.id) - for targetShard, uids := range streams { - targetsi, err := mi.wr.ts.GetShard(ctx, targetKeyspace, targetShard) + mi.wr.Logger().Infof("Migration ID for workflow %s: %d", workflow, mi.id) + + // Build the sources + for _, target := range targets { + for _, bls := range target.sources { + if mi.sourceKeyspace == "" { + mi.sourceKeyspace = bls.Keyspace + } else if mi.sourceKeyspace != bls.Keyspace { + return nil, fmt.Errorf("source keyspaces are mismatched across streams: %v vs %v", mi.sourceKeyspace, bls.Keyspace) + } + if _, 
ok := mi.sources[bls.Shard]; ok { + continue + } + + sourcesi, err := mi.wr.ts.GetShard(ctx, bls.Keyspace, bls.Shard) + if err != nil { + return nil, err + } + sourceMaster, err := mi.wr.ts.GetTablet(ctx, sourcesi.MasterAlias) + if err != nil { + return nil, err + } + mi.sources[bls.Shard] = &miSource{ + si: sourcesi, + master: sourceMaster, + } + + if mi.tables == nil { + for _, rule := range bls.Filter.Rules { + mi.tables = append(mi.tables, rule.Match) + } + sort.Strings(mi.tables) + } else { + var tables []string + for _, rule := range bls.Filter.Rules { + tables = append(tables, rule.Match) + } + sort.Strings(tables) + if !reflect.DeepEqual(mi.tables, tables) { + return nil, fmt.Errorf("table lists are mismatched across streams: %v vs %v", mi.tables, tables) + } + } + } + } + return mi, nil +} + +func (wr *Wrangler) buildMigrationTargets(ctx context.Context, targetKeyspace, workflow string) (targets map[string]*miTarget, err error) { + targets = make(map[string]*miTarget) + targetShards, err := wr.ts.GetShardNames(ctx, targetKeyspace) + if err != nil { + return nil, err + } + for _, targetShard := range targetShards { + targetsi, err := wr.ts.GetShard(ctx, targetKeyspace, targetShard) + if err != nil { + return nil, err + } + targetMaster, err := wr.ts.GetTablet(ctx, targetsi.MasterAlias) if err != nil { return nil, err } - targetMaster, err := mi.wr.ts.GetTablet(ctx, targetsi.MasterAlias) + p3qr, err := wr.tmc.VReplicationExec(ctx, targetMaster.Tablet, fmt.Sprintf("select id, source from _vt.vreplication where workflow='%s' and db_name='%s'", workflow, targetMaster.DbName())) if err != nil { return nil, err } - mi.targets[targetShard] = &miTarget{ + if len(p3qr.Rows) < 1 { + continue + } + + targets[targetShard] = &miTarget{ si: targetsi, master: targetMaster, sources: make(map[uint32]*binlogdatapb.BinlogSource), } - for _, uid := range uids { - p3qr, err := mi.wr.tmc.VReplicationExec(ctx, targetMaster.Tablet, fmt.Sprintf("select source from _vt.vreplication 
where id=%d", uid)) + qr := sqltypes.Proto3ToResult(p3qr) + for _, row := range qr.Rows { + id, err := sqltypes.ToInt64(row[0]) if err != nil { return nil, err } - qr := sqltypes.Proto3ToResult(p3qr) - if len(qr.Rows) < 1 || len(qr.Rows[0]) < 1 { - return nil, fmt.Errorf("VReplication stream %d not found for %s:%s", int(uid), targetKeyspace, targetShard) - } - for _, row := range qr.Rows { - str := row[0].ToString() - var bls binlogdatapb.BinlogSource - if err := proto.UnmarshalText(str, &bls); err != nil { - return nil, err - } - mi.targets[targetShard].sources[uid] = &bls - - if mi.sourceKeyspace == "" { - mi.sourceKeyspace = bls.Keyspace - } else if mi.sourceKeyspace != bls.Keyspace { - return nil, fmt.Errorf("source keyspaces are mismatched across streams: %v vs %v", mi.sourceKeyspace, bls.Keyspace) - } - if _, ok := mi.sources[bls.Shard]; ok { - continue - } - - sourcesi, err := mi.wr.ts.GetShard(ctx, bls.Keyspace, bls.Shard) - if err != nil { - return nil, err - } - sourceMaster, err := mi.wr.ts.GetTablet(ctx, sourcesi.MasterAlias) - if err != nil { - return nil, err - } - mi.sources[bls.Shard] = &miSource{ - si: sourcesi, - master: sourceMaster, - } - - if mi.tables == nil { - for _, rule := range bls.Filter.Rules { - mi.tables = append(mi.tables, rule.Match) - } - sort.Strings(mi.tables) - } else { - var tables []string - for _, rule := range bls.Filter.Rules { - tables = append(tables, rule.Match) - } - sort.Strings(tables) - if !reflect.DeepEqual(mi.tables, tables) { - return nil, fmt.Errorf("table lists are mismatched across streams: %v vs %v", mi.tables, tables) - } - } + var bls binlogdatapb.BinlogSource + if err := proto.UnmarshalText(row[1].ToString(), &bls); err != nil { + return nil, err } + targets[targetShard].sources[uint32(id)] = &bls } } - return mi, nil + if len(targets) == 0 { + return nil, fmt.Errorf("no streams found in keyspace %s for: %s", targetKeyspace, workflow) + } + return targets, nil } // hashStreams produces a reproduceable hash 
based on the input parameters. -func hashStreams(targetKeyspace string, streams map[string][]uint32) int64 { +func hashStreams(targetKeyspace string, targets map[string]*miTarget) int64 { var expanded []string - for shard, uids := range streams { - for _, uid := range uids { + for shard, target := range targets { + for uid := range target.sources { expanded = append(expanded, fmt.Sprintf("%s:%d", shard, uid)) } } @@ -284,16 +308,6 @@ func hashStreams(targetKeyspace string, streams map[string][]uint32) int64 { } func (mi *migrater) validate(ctx context.Context) error { - // Ensure no duplicate sources in each target. - for _, target := range mi.targets { - uniqueSources := make(map[string]uint32) - for uid, bls := range target.sources { - if suid, ok := uniqueSources[bls.Shard]; ok { - return fmt.Errorf("duplicate sources for uids: %v and %v", suid, uid) - } - uniqueSources[bls.Shard] = uid - } - } if mi.migrationType == binlogdatapb.MigrationType_TABLES { // All shards must be present. if err := mi.compareShards(ctx, mi.sourceKeyspace, mi.sourceShards()); err != nil { diff --git a/go/vt/wrangler/migrater_env_test.go b/go/vt/wrangler/migrater_env_test.go index 6d7db1f226f..5384591ba48 100644 --- a/go/vt/wrangler/migrater_env_test.go +++ b/go/vt/wrangler/migrater_env_test.go @@ -35,6 +35,9 @@ import ( "vitess.io/vitess/go/vt/vttablet/tmclient" ) +const vreplQueryks = "select id, source from _vt.vreplication where workflow = 'test' and db_name = 'vt_ks'" +const vreplQueryks2 = "select id, source from _vt.vreplication where workflow = 'test' and db_name = 'vt_ks2'" + type testMigraterEnv struct { ts *topo.Server wr *Wrangler @@ -133,11 +136,6 @@ func newTestTableMigrater(ctx context.Context, t *testing.T) *testMigraterEnv { }}, }, } - tme.dbDest1Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "source", - "varchar"), - fmt.Sprintf("%v", bls1), - ), nil) bls2 := &binlogdatapb.BinlogSource{ Keyspace: 
"ks1", Shard: "40-", @@ -151,10 +149,11 @@ func newTestTableMigrater(ctx context.Context, t *testing.T) *testMigraterEnv { }}, }, } - tme.dbDest1Client.addQuery("select source from _vt.vreplication where id = 2", sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "source", - "varchar"), - fmt.Sprintf("%v", bls2), + tme.dbDest1Client.addQuery(vreplQueryks2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source", + "int64|varchar"), + fmt.Sprintf("1|%v", bls1), + fmt.Sprintf("2|%v", bls2), ), nil) bls3 := &binlogdatapb.BinlogSource{ Keyspace: "ks1", @@ -169,10 +168,10 @@ func newTestTableMigrater(ctx context.Context, t *testing.T) *testMigraterEnv { }}, }, } - tme.dbDest2Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "source", - "varchar"), - fmt.Sprintf("%v", bls3), + tme.dbDest2Client.addQuery(vreplQueryks2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source", + "int64|varchar"), + fmt.Sprintf("1|%v", bls3), ), nil) if err := tme.wr.saveRoutingRules(ctx, map[string][]string{ @@ -248,11 +247,6 @@ func newTestShardMigrater(ctx context.Context, t *testing.T) *testMigraterEnv { }}, }, } - tme.dbDest1Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "source", - "varchar"), - fmt.Sprintf("%v", bls1), - ), nil) bls2 := &binlogdatapb.BinlogSource{ Keyspace: "ks", Shard: "40-", @@ -263,10 +257,11 @@ func newTestShardMigrater(ctx context.Context, t *testing.T) *testMigraterEnv { }}, }, } - tme.dbDest1Client.addQuery("select source from _vt.vreplication where id = 2", sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "source", - "varchar"), - fmt.Sprintf("%v", bls2), + tme.dbDest1Client.addQuery(vreplQueryks, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source", + "int64|varchar"), + fmt.Sprintf("1|%v", bls1), + fmt.Sprintf("2|%v", bls2), ), nil) bls3 := &binlogdatapb.BinlogSource{ Keyspace: "ks", @@ -278,10 
+273,10 @@ func newTestShardMigrater(ctx context.Context, t *testing.T) *testMigraterEnv { }}, }, } - tme.dbDest2Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "source", - "varchar"), - fmt.Sprintf("%v", bls3), + tme.dbDest2Client.addQuery(vreplQueryks, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source", + "int64|varchar"), + fmt.Sprintf("1|%v", bls3), ), nil) tme.targetKeyspace = "ks" @@ -289,6 +284,8 @@ func newTestShardMigrater(ctx context.Context, t *testing.T) *testMigraterEnv { "-80": {1, 2}, "80-": {1}, } + tme.dbSource1Client.addQuery(vreplQueryks, &sqltypes.Result{}, nil) + tme.dbSource2Client.addQuery(vreplQueryks, &sqltypes.Result{}, nil) return tme } diff --git a/go/vt/wrangler/migrater_test.go b/go/vt/wrangler/migrater_test.go index 08459b50305..c4c5eff46e2 100644 --- a/go/vt/wrangler/migrater_test.go +++ b/go/vt/wrangler/migrater_test.go @@ -40,7 +40,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Single cell RDONLY migration. - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, []string{"cell1"}, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", []string{"cell1"}, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } @@ -69,7 +69,7 @@ func TestTableMigrate(t *testing.T) { // The global routing already contains redirections for rdonly. // So, adding routes for replica and deploying to cell2 will also cause // cell2 to migrat rdonly. This is a quirk that can be fixed later if necessary. 
- err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", []string{"cell2"}, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } @@ -107,7 +107,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Single cell backward REPLICA migration. - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionBackward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", []string{"cell2"}, topodatapb.TabletType_REPLICA, directionBackward) if err != nil { t.Fatal(err) } @@ -127,7 +127,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Migrate all REPLICA. - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", nil, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } @@ -153,7 +153,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // All cells RDONLY backward migration. 
- err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionBackward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionBackward) if err != nil { t.Fatal(err) } @@ -173,7 +173,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Can't migrate master with MigrateReads. - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_MASTER, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", nil, topodatapb.TabletType_MASTER, directionForward) want := "tablet type must be REPLICA or RDONLY: MASTER" if err == nil || err.Error() != want { t.Errorf("MigrateReads(master) err: %v, want %v", err, want) @@ -182,7 +182,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Can't migrate writes if REPLICA and RDONLY have not fully migrated yet. - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, 1*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", 1*time.Second) want = "missing tablet type specific routing, read-only traffic must be migrated before migrating writes" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateWrites err: %v, want %v", err, want) @@ -193,7 +193,7 @@ func TestTableMigrate(t *testing.T) { // Test MigrateWrites cancelation on failure. // Migrate all the reads first. 
- err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } @@ -248,7 +248,7 @@ func TestTableMigrate(t *testing.T) { tme.dbDest2Client.addQuery(cancel1, &sqltypes.Result{}, nil) tme.dbDest1Client.addQuery(cancel2, &sqltypes.Result{}, nil) - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, 0*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", 0*time.Second) want = "DeadlineExceeded" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateWrites(0 timeout) err: %v, must contain %v", err, want) @@ -298,7 +298,7 @@ func TestTableMigrate(t *testing.T) { tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, 1*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", 1*time.Second) if err != nil { t.Fatal(err) } @@ -332,7 +332,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Single cell RDONLY migration. 
- err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, []string{"cell1"}, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", []string{"cell1"}, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } @@ -348,7 +348,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Other cell REPLICA migration. - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", []string{"cell2"}, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } @@ -364,7 +364,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Single cell backward REPLICA migration. - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, []string{"cell2"}, topodatapb.TabletType_REPLICA, directionBackward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", []string{"cell2"}, topodatapb.TabletType_REPLICA, directionBackward) if err != nil { t.Fatal(err) } @@ -383,7 +383,7 @@ func TestShardMigrate(t *testing.T) { // This is an extra step that does not exist in the tables test. // The per-cell migration mechanism is different for tables. So, this // extra step is needed to bring things in sync. 
- err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } @@ -395,7 +395,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Migrate all REPLICA. - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", nil, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } @@ -407,7 +407,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // All cells RDONLY backward migration. - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionBackward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionBackward) if err != nil { t.Fatal(err) } @@ -419,7 +419,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Can't migrate master with MigrateReads. 
- err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_MASTER, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", nil, topodatapb.TabletType_MASTER, directionForward) want := "tablet type must be REPLICA or RDONLY: MASTER" if err == nil || err.Error() != want { t.Errorf("MigrateReads(master) err: %v, want %v", err, want) @@ -428,7 +428,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Can't migrate writes if REPLICA and RDONLY have not fully migrated yet. - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, 1*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", 1*time.Second) want = "cannot migrate MASTER away" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateWrites err: %v, want %v", err, want) @@ -439,7 +439,7 @@ func TestShardMigrate(t *testing.T) { // Test MigrateWrites cancelation on failure. // Migrate all the reads first. 
- err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } @@ -484,7 +484,7 @@ func TestShardMigrate(t *testing.T) { tme.dbDest2Client.addQuery(cancel1, &sqltypes.Result{}, nil) tme.dbDest1Client.addQuery(cancel2, &sqltypes.Result{}, nil) - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, 0*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", 0*time.Second) want = "DeadlineExceeded" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateWrites(0 timeout) err: %v, must contain %v", err, want) @@ -520,7 +520,7 @@ func TestShardMigrate(t *testing.T) { tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, 1*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", 1*time.Second) if err != nil { t.Fatal(err) } @@ -545,11 +545,11 @@ func TestMigrateFailJournal(t *testing.T) { tme := newTestTableMigrater(ctx, t) defer tme.stopTablets(t) - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, 
topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", nil, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } @@ -590,7 +590,7 @@ func TestMigrateFailJournal(t *testing.T) { tme.dbSource1Client.addQueryRE("insert into _vt.resharding_journal", nil, errors.New("journaling intentionally failed")) tme.dbSource2Client.addQueryRE("insert into _vt.resharding_journal", nil, errors.New("journaling intentionally failed")) - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, 1*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", 1*time.Second) want := "journaling intentionally failed" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateWrites(0 timeout) err: %v, must contain %v", err, want) @@ -613,11 +613,11 @@ func TestTableMigrateJournalExists(t *testing.T) { tme := newTestTableMigrater(ctx, t) defer tme.stopTablets(t) - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", nil, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } @@ -648,7 +648,7 @@ func TestTableMigrateJournalExists(t *testing.T) { tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", 
&sqltypes.Result{}, nil) - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, 1*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", 1*time.Second) if err != nil { t.Fatal(err) } @@ -675,11 +675,11 @@ func TestShardMigrateJournalExists(t *testing.T) { tme := newTestShardMigrater(ctx, t) defer tme.stopTablets(t) - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", nil, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } @@ -710,7 +710,7 @@ func TestShardMigrateJournalExists(t *testing.T) { tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, 1*time.Second) + err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", 1*time.Second) if err != nil { t.Fatal(err) } @@ -728,6 +728,21 @@ func TestShardMigrateJournalExists(t *testing.T) { verifyQueries(t, tme.allDBClients) } +func TestMigrateNoStreamsFound(t *testing.T) { + ctx := context.Background() + tme := newTestTableMigrater(ctx, t) + defer tme.stopTablets(t) + + tme.dbDest1Client.addQuery(vreplQueryks2, &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery(vreplQueryks2, 
&sqltypes.Result{}, nil) + + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) + want := "no streams found in keyspace ks2 for: test" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateReads: %v, must contain %v", err, want) + } +} + func TestMigrateDistinctSources(t *testing.T) { ctx := context.Background() tme := newTestTableMigrater(ctx, t) @@ -746,33 +761,19 @@ func TestMigrateDistinctSources(t *testing.T) { }}, }, } - tme.dbDest1Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "source", - "varchar"), - fmt.Sprintf("%v", bls), + tme.dbDest1Client.addQuery(vreplQueryks2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source", + "int64|varchar"), + fmt.Sprintf("1|%v", bls), ), nil) - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) want := "source keyspaces are mismatched across streams" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) } } -func TestMigrateVReplicationStreamNotFound(t *testing.T) { - ctx := context.Background() - tme := newTestTableMigrater(ctx, t) - defer tme.stopTablets(t) - - tme.dbDest1Client.addQuery("select source from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) - - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) - want := "VReplication stream 1 not found for ks2:-80" - if err == nil || !strings.Contains(err.Error(), want) { - t.Errorf("MigrateReads: %v, must contain %v", err, want) - } -} - func 
TestMigrateMismatchedTables(t *testing.T) { ctx := context.Background() tme := newTestTableMigrater(ctx, t) @@ -788,60 +789,27 @@ func TestMigrateMismatchedTables(t *testing.T) { }}, }, } - tme.dbDest1Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "source", - "varchar"), - fmt.Sprintf("%v", bls), + tme.dbDest1Client.addQuery(vreplQueryks2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source", + "int64|varchar"), + fmt.Sprintf("1|%v", bls), ), nil) - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) want := "table lists are mismatched across streams" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) } } -func TestMigrateDupUidSources(t *testing.T) { - ctx := context.Background() - tme := newTestTableMigrater(ctx, t) - defer tme.stopTablets(t) - - bls := &binlogdatapb.BinlogSource{ - Keyspace: "ks1", - Shard: "40-", - Filter: &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "t1", - Filter: "select * from t1 where in_keyrange('80-')", - }, { - Match: "t2", - Filter: "select * from t2 where in_keyrange('80-')", - }}, - }, - } - tme.dbDest1Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "source", - "varchar"), - fmt.Sprintf("%v", bls), - ), nil) - - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) - want := "duplicate sources for uids" - if err == nil || !strings.Contains(err.Error(), want) { - t.Errorf("MigrateReads: %v, must contain %v", err, want) - } -} - func 
TestTableMigrateAllShardsNotPresent(t *testing.T) { ctx := context.Background() tme := newTestTableMigrater(ctx, t) defer tme.stopTablets(t) - tme.streams = map[string][]uint32{ - "-80": {1, 2}, - } + tme.dbDest1Client.addQuery(vreplQueryks2, &sqltypes.Result{}, nil) - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) want := "mismatched shards for keyspace" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -863,11 +831,6 @@ func TestMigrateNoTableWildcards(t *testing.T) { }}, }, } - tme.dbDest1Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "source", - "varchar"), - fmt.Sprintf("%v", bls1), - ), nil) bls2 := &binlogdatapb.BinlogSource{ Keyspace: "ks1", Shard: "40-", @@ -878,10 +841,11 @@ func TestMigrateNoTableWildcards(t *testing.T) { }}, }, } - tme.dbDest1Client.addQuery("select source from _vt.vreplication where id = 2", sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "source", - "varchar"), - fmt.Sprintf("%v", bls2), + tme.dbDest1Client.addQuery(vreplQueryks2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source", + "int64|varchar"), + fmt.Sprintf("1|%v", bls1), + fmt.Sprintf("2|%v", bls2), ), nil) bls3 := &binlogdatapb.BinlogSource{ Keyspace: "ks1", @@ -893,13 +857,13 @@ func TestMigrateNoTableWildcards(t *testing.T) { }}, }, } - tme.dbDest2Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "source", - "varchar"), - fmt.Sprintf("%v", bls3), + tme.dbDest2Client.addQuery(vreplQueryks2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source", + "int64|varchar"), + fmt.Sprintf("1|%v", bls3), ), 
nil) - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) want := "cannot migrate streams with wild card table names" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -911,7 +875,7 @@ func TestShardMigrateSourceTargetMismatch(t *testing.T) { tme := newTestTableMigrater(ctx, t) defer tme.stopTablets(t) - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) want := "source and target keyspace must match" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -933,15 +897,13 @@ func TestShardMigrateTargetMatchesSource(t *testing.T) { }}, }, } - tme.dbSource1Client.addQuery("select source from _vt.vreplication where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "source", - "varchar"), - fmt.Sprintf("%v", bls), + tme.dbDest1Client.addQuery(vreplQueryks, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source", + "int64|varchar"), + fmt.Sprintf("1|%v", bls), ), nil) - tme.streams["-40"] = []uint32{1} - - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, tme.streams, nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) want := "target shard matches a source shard" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must 
contain %v", err, want) From 0257cc06f74be69c664cddcc801bbe3c43412488 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Mon, 8 Jul 2019 19:01:52 -0700 Subject: [PATCH 14/17] migrater: auto-detect migration type Signed-off-by: Sugu Sougoumarane --- go/vt/wrangler/migrater.go | 48 +++++++++--------- go/vt/wrangler/migrater_test.go | 90 +++++++++++++++------------------ 2 files changed, 66 insertions(+), 72 deletions(-) diff --git a/go/vt/wrangler/migrater.go b/go/vt/wrangler/migrater.go index 6db2a1d688c..6052acb0c7b 100644 --- a/go/vt/wrangler/migrater.go +++ b/go/vt/wrangler/migrater.go @@ -88,11 +88,11 @@ type miSource struct { } // MigrateReads is a generic way of migrating read traffic for a resharding workflow. -func (wr *Wrangler) MigrateReads(ctx context.Context, migrationType binlogdatapb.MigrationType, targetKeyspace, workflow string, cells []string, servedType topodatapb.TabletType, direction migrateDirection) error { +func (wr *Wrangler) MigrateReads(ctx context.Context, targetKeyspace, workflow string, cells []string, servedType topodatapb.TabletType, direction migrateDirection) error { if servedType != topodatapb.TabletType_REPLICA && servedType != topodatapb.TabletType_RDONLY { return fmt.Errorf("tablet type must be REPLICA or RDONLY: %v", servedType) } - mi, err := wr.buildMigrater(ctx, migrationType, targetKeyspace, workflow) + mi, err := wr.buildMigrater(ctx, targetKeyspace, workflow) if err != nil { return err } @@ -114,29 +114,29 @@ func (wr *Wrangler) MigrateReads(ctx context.Context, migrationType binlogdatapb } // MigrateWrites is a generic way of migrating write traffic for a resharding workflow. 
-func (wr *Wrangler) MigrateWrites(ctx context.Context, migrationType binlogdatapb.MigrationType, targetKeyspace, workflow string, filteredReplicationWaitTime time.Duration) error { - mi, err := wr.buildMigrater(ctx, migrationType, targetKeyspace, workflow) +func (wr *Wrangler) MigrateWrites(ctx context.Context, targetKeyspace, workflow string, filteredReplicationWaitTime time.Duration) (journalID int64, err error) { + mi, err := wr.buildMigrater(ctx, targetKeyspace, workflow) if err != nil { - return err + return 0, err } mi.wr.Logger().Infof("Built migration metadata: %+v", mi) if err := mi.validate(ctx); err != nil { - return err + return 0, err } if err := mi.validateForWrite(ctx); err != nil { - return err + return 0, err } // Need to lock both source and target keyspaces. ctx, sourceUnlock, lockErr := wr.ts.LockKeyspace(ctx, mi.sourceKeyspace, "MigrateWrites") if lockErr != nil { - return lockErr + return 0, lockErr } defer sourceUnlock(&err) if mi.targetKeyspace != mi.sourceKeyspace { tctx, targetUnlock, lockErr := wr.ts.LockKeyspace(ctx, mi.targetKeyspace, "MigrateWrites") if lockErr != nil { - return lockErr + return 0, lockErr } ctx = tctx defer targetUnlock(&err) @@ -144,51 +144,50 @@ func (wr *Wrangler) MigrateWrites(ctx context.Context, migrationType binlogdatap journalsExist, err := mi.checkJournals(ctx) if err != nil { - return err + return 0, err } if !journalsExist { mi.wr.Logger().Infof("No previous journals were found. Proceeding normally.") if err := mi.stopSourceWrites(ctx); err != nil { mi.cancelMigration(ctx) - return err + return 0, err } if err := mi.waitForCatchup(ctx, filteredReplicationWaitTime); err != nil { mi.cancelMigration(ctx) - return err + return 0, err } } else { mi.wr.Logger().Infof("Journals were found. Completing the left over steps.") // Need to gather positions in case all journals were not created. if err := mi.gatherPositions(ctx); err != nil { - return err + return 0, err } } // This is the point of no return. 
Once a journal is created, // traffic can be redirected to target shards. if err := mi.createJournals(ctx); err != nil { - return err + return 0, err } if err := mi.createReverseReplication(ctx); err != nil { - return err + return 0, err } if err := mi.allowTargetWrites(ctx); err != nil { - return err + return 0, err } if err := mi.changeRouting(ctx); err != nil { - return err + return 0, err } mi.deleteTargetVReplication(ctx) - return nil + return mi.id, nil } -func (wr *Wrangler) buildMigrater(ctx context.Context, migrationType binlogdatapb.MigrationType, targetKeyspace, workflow string) (*migrater, error) { +func (wr *Wrangler) buildMigrater(ctx context.Context, targetKeyspace, workflow string) (*migrater, error) { targets, err := wr.buildMigrationTargets(ctx, targetKeyspace, workflow) if err != nil { return nil, err } mi := &migrater{ - migrationType: migrationType, wr: wr, id: hashStreams(targetKeyspace, targets), targets: targets, @@ -239,6 +238,11 @@ func (wr *Wrangler) buildMigrater(ctx context.Context, migrationType binlogdatap } } } + if mi.sourceKeyspace != mi.targetKeyspace { + mi.migrationType = binlogdatapb.MigrationType_TABLES + } else { + mi.migrationType = binlogdatapb.MigrationType_SHARDS + } return mi, nil } @@ -323,10 +327,6 @@ func (mi *migrater) validate(ctx context.Context) error { } } } else { // binlogdatapb.MigrationType_SHARDS - // Source and target keyspace must match - if mi.sourceKeyspace != mi.targetKeyspace { - return fmt.Errorf("source and target keyspace must match: %v vs %v", mi.sourceKeyspace, mi.targetKeyspace) - } // Source and target shards must not match. 
for sourceShard := range mi.sources { if _, ok := mi.targets[sourceShard]; ok { diff --git a/go/vt/wrangler/migrater_test.go b/go/vt/wrangler/migrater_test.go index c4c5eff46e2..6e9c9c93e6e 100644 --- a/go/vt/wrangler/migrater_test.go +++ b/go/vt/wrangler/migrater_test.go @@ -40,7 +40,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Single cell RDONLY migration. - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", []string{"cell1"}, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", []string{"cell1"}, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } @@ -69,7 +69,7 @@ func TestTableMigrate(t *testing.T) { // The global routing already contains redirections for rdonly. // So, adding routes for replica and deploying to cell2 will also cause // cell2 to migrat rdonly. This is a quirk that can be fixed later if necessary. - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", []string{"cell2"}, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", []string{"cell2"}, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } @@ -107,7 +107,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Single cell backward REPLICA migration. 
- err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", []string{"cell2"}, topodatapb.TabletType_REPLICA, directionBackward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", []string{"cell2"}, topodatapb.TabletType_REPLICA, directionBackward) if err != nil { t.Fatal(err) } @@ -127,7 +127,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Migrate all REPLICA. - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", nil, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } @@ -153,7 +153,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // All cells RDONLY backward migration. - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionBackward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionBackward) if err != nil { t.Fatal(err) } @@ -173,7 +173,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Can't migrate master with MigrateReads. 
- err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", nil, topodatapb.TabletType_MASTER, directionForward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_MASTER, directionForward) want := "tablet type must be REPLICA or RDONLY: MASTER" if err == nil || err.Error() != want { t.Errorf("MigrateReads(master) err: %v, want %v", err, want) @@ -182,7 +182,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Can't migrate writes if REPLICA and RDONLY have not fully migrated yet. - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", 1*time.Second) + _, err = tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 1*time.Second) want = "missing tablet type specific routing, read-only traffic must be migrated before migrating writes" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateWrites err: %v, want %v", err, want) @@ -193,7 +193,7 @@ func TestTableMigrate(t *testing.T) { // Test MigrateWrites cancelation on failure. // Migrate all the reads first. 
- err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } @@ -248,7 +248,7 @@ func TestTableMigrate(t *testing.T) { tme.dbDest2Client.addQuery(cancel1, &sqltypes.Result{}, nil) tme.dbDest1Client.addQuery(cancel2, &sqltypes.Result{}, nil) - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", 0*time.Second) + _, err = tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 0*time.Second) want = "DeadlineExceeded" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateWrites(0 timeout) err: %v, must contain %v", err, want) @@ -298,10 +298,13 @@ func TestTableMigrate(t *testing.T) { tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", 1*time.Second) + journalID, err := tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 1*time.Second) if err != nil { t.Fatal(err) } + if journalID != 9113431017721636330 { + t.Errorf("journal id: %d, want 9113431017721636330", journalID) + } checkRouting(t, tme.wr, map[string][]string{ "t1": {"ks2.t1"}, @@ -332,7 +335,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Single cell RDONLY migration. 
- err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", []string{"cell1"}, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", []string{"cell1"}, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } @@ -348,7 +351,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Other cell REPLICA migration. - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", []string{"cell2"}, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", []string{"cell2"}, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } @@ -364,7 +367,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Single cell backward REPLICA migration. - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", []string{"cell2"}, topodatapb.TabletType_REPLICA, directionBackward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", []string{"cell2"}, topodatapb.TabletType_REPLICA, directionBackward) if err != nil { t.Fatal(err) } @@ -383,7 +386,7 @@ func TestShardMigrate(t *testing.T) { // This is an extra step that does not exist in the tables test. // The per-cell migration mechanism is different for tables. So, this // extra step is needed to bring things in sync. 
- err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } @@ -395,7 +398,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Migrate all REPLICA. - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", nil, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } @@ -407,7 +410,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // All cells RDONLY backward migration. - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionBackward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionBackward) if err != nil { t.Fatal(err) } @@ -419,7 +422,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Can't migrate master with MigrateReads. 
- err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", nil, topodatapb.TabletType_MASTER, directionForward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_MASTER, directionForward) want := "tablet type must be REPLICA or RDONLY: MASTER" if err == nil || err.Error() != want { t.Errorf("MigrateReads(master) err: %v, want %v", err, want) @@ -428,7 +431,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Can't migrate writes if REPLICA and RDONLY have not fully migrated yet. - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", 1*time.Second) + _, err = tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 1*time.Second) want = "cannot migrate MASTER away" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateWrites err: %v, want %v", err, want) @@ -439,7 +442,7 @@ func TestShardMigrate(t *testing.T) { // Test MigrateWrites cancelation on failure. // Migrate all the reads first. 
- err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } @@ -484,7 +487,7 @@ func TestShardMigrate(t *testing.T) { tme.dbDest2Client.addQuery(cancel1, &sqltypes.Result{}, nil) tme.dbDest1Client.addQuery(cancel2, &sqltypes.Result{}, nil) - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", 0*time.Second) + _, err = tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 0*time.Second) want = "DeadlineExceeded" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateWrites(0 timeout) err: %v, must contain %v", err, want) @@ -520,10 +523,13 @@ func TestShardMigrate(t *testing.T) { tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", 1*time.Second) + journalID, err := tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 1*time.Second) if err != nil { t.Fatal(err) } + if journalID != 6432976123657117098 { + t.Errorf("journal id: %d, want 6432976123657117098", journalID) + } checkServedTypes(t, tme.ts, "ks:-40", 0) checkServedTypes(t, tme.ts, "ks:40-", 0) @@ -545,11 +551,11 @@ func TestMigrateFailJournal(t *testing.T) { tme := newTestTableMigrater(ctx, t) defer tme.stopTablets(t) - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, 
tme.targetKeyspace, "test", nil, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } @@ -590,7 +596,7 @@ func TestMigrateFailJournal(t *testing.T) { tme.dbSource1Client.addQueryRE("insert into _vt.resharding_journal", nil, errors.New("journaling intentionally failed")) tme.dbSource2Client.addQueryRE("insert into _vt.resharding_journal", nil, errors.New("journaling intentionally failed")) - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", 1*time.Second) + _, err = tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 1*time.Second) want := "journaling intentionally failed" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateWrites(0 timeout) err: %v, must contain %v", err, want) @@ -613,11 +619,11 @@ func TestTableMigrateJournalExists(t *testing.T) { tme := newTestTableMigrater(ctx, t) defer tme.stopTablets(t) - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", nil, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } @@ -648,7 +654,7 @@ func TestTableMigrateJournalExists(t *testing.T) { tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", 1*time.Second) 
+ _, err = tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 1*time.Second) if err != nil { t.Fatal(err) } @@ -675,11 +681,11 @@ func TestShardMigrateJournalExists(t *testing.T) { tme := newTestShardMigrater(ctx, t) defer tme.stopTablets(t) - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) if err != nil { t.Fatal(err) } - err = tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", nil, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_REPLICA, directionForward) if err != nil { t.Fatal(err) } @@ -710,7 +716,7 @@ func TestShardMigrateJournalExists(t *testing.T) { tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) - err = tme.wr.MigrateWrites(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", 1*time.Second) + _, err = tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 1*time.Second) if err != nil { t.Fatal(err) } @@ -736,7 +742,7 @@ func TestMigrateNoStreamsFound(t *testing.T) { tme.dbDest1Client.addQuery(vreplQueryks2, &sqltypes.Result{}, nil) tme.dbDest2Client.addQuery(vreplQueryks2, &sqltypes.Result{}, nil) - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) want := "no streams found in keyspace ks2 for: test" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -767,7 +773,7 @@ func 
TestMigrateDistinctSources(t *testing.T) { fmt.Sprintf("1|%v", bls), ), nil) - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) want := "source keyspaces are mismatched across streams" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -795,7 +801,7 @@ func TestMigrateMismatchedTables(t *testing.T) { fmt.Sprintf("1|%v", bls), ), nil) - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) want := "table lists are mismatched across streams" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -809,7 +815,7 @@ func TestTableMigrateAllShardsNotPresent(t *testing.T) { tme.dbDest1Client.addQuery(vreplQueryks2, &sqltypes.Result{}, nil) - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) want := "mismatched shards for keyspace" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -863,25 +869,13 @@ func TestMigrateNoTableWildcards(t *testing.T) { fmt.Sprintf("1|%v", bls3), ), nil) - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_TABLES, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) want := "cannot 
migrate streams with wild card table names" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) } } -func TestShardMigrateSourceTargetMismatch(t *testing.T) { - ctx := context.Background() - tme := newTestTableMigrater(ctx, t) - defer tme.stopTablets(t) - - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) - want := "source and target keyspace must match" - if err == nil || !strings.Contains(err.Error(), want) { - t.Errorf("MigrateReads: %v, must contain %v", err, want) - } -} - func TestShardMigrateTargetMatchesSource(t *testing.T) { ctx := context.Background() tme := newTestShardMigrater(ctx, t) @@ -903,7 +897,7 @@ func TestShardMigrateTargetMatchesSource(t *testing.T) { fmt.Sprintf("1|%v", bls), ), nil) - err := tme.wr.MigrateReads(ctx, binlogdatapb.MigrationType_SHARDS, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) want := "target shard matches a source shard" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) From 3b44876002f5c3cfd20f879994388ce643120b89 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Mon, 8 Jul 2019 20:33:23 -0700 Subject: [PATCH 15/17] migrater: expose separate vtctl commands Signed-off-by: Sugu Sougoumarane --- go/vt/vtctl/vtctl.go | 52 +++++++++++++++++++++++++++++++ go/vt/wrangler/migrater.go | 19 ++++++------ go/vt/wrangler/migrater_test.go | 54 ++++++++++++++++----------------- 3 files changed, 89 insertions(+), 36 deletions(-) diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index 018e007fdb5..4761e460aa7 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -320,6 +320,12 @@ var commands = []commandGroup{ {"MigrateServedFrom", 
commandMigrateServedFrom, "[-cells=c1,c2,...] [-reverse] ", "Makes the serve the given type. This command also rebuilds the serving graph."}, + {"MigrateReads", commandMigrateReads, + "[-cells=c1,c2,...] [-reverse] ", + "Migrate read traffic for the specified workflow."}, + {"MigrateWrites", commandMigrateWrites, + " ", + "Migrate write traffic for the specified workflow."}, {"CancelResharding", commandCancelResharding, "", "Permanently cancels a resharding in progress. All resharding related metadata will be deleted."}, @@ -1815,6 +1821,52 @@ func commandMigrateServedFrom(ctx context.Context, wr *wrangler.Wrangler, subFla return wr.MigrateServedFrom(ctx, keyspace, shard, servedType, cells, *reverse, *filteredReplicationWaitTime) } +func commandMigrateReads(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { + reverse := subFlags.Bool("reverse", false, "Moves the served tablet type backward instead of forward. Use in case of trouble") + cellsStr := subFlags.String("cells", "", "Specifies a comma-separated list of cells to update") + if err := subFlags.Parse(args); err != nil { + return err + } + if subFlags.NArg() != 3 { + return fmt.Errorf("the , and arguments are required for the MigrateReads command") + } + + keyspace := subFlags.Arg(0) + workflow := subFlags.Arg(1) + servedType, err := parseTabletType(subFlags.Arg(2), []topodatapb.TabletType{topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY}) + if err != nil { + return err + } + var cells []string + if *cellsStr != "" { + cells = strings.Split(*cellsStr, ",") + } + direction := wrangler.DirectionForward + if *reverse { + direction = wrangler.DirectionBackward + } + return wr.MigrateReads(ctx, keyspace, workflow, servedType, cells, direction) +} + +func commandMigrateWrites(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { + filteredReplicationWaitTime := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, 
"Specifies the maximum time to wait, in seconds, for filtered replication to catch up on master migrations") + if err := subFlags.Parse(args); err != nil { + return err + } + if subFlags.NArg() != 2 { + return fmt.Errorf("the and arguments are required for the MigrateWrites command") + } + + keyspace := subFlags.Arg(0) + workflow := subFlags.Arg(1) + journalID, err := wr.MigrateWrites(ctx, keyspace, workflow, *filteredReplicationWaitTime) + if err != nil { + return err + } + wr.Logger().Infof("Migration Journal ID: %v", journalID) + return nil +} + func commandCancelResharding(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { if err := subFlags.Parse(args); err != nil { return err diff --git a/go/vt/wrangler/migrater.go b/go/vt/wrangler/migrater.go index 6052acb0c7b..5abab1733f1 100644 --- a/go/vt/wrangler/migrater.go +++ b/go/vt/wrangler/migrater.go @@ -42,12 +42,13 @@ import ( "vitess.io/vitess/go/vt/vtgate/vindexes" ) -// migrateDirection specifies the migration direction. -type migrateDirection int +// MigrateDirection specifies the migration direction. +type MigrateDirection int +// The following constants define the migration direction. const ( - directionForward = migrateDirection(iota) - directionBackward + DirectionForward = MigrateDirection(iota) + DirectionBackward ) // accessType specifies the type of access for a shard (allow/disallow writes). @@ -88,7 +89,7 @@ type miSource struct { } // MigrateReads is a generic way of migrating read traffic for a resharding workflow. 
-func (wr *Wrangler) MigrateReads(ctx context.Context, targetKeyspace, workflow string, cells []string, servedType topodatapb.TabletType, direction migrateDirection) error { +func (wr *Wrangler) MigrateReads(ctx context.Context, targetKeyspace, workflow string, servedType topodatapb.TabletType, cells []string, direction MigrateDirection) error { if servedType != topodatapb.TabletType_REPLICA && servedType != topodatapb.TabletType_RDONLY { return fmt.Errorf("tablet type must be REPLICA or RDONLY: %v", servedType) } @@ -409,7 +410,7 @@ func (mi *migrater) compareShards(ctx context.Context, keyspace string, sis []*t return nil } -func (mi *migrater) migrateTableReads(ctx context.Context, cells []string, servedType topodatapb.TabletType, direction migrateDirection) error { +func (mi *migrater) migrateTableReads(ctx context.Context, cells []string, servedType topodatapb.TabletType, direction MigrateDirection) error { rules, err := mi.wr.getRoutingRules(ctx) if err != nil { return err @@ -421,7 +422,7 @@ func (mi *migrater) migrateTableReads(ctx context.Context, cells []string, serve // For backward, we delete them. tt := strings.ToLower(servedType.String()) for _, table := range mi.tables { - if direction == directionForward { + if direction == DirectionForward { rules[table+"@"+tt] = []string{mi.targetKeyspace + "." + table} rules[mi.targetKeyspace+"."+table+"@"+tt] = []string{mi.targetKeyspace + "." + table} rules[mi.sourceKeyspace+"."+table+"@"+tt] = []string{mi.targetKeyspace + "." 
+ table} @@ -437,9 +438,9 @@ func (mi *migrater) migrateTableReads(ctx context.Context, cells []string, serve return mi.wr.ts.RebuildSrvVSchema(ctx, cells) } -func (mi *migrater) migrateShardReads(ctx context.Context, cells []string, servedType topodatapb.TabletType, direction migrateDirection) error { +func (mi *migrater) migrateShardReads(ctx context.Context, cells []string, servedType topodatapb.TabletType, direction MigrateDirection) error { var fromShards, toShards []*topo.ShardInfo - if direction == directionForward { + if direction == DirectionForward { fromShards, toShards = mi.sourceShards(), mi.targetShards() } else { fromShards, toShards = mi.targetShards(), mi.sourceShards() diff --git a/go/vt/wrangler/migrater_test.go b/go/vt/wrangler/migrater_test.go index 6e9c9c93e6e..b0d239ed288 100644 --- a/go/vt/wrangler/migrater_test.go +++ b/go/vt/wrangler/migrater_test.go @@ -40,7 +40,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Single cell RDONLY migration. - err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", []string{"cell1"}, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, []string{"cell1"}, DirectionForward) if err != nil { t.Fatal(err) } @@ -69,7 +69,7 @@ func TestTableMigrate(t *testing.T) { // The global routing already contains redirections for rdonly. // So, adding routes for replica and deploying to cell2 will also cause // cell2 to migrat rdonly. This is a quirk that can be fixed later if necessary. 
- err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", []string{"cell2"}, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, []string{"cell2"}, DirectionForward) if err != nil { t.Fatal(err) } @@ -107,7 +107,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Single cell backward REPLICA migration. - err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", []string{"cell2"}, topodatapb.TabletType_REPLICA, directionBackward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, []string{"cell2"}, DirectionBackward) if err != nil { t.Fatal(err) } @@ -127,7 +127,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Migrate all REPLICA. - err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, nil, DirectionForward) if err != nil { t.Fatal(err) } @@ -153,7 +153,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // All cells RDONLY backward migration. - err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionBackward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionBackward) if err != nil { t.Fatal(err) } @@ -173,7 +173,7 @@ func TestTableMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Can't migrate master with MigrateReads. 
- err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_MASTER, directionForward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_MASTER, nil, DirectionForward) want := "tablet type must be REPLICA or RDONLY: MASTER" if err == nil || err.Error() != want { t.Errorf("MigrateReads(master) err: %v, want %v", err, want) @@ -193,7 +193,7 @@ func TestTableMigrate(t *testing.T) { // Test MigrateWrites cancelation on failure. // Migrate all the reads first. - err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) if err != nil { t.Fatal(err) } @@ -335,7 +335,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Single cell RDONLY migration. - err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", []string{"cell1"}, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, []string{"cell1"}, DirectionForward) if err != nil { t.Fatal(err) } @@ -351,7 +351,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Other cell REPLICA migration. - err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", []string{"cell2"}, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, []string{"cell2"}, DirectionForward) if err != nil { t.Fatal(err) } @@ -367,7 +367,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Single cell backward REPLICA migration. 
- err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", []string{"cell2"}, topodatapb.TabletType_REPLICA, directionBackward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, []string{"cell2"}, DirectionBackward) if err != nil { t.Fatal(err) } @@ -386,7 +386,7 @@ func TestShardMigrate(t *testing.T) { // This is an extra step that does not exist in the tables test. // The per-cell migration mechanism is different for tables. So, this // extra step is needed to bring things in sync. - err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) if err != nil { t.Fatal(err) } @@ -398,7 +398,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Migrate all REPLICA. - err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, nil, DirectionForward) if err != nil { t.Fatal(err) } @@ -410,7 +410,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // All cells RDONLY backward migration. - err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionBackward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionBackward) if err != nil { t.Fatal(err) } @@ -422,7 +422,7 @@ func TestShardMigrate(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Can't migrate master with MigrateReads. 
- err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_MASTER, directionForward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_MASTER, nil, DirectionForward) want := "tablet type must be REPLICA or RDONLY: MASTER" if err == nil || err.Error() != want { t.Errorf("MigrateReads(master) err: %v, want %v", err, want) @@ -442,7 +442,7 @@ func TestShardMigrate(t *testing.T) { // Test MigrateWrites cancelation on failure. // Migrate all the reads first. - err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) if err != nil { t.Fatal(err) } @@ -551,11 +551,11 @@ func TestMigrateFailJournal(t *testing.T) { tme := newTestTableMigrater(ctx, t) defer tme.stopTablets(t) - err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) if err != nil { t.Fatal(err) } - err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, nil, DirectionForward) if err != nil { t.Fatal(err) } @@ -619,11 +619,11 @@ func TestTableMigrateJournalExists(t *testing.T) { tme := newTestTableMigrater(ctx, t) defer tme.stopTablets(t) - err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) if err != nil { t.Fatal(err) } - err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", 
topodatapb.TabletType_REPLICA, nil, DirectionForward) if err != nil { t.Fatal(err) } @@ -681,11 +681,11 @@ func TestShardMigrateJournalExists(t *testing.T) { tme := newTestShardMigrater(ctx, t) defer tme.stopTablets(t) - err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) if err != nil { t.Fatal(err) } - err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_REPLICA, directionForward) + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, nil, DirectionForward) if err != nil { t.Fatal(err) } @@ -742,7 +742,7 @@ func TestMigrateNoStreamsFound(t *testing.T) { tme.dbDest1Client.addQuery(vreplQueryks2, &sqltypes.Result{}, nil) tme.dbDest2Client.addQuery(vreplQueryks2, &sqltypes.Result{}, nil) - err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) want := "no streams found in keyspace ks2 for: test" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -773,7 +773,7 @@ func TestMigrateDistinctSources(t *testing.T) { fmt.Sprintf("1|%v", bls), ), nil) - err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) want := "source keyspaces are mismatched across streams" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -801,7 +801,7 @@ func TestMigrateMismatchedTables(t *testing.T) { fmt.Sprintf("1|%v", bls), ), nil) - err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, 
"test", nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) want := "table lists are mismatched across streams" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -815,7 +815,7 @@ func TestTableMigrateAllShardsNotPresent(t *testing.T) { tme.dbDest1Client.addQuery(vreplQueryks2, &sqltypes.Result{}, nil) - err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) want := "mismatched shards for keyspace" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -869,7 +869,7 @@ func TestMigrateNoTableWildcards(t *testing.T) { fmt.Sprintf("1|%v", bls3), ), nil) - err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) want := "cannot migrate streams with wild card table names" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) @@ -897,7 +897,7 @@ func TestShardMigrateTargetMatchesSource(t *testing.T) { fmt.Sprintf("1|%v", bls), ), nil) - err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", nil, topodatapb.TabletType_RDONLY, directionForward) + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) want := "target shard matches a source shard" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("MigrateReads: %v, must contain %v", err, want) From 4b86104476d32861f93c94077da5cf26627b3628 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Fri, 19 Jul 2019 
19:32:34 -0700 Subject: [PATCH 16/17] migrater: address review comments Signed-off-by: Sugu Sougoumarane --- go/vt/vtctl/vtctl.go | 2 +- .../tabletmanager/vreplication/engine.go | 6 ++-- go/vt/wrangler/fake_tablet_test.go | 2 +- go/vt/wrangler/migrater.go | 30 +++++++++---------- go/vt/wrangler/migrater_test.go | 28 ++++++++++++++++- 5 files changed, 47 insertions(+), 21 deletions(-) diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index 4761e460aa7..d89a57a0e26 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -324,7 +324,7 @@ var commands = []commandGroup{ "[-cells=c1,c2,...] [-reverse] ", "Migrate read traffic for the specified workflow."}, {"MigrateWrites", commandMigrateWrites, - " ", + "[-filtered_replication_wait_time=30s] ", "Migrate write traffic for the specified workflow."}, {"CancelResharding", commandCancelResharding, "", diff --git a/go/vt/vttablet/tabletmanager/vreplication/engine.go b/go/vt/vttablet/tabletmanager/vreplication/engine.go index 76a8ce639fe..65a9bc2a8eb 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/engine.go +++ b/go/vt/vttablet/tabletmanager/vreplication/engine.go @@ -35,7 +35,7 @@ import ( const ( reshardingJournalTableName = "_vt.resharding_journal" vreplicationTableName = "_vt.vreplication" - CreateReshardingJournalTable = `create table if not exists _vt.resharding_journal( + createReshardingJournalTable = `create table if not exists _vt.resharding_journal( id bigint, db_name varbinary(255), val blob, @@ -122,7 +122,7 @@ func (vre *Engine) executeFetchMaybeCreateTable(dbClient binlogplayer.DBClient, } // If it's a bad table or db, it could be because the vreplication tables weren't created. - // In that case we can try creating then again. + // In that case we can try creating them again. 
merr, isSQLErr := err.(*mysql.SQLError) if !isSQLErr || !(merr.Num == mysql.ERNoSuchTable || merr.Num == mysql.ERBadDb || merr.Num == mysql.ERBadFieldError) { return qr, err @@ -136,7 +136,7 @@ func (vre *Engine) executeFetchMaybeCreateTable(dbClient binlogplayer.DBClient, return nil, err } } - if _, merr := dbClient.ExecuteFetch(CreateReshardingJournalTable, 0); merr != nil { + if _, merr := dbClient.ExecuteFetch(createReshardingJournalTable, 0); merr != nil { log.Warningf("Failed to ensure %s exists: %v", reshardingJournalTableName, merr) return nil, err } diff --git a/go/vt/wrangler/fake_tablet_test.go b/go/vt/wrangler/fake_tablet_test.go index 042533fa8ef..85adabf6cb1 100644 --- a/go/vt/wrangler/fake_tablet_test.go +++ b/go/vt/wrangler/fake_tablet_test.go @@ -46,7 +46,7 @@ import ( // This file was copied from testlib. All tests from testlib should be moved // to the current directory. In order to move tests from there, we have to // remove the circular dependency it causes (through vtctl dependence). -// The tests in this diectory call wrangler functions directory. So, there's +// The tests in this directory call wrangler functions directly. So, there's // no circular dependency. // This file contains utility methods for unit tests. diff --git a/go/vt/wrangler/migrater.go b/go/vt/wrangler/migrater.go index 5abab1733f1..fe8b324a441 100644 --- a/go/vt/wrangler/migrater.go +++ b/go/vt/wrangler/migrater.go @@ -97,11 +97,11 @@ func (wr *Wrangler) MigrateReads(ctx context.Context, targetKeyspace, workflow s if err != nil { return err } - if err := mi.validate(ctx); err != nil { + if err := mi.validate(ctx, false); err != nil { return err } - // For reads, locking the source keysppace is sufficient. + // For reads, locking the source keyspace is sufficient. 
ctx, unlock, lockErr := wr.ts.LockKeyspace(ctx, mi.sourceKeyspace, "MigrateReads") if lockErr != nil { return lockErr @@ -121,10 +121,7 @@ func (wr *Wrangler) MigrateWrites(ctx context.Context, targetKeyspace, workflow return 0, err } mi.wr.Logger().Infof("Built migration metadata: %+v", mi) - if err := mi.validate(ctx); err != nil { - return 0, err - } - if err := mi.validateForWrite(ctx); err != nil { + if err := mi.validate(ctx, true); err != nil { return 0, err } @@ -253,6 +250,9 @@ func (wr *Wrangler) buildMigrationTargets(ctx context.Context, targetKeyspace, w if err != nil { return nil, err } + // We check all target shards. All of them may not have a stream. + // For example, in a shard split, only the target shards will have + // a stream. for _, targetShard := range targetShards { targetsi, err := wr.ts.GetShard(ctx, targetKeyspace, targetShard) if err != nil { @@ -266,6 +266,7 @@ func (wr *Wrangler) buildMigrationTargets(ctx context.Context, targetKeyspace, w if err != nil { return nil, err } + // If there's no stream, check next. if len(p3qr.Rows) < 1 { continue } @@ -294,7 +295,7 @@ func (wr *Wrangler) buildMigrationTargets(ctx context.Context, targetKeyspace, w return targets, nil } -// hashStreams produces a reproduceable hash based on the input parameters. +// hashStreams produces a reproducible hash based on the input parameters. func hashStreams(targetKeyspace string, targets map[string]*miTarget) int64 { var expanded []string for shard, target := range targets { @@ -312,7 +313,7 @@ func hashStreams(targetKeyspace string, targets map[string]*miTarget) int64 { return int64(hasher.Sum64() & math.MaxInt64) } -func (mi *migrater) validate(ctx context.Context) error { +func (mi *migrater) validate(ctx context.Context, isWrite bool) error { if mi.migrationType == binlogdatapb.MigrationType_TABLES { // All shards must be present. 
if err := mi.compareShards(ctx, mi.sourceKeyspace, mi.sourceShards()); err != nil { @@ -327,6 +328,9 @@ func (mi *migrater) validate(ctx context.Context) error { return fmt.Errorf("cannot migrate streams with wild card table names: %v", table) } } + if isWrite { + return mi.validateTableForWrite(ctx) + } } else { // binlogdatapb.MigrationType_SHARDS // Source and target shards must not match. for sourceShard := range mi.sources { @@ -334,17 +338,13 @@ func (mi *migrater) validate(ctx context.Context) error { return fmt.Errorf("target shard matches a source shard: %v", sourceShard) } } + if isWrite { + return mi.validateShardForWrite(ctx) + } } return nil } -func (mi *migrater) validateForWrite(ctx context.Context) error { - if mi.migrationType == binlogdatapb.MigrationType_TABLES { - return mi.validateTableForWrite(ctx) - } - return mi.validateShardForWrite(ctx) -} - func (mi *migrater) validateTableForWrite(ctx context.Context) error { rules, err := mi.wr.getRoutingRules(ctx) if err != nil { diff --git a/go/vt/wrangler/migrater_test.go b/go/vt/wrangler/migrater_test.go index b0d239ed288..0e9f25995de 100644 --- a/go/vt/wrangler/migrater_test.go +++ b/go/vt/wrangler/migrater_test.go @@ -38,6 +38,13 @@ func TestTableMigrate(t *testing.T) { tme := newTestTableMigrater(ctx, t) defer tme.stopTablets(t) + checkCellRouting(t, tme.wr, "cell1", map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + }) + //------------------------------------------------------------------------------------------------------------------- // Single cell RDONLY migration. err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, []string{"cell1"}, DirectionForward) @@ -68,7 +75,8 @@ func TestTableMigrate(t *testing.T) { // Other cell REPLICA migration. // The global routing already contains redirections for rdonly. 
// So, adding routes for replica and deploying to cell2 will also cause - // cell2 to migrat rdonly. This is a quirk that can be fixed later if necessary. + // cell2 to migrate rdonly. This is a quirk that can be fixed later if necessary. + // TODO(sougou): check if it's worth fixing, or clearly document the quirk. err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, []string{"cell2"}, DirectionForward) if err != nil { t.Fatal(err) @@ -171,6 +179,20 @@ func TestTableMigrate(t *testing.T) { }) verifyQueries(t, tme.allDBClients) + //------------------------------------------------------------------------------------------------------------------- + // All cells RDONLY backward migration. + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, nil, DirectionBackward) + if err != nil { + t.Fatal(err) + } + checkRouting(t, tme.wr, map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + }) + verifyQueries(t, tme.allDBClients) + //------------------------------------------------------------------------------------------------------------------- // Can't migrate master with MigrateReads. 
err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_MASTER, nil, DirectionForward) @@ -197,6 +219,10 @@ func TestTableMigrate(t *testing.T) { if err != nil { t.Fatal(err) } + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, nil, DirectionForward) + if err != nil { + t.Fatal(err) + } checkRouting(t, tme.wr, map[string][]string{ "t1": {"ks1.t1"}, "ks2.t1": {"ks1.t1"}, From dfda28482f67658ce3cb4e3f7efaf6bcbb3b4792 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Sun, 4 Aug 2019 12:54:31 -0700 Subject: [PATCH 17/17] migrater: address more review comments Signed-off-by: Sugu Sougoumarane --- go/vt/vtctl/vtctl.go | 12 ++++++------ go/vt/wrangler/migrater.go | 10 +++++----- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index d89a57a0e26..b7645714375 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -1766,9 +1766,9 @@ func commandVerticalSplitClone(ctx context.Context, wr *wrangler.Wrangler, subFl func commandMigrateServedTypes(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { cellsStr := subFlags.String("cells", "", "Specifies a comma-separated list of cells to update") - reverse := subFlags.Bool("reverse", false, "Moves the served tablet type backward instead of forward. 
Use in case of trouble") + reverse := subFlags.Bool("reverse", false, "Moves the served tablet type backward instead of forward.") skipReFreshState := subFlags.Bool("skip-refresh-state", false, "Skips refreshing the state of the source tablets after the migration, meaning that the refresh will need to be done manually, replica and rdonly only)") - filteredReplicationWaitTime := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, "Specifies the maximum time to wait, in seconds, for filtered replication to catch up on master migrations") + filteredReplicationWaitTime := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, "Specifies the maximum time to wait, in seconds, for filtered replication to catch up on master migrations. The migration will be aborted on timeout.") reverseReplication := subFlags.Bool("reverse_replication", false, "For master migration, enabling this flag reverses replication which allows you to rollback") if err := subFlags.Parse(args); err != nil { return err @@ -1796,9 +1796,9 @@ func commandMigrateServedTypes(ctx context.Context, wr *wrangler.Wrangler, subFl } func commandMigrateServedFrom(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - reverse := subFlags.Bool("reverse", false, "Moves the served tablet type backward instead of forward. 
Use in case of trouble") + reverse := subFlags.Bool("reverse", false, "Moves the served tablet type backward instead of forward.") cellsStr := subFlags.String("cells", "", "Specifies a comma-separated list of cells to update") - filteredReplicationWaitTime := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, "Specifies the maximum time to wait, in seconds, for filtered replication to catch up on master migrations") + filteredReplicationWaitTime := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, "Specifies the maximum time to wait, in seconds, for filtered replication to catch up on master migrations. The migration will be aborted on timeout.") if err := subFlags.Parse(args); err != nil { return err } @@ -1822,7 +1822,7 @@ func commandMigrateServedFrom(ctx context.Context, wr *wrangler.Wrangler, subFla } func commandMigrateReads(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - reverse := subFlags.Bool("reverse", false, "Moves the served tablet type backward instead of forward. Use in case of trouble") + reverse := subFlags.Bool("reverse", false, "Moves the served tablet type backward instead of forward.") cellsStr := subFlags.String("cells", "", "Specifies a comma-separated list of cells to update") if err := subFlags.Parse(args); err != nil { return err @@ -1849,7 +1849,7 @@ func commandMigrateReads(ctx context.Context, wr *wrangler.Wrangler, subFlags *f } func commandMigrateWrites(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - filteredReplicationWaitTime := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, "Specifies the maximum time to wait, in seconds, for filtered replication to catch up on master migrations") + filteredReplicationWaitTime := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, "Specifies the maximum time to wait, in seconds, for filtered replication to catch up on master migrations. 
The migration will be aborted on timeout.") if err := subFlags.Parse(args); err != nil { return err } diff --git a/go/vt/wrangler/migrater.go b/go/vt/wrangler/migrater.go index fe8b324a441..1627dd5edb0 100644 --- a/go/vt/wrangler/migrater.go +++ b/go/vt/wrangler/migrater.go @@ -97,7 +97,7 @@ func (wr *Wrangler) MigrateReads(ctx context.Context, targetKeyspace, workflow s if err != nil { return err } - if err := mi.validate(ctx, false); err != nil { + if err := mi.validate(ctx, false /* isWrite */); err != nil { return err } @@ -121,7 +121,7 @@ func (wr *Wrangler) MigrateWrites(ctx context.Context, targetKeyspace, workflow return 0, err } mi.wr.Logger().Infof("Built migration metadata: %+v", mi) - if err := mi.validate(ctx, true); err != nil { + if err := mi.validate(ctx, true /* isWrite */); err != nil { return 0, err } @@ -251,8 +251,8 @@ func (wr *Wrangler) buildMigrationTargets(ctx context.Context, targetKeyspace, w return nil, err } // We check all target shards. All of them may not have a stream. - // For example, in a shard split, only the target shards will have - // a stream. + // For example, if we're splitting -80 to -40,40-80, only those + // two target shards will have vreplication streams. for _, targetShard := range targetShards { targetsi, err := wr.ts.GetShard(ctx, targetKeyspace, targetShard) if err != nil { @@ -266,7 +266,7 @@ func (wr *Wrangler) buildMigrationTargets(ctx context.Context, targetKeyspace, w if err != nil { return nil, err } - // If there's no stream, check next. + // If there's no vreplication stream, check the next target. if len(p3qr.Rows) < 1 { continue }