diff --git a/go/vt/proto/binlogdata/binlogdata.pb.go b/go/vt/proto/binlogdata/binlogdata.pb.go index a0b4c05757a..24983945643 100644 --- a/go/vt/proto/binlogdata/binlogdata.pb.go +++ b/go/vt/proto/binlogdata/binlogdata.pb.go @@ -48,7 +48,7 @@ func (x OnDDLAction) String() string { return proto.EnumName(OnDDLAction_name, int32(x)) } func (OnDDLAction) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{0} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{0} } // VEventType enumerates the event types. @@ -73,6 +73,7 @@ const ( VEventType_FIELD VEventType = 13 VEventType_HEARTBEAT VEventType = 14 VEventType_VGTID VEventType = 15 + VEventType_JOURNAL VEventType = 16 ) var VEventType_name = map[int32]string{ @@ -92,6 +93,7 @@ var VEventType_name = map[int32]string{ 13: "FIELD", 14: "HEARTBEAT", 15: "VGTID", + 16: "JOURNAL", } var VEventType_value = map[string]int32{ "UNKNOWN": 0, @@ -110,13 +112,38 @@ var VEventType_value = map[string]int32{ "FIELD": 13, "HEARTBEAT": 14, "VGTID": 15, + "JOURNAL": 16, } func (x VEventType) String() string { return proto.EnumName(VEventType_name, int32(x)) } func (VEventType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{1} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{1} +} + +// MigrationType specifies the type of migration for the Journal. 
+type MigrationType int32 + +const ( + MigrationType_TABLES MigrationType = 0 + MigrationType_SHARDS MigrationType = 1 +) + +var MigrationType_name = map[int32]string{ + 0: "TABLES", + 1: "SHARDS", +} +var MigrationType_value = map[string]int32{ + "TABLES": 0, + "SHARDS": 1, +} + +func (x MigrationType) String() string { + return proto.EnumName(MigrationType_name, int32(x)) +} +func (MigrationType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{2} } type BinlogTransaction_Statement_Category int32 @@ -164,7 +191,7 @@ func (x BinlogTransaction_Statement_Category) String() string { return proto.EnumName(BinlogTransaction_Statement_Category_name, int32(x)) } func (BinlogTransaction_Statement_Category) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{1, 0, 0} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{1, 0, 0} } // Charset is the per-statement charset info from a QUERY_EVENT binlog entry. 
@@ -184,7 +211,7 @@ func (m *Charset) Reset() { *m = Charset{} } func (m *Charset) String() string { return proto.CompactTextString(m) } func (*Charset) ProtoMessage() {} func (*Charset) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{0} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{0} } func (m *Charset) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Charset.Unmarshal(m, b) @@ -241,7 +268,7 @@ func (m *BinlogTransaction) Reset() { *m = BinlogTransaction{} } func (m *BinlogTransaction) String() string { return proto.CompactTextString(m) } func (*BinlogTransaction) ProtoMessage() {} func (*BinlogTransaction) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{1} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{1} } func (m *BinlogTransaction) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BinlogTransaction.Unmarshal(m, b) @@ -291,7 +318,7 @@ func (m *BinlogTransaction_Statement) Reset() { *m = BinlogTransaction_S func (m *BinlogTransaction_Statement) String() string { return proto.CompactTextString(m) } func (*BinlogTransaction_Statement) ProtoMessage() {} func (*BinlogTransaction_Statement) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{1, 0} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{1, 0} } func (m *BinlogTransaction_Statement) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BinlogTransaction_Statement.Unmarshal(m, b) @@ -349,7 +376,7 @@ func (m *StreamKeyRangeRequest) Reset() { *m = StreamKeyRangeRequest{} } func (m *StreamKeyRangeRequest) String() string { return proto.CompactTextString(m) } func (*StreamKeyRangeRequest) ProtoMessage() {} func (*StreamKeyRangeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{2} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{2} } func (m *StreamKeyRangeRequest) XXX_Unmarshal(b []byte) 
error { return xxx_messageInfo_StreamKeyRangeRequest.Unmarshal(m, b) @@ -402,7 +429,7 @@ func (m *StreamKeyRangeResponse) Reset() { *m = StreamKeyRangeResponse{} func (m *StreamKeyRangeResponse) String() string { return proto.CompactTextString(m) } func (*StreamKeyRangeResponse) ProtoMessage() {} func (*StreamKeyRangeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{3} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{3} } func (m *StreamKeyRangeResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamKeyRangeResponse.Unmarshal(m, b) @@ -446,7 +473,7 @@ func (m *StreamTablesRequest) Reset() { *m = StreamTablesRequest{} } func (m *StreamTablesRequest) String() string { return proto.CompactTextString(m) } func (*StreamTablesRequest) ProtoMessage() {} func (*StreamTablesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{4} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{4} } func (m *StreamTablesRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamTablesRequest.Unmarshal(m, b) @@ -499,7 +526,7 @@ func (m *StreamTablesResponse) Reset() { *m = StreamTablesResponse{} } func (m *StreamTablesResponse) String() string { return proto.CompactTextString(m) } func (*StreamTablesResponse) ProtoMessage() {} func (*StreamTablesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{5} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{5} } func (m *StreamTablesResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamTablesResponse.Unmarshal(m, b) @@ -544,7 +571,7 @@ func (m *Rule) Reset() { *m = Rule{} } func (m *Rule) String() string { return proto.CompactTextString(m) } func (*Rule) ProtoMessage() {} func (*Rule) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{6} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{6} } 
func (m *Rule) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Rule.Unmarshal(m, b) @@ -591,7 +618,7 @@ func (m *Filter) Reset() { *m = Filter{} } func (m *Filter) String() string { return proto.CompactTextString(m) } func (*Filter) ProtoMessage() {} func (*Filter) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{7} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{7} } func (m *Filter) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Filter.Unmarshal(m, b) @@ -646,7 +673,7 @@ func (m *BinlogSource) Reset() { *m = BinlogSource{} } func (m *BinlogSource) String() string { return proto.CompactTextString(m) } func (*BinlogSource) ProtoMessage() {} func (*BinlogSource) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{8} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{8} } func (m *BinlogSource) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BinlogSource.Unmarshal(m, b) @@ -728,7 +755,7 @@ func (m *RowChange) Reset() { *m = RowChange{} } func (m *RowChange) String() string { return proto.CompactTextString(m) } func (*RowChange) ProtoMessage() {} func (*RowChange) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{9} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{9} } func (m *RowChange) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RowChange.Unmarshal(m, b) @@ -775,7 +802,7 @@ func (m *RowEvent) Reset() { *m = RowEvent{} } func (m *RowEvent) String() string { return proto.CompactTextString(m) } func (*RowEvent) ProtoMessage() {} func (*RowEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{10} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{10} } func (m *RowEvent) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RowEvent.Unmarshal(m, b) @@ -821,7 +848,7 @@ func (m *FieldEvent) Reset() { *m = FieldEvent{} } func (m *FieldEvent) 
String() string { return proto.CompactTextString(m) } func (*FieldEvent) ProtoMessage() {} func (*FieldEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{11} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{11} } func (m *FieldEvent) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_FieldEvent.Unmarshal(m, b) @@ -868,7 +895,7 @@ func (m *ShardGtid) Reset() { *m = ShardGtid{} } func (m *ShardGtid) String() string { return proto.CompactTextString(m) } func (*ShardGtid) ProtoMessage() {} func (*ShardGtid) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{12} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{12} } func (m *ShardGtid) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ShardGtid.Unmarshal(m, b) @@ -920,7 +947,7 @@ func (m *VGtid) Reset() { *m = VGtid{} } func (m *VGtid) String() string { return proto.CompactTextString(m) } func (*VGtid) ProtoMessage() {} func (*VGtid) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{13} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{13} } func (m *VGtid) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VGtid.Unmarshal(m, b) @@ -947,6 +974,138 @@ func (m *VGtid) GetShardGtids() []*ShardGtid { return nil } +type KeyspaceShard struct { + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyspaceShard) Reset() { *m = KeyspaceShard{} } +func (m *KeyspaceShard) String() string { return proto.CompactTextString(m) } +func (*KeyspaceShard) ProtoMessage() {} +func (*KeyspaceShard) Descriptor() ([]byte, []int) { + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{14} +} +func (m *KeyspaceShard) XXX_Unmarshal(b 
[]byte) error { + return xxx_messageInfo_KeyspaceShard.Unmarshal(m, b) +} +func (m *KeyspaceShard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyspaceShard.Marshal(b, m, deterministic) +} +func (dst *KeyspaceShard) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyspaceShard.Merge(dst, src) +} +func (m *KeyspaceShard) XXX_Size() int { + return xxx_messageInfo_KeyspaceShard.Size(m) +} +func (m *KeyspaceShard) XXX_DiscardUnknown() { + xxx_messageInfo_KeyspaceShard.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyspaceShard proto.InternalMessageInfo + +func (m *KeyspaceShard) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *KeyspaceShard) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} + +type Journal struct { + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + MigrationType MigrationType `protobuf:"varint,2,opt,name=migration_type,json=migrationType,proto3,enum=binlogdata.MigrationType" json:"migration_type,omitempty"` + Tables []string `protobuf:"bytes,3,rep,name=tables,proto3" json:"tables,omitempty"` + LocalPosition string `protobuf:"bytes,4,opt,name=local_position,json=localPosition,proto3" json:"local_position,omitempty"` + ShardGtids []*ShardGtid `protobuf:"bytes,5,rep,name=shard_gtids,json=shardGtids,proto3" json:"shard_gtids,omitempty"` + Participants []*KeyspaceShard `protobuf:"bytes,6,rep,name=participants,proto3" json:"participants,omitempty"` + ReversedIds []int64 `protobuf:"varint,7,rep,packed,name=reversed_ids,json=reversedIds,proto3" json:"reversed_ids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Journal) Reset() { *m = Journal{} } +func (m *Journal) String() string { return proto.CompactTextString(m) } +func (*Journal) ProtoMessage() {} +func (*Journal) Descriptor() ([]byte, []int) { + return 
fileDescriptor_binlogdata_db2d20dd0016de21, []int{15} +} +func (m *Journal) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Journal.Unmarshal(m, b) +} +func (m *Journal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Journal.Marshal(b, m, deterministic) +} +func (dst *Journal) XXX_Merge(src proto.Message) { + xxx_messageInfo_Journal.Merge(dst, src) +} +func (m *Journal) XXX_Size() int { + return xxx_messageInfo_Journal.Size(m) +} +func (m *Journal) XXX_DiscardUnknown() { + xxx_messageInfo_Journal.DiscardUnknown(m) +} + +var xxx_messageInfo_Journal proto.InternalMessageInfo + +func (m *Journal) GetId() int64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Journal) GetMigrationType() MigrationType { + if m != nil { + return m.MigrationType + } + return MigrationType_TABLES +} + +func (m *Journal) GetTables() []string { + if m != nil { + return m.Tables + } + return nil +} + +func (m *Journal) GetLocalPosition() string { + if m != nil { + return m.LocalPosition + } + return "" +} + +func (m *Journal) GetShardGtids() []*ShardGtid { + if m != nil { + return m.ShardGtids + } + return nil +} + +func (m *Journal) GetParticipants() []*KeyspaceShard { + if m != nil { + return m.Participants + } + return nil +} + +func (m *Journal) GetReversedIds() []int64 { + if m != nil { + return m.ReversedIds + } + return nil +} + // VEvent represents a vstream event type VEvent struct { Type VEventType `protobuf:"varint,1,opt,name=type,proto3,enum=binlogdata.VEventType" json:"type,omitempty"` @@ -956,6 +1115,7 @@ type VEvent struct { RowEvent *RowEvent `protobuf:"bytes,5,opt,name=row_event,json=rowEvent,proto3" json:"row_event,omitempty"` FieldEvent *FieldEvent `protobuf:"bytes,6,opt,name=field_event,json=fieldEvent,proto3" json:"field_event,omitempty"` Vgtid *VGtid `protobuf:"bytes,7,opt,name=vgtid,proto3" json:"vgtid,omitempty"` + Journal *Journal `protobuf:"bytes,8,opt,name=journal,proto3" json:"journal,omitempty"` // 
current_time specifies the current time to handle clock skew. CurrentTime int64 `protobuf:"varint,20,opt,name=current_time,json=currentTime,proto3" json:"current_time,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -967,7 +1127,7 @@ func (m *VEvent) Reset() { *m = VEvent{} } func (m *VEvent) String() string { return proto.CompactTextString(m) } func (*VEvent) ProtoMessage() {} func (*VEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{14} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{16} } func (m *VEvent) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VEvent.Unmarshal(m, b) @@ -1036,6 +1196,13 @@ func (m *VEvent) GetVgtid() *VGtid { return nil } +func (m *VEvent) GetJournal() *Journal { + if m != nil { + return m.Journal + } + return nil +} + func (m *VEvent) GetCurrentTime() int64 { if m != nil { return m.CurrentTime @@ -1059,7 +1226,7 @@ func (m *VStreamRequest) Reset() { *m = VStreamRequest{} } func (m *VStreamRequest) String() string { return proto.CompactTextString(m) } func (*VStreamRequest) ProtoMessage() {} func (*VStreamRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{15} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{17} } func (m *VStreamRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VStreamRequest.Unmarshal(m, b) @@ -1126,7 +1293,7 @@ func (m *VStreamResponse) Reset() { *m = VStreamResponse{} } func (m *VStreamResponse) String() string { return proto.CompactTextString(m) } func (*VStreamResponse) ProtoMessage() {} func (*VStreamResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{16} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{18} } func (m *VStreamResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VStreamResponse.Unmarshal(m, b) @@ -1169,7 +1336,7 @@ func (m *VStreamRowsRequest) Reset() { *m = VStreamRowsRequest{} } func (m 
*VStreamRowsRequest) String() string { return proto.CompactTextString(m) } func (*VStreamRowsRequest) ProtoMessage() {} func (*VStreamRowsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{17} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{19} } func (m *VStreamRowsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VStreamRowsRequest.Unmarshal(m, b) @@ -1240,7 +1407,7 @@ func (m *VStreamRowsResponse) Reset() { *m = VStreamRowsResponse{} } func (m *VStreamRowsResponse) String() string { return proto.CompactTextString(m) } func (*VStreamRowsResponse) ProtoMessage() {} func (*VStreamRowsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{18} + return fileDescriptor_binlogdata_db2d20dd0016de21, []int{20} } func (m *VStreamRowsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VStreamRowsResponse.Unmarshal(m, b) @@ -1311,6 +1478,8 @@ func init() { proto.RegisterType((*FieldEvent)(nil), "binlogdata.FieldEvent") proto.RegisterType((*ShardGtid)(nil), "binlogdata.ShardGtid") proto.RegisterType((*VGtid)(nil), "binlogdata.VGtid") + proto.RegisterType((*KeyspaceShard)(nil), "binlogdata.KeyspaceShard") + proto.RegisterType((*Journal)(nil), "binlogdata.Journal") proto.RegisterType((*VEvent)(nil), "binlogdata.VEvent") proto.RegisterType((*VStreamRequest)(nil), "binlogdata.VStreamRequest") proto.RegisterType((*VStreamResponse)(nil), "binlogdata.VStreamResponse") @@ -1318,97 +1487,110 @@ func init() { proto.RegisterType((*VStreamRowsResponse)(nil), "binlogdata.VStreamRowsResponse") proto.RegisterEnum("binlogdata.OnDDLAction", OnDDLAction_name, OnDDLAction_value) proto.RegisterEnum("binlogdata.VEventType", VEventType_name, VEventType_value) + proto.RegisterEnum("binlogdata.MigrationType", MigrationType_name, MigrationType_value) proto.RegisterEnum("binlogdata.BinlogTransaction_Statement_Category", BinlogTransaction_Statement_Category_name, 
BinlogTransaction_Statement_Category_value) } -func init() { proto.RegisterFile("binlogdata.proto", fileDescriptor_binlogdata_e3df2e837eaa5305) } - -var fileDescriptor_binlogdata_e3df2e837eaa5305 = []byte{ - // 1372 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x57, 0xdd, 0x72, 0xdb, 0x54, - 0x10, 0xae, 0x6d, 0xf9, 0x6f, 0x95, 0x26, 0xca, 0xc9, 0x0f, 0x9e, 0x0c, 0x65, 0x82, 0x06, 0x68, - 0xc8, 0x0c, 0x4e, 0x31, 0x50, 0xae, 0xa0, 0xe3, 0x1f, 0xd5, 0x75, 0xab, 0xd8, 0xe9, 0xb1, 0x9a, - 0x32, 0xbd, 0xd1, 0x28, 0xd2, 0x71, 0xa2, 0x89, 0x2c, 0x39, 0xd2, 0xb1, 0x83, 0x1f, 0x80, 0xe1, - 0x01, 0xb8, 0xe5, 0x05, 0xb8, 0xe3, 0x05, 0xb8, 0x63, 0x78, 0x13, 0xde, 0x83, 0x39, 0x3f, 0x92, - 0xed, 0xb4, 0xb4, 0x81, 0x19, 0x2e, 0xb8, 0xc9, 0xec, 0xff, 0xd9, 0xfd, 0x76, 0xbd, 0xda, 0x80, - 0x76, 0xe6, 0x87, 0x41, 0x74, 0xee, 0x39, 0xd4, 0xa9, 0x4f, 0xe2, 0x88, 0x46, 0x08, 0x16, 0x92, - 0x3d, 0x75, 0x46, 0xe3, 0x89, 0x2b, 0x14, 0x7b, 0xea, 0xd5, 0x94, 0xc4, 0x73, 0xc9, 0xac, 0xd3, - 0x68, 0x12, 0x2d, 0xbc, 0xf4, 0x63, 0x28, 0xb7, 0x2f, 0x9c, 0x38, 0x21, 0x14, 0xed, 0x42, 0xc9, - 0x0d, 0x7c, 0x12, 0xd2, 0x5a, 0x6e, 0x3f, 0x77, 0x50, 0xc4, 0x92, 0x43, 0x08, 0x14, 0x37, 0x0a, - 0xc3, 0x5a, 0x9e, 0x4b, 0x39, 0xcd, 0x6c, 0x13, 0x12, 0xcf, 0x48, 0x5c, 0x2b, 0x08, 0x5b, 0xc1, - 0xe9, 0x7f, 0x16, 0x60, 0xb3, 0xc5, 0xf3, 0xb0, 0x62, 0x27, 0x4c, 0x1c, 0x97, 0xfa, 0x51, 0x88, - 0xba, 0x00, 0x09, 0x75, 0x28, 0x19, 0x93, 0x90, 0x26, 0xb5, 0xdc, 0x7e, 0xe1, 0x40, 0x6d, 0xdc, - 0xaf, 0x2f, 0x55, 0xf0, 0x9a, 0x4b, 0x7d, 0x98, 0xda, 0xe3, 0x25, 0x57, 0xd4, 0x00, 0x95, 0xcc, - 0x48, 0x48, 0x6d, 0x1a, 0x5d, 0x92, 0xb0, 0xa6, 0xec, 0xe7, 0x0e, 0xd4, 0xc6, 0x66, 0x5d, 0x14, - 0x68, 0x30, 0x8d, 0xc5, 0x14, 0x18, 0x48, 0x46, 0xef, 0xfd, 0x91, 0x87, 0x6a, 0x16, 0x0d, 0x99, - 0x50, 0x71, 0x1d, 0x4a, 0xce, 0xa3, 0x78, 0xce, 0xcb, 0x5c, 0x6f, 0x3c, 0xb8, 0x65, 0x22, 0xf5, - 0xb6, 0xf4, 0xc3, 0x59, 0x04, 0xf4, 0x19, 0x94, 0x5d, 0x81, 0x1e, 0x47, 
0x47, 0x6d, 0x6c, 0x2d, - 0x07, 0x93, 0xc0, 0xe2, 0xd4, 0x06, 0x69, 0x50, 0x48, 0xae, 0x02, 0x0e, 0xd9, 0x1a, 0x66, 0xa4, - 0xfe, 0x4b, 0x0e, 0x2a, 0x69, 0x5c, 0xb4, 0x05, 0x1b, 0x2d, 0xd3, 0x7e, 0xd1, 0xc7, 0x46, 0x7b, - 0xd0, 0xed, 0xf7, 0x5e, 0x19, 0x1d, 0xed, 0x0e, 0x5a, 0x83, 0x4a, 0xcb, 0xb4, 0x5b, 0x46, 0xb7, - 0xd7, 0xd7, 0x72, 0xe8, 0x2e, 0x54, 0x5b, 0xa6, 0xdd, 0x1e, 0x1c, 0x1f, 0xf7, 0x2c, 0x2d, 0x8f, - 0x36, 0x40, 0x6d, 0x99, 0x36, 0x1e, 0x98, 0x66, 0xab, 0xd9, 0x7e, 0xa6, 0x15, 0xd0, 0x0e, 0x6c, - 0xb6, 0x4c, 0xbb, 0x73, 0x6c, 0xda, 0x1d, 0xe3, 0x04, 0x1b, 0xed, 0xa6, 0x65, 0x74, 0x34, 0x05, - 0x01, 0x94, 0x98, 0xb8, 0x63, 0x6a, 0x45, 0x49, 0x0f, 0x0d, 0x4b, 0x2b, 0xc9, 0x70, 0xbd, 0xfe, - 0xd0, 0xc0, 0x96, 0x56, 0x96, 0xec, 0x8b, 0x93, 0x4e, 0xd3, 0x32, 0xb4, 0x8a, 0x64, 0x3b, 0x86, - 0x69, 0x58, 0x86, 0x56, 0x7d, 0xaa, 0x54, 0xf2, 0x5a, 0xe1, 0xa9, 0x52, 0x29, 0x68, 0x8a, 0xfe, - 0x53, 0x0e, 0x76, 0x86, 0x34, 0x26, 0xce, 0xf8, 0x19, 0x99, 0x63, 0x27, 0x3c, 0x27, 0x98, 0x5c, - 0x4d, 0x49, 0x42, 0xd1, 0x1e, 0x54, 0x26, 0x51, 0xe2, 0x33, 0xec, 0x38, 0xc0, 0x55, 0x9c, 0xf1, - 0xe8, 0x08, 0xaa, 0x97, 0x64, 0x6e, 0xc7, 0xcc, 0x5e, 0x02, 0x86, 0xea, 0xd9, 0x40, 0x66, 0x91, - 0x2a, 0x97, 0x92, 0x5a, 0xc6, 0xb7, 0xf0, 0x6e, 0x7c, 0xf5, 0x11, 0xec, 0xde, 0x4c, 0x2a, 0x99, - 0x44, 0x61, 0x42, 0x90, 0x09, 0x48, 0x38, 0xda, 0x74, 0xd1, 0x5b, 0x9e, 0x9f, 0xda, 0xb8, 0xf7, - 0xd6, 0x01, 0xc0, 0x9b, 0x67, 0x37, 0x45, 0xfa, 0xf7, 0xb0, 0x25, 0xde, 0xb1, 0x9c, 0xb3, 0x80, - 0x24, 0xb7, 0x29, 0x7d, 0x17, 0x4a, 0x94, 0x1b, 0xd7, 0xf2, 0xfb, 0x85, 0x83, 0x2a, 0x96, 0xdc, - 0x3f, 0xad, 0xd0, 0x83, 0xed, 0xd5, 0x97, 0xff, 0x93, 0xfa, 0xbe, 0x04, 0x05, 0x4f, 0x03, 0x82, - 0xb6, 0xa1, 0x38, 0x76, 0xa8, 0x7b, 0x21, 0xab, 0x11, 0x0c, 0x2b, 0x65, 0xe4, 0x07, 0x94, 0xc4, - 0xbc, 0x85, 0x55, 0x2c, 0x39, 0xfd, 0x01, 0x94, 0x1e, 0x73, 0x0a, 0x7d, 0x02, 0xc5, 0x78, 0xca, - 0x6a, 0x15, 0x3f, 0x75, 0x6d, 0x39, 0x01, 0x16, 0x18, 0x0b, 0xb5, 0xfe, 0x73, 0x1e, 0xd6, 0x44, - 0x42, 0xc3, 
0x68, 0x1a, 0xbb, 0x84, 0x21, 0x78, 0x49, 0xe6, 0xc9, 0xc4, 0x71, 0x49, 0x8a, 0x60, - 0xca, 0xb3, 0x64, 0x92, 0x0b, 0x27, 0xf6, 0xe4, 0xab, 0x82, 0x41, 0x5f, 0x81, 0xca, 0x91, 0xa4, - 0x36, 0x9d, 0x4f, 0x08, 0xc7, 0x70, 0xbd, 0xb1, 0xbd, 0x18, 0x2a, 0x8e, 0x13, 0xb5, 0xe6, 0x13, - 0x82, 0x81, 0x66, 0xf4, 0xea, 0x24, 0x2a, 0xb7, 0x98, 0xc4, 0x45, 0xff, 0x8a, 0x2b, 0xfd, 0x3b, - 0xcc, 0xc0, 0x28, 0xc9, 0x28, 0x4b, 0xb5, 0x0a, 0x38, 0x52, 0x80, 0x50, 0x1d, 0x4a, 0x51, 0x68, - 0x7b, 0x5e, 0x50, 0x2b, 0xf3, 0x34, 0xdf, 0x5b, 0xb6, 0x1d, 0x84, 0x9d, 0x8e, 0xd9, 0x14, 0x2d, - 0x29, 0x46, 0x61, 0xc7, 0x0b, 0xf4, 0xe7, 0x50, 0xc5, 0xd1, 0x75, 0xfb, 0x82, 0x27, 0xa0, 0x43, - 0xe9, 0x8c, 0x8c, 0xa2, 0x98, 0xc8, 0xae, 0x82, 0xdc, 0x7a, 0x38, 0xba, 0xc6, 0x52, 0x83, 0xf6, - 0xa1, 0xe8, 0x8c, 0xd2, 0xc6, 0xac, 0x9a, 0x08, 0x85, 0xee, 0x40, 0x05, 0x47, 0xd7, 0x7c, 0x53, - 0xa2, 0x7b, 0x20, 0x10, 0xb1, 0x43, 0x67, 0x9c, 0xc2, 0x5d, 0xe5, 0x92, 0xbe, 0x33, 0x26, 0xe8, - 0x21, 0xa8, 0x71, 0x74, 0x6d, 0xbb, 0xfc, 0x79, 0x31, 0xb6, 0x6a, 0x63, 0x67, 0xa5, 0x95, 0x69, - 0x72, 0x18, 0xe2, 0x94, 0x4c, 0xf4, 0xe7, 0x00, 0x8f, 0x7d, 0x12, 0x78, 0xb7, 0x7a, 0xe4, 0x23, - 0x06, 0x1f, 0x09, 0xbc, 0x34, 0xfe, 0x9a, 0x4c, 0x99, 0x47, 0xc0, 0x52, 0xc7, 0x80, 0x18, 0xb2, - 0x6e, 0x77, 0xa9, 0xef, 0xfd, 0x8b, 0x19, 0x41, 0xa0, 0x9c, 0x53, 0xdf, 0xe3, 0xc3, 0x51, 0xc5, - 0x9c, 0xd6, 0x1f, 0x41, 0xf1, 0x94, 0x87, 0x7b, 0x08, 0x2a, 0xb7, 0xb2, 0x99, 0x38, 0x9d, 0xd8, - 0x95, 0x32, 0xb3, 0xa7, 0x31, 0x24, 0x29, 0x99, 0xe8, 0xbf, 0xe6, 0xa1, 0x74, 0x2a, 0x6a, 0x3c, - 0x04, 0x85, 0x0f, 0x9f, 0xf8, 0x9e, 0xec, 0x2e, 0xfb, 0x0a, 0x0b, 0x3e, 0x7e, 0xdc, 0x06, 0xbd, - 0x0f, 0x55, 0xea, 0x8f, 0x49, 0x42, 0x9d, 0xf1, 0x84, 0x67, 0x59, 0xc0, 0x0b, 0xc1, 0x9b, 0x32, - 0x65, 0x1f, 0x0d, 0x36, 0x32, 0x0a, 0x17, 0x31, 0x12, 0x7d, 0x0e, 0x55, 0xd6, 0x19, 0xfe, 0x8d, - 0xab, 0x15, 0x79, 0xab, 0xb7, 0x6f, 0xf4, 0x85, 0x3f, 0x8b, 0x2b, 0x71, 0xda, 0xeb, 0xaf, 0x41, - 0xe5, 0x58, 0x4a, 0x27, 0x31, 0xab, 0xbb, 0xab, 
0xb3, 0x9a, 0xf6, 0x0c, 0xc3, 0x68, 0xd1, 0xbf, - 0xfb, 0x50, 0x9c, 0xf1, 0x94, 0xca, 0xf2, 0x5b, 0xbb, 0x5c, 0x1c, 0x07, 0x45, 0xe8, 0xd1, 0x87, - 0xb0, 0xe6, 0x4e, 0xe3, 0x98, 0x7f, 0x9c, 0xfd, 0x31, 0xa9, 0x6d, 0xf3, 0xda, 0x54, 0x29, 0xb3, - 0xfc, 0x31, 0xd1, 0x7f, 0xcc, 0xc3, 0xfa, 0xa9, 0x58, 0x5f, 0xe9, 0xca, 0x7c, 0x04, 0x5b, 0x64, - 0x34, 0x22, 0x2e, 0xf5, 0x67, 0xc4, 0x76, 0x9d, 0x20, 0x20, 0xb1, 0xed, 0x7b, 0x72, 0xc4, 0x37, - 0xea, 0xe2, 0x8c, 0x69, 0x73, 0x79, 0xaf, 0x83, 0x37, 0x33, 0x5b, 0x29, 0xf2, 0x90, 0x01, 0x5b, - 0xfe, 0x78, 0x4c, 0x3c, 0xdf, 0xa1, 0xcb, 0x01, 0xc4, 0x0f, 0x60, 0x47, 0x4e, 0xd3, 0xa9, 0xd5, - 0x75, 0x28, 0x59, 0x84, 0xc9, 0x3c, 0xb2, 0x30, 0x1f, 0xb3, 0x9f, 0x77, 0x7c, 0x9e, 0x6d, 0xe1, - 0xbb, 0xd2, 0xd3, 0xe2, 0x42, 0x2c, 0x95, 0x2b, 0x1b, 0x5e, 0xb9, 0xb1, 0xe1, 0x17, 0x9b, 0xa0, - 0xf8, 0xae, 0x4d, 0xa0, 0x7f, 0x03, 0x1b, 0x19, 0x10, 0x72, 0x83, 0x1f, 0x42, 0x89, 0xf7, 0x26, - 0x1d, 0x41, 0xf4, 0xfa, 0x18, 0x61, 0x69, 0xa1, 0xff, 0x90, 0x07, 0x94, 0xfa, 0x47, 0xd7, 0xc9, - 0xff, 0x14, 0xcc, 0x6d, 0x28, 0x72, 0xb9, 0x44, 0x52, 0x30, 0x0c, 0x87, 0xc0, 0x49, 0xe8, 0xe4, - 0x32, 0x83, 0x51, 0x38, 0x3f, 0x67, 0x7f, 0x31, 0x49, 0xa6, 0x01, 0xc5, 0xd2, 0x42, 0xff, 0x2d, - 0x07, 0x5b, 0x2b, 0x38, 0x48, 0x2c, 0x17, 0x5b, 0x25, 0xf7, 0xf7, 0x5b, 0x05, 0x1d, 0x40, 0x65, - 0x72, 0xf9, 0x96, 0xed, 0x93, 0x69, 0xdf, 0xf8, 0xb3, 0xfc, 0x00, 0x94, 0x38, 0xba, 0x4e, 0x6a, - 0x0a, 0xf7, 0x5c, 0x5e, 0xb5, 0x5c, 0xce, 0xf6, 0xf5, 0x4a, 0x1d, 0x2b, 0xfb, 0x5a, 0x68, 0x0e, - 0xbf, 0x05, 0x75, 0x69, 0xed, 0xb3, 0xcb, 0xac, 0xd7, 0xed, 0x0f, 0xb0, 0xa1, 0xdd, 0x41, 0x15, - 0x50, 0x86, 0xd6, 0xe0, 0x44, 0xcb, 0x31, 0xca, 0xf8, 0xce, 0x68, 0x8b, 0x6b, 0x8f, 0x51, 0xb6, - 0x34, 0x2a, 0x1c, 0xfe, 0x9e, 0x03, 0x58, 0x6c, 0x18, 0xa4, 0x42, 0xf9, 0x45, 0xff, 0x59, 0x7f, - 0xf0, 0xb2, 0x2f, 0x02, 0x74, 0xad, 0x5e, 0x47, 0xcb, 0xa1, 0x2a, 0x14, 0xc5, 0xf9, 0x98, 0x67, - 0x2f, 0xc8, 0xdb, 0xb1, 0xc0, 0x0e, 0xcb, 0xec, 0x70, 0x54, 0x50, 0x19, 0x0a, 0xd9, 
0x79, 0x28, - 0xef, 0xc1, 0x12, 0x0b, 0x88, 0x8d, 0x13, 0xb3, 0xd9, 0x36, 0xb4, 0x32, 0x53, 0x64, 0x97, 0x21, - 0x40, 0x29, 0x3d, 0x0b, 0x99, 0x27, 0x3b, 0x26, 0x81, 0xbd, 0x33, 0xb0, 0x9e, 0x18, 0x58, 0x53, - 0x99, 0x0c, 0x0f, 0x5e, 0x6a, 0x6b, 0x4c, 0xf6, 0xb8, 0x67, 0x98, 0x1d, 0xed, 0x2e, 0xbb, 0x26, - 0x9f, 0x18, 0x4d, 0x6c, 0xb5, 0x8c, 0xa6, 0xa5, 0xad, 0x33, 0xcd, 0x29, 0x4f, 0x70, 0xa3, 0xf5, - 0xe9, 0xab, 0xfb, 0x33, 0x9f, 0x92, 0x24, 0xa9, 0xfb, 0xd1, 0x91, 0xa0, 0x8e, 0xce, 0xa3, 0xa3, - 0x19, 0x3d, 0xe2, 0xff, 0xa3, 0x1c, 0x2d, 0x7e, 0x08, 0x67, 0x25, 0x2e, 0xf9, 0xe2, 0xaf, 0x00, - 0x00, 0x00, 0xff, 0xff, 0x59, 0xa0, 0xff, 0x30, 0xff, 0x0c, 0x00, 0x00, +func init() { proto.RegisterFile("binlogdata.proto", fileDescriptor_binlogdata_db2d20dd0016de21) } + +var fileDescriptor_binlogdata_db2d20dd0016de21 = []byte{ + // 1558 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x57, 0xcb, 0x72, 0xdb, 0xca, + 0x11, 0x35, 0x09, 0xf0, 0xd5, 0x90, 0x28, 0x68, 0xf4, 0x08, 0xa3, 0x8a, 0x53, 0x0a, 0x2a, 0x8e, + 0x14, 0x55, 0x85, 0x72, 0x98, 0xc4, 0x59, 0x39, 0x0e, 0x1f, 0xb0, 0x4c, 0x09, 0x22, 0xe5, 0x21, + 0x24, 0xa7, 0xbc, 0x41, 0x41, 0xc4, 0x48, 0x42, 0x04, 0x02, 0x34, 0x30, 0xa4, 0xa2, 0x0f, 0x48, + 0xe5, 0x03, 0xb2, 0xcd, 0x0f, 0x64, 0x9f, 0x6d, 0xb6, 0xd9, 0xe7, 0x0b, 0xb2, 0xca, 0x7f, 0xdc, + 0x9a, 0x07, 0x40, 0x42, 0xf6, 0xb5, 0xe5, 0x5b, 0x75, 0x17, 0x77, 0xc3, 0xea, 0xe9, 0xe9, 0xe7, + 0x41, 0x4f, 0x77, 0x13, 0xf4, 0x4b, 0x3f, 0x0c, 0xa2, 0x6b, 0xcf, 0xa5, 0x6e, 0x73, 0x1a, 0x47, + 0x34, 0x42, 0xb0, 0xe0, 0xec, 0x68, 0x73, 0x1a, 0x4f, 0xc7, 0xe2, 0x62, 0x47, 0xfb, 0x30, 0x23, + 0xf1, 0xbd, 0x3c, 0xd4, 0x69, 0x34, 0x8d, 0x16, 0x5a, 0xc6, 0x29, 0x54, 0xba, 0x37, 0x6e, 0x9c, + 0x10, 0x8a, 0xb6, 0xa1, 0x3c, 0x0e, 0x7c, 0x12, 0xd2, 0x46, 0x61, 0xb7, 0xb0, 0x5f, 0xc2, 0xf2, + 0x84, 0x10, 0xa8, 0xe3, 0x28, 0x0c, 0x1b, 0x45, 0xce, 0xe5, 0x34, 0x93, 0x4d, 0x48, 0x3c, 0x27, + 0x71, 0x43, 0x11, 0xb2, 0xe2, 0x64, 
0xfc, 0x5f, 0x81, 0xf5, 0x0e, 0x8f, 0xc3, 0x8e, 0xdd, 0x30, + 0x71, 0xc7, 0xd4, 0x8f, 0x42, 0x74, 0x04, 0x90, 0x50, 0x97, 0x92, 0x09, 0x09, 0x69, 0xd2, 0x28, + 0xec, 0x2a, 0xfb, 0x5a, 0x6b, 0xaf, 0xb9, 0x94, 0xc1, 0x47, 0x2a, 0xcd, 0x51, 0x2a, 0x8f, 0x97, + 0x54, 0x51, 0x0b, 0x34, 0x32, 0x27, 0x21, 0x75, 0x68, 0x74, 0x4b, 0xc2, 0x86, 0xba, 0x5b, 0xd8, + 0xd7, 0x5a, 0xeb, 0x4d, 0x91, 0xa0, 0xc9, 0x6e, 0x6c, 0x76, 0x81, 0x81, 0x64, 0xf4, 0xce, 0x7f, + 0x8a, 0x50, 0xcb, 0xac, 0x21, 0x0b, 0xaa, 0x63, 0x97, 0x92, 0xeb, 0x28, 0xbe, 0xe7, 0x69, 0xd6, + 0x5b, 0xcf, 0x1f, 0x19, 0x48, 0xb3, 0x2b, 0xf5, 0x70, 0x66, 0x01, 0xfd, 0x0a, 0x2a, 0x63, 0x81, + 0x1e, 0x47, 0x47, 0x6b, 0x6d, 0x2c, 0x1b, 0x93, 0xc0, 0xe2, 0x54, 0x06, 0xe9, 0xa0, 0x24, 0x1f, + 0x02, 0x0e, 0xd9, 0x0a, 0x66, 0xa4, 0xf1, 0xcf, 0x02, 0x54, 0x53, 0xbb, 0x68, 0x03, 0xd6, 0x3a, + 0x96, 0x73, 0x3e, 0xc0, 0x66, 0x77, 0x78, 0x34, 0xe8, 0xbf, 0x37, 0x7b, 0xfa, 0x13, 0xb4, 0x02, + 0xd5, 0x8e, 0xe5, 0x74, 0xcc, 0xa3, 0xfe, 0x40, 0x2f, 0xa0, 0x55, 0xa8, 0x75, 0x2c, 0xa7, 0x3b, + 0x3c, 0x3d, 0xed, 0xdb, 0x7a, 0x11, 0xad, 0x81, 0xd6, 0xb1, 0x1c, 0x3c, 0xb4, 0xac, 0x4e, 0xbb, + 0x7b, 0xa2, 0x2b, 0x68, 0x0b, 0xd6, 0x3b, 0x96, 0xd3, 0x3b, 0xb5, 0x9c, 0x9e, 0x79, 0x86, 0xcd, + 0x6e, 0xdb, 0x36, 0x7b, 0xba, 0x8a, 0x00, 0xca, 0x8c, 0xdd, 0xb3, 0xf4, 0x92, 0xa4, 0x47, 0xa6, + 0xad, 0x97, 0xa5, 0xb9, 0xfe, 0x60, 0x64, 0x62, 0x5b, 0xaf, 0xc8, 0xe3, 0xf9, 0x59, 0xaf, 0x6d, + 0x9b, 0x7a, 0x55, 0x1e, 0x7b, 0xa6, 0x65, 0xda, 0xa6, 0x5e, 0x3b, 0x56, 0xab, 0x45, 0x5d, 0x39, + 0x56, 0xab, 0x8a, 0xae, 0x1a, 0x7f, 0x2f, 0xc0, 0xd6, 0x88, 0xc6, 0xc4, 0x9d, 0x9c, 0x90, 0x7b, + 0xec, 0x86, 0xd7, 0x04, 0x93, 0x0f, 0x33, 0x92, 0x50, 0xb4, 0x03, 0xd5, 0x69, 0x94, 0xf8, 0x0c, + 0x3b, 0x0e, 0x70, 0x0d, 0x67, 0x67, 0x74, 0x08, 0xb5, 0x5b, 0x72, 0xef, 0xc4, 0x4c, 0x5e, 0x02, + 0x86, 0x9a, 0x59, 0x41, 0x66, 0x96, 0xaa, 0xb7, 0x92, 0x5a, 0xc6, 0x57, 0xf9, 0x32, 0xbe, 0xc6, + 0x15, 0x6c, 0x3f, 0x0c, 0x2a, 0x99, 0x46, 0x61, 0x42, 0x90, 0x05, 0x48, 
0x28, 0x3a, 0x74, 0xf1, + 0x6d, 0x79, 0x7c, 0x5a, 0xeb, 0xe9, 0x67, 0x0b, 0x00, 0xaf, 0x5f, 0x3e, 0x64, 0x19, 0x7f, 0x81, + 0x0d, 0xe1, 0xc7, 0x76, 0x2f, 0x03, 0x92, 0x3c, 0x26, 0xf5, 0x6d, 0x28, 0x53, 0x2e, 0xdc, 0x28, + 0xee, 0x2a, 0xfb, 0x35, 0x2c, 0x4f, 0x5f, 0x9b, 0xa1, 0x07, 0x9b, 0x79, 0xcf, 0xdf, 0x4b, 0x7e, + 0xbf, 0x05, 0x15, 0xcf, 0x02, 0x82, 0x36, 0xa1, 0x34, 0x71, 0xe9, 0xf8, 0x46, 0x66, 0x23, 0x0e, + 0x2c, 0x95, 0x2b, 0x3f, 0xa0, 0x24, 0xe6, 0x9f, 0xb0, 0x86, 0xe5, 0xc9, 0x78, 0x0e, 0xe5, 0xd7, + 0x9c, 0x42, 0xbf, 0x80, 0x52, 0x3c, 0x63, 0xb9, 0x8a, 0xa7, 0xae, 0x2f, 0x07, 0xc0, 0x0c, 0x63, + 0x71, 0x6d, 0xfc, 0xa3, 0x08, 0x2b, 0x22, 0xa0, 0x51, 0x34, 0x8b, 0xc7, 0x84, 0x21, 0x78, 0x4b, + 0xee, 0x93, 0xa9, 0x3b, 0x26, 0x29, 0x82, 0xe9, 0x99, 0x05, 0x93, 0xdc, 0xb8, 0xb1, 0x27, 0xbd, + 0x8a, 0x03, 0xfa, 0x1d, 0x68, 0x1c, 0x49, 0xea, 0xd0, 0xfb, 0x29, 0xe1, 0x18, 0xd6, 0x5b, 0x9b, + 0x8b, 0xa2, 0xe2, 0x38, 0x51, 0xfb, 0x7e, 0x4a, 0x30, 0xd0, 0x8c, 0xce, 0x57, 0xa2, 0xfa, 0x88, + 0x4a, 0x5c, 0x7c, 0xbf, 0x52, 0xee, 0xfb, 0x1d, 0x64, 0x60, 0x94, 0xa5, 0x95, 0xa5, 0x5c, 0x05, + 0x1c, 0x29, 0x40, 0xa8, 0x09, 0xe5, 0x28, 0x74, 0x3c, 0x2f, 0x68, 0x54, 0x78, 0x98, 0x3f, 0x5a, + 0x96, 0x1d, 0x86, 0xbd, 0x9e, 0xd5, 0x16, 0x9f, 0xa4, 0x14, 0x85, 0x3d, 0x2f, 0x30, 0xde, 0x42, + 0x0d, 0x47, 0x77, 0xdd, 0x1b, 0x1e, 0x80, 0x01, 0xe5, 0x4b, 0x72, 0x15, 0xc5, 0x44, 0x7e, 0x55, + 0x90, 0x5d, 0x0f, 0x47, 0x77, 0x58, 0xde, 0xa0, 0x5d, 0x28, 0xb9, 0x57, 0xe9, 0x87, 0xc9, 0x8b, + 0x88, 0x0b, 0xc3, 0x85, 0x2a, 0x8e, 0xee, 0x78, 0xa7, 0x44, 0x4f, 0x41, 0x20, 0xe2, 0x84, 0xee, + 0x24, 0x85, 0xbb, 0xc6, 0x39, 0x03, 0x77, 0x42, 0xd0, 0x0b, 0xd0, 0xe2, 0xe8, 0xce, 0x19, 0x73, + 0xf7, 0xa2, 0x6c, 0xb5, 0xd6, 0x56, 0xee, 0x53, 0xa6, 0xc1, 0x61, 0x88, 0x53, 0x32, 0x31, 0xde, + 0x02, 0xbc, 0xf6, 0x49, 0xe0, 0x3d, 0xca, 0xc9, 0xcf, 0x19, 0x7c, 0x24, 0xf0, 0x52, 0xfb, 0x2b, + 0x32, 0x64, 0x6e, 0x01, 0xcb, 0x3b, 0x06, 0xc4, 0x88, 0x7d, 0xed, 0x23, 0xea, 0x7b, 0xdf, 0xa1, + 0x46, 0x10, 
0xa8, 0xd7, 0xd4, 0xf7, 0x78, 0x71, 0xd4, 0x30, 0xa7, 0x8d, 0x57, 0x50, 0xba, 0xe0, + 0xe6, 0x5e, 0x80, 0xc6, 0xa5, 0x1c, 0xc6, 0x4e, 0x2b, 0x36, 0x97, 0x66, 0xe6, 0x1a, 0x43, 0x92, + 0x92, 0x89, 0xd1, 0x86, 0xd5, 0x13, 0xe9, 0x96, 0x0b, 0x7c, 0x7d, 0x5c, 0xc6, 0xbf, 0x8a, 0x50, + 0x39, 0x8e, 0x66, 0x71, 0xe8, 0x06, 0xa8, 0x0e, 0x45, 0xdf, 0xe3, 0x7a, 0x0a, 0x2e, 0xfa, 0x1e, + 0xfa, 0x23, 0xd4, 0x27, 0xfe, 0x75, 0xec, 0xb2, 0x7a, 0x10, 0xa5, 0x5d, 0xe4, 0x35, 0xf3, 0xe3, + 0xe5, 0xc8, 0x4e, 0x53, 0x09, 0x5e, 0xdf, 0xab, 0x93, 0xe5, 0xe3, 0x52, 0xc5, 0x2a, 0xb9, 0x8a, + 0x7d, 0x06, 0xf5, 0x20, 0x1a, 0xbb, 0x81, 0x93, 0xf5, 0x2a, 0x95, 0x07, 0xb5, 0xca, 0xb9, 0x67, + 0x69, 0xc3, 0x7a, 0x80, 0x4b, 0xe9, 0x91, 0xb8, 0xa0, 0x97, 0xb0, 0x32, 0x75, 0x63, 0xea, 0x8f, + 0xfd, 0xa9, 0xcb, 0xa6, 0x7d, 0x99, 0x2b, 0xe6, 0xc2, 0xce, 0xe1, 0x86, 0x73, 0xe2, 0xe8, 0x67, + 0xb0, 0x12, 0x93, 0x39, 0x89, 0x13, 0xe2, 0x39, 0xcc, 0x6f, 0x65, 0x57, 0xd9, 0x57, 0xb0, 0x96, + 0xf2, 0xfa, 0x5e, 0x62, 0xfc, 0xaf, 0x08, 0xe5, 0x0b, 0x51, 0x5d, 0x07, 0xa0, 0x72, 0x6c, 0xc4, + 0x24, 0xdf, 0x5e, 0x76, 0x22, 0x24, 0x38, 0x30, 0x5c, 0x06, 0xfd, 0x04, 0x6a, 0xd4, 0x9f, 0x90, + 0x84, 0xba, 0x93, 0x29, 0x07, 0x53, 0xc1, 0x0b, 0xc6, 0xa7, 0x6a, 0x84, 0x8d, 0x6b, 0xf6, 0x58, + 0x05, 0x3c, 0x8c, 0x44, 0xbf, 0x86, 0x1a, 0x7b, 0x13, 0x7c, 0xbb, 0x68, 0x94, 0xf8, 0x23, 0xdb, + 0x7c, 0xf0, 0x22, 0xb8, 0x5b, 0x5c, 0x8d, 0xd3, 0x57, 0xf6, 0x7b, 0xd0, 0x78, 0x15, 0x4b, 0x25, + 0xd1, 0x25, 0xb6, 0xf3, 0x5d, 0x22, 0x7d, 0x2d, 0x18, 0xae, 0x16, 0x2f, 0x67, 0x0f, 0x4a, 0x73, + 0x1e, 0x52, 0x45, 0x6e, 0x39, 0xcb, 0xc9, 0x71, 0xd8, 0xc5, 0x3d, 0x1b, 0x21, 0x7f, 0x16, 0x55, + 0xd4, 0xa8, 0x7e, 0x3c, 0x42, 0x64, 0x81, 0xe1, 0x54, 0x86, 0x21, 0x3c, 0x9e, 0xc5, 0x31, 0xdf, + 0xa2, 0xfc, 0x09, 0x69, 0x6c, 0x72, 0x28, 0x34, 0xc9, 0xb3, 0xfd, 0x09, 0x31, 0xfe, 0x56, 0x84, + 0xfa, 0x85, 0x98, 0x33, 0xe9, 0x6c, 0x7b, 0x05, 0x1b, 0xe4, 0xea, 0x8a, 0x8c, 0xa9, 0x3f, 0x27, + 0xce, 0xd8, 0x0d, 0x02, 0x12, 0x3b, 0xb2, 0x60, 
0xb5, 0xd6, 0x5a, 0x53, 0xec, 0x9b, 0x5d, 0xce, + 0xef, 0xf7, 0xf0, 0x7a, 0x26, 0x2b, 0x59, 0x1e, 0x32, 0x61, 0xc3, 0x9f, 0x4c, 0x88, 0xe7, 0xbb, + 0x74, 0xd9, 0x80, 0xe8, 0x54, 0x5b, 0xf2, 0xd9, 0x5f, 0xd8, 0x47, 0x2e, 0x25, 0x0b, 0x33, 0x99, + 0x46, 0x66, 0xe6, 0x19, 0xab, 0xea, 0xf8, 0x3a, 0x1b, 0x97, 0xab, 0x52, 0xd3, 0xe6, 0x4c, 0x2c, + 0x2f, 0x73, 0xa3, 0x58, 0x7d, 0x30, 0x8a, 0x17, 0x2d, 0xbb, 0xf4, 0xa5, 0x96, 0x6d, 0xbc, 0x84, + 0xb5, 0x0c, 0x08, 0x39, 0x6a, 0x0f, 0xa0, 0xcc, 0x3f, 0x65, 0xda, 0x2b, 0xd0, 0xc7, 0x55, 0x87, + 0xa5, 0x84, 0xf1, 0xd7, 0x22, 0xa0, 0x54, 0x3f, 0xba, 0x4b, 0x7e, 0xa0, 0x60, 0x6e, 0x42, 0x89, + 0xf3, 0x25, 0x92, 0xe2, 0xc0, 0x70, 0x08, 0xdc, 0x84, 0x4e, 0x6f, 0x33, 0x18, 0x85, 0xf2, 0x5b, + 0xf6, 0x8b, 0x49, 0x32, 0x0b, 0x28, 0x96, 0x12, 0xc6, 0xbf, 0x0b, 0xb0, 0x91, 0xc3, 0x41, 0x62, + 0xb9, 0x68, 0xff, 0x85, 0x6f, 0x6f, 0xff, 0x68, 0x1f, 0xaa, 0xd3, 0xdb, 0xcf, 0x8c, 0x89, 0xec, + 0xf6, 0x93, 0xaf, 0xf8, 0xa7, 0xa0, 0xc6, 0xd1, 0x5d, 0xd2, 0x50, 0xb9, 0xe6, 0xf2, 0x4c, 0xe4, + 0x7c, 0x36, 0x58, 0x73, 0x79, 0xe4, 0x06, 0xab, 0xb8, 0x39, 0xf8, 0x03, 0x68, 0x4b, 0xf3, 0x99, + 0xad, 0xd0, 0xfd, 0xa3, 0xc1, 0x10, 0x9b, 0xfa, 0x13, 0x54, 0x05, 0x75, 0x64, 0x0f, 0xcf, 0xf4, + 0x02, 0xa3, 0xcc, 0x3f, 0x99, 0x5d, 0xb1, 0x96, 0x33, 0xca, 0x91, 0x42, 0xca, 0xc1, 0x7f, 0x0b, + 0x00, 0x8b, 0x86, 0x84, 0x34, 0xa8, 0x9c, 0x0f, 0x4e, 0x06, 0xc3, 0x77, 0x03, 0x61, 0xe0, 0xc8, + 0xee, 0xf7, 0xf4, 0x02, 0xaa, 0x41, 0x49, 0xec, 0xf9, 0x45, 0xe6, 0x41, 0x2e, 0xf9, 0x0a, 0xfb, + 0x07, 0x90, 0x6d, 0xf8, 0x2a, 0xaa, 0x80, 0x92, 0xed, 0xf1, 0x72, 0x71, 0x2f, 0x33, 0x83, 0xd8, + 0x3c, 0xb3, 0xda, 0x5d, 0x53, 0xaf, 0xb0, 0x8b, 0x6c, 0x85, 0x07, 0x28, 0xa7, 0xfb, 0x3b, 0xd3, + 0x64, 0x5b, 0x3f, 0x30, 0x3f, 0x43, 0xfb, 0x8d, 0x89, 0x75, 0x8d, 0xf1, 0xf0, 0xf0, 0x9d, 0xbe, + 0xc2, 0x78, 0xaf, 0xfb, 0xa6, 0xd5, 0xd3, 0x57, 0xd9, 0xda, 0xff, 0xc6, 0x6c, 0x63, 0xbb, 0x63, + 0xb6, 0x6d, 0xbd, 0xce, 0x6e, 0x2e, 0x78, 0x80, 0x6b, 0xcc, 0xcd, 0xf1, 0xf0, 0x1c, 
0x0f, 0xda, + 0x96, 0xae, 0x1f, 0xec, 0xc1, 0x6a, 0x6e, 0xfe, 0x30, 0x5f, 0x76, 0xbb, 0x63, 0x99, 0x23, 0xfd, + 0x09, 0xa3, 0x47, 0x6f, 0xda, 0xb8, 0x37, 0xd2, 0x0b, 0x9d, 0x5f, 0xbe, 0xdf, 0x9b, 0xfb, 0x94, + 0x24, 0x49, 0xd3, 0x8f, 0x0e, 0x05, 0x75, 0x78, 0x1d, 0x1d, 0xce, 0xe9, 0x21, 0xff, 0x0b, 0x7a, + 0xb8, 0x78, 0x3e, 0x97, 0x65, 0xce, 0xf9, 0xcd, 0x37, 0x01, 0x00, 0x00, 0xff, 0xff, 0x2e, 0xb4, + 0x72, 0xde, 0xde, 0x0e, 0x00, 0x00, } diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index 018e007fdb5..b7645714375 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -320,6 +320,12 @@ var commands = []commandGroup{ {"MigrateServedFrom", commandMigrateServedFrom, "[-cells=c1,c2,...] [-reverse] ", "Makes the serve the given type. This command also rebuilds the serving graph."}, + {"MigrateReads", commandMigrateReads, + "[-cells=c1,c2,...] [-reverse] ", + "Migrate read traffic for the specified workflow."}, + {"MigrateWrites", commandMigrateWrites, + "[-filtered_replication_wait_time=30s] ", + "Migrate write traffic for the specified workflow."}, {"CancelResharding", commandCancelResharding, "", "Permanently cancels a resharding in progress. All resharding related metadata will be deleted."}, @@ -1760,9 +1766,9 @@ func commandVerticalSplitClone(ctx context.Context, wr *wrangler.Wrangler, subFl func commandMigrateServedTypes(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { cellsStr := subFlags.String("cells", "", "Specifies a comma-separated list of cells to update") - reverse := subFlags.Bool("reverse", false, "Moves the served tablet type backward instead of forward. 
Use in case of trouble") + reverse := subFlags.Bool("reverse", false, "Moves the served tablet type backward instead of forward.") skipReFreshState := subFlags.Bool("skip-refresh-state", false, "Skips refreshing the state of the source tablets after the migration, meaning that the refresh will need to be done manually, replica and rdonly only)") - filteredReplicationWaitTime := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, "Specifies the maximum time to wait, in seconds, for filtered replication to catch up on master migrations") + filteredReplicationWaitTime := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, "Specifies the maximum time to wait, in seconds, for filtered replication to catch up on master migrations. The migration will be aborted on timeout.") reverseReplication := subFlags.Bool("reverse_replication", false, "For master migration, enabling this flag reverses replication which allows you to rollback") if err := subFlags.Parse(args); err != nil { return err @@ -1790,9 +1796,9 @@ func commandMigrateServedTypes(ctx context.Context, wr *wrangler.Wrangler, subFl } func commandMigrateServedFrom(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { - reverse := subFlags.Bool("reverse", false, "Moves the served tablet type backward instead of forward. 
Use in case of trouble") + reverse := subFlags.Bool("reverse", false, "Moves the served tablet type backward instead of forward.") cellsStr := subFlags.String("cells", "", "Specifies a comma-separated list of cells to update") - filteredReplicationWaitTime := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, "Specifies the maximum time to wait, in seconds, for filtered replication to catch up on master migrations") + filteredReplicationWaitTime := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, "Specifies the maximum time to wait, in seconds, for filtered replication to catch up on master migrations. The migration will be aborted on timeout.") if err := subFlags.Parse(args); err != nil { return err } @@ -1815,6 +1821,52 @@ func commandMigrateServedFrom(ctx context.Context, wr *wrangler.Wrangler, subFla return wr.MigrateServedFrom(ctx, keyspace, shard, servedType, cells, *reverse, *filteredReplicationWaitTime) } +func commandMigrateReads(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { + reverse := subFlags.Bool("reverse", false, "Moves the served tablet type backward instead of forward.") + cellsStr := subFlags.String("cells", "", "Specifies a comma-separated list of cells to update") + if err := subFlags.Parse(args); err != nil { + return err + } + if subFlags.NArg() != 3 { + return fmt.Errorf("the , and arguments are required for the MigrateReads command") + } + + keyspace := subFlags.Arg(0) + workflow := subFlags.Arg(1) + servedType, err := parseTabletType(subFlags.Arg(2), []topodatapb.TabletType{topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY}) + if err != nil { + return err + } + var cells []string + if *cellsStr != "" { + cells = strings.Split(*cellsStr, ",") + } + direction := wrangler.DirectionForward + if *reverse { + direction = wrangler.DirectionBackward + } + return wr.MigrateReads(ctx, keyspace, workflow, servedType, cells, direction) +} + +func 
commandMigrateWrites(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { + filteredReplicationWaitTime := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, "Specifies the maximum time to wait, in seconds, for filtered replication to catch up on master migrations. The migration will be aborted on timeout.") + if err := subFlags.Parse(args); err != nil { + return err + } + if subFlags.NArg() != 2 { + return fmt.Errorf("the and arguments are required for the MigrateWrites command") + } + + keyspace := subFlags.Arg(0) + workflow := subFlags.Arg(1) + journalID, err := wr.MigrateWrites(ctx, keyspace, workflow, *filteredReplicationWaitTime) + if err != nil { + return err + } + wr.Logger().Infof("Migration Journal ID: %v", journalID) + return nil +} + func commandCancelResharding(ctx context.Context, wr *wrangler.Wrangler, subFlags *flag.FlagSet, args []string) error { if err := subFlags.Parse(args); err != nil { return err diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go b/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go index 1a964e7b02a..359cc782505 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go @@ -35,6 +35,7 @@ const ( updateQuery deleteQuery selectQuery + reshardingJournalQuery ) // buildControllerPlan parses the input query and returns an appropriate plan. 
@@ -58,15 +59,23 @@ func buildControllerPlan(query string) (*controllerPlan, error) { } func buildInsertPlan(ins *sqlparser.Insert) (*controllerPlan, error) { + switch sqlparser.String(ins.Table) { + case reshardingJournalTableName: + return &controllerPlan{ + opcode: reshardingJournalQuery, + query: sqlparser.String(ins), + }, nil + case vreplicationTableName: + // no-op + default: + return nil, fmt.Errorf("invalid table name: %v", sqlparser.String(ins.Table)) + } if ins.Action != sqlparser.InsertStr { return nil, fmt.Errorf("unsupported construct: %v", sqlparser.String(ins)) } if ins.Ignore != "" { return nil, fmt.Errorf("unsupported construct: %v", sqlparser.String(ins)) } - if sqlparser.String(ins.Table) != "_vt.vreplication" { - return nil, fmt.Errorf("invalid table name: %v", sqlparser.String(ins.Table)) - } if ins.Partitions != nil { return nil, fmt.Errorf("unsupported construct: %v", sqlparser.String(ins)) } @@ -106,7 +115,15 @@ func buildInsertPlan(ins *sqlparser.Insert) (*controllerPlan, error) { } func buildUpdatePlan(upd *sqlparser.Update) (*controllerPlan, error) { - if sqlparser.String(upd.TableExprs) != "_vt.vreplication" { + switch sqlparser.String(upd.TableExprs) { + case reshardingJournalTableName: + return &controllerPlan{ + opcode: reshardingJournalQuery, + query: sqlparser.String(upd), + }, nil + case vreplicationTableName: + // no-op + default: return nil, fmt.Errorf("invalid table name: %v", sqlparser.String(upd.TableExprs)) } if upd.OrderBy != nil || upd.Limit != nil { @@ -131,12 +148,20 @@ func buildUpdatePlan(upd *sqlparser.Update) (*controllerPlan, error) { } func buildDeletePlan(del *sqlparser.Delete) (*controllerPlan, error) { + switch sqlparser.String(del.TableExprs) { + case reshardingJournalTableName: + return &controllerPlan{ + opcode: reshardingJournalQuery, + query: sqlparser.String(del), + }, nil + case vreplicationTableName: + // no-op + default: + return nil, fmt.Errorf("invalid table name: %v", 
sqlparser.String(del.TableExprs)) + } if del.Targets != nil { return nil, fmt.Errorf("unsupported construct: %v", sqlparser.String(del)) } - if sqlparser.String(del.TableExprs) != "_vt.vreplication" { - return nil, fmt.Errorf("invalid table name: %v", sqlparser.String(del.TableExprs)) - } if del.Partitions != nil { return nil, fmt.Errorf("unsupported construct: %v", sqlparser.String(del)) } @@ -157,13 +182,20 @@ func buildDeletePlan(del *sqlparser.Delete) (*controllerPlan, error) { } func buildSelectPlan(sel *sqlparser.Select) (*controllerPlan, error) { - if sqlparser.String(sel.From) != "_vt.vreplication" { + switch sqlparser.String(sel.From) { + case reshardingJournalTableName: + return &controllerPlan{ + opcode: reshardingJournalQuery, + query: sqlparser.String(sel), + }, nil + case vreplicationTableName: + return &controllerPlan{ + opcode: selectQuery, + query: sqlparser.String(sel), + }, nil + default: return nil, fmt.Errorf("invalid table name: %v", sqlparser.String(sel.From)) } - return &controllerPlan{ - opcode: selectQuery, - query: sqlparser.String(sel), - }, nil } func extractID(where *sqlparser.Where) (int, error) { diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go b/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go index 533668a2955..18296c76b8f 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go @@ -124,8 +124,8 @@ func TestControllerPlan(t *testing.T) { in: "delete from a where id = 1", err: "invalid table name: a", }, { - in: "delete a, b from a where id = 1", - err: "unsupported construct: delete a, b from a where id = 1", + in: "delete a, b from _vt.vreplication where id = 1", + err: "unsupported construct: delete a, b from _vt.vreplication where id = 1", }, { in: "delete from _vt.vreplication where id = 1 order by id", err: "unsupported construct: delete from _vt.vreplication where id = 1 order by id 
asc", diff --git a/go/vt/vttablet/tabletmanager/vreplication/engine.go b/go/vt/vttablet/tabletmanager/vreplication/engine.go index bbe760f46d4..65a9bc2a8eb 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/engine.go +++ b/go/vt/vttablet/tabletmanager/vreplication/engine.go @@ -32,6 +32,17 @@ import ( "vitess.io/vitess/go/vt/topo" ) +const ( + reshardingJournalTableName = "_vt.resharding_journal" + vreplicationTableName = "_vt.vreplication" + createReshardingJournalTable = `create table if not exists _vt.resharding_journal( + id bigint, + db_name varbinary(255), + val blob, + primary key (id) +) ENGINE=InnoDB` +) + var tabletTypesStr = flag.String("vreplication_tablet_type", "REPLICA", "comma separated list of tablet types used as a source") // waitRetryTime can be changed to a smaller value for tests. @@ -102,7 +113,7 @@ func (vre *Engine) Open(ctx context.Context) error { // executeFetchMaybeCreateTable calls DBClient.ExecuteFetch and does one retry if // there's a failure due to mysql.ERNoSuchTable or mysql.ERBadDb which can be fixed -// by re-creating the _vt.vreplication table. +// by re-creating the vreplication tables. func (vre *Engine) executeFetchMaybeCreateTable(dbClient binlogplayer.DBClient, query string, maxrows int) (qr *sqltypes.Result, err error) { qr, err = dbClient.ExecuteFetch(query, maxrows) @@ -110,29 +121,33 @@ func (vre *Engine) executeFetchMaybeCreateTable(dbClient binlogplayer.DBClient, return } - // If it's a bad table or db, it could be because _vt.vreplication wasn't created. - // In that case we can try creating it again. + // If it's a bad table or db, it could be because the vreplication tables weren't created. + // In that case we can try creating them again. merr, isSQLErr := err.(*mysql.SQLError) if !isSQLErr || !(merr.Num == mysql.ERNoSuchTable || merr.Num == mysql.ERBadDb || merr.Num == mysql.ERBadFieldError) { return qr, err } - log.Info("Looks like _vt.vreplication table may not exist. Trying to recreate... 
") + log.Info("Looks like the vreplcation tables may not exist. Trying to recreate... ") if merr.Num == mysql.ERNoSuchTable || merr.Num == mysql.ERBadDb { for _, query := range binlogplayer.CreateVReplicationTable() { if _, merr := dbClient.ExecuteFetch(query, 0); merr != nil { - log.Warningf("Failed to ensure _vt.vreplication table exists: %v", merr) + log.Warningf("Failed to ensure %s exists: %v", vreplicationTableName, merr) return nil, err } } + if _, merr := dbClient.ExecuteFetch(createReshardingJournalTable, 0); merr != nil { + log.Warningf("Failed to ensure %s exists: %v", reshardingJournalTableName, merr) + return nil, err + } } if merr.Num == mysql.ERBadFieldError { - log.Info("Adding column to table _vt.vreplication") + log.Infof("Adding column to table %s", vreplicationTableName) for _, query := range binlogplayer.AlterVReplicationTable() { if _, merr := dbClient.ExecuteFetch(query, 0); merr != nil { merr, isSQLErr := err.(*mysql.SQLError) if !isSQLErr || !(merr.Num == mysql.ERDupFieldName) { - log.Warningf("Failed to alter _vt.vreplication table: %v", merr) + log.Warningf("Failed to alter %s table: %v", vreplicationTableName, merr) return nil, err } } @@ -287,8 +302,8 @@ func (vre *Engine) Exec(query string) (*sqltypes.Result, error) { delete(vre.controllers, plan.id) } return vre.executeFetchMaybeCreateTable(dbClient, plan.query, 1) - case selectQuery: - // select queries are passed through. + case selectQuery, reshardingJournalQuery: + // select and resharding journal queries are passed through. 
return vre.executeFetchMaybeCreateTable(dbClient, plan.query, 10000) } panic("unreachable") diff --git a/go/vt/vttablet/tabletmanager/vreplication/engine_test.go b/go/vt/vttablet/tabletmanager/vreplication/engine_test.go index a4ac882dd10..9d644c0a202 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/engine_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/engine_test.go @@ -412,6 +412,7 @@ func TestCreateDBAndTable(t *testing.T) { dbClient.ExpectRequest("CREATE DATABASE IF NOT EXISTS _vt", &sqltypes.Result{}, nil) dbClient.ExpectRequest("DROP TABLE IF EXISTS _vt.blp_checkpoint", &sqltypes.Result{}, nil) dbClient.ExpectRequestRE("CREATE TABLE IF NOT EXISTS _vt.vreplication.*", &sqltypes.Result{}, nil) + dbClient.ExpectRequestRE("create table if not exists _vt.resharding_journal.*", &sqltypes.Result{}, nil) dbClient.ExpectRequest("use _vt", &sqltypes.Result{}, nil) // Non-recoverable error. @@ -425,6 +426,7 @@ func TestCreateDBAndTable(t *testing.T) { dbClient.ExpectRequest("CREATE DATABASE IF NOT EXISTS _vt", &sqltypes.Result{}, nil) dbClient.ExpectRequest("DROP TABLE IF EXISTS _vt.blp_checkpoint", &sqltypes.Result{}, nil) dbClient.ExpectRequestRE("CREATE TABLE IF NOT EXISTS _vt.vreplication.*", &sqltypes.Result{}, nil) + dbClient.ExpectRequestRE("create table if not exists _vt.resharding_journal.*", &sqltypes.Result{}, nil) dbClient.ExpectRequest("insert into _vt.vreplication values (null)", &sqltypes.Result{InsertID: 1}, nil) diff --git a/go/vt/wrangler/fake_dbclient_test.go b/go/vt/wrangler/fake_dbclient_test.go new file mode 100644 index 00000000000..1f2ca1f54a7 --- /dev/null +++ b/go/vt/wrangler/fake_dbclient_test.go @@ -0,0 +1,120 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wrangler + +import ( + "fmt" + "regexp" + "testing" + + "vitess.io/vitess/go/sqltypes" +) + +func verifyQueries(t *testing.T, dcs []*fakeDBClient) { + for _, dc := range dcs { + dc.verifyQueries(t) + } +} + +type dbResult struct { + result *sqltypes.Result + err error + called bool +} + +// fakeDBClient fakes a binlog_player.DBClient. +type fakeDBClient struct { + queries map[string]*dbResult + queriesRE map[string]*dbResult +} + +// NewfakeDBClient returns a new DBClientMock. +func newFakeDBClient() *fakeDBClient { + return &fakeDBClient{ + queries: map[string]*dbResult{ + "use _vt": {result: &sqltypes.Result{}, called: true}, + "select * from _vt.vreplication where db_name='db'": {result: &sqltypes.Result{}}, + }, + queriesRE: make(map[string]*dbResult), + } +} + +func (dc *fakeDBClient) addQuery(query string, result *sqltypes.Result, err error) { + dc.queries[query] = &dbResult{result: result, err: err} +} + +func (dc *fakeDBClient) addQueryRE(query string, result *sqltypes.Result, err error) { + dc.queriesRE[query] = &dbResult{result: result, err: err} +} + +// DBName is part of the DBClient interface +func (dc *fakeDBClient) DBName() string { + return "db" +} + +// Connect is part of the DBClient interface +func (dc *fakeDBClient) Connect() error { + return nil +} + +// Begin is part of the DBClient interface +func (dc *fakeDBClient) Begin() error { + return nil +} + +// Commit is part of the DBClient interface +func (dc *fakeDBClient) Commit() error { + return nil +} + +// Rollback is part of the DBClient interface +func (dc *fakeDBClient) Rollback() 
error { + return nil +} + +// Close is part of the DBClient interface +func (dc *fakeDBClient) Close() { +} + +// ExecuteFetch is part of the DBClient interface +func (dc *fakeDBClient) ExecuteFetch(query string, maxrows int) (qr *sqltypes.Result, err error) { + if dbr := dc.queries[query]; dbr != nil { + dbr.called = true + return dbr.result, dbr.err + } + for re, dbr := range dc.queriesRE { + if regexp.MustCompile(re).MatchString(query) { + dbr.called = true + return dbr.result, dbr.err + } + } + return nil, fmt.Errorf("unexpected query: %s", query) +} + +func (dc *fakeDBClient) verifyQueries(t *testing.T) { + t.Helper() + for query, dbr := range dc.queries { + if !dbr.called { + t.Errorf("query: %v was not called", query) + } + } + for query, dbr := range dc.queriesRE { + if !dbr.called { + t.Errorf("query: %v was not called", query) + } + } +} diff --git a/go/vt/wrangler/fake_tablet_test.go b/go/vt/wrangler/fake_tablet_test.go new file mode 100644 index 00000000000..85adabf6cb1 --- /dev/null +++ b/go/vt/wrangler/fake_tablet_test.go @@ -0,0 +1,228 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package wrangler + +import ( + "fmt" + "net" + "net/http" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/vt/mysqlctl/fakemysqldaemon" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vttablet/grpctmserver" + "vitess.io/vitess/go/vt/vttablet/tabletconn" + "vitess.io/vitess/go/vt/vttablet/tabletmanager" + "vitess.io/vitess/go/vt/vttablet/tmclient" + + // import the gRPC client implementation for tablet manager + _ "vitess.io/vitess/go/vt/vttablet/grpctmclient" + + // import the gRPC client implementation for query service + _ "vitess.io/vitess/go/vt/vttablet/grpctabletconn" +) + +// This file was copied from testlib. All tests from testlib should be moved +// to the current directory. In order to move tests from there, we have to +// remove the circular dependency it causes (through vtctl dependence). +// The tests in this directory call wrangler functions directly. So, there's +// no circular dependency. + +// This file contains utility methods for unit tests. +// We allow the creation of fake tablets, and running their event loop based +// on a FakeMysqlDaemon. + +// fakeTablet keeps track of a fake tablet in memory. It has: +// - a Tablet record (used for creating the tablet, kept for user's information) +// - a FakeMysqlDaemon (used by the fake event loop) +// - a 'done' channel (used to terminate the fake event loop) +type fakeTablet struct { + // Tablet and FakeMysqlDaemon are populated at NewFakeTablet time. + // We also create the RPCServer, so users can register more services + // before calling StartActionLoop(). 
+ Tablet *topodatapb.Tablet + FakeMysqlDaemon *fakemysqldaemon.FakeMysqlDaemon + RPCServer *grpc.Server + + // The following fields are created when we start the event loop for + // the tablet, and closed / cleared when we stop it. + // The Listener is used by the gRPC server. + Agent *tabletmanager.ActionAgent + Listener net.Listener + + // These optional fields are used if the tablet also needs to + // listen on the 'vt' port. + StartHTTPServer bool + HTTPListener net.Listener + HTTPServer *http.Server +} + +// TabletOption is an interface for changing tablet parameters. +// It's a way to pass multiple parameters to NewFakeTablet without +// making it too cumbersome. +type TabletOption func(tablet *topodatapb.Tablet) + +// TabletKeyspaceShard is the option to set the tablet keyspace and shard +func TabletKeyspaceShard(t *testing.T, keyspace, shard string) TabletOption { + return func(tablet *topodatapb.Tablet) { + tablet.Keyspace = keyspace + shard, kr, err := topo.ValidateShardName(shard) + if err != nil { + t.Fatalf("cannot ValidateShardName value %v", shard) + } + tablet.Shard = shard + tablet.KeyRange = kr + } +} + +// newFakeTablet creates the test tablet in the topology. 'uid' +// has to be between 0 and 99. All the tablet info will be derived +// from that. Look at the implementation if you need values. +// Use TabletOption implementations if you need to change values at creation. +// 'db' can be nil if the test doesn't use a database at all. 
+func newFakeTablet(t *testing.T, wr *Wrangler, cell string, uid uint32, tabletType topodatapb.TabletType, db *fakesqldb.DB, options ...TabletOption) *fakeTablet { + if uid > 99 { + t.Fatalf("uid has to be between 0 and 99: %v", uid) + } + mysqlPort := int32(3300 + uid) + hostname := fmt.Sprintf("%v.%d", cell, uid) + tablet := &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: cell, Uid: uid}, + Hostname: hostname, + MysqlHostname: hostname, + PortMap: map[string]int32{ + "vt": int32(8100 + uid), + "grpc": int32(8200 + uid), + }, + Keyspace: "test_keyspace", + Shard: "0", + Type: tabletType, + } + topoproto.SetMysqlPort(tablet, mysqlPort) + for _, option := range options { + option(tablet) + } + if err := wr.InitTablet(context.Background(), tablet, false /* allowMasterOverride */, true /* createShardAndKeyspace */, false /* allowUpdate */); err != nil { + t.Fatalf("cannot create tablet %v: %v", uid, err) + } + + // create a FakeMysqlDaemon with the right information by default + fakeMysqlDaemon := fakemysqldaemon.NewFakeMysqlDaemon(db) + fakeMysqlDaemon.MysqlPort = mysqlPort + + return &fakeTablet{ + Tablet: tablet, + FakeMysqlDaemon: fakeMysqlDaemon, + RPCServer: grpc.NewServer(), + } +} + +// StartActionLoop will start the action loop for a fake tablet, +// using ft.FakeMysqlDaemon as the backing mysqld. +func (ft *fakeTablet) StartActionLoop(t *testing.T, wr *Wrangler) { + if ft.Agent != nil { + t.Fatalf("Agent for %v is already running", ft.Tablet.Alias) + } + + // Listen on a random port for gRPC. + var err error + ft.Listener, err = net.Listen("tcp", ":0") + if err != nil { + t.Fatalf("Cannot listen: %v", err) + } + gRPCPort := int32(ft.Listener.Addr().(*net.TCPAddr).Port) + + // If needed, listen on a random port for HTTP. 
+ vtPort := ft.Tablet.PortMap["vt"] + if ft.StartHTTPServer { + ft.HTTPListener, err = net.Listen("tcp", ":0") + if err != nil { + t.Fatalf("Cannot listen on http port: %v", err) + } + handler := http.NewServeMux() + ft.HTTPServer = &http.Server{ + Handler: handler, + } + go ft.HTTPServer.Serve(ft.HTTPListener) + vtPort = int32(ft.HTTPListener.Addr().(*net.TCPAddr).Port) + } + + // Create a test agent on that port, and re-read the record + // (it has new ports and IP). + ft.Agent = tabletmanager.NewTestActionAgent(context.Background(), wr.TopoServer(), ft.Tablet.Alias, vtPort, gRPCPort, ft.FakeMysqlDaemon, nil) + ft.Tablet = ft.Agent.Tablet() + + // Register the gRPC server, and starts listening. + grpctmserver.RegisterForTest(ft.RPCServer, ft.Agent) + go ft.RPCServer.Serve(ft.Listener) + + // And wait for it to serve, so we don't start using it before it's + // ready. + timeout := 5 * time.Second + step := 10 * time.Millisecond + c := tmclient.NewTabletManagerClient() + for timeout >= 0 { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + err := c.Ping(ctx, ft.Agent.Tablet()) + cancel() + if err == nil { + break + } + time.Sleep(step) + timeout -= step + } + if timeout < 0 { + panic("StartActionLoop failed.") + } +} + +// StopActionLoop will stop the Action Loop for the given FakeTablet +func (ft *fakeTablet) StopActionLoop(t *testing.T) { + if ft.Agent == nil { + t.Fatalf("Agent for %v is not running", ft.Tablet.Alias) + } + if ft.StartHTTPServer { + ft.HTTPListener.Close() + } + ft.Listener.Close() + ft.Agent.Stop() + ft.Agent = nil + ft.Listener = nil + ft.HTTPListener = nil +} + +// Target returns the keyspace/shard/type info of this tablet as Target. 
+func (ft *fakeTablet) Target() querypb.Target { + return querypb.Target{ + Keyspace: ft.Tablet.Keyspace, + Shard: ft.Tablet.Shard, + TabletType: ft.Tablet.Type, + } +} + +func init() { + // enforce we will use the right protocol (gRPC) in all unit tests + *tmclient.TabletManagerProtocol = "grpc" + *tabletconn.TabletProtocol = "grpc" +} diff --git a/go/vt/wrangler/migrater.go b/go/vt/wrangler/migrater.go new file mode 100644 index 00000000000..1627dd5edb0 --- /dev/null +++ b/go/vt/wrangler/migrater.go @@ -0,0 +1,859 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wrangler + +import ( + "fmt" + "hash/fnv" + "math" + "reflect" + "sort" + "strings" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/sync2" + "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/key" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +// MigrateDirection specifies the migration direction. +type MigrateDirection int + +// The following constants define the migration direction. 
+const (
+	DirectionForward = MigrateDirection(iota)
+	DirectionBackward
+)
+
+// accessType specifies the type of access for a shard (allow/disallow writes).
+type accessType int
+
+const (
+	allowWrites = accessType(iota)
+	disallowWrites
+)
+
+// migrater contains the metadata for migrating read and write traffic
+// for vreplication streams.
+type migrater struct {
+	migrationType  binlogdatapb.MigrationType
+	wr             *Wrangler
+	id             int64
+	sources        map[string]*miSource
+	targets        map[string]*miTarget
+	sourceKeyspace string
+	targetKeyspace string
+	tables         []string
+}
+
+// miTarget contains the metadata for each migration target.
+type miTarget struct {
+	si       *topo.ShardInfo
+	master   *topo.TabletInfo
+	sources  map[uint32]*binlogdatapb.BinlogSource
+	position string
+}
+
+// miSource contains the metadata for each migration source.
+type miSource struct {
+	si        *topo.ShardInfo
+	master    *topo.TabletInfo
+	position  string
+	journaled bool
+}
+
+// MigrateReads is a generic way of migrating read traffic for a resharding workflow.
+// The result is named so the deferred unlock can report a keyspace-unlock failure,
+// matching the pattern used by MigrateWrites.
+func (wr *Wrangler) MigrateReads(ctx context.Context, targetKeyspace, workflow string, servedType topodatapb.TabletType, cells []string, direction MigrateDirection) (err error) {
+	if servedType != topodatapb.TabletType_REPLICA && servedType != topodatapb.TabletType_RDONLY {
+		return fmt.Errorf("tablet type must be REPLICA or RDONLY: %v", servedType)
+	}
+	mi, err := wr.buildMigrater(ctx, targetKeyspace, workflow)
+	if err != nil {
+		return err
+	}
+	if err := mi.validate(ctx, false /* isWrite */); err != nil {
+		return err
+	}
+
+	// For reads, locking the source keyspace is sufficient.
+ ctx, unlock, lockErr := wr.ts.LockKeyspace(ctx, mi.sourceKeyspace, "MigrateReads") + if lockErr != nil { + return lockErr + } + defer unlock(&err) + + if mi.migrationType == binlogdatapb.MigrationType_TABLES { + return mi.migrateTableReads(ctx, cells, servedType, direction) + } + return mi.migrateShardReads(ctx, cells, servedType, direction) +} + +// MigrateWrites is a generic way of migrating write traffic for a resharding workflow. +func (wr *Wrangler) MigrateWrites(ctx context.Context, targetKeyspace, workflow string, filteredReplicationWaitTime time.Duration) (journalID int64, err error) { + mi, err := wr.buildMigrater(ctx, targetKeyspace, workflow) + if err != nil { + return 0, err + } + mi.wr.Logger().Infof("Built migration metadata: %+v", mi) + if err := mi.validate(ctx, true /* isWrite */); err != nil { + return 0, err + } + + // Need to lock both source and target keyspaces. + ctx, sourceUnlock, lockErr := wr.ts.LockKeyspace(ctx, mi.sourceKeyspace, "MigrateWrites") + if lockErr != nil { + return 0, lockErr + } + defer sourceUnlock(&err) + if mi.targetKeyspace != mi.sourceKeyspace { + tctx, targetUnlock, lockErr := wr.ts.LockKeyspace(ctx, mi.targetKeyspace, "MigrateWrites") + if lockErr != nil { + return 0, lockErr + } + ctx = tctx + defer targetUnlock(&err) + } + + journalsExist, err := mi.checkJournals(ctx) + if err != nil { + return 0, err + } + if !journalsExist { + mi.wr.Logger().Infof("No previous journals were found. Proceeding normally.") + if err := mi.stopSourceWrites(ctx); err != nil { + mi.cancelMigration(ctx) + return 0, err + } + if err := mi.waitForCatchup(ctx, filteredReplicationWaitTime); err != nil { + mi.cancelMigration(ctx) + return 0, err + } + } else { + mi.wr.Logger().Infof("Journals were found. Completing the left over steps.") + // Need to gather positions in case all journals were not created. + if err := mi.gatherPositions(ctx); err != nil { + return 0, err + } + } + // This is the point of no return. 
Once a journal is created, + // traffic can be redirected to target shards. + if err := mi.createJournals(ctx); err != nil { + return 0, err + } + if err := mi.createReverseReplication(ctx); err != nil { + return 0, err + } + if err := mi.allowTargetWrites(ctx); err != nil { + return 0, err + } + if err := mi.changeRouting(ctx); err != nil { + return 0, err + } + mi.deleteTargetVReplication(ctx) + return mi.id, nil +} + +func (wr *Wrangler) buildMigrater(ctx context.Context, targetKeyspace, workflow string) (*migrater, error) { + targets, err := wr.buildMigrationTargets(ctx, targetKeyspace, workflow) + if err != nil { + return nil, err + } + + mi := &migrater{ + wr: wr, + id: hashStreams(targetKeyspace, targets), + targets: targets, + sources: make(map[string]*miSource), + targetKeyspace: targetKeyspace, + } + mi.wr.Logger().Infof("Migration ID for workflow %s: %d", workflow, mi.id) + + // Build the sources + for _, target := range targets { + for _, bls := range target.sources { + if mi.sourceKeyspace == "" { + mi.sourceKeyspace = bls.Keyspace + } else if mi.sourceKeyspace != bls.Keyspace { + return nil, fmt.Errorf("source keyspaces are mismatched across streams: %v vs %v", mi.sourceKeyspace, bls.Keyspace) + } + if _, ok := mi.sources[bls.Shard]; ok { + continue + } + + sourcesi, err := mi.wr.ts.GetShard(ctx, bls.Keyspace, bls.Shard) + if err != nil { + return nil, err + } + sourceMaster, err := mi.wr.ts.GetTablet(ctx, sourcesi.MasterAlias) + if err != nil { + return nil, err + } + mi.sources[bls.Shard] = &miSource{ + si: sourcesi, + master: sourceMaster, + } + + if mi.tables == nil { + for _, rule := range bls.Filter.Rules { + mi.tables = append(mi.tables, rule.Match) + } + sort.Strings(mi.tables) + } else { + var tables []string + for _, rule := range bls.Filter.Rules { + tables = append(tables, rule.Match) + } + sort.Strings(tables) + if !reflect.DeepEqual(mi.tables, tables) { + return nil, fmt.Errorf("table lists are mismatched across streams: %v vs %v", 
mi.tables, tables) + } + } + } + } + if mi.sourceKeyspace != mi.targetKeyspace { + mi.migrationType = binlogdatapb.MigrationType_TABLES + } else { + mi.migrationType = binlogdatapb.MigrationType_SHARDS + } + return mi, nil +} + +func (wr *Wrangler) buildMigrationTargets(ctx context.Context, targetKeyspace, workflow string) (targets map[string]*miTarget, err error) { + targets = make(map[string]*miTarget) + targetShards, err := wr.ts.GetShardNames(ctx, targetKeyspace) + if err != nil { + return nil, err + } + // We check all target shards. All of them may not have a stream. + // For example, if we're splitting -80 to -40,40-80, only those + // two target shards will have vreplication streams. + for _, targetShard := range targetShards { + targetsi, err := wr.ts.GetShard(ctx, targetKeyspace, targetShard) + if err != nil { + return nil, err + } + targetMaster, err := wr.ts.GetTablet(ctx, targetsi.MasterAlias) + if err != nil { + return nil, err + } + p3qr, err := wr.tmc.VReplicationExec(ctx, targetMaster.Tablet, fmt.Sprintf("select id, source from _vt.vreplication where workflow='%s' and db_name='%s'", workflow, targetMaster.DbName())) + if err != nil { + return nil, err + } + // If there's no vreplication stream, check the next target. + if len(p3qr.Rows) < 1 { + continue + } + + targets[targetShard] = &miTarget{ + si: targetsi, + master: targetMaster, + sources: make(map[uint32]*binlogdatapb.BinlogSource), + } + qr := sqltypes.Proto3ToResult(p3qr) + for _, row := range qr.Rows { + id, err := sqltypes.ToInt64(row[0]) + if err != nil { + return nil, err + } + var bls binlogdatapb.BinlogSource + if err := proto.UnmarshalText(row[1].ToString(), &bls); err != nil { + return nil, err + } + targets[targetShard].sources[uint32(id)] = &bls + } + } + if len(targets) == 0 { + return nil, fmt.Errorf("no streams found in keyspace %s for: %s", targetKeyspace, workflow) + } + return targets, nil +} + +// hashStreams produces a reproducible hash based on the input parameters. 
+func hashStreams(targetKeyspace string, targets map[string]*miTarget) int64 { + var expanded []string + for shard, target := range targets { + for uid := range target.sources { + expanded = append(expanded, fmt.Sprintf("%s:%d", shard, uid)) + } + } + sort.Strings(expanded) + hasher := fnv.New64() + hasher.Write([]byte(targetKeyspace)) + for _, str := range expanded { + hasher.Write([]byte(str)) + } + // Convert to int64 after dropping the highest bit. + return int64(hasher.Sum64() & math.MaxInt64) +} + +func (mi *migrater) validate(ctx context.Context, isWrite bool) error { + if mi.migrationType == binlogdatapb.MigrationType_TABLES { + // All shards must be present. + if err := mi.compareShards(ctx, mi.sourceKeyspace, mi.sourceShards()); err != nil { + return err + } + if err := mi.compareShards(ctx, mi.targetKeyspace, mi.targetShards()); err != nil { + return err + } + // Wildcard table names not allowed. + for _, table := range mi.tables { + if strings.HasPrefix(table, "/") { + return fmt.Errorf("cannot migrate streams with wild card table names: %v", table) + } + } + if isWrite { + return mi.validateTableForWrite(ctx) + } + } else { // binlogdatapb.MigrationType_SHARDS + // Source and target shards must not match. 
+ for sourceShard := range mi.sources { + if _, ok := mi.targets[sourceShard]; ok { + return fmt.Errorf("target shard matches a source shard: %v", sourceShard) + } + } + if isWrite { + return mi.validateShardForWrite(ctx) + } + } + return nil +} + +func (mi *migrater) validateTableForWrite(ctx context.Context) error { + rules, err := mi.wr.getRoutingRules(ctx) + if err != nil { + return err + } + for _, table := range mi.tables { + for _, tabletType := range []topodatapb.TabletType{topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY} { + tt := strings.ToLower(tabletType.String()) + if rules[table+"@"+tt] == nil || rules[mi.targetKeyspace+"."+table+"@"+tt] == nil { + return fmt.Errorf("missing tablet type specific routing, read-only traffic must be migrated before migrating writes: %v", table) + } + } + } + return nil +} + +func (mi *migrater) validateShardForWrite(ctx context.Context) error { + srvKeyspaces, err := mi.wr.ts.GetSrvKeyspaceAllCells(ctx, mi.sourceKeyspace) + if err != nil { + return err + } + + // Checking one shard is enough. + var si *topo.ShardInfo + for _, source := range mi.sources { + si = source.si + break + } + + for _, srvKeyspace := range srvKeyspaces { + var shardServedTypes []string + for _, partition := range srvKeyspace.GetPartitions() { + if partition.GetServedType() == topodatapb.TabletType_MASTER { + continue + } + for _, shardReference := range partition.GetShardReferences() { + if key.KeyRangeEqual(shardReference.GetKeyRange(), si.GetKeyRange()) { + shardServedTypes = append(shardServedTypes, partition.GetServedType().String()) + } + } + } + if len(shardServedTypes) > 0 { + return fmt.Errorf("cannot migrate MASTER away from %v/%v until everything else is migrated. 
Make sure that the following types are migrated first: %v", si.Keyspace(), si.ShardName(), strings.Join(shardServedTypes, ", ")) + } + } + return nil +} + +func (mi *migrater) compareShards(ctx context.Context, keyspace string, sis []*topo.ShardInfo) error { + var shards []string + for _, si := range sis { + shards = append(shards, si.ShardName()) + } + topoShards, err := mi.wr.ts.GetShardNames(ctx, keyspace) + if err != nil { + return err + } + sort.Strings(topoShards) + sort.Strings(shards) + if !reflect.DeepEqual(topoShards, shards) { + return fmt.Errorf("mismatched shards for keyspace %s: topo: %v vs migrate command: %v", keyspace, topoShards, shards) + } + return nil +} + +func (mi *migrater) migrateTableReads(ctx context.Context, cells []string, servedType topodatapb.TabletType, direction MigrateDirection) error { + rules, err := mi.wr.getRoutingRules(ctx) + if err != nil { + return err + } + // We assume that the following rules were setup when the targets were created: + // table -> sourceKeyspace.table + // targetKeyspace.table -> sourceKeyspace.table + // For forward migration, we add tablet type specific rules to redirect traffic to the target. + // For backward, we delete them. + tt := strings.ToLower(servedType.String()) + for _, table := range mi.tables { + if direction == DirectionForward { + rules[table+"@"+tt] = []string{mi.targetKeyspace + "." + table} + rules[mi.targetKeyspace+"."+table+"@"+tt] = []string{mi.targetKeyspace + "." + table} + rules[mi.sourceKeyspace+"."+table+"@"+tt] = []string{mi.targetKeyspace + "." 
+ table} + } else { + delete(rules, table+"@"+tt) + delete(rules, mi.targetKeyspace+"."+table+"@"+tt) + delete(rules, mi.sourceKeyspace+"."+table+"@"+tt) + } + } + if err := mi.wr.saveRoutingRules(ctx, rules); err != nil { + return err + } + return mi.wr.ts.RebuildSrvVSchema(ctx, cells) +} + +func (mi *migrater) migrateShardReads(ctx context.Context, cells []string, servedType topodatapb.TabletType, direction MigrateDirection) error { + var fromShards, toShards []*topo.ShardInfo + if direction == DirectionForward { + fromShards, toShards = mi.sourceShards(), mi.targetShards() + } else { + fromShards, toShards = mi.targetShards(), mi.sourceShards() + } + + if err := mi.wr.updateShardRecords(ctx, mi.sourceKeyspace, fromShards, cells, servedType, true /* isFrom */, false /* clearSourceShards */); err != nil { + return err + } + if err := mi.wr.updateShardRecords(ctx, mi.sourceKeyspace, toShards, cells, servedType, false, false); err != nil { + return err + } + return mi.wr.ts.MigrateServedType(ctx, mi.sourceKeyspace, toShards, fromShards, servedType, cells) +} + +func (mi *migrater) checkJournals(ctx context.Context) (journalsExist bool, err error) { + var exist sync2.AtomicBool + err = mi.forAllSources(func(source *miSource) error { + statement := fmt.Sprintf("select 1 from _vt.resharding_journal where id=%v", mi.id) + p3qr, err := mi.wr.tmc.VReplicationExec(ctx, source.master.Tablet, statement) + if err != nil { + return err + } + if len(p3qr.Rows) >= 1 { + exist.Set(true) + source.journaled = true + } + return nil + }) + return exist.Get(), err +} + +func (mi *migrater) stopSourceWrites(ctx context.Context) error { + var err error + if mi.migrationType == binlogdatapb.MigrationType_TABLES { + err = mi.changeTableSourceWrites(ctx, disallowWrites) + } else { + err = mi.changeShardsAccess(ctx, mi.sourceKeyspace, mi.sourceShards(), disallowWrites) + } + if err != nil { + return err + } + return mi.forAllSources(func(source *miSource) error { + var err error + 
source.position, err = mi.wr.tmc.MasterPosition(ctx, source.master.Tablet) + mi.wr.Logger().Infof("Position for source %v:%v: %v", mi.sourceKeyspace, source.si.ShardName(), source.position) + return err + }) +} + +func (mi *migrater) changeTableSourceWrites(ctx context.Context, access accessType) error { + return mi.forAllSources(func(source *miSource) error { + if _, err := mi.wr.ts.UpdateShardFields(ctx, mi.sourceKeyspace, source.si.ShardName(), func(si *topo.ShardInfo) error { + return si.UpdateSourceBlacklistedTables(ctx, topodatapb.TabletType_MASTER, nil, access == allowWrites /* remove */, mi.tables) + }); err != nil { + return err + } + return mi.wr.tmc.RefreshState(ctx, source.master.Tablet) + }) +} + +func (mi *migrater) waitForCatchup(ctx context.Context, filteredReplicationWaitTime time.Duration) error { + ctx, cancel := context.WithTimeout(ctx, filteredReplicationWaitTime) + defer cancel() + + var mu sync.Mutex + return mi.forAllUids(func(target *miTarget, uid uint32) error { + bls := target.sources[uid] + source := mi.sources[bls.Shard] + if err := mi.wr.tmc.VReplicationWaitForPos(ctx, target.master.Tablet, int(uid), source.position); err != nil { + return err + } + if _, err := mi.wr.tmc.VReplicationExec(ctx, target.master.Tablet, binlogplayer.StopVReplication(uid, "stopped for cutover")); err != nil { + return err + } + + // Need lock because a target can have multiple uids. 
+		mu.Lock()
+		defer mu.Unlock()
+		if target.position != "" {
+			return nil
+		}
+		var err error
+		target.position, err = mi.wr.tmc.MasterPosition(ctx, target.master.Tablet)
+		mi.wr.Logger().Infof("Position for uid %v: %v", uid, target.position)
+		return err
+	})
+}
+
+func (mi *migrater) cancelMigration(ctx context.Context) {
+	var err error
+	if mi.migrationType == binlogdatapb.MigrationType_TABLES {
+		err = mi.changeTableSourceWrites(ctx, allowWrites)
+	} else {
+		err = mi.changeShardsAccess(ctx, mi.sourceKeyspace, mi.sourceShards(), allowWrites)
+	}
+	if err != nil {
+		mi.wr.Logger().Errorf("Cancel migration failed: %v", err)
+	}
+
+	err = mi.forAllUids(func(target *miTarget, uid uint32) error {
+		if _, err := mi.wr.tmc.VReplicationExec(ctx, target.master.Tablet, binlogplayer.StartVReplication(uid)); err != nil {
+			return err
+		}
+		return nil
+	})
+	if err != nil {
+		mi.wr.Logger().Errorf("Cancel migration failed: could not restart vreplication: %v", err)
+	}
+}
+
+func (mi *migrater) gatherPositions(ctx context.Context) error {
+	err := mi.forAllSources(func(source *miSource) error {
+		var err error
+		source.position, err = mi.wr.tmc.MasterPosition(ctx, source.master.Tablet)
+		mi.wr.Logger().Infof("Position for source %v:%v: %v", mi.sourceKeyspace, source.si.ShardName(), source.position)
+		return err
+	})
+	if err != nil {
+		return err
+	}
+	return mi.forAllTargets(func(target *miTarget) error {
+		var err error
+		target.position, err = mi.wr.tmc.MasterPosition(ctx, target.master.Tablet)
+		mi.wr.Logger().Infof("Position for target %v:%v: %v", mi.targetKeyspace, target.si.ShardName(), target.position)
+		return err
+	})
+}
+
+func (mi *migrater) createJournals(ctx context.Context) error {
+	return mi.forAllSources(func(source *miSource) error {
+		if source.journaled {
+			return nil
+		}
+		journal := &binlogdatapb.Journal{
+			Id:            mi.id,
+			MigrationType: mi.migrationType,
+			Tables:        mi.tables,
+			LocalPosition: source.position,
+		}
+		participantMap := make(map[string]bool)
+		
for targetShard, target := range mi.targets { + found := false + for _, tsource := range target.sources { + if source.si.ShardName() == tsource.Shard { + found = true + break + } + } + if !found { + continue + } + journal.ShardGtids = append(journal.ShardGtids, &binlogdatapb.ShardGtid{ + Keyspace: mi.targetKeyspace, + Shard: targetShard, + Gtid: target.position, + }) + for _, tsource := range target.sources { + participantMap[tsource.Shard] = true + } + } + for shard := range participantMap { + journal.Participants = append(journal.Participants, &binlogdatapb.KeyspaceShard{ + Keyspace: mi.sourceKeyspace, + Shard: shard, + }) + } + mi.wr.Logger().Infof("Creating journal: %v", journal) + statement := fmt.Sprintf("insert into _vt.resharding_journal "+ + "(id, db_name, val) "+ + "values (%v, %v, %v)", + mi.id, encodeString(source.master.DbName()), encodeString(journal.String())) + if _, err := mi.wr.tmc.VReplicationExec(ctx, source.master.Tablet, statement); err != nil { + return err + } + return nil + }) +} + +func (mi *migrater) createReverseReplication(ctx context.Context) error { + vs, err := mi.wr.ts.GetVSchema(ctx, mi.sourceKeyspace) + if err != nil { + return err + } + ksschema, err := vindexes.BuildKeyspaceSchema(vs, mi.sourceKeyspace) + if err != nil { + return err + } + return mi.forAllUids(func(target *miTarget, uid uint32) error { + bls := target.sources[uid] + source := mi.sources[bls.Shard] + reverseBls := &binlogdatapb.BinlogSource{ + Keyspace: mi.targetKeyspace, + Shard: target.si.ShardName(), + TabletType: bls.TabletType, + Filter: &binlogdatapb.Filter{}, + } + for _, rule := range bls.Filter.Rules { + var filter string + if strings.HasPrefix(rule.Match, "/") { + if ksschema.Keyspace.Sharded { + filter = bls.Shard + } + } else { + var inKeyrange string + if ksschema.Keyspace.Sharded { + vtable, ok := ksschema.Tables[rule.Match] + if !ok { + return fmt.Errorf("table %s not found in vschema", rule.Match) + } + // TODO(sougou): handle degenerate cases 
like sequence, etc. + // We currently assume the primary vindex is the best way to filter, which may not be true. + inKeyrange = fmt.Sprintf(" where in_keyrange(%s, '%s', '%s')", sqlparser.String(vtable.ColumnVindexes[0].Columns[0]), vs.Vindexes[vtable.ColumnVindexes[0].Name].Type, bls.Shard) + } + filter = fmt.Sprintf("select * from %s%s", rule.Match, inKeyrange) + } + reverseBls.Filter.Rules = append(reverseBls.Filter.Rules, &binlogdatapb.Rule{ + Match: rule.Match, + Filter: filter, + }) + } + + _, err := mi.wr.VReplicationExec(ctx, source.master.Alias, binlogplayer.CreateVReplicationState("ReversedResharding", reverseBls, target.position, binlogplayer.BlpStopped, source.master.DbName())) + return err + }) +} + +func (mi *migrater) allowTargetWrites(ctx context.Context) error { + if mi.migrationType == binlogdatapb.MigrationType_TABLES { + return mi.allowTableTargetWrites(ctx) + } + return mi.changeShardsAccess(ctx, mi.targetKeyspace, mi.targetShards(), allowWrites) +} + +func (mi *migrater) allowTableTargetWrites(ctx context.Context) error { + return mi.forAllTargets(func(target *miTarget) error { + if _, err := mi.wr.ts.UpdateShardFields(ctx, mi.targetKeyspace, target.si.ShardName(), func(si *topo.ShardInfo) error { + return si.UpdateSourceBlacklistedTables(ctx, topodatapb.TabletType_MASTER, nil, true, mi.tables) + }); err != nil { + return err + } + return mi.wr.tmc.RefreshState(ctx, target.master.Tablet) + }) +} + +func (mi *migrater) changeRouting(ctx context.Context) error { + if mi.migrationType == binlogdatapb.MigrationType_TABLES { + return mi.changeTableRouting(ctx) + } + return mi.changeShardRouting(ctx) +} + +func (mi *migrater) changeTableRouting(ctx context.Context) error { + rules, err := mi.wr.getRoutingRules(ctx) + if err != nil { + return err + } + // We assume that the following rules were setup when the targets were created: + // table -> sourceKeyspace.table + // targetKeyspace.table -> sourceKeyspace.table + // Additionally, MigrateReads 
would have added rules like this: + // table@replica -> targetKeyspace.table + // targetKeyspace.table@replica -> targetKeyspace.table + // After this step, only the following rules will be left: + // table -> targetKeyspace.table + // sourceKeyspace.table -> targetKeyspace.table + for _, table := range mi.tables { + for _, tabletType := range []topodatapb.TabletType{topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY} { + tt := strings.ToLower(tabletType.String()) + delete(rules, table+"@"+tt) + delete(rules, mi.targetKeyspace+"."+table+"@"+tt) + delete(rules, mi.sourceKeyspace+"."+table+"@"+tt) + mi.wr.Logger().Infof("Delete routing: %v %v %v", table+"@"+tt, mi.targetKeyspace+"."+table+"@"+tt, mi.sourceKeyspace+"."+table+"@"+tt) + } + delete(rules, mi.targetKeyspace+"."+table) + mi.wr.Logger().Infof("Delete routing: %v", mi.targetKeyspace+"."+table) + rules[table] = []string{mi.targetKeyspace + "." + table} + rules[mi.sourceKeyspace+"."+table] = []string{mi.targetKeyspace + "." 
+ table}
+		mi.wr.Logger().Infof("Add routing: %v %v", table, mi.sourceKeyspace+"."+table)
+	}
+	if err := mi.wr.saveRoutingRules(ctx, rules); err != nil {
+		return err
+	}
+	return mi.wr.ts.RebuildSrvVSchema(ctx, nil)
+}
+
+func (mi *migrater) changeShardRouting(ctx context.Context) error {
+	err := mi.forAllSources(func(source *miSource) error {
+		_, err := mi.wr.ts.UpdateShardFields(ctx, mi.sourceKeyspace, source.si.ShardName(), func(si *topo.ShardInfo) error {
+			si.IsMasterServing = false
+			return nil
+		})
+		return err
+	})
+	if err != nil {
+		return err
+	}
+	err = mi.forAllTargets(func(target *miTarget) error {
+		_, err := mi.wr.ts.UpdateShardFields(ctx, mi.targetKeyspace, target.si.ShardName(), func(si *topo.ShardInfo) error {
+			si.IsMasterServing = true
+			return nil
+		})
+		return err
+	})
+	if err != nil {
+		return err
+	}
+	return mi.wr.ts.MigrateServedType(ctx, mi.targetKeyspace, mi.targetShards(), mi.sourceShards(), topodatapb.TabletType_MASTER, nil)
+}
+
+func (mi *migrater) deleteTargetVReplication(ctx context.Context) {
+	_ = mi.forAllUids(func(target *miTarget, uid uint32) error {
+		if _, err := mi.wr.tmc.VReplicationExec(ctx, target.master.Tablet, binlogplayer.DeleteVReplication(uid)); err != nil {
+			mi.wr.Logger().Errorf("Final cleanup: could not delete vreplication, please delete stopped streams manually: %v", err)
+		}
+		return nil
+	})
+}
+
+func (mi *migrater) changeShardsAccess(ctx context.Context, keyspace string, shards []*topo.ShardInfo, access accessType) error {
+	if err := mi.wr.ts.UpdateDisableQueryService(ctx, keyspace, shards, topodatapb.TabletType_MASTER, nil, access == disallowWrites /* disable */); err != nil {
+		return err
+	}
+	return mi.wr.refreshMasters(ctx, shards)
+}
+
+func (mi *migrater) forAllSources(f func(*miSource) error) error {
+	var wg sync.WaitGroup
+	allErrors := &concurrency.AllErrorRecorder{}
+	for _, source := range mi.sources {
+		wg.Add(1)
+		go func(source *miSource) {
+			defer wg.Done()
+
+			if err := 
f(source); err != nil { + allErrors.RecordError(err) + } + }(source) + } + wg.Wait() + return allErrors.AggrError(vterrors.Aggregate) +} + +func (mi *migrater) forAllTargets(f func(*miTarget) error) error { + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for _, target := range mi.targets { + wg.Add(1) + go func(target *miTarget) { + defer wg.Done() + + if err := f(target); err != nil { + allErrors.RecordError(err) + } + }(target) + } + wg.Wait() + return allErrors.AggrError(vterrors.Aggregate) +} + +func (mi *migrater) forAllUids(f func(target *miTarget, uid uint32) error) error { + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for _, target := range mi.targets { + for uid := range target.sources { + wg.Add(1) + go func(target *miTarget, uid uint32) { + defer wg.Done() + + if err := f(target, uid); err != nil { + allErrors.RecordError(err) + } + }(target, uid) + } + } + wg.Wait() + return allErrors.AggrError(vterrors.Aggregate) +} + +func (mi *migrater) sourceShards() []*topo.ShardInfo { + shards := make([]*topo.ShardInfo, 0, len(mi.sources)) + for _, source := range mi.sources { + shards = append(shards, source.si) + } + return shards +} + +func (mi *migrater) targetShards() []*topo.ShardInfo { + shards := make([]*topo.ShardInfo, 0, len(mi.targets)) + for _, target := range mi.targets { + shards = append(shards, target.si) + } + return shards +} + +func (wr *Wrangler) getRoutingRules(ctx context.Context) (map[string][]string, error) { + rrs, err := wr.ts.GetRoutingRules(ctx) + if err != nil { + return nil, err + } + rules := make(map[string][]string, len(rrs.Rules)) + for _, rr := range rrs.Rules { + rules[rr.FromTable] = rr.ToTables + } + return rules, nil +} + +func (wr *Wrangler) saveRoutingRules(ctx context.Context, rules map[string][]string) error { + rrs := &vschemapb.RoutingRules{Rules: make([]*vschemapb.RoutingRule, 0, len(rules))} + for from, to := range rules { + rrs.Rules = append(rrs.Rules, 
&vschemapb.RoutingRule{ + FromTable: from, + ToTables: to, + }) + } + return wr.ts.SaveRoutingRules(ctx, rrs) +} diff --git a/go/vt/wrangler/migrater_env_test.go b/go/vt/wrangler/migrater_env_test.go new file mode 100644 index 00000000000..5384591ba48 --- /dev/null +++ b/go/vt/wrangler/migrater_env_test.go @@ -0,0 +1,397 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wrangler + +import ( + "fmt" + "testing" + + "golang.org/x/net/context" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/logutil" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" + "vitess.io/vitess/go/vt/vttablet/tmclient" +) + +const vreplQueryks = "select id, source from _vt.vreplication where workflow = 'test' and db_name = 'vt_ks'" +const vreplQueryks2 = "select id, source from _vt.vreplication where workflow = 'test' and db_name = 'vt_ks2'" + +type testMigraterEnv struct { + ts *topo.Server + wr *Wrangler + source1Master, source1Replica, source1Rdonly *fakeTablet + source2Master, source2Replica, source2Rdonly *fakeTablet + dest1Master, dest1Replica, dest1Rdonly *fakeTablet + dest2Master, 
dest2Replica, dest2Rdonly *fakeTablet + dbSource1Client, dbSource2Client *fakeDBClient + dbDest1Client, dbDest2Client *fakeDBClient + allDBClients []*fakeDBClient + targetKeyspace string + streams map[string][]uint32 +} + +func newTestTableMigrater(ctx context.Context, t *testing.T) *testMigraterEnv { + tme := &testMigraterEnv{} + tme.ts = memorytopo.NewServer("cell1", "cell2") + tme.wr = New(logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient()) + + // Create cluster: ks1:-40,40- and ks2:-80,80-. + tme.source1Master = newFakeTablet(t, tme.wr, "cell1", 10, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks1", "-40")) + tme.source1Replica = newFakeTablet(t, tme.wr, "cell1", 11, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks1", "-40")) + tme.source1Rdonly = newFakeTablet(t, tme.wr, "cell1", 12, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks1", "-40")) + + tme.source2Master = newFakeTablet(t, tme.wr, "cell1", 20, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks1", "40-")) + tme.source2Replica = newFakeTablet(t, tme.wr, "cell1", 21, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks1", "40-")) + tme.source2Rdonly = newFakeTablet(t, tme.wr, "cell1", 22, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks1", "40-")) + + tme.dest1Master = newFakeTablet(t, tme.wr, "cell1", 30, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks2", "-80")) + tme.dest1Replica = newFakeTablet(t, tme.wr, "cell1", 31, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks2", "-80")) + tme.dest1Rdonly = newFakeTablet(t, tme.wr, "cell1", 32, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks2", "-80")) + + tme.dest2Master = newFakeTablet(t, tme.wr, "cell1", 40, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks2", "80-")) + tme.dest2Replica = newFakeTablet(t, tme.wr, "cell1", 41, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks2", "80-")) + 
tme.dest2Rdonly = newFakeTablet(t, tme.wr, "cell1", 42, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks2", "80-")) + + vs := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "hash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "hash", + }}, + }, + "t2": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "hash", + }}, + }, + }, + } + if err := tme.ts.SaveVSchema(ctx, "ks1", vs); err != nil { + t.Fatal(err) + } + if err := tme.ts.SaveVSchema(ctx, "ks2", vs); err != nil { + t.Fatal(err) + } + if err := tme.ts.RebuildSrvVSchema(ctx, nil); err != nil { + t.Fatal(err) + } + err := topotools.RebuildKeyspace(ctx, logutil.NewConsoleLogger(), tme.ts, "ks1", []string{"cell1"}) + if err != nil { + t.Fatal(err) + } + err = topotools.RebuildKeyspace(ctx, logutil.NewConsoleLogger(), tme.ts, "ks2", []string{"cell1"}) + if err != nil { + t.Fatal(err) + } + + tme.startTablets(t) + tme.createDBClients(ctx, t) + tme.setMasterPositions() + + // Emulate the following replication streams (many-to-many table migration): + // -40 -> -80 + // 40- -> -80 + // 40- -> 80- + // -40 will only have one target, and 80- will have only one source. 
+ bls1 := &binlogdatapb.BinlogSource{ + Keyspace: "ks1", + Shard: "-40", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1 where in_keyrange('-80')", + }, { + Match: "t2", + Filter: "select * from t2 where in_keyrange('-80')", + }}, + }, + } + bls2 := &binlogdatapb.BinlogSource{ + Keyspace: "ks1", + Shard: "40-", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1 where in_keyrange('-80')", + }, { + Match: "t2", + Filter: "select * from t2 where in_keyrange('-80')", + }}, + }, + } + tme.dbDest1Client.addQuery(vreplQueryks2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source", + "int64|varchar"), + fmt.Sprintf("1|%v", bls1), + fmt.Sprintf("2|%v", bls2), + ), nil) + bls3 := &binlogdatapb.BinlogSource{ + Keyspace: "ks1", + Shard: "40-", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1 where in_keyrange('80-')", + }, { + Match: "t2", + Filter: "select * from t2 where in_keyrange('80-')", + }}, + }, + } + tme.dbDest2Client.addQuery(vreplQueryks2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source", + "int64|varchar"), + fmt.Sprintf("1|%v", bls3), + ), nil) + + if err := tme.wr.saveRoutingRules(ctx, map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + }); err != nil { + t.Fatal(err) + } + if err := tme.ts.RebuildSrvVSchema(ctx, nil); err != nil { + t.Fatal(err) + } + + tme.targetKeyspace = "ks2" + tme.streams = map[string][]uint32{ + "-80": {1, 2}, + "80-": {1}, + } + return tme +} + +func newTestShardMigrater(ctx context.Context, t *testing.T) *testMigraterEnv { + tme := &testMigraterEnv{} + tme.ts = memorytopo.NewServer("cell1", "cell2") + tme.wr = New(logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient()) + + // Create cluster with "ks" as keyspace. -40,40- as serving, -80,80- as non-serving. 
+ tme.source1Master = newFakeTablet(t, tme.wr, "cell1", 10, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks", "-40")) + tme.source1Replica = newFakeTablet(t, tme.wr, "cell1", 11, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks", "-40")) + tme.source1Rdonly = newFakeTablet(t, tme.wr, "cell1", 12, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks", "-40")) + + tme.source2Master = newFakeTablet(t, tme.wr, "cell1", 20, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks", "40-")) + tme.source2Replica = newFakeTablet(t, tme.wr, "cell1", 21, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks", "40-")) + tme.source2Rdonly = newFakeTablet(t, tme.wr, "cell1", 22, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks", "40-")) + + tme.dest1Master = newFakeTablet(t, tme.wr, "cell1", 30, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks", "-80")) + tme.dest1Replica = newFakeTablet(t, tme.wr, "cell1", 31, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks", "-80")) + tme.dest1Rdonly = newFakeTablet(t, tme.wr, "cell1", 32, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks", "-80")) + + tme.dest2Master = newFakeTablet(t, tme.wr, "cell1", 40, topodatapb.TabletType_MASTER, nil, TabletKeyspaceShard(t, "ks", "80-")) + tme.dest2Replica = newFakeTablet(t, tme.wr, "cell1", 41, topodatapb.TabletType_REPLICA, nil, TabletKeyspaceShard(t, "ks", "80-")) + tme.dest2Rdonly = newFakeTablet(t, tme.wr, "cell1", 42, topodatapb.TabletType_RDONLY, nil, TabletKeyspaceShard(t, "ks", "80-")) + + vs := &vschemapb.Keyspace{Sharded: true} + if err := tme.ts.SaveVSchema(ctx, "ks", vs); err != nil { + t.Fatal(err) + } + if err := tme.ts.RebuildSrvVSchema(ctx, nil); err != nil { + t.Fatal(err) + } + err := topotools.RebuildKeyspace(ctx, logutil.NewConsoleLogger(), tme.ts, "ks", nil) + if err != nil { + t.Fatal(err) + } + + tme.startTablets(t) + tme.createDBClients(ctx, t) + tme.setMasterPositions() + + 
// Emulate the following replication streams (simultaneous split and merge): + // -40 -> -80 + // 40- -> -80 + // 40- -> 80- + // -40 will only have one target, and 80- will have only one source. + bls1 := &binlogdatapb.BinlogSource{ + Keyspace: "ks", + Shard: "-40", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + Filter: "-80", + }}, + }, + } + bls2 := &binlogdatapb.BinlogSource{ + Keyspace: "ks", + Shard: "40-", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + Filter: "-80", + }}, + }, + } + tme.dbDest1Client.addQuery(vreplQueryks, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source", + "int64|varchar"), + fmt.Sprintf("1|%v", bls1), + fmt.Sprintf("2|%v", bls2), + ), nil) + bls3 := &binlogdatapb.BinlogSource{ + Keyspace: "ks", + Shard: "40-", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + Filter: "80-", + }}, + }, + } + tme.dbDest2Client.addQuery(vreplQueryks, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source", + "int64|varchar"), + fmt.Sprintf("1|%v", bls3), + ), nil) + + tme.targetKeyspace = "ks" + tme.streams = map[string][]uint32{ + "-80": {1, 2}, + "80-": {1}, + } + tme.dbSource1Client.addQuery(vreplQueryks, &sqltypes.Result{}, nil) + tme.dbSource2Client.addQuery(vreplQueryks, &sqltypes.Result{}, nil) + return tme +} + +func (tme *testMigraterEnv) startTablets(t *testing.T) { + tme.source1Replica.StartActionLoop(t, tme.wr) + tme.source1Rdonly.StartActionLoop(t, tme.wr) + tme.source1Master.StartActionLoop(t, tme.wr) + + tme.source2Replica.StartActionLoop(t, tme.wr) + tme.source2Rdonly.StartActionLoop(t, tme.wr) + tme.source2Master.StartActionLoop(t, tme.wr) + + tme.dest1Replica.StartActionLoop(t, tme.wr) + tme.dest1Rdonly.StartActionLoop(t, tme.wr) + tme.dest1Master.StartActionLoop(t, tme.wr) + + tme.dest2Replica.StartActionLoop(t, tme.wr) + tme.dest2Rdonly.StartActionLoop(t, tme.wr) + tme.dest2Master.StartActionLoop(t, tme.wr) +} + +func 
(tme *testMigraterEnv) stopTablets(t *testing.T) { + tme.source1Replica.StopActionLoop(t) + tme.source1Rdonly.StopActionLoop(t) + tme.source1Master.StopActionLoop(t) + + tme.source2Replica.StopActionLoop(t) + tme.source2Rdonly.StopActionLoop(t) + tme.source2Master.StopActionLoop(t) + + tme.dest1Replica.StopActionLoop(t) + tme.dest1Rdonly.StopActionLoop(t) + tme.dest1Master.StopActionLoop(t) + + tme.dest2Replica.StopActionLoop(t) + tme.dest2Rdonly.StopActionLoop(t) + tme.dest2Master.StopActionLoop(t) +} + +func (tme *testMigraterEnv) createDBClients(ctx context.Context, t *testing.T) { + tme.dbDest1Client = newFakeDBClient() + dbClientFactory1 := func() binlogplayer.DBClient { return tme.dbDest1Client } + tme.dest1Master.Agent.VREngine = vreplication.NewEngine(tme.ts, "", tme.dest1Master.FakeMysqlDaemon, dbClientFactory1, tme.dbDest1Client.DBName()) + if err := tme.dest1Master.Agent.VREngine.Open(ctx); err != nil { + t.Fatal(err) + } + + tme.dbDest2Client = newFakeDBClient() + dbClientFactory2 := func() binlogplayer.DBClient { return tme.dbDest2Client } + tme.dest2Master.Agent.VREngine = vreplication.NewEngine(tme.ts, "", tme.dest2Master.FakeMysqlDaemon, dbClientFactory2, tme.dbDest2Client.DBName()) + if err := tme.dest2Master.Agent.VREngine.Open(ctx); err != nil { + t.Fatal(err) + } + + tme.dbSource1Client = newFakeDBClient() + dbClientFactory3 := func() binlogplayer.DBClient { return tme.dbSource1Client } + tme.source1Master.Agent.VREngine = vreplication.NewEngine(tme.ts, "", tme.source1Master.FakeMysqlDaemon, dbClientFactory3, tme.dbSource1Client.DBName()) + if err := tme.source1Master.Agent.VREngine.Open(ctx); err != nil { + t.Fatal(err) + } + + tme.dbSource2Client = newFakeDBClient() + dbClientFactory4 := func() binlogplayer.DBClient { return tme.dbSource2Client } + tme.source2Master.Agent.VREngine = vreplication.NewEngine(tme.ts, "", tme.source2Master.FakeMysqlDaemon, dbClientFactory4, tme.dbSource2Client.DBName()) + if err := 
tme.source2Master.Agent.VREngine.Open(ctx); err != nil { + t.Fatal(err) + } + + tme.allDBClients = []*fakeDBClient{tme.dbDest1Client, tme.dbDest2Client, tme.dbSource1Client, tme.dbSource2Client} +} + +func (tme *testMigraterEnv) setMasterPositions() { + tme.source1Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 892, + }, + }, + } + tme.source2Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 892, + }, + }, + } + tme.dest1Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 893, + }, + }, + } + tme.dest2Master.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{ + GTIDSet: mysql.MariadbGTIDSet{ + mysql.MariadbGTID{ + Domain: 5, + Server: 456, + Sequence: 893, + }, + }, + } +} diff --git a/go/vt/wrangler/migrater_test.go b/go/vt/wrangler/migrater_test.go new file mode 100644 index 00000000000..0e9f25995de --- /dev/null +++ b/go/vt/wrangler/migrater_test.go @@ -0,0 +1,1039 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package wrangler + +import ( + "errors" + "fmt" + "reflect" + "strings" + "testing" + "time" + + "golang.org/x/net/context" + "vitess.io/vitess/go/sqltypes" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/topo" +) + +// TestTableMigrate tests table mode migrations. +// This has to be kept in sync with TestShardMigrate. +func TestTableMigrate(t *testing.T) { + ctx := context.Background() + tme := newTestTableMigrater(ctx, t) + defer tme.stopTablets(t) + + checkCellRouting(t, tme.wr, "cell1", map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + }) + + //------------------------------------------------------------------------------------------------------------------- + // Single cell RDONLY migration. + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, []string{"cell1"}, DirectionForward) + if err != nil { + t.Fatal(err) + } + checkCellRouting(t, tme.wr, "cell1", map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + "t1@rdonly": {"ks2.t1"}, + "ks2.t1@rdonly": {"ks2.t1"}, + "ks1.t1@rdonly": {"ks2.t1"}, + "t2@rdonly": {"ks2.t2"}, + "ks2.t2@rdonly": {"ks2.t2"}, + "ks1.t2@rdonly": {"ks2.t2"}, + }) + checkCellRouting(t, tme.wr, "cell2", map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + }) + verifyQueries(t, tme.allDBClients) + + //------------------------------------------------------------------------------------------------------------------- + // Other cell REPLICA migration. + // The global routing already contains redirections for rdonly. + // So, adding routes for replica and deploying to cell2 will also cause + // cell2 to migrate rdonly. This is a quirk that can be fixed later if necessary. 
+ // TODO(sougou): check if it's worth fixing, or clearly document the quirk. + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, []string{"cell2"}, DirectionForward) + if err != nil { + t.Fatal(err) + } + checkCellRouting(t, tme.wr, "cell1", map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + "t1@rdonly": {"ks2.t1"}, + "ks2.t1@rdonly": {"ks2.t1"}, + "ks1.t1@rdonly": {"ks2.t1"}, + "t2@rdonly": {"ks2.t2"}, + "ks2.t2@rdonly": {"ks2.t2"}, + "ks1.t2@rdonly": {"ks2.t2"}, + }) + checkCellRouting(t, tme.wr, "cell2", map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + "t1@rdonly": {"ks2.t1"}, + "ks2.t1@rdonly": {"ks2.t1"}, + "ks1.t1@rdonly": {"ks2.t1"}, + "t2@rdonly": {"ks2.t2"}, + "ks2.t2@rdonly": {"ks2.t2"}, + "ks1.t2@rdonly": {"ks2.t2"}, + "t1@replica": {"ks2.t1"}, + "ks2.t1@replica": {"ks2.t1"}, + "ks1.t1@replica": {"ks2.t1"}, + "t2@replica": {"ks2.t2"}, + "ks2.t2@replica": {"ks2.t2"}, + "ks1.t2@replica": {"ks2.t2"}, + }) + verifyQueries(t, tme.allDBClients) + + //------------------------------------------------------------------------------------------------------------------- + // Single cell backward REPLICA migration. + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, []string{"cell2"}, DirectionBackward) + if err != nil { + t.Fatal(err) + } + checkRouting(t, tme.wr, map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + "t1@rdonly": {"ks2.t1"}, + "ks2.t1@rdonly": {"ks2.t1"}, + "ks1.t1@rdonly": {"ks2.t1"}, + "t2@rdonly": {"ks2.t2"}, + "ks2.t2@rdonly": {"ks2.t2"}, + "ks1.t2@rdonly": {"ks2.t2"}, + }) + verifyQueries(t, tme.allDBClients) + + //------------------------------------------------------------------------------------------------------------------- + // Migrate all REPLICA. 
+ err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, nil, DirectionForward) + if err != nil { + t.Fatal(err) + } + checkRouting(t, tme.wr, map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + "t1@rdonly": {"ks2.t1"}, + "ks2.t1@rdonly": {"ks2.t1"}, + "ks1.t1@rdonly": {"ks2.t1"}, + "t2@rdonly": {"ks2.t2"}, + "ks2.t2@rdonly": {"ks2.t2"}, + "ks1.t2@rdonly": {"ks2.t2"}, + "t1@replica": {"ks2.t1"}, + "ks2.t1@replica": {"ks2.t1"}, + "ks1.t1@replica": {"ks2.t1"}, + "t2@replica": {"ks2.t2"}, + "ks2.t2@replica": {"ks2.t2"}, + "ks1.t2@replica": {"ks2.t2"}, + }) + verifyQueries(t, tme.allDBClients) + + //------------------------------------------------------------------------------------------------------------------- + // All cells RDONLY backward migration. + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionBackward) + if err != nil { + t.Fatal(err) + } + checkRouting(t, tme.wr, map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + "t1@replica": {"ks2.t1"}, + "ks2.t1@replica": {"ks2.t1"}, + "ks1.t1@replica": {"ks2.t1"}, + "t2@replica": {"ks2.t2"}, + "ks2.t2@replica": {"ks2.t2"}, + "ks1.t2@replica": {"ks2.t2"}, + }) + verifyQueries(t, tme.allDBClients) + + //------------------------------------------------------------------------------------------------------------------- + // All cells RDONLY backward migration. 
+ err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, nil, DirectionBackward) + if err != nil { + t.Fatal(err) + } + checkRouting(t, tme.wr, map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + }) + verifyQueries(t, tme.allDBClients) + + //------------------------------------------------------------------------------------------------------------------- + // Can't migrate master with MigrateReads. + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_MASTER, nil, DirectionForward) + want := "tablet type must be REPLICA or RDONLY: MASTER" + if err == nil || err.Error() != want { + t.Errorf("MigrateReads(master) err: %v, want %v", err, want) + } + verifyQueries(t, tme.allDBClients) + + //------------------------------------------------------------------------------------------------------------------- + // Can't migrate writes if REPLICA and RDONLY have not fully migrated yet. + _, err = tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 1*time.Second) + want = "missing tablet type specific routing, read-only traffic must be migrated before migrating writes" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateWrites err: %v, want %v", err, want) + } + verifyQueries(t, tme.allDBClients) + + //------------------------------------------------------------------------------------------------------------------- + // Test MigrateWrites cancelation on failure. + + // Migrate all the reads first. 
+ err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) + if err != nil { + t.Fatal(err) + } + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, nil, DirectionForward) + if err != nil { + t.Fatal(err) + } + checkRouting(t, tme.wr, map[string][]string{ + "t1": {"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + "t1@replica": {"ks2.t1"}, + "ks2.t1@replica": {"ks2.t1"}, + "ks1.t1@replica": {"ks2.t1"}, + "t2@replica": {"ks2.t2"}, + "ks2.t2@replica": {"ks2.t2"}, + "ks1.t2@replica": {"ks2.t2"}, + "t1@rdonly": {"ks2.t1"}, + "ks2.t1@rdonly": {"ks2.t1"}, + "ks1.t1@rdonly": {"ks2.t1"}, + "t2@rdonly": {"ks2.t2"}, + "ks2.t2@rdonly": {"ks2.t2"}, + "ks1.t2@rdonly": {"ks2.t2"}, + }) + + // Check for journals. + tme.dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 9113431017721636330", &sqltypes.Result{}, nil) + tme.dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 9113431017721636330", &sqltypes.Result{}, nil) + + // Wait for position: Reads current state, updates to Stopped, and re-reads. 
+ state := sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "pos|state|message", + "varchar|varchar|varchar"), + "MariaDB/5-456-892|Running|", + ) + tme.dbDest1Client.addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil) + tme.dbDest2Client.addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil) + tme.dbDest1Client.addQuery("select pos, state, message from _vt.vreplication where id=2", state, nil) + tme.dbDest1Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 2", &sqltypes.Result{}, nil) + stopped := sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|state", + "int64|varchar"), + "1|Stopped", + ) + tme.dbDest1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbDest2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbDest1Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil) + + // Cancel Migration + cancel1 := "update _vt.vreplication set state = 'Running', stop_pos = null where id = 1" + cancel2 := "update _vt.vreplication set state = 'Running', stop_pos = null where id = 2" + tme.dbDest1Client.addQuery(cancel1, &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery(cancel1, &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery(cancel2, &sqltypes.Result{}, nil) + + _, err = tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 0*time.Second) + want = "DeadlineExceeded" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateWrites(0 timeout) err: %v, must contain %v", err, want) + } + checkRouting(t, tme.wr, map[string][]string{ + "t1": 
{"ks1.t1"}, + "ks2.t1": {"ks1.t1"}, + "t2": {"ks1.t2"}, + "ks2.t2": {"ks1.t2"}, + "t1@replica": {"ks2.t1"}, + "ks2.t1@replica": {"ks2.t1"}, + "ks1.t1@replica": {"ks2.t1"}, + "t2@replica": {"ks2.t2"}, + "ks2.t2@replica": {"ks2.t2"}, + "ks1.t2@replica": {"ks2.t2"}, + "t1@rdonly": {"ks2.t1"}, + "ks2.t1@rdonly": {"ks2.t1"}, + "ks1.t1@rdonly": {"ks2.t1"}, + "t2@rdonly": {"ks2.t2"}, + "ks2.t2@rdonly": {"ks2.t2"}, + "ks1.t2@rdonly": {"ks2.t2"}, + }) + checkBlacklist(t, tme.ts, "ks1:-40", nil) + checkBlacklist(t, tme.ts, "ks1:40-", nil) + checkBlacklist(t, tme.ts, "ks2:-80", nil) + checkBlacklist(t, tme.ts, "ks2:80-", nil) + + //------------------------------------------------------------------------------------------------------------------- + // Test successful MigrateWrites. + + // Create journals. + journal1 := "insert into _vt.resharding_journal.*9113431017721636330.*tables.*t1.*t2.*local_position.*MariaDB/5-456-892.*shard_gtids.*-80.*MariaDB/5-456-893.*participants.*40.*40" + tme.dbSource1Client.addQueryRE(journal1, &sqltypes.Result{}, nil) + journal2 := "insert into _vt.resharding_journal.*9113431017721636330.*tables.*t1.*t2.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*80.*participants.*40.*40" + tme.dbSource2Client.addQueryRE(journal2, &sqltypes.Result{}, nil) + + // Create backward replicaions. 
+ tme.dbSource1Client.addQueryRE("insert into _vt.vreplication.*ks2.*-80.*t1.*in_keyrange.*c1.*hash.*-40.*t2.*-40.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) + tme.dbSource2Client.addQueryRE("insert into _vt.vreplication.*ks2.*-80.*t1.*in_keyrange.*c1.*hash.*40-.*t2.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) + tme.dbSource2Client.addQueryRE("insert into _vt.vreplication.*ks2.*80-.*t1.*in_keyrange.*c1.*hash.*40-.*t2.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 2}, nil) + tme.dbSource1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbSource2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbSource2Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil) + + // Delete the target replications. + tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) + + journalID, err := tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 1*time.Second) + if err != nil { + t.Fatal(err) + } + if journalID != 9113431017721636330 { + t.Errorf("journal id: %d, want 9113431017721636330", journalID) + } + + checkRouting(t, tme.wr, map[string][]string{ + "t1": {"ks2.t1"}, + "ks1.t1": {"ks2.t1"}, + "t2": {"ks2.t2"}, + "ks1.t2": {"ks2.t2"}, + }) + checkBlacklist(t, tme.ts, "ks1:-40", []string{"t1", "t2"}) + checkBlacklist(t, tme.ts, "ks1:40-", []string{"t1", "t2"}) + checkBlacklist(t, tme.ts, "ks2:-80", nil) + checkBlacklist(t, tme.ts, "ks2:80-", nil) + + verifyQueries(t, tme.allDBClients) +} + +// TestShardMigrate tests shard mode migrations. +// This has to be kept in sync with TestTableMigrate. 
+func TestShardMigrate(t *testing.T) { + ctx := context.Background() + tme := newTestShardMigrater(ctx, t) + defer tme.stopTablets(t) + + // Initial check + checkServedTypes(t, tme.ts, "ks:-40", 3) + checkServedTypes(t, tme.ts, "ks:40-", 3) + checkServedTypes(t, tme.ts, "ks:-80", 0) + checkServedTypes(t, tme.ts, "ks:80-", 0) + + //------------------------------------------------------------------------------------------------------------------- + // Single cell RDONLY migration. + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, []string{"cell1"}, DirectionForward) + if err != nil { + t.Fatal(err) + } + checkCellServedTypes(t, tme.ts, "ks:-40", "cell1", 2) + checkCellServedTypes(t, tme.ts, "ks:40-", "cell1", 2) + checkCellServedTypes(t, tme.ts, "ks:-80", "cell1", 1) + checkCellServedTypes(t, tme.ts, "ks:80-", "cell1", 1) + checkCellServedTypes(t, tme.ts, "ks:-40", "cell2", 3) + checkCellServedTypes(t, tme.ts, "ks:40-", "cell2", 3) + checkCellServedTypes(t, tme.ts, "ks:-80", "cell2", 0) + checkCellServedTypes(t, tme.ts, "ks:80-", "cell2", 0) + verifyQueries(t, tme.allDBClients) + + //------------------------------------------------------------------------------------------------------------------- + // Other cell REPLICA migration. 
+ err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, []string{"cell2"}, DirectionForward) + if err != nil { + t.Fatal(err) + } + checkCellServedTypes(t, tme.ts, "ks:-40", "cell1", 2) + checkCellServedTypes(t, tme.ts, "ks:40-", "cell1", 2) + checkCellServedTypes(t, tme.ts, "ks:-80", "cell1", 1) + checkCellServedTypes(t, tme.ts, "ks:80-", "cell1", 1) + checkCellServedTypes(t, tme.ts, "ks:-40", "cell2", 2) + checkCellServedTypes(t, tme.ts, "ks:40-", "cell2", 2) + checkCellServedTypes(t, tme.ts, "ks:-80", "cell2", 1) + checkCellServedTypes(t, tme.ts, "ks:80-", "cell2", 1) + verifyQueries(t, tme.allDBClients) + + //------------------------------------------------------------------------------------------------------------------- + // Single cell backward REPLICA migration. + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, []string{"cell2"}, DirectionBackward) + if err != nil { + t.Fatal(err) + } + checkCellServedTypes(t, tme.ts, "ks:-40", "cell1", 2) + checkCellServedTypes(t, tme.ts, "ks:40-", "cell1", 2) + checkCellServedTypes(t, tme.ts, "ks:-80", "cell1", 1) + checkCellServedTypes(t, tme.ts, "ks:80-", "cell1", 1) + checkCellServedTypes(t, tme.ts, "ks:-40", "cell2", 3) + checkCellServedTypes(t, tme.ts, "ks:40-", "cell2", 3) + checkCellServedTypes(t, tme.ts, "ks:-80", "cell2", 0) + checkCellServedTypes(t, tme.ts, "ks:80-", "cell2", 0) + verifyQueries(t, tme.allDBClients) + + //------------------------------------------------------------------------------------------------------------------- + // Migrate all RDONLY. + // This is an extra step that does not exist in the tables test. + // The per-cell migration mechanism is different for tables. So, this + // extra step is needed to bring things in sync. 
+ err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) + if err != nil { + t.Fatal(err) + } + checkServedTypes(t, tme.ts, "ks:-40", 2) + checkServedTypes(t, tme.ts, "ks:40-", 2) + checkServedTypes(t, tme.ts, "ks:-80", 1) + checkServedTypes(t, tme.ts, "ks:80-", 1) + verifyQueries(t, tme.allDBClients) + + //------------------------------------------------------------------------------------------------------------------- + // Migrate all REPLICA. + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, nil, DirectionForward) + if err != nil { + t.Fatal(err) + } + checkServedTypes(t, tme.ts, "ks:-40", 1) + checkServedTypes(t, tme.ts, "ks:40-", 1) + checkServedTypes(t, tme.ts, "ks:-80", 2) + checkServedTypes(t, tme.ts, "ks:80-", 2) + verifyQueries(t, tme.allDBClients) + + //------------------------------------------------------------------------------------------------------------------- + // All cells RDONLY backward migration. + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionBackward) + if err != nil { + t.Fatal(err) + } + checkServedTypes(t, tme.ts, "ks:-40", 2) + checkServedTypes(t, tme.ts, "ks:40-", 2) + checkServedTypes(t, tme.ts, "ks:-80", 1) + checkServedTypes(t, tme.ts, "ks:80-", 1) + verifyQueries(t, tme.allDBClients) + + //------------------------------------------------------------------------------------------------------------------- + // Can't migrate master with MigrateReads. 
+ err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_MASTER, nil, DirectionForward) + want := "tablet type must be REPLICA or RDONLY: MASTER" + if err == nil || err.Error() != want { + t.Errorf("MigrateReads(master) err: %v, want %v", err, want) + } + verifyQueries(t, tme.allDBClients) + + //------------------------------------------------------------------------------------------------------------------- + // Can't migrate writes if REPLICA and RDONLY have not fully migrated yet. + _, err = tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 1*time.Second) + want = "cannot migrate MASTER away" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateWrites err: %v, want %v", err, want) + } + verifyQueries(t, tme.allDBClients) + + //------------------------------------------------------------------------------------------------------------------- + // Test MigrateWrites cancelation on failure. + + // Migrate all the reads first. + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) + if err != nil { + t.Fatal(err) + } + checkServedTypes(t, tme.ts, "ks:-40", 1) + checkServedTypes(t, tme.ts, "ks:40-", 1) + checkServedTypes(t, tme.ts, "ks:-80", 2) + checkServedTypes(t, tme.ts, "ks:80-", 2) + checkIsMasterServing(t, tme.ts, "ks:-40", true) + checkIsMasterServing(t, tme.ts, "ks:40-", true) + checkIsMasterServing(t, tme.ts, "ks:-80", false) + checkIsMasterServing(t, tme.ts, "ks:80-", false) + + // Check for journals. + tme.dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 6432976123657117098", &sqltypes.Result{}, nil) + tme.dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 6432976123657117098", &sqltypes.Result{}, nil) + + // Wait for position: Reads current state, updates to Stopped, and re-reads. 
+ state := sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "pos|state|message", + "varchar|varchar|varchar"), + "MariaDB/5-456-892|Running|", + ) + tme.dbDest1Client.addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil) + tme.dbDest2Client.addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil) + tme.dbDest1Client.addQuery("select pos, state, message from _vt.vreplication where id=2", state, nil) + tme.dbDest1Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 2", &sqltypes.Result{}, nil) + stopped := sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|state", + "int64|varchar"), + "1|Stopped", + ) + tme.dbDest1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbDest2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbDest1Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil) + + // Cancel Migration + cancel1 := "update _vt.vreplication set state = 'Running', stop_pos = null where id = 1" + cancel2 := "update _vt.vreplication set state = 'Running', stop_pos = null where id = 2" + tme.dbDest1Client.addQuery(cancel1, &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery(cancel1, &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery(cancel2, &sqltypes.Result{}, nil) + + _, err = tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 0*time.Second) + want = "DeadlineExceeded" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateWrites(0 timeout) err: %v, must contain %v", err, want) + } + checkServedTypes(t, tme.ts, "ks:-40", 1) + checkServedTypes(t, 
tme.ts, "ks:40-", 1) + checkServedTypes(t, tme.ts, "ks:-80", 2) + checkServedTypes(t, tme.ts, "ks:80-", 2) + checkIsMasterServing(t, tme.ts, "ks:-40", true) + checkIsMasterServing(t, tme.ts, "ks:40-", true) + checkIsMasterServing(t, tme.ts, "ks:-80", false) + checkIsMasterServing(t, tme.ts, "ks:80-", false) + + //------------------------------------------------------------------------------------------------------------------- + // Test successful MigrateWrites. + + // Create journals. + journal1 := "insert into _vt.resharding_journal.*6432976123657117098.*migration_type:SHARDS.*local_position.*MariaDB/5-456-892.*shard_gtids.*-80.*MariaDB/5-456-893.*participants.*40.*40" + tme.dbSource1Client.addQueryRE(journal1, &sqltypes.Result{}, nil) + journal2 := "insert into _vt.resharding_journal.*6432976123657117098.*migration_type:SHARDS.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*shard_gtids.*80.*MariaDB/5-456-893.*participants.*40.*40" + tme.dbSource2Client.addQueryRE(journal2, &sqltypes.Result{}, nil) + + // Create backward replicaions. + tme.dbSource1Client.addQueryRE("insert into _vt.vreplication.*-80.*-40.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) + tme.dbSource2Client.addQueryRE("insert into _vt.vreplication.*-80.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) + tme.dbSource2Client.addQueryRE("insert into _vt.vreplication.*80-.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 2}, nil) + tme.dbSource1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbSource2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbSource2Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil) + + // Delete the target replications. 
+ tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) + + journalID, err := tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 1*time.Second) + if err != nil { + t.Fatal(err) + } + if journalID != 6432976123657117098 { + t.Errorf("journal id: %d, want 6432976123657117098", journalID) + } + + checkServedTypes(t, tme.ts, "ks:-40", 0) + checkServedTypes(t, tme.ts, "ks:40-", 0) + checkServedTypes(t, tme.ts, "ks:-80", 3) + checkServedTypes(t, tme.ts, "ks:80-", 3) + + checkIsMasterServing(t, tme.ts, "ks:-40", false) + checkIsMasterServing(t, tme.ts, "ks:40-", false) + checkIsMasterServing(t, tme.ts, "ks:-80", true) + checkIsMasterServing(t, tme.ts, "ks:80-", true) + + verifyQueries(t, tme.allDBClients) +} + +// TestMigrateFailJournal tests that cancel doesn't get called after point of no return. +// No need to test this for shard migrate because code paths are the same. +func TestMigrateFailJournal(t *testing.T) { + ctx := context.Background() + tme := newTestTableMigrater(ctx, t) + defer tme.stopTablets(t) + + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) + if err != nil { + t.Fatal(err) + } + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, nil, DirectionForward) + if err != nil { + t.Fatal(err) + } + + // Check for journals. + tme.dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 9113431017721636330", &sqltypes.Result{}, nil) + tme.dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 9113431017721636330", &sqltypes.Result{}, nil) + + // Wait for position: Reads current state, updates to Stopped, and re-reads. 
+ state := sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "pos|state|message", + "varchar|varchar|varchar"), + "MariaDB/5-456-892|Running|", + ) + tme.dbDest1Client.addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil) + tme.dbDest2Client.addQuery("select pos, state, message from _vt.vreplication where id=1", state, nil) + tme.dbDest1Client.addQuery("select pos, state, message from _vt.vreplication where id=2", state, nil) + tme.dbDest1Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 1", &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id = 2", &sqltypes.Result{}, nil) + stopped := sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|state", + "int64|varchar"), + "1|Stopped", + ) + tme.dbDest1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbDest2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbDest1Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil) + + // Cancel Migration: these must not get called. + cancel1 := "update _vt.vreplication set state = 'Running', stop_pos = null where id = 1" + cancel2 := "update _vt.vreplication set state = 'Running', stop_pos = null where id = 2" + tme.dbDest1Client.addQuery(cancel1, &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery(cancel1, &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery(cancel2, &sqltypes.Result{}, nil) + + // Make the journal call fail. 
+ tme.dbSource1Client.addQueryRE("insert into _vt.resharding_journal", nil, errors.New("journaling intentionally failed")) + tme.dbSource2Client.addQueryRE("insert into _vt.resharding_journal", nil, errors.New("journaling intentionally failed")) + + _, err = tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 1*time.Second) + want := "journaling intentionally failed" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateWrites(0 timeout) err: %v, must contain %v", err, want) + } + + // Verify that cancel didn't happen. + if tme.dbDest1Client.queries[cancel1].called { + t.Errorf("tme.dbDest1Client.queries[cancel1].called: %v, want false", tme.dbDest1Client.queries[cancel1]) + } + if tme.dbDest2Client.queries[cancel1].called { + t.Errorf("tme.dbDest1Client.queries[cancel1].called: %v, want false", tme.dbDest1Client.queries[cancel1]) + } + if tme.dbDest1Client.queries[cancel2].called { + t.Errorf("tme.dbDest1Client.queries[cancel1].called: %v, want false", tme.dbDest1Client.queries[cancel1]) + } +} + +func TestTableMigrateJournalExists(t *testing.T) { + ctx := context.Background() + tme := newTestTableMigrater(ctx, t) + defer tme.stopTablets(t) + + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) + if err != nil { + t.Fatal(err) + } + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, nil, DirectionForward) + if err != nil { + t.Fatal(err) + } + + // Show one journal as created. + tme.dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 9113431017721636330", sqltypes.MakeTestResult(sqltypes.MakeTestFields("1", "int64"), "1"), nil) + tme.dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 9113431017721636330", &sqltypes.Result{}, nil) + + // Create the missing journal. 
+ journal2 := "insert into _vt.resharding_journal.*9113431017721636330.*tables.*t1.*t2.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*80.*participants.*40.*40" + tme.dbSource2Client.addQueryRE(journal2, &sqltypes.Result{}, nil) + + // Create backward replications. + tme.dbSource1Client.addQueryRE("insert into _vt.vreplication.*ks2.*-80.*t1.*in_keyrange.*c1.*hash.*-40.*t2.*-40.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) + tme.dbSource2Client.addQueryRE("insert into _vt.vreplication.*ks2.*-80.*t1.*in_keyrange.*c1.*hash.*40-.*t2.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) + tme.dbSource2Client.addQueryRE("insert into _vt.vreplication.*ks2.*80-.*t1.*in_keyrange.*c1.*hash.*40-.*t2.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 2}, nil) + stopped := sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|state", + "int64|varchar"), + "1|Stopped", + ) + tme.dbSource1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbSource2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbSource2Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil) + + // Delete the target replications. + tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) + + _, err = tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 1*time.Second) + if err != nil { + t.Fatal(err) + } + + // Routes will be redone. + checkRouting(t, tme.wr, map[string][]string{ + "t1": {"ks2.t1"}, + "ks1.t1": {"ks2.t1"}, + "t2": {"ks2.t2"}, + "ks1.t2": {"ks2.t2"}, + }) + // We're showing that there are no blacklisted tables. But in real life, + // tables on ks1 should be blacklisted from the previous failed attempt. 
+ checkBlacklist(t, tme.ts, "ks1:-40", nil) + checkBlacklist(t, tme.ts, "ks1:40-", nil) + checkBlacklist(t, tme.ts, "ks2:-80", nil) + checkBlacklist(t, tme.ts, "ks2:80-", nil) + + verifyQueries(t, tme.allDBClients) +} + +func TestShardMigrateJournalExists(t *testing.T) { + ctx := context.Background() + tme := newTestShardMigrater(ctx, t) + defer tme.stopTablets(t) + + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) + if err != nil { + t.Fatal(err) + } + err = tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_REPLICA, nil, DirectionForward) + if err != nil { + t.Fatal(err) + } + + // Show one journal as created. + tme.dbSource1Client.addQuery("select 1 from _vt.resharding_journal where id = 6432976123657117098", sqltypes.MakeTestResult(sqltypes.MakeTestFields("1", "int64"), "1"), nil) + tme.dbSource2Client.addQuery("select 1 from _vt.resharding_journal where id = 6432976123657117098", &sqltypes.Result{}, nil) + + // Create the missing journal. + journal2 := "insert into _vt.resharding_journal.*6432976123657117098.*migration_type:SHARDS.*local_position.*MariaDB/5-456-892.*shard_gtids.*80.*MariaDB/5-456-893.*shard_gtids.*80.*MariaDB/5-456-893.*participants.*40.*40" + tme.dbSource2Client.addQueryRE(journal2, &sqltypes.Result{}, nil) + + // Create backward replications. 
+ tme.dbSource1Client.addQueryRE("insert into _vt.vreplication.*-80.*-40.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) + tme.dbSource2Client.addQueryRE("insert into _vt.vreplication.*-80.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 1}, nil) + tme.dbSource2Client.addQueryRE("insert into _vt.vreplication.*80-.*40-.*MariaDB/5-456-893.*Stopped", &sqltypes.Result{InsertID: 2}, nil) + stopped := sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|state", + "int64|varchar"), + "1|Stopped", + ) + tme.dbSource1Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbSource2Client.addQuery("select * from _vt.vreplication where id = 1", stopped, nil) + tme.dbSource2Client.addQuery("select * from _vt.vreplication where id = 2", stopped, nil) + + // Delete the target replications. + tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery("delete from _vt.vreplication where id = 1", &sqltypes.Result{}, nil) + tme.dbDest1Client.addQuery("delete from _vt.vreplication where id = 2", &sqltypes.Result{}, nil) + + _, err = tme.wr.MigrateWrites(ctx, tme.targetKeyspace, "test", 1*time.Second) + if err != nil { + t.Fatal(err) + } + + checkServedTypes(t, tme.ts, "ks:-40", 0) + checkServedTypes(t, tme.ts, "ks:40-", 0) + checkServedTypes(t, tme.ts, "ks:-80", 3) + checkServedTypes(t, tme.ts, "ks:80-", 3) + + checkIsMasterServing(t, tme.ts, "ks:-40", false) + checkIsMasterServing(t, tme.ts, "ks:40-", false) + checkIsMasterServing(t, tme.ts, "ks:-80", true) + checkIsMasterServing(t, tme.ts, "ks:80-", true) + + verifyQueries(t, tme.allDBClients) +} + +func TestMigrateNoStreamsFound(t *testing.T) { + ctx := context.Background() + tme := newTestTableMigrater(ctx, t) + defer tme.stopTablets(t) + + tme.dbDest1Client.addQuery(vreplQueryks2, &sqltypes.Result{}, nil) + tme.dbDest2Client.addQuery(vreplQueryks2, &sqltypes.Result{}, nil) + + err := 
tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) + want := "no streams found in keyspace ks2 for: test" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateReads: %v, must contain %v", err, want) + } +} + +func TestMigrateDistinctSources(t *testing.T) { + ctx := context.Background() + tme := newTestTableMigrater(ctx, t) + defer tme.stopTablets(t) + + bls := &binlogdatapb.BinlogSource{ + Keyspace: "ks2", + Shard: "-80", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1 where in_keyrange('-80')", + }, { + Match: "t2", + Filter: "select * from t2 where in_keyrange('-80')", + }}, + }, + } + tme.dbDest1Client.addQuery(vreplQueryks2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source", + "int64|varchar"), + fmt.Sprintf("1|%v", bls), + ), nil) + + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) + want := "source keyspaces are mismatched across streams" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateReads: %v, must contain %v", err, want) + } +} + +func TestMigrateMismatchedTables(t *testing.T) { + ctx := context.Background() + tme := newTestTableMigrater(ctx, t) + defer tme.stopTablets(t) + + bls := &binlogdatapb.BinlogSource{ + Keyspace: "ks1", + Shard: "-40", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1 where in_keyrange('-80')", + }}, + }, + } + tme.dbDest1Client.addQuery(vreplQueryks2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source", + "int64|varchar"), + fmt.Sprintf("1|%v", bls), + ), nil) + + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) + want := "table lists are mismatched across streams" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateReads: %v, must 
contain %v", err, want) + } +} + +func TestTableMigrateAllShardsNotPresent(t *testing.T) { + ctx := context.Background() + tme := newTestTableMigrater(ctx, t) + defer tme.stopTablets(t) + + tme.dbDest1Client.addQuery(vreplQueryks2, &sqltypes.Result{}, nil) + + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) + want := "mismatched shards for keyspace" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateReads: %v, must contain %v", err, want) + } +} + +func TestMigrateNoTableWildcards(t *testing.T) { + ctx := context.Background() + tme := newTestTableMigrater(ctx, t) + defer tme.stopTablets(t) + + bls1 := &binlogdatapb.BinlogSource{ + Keyspace: "ks1", + Shard: "-40", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + Filter: "", + }}, + }, + } + bls2 := &binlogdatapb.BinlogSource{ + Keyspace: "ks1", + Shard: "40-", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + Filter: "", + }}, + }, + } + tme.dbDest1Client.addQuery(vreplQueryks2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source", + "int64|varchar"), + fmt.Sprintf("1|%v", bls1), + fmt.Sprintf("2|%v", bls2), + ), nil) + bls3 := &binlogdatapb.BinlogSource{ + Keyspace: "ks1", + Shard: "40-", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + Filter: "", + }}, + }, + } + tme.dbDest2Client.addQuery(vreplQueryks2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source", + "int64|varchar"), + fmt.Sprintf("1|%v", bls3), + ), nil) + + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) + want := "cannot migrate streams with wild card table names" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateReads: %v, must contain %v", err, want) + } +} + +func TestShardMigrateTargetMatchesSource(t *testing.T) { + ctx := context.Background() + tme 
:= newTestShardMigrater(ctx, t) + defer tme.stopTablets(t) + + bls := &binlogdatapb.BinlogSource{ + Keyspace: "ks", + Shard: "-80", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + Filter: "-80", + }}, + }, + } + tme.dbDest1Client.addQuery(vreplQueryks, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source", + "int64|varchar"), + fmt.Sprintf("1|%v", bls), + ), nil) + + err := tme.wr.MigrateReads(ctx, tme.targetKeyspace, "test", topodatapb.TabletType_RDONLY, nil, DirectionForward) + want := "target shard matches a source shard" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("MigrateReads: %v, must contain %v", err, want) + } +} + +func checkRouting(t *testing.T, wr *Wrangler, want map[string][]string) { + t.Helper() + ctx := context.Background() + got, err := wr.getRoutingRules(ctx) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("rules:\n%v, want\n%v", got, want) + } + cells, err := wr.ts.GetCellInfoNames(ctx) + if err != nil { + t.Fatal(err) + } + for _, cell := range cells { + checkCellRouting(t, wr, cell, want) + } +} + +func checkCellRouting(t *testing.T, wr *Wrangler, cell string, want map[string][]string) { + t.Helper() + ctx := context.Background() + svs, err := wr.ts.GetSrvVSchema(ctx, cell) + if err != nil { + t.Fatal(err) + } + got := make(map[string][]string) + for _, rr := range svs.RoutingRules.Rules { + got[rr.FromTable] = append(got[rr.FromTable], rr.ToTables...) 
+ } + if !reflect.DeepEqual(got, want) { + t.Errorf("srv rules for cell %s:\n%v, want\n%v", cell, got, want) + } +} + +func checkBlacklist(t *testing.T, ts *topo.Server, keyspaceShard string, want []string) { + t.Helper() + ctx := context.Background() + splits := strings.Split(keyspaceShard, ":") + si, err := ts.GetShard(ctx, splits[0], splits[1]) + if err != nil { + t.Fatal(err) + } + tc := si.GetTabletControl(topodatapb.TabletType_MASTER) + var got []string + if tc != nil { + got = tc.BlacklistedTables + } + if !reflect.DeepEqual(got, want) { + t.Errorf("Blacklisted tables for %v: %v, want %v", keyspaceShard, got, want) + } +} + +func checkServedTypes(t *testing.T, ts *topo.Server, keyspaceShard string, want int) { + t.Helper() + ctx := context.Background() + splits := strings.Split(keyspaceShard, ":") + si, err := ts.GetShard(ctx, splits[0], splits[1]) + if err != nil { + t.Fatal(err) + } + + servedTypes, err := ts.GetShardServingTypes(ctx, si) + if err != nil { + t.Fatal(err) + } + + if len(servedTypes) != want { + t.Errorf("shard %v has wrong served types: got: %v, want: %v", keyspaceShard, len(servedTypes), want) + } +} + +func checkCellServedTypes(t *testing.T, ts *topo.Server, keyspaceShard, cell string, want int) { + t.Helper() + ctx := context.Background() + splits := strings.Split(keyspaceShard, ":") + srvKeyspace, err := ts.GetSrvKeyspace(ctx, cell, splits[0]) + if err != nil { + t.Fatal(err) + } + count := 0 +outer: + for _, partition := range srvKeyspace.GetPartitions() { + for _, ref := range partition.ShardReferences { + if ref.Name == splits[1] { + count++ + continue outer + } + } + } + if count != want { + t.Errorf("serving types for keyspaceShard %s, cell %s: %d, want %d", keyspaceShard, cell, count, want) + } +} + +func checkIsMasterServing(t *testing.T, ts *topo.Server, keyspaceShard string, want bool) { + t.Helper() + ctx := context.Background() + splits := strings.Split(keyspaceShard, ":") + si, err := ts.GetShard(ctx, splits[0], splits[1]) + 
if err != nil { + t.Fatal(err) + } + if want != si.IsMasterServing { + t.Errorf("IsMasterServing(%v): %v, want %v", keyspaceShard, si.IsMasterServing, want) + } +} diff --git a/proto/binlogdata.proto b/proto/binlogdata.proto index 021f1fbf189..b28dbc5356f 100644 --- a/proto/binlogdata.proto +++ b/proto/binlogdata.proto @@ -187,6 +187,7 @@ enum VEventType { FIELD = 13; HEARTBEAT = 14; VGTID = 15; + JOURNAL = 16; } // RowChange represents one row change @@ -216,6 +217,27 @@ message VGtid { repeated ShardGtid shard_gtids = 1; } +message KeyspaceShard { + string keyspace = 1; + string shard = 2; +} + +// MigrationType specifies the type of migration for the Journal. +enum MigrationType { + TABLES = 0; + SHARDS = 1; +} + +message Journal { + int64 id = 1; + MigrationType migration_type = 2; + repeated string tables = 3; + string local_position = 4; + repeated ShardGtid shard_gtids = 5; + repeated KeyspaceShard participants = 6; + repeated int64 reversed_ids = 7; +} + // VEvent represents a vstream event message VEvent { VEventType type = 1; @@ -225,6 +247,7 @@ message VEvent { RowEvent row_event = 5; FieldEvent field_event = 6; VGtid vgtid = 7; + Journal journal = 8; // current_time specifies the current time to handle clock skew. 
int64 current_time = 20; } diff --git a/py/vtproto/binlogdata_pb2.py b/py/vtproto/binlogdata_pb2.py index c744f3b7280..53eda7da5b1 100644 --- a/py/vtproto/binlogdata_pb2.py +++ b/py/vtproto/binlogdata_pb2.py @@ -23,7 +23,7 @@ package='binlogdata', syntax='proto3', serialized_options=_b('Z\'vitess.io/vitess/go/vt/proto/binlogdata'), - serialized_pb=_b('\n\x10\x62inlogdata.proto\x12\nbinlogdata\x1a\x0bvtrpc.proto\x1a\x0bquery.proto\x1a\x0etopodata.proto\"7\n\x07\x43harset\x12\x0e\n\x06\x63lient\x18\x01 \x01(\x05\x12\x0c\n\x04\x63onn\x18\x02 \x01(\x05\x12\x0e\n\x06server\x18\x03 \x01(\x05\"\xb5\x03\n\x11\x42inlogTransaction\x12;\n\nstatements\x18\x01 \x03(\x0b\x32\'.binlogdata.BinlogTransaction.Statement\x12&\n\x0b\x65vent_token\x18\x04 \x01(\x0b\x32\x11.query.EventToken\x1a\xae\x02\n\tStatement\x12\x42\n\x08\x63\x61tegory\x18\x01 \x01(\x0e\x32\x30.binlogdata.BinlogTransaction.Statement.Category\x12$\n\x07\x63harset\x18\x02 \x01(\x0b\x32\x13.binlogdata.Charset\x12\x0b\n\x03sql\x18\x03 \x01(\x0c\"\xa9\x01\n\x08\x43\x61tegory\x12\x13\n\x0f\x42L_UNRECOGNIZED\x10\x00\x12\x0c\n\x08\x42L_BEGIN\x10\x01\x12\r\n\tBL_COMMIT\x10\x02\x12\x0f\n\x0b\x42L_ROLLBACK\x10\x03\x12\x15\n\x11\x42L_DML_DEPRECATED\x10\x04\x12\n\n\x06\x42L_DDL\x10\x05\x12\n\n\x06\x42L_SET\x10\x06\x12\r\n\tBL_INSERT\x10\x07\x12\r\n\tBL_UPDATE\x10\x08\x12\r\n\tBL_DELETE\x10\tJ\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04\"v\n\x15StreamKeyRangeRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\x12$\n\x07\x63harset\x18\x03 \x01(\x0b\x32\x13.binlogdata.Charset\"S\n\x16StreamKeyRangeResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 \x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"]\n\x13StreamTablesRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12\x0e\n\x06tables\x18\x02 \x03(\t\x12$\n\x07\x63harset\x18\x03 \x01(\x0b\x32\x13.binlogdata.Charset\"Q\n\x14StreamTablesResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 
\x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"%\n\x04Rule\x12\r\n\x05match\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\")\n\x06\x46ilter\x12\x1f\n\x05rules\x18\x01 \x03(\x0b\x32\x10.binlogdata.Rule\"\xde\x01\n\x0c\x42inlogSource\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\x12)\n\x0btablet_type\x18\x03 \x01(\x0e\x32\x14.topodata.TabletType\x12%\n\tkey_range\x18\x04 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x0e\n\x06tables\x18\x05 \x03(\t\x12\"\n\x06\x66ilter\x18\x06 \x01(\x0b\x32\x12.binlogdata.Filter\x12\'\n\x06on_ddl\x18\x07 \x01(\x0e\x32\x17.binlogdata.OnDDLAction\"B\n\tRowChange\x12\x1a\n\x06\x62\x65\x66ore\x18\x01 \x01(\x0b\x32\n.query.Row\x12\x19\n\x05\x61\x66ter\x18\x02 \x01(\x0b\x32\n.query.Row\"J\n\x08RowEvent\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12*\n\x0brow_changes\x18\x02 \x03(\x0b\x32\x15.binlogdata.RowChange\">\n\nFieldEvent\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x1c\n\x06\x66ields\x18\x02 \x03(\x0b\x32\x0c.query.Field\":\n\tShardGtid\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\x12\x0c\n\x04gtid\x18\x03 \x01(\t\"3\n\x05VGtid\x12*\n\x0bshard_gtids\x18\x01 \x03(\x0b\x32\x15.binlogdata.ShardGtid\"\xea\x01\n\x06VEvent\x12$\n\x04type\x18\x01 \x01(\x0e\x32\x16.binlogdata.VEventType\x12\x11\n\ttimestamp\x18\x02 \x01(\x03\x12\x0c\n\x04gtid\x18\x03 \x01(\t\x12\x0b\n\x03\x64\x64l\x18\x04 \x01(\t\x12\'\n\trow_event\x18\x05 \x01(\x0b\x32\x14.binlogdata.RowEvent\x12+\n\x0b\x66ield_event\x18\x06 \x01(\x0b\x32\x16.binlogdata.FieldEvent\x12 \n\x05vgtid\x18\x07 \x01(\x0b\x32\x11.binlogdata.VGtid\x12\x14\n\x0c\x63urrent_time\x18\x14 \x01(\x03\"\xc7\x01\n\x0eVStreamRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x10\n\x08position\x18\x04 \x01(\t\x12\"\n\x06\x66ilter\x18\x05 
\x01(\x0b\x32\x12.binlogdata.Filter\"5\n\x0fVStreamResponse\x12\"\n\x06\x65vents\x18\x01 \x03(\x0b\x32\x12.binlogdata.VEvent\"\xc8\x01\n\x12VStreamRowsRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\r\n\x05query\x18\x04 \x01(\t\x12\"\n\x06lastpk\x18\x05 \x01(\x0b\x32\x12.query.QueryResult\"\x97\x01\n\x13VStreamRowsResponse\x12\x1c\n\x06\x66ields\x18\x01 \x03(\x0b\x32\x0c.query.Field\x12\x1e\n\x08pkfields\x18\x02 \x03(\x0b\x32\x0c.query.Field\x12\x0c\n\x04gtid\x18\x03 \x01(\t\x12\x18\n\x04rows\x18\x04 \x03(\x0b\x32\n.query.Row\x12\x1a\n\x06lastpk\x18\x05 \x01(\x0b\x32\n.query.Row*>\n\x0bOnDDLAction\x12\n\n\x06IGNORE\x10\x00\x12\x08\n\x04STOP\x10\x01\x12\x08\n\x04\x45XEC\x10\x02\x12\x0f\n\x0b\x45XEC_IGNORE\x10\x03*\xc4\x01\n\nVEventType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x08\n\x04GTID\x10\x01\x12\t\n\x05\x42\x45GIN\x10\x02\x12\n\n\x06\x43OMMIT\x10\x03\x12\x0c\n\x08ROLLBACK\x10\x04\x12\x07\n\x03\x44\x44L\x10\x05\x12\n\n\x06INSERT\x10\x06\x12\x0b\n\x07REPLACE\x10\x07\x12\n\n\x06UPDATE\x10\x08\x12\n\n\x06\x44\x45LETE\x10\t\x12\x07\n\x03SET\x10\n\x12\t\n\x05OTHER\x10\x0b\x12\x07\n\x03ROW\x10\x0c\x12\t\n\x05\x46IELD\x10\r\x12\r\n\tHEARTBEAT\x10\x0e\x12\t\n\x05VGTID\x10\x0f\x42)Z\'vitess.io/vitess/go/vt/proto/binlogdatab\x06proto3') + serialized_pb=_b('\n\x10\x62inlogdata.proto\x12\nbinlogdata\x1a\x0bvtrpc.proto\x1a\x0bquery.proto\x1a\x0etopodata.proto\"7\n\x07\x43harset\x12\x0e\n\x06\x63lient\x18\x01 \x01(\x05\x12\x0c\n\x04\x63onn\x18\x02 \x01(\x05\x12\x0e\n\x06server\x18\x03 \x01(\x05\"\xb5\x03\n\x11\x42inlogTransaction\x12;\n\nstatements\x18\x01 \x03(\x0b\x32\'.binlogdata.BinlogTransaction.Statement\x12&\n\x0b\x65vent_token\x18\x04 \x01(\x0b\x32\x11.query.EventToken\x1a\xae\x02\n\tStatement\x12\x42\n\x08\x63\x61tegory\x18\x01 
\x01(\x0e\x32\x30.binlogdata.BinlogTransaction.Statement.Category\x12$\n\x07\x63harset\x18\x02 \x01(\x0b\x32\x13.binlogdata.Charset\x12\x0b\n\x03sql\x18\x03 \x01(\x0c\"\xa9\x01\n\x08\x43\x61tegory\x12\x13\n\x0f\x42L_UNRECOGNIZED\x10\x00\x12\x0c\n\x08\x42L_BEGIN\x10\x01\x12\r\n\tBL_COMMIT\x10\x02\x12\x0f\n\x0b\x42L_ROLLBACK\x10\x03\x12\x15\n\x11\x42L_DML_DEPRECATED\x10\x04\x12\n\n\x06\x42L_DDL\x10\x05\x12\n\n\x06\x42L_SET\x10\x06\x12\r\n\tBL_INSERT\x10\x07\x12\r\n\tBL_UPDATE\x10\x08\x12\r\n\tBL_DELETE\x10\tJ\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04\"v\n\x15StreamKeyRangeRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\x12$\n\x07\x63harset\x18\x03 \x01(\x0b\x32\x13.binlogdata.Charset\"S\n\x16StreamKeyRangeResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 \x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"]\n\x13StreamTablesRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12\x0e\n\x06tables\x18\x02 \x03(\t\x12$\n\x07\x63harset\x18\x03 \x01(\x0b\x32\x13.binlogdata.Charset\"Q\n\x14StreamTablesResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 \x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"%\n\x04Rule\x12\r\n\x05match\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\")\n\x06\x46ilter\x12\x1f\n\x05rules\x18\x01 \x03(\x0b\x32\x10.binlogdata.Rule\"\xde\x01\n\x0c\x42inlogSource\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\x12)\n\x0btablet_type\x18\x03 \x01(\x0e\x32\x14.topodata.TabletType\x12%\n\tkey_range\x18\x04 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x0e\n\x06tables\x18\x05 \x03(\t\x12\"\n\x06\x66ilter\x18\x06 \x01(\x0b\x32\x12.binlogdata.Filter\x12\'\n\x06on_ddl\x18\x07 \x01(\x0e\x32\x17.binlogdata.OnDDLAction\"B\n\tRowChange\x12\x1a\n\x06\x62\x65\x66ore\x18\x01 \x01(\x0b\x32\n.query.Row\x12\x19\n\x05\x61\x66ter\x18\x02 \x01(\x0b\x32\n.query.Row\"J\n\x08RowEvent\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12*\n\x0brow_changes\x18\x02 
\x03(\x0b\x32\x15.binlogdata.RowChange\">\n\nFieldEvent\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x1c\n\x06\x66ields\x18\x02 \x03(\x0b\x32\x0c.query.Field\":\n\tShardGtid\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\x12\x0c\n\x04gtid\x18\x03 \x01(\t\"3\n\x05VGtid\x12*\n\x0bshard_gtids\x18\x01 \x03(\x0b\x32\x15.binlogdata.ShardGtid\"0\n\rKeyspaceShard\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\"\xe3\x01\n\x07Journal\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x31\n\x0emigration_type\x18\x02 \x01(\x0e\x32\x19.binlogdata.MigrationType\x12\x0e\n\x06tables\x18\x03 \x03(\t\x12\x16\n\x0elocal_position\x18\x04 \x01(\t\x12*\n\x0bshard_gtids\x18\x05 \x03(\x0b\x32\x15.binlogdata.ShardGtid\x12/\n\x0cparticipants\x18\x06 \x03(\x0b\x32\x19.binlogdata.KeyspaceShard\x12\x14\n\x0creversed_ids\x18\x07 \x03(\x03\"\x90\x02\n\x06VEvent\x12$\n\x04type\x18\x01 \x01(\x0e\x32\x16.binlogdata.VEventType\x12\x11\n\ttimestamp\x18\x02 \x01(\x03\x12\x0c\n\x04gtid\x18\x03 \x01(\t\x12\x0b\n\x03\x64\x64l\x18\x04 \x01(\t\x12\'\n\trow_event\x18\x05 \x01(\x0b\x32\x14.binlogdata.RowEvent\x12+\n\x0b\x66ield_event\x18\x06 \x01(\x0b\x32\x16.binlogdata.FieldEvent\x12 \n\x05vgtid\x18\x07 \x01(\x0b\x32\x11.binlogdata.VGtid\x12$\n\x07journal\x18\x08 \x01(\x0b\x32\x13.binlogdata.Journal\x12\x14\n\x0c\x63urrent_time\x18\x14 \x01(\x03\"\xc7\x01\n\x0eVStreamRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x10\n\x08position\x18\x04 \x01(\t\x12\"\n\x06\x66ilter\x18\x05 \x01(\x0b\x32\x12.binlogdata.Filter\"5\n\x0fVStreamResponse\x12\"\n\x06\x65vents\x18\x01 \x03(\x0b\x32\x12.binlogdata.VEvent\"\xc8\x01\n\x12VStreamRowsRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 
\x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\r\n\x05query\x18\x04 \x01(\t\x12\"\n\x06lastpk\x18\x05 \x01(\x0b\x32\x12.query.QueryResult\"\x97\x01\n\x13VStreamRowsResponse\x12\x1c\n\x06\x66ields\x18\x01 \x03(\x0b\x32\x0c.query.Field\x12\x1e\n\x08pkfields\x18\x02 \x03(\x0b\x32\x0c.query.Field\x12\x0c\n\x04gtid\x18\x03 \x01(\t\x12\x18\n\x04rows\x18\x04 \x03(\x0b\x32\n.query.Row\x12\x1a\n\x06lastpk\x18\x05 \x01(\x0b\x32\n.query.Row*>\n\x0bOnDDLAction\x12\n\n\x06IGNORE\x10\x00\x12\x08\n\x04STOP\x10\x01\x12\x08\n\x04\x45XEC\x10\x02\x12\x0f\n\x0b\x45XEC_IGNORE\x10\x03*\xd1\x01\n\nVEventType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x08\n\x04GTID\x10\x01\x12\t\n\x05\x42\x45GIN\x10\x02\x12\n\n\x06\x43OMMIT\x10\x03\x12\x0c\n\x08ROLLBACK\x10\x04\x12\x07\n\x03\x44\x44L\x10\x05\x12\n\n\x06INSERT\x10\x06\x12\x0b\n\x07REPLACE\x10\x07\x12\n\n\x06UPDATE\x10\x08\x12\n\n\x06\x44\x45LETE\x10\t\x12\x07\n\x03SET\x10\n\x12\t\n\x05OTHER\x10\x0b\x12\x07\n\x03ROW\x10\x0c\x12\t\n\x05\x46IELD\x10\r\x12\r\n\tHEARTBEAT\x10\x0e\x12\t\n\x05VGTID\x10\x0f\x12\x0b\n\x07JOURNAL\x10\x10*\'\n\rMigrationType\x12\n\n\x06TABLES\x10\x00\x12\n\n\x06SHARDS\x10\x01\x42)Z\'vitess.io/vitess/go/vt/proto/binlogdatab\x06proto3') , dependencies=[vtrpc__pb2.DESCRIPTOR,query__pb2.DESCRIPTOR,topodata__pb2.DESCRIPTOR,]) @@ -52,8 +52,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=2433, - serialized_end=2495, + serialized_start=2751, + serialized_end=2813, ) _sym_db.RegisterEnumDescriptor(_ONDDLACTION) @@ -128,15 +128,42 @@ name='VGTID', index=15, number=15, serialized_options=None, type=None), + _descriptor.EnumValueDescriptor( + name='JOURNAL', index=16, number=16, + serialized_options=None, + type=None), ], containing_type=None, serialized_options=None, - serialized_start=2498, - serialized_end=2694, + serialized_start=2816, + serialized_end=3025, ) _sym_db.RegisterEnumDescriptor(_VEVENTTYPE) VEventType = 
enum_type_wrapper.EnumTypeWrapper(_VEVENTTYPE) +_MIGRATIONTYPE = _descriptor.EnumDescriptor( + name='MigrationType', + full_name='binlogdata.MigrationType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='TABLES', index=0, number=0, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SHARDS', index=1, number=1, + serialized_options=None, + type=None), + ], + containing_type=None, + serialized_options=None, + serialized_start=3027, + serialized_end=3066, +) +_sym_db.RegisterEnumDescriptor(_MIGRATIONTYPE) + +MigrationType = enum_type_wrapper.EnumTypeWrapper(_MIGRATIONTYPE) IGNORE = 0 STOP = 1 EXEC = 2 @@ -157,6 +184,9 @@ FIELD = 13 HEARTBEAT = 14 VGTID = 15 +JOURNAL = 16 +TABLES = 0 +SHARDS = 1 _BINLOGTRANSACTION_STATEMENT_CATEGORY = _descriptor.EnumDescriptor( @@ -826,6 +856,117 @@ ) +_KEYSPACESHARD = _descriptor.Descriptor( + name='KeyspaceShard', + full_name='binlogdata.KeyspaceShard', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='keyspace', full_name='binlogdata.KeyspaceShard.keyspace', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='shard', full_name='binlogdata.KeyspaceShard.shard', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1582, + serialized_end=1630, +) + + +_JOURNAL = 
_descriptor.Descriptor( + name='Journal', + full_name='binlogdata.Journal', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='id', full_name='binlogdata.Journal.id', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='migration_type', full_name='binlogdata.Journal.migration_type', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='tables', full_name='binlogdata.Journal.tables', index=2, + number=3, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='local_position', full_name='binlogdata.Journal.local_position', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='shard_gtids', full_name='binlogdata.Journal.shard_gtids', index=4, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='participants', full_name='binlogdata.Journal.participants', index=5, + number=6, type=11, cpp_type=10, 
label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='reversed_ids', full_name='binlogdata.Journal.reversed_ids', index=6, + number=7, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1633, + serialized_end=1860, +) + + _VEVENT = _descriptor.Descriptor( name='VEvent', full_name='binlogdata.VEvent', @@ -883,7 +1024,14 @@ is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='current_time', full_name='binlogdata.VEvent.current_time', index=7, + name='journal', full_name='binlogdata.VEvent.journal', index=7, + number=8, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='current_time', full_name='binlogdata.VEvent.current_time', index=8, number=20, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, @@ -901,8 +1049,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1583, - serialized_end=1817, + serialized_start=1863, + serialized_end=2135, ) @@ -960,8 +1108,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1820, - serialized_end=2019, + serialized_start=2138, + serialized_end=2337, ) @@ -991,8 +1139,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2021, - 
serialized_end=2074, + serialized_start=2339, + serialized_end=2392, ) @@ -1050,8 +1198,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2077, - serialized_end=2277, + serialized_start=2395, + serialized_end=2595, ) @@ -1109,8 +1257,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2280, - serialized_end=2431, + serialized_start=2598, + serialized_end=2749, ) _BINLOGTRANSACTION_STATEMENT.fields_by_name['category'].enum_type = _BINLOGTRANSACTION_STATEMENT_CATEGORY @@ -1134,10 +1282,14 @@ _ROWEVENT.fields_by_name['row_changes'].message_type = _ROWCHANGE _FIELDEVENT.fields_by_name['fields'].message_type = query__pb2._FIELD _VGTID.fields_by_name['shard_gtids'].message_type = _SHARDGTID +_JOURNAL.fields_by_name['migration_type'].enum_type = _MIGRATIONTYPE +_JOURNAL.fields_by_name['shard_gtids'].message_type = _SHARDGTID +_JOURNAL.fields_by_name['participants'].message_type = _KEYSPACESHARD _VEVENT.fields_by_name['type'].enum_type = _VEVENTTYPE _VEVENT.fields_by_name['row_event'].message_type = _ROWEVENT _VEVENT.fields_by_name['field_event'].message_type = _FIELDEVENT _VEVENT.fields_by_name['vgtid'].message_type = _VGTID +_VEVENT.fields_by_name['journal'].message_type = _JOURNAL _VSTREAMREQUEST.fields_by_name['effective_caller_id'].message_type = vtrpc__pb2._CALLERID _VSTREAMREQUEST.fields_by_name['immediate_caller_id'].message_type = query__pb2._VTGATECALLERID _VSTREAMREQUEST.fields_by_name['target'].message_type = query__pb2._TARGET @@ -1165,6 +1317,8 @@ DESCRIPTOR.message_types_by_name['FieldEvent'] = _FIELDEVENT DESCRIPTOR.message_types_by_name['ShardGtid'] = _SHARDGTID DESCRIPTOR.message_types_by_name['VGtid'] = _VGTID +DESCRIPTOR.message_types_by_name['KeyspaceShard'] = _KEYSPACESHARD +DESCRIPTOR.message_types_by_name['Journal'] = _JOURNAL DESCRIPTOR.message_types_by_name['VEvent'] = _VEVENT DESCRIPTOR.message_types_by_name['VStreamRequest'] = _VSTREAMREQUEST DESCRIPTOR.message_types_by_name['VStreamResponse'] = _VSTREAMRESPONSE @@ -1172,6 +1326,7 @@ 
DESCRIPTOR.message_types_by_name['VStreamRowsResponse'] = _VSTREAMROWSRESPONSE DESCRIPTOR.enum_types_by_name['OnDDLAction'] = _ONDDLACTION DESCRIPTOR.enum_types_by_name['VEventType'] = _VEVENTTYPE +DESCRIPTOR.enum_types_by_name['MigrationType'] = _MIGRATIONTYPE _sym_db.RegisterFileDescriptor(DESCRIPTOR) Charset = _reflection.GeneratedProtocolMessageType('Charset', (_message.Message,), dict( @@ -1280,6 +1435,20 @@ )) _sym_db.RegisterMessage(VGtid) +KeyspaceShard = _reflection.GeneratedProtocolMessageType('KeyspaceShard', (_message.Message,), dict( + DESCRIPTOR = _KEYSPACESHARD, + __module__ = 'binlogdata_pb2' + # @@protoc_insertion_point(class_scope:binlogdata.KeyspaceShard) + )) +_sym_db.RegisterMessage(KeyspaceShard) + +Journal = _reflection.GeneratedProtocolMessageType('Journal', (_message.Message,), dict( + DESCRIPTOR = _JOURNAL, + __module__ = 'binlogdata_pb2' + # @@protoc_insertion_point(class_scope:binlogdata.Journal) + )) +_sym_db.RegisterMessage(Journal) + VEvent = _reflection.GeneratedProtocolMessageType('VEvent', (_message.Message,), dict( DESCRIPTOR = _VEVENT, __module__ = 'binlogdata_pb2'