diff --git a/go/vt/binlog/binlogplayer/binlog_player.go b/go/vt/binlog/binlogplayer/binlog_player.go index d778582f77c..b906c028595 100644 --- a/go/vt/binlog/binlogplayer/binlog_player.go +++ b/go/vt/binlog/binlogplayer/binlog_player.go @@ -23,6 +23,7 @@ import ( "bytes" "encoding/hex" "fmt" + "math" "sync" "time" @@ -112,6 +113,7 @@ func NewStats() *Stats { bps.Timings = stats.NewTimings("", "", "") bps.Rates = stats.NewRates("", bps.Timings, 15, 60e9) bps.History = history.New(3) + bps.SecondsBehindMaster.Set(math.MaxInt64) return bps } @@ -200,18 +202,8 @@ func (blp *BinlogPlayer) applyEvents(ctx context.Context) error { log.Error(err) return err } - blp.position, err = mysql.DecodePosition(settings.StartPos) - if err != nil { - log.Error(err) - return err - } - if settings.StopPos != "" { - blp.stopPosition, err = mysql.DecodePosition(settings.StopPos) - if err != nil { - log.Error(err) - return err - } - } + blp.position = settings.StartPos + blp.stopPosition = settings.StopPos t, err := throttler.NewThrottler( fmt.Sprintf("BinlogPlayer/%d", blp.uid), "transactions", @@ -525,8 +517,8 @@ func SetVReplicationState(dbClient DBClient, uid uint32, state, message string) // VRSettings contains the settings of a vreplication table. 
type VRSettings struct { - StartPos string - StopPos string + StartPos mysql.Position + StopPos mysql.Position MaxTPS int64 MaxReplicationLag int64 State string @@ -541,25 +533,34 @@ func ReadVRSettings(dbClient DBClient, uid uint32) (VRSettings, error) { return VRSettings{}, fmt.Errorf("error %v in selecting vreplication settings %v", err, query) } - if qr.RowsAffected != 1 { + if len(qr.Rows) != 1 { return VRSettings{}, fmt.Errorf("checkpoint information not available in db for %v", uid) } + vrRow := qr.Rows[0] - maxTPS, err := sqltypes.ToInt64(qr.Rows[0][2]) + maxTPS, err := sqltypes.ToInt64(vrRow[2]) if err != nil { return VRSettings{}, fmt.Errorf("failed to parse max_tps column: %v", err) } - maxReplicationLag, err := sqltypes.ToInt64(qr.Rows[0][3]) + maxReplicationLag, err := sqltypes.ToInt64(vrRow[3]) if err != nil { return VRSettings{}, fmt.Errorf("failed to parse max_replication_lag column: %v", err) } + startPos, err := mysql.DecodePosition(vrRow[0].ToString()) + if err != nil { + return VRSettings{}, fmt.Errorf("failed to parse pos column: %v", err) + } + stopPos, err := mysql.DecodePosition(vrRow[1].ToString()) + if err != nil { + return VRSettings{}, fmt.Errorf("failed to parse stop_pos column: %v", err) + } return VRSettings{ - StartPos: qr.Rows[0][0].ToString(), - StopPos: qr.Rows[0][1].ToString(), + StartPos: startPos, + StopPos: stopPos, MaxTPS: maxTPS, MaxReplicationLag: maxReplicationLag, - State: qr.Rows[0][4].ToString(), + State: vrRow[4].ToString(), }, nil } @@ -585,12 +586,12 @@ func CreateVReplicationState(workflow string, source *binlogdatapb.BinlogSource, func GenerateUpdatePos(uid uint32, pos mysql.Position, timeUpdated int64, txTimestamp int64) string { if txTimestamp != 0 { return fmt.Sprintf( - "update _vt.vreplication set pos=%v, time_updated=%v, transaction_timestamp=%v where id=%v", + "update _vt.vreplication set pos=%v, time_updated=%v, transaction_timestamp=%v, message='' where id=%v", encodeString(mysql.EncodePosition(pos)), 
timeUpdated, txTimestamp, uid) } return fmt.Sprintf( - "update _vt.vreplication set pos=%v, time_updated=%v where id=%v", + "update _vt.vreplication set pos=%v, time_updated=%v, message='' where id=%v", encodeString(mysql.EncodePosition(pos)), timeUpdated, uid) } diff --git a/go/vt/binlog/binlogplayer/binlog_player_test.go b/go/vt/binlog/binlogplayer/binlog_player_test.go index 99d57f9127d..dedcfcc0858 100644 --- a/go/vt/binlog/binlogplayer/binlog_player_test.go +++ b/go/vt/binlog/binlogplayer/binlog_player_test.go @@ -357,7 +357,7 @@ func TestCreateVReplicationTables(t *testing.T) { func TestUpdateVReplicationPos(t *testing.T) { gtid := mysql.MustParseGTID("MariaDB", "0-1-8283") want := "update _vt.vreplication " + - "set pos='MariaDB/0-1-8283', time_updated=88822 " + + "set pos='MariaDB/0-1-8283', time_updated=88822, message='' " + "where id=78522" got := GenerateUpdatePos(78522, mysql.Position{GTIDSet: gtid.GTIDSet()}, 88822, 0) @@ -369,7 +369,7 @@ func TestUpdateVReplicationPos(t *testing.T) { func TestUpdateVReplicationTimestamp(t *testing.T) { gtid := mysql.MustParseGTID("MariaDB", "0-2-582") want := "update _vt.vreplication " + - "set pos='MariaDB/0-2-582', time_updated=88822, transaction_timestamp=481828 " + + "set pos='MariaDB/0-2-582', time_updated=88822, transaction_timestamp=481828, message='' " + "where id=78522" got := GenerateUpdatePos(78522, mysql.Position{GTIDSet: gtid.GTIDSet()}, 88822, 481828) diff --git a/go/vt/proto/binlogdata/binlogdata.pb.go b/go/vt/proto/binlogdata/binlogdata.pb.go index 87f39855ed3..5d880bbc098 100644 --- a/go/vt/proto/binlogdata/binlogdata.pb.go +++ b/go/vt/proto/binlogdata/binlogdata.pb.go @@ -48,7 +48,7 @@ func (x OnDDLAction) String() string { return proto.EnumName(OnDDLAction_name, int32(x)) } func (OnDDLAction) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_60517ed2deb82a7b, []int{0} + return fileDescriptor_binlogdata_4e5c97d173ca1608, []int{0} } // VEventType enumerates the event types. 
@@ -113,7 +113,7 @@ func (x VEventType) String() string { return proto.EnumName(VEventType_name, int32(x)) } func (VEventType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_60517ed2deb82a7b, []int{1} + return fileDescriptor_binlogdata_4e5c97d173ca1608, []int{1} } type BinlogTransaction_Statement_Category int32 @@ -161,7 +161,7 @@ func (x BinlogTransaction_Statement_Category) String() string { return proto.EnumName(BinlogTransaction_Statement_Category_name, int32(x)) } func (BinlogTransaction_Statement_Category) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_60517ed2deb82a7b, []int{1, 0, 0} + return fileDescriptor_binlogdata_4e5c97d173ca1608, []int{1, 0, 0} } // Charset is the per-statement charset info from a QUERY_EVENT binlog entry. @@ -181,7 +181,7 @@ func (m *Charset) Reset() { *m = Charset{} } func (m *Charset) String() string { return proto.CompactTextString(m) } func (*Charset) ProtoMessage() {} func (*Charset) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_60517ed2deb82a7b, []int{0} + return fileDescriptor_binlogdata_4e5c97d173ca1608, []int{0} } func (m *Charset) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Charset.Unmarshal(m, b) @@ -238,7 +238,7 @@ func (m *BinlogTransaction) Reset() { *m = BinlogTransaction{} } func (m *BinlogTransaction) String() string { return proto.CompactTextString(m) } func (*BinlogTransaction) ProtoMessage() {} func (*BinlogTransaction) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_60517ed2deb82a7b, []int{1} + return fileDescriptor_binlogdata_4e5c97d173ca1608, []int{1} } func (m *BinlogTransaction) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BinlogTransaction.Unmarshal(m, b) @@ -288,7 +288,7 @@ func (m *BinlogTransaction_Statement) Reset() { *m = BinlogTransaction_S func (m *BinlogTransaction_Statement) String() string { return proto.CompactTextString(m) } func (*BinlogTransaction_Statement) ProtoMessage() {} func 
(*BinlogTransaction_Statement) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_60517ed2deb82a7b, []int{1, 0} + return fileDescriptor_binlogdata_4e5c97d173ca1608, []int{1, 0} } func (m *BinlogTransaction_Statement) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BinlogTransaction_Statement.Unmarshal(m, b) @@ -346,7 +346,7 @@ func (m *StreamKeyRangeRequest) Reset() { *m = StreamKeyRangeRequest{} } func (m *StreamKeyRangeRequest) String() string { return proto.CompactTextString(m) } func (*StreamKeyRangeRequest) ProtoMessage() {} func (*StreamKeyRangeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_60517ed2deb82a7b, []int{2} + return fileDescriptor_binlogdata_4e5c97d173ca1608, []int{2} } func (m *StreamKeyRangeRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamKeyRangeRequest.Unmarshal(m, b) @@ -399,7 +399,7 @@ func (m *StreamKeyRangeResponse) Reset() { *m = StreamKeyRangeResponse{} func (m *StreamKeyRangeResponse) String() string { return proto.CompactTextString(m) } func (*StreamKeyRangeResponse) ProtoMessage() {} func (*StreamKeyRangeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_60517ed2deb82a7b, []int{3} + return fileDescriptor_binlogdata_4e5c97d173ca1608, []int{3} } func (m *StreamKeyRangeResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamKeyRangeResponse.Unmarshal(m, b) @@ -443,7 +443,7 @@ func (m *StreamTablesRequest) Reset() { *m = StreamTablesRequest{} } func (m *StreamTablesRequest) String() string { return proto.CompactTextString(m) } func (*StreamTablesRequest) ProtoMessage() {} func (*StreamTablesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_60517ed2deb82a7b, []int{4} + return fileDescriptor_binlogdata_4e5c97d173ca1608, []int{4} } func (m *StreamTablesRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamTablesRequest.Unmarshal(m, b) @@ -496,7 +496,7 @@ func (m *StreamTablesResponse) Reset() { 
*m = StreamTablesResponse{} } func (m *StreamTablesResponse) String() string { return proto.CompactTextString(m) } func (*StreamTablesResponse) ProtoMessage() {} func (*StreamTablesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_60517ed2deb82a7b, []int{5} + return fileDescriptor_binlogdata_4e5c97d173ca1608, []int{5} } func (m *StreamTablesResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamTablesResponse.Unmarshal(m, b) @@ -541,7 +541,7 @@ func (m *Rule) Reset() { *m = Rule{} } func (m *Rule) String() string { return proto.CompactTextString(m) } func (*Rule) ProtoMessage() {} func (*Rule) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_60517ed2deb82a7b, []int{6} + return fileDescriptor_binlogdata_4e5c97d173ca1608, []int{6} } func (m *Rule) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Rule.Unmarshal(m, b) @@ -588,7 +588,7 @@ func (m *Filter) Reset() { *m = Filter{} } func (m *Filter) String() string { return proto.CompactTextString(m) } func (*Filter) ProtoMessage() {} func (*Filter) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_60517ed2deb82a7b, []int{7} + return fileDescriptor_binlogdata_4e5c97d173ca1608, []int{7} } func (m *Filter) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Filter.Unmarshal(m, b) @@ -643,7 +643,7 @@ func (m *BinlogSource) Reset() { *m = BinlogSource{} } func (m *BinlogSource) String() string { return proto.CompactTextString(m) } func (*BinlogSource) ProtoMessage() {} func (*BinlogSource) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_60517ed2deb82a7b, []int{8} + return fileDescriptor_binlogdata_4e5c97d173ca1608, []int{8} } func (m *BinlogSource) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BinlogSource.Unmarshal(m, b) @@ -725,7 +725,7 @@ func (m *RowChange) Reset() { *m = RowChange{} } func (m *RowChange) String() string { return proto.CompactTextString(m) } func (*RowChange) ProtoMessage() {} func (*RowChange) 
Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_60517ed2deb82a7b, []int{9} + return fileDescriptor_binlogdata_4e5c97d173ca1608, []int{9} } func (m *RowChange) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RowChange.Unmarshal(m, b) @@ -772,7 +772,7 @@ func (m *RowEvent) Reset() { *m = RowEvent{} } func (m *RowEvent) String() string { return proto.CompactTextString(m) } func (*RowEvent) ProtoMessage() {} func (*RowEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_60517ed2deb82a7b, []int{10} + return fileDescriptor_binlogdata_4e5c97d173ca1608, []int{10} } func (m *RowEvent) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RowEvent.Unmarshal(m, b) @@ -818,7 +818,7 @@ func (m *FieldEvent) Reset() { *m = FieldEvent{} } func (m *FieldEvent) String() string { return proto.CompactTextString(m) } func (*FieldEvent) ProtoMessage() {} func (*FieldEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_60517ed2deb82a7b, []int{11} + return fileDescriptor_binlogdata_4e5c97d173ca1608, []int{11} } func (m *FieldEvent) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_FieldEvent.Unmarshal(m, b) @@ -871,7 +871,7 @@ func (m *VEvent) Reset() { *m = VEvent{} } func (m *VEvent) String() string { return proto.CompactTextString(m) } func (*VEvent) ProtoMessage() {} func (*VEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_60517ed2deb82a7b, []int{12} + return fileDescriptor_binlogdata_4e5c97d173ca1608, []int{12} } func (m *VEvent) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VEvent.Unmarshal(m, b) @@ -956,7 +956,7 @@ func (m *VStreamRequest) Reset() { *m = VStreamRequest{} } func (m *VStreamRequest) String() string { return proto.CompactTextString(m) } func (*VStreamRequest) ProtoMessage() {} func (*VStreamRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_60517ed2deb82a7b, []int{13} + return fileDescriptor_binlogdata_4e5c97d173ca1608, []int{13} } func (m 
*VStreamRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VStreamRequest.Unmarshal(m, b) @@ -1023,7 +1023,7 @@ func (m *VStreamResponse) Reset() { *m = VStreamResponse{} } func (m *VStreamResponse) String() string { return proto.CompactTextString(m) } func (*VStreamResponse) ProtoMessage() {} func (*VStreamResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_binlogdata_60517ed2deb82a7b, []int{14} + return fileDescriptor_binlogdata_4e5c97d173ca1608, []int{14} } func (m *VStreamResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VStreamResponse.Unmarshal(m, b) @@ -1050,6 +1050,148 @@ func (m *VStreamResponse) GetEvents() []*VEvent { return nil } +// VStreamRowsRequest is the payload for VStreamRows +type VStreamRowsRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *query.VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *query.Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + Query string `protobuf:"bytes,4,opt,name=query,proto3" json:"query,omitempty"` + Lastpk *query.QueryResult `protobuf:"bytes,5,opt,name=lastpk,proto3" json:"lastpk,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VStreamRowsRequest) Reset() { *m = VStreamRowsRequest{} } +func (m *VStreamRowsRequest) String() string { return proto.CompactTextString(m) } +func (*VStreamRowsRequest) ProtoMessage() {} +func (*VStreamRowsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_binlogdata_4e5c97d173ca1608, []int{15} +} +func (m *VStreamRowsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VStreamRowsRequest.Unmarshal(m, b) +} +func (m *VStreamRowsRequest) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { + return xxx_messageInfo_VStreamRowsRequest.Marshal(b, m, deterministic) +} +func (dst *VStreamRowsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_VStreamRowsRequest.Merge(dst, src) +} +func (m *VStreamRowsRequest) XXX_Size() int { + return xxx_messageInfo_VStreamRowsRequest.Size(m) +} +func (m *VStreamRowsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_VStreamRowsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_VStreamRowsRequest proto.InternalMessageInfo + +func (m *VStreamRowsRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *VStreamRowsRequest) GetImmediateCallerId() *query.VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *VStreamRowsRequest) GetTarget() *query.Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *VStreamRowsRequest) GetQuery() string { + if m != nil { + return m.Query + } + return "" +} + +func (m *VStreamRowsRequest) GetLastpk() *query.QueryResult { + if m != nil { + return m.Lastpk + } + return nil +} + +// VStreamRowsResponse is the response from VStreamRows +type VStreamRowsResponse struct { + Fields []*query.Field `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty"` + Pkfields []*query.Field `protobuf:"bytes,2,rep,name=pkfields,proto3" json:"pkfields,omitempty"` + Gtid string `protobuf:"bytes,3,opt,name=gtid,proto3" json:"gtid,omitempty"` + Rows []*query.Row `protobuf:"bytes,4,rep,name=rows,proto3" json:"rows,omitempty"` + Lastpk *query.Row `protobuf:"bytes,5,opt,name=lastpk,proto3" json:"lastpk,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VStreamRowsResponse) Reset() { *m = VStreamRowsResponse{} } +func (m *VStreamRowsResponse) String() string { return proto.CompactTextString(m) } +func (*VStreamRowsResponse) ProtoMessage() {} +func (*VStreamRowsResponse) 
Descriptor() ([]byte, []int) { + return fileDescriptor_binlogdata_4e5c97d173ca1608, []int{16} +} +func (m *VStreamRowsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VStreamRowsResponse.Unmarshal(m, b) +} +func (m *VStreamRowsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VStreamRowsResponse.Marshal(b, m, deterministic) +} +func (dst *VStreamRowsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_VStreamRowsResponse.Merge(dst, src) +} +func (m *VStreamRowsResponse) XXX_Size() int { + return xxx_messageInfo_VStreamRowsResponse.Size(m) +} +func (m *VStreamRowsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_VStreamRowsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_VStreamRowsResponse proto.InternalMessageInfo + +func (m *VStreamRowsResponse) GetFields() []*query.Field { + if m != nil { + return m.Fields + } + return nil +} + +func (m *VStreamRowsResponse) GetPkfields() []*query.Field { + if m != nil { + return m.Pkfields + } + return nil +} + +func (m *VStreamRowsResponse) GetGtid() string { + if m != nil { + return m.Gtid + } + return "" +} + +func (m *VStreamRowsResponse) GetRows() []*query.Row { + if m != nil { + return m.Rows + } + return nil +} + +func (m *VStreamRowsResponse) GetLastpk() *query.Row { + if m != nil { + return m.Lastpk + } + return nil +} + func init() { proto.RegisterType((*Charset)(nil), "binlogdata.Charset") proto.RegisterType((*BinlogTransaction)(nil), "binlogdata.BinlogTransaction") @@ -1067,89 +1209,98 @@ func init() { proto.RegisterType((*VEvent)(nil), "binlogdata.VEvent") proto.RegisterType((*VStreamRequest)(nil), "binlogdata.VStreamRequest") proto.RegisterType((*VStreamResponse)(nil), "binlogdata.VStreamResponse") + proto.RegisterType((*VStreamRowsRequest)(nil), "binlogdata.VStreamRowsRequest") + proto.RegisterType((*VStreamRowsResponse)(nil), "binlogdata.VStreamRowsResponse") proto.RegisterEnum("binlogdata.OnDDLAction", OnDDLAction_name, OnDDLAction_value) 
proto.RegisterEnum("binlogdata.VEventType", VEventType_name, VEventType_value) proto.RegisterEnum("binlogdata.BinlogTransaction_Statement_Category", BinlogTransaction_Statement_Category_name, BinlogTransaction_Statement_Category_value) } -func init() { proto.RegisterFile("binlogdata.proto", fileDescriptor_binlogdata_60517ed2deb82a7b) } - -var fileDescriptor_binlogdata_60517ed2deb82a7b = []byte{ - // 1215 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x5d, 0x6e, 0xdb, 0x46, - 0x10, 0x8e, 0x44, 0x8a, 0x12, 0x87, 0x8e, 0x4d, 0xaf, 0x7f, 0x2a, 0x18, 0x0d, 0xe0, 0x12, 0x6d, - 0xe3, 0x1a, 0xa8, 0x9c, 0xaa, 0x7f, 0x4f, 0x6d, 0x21, 0x89, 0x8c, 0xa3, 0x84, 0x96, 0x9c, 0x35, - 0x93, 0x14, 0x79, 0x21, 0x68, 0x72, 0x65, 0x13, 0xa6, 0x48, 0x85, 0x5c, 0xdb, 0xd5, 0x09, 0x7a, - 0x80, 0xbe, 0xf6, 0x02, 0x3d, 0x42, 0x2f, 0xd0, 0x9b, 0xf4, 0x1e, 0xc5, 0xfe, 0x90, 0x92, 0x1c, - 0xa0, 0x71, 0x1f, 0xfa, 0x36, 0xff, 0x3b, 0xf3, 0xcd, 0x70, 0x86, 0x60, 0x9e, 0xc7, 0x69, 0x92, - 0x5d, 0x44, 0x01, 0x0d, 0x3a, 0xb3, 0x3c, 0xa3, 0x19, 0x82, 0x85, 0x64, 0xcf, 0xb8, 0xa1, 0xf9, - 0x2c, 0x14, 0x8a, 0x3d, 0xe3, 0xdd, 0x35, 0xc9, 0xe7, 0x92, 0x59, 0xa7, 0xd9, 0x2c, 0x5b, 0x78, - 0x59, 0x27, 0xd0, 0x1c, 0x5c, 0x06, 0x79, 0x41, 0x28, 0xda, 0x05, 0x2d, 0x4c, 0x62, 0x92, 0xd2, - 0x76, 0x6d, 0xbf, 0x76, 0xd0, 0xc0, 0x92, 0x43, 0x08, 0xd4, 0x30, 0x4b, 0xd3, 0x76, 0x9d, 0x4b, - 0x39, 0xcd, 0x6c, 0x0b, 0x92, 0xdf, 0x90, 0xbc, 0xad, 0x08, 0x5b, 0xc1, 0x59, 0x7f, 0x2b, 0xb0, - 0xd9, 0xe7, 0x79, 0x78, 0x79, 0x90, 0x16, 0x41, 0x48, 0xe3, 0x2c, 0x45, 0xc7, 0x00, 0x05, 0x0d, - 0x28, 0x99, 0x92, 0x94, 0x16, 0xed, 0xda, 0xbe, 0x72, 0x60, 0x74, 0x1f, 0x77, 0x96, 0x2a, 0x78, - 0xcf, 0xa5, 0x73, 0x56, 0xda, 0xe3, 0x25, 0x57, 0xd4, 0x05, 0x83, 0xdc, 0x90, 0x94, 0xfa, 0x34, - 0xbb, 0x22, 0x69, 0x5b, 0xdd, 0xaf, 0x1d, 0x18, 0xdd, 0xcd, 0x8e, 0x28, 0xd0, 0x61, 0x1a, 0x8f, - 0x29, 0x30, 0x90, 0x8a, 0xde, 0xfb, 0xab, 0x0e, 0x7a, 0x15, 0x0d, 0xb9, 0xd0, 
0x0a, 0x03, 0x4a, - 0x2e, 0xb2, 0x7c, 0xce, 0xcb, 0x5c, 0xef, 0x3e, 0xb9, 0x67, 0x22, 0x9d, 0x81, 0xf4, 0xc3, 0x55, - 0x04, 0xf4, 0x25, 0x34, 0x43, 0x81, 0x1e, 0x47, 0xc7, 0xe8, 0x6e, 0x2d, 0x07, 0x93, 0xc0, 0xe2, - 0xd2, 0x06, 0x99, 0xa0, 0x14, 0xef, 0x12, 0x0e, 0xd9, 0x1a, 0x66, 0xa4, 0xf5, 0x47, 0x0d, 0x5a, - 0x65, 0x5c, 0xb4, 0x05, 0x1b, 0x7d, 0xd7, 0x7f, 0x35, 0xc2, 0xce, 0x60, 0x7c, 0x3c, 0x1a, 0xbe, - 0x75, 0x6c, 0xf3, 0x01, 0x5a, 0x83, 0x56, 0xdf, 0xf5, 0xfb, 0xce, 0xf1, 0x70, 0x64, 0xd6, 0xd0, - 0x43, 0xd0, 0xfb, 0xae, 0x3f, 0x18, 0x9f, 0x9c, 0x0c, 0x3d, 0xb3, 0x8e, 0x36, 0xc0, 0xe8, 0xbb, - 0x3e, 0x1e, 0xbb, 0x6e, 0xbf, 0x37, 0x78, 0x61, 0x2a, 0x68, 0x07, 0x36, 0xfb, 0xae, 0x6f, 0x9f, - 0xb8, 0xbe, 0xed, 0x9c, 0x62, 0x67, 0xd0, 0xf3, 0x1c, 0xdb, 0x54, 0x11, 0x80, 0xc6, 0xc4, 0xb6, - 0x6b, 0x36, 0x24, 0x7d, 0xe6, 0x78, 0xa6, 0x26, 0xc3, 0x0d, 0x47, 0x67, 0x0e, 0xf6, 0xcc, 0xa6, - 0x64, 0x5f, 0x9d, 0xda, 0x3d, 0xcf, 0x31, 0x5b, 0x92, 0xb5, 0x1d, 0xd7, 0xf1, 0x1c, 0x53, 0x7f, - 0xae, 0xb6, 0xea, 0xa6, 0xf2, 0x5c, 0x6d, 0x29, 0xa6, 0x6a, 0xfd, 0x56, 0x83, 0x9d, 0x33, 0x9a, - 0x93, 0x60, 0xfa, 0x82, 0xcc, 0x71, 0x90, 0x5e, 0x10, 0x4c, 0xde, 0x5d, 0x93, 0x82, 0xa2, 0x3d, - 0x68, 0xcd, 0xb2, 0x22, 0x66, 0xd8, 0x71, 0x80, 0x75, 0x5c, 0xf1, 0xe8, 0x08, 0xf4, 0x2b, 0x32, - 0xf7, 0x73, 0x66, 0x2f, 0x01, 0x43, 0x9d, 0x6a, 0x20, 0xab, 0x48, 0xad, 0x2b, 0x49, 0x2d, 0xe3, - 0xab, 0x7c, 0x18, 0x5f, 0x6b, 0x02, 0xbb, 0x77, 0x93, 0x2a, 0x66, 0x59, 0x5a, 0x10, 0xe4, 0x02, - 0x12, 0x8e, 0x3e, 0x5d, 0xf4, 0x96, 0xe7, 0x67, 0x74, 0x1f, 0xfd, 0xeb, 0x00, 0xe0, 0xcd, 0xf3, - 0xbb, 0x22, 0xeb, 0x17, 0xd8, 0x12, 0xef, 0x78, 0xc1, 0x79, 0x42, 0x8a, 0xfb, 0x94, 0xbe, 0x0b, - 0x1a, 0xe5, 0xc6, 0xed, 0xfa, 0xbe, 0x72, 0xa0, 0x63, 0xc9, 0xfd, 0xd7, 0x0a, 0x23, 0xd8, 0x5e, - 0x7d, 0xf9, 0x7f, 0xa9, 0xef, 0x1b, 0x50, 0xf1, 0x75, 0x42, 0xd0, 0x36, 0x34, 0xa6, 0x01, 0x0d, - 0x2f, 0x65, 0x35, 0x82, 0x61, 0xa5, 0x4c, 0xe2, 0x84, 0x92, 0x9c, 0xb7, 0x50, 0xc7, 0x92, 0xb3, - 0x9e, 0x80, 0xf6, 
0x94, 0x53, 0xe8, 0x73, 0x68, 0xe4, 0xd7, 0xac, 0x56, 0xf1, 0xa9, 0x9b, 0xcb, - 0x09, 0xb0, 0xc0, 0x58, 0xa8, 0xad, 0xdf, 0xeb, 0xb0, 0x26, 0x12, 0x3a, 0xcb, 0xae, 0xf3, 0x90, - 0x30, 0x04, 0xaf, 0xc8, 0xbc, 0x98, 0x05, 0x21, 0x29, 0x11, 0x2c, 0x79, 0x96, 0x4c, 0x71, 0x19, - 0xe4, 0x91, 0x7c, 0x55, 0x30, 0xe8, 0x5b, 0x30, 0x38, 0x92, 0xd4, 0xa7, 0xf3, 0x19, 0xe1, 0x18, - 0xae, 0x77, 0xb7, 0x17, 0x43, 0xc5, 0x71, 0xa2, 0xde, 0x7c, 0x46, 0x30, 0xd0, 0x8a, 0x5e, 0x9d, - 0x44, 0xf5, 0x1e, 0x93, 0xb8, 0xe8, 0x5f, 0x63, 0xa5, 0x7f, 0x87, 0x15, 0x18, 0x9a, 0x8c, 0xb2, - 0x54, 0xab, 0x80, 0xa3, 0x04, 0x08, 0x75, 0x40, 0xcb, 0x52, 0x3f, 0x8a, 0x92, 0x76, 0x93, 0xa7, - 0xf9, 0xd1, 0xb2, 0xed, 0x38, 0xb5, 0x6d, 0xb7, 0x27, 0x5a, 0xd2, 0xc8, 0x52, 0x3b, 0x4a, 0xac, - 0x97, 0xa0, 0xe3, 0xec, 0x76, 0x70, 0xc9, 0x13, 0xb0, 0x40, 0x3b, 0x27, 0x93, 0x2c, 0x27, 0xb2, - 0xab, 0x20, 0xb7, 0x1e, 0xce, 0x6e, 0xb1, 0xd4, 0xa0, 0x7d, 0x68, 0x04, 0x93, 0xb2, 0x31, 0xab, - 0x26, 0x42, 0x61, 0x05, 0xd0, 0xc2, 0xd9, 0x2d, 0xdf, 0x94, 0xe8, 0x11, 0x08, 0x44, 0xfc, 0x34, - 0x98, 0x96, 0x70, 0xeb, 0x5c, 0x32, 0x0a, 0xa6, 0x04, 0x7d, 0x07, 0x46, 0x9e, 0xdd, 0xfa, 0x21, - 0x7f, 0x5e, 0x8c, 0xad, 0xd1, 0xdd, 0x59, 0x69, 0x65, 0x99, 0x1c, 0x86, 0xbc, 0x24, 0x0b, 0xeb, - 0x25, 0xc0, 0xd3, 0x98, 0x24, 0xd1, 0xbd, 0x1e, 0xf9, 0x94, 0xc1, 0x47, 0x92, 0xa8, 0x8c, 0xbf, - 0x26, 0x53, 0xe6, 0x11, 0xb0, 0xd4, 0x59, 0xbf, 0xd6, 0x41, 0x7b, 0x2d, 0xe2, 0x1d, 0x82, 0xca, - 0x1b, 0x2d, 0x76, 0xf7, 0xee, 0x72, 0x3a, 0xc2, 0x82, 0xb7, 0x9a, 0xdb, 0xa0, 0x8f, 0x41, 0xa7, - 0xf1, 0x94, 0x14, 0x34, 0x98, 0xce, 0x38, 0x24, 0x0a, 0x5e, 0x08, 0xd8, 0x59, 0xbb, 0xa0, 0x71, - 0xc4, 0x47, 0x46, 0xc7, 0x9c, 0x66, 0x0b, 0x9a, 0xb5, 0x47, 0xe5, 0x22, 0x46, 0xa2, 0xaf, 0x40, - 0x67, 0x28, 0xf0, 0x7b, 0xd2, 0x6e, 0x70, 0x58, 0xb7, 0xef, 0x60, 0xc0, 0x9f, 0xc5, 0xad, 0xbc, - 0xc4, 0xf5, 0x7b, 0x30, 0x78, 0xde, 0xd2, 0x49, 0xcc, 0xc5, 0xee, 0xea, 0x5c, 0x94, 0xf8, 0x60, - 0x98, 0x2c, 0xb0, 0xfa, 0x04, 0xd6, 0xc2, 0xeb, 0x3c, 
0xe7, 0xf7, 0x2d, 0x9e, 0x92, 0xf6, 0x36, - 0x4f, 0xd9, 0x90, 0x32, 0x2f, 0x9e, 0x12, 0x86, 0xc4, 0xfa, 0x6b, 0xb1, 0x01, 0xca, 0xad, 0xf3, - 0x13, 0x6c, 0x91, 0xc9, 0x84, 0x84, 0x34, 0xbe, 0x21, 0x7e, 0x18, 0x24, 0x09, 0xc9, 0xfd, 0x38, - 0x92, 0x53, 0xb2, 0xd1, 0x11, 0x7f, 0x02, 0x03, 0x2e, 0x1f, 0xda, 0x78, 0xb3, 0xb2, 0x95, 0xa2, - 0x08, 0x39, 0xb0, 0x15, 0x4f, 0xa7, 0x24, 0x8a, 0x03, 0xba, 0x1c, 0x40, 0xcc, 0xd0, 0x8e, 0x6c, - 0xc8, 0x6b, 0xef, 0x38, 0xa0, 0x64, 0x11, 0xa6, 0xf2, 0xa8, 0xc2, 0x7c, 0xc6, 0xbe, 0x90, 0xfc, - 0xa2, 0x5a, 0x64, 0x0f, 0xa5, 0xa7, 0xc7, 0x85, 0x58, 0x2a, 0x57, 0x96, 0xa4, 0x7a, 0x67, 0x49, - 0x2e, 0x3e, 0xa6, 0xc6, 0x87, 0x3e, 0x26, 0xeb, 0x07, 0xd8, 0xa8, 0x80, 0x90, 0x4b, 0xf0, 0x10, - 0x34, 0x0e, 0x79, 0xb9, 0x77, 0xd0, 0xfb, 0xd3, 0x81, 0xa5, 0xc5, 0xe1, 0x8f, 0x60, 0x2c, 0x7d, - 0x71, 0xec, 0x28, 0x0e, 0x8f, 0x47, 0x63, 0xec, 0x98, 0x0f, 0x50, 0x0b, 0xd4, 0x33, 0x6f, 0x7c, - 0x6a, 0xd6, 0x18, 0xe5, 0xfc, 0xec, 0x0c, 0xc4, 0xa1, 0x65, 0x94, 0x2f, 0x8d, 0x94, 0xc3, 0x3f, - 0x6b, 0x00, 0x8b, 0x81, 0x43, 0x06, 0x34, 0x5f, 0x8d, 0x5e, 0x8c, 0xc6, 0x6f, 0x46, 0x22, 0xc0, - 0xb1, 0x37, 0xb4, 0xcd, 0x1a, 0xd2, 0xa1, 0x21, 0x2e, 0x77, 0x9d, 0xbd, 0x20, 0xcf, 0xb6, 0xc2, - 0x6e, 0x7a, 0x75, 0xb3, 0x55, 0xd4, 0x04, 0xa5, 0xba, 0xcc, 0xf2, 0x14, 0x6b, 0x2c, 0x20, 0x76, - 0x4e, 0xdd, 0xde, 0xc0, 0x31, 0x9b, 0x4c, 0x51, 0x1d, 0x65, 0x00, 0xad, 0xbc, 0xc8, 0xcc, 0x93, - 0xdd, 0x71, 0x60, 0xef, 0x8c, 0xbd, 0x67, 0x0e, 0x36, 0x0d, 0x26, 0xc3, 0xe3, 0x37, 0xe6, 0x1a, - 0x93, 0x3d, 0x1d, 0x3a, 0xae, 0x6d, 0x3e, 0x64, 0x87, 0xfc, 0x99, 0xd3, 0xc3, 0x5e, 0xdf, 0xe9, - 0x79, 0xe6, 0x7a, 0xff, 0x8b, 0xb7, 0x8f, 0x6f, 0x62, 0x4a, 0x8a, 0xa2, 0x13, 0x67, 0x47, 0x82, - 0x3a, 0xba, 0xc8, 0x8e, 0x6e, 0xe8, 0x11, 0xff, 0x27, 0x3c, 0x5a, 0xa0, 0x76, 0xae, 0x71, 0xc9, - 0xd7, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x0e, 0xe3, 0x8a, 0xa5, 0x6f, 0x0a, 0x00, 0x00, +func init() { proto.RegisterFile("binlogdata.proto", fileDescriptor_binlogdata_4e5c97d173ca1608) } + 
+var fileDescriptor_binlogdata_4e5c97d173ca1608 = []byte{ + // 1315 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x56, 0x4d, 0x72, 0xdb, 0xc6, + 0x12, 0x36, 0x09, 0x10, 0x24, 0x1a, 0xb2, 0x04, 0x8d, 0x7e, 0x1e, 0x4b, 0xf5, 0xfc, 0x4a, 0x0f, + 0xf5, 0x5e, 0xac, 0xa8, 0x2a, 0x94, 0xc3, 0xfc, 0xad, 0x92, 0x14, 0x49, 0xc0, 0x32, 0x6d, 0x88, + 0x94, 0x47, 0xb0, 0x9d, 0xf2, 0x06, 0x05, 0x81, 0x43, 0x09, 0x25, 0x10, 0xa0, 0x81, 0xa1, 0x14, + 0x1e, 0x20, 0x95, 0x03, 0x64, 0x9b, 0x0b, 0xe4, 0x08, 0x59, 0x64, 0x9b, 0x9b, 0xe4, 0x1e, 0xa9, + 0xf9, 0x01, 0x48, 0xca, 0x89, 0xad, 0x2c, 0xb2, 0xc8, 0x06, 0xd5, 0xdd, 0xd3, 0xdd, 0xd3, 0xfd, + 0x75, 0x4f, 0xa3, 0xc1, 0x3c, 0x8f, 0x92, 0x38, 0xbd, 0x18, 0x05, 0x34, 0x68, 0x4d, 0xb3, 0x94, + 0xa6, 0x08, 0x16, 0x92, 0x3d, 0xe3, 0x9a, 0x66, 0xd3, 0x50, 0x1c, 0xec, 0x19, 0x6f, 0x66, 0x24, + 0x9b, 0x4b, 0x66, 0x9d, 0xa6, 0xd3, 0x74, 0x61, 0x65, 0x9d, 0x40, 0xbd, 0x77, 0x19, 0x64, 0x39, + 0xa1, 0x68, 0x17, 0xb4, 0x30, 0x8e, 0x48, 0x42, 0x9b, 0x95, 0xfd, 0xca, 0x41, 0x0d, 0x4b, 0x0e, + 0x21, 0x50, 0xc3, 0x34, 0x49, 0x9a, 0x55, 0x2e, 0xe5, 0x34, 0xd3, 0xcd, 0x49, 0x76, 0x4d, 0xb2, + 0xa6, 0x22, 0x74, 0x05, 0x67, 0xfd, 0xa6, 0xc0, 0x66, 0x97, 0xc7, 0xe1, 0x65, 0x41, 0x92, 0x07, + 0x21, 0x8d, 0xd2, 0x04, 0x1d, 0x03, 0xe4, 0x34, 0xa0, 0x64, 0x42, 0x12, 0x9a, 0x37, 0x2b, 0xfb, + 0xca, 0x81, 0xd1, 0x7e, 0xd8, 0x5a, 0xca, 0xe0, 0x2d, 0x93, 0xd6, 0x59, 0xa1, 0x8f, 0x97, 0x4c, + 0x51, 0x1b, 0x0c, 0x72, 0x4d, 0x12, 0xea, 0xd3, 0xf4, 0x8a, 0x24, 0x4d, 0x75, 0xbf, 0x72, 0x60, + 0xb4, 0x37, 0x5b, 0x22, 0x41, 0x87, 0x9d, 0x78, 0xec, 0x00, 0x03, 0x29, 0xe9, 0xbd, 0x5f, 0xab, + 0xa0, 0x97, 0xde, 0x90, 0x0b, 0x8d, 0x30, 0xa0, 0xe4, 0x22, 0xcd, 0xe6, 0x3c, 0xcd, 0xf5, 0xf6, + 0xa3, 0x3b, 0x06, 0xd2, 0xea, 0x49, 0x3b, 0x5c, 0x7a, 0x40, 0x1f, 0x41, 0x3d, 0x14, 0xe8, 0x71, + 0x74, 0x8c, 0xf6, 0xd6, 0xb2, 0x33, 0x09, 0x2c, 0x2e, 0x74, 0x90, 0x09, 0x4a, 0xfe, 0x26, 0xe6, + 0x90, 0xad, 0x61, 0x46, 
0x5a, 0x3f, 0x55, 0xa0, 0x51, 0xf8, 0x45, 0x5b, 0xb0, 0xd1, 0x75, 0xfd, + 0x17, 0x03, 0xec, 0xf4, 0x86, 0xc7, 0x83, 0xfe, 0x6b, 0xc7, 0x36, 0xef, 0xa1, 0x35, 0x68, 0x74, + 0x5d, 0xbf, 0xeb, 0x1c, 0xf7, 0x07, 0x66, 0x05, 0xdd, 0x07, 0xbd, 0xeb, 0xfa, 0xbd, 0xe1, 0xc9, + 0x49, 0xdf, 0x33, 0xab, 0x68, 0x03, 0x8c, 0xae, 0xeb, 0xe3, 0xa1, 0xeb, 0x76, 0x3b, 0xbd, 0x67, + 0xa6, 0x82, 0x76, 0x60, 0xb3, 0xeb, 0xfa, 0xf6, 0x89, 0xeb, 0xdb, 0xce, 0x29, 0x76, 0x7a, 0x1d, + 0xcf, 0xb1, 0x4d, 0x15, 0x01, 0x68, 0x4c, 0x6c, 0xbb, 0x66, 0x4d, 0xd2, 0x67, 0x8e, 0x67, 0x6a, + 0xd2, 0x5d, 0x7f, 0x70, 0xe6, 0x60, 0xcf, 0xac, 0x4b, 0xf6, 0xc5, 0xa9, 0xdd, 0xf1, 0x1c, 0xb3, + 0x21, 0x59, 0xdb, 0x71, 0x1d, 0xcf, 0x31, 0xf5, 0xa7, 0x6a, 0xa3, 0x6a, 0x2a, 0x4f, 0xd5, 0x86, + 0x62, 0xaa, 0xd6, 0x0f, 0x15, 0xd8, 0x39, 0xa3, 0x19, 0x09, 0x26, 0xcf, 0xc8, 0x1c, 0x07, 0xc9, + 0x05, 0xc1, 0xe4, 0xcd, 0x8c, 0xe4, 0x14, 0xed, 0x41, 0x63, 0x9a, 0xe6, 0x11, 0xc3, 0x8e, 0x03, + 0xac, 0xe3, 0x92, 0x47, 0x47, 0xa0, 0x5f, 0x91, 0xb9, 0x9f, 0x31, 0x7d, 0x09, 0x18, 0x6a, 0x95, + 0x0d, 0x59, 0x7a, 0x6a, 0x5c, 0x49, 0x6a, 0x19, 0x5f, 0xe5, 0xfd, 0xf8, 0x5a, 0x63, 0xd8, 0xbd, + 0x1d, 0x54, 0x3e, 0x4d, 0x93, 0x9c, 0x20, 0x17, 0x90, 0x30, 0xf4, 0xe9, 0xa2, 0xb6, 0x3c, 0x3e, + 0xa3, 0xfd, 0xe0, 0x9d, 0x0d, 0x80, 0x37, 0xcf, 0x6f, 0x8b, 0xac, 0x6f, 0x61, 0x4b, 0xdc, 0xe3, + 0x05, 0xe7, 0x31, 0xc9, 0xef, 0x92, 0xfa, 0x2e, 0x68, 0x94, 0x2b, 0x37, 0xab, 0xfb, 0xca, 0x81, + 0x8e, 0x25, 0xf7, 0x57, 0x33, 0x1c, 0xc1, 0xf6, 0xea, 0xcd, 0x7f, 0x4b, 0x7e, 0x9f, 0x82, 0x8a, + 0x67, 0x31, 0x41, 0xdb, 0x50, 0x9b, 0x04, 0x34, 0xbc, 0x94, 0xd9, 0x08, 0x86, 0xa5, 0x32, 0x8e, + 0x62, 0x4a, 0x32, 0x5e, 0x42, 0x1d, 0x4b, 0xce, 0x7a, 0x04, 0xda, 0x63, 0x4e, 0xa1, 0x0f, 0xa0, + 0x96, 0xcd, 0x58, 0xae, 0xe2, 0xa9, 0x9b, 0xcb, 0x01, 0x30, 0xc7, 0x58, 0x1c, 0x5b, 0x3f, 0x56, + 0x61, 0x4d, 0x04, 0x74, 0x96, 0xce, 0xb2, 0x90, 0x30, 0x04, 0xaf, 0xc8, 0x3c, 0x9f, 0x06, 0x21, + 0x29, 0x10, 0x2c, 0x78, 0x16, 0x4c, 0x7e, 0x19, 0x64, 0x23, 
0x79, 0xab, 0x60, 0xd0, 0x67, 0x60, + 0x70, 0x24, 0xa9, 0x4f, 0xe7, 0x53, 0xc2, 0x31, 0x5c, 0x6f, 0x6f, 0x2f, 0x9a, 0x8a, 0xe3, 0x44, + 0xbd, 0xf9, 0x94, 0x60, 0xa0, 0x25, 0xbd, 0xda, 0x89, 0xea, 0x1d, 0x3a, 0x71, 0x51, 0xbf, 0xda, + 0x4a, 0xfd, 0x0e, 0x4b, 0x30, 0x34, 0xe9, 0x65, 0x29, 0x57, 0x01, 0x47, 0x01, 0x10, 0x6a, 0x81, + 0x96, 0x26, 0xfe, 0x68, 0x14, 0x37, 0xeb, 0x3c, 0xcc, 0x7f, 0x2d, 0xeb, 0x0e, 0x13, 0xdb, 0x76, + 0x3b, 0xa2, 0x24, 0xb5, 0x34, 0xb1, 0x47, 0xb1, 0xf5, 0x1c, 0x74, 0x9c, 0xde, 0xf4, 0x2e, 0x79, + 0x00, 0x16, 0x68, 0xe7, 0x64, 0x9c, 0x66, 0x44, 0x56, 0x15, 0xe4, 0xd4, 0xc3, 0xe9, 0x0d, 0x96, + 0x27, 0x68, 0x1f, 0x6a, 0xc1, 0xb8, 0x28, 0xcc, 0xaa, 0x8a, 0x38, 0xb0, 0x02, 0x68, 0xe0, 0xf4, + 0x86, 0x4f, 0x4a, 0xf4, 0x00, 0x04, 0x22, 0x7e, 0x12, 0x4c, 0x0a, 0xb8, 0x75, 0x2e, 0x19, 0x04, + 0x13, 0x82, 0x3e, 0x07, 0x23, 0x4b, 0x6f, 0xfc, 0x90, 0x5f, 0x2f, 0xda, 0xd6, 0x68, 0xef, 0xac, + 0x94, 0xb2, 0x08, 0x0e, 0x43, 0x56, 0x90, 0xb9, 0xf5, 0x1c, 0xe0, 0x71, 0x44, 0xe2, 0xd1, 0x9d, + 0x2e, 0xf9, 0x1f, 0x83, 0x8f, 0xc4, 0xa3, 0xc2, 0xff, 0x9a, 0x0c, 0x99, 0x7b, 0xc0, 0xf2, 0xcc, + 0xfa, 0xbe, 0x0a, 0xda, 0x4b, 0xe1, 0xef, 0x10, 0x54, 0x5e, 0x68, 0x31, 0xbb, 0x77, 0x97, 0xc3, + 0x11, 0x1a, 0xbc, 0xd4, 0x5c, 0x07, 0xfd, 0x1b, 0x74, 0x1a, 0x4d, 0x48, 0x4e, 0x83, 0xc9, 0x94, + 0x43, 0xa2, 0xe0, 0x85, 0x80, 0xfd, 0xd6, 0x2e, 0x68, 0x34, 0xe2, 0x2d, 0xa3, 0x63, 0x4e, 0xb3, + 0x01, 0xcd, 0xca, 0xa3, 0x72, 0x11, 0x23, 0xd1, 0xc7, 0xa0, 0x33, 0x14, 0xf8, 0xff, 0xa4, 0x59, + 0xe3, 0xb0, 0x6e, 0xdf, 0xc2, 0x80, 0x5f, 0x8b, 0x1b, 0x59, 0x81, 0xeb, 0x17, 0x60, 0xf0, 0xb8, + 0xa5, 0x91, 0xe8, 0x8b, 0xdd, 0xd5, 0xbe, 0x28, 0xf0, 0xc1, 0x30, 0x5e, 0x60, 0xf5, 0x5f, 0x58, + 0x0b, 0x67, 0x59, 0xc6, 0xff, 0x6f, 0xd1, 0x84, 0x34, 0xb7, 0x79, 0xc8, 0x86, 0x94, 0x79, 0xd1, + 0x84, 0x30, 0x24, 0xd6, 0x5f, 0x8a, 0x09, 0x50, 0x4c, 0x9d, 0xaf, 0x61, 0x8b, 0x8c, 0xc7, 0x24, + 0xa4, 0xd1, 0x35, 0xf1, 0xc3, 0x20, 0x8e, 0x49, 0xe6, 0x47, 0x23, 0xd9, 0x25, 0x1b, 0x2d, 0xb1, + 
0x09, 0xf4, 0xb8, 0xbc, 0x6f, 0xe3, 0xcd, 0x52, 0x57, 0x8a, 0x46, 0xc8, 0x81, 0xad, 0x68, 0x32, + 0x21, 0xa3, 0x28, 0xa0, 0xcb, 0x0e, 0x44, 0x0f, 0xed, 0xc8, 0x82, 0xbc, 0xf4, 0x8e, 0x03, 0x4a, + 0x16, 0x6e, 0x4a, 0x8b, 0xd2, 0xcd, 0xff, 0xd9, 0x0b, 0xc9, 0x2e, 0xca, 0x41, 0x76, 0x5f, 0x5a, + 0x7a, 0x5c, 0x88, 0xe5, 0xe1, 0xca, 0x90, 0x54, 0x6f, 0x0d, 0xc9, 0xc5, 0x63, 0xaa, 0xbd, 0xef, + 0x31, 0x59, 0x5f, 0xc2, 0x46, 0x09, 0x84, 0x1c, 0x82, 0x87, 0xa0, 0x71, 0xc8, 0x8b, 0xb9, 0x83, + 0xde, 0xee, 0x0e, 0x2c, 0x35, 0xac, 0xef, 0xaa, 0x80, 0x0a, 0xfb, 0xf4, 0x26, 0xff, 0x87, 0x82, + 0xb9, 0x0d, 0x35, 0x2e, 0x97, 0x48, 0x0a, 0x86, 0xe1, 0x10, 0x07, 0x39, 0x9d, 0x5e, 0x95, 0x30, + 0x0a, 0xe3, 0xe7, 0xec, 0x8b, 0x49, 0x3e, 0x8b, 0x29, 0x96, 0x1a, 0xd6, 0x2f, 0x15, 0xd8, 0x5a, + 0xc1, 0x41, 0x62, 0xb9, 0x78, 0x98, 0x95, 0x3f, 0x7f, 0x98, 0xe8, 0x00, 0x1a, 0xd3, 0xab, 0x77, + 0x3c, 0xe0, 0xf2, 0xf4, 0x0f, 0x5f, 0xdb, 0x7f, 0x40, 0xcd, 0xd2, 0x9b, 0xbc, 0xa9, 0x72, 0xcb, + 0xe5, 0x69, 0xc5, 0xe5, 0x6c, 0xe4, 0xad, 0xe4, 0xb1, 0x32, 0xf2, 0xc4, 0xc9, 0xe1, 0x57, 0x60, + 0x2c, 0x4d, 0x4e, 0xb6, 0xdc, 0xf4, 0x8f, 0x07, 0x43, 0xec, 0x98, 0xf7, 0x50, 0x03, 0xd4, 0x33, + 0x6f, 0x78, 0x6a, 0x56, 0x18, 0xe5, 0x7c, 0xe3, 0xf4, 0xc4, 0xc2, 0xc4, 0x28, 0x5f, 0x2a, 0x29, + 0x87, 0x3f, 0x57, 0x00, 0x16, 0x83, 0x03, 0x19, 0x50, 0x7f, 0x31, 0x78, 0x36, 0x18, 0xbe, 0x1a, + 0x08, 0x07, 0xc7, 0x5e, 0xdf, 0x36, 0x2b, 0x48, 0x87, 0x9a, 0xd8, 0xc0, 0xaa, 0xec, 0x06, 0xb9, + 0x7e, 0x29, 0x6c, 0x37, 0x2b, 0x77, 0x2f, 0x15, 0xd5, 0x41, 0x29, 0x37, 0x2c, 0xb9, 0x52, 0x69, + 0xcc, 0x21, 0x76, 0x4e, 0xdd, 0x4e, 0xcf, 0x31, 0xeb, 0xec, 0xa0, 0x5c, 0xae, 0x00, 0xb4, 0x62, + 0xb3, 0x62, 0x96, 0x6c, 0x1f, 0x03, 0x76, 0xcf, 0xd0, 0x7b, 0xe2, 0x60, 0xd3, 0x60, 0x32, 0x3c, + 0x7c, 0x65, 0xae, 0x31, 0xd9, 0xe3, 0xbe, 0xe3, 0xda, 0xe6, 0x7d, 0xb6, 0x90, 0x3d, 0x71, 0x3a, + 0xd8, 0xeb, 0x3a, 0x1d, 0xcf, 0x5c, 0xef, 0x7e, 0xf8, 0xfa, 0xe1, 0x75, 0x44, 0x49, 0x9e, 0xb7, + 0xa2, 0xf4, 0x48, 0x50, 0x47, 0x17, 
0xe9, 0xd1, 0x35, 0x3d, 0xe2, 0xbb, 0xfd, 0xd1, 0xa2, 0xfb, + 0xcf, 0x35, 0x2e, 0xf9, 0xe4, 0xf7, 0x00, 0x00, 0x00, 0xff, 0xff, 0x47, 0x47, 0xf1, 0x1d, 0x37, + 0x0c, 0x00, 0x00, } diff --git a/go/vt/proto/queryservice/queryservice.pb.go b/go/vt/proto/queryservice/queryservice.pb.go index 7a0bce4f401..ac8303d162a 100644 --- a/go/vt/proto/queryservice/queryservice.pb.go +++ b/go/vt/proto/queryservice/queryservice.pb.go @@ -88,6 +88,8 @@ type QueryClient interface { UpdateStream(ctx context.Context, in *query.UpdateStreamRequest, opts ...grpc.CallOption) (Query_UpdateStreamClient, error) // VStream streams vreplication events. VStream(ctx context.Context, in *binlogdata.VStreamRequest, opts ...grpc.CallOption) (Query_VStreamClient, error) + // VStreamRows streams rows from the specified starting point. + VStreamRows(ctx context.Context, in *binlogdata.VStreamRowsRequest, opts ...grpc.CallOption) (Query_VStreamRowsClient, error) } type queryClient struct { @@ -411,6 +413,38 @@ func (x *queryVStreamClient) Recv() (*binlogdata.VStreamResponse, error) { return m, nil } +func (c *queryClient) VStreamRows(ctx context.Context, in *binlogdata.VStreamRowsRequest, opts ...grpc.CallOption) (Query_VStreamRowsClient, error) { + stream, err := c.cc.NewStream(ctx, &_Query_serviceDesc.Streams[5], "/queryservice.Query/VStreamRows", opts...) 
+ if err != nil { + return nil, err + } + x := &queryVStreamRowsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Query_VStreamRowsClient interface { + Recv() (*binlogdata.VStreamRowsResponse, error) + grpc.ClientStream +} + +type queryVStreamRowsClient struct { + grpc.ClientStream +} + +func (x *queryVStreamRowsClient) Recv() (*binlogdata.VStreamRowsResponse, error) { + m := new(binlogdata.VStreamRowsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + // QueryServer is the server API for Query service. type QueryServer interface { // Execute executes the specified SQL query (might be in a @@ -464,6 +498,8 @@ type QueryServer interface { UpdateStream(*query.UpdateStreamRequest, Query_UpdateStreamServer) error // VStream streams vreplication events. VStream(*binlogdata.VStreamRequest, Query_VStreamServer) error + // VStreamRows streams rows from the specified starting point. 
+ VStreamRows(*binlogdata.VStreamRowsRequest, Query_VStreamRowsServer) error } func RegisterQueryServer(s *grpc.Server, srv QueryServer) { @@ -881,6 +917,27 @@ func (x *queryVStreamServer) Send(m *binlogdata.VStreamResponse) error { return x.ServerStream.SendMsg(m) } +func _Query_VStreamRows_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(binlogdata.VStreamRowsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(QueryServer).VStreamRows(m, &queryVStreamRowsServer{stream}) +} + +type Query_VStreamRowsServer interface { + Send(*binlogdata.VStreamRowsResponse) error + grpc.ServerStream +} + +type queryVStreamRowsServer struct { + grpc.ServerStream +} + +func (x *queryVStreamRowsServer) Send(m *binlogdata.VStreamRowsResponse) error { + return x.ServerStream.SendMsg(m) +} + var _Query_serviceDesc = grpc.ServiceDesc{ ServiceName: "queryservice.Query", HandlerType: (*QueryServer)(nil), @@ -980,46 +1037,53 @@ var _Query_serviceDesc = grpc.ServiceDesc{ Handler: _Query_VStream_Handler, ServerStreams: true, }, + { + StreamName: "VStreamRows", + Handler: _Query_VStreamRows_Handler, + ServerStreams: true, + }, }, Metadata: "queryservice.proto", } -func init() { proto.RegisterFile("queryservice.proto", fileDescriptor_queryservice_17509881eb07629d) } - -var fileDescriptor_queryservice_17509881eb07629d = []byte{ - // 544 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x95, 0xdf, 0x6b, 0xd4, 0x40, - 0x10, 0xc7, 0xf5, 0xa1, 0xad, 0x4c, 0x4f, 0xad, 0x5b, 0xab, 0x36, 0xad, 0x6d, 0xed, 0x9b, 0x08, - 0x17, 0x51, 0x41, 0x28, 0xf8, 0xd0, 0x0b, 0x16, 0xa5, 0xf8, 0xeb, 0xce, 0x16, 0xf1, 0x41, 0xd8, - 0x4b, 0x86, 0x33, 0x34, 0x97, 0x4d, 0x93, 0xbd, 0x43, 0xff, 0x6a, 0xff, 0x05, 0x31, 0x9b, 0x99, - 0xec, 0xee, 0x25, 0xbe, 0xdd, 0x7e, 0xbf, 0x33, 0x1f, 0x26, 0x3b, 0x37, 0xb3, 0x20, 0xae, 0x17, - 0x58, 0xfe, 0xae, 0xb0, 0x5c, 0xa6, 0x31, 0x0e, 0x8b, 0x52, 0x69, 0x25, 0x06, 0xb6, 
0x16, 0x6c, - 0xd6, 0x27, 0x63, 0x05, 0x5b, 0xd3, 0x34, 0xcf, 0xd4, 0x2c, 0x91, 0x5a, 0x1a, 0xe5, 0xc5, 0x9f, - 0x01, 0xac, 0x7d, 0xf9, 0x17, 0x21, 0x4e, 0x60, 0xe3, 0xed, 0x2f, 0x8c, 0x17, 0x1a, 0xc5, 0xce, - 0xd0, 0x24, 0x35, 0xe7, 0x31, 0x5e, 0x2f, 0xb0, 0xd2, 0xc1, 0x03, 0x5f, 0xae, 0x0a, 0x95, 0x57, - 0x78, 0x7c, 0x43, 0xbc, 0x87, 0x41, 0x23, 0x8e, 0xa4, 0x8e, 0x7f, 0x8a, 0xc0, 0x8d, 0xac, 0x45, - 0xa2, 0xec, 0x75, 0x7a, 0x8c, 0xfa, 0x08, 0xb7, 0x27, 0xba, 0x44, 0x39, 0xa7, 0x62, 0x28, 0xde, - 0x51, 0x09, 0xb6, 0xdf, 0x6d, 0x12, 0xed, 0xf9, 0x4d, 0xf1, 0x0a, 0xd6, 0x46, 0x38, 0x4b, 0x73, - 0xb1, 0xdd, 0x84, 0xd6, 0x27, 0xca, 0xbf, 0xef, 0x8a, 0x5c, 0xc5, 0x6b, 0x58, 0x8f, 0xd4, 0x7c, - 0x9e, 0x6a, 0x41, 0x11, 0xe6, 0x48, 0x79, 0x3b, 0x9e, 0xca, 0x89, 0x6f, 0xe0, 0xd6, 0x58, 0x65, - 0xd9, 0x54, 0xc6, 0x57, 0x82, 0xee, 0x8b, 0x04, 0x4a, 0x7e, 0xb8, 0xa2, 0x73, 0xfa, 0x09, 0x6c, - 0x7c, 0x2e, 0xb1, 0x90, 0x65, 0xdb, 0x84, 0xe6, 0xec, 0x37, 0x81, 0x65, 0xce, 0xfd, 0x04, 0x77, - 0x4c, 0x39, 0x8d, 0x95, 0x88, 0x7d, 0xa7, 0x4a, 0x92, 0x89, 0xf4, 0xb8, 0xc7, 0x65, 0xe0, 0x05, - 0x6c, 0x51, 0x89, 0x8c, 0x3c, 0xf0, 0x6a, 0xf7, 0xa1, 0x87, 0xbd, 0x3e, 0x63, 0xbf, 0xc1, 0xbd, - 0xa8, 0x44, 0xa9, 0xf1, 0x6b, 0x29, 0xf3, 0x4a, 0xc6, 0x3a, 0x55, 0xb9, 0xa0, 0xbc, 0x15, 0x87, - 0xc0, 0x47, 0xfd, 0x01, 0x4c, 0x3e, 0x83, 0xcd, 0x89, 0x96, 0xa5, 0x6e, 0x5a, 0xb7, 0xcb, 0x7f, - 0x0e, 0xd6, 0x88, 0x16, 0x74, 0x59, 0x0e, 0x07, 0x35, 0xf7, 0x91, 0x39, 0xad, 0xb6, 0xc2, 0xb1, - 0x2d, 0xe6, 0xfc, 0x80, 0xed, 0x48, 0xe5, 0x71, 0xb6, 0x48, 0x9c, 0x6f, 0x7d, 0xc2, 0x17, 0xbf, - 0xe2, 0x11, 0xf7, 0xf8, 0x7f, 0x21, 0xcc, 0x1f, 0xc3, 0xdd, 0x31, 0xca, 0xc4, 0x66, 0x53, 0x53, - 0x3d, 0x9d, 0xb8, 0x07, 0x7d, 0xb6, 0x3d, 0xca, 0xf5, 0x30, 0xd0, 0xf8, 0x05, 0xf6, 0x84, 0x78, - 0xd3, 0xb7, 0xd7, 0xe9, 0xd9, 0x8d, 0xb6, 0x1d, 0xb3, 0x1a, 0x0e, 0x3b, 0x72, 0x9c, 0xfd, 0x70, - 0xd4, 0x1f, 0x60, 0x2f, 0x89, 0x0f, 0x58, 0x55, 0x72, 0x86, 0x66, 0xf0, 0x79, 0x49, 0x38, 0xaa, - 0xbf, 0x24, 0x3c, 0xd3, 
0x5a, 0x12, 0x11, 0x40, 0x63, 0x9e, 0xc6, 0x57, 0xe2, 0x91, 0x1b, 0x7f, - 0xda, 0xb6, 0x7b, 0xb7, 0xc3, 0xe1, 0xa2, 0x22, 0x80, 0x49, 0x91, 0xa5, 0xda, 0xac, 0x53, 0x82, - 0xb4, 0x92, 0x0f, 0xb1, 0x1d, 0x86, 0x9c, 0xc3, 0xc0, 0xd4, 0xf7, 0x0e, 0x65, 0xa6, 0xdb, 0x4d, - 0x6a, 0x8b, 0xfe, 0xf5, 0xbb, 0x9e, 0xf5, 0x59, 0xe7, 0x30, 0xb8, 0x28, 0x12, 0xa9, 0xe9, 0x96, - 0x08, 0x66, 0x8b, 0x3e, 0xcc, 0xf5, 0x2c, 0xd8, 0x19, 0x6c, 0x5c, 0x32, 0xc7, 0x7a, 0x47, 0x2e, - 0x7d, 0x4e, 0x97, 0xd7, 0x72, 0x46, 0xcf, 0xbe, 0x3f, 0x5d, 0xa6, 0x1a, 0xab, 0x6a, 0x98, 0xaa, - 0xd0, 0xfc, 0x0a, 0x67, 0x2a, 0x5c, 0xea, 0xb0, 0x7e, 0x91, 0x42, 0xfb, 0xf5, 0x9a, 0xae, 0xd7, - 0xda, 0xcb, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x47, 0x8e, 0x80, 0xe8, 0x06, 0x00, 0x00, +func init() { proto.RegisterFile("queryservice.proto", fileDescriptor_queryservice_98b01c0566d3f32e) } + +var fileDescriptor_queryservice_98b01c0566d3f32e = []byte{ + // 563 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x95, 0x4f, 0x6f, 0xd3, 0x4c, + 0x10, 0xc6, 0xdf, 0xf7, 0xd0, 0x06, 0x4d, 0x52, 0x28, 0x5b, 0x0a, 0xd4, 0x2d, 0x69, 0xe9, 0x0d, + 0x21, 0x25, 0x08, 0x90, 0x90, 0x2a, 0x71, 0x68, 0x2c, 0x2a, 0x50, 0xc5, 0x3f, 0x87, 0x56, 0x88, + 0x03, 0xd2, 0xc6, 0x5e, 0x05, 0xab, 0x8e, 0xd7, 0xf5, 0x6e, 0x52, 0xf8, 0x7c, 0x7c, 0x31, 0x84, + 0xd7, 0x33, 0xde, 0xdd, 0xd8, 0xdc, 0xb2, 0xcf, 0x33, 0xf3, 0xd3, 0x78, 0x27, 0x33, 0x0b, 0xec, + 0x7a, 0x29, 0xca, 0x5f, 0x4a, 0x94, 0xab, 0x34, 0x16, 0xa3, 0xa2, 0x94, 0x5a, 0xb2, 0x81, 0xad, + 0x05, 0xfd, 0xea, 0x64, 0xac, 0x60, 0x7b, 0x96, 0xe6, 0x99, 0x9c, 0x27, 0x5c, 0x73, 0xa3, 0x3c, + 0xff, 0xbd, 0x05, 0x1b, 0x9f, 0xff, 0x46, 0xb0, 0x13, 0xe8, 0xbd, 0xf9, 0x29, 0xe2, 0xa5, 0x16, + 0x6c, 0x77, 0x64, 0x92, 0xea, 0x73, 0x24, 0xae, 0x97, 0x42, 0xe9, 0xe0, 0xbe, 0x2f, 0xab, 0x42, + 0xe6, 0x4a, 0x1c, 0xff, 0xc7, 0xde, 0xc1, 0xa0, 0x16, 0x27, 0x5c, 0xc7, 0x3f, 0x58, 0xe0, 0x46, + 0x56, 0x22, 0x52, 0xf6, 0x5b, 0x3d, 0x42, 
0x7d, 0x80, 0xad, 0xa9, 0x2e, 0x05, 0x5f, 0x60, 0x31, + 0x18, 0xef, 0xa8, 0x08, 0x3b, 0x68, 0x37, 0x91, 0xf6, 0xec, 0x7f, 0xf6, 0x12, 0x36, 0x26, 0x62, + 0x9e, 0xe6, 0x6c, 0xa7, 0x0e, 0xad, 0x4e, 0x98, 0x7f, 0xcf, 0x15, 0xa9, 0x8a, 0x57, 0xb0, 0x19, + 0xca, 0xc5, 0x22, 0xd5, 0x0c, 0x23, 0xcc, 0x11, 0xf3, 0x76, 0x3d, 0x95, 0x12, 0x5f, 0xc3, 0xad, + 0x48, 0x66, 0xd9, 0x8c, 0xc7, 0x57, 0x0c, 0xef, 0x0b, 0x05, 0x4c, 0x7e, 0xb0, 0xa6, 0x53, 0xfa, + 0x09, 0xf4, 0x3e, 0x95, 0xa2, 0xe0, 0x65, 0xd3, 0x84, 0xfa, 0xec, 0x37, 0x81, 0x64, 0xca, 0xfd, + 0x08, 0xb7, 0x4d, 0x39, 0xb5, 0x95, 0xb0, 0x03, 0xa7, 0x4a, 0x94, 0x91, 0xf4, 0xa8, 0xc3, 0x25, + 0xe0, 0x05, 0x6c, 0x63, 0x89, 0x84, 0x1c, 0x7a, 0xb5, 0xfb, 0xd0, 0xc3, 0x4e, 0x9f, 0xb0, 0x5f, + 0xe1, 0x6e, 0x58, 0x0a, 0xae, 0xc5, 0x97, 0x92, 0xe7, 0x8a, 0xc7, 0x3a, 0x95, 0x39, 0xc3, 0xbc, + 0x35, 0x07, 0xc1, 0x47, 0xdd, 0x01, 0x44, 0x3e, 0x83, 0xfe, 0x54, 0xf3, 0x52, 0xd7, 0xad, 0xdb, + 0xa3, 0x3f, 0x07, 0x69, 0x48, 0x0b, 0xda, 0x2c, 0x87, 0x23, 0x34, 0xf5, 0x91, 0x38, 0x8d, 0xb6, + 0xc6, 0xb1, 0x2d, 0xe2, 0x7c, 0x87, 0x9d, 0x50, 0xe6, 0x71, 0xb6, 0x4c, 0x9c, 0x6f, 0x7d, 0x4c, + 0x17, 0xbf, 0xe6, 0x21, 0xf7, 0xf8, 0x5f, 0x21, 0xc4, 0x8f, 0xe0, 0x4e, 0x24, 0x78, 0x62, 0xb3, + 0xb1, 0xa9, 0x9e, 0x8e, 0xdc, 0x61, 0x97, 0x6d, 0x8f, 0x72, 0x35, 0x0c, 0x38, 0x7e, 0x81, 0x3d, + 0x21, 0xde, 0xf4, 0xed, 0xb7, 0x7a, 0x76, 0xa3, 0x6d, 0xc7, 0xac, 0x86, 0xc3, 0x96, 0x1c, 0x67, + 0x3f, 0x1c, 0x75, 0x07, 0xd8, 0x4b, 0xe2, 0xbd, 0x50, 0x8a, 0xcf, 0x85, 0x19, 0x7c, 0x5a, 0x12, + 0x8e, 0xea, 0x2f, 0x09, 0xcf, 0xb4, 0x96, 0x44, 0x08, 0x50, 0x9b, 0xa7, 0xf1, 0x15, 0x7b, 0xe8, + 0xc6, 0x9f, 0x36, 0xed, 0xde, 0x6b, 0x71, 0xa8, 0xa8, 0x10, 0x60, 0x5a, 0x64, 0xa9, 0x36, 0xeb, + 0x14, 0x21, 0x8d, 0xe4, 0x43, 0x6c, 0x87, 0x20, 0xe7, 0x30, 0x30, 0xf5, 0xbd, 0x15, 0x3c, 0xd3, + 0xcd, 0x26, 0xb5, 0x45, 0xff, 0xfa, 0x5d, 0xcf, 0xfa, 0xac, 0x73, 0x18, 0x5c, 0x14, 0x09, 0xd7, + 0x78, 0x4b, 0x08, 0xb3, 0x45, 0x1f, 0xe6, 0x7a, 0x16, 0xec, 0x0c, 0x7a, 0x97, 
0xc4, 0xb1, 0xde, + 0x91, 0x4b, 0x9f, 0xd3, 0xe6, 0x59, 0x9c, 0x08, 0xfa, 0x28, 0xcb, 0x1b, 0xc5, 0x86, 0x6d, 0xf1, + 0xf2, 0x46, 0x35, 0x0b, 0xa5, 0xcb, 0x6f, 0x98, 0x93, 0xa7, 0xdf, 0x9e, 0xac, 0x52, 0x2d, 0x94, + 0x1a, 0xa5, 0x72, 0x6c, 0x7e, 0x8d, 0xe7, 0x72, 0xbc, 0xd2, 0xe3, 0xea, 0x95, 0x1b, 0xdb, 0x2f, + 0xe2, 0x6c, 0xb3, 0xd2, 0x5e, 0xfc, 0x09, 0x00, 0x00, 0xff, 0xff, 0xdd, 0x23, 0x8f, 0x51, 0x3c, + 0x07, 0x00, 0x00, } diff --git a/go/vt/sqlparser/parsed_query.go b/go/vt/sqlparser/parsed_query.go index 8fcce8c98aa..2f6c4e705e7 100644 --- a/go/vt/sqlparser/parsed_query.go +++ b/go/vt/sqlparser/parsed_query.go @@ -53,23 +53,31 @@ func (pq *ParsedQuery) GenerateQuery(bindVariables map[string]*querypb.BindVaria } var buf strings.Builder buf.Grow(len(pq.Query)) + if err := pq.Append(&buf, bindVariables, extras); err != nil { + return "", err + } + return buf.String(), nil +} + +// Append appends the generated query to the provided buffer. +func (pq *ParsedQuery) Append(buf *strings.Builder, bindVariables map[string]*querypb.BindVariable, extras map[string]Encodable) error { current := 0 for _, loc := range pq.bindLocations { buf.WriteString(pq.Query[current:loc.offset]) name := pq.Query[loc.offset : loc.offset+loc.length] if encodable, ok := extras[name[1:]]; ok { - encodable.EncodeSQL(&buf) + encodable.EncodeSQL(buf) } else { supplied, _, err := FetchBindVar(name, bindVariables) if err != nil { - return "", err + return err } - EncodeValue(&buf, supplied) + EncodeValue(buf, supplied) } current = loc.offset + loc.length } buf.WriteString(pq.Query[current:]) - return buf.String(), nil + return nil } // MarshalJSON is a custom JSON marshaler for ParsedQuery. 
diff --git a/go/vt/vtcombo/tablet_map.go b/go/vt/vtcombo/tablet_map.go index 28a6ae52dca..861eff8f1bf 100644 --- a/go/vt/vtcombo/tablet_map.go +++ b/go/vt/vtcombo/tablet_map.go @@ -483,6 +483,12 @@ func (itc *internalTabletConn) VStream(ctx context.Context, target *querypb.Targ return tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) } +// VStreamRows is part of the QueryService interface. +func (itc *internalTabletConn) VStreamRows(ctx context.Context, target *querypb.Target, query string, lastpk *querypb.QueryResult, send func(*binlogdatapb.VStreamRowsResponse) error) error { + err := itc.tablet.qsc.QueryService().VStreamRows(ctx, target, query, lastpk, send) + return tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) +} + // // TabletManagerClient implementation // diff --git a/go/vt/vttablet/grpcqueryservice/server.go b/go/vt/vttablet/grpcqueryservice/server.go index 36a776ae540..d0f55351516 100644 --- a/go/vt/vttablet/grpcqueryservice/server.go +++ b/go/vt/vttablet/grpcqueryservice/server.go @@ -76,14 +76,12 @@ func (q *query) StreamExecute(request *querypb.StreamExecuteRequest, stream quer request.EffectiveCallerId, request.ImmediateCallerId, ) - if err := q.server.StreamExecute(ctx, request.Target, request.Query.Sql, request.Query.BindVariables, request.TransactionId, request.Options, func(reply *sqltypes.Result) error { + err = q.server.StreamExecute(ctx, request.Target, request.Query.Sql, request.Query.BindVariables, request.TransactionId, request.Options, func(reply *sqltypes.Result) error { return stream.Send(&querypb.StreamExecuteResponse{ Result: sqltypes.ResultToProto3(reply), }) - }); err != nil { - return vterrors.ToGRPC(err) - } - return nil + }) + return vterrors.ToGRPC(err) } // Begin is part of the queryservice.QueryServer interface @@ -300,14 +298,12 @@ func (q *query) MessageStream(request *querypb.MessageStreamRequest, stream quer request.EffectiveCallerId, request.ImmediateCallerId, ) - if err := q.server.MessageStream(ctx, request.Target, 
request.Name, func(qr *sqltypes.Result) error { + err = q.server.MessageStream(ctx, request.Target, request.Name, func(qr *sqltypes.Result) error { return stream.Send(&querypb.MessageStreamResponse{ Result: sqltypes.ResultToProto3(qr), }) - }); err != nil { - return vterrors.ToGRPC(err) - } - return nil + }) + return vterrors.ToGRPC(err) } // MessageAck is part of the queryservice.QueryServer interface @@ -352,10 +348,8 @@ func (q *query) SplitQuery(ctx context.Context, request *querypb.SplitQueryReque // StreamHealth is part of the queryservice.QueryServer interface func (q *query) StreamHealth(request *querypb.StreamHealthRequest, stream queryservicepb.Query_StreamHealthServer) (err error) { defer q.server.HandlePanic(&err) - if err = q.server.StreamHealth(stream.Context(), stream.Send); err != nil { - return vterrors.ToGRPC(err) - } - return nil + err = q.server.StreamHealth(stream.Context(), stream.Send) + return vterrors.ToGRPC(err) } // UpdateStream is part of the queryservice.QueryServer interface @@ -365,14 +359,12 @@ func (q *query) UpdateStream(request *querypb.UpdateStreamRequest, stream querys request.EffectiveCallerId, request.ImmediateCallerId, ) - if err := q.server.UpdateStream(ctx, request.Target, request.Position, request.Timestamp, func(reply *querypb.StreamEvent) error { + err = q.server.UpdateStream(ctx, request.Target, request.Position, request.Timestamp, func(reply *querypb.StreamEvent) error { return stream.Send(&querypb.UpdateStreamResponse{ Event: reply, }) - }); err != nil { - return vterrors.ToGRPC(err) - } - return nil + }) + return vterrors.ToGRPC(err) } // VStream is part of the queryservice.QueryServer interface @@ -382,14 +374,23 @@ func (q *query) VStream(request *binlogdatapb.VStreamRequest, stream queryservic request.EffectiveCallerId, request.ImmediateCallerId, ) - if err := q.server.VStream(ctx, request.Target, request.Position, request.Filter, func(events []*binlogdatapb.VEvent) error { + err = q.server.VStream(ctx, 
request.Target, request.Position, request.Filter, func(events []*binlogdatapb.VEvent) error { return stream.Send(&binlogdatapb.VStreamResponse{ Events: events, }) - }); err != nil { - return vterrors.ToGRPC(err) - } - return nil + }) + return vterrors.ToGRPC(err) +} + +// VStreamRows is part of the queryservice.QueryServer interface +func (q *query) VStreamRows(request *binlogdatapb.VStreamRowsRequest, stream queryservicepb.Query_VStreamRowsServer) (err error) { + defer q.server.HandlePanic(&err) + ctx := callerid.NewContext(callinfo.GRPCCallInfo(stream.Context()), + request.EffectiveCallerId, + request.ImmediateCallerId, + ) + err = q.server.VStreamRows(ctx, request.Target, request.Query, request.Lastpk, stream.Send) + return vterrors.ToGRPC(err) } // Register registers the implementation on the provide gRPC Server. diff --git a/go/vt/vttablet/grpctabletconn/conn.go b/go/vt/vttablet/grpctabletconn/conn.go index 56667e72513..aae4da11ac8 100644 --- a/go/vt/vttablet/grpctabletconn/conn.go +++ b/go/vt/vttablet/grpctabletconn/conn.go @@ -714,6 +714,47 @@ func (conn *gRPCQueryClient) VStream(ctx context.Context, target *querypb.Target } } +// VStreamRows streams rows of a query from the specified starting point. 
+func (conn *gRPCQueryClient) VStreamRows(ctx context.Context, target *querypb.Target, query string, lastpk *querypb.QueryResult, send func(*binlogdatapb.VStreamRowsResponse) error) error { + stream, err := func() (queryservicepb.Query_VStreamRowsClient, error) { + conn.mu.RLock() + defer conn.mu.RUnlock() + if conn.cc == nil { + return nil, tabletconn.ConnClosed + } + + req := &binlogdatapb.VStreamRowsRequest{ + Target: target, + EffectiveCallerId: callerid.EffectiveCallerIDFromContext(ctx), + ImmediateCallerId: callerid.ImmediateCallerIDFromContext(ctx), + Query: query, + Lastpk: lastpk, + } + stream, err := conn.c.VStreamRows(ctx, req) + if err != nil { + return nil, tabletconn.ErrorFromGRPC(err) + } + return stream, nil + }() + if err != nil { + return err + } + for { + r, err := stream.Recv() + if err != nil { + return tabletconn.ErrorFromGRPC(err) + } + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + if err := send(r); err != nil { + return err + } + } +} + // HandlePanic is a no-op. func (conn *gRPCQueryClient) HandlePanic(err *error) { } diff --git a/go/vt/vttablet/queryservice/queryservice.go b/go/vt/vttablet/queryservice/queryservice.go index 52153a2941b..7dbdd8a59cb 100644 --- a/go/vt/vttablet/queryservice/queryservice.go +++ b/go/vt/vttablet/queryservice/queryservice.go @@ -103,6 +103,9 @@ type QueryService interface { // VStream streams VReplication events based on the specified filter. VStream(ctx context.Context, target *querypb.Target, startPos string, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error + // VStreamRows streams rows of a table from the specified starting point. + VStreamRows(ctx context.Context, target *querypb.Target, query string, lastpk *querypb.QueryResult, send func(*binlogdatapb.VStreamRowsResponse) error) error + // StreamHealth streams health status. 
StreamHealth(ctx context.Context, callback func(*querypb.StreamHealthResponse) error) error diff --git a/go/vt/vttablet/queryservice/wrapped.go b/go/vt/vttablet/queryservice/wrapped.go index 9d25d42dbd0..2d9a434314c 100644 --- a/go/vt/vttablet/queryservice/wrapped.go +++ b/go/vt/vttablet/queryservice/wrapped.go @@ -252,9 +252,16 @@ func (ws *wrappedService) UpdateStream(ctx context.Context, target *querypb.Targ } func (ws *wrappedService) VStream(ctx context.Context, target *querypb.Target, startPos string, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error { - return ws.wrapper(ctx, target, ws.impl, "UpdateStream", false, func(ctx context.Context, target *querypb.Target, conn QueryService) (bool, error) { + return ws.wrapper(ctx, target, ws.impl, "VStream", false, func(ctx context.Context, target *querypb.Target, conn QueryService) (bool, error) { innerErr := conn.VStream(ctx, target, startPos, filter, send) - return canRetry(ctx, innerErr), innerErr + return false, innerErr + }) +} + +func (ws *wrappedService) VStreamRows(ctx context.Context, target *querypb.Target, query string, lastpk *querypb.QueryResult, send func(*binlogdatapb.VStreamRowsResponse) error) error { + return ws.wrapper(ctx, target, ws.impl, "VStreamRows", false, func(ctx context.Context, target *querypb.Target, conn QueryService) (bool, error) { + innerErr := conn.VStreamRows(ctx, target, query, lastpk, send) + return false, innerErr }) } diff --git a/go/vt/vttablet/sandboxconn/sandboxconn.go b/go/vt/vttablet/sandboxconn/sandboxconn.go index 36caa6ee668..175538a209f 100644 --- a/go/vt/vttablet/sandboxconn/sandboxconn.go +++ b/go/vt/vttablet/sandboxconn/sandboxconn.go @@ -363,6 +363,11 @@ func (sbc *SandboxConn) VStream(ctx context.Context, target *querypb.Target, sta return fmt.Errorf("Not implemented in test") } +// VStreamRows is part of the QueryService interface. 
+func (sbc *SandboxConn) VStreamRows(ctx context.Context, target *querypb.Target, query string, lastpk *querypb.QueryResult, send func(*binlogdatapb.VStreamRowsResponse) error) error { + return fmt.Errorf("Not implemented in test") +} + // HandlePanic is part of the QueryService interface. func (sbc *SandboxConn) HandlePanic(err *error) { } diff --git a/go/vt/vttablet/tabletconntest/fakequeryservice.go b/go/vt/vttablet/tabletconntest/fakequeryservice.go index 51ae53f8b33..334b17d3a4c 100644 --- a/go/vt/vttablet/tabletconntest/fakequeryservice.go +++ b/go/vt/vttablet/tabletconntest/fakequeryservice.go @@ -856,6 +856,11 @@ func (f *FakeQueryService) VStream(ctx context.Context, target *querypb.Target, panic("not implemented") } +// VStreamRows is part of the QueryService interface. +func (f *FakeQueryService) VStreamRows(ctx context.Context, target *querypb.Target, query string, lastpk *querypb.QueryResult, send func(*binlogdatapb.VStreamRowsResponse) error) error { + panic("not implemented") +} + // CreateFakeServer returns the fake server for the tests func CreateFakeServer(t *testing.T) *FakeQueryService { return &FakeQueryService{ diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller.go b/go/vt/vttablet/tabletmanager/vreplication/controller.go index 75d1c7c32de..9d3a356d8f0 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller.go @@ -189,12 +189,13 @@ func (ct *controller) runBlp(ctx context.Context) (err error) { player := binlogplayer.NewBinlogPlayerKeyRange(dbClient, tablet, ct.source.KeyRange, ct.id, ct.blpStats) return player.ApplyBinlogEvents(ctx) case ct.source.Filter != nil: - // VPlayer requires the timezone to be UTC. + // Timestamp fields from binlogs are always sent as UTC. + // So, we should set the timezone to be UTC for those values to be correctly inserted. 
if _, err := dbClient.ExecuteFetch("set @@session.time_zone = '+00:00'", 10000); err != nil { return err } - vplayer := newVPlayer(ct.id, &ct.source, tablet, ct.blpStats, dbClient, ct.mysqld) - return vplayer.Play(ctx) + vreplicator := newVReplicator(ct.id, &ct.source, tablet, ct.blpStats, dbClient, ct.mysqld) + return vreplicator.Replicate(ctx) } return fmt.Errorf("missing source") } diff --git a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go index a9b5d60d4c8..97a2ee97884 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go @@ -103,6 +103,16 @@ func TestMain(m *testing.M) { } defer playerEngine.Close() + if err := env.Mysqld.ExecuteSuperQueryList(context.Background(), binlogplayer.CreateVReplicationTable()); err != nil { + fmt.Fprintf(os.Stderr, "%v", err) + return 1 + } + + if err := env.Mysqld.ExecuteSuperQueryList(context.Background(), CreateCopyState); err != nil { + fmt.Fprintf(os.Stderr, "%v", err) + return 1 + } + return m.Run() }() os.Exit(exitCode) @@ -125,6 +135,8 @@ func addTablet(id int, shard string, tabletType topodatapb.TabletType, serving, func deleteTablet(t *topodatapb.Tablet) { env.TopoServ.DeleteTablet(context.Background(), t.Alias) + // This is not automatically removed from shard replication, which results in log spam. + topo.DeleteTabletReplicationData(context.Background(), env.TopoServ, t) } func newTablet(id int, shard string, tabletType topodatapb.TabletType, serving, healthy bool) *topodatapb.Tablet { @@ -194,6 +206,25 @@ func (ftc *fakeTabletConn) VStream(ctx context.Context, target *querypb.Target, return streamerEngine.Stream(ctx, startPos, filter, send) } +// streamRowsHook allows you to do work just before VStreamRows is dispatched. +var streamRowsHook func(ctx context.Context) + +// VStreamRows directly calls into the pre-initialized engine. 
+func (ftc *fakeTabletConn) VStreamRows(ctx context.Context, target *querypb.Target, query string, lastpk *querypb.QueryResult, send func(*binlogdatapb.VStreamRowsResponse) error) error { + if streamRowsHook != nil { + streamRowsHook(ctx) + } + var row []sqltypes.Value + if lastpk != nil { + r := sqltypes.Proto3ToResult(lastpk) + if len(r.Rows) != 1 { + return fmt.Errorf("unexpected lastpk input: %v", lastpk) + } + row = r.Rows[0] + } + return streamerEngine.StreamRows(ctx, query, row, send) +} + //-------------------------------------- // Binlog Client to TabletManager @@ -368,10 +399,64 @@ func expectDBClientQueries(t *testing.T, queries []string) { } } +// expectNontxQueries disregards transactional statements like begin and commit. +// It also disregards updates to _vt.vreplication. +func expectNontxQueries(t *testing.T, queries []string) { + t.Helper() + failed := false + for i, query := range queries { + if failed { + t.Errorf("no query received, expecting %s", query) + continue + } + var got string + retry: + select { + case got = <-globalDBQueries: + if got == "begin" || got == "commit" || strings.Contains(got, "_vt.vreplication") { + goto retry + } + var match bool + if query[0] == '/' { + result, err := regexp.MatchString(query[1:], got) + if err != nil { + panic(err) + } + match = result + } else { + match = (got == query) + } + if !match { + t.Errorf("query:\n%q, does not match query %d:\n%q", got, i, query) + } + case <-time.After(5 * time.Second): + t.Errorf("no query received, expecting %s", query) + failed = true + } + } + for { + select { + case got := <-globalDBQueries: + if got == "begin" || got == "commit" || got == "rollback" || strings.Contains(got, "_vt.vreplication") { + continue + } + t.Errorf("unexpected query: %s", got) + default: + return + } + } +} + func expectData(t *testing.T, table string, values [][]string) { t.Helper() - qr, err := env.Mysqld.FetchSuperQuery(context.Background(), fmt.Sprintf("select * from %s.%s", vrepldb, table)) 
+ var query string + if len(strings.Split(table, ".")) == 1 { + query = fmt.Sprintf("select * from %s.%s", vrepldb, table) + } else { + query = fmt.Sprintf("select * from %s", table) + } + qr, err := env.Mysqld.FetchSuperQuery(context.Background(), query) if err != nil { t.Error(err) return diff --git a/go/vt/vttablet/tabletmanager/vreplication/player_plan.go b/go/vt/vttablet/tabletmanager/vreplication/player_plan.go deleted file mode 100644 index bc10f436375..00000000000 --- a/go/vt/vttablet/tabletmanager/vreplication/player_plan.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package vreplication - -import ( - "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/sqlparser" - - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - querypb "vitess.io/vitess/go/vt/proto/query" -) - -// PlayerPlan is the execution plan for a player stream. -type PlayerPlan struct { - VStreamFilter *binlogdatapb.Filter - TargetTables map[string]*TablePlan - TablePlans map[string]*TablePlan -} - -// TablePlan is the execution plan for a table within a player stream. 
-type TablePlan struct { - Name string - SendRule *binlogdatapb.Rule - PKReferences []string `json:",omitempty"` - Insert *sqlparser.ParsedQuery `json:",omitempty"` - Update *sqlparser.ParsedQuery `json:",omitempty"` - Delete *sqlparser.ParsedQuery `json:",omitempty"` - Fields []*querypb.Field `json:",omitempty"` -} - -func (tp *TablePlan) generateStatements(rowChange *binlogdatapb.RowChange) ([]string, error) { - // MakeRowTrusted is needed here because Proto3ToResult is not convenient. - var before, after bool - bindvars := make(map[string]*querypb.BindVariable) - if rowChange.Before != nil { - before = true - vals := sqltypes.MakeRowTrusted(tp.Fields, rowChange.Before) - for i, field := range tp.Fields { - bindvars["b_"+field.Name] = sqltypes.ValueBindVariable(vals[i]) - } - } - if rowChange.After != nil { - after = true - vals := sqltypes.MakeRowTrusted(tp.Fields, rowChange.After) - for i, field := range tp.Fields { - bindvars["a_"+field.Name] = sqltypes.ValueBindVariable(vals[i]) - } - } - switch { - case !before && after: - query, err := tp.Insert.GenerateQuery(bindvars, nil) - if err != nil { - return nil, err - } - return []string{query}, nil - case before && !after: - if tp.Delete == nil { - return nil, nil - } - query, err := tp.Delete.GenerateQuery(bindvars, nil) - if err != nil { - return nil, err - } - return []string{query}, nil - case before && after: - if !tp.pkChanged(bindvars) { - query, err := tp.Update.GenerateQuery(bindvars, nil) - if err != nil { - return nil, err - } - return []string{query}, nil - } - - queries := make([]string, 0, 2) - if tp.Delete != nil { - query, err := tp.Delete.GenerateQuery(bindvars, nil) - if err != nil { - return nil, err - } - queries = append(queries, query) - } - query, err := tp.Insert.GenerateQuery(bindvars, nil) - if err != nil { - return nil, err - } - queries = append(queries, query) - return queries, nil - } - return nil, nil -} - -func (tp *TablePlan) pkChanged(bindvars map[string]*querypb.BindVariable) 
bool { - for _, pkref := range tp.PKReferences { - v1, _ := sqltypes.BindVariableToValue(bindvars["b_"+pkref]) - v2, _ := sqltypes.BindVariableToValue(bindvars["a_"+pkref]) - if !valsEqual(v1, v2) { - return true - } - } - return false -} - -func valsEqual(v1, v2 sqltypes.Value) bool { - if v1.IsNull() && v2.IsNull() { - return true - } - // If any one of them is null, something has changed. - if v1.IsNull() || v2.IsNull() { - return false - } - // Compare content only if none are null. - return v1.ToString() == v2.ToString() -} diff --git a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go new file mode 100644 index 00000000000..1eb7d3799a7 --- /dev/null +++ b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go @@ -0,0 +1,267 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vreplication + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/sqlparser" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" +) + +// ReplicatorPlan is the execution plan for the replicator. +// The constructor for this is buildReplicatorPlan in table_plan_builder.go +// The initial build identifies the tables that need to be replicated, +// and builds partial TablePlan objects for them. The partial plan is used +// to send streaming requests. 
As the responses return field info, this
+// information is used to build the final execution plan (buildExecutionPlan).
+type ReplicatorPlan struct {
+	VStreamFilter *binlogdatapb.Filter
+	TargetTables  map[string]*TablePlan
+	TablePlans    map[string]*TablePlan
+	tableKeys     map[string][]string
+}
+
+// buildExecutionPlan uses the field info as input and the partially built
+// TablePlan for that table to build a full plan.
+func (rp *ReplicatorPlan) buildExecutionPlan(fieldEvent *binlogdatapb.FieldEvent) (*TablePlan, error) {
+	prelim := rp.TablePlans[fieldEvent.TableName]
+	if prelim == nil {
+		// Unreachable code.
+		return nil, fmt.Errorf("plan not found for %s", fieldEvent.TableName)
+	}
+	if prelim.Insert != nil {
+		tplanv := *prelim
+		tplanv.Fields = fieldEvent.Fields
+		return &tplanv, nil
+	}
+	// select * construct was used. We need to use the field names.
+	tplan, err := rp.buildFromFields(prelim.TargetName, fieldEvent.Fields)
+	if err != nil {
+		return nil, err
+	}
+	tplan.Fields = fieldEvent.Fields
+	return tplan, nil
+}
+
+// buildFromFields builds a full TablePlan, but uses the field info as the
+// full column list. This happens when the query used was a 'select *', which
+// requires us to wait for the field info sent by the source.
+func (rp *ReplicatorPlan) buildFromFields(tableName string, fields []*querypb.Field) (*TablePlan, error) {
+	tpb := &tablePlanBuilder{
+		name: sqlparser.NewTableIdent(tableName),
+	}
+	for _, field := range fields {
+		colName := sqlparser.NewColIdent(field.Name)
+		cexpr := &colExpr{
+			colName: colName,
+			expr: &sqlparser.ColName{
+				Name: colName,
+			},
+			references: map[string]bool{
+				field.Name: true,
+			},
+		}
+		tpb.colExprs = append(tpb.colExprs, cexpr)
+	}
+	if err := tpb.analyzePK(rp.tableKeys); err != nil {
+		return nil, err
+	}
+	return tpb.generate(rp.tableKeys), nil
+}
+
+// MarshalJSON performs a custom JSON Marshalling. 
+func (rp *ReplicatorPlan) MarshalJSON() ([]byte, error) {
+	var targets []string
+	for k := range rp.TargetTables {
+		targets = append(targets, k)
+	}
+	sort.Strings(targets)
+	v := struct {
+		VStreamFilter *binlogdatapb.Filter
+		TargetTables  []string
+		TablePlans    map[string]*TablePlan
+	}{
+		VStreamFilter: rp.VStreamFilter,
+		TargetTables:  targets,
+		TablePlans:    rp.TablePlans,
+	}
+	return json.Marshal(&v)
+}
+
+// TablePlan is the execution plan for a table within a player stream.
+// The ParsedQuery objects assume that a map of before and after values
+// will be built based on the streaming rows. Before image values will
+// be prefixed with a "b_", and after image values will be prefixed
+// with an "a_".
+type TablePlan struct {
+	TargetName   string
+	SendRule     *binlogdatapb.Rule
+	PKReferences []string
+	// BulkInsertFront, BulkInsertValues and BulkInsertOnDup are used
+	// by vcopier.
+	BulkInsertFront  *sqlparser.ParsedQuery
+	BulkInsertValues *sqlparser.ParsedQuery
+	BulkInsertOnDup  *sqlparser.ParsedQuery
+	// Insert, Update and Delete are used by vplayer.
+	// If the plan is an insertIgnore type, then Insert
+	// and Update contain 'insert ignore' statements and
+	// Delete is nil.
+	Insert *sqlparser.ParsedQuery
+	Update *sqlparser.ParsedQuery
+	Delete *sqlparser.ParsedQuery
+	Fields []*querypb.Field
+}
+
+// MarshalJSON performs a custom JSON Marshalling. 
+func (tp *TablePlan) MarshalJSON() ([]byte, error) { + v := struct { + TargetName string + SendRule string + PKReferences []string `json:",omitempty"` + InsertFront *sqlparser.ParsedQuery `json:",omitempty"` + InsertValues *sqlparser.ParsedQuery `json:",omitempty"` + InsertOnDup *sqlparser.ParsedQuery `json:",omitempty"` + Insert *sqlparser.ParsedQuery `json:",omitempty"` + Update *sqlparser.ParsedQuery `json:",omitempty"` + Delete *sqlparser.ParsedQuery `json:",omitempty"` + }{ + TargetName: tp.TargetName, + SendRule: tp.SendRule.Match, + PKReferences: tp.PKReferences, + InsertFront: tp.BulkInsertFront, + InsertValues: tp.BulkInsertValues, + InsertOnDup: tp.BulkInsertOnDup, + Insert: tp.Insert, + Update: tp.Update, + Delete: tp.Delete, + } + return json.Marshal(&v) +} + +func (tp *TablePlan) generateBulkInsert(rows *binlogdatapb.VStreamRowsResponse) (string, error) { + bindvars := make(map[string]*querypb.BindVariable, len(tp.Fields)) + var buf strings.Builder + if err := tp.BulkInsertFront.Append(&buf, nil, nil); err != nil { + return "", err + } + buf.WriteString(" values ") + separator := "" + for _, row := range rows.Rows { + vals := sqltypes.MakeRowTrusted(tp.Fields, row) + for i, field := range tp.Fields { + bindvars["a_"+field.Name] = sqltypes.ValueBindVariable(vals[i]) + } + buf.WriteString(separator) + separator = ", " + tp.BulkInsertValues.Append(&buf, bindvars, nil) + } + if tp.BulkInsertOnDup != nil { + tp.BulkInsertOnDup.Append(&buf, nil, nil) + } + return buf.String(), nil +} + +func (tp *TablePlan) generateStatements(rowChange *binlogdatapb.RowChange) ([]string, error) { + // MakeRowTrusted is needed here because Proto3ToResult is not convenient. 
+ var before, after bool + bindvars := make(map[string]*querypb.BindVariable, len(tp.Fields)) + if rowChange.Before != nil { + before = true + vals := sqltypes.MakeRowTrusted(tp.Fields, rowChange.Before) + for i, field := range tp.Fields { + bindvars["b_"+field.Name] = sqltypes.ValueBindVariable(vals[i]) + } + } + if rowChange.After != nil { + after = true + vals := sqltypes.MakeRowTrusted(tp.Fields, rowChange.After) + for i, field := range tp.Fields { + bindvars["a_"+field.Name] = sqltypes.ValueBindVariable(vals[i]) + } + } + switch { + case !before && after: + query, err := tp.Insert.GenerateQuery(bindvars, nil) + if err != nil { + return nil, err + } + return []string{query}, nil + case before && !after: + if tp.Delete == nil { + return nil, nil + } + query, err := tp.Delete.GenerateQuery(bindvars, nil) + if err != nil { + return nil, err + } + return []string{query}, nil + case before && after: + if !tp.pkChanged(bindvars) { + query, err := tp.Update.GenerateQuery(bindvars, nil) + if err != nil { + return nil, err + } + return []string{query}, nil + } + + queries := make([]string, 0, 2) + if tp.Delete != nil { + query, err := tp.Delete.GenerateQuery(bindvars, nil) + if err != nil { + return nil, err + } + queries = append(queries, query) + } + query, err := tp.Insert.GenerateQuery(bindvars, nil) + if err != nil { + return nil, err + } + queries = append(queries, query) + return queries, nil + } + return nil, nil +} + +func (tp *TablePlan) pkChanged(bindvars map[string]*querypb.BindVariable) bool { + for _, pkref := range tp.PKReferences { + v1, _ := sqltypes.BindVariableToValue(bindvars["b_"+pkref]) + v2, _ := sqltypes.BindVariableToValue(bindvars["a_"+pkref]) + if !valsEqual(v1, v2) { + return true + } + } + return false +} + +func valsEqual(v1, v2 sqltypes.Value) bool { + if v1.IsNull() && v2.IsNull() { + return true + } + // If any one of them is null, something has changed. 
+ if v1.IsNull() || v2.IsNull() { + return false + } + // Compare content only if none are null. + return v1.ToString() == v2.ToString() +} diff --git a/go/vt/vttablet/tabletmanager/vreplication/player_plan_test.go b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go similarity index 56% rename from go/vt/vttablet/tabletmanager/vreplication/player_plan_test.go rename to go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go index 0fc9e6a3ef7..c12c26f830e 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/player_plan_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go @@ -18,60 +18,25 @@ package vreplication import ( "encoding/json" - "sort" "testing" + "vitess.io/vitess/go/sqltypes" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - "vitess.io/vitess/go/vt/sqlparser" ) -func (pp *PlayerPlan) MarshalJSON() ([]byte, error) { - var targets []string - for k := range pp.TargetTables { - targets = append(targets, k) - } - sort.Strings(targets) - v := struct { - VStreamFilter *binlogdatapb.Filter - TargetTables []string - TablePlans map[string]*TablePlan - }{ - VStreamFilter: pp.VStreamFilter, - TargetTables: targets, - TablePlans: pp.TablePlans, - } - return json.Marshal(&v) -} - -func (tp *TablePlan) MarshalJSON() ([]byte, error) { - v := struct { - Name string - SendRule string - PKReferences []string `json:",omitempty"` - Insert *sqlparser.ParsedQuery `json:",omitempty"` - Update *sqlparser.ParsedQuery `json:",omitempty"` - Delete *sqlparser.ParsedQuery `json:",omitempty"` - }{ - Name: tp.Name, - SendRule: tp.SendRule.Match, - PKReferences: tp.PKReferences, - Insert: tp.Insert, - Update: tp.Update, - Delete: tp.Delete, - } - return json.Marshal(&v) -} - -type TestPlayerPlan struct { +type TestReplicatorPlan struct { VStreamFilter *binlogdatapb.Filter TargetTables []string TablePlans map[string]*TestTablePlan } type TestTablePlan struct { - Name string + TargetName string SendRule string PKReferences 
[]string `json:",omitempty"` + InsertFront string `json:",omitempty"` + InsertValues string `json:",omitempty"` + InsertOnDup string `json:",omitempty"` Insert string `json:",omitempty"` Update string `json:",omitempty"` Delete string `json:",omitempty"` @@ -79,9 +44,10 @@ type TestTablePlan struct { func TestBuildPlayerPlan(t *testing.T) { testcases := []struct { - input *binlogdatapb.Filter - plan *TestPlayerPlan - err string + input *binlogdatapb.Filter + plan *TestReplicatorPlan + planpk *TestReplicatorPlan + err string }{{ // Regular expression input: &binlogdatapb.Filter{ @@ -89,7 +55,7 @@ func TestBuildPlayerPlan(t *testing.T) { Match: "/.*", }}, }, - plan: &TestPlayerPlan{ + plan: &TestReplicatorPlan{ VStreamFilter: &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ Match: "t1", @@ -99,8 +65,23 @@ func TestBuildPlayerPlan(t *testing.T) { TargetTables: []string{"t1"}, TablePlans: map[string]*TestTablePlan{ "t1": { - Name: "t1", - SendRule: "t1", + TargetName: "t1", + SendRule: "t1", + }, + }, + }, + planpk: &TestReplicatorPlan{ + VStreamFilter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1", + }}, + }, + TargetTables: []string{"t1"}, + TablePlans: map[string]*TestTablePlan{ + "t1": { + TargetName: "t1", + SendRule: "t1", }, }, }, @@ -112,7 +93,7 @@ func TestBuildPlayerPlan(t *testing.T) { Filter: "select * from t2", }}, }, - plan: &TestPlayerPlan{ + plan: &TestReplicatorPlan{ VStreamFilter: &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ Match: "t2", @@ -122,8 +103,23 @@ func TestBuildPlayerPlan(t *testing.T) { TargetTables: []string{"t1"}, TablePlans: map[string]*TestTablePlan{ "t2": { - Name: "t1", - SendRule: "t2", + TargetName: "t1", + SendRule: "t2", + }, + }, + }, + planpk: &TestReplicatorPlan{ + VStreamFilter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t2", + Filter: "select * from t2", + }}, + }, + TargetTables: []string{"t1"}, + TablePlans: map[string]*TestTablePlan{ + 
"t2": { + TargetName: "t1", + SendRule: "t2", }, }, }, @@ -135,7 +131,7 @@ func TestBuildPlayerPlan(t *testing.T) { Filter: "select c1, c2 from t2", }}, }, - plan: &TestPlayerPlan{ + plan: &TestReplicatorPlan{ VStreamFilter: &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ Match: "t2", @@ -145,15 +141,38 @@ func TestBuildPlayerPlan(t *testing.T) { TargetTables: []string{"t1"}, TablePlans: map[string]*TestTablePlan{ "t2": { - Name: "t1", + TargetName: "t1", SendRule: "t2", PKReferences: []string{"c1"}, - Insert: "insert into t1 set c1=:a_c1, c2=:a_c2", + InsertFront: "insert into t1(c1,c2)", + InsertValues: "(:a_c1,:a_c2)", + Insert: "insert into t1(c1,c2) values (:a_c1,:a_c2)", Update: "update t1 set c2=:a_c2 where c1=:b_c1", Delete: "delete from t1 where c1=:b_c1", }, }, }, + planpk: &TestReplicatorPlan{ + VStreamFilter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t2", + Filter: "select c1, c2, pk1, pk2 from t2", + }}, + }, + TargetTables: []string{"t1"}, + TablePlans: map[string]*TestTablePlan{ + "t2": { + TargetName: "t1", + SendRule: "t2", + PKReferences: []string{"c1", "pk1", "pk2"}, + InsertFront: "insert into t1(c1,c2)", + InsertValues: "(:a_c1,:a_c2)", + Insert: "insert into t1(c1,c2) select :a_c1, :a_c2 where (:a_pk1,:a_pk2) <= (1,'aaa')", + Update: "update t1 set c2=:a_c2 where c1=:b_c1 and (:b_pk1,:b_pk2) <= (1,'aaa')", + Delete: "delete from t1 where c1=:b_c1 and (:b_pk1,:b_pk2) <= (1,'aaa')", + }, + }, + }, }, { // partial group by input: &binlogdatapb.Filter{ @@ -162,7 +181,7 @@ func TestBuildPlayerPlan(t *testing.T) { Filter: "select c1, c2, c3 from t2 group by c3, c1", }}, }, - plan: &TestPlayerPlan{ + plan: &TestReplicatorPlan{ VStreamFilter: &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ Match: "t2", @@ -172,15 +191,40 @@ func TestBuildPlayerPlan(t *testing.T) { TargetTables: []string{"t1"}, TablePlans: map[string]*TestTablePlan{ "t2": { - Name: "t1", + TargetName: "t1", SendRule: "t2", PKReferences: []string{"c1"}, - 
Insert: "insert into t1 set c1=:a_c1, c2=:a_c2, c3=:a_c3 on duplicate key update c2=:a_c2", + InsertFront: "insert into t1(c1,c2,c3)", + InsertValues: "(:a_c1,:a_c2,:a_c3)", + InsertOnDup: "on duplicate key update c2=values(c2)", + Insert: "insert into t1(c1,c2,c3) values (:a_c1,:a_c2,:a_c3) on duplicate key update c2=values(c2)", Update: "update t1 set c2=:a_c2 where c1=:b_c1", Delete: "update t1 set c2=null where c1=:b_c1", }, }, }, + planpk: &TestReplicatorPlan{ + VStreamFilter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t2", + Filter: "select c1, c2, c3, pk1, pk2 from t2", + }}, + }, + TargetTables: []string{"t1"}, + TablePlans: map[string]*TestTablePlan{ + "t2": { + TargetName: "t1", + SendRule: "t2", + PKReferences: []string{"c1", "pk1", "pk2"}, + InsertFront: "insert into t1(c1,c2,c3)", + InsertValues: "(:a_c1,:a_c2,:a_c3)", + InsertOnDup: "on duplicate key update c2=values(c2)", + Insert: "insert into t1(c1,c2,c3) select :a_c1, :a_c2, :a_c3 where (:a_pk1,:a_pk2) <= (1,'aaa') on duplicate key update c2=values(c2)", + Update: "update t1 set c2=:a_c2 where c1=:b_c1 and (:b_pk1,:b_pk2) <= (1,'aaa')", + Delete: "update t1 set c2=null where c1=:b_c1 and (:b_pk1,:b_pk2) <= (1,'aaa')", + }, + }, + }, }, { // full group by input: &binlogdatapb.Filter{ @@ -189,7 +233,7 @@ func TestBuildPlayerPlan(t *testing.T) { Filter: "select c1, c2, c3 from t2 group by c3, c1, c2", }}, }, - plan: &TestPlayerPlan{ + plan: &TestReplicatorPlan{ VStreamFilter: &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ Match: "t2", @@ -199,11 +243,33 @@ func TestBuildPlayerPlan(t *testing.T) { TargetTables: []string{"t1"}, TablePlans: map[string]*TestTablePlan{ "t2": { - Name: "t1", + TargetName: "t1", SendRule: "t2", PKReferences: []string{"c1"}, - Insert: "insert ignore into t1 set c1=:a_c1, c2=:a_c2, c3=:a_c3", - Update: "insert ignore into t1 set c1=:a_c1, c2=:a_c2, c3=:a_c3", + InsertFront: "insert ignore into t1(c1,c2,c3)", + InsertValues: "(:a_c1,:a_c2,:a_c3)", + 
Insert: "insert ignore into t1(c1,c2,c3) values (:a_c1,:a_c2,:a_c3)", + Update: "insert ignore into t1(c1,c2,c3) values (:a_c1,:a_c2,:a_c3)", + }, + }, + }, + planpk: &TestReplicatorPlan{ + VStreamFilter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t2", + Filter: "select c1, c2, c3, pk1, pk2 from t2", + }}, + }, + TargetTables: []string{"t1"}, + TablePlans: map[string]*TestTablePlan{ + "t2": { + TargetName: "t1", + SendRule: "t2", + PKReferences: []string{"c1", "pk1", "pk2"}, + InsertFront: "insert ignore into t1(c1,c2,c3)", + InsertValues: "(:a_c1,:a_c2,:a_c3)", + Insert: "insert ignore into t1(c1,c2,c3) select :a_c1, :a_c2, :a_c3 where (:a_pk1,:a_pk2) <= (1,'aaa')", + Update: "insert ignore into t1(c1,c2,c3) select :a_c1, :a_c2, :a_c3 where (:a_pk1,:a_pk2) <= (1,'aaa')", }, }, }, @@ -214,7 +280,7 @@ func TestBuildPlayerPlan(t *testing.T) { Filter: "select foo(a) as c1, b c2 from t1", }}, }, - plan: &TestPlayerPlan{ + plan: &TestReplicatorPlan{ VStreamFilter: &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ Match: "t1", @@ -224,15 +290,38 @@ func TestBuildPlayerPlan(t *testing.T) { TargetTables: []string{"t1"}, TablePlans: map[string]*TestTablePlan{ "t1": { - Name: "t1", + TargetName: "t1", SendRule: "t1", PKReferences: []string{"a"}, - Insert: "insert into t1 set c1=foo(:a_a), c2=:a_b", + InsertFront: "insert into t1(c1,c2)", + InsertValues: "(foo(:a_a),:a_b)", + Insert: "insert into t1(c1,c2) values (foo(:a_a),:a_b)", Update: "update t1 set c2=:a_b where c1=(foo(:b_a))", Delete: "delete from t1 where c1=(foo(:b_a))", }, }, }, + planpk: &TestReplicatorPlan{ + VStreamFilter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select a, b, pk1, pk2 from t1", + }}, + }, + TargetTables: []string{"t1"}, + TablePlans: map[string]*TestTablePlan{ + "t1": { + TargetName: "t1", + SendRule: "t1", + PKReferences: []string{"a", "pk1", "pk2"}, + InsertFront: "insert into t1(c1,c2)", + InsertValues: "(foo(:a_a),:a_b)", + 
Insert: "insert into t1(c1,c2) select foo(:a_a), :a_b where (:a_pk1,:a_pk2) <= (1,'aaa')", + Update: "update t1 set c2=:a_b where c1=(foo(:b_a)) and (:b_pk1,:b_pk2) <= (1,'aaa')", + Delete: "delete from t1 where c1=(foo(:b_a)) and (:b_pk1,:b_pk2) <= (1,'aaa')", + }, + }, + }, }, { input: &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -240,7 +329,7 @@ func TestBuildPlayerPlan(t *testing.T) { Filter: "select a + b as c1, c as c2 from t1", }}, }, - plan: &TestPlayerPlan{ + plan: &TestReplicatorPlan{ VStreamFilter: &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ Match: "t1", @@ -250,15 +339,38 @@ func TestBuildPlayerPlan(t *testing.T) { TargetTables: []string{"t1"}, TablePlans: map[string]*TestTablePlan{ "t1": { - Name: "t1", + TargetName: "t1", SendRule: "t1", PKReferences: []string{"a", "b"}, - Insert: "insert into t1 set c1=:a_a + :a_b, c2=:a_c", + InsertFront: "insert into t1(c1,c2)", + InsertValues: "(:a_a + :a_b,:a_c)", + Insert: "insert into t1(c1,c2) values (:a_a + :a_b,:a_c)", Update: "update t1 set c2=:a_c where c1=(:b_a + :b_b)", Delete: "delete from t1 where c1=(:b_a + :b_b)", }, }, }, + planpk: &TestReplicatorPlan{ + VStreamFilter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select a, b, c, pk1, pk2 from t1", + }}, + }, + TargetTables: []string{"t1"}, + TablePlans: map[string]*TestTablePlan{ + "t1": { + TargetName: "t1", + SendRule: "t1", + PKReferences: []string{"a", "b", "pk1", "pk2"}, + InsertFront: "insert into t1(c1,c2)", + InsertValues: "(:a_a + :a_b,:a_c)", + Insert: "insert into t1(c1,c2) select :a_a + :a_b, :a_c where (:a_pk1,:a_pk2) <= (1,'aaa')", + Update: "update t1 set c2=:a_c where c1=(:b_a + :b_b) and (:b_pk1,:b_pk2) <= (1,'aaa')", + Delete: "delete from t1 where c1=(:b_a + :b_b) and (:b_pk1,:b_pk2) <= (1,'aaa')", + }, + }, + }, }, { // syntax error input: &binlogdatapb.Filter{ @@ -409,8 +521,18 @@ func TestBuildPlayerPlan(t *testing.T) { "t1": {"c1"}, } + copyState := 
map[string]*sqltypes.Result{ + "t1": sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "pk1|pk2", + "int64|varchar", + ), + "1|aaa", + ), + } + for _, tcase := range testcases { - plan, err := buildPlayerPlan(tcase.input, tableKeys) + plan, err := buildReplicatorPlan(tcase.input, tableKeys, nil) gotPlan, _ := json.Marshal(plan) wantPlan, _ := json.Marshal(tcase.plan) if string(gotPlan) != string(wantPlan) { @@ -423,5 +545,15 @@ func TestBuildPlayerPlan(t *testing.T) { if gotErr != tcase.err { t.Errorf("Filter err(%v): %s, want %v", tcase.input, gotErr, tcase.err) } + + plan, err = buildReplicatorPlan(tcase.input, tableKeys, copyState) + if err != nil { + continue + } + gotPlan, _ = json.Marshal(plan) + wantPlan, _ = json.Marshal(tcase.planpk) + if string(gotPlan) != string(wantPlan) { + t.Errorf("Filter(%v,copyState):\n%s, want\n%s", tcase.input, gotPlan, wantPlan) + } } } diff --git a/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go b/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go index 71bb265d4eb..59eb78b6077 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go +++ b/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go @@ -22,10 +22,10 @@ import ( "sort" "strings" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - querypb "vitess.io/vitess/go/vt/proto/query" ) type tablePlanBuilder struct { @@ -35,6 +35,7 @@ type tablePlanBuilder struct { colExprs []*colExpr onInsert insertType pkCols []*colExpr + lastpk *sqltypes.Result } // colExpr describes the processing to be performed to @@ -69,22 +70,32 @@ type insertType int // The following values are the various insert types. const ( insertNormal = insertType(iota) - insertOndup + insertOnDup insertIgnore ) -// buildPlayerPlan builds a PlayerPlan from the input filter. +// buildReplicatorPlan builds a ReplicatorPlan for the tables that match the filter. 
// The filter is matched against the target schema. For every table matched, // a table-specific rule is built to be sent to the source. We don't send the // original rule to the source because it may not match the same tables as the // target. -func buildPlayerPlan(filter *binlogdatapb.Filter, tableKeys map[string][]string) (*PlayerPlan, error) { - plan := &PlayerPlan{ +// The TablePlan built is a partial plan. The full plan for a table is built +// when we receive field information from events or rows sent by the source. +// buildExecutionPlan is the function that builds the full plan. +func buildReplicatorPlan(filter *binlogdatapb.Filter, tableKeys map[string][]string, copyState map[string]*sqltypes.Result) (*ReplicatorPlan, error) { + plan := &ReplicatorPlan{ VStreamFilter: &binlogdatapb.Filter{}, TargetTables: make(map[string]*TablePlan), TablePlans: make(map[string]*TablePlan), + tableKeys: tableKeys, } +nextTable: for tableName := range tableKeys { + lastpk, ok := copyState[tableName] + if ok && lastpk == nil { + // Don't replicate uncopied tables. 
+ continue + } for _, rule := range filter.Rules { switch { case strings.HasPrefix(rule.Match, "/"): @@ -102,22 +113,24 @@ func buildPlayerPlan(filter *binlogdatapb.Filter, tableKeys map[string][]string) } plan.VStreamFilter.Rules = append(plan.VStreamFilter.Rules, sendRule) tablePlan := &TablePlan{ - Name: tableName, - SendRule: sendRule, + TargetName: tableName, + SendRule: sendRule, } plan.TargetTables[tableName] = tablePlan plan.TablePlans[tableName] = tablePlan + continue nextTable case rule.Match == tableName: - sendRule, tablePlan, err := buildTablePlan(rule, tableKeys) + tablePlan, err := buildTablePlan(rule, tableKeys, lastpk) if err != nil { return nil, err } - if _, ok := plan.TablePlans[sendRule.Match]; ok { + if _, ok := plan.TablePlans[tablePlan.SendRule.Match]; ok { continue } - plan.VStreamFilter.Rules = append(plan.VStreamFilter.Rules, sendRule) + plan.VStreamFilter.Rules = append(plan.VStreamFilter.Rules, tablePlan.SendRule) plan.TargetTables[tableName] = tablePlan - plan.TablePlans[sendRule.Match] = tablePlan + plan.TablePlans[tablePlan.SendRule.Match] = tablePlan + continue nextTable } } } @@ -133,10 +146,10 @@ func buildQuery(tableName, filter string) string { return buf.String() } -func buildTablePlan(rule *binlogdatapb.Rule, tableKeys map[string][]string) (*binlogdatapb.Rule, *TablePlan, error) { +func buildTablePlan(rule *binlogdatapb.Rule, tableKeys map[string][]string, lastpk *sqltypes.Result) (*TablePlan, error) { sel, fromTable, err := analyzeSelectFrom(rule.Filter) if err != nil { - return nil, nil, err + return nil, err } sendRule := &binlogdatapb.Rule{ Match: fromTable, @@ -144,17 +157,17 @@ func buildTablePlan(rule *binlogdatapb.Rule, tableKeys map[string][]string) (*bi if expr, ok := sel.SelectExprs[0].(*sqlparser.StarExpr); ok { if len(sel.SelectExprs) != 1 { - return nil, nil, fmt.Errorf("unexpected: %v", sqlparser.String(sel)) + return nil, fmt.Errorf("unexpected: %v", sqlparser.String(sel)) } if !expr.TableName.IsEmpty() { - 
return nil, nil, fmt.Errorf("unsupported qualifier for '*' expression: %v", sqlparser.String(expr)) + return nil, fmt.Errorf("unsupported qualifier for '*' expression: %v", sqlparser.String(expr)) } sendRule.Filter = rule.Filter tablePlan := &TablePlan{ - Name: rule.Match, - SendRule: sendRule, + TargetName: rule.Match, + SendRule: sendRule, } - return sendRule, tablePlan, nil + return tablePlan, nil } tpb := &tablePlanBuilder{ @@ -164,45 +177,28 @@ func buildTablePlan(rule *binlogdatapb.Rule, tableKeys map[string][]string) (*bi Where: sel.Where, }, selColumns: make(map[string]bool), + lastpk: lastpk, } if err := tpb.analyzeExprs(sel.SelectExprs); err != nil { - return nil, nil, err + return nil, err + } + if tpb.lastpk != nil { + for _, f := range tpb.lastpk.Fields { + tpb.addCol(sqlparser.NewColIdent(f.Name)) + } } if err := tpb.analyzeGroupBy(sel.GroupBy); err != nil { - return nil, nil, err + return nil, err } if err := tpb.analyzePK(tableKeys); err != nil { - return nil, nil, err + return nil, err } sendRule.Filter = sqlparser.String(tpb.sendSelect) tablePlan := tpb.generate(tableKeys) tablePlan.SendRule = sendRule - return sendRule, tablePlan, nil -} - -func buildTablePlanFromFields(tableName string, fields []*querypb.Field, tableKeys map[string][]string) (*TablePlan, error) { - tpb := &tablePlanBuilder{ - name: sqlparser.NewTableIdent(tableName), - } - for _, field := range fields { - colName := sqlparser.NewColIdent(field.Name) - cexpr := &colExpr{ - colName: colName, - expr: &sqlparser.ColName{ - Name: colName, - }, - references: map[string]bool{ - field.Name: true, - }, - } - tpb.colExprs = append(tpb.colExprs, cexpr) - } - if err := tpb.analyzePK(tableKeys); err != nil { - return nil, err - } - return tpb.generate(tableKeys), nil + return tablePlan, nil } func (tpb *tablePlanBuilder) generate(tableKeys map[string][]string) *TablePlan { @@ -212,17 +208,28 @@ func (tpb *tablePlanBuilder) generate(tableKeys map[string][]string) *TablePlan refmap[k] = true } 
} + if tpb.lastpk != nil { + for _, f := range tpb.lastpk.Fields { + refmap[f.Name] = true + } + } pkrefs := make([]string, 0, len(refmap)) for k := range refmap { pkrefs = append(pkrefs, k) } sort.Strings(pkrefs) + + bvf := &bindvarFormatter{} + return &TablePlan{ - Name: tpb.name.String(), - PKReferences: pkrefs, - Insert: tpb.generateInsertStatement(), - Update: tpb.generateUpdateStatement(), - Delete: tpb.generateDeleteStatement(), + TargetName: tpb.name.String(), + PKReferences: pkrefs, + BulkInsertFront: tpb.generateInsertPart(sqlparser.NewTrackedBuffer(bvf.formatter)), + BulkInsertValues: tpb.generateValuesPart(sqlparser.NewTrackedBuffer(bvf.formatter), bvf), + BulkInsertOnDup: tpb.generateOnDupPart(sqlparser.NewTrackedBuffer(bvf.formatter)), + Insert: tpb.generateInsertStatement(), + Update: tpb.generateUpdateStatement(), + Delete: tpb.generateDeleteStatement(), } } @@ -364,7 +371,7 @@ func (tpb *tablePlanBuilder) analyzeGroupBy(groupBy sqlparser.GroupBy) error { tpb.onInsert = insertIgnore for _, cExpr := range tpb.colExprs { if !cExpr.isGrouped { - tpb.onInsert = insertOndup + tpb.onInsert = insertOnDup break } } @@ -402,53 +409,59 @@ func (tpb *tablePlanBuilder) findCol(name sqlparser.ColIdent) *colExpr { func (tpb *tablePlanBuilder) generateInsertStatement() *sqlparser.ParsedQuery { bvf := &bindvarFormatter{} buf := sqlparser.NewTrackedBuffer(bvf.formatter) - if tpb.onInsert == insertIgnore { - buf.Myprintf("insert ignore into %v set ", tpb.name) + + tpb.generateInsertPart(buf) + if tpb.lastpk == nil { + buf.Myprintf(" values ", tpb.name) + tpb.generateValuesPart(buf, bvf) } else { - buf.Myprintf("insert into %v set ", tpb.name) - } - tpb.generateInsertValues(buf, bvf) - if tpb.onInsert == insertOndup { - buf.Myprintf(" on duplicate key update ") - tpb.generateUpdate(buf, bvf, false /* before */, true /* after */) + tpb.generateSelectPart(buf, bvf) } + tpb.generateOnDupPart(buf) + return buf.ParsedQuery() } -func (tpb *tablePlanBuilder) 
generateUpdateStatement() *sqlparser.ParsedQuery { +func (tpb *tablePlanBuilder) generateInsertPart(buf *sqlparser.TrackedBuffer) *sqlparser.ParsedQuery { if tpb.onInsert == insertIgnore { - return tpb.generateInsertStatement() + buf.Myprintf("insert ignore into %v(", tpb.name) + } else { + buf.Myprintf("insert into %v(", tpb.name) } - bvf := &bindvarFormatter{} - buf := sqlparser.NewTrackedBuffer(bvf.formatter) - buf.Myprintf("update %v set ", tpb.name) - tpb.generateUpdate(buf, bvf, true /* before */, true /* after */) - tpb.generateWhere(buf, bvf) + separator := "" + for _, cexpr := range tpb.colExprs { + buf.Myprintf("%s%s", separator, cexpr.colName.String()) + separator = "," + } + buf.Myprintf(")", tpb.name) return buf.ParsedQuery() } -func (tpb *tablePlanBuilder) generateDeleteStatement() *sqlparser.ParsedQuery { - bvf := &bindvarFormatter{} - buf := sqlparser.NewTrackedBuffer(bvf.formatter) - switch tpb.onInsert { - case insertNormal: - buf.Myprintf("delete from %v", tpb.name) - tpb.generateWhere(buf, bvf) - case insertOndup: - buf.Myprintf("update %v set ", tpb.name) - tpb.generateUpdate(buf, bvf, true /* before */, false /* after */) - tpb.generateWhere(buf, bvf) - case insertIgnore: - return nil +func (tpb *tablePlanBuilder) generateValuesPart(buf *sqlparser.TrackedBuffer, bvf *bindvarFormatter) *sqlparser.ParsedQuery { + bvf.mode = bvAfter + separator := "(" + for _, cexpr := range tpb.colExprs { + buf.Myprintf("%s", separator) + separator = "," + switch cexpr.operation { + case opExpr: + buf.Myprintf("%v", cexpr.expr) + case opCount: + buf.WriteString("1") + case opSum: + buf.Myprintf("ifnull(%v, 0)", cexpr.expr) + } } + buf.Myprintf(")") return buf.ParsedQuery() } -func (tpb *tablePlanBuilder) generateInsertValues(buf *sqlparser.TrackedBuffer, bvf *bindvarFormatter) { +func (tpb *tablePlanBuilder) generateSelectPart(buf *sqlparser.TrackedBuffer, bvf *bindvarFormatter) *sqlparser.ParsedQuery { bvf.mode = bvAfter + buf.WriteString(" select ") separator 
:= "" for _, cexpr := range tpb.colExprs { - buf.Myprintf("%s%s=", separator, cexpr.colName.String()) + buf.Myprintf("%s", separator) separator = ", " switch cexpr.operation { case opExpr: @@ -459,9 +472,16 @@ func (tpb *tablePlanBuilder) generateInsertValues(buf *sqlparser.TrackedBuffer, buf.Myprintf("ifnull(%v, 0)", cexpr.expr) } } + buf.WriteString(" where ") + tpb.generatePKConstraint(buf, bvf) + return buf.ParsedQuery() } -func (tpb *tablePlanBuilder) generateUpdate(buf *sqlparser.TrackedBuffer, bvf *bindvarFormatter, before, after bool) { +func (tpb *tablePlanBuilder) generateOnDupPart(buf *sqlparser.TrackedBuffer) *sqlparser.ParsedQuery { + if tpb.onInsert != insertOnDup { + return nil + } + buf.Myprintf(" on duplicate key update ") separator := "" for _, cexpr := range tpb.colExprs { if cexpr.isGrouped || cexpr.isPK { @@ -471,33 +491,80 @@ func (tpb *tablePlanBuilder) generateUpdate(buf *sqlparser.TrackedBuffer, bvf *b separator = ", " switch cexpr.operation { case opExpr: - if after { - bvf.mode = bvAfter - buf.Myprintf("%v", cexpr.expr) - } else { - buf.WriteString("null") - } + buf.Myprintf("values(%s)", cexpr.colName.String()) case opCount: - switch { - case before && after: - buf.Myprintf("%s", cexpr.colName.String()) - case before: - buf.Myprintf("%s-1", cexpr.colName.String()) - case after: - buf.Myprintf("%s+1", cexpr.colName.String()) - } + buf.Myprintf("%s+1", cexpr.colName.String()) case opSum: buf.Myprintf("%s", cexpr.colName.String()) - if before { - bvf.mode = bvBefore - buf.Myprintf("-ifnull(%v, 0)", cexpr.expr) + buf.Myprintf("+ifnull(values(%s), 0)", cexpr.colName.String()) + } + } + return buf.ParsedQuery() +} + +func (tpb *tablePlanBuilder) generateUpdateStatement() *sqlparser.ParsedQuery { + if tpb.onInsert == insertIgnore { + return tpb.generateInsertStatement() + } + bvf := &bindvarFormatter{} + buf := sqlparser.NewTrackedBuffer(bvf.formatter) + buf.Myprintf("update %v set ", tpb.name) + separator := "" + for _, cexpr := range 
tpb.colExprs { + if cexpr.isGrouped || cexpr.isPK { + continue + } + buf.Myprintf("%s%s=", separator, cexpr.colName.String()) + separator = ", " + switch cexpr.operation { + case opExpr: + bvf.mode = bvAfter + buf.Myprintf("%v", cexpr.expr) + case opCount: + buf.Myprintf("%s", cexpr.colName.String()) + case opSum: + buf.Myprintf("%s", cexpr.colName.String()) + bvf.mode = bvBefore + buf.Myprintf("-ifnull(%v, 0)", cexpr.expr) + bvf.mode = bvAfter + buf.Myprintf("+ifnull(%v, 0)", cexpr.expr) + } + } + tpb.generateWhere(buf, bvf) + return buf.ParsedQuery() +} + +func (tpb *tablePlanBuilder) generateDeleteStatement() *sqlparser.ParsedQuery { + bvf := &bindvarFormatter{} + buf := sqlparser.NewTrackedBuffer(bvf.formatter) + switch tpb.onInsert { + case insertNormal: + buf.Myprintf("delete from %v", tpb.name) + tpb.generateWhere(buf, bvf) + case insertOnDup: + bvf.mode = bvBefore + buf.Myprintf("update %v set ", tpb.name) + separator := "" + for _, cexpr := range tpb.colExprs { + if cexpr.isGrouped || cexpr.isPK { + continue } - if after { - bvf.mode = bvAfter - buf.Myprintf("+ifnull(%v, 0)", cexpr.expr) + buf.Myprintf("%s%s=", separator, cexpr.colName.String()) + separator = ", " + switch cexpr.operation { + case opExpr: + buf.WriteString("null") + case opCount: + buf.Myprintf("%s-1", cexpr.colName.String()) + case opSum: + buf.Myprintf("%s-ifnull(%v, 0)", cexpr.colName.String(), cexpr.expr) } } + tpb.generateWhere(buf, bvf) + case insertIgnore: + return nil } + return buf.ParsedQuery() } func (tpb *tablePlanBuilder) generateWhere(buf *sqlparser.TrackedBuffer, bvf *bindvarFormatter) { @@ -513,8 +580,34 @@ func (tpb *tablePlanBuilder) generateWhere(buf *sqlparser.TrackedBuffer, bvf *bi } separator = " and " } + if tpb.lastpk != nil { + buf.WriteString(" and ") + tpb.generatePKConstraint(buf, bvf) + } +} + +func (tpb *tablePlanBuilder) generatePKConstraint(buf *sqlparser.TrackedBuffer, bvf *bindvarFormatter) { + separator := "(" + for _, pkname := range tpb.lastpk.Fields { 
+ buf.Myprintf("%s%v", separator, &sqlparser.ColName{Name: sqlparser.NewColIdent(pkname.Name)}) + separator = "," + } + separator = ") <= (" + for _, val := range tpb.lastpk.Rows[0] { + buf.WriteString(separator) + separator = "," + val.EncodeSQL(buf) + } + buf.WriteString(")") } +// bindvarFormatter is a dual mode formatter. Its behavior +// can be changed dynamically changed to generate bind vars +// for the 'before' row or 'after' row by setting its mode +// to 'bvBefore' or 'bvAfter'. For example, inserts will always +// use bvAfter, whereas deletes will always use bvBefore. +// For updates, values being set will use bvAfter, whereas +// the where clause will use bvBefore. type bindvarFormatter struct { mode bindvarMode } @@ -522,8 +615,7 @@ type bindvarFormatter struct { type bindvarMode int const ( - bvNone = bindvarMode(iota) - bvBefore + bvBefore = bindvarMode(iota) bvAfter ) diff --git a/go/vt/vttablet/tabletmanager/vreplication/vcopier.go b/go/vt/vttablet/tabletmanager/vreplication/vcopier.go new file mode 100644 index 00000000000..82646741fd3 --- /dev/null +++ b/go/vt/vttablet/tabletmanager/vreplication/vcopier.go @@ -0,0 +1,303 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vreplication + +import ( + "bytes" + "fmt" + "io" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/grpcclient" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vttablet/tabletconn" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" +) + +type vcopier struct { + vr *vreplicator + tablePlan *TablePlan +} + +func newVCopier(vr *vreplicator) *vcopier { + return &vcopier{ + vr: vr, + } +} + +func (vc *vcopier) initTablesForCopy(ctx context.Context) error { + defer vc.vr.dbClient.Rollback() + + plan, err := buildReplicatorPlan(vc.vr.source.Filter, vc.vr.tableKeys, nil) + if err != nil { + return err + } + if err := vc.vr.dbClient.Begin(); err != nil { + return err + } + // Insert the table list only if at least one table matches. 
+ if len(plan.TargetTables) != 0 { + var buf strings.Builder + buf.WriteString("insert into _vt.copy_state(vrepl_id, table_name) values ") + prefix := "" + for name := range plan.TargetTables { + fmt.Fprintf(&buf, "%s(%d, %s)", prefix, vc.vr.id, encodeString(name)) + prefix = ", " + } + if _, err := vc.vr.dbClient.ExecuteFetch(buf.String(), 1); err != nil { + return err + } + if err := vc.vr.setState(binlogplayer.VReplicationCopying, ""); err != nil { + return err + } + } else { + if err := vc.vr.setState(binlogplayer.BlpStopped, "There is nothing to replicate"); err != nil { + return err + } + } + return vc.vr.dbClient.Commit() +} + +func (vc *vcopier) copyNext(ctx context.Context, settings binlogplayer.VRSettings) error { + qr, err := vc.vr.dbClient.ExecuteFetch(fmt.Sprintf("select table_name, lastpk from _vt.copy_state where vrepl_id=%d", vc.vr.id), 10000) + if err != nil { + return err + } + var tableToCopy string + copyState := make(map[string]*sqltypes.Result) + for _, row := range qr.Rows { + tableName := row[0].ToString() + lastpk := row[1].ToString() + if tableToCopy == "" { + tableToCopy = tableName + } + copyState[tableName] = nil + if lastpk != "" { + var r querypb.QueryResult + if err := proto.UnmarshalText(lastpk, &r); err != nil { + return err + } + copyState[tableName] = sqltypes.Proto3ToResult(&r) + } + } + if len(copyState) == 0 { + return fmt.Errorf("unexpected: there are no tables to copy") + } + if err := vc.catchup(ctx, copyState); err != nil { + return err + } + return vc.copyTable(ctx, tableToCopy, copyState) +} + +func (vc *vcopier) catchup(ctx context.Context, copyState map[string]*sqltypes.Result) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + settings, err := binlogplayer.ReadVRSettings(vc.vr.dbClient, vc.vr.id) + if err != nil { + return err + } + // If there's no start position, it means we're copying the + // first table. So, there's nothing to catch up to. 
+ if settings.StartPos.IsZero() { + return nil + } + + // Start vreplication. + errch := make(chan error, 1) + go func() { + errch <- newVPlayer(vc.vr, settings, copyState, mysql.Position{}).play(ctx) + }() + + // Wait for catchup. + tmr := time.NewTimer(1 * time.Second) + seconds := int64(replicaLagTolerance / time.Second) + defer tmr.Stop() + for { + sbm := vc.vr.stats.SecondsBehindMaster.Get() + if sbm < seconds { + cancel() + // Make sure vplayer returns before returning. + <-errch + return nil + } + select { + case err := <-errch: + if err != nil { + return err + } + return io.EOF + case <-ctx.Done(): + // Make sure vplayer returns before returning. + <-errch + return io.EOF + case <-tmr.C: + } + } +} + +func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState map[string]*sqltypes.Result) error { + defer vc.vr.dbClient.Rollback() + + log.Infof("Copying table %s, lastpk: %v", tableName, copyState[tableName]) + + plan, err := buildReplicatorPlan(vc.vr.source.Filter, vc.vr.tableKeys, nil) + if err != nil { + return err + } + + initialPlan, ok := plan.TargetTables[tableName] + if !ok { + return fmt.Errorf("plan not found for table: %s, curret plans are: %#v", tableName, plan.TargetTables) + } + + vsClient, err := tabletconn.GetDialer()(vc.vr.sourceTablet, grpcclient.FailFast(false)) + if err != nil { + return fmt.Errorf("error dialing tablet: %v", err) + } + defer vsClient.Close(ctx) + + ctx, cancel := context.WithTimeout(ctx, copyTimeout) + defer cancel() + + target := &querypb.Target{ + Keyspace: vc.vr.sourceTablet.Keyspace, + Shard: vc.vr.sourceTablet.Shard, + TabletType: vc.vr.sourceTablet.Type, + } + + var lastpkpb *querypb.QueryResult + if lastpkqr := copyState[tableName]; lastpkqr != nil { + lastpkpb = sqltypes.ResultToProto3(lastpkqr) + } + + var pkfields []*querypb.Field + var updateCopyState *sqlparser.ParsedQuery + err = vsClient.VStreamRows(ctx, target, initialPlan.SendRule.Filter, lastpkpb, func(rows 
*binlogdatapb.VStreamRowsResponse) error { + select { + case <-ctx.Done(): + return io.EOF + default: + } + if vc.tablePlan == nil { + if len(rows.Fields) == 0 { + return fmt.Errorf("expecting field event first, got: %v", rows) + } + if err := vc.fastForward(ctx, copyState, rows.Gtid); err != nil { + return err + } + fieldEvent := &binlogdatapb.FieldEvent{ + TableName: initialPlan.SendRule.Match, + Fields: rows.Fields, + } + vc.tablePlan, err = plan.buildExecutionPlan(fieldEvent) + if err != nil { + return err + } + pkfields = rows.Pkfields + buf := sqlparser.NewTrackedBuffer(nil) + buf.Myprintf("update _vt.copy_state set lastpk=%a where vrepl_id=%s and table_name=%s", ":lastpk", strconv.Itoa(int(vc.vr.id)), encodeString(tableName)) + updateCopyState = buf.ParsedQuery() + } + if len(rows.Rows) == 0 { + return nil + } + // The number of rows we receive depends on the packet size set + // for the row streamer. Since the packet size is roughly equivalent + // to data size, this should map to a uniform amount of pages affected + // per statement. A packet size of 30K will roughly translate to 8 + // mysql pages of 4K each. + query, err := vc.tablePlan.generateBulkInsert(rows) + if err != nil { + return err + } + var buf bytes.Buffer + err = proto.CompactText(&buf, &querypb.QueryResult{ + Fields: pkfields, + Rows: []*querypb.Row{rows.Lastpk}, + }) + if err != nil { + return err + } + bv := map[string]*querypb.BindVariable{ + "lastpk": { + Type: sqltypes.VarBinary, + Value: buf.Bytes(), + }, + } + updateState, err := updateCopyState.GenerateQuery(bv, nil) + if err != nil { + return err + } + if err := vc.vr.dbClient.Begin(); err != nil { + return err + } + if _, err := vc.vr.dbClient.ExecuteFetch(query, 0); err != nil { + return err + } + if _, err := vc.vr.dbClient.ExecuteFetch(updateState, 0); err != nil { + return err + } + if err := vc.vr.dbClient.Commit(); err != nil { + return err + } + return nil + }) + // If there was a timeout, return without an error. 
+ select { + case <-ctx.Done(): + return nil + default: + } + if err != nil { + return err + } + buf := sqlparser.NewTrackedBuffer(nil) + buf.Myprintf("delete from _vt.copy_state where vrepl_id=%s and table_name=%s", strconv.Itoa(int(vc.vr.id)), encodeString(tableName)) + if _, err := vc.vr.dbClient.ExecuteFetch(buf.String(), 0); err != nil { + return err + } + return nil +} + +func (vc *vcopier) fastForward(ctx context.Context, copyState map[string]*sqltypes.Result, gtid string) error { + pos, err := mysql.DecodePosition(gtid) + if err != nil { + return err + } + settings, err := binlogplayer.ReadVRSettings(vc.vr.dbClient, vc.vr.id) + if err != nil { + return err + } + if settings.StartPos.IsZero() { + update := binlogplayer.GenerateUpdatePos(vc.vr.id, pos, time.Now().Unix(), 0) + _, err := vc.vr.dbClient.ExecuteFetch(update, 0) + return err + } + return newVPlayer(vc.vr, settings, copyState, pos).play(ctx) +} diff --git a/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go b/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go new file mode 100644 index 00000000000..ff81c6eaf45 --- /dev/null +++ b/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go @@ -0,0 +1,405 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vreplication + +import ( + "fmt" + "strings" + "testing" + "time" + + "golang.org/x/net/context" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/binlog/binlogplayer" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +func TestPlayerCopyTables(t *testing.T) { + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) + + execStatements(t, []string{ + "create table src1(id int, val varbinary(128), primary key(id))", + "insert into src1 values(2, 'bbb'), (1, 'aaa')", + fmt.Sprintf("create table %s.dst1(id int, val varbinary(128), primary key(id))", vrepldb), + "create table yes(id int, val varbinary(128), primary key(id))", + fmt.Sprintf("create table %s.yes(id int, val varbinary(128), primary key(id))", vrepldb), + "create table no(id int, val varbinary(128), primary key(id))", + }) + defer execStatements(t, []string{ + "drop table src1", + fmt.Sprintf("drop table %s.dst1", vrepldb), + "drop table yes", + fmt.Sprintf("drop table %s.yes", vrepldb), + "drop table no", + }) + env.SchemaEngine.Reload(context.Background()) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "dst1", + Filter: "select * from src1", + }, { + Match: "/yes", + }}, + } + + bls := &binlogdatapb.BinlogSource{ + Keyspace: env.KeyspaceName, + Shard: env.ShardName, + Filter: filter, + OnDdl: binlogdatapb.OnDDLAction_IGNORE, + } + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName) + qr, err := playerEngine.Exec(query) + if err != nil { + t.Fatal(err) + } + defer func() { + query := fmt.Sprintf("delete from _vt.vreplication where id = %d", qr.InsertID) + if _, err := playerEngine.Exec(query); err != nil { + t.Fatal(err) + } + expectDBClientQueries(t, []string{ + "/delete", + }) + }() + + expectDBClientQueries(t, []string{ + "/insert into _vt.vreplication", + // Create the list of tables to copy 
and transition to Copying state. + "begin", + "/insert into _vt.copy_state", + "/update _vt.vreplication set state='Copying'", + "commit", + "rollback", + // The first fast-forward has no starting point. So, it just saves the current position. + "/update _vt.vreplication set pos=", + "begin", + "insert into dst1(id,val) values (1,'aaa'), (2,'bbb')", + `/update _vt.copy_state set lastpk='fields: rows: ' where vrepl_id=.*`, + "commit", + // copy of dst1 is done: delete from copy_state. + "/delete from _vt.copy_state.*dst1", + "rollback", + // The next FF executes and updates the position before copying. + "begin", + "/update _vt.vreplication set pos=", + "commit", + // Nothing to copy from yes. Delete from copy_state. + "/delete from _vt.copy_state.*yes", + "rollback", + // All tables copied. Final catch up followed by Running state. + "/update _vt.vreplication set state='Running'", + }) + expectData(t, "dst1", [][]string{ + {"1", "aaa"}, + {"2", "bbb"}, + }) + expectData(t, "yes", [][]string{}) +} + +// TestPlayerCopyTableContinuation tests the copy workflow where tables have been partially copied. +func TestPlayerCopyTableContinuation(t *testing.T) { + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) + + execStatements(t, []string{ + // src1 is initialized as partially copied. + // lastpk will be initialized at (6,6) later below. + // dst1 only copies id1 and val. This will allow us to test for correctness if id2 changes in the source. 
+ "create table src1(id1 int, id2 int, val varbinary(128), primary key(id1, id2))", + "insert into src1 values(2,2,'no change'), (3,3,'update'), (4,4,'delete'), (5,5,'move within'), (6,6,'move out'), (8,8,'no change'), (9,9,'delete'), (10,10,'update'), (11,11,'move in')", + fmt.Sprintf("create table %s.dst1(id int, val varbinary(128), primary key(id))", vrepldb), + fmt.Sprintf("insert into %s.dst1 values(2,'no change'), (3,'update'), (4,'delete'), (5,'move within'), (6,'move out')", vrepldb), + // copied is initialized as fully copied + "create table copied(id int, val varbinary(128), primary key(id))", + "insert into copied values(1,'aaa')", + fmt.Sprintf("create table %s.copied(id int, val varbinary(128), primary key(id))", vrepldb), + fmt.Sprintf("insert into %s.copied values(1,'aaa')", vrepldb), + // not_copied yet to be copied. + "create table not_copied(id int, val varbinary(128), primary key(id))", + "insert into not_copied values(1,'aaa')", + fmt.Sprintf("create table %s.not_copied(id int, val varbinary(128), primary key(id))", vrepldb), + }) + defer execStatements(t, []string{ + "drop table src1", + fmt.Sprintf("drop table %s.dst1", vrepldb), + }) + env.SchemaEngine.Reload(context.Background()) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "dst1", + Filter: "select id1 as id, val from src1", + }, { + Match: "copied", + Filter: "select * from copied", + }, { + Match: "not_copied", + Filter: "select * from not_copied", + }}, + } + pos := masterPosition(t) + execStatements(t, []string{ + // insert inside and outside current range. + "insert into src1 values(1,1,'insert in'), (7,7,'insert out')", + // update inside and outside current range. + "update src1 set val='updated' where id1 in (3,10)", + // delete inside and outside current range. + "delete from src1 where id1 in (4,9)", + // move row within range by changing id2. + "update src1 set id2=10 where id1=5", + // move row from within to outside range. 
+ "update src1 set id1=12 where id1=6", + // move row from outside to witihn range. + "update src1 set id1=4 where id1=11", + // modify the copied table. + "update copied set val='bbb' where id=1", + // modify the uncopied table. + "update not_copied set val='bbb' where id=1", + }) + + // Set a hook to execute statements just before the copy begins from src1. + streamRowsHook = func(context.Context) { + execStatements(t, []string{ + "update src1 set val='updated again' where id1 = 3", + }) + // Set it back to nil. Otherwise, this will get executed again when copying not_copied. + streamRowsHook = nil + } + defer func() { streamRowsHook = nil }() + + bls := &binlogdatapb.BinlogSource{ + Keyspace: env.KeyspaceName, + Shard: env.ShardName, + Filter: filter, + OnDdl: binlogdatapb.OnDDLAction_IGNORE, + } + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.BlpStopped, playerEngine.dbName) + qr, err := playerEngine.Exec(query) + if err != nil { + t.Fatal(err) + } + // As mentioned above. 
lastpk cut-off is set at (6,6) + lastpk := sqltypes.ResultToProto3(sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id1|id2", + "int32|int32", + ), + "6|6", + )) + lastpk.RowsAffected = 0 + execStatements(t, []string{ + fmt.Sprintf("insert into _vt.copy_state values(%d, '%s', %s)", qr.InsertID, "dst1", encodeString(fmt.Sprintf("%v", lastpk))), + fmt.Sprintf("insert into _vt.copy_state values(%d, '%s', null)", qr.InsertID, "not_copied"), + }) + id := qr.InsertID + _, err = playerEngine.Exec(fmt.Sprintf("update _vt.vreplication set state='Copying', pos=%s where id=%d", encodeString(pos), id)) + if err != nil { + t.Fatal(err) + } + defer func() { + query := fmt.Sprintf("delete from _vt.vreplication where id = %d", id) + if _, err := playerEngine.Exec(query); err != nil { + t.Fatal(err) + } + for q := range globalDBQueries { + if strings.HasPrefix(q, "delete from _vt.vreplication") { + break + } + } + }() + + for q := range globalDBQueries { + if strings.HasPrefix(q, "update") { + break + } + } + + expectNontxQueries(t, []string{ + // Catchup + "insert into dst1(id,val) select 1, 'insert in' where (1,1) <= (6,6)", + "insert into dst1(id,val) select 7, 'insert out' where (7,7) <= (6,6)", + "update dst1 set val='updated' where id=3 and (3,3) <= (6,6)", + "update dst1 set val='updated' where id=10 and (10,10) <= (6,6)", + "delete from dst1 where id=4 and (4,4) <= (6,6)", + "delete from dst1 where id=9 and (9,9) <= (6,6)", + "delete from dst1 where id=5 and (5,5) <= (6,6)", + "insert into dst1(id,val) select 5, 'move within' where (5,10) <= (6,6)", + "delete from dst1 where id=6 and (6,6) <= (6,6)", + "insert into dst1(id,val) select 12, 'move out' where (12,6) <= (6,6)", + "delete from dst1 where id=11 and (11,11) <= (6,6)", + "insert into dst1(id,val) select 4, 'move in' where (4,11) <= (6,6)", + "update copied set val='bbb' where id=1", + // Fast-forward + "update dst1 set val='updated again' where id=3 and (3,3) <= (6,6)", + // Copy + "insert into dst1(id,val) 
values (7,'insert out'), (8,'no change'), (10,'updated'), (12,'move out')", + `/update _vt.copy_state set lastpk='fields: fields: rows: ' where vrepl_id=.*`, + "/delete from _vt.copy_state.*dst1", + "rollback", + // Copy again. There should be no events for catchup. + "insert into not_copied(id,val) values (1,'bbb')", + `/update _vt.copy_state set lastpk='fields: rows: ' where vrepl_id=.*`, + "/delete from _vt.copy_state.*not_copied", + "rollback", + }) + expectData(t, "dst1", [][]string{ + {"1", "insert in"}, + {"2", "no change"}, + {"3", "updated again"}, + {"4", "move in"}, + {"5", "move within"}, + {"7", "insert out"}, + {"8", "no change"}, + {"10", "updated"}, + {"12", "move out"}, + }) + expectData(t, "copied", [][]string{ + {"1", "bbb"}, + }) + expectData(t, "not_copied", [][]string{ + {"1", "bbb"}, + }) +} + +func TestPlayerCopyTablesNone(t *testing.T) { + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "dst1", + Filter: "select * from src1", + }}, + } + + bls := &binlogdatapb.BinlogSource{ + Keyspace: env.KeyspaceName, + Shard: env.ShardName, + Filter: filter, + OnDdl: binlogdatapb.OnDDLAction_IGNORE, + } + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName) + qr, err := playerEngine.Exec(query) + if err != nil { + t.Fatal(err) + } + defer func() { + query := fmt.Sprintf("delete from _vt.vreplication where id = %d", qr.InsertID) + if _, err := playerEngine.Exec(query); err != nil { + t.Fatal(err) + } + expectDBClientQueries(t, []string{ + "/delete", + }) + }() + + expectDBClientQueries(t, []string{ + "/insert into _vt.vreplication", + "begin", + "/update _vt.vreplication set state='Stopped'", + "commit", + "rollback", + }) +} + +func TestPlayerCopyTableCancel(t *testing.T) { + defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) + + execStatements(t, 
[]string{ + "create table src1(id int, val varbinary(128), primary key(id))", + "insert into src1 values(2, 'bbb'), (1, 'aaa')", + fmt.Sprintf("create table %s.dst1(id int, val varbinary(128), primary key(id))", vrepldb), + }) + defer execStatements(t, []string{ + "drop table src1", + fmt.Sprintf("drop table %s.dst1", vrepldb), + }) + env.SchemaEngine.Reload(context.Background()) + + saveTimeout := copyTimeout + copyTimeout = 1 * time.Millisecond + defer func() { copyTimeout = saveTimeout }() + + // Set a hook to reset the copy timeout after first call. + streamRowsHook = func(ctx context.Context) { + <-ctx.Done() + copyTimeout = saveTimeout + streamRowsHook = nil + } + defer func() { streamRowsHook = nil }() + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "dst1", + Filter: "select * from src1", + }}, + } + + bls := &binlogdatapb.BinlogSource{ + Keyspace: env.KeyspaceName, + Shard: env.ShardName, + Filter: filter, + OnDdl: binlogdatapb.OnDDLAction_IGNORE, + } + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName) + qr, err := playerEngine.Exec(query) + if err != nil { + t.Fatal(err) + } + defer func() { + query := fmt.Sprintf("delete from _vt.vreplication where id = %d", qr.InsertID) + if _, err := playerEngine.Exec(query); err != nil { + t.Fatal(err) + } + expectDBClientQueries(t, []string{ + "/delete", + }) + }() + + // Make sure rows get copied in spite of the early context cancel. + expectDBClientQueries(t, []string{ + "/insert into _vt.vreplication", + // Create the list of tables to copy and transition to Copying state. + "begin", + "/insert into _vt.copy_state", + "/update _vt.vreplication set state='Copying'", + "commit", + "rollback", + // The first copy will do nothing because we set the timeout to be too low. + // We should expect it to do an empty rollback. + "rollback", + // The next copy should proceed as planned because we've made the timeout high again. 
+ // The first fast-forward has no starting point. So, it just saves the current position. + "/update _vt.vreplication set pos=", + "begin", + "insert into dst1(id,val) values (1,'aaa'), (2,'bbb')", + `/update _vt.copy_state set lastpk='fields: rows: ' where vrepl_id=.*`, + "commit", + // copy of dst1 is done: delete from copy_state. + "/delete from _vt.copy_state.*dst1", + "rollback", + // All tables copied. Final catch up followed by Running state. + "/update _vt.vreplication set state='Running'", + }) + expectData(t, "dst1", [][]string{ + {"1", "aaa"}, + {"2", "bbb"}, + }) +} diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go index 35062f79813..bb4f52a56e4 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go @@ -20,162 +20,80 @@ import ( "errors" "fmt" "io" - "strings" "time" "golang.org/x/net/context" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/grpcclient" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/vttablet/tabletconn" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" -) - -var ( - // idleTimeout is set to slightly above 1s, compared to heartbeatTime - // set by VStreamer at slightly below 1s. This minimizes conflicts - // between the two timeouts. 
- idleTimeout = 1100 * time.Millisecond - dbLockRetryDelay = 1 * time.Second - relayLogMaxSize = 10000 - relayLogMaxItems = 1000 - - // CreateCopyState is the list of statements to execute for creating - // the _vt.copy_state table - CreateCopyState = []string{ - `create table if not exists _vt.copy_state ( - vrepl_id int, - table_name varbinary(128), - last_pk varbinary(2000), - primary key (vrepl_id, table_name))`} ) type vplayer struct { - id uint32 - source *binlogdatapb.BinlogSource - sourceTablet *topodatapb.Tablet - stats *binlogplayer.Stats - dbClient *vdbClient - // mysqld is used to fetch the local schema. - mysqld mysqlctl.MysqlDaemon + vr *vreplicator + startPos mysql.Position + stopPos mysql.Position + saveStop bool + copyState map[string]*sqltypes.Result + + replicatorPlan *ReplicatorPlan + tablePlans map[string]*TablePlan pos mysql.Position - // unsavedGTID when we receive a GTID event and reset - // if it gets saved. If Fetch returns on idleTimeout, - // we save the last unsavedGTID. - unsavedGTID *binlogdatapb.VEvent + // unsavedEvent is saved any time we skip an event without + // saving: This can be an empty commit or a skipped DDL. + unsavedEvent *binlogdatapb.VEvent // timeLastSaved is set every time a GTID is saved. timeLastSaved time.Time // lastTimestampNs is the last timestamp seen so far. lastTimestampNs int64 // timeOffsetNs keeps track of the clock difference with respect to source tablet. timeOffsetNs int64 - stopPos mysql.Position - - tableKeys map[string][]string - - // pplan is built based on the source Filter at the beginning. - pplan *PlayerPlan - // tplans[table] is built for each table based on pplan and schema info - // about the table. 
- tplans map[string]*TablePlan } -func newVPlayer(id uint32, source *binlogdatapb.BinlogSource, sourceTablet *topodatapb.Tablet, stats *binlogplayer.Stats, dbClient binlogplayer.DBClient, mysqld mysqlctl.MysqlDaemon) *vplayer { +func newVPlayer(vr *vreplicator, settings binlogplayer.VRSettings, copyState map[string]*sqltypes.Result, pausePos mysql.Position) *vplayer { + saveStop := true + if !pausePos.IsZero() { + settings.StopPos = pausePos + saveStop = false + } return &vplayer{ - id: id, - source: source, - sourceTablet: sourceTablet, - stats: stats, - dbClient: newVDBClient(dbClient, stats), - mysqld: mysqld, + vr: vr, + startPos: settings.StartPos, + pos: settings.StartPos, + stopPos: settings.StopPos, + saveStop: saveStop, + copyState: copyState, timeLastSaved: time.Now(), - tplans: make(map[string]*TablePlan), + tablePlans: make(map[string]*TablePlan), } } -func (vp *vplayer) Play(ctx context.Context) error { - tableKeys, err := vp.buildTableKeys() - if err != nil { - return err +// play is not resumable. If pausePos is set, play returns without updating the vreplication state. 
+func (vp *vplayer) play(ctx context.Context) error { + if !vp.stopPos.IsZero() && vp.startPos.AtLeast(vp.stopPos) { + return vp.vr.setState(binlogplayer.BlpStopped, fmt.Sprintf("Stop position %v already reached: %v", vp.startPos, vp.stopPos)) } - vp.tableKeys = tableKeys - plan, err := buildPlayerPlan(vp.source.Filter, tableKeys) - if err != nil { - return err - } - vp.pplan = plan - settings, err := binlogplayer.ReadVRSettings(vp.dbClient, vp.id) + plan, err := buildReplicatorPlan(vp.vr.source.Filter, vp.vr.tableKeys, vp.copyState) if err != nil { - return fmt.Errorf("error reading VReplication settings: %v", err) - } - if settings.State == binlogplayer.VReplicationInit { - if err := vp.initTablesForCopy(ctx); err != nil { - return err - } - return nil - } - return vp.replicate(ctx, settings) -} - -func (vp *vplayer) initTablesForCopy(ctx context.Context) error { - defer vp.dbClient.Rollback() - - // Check if table exists. - if _, err := vp.dbClient.ExecuteFetch("select * from _vt.copy_state limit 1", 10); err != nil { - // If it's a not found error, create it. - merr, isSQLErr := err.(*mysql.SQLError) - if !isSQLErr || !(merr.Num == mysql.ERNoSuchTable || merr.Num == mysql.ERBadDb) { - return err - } - for _, query := range CreateCopyState { - if _, merr := vp.dbClient.ExecuteFetch(query, 0); merr != nil { - log.Errorf("Failed to ensure _vt.copy_state table exists: %v", merr) - return err - } - } - } - if err := vp.dbClient.Begin(); err != nil { - return err - } - // Insert the table list only if at least one table matches. 
- if len(vp.pplan.TargetTables) != 0 { - var buf strings.Builder - buf.WriteString("insert into _vt.copy_state(vrepl_id, table_name) values ") - prefix := "" - for name := range vp.pplan.TargetTables { - fmt.Fprintf(&buf, "%s(%d, %s)", prefix, vp.id, encodeString(name)) - prefix = ", " - } - if _, err := vp.dbClient.ExecuteFetch(buf.String(), 1); err != nil { - return err - } - } - if err := vp.setState(binlogplayer.VReplicationCopying, ""); err != nil { return err } - return vp.dbClient.Commit() -} + vp.replicatorPlan = plan -func (vp *vplayer) replicate(ctx context.Context, settings binlogplayer.VRSettings) error { - if err := vp.setState(binlogplayer.BlpRunning, ""); err != nil { - return err - } - if err := vp.play(ctx, settings); err != nil { + if err := vp.fetchAndApply(ctx); err != nil { msg := err.Error() - vp.stats.History.Add(&binlogplayer.StatsHistoryRecord{ + vp.vr.stats.History.Add(&binlogplayer.StatsHistoryRecord{ Time: time.Now(), Message: msg, }) - if err := vp.setState(binlogplayer.BlpError, msg); err != nil { + if err := vp.vr.setMessage(msg); err != nil { log.Errorf("Failed to set error state: %v", err) } return err @@ -183,43 +101,28 @@ func (vp *vplayer) replicate(ctx context.Context, settings binlogplayer.VRSettin return nil } -func (vp *vplayer) play(ctx context.Context, settings binlogplayer.VRSettings) error { - var err error - vp.pos, err = mysql.DecodePosition(settings.StartPos) - if err != nil { - return vp.setState(binlogplayer.BlpStopped, fmt.Sprintf("error decoding start position %v: %v", settings.StartPos, err)) - } - if settings.StopPos != "" { - vp.stopPos, err = mysql.DecodePosition(settings.StopPos) - if err != nil { - return vp.setState(binlogplayer.BlpStopped, fmt.Sprintf("error decoding stop position %v: %v", settings.StopPos, err)) - } - } - if !vp.stopPos.IsZero() { - if vp.pos.AtLeast(vp.stopPos) { - return vp.setState(binlogplayer.BlpStopped, fmt.Sprintf("Stop position %v already reached: %v", vp.pos, vp.stopPos)) - } - } 
- log.Infof("Starting VReplication player id: %v, startPos: %v, stop: %v, source: %v, filter: %v", vp.id, settings.StartPos, vp.stopPos, vp.sourceTablet, vp.source) +func (vp *vplayer) fetchAndApply(ctx context.Context) error { + log.Infof("Starting VReplication player id: %v, startPos: %v, stop: %v, source: %v, filter: %v", vp.vr.id, vp.startPos, vp.stopPos, vp.vr.sourceTablet, vp.vr.source) - vsClient, err := tabletconn.GetDialer()(vp.sourceTablet, grpcclient.FailFast(false)) + vsClient, err := tabletconn.GetDialer()(vp.vr.sourceTablet, grpcclient.FailFast(false)) if err != nil { return fmt.Errorf("error dialing tablet: %v", err) } + defer vsClient.Close(ctx) ctx, cancel := context.WithCancel(ctx) defer cancel() relay := newRelayLog(ctx, relayLogMaxItems, relayLogMaxSize) target := &querypb.Target{ - Keyspace: vp.sourceTablet.Keyspace, - Shard: vp.sourceTablet.Shard, - TabletType: vp.sourceTablet.Type, + Keyspace: vp.vr.sourceTablet.Keyspace, + Shard: vp.vr.sourceTablet.Shard, + TabletType: vp.vr.sourceTablet.Type, } - log.Infof("Sending vstream command: %v", vp.pplan.VStreamFilter) + log.Infof("Sending vstream command: %v", vp.replicatorPlan.VStreamFilter) streamErr := make(chan error, 1) go func() { - streamErr <- vsClient.VStream(ctx, target, settings.StartPos, vp.pplan.VStreamFilter, func(events []*binlogdatapb.VEvent) error { + streamErr <- vsClient.VStream(ctx, target, mysql.EncodePosition(vp.startPos), vp.replicatorPlan.VStreamFilter, func(events []*binlogdatapb.VEvent) error { return relay.Send(events) }) }() @@ -264,20 +167,67 @@ func (vp *vplayer) play(ctx context.Context, settings binlogplayer.VRSettings) e } } -func (vp *vplayer) buildTableKeys() (map[string][]string, error) { - schema, err := vp.mysqld.GetSchema(vp.dbClient.DBName(), []string{"/.*/"}, nil, false) - if err != nil { - return nil, err +func (vp *vplayer) applyRowEvent(ctx context.Context, rowEvent *binlogdatapb.RowEvent) error { + tplan := vp.tablePlans[rowEvent.TableName] + if tplan == 
nil { + return fmt.Errorf("unexpected event on table %s", rowEvent.TableName) + } + for _, change := range rowEvent.RowChanges { + queries, err := tplan.generateStatements(change) + if err != nil { + return err + } + for _, query := range queries { + if err := vp.exec(ctx, query); err != nil { + return err + } + } } - tableKeys := make(map[string][]string) - for _, td := range schema.TableDefinitions { - if len(td.PrimaryKeyColumns) != 0 { - tableKeys[td.Name] = td.PrimaryKeyColumns - } else { - tableKeys[td.Name] = td.Columns + return nil +} + +func (vp *vplayer) updatePos(ts int64) (posReached bool, err error) { + update := binlogplayer.GenerateUpdatePos(vp.vr.id, vp.pos, time.Now().Unix(), ts) + if _, err := vp.vr.dbClient.ExecuteFetch(update, 0); err != nil { + vp.vr.dbClient.Rollback() + return false, fmt.Errorf("error %v updating position", err) + } + vp.unsavedEvent = nil + vp.timeLastSaved = time.Now() + vp.vr.stats.SetLastPosition(vp.pos) + posReached = !vp.stopPos.IsZero() && vp.pos.Equal(vp.stopPos) + if posReached { + if vp.saveStop { + if err := vp.vr.setState(binlogplayer.BlpStopped, fmt.Sprintf("Stopped at position %v", vp.stopPos)); err != nil { + return false, err + } + } + } + return posReached, nil +} + +func (vp *vplayer) exec(ctx context.Context, sql string) error { + vp.vr.stats.Timings.Record("query", time.Now()) + _, err := vp.vr.dbClient.ExecuteFetch(sql, 0) + for err != nil { + if sqlErr, ok := err.(*mysql.SQLError); ok && (sqlErr.Number() == mysql.ERLockDeadlock || sqlErr.Number() == mysql.ERLockWaitTimeout) { + log.Infof("retryable error: %v, waiting for %v and retrying", sqlErr, dbLockRetryDelay) + if err := vp.vr.dbClient.Rollback(); err != nil { + return err + } + time.Sleep(dbLockRetryDelay) + // Check context here. Otherwise this can become an infinite loop.
+ select { + case <-ctx.Done(): + return io.EOF + default: + } + err = vp.vr.dbClient.Retry() + continue } + return err } - return tableKeys, nil + return nil } func (vp *vplayer) applyEvents(ctx context.Context, relay *relayLog) error { @@ -290,7 +240,7 @@ func (vp *vplayer) applyEvents(ctx context.Context, relay *relayLog) error { // So, we should assume we're falling behind. if len(items) == 0 { behind := time.Now().UnixNano() - vp.lastTimestampNs - vp.timeOffsetNs - vp.stats.SecondsBehindMaster.Set(behind / 1e9) + vp.vr.stats.SecondsBehindMaster.Set(behind / 1e9) } // Filtered replication often ends up receiving a large number of empty transactions. // This is required because the player needs to know the latest position of the source. @@ -305,13 +255,14 @@ func (vp *vplayer) applyEvents(ctx context.Context, relay *relayLog) error { // 1. Fetch was idle for idleTimeout. // 2. We've been receiving empty events for longer than idleTimeout. // In both cases, now > timeLastSaved. If so, any unsaved GTID should be saved. - if time.Since(vp.timeLastSaved) >= idleTimeout && vp.unsavedGTID != nil { - // Although unlikely, we should not save if a transaction is still open. - // This can happen if a large transaction is split as multiple events. - if !vp.dbClient.InTransaction { - if err := vp.updatePos(vp.unsavedGTID.Timestamp); err != nil { - return err - } + if time.Since(vp.timeLastSaved) >= idleTimeout && vp.unsavedEvent != nil { + posReached, err := vp.updatePos(vp.unsavedEvent.Timestamp) + if err != nil { + return err + } + if posReached { + // Unreachable. 
+ return nil } } for i, events := range items { @@ -319,7 +270,7 @@ func (vp *vplayer) applyEvents(ctx context.Context, relay *relayLog) error { if event.Timestamp != 0 { vp.lastTimestampNs = event.Timestamp * 1e9 vp.timeOffsetNs = time.Now().UnixNano() - event.CurrentTime - vp.stats.SecondsBehindMaster.Set(event.CurrentTime/1e9 - event.Timestamp) + vp.vr.stats.SecondsBehindMaster.Set(event.CurrentTime/1e9 - event.Timestamp) } mustSave := false switch event.Type { @@ -366,13 +317,14 @@ func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, m return err } vp.pos = pos - vp.unsavedGTID = event + // A new position should not be saved until a commit or DDL. + vp.unsavedEvent = nil if vp.stopPos.IsZero() { return nil } if !vp.pos.Equal(vp.stopPos) && vp.pos.AtLeast(vp.stopPos) { // Code is unreachable, but bad data can cause this to happen. - if err := vp.setState(binlogplayer.BlpStopped, fmt.Sprintf("next event position %v exceeds stop pos %v, exiting without applying", vp.pos, vp.stopPos)); err != nil { + if err := vp.vr.setState(binlogplayer.BlpStopped, fmt.Sprintf("next event position %v exceeds stop pos %v, exiting without applying", vp.pos, vp.stopPos)); err != nil { return err } return io.EOF @@ -381,61 +333,67 @@ func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, m // No-op: begin is called as needed. case binlogdatapb.VEventType_COMMIT: if mustSave { - if err := vp.dbClient.Begin(); err != nil { + if err := vp.vr.dbClient.Begin(); err != nil { return err } } - if !vp.dbClient.InTransaction { + if !vp.vr.dbClient.InTransaction { + // We're skipping an empty transaction. We may have to save the position on inactivity. 
+ vp.unsavedEvent = event return nil } - if err := vp.updatePos(event.Timestamp); err != nil { + posReached, err := vp.updatePos(event.Timestamp) + if err != nil { return err } - posReached := !vp.stopPos.IsZero() && vp.pos.Equal(vp.stopPos) - if posReached { - if err := vp.setState(binlogplayer.BlpStopped, fmt.Sprintf("Stopped at position %v", vp.stopPos)); err != nil { - return err - } - } - if err := vp.dbClient.Commit(); err != nil { + if err := vp.vr.dbClient.Commit(); err != nil { return err } if posReached { return io.EOF } case binlogdatapb.VEventType_FIELD: - if err := vp.dbClient.Begin(); err != nil { + if err := vp.vr.dbClient.Begin(); err != nil { return err } - if err := vp.updatePlan(event.FieldEvent); err != nil { + tplan, err := vp.replicatorPlan.buildExecutionPlan(event.FieldEvent) + if err != nil { return err } + vp.tablePlans[event.FieldEvent.TableName] = tplan case binlogdatapb.VEventType_ROW: - if err := vp.dbClient.Begin(); err != nil { + if err := vp.vr.dbClient.Begin(); err != nil { return err } if err := vp.applyRowEvent(ctx, event.RowEvent); err != nil { return err } case binlogdatapb.VEventType_DDL: - if vp.dbClient.InTransaction { + if vp.vr.dbClient.InTransaction { return fmt.Errorf("unexpected state: DDL encountered in the middle of a transaction: %v", event.Ddl) } - switch vp.source.OnDdl { + switch vp.vr.source.OnDdl { case binlogdatapb.OnDDLAction_IGNORE: - // no-op + // We still have to update the position. 
+ posReached, err := vp.updatePos(event.Timestamp) + if err != nil { + return err + } + if posReached { + return io.EOF + } case binlogdatapb.OnDDLAction_STOP: - if err := vp.dbClient.Begin(); err != nil { + if err := vp.vr.dbClient.Begin(); err != nil { return err } - if err := vp.updatePos(event.Timestamp); err != nil { + if _, err := vp.updatePos(event.Timestamp); err != nil { return err } - if err := vp.setState(binlogplayer.BlpStopped, fmt.Sprintf("Stopped at DDL %s", event.Ddl)); err != nil { + if err := vp.vr.setState(binlogplayer.BlpStopped, fmt.Sprintf("Stopped at DDL %s", event.Ddl)); err != nil { return err } - if err := vp.dbClient.Commit(); err != nil { + if err := vp.vr.dbClient.Commit(); err != nil { return err } return io.EOF @@ -443,105 +401,27 @@ func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, m if err := vp.exec(ctx, event.Ddl); err != nil { return err } - if err := vp.updatePos(event.Timestamp); err != nil { + posReached, err := vp.updatePos(event.Timestamp) + if err != nil { return err } + if posReached { + return io.EOF + } case binlogdatapb.OnDDLAction_EXEC_IGNORE: if err := vp.exec(ctx, event.Ddl); err != nil { log.Infof("Ignoring error: %v for DDL: %s", err, event.Ddl) } - if err := vp.updatePos(event.Timestamp); err != nil { + posReached, err := vp.updatePos(event.Timestamp) + if err != nil { return err } - } - case binlogdatapb.VEventType_HEARTBEAT: - // No-op: heartbeat timings are calculated in outer loop. 
- } - return nil -} - -func (vp *vplayer) setState(state, message string) error { - return binlogplayer.SetVReplicationState(vp.dbClient, vp.id, state, message) -} - -func (vp *vplayer) updatePlan(fieldEvent *binlogdatapb.FieldEvent) error { - prelim := vp.pplan.TablePlans[fieldEvent.TableName] - if prelim == nil { - prelim = &TablePlan{ - Name: fieldEvent.TableName, - } - } - if prelim.Insert != nil { - prelim.Fields = fieldEvent.Fields - vp.tplans[fieldEvent.TableName] = prelim - return nil - } - tplan, err := buildTablePlanFromFields(prelim.Name, fieldEvent.Fields, vp.tableKeys) - if err != nil { - return err - } - tplan.Fields = fieldEvent.Fields - vp.tplans[fieldEvent.TableName] = tplan - return nil -} - -func (vp *vplayer) applyRowEvent(ctx context.Context, rowEvent *binlogdatapb.RowEvent) error { - tplan := vp.tplans[rowEvent.TableName] - if tplan == nil { - return fmt.Errorf("unexpected event on table %s", rowEvent.TableName) - } - for _, change := range rowEvent.RowChanges { - queries, err := tplan.generateStatements(change) - if err != nil { - return err - } - for _, query := range queries { - if err := vp.exec(ctx, query); err != nil { - return err - } - } - } - return nil -} - -func (vp *vplayer) updatePos(ts int64) error { - updatePos := binlogplayer.GenerateUpdatePos(vp.id, vp.pos, time.Now().Unix(), ts) - if _, err := vp.dbClient.ExecuteFetch(updatePos, 0); err != nil { - vp.dbClient.Rollback() - return fmt.Errorf("error %v updating position", err) - } - vp.unsavedGTID = nil - vp.timeLastSaved = time.Now() - vp.stats.SetLastPosition(vp.pos) - return nil -} - -func (vp *vplayer) exec(ctx context.Context, sql string) error { - vp.stats.Timings.Record("query", time.Now()) - _, err := vp.dbClient.ExecuteFetch(sql, 0) - for err != nil { - if sqlErr, ok := err.(*mysql.SQLError); ok && sqlErr.Number() == mysql.ERLockDeadlock || sqlErr.Number() == mysql.ERLockWaitTimeout { - log.Infof("retryable error: %v, waiting for %v and retrying", sqlErr, 
dbLockRetryDelay) - if err := vp.dbClient.Rollback(); err != nil { - return err - } - time.Sleep(dbLockRetryDelay) - // Check context here. Otherwise this can become an infinite loop. - select { - case <-ctx.Done(): + if posReached { return io.EOF - default: } - err = vp.dbClient.Retry() - continue } - return err + case binlogdatapb.VEventType_HEARTBEAT: + // No-op: heartbeat timings are calculated in outer loop. } return nil } - -func encodeString(in string) string { - var buf strings.Builder - sqltypes.NewVarChar(in).EncodeSQL(&buf) - return buf.String() -} diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer_copy_test.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer_copy_test.go deleted file mode 100644 index 7b2e1b41aa4..00000000000 --- a/go/vt/vttablet/tabletmanager/vreplication/vplayer_copy_test.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package vreplication - -import ( - "fmt" - "strings" - "testing" - - "golang.org/x/net/context" - - "vitess.io/vitess/go/vt/binlog/binlogplayer" - - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" -) - -func TestPlayerInitTables(t *testing.T) { - defer deleteTablet(addTablet(100, "0", topodatapb.TabletType_REPLICA, true, true)) - - execStatements(t, []string{ - "create table src1(id int, val varbinary(128), primary key(id))", - fmt.Sprintf("create table %s.dst1(id int, val varbinary(128), primary key(id))", vrepldb), - "create table yes(id int, val varbinary(128), primary key(id))", - fmt.Sprintf("create table %s.yes(id int, val varbinary(128), primary key(id))", vrepldb), - "create table no(id int, val varbinary(128), primary key(id))", - }) - defer execStatements(t, []string{ - "drop table src1", - fmt.Sprintf("drop table %s.dst1", vrepldb), - "drop table yes", - fmt.Sprintf("drop table %s.yes", vrepldb), - "drop table no", - }) - env.SchemaEngine.Reload(context.Background()) - - filter := &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "dst1", - Filter: "select * from src1", - }, { - Match: "dst2", - Filter: "select id, val1, sum(val2) as sval2, count(*) as rcount from src2 group by id", - }, { - Match: "/yes", - }}, - } - - bls := &binlogdatapb.BinlogSource{ - Keyspace: env.KeyspaceName, - Shard: env.ShardName, - Filter: filter, - OnDdl: binlogdatapb.OnDDLAction_IGNORE, - } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName) - qr, err := playerEngine.Exec(query) - if err != nil { - t.Fatal(err) - } - defer func() { - query := fmt.Sprintf("delete from _vt.vreplication where id = %d", qr.InsertID) - if _, err := playerEngine.Exec(query); err != nil { - t.Fatal(err) - } - expectDBClientQueries(t, []string{ - "/delete", - }) - }() - - for q := range globalDBQueries { - if strings.HasPrefix(q, "create table if not exists 
_vt.copy_state") { - break - } - } - - expectDBClientQueries(t, []string{ - "begin", - "/insert into _vt.copy_state", - "/update _vt.vreplication set state='Copying'", - "commit", - "rollback", - }) -} diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer_test.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer_test.go index d806fa2e4ef..e2e822462bb 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vplayer_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer_test.go @@ -92,7 +92,7 @@ func TestPlayerFilters(t *testing.T) { input: "insert into src1 values(1, 'aaa')", output: []string{ "begin", - "insert into dst1 set id=1, val='aaa'", + "insert into dst1(id,val) values (1,'aaa')", "/update _vt.vreplication set pos=", "commit", }, @@ -129,7 +129,7 @@ func TestPlayerFilters(t *testing.T) { input: "insert into src2 values(1, 2, 3)", output: []string{ "begin", - "insert into dst2 set id=1, val1=2, sval2=ifnull(3, 0), rcount=1 on duplicate key update val1=2, sval2=sval2+ifnull(3, 0), rcount=rcount+1", + "insert into dst2(id,val1,sval2,rcount) values (1,2,ifnull(3, 0),1) on duplicate key update val1=values(val1), sval2=sval2+ifnull(values(sval2), 0), rcount=rcount+1", "/update _vt.vreplication set pos=", "commit", }, @@ -168,7 +168,7 @@ func TestPlayerFilters(t *testing.T) { input: "insert into src3 values(1, 'aaa')", output: []string{ "begin", - "insert ignore into dst3 set id=1, val='aaa'", + "insert ignore into dst3(id,val) values (1,'aaa')", "/update _vt.vreplication set pos=", "commit", }, @@ -181,7 +181,7 @@ func TestPlayerFilters(t *testing.T) { input: "update src3 set val='bbb'", output: []string{ "begin", - "insert ignore into dst3 set id=1, val='bbb'", + "insert ignore into dst3(id,val) values (1,'bbb')", "/update _vt.vreplication set pos=", "commit", }, @@ -206,7 +206,7 @@ func TestPlayerFilters(t *testing.T) { input: "insert into yes values(1, 'aaa')", output: []string{ "begin", - "insert into yes set id=1, val='aaa'", + "insert into 
yes(id,val) values (1,'aaa')", "/update _vt.vreplication set pos=", "commit", }, @@ -236,7 +236,7 @@ func TestPlayerFilters(t *testing.T) { input: "insert into nopk values(1, 'aaa')", output: []string{ "begin", - "insert into nopk set id=1, val='aaa'", + "insert into nopk(id,val) values (1,'aaa')", "/update _vt.vreplication set pos=", "commit", }, @@ -250,7 +250,7 @@ func TestPlayerFilters(t *testing.T) { output: []string{ "begin", "delete from nopk where id=1 and val='aaa'", - "insert into nopk set id=1, val='bbb'", + "insert into nopk(id,val) values (1,'bbb')", "/update _vt.vreplication set pos=", "commit", }, @@ -310,7 +310,7 @@ func TestPlayerUpdates(t *testing.T) { }{{ // Start with all nulls input: "insert into t1 values(1, null, null, null)", - output: "insert into t1 set id=1, grouped=null, ungrouped=null, summed=ifnull(null, 0), rcount=1 on duplicate key update ungrouped=null, summed=summed+ifnull(null, 0), rcount=rcount+1", + output: "insert into t1(id,grouped,ungrouped,summed,rcount) values (1,null,null,ifnull(null, 0),1) on duplicate key update ungrouped=values(ungrouped), summed=summed+ifnull(values(summed), 0), rcount=rcount+1", table: "t1", data: [][]string{ {"1", "", "", "0", "1"}, @@ -350,7 +350,7 @@ func TestPlayerUpdates(t *testing.T) { }, { // insert non-null values input: "insert into t1 values(2, 2, 3, 4)", - output: "insert into t1 set id=2, grouped=2, ungrouped=3, summed=ifnull(4, 0), rcount=1 on duplicate key update ungrouped=3, summed=summed+ifnull(4, 0), rcount=rcount+1", + output: "insert into t1(id,grouped,ungrouped,summed,rcount) values (2,2,3,ifnull(4, 0),1) on duplicate key update ungrouped=values(ungrouped), summed=summed+ifnull(values(summed), 0), rcount=rcount+1", table: "t1", data: [][]string{ {"1", "", "", "0", "1"}, @@ -416,9 +416,9 @@ func TestPlayerRowMove(t *testing.T) { }) expectDBClientQueries(t, []string{ "begin", - "insert into dst set val1=1, sval2=ifnull(1, 0), rcount=1 on duplicate key update sval2=sval2+ifnull(1, 0), 
rcount=rcount+1", - "insert into dst set val1=2, sval2=ifnull(2, 0), rcount=1 on duplicate key update sval2=sval2+ifnull(2, 0), rcount=rcount+1", - "insert into dst set val1=2, sval2=ifnull(3, 0), rcount=1 on duplicate key update sval2=sval2+ifnull(3, 0), rcount=rcount+1", + "insert into dst(val1,sval2,rcount) values (1,ifnull(1, 0),1) on duplicate key update sval2=sval2+ifnull(values(sval2), 0), rcount=rcount+1", + "insert into dst(val1,sval2,rcount) values (2,ifnull(2, 0),1) on duplicate key update sval2=sval2+ifnull(values(sval2), 0), rcount=rcount+1", + "insert into dst(val1,sval2,rcount) values (2,ifnull(3, 0),1) on duplicate key update sval2=sval2+ifnull(values(sval2), 0), rcount=rcount+1", "/update _vt.vreplication set pos=", "commit", }) @@ -433,7 +433,7 @@ func TestPlayerRowMove(t *testing.T) { expectDBClientQueries(t, []string{ "begin", "update dst set sval2=sval2-ifnull(3, 0), rcount=rcount-1 where val1=2", - "insert into dst set val1=1, sval2=ifnull(4, 0), rcount=1 on duplicate key update sval2=sval2+ifnull(4, 0), rcount=rcount+1", + "insert into dst(val1,sval2,rcount) values (1,ifnull(4, 0),1) on duplicate key update sval2=sval2+ifnull(values(sval2), 0), rcount=rcount+1", "/update _vt.vreplication set pos=", "commit", }) @@ -492,42 +492,42 @@ func TestPlayerTypes(t *testing.T) { data [][]string }{{ input: "insert into vitess_ints values(-128, 255, -32768, 65535, -8388608, 16777215, -2147483648, 4294967295, -9223372036854775808, 18446744073709551615, 2012)", - output: "insert into vitess_ints set tiny=-128, tinyu=255, small=-32768, smallu=65535, medium=-8388608, mediumu=16777215, normal=-2147483648, normalu=4294967295, big=-9223372036854775808, bigu=18446744073709551615, y=2012", + output: "insert into vitess_ints(tiny,tinyu,small,smallu,medium,mediumu,normal,normalu,big,bigu,y) values (-128,255,-32768,65535,-8388608,16777215,-2147483648,4294967295,-9223372036854775808,18446744073709551615,2012)", table: "vitess_ints", data: [][]string{ {"-128", "255", 
"-32768", "65535", "-8388608", "16777215", "-2147483648", "4294967295", "-9223372036854775808", "18446744073709551615", "2012"}, }, }, { input: "insert into vitess_fracts values(1, 1.99, 2.99, 3.99, 4.99)", - output: "insert into vitess_fracts set id=1, deci=1.99, num=2.99, f=3.99E+00, d=4.99E+00", + output: "insert into vitess_fracts(id,deci,num,f,d) values (1,1.99,2.99,3.99E+00,4.99E+00)", table: "vitess_fracts", data: [][]string{ {"1", "1.99", "2.99", "3.99", "4.99"}, }, }, { input: "insert into vitess_strings values('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'a', 'a,b')", - output: "insert into vitess_strings set vb='a', c='b', vc='c', b='d\\0\\0\\0', tb='e', bl='f', ttx='g', tx='h', en='1', s='3'", + output: "insert into vitess_strings(vb,c,vc,b,tb,bl,ttx,tx,en,s) values ('a','b','c','d\\0\\0\\0','e','f','g','h','1','3')", table: "vitess_strings", data: [][]string{ {"a", "b", "c", "d\x00\x00\x00", "e", "f", "g", "h", "a", "a,b"}, }, }, { input: "insert into vitess_misc values(1, '\x01', '2012-01-01', '2012-01-01 15:45:45', '15:45:45', point(1, 2))", - output: "insert into vitess_misc set id=1, b=b'00000001', d='2012-01-01', dt='2012-01-01 15:45:45', t='15:45:45', g='\\0\\0\\0\\0\x01\x01\\0\\0\\0\\0\\0\\0\\0\\0\\0\xf0?\\0\\0\\0\\0\\0\\0\\0@'", + output: "insert into vitess_misc(id,b,d,dt,t,g) values (1,b'00000001','2012-01-01','2012-01-01 15:45:45','15:45:45','\\0\\0\\0\\0\x01\x01\\0\\0\\0\\0\\0\\0\\0\\0\\0\xf0?\\0\\0\\0\\0\\0\\0\\0@')", table: "vitess_misc", data: [][]string{ {"1", "\x01", "2012-01-01", "2012-01-01 15:45:45", "15:45:45", "\x00\x00\x00\x00\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\x00@"}, }, }, { input: "insert into vitess_null values(1, null)", - output: "insert into vitess_null set id=1, val=null", + output: "insert into vitess_null(id,val) values (1,null)", table: "vitess_null", data: [][]string{ {"1", ""}, }, }, { input: "insert into binary_pk values('a', 'aaa')", - output: "insert into binary_pk set 
b='a\\0\\0\\0', val='aaa'", + output: "insert into binary_pk(b,val) values ('a\\0\\0\\0','aaa')", table: "binary_pk", data: [][]string{ {"a\x00\x00\x00", "aaa"}, @@ -583,14 +583,14 @@ func TestPlayerDDL(t *testing.T) { execStatements(t, []string{"insert into t1 values(1)"}) expectDBClientQueries(t, []string{ "begin", - "insert into t1 set id=1", + "insert into t1(id) values (1)", "/update _vt.vreplication set pos=", "commit", }) execStatements(t, []string{"alter table t1 add column val varchar(128)"}) execStatements(t, []string{"alter table t1 drop column val"}) - expectDBClientQueries(t, []string{}) + expectDBClientQueries(t, []string{"/update _vt.vreplication set pos="}) cancel() cancel, id := startVReplication(t, filter, binlogdatapb.OnDDLAction_STOP, "") @@ -612,6 +612,7 @@ func TestPlayerDDL(t *testing.T) { // It should stop at the next DDL expectDBClientQueries(t, []string{ "/update.*'Running'", + // Second update is from vreplicator. "/update.*'Running'", "begin", fmt.Sprintf("/update.*'%s'", pos2), @@ -630,7 +631,7 @@ func TestPlayerDDL(t *testing.T) { execStatements(t, []string{"alter table t1 add column val2 varchar(128)"}) expectDBClientQueries(t, []string{ "alter table t1 add column val2 varchar(128)", - "/update _vt.vreplication set state='Error'", + "/update _vt.vreplication set message='Duplicate", }) cancel() @@ -704,10 +705,11 @@ func TestPlayerStopPos(t *testing.T) { t.Fatal(err) } expectDBClientQueries(t, []string{ - "/update.*'Running'", // done by Engine - "/update.*'Running'", // done by vplayer on start + "/update.*'Running'", + // Second update is from vreplicator. 
+ "/update.*'Running'", "begin", - "insert into yes set id=1, val='aaa'", + "insert into yes(id,val) values (1,'aaa')", fmt.Sprintf("/update.*'%s'", stopPos), "/update.*'Stopped'", "commit", @@ -727,8 +729,9 @@ func TestPlayerStopPos(t *testing.T) { t.Fatal(err) } expectDBClientQueries(t, []string{ - "/update.*'Running'", // done by Engine - "/update.*'Running'", // done by vplayer on start + "/update.*'Running'", + // Second update is from vreplicator. + "/update.*'Running'", "begin", // Since 'no' generates empty transactions that are skipped by // vplayer, a commit is done only for the stop position event. @@ -743,8 +746,9 @@ func TestPlayerStopPos(t *testing.T) { t.Fatal(err) } expectDBClientQueries(t, []string{ - "/update.*'Running'", // done by Engine - "/update.*'Running'", // done by vplayer on start + "/update.*'Running'", + // Second update is from vreplicator. + "/update.*'Running'", "/update.*'Stopped'.*already reached", }) } @@ -780,7 +784,7 @@ func TestPlayerIdleUpdate(t *testing.T) { start := time.Now() expectDBClientQueries(t, []string{ "begin", - "insert into t1 set id=1, val='aaa'", + "insert into t1(id,val) values (1,'aaa')", "/update _vt.vreplication set pos=", "commit", }) @@ -830,8 +834,8 @@ func TestPlayerSplitTransaction(t *testing.T) { // but still combined as one transaction. 
expectDBClientQueries(t, []string{ "begin", - "insert into t1 set id=1, val='123456'", - "insert into t1 set id=2, val='789012'", + "insert into t1(id,val) values (1,'123456')", + "insert into t1(id,val) values (2,'789012')", "/update _vt.vreplication set pos=", "commit", }) @@ -866,8 +870,8 @@ func TestPlayerLockErrors(t *testing.T) { }) expectDBClientQueries(t, []string{ "begin", - "insert into t1 set id=1, val='aaa'", - "insert into t1 set id=2, val='bbb'", + "insert into t1(id,val) values (1,'aaa')", + "insert into t1(id,val) values (2,'bbb')", "/update _vt.vreplication set pos=", "commit", }) @@ -939,7 +943,7 @@ func TestPlayerCancelOnLock(t *testing.T) { }) expectDBClientQueries(t, []string{ "begin", - "insert into t1 set id=1, val='aaa'", + "insert into t1(id,val) values (1,'aaa')", "/update _vt.vreplication set pos=", "commit", }) @@ -1009,7 +1013,7 @@ func TestPlayerBatching(t *testing.T) { }) expectDBClientQueries(t, []string{ "begin", - "insert into t1 set id=1, val='aaa'", + "insert into t1(id,val) values (1,'aaa')", "/update _vt.vreplication set pos=", "commit", }) @@ -1055,8 +1059,8 @@ func TestPlayerBatching(t *testing.T) { "/update _vt.vreplication set pos=", "commit", "begin", - "insert into t1 set id=2, val='aaa'", - "insert into t1 set id=3, val='aaa'", + "insert into t1(id,val) values (2,'aaa')", + "insert into t1(id,val) values (3,'aaa')", "/update _vt.vreplication set pos=", "commit", "alter table t1 add column val2 varbinary(128)", @@ -1106,7 +1110,7 @@ func TestPlayerRelayLogMaxSize(t *testing.T) { }) expectDBClientQueries(t, []string{ "begin", - "insert into t1 set id=1, val='123456'", + "insert into t1(id,val) values (1,'123456')", "/update _vt.vreplication set pos=", "commit", }) @@ -1152,12 +1156,12 @@ func TestPlayerRelayLogMaxSize(t *testing.T) { "/update _vt.vreplication set pos=", "commit", "begin", - "insert into t1 set id=2, val='789012'", - "insert into t1 set id=3, val='345678'", + "insert into t1(id,val) values (2,'789012')", + 
"insert into t1(id,val) values (3,'345678')", "/update _vt.vreplication set pos=", "commit", "begin", - "insert into t1 set id=4, val='901234'", + "insert into t1(id,val) values (4,'901234')", "/update _vt.vreplication set pos=", "commit", }) @@ -1195,14 +1199,14 @@ func TestRestartOnVStreamEnd(t *testing.T) { }) expectDBClientQueries(t, []string{ "begin", - "insert into t1 set id=1, val='aaa'", + "insert into t1(id,val) values (1,'aaa')", "/update _vt.vreplication set pos=", "commit", }) streamerEngine.Close() expectDBClientQueries(t, []string{ - "/update.*'Error'.*vstream ended", + "/update _vt.vreplication set message='vstream ended'", }) if err := streamerEngine.Open(env.KeyspaceName, env.ShardName); err != nil { t.Fatal(err) @@ -1212,9 +1216,9 @@ func TestRestartOnVStreamEnd(t *testing.T) { "insert into t1 values(2, 'aaa')", }) expectDBClientQueries(t, []string{ - "/update.*'Running'", + "/update _vt.vreplication set state='Running'", "begin", - "insert into t1 set id=2, val='aaa'", + "insert into t1(id,val) values (2,'aaa')", "/update _vt.vreplication set pos=", "commit", }) @@ -1255,7 +1259,7 @@ func TestTimestamp(t *testing.T) { "begin", // The insert value for ts will be in UTC. // We'll check the row instead. 
- "/insert into t1 set id=", + "/insert into t1", "/update _vt.vreplication set pos=", "commit", }) @@ -1287,12 +1291,10 @@ func startVReplication(t *testing.T, filter *binlogdatapb.Filter, onddl binlogda if err != nil { t.Fatal(err) } - // Eat all the initialization queries - for q := range globalDBQueries { - if strings.HasPrefix(q, "update") { - break - } - } + expectDBClientQueries(t, []string{ + "/insert into _vt.vreplication", + "/update _vt.vreplication set state='Running'", + }) return func() { t.Helper() query := fmt.Sprintf("delete from _vt.vreplication where id = %d", qr.InsertID) diff --git a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go new file mode 100644 index 00000000000..108d1c8efe0 --- /dev/null +++ b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go @@ -0,0 +1,187 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vreplication + +import ( + "fmt" + "strings" + "time" + + "golang.org/x/net/context" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/mysqlctl" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +var ( + // idleTimeout is set to slightly above 1s, compared to heartbeatTime + // set by VStreamer at slightly below 1s. 
This minimizes conflicts + // between the two timeouts. + idleTimeout = 1100 * time.Millisecond + dbLockRetryDelay = 1 * time.Second + relayLogMaxSize = 30000 + relayLogMaxItems = 1000 + copyTimeout = 1 * time.Hour + replicaLagTolerance = 10 * time.Second + + // CreateCopyState is the list of statements to execute for creating + // the _vt.copy_state table + CreateCopyState = []string{ + `create table if not exists _vt.copy_state ( + vrepl_id int, + table_name varbinary(128), + lastpk varbinary(2000), + primary key (vrepl_id, table_name))`} +) + +type vreplicator struct { + id uint32 + source *binlogdatapb.BinlogSource + sourceTablet *topodatapb.Tablet + stats *binlogplayer.Stats + dbClient *vdbClient + // mysqld is used to fetch the local schema. + mysqld mysqlctl.MysqlDaemon + + tableKeys map[string][]string +} + +func newVReplicator(id uint32, source *binlogdatapb.BinlogSource, sourceTablet *topodatapb.Tablet, stats *binlogplayer.Stats, dbClient binlogplayer.DBClient, mysqld mysqlctl.MysqlDaemon) *vreplicator { + return &vreplicator{ + id: id, + source: source, + sourceTablet: sourceTablet, + stats: stats, + dbClient: newVDBClient(dbClient, stats), + mysqld: mysqld, + } +} + +func (vr *vreplicator) Replicate(ctx context.Context) error { + tableKeys, err := vr.buildTableKeys() + if err != nil { + return err + } + vr.tableKeys = tableKeys + + for { + settings, numTablesToCopy, err := vr.readSettings(ctx) + if err != nil { + return fmt.Errorf("error reading VReplication settings: %v", err) + } + // If any of the operations below changed state to Stopped, we should return. 
+ if settings.State == binlogplayer.BlpStopped { + return nil + } + switch { + case numTablesToCopy != 0: + if err := newVCopier(vr).copyNext(ctx, settings); err != nil { + return err + } + case settings.StartPos.IsZero(): + if err := newVCopier(vr).initTablesForCopy(ctx); err != nil { + return err + } + default: + if err := vr.setState(binlogplayer.BlpRunning, ""); err != nil { + return err + } + return newVPlayer(vr, settings, nil, mysql.Position{}).play(ctx) + } + } +} + +func (vr *vreplicator) buildTableKeys() (map[string][]string, error) { + schema, err := vr.mysqld.GetSchema(vr.dbClient.DBName(), []string{"/.*/"}, nil, false) + if err != nil { + return nil, err + } + tableKeys := make(map[string][]string) + for _, td := range schema.TableDefinitions { + if len(td.PrimaryKeyColumns) != 0 { + tableKeys[td.Name] = td.PrimaryKeyColumns + } else { + tableKeys[td.Name] = td.Columns + } + } + return tableKeys, nil +} + +func (vr *vreplicator) readSettings(ctx context.Context) (settings binlogplayer.VRSettings, numTablesToCopy int64, err error) { + settings, err = binlogplayer.ReadVRSettings(vr.dbClient, vr.id) + if err != nil { + return settings, numTablesToCopy, fmt.Errorf("error reading VReplication settings: %v", err) + } + + query := fmt.Sprintf("select count(*) from _vt.copy_state where vrepl_id=%d", vr.id) + qr, err := vr.dbClient.ExecuteFetch(query, 10) + if err != nil { + // If it's a not found error, create it. + merr, isSQLErr := err.(*mysql.SQLError) + if !isSQLErr || !(merr.Num == mysql.ERNoSuchTable || merr.Num == mysql.ERBadDb) { + return settings, numTablesToCopy, err + } + log.Info("Looks like _vt.copy_state table may not exist. Trying to create... ") + for _, query := range CreateCopyState { + if _, merr := vr.dbClient.ExecuteFetch(query, 0); merr != nil { + log.Errorf("Failed to ensure _vt.copy_state table exists: %v", merr) + return settings, numTablesToCopy, err + } + } + // Redo the read. 
+ qr, err = vr.dbClient.ExecuteFetch(query, 10) + if err != nil { + return settings, numTablesToCopy, err + } + } + if len(qr.Rows) == 0 || len(qr.Rows[0]) == 0 { + return settings, numTablesToCopy, fmt.Errorf("unexpected result from %s: %v", query, qr) + } + numTablesToCopy, err = sqltypes.ToInt64(qr.Rows[0][0]) + if err != nil { + return settings, numTablesToCopy, err + } + return settings, numTablesToCopy, nil +} + +func (vr *vreplicator) setMessage(message string) error { + vr.stats.History.Add(&binlogplayer.StatsHistoryRecord{ + Time: time.Now(), + Message: message, + }) + query := fmt.Sprintf("update _vt.vreplication set message=%v where id=%v", encodeString(message), vr.id) + if _, err := vr.dbClient.ExecuteFetch(query, 1); err != nil { + return fmt.Errorf("could not set message: %v: %v", query, err) + } + return nil +} + +func (vr *vreplicator) setState(state, message string) error { + return binlogplayer.SetVReplicationState(vr.dbClient, vr.id, state, message) +} + +func encodeString(in string) string { + var buf strings.Builder + sqltypes.NewVarChar(in).EncodeSQL(&buf) + return buf.String() +} diff --git a/go/vt/vttablet/tabletserver/tabletserver.go b/go/vt/vttablet/tabletserver/tabletserver.go index 003b27708a0..491c1cf604a 100644 --- a/go/vt/vttablet/tabletserver/tabletserver.go +++ b/go/vt/vttablet/tabletserver/tabletserver.go @@ -1325,38 +1325,28 @@ func (tsv *TabletServer) execDML(ctx context.Context, target *querypb.Target, qu // VStream streams VReplication events. func (tsv *TabletServer) VStream(ctx context.Context, target *querypb.Target, startPos string, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error { - // This code is partially duplicated from startRequest. This is because - // is allowed even if the tablet is in non-serving state. 
- err := func() error { - tsv.mu.Lock() - defer tsv.mu.Unlock() - - if target != nil { - // a valid target needs to be used - switch { - case target.Keyspace != tsv.target.Keyspace: - return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid keyspace %v", target.Keyspace) - case target.Shard != tsv.target.Shard: - return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid shard %v", target.Shard) - case target.TabletType != tsv.target.TabletType: - for _, otherType := range tsv.alsoAllow { - if target.TabletType == otherType { - return nil - } - } - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "invalid tablet type: %v, want: %v or %v", target.TabletType, tsv.target.TabletType, tsv.alsoAllow) - } - } else if !tabletenv.IsLocalContext(ctx) { - return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "No target") - } - return nil - }() - if err != nil { + if err := tsv.verifyTarget(ctx, target); err != nil { return err } return tsv.vstreamer.Stream(ctx, startPos, filter, send) } +// VStreamRows streams rows from the specified starting point. +func (tsv *TabletServer) VStreamRows(ctx context.Context, target *querypb.Target, query string, lastpk *querypb.QueryResult, send func(*binlogdatapb.VStreamRowsResponse) error) error { + if err := tsv.verifyTarget(ctx, target); err != nil { + return err + } + var row []sqltypes.Value + if lastpk != nil { + r := sqltypes.Proto3ToResult(lastpk) + if len(r.Rows) != 1 { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unexpected lastpk input: %v", lastpk) + } + row = r.Rows[0] + } + return tsv.vstreamer.StreamRows(ctx, query, row, send) +} + // SplitQuery splits a query + bind variables into smaller queries that return a // subset of rows from the original query. This is the new version that supports multiple // split columns and multiple split algortihms. @@ -1450,6 +1440,32 @@ func (tsv *TabletServer) execRequest( return nil } +// verifyTarget allows requests to be executed even in non-serving state. 
+func (tsv *TabletServer) verifyTarget(ctx context.Context, target *querypb.Target) error { + tsv.mu.Lock() + defer tsv.mu.Unlock() + + if target != nil { + // a valid target needs to be used + switch { + case target.Keyspace != tsv.target.Keyspace: + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid keyspace %v", target.Keyspace) + case target.Shard != tsv.target.Shard: + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid shard %v", target.Shard) + case target.TabletType != tsv.target.TabletType: + for _, otherType := range tsv.alsoAllow { + if target.TabletType == otherType { + return nil + } + } + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "invalid tablet type: %v, want: %v or %v", target.TabletType, tsv.target.TabletType, tsv.alsoAllow) + } + } else if !tabletenv.IsLocalContext(ctx) { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "No target") + } + return nil +} + func (tsv *TabletServer) handlePanicAndSendLogStats( sql string, bindVariables map[string]*querypb.BindVariable, diff --git a/go/vt/vttablet/tabletserver/vstreamer/engine.go b/go/vt/vttablet/tabletserver/vstreamer/engine.go index 9e0e014bd95..7147289fa20 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/engine.go +++ b/go/vt/vttablet/tabletserver/vstreamer/engine.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/acl" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" @@ -55,9 +56,10 @@ type Engine struct { isOpen bool // wg is incremented for every Stream, and decremented on end. // Close waits for all current streams to end by waiting on wg. - wg sync.WaitGroup - streamers map[int]*vstreamer - streamIdx int + wg sync.WaitGroup + streamers map[int]*vstreamer + rowStreamers map[int]*rowStreamer + streamIdx int // watcherOnce is used for initializing kschema // and setting up the vschema watch. 
It's guaranteed that @@ -78,10 +80,11 @@ type Engine struct { // Open and Close can be called multiple times and are idempotent. func NewEngine(ts srvtopo.Server, se *schema.Engine) *Engine { vse := &Engine{ - streamers: make(map[int]*vstreamer), - kschema: &vindexes.KeyspaceSchema{}, - ts: ts, - se: se, + streamers: make(map[int]*vstreamer), + rowStreamers: make(map[int]*rowStreamer), + kschema: &vindexes.KeyspaceSchema{}, + ts: ts, + se: se, } once.Do(func() { vschemaErrors = stats.NewCounter("VSchemaErrors", "Count of VSchema errors") @@ -121,6 +124,10 @@ func (vse *Engine) Close() { // cancel is non-blocking. s.Cancel() } + for _, s := range vse.rowStreamers { + // cancel is non-blocking. + s.Cancel() + } vse.isOpen = false }() @@ -174,6 +181,46 @@ func (vse *Engine) Stream(ctx context.Context, startPos string, filter *binlogda return streamer.Stream() } +// StreamRows streams rows. +func (vse *Engine) StreamRows(ctx context.Context, query string, lastpk []sqltypes.Value, send func(*binlogdatapb.VStreamRowsResponse) error) error { + // Ensure kschema is initialized and the watcher is started. + // Starting of the watcher has to be delayed till the first call to Stream + // because this overhead should be incurred only if someone uses this feature. + vse.watcherOnce.Do(vse.setWatch) + log.Infof("Streaming rows for query %s, lastpk: %s", query, lastpk) + + // Create stream and add it to the map. + rowStreamer, idx, err := func() (*rowStreamer, int, error) { + vse.mu.Lock() + defer vse.mu.Unlock() + if !vse.isOpen { + return nil, 0, errors.New("VStreamer is not open") + } + rowStreamer := newRowStreamer(ctx, vse.cp, vse.se, query, lastpk, vse.kschema, send) + idx := vse.streamIdx + vse.rowStreamers[idx] = rowStreamer + vse.streamIdx++ + // Now that we've added the stream, increment wg. + // This must be done before releasing the lock. 
+ vse.wg.Add(1) + return rowStreamer, idx, nil + }() + if err != nil { + return err + } + + // Remove stream from map and decrement wg when it ends. + defer func() { + vse.mu.Lock() + defer vse.mu.Unlock() + delete(vse.rowStreamers, idx) + vse.wg.Done() + }() + + // No lock is held while streaming, but wg is incremented. + return rowStreamer.Stream() +} + // ServeHTTP shows the current VSchema. func (vse *Engine) ServeHTTP(response http.ResponseWriter, request *http.Request) { if err := acl.CheckAccessHTTP(request, acl.DEBUGGING); err != nil { diff --git a/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go b/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go index db639ae619b..a9e593c0136 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go +++ b/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go @@ -33,7 +33,7 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) -// Plan represents the streaming plan for a table. +// Plan represents the plan for a table. type Plan struct { Table *Table ColExprs []ColExpr @@ -49,19 +49,26 @@ type ColExpr struct { Type querypb.Type } -// Table contains the metadata for a table. The -// name is dervied from mysql's Table_map_log_event. +// Table contains the metadata for a table. type Table struct { - *mysql.TableMap + Name string Columns []schema.TableColumn } -// The filter function needs the ability to perform expression evaluations. This is -// because the consumer of vstream is not just VPlayer. It can also be a dumb client -// like a mysql client that's subscribing to changes. This ability to allow users -// to directly pull events by sending a complex select query. The same reasoning -// applies to where clauses. For now, only simple functions like hour are supported, -// but this can be expanded in the future. +// fields returns the fields for the plan. 
+func (plan *Plan) fields() []*querypb.Field { + fields := make([]*querypb.Field, len(plan.ColExprs)) + for i, ce := range plan.ColExprs { + fields[i] = &querypb.Field{ + Name: ce.Alias.String(), + Type: ce.Type, + } + } + return fields +} + +// filter filters the row against the plan. It returns false if the row did not match. +// If the row matched, it returns the columns to be sent. func (plan *Plan) filter(values []sqltypes.Value) (bool, []sqltypes.Value, error) { result := make([]sqltypes.Value, len(plan.ColExprs)) for i, colExpr := range plan.ColExprs { @@ -211,33 +218,18 @@ func buildREPlan(ti *Table, kschema *vindexes.KeyspaceSchema, filter string) (*P } func buildTablePlan(ti *Table, kschema *vindexes.KeyspaceSchema, query string) (*Plan, error) { - statement, err := sqlparser.Parse(query) + sel, fromTable, err := analyzeSelect(query) if err != nil { return nil, err } - plan := &Plan{ - Table: ti, - } - sel, ok := statement.(*sqlparser.Select) - if !ok { - return nil, fmt.Errorf("unsupported: %v", sqlparser.String(statement)) - } - if len(sel.From) > 1 { - return nil, fmt.Errorf("unsupported: %v", sqlparser.String(sel)) - } - node, ok := sel.From[0].(*sqlparser.AliasedTableExpr) - if !ok { - return nil, fmt.Errorf("unsupported: %v", sqlparser.String(sel)) - } - fromTable := sqlparser.GetTableName(node.Expr) - if fromTable.IsEmpty() { - return nil, fmt.Errorf("unsupported: %v", sqlparser.String(sel)) - } if fromTable.String() != ti.Name { return nil, fmt.Errorf("unsupported: select expression table %v does not match the table entry name %s", sqlparser.String(fromTable), ti.Name) } - if err := plan.analyzeExprs(ti, sel.SelectExprs); err != nil { + plan := &Plan{ + Table: ti, + } + if err := plan.analyzeExprs(sel.SelectExprs); err != nil { return nil, err } @@ -252,16 +244,39 @@ func buildTablePlan(ti *Table, kschema *vindexes.KeyspaceSchema, query string) ( if !funcExpr.Name.EqualString("in_keyrange") { return nil, fmt.Errorf("unsupported where clause: %v", 
sqlparser.String(sel.Where)) } - if err := plan.analyzeInKeyRange(ti, kschema, funcExpr.Exprs); err != nil { + if err := plan.analyzeInKeyRange(kschema, funcExpr.Exprs); err != nil { return nil, err } return plan, nil } -func (plan *Plan) analyzeExprs(ti *Table, selExprs sqlparser.SelectExprs) error { +func analyzeSelect(query string) (sel *sqlparser.Select, fromTable sqlparser.TableIdent, err error) { + statement, err := sqlparser.Parse(query) + if err != nil { + return nil, fromTable, err + } + sel, ok := statement.(*sqlparser.Select) + if !ok { + return nil, fromTable, fmt.Errorf("unsupported: %v", sqlparser.String(statement)) + } + if len(sel.From) > 1 { + return nil, fromTable, fmt.Errorf("unsupported: %v", sqlparser.String(sel)) + } + node, ok := sel.From[0].(*sqlparser.AliasedTableExpr) + if !ok { + return nil, fromTable, fmt.Errorf("unsupported: %v", sqlparser.String(sel)) + } + fromTable = sqlparser.GetTableName(node.Expr) + if fromTable.IsEmpty() { + return nil, fromTable, fmt.Errorf("unsupported: %v", sqlparser.String(sel)) + } + return sel, fromTable, nil +} + +func (plan *Plan) analyzeExprs(selExprs sqlparser.SelectExprs) error { if _, ok := selExprs[0].(*sqlparser.StarExpr); !ok { for _, expr := range selExprs { - cExpr, err := plan.analyzeExpr(ti, expr) + cExpr, err := plan.analyzeExpr(expr) if err != nil { return err } @@ -271,8 +286,8 @@ func (plan *Plan) analyzeExprs(ti *Table, selExprs sqlparser.SelectExprs) error if len(selExprs) != 1 { return fmt.Errorf("unsupported: %v", sqlparser.String(selExprs)) } - plan.ColExprs = make([]ColExpr, len(ti.Columns)) - for i, col := range ti.Columns { + plan.ColExprs = make([]ColExpr, len(plan.Table.Columns)) + for i, col := range plan.Table.Columns { plan.ColExprs[i].ColNum = i plan.ColExprs[i].Alias = col.Name plan.ColExprs[i].Type = col.Type @@ -281,7 +296,7 @@ func (plan *Plan) analyzeExprs(ti *Table, selExprs sqlparser.SelectExprs) error return nil } -func (plan *Plan) analyzeExpr(ti *Table, selExpr 
sqlparser.SelectExpr) (cExpr ColExpr, err error) { +func (plan *Plan) analyzeExpr(selExpr sqlparser.SelectExpr) (cExpr ColExpr, err error) { aliased, ok := selExpr.(*sqlparser.AliasedExpr) if !ok { return ColExpr{}, fmt.Errorf("unsupported: %v", sqlparser.String(selExpr)) @@ -297,25 +312,25 @@ func (plan *Plan) analyzeExpr(ti *Table, selExpr sqlparser.SelectExpr) (cExpr Co if !colname.Qualifier.IsEmpty() { return ColExpr{}, fmt.Errorf("unsupported qualifier for column: %v", sqlparser.String(colname)) } - colnum, err := findColumn(ti, colname.Name) + colnum, err := findColumn(plan.Table, colname.Name) if err != nil { return ColExpr{}, err } - return ColExpr{ColNum: colnum, Alias: as, Type: ti.Columns[colnum].Type}, nil + return ColExpr{ColNum: colnum, Alias: as, Type: plan.Table.Columns[colnum].Type}, nil } -func (plan *Plan) analyzeInKeyRange(ti *Table, kschema *vindexes.KeyspaceSchema, exprs sqlparser.SelectExprs) error { +func (plan *Plan) analyzeInKeyRange(kschema *vindexes.KeyspaceSchema, exprs sqlparser.SelectExprs) error { var colname sqlparser.ColIdent var krExpr sqlparser.SelectExpr switch len(exprs) { case 1: - table := kschema.Tables[ti.Name] + table := kschema.Tables[plan.Table.Name] if table == nil { - return fmt.Errorf("no vschema definition for table %s", ti.Name) + return fmt.Errorf("no vschema definition for table %s", plan.Table.Name) } // Get Primary Vindex. 
if len(table.ColumnVindexes) == 0 { - return fmt.Errorf("table %s has no primary vindex", ti.Name) + return fmt.Errorf("table %s has no primary vindex", plan.Table.Name) } colname = table.ColumnVindexes[0].Columns[0] plan.Vindex = table.ColumnVindexes[0].Vindex diff --git a/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go b/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go index f11e2815281..5d46fb2eede 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go @@ -150,9 +150,7 @@ func TestMustSendDDL(t *testing.T) { func TestPlanbuilder(t *testing.T) { t1 := &Table{ - TableMap: &mysql.TableMap{ - Name: "t1", - }, + Name: "t1", Columns: []schema.TableColumn{{ Name: sqlparser.NewColIdent("id"), Type: sqltypes.Int64, @@ -163,18 +161,14 @@ func TestPlanbuilder(t *testing.T) { } // t1alt has no id column t1alt := &Table{ - TableMap: &mysql.TableMap{ - Name: "t1", - }, + Name: "t1", Columns: []schema.TableColumn{{ Name: sqlparser.NewColIdent("val"), Type: sqltypes.VarBinary, }}, } t2 := &Table{ - TableMap: &mysql.TableMap{ - Name: "t2", - }, + Name: "t2", Columns: []schema.TableColumn{{ Name: sqlparser.NewColIdent("id"), Type: sqltypes.Int64, @@ -420,6 +414,8 @@ func TestPlanbuilder(t *testing.T) { if !reflect.DeepEqual(tcase.outPlan, plan) { t.Errorf("Plan(%v, %v):\n%v, want\n%v", tcase.inTable, tcase.inRule, plan, tcase.outPlan) } + } else if tcase.outPlan != nil { + t.Errorf("Plan(%v, %v):\nnil, want\n%v", tcase.inTable, tcase.inRule, tcase.outPlan) } gotErr := "" if err != nil { diff --git a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go new file mode 100644 index 00000000000..c814060506e --- /dev/null +++ b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go @@ -0,0 +1,288 @@ +/* +Copyright 2019 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vstreamer + +import ( + "context" + "fmt" + "sync" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/vindexes" + "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" +) + +type rowStreamer struct { + ctx context.Context + cancel func() + + cp *mysql.ConnParams + se *schema.Engine + query string + lastpk []sqltypes.Value + send func(*binlogdatapb.VStreamRowsResponse) error + kschema *vindexes.KeyspaceSchema + + plan *Plan + pkColumns []int + sendQuery string +} + +func newRowStreamer(ctx context.Context, cp *mysql.ConnParams, se *schema.Engine, query string, lastpk []sqltypes.Value, kschema *vindexes.KeyspaceSchema, send func(*binlogdatapb.VStreamRowsResponse) error) *rowStreamer { + ctx, cancel := context.WithCancel(ctx) + return &rowStreamer{ + ctx: ctx, + cancel: cancel, + cp: cp, + se: se, + query: query, + lastpk: lastpk, + send: send, + kschema: kschema, + } +} + +func (rs *rowStreamer) Cancel() { + rs.cancel() +} + +func (rs *rowStreamer) Stream() error { + // Ensure se is Open. If vttablet came up in a non_serving role, + // the schema engine may not have been initialized. 
+ if err := rs.se.Open(); err != nil { + return err + } + + if err := rs.buildPlan(); err != nil { + return err + } + + conn, err := mysql.Connect(rs.ctx, rs.cp) + if err != nil { + return err + } + defer conn.Close() + return rs.streamQuery(conn, rs.send) +} + +func (rs *rowStreamer) buildPlan() error { + // This pre-parsing is required to extract the table name + // and create its metadata. + _, fromTable, err := analyzeSelect(rs.query) + if err != nil { + return err + } + st := rs.se.GetTable(fromTable) + if st == nil { + return fmt.Errorf("unknown table %v in schema", fromTable) + } + ti := &Table{ + Name: st.Name.String(), + Columns: st.Columns, + } + rs.plan, err = buildTablePlan(ti, rs.kschema, rs.query) + if err != nil { + return err + } + rs.pkColumns, err = buildPKColumns(st) + if err != nil { + return err + } + rs.sendQuery, err = rs.buildSelect() + if err != nil { + return err + } + return err +} + +func buildPKColumns(st *schema.Table) ([]int, error) { + if len(st.PKColumns) == 0 { + pkColumns := make([]int, len(st.Columns)) + for i := range st.Columns { + pkColumns[i] = i + } + return pkColumns, nil + } + for _, pk := range st.PKColumns { + if pk >= len(st.Columns) { + return nil, fmt.Errorf("primary key %d refers to non-existent column", pk) + } + } + return st.PKColumns, nil +} + +func (rs *rowStreamer) buildSelect() (string, error) { + buf := sqlparser.NewTrackedBuffer(nil) + buf.Myprintf("select ") + prefix := "" + for _, col := range rs.plan.Table.Columns { + buf.Myprintf("%s%v", prefix, col.Name) + prefix = ", " + } + buf.Myprintf(" from %v", sqlparser.NewTableIdent(rs.plan.Table.Name)) + if len(rs.lastpk) != 0 { + if len(rs.lastpk) != len(rs.pkColumns) { + return "", fmt.Errorf("primary key values don't match length: %v vs %v", rs.lastpk, rs.pkColumns) + } + buf.WriteString(" where (") + prefix := "" + for _, pk := range rs.pkColumns { + buf.Myprintf("%s%v", prefix, rs.plan.Table.Columns[pk].Name) + prefix = "," + } + buf.WriteString(") > (") + 
prefix = "" + for _, val := range rs.lastpk { + buf.WriteString(prefix) + prefix = "," + val.EncodeSQL(buf) + } + buf.WriteString(")") + } + buf.Myprintf(" order by ", sqlparser.NewTableIdent(rs.plan.Table.Name)) + prefix = "" + for _, pk := range rs.pkColumns { + buf.Myprintf("%s%v", prefix, rs.plan.Table.Columns[pk].Name) + prefix = ", " + } + return buf.String(), nil +} + +func (rs *rowStreamer) streamQuery(conn *mysql.Conn, send func(*binlogdatapb.VStreamRowsResponse) error) error { + unlock, gtid, err := rs.lockTable() + if err != nil { + return err + } + defer unlock() + + if err := conn.ExecuteStreamFetch(rs.sendQuery); err != nil { + return err + } + + // first call the callback with the fields + flds, err := conn.Fields() + if err != nil { + return err + } + pkfields := make([]*querypb.Field, len(rs.pkColumns)) + for i, pk := range rs.pkColumns { + pkfields[i] = &querypb.Field{ + Name: flds[pk].Name, + Type: flds[pk].Type, + } + } + + err = send(&binlogdatapb.VStreamRowsResponse{ + Fields: rs.plan.fields(), + Pkfields: pkfields, + Gtid: gtid, + }) + if err != nil { + return fmt.Errorf("stream send error: %v", err) + } + if err := unlock(); err != nil { + return err + } + + response := &binlogdatapb.VStreamRowsResponse{} + lastpk := make([]sqltypes.Value, len(rs.pkColumns)) + byteCount := 0 + for { + select { + case <-rs.ctx.Done(): + return fmt.Errorf("stream ended: %v", rs.ctx.Err()) + default: + } + + row, err := conn.FetchNext() + if err != nil { + return err + } + if row == nil { + break + } + for i, pk := range rs.pkColumns { + lastpk[i] = row[pk] + } + ok, filtered, err := rs.plan.filter(row) + if err != nil { + return err + } + if ok { + response.Rows = append(response.Rows, sqltypes.RowToProto3(filtered)) + for _, s := range filtered { + byteCount += s.Len() + } + } + + if byteCount >= *packetSize { + response.Lastpk = sqltypes.RowToProto3(lastpk) + err = send(response) + if err != nil { + return err + } + // empty the rows so we start over, but we 
keep the + // same capacity + response.Rows = response.Rows[:0] + byteCount = 0 + } + } + + if len(response.Rows) > 0 { + response.Lastpk = sqltypes.RowToProto3(lastpk) + err = send(response) + if err != nil { + return err + } + } + + return nil +} + +func (rs *rowStreamer) lockTable() (unlock func() error, gtid string, err error) { + conn, err := mysql.Connect(rs.ctx, rs.cp) + if err != nil { + return nil, "", err + } + // mysql recommends this before locking tables. + if _, err := conn.ExecuteFetch("set autocommit=0", 0, false); err != nil { + return nil, "", err + } + if _, err := conn.ExecuteFetch(fmt.Sprintf("lock tables %s read", sqlparser.String(sqlparser.NewTableIdent(rs.plan.Table.Name))), 0, false); err != nil { + return nil, "", err + } + var once sync.Once + unlock = func() error { + var err error + once.Do(func() { + _, err = conn.ExecuteFetch("unlock tables", 0, false) + conn.Close() + }) + return err + } + pos, err := conn.MasterPosition() + if err != nil { + unlock() + return nil, "", err + } + return unlock, mysql.EncodePosition(pos), nil +} diff --git a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go new file mode 100644 index 00000000000..5612673dcbb --- /dev/null +++ b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go @@ -0,0 +1,237 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vstreamer + +import ( + "context" + "fmt" + "testing" + "time" + + "vitess.io/vitess/go/sqltypes" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" +) + +func TestStreamRowsScan(t *testing.T) { + if testing.Short() { + t.Skip() + } + + execStatements(t, []string{ + // Single PK + "create table t1(id int, val varbinary(128), primary key(id))", + "insert into t1 values (1, 'aaa'), (2, 'bbb')", + // Composite PK + "create table t2(id1 int, id2 int, val varbinary(128), primary key(id1, id2))", + "insert into t2 values (1, 2, 'aaa'), (1, 3, 'bbb')", + // No PK + "create table t3(id int, val varbinary(128))", + "insert into t3 values (1, 'aaa'), (2, 'bbb')", + }) + defer execStatements(t, []string{ + "drop table t1", + "drop table t2", + "drop table t3", + }) + engine.se.Reload(context.Background()) + + // t1: all rows + wantStream := []string{ + `fields: fields: pkfields: `, + `rows: rows: lastpk: `, + } + wantQuery := "select id, val from t1 order by id" + checkStream(t, "select * from t1", nil, wantQuery, wantStream) + + // t1: lastpk=1 + wantStream = []string{ + `fields: fields: pkfields: `, + `rows: lastpk: `, + } + wantQuery = "select id, val from t1 where (id) > (1) order by id" + checkStream(t, "select * from t1", []sqltypes.Value{sqltypes.NewInt64(1)}, wantQuery, wantStream) + + // t1: different column ordering + wantStream = []string{ + `fields: fields: pkfields: `, + `rows: rows: lastpk: `, + } + wantQuery = "select id, val from t1 order by id" + checkStream(t, "select val, id from t1", nil, wantQuery, wantStream) + + // t2: all rows + wantStream = []string{ + `fields: fields: fields: pkfields: pkfields: `, + `rows: rows: lastpk: `, + } + wantQuery = "select id1, id2, val from t2 order by id1, id2" + checkStream(t, "select * from t2", nil, wantQuery, wantStream) + + // t2: lastpk=1,2 + wantStream = []string{ + `fields: fields: fields: pkfields: pkfields: `, + `rows: lastpk: `, + } + wantQuery = "select id1, id2, val from t2 where 
(id1,id2) > (1,2) order by id1, id2" + checkStream(t, "select * from t2", []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, wantQuery, wantStream) + + // t3: all rows + wantStream = []string{ + `fields: fields: pkfields: pkfields: `, + `rows: rows: lastpk: `, + } + wantQuery = "select id, val from t3 order by id, val" + checkStream(t, "select * from t3", nil, wantQuery, wantStream) + + // t3: lastpk: 1,'aaa' + wantStream = []string{ + `fields: fields: pkfields: pkfields: `, + `rows: lastpk: `, + } + wantQuery = "select id, val from t3 where (id,val) > (1,'aaa') order by id, val" + checkStream(t, "select * from t3", []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewVarBinary("aaa")}, wantQuery, wantStream) +} + +func TestStreamRowsKeyRange(t *testing.T) { + if testing.Short() { + t.Skip() + } + + if err := env.SetVSchema(shardedVSchema); err != nil { + t.Fatal(err) + } + defer env.SetVSchema("{}") + + execStatements(t, []string{ + "create table t1(id1 int, val varbinary(128), primary key(id1))", + "insert into t1 values (1, 'aaa'), (6, 'bbb')", + }) + defer execStatements(t, []string{ + "drop table t1", + }) + engine.se.Reload(context.Background()) + + time.Sleep(1 * time.Second) + + // Only the first row should be returned, but lastpk should be 6. 
+ wantStream := []string{ + `fields: fields: pkfields: `, + `rows: lastpk: `, + } + wantQuery := "select id1, val from t1 order by id1" + checkStream(t, "select * from t1 where in_keyrange('-80')", nil, wantQuery, wantStream) +} + +func TestStreamRowsMultiPacket(t *testing.T) { + if testing.Short() { + t.Skip() + } + + savedSize := *packetSize + *packetSize = 10 + defer func() { *packetSize = savedSize }() + + execStatements(t, []string{ + "create table t1(id int, val varbinary(128), primary key(id))", + "insert into t1 values (1, '234'), (2, '6789'), (3, '1'), (4, '2345678901'), (5, '2')", + }) + defer execStatements(t, []string{ + "drop table t1", + }) + engine.se.Reload(context.Background()) + + wantStream := []string{ + `fields: fields: pkfields: `, + `rows: rows: rows: lastpk: `, + `rows: lastpk: `, + `rows: lastpk: `, + } + wantQuery := "select id, val from t1 order by id" + checkStream(t, "select * from t1", nil, wantQuery, wantStream) +} + +func TestStreamRowsCancel(t *testing.T) { + if testing.Short() { + t.Skip() + } + + savedSize := *packetSize + *packetSize = 10 + defer func() { *packetSize = savedSize }() + + execStatements(t, []string{ + "create table t1(id int, val varbinary(128), primary key(id))", + "insert into t1 values (1, '234567890'), (2, '234')", + }) + defer execStatements(t, []string{ + "drop table t1", + }) + engine.se.Reload(context.Background()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + err := engine.StreamRows(ctx, "select * from t1", nil, func(rows *binlogdatapb.VStreamRowsResponse) error { + cancel() + return nil + }) + if got, want := err.Error(), "stream ended: context canceled"; got != want { + t.Errorf("err: %v, want %s", err, want) + } +} + +func checkStream(t *testing.T, query string, lastpk []sqltypes.Value, wantQuery string, wantStream []string) { + t.Helper() + + i := 0 + ch := make(chan error) + // We don't want to report errors inside callback functions because + // line numbers come 
out wrong. + go func() { + first := true + defer close(ch) + err := engine.StreamRows(context.Background(), query, lastpk, func(rows *binlogdatapb.VStreamRowsResponse) error { + if first { + if rows.Gtid == "" { + ch <- fmt.Errorf("stream gtid is empty") + } + if got := engine.rowStreamers[engine.streamIdx-1].sendQuery; got != wantQuery { + ch <- fmt.Errorf("query sent:\n%v, want\n%v", got, wantQuery) + } + } + first = false + rows.Gtid = "" + if i >= len(wantStream) { + ch <- fmt.Errorf("unexpected stream rows: %v", rows) + return nil + } + srows := fmt.Sprintf("%v", rows) + if srows != wantStream[i] { + ch <- fmt.Errorf("stream %d:\n%s, want\n%s", i, srows, wantStream[i]) + } + i++ + return nil + }) + if err != nil { + ch <- err + } + }() + for err := range ch { + t.Error(err) + } +} diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go index 6923e0c8b04..010fe66df69 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go @@ -32,10 +32,9 @@ import ( "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - querypb "vitess.io/vitess/go/vt/proto/query" ) -var packetSize = flag.Int("vstream_packet_size", 10000, "Suggested packet size for VReplication streamer. This is used only as a recommendation. The actual packet size may be more or less than this amount.") +var packetSize = flag.Int("vstream_packet_size", 30000, "Suggested packet size for VReplication streamer. This is used only as a recommendation. The actual packet size may be more or less than this amount.") // heartbeatTime is set to slightly below 1s, compared to idleTimeout // set by VPlayer at slightly above 1s. This minimizes conflicts @@ -55,13 +54,20 @@ type vstreamer struct { // A kschema is a VSchema for just one keyspace. 
kevents chan *vindexes.KeyspaceSchema kschema *vindexes.KeyspaceSchema - plans map[uint64]*Plan + plans map[uint64]*streamerPlan // format and pos are updated by parseEvent. format mysql.BinlogFormat pos mysql.Position } +// streamerPlan extends the original plan to also include +// the TableMap which is used to extract values from the binlog events. +type streamerPlan struct { + *Plan + TableMap *mysql.TableMap +} + func newVStreamer(ctx context.Context, cp *mysql.ConnParams, se *schema.Engine, startPos string, filter *binlogdatapb.Filter, kschema *vindexes.KeyspaceSchema, send func([]*binlogdatapb.VEvent) error) *vstreamer { ctx, cancel := context.WithCancel(ctx) return &vstreamer{ @@ -74,7 +80,7 @@ func newVStreamer(ctx context.Context, cp *mysql.ConnParams, se *schema.Engine, send: send, kevents: make(chan *vindexes.KeyspaceSchema, 1), kschema: kschema, - plans: make(map[uint64]*Plan), + plans: make(map[uint64]*streamerPlan), } } @@ -335,38 +341,35 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e vs.plans[id] = nil return nil, nil } - ti := vs.se.GetTable(sqlparser.NewTableIdent(tm.Name)) - if ti == nil { + st := vs.se.GetTable(sqlparser.NewTableIdent(tm.Name)) + if st == nil { return nil, fmt.Errorf("unknown table %v in schema", tm.Name) } - if len(ti.Columns) < len(tm.Types) { - return nil, fmt.Errorf("cannot determine table columns for %s: event has %d columns, current schema has %d: %#v", tm.Name, len(tm.Types), len(ti.Columns), ev) + if len(st.Columns) < len(tm.Types) { + return nil, fmt.Errorf("cannot determine table columns for %s: event has %d columns, current schema has %d: %#v", tm.Name, len(tm.Types), len(st.Columns), ev) } table := &Table{ - TableMap: tm, + Name: st.Name.String(), // Columns should be truncated to match those in tm. 
- Columns: ti.Columns[:len(tm.Types)], + Columns: st.Columns[:len(tm.Types)], } plan, err := buildPlan(table, vs.kschema, vs.filter) if err != nil { return nil, err } - vs.plans[id] = plan if plan == nil { + vs.plans[id] = nil return nil, nil } - fields := make([]*querypb.Field, len(plan.ColExprs)) - for i, ce := range plan.ColExprs { - fields[i] = &querypb.Field{ - Name: ce.Alias.String(), - Type: ce.Type, - } + vs.plans[id] = &streamerPlan{ + Plan: plan, + TableMap: tm, } vevents = append(vevents, &binlogdatapb.VEvent{ Type: binlogdatapb.VEventType_FIELD, FieldEvent: &binlogdatapb.FieldEvent{ TableName: plan.Table.Name, - Fields: fields, + Fields: plan.fields(), }, }) case ev.IsWriteRows() || ev.IsDeleteRows() || ev.IsUpdateRows(): @@ -380,7 +383,7 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e if plan == nil { return nil, nil } - rows, err := ev.Rows(vs.format, plan.Table.TableMap) + rows, err := ev.Rows(vs.format, plan.TableMap) if err != nil { return nil, err } @@ -434,12 +437,15 @@ func (vs *vstreamer) rebuildPlans() error { if err != nil { return err } - vs.plans[id] = newPlan + vs.plans[id] = &streamerPlan{ + Plan: newPlan, + TableMap: plan.TableMap, + } } return nil } -func (vs *vstreamer) extractRowAndFilter(plan *Plan, data []byte, dataColumns, nullColumns mysql.Bitmap) (bool, []sqltypes.Value, error) { +func (vs *vstreamer) extractRowAndFilter(plan *streamerPlan, data []byte, dataColumns, nullColumns mysql.Bitmap) (bool, []sqltypes.Value, error) { if len(data) == 0 { return false, nil, nil } @@ -454,7 +460,7 @@ func (vs *vstreamer) extractRowAndFilter(plan *Plan, data []byte, dataColumns, n valueIndex++ continue } - value, l, err := mysql.CellValue(data, pos, plan.Table.Types[colNum], plan.Table.Metadata[colNum], plan.Table.Columns[colNum].Type) + value, l, err := mysql.CellValue(data, pos, plan.TableMap.Types[colNum], plan.TableMap.Metadata[colNum], plan.Table.Columns[colNum].Type) if err != nil { return false, nil, 
err } diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go index 4d8ceccb157..69f297111d0 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go @@ -175,7 +175,7 @@ func TestRegexp(t *testing.T) { runCases(t, filter, testcases) } -func TestREKeyrange(t *testing.T) { +func TestREKeyRange(t *testing.T) { if testing.Short() { t.Skip() } @@ -357,7 +357,7 @@ func TestDDLAddColumn(t *testing.T) { go func() { defer close(ch) if err := vstream(ctx, t, pos, filter, ch); err != nil { - t.Fatal(err) + t.Error(err) } }() expectLog(ctx, t, "ddls", ch, [][]string{{ diff --git a/proto/binlogdata.proto b/proto/binlogdata.proto index 4942198823a..3779a624cf9 100644 --- a/proto/binlogdata.proto +++ b/proto/binlogdata.proto @@ -231,3 +231,22 @@ message VStreamRequest { message VStreamResponse { repeated VEvent events = 1; } + +// VStreamRowsRequest is the payload for VStreamRows +message VStreamRowsRequest { + vtrpc.CallerID effective_caller_id = 1; + query.VTGateCallerID immediate_caller_id = 2; + query.Target target = 3; + + string query = 4; + query.QueryResult lastpk = 5; +} + +// VStreamRowsResponse is the response from VStreamRows +message VStreamRowsResponse { + repeated query.Field fields = 1; + repeated query.Field pkfields = 2; + string gtid = 3; + repeated query.Row rows = 4; + query.Row lastpk = 5; +} diff --git a/proto/queryservice.proto b/proto/queryservice.proto index 897cbf3f034..8b6494f5e9b 100644 --- a/proto/queryservice.proto +++ b/proto/queryservice.proto @@ -98,4 +98,7 @@ service Query { // VStream streams vreplication events. rpc VStream(binlogdata.VStreamRequest) returns (stream binlogdata.VStreamResponse) {}; + + // VStreamRows streams rows from the specified starting point. 
+ rpc VStreamRows(binlogdata.VStreamRowsRequest) returns (stream binlogdata.VStreamRowsResponse) {}; } diff --git a/py/vtproto/binlogdata_pb2.py b/py/vtproto/binlogdata_pb2.py index fe5c3d42e8c..2e0153062f3 100644 --- a/py/vtproto/binlogdata_pb2.py +++ b/py/vtproto/binlogdata_pb2.py @@ -23,7 +23,7 @@ package='binlogdata', syntax='proto3', serialized_options=_b('Z\'vitess.io/vitess/go/vt/proto/binlogdata'), - serialized_pb=_b('\n\x10\x62inlogdata.proto\x12\nbinlogdata\x1a\x0bvtrpc.proto\x1a\x0bquery.proto\x1a\x0etopodata.proto\"7\n\x07\x43harset\x12\x0e\n\x06\x63lient\x18\x01 \x01(\x05\x12\x0c\n\x04\x63onn\x18\x02 \x01(\x05\x12\x0e\n\x06server\x18\x03 \x01(\x05\"\xb5\x03\n\x11\x42inlogTransaction\x12;\n\nstatements\x18\x01 \x03(\x0b\x32\'.binlogdata.BinlogTransaction.Statement\x12&\n\x0b\x65vent_token\x18\x04 \x01(\x0b\x32\x11.query.EventToken\x1a\xae\x02\n\tStatement\x12\x42\n\x08\x63\x61tegory\x18\x01 \x01(\x0e\x32\x30.binlogdata.BinlogTransaction.Statement.Category\x12$\n\x07\x63harset\x18\x02 \x01(\x0b\x32\x13.binlogdata.Charset\x12\x0b\n\x03sql\x18\x03 \x01(\x0c\"\xa9\x01\n\x08\x43\x61tegory\x12\x13\n\x0f\x42L_UNRECOGNIZED\x10\x00\x12\x0c\n\x08\x42L_BEGIN\x10\x01\x12\r\n\tBL_COMMIT\x10\x02\x12\x0f\n\x0b\x42L_ROLLBACK\x10\x03\x12\x15\n\x11\x42L_DML_DEPRECATED\x10\x04\x12\n\n\x06\x42L_DDL\x10\x05\x12\n\n\x06\x42L_SET\x10\x06\x12\r\n\tBL_INSERT\x10\x07\x12\r\n\tBL_UPDATE\x10\x08\x12\r\n\tBL_DELETE\x10\tJ\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04\"v\n\x15StreamKeyRangeRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\x12$\n\x07\x63harset\x18\x03 \x01(\x0b\x32\x13.binlogdata.Charset\"S\n\x16StreamKeyRangeResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 \x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"]\n\x13StreamTablesRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12\x0e\n\x06tables\x18\x02 \x03(\t\x12$\n\x07\x63harset\x18\x03 
\x01(\x0b\x32\x13.binlogdata.Charset\"Q\n\x14StreamTablesResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 \x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"%\n\x04Rule\x12\r\n\x05match\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\")\n\x06\x46ilter\x12\x1f\n\x05rules\x18\x01 \x03(\x0b\x32\x10.binlogdata.Rule\"\xde\x01\n\x0c\x42inlogSource\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\x12)\n\x0btablet_type\x18\x03 \x01(\x0e\x32\x14.topodata.TabletType\x12%\n\tkey_range\x18\x04 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x0e\n\x06tables\x18\x05 \x03(\t\x12\"\n\x06\x66ilter\x18\x06 \x01(\x0b\x32\x12.binlogdata.Filter\x12\'\n\x06on_ddl\x18\x07 \x01(\x0e\x32\x17.binlogdata.OnDDLAction\"B\n\tRowChange\x12\x1a\n\x06\x62\x65\x66ore\x18\x01 \x01(\x0b\x32\n.query.Row\x12\x19\n\x05\x61\x66ter\x18\x02 \x01(\x0b\x32\n.query.Row\"J\n\x08RowEvent\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12*\n\x0brow_changes\x18\x02 \x03(\x0b\x32\x15.binlogdata.RowChange\">\n\nFieldEvent\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x1c\n\x06\x66ields\x18\x02 \x03(\x0b\x32\x0c.query.Field\"\xc8\x01\n\x06VEvent\x12$\n\x04type\x18\x01 \x01(\x0e\x32\x16.binlogdata.VEventType\x12\x11\n\ttimestamp\x18\x02 \x01(\x03\x12\x0c\n\x04gtid\x18\x03 \x01(\t\x12\x0b\n\x03\x64\x64l\x18\x04 \x01(\t\x12\'\n\trow_event\x18\x05 \x01(\x0b\x32\x14.binlogdata.RowEvent\x12+\n\x0b\x66ield_event\x18\x06 \x01(\x0b\x32\x16.binlogdata.FieldEvent\x12\x14\n\x0c\x63urrent_time\x18\x14 \x01(\x03\"\xc7\x01\n\x0eVStreamRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x10\n\x08position\x18\x04 \x01(\t\x12\"\n\x06\x66ilter\x18\x05 \x01(\x0b\x32\x12.binlogdata.Filter\"5\n\x0fVStreamResponse\x12\"\n\x06\x65vents\x18\x01 
\x03(\x0b\x32\x12.binlogdata.VEvent*>\n\x0bOnDDLAction\x12\n\n\x06IGNORE\x10\x00\x12\x08\n\x04STOP\x10\x01\x12\x08\n\x04\x45XEC\x10\x02\x12\x0f\n\x0b\x45XEC_IGNORE\x10\x03*\xb9\x01\n\nVEventType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x08\n\x04GTID\x10\x01\x12\t\n\x05\x42\x45GIN\x10\x02\x12\n\n\x06\x43OMMIT\x10\x03\x12\x0c\n\x08ROLLBACK\x10\x04\x12\x07\n\x03\x44\x44L\x10\x05\x12\n\n\x06INSERT\x10\x06\x12\x0b\n\x07REPLACE\x10\x07\x12\n\n\x06UPDATE\x10\x08\x12\n\n\x06\x44\x45LETE\x10\t\x12\x07\n\x03SET\x10\n\x12\t\n\x05OTHER\x10\x0b\x12\x07\n\x03ROW\x10\x0c\x12\t\n\x05\x46IELD\x10\r\x12\r\n\tHEARTBEAT\x10\x0e\x42)Z\'vitess.io/vitess/go/vt/proto/binlogdatab\x06proto3') + serialized_pb=_b('\n\x10\x62inlogdata.proto\x12\nbinlogdata\x1a\x0bvtrpc.proto\x1a\x0bquery.proto\x1a\x0etopodata.proto\"7\n\x07\x43harset\x12\x0e\n\x06\x63lient\x18\x01 \x01(\x05\x12\x0c\n\x04\x63onn\x18\x02 \x01(\x05\x12\x0e\n\x06server\x18\x03 \x01(\x05\"\xb5\x03\n\x11\x42inlogTransaction\x12;\n\nstatements\x18\x01 \x03(\x0b\x32\'.binlogdata.BinlogTransaction.Statement\x12&\n\x0b\x65vent_token\x18\x04 \x01(\x0b\x32\x11.query.EventToken\x1a\xae\x02\n\tStatement\x12\x42\n\x08\x63\x61tegory\x18\x01 \x01(\x0e\x32\x30.binlogdata.BinlogTransaction.Statement.Category\x12$\n\x07\x63harset\x18\x02 \x01(\x0b\x32\x13.binlogdata.Charset\x12\x0b\n\x03sql\x18\x03 \x01(\x0c\"\xa9\x01\n\x08\x43\x61tegory\x12\x13\n\x0f\x42L_UNRECOGNIZED\x10\x00\x12\x0c\n\x08\x42L_BEGIN\x10\x01\x12\r\n\tBL_COMMIT\x10\x02\x12\x0f\n\x0b\x42L_ROLLBACK\x10\x03\x12\x15\n\x11\x42L_DML_DEPRECATED\x10\x04\x12\n\n\x06\x42L_DDL\x10\x05\x12\n\n\x06\x42L_SET\x10\x06\x12\r\n\tBL_INSERT\x10\x07\x12\r\n\tBL_UPDATE\x10\x08\x12\r\n\tBL_DELETE\x10\tJ\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04\"v\n\x15StreamKeyRangeRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12%\n\tkey_range\x18\x02 \x01(\x0b\x32\x12.topodata.KeyRange\x12$\n\x07\x63harset\x18\x03 \x01(\x0b\x32\x13.binlogdata.Charset\"S\n\x16StreamKeyRangeResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 
\x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"]\n\x13StreamTablesRequest\x12\x10\n\x08position\x18\x01 \x01(\t\x12\x0e\n\x06tables\x18\x02 \x03(\t\x12$\n\x07\x63harset\x18\x03 \x01(\x0b\x32\x13.binlogdata.Charset\"Q\n\x14StreamTablesResponse\x12\x39\n\x12\x62inlog_transaction\x18\x01 \x01(\x0b\x32\x1d.binlogdata.BinlogTransaction\"%\n\x04Rule\x12\r\n\x05match\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\")\n\x06\x46ilter\x12\x1f\n\x05rules\x18\x01 \x03(\x0b\x32\x10.binlogdata.Rule\"\xde\x01\n\x0c\x42inlogSource\x12\x10\n\x08keyspace\x18\x01 \x01(\t\x12\r\n\x05shard\x18\x02 \x01(\t\x12)\n\x0btablet_type\x18\x03 \x01(\x0e\x32\x14.topodata.TabletType\x12%\n\tkey_range\x18\x04 \x01(\x0b\x32\x12.topodata.KeyRange\x12\x0e\n\x06tables\x18\x05 \x03(\t\x12\"\n\x06\x66ilter\x18\x06 \x01(\x0b\x32\x12.binlogdata.Filter\x12\'\n\x06on_ddl\x18\x07 \x01(\x0e\x32\x17.binlogdata.OnDDLAction\"B\n\tRowChange\x12\x1a\n\x06\x62\x65\x66ore\x18\x01 \x01(\x0b\x32\n.query.Row\x12\x19\n\x05\x61\x66ter\x18\x02 \x01(\x0b\x32\n.query.Row\"J\n\x08RowEvent\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12*\n\x0brow_changes\x18\x02 \x03(\x0b\x32\x15.binlogdata.RowChange\">\n\nFieldEvent\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x1c\n\x06\x66ields\x18\x02 \x03(\x0b\x32\x0c.query.Field\"\xc8\x01\n\x06VEvent\x12$\n\x04type\x18\x01 \x01(\x0e\x32\x16.binlogdata.VEventType\x12\x11\n\ttimestamp\x18\x02 \x01(\x03\x12\x0c\n\x04gtid\x18\x03 \x01(\t\x12\x0b\n\x03\x64\x64l\x18\x04 \x01(\t\x12\'\n\trow_event\x18\x05 \x01(\x0b\x32\x14.binlogdata.RowEvent\x12+\n\x0b\x66ield_event\x18\x06 \x01(\x0b\x32\x16.binlogdata.FieldEvent\x12\x14\n\x0c\x63urrent_time\x18\x14 \x01(\x03\"\xc7\x01\n\x0eVStreamRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\x10\n\x08position\x18\x04 \x01(\t\x12\"\n\x06\x66ilter\x18\x05 
\x01(\x0b\x32\x12.binlogdata.Filter\"5\n\x0fVStreamResponse\x12\"\n\x06\x65vents\x18\x01 \x03(\x0b\x32\x12.binlogdata.VEvent\"\xc8\x01\n\x12VStreamRowsRequest\x12,\n\x13\x65\x66\x66\x65\x63tive_caller_id\x18\x01 \x01(\x0b\x32\x0f.vtrpc.CallerID\x12\x32\n\x13immediate_caller_id\x18\x02 \x01(\x0b\x32\x15.query.VTGateCallerID\x12\x1d\n\x06target\x18\x03 \x01(\x0b\x32\r.query.Target\x12\r\n\x05query\x18\x04 \x01(\t\x12\"\n\x06lastpk\x18\x05 \x01(\x0b\x32\x12.query.QueryResult\"\x97\x01\n\x13VStreamRowsResponse\x12\x1c\n\x06\x66ields\x18\x01 \x03(\x0b\x32\x0c.query.Field\x12\x1e\n\x08pkfields\x18\x02 \x03(\x0b\x32\x0c.query.Field\x12\x0c\n\x04gtid\x18\x03 \x01(\t\x12\x18\n\x04rows\x18\x04 \x03(\x0b\x32\n.query.Row\x12\x1a\n\x06lastpk\x18\x05 \x01(\x0b\x32\n.query.Row*>\n\x0bOnDDLAction\x12\n\n\x06IGNORE\x10\x00\x12\x08\n\x04STOP\x10\x01\x12\x08\n\x04\x45XEC\x10\x02\x12\x0f\n\x0b\x45XEC_IGNORE\x10\x03*\xb9\x01\n\nVEventType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x08\n\x04GTID\x10\x01\x12\t\n\x05\x42\x45GIN\x10\x02\x12\n\n\x06\x43OMMIT\x10\x03\x12\x0c\n\x08ROLLBACK\x10\x04\x12\x07\n\x03\x44\x44L\x10\x05\x12\n\n\x06INSERT\x10\x06\x12\x0b\n\x07REPLACE\x10\x07\x12\n\n\x06UPDATE\x10\x08\x12\n\n\x06\x44\x45LETE\x10\t\x12\x07\n\x03SET\x10\n\x12\t\n\x05OTHER\x10\x0b\x12\x07\n\x03ROW\x10\x0c\x12\t\n\x05\x46IELD\x10\r\x12\r\n\tHEARTBEAT\x10\x0e\x42)Z\'vitess.io/vitess/go/vt/proto/binlogdatab\x06proto3') , dependencies=[vtrpc__pb2.DESCRIPTOR,query__pb2.DESCRIPTOR,topodata__pb2.DESCRIPTOR,]) @@ -52,8 +52,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=1929, - serialized_end=1991, + serialized_start=2286, + serialized_end=2348, ) _sym_db.RegisterEnumDescriptor(_ONDDLACTION) @@ -127,8 +127,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=1994, - serialized_end=2179, + serialized_start=2351, + serialized_end=2536, ) _sym_db.RegisterEnumDescriptor(_VEVENTTYPE) @@ -907,6 +907,124 @@ serialized_end=1927, ) + +_VSTREAMROWSREQUEST = 
_descriptor.Descriptor( + name='VStreamRowsRequest', + full_name='binlogdata.VStreamRowsRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='effective_caller_id', full_name='binlogdata.VStreamRowsRequest.effective_caller_id', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='immediate_caller_id', full_name='binlogdata.VStreamRowsRequest.immediate_caller_id', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='target', full_name='binlogdata.VStreamRowsRequest.target', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='query', full_name='binlogdata.VStreamRowsRequest.query', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='lastpk', full_name='binlogdata.VStreamRowsRequest.lastpk', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + 
nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1930, + serialized_end=2130, +) + + +_VSTREAMROWSRESPONSE = _descriptor.Descriptor( + name='VStreamRowsResponse', + full_name='binlogdata.VStreamRowsResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='fields', full_name='binlogdata.VStreamRowsResponse.fields', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='pkfields', full_name='binlogdata.VStreamRowsResponse.pkfields', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='gtid', full_name='binlogdata.VStreamRowsResponse.gtid', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='rows', full_name='binlogdata.VStreamRowsResponse.rows', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='lastpk', full_name='binlogdata.VStreamRowsResponse.lastpk', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2133, + serialized_end=2284, +) + _BINLOGTRANSACTION_STATEMENT.fields_by_name['category'].enum_type = _BINLOGTRANSACTION_STATEMENT_CATEGORY _BINLOGTRANSACTION_STATEMENT.fields_by_name['charset'].message_type = _CHARSET _BINLOGTRANSACTION_STATEMENT.containing_type = _BINLOGTRANSACTION @@ -935,6 +1053,14 @@ _VSTREAMREQUEST.fields_by_name['target'].message_type = query__pb2._TARGET _VSTREAMREQUEST.fields_by_name['filter'].message_type = _FILTER _VSTREAMRESPONSE.fields_by_name['events'].message_type = _VEVENT +_VSTREAMROWSREQUEST.fields_by_name['effective_caller_id'].message_type = vtrpc__pb2._CALLERID +_VSTREAMROWSREQUEST.fields_by_name['immediate_caller_id'].message_type = query__pb2._VTGATECALLERID +_VSTREAMROWSREQUEST.fields_by_name['target'].message_type = query__pb2._TARGET +_VSTREAMROWSREQUEST.fields_by_name['lastpk'].message_type = query__pb2._QUERYRESULT +_VSTREAMROWSRESPONSE.fields_by_name['fields'].message_type = query__pb2._FIELD +_VSTREAMROWSRESPONSE.fields_by_name['pkfields'].message_type = query__pb2._FIELD +_VSTREAMROWSRESPONSE.fields_by_name['rows'].message_type = query__pb2._ROW +_VSTREAMROWSRESPONSE.fields_by_name['lastpk'].message_type = query__pb2._ROW DESCRIPTOR.message_types_by_name['Charset'] = _CHARSET DESCRIPTOR.message_types_by_name['BinlogTransaction'] = _BINLOGTRANSACTION DESCRIPTOR.message_types_by_name['StreamKeyRangeRequest'] = _STREAMKEYRANGEREQUEST @@ -950,6 +1076,8 @@ DESCRIPTOR.message_types_by_name['VEvent'] = _VEVENT DESCRIPTOR.message_types_by_name['VStreamRequest'] = _VSTREAMREQUEST DESCRIPTOR.message_types_by_name['VStreamResponse'] = _VSTREAMRESPONSE 
+DESCRIPTOR.message_types_by_name['VStreamRowsRequest'] = _VSTREAMROWSREQUEST +DESCRIPTOR.message_types_by_name['VStreamRowsResponse'] = _VSTREAMROWSRESPONSE DESCRIPTOR.enum_types_by_name['OnDDLAction'] = _ONDDLACTION DESCRIPTOR.enum_types_by_name['VEventType'] = _VEVENTTYPE _sym_db.RegisterFileDescriptor(DESCRIPTOR) @@ -1067,6 +1195,20 @@ )) _sym_db.RegisterMessage(VStreamResponse) +VStreamRowsRequest = _reflection.GeneratedProtocolMessageType('VStreamRowsRequest', (_message.Message,), dict( + DESCRIPTOR = _VSTREAMROWSREQUEST, + __module__ = 'binlogdata_pb2' + # @@protoc_insertion_point(class_scope:binlogdata.VStreamRowsRequest) + )) +_sym_db.RegisterMessage(VStreamRowsRequest) + +VStreamRowsResponse = _reflection.GeneratedProtocolMessageType('VStreamRowsResponse', (_message.Message,), dict( + DESCRIPTOR = _VSTREAMROWSRESPONSE, + __module__ = 'binlogdata_pb2' + # @@protoc_insertion_point(class_scope:binlogdata.VStreamRowsResponse) + )) +_sym_db.RegisterMessage(VStreamRowsResponse) + DESCRIPTOR._options = None # @@protoc_insertion_point(module_scope) diff --git a/py/vtproto/queryservice_pb2.py b/py/vtproto/queryservice_pb2.py index 3c443df129b..986c7701187 100644 --- a/py/vtproto/queryservice_pb2.py +++ b/py/vtproto/queryservice_pb2.py @@ -21,7 +21,7 @@ package='queryservice', syntax='proto3', serialized_options=_b('Z)vitess.io/vitess/go/vt/proto/queryservice'), - 
serialized_pb=_b('\n\x12queryservice.proto\x12\x0cqueryservice\x1a\x0bquery.proto\x1a\x10\x62inlogdata.proto2\xef\x0c\n\x05Query\x12:\n\x07\x45xecute\x12\x15.query.ExecuteRequest\x1a\x16.query.ExecuteResponse\"\x00\x12I\n\x0c\x45xecuteBatch\x12\x1a.query.ExecuteBatchRequest\x1a\x1b.query.ExecuteBatchResponse\"\x00\x12N\n\rStreamExecute\x12\x1b.query.StreamExecuteRequest\x1a\x1c.query.StreamExecuteResponse\"\x00\x30\x01\x12\x34\n\x05\x42\x65gin\x12\x13.query.BeginRequest\x1a\x14.query.BeginResponse\"\x00\x12\x37\n\x06\x43ommit\x12\x14.query.CommitRequest\x1a\x15.query.CommitResponse\"\x00\x12=\n\x08Rollback\x12\x16.query.RollbackRequest\x1a\x17.query.RollbackResponse\"\x00\x12:\n\x07Prepare\x12\x15.query.PrepareRequest\x1a\x16.query.PrepareResponse\"\x00\x12O\n\x0e\x43ommitPrepared\x12\x1c.query.CommitPreparedRequest\x1a\x1d.query.CommitPreparedResponse\"\x00\x12U\n\x10RollbackPrepared\x12\x1e.query.RollbackPreparedRequest\x1a\x1f.query.RollbackPreparedResponse\"\x00\x12X\n\x11\x43reateTransaction\x12\x1f.query.CreateTransactionRequest\x1a .query.CreateTransactionResponse\"\x00\x12\x46\n\x0bStartCommit\x12\x19.query.StartCommitRequest\x1a\x1a.query.StartCommitResponse\"\x00\x12\x46\n\x0bSetRollback\x12\x19.query.SetRollbackRequest\x1a\x1a.query.SetRollbackResponse\"\x00\x12^\n\x13\x43oncludeTransaction\x12!.query.ConcludeTransactionRequest\x1a\".query.ConcludeTransactionResponse\"\x00\x12R\n\x0fReadTransaction\x12\x1d.query.ReadTransactionRequest\x1a\x1e.query.ReadTransactionResponse\"\x00\x12I\n\x0c\x42\x65ginExecute\x12\x1a.query.BeginExecuteRequest\x1a\x1b.query.BeginExecuteResponse\"\x00\x12X\n\x11\x42\x65ginExecuteBatch\x12\x1f.query.BeginExecuteBatchRequest\x1a 
.query.BeginExecuteBatchResponse\"\x00\x12N\n\rMessageStream\x12\x1b.query.MessageStreamRequest\x1a\x1c.query.MessageStreamResponse\"\x00\x30\x01\x12\x43\n\nMessageAck\x12\x18.query.MessageAckRequest\x1a\x19.query.MessageAckResponse\"\x00\x12\x43\n\nSplitQuery\x12\x18.query.SplitQueryRequest\x1a\x19.query.SplitQueryResponse\"\x00\x12K\n\x0cStreamHealth\x12\x1a.query.StreamHealthRequest\x1a\x1b.query.StreamHealthResponse\"\x00\x30\x01\x12K\n\x0cUpdateStream\x12\x1a.query.UpdateStreamRequest\x1a\x1b.query.UpdateStreamResponse\"\x00\x30\x01\x12\x46\n\x07VStream\x12\x1a.binlogdata.VStreamRequest\x1a\x1b.binlogdata.VStreamResponse\"\x00\x30\x01\x42+Z)vitess.io/vitess/go/vt/proto/queryserviceb\x06proto3') + serialized_pb=_b('\n\x12queryservice.proto\x12\x0cqueryservice\x1a\x0bquery.proto\x1a\x10\x62inlogdata.proto2\xc3\r\n\x05Query\x12:\n\x07\x45xecute\x12\x15.query.ExecuteRequest\x1a\x16.query.ExecuteResponse\"\x00\x12I\n\x0c\x45xecuteBatch\x12\x1a.query.ExecuteBatchRequest\x1a\x1b.query.ExecuteBatchResponse\"\x00\x12N\n\rStreamExecute\x12\x1b.query.StreamExecuteRequest\x1a\x1c.query.StreamExecuteResponse\"\x00\x30\x01\x12\x34\n\x05\x42\x65gin\x12\x13.query.BeginRequest\x1a\x14.query.BeginResponse\"\x00\x12\x37\n\x06\x43ommit\x12\x14.query.CommitRequest\x1a\x15.query.CommitResponse\"\x00\x12=\n\x08Rollback\x12\x16.query.RollbackRequest\x1a\x17.query.RollbackResponse\"\x00\x12:\n\x07Prepare\x12\x15.query.PrepareRequest\x1a\x16.query.PrepareResponse\"\x00\x12O\n\x0e\x43ommitPrepared\x12\x1c.query.CommitPreparedRequest\x1a\x1d.query.CommitPreparedResponse\"\x00\x12U\n\x10RollbackPrepared\x12\x1e.query.RollbackPreparedRequest\x1a\x1f.query.RollbackPreparedResponse\"\x00\x12X\n\x11\x43reateTransaction\x12\x1f.query.CreateTransactionRequest\x1a 
.query.CreateTransactionResponse\"\x00\x12\x46\n\x0bStartCommit\x12\x19.query.StartCommitRequest\x1a\x1a.query.StartCommitResponse\"\x00\x12\x46\n\x0bSetRollback\x12\x19.query.SetRollbackRequest\x1a\x1a.query.SetRollbackResponse\"\x00\x12^\n\x13\x43oncludeTransaction\x12!.query.ConcludeTransactionRequest\x1a\".query.ConcludeTransactionResponse\"\x00\x12R\n\x0fReadTransaction\x12\x1d.query.ReadTransactionRequest\x1a\x1e.query.ReadTransactionResponse\"\x00\x12I\n\x0c\x42\x65ginExecute\x12\x1a.query.BeginExecuteRequest\x1a\x1b.query.BeginExecuteResponse\"\x00\x12X\n\x11\x42\x65ginExecuteBatch\x12\x1f.query.BeginExecuteBatchRequest\x1a .query.BeginExecuteBatchResponse\"\x00\x12N\n\rMessageStream\x12\x1b.query.MessageStreamRequest\x1a\x1c.query.MessageStreamResponse\"\x00\x30\x01\x12\x43\n\nMessageAck\x12\x18.query.MessageAckRequest\x1a\x19.query.MessageAckResponse\"\x00\x12\x43\n\nSplitQuery\x12\x18.query.SplitQueryRequest\x1a\x19.query.SplitQueryResponse\"\x00\x12K\n\x0cStreamHealth\x12\x1a.query.StreamHealthRequest\x1a\x1b.query.StreamHealthResponse\"\x00\x30\x01\x12K\n\x0cUpdateStream\x12\x1a.query.UpdateStreamRequest\x1a\x1b.query.UpdateStreamResponse\"\x00\x30\x01\x12\x46\n\x07VStream\x12\x1a.binlogdata.VStreamRequest\x1a\x1b.binlogdata.VStreamResponse\"\x00\x30\x01\x12R\n\x0bVStreamRows\x12\x1e.binlogdata.VStreamRowsRequest\x1a\x1f.binlogdata.VStreamRowsResponse\"\x00\x30\x01\x42+Z)vitess.io/vitess/go/vt/proto/queryserviceb\x06proto3') , dependencies=[query__pb2.DESCRIPTOR,binlogdata__pb2.DESCRIPTOR,]) @@ -39,7 +39,7 @@ index=0, serialized_options=None, serialized_start=68, - serialized_end=1715, + serialized_end=1799, methods=[ _descriptor.MethodDescriptor( name='Execute', @@ -239,6 +239,15 @@ output_type=binlogdata__pb2._VSTREAMRESPONSE, serialized_options=None, ), + _descriptor.MethodDescriptor( + name='VStreamRows', + full_name='queryservice.Query.VStreamRows', + index=22, + containing_service=None, + input_type=binlogdata__pb2._VSTREAMROWSREQUEST, + 
output_type=binlogdata__pb2._VSTREAMROWSRESPONSE, + serialized_options=None, + ), ]) _sym_db.RegisterServiceDescriptor(_QUERY) diff --git a/py/vtproto/queryservice_pb2_grpc.py b/py/vtproto/queryservice_pb2_grpc.py index 61adf9b42bd..f6d80e62adc 100644 --- a/py/vtproto/queryservice_pb2_grpc.py +++ b/py/vtproto/queryservice_pb2_grpc.py @@ -125,6 +125,11 @@ def __init__(self, channel): request_serializer=binlogdata__pb2.VStreamRequest.SerializeToString, response_deserializer=binlogdata__pb2.VStreamResponse.FromString, ) + self.VStreamRows = channel.unary_stream( + '/queryservice.Query/VStreamRows', + request_serializer=binlogdata__pb2.VStreamRowsRequest.SerializeToString, + response_deserializer=binlogdata__pb2.VStreamRowsResponse.FromString, + ) class QueryServicer(object): @@ -292,6 +297,13 @@ def VStream(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def VStreamRows(self, request, context): + """VStreamRows streams rows from the specified starting point. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def add_QueryServicer_to_server(servicer, server): rpc_method_handlers = { @@ -405,6 +417,11 @@ def add_QueryServicer_to_server(servicer, server): request_deserializer=binlogdata__pb2.VStreamRequest.FromString, response_serializer=binlogdata__pb2.VStreamResponse.SerializeToString, ), + 'VStreamRows': grpc.unary_stream_rpc_method_handler( + servicer.VStreamRows, + request_deserializer=binlogdata__pb2.VStreamRowsRequest.FromString, + response_serializer=binlogdata__pb2.VStreamRowsResponse.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( 'queryservice.Query', rpc_method_handlers)