From 5210ead7190a63eb3ef4d65044f05eb263e8ce46 Mon Sep 17 00:00:00 2001 From: Yipei Wang Date: Tue, 31 Jan 2017 11:20:57 -0800 Subject: [PATCH 001/108] workflow: Defined structure to track status per tasks in workflowstate.proto. Implement ParallelRunner and Checkpointer. Create a simple test for ParallelRunner. --- go/vt/proto/workflow/workflow.pb.go | 214 ++++++++++- go/vt/topo/workflow.go | 18 +- go/vt/workflow/resharding/checkpoint.go | 62 +++ .../horizontal_resharding_workflow.go | 358 +++++++++++++++--- .../horizontal_resharding_workflow_test.go | 38 +- go/vt/workflow/resharding/parallel_runner.go | 73 ++++ .../resharding/parallel_runner_test.go | 95 +++++ go/vt/workflow/resharding/status.go | 118 ++++++ proto/workflow.proto | 31 ++ 9 files changed, 920 insertions(+), 87 deletions(-) create mode 100644 go/vt/workflow/resharding/checkpoint.go create mode 100644 go/vt/workflow/resharding/parallel_runner.go create mode 100644 go/vt/workflow/resharding/parallel_runner_test.go create mode 100644 go/vt/workflow/resharding/status.go diff --git a/go/vt/proto/workflow/workflow.pb.go b/go/vt/proto/workflow/workflow.pb.go index 70ce2abe855..1b246023976 100644 --- a/go/vt/proto/workflow/workflow.pb.go +++ b/go/vt/proto/workflow/workflow.pb.go @@ -10,6 +10,8 @@ It is generated from these files: It has these top-level messages: Workflow + Task + WorkflowCheckpoint */ package workflow @@ -56,6 +58,30 @@ func (x WorkflowState) String() string { } func (WorkflowState) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +type TaskState int32 + +const ( + TaskState_TaskNotStarted TaskState = 0 + TaskState_TaskRunning TaskState = 1 + TaskState_TaskDone TaskState = 2 +) + +var TaskState_name = map[int32]string{ + 0: "TaskNotStarted", + 1: "TaskRunning", + 2: "TaskDone", +} +var TaskState_value = map[string]int32{ + "TaskNotStarted": 0, + "TaskRunning": 1, + "TaskDone": 2, +} + +func (x TaskState) String() string { + return proto.EnumName(TaskState_name, int32(x)) 
+} +func (TaskState) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + // Workflow is the persisted state of a long-running workflow. type Workflow struct { // uuid is set when the workflow is created, and immutable after @@ -97,29 +123,183 @@ func (m *Workflow) String() string { return proto.CompactTextString(m func (*Workflow) ProtoMessage() {} func (*Workflow) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *Workflow) GetUuid() string { + if m != nil { + return m.Uuid + } + return "" +} + +func (m *Workflow) GetFactoryName() string { + if m != nil { + return m.FactoryName + } + return "" +} + +func (m *Workflow) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Workflow) GetState() WorkflowState { + if m != nil { + return m.State + } + return WorkflowState_NotStarted +} + +func (m *Workflow) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *Workflow) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func (m *Workflow) GetStartTime() int64 { + if m != nil { + return m.StartTime + } + return 0 +} + +func (m *Workflow) GetEndTime() int64 { + if m != nil { + return m.EndTime + } + return 0 +} + +type Task struct { + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId" json:"task_id,omitempty"` + State TaskState `protobuf:"varint,2,opt,name=state,enum=workflow.TaskState" json:"state,omitempty"` + // attributes includes the parameters the task needs. 
+ Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Error string `protobuf:"bytes,4,opt,name=error" json:"error,omitempty"` +} + +func (m *Task) Reset() { *m = Task{} } +func (m *Task) String() string { return proto.CompactTextString(m) } +func (*Task) ProtoMessage() {} +func (*Task) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Task) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +func (m *Task) GetState() TaskState { + if m != nil { + return m.State + } + return TaskState_TaskNotStarted +} + +func (m *Task) GetAttributes() map[string]string { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Task) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +type WorkflowCheckpoint struct { + // code_version is used to detect incompabilities between the version of the + // running workflow and the one which wrote the checkpoint. If they don't + // match, the workflow must not continue. The author of workflow must update + // this variable in their implementation when incompabilities are introduced. + CodeVersion int32 `protobuf:"varint,1,opt,name=code_version,json=codeVersion" json:"code_version,omitempty"` + // Task is the data structure that stores the execution status and the + // attributes of a task. + Tasks map[string]*Task `protobuf:"bytes,2,rep,name=tasks" json:"tasks,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // settings includes workflow specific data, e.g. the resharding workflow + // would store the source shards and destination shards. 
+ Settings map[string]string `protobuf:"bytes,3,rep,name=settings" json:"settings,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *WorkflowCheckpoint) Reset() { *m = WorkflowCheckpoint{} } +func (m *WorkflowCheckpoint) String() string { return proto.CompactTextString(m) } +func (*WorkflowCheckpoint) ProtoMessage() {} +func (*WorkflowCheckpoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *WorkflowCheckpoint) GetCodeVersion() int32 { + if m != nil { + return m.CodeVersion + } + return 0 +} + +func (m *WorkflowCheckpoint) GetTasks() map[string]*Task { + if m != nil { + return m.Tasks + } + return nil +} + +func (m *WorkflowCheckpoint) GetSettings() map[string]string { + if m != nil { + return m.Settings + } + return nil +} + func init() { proto.RegisterType((*Workflow)(nil), "workflow.Workflow") + proto.RegisterType((*Task)(nil), "workflow.Task") + proto.RegisterType((*WorkflowCheckpoint)(nil), "workflow.WorkflowCheckpoint") proto.RegisterEnum("workflow.WorkflowState", WorkflowState_name, WorkflowState_value) + proto.RegisterEnum("workflow.TaskState", TaskState_name, TaskState_value) } func init() { proto.RegisterFile("workflow.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 246 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x54, 0x90, 0x41, 0x4b, 0x03, 0x31, - 0x10, 0x85, 0x4d, 0xbb, 0xdb, 0x4d, 0xa7, 0x75, 0x59, 0x06, 0xc1, 0x78, 0x10, 0x56, 0x4f, 0x8b, - 0x60, 0x0f, 0x0a, 0xfe, 0x02, 0xcf, 0x3d, 0xa4, 0x82, 0xc7, 0x12, 0xcd, 0x54, 0x16, 0xdd, 0x44, - 0xd2, 0x59, 0x8a, 0xff, 0xd8, 0x9f, 0x21, 0xc9, 0x76, 0x85, 0xde, 0xde, 0x9b, 0x2f, 0x6f, 0x5e, - 0x18, 0x28, 0x0f, 0x3e, 0x7c, 0xee, 0xbe, 0xfc, 0x61, 0xf5, 0x1d, 0x3c, 0x7b, 0x94, 0xa3, 0xbf, - 0xfd, 0x15, 0x20, 0x5f, 0x8f, 0x06, 0x11, 0xb2, 0xbe, 0x6f, 0xad, 0x12, 0xb5, 0x68, 0xe6, 0x3a, - 0x69, 0xbc, 0x81, 0xe5, 0xce, 0xbc, 0xb3, 0x0f, 0x3f, 0x5b, 0x67, 0x3a, 
0x52, 0x93, 0xc4, 0x16, - 0xc7, 0xd9, 0xda, 0x74, 0x14, 0x63, 0x09, 0x4d, 0x87, 0x58, 0xd4, 0x78, 0x0f, 0xf9, 0x9e, 0x0d, - 0x93, 0xca, 0x6a, 0xd1, 0x94, 0x0f, 0x97, 0xab, 0xff, 0x1f, 0x8c, 0x6d, 0x9b, 0x88, 0xf5, 0xf0, - 0x2a, 0xae, 0xb0, 0x86, 0x8d, 0xca, 0x6b, 0xd1, 0x2c, 0x75, 0xd2, 0x78, 0x01, 0x39, 0x85, 0xe0, - 0x83, 0x9a, 0xa5, 0xbd, 0x83, 0xc1, 0x6b, 0x80, 0x3d, 0x9b, 0xc0, 0x5b, 0x6e, 0x3b, 0x52, 0x45, - 0x2d, 0x9a, 0xa9, 0x9e, 0xa7, 0xc9, 0x4b, 0xdb, 0x11, 0x5e, 0x81, 0x24, 0x67, 0x07, 0x28, 0x13, - 0x2c, 0xc8, 0xd9, 0x88, 0xee, 0x9e, 0xe0, 0xfc, 0xa4, 0x1b, 0x4b, 0x80, 0xb5, 0xe7, 0x4d, 0xcc, - 0x92, 0xad, 0xce, 0x70, 0x01, 0x85, 0xee, 0x9d, 0x6b, 0xdd, 0x47, 0x25, 0x50, 0x42, 0xf6, 0xec, - 0x1d, 0x55, 0x93, 0xb7, 0x59, 0xba, 0xd9, 0xe3, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe2, 0x1f, - 0x18, 0x22, 0x45, 0x01, 0x00, 0x00, + // 479 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x53, 0x5d, 0x8b, 0xd3, 0x40, + 0x14, 0x75, 0xd2, 0xa4, 0x4d, 0x6f, 0xba, 0xd9, 0x72, 0x15, 0x36, 0x16, 0x94, 0x58, 0x04, 0x63, + 0xc1, 0x3e, 0x54, 0x10, 0x51, 0x76, 0x41, 0xfc, 0x40, 0x5f, 0xf6, 0x21, 0x5d, 0xf4, 0xb1, 0xcc, + 0x36, 0xb3, 0x6b, 0xc8, 0x76, 0x66, 0x99, 0x4c, 0x76, 0xe9, 0x0f, 0xf4, 0x77, 0xf8, 0xea, 0xcf, + 0x90, 0x99, 0x49, 0xd2, 0x46, 0x45, 0xd8, 0xb7, 0xfb, 0x75, 0x4e, 0xce, 0xb9, 0x73, 0x03, 0xe1, + 0xad, 0x90, 0xc5, 0xc5, 0x95, 0xb8, 0x9d, 0x5f, 0x4b, 0xa1, 0x04, 0xfa, 0x4d, 0x3e, 0xfd, 0x45, + 0xc0, 0xff, 0x56, 0x27, 0x88, 0xe0, 0x56, 0x55, 0x9e, 0x45, 0x24, 0x26, 0xc9, 0x30, 0x35, 0x31, + 0x3e, 0x81, 0xd1, 0x05, 0x5d, 0x2b, 0x21, 0xb7, 0x2b, 0x4e, 0x37, 0x2c, 0x72, 0x4c, 0x2f, 0xa8, + 0x6b, 0xa7, 0x74, 0xc3, 0x34, 0xcc, 0xb4, 0x7a, 0x16, 0xa6, 0x63, 0x7c, 0x01, 0x5e, 0xa9, 0xa8, + 0x62, 0x91, 0x1b, 0x93, 0x24, 0x5c, 0x1c, 0xcd, 0x5b, 0x05, 0xcd, 0xd7, 0x96, 0xba, 0x9d, 0xda, + 0x29, 0x4d, 0x91, 0x51, 0x45, 0x23, 0x2f, 0x26, 0xc9, 0x28, 0x35, 0x31, 0x3e, 0x00, 0x8f, 0x49, + 0x29, 0x64, 0xd4, 0x37, 
0xbc, 0x36, 0xc1, 0x47, 0x00, 0xa5, 0xa2, 0x52, 0xad, 0x54, 0xbe, 0x61, + 0xd1, 0x20, 0x26, 0x49, 0x2f, 0x1d, 0x9a, 0xca, 0x59, 0xbe, 0x61, 0xf8, 0x10, 0x7c, 0xc6, 0x33, + 0xdb, 0xf4, 0x4d, 0x73, 0xc0, 0x78, 0xa6, 0x5b, 0xd3, 0x9f, 0x04, 0xdc, 0x33, 0x5a, 0x16, 0x78, + 0x04, 0x03, 0x45, 0xcb, 0x62, 0xd5, 0x3a, 0xed, 0xeb, 0xf4, 0x4b, 0x86, 0xcf, 0x1b, 0xd1, 0x8e, + 0x11, 0x7d, 0x7f, 0x27, 0x5a, 0xe3, 0x3a, 0x82, 0x4f, 0x00, 0xa8, 0x52, 0x32, 0x3f, 0xaf, 0x14, + 0x2b, 0xa3, 0x5e, 0xdc, 0x4b, 0x82, 0xc5, 0xe3, 0xee, 0xfc, 0xfc, 0x5d, 0x3b, 0xf0, 0x91, 0x2b, + 0xb9, 0x4d, 0xf7, 0x10, 0x3b, 0x73, 0xee, 0x9e, 0xb9, 0xc9, 0x31, 0x1c, 0xfe, 0x01, 0xc2, 0x31, + 0xf4, 0x0a, 0xb6, 0xad, 0x85, 0xea, 0x50, 0x43, 0x6f, 0xe8, 0x55, 0xd5, 0x3c, 0x85, 0x4d, 0xde, + 0x38, 0xaf, 0xc9, 0xf4, 0x87, 0x03, 0xd8, 0xac, 0xf7, 0xfd, 0x77, 0xb6, 0x2e, 0xae, 0x45, 0xce, + 0x95, 0x7e, 0xc2, 0xb5, 0xc8, 0xd8, 0xea, 0x86, 0xc9, 0x32, 0x17, 0xdc, 0x70, 0x79, 0x69, 0xa0, + 0x6b, 0x5f, 0x6d, 0x09, 0x8f, 0xc1, 0xd3, 0x3b, 0x28, 0x23, 0xc7, 0x38, 0x79, 0xf6, 0xf7, 0x73, + 0xed, 0xf8, 0x8c, 0xb9, 0xda, 0x92, 0x45, 0xe1, 0x27, 0xf0, 0x4b, 0xa6, 0x54, 0xce, 0x2f, 0x9b, + 0x5d, 0xcc, 0xfe, 0xcb, 0xb0, 0xac, 0x87, 0x2d, 0x49, 0x8b, 0x9d, 0x7c, 0x06, 0xd8, 0x91, 0xff, + 0xc3, 0xfa, 0xd3, 0x7d, 0xeb, 0xc1, 0x22, 0xec, 0x2e, 0x7c, 0x6f, 0x15, 0x93, 0xb7, 0x70, 0xd0, + 0xf9, 0xc8, 0x5d, 0xf6, 0x38, 0x7b, 0x05, 0x07, 0x9d, 0x2b, 0xc5, 0x10, 0xe0, 0x54, 0xa8, 0xa5, + 0xbe, 0x32, 0x96, 0x8d, 0xef, 0x61, 0x00, 0x83, 0xb4, 0xe2, 0x3c, 0xe7, 0x97, 0x63, 0x82, 0x3e, + 0xb8, 0x1f, 0x04, 0x67, 0x63, 0x67, 0x76, 0x02, 0xc3, 0xf6, 0x50, 0x10, 0x21, 0xd4, 0x49, 0x07, + 0x77, 0x08, 0x81, 0x11, 0xda, 0x62, 0x47, 0xe0, 0xeb, 0x82, 0xc5, 0x9f, 0xf7, 0xcd, 0xdf, 0xf9, + 0xf2, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4e, 0xca, 0x5c, 0xae, 0xaf, 0x03, 0x00, 0x00, } diff --git a/go/vt/topo/workflow.go b/go/vt/topo/workflow.go index 43521e2aecc..dd677b6f522 100644 --- a/go/vt/topo/workflow.go +++ b/go/vt/topo/workflow.go @@ -17,7 
+17,7 @@ const ( workflowFilename = "Workflow" ) -func pathForWorkflow(uuid string) string { +func PathForWorkflow(uuid string) string { return path.Join(workflowsPath, uuid, workflowFilename) } @@ -27,6 +27,14 @@ type WorkflowInfo struct { *workflowpb.Workflow } +func (w *WorkflowInfo) SetVersion(v Version) { + w.version = v +} + +func (w *WorkflowInfo) Version() Version { + return w.version +} + // GetWorkflowNames returns the names of the existing // workflows. They are sorted by uuid. func (ts Server) GetWorkflowNames(ctx context.Context) ([]string, error) { @@ -51,7 +59,7 @@ func (ts Server) CreateWorkflow(ctx context.Context, w *workflowpb.Workflow) (*W } // Save it. - filePath := pathForWorkflow(w.Uuid) + filePath := PathForWorkflow(w.Uuid) version, err := ts.Create(ctx, GlobalCell, filePath, contents) if err != nil { return nil, err @@ -65,7 +73,7 @@ func (ts Server) CreateWorkflow(ctx context.Context, w *workflowpb.Workflow) (*W // GetWorkflow reads a workflow from the Backend. func (ts Server) GetWorkflow(ctx context.Context, uuid string) (*WorkflowInfo, error) { // Read the file. - filePath := pathForWorkflow(uuid) + filePath := PathForWorkflow(uuid) contents, version, err := ts.Get(ctx, GlobalCell, filePath) if err != nil { return nil, err @@ -93,7 +101,7 @@ func (ts Server) SaveWorkflow(ctx context.Context, wi *WorkflowInfo) error { } // Save it. - filePath := pathForWorkflow(wi.Uuid) + filePath := PathForWorkflow(wi.Uuid) version, err := ts.Update(ctx, GlobalCell, filePath, contents, wi.version) if err != nil { return err @@ -107,6 +115,6 @@ func (ts Server) SaveWorkflow(ctx context.Context, wi *WorkflowInfo) error { // DeleteWorkflow deletes the specified workflow. After this, the // WorkflowInfo object should not be used any more. 
func (ts Server) DeleteWorkflow(ctx context.Context, wi *WorkflowInfo) error { - filePath := pathForWorkflow(wi.Uuid) + filePath := PathForWorkflow(wi.Uuid) return ts.Delete(ctx, GlobalCell, filePath, wi.version) } diff --git a/go/vt/workflow/resharding/checkpoint.go b/go/vt/workflow/resharding/checkpoint.go new file mode 100644 index 00000000000..ef8bacaa1dc --- /dev/null +++ b/go/vt/workflow/resharding/checkpoint.go @@ -0,0 +1,62 @@ +package resharding + +import ( + "context" + "encoding/json" + "sync" + + "github.com/youtube/vitess/go/vt/topo" + + workflowpb "github.com/youtube/vitess/go/vt/proto/workflow" +) + +// Checkpoint checkpoints the data into topology server. +type Checkpoint struct { + topoServer topo.Server + // checkpointMu is used for protecting data access during checkpointing. + checkpointMu sync.Mutex + wcp *workflowpb.WorkflowCheckpoint + wi *topo.WorkflowInfo +} + +// Update update the status and checkpointing the update. +func (c *Checkpoint) Update(taskID string, status workflowpb.TaskState) error { + c.checkpointMu.Lock() + defer c.checkpointMu.Unlock() + c.wcp.Tasks[taskID].State = status + return c.Store() +} + +// Store packets the checkpoint and sends it to the topology server. +func (c *Checkpoint) Store() error { + var err error + var data []byte + data, err = json.Marshal(c.wcp) + if err != nil { + return err + } + c.wi.Data = data + return c.topoServer.SaveWorkflow(context.TODO(), c.wi) +} + +// CheckpointFile checkpoints the data into local files. This is used for debugging. 
+//type CheckpointFile struct { +// FilePath string +// counter int +//} +// +//// CheckpointFunc implements Checkpoint.CheckpointFunc +//func (c *CheckpointFile) Checkpoint(s *workflowpb.WorkflowCheckpoint) error { +// file, err := os.Create(fmt.Sprintf("%v_%v", c.FilePath, c.counter)) +// c.counter++ +// +// if err != nil { +// return err +// } +// defer file.Close() +// fmt.Fprintln(file, fmt.Sprintf("code version: %v", s.CodeVersion)) +// for _, task := range s.Tasks { +// fmt.Fprintln(file, fmt.Sprintf("task: state: %v\n attributes: %v\n", task.State, task.Attributes)) +// } +// return nil +//} diff --git a/go/vt/workflow/resharding/horizontal_resharding_workflow.go b/go/vt/workflow/resharding/horizontal_resharding_workflow.go index 0103bd1eddb..1c6a72467b0 100644 --- a/go/vt/workflow/resharding/horizontal_resharding_workflow.go +++ b/go/vt/workflow/resharding/horizontal_resharding_workflow.go @@ -28,10 +28,16 @@ import ( topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" workflowpb "github.com/youtube/vitess/go/vt/proto/workflow" + statepb "github.com/youtube/vitess/go/vt/proto/workflowstate" ) const ( horizontalReshardingFactoryName = "horizontal_resharding" + copySchemaTaskName = "copy_schema" + splitCloneTaskName = "split_clone" + waitFilteredReplicationTaskName = "wait_replication" + splitDiffTaskName = "split_diff" + migrateTaskName = "migrate" ) // HorizontalReshardingData is the data structure to store resharding arguments. @@ -48,6 +54,8 @@ type HorizontalReshardingWorkflow struct { wr ReshardingWrangler manager *workflow.Manager topoServer topo.Server + // wi is the topo.WorkflowInfo + wi *topo.WorkflowInfo // logger is the logger we export UI logs from. 
logger *logutil.MemoryLogger @@ -63,6 +71,9 @@ type HorizontalReshardingWorkflow struct { vtworkers []string subWorkflows []*PerShardHorizontalResharding + + subTasks map[string]*statepb.TaskContainer + taskParameters []*statepb.TaskParam } // PerShardHorizontalReshardingData is the data structure to store the resharding arguments for each shard. @@ -101,7 +112,10 @@ func (hw *HorizontalReshardingWorkflow) Run(ctx context.Context, manager *workfl hw.rootUINode.BroadcastChanges(true /* updateChildren */) // TODO(yipeiw): Support action button to allow retry, stop, restart. - if err := hw.executeWorkflow(); err != nil { + // if err := hw.executeWorkflow(); err != nil { + // return err + // } + if err := hw.runWorkflow(); err != nil { return err } @@ -129,11 +143,19 @@ func (hw *HorizontalReshardingWorkflow) createSubWorkflows() error { sourceShard = os.Right[0] destinationShards = os.Left } - if err := hw.createWorkflowPerShard(sourceShard, destinationShards, hw.vtworkers[i]); err != nil { return err } } + // Initialize the tasks (parameters and states) for the workflow. 
+ hw.subTasks = map[string]*statepb.TaskContainer{ + copySchemaTaskName: new(CopySchemaTaskHelper).InitTasks(hw.subWorkflows), + splitCloneTaskName: new(SplitCloneTaskHelper).InitTasks(hw.subWorkflows), + waitFilteredReplicationTaskName: new(WaitFilteredReplicationTaskHelper).InitTasks(hw.subWorkflows), + splitDiffTaskName: new(SplitDiffTaskHelper).InitTasks(hw.subWorkflows), + migrateTaskName: new(MigrateTaskHelper).InitTasks(hw.subWorkflows), + } + return nil } @@ -180,99 +202,193 @@ func (hw *HorizontalReshardingWorkflow) createWorkflowPerShard(sourceShard *topo return nil } -func (hw *HorizontalReshardingWorkflow) executeWorkflow() error { - if err := hw.runAllSubWorkflows(hw.executeCopySchemaPerShard); err != nil { - hw.logger.Infof("Horizontal Resharding: error in CopySchemaShard: %v.", err) - return err - } - if err := hw.runAllSubWorkflows(hw.executeSplitClonePerShard); err != nil { - hw.logger.Infof("Horizontal Resharding: error in SplitClone: %v.", err) +// checkpointed saves a checkpoint in topo server. +// Needs to be called with the lock. 
+func (hw *HorizontalReshardingWorkflow) checkpointed(ctx context.Context) error { + var err error + hw.wi.Data, err = json.Marshal(hw.subTasks) + if err != nil { return err } - if err := hw.runAllSubWorkflows(hw.executeSplitDiffPerShard); err != nil { - hw.logger.Infof("Horizontal Resharding: error in SplitDiff: %v.", err) - return err + err = hw.topoServer.SaveWorkflow(ctx, hw.wi) + if err != nil { + hw.logger.Errorf("SaveWorkflow failed: %v", err) + } else { + hw.logger.Infof("SaveWorkflow successful") } - if err := hw.runAllSubWorkflows(hw.executeMigratePerShard); err != nil { - hw.logger.Infof("Horizontal Resharding: error in MigratedServedType: %v.", err) - return err + return err +} + +// updateStatus will update the status for specific task +func (hw *HorizontalReshardingWorkflow) updateStatus(step string, taskParam *statepb.TaskParam, status statepb.TaskState) { + hw.subTasks[step].Tasks[taskParam.String()].State = status + hw.checkpointed(context.TODO()) // I think this context needs separate control, we always want the checkpointing to succeed. +} + +func (hw *HorizontalReshardingWorkflow) runWorkflow() error { + // TODO(yipeiw): the code for each step execution is very similar, code refactorition needed in the next step. + + // Dynamically decides the task parameters based on the step and execution states. + hw.taskParameters = GetTaskParam(hw.subTasks[copySchemaTaskName]) + + // To verify the task parameters and status, I Print it out and check manually in unit test. 
+ PrintTasks(copySchemaTaskName, hw.subTasks[copySchemaTaskName], hw.taskParameters) + var err error + err = hw.runAllTasks( + func(param *statepb.TaskParam) error { + var taskErr error + status := statepb.TaskState_Done + taskErr = hw.runCopySchema(param) + if taskErr != nil { + status = statepb.TaskState_Failed + hw.logger.Infof("Horizontal Resharding: error in CopySchemaShard: %v.", taskErr) + } + hw.updateStatus(copySchemaTaskName, param, status) + return taskErr + }) + PrintTasks("AFTER_"+copySchemaTaskName, hw.subTasks[copySchemaTaskName], nil) + + hw.taskParameters = GetTaskParam(hw.subTasks[splitCloneTaskName]) + PrintTasks(splitCloneTaskName, hw.subTasks[splitCloneTaskName], hw.taskParameters) + err = hw.runAllTasks( + func(param *statepb.TaskParam) error { + var taskErr error + status := statepb.TaskState_Done + taskErr = hw.runSplitClone(param) + if taskErr != nil { + status = statepb.TaskState_Failed + hw.logger.Infof("Horizontal Resharding: error in SplitClone: %v.", err) + } + hw.updateStatus(splitCloneTaskName, param, status) + return taskErr + }) + PrintTasks("AFTER_"+splitCloneTaskName, hw.subTasks[splitCloneTaskName], nil) + + hw.taskParameters = GetTaskParam(hw.subTasks[waitFilteredReplicationTaskName]) + PrintTasks(waitFilteredReplicationTaskName, hw.subTasks[waitFilteredReplicationTaskName], hw.taskParameters) + err = hw.runAllTasks( + func(param *statepb.TaskParam) error { + var taskErr error + status := statepb.TaskState_Done + taskErr = hw.runWaitFilteredReplication(param) + if taskErr != nil { + status = statepb.TaskState_Failed + hw.logger.Infof("Horizontal Resharding: error in SplitDiff: %v.", err) + } + hw.updateStatus(waitFilteredReplicationTaskName, param, status) + return taskErr + }) + PrintTasks("AFTER_"+waitFilteredReplicationTaskName, hw.subTasks[waitFilteredReplicationTaskName], nil) + + hw.taskParameters = GetTaskParam(hw.subTasks[splitDiffTaskName]) + PrintTasks(splitDiffTaskName, hw.subTasks[splitDiffTaskName], 
hw.taskParameters) + err = hw.runAllTasks( + func(param *statepb.TaskParam) error { + var taskErr error + status := statepb.TaskState_Done + taskErr = hw.runSplitDiff(param) + if taskErr != nil { + status = statepb.TaskState_Failed + hw.logger.Infof("Horizontal Resharding: error in SplitDiff: %v.", err) + } + hw.updateStatus(splitDiffTaskName, param, status) + return taskErr + }) + PrintTasks("After_"+splitDiffTaskName, hw.subTasks[splitDiffTaskName], nil) + + hw.taskParameters = GetTaskParam(hw.subTasks[migrateTaskName]) + PrintTasks(migrateTaskName, hw.subTasks[migrateTaskName], hw.taskParameters) + // run the migration tasks sequentially. + for _, param := range hw.taskParameters { + status := statepb.TaskState_Done + if taskErr := hw.runMigrate(param); taskErr != nil { + status = statepb.TaskState_Failed + hw.logger.Infof("Horizontal Resharding: error in MigratedServedType: %v.", err) + return taskErr + } + hw.updateStatus(migrateTaskName, param, status) } + PrintTasks("AFTER_"+migrateTaskName, hw.subTasks[migrateTaskName], nil) + return nil } -// runAllSubWorkflows runs jobs in parallel. -func (hw *HorizontalReshardingWorkflow) runAllSubWorkflows(executeFunc func(subWorkflow *PerShardHorizontalResharding) error) error { +// runAllTasks runs jobs in parallel. The task parameters are dynamically updated befor execution on each step. It depends on the parallism pattern for the specific step and +// progress of each step (if it is retried). +// The executeFunc is responsible for handling how to use the parameter in the step. 
+func (hw *HorizontalReshardingWorkflow) runAllTasks(executeFunc func(param *statepb.TaskParam) error) error { ec := concurrency.AllErrorRecorder{} wg := sync.WaitGroup{} - for _, sw := range hw.subWorkflows { + for _, s := range hw.taskParameters { wg.Add(1) - go func(s *PerShardHorizontalResharding) { + go func(s *statepb.TaskParam) { defer wg.Done() ec.RecordError(executeFunc(s)) - }(sw) + }(s) } wg.Wait() return ec.Error() } -// executeCopySchemaPerShard runs CopySchemaShard to copy the schema of a source shard to all its destination shards. -// TODO(yipeiw): excludeTable information can be added to UI input parameters, s.t the user can customize excluded tables during resharding. -func (hw *HorizontalReshardingWorkflow) executeCopySchemaPerShard(perhw *PerShardHorizontalResharding) error { - sourceKeyspaceShard := topoproto.KeyspaceShardString(perhw.Keyspace, perhw.SourceShard) - for _, d := range perhw.DestinationShards { - err := hw.wr.CopySchemaShardFromShard(hw.ctx, nil /* tableArray*/, nil /* excludeTableArray */, true /*includeViews*/, perhw.Keyspace, perhw.SourceShard, perhw.Keyspace, d, wrangler.DefaultWaitSlaveTimeout) - if err != nil { - hw.logger.Infof("Horizontal Resharding: error in CopySchemaShardFromShard from %s to %s: %v.", sourceKeyspaceShard, d, err) - return err - } - hw.logger.Infof("Horizontal Resharding: CopySchemaShardFromShard from %s to %s is finished.", sourceKeyspaceShard, d) +// runCopySchemaPerShard runs CopySchema for a destination shard. +// There should be #destshards parameters, while each param includes 1 sourceshard and 1 destshard. 
+func (hw *HorizontalReshardingWorkflow) runCopySchema(param *statepb.TaskParam) error { + s := param.SourceShards[0] + d := param.DestinationShards[0] + err := hw.wr.CopySchemaShardFromShard(hw.ctx, nil /* tableArray*/, nil /* excludeTableArray */, true, /*includeViews*/ + param.Keyspace, s, param.Keyspace, d, wrangler.DefaultWaitSlaveTimeout) + if err != nil { + hw.logger.Infof("Horizontal Resharding: error in CopySchemaShardFromShard from %s to %s: %v.", s, d, err) } - return nil + hw.logger.Infof("Horizontal Resharding: CopySchemaShardFromShard from %s to %s is finished.", s, d) + return err } -// executeSplitClonePerShard runs SplitClone to clone the data within a keyspace from a source shard to its destination shards. -func (hw *HorizontalReshardingWorkflow) executeSplitClonePerShard(perhw *PerShardHorizontalResharding) error { - sourceKeyspaceShard := topoproto.KeyspaceShardString(perhw.Keyspace, perhw.SourceShard) - var destinationKeyspaceShards []string - for _, destShard := range perhw.DestinationShards { - destinationKeyspaceShards = append(destinationKeyspaceShards, topoproto.KeyspaceShardString(perhw.Keyspace, destShard)) - } +// runSplitClonePerShard runs SplitClone for a source shard. +// There should be #sourceshards parameters, while each param includes 1 sourceshard and its destshards. The destShards are useless here. +func (hw *HorizontalReshardingWorkflow) runSplitClone(param *statepb.TaskParam) error { + sourceKeyspaceShard := topoproto.KeyspaceShardString(param.Keyspace, param.SourceShards[0]) // Reset the vtworker to avoid error if vtworker command has been called elsewhere. // This is because vtworker class doesn't cleanup the environment after execution. - automation.ExecuteVtworker(hw.ctx, perhw.Vtworker, []string{"Reset"}) + automation.ExecuteVtworker(hw.ctx, param.Vtworker, []string{"Reset"}) // The flag min_healthy_rdonly_tablets is set to 1 (default value is 2). 
// Therefore, we can reuse the normal end to end test setting, which has only 1 rdonly tablet. // TODO(yipeiw): Add min_healthy_rdonly_tablets as an input argument in UI. args := []string{"SplitClone", "--min_healthy_rdonly_tablets=1", sourceKeyspaceShard} - if _, err := automation.ExecuteVtworker(hw.ctx, perhw.Vtworker, args); err != nil { - hw.logger.Infof("Horizontal resharding: error in SplitClone in keyspace %s: %v.", perhw.Keyspace, err) + if _, err := automation.ExecuteVtworker(hw.ctx, param.Vtworker, args); err != nil { + hw.logger.Infof("Horizontal resharding: error in SplitClone in keyspace %s: %v.", param.Keyspace, err) return err } hw.logger.Infof("Horizontal resharding: SplitClone is finished.") - // Wait for filtered replication task. - for _, d := range perhw.DestinationShards { - if err := hw.wr.WaitForFilteredReplication(hw.ctx, perhw.Keyspace, d, wrangler.DefaultWaitForFilteredReplicationMaxDelay); err != nil { - hw.logger.Infof("Horizontal Resharding: error in WaitForFilteredReplication: %v.", err) - return err - } - hw.logger.Infof("Horizontal Resharding:WaitForFilteredReplication is finished on " + d) + + return nil +} + +// runWaitFilteredReplication runs WaitForFilteredReplication for a destination shard. +// There should be #destshards parameters, while each param includes 1 sourceshard and 1 destshard. +func (hw *HorizontalReshardingWorkflow) runWaitFilteredReplication(param *statepb.TaskParam) error { + d := param.DestinationShards[0] + if err := hw.wr.WaitForFilteredReplication(hw.ctx, param.Keyspace, d, wrangler.DefaultWaitForFilteredReplicationMaxDelay); err != nil { + hw.logger.Infof("Horizontal Resharding: error in WaitForFilteredReplication: %v.", err) + return err } + hw.logger.Infof("Horizontal Resharding:WaitForFilteredReplication is finished on " + d) return nil } -// executeSplitDiffPerShard runs SplitDiff for every destination shard to the source and destination -// to ensure all the data is present and correct. 
-func (hw *HorizontalReshardingWorkflow) executeSplitDiffPerShard(perhw *PerShardHorizontalResharding) error { +// runSplitDiffPerShard runs SplitDiff for a source shard. +// There should be #sourceshards parameters, while each param includes 1 sourceshard and its destshards. +func (hw *HorizontalReshardingWorkflow) runSplitDiff(param *statepb.TaskParam) error { var destinationKeyspaceShards []string - for _, destShard := range perhw.DestinationShards { - destinationKeyspaceShards = append(destinationKeyspaceShards, topoproto.KeyspaceShardString(perhw.Keyspace, destShard)) + for _, destShard := range param.DestinationShards { + destinationKeyspaceShards = append(destinationKeyspaceShards, topoproto.KeyspaceShardString(param.Keyspace, destShard)) } for _, d := range destinationKeyspaceShards { - automation.ExecuteVtworker(hw.ctx, perhw.Vtworker, []string{"Reset"}) + automation.ExecuteVtworker(hw.ctx, param.Vtworker, []string{"Reset"}) args := []string{"SplitDiff", "--min_healthy_rdonly_tablets=1", d} - _, err := automation.ExecuteVtworker(hw.ctx, perhw.Vtworker, args) + _, err := automation.ExecuteVtworker(hw.ctx, param.Vtworker, args) if err != nil { return err } @@ -281,14 +397,16 @@ func (hw *HorizontalReshardingWorkflow) executeSplitDiffPerShard(perhw *PerShard return nil } -// executeMigratePerShard runs MigrateServedTypes to switch over to serving from the new shards. -func (hw *HorizontalReshardingWorkflow) executeMigratePerShard(perhw *PerShardHorizontalResharding) error { - sourceKeyspaceShard := topoproto.KeyspaceShardString(perhw.Keyspace, perhw.SourceShard) +// runMigratePerShard runs the migration sequentially among all source shards. +// There should be 1 parameter, which includes all source shards to be migrated. 
+func (hw *HorizontalReshardingWorkflow) runMigrate(param *statepb.TaskParam) error { + s := param.SourceShards[0] + sourceKeyspaceShard := topoproto.KeyspaceShardString(param.Keyspace, s) servedTypeParams := []topodatapb.TabletType{topodatapb.TabletType_RDONLY, topodatapb.TabletType_REPLICA, topodatapb.TabletType_MASTER} for _, servedType := range servedTypeParams { - err := hw.wr.MigrateServedTypes(hw.ctx, perhw.Keyspace, perhw.SourceShard, nil /* cells */, servedType, false /* reverse */, false /* skipReFreshState */, wrangler.DefaultFilteredReplicationWaitTime) + err := hw.wr.MigrateServedTypes(hw.ctx, param.Keyspace, s, nil /* cells */, servedType, false /* reverse */, false /* skipReFreshState */, wrangler.DefaultFilteredReplicationWaitTime) if err != nil { hw.logger.Infof("Horizontal Resharding: error in MigrateServedTypes on servedType %s: %v.", servedType, err) return err @@ -298,6 +416,126 @@ func (hw *HorizontalReshardingWorkflow) executeMigratePerShard(perhw *PerShardHo return nil } +// All the executeXXX. functions are in the old implementation, which is replaced by the runXXX function now. +//func (hw *HorizontalReshardingWorkflow) executeWorkflow() error { +// if err := hw.runAllSubWorkflows(hw.executeCopySchemaPerShard); err != nil { +// hw.logger.Infof("Horizontal Resharding: error in CopySchemaShard: %v.", err) +// return err +// } +// +// if err := hw.runAllSubWorkflows(hw.executeSplitClonePerShard); err != nil { +// hw.logger.Infof("Horizontal Resharding: error in SplitClone: %v.", err) +// return err +// } +// if err := hw.runAllSubWorkflows(hw.executeSplitDiffPerShard); err != nil { +// hw.logger.Infof("Horizontal Resharding: error in SplitDiff: %v.", err) +// return err +// } +// if err := hw.runAllSubWorkflows(hw.executeMigratePerShard); err != nil { +// hw.logger.Infof("Horizontal Resharding: error in MigratedServedType: %v.", err) +// return err +// } +// return nil +//} +// +//// runAllSubWorkflows runs jobs in parallel. 
+//func (hw *HorizontalReshardingWorkflow) runAllSubWorkflows(executeFunc func(subWorkflow *PerShardHorizontalResharding) error) error { +// ec := concurrency.AllErrorRecorder{} +// wg := sync.WaitGroup{} +// for _, sw := range hw.subWorkflows { +// wg.Add(1) +// go func(s *PerShardHorizontalResharding) { +// defer wg.Done() +// ec.RecordError(executeFunc(s)) +// }(sw) +// } +// wg.Wait() +// return ec.Error() +//} +// +//// executeCopySchemaPerShard runs CopySchemaShard to copy the schema of a source shard to all its destination shards. +//// TODO(yipeiw): excludeTable information can be added to UI input parameters, s.t the user can customize excluded tables during resharding. +//func (hw *HorizontalReshardingWorkflow) executeCopySchemaPerShard(perhw *PerShardHorizontalResharding) error { +// sourceKeyspaceShard := topoproto.KeyspaceShardString(perhw.Keyspace, perhw.SourceShard) +// for _, d := range perhw.DestinationShards { +// err := hw.wr.CopySchemaShardFromShard(hw.ctx, nil /* tableArray*/, nil /* excludeTableArray */, true /*includeViews*/, perhw.Keyspace, perhw.SourceShard, perhw.Keyspace, d, wrangler.DefaultWaitSlaveTimeout) +// if err != nil { +// hw.logger.Infof("Horizontal Resharding: error in CopySchemaShardFromShard from %s to %s: %v.", sourceKeyspaceShard, d, err) +// return err +// } +// hw.logger.Infof("Horizontal Resharding: CopySchemaShardFromShard from %s to %s is finished.", sourceKeyspaceShard, d) +// } +// return nil +//} +// +//// executeSplitClonePerShard runs SplitClone to clone the data within a keyspace from a source shard to its destination shards. 
+//func (hw *HorizontalReshardingWorkflow) executeSplitClonePerShard(perhw *PerShardHorizontalResharding) error { +// sourceKeyspaceShard := topoproto.KeyspaceShardString(perhw.Keyspace, perhw.SourceShard) +// var destinationKeyspaceShards []string +// for _, destShard := range perhw.DestinationShards { +// destinationKeyspaceShards = append(destinationKeyspaceShards, topoproto.KeyspaceShardString(perhw.Keyspace, destShard)) +// } +// +// // Reset the vtworker to avoid error if vtworker command has been called elsewhere. +// // This is because vtworker class doesn't cleanup the environment after execution. +// automation.ExecuteVtworker(hw.ctx, perhw.Vtworker, []string{"Reset"}) +// // The flag min_healthy_rdonly_tablets is set to 1 (default value is 2). +// // Therefore, we can reuse the normal end to end test setting, which has only 1 rdonly tablet. +// // TODO(yipeiw): Add min_healthy_rdonly_tablets as an input argument in UI. +// args := []string{"SplitClone", "--min_healthy_rdonly_tablets=1", sourceKeyspaceShard} +// if _, err := automation.ExecuteVtworker(hw.ctx, perhw.Vtworker, args); err != nil { +// hw.logger.Infof("Horizontal resharding: error in SplitClone in keyspace %s: %v.", perhw.Keyspace, err) +// return err +// } +// hw.logger.Infof("Horizontal resharding: SplitClone is finished.") +// // Wait for filtered replication task. +// for _, d := range perhw.DestinationShards { +// if err := hw.wr.WaitForFilteredReplication(hw.ctx, perhw.Keyspace, d, wrangler.DefaultWaitForFilteredReplicationMaxDelay); err != nil { +// hw.logger.Infof("Horizontal Resharding: error in WaitForFilteredReplication: %v.", err) +// return err +// } +// hw.logger.Infof("Horizontal Resharding:WaitForFilteredReplication is finished on " + d) +// } +// return nil +//} +// +//// executeSplitDiffPerShard runs SplitDiff for every destination shard to the source and destination +//// to ensure all the data is present and correct. 
+//func (hw *HorizontalReshardingWorkflow) executeSplitDiffPerShard(perhw *PerShardHorizontalResharding) error { +// var destinationKeyspaceShards []string +// for _, destShard := range perhw.DestinationShards { +// destinationKeyspaceShards = append(destinationKeyspaceShards, topoproto.KeyspaceShardString(perhw.Keyspace, destShard)) +// } +// +// for _, d := range destinationKeyspaceShards { +// automation.ExecuteVtworker(hw.ctx, perhw.Vtworker, []string{"Reset"}) +// args := []string{"SplitDiff", "--min_healthy_rdonly_tablets=1", d} +// _, err := automation.ExecuteVtworker(hw.ctx, perhw.Vtworker, args) +// if err != nil { +// return err +// } +// } +// hw.logger.Infof("Horizontal resharding: SplitDiff is finished.") +// return nil +//} +// +//// executeMigratePerShard runs MigrateServedTypes to switch over to serving from the new shards. +//func (hw *HorizontalReshardingWorkflow) executeMigratePerShard(perhw *PerShardHorizontalResharding) error { +// sourceKeyspaceShard := topoproto.KeyspaceShardString(perhw.Keyspace, perhw.SourceShard) +// servedTypeParams := []topodatapb.TabletType{topodatapb.TabletType_RDONLY, +// topodatapb.TabletType_REPLICA, +// topodatapb.TabletType_MASTER} +// for _, servedType := range servedTypeParams { +// err := hw.wr.MigrateServedTypes(hw.ctx, perhw.Keyspace, perhw.SourceShard, nil /* cells */, servedType, false /* reverse */, false /* skipReFreshState */, wrangler.DefaultFilteredReplicationWaitTime) +// if err != nil { +// hw.logger.Infof("Horizontal Resharding: error in MigrateServedTypes on servedType %s: %v.", servedType, err) +// return err +// } +// hw.logger.Infof("Horizontal Resharding: MigrateServedTypes is finished on tablet %s serve type %s.", sourceKeyspaceShard, servedType) +// } +// return nil +//} + func (hw *HorizontalReshardingWorkflow) setUIMessage(message string) { log.Infof("Horizontal resharding on keyspace %v: %v.", hw.keyspace, message) hw.rootUINode.Log = hw.logger.String() diff --git 
a/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go b/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go index efc445e5e8f..955588179e7 100644 --- a/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go +++ b/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go @@ -6,14 +6,18 @@ import ( "github.com/golang/mock/gomock" "github.com/youtube/vitess/go/vt/logutil" + "github.com/youtube/vitess/go/vt/topo/memorytopo" "github.com/youtube/vitess/go/vt/worker/fakevtworkerclient" "github.com/youtube/vitess/go/vt/worker/vtworkerclient" "github.com/youtube/vitess/go/vt/wrangler" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" + workflowpb "github.com/youtube/vitess/go/vt/proto/workflow" + statepb "github.com/youtube/vitess/go/vt/proto/workflowstate" ) func TestHorizontalResharding(t *testing.T) { + ts := memorytopo.NewServer("cell") // Create fake wrangler using mock interface, which is used for the unit test in steps CopySchema and MigratedServedType. ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -21,10 +25,11 @@ func TestHorizontalResharding(t *testing.T) { // Create the workflow (ignore the node construction since we don't test the front-end part in this unit test). 
hw := &HorizontalReshardingWorkflow{ - keyspace: "test_keyspace", - vtworkers: []string{"localhost:15032"}, - wr: mockWranglerInterface, - logger: logutil.NewMemoryLogger(), + keyspace: "test_keyspace", + vtworkers: []string{"localhost:15032"}, + wr: mockWranglerInterface, + topoServer: ts, + logger: logutil.NewMemoryLogger(), } perShard := &PerShardHorizontalResharding{ @@ -88,8 +93,31 @@ func TestHorizontalResharding(t *testing.T) { fakeVtworkerClient.RegisterResultForAddr("localhost:15032", []string{"SplitDiff", "--min_healthy_rdonly_tablets=1", "test_keyspace/-80"}, "", nil) fakeVtworkerClient.RegisterResultForAddr("localhost:15032", []string{"SplitDiff", "--min_healthy_rdonly_tablets=1", "test_keyspace/80-"}, "", nil) + // manually complete the task initalization, which is part of createSubWorkflows. + // TODO(yipeiw): this code is repeated, should be removed from unit test. + hw.subTasks = map[string]*statepb.TaskContainer{ + copySchemaTaskName: new(CopySchemaTaskHelper).InitTasks(hw.subWorkflows), + splitCloneTaskName: new(SplitCloneTaskHelper).InitTasks(hw.subWorkflows), + waitFilteredReplicationTaskName: new(WaitFilteredReplicationTaskHelper).InitTasks(hw.subWorkflows), + splitDiffTaskName: new(SplitDiffTaskHelper).InitTasks(hw.subWorkflows), + migrateTaskName: new(MigrateTaskHelper).InitTasks(hw.subWorkflows), + } + + // Create the initial workflowpb.Workflow object. + w := &workflowpb.Workflow{ + Uuid: "testworkflow0000", + FactoryName: "horizontal_resharding", + State: workflowpb.WorkflowState_NotStarted, + } + var err error + hw.wi, err = hw.topoServer.CreateWorkflow(hw.ctx, w) + if err != nil { + t.Errorf("%s: Horizontal resharding workflow fails in creating workflowInfo", err) + } + // Test the execution of horizontal resharding. - if err := hw.executeWorkflow(); err != nil { + // To simply demonstate the ability to track task status and leverage it for control the workflow execution, only happy path is used here. 
+ if err := hw.runWorkflow(); err != nil { t.Errorf("%s: Horizontal resharding workflow should not fail", err) } } diff --git a/go/vt/workflow/resharding/parallel_runner.go b/go/vt/workflow/resharding/parallel_runner.go new file mode 100644 index 00000000000..371b0ac518a --- /dev/null +++ b/go/vt/workflow/resharding/parallel_runner.go @@ -0,0 +1,73 @@ +package resharding + +import ( + "fmt" + + log "github.com/golang/glog" + + "github.com/youtube/vitess/go/vt/concurrency" + + workflowpb "github.com/youtube/vitess/go/vt/proto/workflow" +) + +type level int + +const ( + SEQUENTIAL level = 1 + iota + PARALLEL +) + +// ParallelRunner is used to control executing tasks concurrently. +// Each phase has its own ParallelRunner object. +type ParallelRunner struct { + // TODO(yipeiw) : ParallelRunner should fields for per-task controllable actions. +} + +// Run is the entry point for controling task executions. +// runTasks should be a copy of tasks with the expected execution order, the status of task should be +// both updated in this copy and the original one (checkpointer.Update does this). This is to avoid +// data racing situation. +func (p *ParallelRunner) Run(runTasks []*workflowpb.Task, executeFunc func(map[string]string) error, cp *Checkpoint, concurrencyLevel level) error { + var parallelNum int // default value is 0. The task will not run in this case. + switch concurrencyLevel { + case SEQUENTIAL: + parallelNum = 1 + case PARALLEL: + parallelNum = len(runTasks) + } + + // TODO(yipeiw): Support retry, restart, pause actions. Wrap the execution to interleave with actions. + // sem is a channel used to control the level of concurrency. + sem := make(chan bool, parallelNum) + var ec concurrency.AllErrorRecorder + for _, task := range runTasks { + // TODO(yipeiw): Add checking logics to support retry, pause, restart actions when lauching tasks. 
+ if task.State == workflowpb.TaskState_TaskDone { + continue + } + + sem <- true + go func(t *workflowpb.Task) { + defer func() { <-sem }() + status := workflowpb.TaskState_TaskDone + if err := executeFunc(t.Attributes); err != nil { + status = workflowpb.TaskState_TaskNotStarted + t.Error = fmt.Sprintf("%v", err) + ec.RecordError(err) + } + + t.State = status + // only log the error passage rather then propograting it through ErrorRecorder. The reason is that error message in + // ErrorRecorder will leads to stop of the workflow, which is unexpected if only checkpointing fails. + // However, the checkpointing failure right after initializing the tasks should lead to the stop of the workflow. + if err := cp.Update(task.TaskId, status); err != nil { + log.Errorf("%v", err) + } + }(task) + } + // Wait until all running jobs are done. + for i := 0; i < parallelNum; i++ { + sem <- true + } + return ec.Error() +} diff --git a/go/vt/workflow/resharding/parallel_runner_test.go b/go/vt/workflow/resharding/parallel_runner_test.go new file mode 100644 index 00000000000..2f64e5703c1 --- /dev/null +++ b/go/vt/workflow/resharding/parallel_runner_test.go @@ -0,0 +1,95 @@ +package resharding + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/youtube/vitess/go/vt/topo" + "github.com/youtube/vitess/go/vt/topo/memorytopo" + + workflowpb "github.com/youtube/vitess/go/vt/proto/workflow" +) + +const ( + printName = "Sleep" + + codeVersion = 1 +) + +func executePrint(attr map[string]string) error { + fmt.Printf("The number passed to me is %v \n", attr["Number"]) + return nil +} + +func taskNameOfPrint(num string) string { + return fmt.Sprintf("%v_%v", printName, num) +} + +func InitPrintTasks(numTasks int) *workflowpb.WorkflowCheckpoint { + tasks := make(map[string]*workflowpb.Task) + var infoList []string + for i := 0; i < numTasks; i++ { + num := fmt.Sprintf("%v", i) + t := &workflowpb.Task{ + TaskId: taskNameOfPrint(num), + State: 
workflowpb.TaskState_TaskNotStarted, + Attributes: map[string]string{"Number": num}, + } + tasks[t.TaskId] = t + infoList = append(infoList, num) + } + return &workflowpb.WorkflowCheckpoint{ + CodeVersion: codeVersion, + Tasks: tasks, + Settings: map[string]string{"numbers": strings.Join(infoList, ",")}, + } +} + +func GetOrderedPrintTasks(wcp *workflowpb.WorkflowCheckpoint) []*workflowpb.Task { + var tasks []*workflowpb.Task + for _, n := range strings.Split(wcp.Settings["numbers"], ",") { + taskID := taskNameOfPrint(n) + tasks = append(tasks, wcp.Tasks[taskID]) + } + return tasks +} + +func TestParallelRunner(t *testing.T) { + ts := memorytopo.NewServer("cell") + w := &workflowpb.Workflow{ + Uuid: "testparallelrunner", + FactoryName: "simple_print", + State: workflowpb.WorkflowState_NotStarted, + } + var err error + var wi *topo.WorkflowInfo + wi, err = ts.CreateWorkflow(context.TODO(), w) + if err != nil { + t.Errorf("%s: Parallel Runner fails in creating workflow", err) + } + + taskNum := 5 + initCheckpoint := InitPrintTasks(taskNum) + + cp := &Checkpoint{ + topoServer: ts, + wcp: initCheckpoint, + wi: wi, + } + cp.Store() + + var p *ParallelRunner + tasks := GetOrderedPrintTasks(initCheckpoint) + if err := p.Run(tasks, executePrint, cp, PARALLEL); err != nil { + t.Errorf("%s: Parallel Runner should not fail", err) + } + + //Check whether all tasks are in finished status. + for _, task := range cp.wcp.Tasks { + if task.State != workflowpb.TaskState_TaskDone { + t.Errorf("Task info: %v, %v, %v: Parallel Runner task not finished", task.TaskId, task.State, task.Attributes) + } + } +} diff --git a/go/vt/workflow/resharding/status.go b/go/vt/workflow/resharding/status.go new file mode 100644 index 00000000000..6968019ac64 --- /dev/null +++ b/go/vt/workflow/resharding/status.go @@ -0,0 +1,118 @@ +package resharding + +import ( + "fmt" + + statepb "github.com/youtube/vitess/go/vt/proto/workflowstate" +) + +// GetTaskParam generates the parameters for unfinished tasks. 
+func GetTaskParam(tasks *statepb.TaskContainer) []*statepb.TaskParam { + var params []*statepb.TaskParam + for _, v := range tasks.Tasks { + if v.State != statepb.TaskState_Done && v.State != statepb.TaskState_Running { + params = append(params, v.Param) + } + } + return params +} + +// PrintTasks prints the tasks and the dynamically generated parameters for a specific step. +func PrintTasks(step string, tasks *statepb.TaskContainer, params []*statepb.TaskParam) { + fmt.Printf("\n%s printing the tasks information:", step) + for _, v := range tasks.Tasks { + fmt.Println("task param: ", v.Param) + fmt.Println("task status: ", v.State) + } + + if params != nil { + fmt.Println("printing the task parameters passed to execution") + for _, p := range params { + fmt.Println("execution task param: ", p) + } + } +} + +func initParamPerDestinationShard(subWorkflows []*PerShardHorizontalResharding) *statepb.TaskContainer { + tasks := make(map[string]*statepb.Task) + for _, sw := range subWorkflows { + for _, d := range sw.DestinationShards { + param := &statepb.TaskParam{ + Keyspace: sw.Keyspace, + DestinationShards: []string{d}, + SourceShards: []string{sw.SourceShard}, + Vtworker: sw.Vtworker, + } + t := &statepb.Task{ + Param: param, + State: statepb.TaskState_Created, + } + tasks[param.String()] = t + } + } + return &statepb.TaskContainer{Tasks: tasks} +} + +func initParamPerSourceShard(subWorkflows []*PerShardHorizontalResharding) *statepb.TaskContainer { + tasks := make(map[string]*statepb.Task) + for _, sw := range subWorkflows { + param := &statepb.TaskParam{ + Keyspace: sw.Keyspace, + DestinationShards: sw.DestinationShards, + SourceShards: []string{sw.SourceShard}, + Vtworker: sw.Vtworker, + } + t := &statepb.Task{ + Param: param, + State: statepb.TaskState_Created, + } + tasks[param.String()] = t + } + return &statepb.TaskContainer{Tasks: tasks} +} + +// TaskHelper includes methods to generate task parameters based on the execution status while updating the status 
at the same time. +// Each step should implements this interface. +type TaskHelper interface { + InitTasks(perhw []*PerShardHorizontalResharding) *statepb.TaskContainer +} + +// CopySchemaTaskHelper implements TaskHelper interface. +type CopySchemaTaskHelper struct{} + +// InitTasks implements TaskHelper.InitTasks (per destination shard manner). +func (c CopySchemaTaskHelper) InitTasks(subWorkflows []*PerShardHorizontalResharding) *statepb.TaskContainer { + return initParamPerDestinationShard(subWorkflows) +} + +// SplitCloneTaskHelper implements TaskHelper interface. +type SplitCloneTaskHelper struct{} + +// InitTasks implements TaskHelper.InitTasks. +func (c SplitCloneTaskHelper) InitTasks(subWorkflows []*PerShardHorizontalResharding) *statepb.TaskContainer { + return initParamPerSourceShard(subWorkflows) +} + +// WaitFilteredReplicationTaskHelper implements TaskHelper interface. +type WaitFilteredReplicationTaskHelper struct{} + +// InitTasks implements TaskHelper.InitTasks. +func (c WaitFilteredReplicationTaskHelper) InitTasks(subWorkflows []*PerShardHorizontalResharding) *statepb.TaskContainer { + return initParamPerDestinationShard(subWorkflows) +} + +// SplitDiffTaskHelper implements TaskHelper interface. +type SplitDiffTaskHelper struct{} + +// InitTasks implements TaskHelper.InitTasks. +func (c SplitDiffTaskHelper) InitTasks(subWorkflows []*PerShardHorizontalResharding) *statepb.TaskContainer { + return initParamPerSourceShard(subWorkflows) +} + +// MigrateTaskHelper implements TaskHelper interface. +type MigrateTaskHelper struct{} + +// InitTasks implements TaskHelper.InitTasks. 
+func (c MigrateTaskHelper) InitTasks(subWorkflows []*PerShardHorizontalResharding) *statepb.TaskContainer { + return initParamPerSourceShard(subWorkflows) +} diff --git a/proto/workflow.proto b/proto/workflow.proto index b107c9682cf..0dcb05fb3a0 100644 --- a/proto/workflow.proto +++ b/proto/workflow.proto @@ -58,3 +58,34 @@ message Workflow { // This field only makes sense if 'state' is Done. int64 end_time = 8; } + +enum TaskState { + TaskNotStarted = 0; + TaskRunning = 1; + TaskDone = 2; +} + +message Task { + string task_id = 1; + TaskState state = 2; + // attributes includes the parameters the task needs. + map attributes = 3; + string error = 4; +} + +message WorkflowCheckpoint { + // code_version is used to detect incompabilities between the version of the + // running workflow and the one which wrote the checkpoint. If they don't + // match, the workflow must not continue. The author of workflow must update + // this variable in their implementation when incompabilities are introduced. + int32 code_version = 1; + // tasks stores all tasks of the workflow in a map. The key is a unique name + // to identify the task, e.g. clone/-80. + + // Task is the data structure that stores the execution status and the + // attributes of a task. + map tasks = 2; + // settings includes workflow specific data, e.g. the resharding workflow + // would store the source shards and destination shards. + map settings = 3; +} From 0e1023ab87ed5975670142b31a913af7a2aa2eb1 Mon Sep 17 00:00:00 2001 From: Yipei Wang Date: Wed, 1 Feb 2017 18:21:08 -0800 Subject: [PATCH 002/108] workflow: Using ParallelRunner to implement horizontal resharding workflow. Complete the unit test and E2E test for happy path. 
--- go/vt/proto/workflow/workflow.pb.go | 70 +-- go/vt/vtctld/workflow.go | 4 +- go/vt/workflow/resharding/checkpoint.go | 55 +-- .../horizontal_resharding_workflow.go | 410 ++++-------------- .../horizontal_resharding_workflow_test.go | 105 +++-- go/vt/workflow/resharding/parallel_runner.go | 22 +- .../resharding/parallel_runner_test.go | 105 +++-- go/vt/workflow/resharding/status.go | 118 ----- go/vt/workflow/resharding/task_helper.go | 85 ++++ proto/workflow.proto | 2 +- 10 files changed, 361 insertions(+), 615 deletions(-) delete mode 100644 go/vt/workflow/resharding/status.go create mode 100644 go/vt/workflow/resharding/task_helper.go diff --git a/go/vt/proto/workflow/workflow.pb.go b/go/vt/proto/workflow/workflow.pb.go index 1b246023976..3be466f408c 100644 --- a/go/vt/proto/workflow/workflow.pb.go +++ b/go/vt/proto/workflow/workflow.pb.go @@ -180,8 +180,8 @@ func (m *Workflow) GetEndTime() int64 { } type Task struct { - TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId" json:"task_id,omitempty"` - State TaskState `protobuf:"varint,2,opt,name=state,enum=workflow.TaskState" json:"state,omitempty"` + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + State TaskState `protobuf:"varint,2,opt,name=state,enum=workflow.TaskState" json:"state,omitempty"` // attributes includes the parameters the task needs. 
Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` Error string `protobuf:"bytes,4,opt,name=error" json:"error,omitempty"` @@ -192,9 +192,9 @@ func (m *Task) String() string { return proto.CompactTextString(m) } func (*Task) ProtoMessage() {} func (*Task) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } -func (m *Task) GetTaskId() string { +func (m *Task) GetId() string { if m != nil { - return m.TaskId + return m.Id } return "" } @@ -271,35 +271,35 @@ func init() { func init() { proto.RegisterFile("workflow.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 479 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x53, 0x5d, 0x8b, 0xd3, 0x40, - 0x14, 0x75, 0xd2, 0xa4, 0x4d, 0x6f, 0xba, 0xd9, 0x72, 0x15, 0x36, 0x16, 0x94, 0x58, 0x04, 0x63, - 0xc1, 0x3e, 0x54, 0x10, 0x51, 0x76, 0x41, 0xfc, 0x40, 0x5f, 0xf6, 0x21, 0x5d, 0xf4, 0xb1, 0xcc, - 0x36, 0xb3, 0x6b, 0xc8, 0x76, 0x66, 0x99, 0x4c, 0x76, 0xe9, 0x0f, 0xf4, 0x77, 0xf8, 0xea, 0xcf, - 0x90, 0x99, 0x49, 0xd2, 0x46, 0x45, 0xd8, 0xb7, 0xfb, 0x75, 0x4e, 0xce, 0xb9, 0x73, 0x03, 0xe1, - 0xad, 0x90, 0xc5, 0xc5, 0x95, 0xb8, 0x9d, 0x5f, 0x4b, 0xa1, 0x04, 0xfa, 0x4d, 0x3e, 0xfd, 0x45, - 0xc0, 0xff, 0x56, 0x27, 0x88, 0xe0, 0x56, 0x55, 0x9e, 0x45, 0x24, 0x26, 0xc9, 0x30, 0x35, 0x31, - 0x3e, 0x81, 0xd1, 0x05, 0x5d, 0x2b, 0x21, 0xb7, 0x2b, 0x4e, 0x37, 0x2c, 0x72, 0x4c, 0x2f, 0xa8, - 0x6b, 0xa7, 0x74, 0xc3, 0x34, 0xcc, 0xb4, 0x7a, 0x16, 0xa6, 0x63, 0x7c, 0x01, 0x5e, 0xa9, 0xa8, - 0x62, 0x91, 0x1b, 0x93, 0x24, 0x5c, 0x1c, 0xcd, 0x5b, 0x05, 0xcd, 0xd7, 0x96, 0xba, 0x9d, 0xda, - 0x29, 0x4d, 0x91, 0x51, 0x45, 0x23, 0x2f, 0x26, 0xc9, 0x28, 0x35, 0x31, 0x3e, 0x00, 0x8f, 0x49, - 0x29, 0x64, 0xd4, 0x37, 0xbc, 0x36, 0xc1, 0x47, 0x00, 0xa5, 0xa2, 0x52, 0xad, 0x54, 0xbe, 0x61, - 0xd1, 0x20, 0x26, 0x49, 0x2f, 0x1d, 0x9a, 0xca, 0x59, 0xbe, 
0x61, 0xf8, 0x10, 0x7c, 0xc6, 0x33, - 0xdb, 0xf4, 0x4d, 0x73, 0xc0, 0x78, 0xa6, 0x5b, 0xd3, 0x9f, 0x04, 0xdc, 0x33, 0x5a, 0x16, 0x78, - 0x04, 0x03, 0x45, 0xcb, 0x62, 0xd5, 0x3a, 0xed, 0xeb, 0xf4, 0x4b, 0x86, 0xcf, 0x1b, 0xd1, 0x8e, - 0x11, 0x7d, 0x7f, 0x27, 0x5a, 0xe3, 0x3a, 0x82, 0x4f, 0x00, 0xa8, 0x52, 0x32, 0x3f, 0xaf, 0x14, - 0x2b, 0xa3, 0x5e, 0xdc, 0x4b, 0x82, 0xc5, 0xe3, 0xee, 0xfc, 0xfc, 0x5d, 0x3b, 0xf0, 0x91, 0x2b, - 0xb9, 0x4d, 0xf7, 0x10, 0x3b, 0x73, 0xee, 0x9e, 0xb9, 0xc9, 0x31, 0x1c, 0xfe, 0x01, 0xc2, 0x31, - 0xf4, 0x0a, 0xb6, 0xad, 0x85, 0xea, 0x50, 0x43, 0x6f, 0xe8, 0x55, 0xd5, 0x3c, 0x85, 0x4d, 0xde, - 0x38, 0xaf, 0xc9, 0xf4, 0x87, 0x03, 0xd8, 0xac, 0xf7, 0xfd, 0x77, 0xb6, 0x2e, 0xae, 0x45, 0xce, - 0x95, 0x7e, 0xc2, 0xb5, 0xc8, 0xd8, 0xea, 0x86, 0xc9, 0x32, 0x17, 0xdc, 0x70, 0x79, 0x69, 0xa0, - 0x6b, 0x5f, 0x6d, 0x09, 0x8f, 0xc1, 0xd3, 0x3b, 0x28, 0x23, 0xc7, 0x38, 0x79, 0xf6, 0xf7, 0x73, - 0xed, 0xf8, 0x8c, 0xb9, 0xda, 0x92, 0x45, 0xe1, 0x27, 0xf0, 0x4b, 0xa6, 0x54, 0xce, 0x2f, 0x9b, - 0x5d, 0xcc, 0xfe, 0xcb, 0xb0, 0xac, 0x87, 0x2d, 0x49, 0x8b, 0x9d, 0x7c, 0x06, 0xd8, 0x91, 0xff, - 0xc3, 0xfa, 0xd3, 0x7d, 0xeb, 0xc1, 0x22, 0xec, 0x2e, 0x7c, 0x6f, 0x15, 0x93, 0xb7, 0x70, 0xd0, - 0xf9, 0xc8, 0x5d, 0xf6, 0x38, 0x7b, 0x05, 0x07, 0x9d, 0x2b, 0xc5, 0x10, 0xe0, 0x54, 0xa8, 0xa5, - 0xbe, 0x32, 0x96, 0x8d, 0xef, 0x61, 0x00, 0x83, 0xb4, 0xe2, 0x3c, 0xe7, 0x97, 0x63, 0x82, 0x3e, - 0xb8, 0x1f, 0x04, 0x67, 0x63, 0x67, 0x76, 0x02, 0xc3, 0xf6, 0x50, 0x10, 0x21, 0xd4, 0x49, 0x07, - 0x77, 0x08, 0x81, 0x11, 0xda, 0x62, 0x47, 0xe0, 0xeb, 0x82, 0xc5, 0x9f, 0xf7, 0xcd, 0xdf, 0xf9, - 0xf2, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4e, 0xca, 0x5c, 0xae, 0xaf, 0x03, 0x00, 0x00, + // 473 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x53, 0xdd, 0x8a, 0xd3, 0x40, + 0x14, 0x36, 0x7f, 0xdb, 0xf4, 0xa4, 0x9b, 0x2d, 0x47, 0xc1, 0x58, 0x50, 0x62, 0x11, 0x8c, 0x05, + 0x7b, 0x51, 0x41, 0x44, 0xd9, 0x05, 0xf1, 0x07, 0xaf, 
0xf6, 0x22, 0x5d, 0xf4, 0xb2, 0xcc, 0x36, + 0xb3, 0xeb, 0x90, 0xed, 0xcc, 0x32, 0x99, 0xec, 0xd2, 0x07, 0xf4, 0x15, 0x7c, 0x06, 0x1f, 0x43, + 0x66, 0x26, 0x49, 0x1b, 0x15, 0x61, 0xef, 0xce, 0xdf, 0xf7, 0xe5, 0xfb, 0xce, 0x9c, 0x40, 0x7c, + 0x2b, 0x64, 0x79, 0x71, 0x25, 0x6e, 0xe7, 0xd7, 0x52, 0x28, 0x81, 0x61, 0x9b, 0x4f, 0x7f, 0x39, + 0x10, 0x7e, 0x6b, 0x12, 0x44, 0xf0, 0xeb, 0x9a, 0x15, 0x89, 0x93, 0x3a, 0xd9, 0x30, 0x37, 0x31, + 0x3e, 0x85, 0xd1, 0x05, 0x59, 0x2b, 0x21, 0xb7, 0x2b, 0x4e, 0x36, 0x34, 0x71, 0x4d, 0x2f, 0x6a, + 0x6a, 0xa7, 0x64, 0x43, 0x35, 0xcc, 0xb4, 0x3c, 0x0b, 0xd3, 0x31, 0xbe, 0x84, 0xa0, 0x52, 0x44, + 0xd1, 0xc4, 0x4f, 0x9d, 0x2c, 0x5e, 0x3c, 0x9c, 0x77, 0x0a, 0xda, 0xaf, 0x2d, 0x75, 0x3b, 0xb7, + 0x53, 0x9a, 0xa2, 0x20, 0x8a, 0x24, 0x41, 0xea, 0x64, 0xa3, 0xdc, 0xc4, 0xf8, 0x00, 0x02, 0x2a, + 0xa5, 0x90, 0xc9, 0x81, 0xe1, 0xb5, 0x09, 0x3e, 0x06, 0xa8, 0x14, 0x91, 0x6a, 0xa5, 0xd8, 0x86, + 0x26, 0x83, 0xd4, 0xc9, 0xbc, 0x7c, 0x68, 0x2a, 0x67, 0x6c, 0x43, 0xf1, 0x11, 0x84, 0x94, 0x17, + 0xb6, 0x19, 0x9a, 0xe6, 0x80, 0xf2, 0x42, 0xb7, 0xa6, 0x3f, 0x1d, 0xf0, 0xcf, 0x48, 0x55, 0x62, + 0x0c, 0x6e, 0x67, 0xd2, 0x65, 0x05, 0xbe, 0x68, 0xb5, 0xba, 0x46, 0xeb, 0xfd, 0x9d, 0x56, 0x3d, + 0xde, 0xd3, 0x79, 0x02, 0x40, 0x94, 0x92, 0xec, 0xbc, 0x56, 0xb4, 0x4a, 0xbc, 0xd4, 0xcb, 0xa2, + 0xc5, 0x93, 0xfe, 0xfc, 0xfc, 0x7d, 0x37, 0xf0, 0x89, 0x2b, 0xb9, 0xcd, 0xf7, 0x10, 0x3b, 0x4f, + 0xfe, 0x9e, 0xa7, 0xc9, 0x31, 0x1c, 0xfd, 0x01, 0xc2, 0x31, 0x78, 0x25, 0xdd, 0x36, 0x22, 0x75, + 0xa8, 0xa1, 0x37, 0xe4, 0xaa, 0x6e, 0x5f, 0xc0, 0x26, 0x6f, 0xdd, 0x37, 0xce, 0xf4, 0x87, 0x0b, + 0xd8, 0x6e, 0xf5, 0xc3, 0x77, 0xba, 0x2e, 0xaf, 0x05, 0xe3, 0x4a, 0xbf, 0xdc, 0x5a, 0x14, 0x74, + 0x75, 0x43, 0x65, 0xc5, 0x04, 0x37, 0x5c, 0x41, 0x1e, 0xe9, 0xda, 0x57, 0x5b, 0xc2, 0x63, 0x08, + 0x14, 0xa9, 0xca, 0x2a, 0x71, 0x8d, 0x93, 0xe7, 0x7f, 0xbf, 0xd2, 0x8e, 0xcf, 0x98, 0x6b, 0x2c, + 0x59, 0x14, 0x7e, 0x86, 0xb0, 0xa2, 0x4a, 0x31, 0x7e, 0xd9, 0xee, 0x62, 0xf6, 0x5f, 0x86, 
0x65, + 0x33, 0x6c, 0x49, 0x3a, 0xec, 0xe4, 0x0b, 0xc0, 0x8e, 0xfc, 0x1f, 0xd6, 0x9f, 0xed, 0x5b, 0x8f, + 0x16, 0x71, 0x7f, 0xe1, 0x7b, 0xab, 0x98, 0xbc, 0x83, 0xc3, 0xde, 0x47, 0xee, 0xb2, 0xc7, 0xd9, + 0x6b, 0x38, 0xec, 0x1d, 0x27, 0xc6, 0x00, 0xa7, 0x42, 0x2d, 0xf5, 0x71, 0xd1, 0x62, 0x7c, 0x0f, + 0x23, 0x18, 0xe4, 0x35, 0xe7, 0x8c, 0x5f, 0x8e, 0x1d, 0x0c, 0xc1, 0xff, 0x28, 0x38, 0x1d, 0xbb, + 0xb3, 0x13, 0x18, 0x76, 0x87, 0x82, 0x08, 0xb1, 0x4e, 0x7a, 0xb8, 0x23, 0x88, 0x8c, 0xd0, 0x0e, + 0x3b, 0x82, 0x50, 0x17, 0x2c, 0xfe, 0xfc, 0xc0, 0xfc, 0x94, 0xaf, 0x7e, 0x07, 0x00, 0x00, 0xff, + 0xff, 0x2e, 0xc7, 0x72, 0x13, 0xa6, 0x03, 0x00, 0x00, } diff --git a/go/vt/vtctld/workflow.go b/go/vt/vtctld/workflow.go index 3e8af44b4f2..28e3665b9aa 100644 --- a/go/vt/vtctld/workflow.go +++ b/go/vt/vtctld/workflow.go @@ -13,7 +13,6 @@ import ( "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/vtctl" "github.com/youtube/vitess/go/vt/workflow" - "github.com/youtube/vitess/go/vt/workflow/resharding" "github.com/youtube/vitess/go/vt/workflow/topovalidator" ) @@ -41,7 +40,8 @@ func initWorkflowManager(ts topo.Server) { schemaswap.RegisterWorkflowFactory() // Register the Horizontal Resharding workflow. - resharding.Register() + // resharding.Register() + // Unregister the blacklisted workflows. for _, name := range workflowManagerDisable { workflow.Unregister(name) diff --git a/go/vt/workflow/resharding/checkpoint.go b/go/vt/workflow/resharding/checkpoint.go index ef8bacaa1dc..64498ac83fa 100644 --- a/go/vt/workflow/resharding/checkpoint.go +++ b/go/vt/workflow/resharding/checkpoint.go @@ -10,53 +10,40 @@ import ( workflowpb "github.com/youtube/vitess/go/vt/proto/workflow" ) -// Checkpoint checkpoints the data into topology server. -type Checkpoint struct { +// CheckpointWriter save the checkpoint data into topology server. +type CheckpointWriter struct { topoServer topo.Server + // checkpointMu is used for protecting data access during checkpointing. 
checkpointMu sync.Mutex - wcp *workflowpb.WorkflowCheckpoint + checkpoint *workflowpb.WorkflowCheckpoint wi *topo.WorkflowInfo } -// Update update the status and checkpointing the update. -func (c *Checkpoint) Update(taskID string, status workflowpb.TaskState) error { +// NewCheckpointWriter creates a CheckpointWriter. +func NewCheckpointWriter(ts topo.Server, checkpoint *workflowpb.WorkflowCheckpoint, wi *topo.WorkflowInfo) *CheckpointWriter { + return &CheckpointWriter{ + topoServer: ts, + checkpoint: checkpoint, + wi: wi, + } +} + +// UpdateTask updates the status and checkpointing the update. +func (c *CheckpointWriter) UpdateTask(taskID string, status workflowpb.TaskState) error { c.checkpointMu.Lock() defer c.checkpointMu.Unlock() - c.wcp.Tasks[taskID].State = status - return c.Store() + + c.checkpoint.Tasks[taskID].State = status + return c.Save() } -// Store packets the checkpoint and sends it to the topology server. -func (c *Checkpoint) Store() error { +// Save packets the checkpoint and sends it to the topology server. +func (c *CheckpointWriter) Save() error { var err error - var data []byte - data, err = json.Marshal(c.wcp) + c.wi.Data, err = json.Marshal(c.checkpoint) if err != nil { return err } - c.wi.Data = data return c.topoServer.SaveWorkflow(context.TODO(), c.wi) } - -// CheckpointFile checkpoints the data into local files. This is used for debugging. 
-//type CheckpointFile struct { -// FilePath string -// counter int -//} -// -//// CheckpointFunc implements Checkpoint.CheckpointFunc -//func (c *CheckpointFile) Checkpoint(s *workflowpb.WorkflowCheckpoint) error { -// file, err := os.Create(fmt.Sprintf("%v_%v", c.FilePath, c.counter)) -// c.counter++ -// -// if err != nil { -// return err -// } -// defer file.Close() -// fmt.Fprintln(file, fmt.Sprintf("code version: %v", s.CodeVersion)) -// for _, task := range s.Tasks { -// fmt.Fprintln(file, fmt.Sprintf("task: state: %v\n attributes: %v\n", task.State, task.Attributes)) -// } -// return nil -//} diff --git a/go/vt/workflow/resharding/horizontal_resharding_workflow.go b/go/vt/workflow/resharding/horizontal_resharding_workflow.go index 1c6a72467b0..c73f07a96c6 100644 --- a/go/vt/workflow/resharding/horizontal_resharding_workflow.go +++ b/go/vt/workflow/resharding/horizontal_resharding_workflow.go @@ -10,13 +10,11 @@ import ( "flag" "fmt" "strings" - "sync" log "github.com/golang/glog" "golang.org/x/net/context" "github.com/youtube/vitess/go/vt/automation" - "github.com/youtube/vitess/go/vt/concurrency" "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" @@ -28,16 +26,19 @@ import ( topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" workflowpb "github.com/youtube/vitess/go/vt/proto/workflow" - statepb "github.com/youtube/vitess/go/vt/proto/workflowstate" ) +// TODO(yipeiw): the order of exported and unexported variables? 
const ( + codeVersion = 1 + horizontalReshardingFactoryName = "horizontal_resharding" - copySchemaTaskName = "copy_schema" - splitCloneTaskName = "split_clone" - waitFilteredReplicationTaskName = "wait_replication" - splitDiffTaskName = "split_diff" - migrateTaskName = "migrate" + + CopySchemaName = "copy_schema" + SplitCloneName = "clone" + WaitFilteredReplicationName = "wait_replication" + SplitDiffName = "diff" + MigrateName = "migrate" ) // HorizontalReshardingData is the data structure to store resharding arguments. @@ -54,9 +55,7 @@ type HorizontalReshardingWorkflow struct { wr ReshardingWrangler manager *workflow.Manager topoServer topo.Server - // wi is the topo.WorkflowInfo - wi *topo.WorkflowInfo - + wi *topo.WorkflowInfo // logger is the logger we export UI logs from. logger *logutil.MemoryLogger @@ -70,23 +69,30 @@ type HorizontalReshardingWorkflow struct { keyspace string vtworkers []string - subWorkflows []*PerShardHorizontalResharding + data []*PerShardHorizontalResharding + checkpoint *workflowpb.WorkflowCheckpoint + checkpointWriter *CheckpointWriter - subTasks map[string]*statepb.TaskContainer - taskParameters []*statepb.TaskParam + // Each phase has its own ParallelRunner object. + copyRunner *ParallelRunner + cloneRunner *ParallelRunner + waitRunner *ParallelRunner + diffRunner *ParallelRunner + migrateRunner *ParallelRunner } -// PerShardHorizontalReshardingData is the data structure to store the resharding arguments for each shard. +// PerShardHorizontalReshardingData is the data structure to store the resharding arguments for a single source shard. type PerShardHorizontalReshardingData struct { - Keyspace string - SourceShard string - DestinationShards []string - Vtworker string + keyspace string + sourceShard string + destinationShards []string + vtworker string } -// PerShardHorizontalResharding contains the data and method for horizontal resharding from a single source shard. 
+// PerShardHorizontalResharding contains the data for horizontal resharding from a single source shard. type PerShardHorizontalResharding struct { PerShardHorizontalReshardingData + parent *HorizontalReshardingWorkflow copySchemaShardUINode *workflow.Node @@ -103,19 +109,17 @@ func (hw *HorizontalReshardingWorkflow) Run(ctx context.Context, manager *workfl hw.ctx = ctx hw.topoServer = manager.TopoServer() hw.wr = wrangler.New(logutil.NewConsoleLogger(), manager.TopoServer(), tmclient.NewTabletManagerClient()) + hw.wi = wi + // TODO(yipeiw): separate the source shards, destination shards finding code and other initialization code for the convenience of unit test. hw.createSubWorkflows() - hw.setUIMessage("Horizontal resharding: workflow created successfully.") hw.rootUINode.Display = workflow.NodeDisplayDeterminate hw.rootUINode.BroadcastChanges(true /* updateChildren */) - // TODO(yipeiw): Support action button to allow retry, stop, restart. - // if err := hw.executeWorkflow(); err != nil { - // return err - // } if err := hw.runWorkflow(); err != nil { + hw.setUIMessage(fmt.Sprintf("Horizontal Resharding failed: %v", err)) return err } @@ -124,6 +128,40 @@ func (hw *HorizontalReshardingWorkflow) Run(ctx context.Context, manager *workfl return nil } +func (hw *HorizontalReshardingWorkflow) runWorkflow() error { + hw.checkpoint = hw.InitTasks() + hw.checkpointWriter = NewCheckpointWriter(hw.topoServer, hw.checkpoint, hw.wi) + hw.checkpointWriter.Save() + + copyTasks := hw.GenerateTasks(hw.checkpoint, CopySchemaName) + if err := hw.copyRunner.Run(copyTasks, hw.runCopySchema, hw.checkpointWriter, PARALLEL); err != nil { + return err + } + + cloneTasks := hw.GenerateTasks(hw.checkpoint, SplitCloneName) + if err := hw.cloneRunner.Run(cloneTasks, hw.runSplitClone, hw.checkpointWriter, PARALLEL); err != nil { + return err + } + + waitTasks := hw.GenerateTasks(hw.checkpoint, WaitFilteredReplicationName) + if err := hw.waitRunner.Run(waitTasks, 
hw.runWaitFilteredReplication, hw.checkpointWriter, PARALLEL); err != nil { + return err + } + + diffTasks := hw.GenerateTasks(hw.checkpoint, SplitDiffName) + // SplitDiff requires the vtworker only work for one destination shard at a time. + // To simplify the concurrency control, we run all the SplitDiff task sequentially. + if err := hw.diffRunner.Run(diffTasks, hw.runSplitDiff, hw.checkpointWriter, SEQUENTIAL); err != nil { + return err + } + + migrateTasks := hw.GenerateTasks(hw.checkpoint, MigrateName) + if err := hw.migrateRunner.Run(migrateTasks, hw.runMigrate, hw.checkpointWriter, SEQUENTIAL); err != nil { + return err + } + return nil +} + // createSubWorkflows creates a per source shard horizontal resharding workflow for each source shard in the keyspace. func (hw *HorizontalReshardingWorkflow) createSubWorkflows() error { overlappingShards, err := topotools.FindOverlappingShards(hw.ctx, hw.topoServer, hw.keyspace) @@ -147,15 +185,6 @@ func (hw *HorizontalReshardingWorkflow) createSubWorkflows() error { return err } } - // Initialize the tasks (parameters and states) for the workflow. 
- hw.subTasks = map[string]*statepb.TaskContainer{ - copySchemaTaskName: new(CopySchemaTaskHelper).InitTasks(hw.subWorkflows), - splitCloneTaskName: new(SplitCloneTaskHelper).InitTasks(hw.subWorkflows), - waitFilteredReplicationTaskName: new(WaitFilteredReplicationTaskHelper).InitTasks(hw.subWorkflows), - splitDiffTaskName: new(SplitDiffTaskHelper).InitTasks(hw.subWorkflows), - migrateTaskName: new(MigrateTaskHelper).InitTasks(hw.subWorkflows), - } - return nil } @@ -168,10 +197,10 @@ func (hw *HorizontalReshardingWorkflow) createWorkflowPerShard(sourceShard *topo perShard := &PerShardHorizontalResharding{ PerShardHorizontalReshardingData: PerShardHorizontalReshardingData{ - Keyspace: hw.keyspace, - SourceShard: sourceShardName, - DestinationShards: destShardNames, - Vtworker: vtworker, + keyspace: hw.keyspace, + sourceShard: sourceShardName, + destinationShards: destShardNames, + vtworker: vtworker, }, copySchemaShardUINode: &workflow.Node{ Name: "Shard " + sourceShardName, @@ -198,145 +227,17 @@ func (hw *HorizontalReshardingWorkflow) createWorkflowPerShard(sourceShard *topo hw.splitDiffUINode.Children = append(hw.splitDiffUINode.Children, perShard.splitDiffShardUINode) hw.migrateUINode.Children = append(hw.migrateUINode.Children, perShard.migrateShardUINode) - hw.subWorkflows = append(hw.subWorkflows, perShard) - return nil -} - -// checkpointed saves a checkpoint in topo server. -// Needs to be called with the lock. 
-func (hw *HorizontalReshardingWorkflow) checkpointed(ctx context.Context) error { - var err error - hw.wi.Data, err = json.Marshal(hw.subTasks) - if err != nil { - return err - } - err = hw.topoServer.SaveWorkflow(ctx, hw.wi) - if err != nil { - hw.logger.Errorf("SaveWorkflow failed: %v", err) - } else { - hw.logger.Infof("SaveWorkflow successful") - } - return err -} - -// updateStatus will update the status for specific task -func (hw *HorizontalReshardingWorkflow) updateStatus(step string, taskParam *statepb.TaskParam, status statepb.TaskState) { - hw.subTasks[step].Tasks[taskParam.String()].State = status - hw.checkpointed(context.TODO()) // I think this context needs separate control, we always want the checkpointing to succeed. -} - -func (hw *HorizontalReshardingWorkflow) runWorkflow() error { - // TODO(yipeiw): the code for each step execution is very similar, code refactorition needed in the next step. - - // Dynamically decides the task parameters based on the step and execution states. - hw.taskParameters = GetTaskParam(hw.subTasks[copySchemaTaskName]) - - // To verify the task parameters and status, I Print it out and check manually in unit test. 
- PrintTasks(copySchemaTaskName, hw.subTasks[copySchemaTaskName], hw.taskParameters) - var err error - err = hw.runAllTasks( - func(param *statepb.TaskParam) error { - var taskErr error - status := statepb.TaskState_Done - taskErr = hw.runCopySchema(param) - if taskErr != nil { - status = statepb.TaskState_Failed - hw.logger.Infof("Horizontal Resharding: error in CopySchemaShard: %v.", taskErr) - } - hw.updateStatus(copySchemaTaskName, param, status) - return taskErr - }) - PrintTasks("AFTER_"+copySchemaTaskName, hw.subTasks[copySchemaTaskName], nil) - - hw.taskParameters = GetTaskParam(hw.subTasks[splitCloneTaskName]) - PrintTasks(splitCloneTaskName, hw.subTasks[splitCloneTaskName], hw.taskParameters) - err = hw.runAllTasks( - func(param *statepb.TaskParam) error { - var taskErr error - status := statepb.TaskState_Done - taskErr = hw.runSplitClone(param) - if taskErr != nil { - status = statepb.TaskState_Failed - hw.logger.Infof("Horizontal Resharding: error in SplitClone: %v.", err) - } - hw.updateStatus(splitCloneTaskName, param, status) - return taskErr - }) - PrintTasks("AFTER_"+splitCloneTaskName, hw.subTasks[splitCloneTaskName], nil) - - hw.taskParameters = GetTaskParam(hw.subTasks[waitFilteredReplicationTaskName]) - PrintTasks(waitFilteredReplicationTaskName, hw.subTasks[waitFilteredReplicationTaskName], hw.taskParameters) - err = hw.runAllTasks( - func(param *statepb.TaskParam) error { - var taskErr error - status := statepb.TaskState_Done - taskErr = hw.runWaitFilteredReplication(param) - if taskErr != nil { - status = statepb.TaskState_Failed - hw.logger.Infof("Horizontal Resharding: error in SplitDiff: %v.", err) - } - hw.updateStatus(waitFilteredReplicationTaskName, param, status) - return taskErr - }) - PrintTasks("AFTER_"+waitFilteredReplicationTaskName, hw.subTasks[waitFilteredReplicationTaskName], nil) - - hw.taskParameters = GetTaskParam(hw.subTasks[splitDiffTaskName]) - PrintTasks(splitDiffTaskName, hw.subTasks[splitDiffTaskName], 
hw.taskParameters) - err = hw.runAllTasks( - func(param *statepb.TaskParam) error { - var taskErr error - status := statepb.TaskState_Done - taskErr = hw.runSplitDiff(param) - if taskErr != nil { - status = statepb.TaskState_Failed - hw.logger.Infof("Horizontal Resharding: error in SplitDiff: %v.", err) - } - hw.updateStatus(splitDiffTaskName, param, status) - return taskErr - }) - PrintTasks("After_"+splitDiffTaskName, hw.subTasks[splitDiffTaskName], nil) - - hw.taskParameters = GetTaskParam(hw.subTasks[migrateTaskName]) - PrintTasks(migrateTaskName, hw.subTasks[migrateTaskName], hw.taskParameters) - // run the migration tasks sequentially. - for _, param := range hw.taskParameters { - status := statepb.TaskState_Done - if taskErr := hw.runMigrate(param); taskErr != nil { - status = statepb.TaskState_Failed - hw.logger.Infof("Horizontal Resharding: error in MigratedServedType: %v.", err) - return taskErr - } - hw.updateStatus(migrateTaskName, param, status) - } - PrintTasks("AFTER_"+migrateTaskName, hw.subTasks[migrateTaskName], nil) - + hw.data = append(hw.data, perShard) return nil } -// runAllTasks runs jobs in parallel. The task parameters are dynamically updated befor execution on each step. It depends on the parallism pattern for the specific step and -// progress of each step (if it is retried). -// The executeFunc is responsible for handling how to use the parameter in the step. -func (hw *HorizontalReshardingWorkflow) runAllTasks(executeFunc func(param *statepb.TaskParam) error) error { - ec := concurrency.AllErrorRecorder{} - wg := sync.WaitGroup{} - for _, s := range hw.taskParameters { - wg.Add(1) - go func(s *statepb.TaskParam) { - defer wg.Done() - ec.RecordError(executeFunc(s)) - }(s) - } - wg.Wait() - return ec.Error() -} - // runCopySchemaPerShard runs CopySchema for a destination shard. // There should be #destshards parameters, while each param includes 1 sourceshard and 1 destshard. 
-func (hw *HorizontalReshardingWorkflow) runCopySchema(param *statepb.TaskParam) error { - s := param.SourceShards[0] - d := param.DestinationShards[0] +func (hw *HorizontalReshardingWorkflow) runCopySchema(attr map[string]string) error { + s := attr["source_shard"] + d := attr["destination_shard"] err := hw.wr.CopySchemaShardFromShard(hw.ctx, nil /* tableArray*/, nil /* excludeTableArray */, true, /*includeViews*/ - param.Keyspace, s, param.Keyspace, d, wrangler.DefaultWaitSlaveTimeout) + hw.keyspace, s, hw.keyspace, d, wrangler.DefaultWaitSlaveTimeout) if err != nil { hw.logger.Infof("Horizontal Resharding: error in CopySchemaShardFromShard from %s to %s: %v.", s, d, err) } @@ -346,18 +247,20 @@ func (hw *HorizontalReshardingWorkflow) runCopySchema(param *statepb.TaskParam) // runSplitClonePerShard runs SplitClone for a source shard. // There should be #sourceshards parameters, while each param includes 1 sourceshard and its destshards. The destShards are useless here. -func (hw *HorizontalReshardingWorkflow) runSplitClone(param *statepb.TaskParam) error { - sourceKeyspaceShard := topoproto.KeyspaceShardString(param.Keyspace, param.SourceShards[0]) +func (hw *HorizontalReshardingWorkflow) runSplitClone(attr map[string]string) error { + s := attr["source_shard"] + worker := attr["vtworker"] + sourceKeyspaceShard := topoproto.KeyspaceShardString(hw.keyspace, s) // Reset the vtworker to avoid error if vtworker command has been called elsewhere. // This is because vtworker class doesn't cleanup the environment after execution. - automation.ExecuteVtworker(hw.ctx, param.Vtworker, []string{"Reset"}) + automation.ExecuteVtworker(hw.ctx, worker, []string{"Reset"}) // The flag min_healthy_rdonly_tablets is set to 1 (default value is 2). // Therefore, we can reuse the normal end to end test setting, which has only 1 rdonly tablet. // TODO(yipeiw): Add min_healthy_rdonly_tablets as an input argument in UI. 
args := []string{"SplitClone", "--min_healthy_rdonly_tablets=1", sourceKeyspaceShard} - if _, err := automation.ExecuteVtworker(hw.ctx, param.Vtworker, args); err != nil { - hw.logger.Infof("Horizontal resharding: error in SplitClone in keyspace %s: %v.", param.Keyspace, err) + if _, err := automation.ExecuteVtworker(hw.ctx, worker, args); err != nil { + hw.logger.Infof("Horizontal resharding: error in SplitClone in keyspace %s: %v.", hw.keyspace, err) return err } hw.logger.Infof("Horizontal resharding: SplitClone is finished.") @@ -367,9 +270,9 @@ func (hw *HorizontalReshardingWorkflow) runSplitClone(param *statepb.TaskParam) // runWaitFilteredReplication runs WaitForFilteredReplication for a destination shard. // There should be #destshards parameters, while each param includes 1 sourceshard and 1 destshard. -func (hw *HorizontalReshardingWorkflow) runWaitFilteredReplication(param *statepb.TaskParam) error { - d := param.DestinationShards[0] - if err := hw.wr.WaitForFilteredReplication(hw.ctx, param.Keyspace, d, wrangler.DefaultWaitForFilteredReplicationMaxDelay); err != nil { +func (hw *HorizontalReshardingWorkflow) runWaitFilteredReplication(attr map[string]string) error { + d := attr["destination_shard"] + if err := hw.wr.WaitForFilteredReplication(hw.ctx, hw.keyspace, d, wrangler.DefaultWaitForFilteredReplicationMaxDelay); err != nil { hw.logger.Infof("Horizontal Resharding: error in WaitForFilteredReplication: %v.", err) return err } @@ -379,34 +282,31 @@ func (hw *HorizontalReshardingWorkflow) runWaitFilteredReplication(param *statep // runSplitDiffPerShard runs SplitDiff for a source shard. // There should be #sourceshards parameters, while each param includes 1 sourceshard and its destshards. 
-func (hw *HorizontalReshardingWorkflow) runSplitDiff(param *statepb.TaskParam) error { - var destinationKeyspaceShards []string - for _, destShard := range param.DestinationShards { - destinationKeyspaceShards = append(destinationKeyspaceShards, topoproto.KeyspaceShardString(param.Keyspace, destShard)) - } +func (hw *HorizontalReshardingWorkflow) runSplitDiff(attr map[string]string) error { + d := attr["destination_shard"] + worker := attr["vtworker"] - for _, d := range destinationKeyspaceShards { - automation.ExecuteVtworker(hw.ctx, param.Vtworker, []string{"Reset"}) - args := []string{"SplitDiff", "--min_healthy_rdonly_tablets=1", d} - _, err := automation.ExecuteVtworker(hw.ctx, param.Vtworker, args) - if err != nil { - return err - } + automation.ExecuteVtworker(hw.ctx, worker, []string{"Reset"}) + args := []string{"SplitDiff", "--min_healthy_rdonly_tablets=1", topoproto.KeyspaceShardString(hw.keyspace, d)} + _, err := automation.ExecuteVtworker(hw.ctx, worker, args) + if err != nil { + return err } + hw.logger.Infof("Horizontal resharding: SplitDiff is finished.") return nil } // runMigratePerShard runs the migration sequentially among all source shards. // There should be 1 parameter, which includes all source shards to be migrated. 
-func (hw *HorizontalReshardingWorkflow) runMigrate(param *statepb.TaskParam) error { - s := param.SourceShards[0] - sourceKeyspaceShard := topoproto.KeyspaceShardString(param.Keyspace, s) +func (hw *HorizontalReshardingWorkflow) runMigrate(attr map[string]string) error { + s := attr["source_shard"] + sourceKeyspaceShard := topoproto.KeyspaceShardString(hw.keyspace, s) servedTypeParams := []topodatapb.TabletType{topodatapb.TabletType_RDONLY, topodatapb.TabletType_REPLICA, topodatapb.TabletType_MASTER} for _, servedType := range servedTypeParams { - err := hw.wr.MigrateServedTypes(hw.ctx, param.Keyspace, s, nil /* cells */, servedType, false /* reverse */, false /* skipReFreshState */, wrangler.DefaultFilteredReplicationWaitTime) + err := hw.wr.MigrateServedTypes(hw.ctx, hw.keyspace, s, nil /* cells */, servedType, false /* reverse */, false /* skipReFreshState */, wrangler.DefaultFilteredReplicationWaitTime) if err != nil { hw.logger.Infof("Horizontal Resharding: error in MigrateServedTypes on servedType %s: %v.", servedType, err) return err @@ -416,126 +316,6 @@ func (hw *HorizontalReshardingWorkflow) runMigrate(param *statepb.TaskParam) err return nil } -// All the executeXXX. functions are in the old implementation, which is replaced by the runXXX function now. 
-//func (hw *HorizontalReshardingWorkflow) executeWorkflow() error { -// if err := hw.runAllSubWorkflows(hw.executeCopySchemaPerShard); err != nil { -// hw.logger.Infof("Horizontal Resharding: error in CopySchemaShard: %v.", err) -// return err -// } -// -// if err := hw.runAllSubWorkflows(hw.executeSplitClonePerShard); err != nil { -// hw.logger.Infof("Horizontal Resharding: error in SplitClone: %v.", err) -// return err -// } -// if err := hw.runAllSubWorkflows(hw.executeSplitDiffPerShard); err != nil { -// hw.logger.Infof("Horizontal Resharding: error in SplitDiff: %v.", err) -// return err -// } -// if err := hw.runAllSubWorkflows(hw.executeMigratePerShard); err != nil { -// hw.logger.Infof("Horizontal Resharding: error in MigratedServedType: %v.", err) -// return err -// } -// return nil -//} -// -//// runAllSubWorkflows runs jobs in parallel. -//func (hw *HorizontalReshardingWorkflow) runAllSubWorkflows(executeFunc func(subWorkflow *PerShardHorizontalResharding) error) error { -// ec := concurrency.AllErrorRecorder{} -// wg := sync.WaitGroup{} -// for _, sw := range hw.subWorkflows { -// wg.Add(1) -// go func(s *PerShardHorizontalResharding) { -// defer wg.Done() -// ec.RecordError(executeFunc(s)) -// }(sw) -// } -// wg.Wait() -// return ec.Error() -//} -// -//// executeCopySchemaPerShard runs CopySchemaShard to copy the schema of a source shard to all its destination shards. -//// TODO(yipeiw): excludeTable information can be added to UI input parameters, s.t the user can customize excluded tables during resharding. 
-//func (hw *HorizontalReshardingWorkflow) executeCopySchemaPerShard(perhw *PerShardHorizontalResharding) error { -// sourceKeyspaceShard := topoproto.KeyspaceShardString(perhw.Keyspace, perhw.SourceShard) -// for _, d := range perhw.DestinationShards { -// err := hw.wr.CopySchemaShardFromShard(hw.ctx, nil /* tableArray*/, nil /* excludeTableArray */, true /*includeViews*/, perhw.Keyspace, perhw.SourceShard, perhw.Keyspace, d, wrangler.DefaultWaitSlaveTimeout) -// if err != nil { -// hw.logger.Infof("Horizontal Resharding: error in CopySchemaShardFromShard from %s to %s: %v.", sourceKeyspaceShard, d, err) -// return err -// } -// hw.logger.Infof("Horizontal Resharding: CopySchemaShardFromShard from %s to %s is finished.", sourceKeyspaceShard, d) -// } -// return nil -//} -// -//// executeSplitClonePerShard runs SplitClone to clone the data within a keyspace from a source shard to its destination shards. -//func (hw *HorizontalReshardingWorkflow) executeSplitClonePerShard(perhw *PerShardHorizontalResharding) error { -// sourceKeyspaceShard := topoproto.KeyspaceShardString(perhw.Keyspace, perhw.SourceShard) -// var destinationKeyspaceShards []string -// for _, destShard := range perhw.DestinationShards { -// destinationKeyspaceShards = append(destinationKeyspaceShards, topoproto.KeyspaceShardString(perhw.Keyspace, destShard)) -// } -// -// // Reset the vtworker to avoid error if vtworker command has been called elsewhere. -// // This is because vtworker class doesn't cleanup the environment after execution. -// automation.ExecuteVtworker(hw.ctx, perhw.Vtworker, []string{"Reset"}) -// // The flag min_healthy_rdonly_tablets is set to 1 (default value is 2). -// // Therefore, we can reuse the normal end to end test setting, which has only 1 rdonly tablet. -// // TODO(yipeiw): Add min_healthy_rdonly_tablets as an input argument in UI. 
-// args := []string{"SplitClone", "--min_healthy_rdonly_tablets=1", sourceKeyspaceShard} -// if _, err := automation.ExecuteVtworker(hw.ctx, perhw.Vtworker, args); err != nil { -// hw.logger.Infof("Horizontal resharding: error in SplitClone in keyspace %s: %v.", perhw.Keyspace, err) -// return err -// } -// hw.logger.Infof("Horizontal resharding: SplitClone is finished.") -// // Wait for filtered replication task. -// for _, d := range perhw.DestinationShards { -// if err := hw.wr.WaitForFilteredReplication(hw.ctx, perhw.Keyspace, d, wrangler.DefaultWaitForFilteredReplicationMaxDelay); err != nil { -// hw.logger.Infof("Horizontal Resharding: error in WaitForFilteredReplication: %v.", err) -// return err -// } -// hw.logger.Infof("Horizontal Resharding:WaitForFilteredReplication is finished on " + d) -// } -// return nil -//} -// -//// executeSplitDiffPerShard runs SplitDiff for every destination shard to the source and destination -//// to ensure all the data is present and correct. -//func (hw *HorizontalReshardingWorkflow) executeSplitDiffPerShard(perhw *PerShardHorizontalResharding) error { -// var destinationKeyspaceShards []string -// for _, destShard := range perhw.DestinationShards { -// destinationKeyspaceShards = append(destinationKeyspaceShards, topoproto.KeyspaceShardString(perhw.Keyspace, destShard)) -// } -// -// for _, d := range destinationKeyspaceShards { -// automation.ExecuteVtworker(hw.ctx, perhw.Vtworker, []string{"Reset"}) -// args := []string{"SplitDiff", "--min_healthy_rdonly_tablets=1", d} -// _, err := automation.ExecuteVtworker(hw.ctx, perhw.Vtworker, args) -// if err != nil { -// return err -// } -// } -// hw.logger.Infof("Horizontal resharding: SplitDiff is finished.") -// return nil -//} -// -//// executeMigratePerShard runs MigrateServedTypes to switch over to serving from the new shards. 
-//func (hw *HorizontalReshardingWorkflow) executeMigratePerShard(perhw *PerShardHorizontalResharding) error { -// sourceKeyspaceShard := topoproto.KeyspaceShardString(perhw.Keyspace, perhw.SourceShard) -// servedTypeParams := []topodatapb.TabletType{topodatapb.TabletType_RDONLY, -// topodatapb.TabletType_REPLICA, -// topodatapb.TabletType_MASTER} -// for _, servedType := range servedTypeParams { -// err := hw.wr.MigrateServedTypes(hw.ctx, perhw.Keyspace, perhw.SourceShard, nil /* cells */, servedType, false /* reverse */, false /* skipReFreshState */, wrangler.DefaultFilteredReplicationWaitTime) -// if err != nil { -// hw.logger.Infof("Horizontal Resharding: error in MigrateServedTypes on servedType %s: %v.", servedType, err) -// return err -// } -// hw.logger.Infof("Horizontal Resharding: MigrateServedTypes is finished on tablet %s serve type %s.", sourceKeyspaceShard, servedType) -// } -// return nil -//} - func (hw *HorizontalReshardingWorkflow) setUIMessage(message string) { log.Infof("Horizontal resharding on keyspace %v: %v.", hw.keyspace, message) hw.rootUINode.Log = hw.logger.String() diff --git a/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go b/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go index 955588179e7..fe941694ea7 100644 --- a/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go +++ b/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go @@ -13,14 +13,45 @@ import ( topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" workflowpb "github.com/youtube/vitess/go/vt/proto/workflow" - statepb "github.com/youtube/vitess/go/vt/proto/workflowstate" ) func TestHorizontalResharding(t *testing.T) { - ts := memorytopo.NewServer("cell") - // Create fake wrangler using mock interface, which is used for the unit test in steps CopySchema and MigratedServedType. 
ctrl := gomock.NewController(t) defer ctrl.Finish() + + hw := setUp(t, ctrl) + if hw == nil { + return + } + // Create fakeworkerclient, which is used for the unit test in steps SplitClone and SplitDiff. + flag.Set("vtworker_client_protocol", "fake") + fakeVtworkerClient := fakevtworkerclient.NewFakeVtworkerClient() + vtworkerclient.RegisterFactory("fake", fakeVtworkerClient.FakeVtworkerClientFactory) + defer vtworkerclient.UnregisterFactoryForTest("fake") + + fakeVtworkerClient.RegisterResultForAddr("localhost:15032", []string{"SplitClone", "--min_healthy_rdonly_tablets=1", "test_keyspace/0"}, "", nil) + fakeVtworkerClient.RegisterResultForAddr("localhost:15032", []string{"SplitDiff", "--min_healthy_rdonly_tablets=1", "test_keyspace/-80"}, "", nil) + fakeVtworkerClient.RegisterResultForAddr("localhost:15032", []string{"SplitDiff", "--min_healthy_rdonly_tablets=1", "test_keyspace/80-"}, "", nil) + + // Test the execution of horizontal resharding. + // To simply demonstate the ability to track task status and leverage it for control the workflow execution, only happy path is used here. + if err := hw.runWorkflow(); err != nil { + t.Errorf("%s: Horizontal resharding workflow should not fail", err) + } + + // Checking all tasks are Done. + for _, task := range hw.checkpoint.Tasks { + if task.State != workflowpb.TaskState_TaskDone { + t.Fatalf("task is not done: Id: %v, State: %v, Attributes:%v", task.Id, task.State, task.Attributes) + } + } +} + +// setUp prepare the test environement for the happy path. +// Other test cases can reuse this basic setup and modified it based on its need. +func setUp(t *testing.T, ctrl *gomock.Controller) *HorizontalReshardingWorkflow { + ts := memorytopo.NewServer("cell") + // Create fake wrangler using mock interface, which is used for the unit test in steps CopySchema and MigratedServedType. 
mockWranglerInterface := NewMockReshardingWrangler(ctrl) // Create the workflow (ignore the node construction since we don't test the front-end part in this unit test). @@ -31,17 +62,28 @@ func TestHorizontalResharding(t *testing.T) { topoServer: ts, logger: logutil.NewMemoryLogger(), } - perShard := &PerShardHorizontalResharding{ + parent: hw, PerShardHorizontalReshardingData: PerShardHorizontalReshardingData{ - Keyspace: "test_keyspace", - SourceShard: "0", - DestinationShards: []string{"-80", "80-"}, - Vtworker: "localhost:15032", + keyspace: "test_keyspace", + sourceShard: "0", + destinationShards: []string{"-80", "80-"}, + vtworker: "localhost:15032", }, } - perShard.parent = hw - hw.subWorkflows = append(hw.subWorkflows, perShard) + hw.data = append(hw.data, perShard) + // Create the initial workflowpb.Workflow object. + w := &workflowpb.Workflow{ + Uuid: "testworkflow0000", + FactoryName: "horizontal_resharding", + State: workflowpb.WorkflowState_NotStarted, + } + var err error + hw.wi, err = hw.topoServer.CreateWorkflow(hw.ctx, w) + if err != nil { + t.Errorf("%s: Horizontal resharding workflow fails in creating workflowInfo", err) + return nil + } // Set the expected behaviors for mock wrangler. mockWranglerInterface.EXPECT().CopySchemaShardFromShard( @@ -83,41 +125,10 @@ func TestHorizontalResharding(t *testing.T) { false, /* skipReFreshState */ wrangler.DefaultFilteredReplicationWaitTime).Return(nil) } - - // Create fakeworkerclient, which is used for the unit test in steps SplitClone and SplitDiff. 
- fakeVtworkerClient := fakevtworkerclient.NewFakeVtworkerClient() - vtworkerclient.RegisterFactory("fake", fakeVtworkerClient.FakeVtworkerClientFactory) - defer vtworkerclient.UnregisterFactoryForTest("fake") - flag.Set("vtworker_client_protocol", "fake") - fakeVtworkerClient.RegisterResultForAddr("localhost:15032", []string{"SplitClone", "--min_healthy_rdonly_tablets=1", "test_keyspace/0"}, "", nil) - fakeVtworkerClient.RegisterResultForAddr("localhost:15032", []string{"SplitDiff", "--min_healthy_rdonly_tablets=1", "test_keyspace/-80"}, "", nil) - fakeVtworkerClient.RegisterResultForAddr("localhost:15032", []string{"SplitDiff", "--min_healthy_rdonly_tablets=1", "test_keyspace/80-"}, "", nil) - - // manually complete the task initalization, which is part of createSubWorkflows. - // TODO(yipeiw): this code is repeated, should be removed from unit test. - hw.subTasks = map[string]*statepb.TaskContainer{ - copySchemaTaskName: new(CopySchemaTaskHelper).InitTasks(hw.subWorkflows), - splitCloneTaskName: new(SplitCloneTaskHelper).InitTasks(hw.subWorkflows), - waitFilteredReplicationTaskName: new(WaitFilteredReplicationTaskHelper).InitTasks(hw.subWorkflows), - splitDiffTaskName: new(SplitDiffTaskHelper).InitTasks(hw.subWorkflows), - migrateTaskName: new(MigrateTaskHelper).InitTasks(hw.subWorkflows), - } - - // Create the initial workflowpb.Workflow object. - w := &workflowpb.Workflow{ - Uuid: "testworkflow0000", - FactoryName: "horizontal_resharding", - State: workflowpb.WorkflowState_NotStarted, - } - var err error - hw.wi, err = hw.topoServer.CreateWorkflow(hw.ctx, w) - if err != nil { - t.Errorf("%s: Horizontal resharding workflow fails in creating workflowInfo", err) - } - - // Test the execution of horizontal resharding. - // To simply demonstate the ability to track task status and leverage it for control the workflow execution, only happy path is used here. 
- if err := hw.runWorkflow(); err != nil { - t.Errorf("%s: Horizontal resharding workflow should not fail", err) - } + return hw } + +// TODO(yipeiw): fake a retry situation: fails first for made error, then fix the inserted bug and manually trigger the retry signal, +// verify whether the retrying job can be done successfully. +// problem for unit test: hard to fake action, node part, hard to separate the logic from front-end control. (figure out the call path of Init, s.t. we can create the front-end needed set-up if it is easy enough) +// problem for end-to-end test, need a way to check the workflow status; need to trigger the button through http request. diff --git a/go/vt/workflow/resharding/parallel_runner.go b/go/vt/workflow/resharding/parallel_runner.go index 371b0ac518a..3161763db25 100644 --- a/go/vt/workflow/resharding/parallel_runner.go +++ b/go/vt/workflow/resharding/parallel_runner.go @@ -13,34 +13,38 @@ import ( type level int const ( + // SEQUENTIAL means that the tasks will run sequentially. SEQUENTIAL level = 1 + iota + //PARALLEL means that the tasks will run in parallel. PARALLEL ) // ParallelRunner is used to control executing tasks concurrently. // Each phase has its own ParallelRunner object. type ParallelRunner struct { - // TODO(yipeiw) : ParallelRunner should fields for per-task controllable actions. + // TODO(yipeiw) : ParallelRunner should have fields for per-task controllable actions. } // Run is the entry point for controling task executions. -// runTasks should be a copy of tasks with the expected execution order, the status of task should be +// tasks should be a copy of tasks with the expected execution order, the status of task should be // both updated in this copy and the original one (checkpointer.Update does this). This is to avoid // data racing situation. 
-func (p *ParallelRunner) Run(runTasks []*workflowpb.Task, executeFunc func(map[string]string) error, cp *Checkpoint, concurrencyLevel level) error { +func (p *ParallelRunner) Run(tasks []*workflowpb.Task, executeFunc func(map[string]string) error, cp *CheckpointWriter, concurrencyLevel level) error { var parallelNum int // default value is 0. The task will not run in this case. switch concurrencyLevel { case SEQUENTIAL: parallelNum = 1 case PARALLEL: - parallelNum = len(runTasks) + parallelNum = len(tasks) + default: + panic(fmt.Sprintf("BUG: Invalid concurrency level: %v", concurrencyLevel)) } // TODO(yipeiw): Support retry, restart, pause actions. Wrap the execution to interleave with actions. // sem is a channel used to control the level of concurrency. sem := make(chan bool, parallelNum) var ec concurrency.AllErrorRecorder - for _, task := range runTasks { + for _, task := range tasks { // TODO(yipeiw): Add checking logics to support retry, pause, restart actions when lauching tasks. if task.State == workflowpb.TaskState_TaskDone { continue @@ -52,15 +56,15 @@ func (p *ParallelRunner) Run(runTasks []*workflowpb.Task, executeFunc func(map[s status := workflowpb.TaskState_TaskDone if err := executeFunc(t.Attributes); err != nil { status = workflowpb.TaskState_TaskNotStarted - t.Error = fmt.Sprintf("%v", err) + t.Error = err.Error() ec.RecordError(err) } t.State = status - // only log the error passage rather then propograting it through ErrorRecorder. The reason is that error message in + // Only log the error passage rather then propograting it through ErrorRecorder. The reason is that error message in // ErrorRecorder will leads to stop of the workflow, which is unexpected if only checkpointing fails. - // However, the checkpointing failure right after initializing the tasks should lead to the stop of the workflow. 
- if err := cp.Update(task.TaskId, status); err != nil { + // However, the checkpointing failure right after initializing the tasks should lead to a stop of the workflow. + if err := cp.UpdateTask(t.Id, status); err != nil { log.Errorf("%v", err) } }(task) diff --git a/go/vt/workflow/resharding/parallel_runner_test.go b/go/vt/workflow/resharding/parallel_runner_test.go index 2f64e5703c1..02ae8fcba81 100644 --- a/go/vt/workflow/resharding/parallel_runner_test.go +++ b/go/vt/workflow/resharding/parallel_runner_test.go @@ -3,10 +3,10 @@ package resharding import ( "context" "fmt" + "strconv" "strings" "testing" - "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/memorytopo" workflowpb "github.com/youtube/vitess/go/vt/proto/workflow" @@ -14,16 +14,47 @@ import ( const ( printName = "Sleep" - - codeVersion = 1 ) -func executePrint(attr map[string]string) error { - fmt.Printf("The number passed to me is %v \n", attr["Number"]) - return nil +func TestParallelRunner(t *testing.T) { + w := &workflowpb.Workflow{ + Uuid: "testparallelrunner", + FactoryName: "simple_print", + State: workflowpb.WorkflowState_NotStarted, + } + + ts := memorytopo.NewServer("cell") + wi, err := ts.CreateWorkflow(context.TODO(), w) + if err != nil { + t.Errorf("%s: Parallel Runner fails in creating workflow", err) + } + + taskNum := 5 + initCheckpoint := InitPrintTasks(taskNum) + + cp := NewCheckpointWriter(ts, initCheckpoint, wi) + cp.Save() + + tasks := GetOrderedPrintTasks(initCheckpoint) + executeLog := func(attr map[string]string) error { + t.Logf("The number passed to me is %v \n", attr["number"]) + return nil + } + + p := &ParallelRunner{} + if err := p.Run(tasks, executeLog, cp, PARALLEL); err != nil { + t.Errorf("%s: Parallel Runner should not fail", err) + } + + // Check whether all tasks are in finished status. 
+ for _, task := range cp.checkpoint.Tasks { + if task.State != workflowpb.TaskState_TaskDone { + t.Fatalf("Task info: %v, %v, %v: Parallel Runner task not finished", task.Id, task.State, task.Attributes) + } + } } -func taskNameOfPrint(num string) string { +func logTaskName(num int) string { return fmt.Sprintf("%v_%v", printName, num) } @@ -31,14 +62,14 @@ func InitPrintTasks(numTasks int) *workflowpb.WorkflowCheckpoint { tasks := make(map[string]*workflowpb.Task) var infoList []string for i := 0; i < numTasks; i++ { - num := fmt.Sprintf("%v", i) + numStr := fmt.Sprintf("%v", i) t := &workflowpb.Task{ - TaskId: taskNameOfPrint(num), + Id: logTaskName(i), State: workflowpb.TaskState_TaskNotStarted, - Attributes: map[string]string{"Number": num}, + Attributes: map[string]string{"number": numStr}, } - tasks[t.TaskId] = t - infoList = append(infoList, num) + tasks[t.Id] = t + infoList = append(infoList, numStr) } return &workflowpb.WorkflowCheckpoint{ CodeVersion: codeVersion, @@ -47,49 +78,15 @@ func InitPrintTasks(numTasks int) *workflowpb.WorkflowCheckpoint { } } -func GetOrderedPrintTasks(wcp *workflowpb.WorkflowCheckpoint) []*workflowpb.Task { +func GetOrderedPrintTasks(checkpoint *workflowpb.WorkflowCheckpoint) []*workflowpb.Task { var tasks []*workflowpb.Task - for _, n := range strings.Split(wcp.Settings["numbers"], ",") { - taskID := taskNameOfPrint(n) - tasks = append(tasks, wcp.Tasks[taskID]) - } - return tasks -} - -func TestParallelRunner(t *testing.T) { - ts := memorytopo.NewServer("cell") - w := &workflowpb.Workflow{ - Uuid: "testparallelrunner", - FactoryName: "simple_print", - State: workflowpb.WorkflowState_NotStarted, - } - var err error - var wi *topo.WorkflowInfo - wi, err = ts.CreateWorkflow(context.TODO(), w) - if err != nil { - t.Errorf("%s: Parallel Runner fails in creating workflow", err) - } - - taskNum := 5 - initCheckpoint := InitPrintTasks(taskNum) - - cp := &Checkpoint{ - topoServer: ts, - wcp: initCheckpoint, - wi: wi, - } - cp.Store() 
- - var p *ParallelRunner - tasks := GetOrderedPrintTasks(initCheckpoint) - if err := p.Run(tasks, executePrint, cp, PARALLEL); err != nil { - t.Errorf("%s: Parallel Runner should not fail", err) - } - - //Check whether all tasks are in finished status. - for _, task := range cp.wcp.Tasks { - if task.State != workflowpb.TaskState_TaskDone { - t.Errorf("Task info: %v, %v, %v: Parallel Runner task not finished", task.TaskId, task.State, task.Attributes) + for _, n := range strings.Split(checkpoint.Settings["numbers"], ",") { + num, err := strconv.Atoi(n) + if err != nil { + return nil } + taskID := logTaskName(num) + tasks = append(tasks, checkpoint.Tasks[taskID]) } + return tasks } diff --git a/go/vt/workflow/resharding/status.go b/go/vt/workflow/resharding/status.go deleted file mode 100644 index 6968019ac64..00000000000 --- a/go/vt/workflow/resharding/status.go +++ /dev/null @@ -1,118 +0,0 @@ -package resharding - -import ( - "fmt" - - statepb "github.com/youtube/vitess/go/vt/proto/workflowstate" -) - -// GetTaskParam generates the parameters for unfinished tasks. -func GetTaskParam(tasks *statepb.TaskContainer) []*statepb.TaskParam { - var params []*statepb.TaskParam - for _, v := range tasks.Tasks { - if v.State != statepb.TaskState_Done && v.State != statepb.TaskState_Running { - params = append(params, v.Param) - } - } - return params -} - -// PrintTasks prints the tasks and the dynamically generated parameters for a specific step. 
-func PrintTasks(step string, tasks *statepb.TaskContainer, params []*statepb.TaskParam) { - fmt.Printf("\n%s printing the tasks information:", step) - for _, v := range tasks.Tasks { - fmt.Println("task param: ", v.Param) - fmt.Println("task status: ", v.State) - } - - if params != nil { - fmt.Println("printing the task parameters passed to execution") - for _, p := range params { - fmt.Println("execution task param: ", p) - } - } -} - -func initParamPerDestinationShard(subWorkflows []*PerShardHorizontalResharding) *statepb.TaskContainer { - tasks := make(map[string]*statepb.Task) - for _, sw := range subWorkflows { - for _, d := range sw.DestinationShards { - param := &statepb.TaskParam{ - Keyspace: sw.Keyspace, - DestinationShards: []string{d}, - SourceShards: []string{sw.SourceShard}, - Vtworker: sw.Vtworker, - } - t := &statepb.Task{ - Param: param, - State: statepb.TaskState_Created, - } - tasks[param.String()] = t - } - } - return &statepb.TaskContainer{Tasks: tasks} -} - -func initParamPerSourceShard(subWorkflows []*PerShardHorizontalResharding) *statepb.TaskContainer { - tasks := make(map[string]*statepb.Task) - for _, sw := range subWorkflows { - param := &statepb.TaskParam{ - Keyspace: sw.Keyspace, - DestinationShards: sw.DestinationShards, - SourceShards: []string{sw.SourceShard}, - Vtworker: sw.Vtworker, - } - t := &statepb.Task{ - Param: param, - State: statepb.TaskState_Created, - } - tasks[param.String()] = t - } - return &statepb.TaskContainer{Tasks: tasks} -} - -// TaskHelper includes methods to generate task parameters based on the execution status while updating the status at the same time. -// Each step should implements this interface. -type TaskHelper interface { - InitTasks(perhw []*PerShardHorizontalResharding) *statepb.TaskContainer -} - -// CopySchemaTaskHelper implements TaskHelper interface. -type CopySchemaTaskHelper struct{} - -// InitTasks implements TaskHelper.InitTasks (per destination shard manner). 
-func (c CopySchemaTaskHelper) InitTasks(subWorkflows []*PerShardHorizontalResharding) *statepb.TaskContainer { - return initParamPerDestinationShard(subWorkflows) -} - -// SplitCloneTaskHelper implements TaskHelper interface. -type SplitCloneTaskHelper struct{} - -// InitTasks implements TaskHelper.InitTasks. -func (c SplitCloneTaskHelper) InitTasks(subWorkflows []*PerShardHorizontalResharding) *statepb.TaskContainer { - return initParamPerSourceShard(subWorkflows) -} - -// WaitFilteredReplicationTaskHelper implements TaskHelper interface. -type WaitFilteredReplicationTaskHelper struct{} - -// InitTasks implements TaskHelper.InitTasks. -func (c WaitFilteredReplicationTaskHelper) InitTasks(subWorkflows []*PerShardHorizontalResharding) *statepb.TaskContainer { - return initParamPerDestinationShard(subWorkflows) -} - -// SplitDiffTaskHelper implements TaskHelper interface. -type SplitDiffTaskHelper struct{} - -// InitTasks implements TaskHelper.InitTasks. -func (c SplitDiffTaskHelper) InitTasks(subWorkflows []*PerShardHorizontalResharding) *statepb.TaskContainer { - return initParamPerSourceShard(subWorkflows) -} - -// MigrateTaskHelper implements TaskHelper interface. -type MigrateTaskHelper struct{} - -// InitTasks implements TaskHelper.InitTasks. -func (c MigrateTaskHelper) InitTasks(subWorkflows []*PerShardHorizontalResharding) *statepb.TaskContainer { - return initParamPerSourceShard(subWorkflows) -} diff --git a/go/vt/workflow/resharding/task_helper.go b/go/vt/workflow/resharding/task_helper.go new file mode 100644 index 00000000000..291f0850b03 --- /dev/null +++ b/go/vt/workflow/resharding/task_helper.go @@ -0,0 +1,85 @@ +package resharding + +import ( + "fmt" + "strings" + + workflowpb "github.com/youtube/vitess/go/vt/proto/workflow" +) + +func getTaskID(phase, shardType, shardName string) string { + return fmt.Sprintf("%s_%s/%s", phase, shardType, shardName) +} + +// GenerateTasks generates a copy of tasks for a specific step. 
The task status is not checked in this function. +func (hw *HorizontalReshardingWorkflow) GenerateTasks(checkpoint *workflowpb.WorkflowCheckpoint, stepName string) []*workflowpb.Task { + var tasks []*workflowpb.Task + switch stepName { + case CopySchemaName, WaitFilteredReplicationName, SplitDiffName: + // TODO: clean the logics and combine it into one function. + for _, d := range strings.Split(checkpoint.Settings["destination_shards"], ",") { + taskID := getTaskID(stepName, "dest", d) + tasks = append(tasks, checkpoint.Tasks[taskID]) + } + case SplitCloneName, MigrateName: + for _, s := range strings.Split(checkpoint.Settings["source_shards"], ",") { + taskID := getTaskID(stepName, "source", s) + tasks = append(tasks, checkpoint.Tasks[taskID]) + } + } + return tasks +} + +// InitTasks initialized the tasks for the workflow and return a checkpoint to store the information. +func (hw *HorizontalReshardingWorkflow) InitTasks() *workflowpb.WorkflowCheckpoint { + taskMap := make(map[string]*workflowpb.Task) + var sourceShards, destinationShards []string + + for _, perSrc := range hw.data { + s := perSrc.sourceShard + worker := perSrc.vtworker + sourceShards = append(sourceShards, s) + for _, d := range perSrc.destinationShards { + destinationShards = append(destinationShards, d) + updatePerDestinationTask(s, d, worker, CopySchemaName, taskMap) + updatePerDestinationTask(s, d, worker, WaitFilteredReplicationName, taskMap) + updatePerDestinationTask(s, d, worker, SplitDiffName, taskMap) + } + updatePerSourceTask(s, worker, SplitCloneName, taskMap) + updatePerSourceTask(s, worker, MigrateName, taskMap) + } + + return &workflowpb.WorkflowCheckpoint{ + CodeVersion: codeVersion, + Tasks: taskMap, + Settings: map[string]string{ + "source_shards": strings.Join(sourceShards, ","), + "destination_shards": strings.Join(destinationShards, ","), + }, + } +} + +func updatePerDestinationTask(sourceShard, destinationShard, worker, name string, taskMap map[string]*workflowpb.Task) { + 
taskID := getTaskID(name, "dest", destinationShard) + taskMap[taskID] = &workflowpb.Task{ + Id: taskID, + State: workflowpb.TaskState_TaskNotStarted, + Attributes: map[string]string{ + "source_shard": sourceShard, + "destination_shard": destinationShard, + "vtworker": worker, + }, + } +} + +func updatePerSourceTask(sourceShard, vtworker, name string, taskMap map[string]*workflowpb.Task) { + taskID := getTaskID(name, "source", sourceShard) + taskMap[taskID] = &workflowpb.Task{ + Id: taskID, + State: workflowpb.TaskState_TaskNotStarted, + Attributes: map[string]string{ + "source_shard": sourceShard, + "vtworker": vtworker, + }, + } +} diff --git a/proto/workflow.proto b/proto/workflow.proto index 0dcb05fb3a0..2821ab6e4bb 100644 --- a/proto/workflow.proto +++ b/proto/workflow.proto @@ -66,7 +66,7 @@ enum TaskState { } message Task { - string task_id = 1; + string id = 1; TaskState state = 2; // attributes includes the parameters the task needs. map attributes = 3; From 414b0512022d49f668376c1beb5bd1632a3e05dd Mon Sep 17 00:00:00 2001 From: Yipei Wang Date: Thu, 9 Feb 2017 17:03:04 -0800 Subject: [PATCH 003/108] workflow: Implemented retry action in ParallelRunner. Created unit test to verify this function. Implemented Horizontal Resharding workflow and tested in unit test and e2e test for the happy path. 
--- go/vt/proto/workflow/workflow.pb.go | 148 ++++---- go/vt/vtctld/workflow.go | 3 +- go/vt/workflow/node.go | 2 +- go/vt/workflow/resharding/checkpoint.go | 3 +- .../horizontal_resharding_workflow.go | 349 ++++++------------ .../horizontal_resharding_workflow_test.go | 39 +- go/vt/workflow/resharding/parallel_runner.go | 133 +++++-- .../resharding/parallel_runner_test.go | 180 ++++++++- go/vt/workflow/resharding/task_helper.go | 135 +++++-- proto/workflow.proto | 29 +- 10 files changed, 609 insertions(+), 412 deletions(-) diff --git a/go/vt/proto/workflow/workflow.pb.go b/go/vt/proto/workflow/workflow.pb.go index 3be466f408c..cb3cf7a64d6 100644 --- a/go/vt/proto/workflow/workflow.pb.go +++ b/go/vt/proto/workflow/workflow.pb.go @@ -10,8 +10,8 @@ It is generated from these files: It has these top-level messages: Workflow - Task WorkflowCheckpoint + Task */ package workflow @@ -179,6 +179,46 @@ func (m *Workflow) GetEndTime() int64 { return 0 } +type WorkflowCheckpoint struct { + // code_version is used to detect incompabilities between the version of the + // running workflow and the one which wrote the checkpoint. If they don't + // match, the workflow must not continue. The author of workflow must update + // this variable in their implementation when incompabilities are introduced. + CodeVersion int32 `protobuf:"varint,1,opt,name=code_version,json=codeVersion" json:"code_version,omitempty"` + // Task is the data structure that stores the execution status and the + // attributes of a task. + Tasks map[string]*Task `protobuf:"bytes,2,rep,name=tasks" json:"tasks,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // settings includes workflow specific data, e.g. the resharding workflow + // would store the source shards and destination shards. 
+ Settings map[string]string `protobuf:"bytes,3,rep,name=settings" json:"settings,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *WorkflowCheckpoint) Reset() { *m = WorkflowCheckpoint{} } +func (m *WorkflowCheckpoint) String() string { return proto.CompactTextString(m) } +func (*WorkflowCheckpoint) ProtoMessage() {} +func (*WorkflowCheckpoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *WorkflowCheckpoint) GetCodeVersion() int32 { + if m != nil { + return m.CodeVersion + } + return 0 +} + +func (m *WorkflowCheckpoint) GetTasks() map[string]*Task { + if m != nil { + return m.Tasks + } + return nil +} + +func (m *WorkflowCheckpoint) GetSettings() map[string]string { + if m != nil { + return m.Settings + } + return nil +} + type Task struct { Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` State TaskState `protobuf:"varint,2,opt,name=state,enum=workflow.TaskState" json:"state,omitempty"` @@ -190,7 +230,7 @@ type Task struct { func (m *Task) Reset() { *m = Task{} } func (m *Task) String() string { return proto.CompactTextString(m) } func (*Task) ProtoMessage() {} -func (*Task) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (*Task) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } func (m *Task) GetId() string { if m != nil { @@ -220,50 +260,10 @@ func (m *Task) GetError() string { return "" } -type WorkflowCheckpoint struct { - // code_version is used to detect incompabilities between the version of the - // running workflow and the one which wrote the checkpoint. If they don't - // match, the workflow must not continue. The author of workflow must update - // this variable in their implementation when incompabilities are introduced. 
- CodeVersion int32 `protobuf:"varint,1,opt,name=code_version,json=codeVersion" json:"code_version,omitempty"` - // Task is the data structure that stores the execution status and the - // attributes of a task. - Tasks map[string]*Task `protobuf:"bytes,2,rep,name=tasks" json:"tasks,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // settings includes workflow specific data, e.g. the resharding workflow - // would store the source shards and destination shards. - Settings map[string]string `protobuf:"bytes,3,rep,name=settings" json:"settings,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` -} - -func (m *WorkflowCheckpoint) Reset() { *m = WorkflowCheckpoint{} } -func (m *WorkflowCheckpoint) String() string { return proto.CompactTextString(m) } -func (*WorkflowCheckpoint) ProtoMessage() {} -func (*WorkflowCheckpoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -func (m *WorkflowCheckpoint) GetCodeVersion() int32 { - if m != nil { - return m.CodeVersion - } - return 0 -} - -func (m *WorkflowCheckpoint) GetTasks() map[string]*Task { - if m != nil { - return m.Tasks - } - return nil -} - -func (m *WorkflowCheckpoint) GetSettings() map[string]string { - if m != nil { - return m.Settings - } - return nil -} - func init() { proto.RegisterType((*Workflow)(nil), "workflow.Workflow") - proto.RegisterType((*Task)(nil), "workflow.Task") proto.RegisterType((*WorkflowCheckpoint)(nil), "workflow.WorkflowCheckpoint") + proto.RegisterType((*Task)(nil), "workflow.Task") proto.RegisterEnum("workflow.WorkflowState", WorkflowState_name, WorkflowState_value) proto.RegisterEnum("workflow.TaskState", TaskState_name, TaskState_value) } @@ -271,35 +271,35 @@ func init() { func init() { proto.RegisterFile("workflow.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 473 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x53, 
0xdd, 0x8a, 0xd3, 0x40, - 0x14, 0x36, 0x7f, 0xdb, 0xf4, 0xa4, 0x9b, 0x2d, 0x47, 0xc1, 0x58, 0x50, 0x62, 0x11, 0x8c, 0x05, - 0x7b, 0x51, 0x41, 0x44, 0xd9, 0x05, 0xf1, 0x07, 0xaf, 0xf6, 0x22, 0x5d, 0xf4, 0xb2, 0xcc, 0x36, - 0xb3, 0xeb, 0x90, 0xed, 0xcc, 0x32, 0x99, 0xec, 0xd2, 0x07, 0xf4, 0x15, 0x7c, 0x06, 0x1f, 0x43, - 0x66, 0x26, 0x49, 0x1b, 0x15, 0x61, 0xef, 0xce, 0xdf, 0xf7, 0xe5, 0xfb, 0xce, 0x9c, 0x40, 0x7c, - 0x2b, 0x64, 0x79, 0x71, 0x25, 0x6e, 0xe7, 0xd7, 0x52, 0x28, 0x81, 0x61, 0x9b, 0x4f, 0x7f, 0x39, - 0x10, 0x7e, 0x6b, 0x12, 0x44, 0xf0, 0xeb, 0x9a, 0x15, 0x89, 0x93, 0x3a, 0xd9, 0x30, 0x37, 0x31, - 0x3e, 0x85, 0xd1, 0x05, 0x59, 0x2b, 0x21, 0xb7, 0x2b, 0x4e, 0x36, 0x34, 0x71, 0x4d, 0x2f, 0x6a, - 0x6a, 0xa7, 0x64, 0x43, 0x35, 0xcc, 0xb4, 0x3c, 0x0b, 0xd3, 0x31, 0xbe, 0x84, 0xa0, 0x52, 0x44, - 0xd1, 0xc4, 0x4f, 0x9d, 0x2c, 0x5e, 0x3c, 0x9c, 0x77, 0x0a, 0xda, 0xaf, 0x2d, 0x75, 0x3b, 0xb7, - 0x53, 0x9a, 0xa2, 0x20, 0x8a, 0x24, 0x41, 0xea, 0x64, 0xa3, 0xdc, 0xc4, 0xf8, 0x00, 0x02, 0x2a, - 0xa5, 0x90, 0xc9, 0x81, 0xe1, 0xb5, 0x09, 0x3e, 0x06, 0xa8, 0x14, 0x91, 0x6a, 0xa5, 0xd8, 0x86, - 0x26, 0x83, 0xd4, 0xc9, 0xbc, 0x7c, 0x68, 0x2a, 0x67, 0x6c, 0x43, 0xf1, 0x11, 0x84, 0x94, 0x17, - 0xb6, 0x19, 0x9a, 0xe6, 0x80, 0xf2, 0x42, 0xb7, 0xa6, 0x3f, 0x1d, 0xf0, 0xcf, 0x48, 0x55, 0x62, - 0x0c, 0x6e, 0x67, 0xd2, 0x65, 0x05, 0xbe, 0x68, 0xb5, 0xba, 0x46, 0xeb, 0xfd, 0x9d, 0x56, 0x3d, - 0xde, 0xd3, 0x79, 0x02, 0x40, 0x94, 0x92, 0xec, 0xbc, 0x56, 0xb4, 0x4a, 0xbc, 0xd4, 0xcb, 0xa2, - 0xc5, 0x93, 0xfe, 0xfc, 0xfc, 0x7d, 0x37, 0xf0, 0x89, 0x2b, 0xb9, 0xcd, 0xf7, 0x10, 0x3b, 0x4f, - 0xfe, 0x9e, 0xa7, 0xc9, 0x31, 0x1c, 0xfd, 0x01, 0xc2, 0x31, 0x78, 0x25, 0xdd, 0x36, 0x22, 0x75, - 0xa8, 0xa1, 0x37, 0xe4, 0xaa, 0x6e, 0x5f, 0xc0, 0x26, 0x6f, 0xdd, 0x37, 0xce, 0xf4, 0x87, 0x0b, - 0xd8, 0x6e, 0xf5, 0xc3, 0x77, 0xba, 0x2e, 0xaf, 0x05, 0xe3, 0x4a, 0xbf, 0xdc, 0x5a, 0x14, 0x74, - 0x75, 0x43, 0x65, 0xc5, 0x04, 0x37, 0x5c, 0x41, 0x1e, 0xe9, 0xda, 0x57, 0x5b, 0xc2, 0x63, 0x08, - 0x14, 0xa9, 
0xca, 0x2a, 0x71, 0x8d, 0x93, 0xe7, 0x7f, 0xbf, 0xd2, 0x8e, 0xcf, 0x98, 0x6b, 0x2c, - 0x59, 0x14, 0x7e, 0x86, 0xb0, 0xa2, 0x4a, 0x31, 0x7e, 0xd9, 0xee, 0x62, 0xf6, 0x5f, 0x86, 0x65, - 0x33, 0x6c, 0x49, 0x3a, 0xec, 0xe4, 0x0b, 0xc0, 0x8e, 0xfc, 0x1f, 0xd6, 0x9f, 0xed, 0x5b, 0x8f, - 0x16, 0x71, 0x7f, 0xe1, 0x7b, 0xab, 0x98, 0xbc, 0x83, 0xc3, 0xde, 0x47, 0xee, 0xb2, 0xc7, 0xd9, - 0x6b, 0x38, 0xec, 0x1d, 0x27, 0xc6, 0x00, 0xa7, 0x42, 0x2d, 0xf5, 0x71, 0xd1, 0x62, 0x7c, 0x0f, - 0x23, 0x18, 0xe4, 0x35, 0xe7, 0x8c, 0x5f, 0x8e, 0x1d, 0x0c, 0xc1, 0xff, 0x28, 0x38, 0x1d, 0xbb, - 0xb3, 0x13, 0x18, 0x76, 0x87, 0x82, 0x08, 0xb1, 0x4e, 0x7a, 0xb8, 0x23, 0x88, 0x8c, 0xd0, 0x0e, - 0x3b, 0x82, 0x50, 0x17, 0x2c, 0xfe, 0xfc, 0xc0, 0xfc, 0x94, 0xaf, 0x7e, 0x07, 0x00, 0x00, 0xff, - 0xff, 0x2e, 0xc7, 0x72, 0x13, 0xa6, 0x03, 0x00, 0x00, + // 477 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x53, 0xdb, 0x6e, 0xd3, 0x40, + 0x10, 0x65, 0x7d, 0x69, 0x9c, 0x71, 0xea, 0x46, 0x43, 0x25, 0x4c, 0x24, 0x90, 0x89, 0x90, 0x30, + 0x91, 0xc8, 0x43, 0x90, 0x10, 0x02, 0xb5, 0x12, 0xe2, 0x22, 0x9e, 0xfa, 0xe0, 0x54, 0xf0, 0x18, + 0x6d, 0xe3, 0x6d, 0x59, 0xa5, 0xd9, 0xad, 0xd6, 0xeb, 0x56, 0xf9, 0x40, 0x7e, 0x81, 0x6f, 0xe0, + 0x33, 0xd0, 0xee, 0xc6, 0x4e, 0x0c, 0x08, 0x89, 0xb7, 0x99, 0x39, 0x73, 0xce, 0x78, 0xf6, 0x8c, + 0x21, 0xb9, 0x93, 0x6a, 0x75, 0x79, 0x2d, 0xef, 0xa6, 0x37, 0x4a, 0x6a, 0x89, 0x51, 0x93, 0x8f, + 0x7f, 0x12, 0x88, 0xbe, 0x6e, 0x13, 0x44, 0x08, 0xea, 0x9a, 0x97, 0x29, 0xc9, 0x48, 0xde, 0x2f, + 0x6c, 0x8c, 0x4f, 0x60, 0x70, 0x49, 0x97, 0x5a, 0xaa, 0xcd, 0x42, 0xd0, 0x35, 0x4b, 0x3d, 0x8b, + 0xc5, 0xdb, 0xda, 0x19, 0x5d, 0x33, 0x43, 0xb3, 0x90, 0xef, 0x68, 0x26, 0xc6, 0x17, 0x10, 0x56, + 0x9a, 0x6a, 0x96, 0x06, 0x19, 0xc9, 0x93, 0xd9, 0x83, 0x69, 0xfb, 0x05, 0xcd, 0xb4, 0xb9, 0x81, + 0x0b, 0xd7, 0x65, 0x24, 0x4a, 0xaa, 0x69, 0x1a, 0x66, 0x24, 0x1f, 0x14, 0x36, 0xc6, 0x63, 0x08, + 0x99, 0x52, 0x52, 0xa5, 0x07, 0x56, 0xd7, 
0x25, 0xf8, 0x08, 0xa0, 0xd2, 0x54, 0xe9, 0x85, 0xe6, + 0x6b, 0x96, 0xf6, 0x32, 0x92, 0xfb, 0x45, 0xdf, 0x56, 0xce, 0xf9, 0x9a, 0xe1, 0x43, 0x88, 0x98, + 0x28, 0x1d, 0x18, 0x59, 0xb0, 0xc7, 0x44, 0x69, 0xa0, 0xf1, 0x77, 0x0f, 0xb0, 0x19, 0xfe, 0xfe, + 0x1b, 0x5b, 0xae, 0x6e, 0x24, 0x17, 0xda, 0x2c, 0xb8, 0x94, 0x25, 0x5b, 0xdc, 0x32, 0x55, 0x71, + 0x29, 0xec, 0xf2, 0x61, 0x11, 0x9b, 0xda, 0x17, 0x57, 0xc2, 0x13, 0x08, 0x35, 0xad, 0x56, 0x55, + 0xea, 0x65, 0x7e, 0x1e, 0xcf, 0x9e, 0xfd, 0xb9, 0xcc, 0x4e, 0x6f, 0x7a, 0x6e, 0x3a, 0x3f, 0x0a, + 0xad, 0x36, 0x85, 0x63, 0xe1, 0x27, 0x88, 0x2a, 0xa6, 0x35, 0x17, 0x57, 0x55, 0xea, 0x5b, 0x85, + 0xc9, 0x3f, 0x15, 0xe6, 0xdb, 0x66, 0x27, 0xd2, 0x72, 0x47, 0x9f, 0x01, 0x76, 0xe2, 0x38, 0x04, + 0x7f, 0xc5, 0x36, 0x5b, 0xaf, 0x4c, 0x88, 0x4f, 0x21, 0xbc, 0xa5, 0xd7, 0xb5, 0xf3, 0x28, 0x9e, + 0x25, 0xbb, 0x21, 0x86, 0x56, 0x38, 0xf0, 0x8d, 0xf7, 0x9a, 0x8c, 0xde, 0xc2, 0x61, 0x67, 0xc8, + 0x5f, 0xc4, 0x8e, 0xf7, 0xc5, 0xfa, 0x7b, 0xe4, 0xf1, 0x0f, 0x02, 0x81, 0x11, 0xc4, 0x04, 0xbc, + 0xf6, 0x58, 0x3c, 0x5e, 0xe2, 0xf3, 0xc6, 0x73, 0xcf, 0x7a, 0x7e, 0xbf, 0x3b, 0xbf, 0xe3, 0xf7, + 0x29, 0x00, 0xd5, 0x5a, 0xf1, 0x8b, 0x5a, 0xb3, 0xe6, 0x51, 0x1e, 0x77, 0xfb, 0xa7, 0xef, 0xda, + 0x06, 0xf7, 0x10, 0x7b, 0x8c, 0xdd, 0x6d, 0x04, 0x7b, 0xb7, 0x31, 0x3a, 0x81, 0xa3, 0xdf, 0x48, + 0xff, 0xb3, 0xd8, 0xe4, 0x15, 0x1c, 0x76, 0x8e, 0x13, 0x13, 0x80, 0x33, 0xa9, 0xe7, 0xe6, 0xb8, + 0x58, 0x39, 0xbc, 0x87, 0x31, 0xf4, 0x8a, 0x5a, 0x08, 0x2e, 0xae, 0x86, 0x04, 0x23, 0x08, 0x3e, + 0x48, 0xc1, 0x86, 0xde, 0xe4, 0x14, 0xfa, 0xed, 0x82, 0x88, 0x90, 0x98, 0xa4, 0xc3, 0x3b, 0x82, + 0xd8, 0x3a, 0xd0, 0x72, 0x07, 0x10, 0x99, 0x82, 0xe3, 0x5f, 0x1c, 0xd8, 0x9f, 0xf2, 0xe5, 0xaf, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x72, 0x5c, 0x6d, 0x7f, 0xa6, 0x03, 0x00, 0x00, } diff --git a/go/vt/vtctld/workflow.go b/go/vt/vtctld/workflow.go index 28e3665b9aa..d74921b1626 100644 --- a/go/vt/vtctld/workflow.go +++ b/go/vt/vtctld/workflow.go @@ -13,6 +13,7 @@ import ( 
"github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/vtctl" "github.com/youtube/vitess/go/vt/workflow" + "github.com/youtube/vitess/go/vt/workflow/resharding" "github.com/youtube/vitess/go/vt/workflow/topovalidator" ) @@ -40,7 +41,7 @@ func initWorkflowManager(ts topo.Server) { schemaswap.RegisterWorkflowFactory() // Register the Horizontal Resharding workflow. - // resharding.Register() + resharding.Register() // Unregister the blacklisted workflows. for _, name := range workflowManagerDisable { diff --git a/go/vt/workflow/node.go b/go/vt/workflow/node.go index 7f06fee31af..184aee1b770 100644 --- a/go/vt/workflow/node.go +++ b/go/vt/workflow/node.go @@ -102,7 +102,7 @@ type ActionListener interface { // the Angular 2 web app. type Node struct { // nodeManager is the NodeManager handling this Node. - // It is set by AddRootNode, and propagated by AddChild. + // It is set by AddRootNode, and propagated by AddChildren. // Any change to this node must take the Manager's lock. nodeManager *NodeManager diff --git a/go/vt/workflow/resharding/checkpoint.go b/go/vt/workflow/resharding/checkpoint.go index 64498ac83fa..0634ed583b9 100644 --- a/go/vt/workflow/resharding/checkpoint.go +++ b/go/vt/workflow/resharding/checkpoint.go @@ -30,11 +30,12 @@ func NewCheckpointWriter(ts topo.Server, checkpoint *workflowpb.WorkflowCheckpoi } // UpdateTask updates the status and checkpointing the update. 
-func (c *CheckpointWriter) UpdateTask(taskID string, status workflowpb.TaskState) error { +func (c *CheckpointWriter) UpdateTask(taskID string, status workflowpb.TaskState, err string) error { c.checkpointMu.Lock() defer c.checkpointMu.Unlock() c.checkpoint.Tasks[taskID].State = status + c.checkpoint.Tasks[taskID].Error = err return c.Save() } diff --git a/go/vt/workflow/resharding/horizontal_resharding_workflow.go b/go/vt/workflow/resharding/horizontal_resharding_workflow.go index c73f07a96c6..219c1c0b334 100644 --- a/go/vt/workflow/resharding/horizontal_resharding_workflow.go +++ b/go/vt/workflow/resharding/horizontal_resharding_workflow.go @@ -6,29 +6,25 @@ package resharding // TO DO: it can be used to save checkpointer import ( - "encoding/json" "flag" "fmt" "strings" log "github.com/golang/glog" + "github.com/golang/protobuf/proto" "golang.org/x/net/context" - "github.com/youtube/vitess/go/vt/automation" "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" - "github.com/youtube/vitess/go/vt/topo/topoproto" "github.com/youtube/vitess/go/vt/topotools" "github.com/youtube/vitess/go/vt/workflow" "github.com/youtube/vitess/go/vt/wrangler" - topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" workflowpb "github.com/youtube/vitess/go/vt/proto/workflow" ) -// TODO(yipeiw): the order of exported and unexported variables? const ( codeVersion = 1 @@ -60,47 +56,16 @@ type HorizontalReshardingWorkflow struct { logger *logutil.MemoryLogger // rootUINode is the root node representing the workflow in the UI. 
- rootUINode *workflow.Node - copySchemaUINode *workflow.Node - splitCloneUINode *workflow.Node - splitDiffUINode *workflow.Node - migrateUINode *workflow.Node + rootUINode *workflow.Node + copySchemaUINode *workflow.Node + splitCloneUINode *workflow.Node + waitFilteredReplicationUINode *workflow.Node + splitDiffUINode *workflow.Node + migrateUINode *workflow.Node + taskUINodeMap map[string]*workflow.Node - keyspace string - vtworkers []string - - data []*PerShardHorizontalResharding checkpoint *workflowpb.WorkflowCheckpoint checkpointWriter *CheckpointWriter - - // Each phase has its own ParallelRunner object. - copyRunner *ParallelRunner - cloneRunner *ParallelRunner - waitRunner *ParallelRunner - diffRunner *ParallelRunner - migrateRunner *ParallelRunner -} - -// PerShardHorizontalReshardingData is the data structure to store the resharding arguments for a single source shard. -type PerShardHorizontalReshardingData struct { - keyspace string - sourceShard string - destinationShards []string - vtworker string -} - -// PerShardHorizontalResharding contains the data for horizontal resharding from a single source shard. -type PerShardHorizontalResharding struct { - PerShardHorizontalReshardingData - - parent *HorizontalReshardingWorkflow - - copySchemaShardUINode *workflow.Node - splitCloneShardUINode *workflow.Node - splitDiffShardUINode *workflow.Node - migrateShardUINode *workflow.Node - - shardUILogger *logutil.MemoryLogger } // Run executes the horizontal resharding process and updates the UI message. @@ -110,11 +75,10 @@ func (hw *HorizontalReshardingWorkflow) Run(ctx context.Context, manager *workfl hw.topoServer = manager.TopoServer() hw.wr = wrangler.New(logutil.NewConsoleLogger(), manager.TopoServer(), tmclient.NewTabletManagerClient()) hw.wi = wi - - // TODO(yipeiw): separate the source shards, destination shards finding code and other initialization code for the convenience of unit test. 
- hw.createSubWorkflows() - hw.setUIMessage("Horizontal resharding: workflow created successfully.") - + hw.checkpointWriter = NewCheckpointWriter(hw.topoServer, hw.checkpoint, hw.wi) + if err := hw.checkpointWriter.Save(); err != nil { + return err + } hw.rootUINode.Display = workflow.NodeDisplayDeterminate hw.rootUINode.BroadcastChanges(true /* updateChildren */) @@ -122,217 +86,62 @@ func (hw *HorizontalReshardingWorkflow) Run(ctx context.Context, manager *workfl hw.setUIMessage(fmt.Sprintf("Horizontal Resharding failed: %v", err)) return err } - - hw.setUIMessage(fmt.Sprintf("Horizontal Resharding on %v: finished sucessfully.", hw.keyspace)) - + hw.setUIMessage(fmt.Sprintf("Horizontal Resharding is finished sucessfully.")) return nil } func (hw *HorizontalReshardingWorkflow) runWorkflow() error { - hw.checkpoint = hw.InitTasks() - hw.checkpointWriter = NewCheckpointWriter(hw.topoServer, hw.checkpoint, hw.wi) - hw.checkpointWriter.Save() - copyTasks := hw.GenerateTasks(hw.checkpoint, CopySchemaName) - if err := hw.copyRunner.Run(copyTasks, hw.runCopySchema, hw.checkpointWriter, PARALLEL); err != nil { + copyRunner := NewParallelRunner(hw.ctx, hw.checkpointWriter, hw.taskUINodeMap, copyTasks) + if err := copyRunner.Run(hw.runCopySchema, PARALLEL); err != nil { return err } cloneTasks := hw.GenerateTasks(hw.checkpoint, SplitCloneName) - if err := hw.cloneRunner.Run(cloneTasks, hw.runSplitClone, hw.checkpointWriter, PARALLEL); err != nil { + cloneRunner := NewParallelRunner(hw.ctx, hw.checkpointWriter, hw.taskUINodeMap, cloneTasks) + if err := cloneRunner.Run(hw.runSplitClone, PARALLEL); err != nil { return err } waitTasks := hw.GenerateTasks(hw.checkpoint, WaitFilteredReplicationName) - if err := hw.waitRunner.Run(waitTasks, hw.runWaitFilteredReplication, hw.checkpointWriter, PARALLEL); err != nil { + waitRunner := NewParallelRunner(hw.ctx, hw.checkpointWriter, hw.taskUINodeMap, waitTasks) + if err := waitRunner.Run(hw.runWaitFilteredReplication, PARALLEL); err != 
nil { return err } diffTasks := hw.GenerateTasks(hw.checkpoint, SplitDiffName) + diffRunner := NewParallelRunner(hw.ctx, hw.checkpointWriter, hw.taskUINodeMap, diffTasks) // SplitDiff requires the vtworker only work for one destination shard at a time. // To simplify the concurrency control, we run all the SplitDiff task sequentially. - if err := hw.diffRunner.Run(diffTasks, hw.runSplitDiff, hw.checkpointWriter, SEQUENTIAL); err != nil { + if err := diffRunner.Run(hw.runSplitDiff, SEQUENTIAL); err != nil { return err } migrateTasks := hw.GenerateTasks(hw.checkpoint, MigrateName) - if err := hw.migrateRunner.Run(migrateTasks, hw.runMigrate, hw.checkpointWriter, SEQUENTIAL); err != nil { + migrateRunner := NewParallelRunner(hw.ctx, hw.checkpointWriter, hw.taskUINodeMap, migrateTasks) + if err := migrateRunner.Run(hw.runMigrate, SEQUENTIAL); err != nil { return err } return nil } -// createSubWorkflows creates a per source shard horizontal resharding workflow for each source shard in the keyspace. -func (hw *HorizontalReshardingWorkflow) createSubWorkflows() error { - overlappingShards, err := topotools.FindOverlappingShards(hw.ctx, hw.topoServer, hw.keyspace) - if err != nil { - hw.logger.Infof("Horizontal Resharding: createSubWorkflows error in finding overlapping shards: %v.", err) - return err - } - - for i, os := range overlappingShards { - var sourceShard *topo.ShardInfo - var destinationShards []*topo.ShardInfo - // Judge which side is source shard by checking the number of servedTypes. 
- if len(os.Left[0].ServedTypes) > 0 { - sourceShard = os.Left[0] - destinationShards = os.Right - } else { - sourceShard = os.Right[0] - destinationShards = os.Left - } - if err := hw.createWorkflowPerShard(sourceShard, destinationShards, hw.vtworkers[i]); err != nil { - return err - } - } - return nil -} - -func (hw *HorizontalReshardingWorkflow) createWorkflowPerShard(sourceShard *topo.ShardInfo, destinationShards []*topo.ShardInfo, vtworker string) error { - sourceShardName := sourceShard.ShardName() - var destShardNames []string - for _, s := range destinationShards { - destShardNames = append(destShardNames, s.ShardName()) - } - - perShard := &PerShardHorizontalResharding{ - PerShardHorizontalReshardingData: PerShardHorizontalReshardingData{ - keyspace: hw.keyspace, - sourceShard: sourceShardName, - destinationShards: destShardNames, - vtworker: vtworker, - }, - copySchemaShardUINode: &workflow.Node{ - Name: "Shard " + sourceShardName, - PathName: "shard_" + sourceShardName, - }, - splitCloneShardUINode: &workflow.Node{ - Name: "Shard " + sourceShardName, - PathName: "shard_" + sourceShardName, - }, - splitDiffShardUINode: &workflow.Node{ - Name: "Shard " + sourceShardName, - PathName: "shard_" + sourceShardName, - }, - migrateShardUINode: &workflow.Node{ - Name: "Shard " + sourceShardName, - PathName: "shard_" + sourceShardName, - }, - shardUILogger: logutil.NewMemoryLogger(), - } - perShard.parent = hw - - hw.copySchemaUINode.Children = append(hw.copySchemaUINode.Children, perShard.copySchemaShardUINode) - hw.splitCloneUINode.Children = append(hw.splitCloneUINode.Children, perShard.splitCloneShardUINode) - hw.splitDiffUINode.Children = append(hw.splitDiffUINode.Children, perShard.splitDiffShardUINode) - hw.migrateUINode.Children = append(hw.migrateUINode.Children, perShard.migrateShardUINode) - - hw.data = append(hw.data, perShard) - return nil -} - -// runCopySchemaPerShard runs CopySchema for a destination shard. 
-// There should be #destshards parameters, while each param includes 1 sourceshard and 1 destshard. -func (hw *HorizontalReshardingWorkflow) runCopySchema(attr map[string]string) error { - s := attr["source_shard"] - d := attr["destination_shard"] - err := hw.wr.CopySchemaShardFromShard(hw.ctx, nil /* tableArray*/, nil /* excludeTableArray */, true, /*includeViews*/ - hw.keyspace, s, hw.keyspace, d, wrangler.DefaultWaitSlaveTimeout) - if err != nil { - hw.logger.Infof("Horizontal Resharding: error in CopySchemaShardFromShard from %s to %s: %v.", s, d, err) - } - hw.logger.Infof("Horizontal Resharding: CopySchemaShardFromShard from %s to %s is finished.", s, d) - return err -} - -// runSplitClonePerShard runs SplitClone for a source shard. -// There should be #sourceshards parameters, while each param includes 1 sourceshard and its destshards. The destShards are useless here. -func (hw *HorizontalReshardingWorkflow) runSplitClone(attr map[string]string) error { - s := attr["source_shard"] - worker := attr["vtworker"] - - sourceKeyspaceShard := topoproto.KeyspaceShardString(hw.keyspace, s) - // Reset the vtworker to avoid error if vtworker command has been called elsewhere. - // This is because vtworker class doesn't cleanup the environment after execution. - automation.ExecuteVtworker(hw.ctx, worker, []string{"Reset"}) - // The flag min_healthy_rdonly_tablets is set to 1 (default value is 2). - // Therefore, we can reuse the normal end to end test setting, which has only 1 rdonly tablet. - // TODO(yipeiw): Add min_healthy_rdonly_tablets as an input argument in UI. 
- args := []string{"SplitClone", "--min_healthy_rdonly_tablets=1", sourceKeyspaceShard} - if _, err := automation.ExecuteVtworker(hw.ctx, worker, args); err != nil { - hw.logger.Infof("Horizontal resharding: error in SplitClone in keyspace %s: %v.", hw.keyspace, err) - return err - } - hw.logger.Infof("Horizontal resharding: SplitClone is finished.") - - return nil -} - -// runWaitFilteredReplication runs WaitForFilteredReplication for a destination shard. -// There should be #destshards parameters, while each param includes 1 sourceshard and 1 destshard. -func (hw *HorizontalReshardingWorkflow) runWaitFilteredReplication(attr map[string]string) error { - d := attr["destination_shard"] - if err := hw.wr.WaitForFilteredReplication(hw.ctx, hw.keyspace, d, wrangler.DefaultWaitForFilteredReplicationMaxDelay); err != nil { - hw.logger.Infof("Horizontal Resharding: error in WaitForFilteredReplication: %v.", err) - return err - } - hw.logger.Infof("Horizontal Resharding:WaitForFilteredReplication is finished on " + d) - return nil -} - -// runSplitDiffPerShard runs SplitDiff for a source shard. -// There should be #sourceshards parameters, while each param includes 1 sourceshard and its destshards. -func (hw *HorizontalReshardingWorkflow) runSplitDiff(attr map[string]string) error { - d := attr["destination_shard"] - worker := attr["vtworker"] - - automation.ExecuteVtworker(hw.ctx, worker, []string{"Reset"}) - args := []string{"SplitDiff", "--min_healthy_rdonly_tablets=1", topoproto.KeyspaceShardString(hw.keyspace, d)} - _, err := automation.ExecuteVtworker(hw.ctx, worker, args) - if err != nil { - return err - } - - hw.logger.Infof("Horizontal resharding: SplitDiff is finished.") - return nil -} - -// runMigratePerShard runs the migration sequentially among all source shards. -// There should be 1 parameter, which includes all source shards to be migrated. 
-func (hw *HorizontalReshardingWorkflow) runMigrate(attr map[string]string) error { - s := attr["source_shard"] - sourceKeyspaceShard := topoproto.KeyspaceShardString(hw.keyspace, s) - servedTypeParams := []topodatapb.TabletType{topodatapb.TabletType_RDONLY, - topodatapb.TabletType_REPLICA, - topodatapb.TabletType_MASTER} - for _, servedType := range servedTypeParams { - err := hw.wr.MigrateServedTypes(hw.ctx, hw.keyspace, s, nil /* cells */, servedType, false /* reverse */, false /* skipReFreshState */, wrangler.DefaultFilteredReplicationWaitTime) - if err != nil { - hw.logger.Infof("Horizontal Resharding: error in MigrateServedTypes on servedType %s: %v.", servedType, err) - return err - } - hw.logger.Infof("Horizontal Resharding: MigrateServedTypes is finished on tablet %s serve type %s.", sourceKeyspaceShard, servedType) - } - return nil -} - func (hw *HorizontalReshardingWorkflow) setUIMessage(message string) { - log.Infof("Horizontal resharding on keyspace %v: %v.", hw.keyspace, message) + log.Infof("Horizontal resharding: %v.", message) hw.rootUINode.Log = hw.logger.String() hw.rootUINode.Message = message hw.rootUINode.BroadcastChanges(false /* updateChildren */) } -// WorkflowFactory is the factory to register the HorizontalReshard Workflow. -type WorkflowFactory struct{} - // Register registers horizontal_resharding as a valid factory in the workflow framework. func Register() { - workflow.Register(horizontalReshardingFactoryName, &WorkflowFactory{}) + workflow.Register(horizontalReshardingFactoryName, &HorizontalReshardingWorkflowFactory{}) } +// HorizontalReshardingWorkflowFactory is the factory to register the HorizontalReshard Workflow. +type HorizontalReshardingWorkflowFactory struct{} + // Init is part of the workflow.Factory interface. 
-func (*WorkflowFactory) Init(workflowProto *workflowpb.Workflow, args []string) error { +func (*HorizontalReshardingWorkflowFactory) Init(workflowProto *workflowpb.Workflow, args []string) error { subFlags := flag.NewFlagSet(horizontalReshardingFactoryName, flag.ContinueOnError) keyspace := subFlags.String("keyspace", "", "Name of keyspace to perform horizontal resharding") vtworkersStr := subFlags.String("vtworkers", "", "A comma-separated list of vtworker addresses") @@ -346,12 +155,15 @@ func (*WorkflowFactory) Init(workflowProto *workflowpb.Workflow, args []string) vtworkers := strings.Split(*vtworkersStr, ",") workflowProto.Name = fmt.Sprintf("Horizontal resharding on keyspace %s", *keyspace) - data := &HorizontalReshardingData{ - Keyspace: *keyspace, - Vtworkers: vtworkers, + + ts := topo.Open() + defer ts.Close() + workflowCheckpoint, err := initCheckpoint(*keyspace, vtworkers, ts) + if err != nil { + return err } - var err error - workflowProto.Data, err = json.Marshal(data) + + workflowProto.Data, err = proto.Marshal(workflowCheckpoint) if err != nil { return err } @@ -359,28 +171,32 @@ func (*WorkflowFactory) Init(workflowProto *workflowpb.Workflow, args []string) } // Instantiate is part of the workflow.Factory interface. -func (*WorkflowFactory) Instantiate(workflowProto *workflowpb.Workflow, rootNode *workflow.Node) (workflow.Workflow, error) { +func (*HorizontalReshardingWorkflowFactory) Instantiate(workflowProto *workflowpb.Workflow, rootNode *workflow.Node) (workflow.Workflow, error) { rootNode.Message = "This is a workflow to execute horizontal resharding automatically." 
- data := &HorizontalReshardingData{} - if err := json.Unmarshal(workflowProto.Data, data); err != nil { + + workflowCheckpoint := &workflowpb.WorkflowCheckpoint{} + if err := proto.Unmarshal(workflowProto.Data, workflowCheckpoint); err != nil { return nil, err } hw := &HorizontalReshardingWorkflow{ - keyspace: data.Keyspace, - vtworkers: data.Vtworkers, + checkpoint: workflowCheckpoint, rootUINode: rootNode, copySchemaUINode: &workflow.Node{ Name: "CopySchemaShard", - PathName: "copy_schema", + PathName: CopySchemaName, }, splitCloneUINode: &workflow.Node{ Name: "SplitClone", PathName: "clone", }, + waitFilteredReplicationUINode: &workflow.Node{ + Name: "WaitForFilteredReplication", + PathName: WaitFilteredReplicationName, + }, splitDiffUINode: &workflow.Node{ Name: "SplitDiff", - PathName: "diff", + PathName: SplitDiffName, }, migrateUINode: &workflow.Node{ Name: "MigrateServedType", @@ -391,8 +207,77 @@ func (*WorkflowFactory) Instantiate(workflowProto *workflowpb.Workflow, rootNode hw.rootUINode.Children = []*workflow.Node{ hw.copySchemaUINode, hw.splitCloneUINode, + hw.waitFilteredReplicationUINode, hw.splitDiffUINode, hw.migrateUINode, } + + taskNodeMap := make(map[string]*workflow.Node) + for _, d := range strings.Split(hw.checkpoint.Settings["destination_shards"], ",") { + createUINode(CopySchemaName, "dest", d, hw.copySchemaUINode, taskNodeMap) + createUINode(WaitFilteredReplicationName, "dest", d, hw.waitFilteredReplicationUINode, taskNodeMap) + createUINode(SplitDiffName, "dest", d, hw.splitDiffUINode, taskNodeMap) + } + for _, s := range strings.Split(hw.checkpoint.Settings["source_shards"], ",") { + createUINode(SplitCloneName, "source", s, hw.splitCloneUINode, taskNodeMap) + createUINode(MigrateName, "source", s, hw.migrateUINode, taskNodeMap) + } return hw, nil } + +func createUINode(name, shardType, shardName string, rootNode *workflow.Node, nodeMap map[string]*workflow.Node) { + taskID := createTaskID(name, shardType, shardName) + taskUINode := 
&workflow.Node{ + Name: "Shard " + shardName, + PathName: taskID, + } + rootNode.Children = append(rootNode.Children, taskUINode) + nodeMap[taskID] = taskUINode +} + +// createSubWorkflows creates a per source shard horizontal resharding workflow for each source shard in the keyspace. +func initCheckpoint(keyspace string, vtworkers []string, ts topo.Server) (*workflowpb.WorkflowCheckpoint, error) { + overlappingShards, err := topotools.FindOverlappingShards(context.Background(), ts, keyspace) + if err != nil { + return nil, err + } + + taskMap := make(map[string]*workflowpb.Task) + var sourceShardList, destinationShardList []string + + for i, os := range overlappingShards { + var sourceShard *topo.ShardInfo + var destinationShards []*topo.ShardInfo + // Judge which side is source shard by checking the number of servedTypes. + if len(os.Left[0].ServedTypes) > 0 { + sourceShard = os.Left[0] + destinationShards = os.Right + } else { + sourceShard = os.Right[0] + destinationShards = os.Left + } + + s := sourceShard.ShardName() + sourceShardList = append(sourceShardList, s) + worker := vtworkers[i] + for _, ds := range destinationShards { + d := ds.ShardName() + destinationShardList = append(destinationShardList, d) + + updatePerDestinationTask(keyspace, s, d, worker, CopySchemaName, taskMap) + updatePerDestinationTask(keyspace, s, d, worker, WaitFilteredReplicationName, taskMap) + updatePerDestinationTask(keyspace, s, d, worker, SplitDiffName, taskMap) + } + updatePerSourceTask(keyspace, s, worker, SplitCloneName, taskMap) + updatePerSourceTask(keyspace, s, worker, MigrateName, taskMap) + } + + return &workflowpb.WorkflowCheckpoint{ + CodeVersion: codeVersion, + Tasks: taskMap, + Settings: map[string]string{ + "source_shards": strings.Join(sourceShardList, ","), + "destination_shards": strings.Join(destinationShardList, ","), + }, + }, nil +} diff --git a/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go 
b/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go index fe941694ea7..ed0620db923 100644 --- a/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go +++ b/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go @@ -9,6 +9,7 @@ import ( "github.com/youtube/vitess/go/vt/topo/memorytopo" "github.com/youtube/vitess/go/vt/worker/fakevtworkerclient" "github.com/youtube/vitess/go/vt/worker/vtworkerclient" + "github.com/youtube/vitess/go/vt/workflow" "github.com/youtube/vitess/go/vt/wrangler" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" @@ -41,7 +42,7 @@ func TestHorizontalResharding(t *testing.T) { // Checking all tasks are Done. for _, task := range hw.checkpoint.Tasks { - if task.State != workflowpb.TaskState_TaskDone { + if task.State != workflowpb.TaskState_TaskDone || task.Error != "" { t.Fatalf("task is not done: Id: %v, State: %v, Attributes:%v", task.Id, task.State, task.Attributes) } } @@ -53,25 +54,38 @@ func setUp(t *testing.T, ctrl *gomock.Controller) *HorizontalReshardingWorkflow ts := memorytopo.NewServer("cell") // Create fake wrangler using mock interface, which is used for the unit test in steps CopySchema and MigratedServedType. mockWranglerInterface := NewMockReshardingWrangler(ctrl) + // Create the checkpoint for workflow. 
+ keyspace := "test_keyspace" + vtworkers := []string{"localhost:15032"} + + taskMap := make(map[string]*workflowpb.Task) + source := "0" + destinations := []string{"-80", "80-"} + worker := vtworkers[0] + updatePerSourceTask(keyspace, source, worker, SplitCloneName, taskMap) + updatePerSourceTask(keyspace, source, worker, MigrateName, taskMap) + + for _, d := range destinations { + updatePerDestinationTask(keyspace, source, d, worker, CopySchemaName, taskMap) + updatePerDestinationTask(keyspace, source, d, worker, WaitFilteredReplicationName, taskMap) + updatePerDestinationTask(keyspace, source, d, worker, SplitDiffName, taskMap) + } // Create the workflow (ignore the node construction since we don't test the front-end part in this unit test). hw := &HorizontalReshardingWorkflow{ - keyspace: "test_keyspace", - vtworkers: []string{"localhost:15032"}, wr: mockWranglerInterface, topoServer: ts, logger: logutil.NewMemoryLogger(), - } - perShard := &PerShardHorizontalResharding{ - parent: hw, - PerShardHorizontalReshardingData: PerShardHorizontalReshardingData{ - keyspace: "test_keyspace", - sourceShard: "0", - destinationShards: []string{"-80", "80-"}, - vtworker: "localhost:15032", + checkpoint: &workflowpb.WorkflowCheckpoint{ + CodeVersion: codeVersion, + Tasks: taskMap, + Settings: map[string]string{ + "source_shards": "0", + "destination_shards": "-80,80-", + }, }, + taskUINodeMap: make(map[string]*workflow.Node), } - hw.data = append(hw.data, perShard) // Create the initial workflowpb.Workflow object. w := &workflowpb.Workflow{ Uuid: "testworkflow0000", @@ -84,6 +98,7 @@ func setUp(t *testing.T, ctrl *gomock.Controller) *HorizontalReshardingWorkflow t.Errorf("%s: Horizontal resharding workflow fails in creating workflowInfo", err) return nil } + hw.checkpointWriter = NewCheckpointWriter(hw.topoServer, hw.checkpoint, hw.wi) // Set the expected behaviors for mock wrangler. 
mockWranglerInterface.EXPECT().CopySchemaShardFromShard( diff --git a/go/vt/workflow/resharding/parallel_runner.go b/go/vt/workflow/resharding/parallel_runner.go index 3161763db25..6bbe962c761 100644 --- a/go/vt/workflow/resharding/parallel_runner.go +++ b/go/vt/workflow/resharding/parallel_runner.go @@ -2,10 +2,13 @@ package resharding import ( "fmt" + "path" + "sync" log "github.com/golang/glog" + "golang.org/x/net/context" - "github.com/youtube/vitess/go/vt/concurrency" + "github.com/youtube/vitess/go/vt/workflow" workflowpb "github.com/youtube/vitess/go/vt/proto/workflow" ) @@ -22,50 +25,106 @@ const ( // ParallelRunner is used to control executing tasks concurrently. // Each phase has its own ParallelRunner object. type ParallelRunner struct { - // TODO(yipeiw) : ParallelRunner should have fields for per-task controllable actions. + ctx context.Context + + checkpointWriter *CheckpointWriter + taskUINodes map[string]*workflow.Node + tasks []*workflowpb.Task + + // mu is used to protect the retryActionRegistery. + mu sync.Mutex + // retryActionRegistery stores the data for all actions. Each task can retrieve its control object through task ID. + retryActionRegistery map[string]*RetryController +} + +func NewParallelRunner(ctx context.Context, cp *CheckpointWriter, taskUINodes map[string]*workflow.Node, tasks []*workflowpb.Task) *ParallelRunner { + p := &ParallelRunner{ + ctx: ctx, + checkpointWriter: cp, + taskUINodes: taskUINodes, + tasks: tasks, + } + p.retryActionRegistery = make(map[string]*RetryController) + return p } // Run is the entry point for controling task executions. // tasks should be a copy of tasks with the expected execution order, the status of task should be -// both updated in this copy and the original one (checkpointer.Update does this). This is to avoid -// data racing situation. 
-func (p *ParallelRunner) Run(tasks []*workflowpb.Task, executeFunc func(map[string]string) error, cp *CheckpointWriter, concurrencyLevel level) error { +// both updated in this copy and the original one (checkpointer.UpdateTask does this). This is used +// to avoid data races. +func (p *ParallelRunner) Run(executeFunc func(map[string]string) error, concurrencyLevel level) error { var parallelNum int // default value is 0. The task will not run in this case. switch concurrencyLevel { case SEQUENTIAL: parallelNum = 1 case PARALLEL: - parallelNum = len(tasks) + parallelNum = len(p.tasks) default: panic(fmt.Sprintf("BUG: Invalid concurrency level: %v", concurrencyLevel)) } - // TODO(yipeiw): Support retry, restart, pause actions. Wrap the execution to interleave with actions. // sem is a channel used to control the level of concurrency. sem := make(chan bool, parallelNum) - var ec concurrency.AllErrorRecorder - for _, task := range tasks { - // TODO(yipeiw): Add checking logics to support retry, pause, restart actions when lauching tasks. - if task.State == workflowpb.TaskState_TaskDone { + for _, task := range p.tasks { + if task.State == workflowpb.TaskState_TaskDone && task.Error == "" { continue } sem <- true go func(t *workflowpb.Task) { defer func() { <-sem }() - status := workflowpb.TaskState_TaskDone - if err := executeFunc(t.Attributes); err != nil { - status = workflowpb.TaskState_TaskNotStarted - t.Error = err.Error() - ec.RecordError(err) - } - t.State = status - // Only log the error passage rather then propograting it through ErrorRecorder. The reason is that error message in - // ErrorRecorder will leads to stop of the workflow, which is unexpected if only checkpointing fails. - // However, the checkpointing failure right after initializing the tasks should lead to a stop of the workflow. 
- if err := cp.UpdateTask(t.Id, status); err != nil { - log.Errorf("%v", err) + taskID := t.Id + for { + err := executeFunc(t.Attributes) + t.State = workflowpb.TaskState_TaskDone + if err != nil { + t.Error = err.Error() + } + + if updateErr := p.checkpointWriter.UpdateTask(taskID, workflowpb.TaskState_TaskDone, t.Error); updateErr != nil { + // Only logging the error rather than propagating it through ErrorRecorder. Error message in + // ErrorRecorder will lead to the stop of the workflow, which is unexpected if only checkpointing fails. + // If the checkpointing fails during initialization, we should stop the workflow. + log.Errorf("%v", updateErr) + } + + if err == nil { + t.Error = "" + return + } + + // If task fails, the retry action is enabled. + n, ok := p.taskUINodes[taskID] + if !ok { + log.Errorf("UI node not found for task %v", taskID) + return + } + + retryAction := &workflow.Action{ + Name: "Retry", + State: workflow.ActionStateEnabled, + Style: workflow.ActionStyleWaiting, + } + n.Actions = []*workflow.Action{retryAction} + n.Listener = p + + p.mu.Lock() + p.retryActionRegistery[taskID] = &RetryController{ + node: n, + retryChannel: make(chan struct{}), + } + p.mu.Unlock() + n.BroadcastChanges(false /* updateChildren */) + + // Block the task execution until the retry action is triggered or the job is canceled. + select { + case <-p.retryActionRegistery[taskID].retryChannel: + continue + case <-p.ctx.Done(): + p.retryActionRegistery = nil + return + } } }(task) } @@ -73,5 +132,31 @@ func (p *ParallelRunner) Run(tasks []*workflowpb.Task, executeFunc func(map[stri for i := 0; i < parallelNum; i++ { sem <- true } - return ec.Error() + // TODO: collect error message from tasks.Error instead, s.t. if the task is retried, we can update the error + return nil +} + +// Action handles the retry action. It implements the interface ActionListener. 
+func (p *ParallelRunner) Action(ctx context.Context, pathName, name string) error { + p.mu.Lock() + defer p.mu.Unlock() + + actionID := getTaskID(pathName) + c, ok := p.retryActionRegistery[actionID] + if !ok { + return fmt.Errorf("Unknown node path for the action: %v", pathName) + } + + switch name { + case "Retry": + c.closeRetryChannel() + delete(p.retryActionRegistery, actionID) + default: + return fmt.Errorf("Unknown action: %v", name) + } + return nil +} + +func getTaskID(nodePath string) string { + return path.Base(nodePath) } diff --git a/go/vt/workflow/resharding/parallel_runner_test.go b/go/vt/workflow/resharding/parallel_runner_test.go index 02ae8fcba81..16e119190bc 100644 --- a/go/vt/workflow/resharding/parallel_runner_test.go +++ b/go/vt/workflow/resharding/parallel_runner_test.go @@ -2,12 +2,15 @@ package resharding import ( "context" + "errors" "fmt" "strconv" "strings" + "sync" "testing" "github.com/youtube/vitess/go/vt/topo/memorytopo" + "github.com/youtube/vitess/go/vt/workflow" workflowpb "github.com/youtube/vitess/go/vt/proto/workflow" ) @@ -17,43 +20,180 @@ const ( ) func TestParallelRunner(t *testing.T) { - w := &workflowpb.Workflow{ - Uuid: "testparallelrunner", - FactoryName: "simple_print", - State: workflowpb.WorkflowState_NotStarted, + cp, err := startWorkflow(5) + if err != nil { + t.Errorf("%s: Fails in creating workflow", err) } + ctx := context.Background() + tasks := GetOrderedPrintTasks(cp.checkpoint) - ts := memorytopo.NewServer("cell") - wi, err := ts.CreateWorkflow(context.TODO(), w) + p := NewParallelRunner(ctx, cp, make(map[string]*workflow.Node), tasks) + executeLog := func(attr map[string]string) error { + t.Logf("The number passed to me is %v \n", attr["number"]) + return nil + } + if err := p.Run(executeLog, PARALLEL); err != nil { + t.Errorf("%s: Parallel Runner should not fail", err) + } + + // Check whether all tasks are in finished status. 
+ for _, task := range cp.checkpoint.Tasks { + if task.State != workflowpb.TaskState_TaskDone || task.Error != "" { + t.Fatalf("Task info: %v, %v, %v: Parallel Runner task not succeed", task.Id, task.State, task.Attributes) + } + } +} + +func TestParallelRunnerRetryAction(t *testing.T) { + cp, err := startWorkflow(5) if err != nil { - t.Errorf("%s: Parallel Runner fails in creating workflow", err) + t.Errorf("%s: Fails in creating workflow", err) } - taskNum := 5 - initCheckpoint := InitPrintTasks(taskNum) + ctx := context.Background() - cp := NewCheckpointWriter(ts, initCheckpoint, wi) - cp.Save() + // Create UI nodes. Each task has a node. These task nodes are the children of a root node. + notifications := make(chan []byte, 10) + nodeManager := workflow.NewNodeManager() + _, index, err := nodeManager.GetAndWatchFullTree(notifications) + if err != nil { + t.Errorf("GetAndWatchTree Failed: %v", err) + } + defer nodeManager.CloseWatcher(index) + + rootNode := &workflow.Node{ + PathName: "test_root", + Name: "root", + } + if err := nodeManager.AddRootNode(rootNode); err != nil { + t.Errorf("adding root node failed: %v", err) + } + result, ok := <-notifications + + taskNodeMap := make(map[string]*workflow.Node) + for _, task := range cp.checkpoint.Tasks { + taskNode := &workflow.Node{ + PathName: task.Id, + Name: "task_" + task.Id, + } + taskNodeMap[task.Id] = taskNode + rootNode.Children = append(rootNode.Children, taskNode) + } + + rootNode.BroadcastChanges(true /*updateChildren*/) - tasks := GetOrderedPrintTasks(initCheckpoint) + result, ok = <-notifications + if !ok || + strings.Contains(string(result), `"children":[]`) || + !strings.Contains(string(result), `"name":"task_Sleep_0"`) || + !strings.Contains(string(result), `"name":"task_Sleep_1"`) || + !strings.Contains(string(result), `"name":"task_Sleep_2"`) || + !strings.Contains(string(result), `"name":"task_Sleep_3"`) || + !strings.Contains(string(result), `"name":"task_Sleep_4"`) { + t.Errorf("unexpected 
behavior in adding children nodes: %v, %v", ok, string(result)) + } + + // Set up ParallelRunner. + tasks := GetOrderedPrintTasks(cp.checkpoint) + p := NewParallelRunner(ctx, cp, taskNodeMap, tasks) + + // Set retry flag to be false. The targeting task will fail under this condition. + retryFlag := false + errMessage := "fake error for testing retry" executeLog := func(attr map[string]string) error { t.Logf("The number passed to me is %v \n", attr["number"]) + n, err := strconv.Atoi(attr["number"]) + if err != nil { + t.Logf("Converting number string to int fails: %v \n", attr["number"]) + return err + } + if !retryFlag { + if n == 3 { + t.Logf("I will fail at this time since retry flag is false.") + return errors.New(errMessage) + } + } return nil } - p := &ParallelRunner{} - if err := p.Run(tasks, executeLog, cp, PARALLEL); err != nil { - t.Errorf("%s: Parallel Runner should not fail", err) - } + go func() { + // This goroutine is used to mornitor the UI change. + // When the retry action is enabled, it will trigger it using nodemanager. + for { + select { + case mornitor := <-notifications: + if strings.Contains(string(mornitor), "Retry") { + // Check if Retry action is enabled for the expected task. + taskName := logTaskName(3) + nodeTarget := taskNodeMap[taskName] + taskTarget := cp.checkpoint.Tasks[taskName] + if taskTarget.State != workflowpb.TaskState_TaskDone || + taskTarget.Error != errMessage || + len(nodeTarget.Actions) != 1 { + t.Fatalf("Retry action is not enabled as expectedL %v, %v, %v", &nodeTarget, taskTarget.State, taskTarget.Error) + } - // Check whether all tasks are in finished status. + // Reset the retryFlag to make the task succeed when retrying. 
+ retryFlag = true + + t.Logf("Triggering retry action.") + if err := nodeManager.Action(ctx, &workflow.ActionParameters{ + Path: "/test_root/" + logTaskName(3), + Name: "Retry", + }); err != nil { + t.Errorf("unexpected action error: %v", err) + } + + if len(nodeTarget.Actions) != 0 { + t.Fatalf("the node actions should be empty after triggering retry: %v", nodeTarget.Actions) + } + return + } + case <-ctx.Done(): + return + } + } + }() + + // Call ParallelRunner.Run through a goroutine. In this way, the failure of task will not block the main function. + var waitGroup sync.WaitGroup + waitGroup.Add(1) + go func() { + defer waitGroup.Done() + err := p.Run(executeLog, PARALLEL) + if err != nil { + t.Logf("ParallelRunner.Run fails: %v", err) + } + }() + + waitGroup.Wait() + // Check that all tasks are finished successfully. for _, task := range cp.checkpoint.Tasks { - if task.State != workflowpb.TaskState_TaskDone { - t.Fatalf("Task info: %v, %v, %v: Parallel Runner task not finished", task.Id, task.State, task.Attributes) + if task.State != workflowpb.TaskState_TaskDone || task.Error != "" { + t.Fatalf("Task info: %v, %v, %v: Parallel Runner task not succeed", task.Id, task.State, task.Attributes) } } } +func startWorkflow(taskNum int) (*CheckpointWriter, error) { + initCheckpoint := InitPrintTasks(taskNum) + + w := &workflowpb.Workflow{ + Uuid: "testparallelrunner", + FactoryName: "simple_print", + State: workflowpb.WorkflowState_NotStarted, + } + ts := memorytopo.NewServer("cell") + wi, err := ts.CreateWorkflow(context.TODO(), w) + if err != nil { + return nil, err + } + + cp := NewCheckpointWriter(ts, initCheckpoint, wi) + cp.Save() + return cp, nil +} + func logTaskName(num int) string { return fmt.Sprintf("%v_%v", printName, num) } @@ -72,7 +212,7 @@ func InitPrintTasks(numTasks int) *workflowpb.WorkflowCheckpoint { infoList = append(infoList, numStr) } return &workflowpb.WorkflowCheckpoint{ - CodeVersion: codeVersion, + CodeVersion: 0, Tasks: tasks, Settings: 
map[string]string{"numbers": strings.Join(infoList, ",")}, } diff --git a/go/vt/workflow/resharding/task_helper.go b/go/vt/workflow/resharding/task_helper.go index 291f0850b03..57e4b15c47e 100644 --- a/go/vt/workflow/resharding/task_helper.go +++ b/go/vt/workflow/resharding/task_helper.go @@ -4,11 +4,16 @@ import ( "fmt" "strings" + "github.com/youtube/vitess/go/vt/automation" + "github.com/youtube/vitess/go/vt/topo/topoproto" + "github.com/youtube/vitess/go/vt/wrangler" + + topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" workflowpb "github.com/youtube/vitess/go/vt/proto/workflow" ) -func getTaskID(phase, shardType, shardName string) string { - return fmt.Sprintf("%s_%s/%s", phase, shardType, shardName) +func createTaskID(phase, shardType, shardName string) string { + return fmt.Sprintf("%s_%s_%s", phase, shardType, shardName) } // GenerateTasks generates a copy of tasks for a specific step. The task status is not checked in this function. @@ -16,51 +21,113 @@ func (hw *HorizontalReshardingWorkflow) GenerateTasks(checkpoint *workflowpb.Wor var tasks []*workflowpb.Task switch stepName { case CopySchemaName, WaitFilteredReplicationName, SplitDiffName: - // TODO: clean the logics and combine it into one function. for _, d := range strings.Split(checkpoint.Settings["destination_shards"], ",") { - taskID := getTaskID(stepName, "dest", d) + taskID := createTaskID(stepName, "dest", d) tasks = append(tasks, checkpoint.Tasks[taskID]) } case SplitCloneName, MigrateName: for _, s := range strings.Split(checkpoint.Settings["source_shards"], ",") { - taskID := getTaskID(stepName, "source", s) + taskID := createTaskID(stepName, "source", s) tasks = append(tasks, checkpoint.Tasks[taskID]) } } return tasks } -// InitTasks initialized the tasks for the workflow and return a checkpoint to store the information. 
-func (hw *HorizontalReshardingWorkflow) InitTasks() *workflowpb.WorkflowCheckpoint { - taskMap := make(map[string]*workflowpb.Task) - var sourceShards, destinationShards []string - - for _, perSrc := range hw.data { - s := perSrc.sourceShard - worker := perSrc.vtworker - sourceShards = append(sourceShards, s) - for _, d := range perSrc.destinationShards { - destinationShards = append(destinationShards, d) - updatePerDestinationTask(s, d, worker, CopySchemaName, taskMap) - updatePerDestinationTask(s, d, worker, WaitFilteredReplicationName, taskMap) - updatePerDestinationTask(s, d, worker, SplitDiffName, taskMap) - } - updatePerSourceTask(s, worker, SplitCloneName, taskMap) - updatePerSourceTask(s, worker, MigrateName, taskMap) +// runCopySchemaPerShard runs CopySchema for a destination shard. +// There should be #destshards parameters, while each param includes 1 sourceshard and 1 destshard. +func (hw *HorizontalReshardingWorkflow) runCopySchema(attr map[string]string) error { + s := attr["source_shard"] + d := attr["destination_shard"] + keyspace := attr["keyspace"] + err := hw.wr.CopySchemaShardFromShard(hw.ctx, nil /* tableArray*/, nil /* excludeTableArray */, true, /*includeViews*/ + keyspace, s, keyspace, d, wrangler.DefaultWaitSlaveTimeout) + if err != nil { + hw.logger.Infof("Horizontal Resharding: error in CopySchemaShardFromShard from %s to %s: %v.", s, d, err) } + hw.logger.Infof("Horizontal Resharding: CopySchemaShardFromShard from %s to %s is finished.", s, d) + return err +} - return &workflowpb.WorkflowCheckpoint{ - CodeVersion: codeVersion, - Tasks: taskMap, - Settings: map[string]string{ - "source_shards": strings.Join(sourceShards, ","), - "destination_shards": strings.Join(destinationShards, ","), - }, +// runSplitClonePerShard runs SplitClone for a source shard. +// There should be #sourceshards parameters, while each param includes 1 sourceshard and its destshards. The destShards are useless here. 
+func (hw *HorizontalReshardingWorkflow) runSplitClone(attr map[string]string) error { + s := attr["source_shard"] + worker := attr["vtworker"] + keyspace := attr["keyspace"] + + sourceKeyspaceShard := topoproto.KeyspaceShardString(keyspace, s) + // Reset the vtworker to avoid error if vtworker command has been called elsewhere. + // This is because vtworker class doesn't cleanup the environment after execution. + automation.ExecuteVtworker(hw.ctx, worker, []string{"Reset"}) + // The flag min_healthy_rdonly_tablets is set to 1 (default value is 2). + // Therefore, we can reuse the normal end to end test setting, which has only 1 rdonly tablet. + // TODO(yipeiw): Add min_healthy_rdonly_tablets as an input argument in UI. + args := []string{"SplitClone", "--min_healthy_rdonly_tablets=1", sourceKeyspaceShard} + if _, err := automation.ExecuteVtworker(hw.ctx, worker, args); err != nil { + hw.logger.Infof("Horizontal resharding: error in SplitClone in keyspace %s: %v.", keyspace, err) + return err + } + hw.logger.Infof("Horizontal resharding: SplitClone is finished.") + + return nil +} + +// runWaitFilteredReplication runs WaitForFilteredReplication for a destination shard. +// There should be #destshards parameters, while each param includes 1 sourceshard and 1 destshard. +func (hw *HorizontalReshardingWorkflow) runWaitFilteredReplication(attr map[string]string) error { + d := attr["destination_shard"] + keyspace := attr["keyspace"] + + if err := hw.wr.WaitForFilteredReplication(hw.ctx, keyspace, d, wrangler.DefaultWaitForFilteredReplicationMaxDelay); err != nil { + hw.logger.Infof("Horizontal Resharding: error in WaitForFilteredReplication: %v.", err) + return err + } + hw.logger.Infof("Horizontal Resharding:WaitForFilteredReplication is finished on " + d) + return nil +} + +// runSplitDiffPerShard runs SplitDiff for a source shard. +// There should be #sourceshards parameters, while each param includes 1 sourceshard and its destshards. 
+func (hw *HorizontalReshardingWorkflow) runSplitDiff(attr map[string]string) error { + d := attr["destination_shard"] + worker := attr["vtworker"] + keyspace := attr["keyspace"] + + automation.ExecuteVtworker(hw.ctx, worker, []string{"Reset"}) + args := []string{"SplitDiff", "--min_healthy_rdonly_tablets=1", topoproto.KeyspaceShardString(keyspace, d)} + _, err := automation.ExecuteVtworker(hw.ctx, worker, args) + if err != nil { + return err + } + + hw.logger.Infof("Horizontal resharding: SplitDiff is finished.") + return nil +} + +// runMigratePerShard runs the migration sequentially among all source shards. +// There should be 1 parameter, which includes all source shards to be migrated. +func (hw *HorizontalReshardingWorkflow) runMigrate(attr map[string]string) error { + s := attr["source_shard"] + keyspace := attr["keyspace"] + + sourceKeyspaceShard := topoproto.KeyspaceShardString(keyspace, s) + servedTypeParams := []topodatapb.TabletType{topodatapb.TabletType_RDONLY, + topodatapb.TabletType_REPLICA, + topodatapb.TabletType_MASTER} + for _, servedType := range servedTypeParams { + err := hw.wr.MigrateServedTypes(hw.ctx, keyspace, s, nil /* cells */, servedType, false /* reverse */, false /* skipReFreshState */, wrangler.DefaultFilteredReplicationWaitTime) + if err != nil { + hw.logger.Infof("Horizontal Resharding: error in MigrateServedTypes on servedType %s: %v.", servedType, err) + return err + } + hw.logger.Infof("Horizontal Resharding: MigrateServedTypes is finished on tablet %s serve type %s.", sourceKeyspaceShard, servedType) } + return nil } -func updatePerDestinationTask(sourceShard, destinationShard, worker, name string, taskMap map[string]*workflowpb.Task) { - taskID := getTaskID(name, "dest", destinationShard) +func updatePerDestinationTask(keyspace, sourceShard, destinationShard, worker, name string, taskMap map[string]*workflowpb.Task) { + taskID := createTaskID(name, "dest", destinationShard) taskMap[taskID] = &workflowpb.Task{ Id: taskID, 
State: workflowpb.TaskState_TaskNotStarted, @@ -68,18 +135,20 @@ func updatePerDestinationTask(sourceShard, destinationShard, worker, name string "source_shard": sourceShard, "destination_shard": destinationShard, "vtworker": worker, + "keyspace": keyspace, }, } } -func updatePerSourceTask(sourceShard, vtworker, name string, taskMap map[string]*workflowpb.Task) { - taskID := getTaskID(name, "source", sourceShard) +func updatePerSourceTask(keyspace, sourceShard, vtworker, name string, taskMap map[string]*workflowpb.Task) { + taskID := createTaskID(name, "source", sourceShard) taskMap[taskID] = &workflowpb.Task{ Id: taskID, State: workflowpb.TaskState_TaskNotStarted, Attributes: map[string]string{ "source_shard": sourceShard, "vtworker": vtworker, + "keyspace": keyspace, }, } } diff --git a/proto/workflow.proto b/proto/workflow.proto index 2821ab6e4bb..3c7bbb50bb2 100644 --- a/proto/workflow.proto +++ b/proto/workflow.proto @@ -59,20 +59,6 @@ message Workflow { int64 end_time = 8; } -enum TaskState { - TaskNotStarted = 0; - TaskRunning = 1; - TaskDone = 2; -} - -message Task { - string id = 1; - TaskState state = 2; - // attributes includes the parameters the task needs. - map attributes = 3; - string error = 4; -} - message WorkflowCheckpoint { // code_version is used to detect incompabilities between the version of the // running workflow and the one which wrote the checkpoint. If they don't @@ -89,3 +75,18 @@ message WorkflowCheckpoint { // would store the source shards and destination shards. map settings = 3; } + +enum TaskState { + TaskNotStarted = 0; + TaskRunning = 1; + TaskDone = 2; +} + +message Task { + string id = 1; + TaskState state = 2; + // attributes includes the parameters the task needs. + map attributes = 3; + string error = 4; +} + From 0361c8a9f0ef5272e67a0e6b68664e334dafa290 Mon Sep 17 00:00:00 2001 From: Yipei Wang Date: Mon, 13 Feb 2017 19:57:07 -0800 Subject: [PATCH 004/108] workflow: commit to rebase master branch. 
--- go/vt/topo/workflow.go | 18 +- go/vt/workflow/node.go | 11 +- go/vt/workflow/resharding/checkpoint.go | 16 +- .../horizontal_resharding_workflow.go | 281 +++++++++++------- .../horizontal_resharding_workflow_test.go | 265 +++++++++++++---- go/vt/workflow/resharding/parallel_runner.go | 144 ++++----- .../resharding/parallel_runner_test.go | 10 +- go/vt/workflow/resharding/retry_controller.go | 38 +++ go/vt/workflow/resharding/task.go | 137 +++++++++ go/vt/workflow/resharding/task_helper.go | 154 ---------- 10 files changed, 678 insertions(+), 396 deletions(-) create mode 100644 go/vt/workflow/resharding/retry_controller.go create mode 100644 go/vt/workflow/resharding/task.go delete mode 100644 go/vt/workflow/resharding/task_helper.go diff --git a/go/vt/topo/workflow.go b/go/vt/topo/workflow.go index dd677b6f522..43521e2aecc 100644 --- a/go/vt/topo/workflow.go +++ b/go/vt/topo/workflow.go @@ -17,7 +17,7 @@ const ( workflowFilename = "Workflow" ) -func PathForWorkflow(uuid string) string { +func pathForWorkflow(uuid string) string { return path.Join(workflowsPath, uuid, workflowFilename) } @@ -27,14 +27,6 @@ type WorkflowInfo struct { *workflowpb.Workflow } -func (w *WorkflowInfo) SetVersion(v Version) { - w.version = v -} - -func (w *WorkflowInfo) Version() Version { - return w.version -} - // GetWorkflowNames returns the names of the existing // workflows. They are sorted by uuid. func (ts Server) GetWorkflowNames(ctx context.Context) ([]string, error) { @@ -59,7 +51,7 @@ func (ts Server) CreateWorkflow(ctx context.Context, w *workflowpb.Workflow) (*W } // Save it. - filePath := PathForWorkflow(w.Uuid) + filePath := pathForWorkflow(w.Uuid) version, err := ts.Create(ctx, GlobalCell, filePath, contents) if err != nil { return nil, err @@ -73,7 +65,7 @@ func (ts Server) CreateWorkflow(ctx context.Context, w *workflowpb.Workflow) (*W // GetWorkflow reads a workflow from the Backend. 
func (ts Server) GetWorkflow(ctx context.Context, uuid string) (*WorkflowInfo, error) { // Read the file. - filePath := PathForWorkflow(uuid) + filePath := pathForWorkflow(uuid) contents, version, err := ts.Get(ctx, GlobalCell, filePath) if err != nil { return nil, err @@ -101,7 +93,7 @@ func (ts Server) SaveWorkflow(ctx context.Context, wi *WorkflowInfo) error { } // Save it. - filePath := PathForWorkflow(wi.Uuid) + filePath := pathForWorkflow(wi.Uuid) version, err := ts.Update(ctx, GlobalCell, filePath, contents, wi.version) if err != nil { return err @@ -115,6 +107,6 @@ func (ts Server) SaveWorkflow(ctx context.Context, wi *WorkflowInfo) error { // DeleteWorkflow deletes the specified workflow. After this, the // WorkflowInfo object should not be used any more. func (ts Server) DeleteWorkflow(ctx context.Context, wi *WorkflowInfo) error { - filePath := PathForWorkflow(wi.Uuid) + filePath := pathForWorkflow(wi.Uuid) return ts.Delete(ctx, GlobalCell, filePath, wi.version) } diff --git a/go/vt/workflow/node.go b/go/vt/workflow/node.go index 184aee1b770..25eb86feb0b 100644 --- a/go/vt/workflow/node.go +++ b/go/vt/workflow/node.go @@ -102,7 +102,7 @@ type ActionListener interface { // the Angular 2 web app. type Node struct { // nodeManager is the NodeManager handling this Node. - // It is set by AddRootNode, and propagated by AddChildren. + // It is set by AddRootNode, and propagated by AddChild. // Any change to this node must take the Manager's lock. 
nodeManager *NodeManager @@ -396,6 +396,15 @@ func (m *NodeManager) Action(ctx context.Context, ap *ActionParameters) error { return n.Listener.Action(ctx, ap.Path, ap.Name) } +func (m *NodeManager) GetNodeByRelativePath(parentNode *Node, childPath string) (*Node, error) { + fullNodePath := path.Join(parentNode.PathName, childPath) + node, err := m.getNodeByPath(fullNodePath) + if err != nil { + return nil, err + } + return node, nil +} + func (m *NodeManager) getNodeByPath(nodePath string) (*Node, error) { m.mu.Lock() defer m.mu.Unlock() diff --git a/go/vt/workflow/resharding/checkpoint.go b/go/vt/workflow/resharding/checkpoint.go index 0634ed583b9..00c393e9130 100644 --- a/go/vt/workflow/resharding/checkpoint.go +++ b/go/vt/workflow/resharding/checkpoint.go @@ -2,15 +2,15 @@ package resharding import ( "context" - "encoding/json" "sync" + "github.com/golang/protobuf/proto" "github.com/youtube/vitess/go/vt/topo" workflowpb "github.com/youtube/vitess/go/vt/proto/workflow" ) -// CheckpointWriter save the checkpoint data into topology server. +// CheckpointWriter saves the checkpoint data into topology server. type CheckpointWriter struct { topoServer topo.Server @@ -29,8 +29,10 @@ func NewCheckpointWriter(ts topo.Server, checkpoint *workflowpb.WorkflowCheckpoi } } -// UpdateTask updates the status and checkpointing the update. +// UpdateTask updates the status of task in the checkpoint. func (c *CheckpointWriter) UpdateTask(taskID string, status workflowpb.TaskState, err string) error { + // Writing the checkpoint is protected to avoid the situation that the + // task value is partially updated when saving the checkpoint. c.checkpointMu.Lock() defer c.checkpointMu.Unlock() @@ -41,8 +43,14 @@ func (c *CheckpointWriter) UpdateTask(taskID string, status workflowpb.TaskState // Save packets the checkpoint and sends it to the topology server. 
func (c *CheckpointWriter) Save() error { + c.checkpointMu.Lock() + defer c.checkpointMu.Unlock() + return c.saveLocked() +} + +func (c *CheckpointWriter) saveLocked() error { var err error - c.wi.Data, err = json.Marshal(c.checkpoint) + c.wi.Data, err = proto.Marshal(c.checkpoint) if err != nil { return err } diff --git a/go/vt/workflow/resharding/horizontal_resharding_workflow.go b/go/vt/workflow/resharding/horizontal_resharding_workflow.go index 219c1c0b334..6cbf5f9492f 100644 --- a/go/vt/workflow/resharding/horizontal_resharding_workflow.go +++ b/go/vt/workflow/resharding/horizontal_resharding_workflow.go @@ -22,6 +22,7 @@ import ( "github.com/youtube/vitess/go/vt/workflow" "github.com/youtube/vitess/go/vt/wrangler" + topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" workflowpb "github.com/youtube/vitess/go/vt/proto/workflow" ) @@ -30,39 +31,37 @@ const ( horizontalReshardingFactoryName = "horizontal_resharding" - CopySchemaName = "copy_schema" - SplitCloneName = "clone" - WaitFilteredReplicationName = "wait_replication" - SplitDiffName = "diff" - MigrateName = "migrate" + copySchemaName = "copy_schema" + cloneName = "clone" + waitForFilteredReplicationName = "wait_for_filtered_replication" + diffName = "diff" + migrateRdonlyName = "migrate_rdonly" + migrateReplicaName = "migrate_replica" + migrateMasterName = "migrate_master" ) -// HorizontalReshardingData is the data structure to store resharding arguments. -type HorizontalReshardingData struct { - Keyspace string - Vtworkers []string -} - -// HorizontalReshardingWorkflow contains meta-information and methods to control horizontal resharding workflow. +// HorizontalReshardingWorkflow contains meta-information and methods +// to control horizontal resharding workflow. type HorizontalReshardingWorkflow struct { - // ctx is the context of the whole horizontal resharding process. Once this context is canceled, - // the horizontal resharding process stops. 
+ // ctx is the context of the whole horizontal resharding process. + // Once this context is canceled, the horizontal resharding process stops. ctx context.Context - wr ReshardingWrangler manager *workflow.Manager topoServer topo.Server wi *topo.WorkflowInfo + wr ReshardingWrangler // logger is the logger we export UI logs from. logger *logutil.MemoryLogger // rootUINode is the root node representing the workflow in the UI. - rootUINode *workflow.Node - copySchemaUINode *workflow.Node - splitCloneUINode *workflow.Node - waitFilteredReplicationUINode *workflow.Node - splitDiffUINode *workflow.Node - migrateUINode *workflow.Node - taskUINodeMap map[string]*workflow.Node + rootUINode *workflow.Node + copySchemaUINode *workflow.Node + cloneUINode *workflow.Node + waitForFilteredReplicationUINode *workflow.Node + diffUINode *workflow.Node + migrateRdonlyUINode *workflow.Node + migrateReplicaUINode *workflow.Node + migrateMasterUINode *workflow.Node checkpoint *workflowpb.WorkflowCheckpoint checkpointWriter *CheckpointWriter @@ -72,9 +71,11 @@ type HorizontalReshardingWorkflow struct { // It implements the workflow.Workflow interface. 
func (hw *HorizontalReshardingWorkflow) Run(ctx context.Context, manager *workflow.Manager, wi *topo.WorkflowInfo) error { hw.ctx = ctx + hw.manager = manager hw.topoServer = manager.TopoServer() - hw.wr = wrangler.New(logutil.NewConsoleLogger(), manager.TopoServer(), tmclient.NewTabletManagerClient()) hw.wi = wi + hw.wr = wrangler.New(logutil.NewConsoleLogger(), manager.TopoServer(), tmclient.NewTabletManagerClient()) + hw.checkpointWriter = NewCheckpointWriter(hw.topoServer, hw.checkpoint, hw.wi) if err := hw.checkpointWriter.Save(); err != nil { return err @@ -91,37 +92,51 @@ func (hw *HorizontalReshardingWorkflow) Run(ctx context.Context, manager *workfl } func (hw *HorizontalReshardingWorkflow) runWorkflow() error { - copyTasks := hw.GenerateTasks(hw.checkpoint, CopySchemaName) - copyRunner := NewParallelRunner(hw.ctx, hw.checkpointWriter, hw.taskUINodeMap, copyTasks) - if err := copyRunner.Run(hw.runCopySchema, PARALLEL); err != nil { + copyTasks := hw.GetTasks(hw.checkpoint, copySchemaName) + copyRunner := NewParallelRunner(hw.ctx, hw.manager.NodeManager(), hw.copySchemaUINode, hw.checkpointWriter, copyTasks, hw.runCopySchema, PARALLEL) + if err := copyRunner.Run(); err != nil { + return err + } + + cloneTasks := hw.GetTasks(hw.checkpoint, cloneName) + cloneRunner := NewParallelRunner(hw.ctx, hw.manager.NodeManager(), hw.cloneUINode, hw.checkpointWriter, cloneTasks, hw.runSplitClone, PARALLEL) + if err := cloneRunner.Run(); err != nil { + return err + } + + waitTasks := hw.GetTasks(hw.checkpoint, waitForFilteredReplicationName) + waitRunner := NewParallelRunner(hw.ctx, hw.manager.NodeManager(), hw.waitForFilteredReplicationUINode, hw.checkpointWriter, waitTasks, hw.runWaitForFilteredReplication, PARALLEL) + if err := waitRunner.Run(); err != nil { return err } - cloneTasks := hw.GenerateTasks(hw.checkpoint, SplitCloneName) - cloneRunner := NewParallelRunner(hw.ctx, hw.checkpointWriter, hw.taskUINodeMap, cloneTasks) - if err := 
cloneRunner.Run(hw.runSplitClone, PARALLEL); err != nil { + diffTasks := hw.GetTasks(hw.checkpoint, diffName) + diffRunner := NewParallelRunner(hw.ctx, hw.manager.NodeManager(), hw.diffUINode, hw.checkpointWriter, diffTasks, hw.runSplitDiff, SEQUENTIAL) + // SplitDiff requires the vtworker only work for one destination shard + // at a time. To simplify the concurrency control, we run all the SplitDiff + // task sequentially. + if err := diffRunner.Run(); err != nil { return err } - waitTasks := hw.GenerateTasks(hw.checkpoint, WaitFilteredReplicationName) - waitRunner := NewParallelRunner(hw.ctx, hw.checkpointWriter, hw.taskUINodeMap, waitTasks) - if err := waitRunner.Run(hw.runWaitFilteredReplication, PARALLEL); err != nil { + migrateRdonlyTasks := hw.GetTasks(hw.checkpoint, migrateRdonlyName) + migrateRdonlyRunner := NewParallelRunner(hw.ctx, hw.manager.NodeManager(), hw.migrateRdonlyUINode, hw.checkpointWriter, migrateRdonlyTasks, hw.runMigrate, SEQUENTIAL) + if err := migrateRdonlyRunner.Run(); err != nil { return err } - diffTasks := hw.GenerateTasks(hw.checkpoint, SplitDiffName) - diffRunner := NewParallelRunner(hw.ctx, hw.checkpointWriter, hw.taskUINodeMap, diffTasks) - // SplitDiff requires the vtworker only work for one destination shard at a time. - // To simplify the concurrency control, we run all the SplitDiff task sequentially. 
- if err := diffRunner.Run(hw.runSplitDiff, SEQUENTIAL); err != nil { + migrateReplicaTasks := hw.GetTasks(hw.checkpoint, migrateReplicaName) + migrateReplicaRunner := NewParallelRunner(hw.ctx, hw.manager.NodeManager(), hw.migrateReplicaUINode, hw.checkpointWriter, migrateReplicaTasks, hw.runMigrate, SEQUENTIAL) + if err := migrateReplicaRunner.Run(); err != nil { return err } - migrateTasks := hw.GenerateTasks(hw.checkpoint, MigrateName) - migrateRunner := NewParallelRunner(hw.ctx, hw.checkpointWriter, hw.taskUINodeMap, migrateTasks) - if err := migrateRunner.Run(hw.runMigrate, SEQUENTIAL); err != nil { + migrateMasterTasks := hw.GetTasks(hw.checkpoint, migrateMasterName) + migrateMasterRunner := NewParallelRunner(hw.ctx, hw.manager.NodeManager(), hw.migrateMasterUINode, hw.checkpointWriter, migrateMasterTasks, hw.runMigrate, SEQUENTIAL) + if err := migrateMasterRunner.Run(); err != nil { return err } + return nil } @@ -132,16 +147,18 @@ func (hw *HorizontalReshardingWorkflow) setUIMessage(message string) { hw.rootUINode.BroadcastChanges(false /* updateChildren */) } -// Register registers horizontal_resharding as a valid factory in the workflow framework. +// Register registers horizontal_resharding as a valid factory +// in the workflow framework. func Register() { workflow.Register(horizontalReshardingFactoryName, &HorizontalReshardingWorkflowFactory{}) } -// HorizontalReshardingWorkflowFactory is the factory to register the HorizontalReshard Workflow. +// HorizontalReshardingWorkflowFactory is the factory to register +// the HorizontalResharding Workflow. type HorizontalReshardingWorkflowFactory struct{} // Init is part of the workflow.Factory interface. 
-func (*HorizontalReshardingWorkflowFactory) Init(workflowProto *workflowpb.Workflow, args []string) error { +func (*HorizontalReshardingWorkflowFactory) Init(w *workflowpb.Workflow, args []string) error { subFlags := flag.NewFlagSet(horizontalReshardingFactoryName, flag.ContinueOnError) keyspace := subFlags.String("keyspace", "", "Name of keyspace to perform horizontal resharding") vtworkersStr := subFlags.String("vtworkers", "", "A comma-separated list of vtworker addresses") @@ -154,16 +171,14 @@ func (*HorizontalReshardingWorkflowFactory) Init(workflowProto *workflowpb.Workf } vtworkers := strings.Split(*vtworkersStr, ",") - workflowProto.Name = fmt.Sprintf("Horizontal resharding on keyspace %s", *keyspace) + w.Name = fmt.Sprintf("Horizontal resharding on keyspace %s", *keyspace) - ts := topo.Open() - defer ts.Close() - workflowCheckpoint, err := initCheckpoint(*keyspace, vtworkers, ts) + checkpoint, err := initCheckpoint(*keyspace, vtworkers) if err != nil { return err } - workflowProto.Data, err = proto.Marshal(workflowCheckpoint) + w.Data, err = proto.Marshal(checkpoint) if err != nil { return err } @@ -171,81 +186,92 @@ func (*HorizontalReshardingWorkflowFactory) Init(workflowProto *workflowpb.Workf } // Instantiate is part of the workflow.Factory interface. -func (*HorizontalReshardingWorkflowFactory) Instantiate(workflowProto *workflowpb.Workflow, rootNode *workflow.Node) (workflow.Workflow, error) { +func (*HorizontalReshardingWorkflowFactory) Instantiate(w *workflowpb.Workflow, rootNode *workflow.Node) (workflow.Workflow, error) { rootNode.Message = "This is a workflow to execute horizontal resharding automatically." 
- workflowCheckpoint := &workflowpb.WorkflowCheckpoint{} - if err := proto.Unmarshal(workflowProto.Data, workflowCheckpoint); err != nil { + checkpoint := &workflowpb.WorkflowCheckpoint{} + if err := proto.Unmarshal(w.Data, checkpoint); err != nil { return nil, err } hw := &HorizontalReshardingWorkflow{ - checkpoint: workflowCheckpoint, + checkpoint: checkpoint, rootUINode: rootNode, copySchemaUINode: &workflow.Node{ Name: "CopySchemaShard", - PathName: CopySchemaName, + PathName: copySchemaName, }, - splitCloneUINode: &workflow.Node{ + cloneUINode: &workflow.Node{ Name: "SplitClone", - PathName: "clone", + PathName: cloneName, }, - waitFilteredReplicationUINode: &workflow.Node{ + waitForFilteredReplicationUINode: &workflow.Node{ Name: "WaitForFilteredReplication", - PathName: WaitFilteredReplicationName, + PathName: waitForFilteredReplicationName, }, - splitDiffUINode: &workflow.Node{ + diffUINode: &workflow.Node{ Name: "SplitDiff", - PathName: SplitDiffName, + PathName: diffName, + }, + migrateRdonlyUINode: &workflow.Node{ + Name: "MigrateServedTypeRDONLY", + PathName: migrateRdonlyName, }, - migrateUINode: &workflow.Node{ - Name: "MigrateServedType", - PathName: "migrate", + migrateReplicaUINode: &workflow.Node{ + Name: "MigrateServedTypeREPLICA", + PathName: migrateReplicaName, + }, + migrateMasterUINode: &workflow.Node{ + Name: "MigrateServedTypeMASTER", + PathName: migrateMasterName, }, logger: logutil.NewMemoryLogger(), } hw.rootUINode.Children = []*workflow.Node{ hw.copySchemaUINode, - hw.splitCloneUINode, - hw.waitFilteredReplicationUINode, - hw.splitDiffUINode, - hw.migrateUINode, + hw.cloneUINode, + hw.waitForFilteredReplicationUINode, + hw.diffUINode, + hw.migrateRdonlyUINode, + hw.migrateReplicaUINode, + hw.migrateMasterUINode, } - taskNodeMap := make(map[string]*workflow.Node) - for _, d := range strings.Split(hw.checkpoint.Settings["destination_shards"], ",") { - createUINode(CopySchemaName, "dest", d, hw.copySchemaUINode, taskNodeMap) - 
createUINode(WaitFilteredReplicationName, "dest", d, hw.waitFilteredReplicationUINode, taskNodeMap) - createUINode(SplitDiffName, "dest", d, hw.splitDiffUINode, taskNodeMap) - } - for _, s := range strings.Split(hw.checkpoint.Settings["source_shards"], ",") { - createUINode(SplitCloneName, "source", s, hw.splitCloneUINode, taskNodeMap) - createUINode(MigrateName, "source", s, hw.migrateUINode, taskNodeMap) - } + destinationShards := strings.Split(checkpoint.Settings["destination_shards"], ",") + sourceShards := strings.Split(checkpoint.Settings["source_shards"], ",") + createUINode(copySchemaName, destinationShards, hw.copySchemaUINode) + createUINode(cloneName, sourceShards, hw.cloneUINode) + createUINode(waitForFilteredReplicationName, destinationShards, hw.waitForFilteredReplicationUINode) + createUINode(diffName, destinationShards, hw.diffUINode) + createUINode(migrateRdonlyName, sourceShards, hw.migrateRdonlyUINode) + createUINode(migrateReplicaName, sourceShards, hw.migrateReplicaUINode) + createUINode(migrateMasterName, sourceShards, hw.migrateMasterUINode) + return hw, nil } -func createUINode(name, shardType, shardName string, rootNode *workflow.Node, nodeMap map[string]*workflow.Node) { - taskID := createTaskID(name, shardType, shardName) - taskUINode := &workflow.Node{ - Name: "Shard " + shardName, - PathName: taskID, +func createUINode(phaseName string, shards []string, rootNode *workflow.Node) { + for _, shardName := range shards { + taskID := createTaskID(phaseName, shardName) + taskUINode := &workflow.Node{ + Name: "Shard " + shardName, + PathName: taskID, + } + rootNode.Children = append(rootNode.Children, taskUINode) } - rootNode.Children = append(rootNode.Children, taskUINode) - nodeMap[taskID] = taskUINode } -// createSubWorkflows creates a per source shard horizontal resharding workflow for each source shard in the keyspace. 
-func initCheckpoint(keyspace string, vtworkers []string, ts topo.Server) (*workflowpb.WorkflowCheckpoint, error) { +func initCheckpoint(keyspace string, vtworkers []string) (*workflowpb.WorkflowCheckpoint, error) { + ts := topo.Open() + defer ts.Close() + overlappingShards, err := topotools.FindOverlappingShards(context.Background(), ts, keyspace) if err != nil { return nil, err } - taskMap := make(map[string]*workflowpb.Task) var sourceShardList, destinationShardList []string - - for i, os := range overlappingShards { + for _, os := range overlappingShards { var sourceShard *topo.ShardInfo var destinationShards []*topo.ShardInfo // Judge which side is source shard by checking the number of servedTypes. @@ -257,21 +283,65 @@ func initCheckpoint(keyspace string, vtworkers []string, ts topo.Server) (*workf destinationShards = os.Left } - s := sourceShard.ShardName() - sourceShardList = append(sourceShardList, s) - worker := vtworkers[i] - for _, ds := range destinationShards { - d := ds.ShardName() - destinationShardList = append(destinationShardList, d) - - updatePerDestinationTask(keyspace, s, d, worker, CopySchemaName, taskMap) - updatePerDestinationTask(keyspace, s, d, worker, WaitFilteredReplicationName, taskMap) - updatePerDestinationTask(keyspace, s, d, worker, SplitDiffName, taskMap) + sourceShardList = append(sourceShardList, sourceShard.ShardName()) + for _, d := range destinationShards { + destinationShardList = append(destinationShardList, d.ShardName()) } - updatePerSourceTask(keyspace, s, worker, SplitCloneName, taskMap) - updatePerSourceTask(keyspace, s, worker, MigrateName, taskMap) } + taskMap := make(map[string]*workflowpb.Task) + initTasks(copySchemaName, destinationShardList, taskMap, func(i int, shard string) map[string]string { + return map[string]string{ + "source_shard": sourceShardList[0], + "destination_shard": shard, + "keyspace": keyspace, + } + }) + + initTasks(cloneName, sourceShardList, taskMap, func(i int, shard string) 
map[string]string { + return map[string]string{ + "source_shard": shard, + "vtworker": vtworkers[i], + "keyspace": keyspace, + } + }) + + initTasks(waitForFilteredReplicationName, destinationShardList, taskMap, func(i int, shard string) map[string]string { + return map[string]string{ + "destination_shard": shard, + "keyspace": keyspace, + } + }) + + initTasks(diffName, destinationShardList, taskMap, func(i int, shard string) map[string]string { + return map[string]string{ + "destination_shard": shard, + "vtworker": vtworkers[0], + "keyspace": keyspace, + } + }) + + initTasks(migrateRdonlyName, sourceShardList, taskMap, func(i int, shard string) map[string]string { + return map[string]string{ + "source_shard": shard, + "keyspace": keyspace, + "served_type": topodatapb.TabletType_RDONLY.String(), + } + }) + initTasks(migrateReplicaName, sourceShardList, taskMap, func(i int, shard string) map[string]string { + return map[string]string{ + "source_shard": shard, + "keyspace": keyspace, + "served_type": topodatapb.TabletType_REPLICA.String(), + } + }) + initTasks(migrateMasterName, sourceShardList, taskMap, func(i int, shard string) map[string]string { + return map[string]string{ + "source_shard": shard, + "keyspace": keyspace, + "served_type": topodatapb.TabletType_MASTER.String(), + } + }) return &workflowpb.WorkflowCheckpoint{ CodeVersion: codeVersion, Tasks: taskMap, @@ -281,3 +351,14 @@ func initCheckpoint(keyspace string, vtworkers []string, ts topo.Server) (*workf }, }, nil } + +func initTasks(phaseName string, shards []string, taskMap map[string]*workflowpb.Task, createAttributes func(int, string) map[string]string) { + for i, s := range shards { + taskID := createTaskID(phaseName, s) + taskMap[taskID] = &workflowpb.Task{ + Id: taskID, + State: workflowpb.TaskState_TaskNotStarted, + Attributes: createAttributes(i, s), + } + } +} diff --git a/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go 
b/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go index ed0620db923..80cfc4c9346 100644 --- a/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go +++ b/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go @@ -1,10 +1,13 @@ package resharding import ( + "context" "flag" + "strings" "testing" "github.com/golang/mock/gomock" + "github.com/golang/protobuf/proto" "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/topo/memorytopo" "github.com/youtube/vitess/go/vt/worker/fakevtworkerclient" @@ -16,26 +19,34 @@ import ( workflowpb "github.com/youtube/vitess/go/vt/proto/workflow" ) +// TestHorizontalResharding runs resharding from 1 shard to 2 shards. func TestHorizontalResharding(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() + // Initialize the checkpoint for the workflow. + oneShard := ReshardingData{ + Keyspace: "test_keyspace", + SourceShard: "0", + DestinationShards: []string{"-80", "80-"}, + Vtworker: "localhost:15032", + } + initCp := createCheckpoint([]ReshardingData{oneShard}) - hw := setUp(t, ctrl) + // Create the horizontal resharding workflow. + hw := setupWorkflow(t, initCp) if hw == nil { return } - // Create fakeworkerclient, which is used for the unit test in steps SplitClone and SplitDiff. - flag.Set("vtworker_client_protocol", "fake") - fakeVtworkerClient := fakevtworkerclient.NewFakeVtworkerClient() + // Create the mock wrangler and set the expected behavior. + // Then pass it to the workflow. + ctrl := gomock.NewController(t) + defer ctrl.Finish() + hw.wr = setupMockWrangler(hw.ctx, ctrl) + + // Set up the fake vtworkerclient. 
+	fakeVtworkerClient := setupFakeVtworker()
 	vtworkerclient.RegisterFactory("fake", fakeVtworkerClient.FakeVtworkerClientFactory)
 	defer vtworkerclient.UnregisterFactoryForTest("fake")
-	fakeVtworkerClient.RegisterResultForAddr("localhost:15032", []string{"SplitClone", "--min_healthy_rdonly_tablets=1", "test_keyspace/0"}, "", nil)
-	fakeVtworkerClient.RegisterResultForAddr("localhost:15032", []string{"SplitDiff", "--min_healthy_rdonly_tablets=1", "test_keyspace/-80"}, "", nil)
-	fakeVtworkerClient.RegisterResultForAddr("localhost:15032", []string{"SplitDiff", "--min_healthy_rdonly_tablets=1", "test_keyspace/80-"}, "", nil)
-
-	// Test the execution of horizontal resharding.
-	// To simply demonstate the ability to track task status and leverage it for control the workflow execution, only happy path is used here.
+	// Run the workflow.
 	if err := hw.runWorkflow(); err != nil {
 		t.Errorf("%s: Horizontal resharding workflow should not fail", err)
 	}
@@ -48,44 +59,120 @@ func TestHorizontalResharding(t *testing.T) {
 	}
 }
 
-// setUp prepare the test environement for the happy path.
-// Other test cases can reuse this basic setup and modified it based on its need.
-func setUp(t *testing.T, ctrl *gomock.Controller) *HorizontalReshardingWorkflow {
+// TestHorizontalReshardingRestart restarts a stopped workflow
+// by loading a hand-crafted checkpoint. This checkpoint is used to fake
+// the one saved by the killed workflow. It records that some tasks
+// in the workflow are finished successfully.
+func TestHorizontalReshardingRestart(t *testing.T) {
+	// Initialize the checkpoint for the workflow.
+	oneShard := ReshardingData{
+		Keyspace:          "test_keyspace",
+		SourceShard:       "0",
+		DestinationShards: []string{"-80", "80-"},
+		Vtworker:          "localhost:15032",
+	}
+	initCp := createCheckpoint([]ReshardingData{oneShard})
+
+	// Set checkpoint to record that the copySchemaTask on destination shard
+	// "-80" succeeded.
+ t1 := initCp.Tasks[createTaskID(copySchemaName, "dest", "-80")] + t1.State = workflowpb.TaskState_TaskDone + // Set checkpoint to record that the copySchemaTask on destination shard + // "80-" failed with errors. + t2 := initCp.Tasks[createTaskID(copySchemaName, "dest", "80-")] + t2.State = workflowpb.TaskState_TaskDone + t2.Error = "the task CopySchema for shard 80- fails." + + // Create the workflow proto message, which will be loaded + // when restarting the stopped workflow. + workflowProto := &workflowpb.Workflow{ + Uuid: "testworkflow0000", + FactoryName: "horizontal_resharding", + State: workflowpb.WorkflowState_Running, + } + data, err := proto.Marshal(initCp) + if err != nil { + t.Errorf("error in encoding checkpoint proto message: %v", err) + } + workflowProto.Data = data + + nodeManager := workflow.NewNodeManager() + rootNode := &workflow.Node{ + PathName: "test_root", + Name: "root", + } + if err := nodeManager.AddRootNode(rootNode); err != nil { + t.Errorf("adding root node failed: %v", err) + } + + // The workflow is created using Instantiate method when it is restarted. + var factory *HorizontalReshardingWorkflowFactory + w, err := factory.Instantiate(workflowProto, rootNode) + if err != nil { + t.Errorf("horizontal resharding workflow not instantiated successfully") + } + hw := w.(*HorizontalReshardingWorkflow) + ts := memorytopo.NewServer("cell") - // Create fake wrangler using mock interface, which is used for the unit test in steps CopySchema and MigratedServedType. - mockWranglerInterface := NewMockReshardingWrangler(ctrl) - // Create the checkpoint for workflow. 
- keyspace := "test_keyspace" - vtworkers := []string{"localhost:15032"} + wi, err := ts.CreateWorkflow(context.TODO(), workflowProto) + if err != nil { + t.Errorf("creating workflow fails: %v", err) + } + hw.ctx = context.Background() + hw.topoServer = ts + hw.wi = wi + hw.checkpointWriter = NewCheckpointWriter(hw.topoServer, hw.checkpoint, hw.wi) + if err := hw.checkpointWriter.Save(); err != nil { + t.Errorf("checkpointWriter save fails: %v", err) + } - taskMap := make(map[string]*workflowpb.Task) - source := "0" - destinations := []string{"-80", "80-"} - worker := vtworkers[0] - updatePerSourceTask(keyspace, source, worker, SplitCloneName, taskMap) - updatePerSourceTask(keyspace, source, worker, MigrateName, taskMap) + // Create the mock wrangler and set the expected behavior. + // Then pass it to the workflow. + ctrl := gomock.NewController(t) + defer ctrl.Finish() + hw.wr = setupMockWranglerRestart(hw.ctx, ctrl) + + // Set up the fake vtworkerclient. + fakeVtworkerClient := setupFakeVtworker() + vtworkerclient.RegisterFactory("fake", fakeVtworkerClient.FakeVtworkerClientFactory) + defer vtworkerclient.UnregisterFactoryForTest("fake") - for _, d := range destinations { - updatePerDestinationTask(keyspace, source, d, worker, CopySchemaName, taskMap) - updatePerDestinationTask(keyspace, source, d, worker, WaitFilteredReplicationName, taskMap) - updatePerDestinationTask(keyspace, source, d, worker, SplitDiffName, taskMap) + // Run the workflow. + if err := hw.runWorkflow(); err != nil { + t.Errorf("%s: Horizontal resharding workflow should not fail", err) + } + + // Checking all tasks are Done. 
+	for _, task := range hw.checkpoint.Tasks {
+		if task.State != workflowpb.TaskState_TaskDone || task.Error != "" {
+			t.Fatalf("task is not done: Id: %v, State: %v, Attributes:%v", task.Id, task.State, task.Attributes)
+		}
 	}
+}
+
+func setupFakeVtworker() *fakevtworkerclient.FakeVtworkerClient {
+	// Create fakeworkerclient, which is used for the unit test in phase of
+	// SplitClone and SplitDiff.
+	flag.Set("vtworker_client_protocol", "fake")
+	fakeVtworkerClient := fakevtworkerclient.NewFakeVtworkerClient()
+	fakeVtworkerClient.RegisterResultForAddr("localhost:15032", []string{"SplitClone", "--min_healthy_rdonly_tablets=1", "test_keyspace/0"}, "", nil)
+	fakeVtworkerClient.RegisterResultForAddr("localhost:15032", []string{"SplitDiff", "--min_healthy_rdonly_tablets=1", "test_keyspace/-80"}, "", nil)
+	fakeVtworkerClient.RegisterResultForAddr("localhost:15032", []string{"SplitDiff", "--min_healthy_rdonly_tablets=1", "test_keyspace/80-"}, "", nil)
+	return fakeVtworkerClient
+}
+
+// setupWorkflow prepares the test environment for the happy path.
+func setupWorkflow(t *testing.T, initCheckpoint *workflowpb.WorkflowCheckpoint) *HorizontalReshardingWorkflow {
+	ts := memorytopo.NewServer("cell")
+
 	// Create fake wrangler using mock interface, which is used for the unit test in steps CopySchema and MigratedServedType.
-	// Create the workflow (ignore the node construction since we don't test the front-end part in this unit test).
 	hw := &HorizontalReshardingWorkflow{
-		wr:         mockWranglerInterface,
-		topoServer: ts,
-		logger:     logutil.NewMemoryLogger(),
-		checkpoint: &workflowpb.WorkflowCheckpoint{
-			CodeVersion: codeVersion,
-			Tasks:       taskMap,
-			Settings: map[string]string{
-				"source_shards":      "0",
-				"destination_shards": "-80,80-",
-			},
-		},
+		topoServer:    ts,
+		logger:        logutil.NewMemoryLogger(),
+		checkpoint:    initCheckpoint,
 		taskUINodeMap: make(map[string]*workflow.Node),
 	}
+	// Create the initial workflowpb.Workflow object.
w := &workflowpb.Workflow{ Uuid: "testworkflow0000", @@ -99,10 +186,14 @@ func setUp(t *testing.T, ctrl *gomock.Controller) *HorizontalReshardingWorkflow return nil } hw.checkpointWriter = NewCheckpointWriter(hw.topoServer, hw.checkpoint, hw.wi) + return hw +} - // Set the expected behaviors for mock wrangler. +// setupMockWrangler sets the expected behaviors for mock wrangler. +func setupMockWrangler(ctx context.Context, ctrl *gomock.Controller) *MockReshardingWrangler { + mockWranglerInterface := NewMockReshardingWrangler(ctrl) mockWranglerInterface.EXPECT().CopySchemaShardFromShard( - hw.ctx, + ctx, nil, /* tableArray*/ nil, /* excludeTableArray */ true, /*includeViews*/ @@ -113,7 +204,7 @@ func setUp(t *testing.T, ctrl *gomock.Controller) *HorizontalReshardingWorkflow wrangler.DefaultWaitSlaveTimeout).Return(nil) mockWranglerInterface.EXPECT().CopySchemaShardFromShard( - hw.ctx, + ctx, nil, /* tableArray*/ nil, /* excludeTableArray */ true, /*includeViews*/ @@ -123,15 +214,15 @@ func setUp(t *testing.T, ctrl *gomock.Controller) *HorizontalReshardingWorkflow "80-", wrangler.DefaultWaitSlaveTimeout).Return(nil) - mockWranglerInterface.EXPECT().WaitForFilteredReplication(hw.ctx, "test_keyspace", "-80", wrangler.DefaultWaitForFilteredReplicationMaxDelay).Return(nil) - mockWranglerInterface.EXPECT().WaitForFilteredReplication(hw.ctx, "test_keyspace", "80-", wrangler.DefaultWaitForFilteredReplicationMaxDelay).Return(nil) + mockWranglerInterface.EXPECT().WaitForFilteredReplication(ctx, "test_keyspace", "-80", wrangler.DefaultWaitForFilteredReplicationMaxDelay).Return(nil) + mockWranglerInterface.EXPECT().WaitForFilteredReplication(ctx, "test_keyspace", "80-", wrangler.DefaultWaitForFilteredReplicationMaxDelay).Return(nil) servedTypeParams := []topodatapb.TabletType{topodatapb.TabletType_RDONLY, topodatapb.TabletType_REPLICA, topodatapb.TabletType_MASTER} for _, servedType := range servedTypeParams { mockWranglerInterface.EXPECT().MigrateServedTypes( - hw.ctx, + ctx, 
"test_keyspace", "0", nil, /* cells */ @@ -140,10 +231,78 @@ func setUp(t *testing.T, ctrl *gomock.Controller) *HorizontalReshardingWorkflow false, /* skipReFreshState */ wrangler.DefaultFilteredReplicationWaitTime).Return(nil) } - return hw + return mockWranglerInterface } -// TODO(yipeiw): fake a retry situation: fails first for made error, then fix the inserted bug and manually trigger the retry signal, -// verify whether the retrying job can be done successfully. -// problem for unit test: hard to fake action, node part, hard to separate the logic from front-end control. (figure out the call path of Init, s.t. we can create the front-end needed set-up if it is easy enough) -// problem for end-to-end test, need a way to check the workflow status; need to trigger the button through http request. +func setupMockWranglerRestart(ctx context.Context, ctrl *gomock.Controller) *MockReshardingWrangler { + // Set the mock wrangler expectations without the call of copyschema + // on shard "-80". That task is supposed to be finished + // and must not be called when restarting the workflow. 
+ mockWranglerInterface := NewMockReshardingWrangler(ctrl) + mockWranglerInterface.EXPECT().CopySchemaShardFromShard( + ctx, + nil, /* tableArray*/ + nil, /* excludeTableArray */ + true, /*includeViews*/ + "test_keyspace", + "0", + "test_keyspace", + "80-", + wrangler.DefaultWaitSlaveTimeout).Return(nil) + + mockWranglerInterface.EXPECT().WaitForFilteredReplication(ctx, "test_keyspace", "-80", wrangler.DefaultWaitForFilteredReplicationMaxDelay).Return(nil) + mockWranglerInterface.EXPECT().WaitForFilteredReplication(ctx, "test_keyspace", "80-", wrangler.DefaultWaitForFilteredReplicationMaxDelay).Return(nil) + + servedTypeParams := []topodatapb.TabletType{topodatapb.TabletType_RDONLY, + topodatapb.TabletType_REPLICA, + topodatapb.TabletType_MASTER} + for _, servedType := range servedTypeParams { + mockWranglerInterface.EXPECT().MigrateServedTypes( + ctx, + "test_keyspace", + "0", + nil, /* cells */ + servedType, + false, /* reverse */ + false, /* skipReFreshState */ + wrangler.DefaultFilteredReplicationWaitTime).Return(nil) + } + return mockWranglerInterface +} + +// ReshardingData stores the data for resharding one source shard. 
+type ReshardingData struct { + Keyspace string + SourceShard string + DestinationShards []string + Vtworker string +} + +func createCheckpoint(data []ReshardingData) *workflowpb.WorkflowCheckpoint { + taskMap := make(map[string]*workflowpb.Task) + var sourceList, destinationList []string + + for _, info := range data { + keyspace := info.Keyspace + s := info.SourceShard + worker := info.Vtworker + sourceList = append(sourceList, s) + updatePerSourceTask(keyspace, s, worker, splitCloneName, taskMap) + updatePerSourceTask(keyspace, s, worker, migrateName, taskMap) + for _, d := range info.DestinationShards { + destinationList = append(destinationList, d) + updatePerDestinationTask(keyspace, s, d, worker, copySchemaName, taskMap) + updatePerDestinationTask(keyspace, s, d, worker, waitFilteredReplicationName, taskMap) + updatePerDestinationTask(keyspace, s, d, worker, splitDiffName, taskMap) + } + } + + return &workflowpb.WorkflowCheckpoint{ + CodeVersion: codeVersion, + Tasks: taskMap, + Settings: map[string]string{ + "source_shards": strings.Join(sourceList, ","), + "destination_shards": strings.Join(destinationList, ","), + }, + } +} diff --git a/go/vt/workflow/resharding/parallel_runner.go b/go/vt/workflow/resharding/parallel_runner.go index 6bbe962c761..5e0438251c3 100644 --- a/go/vt/workflow/resharding/parallel_runner.go +++ b/go/vt/workflow/resharding/parallel_runner.go @@ -2,7 +2,6 @@ package resharding import ( "fmt" - "path" "sync" log "github.com/golang/glog" @@ -25,42 +24,44 @@ const ( // ParallelRunner is used to control executing tasks concurrently. // Each phase has its own ParallelRunner object. type ParallelRunner struct { - ctx context.Context - + ctx context.Context + nodeManager *workflow.NodeManager + phaseUINode *workflow.Node checkpointWriter *CheckpointWriter - taskUINodes map[string]*workflow.Node + // tasks stores selected tasks for the phase with expected execution order. 
 	tasks []*workflowpb.Task
-
+	concurrencyLevel level
+	executeFunc      func(context.Context, map[string]string) error
 	// mu is used to protect the retryActionRegistery.
 	mu sync.Mutex
-	// retryAtionRegistry stores the data for all actions. Each task can retrieve its control object through task ID.
-	retryActionRegistery map[string]*RetryController
+	// retryActionRegistry stores the data for retry actions.
+	// Each task can retrieve its RetryController through its UI node path.
+	retryActionRegistry map[string]*RetryController
 }
 
-func NewParallelRunner(ctx context.Context, cp *CheckpointWriter, taskUINodes map[string]*workflow.Node, tasks []*workflowpb.Task) *ParallelRunner {
-	p := &ParallelRunner{
-		ctx:              ctx,
-		checkpointWriter: cp,
-		taskUINodes:      taskUINodes,
-		tasks:            tasks,
+func NewParallelRunner(ctx context.Context, nodeManager *workflow.NodeManager, phaseUINode *workflow.Node, cp *CheckpointWriter, tasks []*workflowpb.Task, executeFunc func(context.Context, map[string]string) error, concurrencyLevel level) *ParallelRunner {
+	return &ParallelRunner{
+		ctx:                 ctx,
+		nodeManager:         nodeManager,
+		phaseUINode:         phaseUINode,
+		checkpointWriter:    cp,
+		tasks:               tasks,
+		executeFunc:         executeFunc,
+		concurrencyLevel:    concurrencyLevel,
+		retryActionRegistry: make(map[string]*RetryController),
 	}
-	p.retryActionRegistery = make(map[string]*RetryController)
-	return p
 }
 
 // Run is the entry point for controling task executions.
-// tasks should be a copy of tasks with the expected execution order, the status of task should be
-// both updated in this copy and the original one (checkpointer.UpdateTask does this). This is used
-// to avoid data racing situation.
-func (p *ParallelRunner) Run(executeFunc func(map[string]string) error, concurrencyLevel level) error {
+func (p *ParallelRunner) Run() error {
 	var parallelNum int // default value is 0. The task will not run in this case.
-	switch concurrencyLevel {
+	switch p.concurrencyLevel {
 	case SEQUENTIAL:
 		parallelNum = 1
 	case PARALLEL:
 		parallelNum = len(p.tasks)
 	default:
-		panic(fmt.Sprintf("BUG: Invalid concurrency level: %v", concurrencyLevel))
+		panic(fmt.Sprintf("BUG: Invalid concurrency level: %v", p.concurrencyLevel))
 	}
 	// sem is a channel used to control the level of concurrency.
@@ -76,53 +77,35 @@ func (p *ParallelRunner) Run(executeFunc func(map[string]string) error, concurre
 			taskID := t.Id
 			for {
-				err := executeFunc(t.Attributes)
-				t.State = workflowpb.TaskState_TaskDone
-				if err != nil {
-					t.Error = err.Error()
-				}
-
+				err := p.executeFunc(p.ctx, t.Attributes)
+				// Update the task status in the checkpoint.
 				if updateErr := p.checkpointWriter.UpdateTask(taskID, workflowpb.TaskState_TaskDone, t.Error); updateErr != nil {
-					// Only logging the error rather then propograting it through ErrorRecorder. Error message in
-					// ErrorRecorder will lead to the stop of the workflow, which is unexpected if only checkpointing fails.
-					// If the checkpointing fails during initialization, we should stop the workflow.
+					// Only logging the error rather than passing it to ErrorRecorder.
+					// Errors in ErrorRecorder will lead to the stop of a workflow. We
+					// don't want to stop the workflow if only checkpointing fails.
 					log.Errorf("%v", updateErr)
 				}
-
+				// The function returns if the task is executed successfully.
 				if err == nil {
					t.Error = ""
 					return
 				}
-
-				// If task fails, the retry action is enabled.
-				n, ok := p.taskUINodes[taskID]
-				if !ok {
-					log.Errorf("UI node not found for task %v", taskID)
+				// When task fails, first check whether the context is canceled.
+				// If so, return right away. If not, enable the retry action.
+ select { + case <-p.ctx.Done(): return + default: } + retryChannel := p.addRetryAction(taskID) - retryAction := &workflow.Action{ - Name: "Retry", - State: workflow.ActionStateEnabled, - Style: workflow.ActionStyleWaiting, - } - n.Actions = []*workflow.Action{retryAction} - n.Listener = p - - p.mu.Lock() - p.retryActionRegistery[taskID] = &RetryController{ - node: n, - retryChannel: make(chan struct{}), - } - p.mu.Unlock() - n.BroadcastChanges(false /* updateChildren */) - - // Block the task execution until the retry action is triggered or the job is canceled. + // Block the task execution until the retry action is triggered + // or the context is canceled. select { - case <-p.retryActionRegistery[taskID].retryChannel: + case <-retryChannel: continue case <-p.ctx.Done(): - p.retryActionRegistery = nil + p.retryActionRegistry = nil return } } @@ -138,25 +121,52 @@ func (p *ParallelRunner) Run(executeFunc func(map[string]string) error, concurre // Action handles the retry action. It implements the interface ActionListener. 
func (p *ParallelRunner) Action(ctx context.Context, pathName, name string) error { + switch name { + case "Retry": + return p.triggerRetry(pathName) + default: + return fmt.Errorf("Unknown action: %v", name) + } +} + +func (p *ParallelRunner) addRetryAction(taskID string) chan struct{} { + node, err := p.nodeManager.GetNodeByRelativePath(p.phaseUINode, taskID) + if err != nil { + panic(fmt.Errorf("%v: UI node not found for task %v", err, taskID)) + } + retryController := CreateRetryController(node, p /* actionListener */) + p.registerRetryController(node.PathName, retryController) + return retryController.retryChannel +} + +func (p *ParallelRunner) triggerRetry(nodePath string) error { p.mu.Lock() defer p.mu.Unlock() - actionID := getTaskID(pathName) - c, ok := p.retryActionRegistery[actionID] + c, ok := p.retryActionRegistry[nodePath] if !ok { - return fmt.Errorf("Unknown node path for the action: %v", pathName) + return fmt.Errorf("Unknown node path for the action: %v", nodePath) } + p.unregisterRetryController(nodePath) + c.triggerRetry() + return nil +} - switch name { - case "Retry": - c.closeRetryChannel() - delete(p.retryActionRegistery, actionID) - default: - return fmt.Errorf("Unknown action: %v", name) +func (p *ParallelRunner) registerRetryController(nodePath string, c *RetryController) { + p.mu.Lock() + defer p.mu.Unlock() + if _, ok := p.retryActionRegistry[nodePath]; ok { + panic(fmt.Errorf("duplicate retry action on node: %v", nodePath)) } - return nil + p.retryActionRegistry[nodePath] = c } -func getTaskID(nodePath string) string { - return path.Base(nodePath) +func (p *ParallelRunner) unregisterRetryController(nodePath string) { + p.mu.Lock() + defer p.mu.Unlock() + if _, ok := p.retryActionRegistry[nodePath]; !ok { + log.Warningf("retry action on node: %v doesn't exist, cannot unregister it", nodePath) + } else { + delete(p.retryActionRegistry, nodePath) + } } diff --git a/go/vt/workflow/resharding/parallel_runner_test.go 
b/go/vt/workflow/resharding/parallel_runner_test.go index 16e119190bc..f7a49239068 100644 --- a/go/vt/workflow/resharding/parallel_runner_test.go +++ b/go/vt/workflow/resharding/parallel_runner_test.go @@ -52,7 +52,8 @@ func TestParallelRunnerRetryAction(t *testing.T) { ctx := context.Background() - // Create UI nodes. Each task has a node. These task nodes are the children of a root node. + // Create UI nodes. Each task has a node. + // These task nodes are the children of a root node. notifications := make(chan []byte, 10) nodeManager := workflow.NewNodeManager() _, index, err := nodeManager.GetAndWatchFullTree(notifications) @@ -117,7 +118,7 @@ func TestParallelRunnerRetryAction(t *testing.T) { } go func() { - // This goroutine is used to mornitor the UI change. + // This goroutine is used to monitor the UI change. // When the retry action is enabled, it will trigger it using nodemanager. for { select { @@ -155,8 +156,9 @@ func TestParallelRunnerRetryAction(t *testing.T) { } }() - // Call ParallelRunner.Run through a goroutine. In this way, the failure of task will not block the main function. - var waitGroup sync.WaitGroup + // Call ParallelRunner.Run through a goroutine. In this way, + // the failure of task will not block the main function. + waitGroup := sync.WaitGroup{} waitGroup.Add(1) go func() { defer waitGroup.Done() diff --git a/go/vt/workflow/resharding/retry_controller.go b/go/vt/workflow/resharding/retry_controller.go new file mode 100644 index 00000000000..e7e59f141af --- /dev/null +++ b/go/vt/workflow/resharding/retry_controller.go @@ -0,0 +1,38 @@ +package resharding + +import "github.com/youtube/vitess/go/vt/workflow" + +// RetryController stores the data for controlling the retry action. +type RetryController struct { + node *workflow.Node + // retryChannel is used to trigger the retrying of task + // when pressing the button. 
+ retryChannel chan struct{} +} + +// CreateRetryController creates a RetryController for a specific node and +// enables the retry action on the node. +func CreateRetryController(node *workflow.Node, actionListener workflow.ActionListener) *RetryController { + retryAction := &workflow.Action{ + Name: "Retry", + State: workflow.ActionStateEnabled, + Style: workflow.ActionStyleWaiting, + } + node.Actions = []*workflow.Action{retryAction} + node.Listener = actionListener + node.BroadcastChanges(false /* updateChildren */) + return &RetryController{ + node: node, + retryChannel: make(chan struct{}), + } +} + +// triggerRetry closes the retryChannel and empties the Actions list +// in the UI Node. This disables the retry action. +func (c *RetryController) triggerRetry() { + if len(c.node.Actions) != 0 { + c.node.Actions = []*workflow.Action{} + close(c.retryChannel) + } + c.node.BroadcastChanges(false /* updateChildren */) +} diff --git a/go/vt/workflow/resharding/task.go b/go/vt/workflow/resharding/task.go new file mode 100644 index 00000000000..68daeae4572 --- /dev/null +++ b/go/vt/workflow/resharding/task.go @@ -0,0 +1,137 @@ +package resharding + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/youtube/vitess/go/vt/automation" + "github.com/youtube/vitess/go/vt/topo/topoproto" + "github.com/youtube/vitess/go/vt/wrangler" + + topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" + workflowpb "github.com/youtube/vitess/go/vt/proto/workflow" +) + +func createTaskID(phase, shardName string) string { + return fmt.Sprintf("%s_%s", phase, shardName) +} + +// GetTasks returns selected tasks for a phase from the checkpoint +// with expected execution order.
+func (hw *HorizontalReshardingWorkflow) GetTasks(checkpoint *workflowpb.WorkflowCheckpoint, phaseName string) []*workflowpb.Task { + var shards []string + switch phaseName { + case copySchemaName, waitForFilteredReplicationName, diffName: + shards = strings.Split(checkpoint.Settings["destination_shards"], ",") + case cloneName, migrateRdonlyName, migrateReplicaName, migrateMasterName: + shards = strings.Split(checkpoint.Settings["source_shards"], ",") + } + + var tasks []*workflowpb.Task + for _, s := range shards { + taskID := createTaskID(phaseName, s) + tasks = append(tasks, checkpoint.Tasks[taskID]) + } + return tasks +} + +// runCopySchema runs CopySchema for a destination shard. +// There should be #destshards parameters, while each param includes 1 sourceshard and 1 destshard. +func (hw *HorizontalReshardingWorkflow) runCopySchema(ctx context.Context, attributes map[string]string) error { + s := attributes["source_shard"] + d := attributes["destination_shard"] + keyspace := attributes["keyspace"] + err := hw.wr.CopySchemaShardFromShard(ctx, nil /* tableArray*/, nil /* excludeTableArray */, true, /*includeViews*/ + keyspace, s, keyspace, d, wrangler.DefaultWaitSlaveTimeout) + if err != nil { + hw.logger.Infof("Horizontal Resharding: error in CopySchemaShard from %s to %s: %v.", s, d, err) + } + hw.logger.Infof("Horizontal Resharding: CopySchemaShard from %s to %s is finished.", s, d) + return err +} + +// runSplitClone runs SplitClone for a source shard. +// There should be #sourceshards parameters, while each param includes 1 sourceshard and its destshards. The destShards are useless here. 
+func (hw *HorizontalReshardingWorkflow) runSplitClone(ctx context.Context, attributes map[string]string) error { + s := attributes["source_shard"] + worker := attributes["vtworker"] + keyspace := attributes["keyspace"] + + sourceKeyspaceShard := topoproto.KeyspaceShardString(keyspace, s) + // Reset the vtworker to avoid error if vtworker command has been called elsewhere. + // This is because vtworker class doesn't cleanup the environment after execution. + automation.ExecuteVtworker(ctx, worker, []string{"Reset"}) + // The flag min_healthy_rdonly_tablets is set to 1 (default value is 2). + // Therefore, we can reuse the normal end to end test setting, which has only 1 rdonly tablet. + // TODO(yipeiw): Add min_healthy_rdonly_tablets as an input argument in UI. + args := []string{"SplitClone", "--min_healthy_rdonly_tablets=1", sourceKeyspaceShard} + if _, err := automation.ExecuteVtworker(hw.ctx, worker, args); err != nil { + hw.logger.Infof("Horizontal resharding: error in SplitClone in keyspace %s: %v.", keyspace, err) + return err + } + hw.logger.Infof("Horizontal resharding: SplitClone is finished.") + + return nil +} + +// runWaitForFilteredReplication runs WaitForFilteredReplication for a destination shard. +// There should be #destshards parameters, while each param includes 1 sourceshard and 1 destshard. +func (hw *HorizontalReshardingWorkflow) runWaitForFilteredReplication(ctx context.Context, attributes map[string]string) error { + d := attributes["destination_shard"] + keyspace := attributes["keyspace"] + + if err := hw.wr.WaitForFilteredReplication(ctx, keyspace, d, wrangler.DefaultWaitForFilteredReplicationMaxDelay); err != nil { + hw.logger.Infof("Horizontal Resharding: error in WaitForFilteredReplication: %v.", err) + return err + } + hw.logger.Infof("Horizontal Resharding:WaitForFilteredReplication is finished on " + d) + return nil +} + +// runSplitDiff runs SplitDiff for a destination shard. 
+func (hw *HorizontalReshardingWorkflow) runSplitDiff(ctx context.Context, attributes map[string]string) error { + d := attributes["destination_shard"] + worker := attributes["vtworker"] + keyspace := attributes["keyspace"] + + automation.ExecuteVtworker(hw.ctx, worker, []string{"Reset"}) + args := []string{"SplitDiff", "--min_healthy_rdonly_tablets=1", topoproto.KeyspaceShardString(keyspace, d)} + _, err := automation.ExecuteVtworker(ctx, worker, args) + if err != nil { + return err + } + + hw.logger.Infof("Horizontal resharding: SplitDiff is finished.") + return nil +} + +// runMigrate runs the migration sequentially among all source shards. +// There should be 1 parameter, which includes all source shards to be migrated. +func (hw *HorizontalReshardingWorkflow) runMigrate(ctx context.Context, attributes map[string]string) error { + s := attributes["source_shard"] + keyspace := attributes["keyspace"] + servedTypeStr := attributes["served_type"] + + servedType, err := topoproto.ParseTabletType(servedTypeStr) + if err != nil { + return fmt.Errorf("unknown tablet type: %v", servedTypeStr) + } + + if servedType != topodatapb.TabletType_RDONLY && + servedType != topodatapb.TabletType_REPLICA && + servedType != topodatapb.TabletType_MASTER { + return fmt.Errorf("wrong served type to be migrated: %v", servedTypeStr) + } + + sourceKeyspaceShard := topoproto.KeyspaceShardString(keyspace, s) + err = hw.wr.MigrateServedTypes(ctx, keyspace, s, nil /* cells */, servedType, false /* reverse */, false /* skipReFreshState */, wrangler.DefaultFilteredReplicationWaitTime) + if err != nil { + hw.logger.Infof("Horizontal Resharding: error in MigrateServedTypes on servedType %s: %v.", servedType, err) + return err + } + hw.logger.Infof("Horizontal Resharding: MigrateServedTypes is finished on tablet %s serve type %s.", sourceKeyspaceShard, servedType) + + return nil +} diff --git a/go/vt/workflow/resharding/task_helper.go b/go/vt/workflow/resharding/task_helper.go deleted file mode 
100644 index 57e4b15c47e..00000000000 --- a/go/vt/workflow/resharding/task_helper.go +++ /dev/null @@ -1,154 +0,0 @@ -package resharding - -import ( - "fmt" - "strings" - - "github.com/youtube/vitess/go/vt/automation" - "github.com/youtube/vitess/go/vt/topo/topoproto" - "github.com/youtube/vitess/go/vt/wrangler" - - topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" - workflowpb "github.com/youtube/vitess/go/vt/proto/workflow" -) - -func createTaskID(phase, shardType, shardName string) string { - return fmt.Sprintf("%s_%s_%s", phase, shardType, shardName) -} - -// GenerateTasks generates a copy of tasks for a specific step. The task status is not checked in this function. -func (hw *HorizontalReshardingWorkflow) GenerateTasks(checkpoint *workflowpb.WorkflowCheckpoint, stepName string) []*workflowpb.Task { - var tasks []*workflowpb.Task - switch stepName { - case CopySchemaName, WaitFilteredReplicationName, SplitDiffName: - for _, d := range strings.Split(checkpoint.Settings["destination_shards"], ",") { - taskID := createTaskID(stepName, "dest", d) - tasks = append(tasks, checkpoint.Tasks[taskID]) - } - case SplitCloneName, MigrateName: - for _, s := range strings.Split(checkpoint.Settings["source_shards"], ",") { - taskID := createTaskID(stepName, "source", s) - tasks = append(tasks, checkpoint.Tasks[taskID]) - } - } - return tasks -} - -// runCopySchemaPerShard runs CopySchema for a destination shard. -// There should be #destshards parameters, while each param includes 1 sourceshard and 1 destshard. 
-func (hw *HorizontalReshardingWorkflow) runCopySchema(attr map[string]string) error { - s := attr["source_shard"] - d := attr["destination_shard"] - keyspace := attr["keyspace"] - err := hw.wr.CopySchemaShardFromShard(hw.ctx, nil /* tableArray*/, nil /* excludeTableArray */, true, /*includeViews*/ - keyspace, s, keyspace, d, wrangler.DefaultWaitSlaveTimeout) - if err != nil { - hw.logger.Infof("Horizontal Resharding: error in CopySchemaShardFromShard from %s to %s: %v.", s, d, err) - } - hw.logger.Infof("Horizontal Resharding: CopySchemaShardFromShard from %s to %s is finished.", s, d) - return err -} - -// runSplitClonePerShard runs SplitClone for a source shard. -// There should be #sourceshards parameters, while each param includes 1 sourceshard and its destshards. The destShards are useless here. -func (hw *HorizontalReshardingWorkflow) runSplitClone(attr map[string]string) error { - s := attr["source_shard"] - worker := attr["vtworker"] - keyspace := attr["keyspace"] - - sourceKeyspaceShard := topoproto.KeyspaceShardString(keyspace, s) - // Reset the vtworker to avoid error if vtworker command has been called elsewhere. - // This is because vtworker class doesn't cleanup the environment after execution. - automation.ExecuteVtworker(hw.ctx, worker, []string{"Reset"}) - // The flag min_healthy_rdonly_tablets is set to 1 (default value is 2). - // Therefore, we can reuse the normal end to end test setting, which has only 1 rdonly tablet. - // TODO(yipeiw): Add min_healthy_rdonly_tablets as an input argument in UI. 
- args := []string{"SplitClone", "--min_healthy_rdonly_tablets=1", sourceKeyspaceShard} - if _, err := automation.ExecuteVtworker(hw.ctx, worker, args); err != nil { - hw.logger.Infof("Horizontal resharding: error in SplitClone in keyspace %s: %v.", keyspace, err) - return err - } - hw.logger.Infof("Horizontal resharding: SplitClone is finished.") - - return nil -} - -// runWaitFilteredReplication runs WaitForFilteredReplication for a destination shard. -// There should be #destshards parameters, while each param includes 1 sourceshard and 1 destshard. -func (hw *HorizontalReshardingWorkflow) runWaitFilteredReplication(attr map[string]string) error { - d := attr["destination_shard"] - keyspace := attr["keyspace"] - - if err := hw.wr.WaitForFilteredReplication(hw.ctx, keyspace, d, wrangler.DefaultWaitForFilteredReplicationMaxDelay); err != nil { - hw.logger.Infof("Horizontal Resharding: error in WaitForFilteredReplication: %v.", err) - return err - } - hw.logger.Infof("Horizontal Resharding:WaitForFilteredReplication is finished on " + d) - return nil -} - -// runSplitDiffPerShard runs SplitDiff for a source shard. -// There should be #sourceshards parameters, while each param includes 1 sourceshard and its destshards. -func (hw *HorizontalReshardingWorkflow) runSplitDiff(attr map[string]string) error { - d := attr["destination_shard"] - worker := attr["vtworker"] - keyspace := attr["keyspace"] - - automation.ExecuteVtworker(hw.ctx, worker, []string{"Reset"}) - args := []string{"SplitDiff", "--min_healthy_rdonly_tablets=1", topoproto.KeyspaceShardString(keyspace, d)} - _, err := automation.ExecuteVtworker(hw.ctx, worker, args) - if err != nil { - return err - } - - hw.logger.Infof("Horizontal resharding: SplitDiff is finished.") - return nil -} - -// runMigratePerShard runs the migration sequentially among all source shards. -// There should be 1 parameter, which includes all source shards to be migrated. 
-func (hw *HorizontalReshardingWorkflow) runMigrate(attr map[string]string) error { - s := attr["source_shard"] - keyspace := attr["keyspace"] - - sourceKeyspaceShard := topoproto.KeyspaceShardString(keyspace, s) - servedTypeParams := []topodatapb.TabletType{topodatapb.TabletType_RDONLY, - topodatapb.TabletType_REPLICA, - topodatapb.TabletType_MASTER} - for _, servedType := range servedTypeParams { - err := hw.wr.MigrateServedTypes(hw.ctx, keyspace, s, nil /* cells */, servedType, false /* reverse */, false /* skipReFreshState */, wrangler.DefaultFilteredReplicationWaitTime) - if err != nil { - hw.logger.Infof("Horizontal Resharding: error in MigrateServedTypes on servedType %s: %v.", servedType, err) - return err - } - hw.logger.Infof("Horizontal Resharding: MigrateServedTypes is finished on tablet %s serve type %s.", sourceKeyspaceShard, servedType) - } - return nil -} - -func updatePerDestinationTask(keyspace, sourceShard, destinationShard, worker, name string, taskMap map[string]*workflowpb.Task) { - taskID := createTaskID(name, "dest", destinationShard) - taskMap[taskID] = &workflowpb.Task{ - Id: taskID, - State: workflowpb.TaskState_TaskNotStarted, - Attributes: map[string]string{ - "source_shard": sourceShard, - "destination_shard": destinationShard, - "vtworker": worker, - "keyspace": keyspace, - }, - } -} - -func updatePerSourceTask(keyspace, sourceShard, vtworker, name string, taskMap map[string]*workflowpb.Task) { - taskID := createTaskID(name, "source", sourceShard) - taskMap[taskID] = &workflowpb.Task{ - Id: taskID, - State: workflowpb.TaskState_TaskNotStarted, - Attributes: map[string]string{ - "source_shard": sourceShard, - "vtworker": vtworker, - "keyspace": keyspace, - }, - } -} From ff7f20c2e05e9980cff252641955d9e25612d4b9 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Wed, 15 Feb 2017 14:57:50 -0800 Subject: [PATCH 005/108] Adding a tlstest library to generate test certs. 
--- go/vt/servenv/grpc_server.go | 27 +----- go/vt/servenv/grpcutils/client_tls.go | 37 ++++--- go/vt/servenv/grpcutils/server_tls.go | 38 ++++++++ go/vt/tlstest/tlstest.go | 118 +++++++++++++++++++++++ go/vt/tlstest/tlstest_test.go | 134 ++++++++++++++++++++++++++ 5 files changed, 318 insertions(+), 36 deletions(-) create mode 100644 go/vt/servenv/grpcutils/server_tls.go create mode 100644 go/vt/tlstest/tlstest.go create mode 100644 go/vt/tlstest/tlstest_test.go diff --git a/go/vt/servenv/grpc_server.go b/go/vt/servenv/grpc_server.go index f8ae96bb23b..8193459b3c4 100644 --- a/go/vt/servenv/grpc_server.go +++ b/go/vt/servenv/grpc_server.go @@ -5,17 +5,15 @@ package servenv import ( - "crypto/tls" - "crypto/x509" "flag" "fmt" - "io/ioutil" "net" "google.golang.org/grpc" "google.golang.org/grpc/credentials" log "github.com/golang/glog" + "github.com/youtube/vitess/go/vt/servenv/grpcutils" ) // This file handles gRPC server, on its own port. @@ -76,28 +74,9 @@ func createGRPCServer() { var opts []grpc.ServerOption if GRPCPort != nil && *GRPCCert != "" && *GRPCKey != "" { - config := &tls.Config{} - - // load the server cert and key - cert, err := tls.LoadX509KeyPair(*GRPCCert, *GRPCKey) + config, err := grpcutils.TLSServerConfig(*GRPCCert, *GRPCKey, *GRPCCA) if err != nil { - log.Fatalf("Failed to load cert/key: %v", err) - } - config.Certificates = []tls.Certificate{cert} - - // if specified, load ca to validate client, - // and enforce clients present valid certs. 
- if *GRPCCA != "" { - b, err := ioutil.ReadFile(*GRPCCA) - if err != nil { - log.Fatalf("Failed to read ca file: %v", err) - } - cp := x509.NewCertPool() - if !cp.AppendCertsFromPEM(b) { - log.Fatalf("Failed to append certificates") - } - config.ClientCAs = cp - config.ClientAuth = tls.RequireAndVerifyClientCert + log.Fatalf("Failed to log gRPC cert/key/ca: %v", err) } // create the creds server options diff --git a/go/vt/servenv/grpcutils/client_tls.go b/go/vt/servenv/grpcutils/client_tls.go index 7fc8f33f454..61802525780 100644 --- a/go/vt/servenv/grpcutils/client_tls.go +++ b/go/vt/servenv/grpcutils/client_tls.go @@ -10,17 +10,12 @@ import ( "google.golang.org/grpc/credentials" ) -// ClientSecureDialOption returns the gRPC dial option to use for the given client -// connection. It is either using TLS, or Insecure if nothing is set. -func ClientSecureDialOption(cert, key, ca, name string) (grpc.DialOption, error) { - // no secuirty options set, just return - if (cert == "" || key == "") && ca == "" { - return grpc.WithInsecure(), nil - } - +// TLSClientConfig returns the TLS config to use for a client to +// connect to a server with the provided parameters. +func TLSClientConfig(cert, key, ca, name string) (*tls.Config, error) { config := &tls.Config{} - // load the client-side cert & key if any + // Load the client-side cert & key if any. if cert != "" && key != "" { crt, err := tls.LoadX509KeyPair(cert, key) if err != nil { @@ -29,7 +24,7 @@ func ClientSecureDialOption(cert, key, ca, name string) (grpc.DialOption, error) config.Certificates = []tls.Certificate{crt} } - // load the server ca if any + // Load the server CA if any. if ca != "" { b, err := ioutil.ReadFile(ca) if err != nil { @@ -42,12 +37,30 @@ func ClientSecureDialOption(cert, key, ca, name string) (grpc.DialOption, error) config.RootCAs = cp } - // set the server name if any + // Set the server name if any. 
if name != "" { config.ServerName = name } - // create the creds server options + return config, nil +} + +// ClientSecureDialOption returns the gRPC dial option to use for the +// given client connection. It is either using TLS, or Insecure if +// nothing is set. +func ClientSecureDialOption(cert, key, ca, name string) (grpc.DialOption, error) { + // No security options set, just return. + if (cert == "" || key == "") && ca == "" { + return grpc.WithInsecure(), nil + } + + // Load the config. + config, err := TLSClientConfig(cert, key, ca, name) + if err != nil { + return nil, err + } + + // Create the creds server options. creds := credentials.NewTLS(config) return grpc.WithTransportCredentials(creds), nil } diff --git a/go/vt/servenv/grpcutils/server_tls.go b/go/vt/servenv/grpcutils/server_tls.go new file mode 100644 index 00000000000..c7704144706 --- /dev/null +++ b/go/vt/servenv/grpcutils/server_tls.go @@ -0,0 +1,38 @@ +package grpcutils + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" +) + +// TLSServerConfig returns the TLS config to use for a server to +// accept client connections. +func TLSServerConfig(cert, key, ca string) (*tls.Config, error) { + config := &tls.Config{} + + // Load the server cert and key. + crt, err := tls.LoadX509KeyPair(cert, key) + if err != nil { + return nil, fmt.Errorf("failed to load cert/key: %v", err) + } + config.Certificates = []tls.Certificate{crt} + + // if specified, load ca to validate client, + // and enforce clients present valid certs. 
+ if ca != "" { + b, err := ioutil.ReadFile(ca) + if err != nil { + return nil, fmt.Errorf("Failed to read ca file: %v", err) + } + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return nil, fmt.Errorf("Failed to append certificates") + } + config.ClientCAs = cp + config.ClientAuth = tls.RequireAndVerifyClientCert + } + + return config, nil +} diff --git a/go/vt/tlstest/tlstest.go b/go/vt/tlstest/tlstest.go new file mode 100644 index 00000000000..7d05c36b8e3 --- /dev/null +++ b/go/vt/tlstest/tlstest.go @@ -0,0 +1,118 @@ +// Package tlstest contains utility methods to create test certificates. +// It is not meant to be used in production. +package tlstest + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + + log "github.com/golang/glog" +) + +const ( + // CA is the name of the CA toplevel cert. + CA = "ca" + + caConfig = ` +[ req ] + default_bits = 1024 + default_keyfile = keyfile.pem + distinguished_name = req_distinguished_name + attributes = req_attributes + prompt = no + output_password = mypass +[ req_distinguished_name ] + C = US + ST = California + L = Mountain View + O = Google + OU = Vitess + CN = CA + emailAddress = test@email.address +[ req_attributes ] + challengePassword = A challenge password +` + + certConfig = ` +[ req ] + default_bits = 1024 + default_keyfile = keyfile.pem + distinguished_name = req_distinguished_name + attributes = req_attributes + prompt = no + output_password = mypass +[ req_distinguished_name ] + C = US + ST = California + L = Mountain View + O = Google + OU = Vitess + CN = %s + emailAddress = test@email.address +[ req_attributes ] + challengePassword = A challenge password +` +) + +// openssl runs the openssl binary with the provided command. +func openssl(argv ...string) { + cmd := exec.Command("openssl", argv...) 
+ output, err := cmd.CombinedOutput() + if err != nil { + log.Fatalf("openssl %v failed: %v", argv, err) + } + if len(output) > 0 { + log.Infof("openssl %v returned:\n%v", argv, string(output)) + } +} + +// CreateCA creates the toplevel 'ca' certificate and key, and places it +// in the provided directory. Temporary files are also created in that +// directory. +func CreateCA(root string) { + log.Infof("Creating test root CA in %v", root) + key := path.Join(root, "ca-key.pem") + cert := path.Join(root, "ca-cert.pem") + openssl("genrsa", "-out", key) + + config := path.Join(root, "ca.config") + if err := ioutil.WriteFile(config, []byte(caConfig), os.ModePerm); err != nil { + log.Fatalf("cannot write file %v: %v", config, err) + } + openssl("req", "-new", "-x509", "-nodes", "-days", "3600", "-batch", + "-config", config, + "-key", key, + "-out", cert) +} + +// CreateSignedCert creates a new certificate signed by the provided parent, +// with the provided serial number, name and common name. +// name is the file name to use. Common Name is the certificate common name. 
+func CreateSignedCert(root, parent, serial, name, commonName string) { + log.Infof("Creating signed cert and key %v", commonName) + caKey := path.Join(root, parent+"-key.pem") + caCert := path.Join(root, parent+"-cert.pem") + key := path.Join(root, name+"-key.pem") + cert := path.Join(root, name+"-cert.pem") + req := path.Join(root, name+"-req.pem") + + config := path.Join(root, name+".config") + if err := ioutil.WriteFile(config, []byte(fmt.Sprintf(certConfig, commonName)), os.ModePerm); err != nil { + log.Fatalf("cannot write file %v: %v", config, err) + } + openssl("req", "-newkey", "rsa:2048", "-days", "3600", "-nodes", + "-batch", + "-config", config, + "-keyout", key, "-out", req) + openssl("rsa", "-in", key, "-out", key) + openssl("x509", "-req", + "-in", req, + "-days", "3600", + "-CA", caCert, + "-CAkey", caKey, + "-set_serial", serial, + "-out", cert) +} diff --git a/go/vt/tlstest/tlstest_test.go b/go/vt/tlstest/tlstest_test.go new file mode 100644 index 00000000000..b7e636ab08d --- /dev/null +++ b/go/vt/tlstest/tlstest_test.go @@ -0,0 +1,134 @@ +package tlstest + +import ( + "crypto/tls" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "strings" + "sync" + "testing" + + "github.com/youtube/vitess/go/vt/servenv/grpcutils" +) + +// TestClientServer generates: +// - a root CA +// - a server intermediate CA, with a server. +// - a client intermediate CA, with a client. +// And then performs a few tests on them. +func TestClientServer(t *testing.T) { + // Our test root. + root, err := ioutil.TempDir("", "tlstest") + if err != nil { + t.Fatalf("TempDir failed: %v", err) + } + defer os.RemoveAll(root) + + // Create the certs and configs. 
+ CreateCA(root) + + CreateSignedCert(root, CA, "01", "servers", "Servers CA") + CreateSignedCert(root, "servers", "01", "server-instance", "Server Instance") + + CreateSignedCert(root, CA, "02", "clients", "Clients CA") + CreateSignedCert(root, "clients", "01", "client-instance", "Client Instance") + serverConfig, err := grpcutils.TLSServerConfig( + path.Join(root, "server-instance-cert.pem"), + path.Join(root, "server-instance-key.pem"), + path.Join(root, "clients-cert.pem")) + if err != nil { + t.Fatalf("TLSServerConfig failed: %v", err) + } + clientConfig, err := grpcutils.TLSClientConfig( + path.Join(root, "client-instance-cert.pem"), + path.Join(root, "client-instance-key.pem"), + path.Join(root, "servers-cert.pem"), + "Server Instance") + if err != nil { + t.Fatalf("TLSClientConfig failed: %v", err) + } + + // Create a TLS server listener. + listener, err := tls.Listen("tcp", ":0", serverConfig) + if err != nil { + t.Fatalf("Listen failed: %v", err) + } + addr := listener.Addr().String() + defer listener.Close() + + wg := sync.WaitGroup{} + + // + // Positive case: accept on server side, connect a client, send data. + // + + wg.Add(1) + go func() { + defer wg.Done() + clientConn, err := tls.Dial("tcp", addr, clientConfig) + if err != nil { + t.Fatalf("Dial failed: %v", err) + } + + clientConn.Write([]byte{42}) + clientConn.Close() + }() + + serverConn, err := listener.Accept() + if err != nil { + t.Fatalf("Accept failed: %v", err) + } + + result := make([]byte, 1) + if n, err := serverConn.Read(result); (err != nil && err != io.EOF) || n != 1 { + t.Fatalf("Read failed: %v %v", n, err) + } + if result[0] != 42 { + t.Fatalf("Read returned wrong result: %v", result) + } + serverConn.Close() + + wg.Wait() + + // + // Negative case: connect a client with wrong cert (using the + // server cert on the client side). 
+ // + + badClientConfig, err := grpcutils.TLSClientConfig( + path.Join(root, "server-instance-cert.pem"), + path.Join(root, "server-instance-key.pem"), + path.Join(root, "servers-cert.pem"), + "Server Instance") + if err != nil { + t.Fatalf("TLSClientConfig failed: %v", err) + } + + wg.Add(1) + go func() { + // We expect the Accept to work, but the first read to fail. + defer wg.Done() + serverConn, err := listener.Accept() + if err != nil { + t.Fatalf("Connection failed: %v", err) + } + + // This will fail. + result := make([]byte, 1) + if n, err := serverConn.Read(result); err == nil { + fmt.Printf("Was able to read from server: %v\n", n) + } + serverConn.Close() + }() + + if _, err = tls.Dial("tcp", addr, badClientConfig); err == nil { + t.Fatalf("Dial was expected to fail") + } + if !strings.Contains(err.Error(), "bad certificate") { + t.Errorf("Wrong error returned: %v", err) + } + t.Logf("Dial returned: %v", err) +} From f0053e6372cb990b223c9449eb78cf9b7efc8c86 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Wed, 15 Feb 2017 15:38:59 -0800 Subject: [PATCH 006/108] Creating and using vttlstest. It's just exposing the library methods into a binary, and using it in python tests. --- go/cmd/vttlstest/vttlstest.go | 94 ++++++++++++++++++++++++ test/encrypted_replication.py | 131 +++++----------------------------- test/encrypted_transport.py | 83 +++------------------ 3 files changed, 122 insertions(+), 186 deletions(-) create mode 100644 go/cmd/vttlstest/vttlstest.go diff --git a/go/cmd/vttlstest/vttlstest.go b/go/cmd/vttlstest/vttlstest.go new file mode 100644 index 00000000000..b09e2ba9b71 --- /dev/null +++ b/go/cmd/vttlstest/vttlstest.go @@ -0,0 +1,94 @@ +package main + +import ( + "flag" + "fmt" + "os" + + log "github.com/golang/glog" + + "github.com/youtube/vitess/go/exit" + "github.com/youtube/vitess/go/vt/logutil" + "github.com/youtube/vitess/go/vt/tlstest" +) + +var doc = ` +vttlstest is a tool for generating test certificates and keys for TLS tests. 
+ +To create a toplevel CA, use: + $ vttlstest -root /tmp CreateCA + +To create an intermediate or leaf CA, use: + $ vttlstest -root /tmp CreateSignedCert servers + $ vttlstest -root /tmp CreateSignedCert -parent servers server + +To get help on a command, use: + $ vttlstest -help +` + +type cmdFunc func(subFlags *flag.FlagSet, args []string) + +var cmdMap map[string]cmdFunc + +func init() { + cmdMap = map[string]cmdFunc{ + "CreateCA": cmdCreateCA, + "CreateSignedCert": cmdCreateSignedCert, + } +} + +var ( + root = flag.String("root", ".", "root directory for certificates and keys") +) + +func cmdCreateCA(subFlags *flag.FlagSet, args []string) { + subFlags.Parse(args) + if subFlags.NArg() > 0 { + log.Fatalf("CreateCA command doesn't take any parameter") + } + + tlstest.CreateCA(*root) +} + +func cmdCreateSignedCert(subFlags *flag.FlagSet, args []string) { + parent := subFlags.String("parent", "ca", "Parent cert name to use. Use 'ca' for the toplevel CA.") + serial := subFlags.String("serial", "01", "Serial number for the certificate to create. Should be different for two certificates with the same parent.") + commonName := subFlags.String("common_name", "", "Common name for the certificate. 
If empty, uses the name.") + + subFlags.Parse(args) + if subFlags.NArg() != 1 { + log.Fatalf("CreateSignedCert command takes a single name as a parameter") + } + if *commonName == "" { + *commonName = subFlags.Arg(0) + } + + tlstest.CreateSignedCert(*root, *parent, *serial, subFlags.Arg(0), *commonName) +} + +func main() { + defer exit.Recover() + defer logutil.Flush() + flag.Usage = func() { + fmt.Fprintf(os.Stderr, "Usage of %v:\n", os.Args[0]) + flag.PrintDefaults() + fmt.Fprintf(os.Stderr, doc) + } + flag.Parse() + args := flag.Args() + if len(args) == 0 { + flag.Usage() + exit.Return(1) + } + + cmdName := args[0] + args = args[1:] + cmd, ok := cmdMap[cmdName] + if !ok { + log.Fatalf("Unknown command %v", cmdName) + } + subFlags := flag.NewFlagSet(cmdName, flag.ExitOnError) + + // Run the command. + cmd(subFlags, args) +} diff --git a/test/encrypted_replication.py b/test/encrypted_replication.py index 1eb97715429..6b0a1161d0d 100755 --- a/test/encrypted_replication.py +++ b/test/encrypted_replication.py @@ -6,7 +6,6 @@ import logging import os -import subprocess import unittest import environment @@ -20,12 +19,6 @@ cert_dir = environment.tmproot + '/certs' -def openssl(cmd): - result = subprocess.call(['openssl'] + cmd, stderr=utils.devnull) - if result != 0: - raise utils.TestError('OpenSSL command failed: %s' % ' '.join(cmd)) - - def setUpModule(): try: environment.topo_server().setup() @@ -33,115 +26,27 @@ def setUpModule(): logging.debug('Creating certificates') os.makedirs(cert_dir) - # Create CA certificate - ca_key = cert_dir + '/ca-key.pem' - ca_cert = cert_dir + '/ca-cert.pem' - openssl(['genrsa', '-out', cert_dir + '/ca-key.pem']) - ca_config = cert_dir + '/ca.config' - with open(ca_config, 'w') as fd: - fd.write(""" -[ req ] - default_bits = 1024 - default_keyfile = keyfile.pem - distinguished_name = req_distinguished_name - attributes = req_attributes - prompt = no - output_password = mypass -[ req_distinguished_name ] - C = US - ST = California - L 
= Mountain View - O = Google - OU = Vitess - CN = Mysql CA - emailAddress = test@email.address -[ req_attributes ] - challengePassword = A challenge password -""") - openssl(['req', '-new', '-x509', '-nodes', '-days', '3600', '-batch', - '-config', ca_config, - '-key', ca_key, - '-out', ca_cert]) - - # Create mysql server certificate, remove passphrase, and sign it - server_key = cert_dir + '/server-key.pem' - server_cert = cert_dir + '/server-cert.pem' - server_req = cert_dir + '/server-req.pem' - server_config = cert_dir + '/server.config' - with open(server_config, 'w') as fd: - fd.write(""" -[ req ] - default_bits = 1024 - default_keyfile = keyfile.pem - distinguished_name = req_distinguished_name - attributes = req_attributes - prompt = no - output_password = mypass -[ req_distinguished_name ] - C = US - ST = California - L = Mountain View - O = Google - OU = Vitess - CN = Mysql Server - emailAddress = test@email.address -[ req_attributes ] - challengePassword = A challenge password -""") - openssl(['req', '-newkey', 'rsa:2048', '-days', '3600', '-nodes', '-batch', - '-config', server_config, - '-keyout', server_key, '-out', server_req]) - openssl(['rsa', '-in', server_key, '-out', server_key]) - openssl(['x509', '-req', - '-in', server_req, - '-days', '3600', - '-CA', ca_cert, - '-CAkey', ca_key, - '-set_serial', '01', - '-out', server_cert]) - - # Create mysql client certificate, remove passphrase, and sign it - client_key = cert_dir + '/client-key.pem' - client_cert = cert_dir + '/client-cert.pem' - client_req = cert_dir + '/client-req.pem' - client_config = cert_dir + '/client.config' - with open(client_config, 'w') as fd: - fd.write(""" -[ req ] - default_bits = 1024 - default_keyfile = keyfile.pem - distinguished_name = req_distinguished_name - attributes = req_attributes - prompt = no - output_password = mypass -[ req_distinguished_name ] - C = US - ST = California - L = Mountain View - O = Google - OU = Vitess - CN = Mysql Client - emailAddress = 
test@email.address -[ req_attributes ] - challengePassword = A challenge password -""") - openssl(['req', '-newkey', 'rsa:2048', '-days', '3600', '-nodes', '-batch', - '-config', client_config, - '-keyout', client_key, '-out', client_req]) - openssl(['rsa', '-in', client_key, '-out', client_key]) - openssl(['x509', '-req', - '-in', client_req, - '-days', '3600', - '-CA', ca_cert, - '-CAkey', ca_key, - '-set_serial', '02', - '-out', client_cert]) + utils.run(environment.binary_args('vttlstest') + + ['-root', cert_dir, + 'CreateCA']) + utils.run(environment.binary_args('vttlstest') + + ['-root', cert_dir, + 'CreateSignedCert', + '-common_name', 'Mysql Server', + '-serial', '01', + 'server']) + utils.run(environment.binary_args('vttlstest') + + ['-root', cert_dir, + 'CreateSignedCert', + '-common_name', 'Mysql Client', + '-serial', '02', + 'client']) extra_my_cnf = cert_dir + '/secure.cnf' fd = open(extra_my_cnf, 'w') - fd.write('ssl-ca=' + ca_cert + '\n') - fd.write('ssl-cert=' + server_cert + '\n') - fd.write('ssl-key=' + server_key + '\n') + fd.write('ssl-ca=' + cert_dir + '/ca-cert.pem\n') + fd.write('ssl-cert=' + cert_dir + '/server-cert.pem\n') + fd.write('ssl-key=' + cert_dir + '/server-key.pem\n') fd.close() setup_procs = [ diff --git a/test/encrypted_transport.py b/test/encrypted_transport.py index c8c955b2585..a4da2fd4538 100755 --- a/test/encrypted_transport.py +++ b/test/encrypted_transport.py @@ -44,7 +44,6 @@ import logging import os -import subprocess import unittest from vtdb import vtgate_client @@ -62,51 +61,15 @@ table_acl_config = environment.tmproot + '/table_acl_config.json' -def openssl(cmd): - result = subprocess.call(['openssl'] + cmd, stderr=utils.devnull) - if result != 0: - raise utils.TestError('OpenSSL command failed: %s' % ' '.join(cmd)) - - def create_signed_cert(ca, serial, name, common_name): logging.info('Creating signed cert and key %s', common_name) - ca_key = cert_dir + '/' + ca + '-key.pem' - ca_cert = cert_dir + '/' + ca + 
'-cert.pem' - key = cert_dir + '/' + name + '-key.pem' - cert = cert_dir + '/' + name + '-cert.pem' - req = cert_dir + '/' + name + '-req.pem' - config = cert_dir + '/' + name + '.config' - with open(config, 'w') as fd: - fd.write(""" -[ req ] - default_bits = 1024 - default_keyfile = keyfile.pem - distinguished_name = req_distinguished_name - attributes = req_attributes - prompt = no - output_password = mypass -[ req_distinguished_name ] - C = US - ST = California - L = Mountain View - O = Google - OU = Vitess - CN = %s - emailAddress = test@email.address -[ req_attributes ] - challengePassword = A challenge password -""" % common_name) - openssl(['req', '-newkey', 'rsa:2048', '-days', '3600', '-nodes', '-batch', - '-config', config, - '-keyout', key, '-out', req]) - openssl(['rsa', '-in', key, '-out', key]) - openssl(['x509', '-req', - '-in', req, - '-days', '3600', - '-CA', ca_cert, - '-CAkey', ca_key, - '-set_serial', serial, - '-out', cert]) + utils.run(environment.binary_args('vttlstest') + + ['-root', cert_dir, + 'CreateSignedCert', + '-parent', ca, + '-serial', serial, + '-common_name', common_name, + name]) def server_extra_args(name, ca): @@ -161,35 +124,9 @@ def setUpModule(): os.makedirs(cert_dir) # Create CA certificate - logging.info('Creating root CA') - ca_key = cert_dir + '/ca-key.pem' - ca_cert = cert_dir + '/ca-cert.pem' - openssl(['genrsa', '-out', cert_dir + '/ca-key.pem']) - ca_config = cert_dir + '/ca.config' - with open(ca_config, 'w') as fd: - fd.write(""" -[ req ] - default_bits = 1024 - default_keyfile = keyfile.pem - distinguished_name = req_distinguished_name - attributes = req_attributes - prompt = no - output_password = mypass -[ req_distinguished_name ] - C = US - ST = California - L = Mountain View - O = Google - OU = Vitess - CN = CA - emailAddress = test@email.address -[ req_attributes ] - challengePassword = A challenge password -""") - openssl(['req', '-new', '-x509', '-nodes', '-days', '3600', '-batch', - '-config', ca_config, 
- '-key', ca_key, - '-out', ca_cert]) + utils.run(environment.binary_args('vttlstest') + + ['-root', cert_dir, + 'CreateCA']) # create all certs create_signed_cert('ca', '01', 'vttablet-server', 'vttablet server CA') From 385633ccf6d1629d5d13e40b4726ed50adf032f7 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Thu, 16 Feb 2017 09:07:28 -0800 Subject: [PATCH 007/108] Adding SSL support in mysql server. And a unit test that validates it. --- go/mysqlconn/constants.go | 1 - go/mysqlconn/doc.go | 8 +-- go/mysqlconn/server.go | 50 ++++++++++++--- go/mysqlconn/server_test.go | 120 ++++++++++++++++++++++++++++++++++++ 4 files changed, 165 insertions(+), 14 deletions(-) diff --git a/go/mysqlconn/constants.go b/go/mysqlconn/constants.go index 9c0ce2e96ef..0186525721a 100644 --- a/go/mysqlconn/constants.go +++ b/go/mysqlconn/constants.go @@ -58,7 +58,6 @@ const ( // CapabilityClientSSL is CLIENT_SSL. // Switch to SSL after handshake. - // Not supported yet, but checked. CapabilityClientSSL = 1 << 11 // CLIENT_IGNORE_SIGPIPE 1 << 12 diff --git a/go/mysqlconn/doc.go b/go/mysqlconn/doc.go index 5b425156cd4..8d603a5acc1 100644 --- a/go/mysqlconn/doc.go +++ b/go/mysqlconn/doc.go @@ -33,16 +33,12 @@ capability flags), then the client may set the flag or not (as the server should ignore it anyway), and then should send a COM_INIT_DB message to set the database. --- -CLIENT_SSL: - -SSL is not supported yet, in neither client nor server. It is not a lot to add. - -- PLUGABLE AUTHENTICATION: We only support mysql_native_password for now, both client and server -side. It wouldn't be a lot of work to add SHA256 for instance. +side. It wouldn't be a lot of work to add SHA256 for instance, or clear text +authentication. 
-- Maximum Packet Size: diff --git a/go/mysqlconn/server.go b/go/mysqlconn/server.go index 72098b062ba..fcfcb770d58 100644 --- a/go/mysqlconn/server.go +++ b/go/mysqlconn/server.go @@ -3,6 +3,7 @@ package mysqlconn import ( "bytes" "crypto/rand" + "crypto/tls" "fmt" "net" @@ -60,6 +61,10 @@ type Listener struct { // ServerVersion is the version we will advertise. ServerVersion string + // TLSConfig is the server TLS config. If set, we will advertise + // that we support SSL. + TLSConfig *tls.Config + // PasswordMap maps users to passwords. PasswordMap map[string]string @@ -126,7 +131,7 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32) { defer l.handler.ConnectionClosed(c) // First build and send the server handshake packet. - cipher, err := c.writeHandshakeV10(l.ServerVersion) + cipher, err := c.writeHandshakeV10(l.ServerVersion, l.TLSConfig != nil) if err != nil { log.Errorf("Cannot send HandshakeV10 packet: %v", err) return @@ -138,11 +143,25 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32) { log.Errorf("Cannot read client handshake response: %v", err) return } - username, authResponse, err := l.parseClientHandshakePacket(c, response) + username, authResponse, err := l.parseClientHandshakePacket(c, true, response) if err != nil { log.Errorf("Cannot parse client handshake response: %v", err) return } + if c.Capabilities&CapabilityClientSSL > 0 { + // SSL was enabled. We need to re-read the auth packet. + response, err = c.readEphemeralPacket() + if err != nil { + log.Errorf("Cannot read post-SSL client handshake response: %v", err) + return + } + + username, authResponse, err = l.parseClientHandshakePacket(c, false, response) + if err != nil { + log.Errorf("Cannot parse post-SSL client handshake response: %v", err) + return + } + } // Find the user in our map password, ok := l.PasswordMap[username] @@ -225,7 +244,7 @@ func (l *Listener) Close() { // writeHandshakeV10 writes the Initial Handshake Packet, server side. 
// It returns the cipher data. -func (c *Conn) writeHandshakeV10(serverVersion string) ([]byte, error) { +func (c *Conn) writeHandshakeV10(serverVersion string, enableTLS bool) ([]byte, error) { capabilities := CapabilityClientLongPassword | CapabilityClientLongFlag | CapabilityClientConnectWithDB | @@ -235,6 +254,9 @@ func (c *Conn) writeHandshakeV10(serverVersion string) ([]byte, error) { CapabilityClientPluginAuth | CapabilityClientPluginAuthLenencClientData | CapabilityClientDeprecateEOF + if enableTLS { + capabilities |= CapabilityClientSSL + } length := 1 + // protocol version @@ -314,7 +336,7 @@ func (c *Conn) writeHandshakeV10(serverVersion string) ([]byte, error) { // parseClientHandshakePacket parses the handshake sent by the client. // Returns the username, auth-data, error. -func (l *Listener) parseClientHandshakePacket(c *Conn, data []byte) (string, []byte, error) { +func (l *Listener) parseClientHandshakePacket(c *Conn, firstTime bool, data []byte) (string, []byte, error) { pos := 0 // Client flags, 4 bytes. @@ -326,12 +348,15 @@ func (l *Listener) parseClientHandshakePacket(c *Conn, data []byte) (string, []b return "", nil, fmt.Errorf("parseClientHandshakePacket: only support protocol 4.1") } - // Remember a subset of the capabilities, so we can use them later in the protocol. - c.Capabilities = clientFlags & (CapabilityClientDeprecateEOF) + // Remember a subset of the capabilities, so we can use them + // later in the protocol. If we re-received the handshake packet + // after SSL negotiation, do not overwrite capabilities. + if firstTime { + c.Capabilities = clientFlags & (CapabilityClientDeprecateEOF) + } // Max packet size. Don't do anything with this now. // See doc.go for more information. 
- /*maxPacketSize*/ _, pos, ok = readUint32(data, pos) if !ok { return "", nil, fmt.Errorf("parseClientHandshakePacket: can't read maxPacketSize") @@ -347,6 +372,17 @@ func (l *Listener) parseClientHandshakePacket(c *Conn, data []byte) (string, []b // 23x reserved zero bytes. pos += 23 + // Check for SSL. + if firstTime && l.TLSConfig != nil && clientFlags&CapabilityClientSSL > 0 { + // Need to switch to TLS, and then re-read the packet. + conn := tls.Server(c.conn, l.TLSConfig) + c.conn = conn + c.reader.Reset(conn) + c.writer.Reset(conn) + c.Capabilities |= CapabilityClientSSL + return "", nil, nil + } + // username username, pos, ok := readNullString(data, pos) if !ok { diff --git a/go/mysqlconn/server_test.go b/go/mysqlconn/server_test.go index 3182632dcc5..fbe6e5442e7 100644 --- a/go/mysqlconn/server_test.go +++ b/go/mysqlconn/server_test.go @@ -2,6 +2,7 @@ package mysqlconn import ( "fmt" + "io/ioutil" "net" "os" "os/exec" @@ -12,6 +13,8 @@ import ( "github.com/youtube/vitess/go/sqldb" "github.com/youtube/vitess/go/sqltypes" vtenv "github.com/youtube/vitess/go/vt/env" + "github.com/youtube/vitess/go/vt/servenv/grpcutils" + "github.com/youtube/vitess/go/vt/tlstest" querypb "github.com/youtube/vitess/go/vt/proto/query" ) @@ -83,6 +86,27 @@ func (th *testHandler) ComQuery(c *Conn, query string) (*sqltypes.Result, error) }, nil } + if query == "ssl echo" { + value := "OFF" + if c.Capabilities&CapabilityClientSSL > 0 { + value = "ON" + } + return &sqltypes.Result{ + + Fields: []*querypb.Field{ + { + Name: "ssl_flag", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(value)), + }, + }, + }, nil + } + return &sqltypes.Result{}, nil } @@ -162,11 +186,98 @@ func TestServer(t *testing.T) { t.Errorf("Unexpected output for 'schema echo'") } + // Sanity check: make sure this didn't go through SSL + output, ok = runMysql(t, params, "ssl echo") + if !ok { + t.Fatalf("mysql failed: %v", output) + } + 
if !strings.Contains(output, "ssl_flag") || + !strings.Contains(output, "OFF") || + !strings.Contains(output, "1 row in set") { + t.Errorf("Unexpected output for 'ssl echo': %v", output) + } // Uncomment to leave setup up for a while, to run tests manually. // fmt.Printf("Listening to server on host '%v' port '%v'.\n", host, port) // time.Sleep(60 * time.Minute) } +// TestTLSServer creates a Server with TLS support, then uses mysql +// client to connect to it. +func TestTLSServer(t *testing.T) { + th := &testHandler{} + + // Create the listener, so we can get its host. + // Below, we are enabling --ssl-verify-server-cert, which adds + // a check that the common name of the certificate matches the + // server host name we connect to. + l, err := NewListener("tcp", ":0", th) + if err != nil { + t.Fatalf("NewListener failed: %v", err) + } + defer l.Close() + host := l.Addr().(*net.TCPAddr).IP.String() + port := l.Addr().(*net.TCPAddr).Port + + // Create the certs. + root, err := ioutil.TempDir("", "tlstest") + if err != nil { + t.Fatalf("TempDir failed: %v", err) + } + defer os.RemoveAll(root) + tlstest.CreateCA(root) + tlstest.CreateSignedCert(root, tlstest.CA, "01", "server", host) + tlstest.CreateSignedCert(root, tlstest.CA, "02", "client", "Client Cert") + + // Create the server with TLS config. + serverConfig, err := grpcutils.TLSServerConfig( + path.Join(root, "server-cert.pem"), + path.Join(root, "server-key.pem"), + path.Join(root, "ca-cert.pem")) + if err != nil { + t.Fatalf("TLSServerConfig failed: %v", err) + } + l.TLSConfig = serverConfig + l.PasswordMap["user1"] = "password1" + go func() { + l.Accept() + }() + + // Setup the right parameters. + params := &sqldb.ConnParams{ + Host: host, + Port: port, + Uname: "user1", + Pass: "password1", + // SSL flags. 
+ Flags: CapabilityClientSSL, + SslCa: path.Join(root, "ca-cert.pem"), + SslCert: path.Join(root, "client-cert.pem"), + SslKey: path.Join(root, "client-key.pem"), + } + + // Run a 'select rows' command with results. + output, ok := runMysql(t, params, "select rows") + if !ok { + t.Fatalf("mysql failed: %v", output) + } + if !strings.Contains(output, "nice name") || + !strings.Contains(output, "nicer name") || + !strings.Contains(output, "2 rows in set") { + t.Errorf("Unexpected output for 'select rows'") + } + + // make sure this went through SSL + output, ok = runMysql(t, params, "ssl echo") + if !ok { + t.Fatalf("mysql failed: %v", output) + } + if !strings.Contains(output, "ssl_flag") || + !strings.Contains(output, "ON") || + !strings.Contains(output, "1 row in set") { + t.Errorf("Unexpected output for 'ssl echo': %v", output) + } +} + // runMysql forks a mysql command line process connecting to the provided server. func runMysql(t *testing.T, params *sqldb.ConnParams, command string) (string, bool) { dir, err := vtenv.VtMysqlRoot() @@ -207,6 +318,15 @@ func runMysql(t *testing.T, params *sqldb.ConnParams, command string) (string, b "-D", params.DbName, }...) } + if params.Flags&CapabilityClientSSL > 0 { + args = append(args, []string{ + "--ssl", + "--ssl-ca", params.SslCa, + "--ssl-cert", params.SslCert, + "--ssl-key", params.SslKey, + "--ssl-verify-server-cert", + }...) + } env := []string{ "LD_LIBRARY_PATH=" + path.Join(dir, "lib/mysql"), } From 93162ba54c5585a67caa0716e8e14022fa5d17f9 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Thu, 16 Feb 2017 10:34:52 -0800 Subject: [PATCH 008/108] Adding TLS support in mysql client library. With tests. Also, the server side needs to not use a buffer read for the first handshake packet, so it doesn't buffer TLS negociation packets as well. 
--- go/mysqlconn/client.go | 105 +++++++++++++++++++++++++++++---- go/mysqlconn/conn.go | 40 +++++++++++++ go/mysqlconn/conn_test.go | 9 +++ go/mysqlconn/handshake_test.go | 98 ++++++++++++++++++++++++++++++ go/mysqlconn/server.go | 5 +- go/mysqlconn/server_test.go | 3 +- 6 files changed, 244 insertions(+), 16 deletions(-) create mode 100644 go/mysqlconn/handshake_test.go diff --git a/go/mysqlconn/client.go b/go/mysqlconn/client.go index 5365f58d9ba..318cf70864d 100644 --- a/go/mysqlconn/client.go +++ b/go/mysqlconn/client.go @@ -2,6 +2,7 @@ package mysqlconn import ( "crypto/sha1" + "crypto/tls" "fmt" "net" "strconv" @@ -11,6 +12,7 @@ import ( "golang.org/x/net/context" "github.com/youtube/vitess/go/sqldb" + "github.com/youtube/vitess/go/vt/servenv/grpcutils" ) // connectResult is used by Connect. @@ -193,15 +195,49 @@ func (c *Conn) clientHandshake(characterSet uint8, params *sqldb.ConnParams) err return sqldb.NewSQLError(CRVersionError, SSUnknownSQLState, "cannot connect to servers earlier than 4.1") } - // If client asked for SSL, but server doesn't support it, stop right here. - if capabilities&CapabilityClientSSL == 0 && params.SslCert != "" && params.SslKey != "" { - return sqldb.NewSQLError(CRSSLConnectionError, SSUnknownSQLState, "server doesn't support SSL but client asked for it") - } - - // Remember a subset of the capabilities, so we can use them later in the protocol. + // Remember a subset of the capabilities, so we can use them + // later in the protocol. c.Capabilities = capabilities & (CapabilityClientDeprecateEOF) + // Handle switch to SSL if necessary. + if params.Flags&CapabilityClientSSL > 0 { + // If client asked for SSL, but server doesn't support it, + // stop right here. + if capabilities&CapabilityClientSSL == 0 { + return sqldb.NewSQLError(CRSSLConnectionError, SSUnknownSQLState, "server doesn't support SSL but client asked for it") + } + + // The ServerName to verify depends on what the hostname is. 
+ // - If it is an IP address, we need to prefix it with 'IP:'. + // - If not, we can just use it as is. + // We may need to add a ServerName field to ConnParams to + // make this more explicit. + serverName := params.Host + if net.ParseIP(params.Host) != nil { + serverName = "IP:" + params.Host + } + + // Build the TLS config. + clientConfig, err := grpcutils.TLSClientConfig(params.SslCert, params.SslKey, params.SslCa, serverName) + if err != nil { + return sqldb.NewSQLError(CRSSLConnectionError, SSUnknownSQLState, "error loading client cert and ca: %v", err) + } + + // Send the SSLRequest packet. + if err := c.writeSSLRequest(capabilities, characterSet, params); err != nil { + return err + } + + // Switch to SSL. + conn := tls.Client(c.conn, clientConfig) + c.conn = conn + c.reader.Reset(conn) + c.writer.Reset(conn) + c.Capabilities |= CapabilityClientSSL + } + // Build and send our handshake response 41. + // Note this one will never have SSL flag on. if err := c.writeHandshakeResponse41(capabilities, cipher, characterSet, params); err != nil { return err } @@ -375,6 +411,55 @@ func (c *Conn) parseInitialHandshakePacket(data []byte) (uint32, []byte, error) return capabilities, authPluginData, nil } +// writeSSLRequest writes the SSLRequest packet. It's just a truncated +// HandshakeResponse41. +func (c *Conn) writeSSLRequest(capabilities uint32, characterSet uint8, params *sqldb.ConnParams) error { + // Build our flags, with CapabilityClientSSL. + var flags uint32 = CapabilityClientLongPassword | + CapabilityClientLongFlag | + CapabilityClientProtocol41 | + CapabilityClientTransactions | + CapabilityClientSecureConnection | + CapabilityClientPluginAuth | + CapabilityClientPluginAuthLenencClientData | + CapabilityClientSSL | + // If the server supported + // CapabilityClientDeprecateEOF, we also support it. + c.Capabilities&CapabilityClientDeprecateEOF + + length := + 4 + // Client capability flags. + 4 + // Max-packet size. + 1 + // Character set. 
+ 23 // Reserved. + + // Add the DB name if the server supports it. + if params.DbName != "" && (capabilities&CapabilityClientConnectWithDB != 0) { + flags |= CapabilityClientConnectWithDB + } + + data := make([]byte, length) + pos := 0 + + // Client capability flags. + pos = writeUint32(data, pos, flags) + + // Max-packet size, always 0. See doc.go. + pos += 4 + + // Character set. + pos = writeByte(data, pos, characterSet) + + // And send it as is. + if err := c.writePacket(data); err != nil { + return sqldb.NewSQLError(CRServerLost, SSUnknownSQLState, "cannot send SSLRequest: %v", err) + } + if err := c.flush(); err != nil { + return sqldb.NewSQLError(CRServerLost, SSUnknownSQLState, "cannot flush SSLRequest: %v", err) + } + return nil +} + // writeHandshakeResponse41 writes the handshake response. // Returns a sqldb.SQLError. func (c *Conn) writeHandshakeResponse41(capabilities uint32, cipher []byte, characterSet uint8, params *sqldb.ConnParams) error { @@ -390,7 +475,7 @@ func (c *Conn) writeHandshakeResponse41(capabilities uint32, cipher []byte, char // CapabilityClientDeprecateEOF, we also support it. c.Capabilities&CapabilityClientDeprecateEOF - // FIXME(alainjobart) add SSL, multi statement, client found rows. + // FIXME(alainjobart) add multi statement, client found rows. // Password encryption. scrambledPassword := scramblePassword(cipher, []byte(params.Pass)) @@ -430,12 +515,6 @@ func (c *Conn) writeHandshakeResponse41(capabilities uint32, cipher []byte, char // Character set. pos = writeByte(data, pos, characterSet) - // FIXME(alainjobart): With SSL can send this now. - // For now we don't support it. - if params.SslCert != "" && params.SslKey != "" { - return sqldb.NewSQLError(CRSSLConnectionError, SSUnknownSQLState, "SSL support is not implemented yet in this client") - } - // 23 reserved bytes, all 0. 
pos += 23 diff --git a/go/mysqlconn/conn.go b/go/mysqlconn/conn.go index 0c89137a66a..4d76930e380 100644 --- a/go/mysqlconn/conn.go +++ b/go/mysqlconn/conn.go @@ -146,6 +146,46 @@ func newConn(conn net.Conn) *Conn { } } +// readPacketDirect attempts to read a packet from the socket directly. +// It needs to be used for the first handshake packet the server receives, +// so we do't buffer the SSL negociation packet. As a shortcut, only +// packets smaller than MaxPacketSize can be read here. +func (c *Conn) readPacketDirect() ([]byte, error) { + var header [4]byte + if _, err := io.ReadFull(c.conn, header[:]); err != nil { + return nil, fmt.Errorf("io.ReadFull(header size) failed: %v", err) + } + + sequence := uint8(header[3]) + if sequence != c.sequence { + return nil, fmt.Errorf("invalid sequence, expected %v got %v", c.sequence, sequence) + } + + c.sequence++ + + length := int(uint32(header[0]) | uint32(header[1])<<8 | uint32(header[2])<<16) + if length <= cap(c.buffer) { + // Fast path: read into buffer, we're good. + c.buffer = c.buffer[:length] + if _, err := io.ReadFull(c.conn, c.buffer); err != nil { + return nil, fmt.Errorf("io.ReadFull(direct packet body of length %v) failed: %v", length, err) + } + return c.buffer, nil + } + + // Sanity check + if length == MaxPacketSize { + return nil, fmt.Errorf("readPacketDirect doesn't support more than one packet") + } + + // Slow path, revert to allocating. + data := make([]byte, length) + if _, err := io.ReadFull(c.conn, data); err != nil { + return nil, fmt.Errorf("io.ReadFull(packet body of length %v) failed: %v", length, err) + } + return data, nil +} + // readEphemeralPacket attempts to read a packet into c.buffer. Do // not use this method if the contents of the packet needs to be kept // after the next readEphemeralPacket. 
If the packet is bigger than diff --git a/go/mysqlconn/conn_test.go b/go/mysqlconn/conn_test.go index baa925c1dfa..052a79a2238 100644 --- a/go/mysqlconn/conn_test.go +++ b/go/mysqlconn/conn_test.go @@ -121,13 +121,22 @@ func verifyPacketCommsSpecific(t *testing.T, cConn *Conn, data []byte, // Write a packet on one side, read it on the other, check it's // correct. We use all possible read and write methods. func verifyPacketComms(t *testing.T, cConn, sConn *Conn, data []byte) { + // All three writes, with ReadPacket. verifyPacketCommsSpecific(t, cConn, data, useWritePacket, sConn.ReadPacket) verifyPacketCommsSpecific(t, cConn, data, useWriteEphemeralPacket, sConn.ReadPacket) verifyPacketCommsSpecific(t, cConn, data, useWriteEphemeralPacketDirect, sConn.ReadPacket) + // All three writes, with readEphemeralPacket. verifyPacketCommsSpecific(t, cConn, data, useWritePacket, sConn.readEphemeralPacket) verifyPacketCommsSpecific(t, cConn, data, useWriteEphemeralPacket, sConn.readEphemeralPacket) verifyPacketCommsSpecific(t, cConn, data, useWriteEphemeralPacketDirect, sConn.readEphemeralPacket) + + // All three writes, with readPacketDirect, if size allows it. 
+ if len(data) < MaxPacketSize { + verifyPacketCommsSpecific(t, cConn, data, useWritePacket, sConn.readPacketDirect) + verifyPacketCommsSpecific(t, cConn, data, useWriteEphemeralPacket, sConn.readPacketDirect) + verifyPacketCommsSpecific(t, cConn, data, useWriteEphemeralPacketDirect, sConn.readPacketDirect) + } } func TestPackets(t *testing.T) { diff --git a/go/mysqlconn/handshake_test.go b/go/mysqlconn/handshake_test.go new file mode 100644 index 00000000000..cc690fa12b5 --- /dev/null +++ b/go/mysqlconn/handshake_test.go @@ -0,0 +1,98 @@ +package mysqlconn + +import ( + "context" + "io/ioutil" + "net" + "os" + "path" + "reflect" + "testing" + + "github.com/youtube/vitess/go/sqldb" + "github.com/youtube/vitess/go/vt/servenv/grpcutils" + "github.com/youtube/vitess/go/vt/tlstest" +) + +// This file tests the handshake scenarios between our client and our server. + +// TestSSLConnection creates a server with TLS support, a client that +// also has SSL support, and connects them. +func TestSSLConnection(t *testing.T) { + th := &testHandler{} + + // Create the listener, so we can get its host. + l, err := NewListener("tcp", ":0", th) + if err != nil { + t.Fatalf("NewListener failed: %v", err) + } + defer l.Close() + host := l.Addr().(*net.TCPAddr).IP.String() + port := l.Addr().(*net.TCPAddr).Port + + // Create the certs. + root, err := ioutil.TempDir("", "TestSSLConnection") + if err != nil { + t.Fatalf("TempDir failed: %v", err) + } + defer os.RemoveAll(root) + tlstest.CreateCA(root) + tlstest.CreateSignedCert(root, tlstest.CA, "01", "server", "IP:"+host) + tlstest.CreateSignedCert(root, tlstest.CA, "02", "client", "Client Cert") + + // Create the server with TLS config. 
+ serverConfig, err := grpcutils.TLSServerConfig( + path.Join(root, "server-cert.pem"), + path.Join(root, "server-key.pem"), + path.Join(root, "ca-cert.pem")) + if err != nil { + t.Fatalf("TLSServerConfig failed: %v", err) + } + l.TLSConfig = serverConfig + l.PasswordMap["user1"] = "password1" + go func() { + l.Accept() + }() + + // Setup the right parameters. + params := &sqldb.ConnParams{ + Host: host, + Port: port, + Uname: "user1", + Pass: "password1", + // SSL flags. + Flags: CapabilityClientSSL, + SslCa: path.Join(root, "ca-cert.pem"), + SslCert: path.Join(root, "client-cert.pem"), + SslKey: path.Join(root, "client-key.pem"), + } + + // Create a client connection, connect. + ctx := context.Background() + conn, err := Connect(ctx, params) + if err != nil { + t.Fatalf("Connect failed: %v", err) + } + defer conn.Close() + + // Run a 'select rows' command with results. + result, err := conn.ExecuteFetch("select rows", 10000, true) + if err != nil { + t.Fatalf("ExecuteFetch failed: %v", err) + } + if !reflect.DeepEqual(result, selectRowsResult) { + t.Errorf("Got wrong result from ExecuteFetch(select rows): %v", result) + } + + // Make sure this went through SSL. + result, err = conn.ExecuteFetch("ssl echo", 10000, true) + if err != nil { + t.Fatalf("ExecuteFetch failed: %v", err) + } + if result.Rows[0][0].String() != "ON" { + t.Errorf("Got wrong result from ExecuteFetch(ssl echo): %v", result) + } + + // Send a ComQuit to avoid the error message on the server side. + conn.writeComQuit() +} diff --git a/go/mysqlconn/server.go b/go/mysqlconn/server.go index fcfcb770d58..2cc87a94f85 100644 --- a/go/mysqlconn/server.go +++ b/go/mysqlconn/server.go @@ -137,8 +137,9 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32) { return } - // Wait for the client response. - response, err := c.readEphemeralPacket() + // Wait for the client response. This has to be a direct read, + // so we don't buffer the TLS negociation packets. 
+ response, err := c.readPacketDirect() if err != nil { log.Errorf("Cannot read client handshake response: %v", err) return diff --git a/go/mysqlconn/server_test.go b/go/mysqlconn/server_test.go index fbe6e5442e7..ffec12d360f 100644 --- a/go/mysqlconn/server_test.go +++ b/go/mysqlconn/server_test.go @@ -40,6 +40,7 @@ var selectRowsResult = &sqltypes.Result{ sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("nicer name")), }, }, + RowsAffected: 2, } type testHandler struct{} @@ -219,7 +220,7 @@ func TestTLSServer(t *testing.T) { port := l.Addr().(*net.TCPAddr).Port // Create the certs. - root, err := ioutil.TempDir("", "tlstest") + root, err := ioutil.TempDir("", "TestTLSServer") if err != nil { t.Fatalf("TempDir failed: %v", err) } From 3a9aefeb48d9f77ed8b1d2747bb82a2f582670d0 Mon Sep 17 00:00:00 2001 From: Yipei Wang Date: Thu, 16 Feb 2017 14:27:25 -0800 Subject: [PATCH 009/108] workflow: resolve comments. save for pull request for change on node.go in workflow folder. --- go/vt/workflow/node.go | 14 +- go/vt/workflow/resharding/checkpoint.go | 36 +- .../horizontal_resharding_workflow.go | 160 ++++----- .../horizontal_resharding_workflow_test.go | 276 +++++---------- go/vt/workflow/resharding/parallel_runner.go | 73 +++- .../resharding/parallel_runner_test.go | 325 +++++++++--------- go/vt/workflow/resharding/retry_controller.go | 1 - go/vt/workflow/resharding/task.go | 59 ++-- 8 files changed, 444 insertions(+), 500 deletions(-) diff --git a/go/vt/workflow/node.go b/go/vt/workflow/node.go index 25eb86feb0b..01cb3c4a358 100644 --- a/go/vt/workflow/node.go +++ b/go/vt/workflow/node.go @@ -191,13 +191,13 @@ func (n *Node) deepCopyFrom(otherNode *Node, copyChildren bool) error { *n = *otherNode n.Children = oldChildren - n.Actions = []*Action{} + /*n.Actions = []*Action{} for _, otherAction := range otherNode.Actions { action := &Action{} *action = *otherAction n.Actions = append(n.Actions, action) } - + */ if !copyChildren { return nil } @@ -355,6 +355,7 @@ func (m 
*NodeManager) updateNodeAndBroadcastLocked(userNode *Node, updateChildre if err != nil { return err } + userNode.LastChanged = time.Now().Unix() if err := savedNode.deepCopyFrom(userNode, updateChildren); err != nil { return err @@ -396,13 +397,8 @@ func (m *NodeManager) Action(ctx context.Context, ap *ActionParameters) error { return n.Listener.Action(ctx, ap.Path, ap.Name) } -func (m *NodeManager) GetNodeByRelativePath(parentNode *Node, childPath string) (*Node, error) { - fullNodePath := path.Join(parentNode.PathName, childPath) - node, err := m.getNodeByPath(fullNodePath) - if err != nil { - return nil, err - } - return node, nil +func (m *NodeManager) GetNodeByPath(nodePath string) (*Node, error) { + return m.getNodeByPath(nodePath) } func (m *NodeManager) getNodeByPath(nodePath string) (*Node, error) { diff --git a/go/vt/workflow/resharding/checkpoint.go b/go/vt/workflow/resharding/checkpoint.go index 00c393e9130..892030ccac3 100644 --- a/go/vt/workflow/resharding/checkpoint.go +++ b/go/vt/workflow/resharding/checkpoint.go @@ -2,6 +2,7 @@ package resharding import ( "context" + "fmt" "sync" "github.com/golang/protobuf/proto" @@ -15,9 +16,9 @@ type CheckpointWriter struct { topoServer topo.Server // checkpointMu is used for protecting data access during checkpointing. - checkpointMu sync.Mutex - checkpoint *workflowpb.WorkflowCheckpoint - wi *topo.WorkflowInfo + mu sync.Mutex + checkpoint *workflowpb.WorkflowCheckpoint + wi *topo.WorkflowInfo } // NewCheckpointWriter creates a CheckpointWriter. @@ -29,22 +30,23 @@ func NewCheckpointWriter(ts topo.Server, checkpoint *workflowpb.WorkflowCheckpoi } } -// UpdateTask updates the status of task in the checkpoint. -func (c *CheckpointWriter) UpdateTask(taskID string, status workflowpb.TaskState, err string) error { - // Writing the checkpoint is protected to avoid the situation that the - // task value is partially updated when saving the checkpoint. 
- c.checkpointMu.Lock() - defer c.checkpointMu.Unlock() +// UpdateTask updates the task status in the checkpointing copy and +// saves the full checkpoint to the topology server. +func (c *CheckpointWriter) UpdateTask(taskID string, status workflowpb.TaskState, err error) error { + c.mu.Lock() + defer c.mu.Unlock() - c.checkpoint.Tasks[taskID].State = status - c.checkpoint.Tasks[taskID].Error = err - return c.Save() -} + errorMessage := "" + if err != nil { + errorMessage = err.Error() + } + + t := c.checkpoint.Tasks[taskID] + + fmt.Printf("error message send to task %v: %v\n", t.Id, errorMessage) -// Save packets the checkpoint and sends it to the topology server. -func (c *CheckpointWriter) Save() error { - c.checkpointMu.Lock() - defer c.checkpointMu.Unlock() + t.State = status + t.Error = errorMessage return c.saveLocked() } diff --git a/go/vt/workflow/resharding/horizontal_resharding_workflow.go b/go/vt/workflow/resharding/horizontal_resharding_workflow.go index 6cbf5f9492f..b7f66031456 100644 --- a/go/vt/workflow/resharding/horizontal_resharding_workflow.go +++ b/go/vt/workflow/resharding/horizontal_resharding_workflow.go @@ -30,26 +30,28 @@ const ( codeVersion = 1 horizontalReshardingFactoryName = "horizontal_resharding" +) + +type PhaseType string - copySchemaName = "copy_schema" - cloneName = "clone" - waitForFilteredReplicationName = "wait_for_filtered_replication" - diffName = "diff" - migrateRdonlyName = "migrate_rdonly" - migrateReplicaName = "migrate_replica" - migrateMasterName = "migrate_master" +const ( + phaseCopySchema PhaseType = "copy_schema" + phaseClone PhaseType = "clone" + phaseWaitForFilteredReplication PhaseType = "wait_for_filtered_replication" + phaseDiff PhaseType = "diff" + phaseMigrateRdonly PhaseType = "migrate_rdonly" + phaseMigrateReplica PhaseType = "migrate_replica" + phaseMigrateMaster PhaseType = "migrate_master" ) -// HorizontalReshardingWorkflow contains meta-information and methods -// to control horizontal resharding 
workflow. +// HorizontalReshardingWorkflow contains meta-information and methods to +// control the horizontal resharding workflow. type HorizontalReshardingWorkflow struct { - // ctx is the context of the whole horizontal resharding process. - // Once this context is canceled, the horizontal resharding process stops. ctx context.Context + wr ReshardingWrangler manager *workflow.Manager topoServer topo.Server wi *topo.WorkflowInfo - wr ReshardingWrangler // logger is the logger we export UI logs from. logger *logutil.MemoryLogger @@ -67,24 +69,20 @@ type HorizontalReshardingWorkflow struct { checkpointWriter *CheckpointWriter } -// Run executes the horizontal resharding process and updates the UI message. +// Run executes the horizontal resharding process. // It implements the workflow.Workflow interface. func (hw *HorizontalReshardingWorkflow) Run(ctx context.Context, manager *workflow.Manager, wi *topo.WorkflowInfo) error { hw.ctx = ctx - hw.manager = manager hw.topoServer = manager.TopoServer() - hw.wi = wi + hw.manager = manager hw.wr = wrangler.New(logutil.NewConsoleLogger(), manager.TopoServer(), tmclient.NewTabletManagerClient()) - + hw.wi = wi hw.checkpointWriter = NewCheckpointWriter(hw.topoServer, hw.checkpoint, hw.wi) - if err := hw.checkpointWriter.Save(); err != nil { - return err - } + hw.rootUINode.Display = workflow.NodeDisplayDeterminate hw.rootUINode.BroadcastChanges(true /* updateChildren */) if err := hw.runWorkflow(); err != nil { - hw.setUIMessage(fmt.Sprintf("Horizontal Resharding failed: %v", err)) return err } hw.setUIMessage(fmt.Sprintf("Horizontal Resharding is finished sucessfully.")) @@ -92,46 +90,43 @@ func (hw *HorizontalReshardingWorkflow) Run(ctx context.Context, manager *workfl } func (hw *HorizontalReshardingWorkflow) runWorkflow() error { - copyTasks := hw.GetTasks(hw.checkpoint, copySchemaName) - copyRunner := NewParallelRunner(hw.ctx, hw.manager.NodeManager(), hw.copySchemaUINode, hw.checkpointWriter, copyTasks, 
hw.runCopySchema, PARALLEL) - if err := copyRunner.Run(); err != nil { + copySchemaTasks := hw.GetTasks(hw.checkpoint, phaseCopySchema) + copySchemaRunner := NewParallelRunner(hw.ctx, hw.manager.NodeManager(), hw.copySchemaUINode, hw.checkpointWriter, copySchemaTasks, hw.runCopySchema, PARALLEL) + if err := copySchemaRunner.Run(); err != nil { return err } - cloneTasks := hw.GetTasks(hw.checkpoint, cloneName) + cloneTasks := hw.GetTasks(hw.checkpoint, phaseClone) cloneRunner := NewParallelRunner(hw.ctx, hw.manager.NodeManager(), hw.cloneUINode, hw.checkpointWriter, cloneTasks, hw.runSplitClone, PARALLEL) if err := cloneRunner.Run(); err != nil { return err } - waitTasks := hw.GetTasks(hw.checkpoint, waitForFilteredReplicationName) - waitRunner := NewParallelRunner(hw.ctx, hw.manager.NodeManager(), hw.waitForFilteredReplicationUINode, hw.checkpointWriter, waitTasks, hw.runWaitForFilteredReplication, PARALLEL) - if err := waitRunner.Run(); err != nil { + waitForFilteredReplicationTasks := hw.GetTasks(hw.checkpoint, phaseWaitForFilteredReplication) + waitForFilteredReplicationRunner := NewParallelRunner(hw.ctx, hw.manager.NodeManager(), hw.waitForFilteredReplicationUINode, hw.checkpointWriter, waitForFilteredReplicationTasks, hw.runWaitForFilteredReplication, PARALLEL) + if err := waitForFilteredReplicationRunner.Run(); err != nil { return err } - diffTasks := hw.GetTasks(hw.checkpoint, diffName) + diffTasks := hw.GetTasks(hw.checkpoint, phaseDiff) diffRunner := NewParallelRunner(hw.ctx, hw.manager.NodeManager(), hw.diffUINode, hw.checkpointWriter, diffTasks, hw.runSplitDiff, SEQUENTIAL) - // SplitDiff requires the vtworker only work for one destination shard - // at a time. To simplify the concurrency control, we run all the SplitDiff - // task sequentially. 
if err := diffRunner.Run(); err != nil { return err } - migrateRdonlyTasks := hw.GetTasks(hw.checkpoint, migrateRdonlyName) + migrateRdonlyTasks := hw.GetTasks(hw.checkpoint, phaseMigrateRdonly) migrateRdonlyRunner := NewParallelRunner(hw.ctx, hw.manager.NodeManager(), hw.migrateRdonlyUINode, hw.checkpointWriter, migrateRdonlyTasks, hw.runMigrate, SEQUENTIAL) if err := migrateRdonlyRunner.Run(); err != nil { return err } - migrateReplicaTasks := hw.GetTasks(hw.checkpoint, migrateReplicaName) + migrateReplicaTasks := hw.GetTasks(hw.checkpoint, phaseMigrateRdonly) migrateReplicaRunner := NewParallelRunner(hw.ctx, hw.manager.NodeManager(), hw.migrateReplicaUINode, hw.checkpointWriter, migrateReplicaTasks, hw.runMigrate, SEQUENTIAL) if err := migrateReplicaRunner.Run(); err != nil { return err } - migrateMasterTasks := hw.GetTasks(hw.checkpoint, migrateMasterName) + migrateMasterTasks := hw.GetTasks(hw.checkpoint, phaseMigrateMaster) migrateMasterRunner := NewParallelRunner(hw.ctx, hw.manager.NodeManager(), hw.migrateMasterUINode, hw.checkpointWriter, migrateMasterTasks, hw.runMigrate, SEQUENTIAL) if err := migrateMasterRunner.Run(); err != nil { return err @@ -141,20 +136,18 @@ func (hw *HorizontalReshardingWorkflow) runWorkflow() error { } func (hw *HorizontalReshardingWorkflow) setUIMessage(message string) { - log.Infof("Horizontal resharding: %v.", message) + log.Infof("Horizontal resharding : %v.", message) hw.rootUINode.Log = hw.logger.String() hw.rootUINode.Message = message hw.rootUINode.BroadcastChanges(false /* updateChildren */) } -// Register registers horizontal_resharding as a valid factory -// in the workflow framework. func Register() { workflow.Register(horizontalReshardingFactoryName, &HorizontalReshardingWorkflowFactory{}) } // HorizontalReshardingWorkflowFactory is the factory to register -// the HorizontalResharding Workflow. +// the HorizontalReshardingWorkflow. 
type HorizontalReshardingWorkflowFactory struct{} // Init is part of the workflow.Factory interface. @@ -173,7 +166,9 @@ func (*HorizontalReshardingWorkflowFactory) Init(w *workflowpb.Workflow, args [] vtworkers := strings.Split(*vtworkersStr, ",") w.Name = fmt.Sprintf("Horizontal resharding on keyspace %s", *keyspace) - checkpoint, err := initCheckpoint(*keyspace, vtworkers) + ts := topo.Open() + defer ts.Close() + checkpoint, err := initCheckpoint(*keyspace, vtworkers, ts) if err != nil { return err } @@ -185,7 +180,7 @@ func (*HorizontalReshardingWorkflowFactory) Init(w *workflowpb.Workflow, args [] return nil } -// Instantiate is part of the workflow.Factory interface. +// Instantiate is part the workflow.Factory interface. func (*HorizontalReshardingWorkflowFactory) Instantiate(w *workflowpb.Workflow, rootNode *workflow.Node) (workflow.Workflow, error) { rootNode.Message = "This is a workflow to execute horizontal resharding automatically." @@ -199,31 +194,31 @@ func (*HorizontalReshardingWorkflowFactory) Instantiate(w *workflowpb.Workflow, rootUINode: rootNode, copySchemaUINode: &workflow.Node{ Name: "CopySchemaShard", - PathName: copySchemaName, + PathName: string(phaseCopySchema), }, cloneUINode: &workflow.Node{ Name: "SplitClone", - PathName: cloneName, + PathName: string(phaseClone), }, waitForFilteredReplicationUINode: &workflow.Node{ Name: "WaitForFilteredReplication", - PathName: waitForFilteredReplicationName, + PathName: string(phaseWaitForFilteredReplication), }, diffUINode: &workflow.Node{ Name: "SplitDiff", - PathName: diffName, + PathName: string(phaseDiff), }, migrateRdonlyUINode: &workflow.Node{ Name: "MigrateServedTypeRDONLY", - PathName: migrateRdonlyName, + PathName: string(phaseMigrateRdonly), }, migrateReplicaUINode: &workflow.Node{ Name: "MigrateServedTypeREPLICA", - PathName: migrateReplicaName, + PathName: string(phaseMigrateReplica), }, migrateMasterUINode: &workflow.Node{ Name: "MigrateServedTypeMASTER", - PathName: migrateMasterName, 
+ PathName: string(phaseMigrateMaster), }, logger: logutil.NewMemoryLogger(), } @@ -237,40 +232,48 @@ func (*HorizontalReshardingWorkflowFactory) Instantiate(w *workflowpb.Workflow, hw.migrateMasterUINode, } - destinationShards := strings.Split(checkpoint.Settings["destination_shards"], ",") - sourceShards := strings.Split(checkpoint.Settings["source_shards"], ",") - createUINode(copySchemaName, destinationShards, hw.copySchemaUINode) - createUINode(cloneName, sourceShards, hw.cloneUINode) - createUINode(waitForFilteredReplicationName, destinationShards, hw.waitForFilteredReplicationUINode) - createUINode(diffName, destinationShards, hw.diffUINode) - createUINode(migrateRdonlyName, sourceShards, hw.migrateRdonlyUINode) - createUINode(migrateReplicaName, sourceShards, hw.migrateReplicaUINode) - createUINode(migrateMasterName, sourceShards, hw.migrateMasterUINode) + destinationShards := strings.Split(hw.checkpoint.Settings["destination_shards"], ",") + sourceShards := strings.Split(hw.checkpoint.Settings["source_shards"], ",") + + createUINodes(phaseCopySchema, destinationShards, hw.copySchemaUINode) + createUINodes(phaseClone, sourceShards, hw.cloneUINode) + createUINodes(phaseWaitForFilteredReplication, destinationShards, hw.waitForFilteredReplicationUINode) + createUINodes(phaseDiff, destinationShards, hw.diffUINode) + createUINodes(phaseMigrateRdonly, sourceShards, hw.migrateRdonlyUINode) + createUINodes(phaseMigrateReplica, sourceShards, hw.migrateReplicaUINode) + createUINodes(phaseMigrateMaster, sourceShards, hw.migrateMasterUINode) return hw, nil } -func createUINode(phaseName string, shards []string, rootNode *workflow.Node) { - for _, shardName := range shards { - taskID := createTaskID(phaseName, shardName) +func createUINodes(phaseName PhaseType, shards []string, rootNode *workflow.Node) { + for _, shard := range shards { + taskID := createTaskID(phaseName, shard) taskUINode := &workflow.Node{ - Name: "Shard " + shardName, + Name: "Shard " + shard, 
PathName: taskID, } rootNode.Children = append(rootNode.Children, taskUINode) } } -func initCheckpoint(keyspace string, vtworkers []string) (*workflowpb.WorkflowCheckpoint, error) { - ts := topo.Open() - defer ts.Close() +// initCheckpoint initialize the checkpoint for the horizontal workflow. +func initCheckpoint(keyspace string, vtworkers []string, ts topo.Server) (*workflowpb.WorkflowCheckpoint, error) { + sourceShardList, destinationShardList, err := findSourceAndDestinationShards(ts, keyspace) + if err != nil { + return nil, err + } + return initCheckpointFromShards(keyspace, vtworkers, sourceShardList, destinationShardList) +} +func findSourceAndDestinationShards(ts topo.Server, keyspace string) ([]string, []string, error) { overlappingShards, err := topotools.FindOverlappingShards(context.Background(), ts, keyspace) if err != nil { - return nil, err + return nil, nil, err } var sourceShardList, destinationShardList []string + for _, os := range overlappingShards { var sourceShard *topo.ShardInfo var destinationShards []*topo.ShardInfo @@ -282,15 +285,17 @@ func initCheckpoint(keyspace string, vtworkers []string) (*workflowpb.WorkflowCh sourceShard = os.Right[0] destinationShards = os.Left } - sourceShardList = append(sourceShardList, sourceShard.ShardName()) for _, d := range destinationShards { destinationShardList = append(destinationShardList, d.ShardName()) } } + return sourceShardList, destinationShardList, nil +} +func initCheckpointFromShards(keyspace string, vtworkers, sourceShardList, destinationShardList []string) (*workflowpb.WorkflowCheckpoint, error) { taskMap := make(map[string]*workflowpb.Task) - initTasks(copySchemaName, destinationShardList, taskMap, func(i int, shard string) map[string]string { + initTasks(phaseCopySchema, destinationShardList, taskMap, func(i int, shard string) map[string]string { return map[string]string{ "source_shard": sourceShardList[0], "destination_shard": shard, @@ -298,7 +303,7 @@ func initCheckpoint(keyspace 
string, vtworkers []string) (*workflowpb.WorkflowCh } }) - initTasks(cloneName, sourceShardList, taskMap, func(i int, shard string) map[string]string { + initTasks(phaseClone, sourceShardList, taskMap, func(i int, shard string) map[string]string { return map[string]string{ "source_shard": shard, "vtworker": vtworkers[i], @@ -306,42 +311,43 @@ func initCheckpoint(keyspace string, vtworkers []string) (*workflowpb.WorkflowCh } }) - initTasks(waitForFilteredReplicationName, destinationShardList, taskMap, func(i int, shard string) map[string]string { + initTasks(phaseWaitForFilteredReplication, destinationShardList, taskMap, func(i int, shard string) map[string]string { return map[string]string{ "destination_shard": shard, "keyspace": keyspace, } }) - initTasks(diffName, destinationShardList, taskMap, func(i int, shard string) map[string]string { + initTasks(phaseDiff, destinationShardList, taskMap, func(i int, shard string) map[string]string { return map[string]string{ "destination_shard": shard, - "vtworker": vtworkers[0], "keyspace": keyspace, + "vtworker": vtworkers[0], } }) - initTasks(migrateRdonlyName, sourceShardList, taskMap, func(i int, shard string) map[string]string { + initTasks(phaseMigrateRdonly, sourceShardList, taskMap, func(i int, shard string) map[string]string { return map[string]string{ "source_shard": shard, "keyspace": keyspace, "served_type": topodatapb.TabletType_RDONLY.String(), } }) - initTasks(migrateReplicaName, sourceShardList, taskMap, func(i int, shard string) map[string]string { + initTasks(phaseMigrateReplica, sourceShardList, taskMap, func(i int, shard string) map[string]string { return map[string]string{ "source_shard": shard, "keyspace": keyspace, "served_type": topodatapb.TabletType_REPLICA.String(), } }) - initTasks(migrateMasterName, sourceShardList, taskMap, func(i int, shard string) map[string]string { + initTasks(phaseMigrateMaster, sourceShardList, taskMap, func(i int, shard string) map[string]string { return 
map[string]string{ "source_shard": shard, "keyspace": keyspace, "served_type": topodatapb.TabletType_MASTER.String(), } }) + return &workflowpb.WorkflowCheckpoint{ CodeVersion: codeVersion, Tasks: taskMap, @@ -352,13 +358,13 @@ func initCheckpoint(keyspace string, vtworkers []string) (*workflowpb.WorkflowCh }, nil } -func initTasks(phaseName string, shards []string, taskMap map[string]*workflowpb.Task, createAttributes func(int, string) map[string]string) { - for i, s := range shards { - taskID := createTaskID(phaseName, s) +func initTasks(phase PhaseType, shards []string, taskMap map[string]*workflowpb.Task, getAttributes func(int, string) map[string]string) { + for i, shard := range shards { + taskID := createTaskID(phase, shard) taskMap[taskID] = &workflowpb.Task{ Id: taskID, State: workflowpb.TaskState_TaskNotStarted, - Attributes: createAttributes(i, s), + Attributes: getAttributes(i, shard), } } } diff --git a/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go b/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go index 80cfc4c9346..17fa0d525e5 100644 --- a/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go +++ b/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go @@ -3,11 +3,9 @@ package resharding import ( "context" "flag" - "strings" "testing" "github.com/golang/mock/gomock" - "github.com/golang/protobuf/proto" "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/topo/memorytopo" "github.com/youtube/vitess/go/vt/worker/fakevtworkerclient" @@ -19,140 +17,113 @@ import ( workflowpb "github.com/youtube/vitess/go/vt/proto/workflow" ) -// TestHorizontalResharding runs resharding from 1 shard to 2 shards. +// TestHorizontalResharding runs the happy path of HorizontalReshardingWorkflow. func TestHorizontalResharding(t *testing.T) { - // Initialize the checkpoint for the workflow. 
- oneShard := ReshardingData{ - Keyspace: "test_keyspace", - SourceShard: "0", - DestinationShards: []string{"-80", "80-"}, - Vtworker: "localhost:15032", - } - initCp := createCheckpoint([]ReshardingData{oneShard}) - - // Create the horizontal resharding workflow. - hw := setupWorkflow(t, initCp) - if hw == nil { - return - } - // Create the mock wrangler and set the expected behavior. - // Then pass it to the workflow. + // Set up the mock wrangler. It is used for the CopySchema and Migrate phase. ctrl := gomock.NewController(t) defer ctrl.Finish() - hw.wr = setupMockWrangler(hw.ctx, ctrl) + ctx := context.Background() + mockWranglerInterface := setupMockWrangler(ctx, ctrl) - // Set up the fake vtworkerclient. + // Set up the fakeworkerclient. It is used at SplitClone and SplitDiff phase. fakeVtworkerClient := setupFakeVtworker() vtworkerclient.RegisterFactory("fake", fakeVtworkerClient.FakeVtworkerClientFactory) defer vtworkerclient.UnregisterFactoryForTest("fake") - // Run the workflow. - if err := hw.runWorkflow(); err != nil { - t.Errorf("%s: Horizontal resharding workflow should not fail", err) - } - - // Checking all tasks are Done. - for _, task := range hw.checkpoint.Tasks { - if task.State != workflowpb.TaskState_TaskDone || task.Error != "" { - t.Fatalf("task is not done: Id: %v, State: %v, Attributes:%v", task.Id, task.State, task.Attributes) - } - } -} - -// TestHorizontalReshardingRestart restarts a stopped worklow -// by loading a hand-crafted checkpoint. This checkpoint is used to fake -// the one saved by the killed workflow. It records that some tasks -// in the workflow are finished successfully. -func TestHorizontalReshardingRestart(t *testing.T) { - // Initialize the checkpoint for the workflow. - oneShard := ReshardingData{ - Keyspace: "test_keyspace", - SourceShard: "0", - DestinationShards: []string{"-80", "80-"}, - Vtworker: "localhost:15032", + // Create a checkpoint with initialized tasks. 
+ sourceShards := []string{"0"} + destinationShards := []string{"-80", "80-"} + vtworkers := []string{"localhost:15032"} + checkpoint, err := initCheckpointFromShards("test_keyspace", vtworkers, sourceShards, destinationShards) + if err != nil { + t.Errorf("initialize checkpoint fails: %v", err) } - initCp := createCheckpoint([]ReshardingData{oneShard}) - - // Set checkpoint to record that the copySchemaTask on destination shard - // "-80" succeeded. - t1 := initCp.Tasks[createTaskID(copySchemaName, "dest", "-80")] - t1.State = workflowpb.TaskState_TaskDone - // Set checkpoint to record that the copySchemaTask on destination shard - // "80-" failed with errors. - t2 := initCp.Tasks[createTaskID(copySchemaName, "dest", "80-")] - t2.State = workflowpb.TaskState_TaskDone - t2.Error = "the task CopySchema for shard 80- fails." - // Create the workflow proto message, which will be loaded - // when restarting the stopped workflow. - workflowProto := &workflowpb.Workflow{ - Uuid: "testworkflow0000", - FactoryName: "horizontal_resharding", - State: workflowpb.WorkflowState_Running, + // Create the workflow. 
+ ts := memorytopo.NewServer("cell") + w := &workflowpb.Workflow{ + Uuid: "test_hw", + FactoryName: horizontalReshardingFactoryName, + State: workflowpb.WorkflowState_NotStarted, } - data, err := proto.Marshal(initCp) + wi, err := ts.CreateWorkflow(ctx, w) if err != nil { - t.Errorf("error in encoding checkpoint proto message: %v", err) - } - workflowProto.Data = data - - nodeManager := workflow.NewNodeManager() - rootNode := &workflow.Node{ - PathName: "test_root", - Name: "root", + t.Errorf("initialize WorkflowInfo fails: %v", err) } - if err := nodeManager.AddRootNode(rootNode); err != nil { - t.Errorf("adding root node failed: %v", err) + hw := &HorizontalReshardingWorkflow{ + ctx: ctx, + wr: mockWranglerInterface, + manager: workflow.NewManager(ts), + wi: wi, + topoServer: ts, + logger: logutil.NewMemoryLogger(), + checkpoint: checkpoint, + checkpointWriter: NewCheckpointWriter(ts, checkpoint, wi), } - // The workflow is created using Instantiate method when it is restarted. - var factory *HorizontalReshardingWorkflowFactory - w, err := factory.Instantiate(workflowProto, rootNode) - if err != nil { - t.Errorf("horizontal resharding workflow not instantiated successfully") + if err := hw.runWorkflow(); err != nil { + t.Errorf("%s: Horizontal resharding workflow should not fail", err) } - hw := w.(*HorizontalReshardingWorkflow) - ts := memorytopo.NewServer("cell") - wi, err := ts.CreateWorkflow(context.TODO(), workflowProto) - if err != nil { - t.Errorf("creating workflow fails: %v", err) - } - hw.ctx = context.Background() - hw.topoServer = ts - hw.wi = wi - hw.checkpointWriter = NewCheckpointWriter(hw.topoServer, hw.checkpoint, hw.wi) - if err := hw.checkpointWriter.Save(); err != nil { - t.Errorf("checkpointWriter save fails: %v", err) - } + verifySuccess(t, hw.checkpoint) +} - // Create the mock wrangler and set the expected behavior. - // Then pass it to the workflow. 
+// TestHorizontalReshardingRetry retries a stopped workflow, +// which the tasks are partially finished. +func TestHorizontalReshardingRetry(t *testing.T) { + // Set up mock wrangler. It is used for the CopySchema and Migrate phase. ctrl := gomock.NewController(t) defer ctrl.Finish() - hw.wr = setupMockWranglerRestart(hw.ctx, ctrl) + ctx := context.Background() + mockWranglerInterface := setupMockWrangler(ctx, ctrl) - // Set up the fake vtworkerclient. + // Set up fakeworkerclient. It is used at SplitClone and SplitDiff phase. fakeVtworkerClient := setupFakeVtworker() vtworkerclient.RegisterFactory("fake", fakeVtworkerClient.FakeVtworkerClientFactory) defer vtworkerclient.UnregisterFactoryForTest("fake") - // Run the workflow. + // Create a checkpoint for the stopped workflow. For the stopped workflow, + // the task of copying schema to shard 80- succeed while the task of copying + // schema to shard -80 failed. The rest of tasks haven't been executed. + sourceShards := []string{"0"} + destinationShards := []string{"-80", "80-"} + vtworkers := []string{"localhost:15032"} + checkpoint, err := initCheckpointFromShards("test_keyspace", vtworkers, sourceShards, destinationShards) + if err != nil { + t.Errorf("initialize checkpoint fails: %v", err) + } + setTaskSuccessOrFailure(checkpoint, createTaskID(PhaseCopySchema, "80-"), true /* isSuccess*/) + setTaskSuccessOrFailure(checkpoint, createTaskID(PhaseCopySchema, "-80"), false /* isSuccess*/) + + // Create the workflow. + ts := memorytopo.NewServer("cell") + hw := &HorizontalReshardingWorkflow{ + ctx: ctx, + wr: mockWranglerInterface, + topoServer: ts, + logger: logutil.NewMemoryLogger(), + checkpoint: checkpoint, + } + + // Rerunning the workflow. if err := hw.runWorkflow(); err != nil { t.Errorf("%s: Horizontal resharding workflow should not fail", err) } - // Checking all tasks are Done. 
- for _, task := range hw.checkpoint.Tasks { - if task.State != workflowpb.TaskState_TaskDone || task.Error != "" { - t.Fatalf("task is not done: Id: %v, State: %v, Attributes:%v", task.Id, task.State, task.Attributes) - } + verifySuccess(t, hw.checkpoint) +} + +func setTaskSuccessOrFailure(checkpoint *workflowpb.WorkflowCheckpoint, taskID string, isSuccess bool) { + t := checkpoint.Tasks[taskID] + t.State = workflowpb.TaskState_TaskDone + if !isSuccess { + t.Error = "failed" + } else { + t.Error = "" } } func setupFakeVtworker() *fakevtworkerclient.FakeVtworkerClient { - // Create fakeworkerclient, which is used for the unit test in phase of - // SplitClone and SplitDiff. flag.Set("vtworker_client_protocol", "fake") fakeVtworkerClient := fakevtworkerclient.NewFakeVtworkerClient() fakeVtworkerClient.RegisterResultForAddr("localhost:15032", []string{"SplitClone", "--min_healthy_rdonly_tablets=1", "test_keyspace/0"}, "", nil) @@ -161,37 +132,10 @@ func setupFakeVtworker() *fakevtworkerclient.FakeVtworkerClient { return fakeVtworkerClient } -// setUpWorkflow prepares the test environement for the happy path. -func setupWorkflow(t *testing.T, initCheckpoint *workflowpb.WorkflowCheckpoint) *HorizontalReshardingWorkflow { - ts := memorytopo.NewServer("cell") - // Create fake wrangler using mock interface, which is used for the unit test in steps CopySchema and MigratedServedType. - - hw := &HorizontalReshardingWorkflow{ - topoServer: ts, - logger: logutil.NewMemoryLogger(), - checkpoint: initCheckpoint, - taskUINodeMap: make(map[string]*workflow.Node), - } - - // Create the initial workflowpb.Workflow object. 
- w := &workflowpb.Workflow{ - Uuid: "testworkflow0000", - FactoryName: "horizontal_resharding", - State: workflowpb.WorkflowState_NotStarted, - } - var err error - hw.wi, err = hw.topoServer.CreateWorkflow(hw.ctx, w) - if err != nil { - t.Errorf("%s: Horizontal resharding workflow fails in creating workflowInfo", err) - return nil - } - hw.checkpointWriter = NewCheckpointWriter(hw.topoServer, hw.checkpoint, hw.wi) - return hw -} - -// setupMockWrangler sets the expected behaviors for mock wrangler. -func setupMockWrangler(ctx context.Context, ctrl *gomock.Controller) *MockReshardingWrangler { +func setupMockWranglerForRetry(ctx context.Context, ctrl *gomock.Controller) *MockReshardingWrangler { mockWranglerInterface := NewMockReshardingWrangler(ctrl) + // Set the expected behaviors for mock wrangler. copy schema to shard 80- + // should not be called. mockWranglerInterface.EXPECT().CopySchemaShardFromShard( ctx, nil, /* tableArray*/ @@ -203,17 +147,6 @@ func setupMockWrangler(ctx context.Context, ctrl *gomock.Controller) *MockReshar "-80", wrangler.DefaultWaitSlaveTimeout).Return(nil) - mockWranglerInterface.EXPECT().CopySchemaShardFromShard( - ctx, - nil, /* tableArray*/ - nil, /* excludeTableArray */ - true, /*includeViews*/ - "test_keyspace", - "0", - "test_keyspace", - "80-", - wrangler.DefaultWaitSlaveTimeout).Return(nil) - mockWranglerInterface.EXPECT().WaitForFilteredReplication(ctx, "test_keyspace", "-80", wrangler.DefaultWaitForFilteredReplicationMaxDelay).Return(nil) mockWranglerInterface.EXPECT().WaitForFilteredReplication(ctx, "test_keyspace", "80-", wrangler.DefaultWaitForFilteredReplicationMaxDelay).Return(nil) @@ -234,11 +167,20 @@ func setupMockWrangler(ctx context.Context, ctrl *gomock.Controller) *MockReshar return mockWranglerInterface } -func setupMockWranglerRestart(ctx context.Context, ctrl *gomock.Controller) *MockReshardingWrangler { - // Set the mock wrangler expectations without the call of copyschema - // on shard "-80". 
That task is supposed to be finished - // and must not be called when restarting the workflow. +func setupMockWrangler(ctx context.Context, ctrl *gomock.Controller) *MockReshardingWrangler { mockWranglerInterface := NewMockReshardingWrangler(ctrl) + // Set the expected behaviors for mock wrangler. + mockWranglerInterface.EXPECT().CopySchemaShardFromShard( + ctx, + nil, /* tableArray*/ + nil, /* excludeTableArray */ + true, /*includeViews*/ + "test_keyspace", + "0", + "test_keyspace", + "-80", + wrangler.DefaultWaitSlaveTimeout).Return(nil) + mockWranglerInterface.EXPECT().CopySchemaShardFromShard( ctx, nil, /* tableArray*/ @@ -269,40 +211,10 @@ func setupMockWranglerRestart(ctx context.Context, ctrl *gomock.Controller) *Moc } return mockWranglerInterface } - -// ReshardingData stores the data for resharding one source shard. -type ReshardingData struct { - Keyspace string - SourceShard string - DestinationShards []string - Vtworker string -} - -func createCheckpoint(data []ReshardingData) *workflowpb.WorkflowCheckpoint { - taskMap := make(map[string]*workflowpb.Task) - var sourceList, destinationList []string - - for _, info := range data { - keyspace := info.Keyspace - s := info.SourceShard - worker := info.Vtworker - sourceList = append(sourceList, s) - updatePerSourceTask(keyspace, s, worker, splitCloneName, taskMap) - updatePerSourceTask(keyspace, s, worker, migrateName, taskMap) - for _, d := range info.DestinationShards { - destinationList = append(destinationList, d) - updatePerDestinationTask(keyspace, s, d, worker, copySchemaName, taskMap) - updatePerDestinationTask(keyspace, s, d, worker, waitFilteredReplicationName, taskMap) - updatePerDestinationTask(keyspace, s, d, worker, splitDiffName, taskMap) +func verifySuccess(t *testing.T, checkpoint *workflowpb.WorkflowCheckpoint) { + for _, task := range checkpoint.Tasks { + if task.State != workflowpb.TaskState_TaskDone || task.Error != "" { + t.Fatalf("task: %v should succeed: task status: %v, %v", task.Id, 
task.State, task.Error) } } - - return &workflowpb.WorkflowCheckpoint{ - CodeVersion: codeVersion, - Tasks: taskMap, - Settings: map[string]string{ - "source_shards": strings.Join(sourceList, ","), - "destination_shards": strings.Join(destinationList, ","), - }, - } } diff --git a/go/vt/workflow/resharding/parallel_runner.go b/go/vt/workflow/resharding/parallel_runner.go index 5e0438251c3..a6a8e7a5f4d 100644 --- a/go/vt/workflow/resharding/parallel_runner.go +++ b/go/vt/workflow/resharding/parallel_runner.go @@ -2,6 +2,7 @@ package resharding import ( "fmt" + "path" "sync" log "github.com/golang/glog" @@ -16,7 +17,7 @@ type level int const ( // SEQUENTIAL means that the tasks will run sequentially. - SEQUENTIAL level = 1 + iota + SEQUENTIAL level = iota //PARALLEL means that the tasks will run in parallel. PARALLEL ) @@ -31,15 +32,19 @@ type ParallelRunner struct { // tasks stores selected tasks for the phase with expected execution order. tasks []*workflowpb.Task concurrencyLevel level - executeFunc func(context.Context, map[string]string) error + executeFunc func(context.Context, *workflowpb.Task) error // mu is used to protect the retryActionRegistery. mu sync.Mutex // retryAtionRegistry stores the data for retry actions. // Each task can retrieve its RetryController through its UI node path. retryActionRegistry map[string]*RetryController + // reportTaskStatus gives the worklfow debug option to output the task status through UI. + reportTaskStatus bool + // taskFinished stores the channels for synchroizing the finish of tasks. 
+ taskFinished map[string]chan struct{} } -func NewParallelRunner(ctx context.Context, nodeManager *workflow.NodeManager, phaseUINode *workflow.Node, cp *CheckpointWriter, tasks []*workflowpb.Task, executeFunc func(context.Context, map[string]string) error, concurrencyLevel level) *ParallelRunner { +func NewParallelRunner(ctx context.Context, nodeManager *workflow.NodeManager, phaseUINode *workflow.Node, cp *CheckpointWriter, tasks []*workflowpb.Task, executeFunc func(context.Context, *workflowpb.Task) error, concurrencyLevel level) *ParallelRunner { return &ParallelRunner{ ctx: ctx, nodeManager: nodeManager, @@ -49,6 +54,8 @@ func NewParallelRunner(ctx context.Context, nodeManager *workflow.NodeManager, p executeFunc: executeFunc, concurrencyLevel: concurrencyLevel, retryActionRegistry: make(map[string]*RetryController), + reportTaskStatus: false, + taskFinished: make(map[string]chan struct{}), } } @@ -72,22 +79,24 @@ func (p *ParallelRunner) Run() error { } sem <- true + p.taskFinished[task.Id] = make(chan struct{}) go func(t *workflowpb.Task) { defer func() { <-sem }() + defer close(p.taskFinished[t.Id]) taskID := t.Id for { - err := p.executeFunc(p.ctx, t.Attributes) + err := p.executeFunc(p.ctx, t) // Update the task status in the checkpoint. - if updateErr := p.checkpointWriter.UpdateTask(taskID, workflowpb.TaskState_TaskDone, t.Error); updateErr != nil { + if updateErr := p.checkpointWriter.UpdateTask(taskID, workflowpb.TaskState_TaskDone, err); updateErr != nil { // Only logging the error rather then passing it to ErrorRecorder. // Errors in ErrorRecorder will lead to the stop of a workflow. We // don't want to stop the workflow if only checkpointing fails. log.Errorf("%v", updateErr) } + // The function returns if the task is executed successfully. if err == nil { - t.Error = "" return } // When task fails, first check whether the context is cancelled. 
@@ -97,7 +106,10 @@ func (p *ParallelRunner) Run() error { return default: } - retryChannel := p.addRetryAction(taskID) + + fmt.Printf("enabling retry action for task: %v", taskID) + + retryChannel, registerID := p.addRetryAction(taskID) // Block the task execution until the retry action is triggered // or the context is canceled. @@ -105,12 +117,18 @@ func (p *ParallelRunner) Run() error { case <-retryChannel: continue case <-p.ctx.Done(): - p.retryActionRegistry = nil + p.unregisterRetryController(registerID) return } } }(task) + + // Update task finish information on the UI. + if p.reportTaskStatus { + go p.setFinishUIMessage(task.Id) + } } + // Wait until all running jobs are done. for i := 0; i < parallelNum; i++ { sem <- true @@ -129,24 +147,28 @@ func (p *ParallelRunner) Action(ctx context.Context, pathName, name string) erro } } -func (p *ParallelRunner) addRetryAction(taskID string) chan struct{} { - node, err := p.nodeManager.GetNodeByRelativePath(p.phaseUINode, taskID) +func (p *ParallelRunner) addRetryAction(taskID string) (chan struct{}, string) { + taskNodePath := path.Join(p.phaseUINode.Path, taskID) + node, err := p.nodeManager.GetNodeByPath(taskNodePath) if err != nil { - panic(fmt.Errorf("%v: UI node not found for task %v", err, taskID)) + panic(fmt.Errorf("nodepath %v not found", taskNodePath)) } + retryController := CreateRetryController(node, p /* actionListener */) - p.registerRetryController(node.PathName, retryController) - return retryController.retryChannel + p.registerRetryController(node.Path, retryController) + node.BroadcastChanges(false /* updateChildren */) + return retryController.retryChannel, node.PathName } func (p *ParallelRunner) triggerRetry(nodePath string) error { p.mu.Lock() - defer p.mu.Unlock() - c, ok := p.retryActionRegistry[nodePath] if !ok { + p.mu.Unlock() return fmt.Errorf("Unknown node path for the action: %v", nodePath) } + p.mu.Unlock() + p.unregisterRetryController(nodePath) c.triggerRetry() return nil @@ -170,3 
+192,24 @@ func (p *ParallelRunner) unregisterRetryController(nodePath string) { delete(p.retryActionRegistry, nodePath) } } + +func (p *ParallelRunner) setFinishUIMessage(taskID string) { + done, ok := p.taskFinished[taskID] + if !ok { + panic(fmt.Errorf("the finish channl for task %v not found", taskID)) + } + + taskNodePath := path.Join(p.phaseUINode.Path, taskID) + taskNode, err := p.nodeManager.GetNodeByPath(taskNodePath) + if err != nil { + panic(fmt.Errorf("nodepath %v not found", taskNodePath)) + } + + select { + case <-done: + taskNode.Message = fmt.Sprintf("task %v finished", taskID) + taskNode.BroadcastChanges(false /* updateChildren */) + case <-p.ctx.Done(): + return + } +} diff --git a/go/vt/workflow/resharding/parallel_runner_test.go b/go/vt/workflow/resharding/parallel_runner_test.go index f7a49239068..fea4fc1e9a1 100644 --- a/go/vt/workflow/resharding/parallel_runner_test.go +++ b/go/vt/workflow/resharding/parallel_runner_test.go @@ -2,233 +2,228 @@ package resharding import ( "context" - "errors" "fmt" - "strconv" + "path" "strings" "sync" "testing" + "github.com/golang/protobuf/proto" + "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/memorytopo" "github.com/youtube/vitess/go/vt/workflow" workflowpb "github.com/youtube/vitess/go/vt/proto/workflow" ) -const ( - printName = "Sleep" -) - func TestParallelRunner(t *testing.T) { - cp, err := startWorkflow(5) + ts := memorytopo.NewServer("cell") + m := workflow.NewManager(ts) + + // Run the manager in the background. + wg, cancel, _ := startManager(t, m) + + // Create a testworkflow. 
+ uuid, err := m.Create(context.Background(), testWorkflowFactoryName, []string{"-retry=false", "-count=2"}) if err != nil { - t.Errorf("%s: Fails in creating workflow", err) + t.Fatalf("cannot create testworkflow: %v", err) } - ctx := context.Background() - tasks := GetOrderedPrintTasks(cp.checkpoint) - p := NewParallelRunner(ctx, cp, make(map[string]*workflow.Node), tasks) - executeLog := func(attr map[string]string) error { - t.Logf("The number passed to me is %v \n", attr["number"]) - return nil - } - if err := p.Run(executeLog, PARALLEL); err != nil { - t.Errorf("%s: Parallel Runner should not fail", err) + // Start the job + if err := m.Start(context.Background(), uuid); err != nil { + t.Fatalf("cannot start testworkflow: %v", err) } - // Check whether all tasks are in finished status. - for _, task := range cp.checkpoint.Tasks { - if task.State != workflowpb.TaskState_TaskDone || task.Error != "" { - t.Fatalf("Task info: %v, %v, %v: Parallel Runner task not succeed", task.Id, task.State, task.Attributes) - } + // Wait for the workflow to end. + m.Wait(context.Background(), uuid) + + verifyWorkflowSuccess(context.Background(), t, ts, uuid) + + // Stop the manager. + if err := m.Stop(context.Background(), uuid); err != nil { + t.Fatalf("cannot stop testworkflow: %v", err) } + cancel() + wg.Wait() } func TestParallelRunnerRetryAction(t *testing.T) { - cp, err := startWorkflow(5) + // Tasks in the workflow are forced to fail at the first attempt. Then we + // retry task1, after it is finished successfully, we retry task2. + ts := memorytopo.NewServer("cell") + m := workflow.NewManager(ts) + + // Run the manager in the background. + wg, cancel, ctx := startManager(t, m) + + // Create a testworkflow. 
+ uuid, err := m.Create(context.Background(), testWorkflowFactoryName, []string{"-retry=true", "-count=2"}) if err != nil { - t.Errorf("%s: Fails in creating workflow", err) + t.Fatalf("cannot create testworkflow: %v", err) } - ctx := context.Background() - - // Create UI nodes. Each task has a node. - // These task nodes are the children of a root node. + // We use notifications channel to monitor the update of UI. notifications := make(chan []byte, 10) - nodeManager := workflow.NewNodeManager() - _, index, err := nodeManager.GetAndWatchFullTree(notifications) + _, index, err := m.NodeManager().GetAndWatchFullTree(notifications) if err != nil { t.Errorf("GetAndWatchTree Failed: %v", err) } - defer nodeManager.CloseWatcher(index) - - rootNode := &workflow.Node{ - PathName: "test_root", - Name: "root", - } - if err := nodeManager.AddRootNode(rootNode); err != nil { - t.Errorf("adding root node failed: %v", err) - } - result, ok := <-notifications - - taskNodeMap := make(map[string]*workflow.Node) - for _, task := range cp.checkpoint.Tasks { - taskNode := &workflow.Node{ - PathName: task.Id, - Name: "task_" + task.Id, - } - taskNodeMap[task.Id] = taskNode - rootNode.Children = append(rootNode.Children, taskNode) - } - - rootNode.BroadcastChanges(true /*updateChildren*/) - - result, ok = <-notifications - if !ok || - strings.Contains(string(result), `"children":[]`) || - !strings.Contains(string(result), `"name":"task_Sleep_0"`) || - !strings.Contains(string(result), `"name":"task_Sleep_1"`) || - !strings.Contains(string(result), `"name":"task_Sleep_2"`) || - !strings.Contains(string(result), `"name":"task_Sleep_3"`) || - !strings.Contains(string(result), `"name":"task_Sleep_4"`) { - t.Errorf("unexpected behavior in adding children nodes: %v, %v", ok, string(result)) - } - - // Set up ParallelRunner. 
- tasks := GetOrderedPrintTasks(cp.checkpoint) - p := NewParallelRunner(ctx, cp, taskNodeMap, tasks) + defer m.NodeManager().CloseWatcher(index) + go func() { + // This goroutine is used to detect and trigger the retry actions. + task1ID := createTestTaskID(PhaseSimple, 0) + task2ID := createTestTaskID(PhaseSimple, 1) - // Set retry flag to be false. The targeting task will fail under this condition. - retryFlag := false - errMessage := "fake error for testing retry" - executeLog := func(attr map[string]string) error { - t.Logf("The number passed to me is %v \n", attr["number"]) - n, err := strconv.Atoi(attr["number"]) + task1Node, err := m.NodeManager().GetNodeByPath(path.Join("/"+uuid, task1ID)) if err != nil { - t.Logf("Converting number string to int fails: %v \n", attr["number"]) - return err + t.Errorf("fail to find node for task %v: %v", task1ID, err) } - if !retryFlag { - if n == 3 { - t.Logf("I will fail at this time since retry flag is false.") - return errors.New(errMessage) - } + task2Node, err := m.NodeManager().GetNodeByPath(path.Join("/"+uuid, task2ID)) + if err != nil { + t.Errorf("fail to find node for task %v: %v", task2ID, err) } - return nil - } - go func() { - // This goroutine is used to monitor the UI change. - // When the retry action is enabled, it will trigger it using nodemanager. + retry1 := false + retry2 := false for { select { - case mornitor := <-notifications: - if strings.Contains(string(mornitor), "Retry") { - // Check if Retry action is enabled for the expected task. - taskName := logTaskName(3) - nodeTarget := taskNodeMap[taskName] - taskTarget := cp.checkpoint.Tasks[taskName] - if taskTarget.State != workflowpb.TaskState_TaskDone || - taskTarget.Error != errMessage || - len(nodeTarget.Actions) != 1 { - t.Fatalf("Retry action is not enabled as expectedL %v, %v, %v", &nodeTarget, taskTarget.State, taskTarget.Error) - } - - // Reset the retryFlag to make the task succeed when retrying. 
- retryFlag = true - - t.Logf("Triggering retry action.") - if err := nodeManager.Action(ctx, &workflow.ActionParameters{ - Path: "/test_root/" + logTaskName(3), - Name: "Retry", - }); err != nil { - t.Errorf("unexpected action error: %v", err) + case monitor, ok := <-notifications: + monitorStr := string(monitor) + if !ok { + t.Errorf("notifications channel is closed unexpectedly: %v, %v", ok, monitorStr) + } + if strings.Contains(monitorStr, "Retry") { + if strings.Contains(monitorStr, task1ID) { + verifyTaskSuccessOrFailure(context.Background(), t, ts, uuid, task1ID, false /* isSuccess*/) + verifyRetryAction(t, task1Node) + retry1 = true } - - if len(nodeTarget.Actions) != 0 { - t.Fatalf("the node actions should be empty after triggering retry: %v", nodeTarget.Actions) + if strings.Contains(monitorStr, task2ID) { + verifyTaskSuccessOrFailure(context.Background(), t, ts, uuid, task2ID, false /* isSuccess*/) + verifyRetryAction(t, task2Node) + retry2 = true } + } + // After detecting both tasks have enabled retry actions after failure, + // retry task1, check its success, then retry task2, check its success. + if retry1 && retry2 { + clickRetry(ctx, t, m, task1Node.Path) + waitForFinished(ctx, t, notifications, task1ID, task1Node) + verifyTaskSuccessOrFailure(context.Background(), t, ts, uuid, task1ID, true /* isSuccess*/) + + clickRetry(ctx, t, m, task2Node.Path) + waitForFinished(ctx, t, notifications, task2ID, task2Node) + verifyTaskSuccessOrFailure(context.Background(), t, ts, uuid, task2ID, true /* isSuccess*/) return } case <-ctx.Done(): + t.Errorf("context is canceled") return } } }() - // Call ParallelRunner.Run through a goroutine. In this way, - // the failure of task will not block the main function. - waitGroup := sync.WaitGroup{} - waitGroup.Add(1) + // Start the job + if err := m.Start(context.Background(), uuid); err != nil { + t.Fatalf("cannot start testworkflow: %v", err) + } + // Wait for the workflow to end. 
+ m.Wait(context.Background(), uuid) + + verifyWorkflowSuccess(context.Background(), t, ts, uuid) + // Stop the manager. + if err := m.Stop(context.Background(), uuid); err != nil { + t.Fatalf("cannot stop testworkflow: %v", err) + } + cancel() + wg.Wait() +} + +func startManager(t *testing.T, m *workflow.Manager) (*sync.WaitGroup, context.CancelFunc, context.Context) { + // Run the manager in the background. + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) go func() { - defer waitGroup.Done() - err := p.Run(executeLog, PARALLEL) - if err != nil { - t.Logf("ParallelRunner.Run fails: %v", err) - } + m.Run(ctx) + wg.Done() }() - waitGroup.Wait() - // Check that all tasks are finished successfully. - for _, task := range cp.checkpoint.Tasks { - if task.State != workflowpb.TaskState_TaskDone || task.Error != "" { - t.Fatalf("Task info: %v, %v, %v: Parallel Runner task not succeed", task.Id, task.State, task.Attributes) - } + m.WaitUntilRunning() + return wg, cancel, ctx +} + +func clickRetry(ctx context.Context, t *testing.T, m *workflow.Manager, nodePath string) { + t.Logf("Click retry action on node: %v.", nodePath) + if err := m.NodeManager().Action(ctx, &workflow.ActionParameters{ + Path: nodePath, + Name: "Retry", + }); err != nil { + t.Errorf("unexpected action error: %v", err) } } -func startWorkflow(taskNum int) (*CheckpointWriter, error) { - initCheckpoint := InitPrintTasks(taskNum) +func waitForFinished(ctx context.Context, t *testing.T, notifications chan []byte, taskID string, node *workflow.Node) { + for { + select { + case monitor, ok := <-notifications: + if !ok { + t.Errorf("unexpected notification: %v, %v", ok, string(monitor)) + } - w := &workflowpb.Workflow{ - Uuid: "testparallelrunner", - FactoryName: "simple_print", - State: workflowpb.WorkflowState_NotStarted, + finishMessage := fmt.Sprintf(`"message":"task %v finished"`, taskID) + if strings.Contains(string(monitor), finishMessage) { + if 
len(node.Actions) != 0 { + t.Fatalf("the node actions should be empty after triggering retry: %v", node.Actions) + } + return + } + case <-ctx.Done(): + return + } } - ts := memorytopo.NewServer("cell") - wi, err := ts.CreateWorkflow(context.TODO(), w) +} + +func verifyWorkflowSuccess(ctx context.Context, t *testing.T, ts topo.Server, uuid string) { + wi, err := ts.GetWorkflow(ctx, uuid) if err != nil { - return nil, err + t.Errorf("fail to get workflow for: %v", uuid) + } + checkpoint := &workflowpb.WorkflowCheckpoint{} + if err := proto.Unmarshal(wi.Workflow.Data, checkpoint); err != nil { + t.Errorf("fails to get checkpoint for the workflow: %v", err) } - cp := NewCheckpointWriter(ts, initCheckpoint, wi) - cp.Save() - return cp, nil + for _, task := range checkpoint.Tasks { + if task.State != workflowpb.TaskState_TaskDone || task.Error != "" { + t.Fatalf("task: %v should succeed: task status: %v, %v", task.Id, task.State, task.Attributes) + } + } } -func logTaskName(num int) string { - return fmt.Sprintf("%v_%v", printName, num) -} +func verifyTaskSuccessOrFailure(ctx context.Context, t *testing.T, ts topo.Server, uuid, taskID string, isSuccess bool) { + wi, err := ts.GetWorkflow(ctx, uuid) + if err != nil { + t.Errorf("fail to get workflow for: %v", uuid) + } -func InitPrintTasks(numTasks int) *workflowpb.WorkflowCheckpoint { - tasks := make(map[string]*workflowpb.Task) - var infoList []string - for i := 0; i < numTasks; i++ { - numStr := fmt.Sprintf("%v", i) - t := &workflowpb.Task{ - Id: logTaskName(i), - State: workflowpb.TaskState_TaskNotStarted, - Attributes: map[string]string{"number": numStr}, - } - tasks[t.Id] = t - infoList = append(infoList, numStr) + checkpoint := &workflowpb.WorkflowCheckpoint{} + if err := proto.Unmarshal(wi.Workflow.Data, checkpoint); err != nil { + t.Errorf("fails to get checkpoint for the workflow: %v", err) } - return &workflowpb.WorkflowCheckpoint{ - CodeVersion: 0, - Tasks: tasks, - Settings: map[string]string{"numbers": 
strings.Join(infoList, ",")}, + task := checkpoint.Tasks[taskID] + + taskError := "" + if !isSuccess { + taskError = errMessage + } + if task.State != workflowpb.TaskState_TaskDone || task.Error != taskError { + t.Errorf("task: %v should succeed. Task status: %v, %v", task.State, task.Error) } } -func GetOrderedPrintTasks(checkpoint *workflowpb.WorkflowCheckpoint) []*workflowpb.Task { - var tasks []*workflowpb.Task - for _, n := range strings.Split(checkpoint.Settings["numbers"], ",") { - num, err := strconv.Atoi(n) - if err != nil { - return nil - } - taskID := logTaskName(num) - tasks = append(tasks, checkpoint.Tasks[taskID]) +func verifyRetryAction(t *testing.T, node *workflow.Node) { + if len(node.Actions) != 1 || node.Actions[0].Name != "Retry" { + t.Errorf("unexpected Ation values: %v", node.Actions) } - return tasks } diff --git a/go/vt/workflow/resharding/retry_controller.go b/go/vt/workflow/resharding/retry_controller.go index e7e59f141af..6ae0482d7d6 100644 --- a/go/vt/workflow/resharding/retry_controller.go +++ b/go/vt/workflow/resharding/retry_controller.go @@ -20,7 +20,6 @@ func CreateRetryController(node *workflow.Node, actionListener workflow.ActionLi } node.Actions = []*workflow.Action{retryAction} node.Listener = actionListener - node.BroadcastChanges(false /* updateChildren */) return &RetryController{ node: node, retryChannel: make(chan struct{}), diff --git a/go/vt/workflow/resharding/task.go b/go/vt/workflow/resharding/task.go index 68daeae4572..7a8e47ce90c 100644 --- a/go/vt/workflow/resharding/task.go +++ b/go/vt/workflow/resharding/task.go @@ -14,35 +14,33 @@ import ( workflowpb "github.com/youtube/vitess/go/vt/proto/workflow" ) -func createTaskID(phase, shardName string) string { +func createTaskID(phase PhaseType, shardName string) string { return fmt.Sprintf("%s_%s", phase, shardName) } // GetTasks returns selected tasks for a phase from the checkpoint // with expected execution order. 
-func (hw *HorizontalReshardingWorkflow) GetTasks(checkpoint *workflowpb.WorkflowCheckpoint, phaseName string) []*workflowpb.Task { +func (hw *HorizontalReshardingWorkflow) GetTasks(checkpoint *workflowpb.WorkflowCheckpoint, phase PhaseType) []*workflowpb.Task { var shards []string - switch phaseName { - case copySchemaName, waitForFilteredReplicationName, diffName: + switch phase { + case phaseCopySchema, phaseWaitForFilteredReplication, phaseDiff: shards = strings.Split(checkpoint.Settings["destination_shards"], ",") - case cloneName, migrateRdonlyName, migrateReplicaName, migrateMasterName: + case phaseClone, phaseMigrateRdonly, phaseMigrateReplica, phaseMigrateMaster: shards = strings.Split(checkpoint.Settings["source_shards"], ",") } var tasks []*workflowpb.Task for _, s := range shards { - taskID := createTaskID(phaseName, s) + taskID := createTaskID(phase, s) tasks = append(tasks, checkpoint.Tasks[taskID]) } return tasks } -// runCopySchema runs CopySchema for a destination shard. -// There should be #destshards parameters, while each param includes 1 sourceshard and 1 destshard. -func (hw *HorizontalReshardingWorkflow) runCopySchema(ctx context.Context, attributes map[string]string) error { - s := attributes["source_shard"] - d := attributes["destination_shard"] - keyspace := attributes["keyspace"] +func (hw *HorizontalReshardingWorkflow) runCopySchema(ctx context.Context, t *workflowpb.Task) error { + s := t.Attributes["source_shard"] + d := t.Attributes["destination_shard"] + keyspace := t.Attributes["keyspace"] err := hw.wr.CopySchemaShardFromShard(ctx, nil /* tableArray*/, nil /* excludeTableArray */, true, /*includeViews*/ keyspace, s, keyspace, d, wrangler.DefaultWaitSlaveTimeout) if err != nil { @@ -52,12 +50,10 @@ func (hw *HorizontalReshardingWorkflow) runCopySchema(ctx context.Context, attri return err } -// runSplitClone runs SplitClone for a source shard. 
-// There should be #sourceshards parameters, while each param includes 1 sourceshard and its destshards. The destShards are useless here. -func (hw *HorizontalReshardingWorkflow) runSplitClone(ctx context.Context, attributes map[string]string) error { - s := attributes["source_shard"] - worker := attributes["vtworker"] - keyspace := attributes["keyspace"] +func (hw *HorizontalReshardingWorkflow) runSplitClone(ctx context.Context, t *workflowpb.Task) error { + s := t.Attributes["source_shard"] + worker := t.Attributes["vtworker"] + keyspace := t.Attributes["keyspace"] sourceKeyspaceShard := topoproto.KeyspaceShardString(keyspace, s) // Reset the vtworker to avoid error if vtworker command has been called elsewhere. @@ -76,11 +72,9 @@ func (hw *HorizontalReshardingWorkflow) runSplitClone(ctx context.Context, attri return nil } -// runWaitForFilteredReplication runs WaitForFilteredReplication for a destination shard. -// There should be #destshards parameters, while each param includes 1 sourceshard and 1 destshard. -func (hw *HorizontalReshardingWorkflow) runWaitForFilteredReplication(ctx context.Context, attributes map[string]string) error { - d := attributes["destination_shard"] - keyspace := attributes["keyspace"] +func (hw *HorizontalReshardingWorkflow) runWaitForFilteredReplication(ctx context.Context, t *workflowpb.Task) error { + d := t.Attributes["destination_shard"] + keyspace := t.Attributes["keyspace"] if err := hw.wr.WaitForFilteredReplication(ctx, keyspace, d, wrangler.DefaultWaitForFilteredReplicationMaxDelay); err != nil { hw.logger.Infof("Horizontal Resharding: error in WaitForFilteredReplication: %v.", err) @@ -90,11 +84,10 @@ func (hw *HorizontalReshardingWorkflow) runWaitForFilteredReplication(ctx contex return nil } -// runSplitDiff runs SplitDiff for a destination shard. 
-func (hw *HorizontalReshardingWorkflow) runSplitDiff(ctx context.Context, attributes map[string]string) error { - d := attributes["destination_shard"] - worker := attributes["vtworker"] - keyspace := attributes["keyspace"] +func (hw *HorizontalReshardingWorkflow) runSplitDiff(ctx context.Context, t *workflowpb.Task) error { + d := t.Attributes["destination_shard"] + worker := t.Attributes["vtworker"] + keyspace := t.Attributes["keyspace"] automation.ExecuteVtworker(hw.ctx, worker, []string{"Reset"}) args := []string{"SplitDiff", "--min_healthy_rdonly_tablets=1", topoproto.KeyspaceShardString(keyspace, d)} @@ -107,12 +100,10 @@ func (hw *HorizontalReshardingWorkflow) runSplitDiff(ctx context.Context, attrib return nil } -// runMigrate runs the migration sequentially among all source shards. -// There should be 1 parameter, which includes all source shards to be migrated. -func (hw *HorizontalReshardingWorkflow) runMigrate(ctx context.Context, attributes map[string]string) error { - s := attributes["source_shard"] - keyspace := attributes["keyspace"] - servedTypeStr := attributes["served_type"] +func (hw *HorizontalReshardingWorkflow) runMigrate(ctx context.Context, t *workflowpb.Task) error { + s := t.Attributes["source_shard"] + keyspace := t.Attributes["keyspace"] + servedTypeStr := t.Attributes["served_type"] servedType, err := topoproto.ParseTabletType(servedTypeStr) if err != nil { From 2fb92cff0cd7da9fce3d36c823291eef547ef891 Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Fri, 17 Feb 2017 11:29:26 -0500 Subject: [PATCH 010/108] rebuild vschema too --- go/vt/tabletmanager/initial_rebuild.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/go/vt/tabletmanager/initial_rebuild.go b/go/vt/tabletmanager/initial_rebuild.go index 56a41d2cfe8..8b4bb1c4f84 100644 --- a/go/vt/tabletmanager/initial_rebuild.go +++ b/go/vt/tabletmanager/initial_rebuild.go @@ -33,5 +33,10 @@ func (agent *ActionAgent) maybeRebuildKeyspace(cell, keyspace string) { if err := 
topotools.RebuildKeyspace(agent.batchCtx, logutil.NewConsoleLogger(), agent.TopoServer, keyspace, []string{cell}); err != nil { log.Warningf("RebuildKeyspace(%v,%v) failed: %v, may need to run 'vtctl RebuildKeyspaceGraph %v')", cell, keyspace, err, keyspace) + return + } + + if err := topotools.RebuildVSchema(agent.batchCtx, logutil.NewConsoleLogger(), agent.TopoServer, []string{cell}); err != nil { + log.Warningf("RebuildVSchema(%v) failed: %v, may need to run 'vtctl RebuildVSchemaGraph --cell %v", cell, err, cell) } } From 15f8c81d42dc2c471497d85d1ee36cc5a5d35fc7 Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Fri, 17 Feb 2017 13:36:33 -0500 Subject: [PATCH 011/108] typo --- go/vt/tabletmanager/initial_rebuild.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/vt/tabletmanager/initial_rebuild.go b/go/vt/tabletmanager/initial_rebuild.go index 8b4bb1c4f84..540787606b4 100644 --- a/go/vt/tabletmanager/initial_rebuild.go +++ b/go/vt/tabletmanager/initial_rebuild.go @@ -37,6 +37,6 @@ func (agent *ActionAgent) maybeRebuildKeyspace(cell, keyspace string) { } if err := topotools.RebuildVSchema(agent.batchCtx, logutil.NewConsoleLogger(), agent.TopoServer, []string{cell}); err != nil { - log.Warningf("RebuildVSchema(%v) failed: %v, may need to run 'vtctl RebuildVSchemaGraph --cell %v", cell, err, cell) + log.Warningf("RebuildVSchema(%v) failed: %v, may need to run 'vtctl RebuildVSchemaGraph --cells %v", cell, err, cell) } } From 7c80ce5429209b6c404ad6d79645f0c17de7f058 Mon Sep 17 00:00:00 2001 From: Michael Berlin Date: Tue, 21 Feb 2017 22:57:20 -0800 Subject: [PATCH 012/108] vtgate/buffer: Fix bug that keyspaces were not logged at startup. This happened only when shards were included as well. 
--- go/vt/vtgate/buffer/buffer.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/go/vt/vtgate/buffer/buffer.go b/go/vt/vtgate/buffer/buffer.go index cf5ba44c61e..4345eda5c4f 100644 --- a/go/vt/vtgate/buffer/buffer.go +++ b/go/vt/vtgate/buffer/buffer.go @@ -99,13 +99,13 @@ func New() *Buffer { header := "Buffering limited to configured " limited := "" if len(keyspaces) > 0 { - limited = "keyspaces: " + setToString(keyspaces) + limited += "keyspaces: " + setToString(keyspaces) } if len(shards) > 0 { if limited == "" { limited += " and " } - limited = "shards: " + setToString(shards) + limited += "shards: " + setToString(shards) } if limited != "" { limited = header + limited From d3bb9005147a80426d70da8755e75e1e919778c1 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Wed, 15 Feb 2017 17:18:42 -0800 Subject: [PATCH 013/108] vterrors: introduce vtrpcpb.Code Created the Code enum to match grpc error codes, and renamed all previous error codes to not have a name clash. Also renamed The older field to LegacyCode. 
--- .../vtgateclienttest/goclienttest/errors.go | 4 +- go/cmd/vtgateclienttest/services/errors.go | 4 +- go/sqltypes/proto3_test.go | 6 +- go/vt/proto/automation/automation.pb.go | 2 +- .../automationservice/automationservice.pb.go | 2 +- go/vt/proto/binlogdata/binlogdata.pb.go | 2 +- go/vt/proto/binlogservice/binlogservice.pb.go | 2 +- go/vt/proto/logutil/logutil.pb.go | 2 +- go/vt/proto/mysqlctl/mysqlctl.pb.go | 2 +- go/vt/proto/query/query.pb.go | 2 +- go/vt/proto/queryservice/queryservice.pb.go | 2 +- .../replicationdata/replicationdata.pb.go | 2 +- go/vt/proto/tableacl/tableacl.pb.go | 2 +- .../tabletmanagerdata/tabletmanagerdata.pb.go | 2 +- .../tabletmanagerservice.pb.go | 2 +- go/vt/proto/throttlerdata/throttlerdata.pb.go | 2 +- .../throttlerservice/throttlerservice.pb.go | 2 +- go/vt/proto/topodata/topodata.pb.go | 2 +- go/vt/proto/vschema/vschema.pb.go | 2 +- go/vt/proto/vtctldata/vtctldata.pb.go | 2 +- go/vt/proto/vtctlservice/vtctlservice.pb.go | 2 +- go/vt/proto/vtgate/vtgate.pb.go | 2 +- go/vt/proto/vtgateservice/vtgateservice.pb.go | 2 +- go/vt/proto/vtrpc/vtrpc.pb.go | 287 ++++++++++++++---- go/vt/proto/vttest/vttest.pb.go | 2 +- go/vt/proto/vtworkerdata/vtworkerdata.pb.go | 2 +- .../vtworkerservice/vtworkerservice.pb.go | 2 +- go/vt/proto/workflow/workflow.pb.go | 2 +- go/vt/tabletserver/query_executor.go | 6 +- go/vt/tabletserver/query_executor_test.go | 4 +- go/vt/tabletserver/sandboxconn/sandboxconn.go | 10 +- go/vt/tabletserver/tabletconn/grpc_error.go | 2 +- .../tabletconntest/tabletconntest.go | 6 +- go/vt/tabletserver/tabletenv/tablet_error.go | 6 +- .../tabletenv/tablet_error_test.go | 6 +- go/vt/tabletserver/tabletserver.go | 2 +- go/vt/tabletserver/tabletserver_test.go | 4 +- go/vt/tabletserver/tx_executor.go | 2 +- go/vt/tabletserver/tx_pool.go | 2 +- go/vt/vterrors/aggregate.go | 26 +- go/vt/vterrors/grpc.go | 20 +- go/vt/vterrors/proto3.go | 6 +- go/vt/vtgate/resolver_test.go | 2 +- go/vt/vtgate/scatter_conn.go | 2 +- 
go/vt/vtgate/scatter_conn_test.go | 2 +- go/vt/vtgate/vtgate.go | 4 +- go/vt/vtgate/vtgate_test.go | 10 +- go/vt/worker/grpcvtworkerclient/client.go | 2 +- go/vt/worker/instance.go | 2 +- .../vtworkerclienttest/client_testsuite.go | 2 +- proto/vtrpc.proto | 154 +++++++++- py/vtproto/vtrpc_pb2.py | 133 +++++++- 52 files changed, 580 insertions(+), 184 deletions(-) diff --git a/go/cmd/vtgateclienttest/goclienttest/errors.go b/go/cmd/vtgateclienttest/goclienttest/errors.go index af55e68843c..490f68505bf 100644 --- a/go/cmd/vtgateclienttest/goclienttest/errors.go +++ b/go/cmd/vtgateclienttest/goclienttest/errors.go @@ -26,10 +26,10 @@ var ( executeErrors = map[string]vtrpcpb.ErrorCode{ "bad input": vtrpcpb.ErrorCode_BAD_INPUT, - "deadline exceeded": vtrpcpb.ErrorCode_DEADLINE_EXCEEDED, + "deadline exceeded": vtrpcpb.ErrorCode_DEADLINE_EXCEEDED_LEGACY, "integrity error": vtrpcpb.ErrorCode_INTEGRITY_ERROR, "transient error": vtrpcpb.ErrorCode_TRANSIENT_ERROR, - "unauthenticated": vtrpcpb.ErrorCode_UNAUTHENTICATED, + "unauthenticated": vtrpcpb.ErrorCode_UNAUTHENTICATED_LEGACY, "aborted": vtrpcpb.ErrorCode_NOT_IN_TX, "unknown error": vtrpcpb.ErrorCode_UNKNOWN_ERROR, } diff --git a/go/cmd/vtgateclienttest/services/errors.go b/go/cmd/vtgateclienttest/services/errors.go index 0c0e552dc97..277e10f95d2 100644 --- a/go/cmd/vtgateclienttest/services/errors.go +++ b/go/cmd/vtgateclienttest/services/errors.go @@ -86,7 +86,7 @@ func trimmedRequestToError(received string) error { ) case "deadline exceeded": return vterrors.FromError( - vtrpcpb.ErrorCode_DEADLINE_EXCEEDED, + vtrpcpb.ErrorCode_DEADLINE_EXCEEDED_LEGACY, errors.New("vtgate test client forced error: deadline exceeded"), ) case "integrity error": @@ -107,7 +107,7 @@ func trimmedRequestToError(received string) error { ) case "unauthenticated": return vterrors.FromError( - vtrpcpb.ErrorCode_UNAUTHENTICATED, + vtrpcpb.ErrorCode_UNAUTHENTICATED_LEGACY, errors.New("vtgate test client forced error: unauthenticated"), ) case 
"aborted": diff --git a/go/sqltypes/proto3_test.go b/go/sqltypes/proto3_test.go index 1bf897a676a..634a9fa4471 100644 --- a/go/sqltypes/proto3_test.go +++ b/go/sqltypes/proto3_test.go @@ -244,7 +244,7 @@ func TestQueryReponses(t *testing.T) { QueryError: nil, }, { QueryResult: nil, - QueryError: vterrors.FromError(vtrpc.ErrorCode_DEADLINE_EXCEEDED, errors.New("deadline exceeded")), + QueryError: vterrors.FromError(vtrpc.ErrorCode_DEADLINE_EXCEEDED_LEGACY, errors.New("deadline exceeded")), }, } @@ -287,8 +287,8 @@ func TestQueryReponses(t *testing.T) { }, }, { Error: &vtrpc.RPCError{ - Code: vtrpc.ErrorCode_DEADLINE_EXCEEDED, - Message: "deadline exceeded", + LegacyCode: vtrpc.ErrorCode_DEADLINE_EXCEEDED_LEGACY, + Message: "deadline exceeded", }, Result: nil, }, diff --git a/go/vt/proto/automation/automation.pb.go b/go/vt/proto/automation/automation.pb.go index c63e1a8151b..317772d05f7 100644 --- a/go/vt/proto/automation/automation.pb.go +++ b/go/vt/proto/automation/automation.pb.go @@ -251,7 +251,7 @@ func init() { proto.RegisterFile("automation.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 562 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x94, 0xdb, 0x6a, 0xdb, 0x4c, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0xdb, 0x6a, 0xdb, 0x4c, 0x14, 0x85, 0x7f, 0xc9, 0x87, 0xbf, 0xde, 0x6a, 0x1c, 0x31, 0x34, 0x46, 0x09, 0x4d, 0x23, 0xab, 0x37, 0x26, 0x05, 0x43, 0x9d, 0x8b, 0x94, 0xb4, 0x85, 0x1a, 0x5b, 0x84, 0xe0, 0x22, 0x85, 0xb1, 0x4c, 0xa1, 0xbd, 0x30, 0x53, 0x67, 0x2e, 0x54, 0xcb, 0x92, 0x32, 0x33, 0x2a, 0xf8, 0x05, 0xfa, diff --git a/go/vt/proto/automationservice/automationservice.pb.go b/go/vt/proto/automationservice/automationservice.pb.go index 08be3dbb684..d856f86f0dc 100644 --- a/go/vt/proto/automationservice/automationservice.pb.go +++ b/go/vt/proto/automationservice/automationservice.pb.go @@ -148,7 +148,7 @@ func init() { 
proto.RegisterFile("automationservice.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 150 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0x2c, 0x2d, 0xc9, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0x2c, 0x2d, 0xc9, 0xcf, 0x4d, 0x2c, 0xc9, 0xcc, 0xcf, 0x2b, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xc4, 0x90, 0x90, 0x12, 0x40, 0x08, 0x41, 0x14, 0x19, 0x35, 0x32, 0x71, 0x71, 0x39, 0xc2, 0x05, 0x85, 0x4a, 0xb8, 0xc4, 0x5d, 0xf3, 0x0a, 0x4b, 0x53, 0x4b, 0x53, diff --git a/go/vt/proto/binlogdata/binlogdata.pb.go b/go/vt/proto/binlogdata/binlogdata.pb.go index 57c00efc7ff..46c45fb7550 100644 --- a/go/vt/proto/binlogdata/binlogdata.pb.go +++ b/go/vt/proto/binlogdata/binlogdata.pb.go @@ -247,7 +247,7 @@ func init() { proto.RegisterFile("binlogdata.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 540 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x54, 0x5d, 0x6e, 0xda, 0x40, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x54, 0x5d, 0x6e, 0xda, 0x40, 0x10, 0xae, 0xb1, 0x43, 0xec, 0x71, 0x9a, 0x2c, 0x9b, 0x26, 0xb2, 0x90, 0x2a, 0x21, 0xbf, 0x94, 0x97, 0xba, 0x95, 0x7b, 0x02, 0x6c, 0xaf, 0x10, 0xc9, 0x02, 0xd1, 0xe2, 0xbc, 0xf4, 0xc5, 0x32, 0x64, 0x4b, 0x11, 0xc4, 0x06, 0xef, 0x26, 0x2a, 0xe7, 0xe8, 0x29, 0x7a, 0x91, 0xde, 0xa4, 0xf7, diff --git a/go/vt/proto/binlogservice/binlogservice.pb.go b/go/vt/proto/binlogservice/binlogservice.pb.go index 451617a6f5d..b5eac227ece 100644 --- a/go/vt/proto/binlogservice/binlogservice.pb.go +++ b/go/vt/proto/binlogservice/binlogservice.pb.go @@ -204,7 +204,7 @@ func init() { proto.RegisterFile("binlogservice.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 149 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 
0x4e, 0xca, 0xcc, 0xcb, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4e, 0xca, 0xcc, 0xcb, 0xc9, 0x4f, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x45, 0x11, 0x94, 0x12, 0x80, 0x70, 0x53, 0x12, 0x4b, 0x12, 0x21, 0x0a, 0x8c, 0x0e, 0x31, 0x72, 0xf1, 0x84, 0x16, 0xa4, 0x24, 0x96, 0xa4, 0x06, 0x97, 0x14, 0xa5, 0x26, 0xe6, 0x0a, 0x45, diff --git a/go/vt/proto/logutil/logutil.pb.go b/go/vt/proto/logutil/logutil.pb.go index 43dbdd597d3..a3e7a31b2bf 100644 --- a/go/vt/proto/logutil/logutil.pb.go +++ b/go/vt/proto/logutil/logutil.pb.go @@ -104,7 +104,7 @@ func init() { proto.RegisterFile("logutil.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 235 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x4c, 0x90, 0x41, 0x4b, 0xc3, 0x40, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x90, 0x41, 0x4b, 0xc3, 0x40, 0x10, 0x85, 0xdd, 0x66, 0xd7, 0xd8, 0x09, 0x2d, 0x61, 0xf0, 0xb0, 0xc7, 0x58, 0x3c, 0x04, 0x0f, 0x3d, 0x54, 0xf0, 0xae, 0x12, 0xa5, 0x50, 0x12, 0x18, 0x05, 0xcf, 0x55, 0x47, 0x59, 0xd8, 0xee, 0x8a, 0x4d, 0xf3, 0x33, 0xfc, 0xcd, 0x92, 0x89, 0x91, 0xde, 0xe6, 0x7d, 0xef, 0xf1, 0xde, 0xb2, diff --git a/go/vt/proto/mysqlctl/mysqlctl.pb.go b/go/vt/proto/mysqlctl/mysqlctl.pb.go index c6b3947c555..d7e00e1ed55 100644 --- a/go/vt/proto/mysqlctl/mysqlctl.pb.go +++ b/go/vt/proto/mysqlctl/mysqlctl.pb.go @@ -292,7 +292,7 @@ func init() { proto.RegisterFile("mysqlctl.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 289 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x4b, 0xfb, 0x30, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x4b, 0xfb, 0x30, 0x1c, 0xc5, 0x7f, 0xfd, 0x89, 0x52, 0xbf, 0x6e, 0x56, 0xa2, 0x76, 0x5d, 0x41, 0xad, 0x39, 0xc8, 0x4e, 0x13, 0xf4, 0xa4, 0x37, 0x29, 0x78, 0x13, 0x21, 0x43, 0xf0, 0x56, 
0xaa, 0xcd, 0x6a, 0xa1, 0x26, 0x5d, 0x92, 0x32, 0xfc, 0xc7, 0xfc, 0xfb, 0xc4, 0x34, 0xe9, 0x3a, 0x3b, 0x3d, 0xf6, 0x7d, diff --git a/go/vt/proto/query/query.pb.go b/go/vt/proto/query/query.pb.go index 1dfb9d76cf4..188b4ac6d10 100644 --- a/go/vt/proto/query/query.pb.go +++ b/go/vt/proto/query/query.pb.go @@ -2090,7 +2090,7 @@ func init() { proto.RegisterFile("query.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 2797 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x3a, 0x49, 0x73, 0x1b, 0xc7, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x3a, 0x49, 0x73, 0x1b, 0xc7, 0xd5, 0x1a, 0x2c, 0x24, 0xf0, 0x40, 0x80, 0xcd, 0x06, 0x69, 0xc1, 0x94, 0x17, 0x7e, 0x63, 0xcb, 0xd6, 0x27, 0x3b, 0x8c, 0x4c, 0x29, 0x8a, 0xcb, 0xce, 0xa2, 0x21, 0x38, 0x94, 0x61, 0x61, 0x53, 0x63, 0x20, 0x87, 0x2e, 0x57, 0x4d, 0x0d, 0x81, 0x16, 0x39, 0x45, 0x00, 0x03, 0xcd, 0x34, 0x28, diff --git a/go/vt/proto/queryservice/queryservice.pb.go b/go/vt/proto/queryservice/queryservice.pb.go index 9c005868159..d7eb26e6354 100644 --- a/go/vt/proto/queryservice/queryservice.pb.go +++ b/go/vt/proto/queryservice/queryservice.pb.go @@ -934,7 +934,7 @@ func init() { proto.RegisterFile("queryservice.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 491 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x95, 0xdb, 0x6e, 0xd4, 0x40, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x95, 0xdb, 0x6e, 0xd4, 0x40, 0x0c, 0x86, 0xe1, 0xa2, 0x2d, 0x72, 0xc3, 0x69, 0x4a, 0x81, 0xa6, 0xa5, 0x2d, 0x7d, 0x80, 0x0a, 0x01, 0x12, 0x52, 0x25, 0x2e, 0xda, 0x08, 0x04, 0xaa, 0x38, 0x65, 0x59, 0x89, 0x2b, 0xa4, 0xd9, 0xc4, 0x5a, 0xa2, 0xcd, 0x26, 0xd9, 0xc9, 0x04, 0xc1, 0x13, 0xf1, 0x9a, 0x88, 0x4c, 0xec, 0xcc, diff --git a/go/vt/proto/replicationdata/replicationdata.pb.go b/go/vt/proto/replicationdata/replicationdata.pb.go index 6733adef566..9abeae1d35a 
100644 --- a/go/vt/proto/replicationdata/replicationdata.pb.go +++ b/go/vt/proto/replicationdata/replicationdata.pb.go @@ -53,7 +53,7 @@ func init() { proto.RegisterFile("replicationdata.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 241 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x5c, 0xd0, 0xc1, 0x4a, 0x03, 0x31, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd0, 0xc1, 0x4a, 0x03, 0x31, 0x10, 0xc6, 0x71, 0x52, 0xed, 0x5a, 0x47, 0xb4, 0x1a, 0x2d, 0x04, 0x2f, 0x2e, 0x9e, 0x82, 0x07, 0x11, 0x7d, 0x03, 0xbd, 0xe8, 0x41, 0x90, 0xf4, 0x01, 0x42, 0xba, 0x1b, 0x6c, 0x60, 0xcd, 0x6c, 0x33, 0x53, 0xc1, 0xd7, 0xf1, 0x49, 0xa5, 0x49, 0xbb, 0x48, 0x8f, 0xf9, 0xfe, 0xbf, 0x43, 0x18, diff --git a/go/vt/proto/tableacl/tableacl.pb.go b/go/vt/proto/tableacl/tableacl.pb.go index 5840705e279..6f2ce2dd42a 100644 --- a/go/vt/proto/tableacl/tableacl.pb.go +++ b/go/vt/proto/tableacl/tableacl.pb.go @@ -69,7 +69,7 @@ func init() { proto.RegisterFile("tableacl.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 207 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2b, 0x49, 0x4c, 0xca, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2b, 0x49, 0x4c, 0xca, 0x49, 0x4d, 0x4c, 0xce, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x80, 0xf1, 0x95, 0x96, 0x33, 0x72, 0xf1, 0x85, 0x80, 0x38, 0xee, 0x45, 0xf9, 0xa5, 0x05, 0xc1, 0x05, 0xa9, 0xc9, 0x42, 0x42, 0x5c, 0x2c, 0x79, 0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x60, 0xb6, diff --git a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go index b0faa63f945..5333cc3dc96 100644 --- a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go +++ b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go @@ -1291,7 +1291,7 @@ func init() { proto.RegisterFile("tabletmanagerdata.proto", 
fileDescriptor0) } var fileDescriptor0 = []byte{ // 2049 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x59, 0x5b, 0x6f, 0x1b, 0xc7, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0x5b, 0x6f, 0x1b, 0xc7, 0xf5, 0x07, 0x45, 0x49, 0x96, 0x0e, 0x2f, 0x22, 0x97, 0xba, 0x50, 0x0a, 0xfe, 0xba, 0xac, 0x9d, 0x7f, 0x54, 0x17, 0x55, 0x6a, 0x25, 0x0d, 0x82, 0x04, 0x29, 0xaa, 0xab, 0xed, 0xc4, 0x89, 0x95, 0x95, 0x2f, 0x45, 0x5f, 0x16, 0x43, 0xee, 0x11, 0xb9, 0xd0, 0x72, 0x77, 0x3d, 0x33, 0x2b, 0x89, diff --git a/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go b/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go index b248d39f21d..e8ea7feaa58 100644 --- a/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go +++ b/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go @@ -1633,7 +1633,7 @@ func init() { proto.RegisterFile("tabletmanagerservice.proto", fileDescriptor0) var fileDescriptor0 = []byte{ // 969 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x98, 0x6d, 0x6f, 0x1c, 0x35, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x6d, 0x6f, 0x1c, 0x35, 0x10, 0xc7, 0x39, 0x09, 0x0a, 0x98, 0xc7, 0x5a, 0x88, 0xa2, 0x20, 0x01, 0x4d, 0x5a, 0x1e, 0x52, 0x54, 0xf5, 0x81, 0xf2, 0xfe, 0x2e, 0xbd, 0xb6, 0x41, 0x44, 0x1c, 0x77, 0x8d, 0x82, 0x84, 0x84, 0xe4, 0xec, 0x4d, 0x6f, 0x97, 0x78, 0x6d, 0x63, 0x7b, 0xa3, 0xe4, 0x15, 0x12, 0x12, 0xaf, 0x90, diff --git a/go/vt/proto/throttlerdata/throttlerdata.pb.go b/go/vt/proto/throttlerdata/throttlerdata.pb.go index c0e9a9fbf43..796485739df 100644 --- a/go/vt/proto/throttlerdata/throttlerdata.pb.go +++ b/go/vt/proto/throttlerdata/throttlerdata.pb.go @@ -282,7 +282,7 @@ func init() { proto.RegisterFile("throttlerdata.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 711 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 
0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x55, 0xdd, 0x4e, 0xdb, 0x4a, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xdd, 0x4e, 0xdb, 0x4a, 0x10, 0x96, 0x09, 0xe1, 0xc0, 0x84, 0x00, 0x59, 0x38, 0x60, 0xc2, 0xd1, 0x51, 0x8e, 0xa5, 0xa3, 0x46, 0x48, 0xcd, 0x45, 0x50, 0x55, 0x5a, 0x54, 0x09, 0x52, 0xaa, 0xaa, 0x55, 0xcb, 0x85, 0x69, 0x7b, 0xd1, 0x9b, 0xd5, 0xc6, 0x1e, 0x1c, 0x0b, 0xdb, 0xeb, 0xee, 0x2e, 0x25, 0xe9, 0x43, 0xf4, diff --git a/go/vt/proto/throttlerservice/throttlerservice.pb.go b/go/vt/proto/throttlerservice/throttlerservice.pb.go index 66841caef2c..2dee35d9275 100644 --- a/go/vt/proto/throttlerservice/throttlerservice.pb.go +++ b/go/vt/proto/throttlerservice/throttlerservice.pb.go @@ -267,7 +267,7 @@ func init() { proto.RegisterFile("throttlerservice.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 214 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0x2b, 0xc9, 0x28, 0xca, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2b, 0xc9, 0x28, 0xca, 0x2f, 0x29, 0xc9, 0x49, 0x2d, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x40, 0x17, 0x97, 0x12, 0x86, 0x8b, 0xa4, 0x24, 0x96, 0x24, 0x42, 0x94, 0x19, 0x7d, 0x66, 0xe6, 0xe2, 0x0c, 0x81, 0x89, 0x0b, 0xf9, 0x72, 0x71, 0xf8, 0x26, 0x56, 0x04, diff --git a/go/vt/proto/topodata/topodata.pb.go b/go/vt/proto/topodata/topodata.pb.go index fe4098946c3..75cbf7ec352 100644 --- a/go/vt/proto/topodata/topodata.pb.go +++ b/go/vt/proto/topodata/topodata.pb.go @@ -548,7 +548,7 @@ func init() { proto.RegisterFile("topodata.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 1096 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x56, 0x6f, 0x6f, 0xe3, 0xc4, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x6f, 0x6f, 0xe3, 0xc4, 0x13, 0xfe, 0xd9, 0x71, 0xd2, 0x64, 0x9c, 
0xe6, 0x7c, 0xfb, 0xbb, 0x43, 0x96, 0x11, 0xa2, 0x8a, 0x84, 0xa8, 0x0e, 0x11, 0x50, 0x8e, 0x83, 0xea, 0x24, 0xa4, 0xa6, 0xa9, 0x0f, 0xd2, 0x3f, 0x69, 0xd8, 0xa4, 0x82, 0xbe, 0xb2, 0x9c, 0x78, 0xdb, 0xb3, 0xea, 0x64, 0xcd, 0xee, 0xa6, 0x52, 0x3e, diff --git a/go/vt/proto/vschema/vschema.pb.go b/go/vt/proto/vschema/vschema.pb.go index dc15d87b4e9..0086cb0dcb9 100644 --- a/go/vt/proto/vschema/vschema.pb.go +++ b/go/vt/proto/vschema/vschema.pb.go @@ -175,7 +175,7 @@ func init() { proto.RegisterFile("vschema.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 436 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x93, 0xd1, 0x6a, 0xd4, 0x40, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x93, 0xd1, 0x6a, 0xd4, 0x40, 0x14, 0x86, 0x99, 0xc4, 0x4d, 0xb3, 0x27, 0x26, 0xd5, 0xa1, 0x96, 0x10, 0x11, 0x97, 0xa0, 0xb8, 0x57, 0xb9, 0xd8, 0x22, 0x68, 0x45, 0x51, 0x8a, 0x17, 0x45, 0x41, 0x49, 0xa5, 0xb7, 0x65, 0x9a, 0x3d, 0xd0, 0xd2, 0xcd, 0x24, 0x66, 0x92, 0x68, 0x5e, 0xc5, 0x1b, 0xc1, 0x37, 0xf0, 0x0d, 0xa5, diff --git a/go/vt/proto/vtctldata/vtctldata.pb.go b/go/vt/proto/vtctldata/vtctldata.pb.go index a112ca33c52..c762ac11e48 100644 --- a/go/vt/proto/vtctldata/vtctldata.pb.go +++ b/go/vt/proto/vtctldata/vtctldata.pb.go @@ -68,7 +68,7 @@ func init() { proto.RegisterFile("vtctldata.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 175 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2f, 0x2b, 0x49, 0x2e, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2f, 0x2b, 0x49, 0x2e, 0xc9, 0x49, 0x49, 0x2c, 0x49, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x84, 0x0b, 0x48, 0xf1, 0xe6, 0xe4, 0xa7, 0x97, 0x96, 0x64, 0xe6, 0x40, 0x64, 0x94, 0xc2, 0xb9, 0xa4, 0x5c, 0x2b, 0x52, 0x93, 0x4b, 0x4b, 0x52, 0xc3, 0x40, 0x4a, 0x9c, 0xf3, 0x73, 0x73, 0x13, 0xf3, 0x52, 0x82, diff --git 
a/go/vt/proto/vtctlservice/vtctlservice.pb.go b/go/vt/proto/vtctlservice/vtctlservice.pb.go index 094c4197ff9..16c84edc29f 100644 --- a/go/vt/proto/vtctlservice/vtctlservice.pb.go +++ b/go/vt/proto/vtctlservice/vtctlservice.pb.go @@ -136,7 +136,7 @@ func init() { proto.RegisterFile("vtctlservice.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 118 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0x2a, 0x2b, 0x49, 0x2e, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2a, 0x2b, 0x49, 0x2e, 0xc9, 0x29, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x41, 0x16, 0x93, 0xe2, 0x07, 0xf3, 0x52, 0x12, 0x4b, 0x12, 0x21, 0xd2, 0x46, 0x85, 0x5c, 0xac, 0x61, 0x20, 0x21, 0xa1, 0x0c, 0x2e, 0x61, 0xd7, 0x8a, 0xd4, 0xe4, 0xd2, 0x92, 0x54, 0x30, 0xdf, diff --git a/go/vt/proto/vtgate/vtgate.pb.go b/go/vt/proto/vtgate/vtgate.pb.go index c0a6a28df6e..7bd4ef3ff0a 100644 --- a/go/vt/proto/vtgate/vtgate.pb.go +++ b/go/vt/proto/vtgate/vtgate.pb.go @@ -1772,7 +1772,7 @@ func init() { proto.RegisterFile("vtgate.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 1652 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x5a, 0xcb, 0x6e, 0x1b, 0x47, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x5a, 0xcb, 0x6e, 0x1b, 0x47, 0x16, 0x45, 0x77, 0xf3, 0x79, 0x49, 0x51, 0x52, 0x89, 0x92, 0x69, 0x5a, 0x63, 0xc9, 0x8d, 0x11, 0x4c, 0x8f, 0x05, 0x7a, 0x2c, 0xcf, 0x0b, 0xb3, 0x99, 0xb1, 0x64, 0x61, 0x20, 0x78, 0xec, 0x38, 0x25, 0xc5, 0x49, 0x80, 0x18, 0x8d, 0x16, 0x59, 0x90, 0x3a, 0x24, 0xbb, 0xe9, 0xae, 0x6a, 0x3a, diff --git a/go/vt/proto/vtgateservice/vtgateservice.pb.go b/go/vt/proto/vtgateservice/vtgateservice.pb.go index af61880068e..a3acda6d7c8 100644 --- a/go/vt/proto/vtgateservice/vtgateservice.pb.go +++ b/go/vt/proto/vtgateservice/vtgateservice.pb.go @@ -1045,7 +1045,7 @@ 
func init() { proto.RegisterFile("vtgateservice.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 551 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x95, 0x5f, 0x6f, 0xd3, 0x30, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x95, 0x5f, 0x6f, 0xd3, 0x30, 0x14, 0xc5, 0xe1, 0x81, 0x82, 0x2e, 0x2d, 0x42, 0x1e, 0x74, 0x5b, 0xd9, 0x18, 0x2b, 0x62, 0xe3, 0x29, 0x42, 0x20, 0x21, 0x21, 0x21, 0xa1, 0x16, 0x2a, 0x84, 0xa6, 0x01, 0x6b, 0xf9, 0xf3, 0xc4, 0x83, 0x9b, 0x5e, 0x65, 0x51, 0xd3, 0x24, 0x8d, 0x9d, 0x88, 0x7e, 0x65, 0x3e, 0x05, 0x5a, 0x62, diff --git a/go/vt/proto/vtrpc/vtrpc.pb.go b/go/vt/proto/vtrpc/vtrpc.pb.go index f69f21b5912..06146f083eb 100644 --- a/go/vt/proto/vtrpc/vtrpc.pb.go +++ b/go/vt/proto/vtrpc/vtrpc.pb.go @@ -29,17 +29,173 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package -// ErrorCode is the enum values for Errors. Internally, errors should -// be created with one of these codes. These will then be translated over the wire -// by various RPC frameworks. +// Code represnts canonical error codes. The names and numbers must match +// the ones defined by grpc: https://godoc.org/google.golang.org/grpc/codes. +type Code int32 + +const ( + // OK is returned on success. + Code_OK Code = 0 + // CANCELED indicates the operation was cancelled (typically by the caller). + Code_CANCELED Code = 1 + // UNKNOWN error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + Code_UNKNOWN Code = 2 + // INVALID_ARGUMENT indicates client specified an invalid argument. + // Note that this differs from FAILED_PRECONDITION. 
It indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + Code_INVALID_ARGUMENT Code = 3 + // DEADLINE_EXCEEDED means operation expired before completion. + // For operations that change the state of the system, this error may be + // returned even if the operation has completed successfully. For + // example, a successful response from a server could have been delayed + // long enough for the deadline to expire. + Code_DEADLINE_EXCEEDED Code = 4 + // NOT_FOUND means some requested entity (e.g., file or directory) was + // not found. + Code_NOT_FOUND Code = 5 + // ALREADY_EXISTS means an attempt to create an entity failed because one + // already exists. + Code_ALREADY_EXISTS Code = 6 + // PERMISSION_DENIED indicates the caller does not have permission to + // execute the specified operation. It must not be used for rejections + // caused by exhausting some resource (use RESOURCE_EXHAUSTED + // instead for those errors). It must not be + // used if the caller cannot be identified (use Unauthenticated + // instead for those errors). + Code_PERMISSION_DENIED Code = 7 + // UNAUTHENTICATED indicates the request does not have valid + // authentication credentials for the operation. + Code_UNAUTHENTICATED Code = 16 + // RESOURCE_EXHAUSTED indicates some resource has been exhausted, perhaps + // a per-user quota, or perhaps the entire file system is out of space. + Code_RESOURCE_EXHAUSTED Code = 8 + // FAILED_PRECONDITION indicates operation was rejected because the + // system is not in a state required for the operation's execution. + // For example, directory to be deleted may be non-empty, an rmdir + // operation is applied to a non-directory, etc. + // + // A litmus test that may help a service implementor in deciding + // between FAILED_PRECONDITION, ABORTED, and UNAVAILABLE: + // (a) Use UNAVAILABLE if the client can retry just the failing call. 
+ // (b) Use ABORTED if the client should retry at a higher-level + // (e.g., restarting a read-modify-write sequence). + // (c) Use FAILED_PRECONDITION if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, FAILED_PRECONDITION + // should be returned since the client should not retry unless + // they have first fixed up the directory by deleting files from it. + // (d) Use FAILED_PRECONDITION if the client performs conditional + // REST Get/Update/Delete on a resource and the resource on the + // server does not match the condition. E.g., conflicting + // read-modify-write on the same resource. + Code_FAILED_PRECONDITION Code = 9 + // ABORTED indicates the operation was aborted, typically due to a + // concurrency issue like sequencer check failures, transaction aborts, + // etc. + // + // See litmus test above for deciding between FAILED_PRECONDITION, + // ABORTED, and UNAVAILABLE. + Code_ABORTED Code = 10 + // OUT_OF_RANGE means operation was attempted past the valid range. + // E.g., seeking or reading past end of file. + // + // Unlike INVALID_ARGUMENT, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate INVALID_ARGUMENT if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // OUT_OF_RANGE if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between FAILED_PRECONDITION and + // OUT_OF_RANGE. We recommend using OUT_OF_RANGE (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an OUT_OF_RANGE error to detect when + // they are done. + Code_OUT_OF_RANGE Code = 11 + // UNIMPLEMENTED indicates operation is not implemented or not + // supported/enabled in this service. + Code_UNIMPLEMENTED Code = 12 + // INTERNAL errors. 
Means some invariants expected by underlying + // system has been broken. If you see one of these errors, + // something is very broken. + Code_INTERNAL Code = 13 + // UNAVAILABLE indicates the service is currently unavailable. + // This is a most likely a transient condition and may be corrected + // by retrying with a backoff. + // + // See litmus test above for deciding between FAILED_PRECONDITION, + // ABORTED, and UNAVAILABLE. + Code_UNAVAILABLE Code = 14 + // DATA_LOSS indicates unrecoverable data loss or corruption. + Code_DATA_LOSS Code = 15 +) + +var Code_name = map[int32]string{ + 0: "OK", + 1: "CANCELED", + 2: "UNKNOWN", + 3: "INVALID_ARGUMENT", + 4: "DEADLINE_EXCEEDED", + 5: "NOT_FOUND", + 6: "ALREADY_EXISTS", + 7: "PERMISSION_DENIED", + 16: "UNAUTHENTICATED", + 8: "RESOURCE_EXHAUSTED", + 9: "FAILED_PRECONDITION", + 10: "ABORTED", + 11: "OUT_OF_RANGE", + 12: "UNIMPLEMENTED", + 13: "INTERNAL", + 14: "UNAVAILABLE", + 15: "DATA_LOSS", +} +var Code_value = map[string]int32{ + "OK": 0, + "CANCELED": 1, + "UNKNOWN": 2, + "INVALID_ARGUMENT": 3, + "DEADLINE_EXCEEDED": 4, + "NOT_FOUND": 5, + "ALREADY_EXISTS": 6, + "PERMISSION_DENIED": 7, + "UNAUTHENTICATED": 16, + "RESOURCE_EXHAUSTED": 8, + "FAILED_PRECONDITION": 9, + "ABORTED": 10, + "OUT_OF_RANGE": 11, + "UNIMPLEMENTED": 12, + "INTERNAL": 13, + "UNAVAILABLE": 14, + "DATA_LOSS": 15, +} + +func (x Code) String() string { + return proto.EnumName(Code_name, int32(x)) +} +func (Code) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// ErrorCode is the enum values for Errors. This type is deprecated. +// Use Code instead. Background: In the initial design, we thought +// that we may end up with a different list of canonical error codes +// than the ones defined by grpc. In hindisght, we realize that +// the grpc error codes are fairly generic and mostly sufficient. 
+// In order to avoid confusion, thie type will be deprecated in +// favor of the new Code that matches exactly what grpc defines. +// Some names below have a _LEGACY suffix. This is to prevent +// name collisions with Code. type ErrorCode int32 const ( // SUCCESS is returned from a successful call. ErrorCode_SUCCESS ErrorCode = 0 - // CANCELLED means that the context was cancelled (and noticed in the app layer, + // CANCELLED_LEGACY means that the context was cancelled (and noticed in the app layer, // as opposed to the RPC layer). - ErrorCode_CANCELLED ErrorCode = 1 + ErrorCode_CANCELLED_LEGACY ErrorCode = 1 // UNKNOWN_ERROR includes: // 1. MySQL error codes that we don't explicitly handle. // 2. MySQL response that wasn't as expected. For example, we might expect a MySQL @@ -49,22 +205,22 @@ const ( // BAD_INPUT is returned when an end-user either sends SQL that couldn't be parsed correctly, // or tries a query that isn't supported by Vitess. ErrorCode_BAD_INPUT ErrorCode = 3 - // DEADLINE_EXCEEDED is returned when an action is taking longer than a given timeout. - ErrorCode_DEADLINE_EXCEEDED ErrorCode = 4 + // DEADLINE_EXCEEDED_LEGACY is returned when an action is taking longer than a given timeout. + ErrorCode_DEADLINE_EXCEEDED_LEGACY ErrorCode = 4 // INTEGRITY_ERROR is returned on integrity error from MySQL, usually due to // duplicate primary keys. ErrorCode_INTEGRITY_ERROR ErrorCode = 5 - // PERMISSION_DENIED errors are returned when a user requests access to something + // PERMISSION_DENIED_LEGACY errors are returned when a user requests access to something // that they don't have permissions for. - ErrorCode_PERMISSION_DENIED ErrorCode = 6 - // RESOURCE_EXHAUSTED is returned when a query exceeds its quota in some dimension + ErrorCode_PERMISSION_DENIED_LEGACY ErrorCode = 6 + // RESOURCE_EXHAUSTED_LEGACY is returned when a query exceeds its quota in some dimension // and can't be completed due to that. 
Queries that return RESOURCE_EXHAUSTED // should not be retried, as it could be detrimental to the server's health. // Examples of errors that will cause the RESOURCE_EXHAUSTED code: // 1. TxPoolFull: this is retried server-side, and is only returned as an error // if the server-side retries failed. // 2. Query is killed due to it taking too long. - ErrorCode_RESOURCE_EXHAUSTED ErrorCode = 7 + ErrorCode_RESOURCE_EXHAUSTED_LEGACY ErrorCode = 7 // QUERY_NOT_SERVED means that a query could not be served right now. // Client can interpret it as: "the tablet that you sent this query to cannot // serve the query right now, try a different tablet or try again later." @@ -98,46 +254,46 @@ const ( // 1. Query has been throttled // 2. VtGate could have request backlog ErrorCode_TRANSIENT_ERROR ErrorCode = 11 - // UNAUTHENTICATED errors are returned when a user requests access to something, + // UNAUTHENTICATED_LEGACY errors are returned when a user requests access to something, // and we're unable to verify the user's authentication. 
- ErrorCode_UNAUTHENTICATED ErrorCode = 12 + ErrorCode_UNAUTHENTICATED_LEGACY ErrorCode = 12 ) var ErrorCode_name = map[int32]string{ 0: "SUCCESS", - 1: "CANCELLED", + 1: "CANCELLED_LEGACY", 2: "UNKNOWN_ERROR", 3: "BAD_INPUT", - 4: "DEADLINE_EXCEEDED", + 4: "DEADLINE_EXCEEDED_LEGACY", 5: "INTEGRITY_ERROR", - 6: "PERMISSION_DENIED", - 7: "RESOURCE_EXHAUSTED", + 6: "PERMISSION_DENIED_LEGACY", + 7: "RESOURCE_EXHAUSTED_LEGACY", 8: "QUERY_NOT_SERVED", 9: "NOT_IN_TX", 10: "INTERNAL_ERROR", 11: "TRANSIENT_ERROR", - 12: "UNAUTHENTICATED", + 12: "UNAUTHENTICATED_LEGACY", } var ErrorCode_value = map[string]int32{ - "SUCCESS": 0, - "CANCELLED": 1, - "UNKNOWN_ERROR": 2, - "BAD_INPUT": 3, - "DEADLINE_EXCEEDED": 4, - "INTEGRITY_ERROR": 5, - "PERMISSION_DENIED": 6, - "RESOURCE_EXHAUSTED": 7, - "QUERY_NOT_SERVED": 8, - "NOT_IN_TX": 9, - "INTERNAL_ERROR": 10, - "TRANSIENT_ERROR": 11, - "UNAUTHENTICATED": 12, + "SUCCESS": 0, + "CANCELLED_LEGACY": 1, + "UNKNOWN_ERROR": 2, + "BAD_INPUT": 3, + "DEADLINE_EXCEEDED_LEGACY": 4, + "INTEGRITY_ERROR": 5, + "PERMISSION_DENIED_LEGACY": 6, + "RESOURCE_EXHAUSTED_LEGACY": 7, + "QUERY_NOT_SERVED": 8, + "NOT_IN_TX": 9, + "INTERNAL_ERROR": 10, + "TRANSIENT_ERROR": 11, + "UNAUTHENTICATED_LEGACY": 12, } func (x ErrorCode) String() string { return proto.EnumName(ErrorCode_name, int32(x)) } -func (ErrorCode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (ErrorCode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } // CallerID is passed along RPCs to identify the originating client // for a request. It is not meant to be secure, but only @@ -174,8 +330,8 @@ func (*CallerID) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} // We use this so the clients don't have to parse the error messages, // but instead can depend on the value of the code. 
type RPCError struct { - Code ErrorCode `protobuf:"varint,1,opt,name=code,enum=vtrpc.ErrorCode" json:"code,omitempty"` - Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` + LegacyCode ErrorCode `protobuf:"varint,1,opt,name=legacy_code,json=legacyCode,enum=vtrpc.ErrorCode" json:"legacy_code,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` } func (m *RPCError) Reset() { *m = RPCError{} } @@ -186,35 +342,48 @@ func (*RPCError) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} func init() { proto.RegisterType((*CallerID)(nil), "vtrpc.CallerID") proto.RegisterType((*RPCError)(nil), "vtrpc.RPCError") + proto.RegisterEnum("vtrpc.Code", Code_name, Code_value) proto.RegisterEnum("vtrpc.ErrorCode", ErrorCode_name, ErrorCode_value) } func init() { proto.RegisterFile("vtrpc.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 376 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x54, 0x91, 0xcb, 0x6e, 0x13, 0x31, - 0x14, 0x86, 0x49, 0x7a, 0x49, 0xe6, 0xa4, 0x2d, 0xae, 0xb9, 0x28, 0x42, 0x2c, 0x50, 0xc4, 0x02, - 0xb1, 0xc8, 0x02, 0x9e, 0xc0, 0xb5, 0x8f, 0xa8, 0x21, 0x9c, 0x09, 0xbe, 0x40, 0xbb, 0x1a, 0x25, - 0x53, 0x0b, 0x05, 0x25, 0xf1, 0x68, 0x66, 0x52, 0x89, 0x27, 0xe0, 0xb5, 0x91, 0x27, 0xa1, 0x15, - 0xab, 0xd1, 0xff, 0xff, 0xdf, 0xf8, 0xb3, 0x64, 0x18, 0xdd, 0xb7, 0x75, 0x55, 0x4e, 0xab, 0x3a, - 0xb6, 0x91, 0x9f, 0x74, 0x61, 0xf2, 0x0b, 0x86, 0x72, 0xb1, 0x5e, 0x87, 0x5a, 0x2b, 0xfe, 0x1a, - 0xb2, 0xaa, 0x5e, 0x6d, 0xcb, 0x55, 0xb5, 0x58, 0x8f, 0x7b, 0x6f, 0x7a, 0xef, 0x32, 0xf3, 0x58, - 0xa4, 0xb5, 0x8c, 0x9b, 0x2a, 0x6e, 0xc3, 0xb6, 0x1d, 0xf7, 0xf7, 0xeb, 0x43, 0xc1, 0x27, 0x70, - 0xd6, 0xec, 0x96, 0x8f, 0xc0, 0x51, 0x07, 0xfc, 0xd7, 0x4d, 0x3e, 0xc3, 0xd0, 0xcc, 0x25, 0xd6, - 0x75, 0xac, 0xf9, 0x5b, 0x38, 0x2e, 0xe3, 0x5d, 0xe8, 0x34, 0x17, 0x1f, 0xd8, 0x74, 0x7f, 0xb5, - 0x6e, 0x93, 0xf1, 0x2e, 0x98, 0x6e, 0xe5, 0x63, 0x18, 
0x6c, 0x42, 0xd3, 0x2c, 0x7e, 0x86, 0x83, - 0xf1, 0x5f, 0x7c, 0xff, 0xa7, 0x0f, 0xd9, 0x03, 0xcd, 0x47, 0x30, 0xb0, 0x5e, 0x4a, 0xb4, 0x96, - 0x3d, 0xe1, 0xe7, 0x90, 0x49, 0x41, 0x12, 0x67, 0x33, 0x54, 0xac, 0xc7, 0x2f, 0xe1, 0xdc, 0xd3, - 0x17, 0xca, 0x7f, 0x50, 0x81, 0xc6, 0xe4, 0x86, 0xf5, 0x13, 0x71, 0x25, 0x54, 0xa1, 0x69, 0xee, - 0x1d, 0x3b, 0xe2, 0x2f, 0xe0, 0x52, 0xa1, 0x50, 0x33, 0x4d, 0x58, 0xe0, 0x8d, 0x44, 0x54, 0xa8, - 0xd8, 0x31, 0x7f, 0x06, 0x4f, 0x35, 0x39, 0xfc, 0x64, 0xb4, 0xbb, 0x3d, 0xfc, 0x7a, 0x92, 0xd8, - 0x39, 0x9a, 0xaf, 0xda, 0x5a, 0x9d, 0x53, 0xa1, 0x90, 0x34, 0x2a, 0x76, 0xca, 0x5f, 0x02, 0x37, - 0x68, 0x73, 0x6f, 0x64, 0x3a, 0xe2, 0x5a, 0x78, 0xeb, 0x50, 0xb1, 0x01, 0x7f, 0x0e, 0xec, 0x9b, - 0x47, 0x73, 0x5b, 0x50, 0xee, 0x0a, 0x8b, 0xe6, 0x3b, 0x2a, 0x36, 0x4c, 0xfe, 0x94, 0x35, 0x15, - 0xee, 0x86, 0x65, 0x9c, 0xc3, 0x45, 0x12, 0x19, 0x12, 0xb3, 0x83, 0x07, 0x92, 0xdc, 0x19, 0x41, - 0x56, 0x23, 0xb9, 0x43, 0x39, 0x4a, 0xa5, 0x27, 0xe1, 0xdd, 0x35, 0x92, 0xd3, 0x52, 0x24, 0xc5, - 0xd9, 0xd5, 0x2b, 0x18, 0x97, 0x71, 0x33, 0xfd, 0x1d, 0x77, 0xed, 0x6e, 0x19, 0xa6, 0xf7, 0xab, - 0x36, 0x34, 0xcd, 0xfe, 0x91, 0x97, 0xa7, 0xdd, 0xe7, 0xe3, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, - 0x7c, 0x77, 0xfd, 0x16, 0xfa, 0x01, 0x00, 0x00, + // 561 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x93, 0xdd, 0x4e, 0x1b, 0x3f, + 0x10, 0xc5, 0x49, 0x80, 0x7c, 0x4c, 0x02, 0x18, 0xf3, 0xff, 0xd3, 0x14, 0x51, 0xa9, 0xe2, 0xaa, + 0xe2, 0x22, 0x52, 0xdb, 0x27, 0x70, 0xd6, 0x43, 0xb0, 0x58, 0xc6, 0xa9, 0x3f, 0x80, 0x5c, 0x59, + 0x21, 0xac, 0x10, 0x55, 0x60, 0xa3, 0x4d, 0x40, 0xe2, 0x95, 0xfa, 0x00, 0x7d, 0xbe, 0xca, 0x9b, + 0x04, 0xd4, 0xe6, 0x2a, 0xf1, 0xf8, 0x8c, 0xcf, 0x99, 0xdf, 0x68, 0xa1, 0xf5, 0x32, 0x2f, 0xa6, + 0xe3, 0xee, 0xb4, 0xc8, 0xe7, 0x39, 0xdf, 0x2e, 0x0f, 0x27, 0x3f, 0xa1, 0x91, 0x8c, 0x26, 0x93, + 0xac, 0x50, 0x92, 0x1f, 0x43, 0x73, 0x5a, 0x3c, 0x3c, 0x8d, 0x1f, 0xa6, 0xa3, 0x49, 0xa7, 
0xf2, + 0xb9, 0xf2, 0xa5, 0x69, 0xde, 0x0b, 0xf1, 0x76, 0x9c, 0x3f, 0x4e, 0xf3, 0xa7, 0xec, 0x69, 0xde, + 0xa9, 0x2e, 0x6e, 0xdf, 0x0a, 0xfc, 0x04, 0xda, 0xb3, 0xe7, 0xdb, 0x77, 0xc1, 0x66, 0x29, 0xf8, + 0xab, 0x76, 0x72, 0x0d, 0x0d, 0x33, 0x48, 0xb0, 0x28, 0xf2, 0x82, 0x7f, 0x85, 0xd6, 0x24, 0xbb, + 0x1f, 0x8d, 0x5f, 0xc3, 0x38, 0xbf, 0xcb, 0x4a, 0xb7, 0xdd, 0x6f, 0xac, 0xbb, 0x48, 0x58, 0x4a, + 0x92, 0xfc, 0x2e, 0x33, 0xb0, 0x10, 0xc5, 0xff, 0xbc, 0x03, 0xf5, 0xc7, 0x6c, 0x36, 0x1b, 0xdd, + 0x67, 0x4b, 0xfb, 0xd5, 0xf1, 0xf4, 0x77, 0x15, 0xb6, 0x4a, 0x49, 0x0d, 0xaa, 0xfa, 0x82, 0x6d, + 0xf0, 0x36, 0x34, 0x12, 0x41, 0x09, 0xa6, 0x28, 0x59, 0x85, 0xb7, 0xa0, 0xee, 0xe9, 0x82, 0xf4, + 0x35, 0xb1, 0x2a, 0xff, 0x0f, 0x98, 0xa2, 0x2b, 0x91, 0x2a, 0x19, 0x84, 0xe9, 0xfb, 0x4b, 0x24, + 0xc7, 0x36, 0xf9, 0xff, 0xb0, 0x2f, 0x51, 0xc8, 0x54, 0x11, 0x06, 0xbc, 0x49, 0x10, 0x25, 0x4a, + 0xb6, 0xc5, 0x77, 0xa0, 0x49, 0xda, 0x85, 0x33, 0xed, 0x49, 0xb2, 0x6d, 0xce, 0x61, 0x57, 0xa4, + 0x06, 0x85, 0x1c, 0x06, 0xbc, 0x51, 0xd6, 0x59, 0x56, 0x8b, 0x9d, 0x03, 0x34, 0x97, 0xca, 0x5a, + 0xa5, 0x29, 0x48, 0x24, 0x85, 0x92, 0xd5, 0xf9, 0x01, 0xec, 0x79, 0x12, 0xde, 0x9d, 0x23, 0x39, + 0x95, 0x08, 0x87, 0x92, 0x31, 0x7e, 0x08, 0xdc, 0xa0, 0xd5, 0xde, 0x24, 0xd1, 0xe5, 0x5c, 0x78, + 0x1b, 0xeb, 0x0d, 0xfe, 0x01, 0x0e, 0xce, 0x84, 0x4a, 0x51, 0x86, 0x81, 0xc1, 0x44, 0x93, 0x54, + 0x4e, 0x69, 0x62, 0xcd, 0x98, 0x5c, 0xf4, 0xb4, 0x89, 0x2a, 0xe0, 0x0c, 0xda, 0xda, 0xbb, 0xa0, + 0xcf, 0x82, 0x11, 0xd4, 0x47, 0xd6, 0xe2, 0xfb, 0xb0, 0xe3, 0x49, 0x5d, 0x0e, 0x52, 0x8c, 0x63, + 0xa0, 0x64, 0xed, 0x38, 0xb9, 0x22, 0x87, 0x86, 0x44, 0xca, 0x76, 0xf8, 0x1e, 0xb4, 0x3c, 0x89, + 0x2b, 0xa1, 0x52, 0xd1, 0x4b, 0x91, 0xed, 0xc6, 0x81, 0xa4, 0x70, 0x22, 0xa4, 0xda, 0x5a, 0xb6, + 0x77, 0xfa, 0xab, 0x0a, 0xcd, 0x37, 0xd8, 0xd1, 0xcd, 0xfa, 0x24, 0x41, 0x6b, 0xd9, 0x46, 0xe4, + 0xb4, 0x40, 0x18, 0x63, 0xa5, 0xd8, 0x17, 0xc9, 0x90, 0x55, 0x16, 0x8e, 0x25, 0xca, 0x80, 0xc6, + 0x68, 0xc3, 0xaa, 0xf1, 0xc9, 
0x9e, 0x90, 0x41, 0xd1, 0xc0, 0x47, 0x92, 0xc7, 0xd0, 0x59, 0x23, + 0xb9, 0xea, 0xdf, 0x8a, 0x58, 0x62, 0xbc, 0xbe, 0x51, 0x6e, 0xb8, 0x7c, 0x61, 0x3b, 0xb6, 0xac, + 0x21, 0x5c, 0xb5, 0xd4, 0xf8, 0x27, 0xf8, 0xb8, 0x0e, 0x6d, 0x75, 0x5d, 0x8f, 0x39, 0x7f, 0x78, + 0x34, 0xc3, 0x10, 0x17, 0x65, 0xd1, 0x5c, 0x95, 0x44, 0x97, 0x8b, 0x53, 0x14, 0xdc, 0x0d, 0x6b, + 0xc6, 0xc5, 0xad, 0xa8, 0x2c, 0x5d, 0x21, 0x46, 0x71, 0x46, 0x90, 0x55, 0x48, 0x6e, 0x59, 0x6c, + 0xf1, 0x23, 0x38, 0xfc, 0x67, 0x6d, 0x2b, 0xa7, 0x76, 0xef, 0x08, 0x3a, 0xe3, 0xfc, 0xb1, 0xfb, + 0x9a, 0x3f, 0xcf, 0x9f, 0x6f, 0xb3, 0xee, 0xcb, 0xc3, 0x3c, 0x9b, 0xcd, 0x16, 0x5f, 0xd3, 0x6d, + 0xad, 0xfc, 0xf9, 0xfe, 0x27, 0x00, 0x00, 0xff, 0xff, 0xfc, 0xc4, 0x3c, 0x5f, 0x63, 0x03, 0x00, + 0x00, } diff --git a/go/vt/proto/vttest/vttest.pb.go b/go/vt/proto/vttest/vttest.pb.go index 4457ce4edb4..98ecfd79e41 100644 --- a/go/vt/proto/vttest/vttest.pb.go +++ b/go/vt/proto/vttest/vttest.pb.go @@ -107,7 +107,7 @@ func init() { proto.RegisterFile("vttest.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 297 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x51, 0xcd, 0x6a, 0xf3, 0x30, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x51, 0xcd, 0x6a, 0xf3, 0x30, 0x10, 0xc4, 0x49, 0xec, 0xef, 0xcb, 0xe6, 0x87, 0x20, 0x72, 0xd0, 0xad, 0x69, 0x4a, 0xc1, 0xa7, 0x50, 0xda, 0x47, 0x08, 0xed, 0xa5, 0xd0, 0x82, 0x6b, 0x72, 0x35, 0x8e, 0xb5, 0x4d, 0x4d, 0x65, 0x4b, 0x48, 0x8a, 0xc1, 0xaf, 0xd1, 0x27, 0x2e, 0x5e, 0xcb, 0xf4, 0xe2, 0xdb, 0x68, 0x66, 0x76, diff --git a/go/vt/proto/vtworkerdata/vtworkerdata.pb.go b/go/vt/proto/vtworkerdata/vtworkerdata.pb.go index 71559a88fbe..a0e55e41ee9 100644 --- a/go/vt/proto/vtworkerdata/vtworkerdata.pb.go +++ b/go/vt/proto/vtworkerdata/vtworkerdata.pb.go @@ -66,7 +66,7 @@ func init() { proto.RegisterFile("vtworkerdata.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 147 bytes of a gzipped 
FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0x2a, 0x2b, 0x29, 0xcf, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2a, 0x2b, 0x29, 0xcf, 0x2f, 0xca, 0x4e, 0x2d, 0x4a, 0x49, 0x2c, 0x49, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x41, 0x16, 0x93, 0xe2, 0xcd, 0xc9, 0x4f, 0x2f, 0x2d, 0xc9, 0xcc, 0x81, 0x48, 0x2a, 0x19, 0x73, 0xc9, 0xba, 0x56, 0xa4, 0x26, 0x97, 0x96, 0xa4, 0x86, 0x41, 0x55, 0x39, 0xe7, 0xe7, 0xe6, 0x26, diff --git a/go/vt/proto/vtworkerservice/vtworkerservice.pb.go b/go/vt/proto/vtworkerservice/vtworkerservice.pb.go index 1ffc24c794b..75cf04ee7f9 100644 --- a/go/vt/proto/vtworkerservice/vtworkerservice.pb.go +++ b/go/vt/proto/vtworkerservice/vtworkerservice.pb.go @@ -140,7 +140,7 @@ func init() { proto.RegisterFile("vtworkerservice.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 123 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0x2d, 0x2b, 0x29, 0xcf, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2d, 0x2b, 0x29, 0xcf, 0x2f, 0xca, 0x4e, 0x2d, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x47, 0x13, 0x96, 0x12, 0x82, 0x09, 0xa4, 0x24, 0x96, 0x24, 0x42, 0x14, 0x19, 0x35, 0x33, 0x72, 0x71, 0x84, 0x41, 0x85, 0x85, 0xca, 0xb9, 0xc4, 0x5c, 0x2b, 0x52, 0x93, 0x4b, diff --git a/go/vt/proto/workflow/workflow.pb.go b/go/vt/proto/workflow/workflow.pb.go index 70ce2abe855..ef73eb3cb8d 100644 --- a/go/vt/proto/workflow/workflow.pb.go +++ b/go/vt/proto/workflow/workflow.pb.go @@ -106,7 +106,7 @@ func init() { proto.RegisterFile("workflow.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 246 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x54, 0x90, 0x41, 0x4b, 0x03, 0x31, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0x41, 0x4b, 0x03, 0x31, 
0x10, 0x85, 0x4d, 0xbb, 0xdb, 0x4d, 0xa7, 0x75, 0x59, 0x06, 0xc1, 0x78, 0x10, 0x56, 0x4f, 0x8b, 0x60, 0x0f, 0x0a, 0xfe, 0x02, 0xcf, 0x3d, 0xa4, 0x82, 0xc7, 0x12, 0xcd, 0x54, 0x16, 0xdd, 0x44, 0xd2, 0x59, 0x8a, 0xff, 0xd8, 0x9f, 0x21, 0xc9, 0x76, 0x85, 0xde, 0xde, 0x9b, 0x2f, 0x6f, 0x5e, diff --git a/go/vt/tabletserver/query_executor.go b/go/vt/tabletserver/query_executor.go index 2de0563ad8c..1e1fda1004d 100644 --- a/go/vt/tabletserver/query_executor.go +++ b/go/vt/tabletserver/query_executor.go @@ -243,7 +243,7 @@ func (qre *QueryExecutor) checkPermissions() error { callerID := callerid.ImmediateCallerIDFromContext(qre.ctx) if callerID == nil { if qre.tsv.qe.strictTableACL { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_UNAUTHENTICATED, "missing caller id") + return tabletenv.NewTabletError(vtrpcpb.ErrorCode_UNAUTHENTICATED_LEGACY, "missing caller id") } return nil } @@ -260,7 +260,7 @@ func (qre *QueryExecutor) checkPermissions() error { } if qre.plan.Authorized == nil { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_PERMISSION_DENIED, "table acl error: nil acl") + return tabletenv.NewTabletError(vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY, "table acl error: nil acl") } tableACLStatsKey := []string{ qre.plan.TableName.String(), @@ -279,7 +279,7 @@ func (qre *QueryExecutor) checkPermissions() error { errStr := fmt.Sprintf("table acl error: %q cannot run %v on table %q", callerID.Username, qre.plan.PlanID, qre.plan.TableName) tabletenv.TableaclDenied.Add(tableACLStatsKey, 1) qre.tsv.qe.accessCheckerLogger.Infof("%s", errStr) - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_PERMISSION_DENIED, "%s", errStr) + return tabletenv.NewTabletError(vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY, "%s", errStr) } return nil } diff --git a/go/vt/tabletserver/query_executor_test.go b/go/vt/tabletserver/query_executor_test.go index f4043bd6d5a..6dbea06ed00 100644 --- a/go/vt/tabletserver/query_executor_test.go +++ b/go/vt/tabletserver/query_executor_test.go @@ -1035,7 
+1035,7 @@ func TestQueryExecutorTableAclNoPermission(t *testing.T) { if !ok { t.Fatalf("got: %v, want: *tabletenv.TabletError", err) } - if tabletError.ErrorCode != vtrpcpb.ErrorCode_PERMISSION_DENIED { + if tabletError.ErrorCode != vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY { t.Fatalf("got: %s, want: PERMISSION_DENIED", tabletError.ErrorCode) } } @@ -1089,7 +1089,7 @@ func TestQueryExecutorTableAclExemptACL(t *testing.T) { if !ok { t.Fatalf("got: %v, want: *tabletenv.TabletError", err) } - if tabletError.ErrorCode != vtrpcpb.ErrorCode_PERMISSION_DENIED { + if tabletError.ErrorCode != vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY { t.Fatalf("got: %s, want: PERMISSION_DENIED", tabletError.ErrorCode) } if !strings.Contains(tabletError.Error(), "table acl error") { diff --git a/go/vt/tabletserver/sandboxconn/sandboxconn.go b/go/vt/tabletserver/sandboxconn/sandboxconn.go index 982e1e9b11a..94a6fde8895 100644 --- a/go/vt/tabletserver/sandboxconn/sandboxconn.go +++ b/go/vt/tabletserver/sandboxconn/sandboxconn.go @@ -129,7 +129,7 @@ func (sbc *SandboxConn) getError() error { sbc.MustFailTxPool-- return &tabletconn.ServerError{ Err: "tx_pool_full: err", - ServerCode: vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED, + ServerCode: vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY, } } if sbc.MustFailNotTx > 0 { @@ -143,7 +143,7 @@ func (sbc *SandboxConn) getError() error { sbc.MustFailCanceled-- return &tabletconn.ServerError{ Err: "canceled: err", - ServerCode: vtrpcpb.ErrorCode_CANCELLED, + ServerCode: vtrpcpb.ErrorCode_CANCELLED_LEGACY, } } if sbc.MustFailUnknownError > 0 { @@ -157,7 +157,7 @@ func (sbc *SandboxConn) getError() error { sbc.MustFailDeadlineExceeded-- return &tabletconn.ServerError{ Err: "deadline exceeded: err", - ServerCode: vtrpcpb.ErrorCode_DEADLINE_EXCEEDED, + ServerCode: vtrpcpb.ErrorCode_DEADLINE_EXCEEDED_LEGACY, } } if sbc.MustFailIntegrityError > 0 { @@ -171,7 +171,7 @@ func (sbc *SandboxConn) getError() error { sbc.MustFailPermissionDenied-- return 
&tabletconn.ServerError{ Err: "permission denied: err", - ServerCode: vtrpcpb.ErrorCode_PERMISSION_DENIED, + ServerCode: vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY, } } if sbc.MustFailTransientError > 0 { @@ -185,7 +185,7 @@ func (sbc *SandboxConn) getError() error { sbc.MustFailUnauthenticated-- return &tabletconn.ServerError{ Err: "unauthenticated: err", - ServerCode: vtrpcpb.ErrorCode_UNAUTHENTICATED, + ServerCode: vtrpcpb.ErrorCode_UNAUTHENTICATED_LEGACY, } } diff --git a/go/vt/tabletserver/tabletconn/grpc_error.go b/go/vt/tabletserver/tabletconn/grpc_error.go index 50e772b10d0..0be34dfafe3 100644 --- a/go/vt/tabletserver/tabletconn/grpc_error.go +++ b/go/vt/tabletserver/tabletconn/grpc_error.go @@ -42,6 +42,6 @@ func TabletErrorFromRPCError(err *vtrpcpb.RPCError) error { // server side error, convert it return &ServerError{ Err: fmt.Sprintf("vttablet: %v", err), - ServerCode: err.Code, + ServerCode: err.LegacyCode, } } diff --git a/go/vt/tabletserver/tabletconntest/tabletconntest.go b/go/vt/tabletserver/tabletconntest/tabletconntest.go index 35e6f92393b..9aacf8c4175 100644 --- a/go/vt/tabletserver/tabletconntest/tabletconntest.go +++ b/go/vt/tabletserver/tabletconntest/tabletconntest.go @@ -33,8 +33,8 @@ func testErrorHelper(t *testing.T, f *FakeQueryService, name string, ef func(con // A few generic errors tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "generic error"), tabletenv.NewTabletError(vtrpcpb.ErrorCode_UNKNOWN_ERROR, "uncaught panic"), - tabletenv.NewTabletError(vtrpcpb.ErrorCode_UNAUTHENTICATED, "missing caller id"), - tabletenv.NewTabletError(vtrpcpb.ErrorCode_PERMISSION_DENIED, "table acl error: nil acl"), + tabletenv.NewTabletError(vtrpcpb.ErrorCode_UNAUTHENTICATED_LEGACY, "missing caller id"), + tabletenv.NewTabletError(vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY, "table acl error: nil acl"), // Client will retry on this specific error tabletenv.NewTabletError(vtrpcpb.ErrorCode_QUERY_NOT_SERVED, "Query disallowed due to rule: %v", "cool 
rule"), @@ -43,7 +43,7 @@ func testErrorHelper(t *testing.T, f *FakeQueryService, name string, ef func(con tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "Could not verify strict mode"), // This is usually transaction pool full - tabletenv.NewTabletError(vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED, "Transaction pool connection limit exceeded"), + tabletenv.NewTabletError(vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY, "Transaction pool connection limit exceeded"), // Transaction expired or was unknown tabletenv.NewTabletError(vtrpcpb.ErrorCode_NOT_IN_TX, "Transaction 12"), diff --git a/go/vt/tabletserver/tabletenv/tablet_error.go b/go/vt/tabletserver/tabletenv/tablet_error.go index db4a77a0e58..8804a4d8c9e 100644 --- a/go/vt/tabletserver/tabletenv/tablet_error.go +++ b/go/vt/tabletserver/tabletenv/tablet_error.go @@ -144,7 +144,7 @@ func (te *TabletError) Prefix() string { prefix = "retry: " case vtrpcpb.ErrorCode_INTERNAL_ERROR: prefix = "fatal: " - case vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED: + case vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY: prefix = "tx_pool_full: " case vtrpcpb.ErrorCode_NOT_IN_TX: prefix = "not_in_tx: " @@ -163,7 +163,7 @@ func (te *TabletError) RecordStats() { InfoErrors.Add("Retry", 1) case vtrpcpb.ErrorCode_INTERNAL_ERROR: ErrorStats.Add("Fatal", 1) - case vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED: + case vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY: ErrorStats.Add("TxPoolFull", 1) case vtrpcpb.ErrorCode_NOT_IN_TX: ErrorStats.Add("NotInTx", 1) @@ -179,7 +179,7 @@ func (te *TabletError) RecordStats() { } } -// LogErrors logs panics and increments InternalErrors. +// LogError logs panics and increments InternalErrors. 
func LogError() { if x := recover(); x != nil { log.Errorf("Uncaught panic:\n%v\n%s", x, tb.Stack(4)) diff --git a/go/vt/tabletserver/tabletenv/tablet_error_test.go b/go/vt/tabletserver/tabletenv/tablet_error_test.go index 4a36e4c08e0..b35ac3141b2 100644 --- a/go/vt/tabletserver/tabletenv/tablet_error_test.go +++ b/go/vt/tabletserver/tabletenv/tablet_error_test.go @@ -114,7 +114,7 @@ func TestTabletErrorPrefix(t *testing.T) { if tabletErr.Prefix() != "fatal: " { t.Fatalf("tablet error with error code: INTERNAL_ERROR should has prefix: 'fatal: '") } - tabletErr = NewTabletErrorSQL(vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED, sqldb.NewSQLError(2000, "HY000", "test")) + tabletErr = NewTabletErrorSQL(vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY, sqldb.NewSQLError(2000, "HY000", "test")) if tabletErr.Prefix() != "tx_pool_full: " { t.Fatalf("tablet error with error code: RESOURCE_EXHAUSTED should has prefix: 'tx_pool_full: '") } @@ -141,7 +141,7 @@ func TestTabletErrorRecordStats(t *testing.T) { t.Fatalf("tablet error with error code INTERNAL_ERROR should increase Fatal error count by 1") } - tabletErr = NewTabletErrorSQL(vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED, sqldb.NewSQLError(2000, "HY000", "test")) + tabletErr = NewTabletErrorSQL(vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY, sqldb.NewSQLError(2000, "HY000", "test")) txPoolFullCounterBefore := ErrorStats.Counts()["TxPoolFull"] tabletErr.RecordStats() txPoolFullCounterAfter := ErrorStats.Counts()["TxPoolFull"] @@ -203,7 +203,7 @@ func TestTabletErrorLogUncaughtErr(t *testing.T) { } func TestTabletErrorTxPoolFull(t *testing.T) { - tabletErr := NewTabletErrorSQL(vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED, sqldb.NewSQLError(1000, "HY000", "test")) + tabletErr := NewTabletErrorSQL(vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY, sqldb.NewSQLError(1000, "HY000", "test")) defer func() { err := recover() if err != nil { diff --git a/go/vt/tabletserver/tabletserver.go b/go/vt/tabletserver/tabletserver.go index 0ff4e3f5267..218a021a021 100644 --- 
a/go/vt/tabletserver/tabletserver.go +++ b/go/vt/tabletserver/tabletserver.go @@ -1189,7 +1189,7 @@ func (tsv *TabletServer) handleError( switch terr.ErrorCode { case vtrpcpb.ErrorCode_QUERY_NOT_SERVED: return myError - case vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED: + case vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY: logMethod = logTxPoolFull.Errorf case vtrpcpb.ErrorCode_INTERNAL_ERROR: logMethod = log.Errorf diff --git a/go/vt/tabletserver/tabletserver_test.go b/go/vt/tabletserver/tabletserver_test.go index f04d63f2e0c..cf31d851828 100644 --- a/go/vt/tabletserver/tabletserver_test.go +++ b/go/vt/tabletserver/tabletserver_test.go @@ -1835,7 +1835,7 @@ func TestTerseErrorsBindVars(t *testing.T) { "select * from test_table", map[string]interface{}{"a": 1}, &tabletenv.TabletError{ - ErrorCode: vtrpcpb.ErrorCode_DEADLINE_EXCEEDED, + ErrorCode: vtrpcpb.ErrorCode_DEADLINE_EXCEEDED_LEGACY, Message: "msg", SQLError: 10, SQLState: "HY000", @@ -1853,7 +1853,7 @@ func TestTerseErrorsNoBindVars(t *testing.T) { config := testUtils.newQueryServiceConfig() config.TerseErrors = true tsv := NewTabletServer(config) - err := tsv.handleError("", nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_DEADLINE_EXCEEDED, "msg"), nil) + err := tsv.handleError("", nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_DEADLINE_EXCEEDED_LEGACY, "msg"), nil) want := "error: msg" if err == nil || err.Error() != want { t.Errorf("Error: %v, want '%s'", err, want) diff --git a/go/vt/tabletserver/tx_executor.go b/go/vt/tabletserver/tx_executor.go index 92f5dd88189..09f08bc1cdf 100644 --- a/go/vt/tabletserver/tx_executor.go +++ b/go/vt/tabletserver/tx_executor.go @@ -51,7 +51,7 @@ func (txe *TxExecutor) Prepare(transactionID int64, dtid string) error { err = txe.te.preparedPool.Put(conn, dtid) if err != nil { txe.te.txPool.localRollback(txe.ctx, conn) - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED, "prepare failed for transaction %d: %v", transactionID, err) + return 
tabletenv.NewTabletError(vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY, "prepare failed for transaction %d: %v", transactionID, err) } localConn, err := txe.te.txPool.LocalBegin(txe.ctx) diff --git a/go/vt/tabletserver/tx_pool.go b/go/vt/tabletserver/tx_pool.go index 5b0539ec902..cea1eb06680 100644 --- a/go/vt/tabletserver/tx_pool.go +++ b/go/vt/tabletserver/tx_pool.go @@ -146,7 +146,7 @@ func (axp *TxPool) Begin(ctx context.Context) (int64, error) { return 0, err case pools.ErrTimeout: axp.LogActive() - return 0, tabletenv.NewTabletError(vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED, "Transaction pool connection limit exceeded") + return 0, tabletenv.NewTabletError(vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY, "Transaction pool connection limit exceeded") } return 0, tabletenv.NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, err) } diff --git a/go/vt/vterrors/aggregate.go b/go/vt/vterrors/aggregate.go index eb00b8a462b..df5d7a4bb28 100644 --- a/go/vt/vterrors/aggregate.go +++ b/go/vt/vterrors/aggregate.go @@ -31,19 +31,19 @@ const ( ) var errorPriorities = map[vtrpcpb.ErrorCode]int{ - vtrpcpb.ErrorCode_SUCCESS: PrioritySuccess, - vtrpcpb.ErrorCode_CANCELLED: PriorityCancelled, - vtrpcpb.ErrorCode_UNKNOWN_ERROR: PriorityUnknownError, - vtrpcpb.ErrorCode_BAD_INPUT: PriorityBadInput, - vtrpcpb.ErrorCode_DEADLINE_EXCEEDED: PriorityDeadlineExceeded, - vtrpcpb.ErrorCode_INTEGRITY_ERROR: PriorityIntegrityError, - vtrpcpb.ErrorCode_PERMISSION_DENIED: PriorityPermissionDenied, - vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED: PriorityResourceExhausted, - vtrpcpb.ErrorCode_QUERY_NOT_SERVED: PriorityQueryNotServed, - vtrpcpb.ErrorCode_NOT_IN_TX: PriorityNotInTx, - vtrpcpb.ErrorCode_INTERNAL_ERROR: PriorityInternalError, - vtrpcpb.ErrorCode_TRANSIENT_ERROR: PriorityTransientError, - vtrpcpb.ErrorCode_UNAUTHENTICATED: PriorityUnauthenticated, + vtrpcpb.ErrorCode_SUCCESS: PrioritySuccess, + vtrpcpb.ErrorCode_CANCELLED_LEGACY: PriorityCancelled, + vtrpcpb.ErrorCode_UNKNOWN_ERROR: 
PriorityUnknownError, + vtrpcpb.ErrorCode_BAD_INPUT: PriorityBadInput, + vtrpcpb.ErrorCode_DEADLINE_EXCEEDED_LEGACY: PriorityDeadlineExceeded, + vtrpcpb.ErrorCode_INTEGRITY_ERROR: PriorityIntegrityError, + vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY: PriorityPermissionDenied, + vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY: PriorityResourceExhausted, + vtrpcpb.ErrorCode_QUERY_NOT_SERVED: PriorityQueryNotServed, + vtrpcpb.ErrorCode_NOT_IN_TX: PriorityNotInTx, + vtrpcpb.ErrorCode_INTERNAL_ERROR: PriorityInternalError, + vtrpcpb.ErrorCode_TRANSIENT_ERROR: PriorityTransientError, + vtrpcpb.ErrorCode_UNAUTHENTICATED_LEGACY: PriorityUnauthenticated, } // AggregateVtGateErrorCodes aggregates a list of errors into a single diff --git a/go/vt/vterrors/grpc.go b/go/vt/vterrors/grpc.go index 920b4b9d9dd..0b44eea1c87 100644 --- a/go/vt/vterrors/grpc.go +++ b/go/vt/vterrors/grpc.go @@ -30,19 +30,19 @@ func GRPCCodeToErrorCode(code codes.Code) vtrpcpb.ErrorCode { case codes.OK: return vtrpcpb.ErrorCode_SUCCESS case codes.Canceled: - return vtrpcpb.ErrorCode_CANCELLED + return vtrpcpb.ErrorCode_CANCELLED_LEGACY case codes.Unknown: return vtrpcpb.ErrorCode_UNKNOWN_ERROR case codes.InvalidArgument: return vtrpcpb.ErrorCode_BAD_INPUT case codes.DeadlineExceeded: - return vtrpcpb.ErrorCode_DEADLINE_EXCEEDED + return vtrpcpb.ErrorCode_DEADLINE_EXCEEDED_LEGACY case codes.AlreadyExists: return vtrpcpb.ErrorCode_INTEGRITY_ERROR case codes.PermissionDenied: - return vtrpcpb.ErrorCode_PERMISSION_DENIED + return vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY case codes.ResourceExhausted: - return vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED + return vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY case codes.FailedPrecondition: return vtrpcpb.ErrorCode_QUERY_NOT_SERVED case codes.Aborted: @@ -52,7 +52,7 @@ func GRPCCodeToErrorCode(code codes.Code) vtrpcpb.ErrorCode { case codes.Unavailable: return vtrpcpb.ErrorCode_TRANSIENT_ERROR case codes.Unauthenticated: - return vtrpcpb.ErrorCode_UNAUTHENTICATED + return 
vtrpcpb.ErrorCode_UNAUTHENTICATED_LEGACY default: return vtrpcpb.ErrorCode_UNKNOWN_ERROR } @@ -63,19 +63,19 @@ func ErrorCodeToGRPCCode(code vtrpcpb.ErrorCode) codes.Code { switch code { case vtrpcpb.ErrorCode_SUCCESS: return codes.OK - case vtrpcpb.ErrorCode_CANCELLED: + case vtrpcpb.ErrorCode_CANCELLED_LEGACY: return codes.Canceled case vtrpcpb.ErrorCode_UNKNOWN_ERROR: return codes.Unknown case vtrpcpb.ErrorCode_BAD_INPUT: return codes.InvalidArgument - case vtrpcpb.ErrorCode_DEADLINE_EXCEEDED: + case vtrpcpb.ErrorCode_DEADLINE_EXCEEDED_LEGACY: return codes.DeadlineExceeded case vtrpcpb.ErrorCode_INTEGRITY_ERROR: return codes.AlreadyExists - case vtrpcpb.ErrorCode_PERMISSION_DENIED: + case vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY: return codes.PermissionDenied - case vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED: + case vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY: return codes.ResourceExhausted case vtrpcpb.ErrorCode_QUERY_NOT_SERVED: return codes.FailedPrecondition @@ -85,7 +85,7 @@ func ErrorCodeToGRPCCode(code vtrpcpb.ErrorCode) codes.Code { return codes.Internal case vtrpcpb.ErrorCode_TRANSIENT_ERROR: return codes.Unavailable - case vtrpcpb.ErrorCode_UNAUTHENTICATED: + case vtrpcpb.ErrorCode_UNAUTHENTICATED_LEGACY: return codes.Unauthenticated default: return codes.Unknown diff --git a/go/vt/vterrors/proto3.go b/go/vt/vterrors/proto3.go index 030262fd568..21199e15ec4 100644 --- a/go/vt/vterrors/proto3.go +++ b/go/vt/vterrors/proto3.go @@ -22,7 +22,7 @@ func FromVtRPCError(rpcErr *vtrpcpb.RPCError) error { return nil } return &VitessError{ - Code: rpcErr.Code, + Code: rpcErr.LegacyCode, err: errors.New(rpcErr.Message), } } @@ -33,7 +33,7 @@ func VtRPCErrorFromVtError(err error) *vtrpcpb.RPCError { return nil } return &vtrpcpb.RPCError{ - Code: RecoverVtErrorCode(err), - Message: err.Error(), + LegacyCode: RecoverVtErrorCode(err), + Message: err.Error(), } } diff --git a/go/vt/vtgate/resolver_test.go b/go/vt/vtgate/resolver_test.go index 85a836639b6..589cd5b16fc 
100644 --- a/go/vt/vtgate/resolver_test.go +++ b/go/vt/vtgate/resolver_test.go @@ -566,7 +566,7 @@ func TestIsRetryableError(t *testing.T) { // they'll be wrapped in ScatterConnError or ShardConnError. // So they can't be retried as is. {&tabletconn.ServerError{ServerCode: vtrpcpb.ErrorCode_QUERY_NOT_SERVED}, false}, - {&tabletconn.ServerError{ServerCode: vtrpcpb.ErrorCode_PERMISSION_DENIED}, false}, + {&tabletconn.ServerError{ServerCode: vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY}, false}, } for _, tt := range connErrorTests { diff --git a/go/vt/vtgate/scatter_conn.go b/go/vt/vtgate/scatter_conn.go index 6032f70638c..ad21b7c0390 100644 --- a/go/vt/vtgate/scatter_conn.go +++ b/go/vt/vtgate/scatter_conn.go @@ -82,7 +82,7 @@ func (stc *ScatterConn) endAction(startTime time.Time, allErrors *concurrency.Al if ec != vtrpcpb.ErrorCode_INTEGRITY_ERROR && ec != vtrpcpb.ErrorCode_BAD_INPUT { stc.tabletCallErrorCount.Add(statsKey, 1) } - if ec == vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED || ec == vtrpcpb.ErrorCode_NOT_IN_TX { + if ec == vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY || ec == vtrpcpb.ErrorCode_NOT_IN_TX { session.SetRollback() } } diff --git a/go/vt/vtgate/scatter_conn_test.go b/go/vt/vtgate/scatter_conn_test.go index f8f98964fe3..ece70d35d9b 100644 --- a/go/vt/vtgate/scatter_conn_test.go +++ b/go/vt/vtgate/scatter_conn_test.go @@ -282,7 +282,7 @@ func TestScatterConnError(t *testing.T) { err := &ScatterConnError{ Retryable: false, Errs: []error{ - &gateway.ShardError{ErrorCode: vtrpcpb.ErrorCode_PERMISSION_DENIED, Err: &tabletconn.ServerError{Err: "tabletconn error"}}, + &gateway.ShardError{ErrorCode: vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY, Err: &tabletconn.ServerError{Err: "tabletconn error"}}, fmt.Errorf("generic error"), tabletconn.ConnClosed, }, diff --git a/go/vt/vtgate/vtgate.go b/go/vt/vtgate/vtgate.go index 354b86d9588..dab6ed2ec9d 100644 --- a/go/vt/vtgate/vtgate.go +++ b/go/vt/vtgate/vtgate.go @@ -958,10 +958,10 @@ func handleExecuteError(err error, 
statsKey []string, query map[string]interface case vtrpcpb.ErrorCode_INTEGRITY_ERROR: // Duplicate key error, no need to log. infoErrors.Add("DupKey", 1) - case vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED, vtrpcpb.ErrorCode_BAD_INPUT: + case vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY, vtrpcpb.ErrorCode_BAD_INPUT: // Tx pool full error, or bad input, no need to log. normalErrors.Add(statsKey, 1) - case vtrpcpb.ErrorCode_PERMISSION_DENIED: + case vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY: // User violated permissions (TableACL), no need to log. infoErrors.Add("PermissionDenied", 1) case vtrpcpb.ErrorCode_TRANSIENT_ERROR: diff --git a/go/vt/vtgate/vtgate_test.go b/go/vt/vtgate/vtgate_test.go index af7a7faeca5..d3b30d7539f 100644 --- a/go/vt/vtgate/vtgate_test.go +++ b/go/vt/vtgate/vtgate_test.go @@ -1988,7 +1988,7 @@ func TestErrorPropagation(t *testing.T) { sbc.MustFailCanceled = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCanceled = 0 - }, vtrpcpb.ErrorCode_CANCELLED) + }, vtrpcpb.ErrorCode_CANCELLED_LEGACY) // ErrorCode_UNKNOWN_ERROR testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { @@ -2009,7 +2009,7 @@ func TestErrorPropagation(t *testing.T) { sbc.MustFailDeadlineExceeded = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailDeadlineExceeded = 0 - }, vtrpcpb.ErrorCode_DEADLINE_EXCEEDED) + }, vtrpcpb.ErrorCode_DEADLINE_EXCEEDED_LEGACY) // ErrorCode_INTEGRITY_ERROR testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { @@ -2023,14 +2023,14 @@ func TestErrorPropagation(t *testing.T) { sbc.MustFailPermissionDenied = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailPermissionDenied = 0 - }, vtrpcpb.ErrorCode_PERMISSION_DENIED) + }, vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY) // ErrorCode_RESOURCE_EXHAUSTED testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailTxPool = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailTxPool = 0 - }, vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED) + }, 
vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY) // ErrorCode_QUERY_NOT_SERVED testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { @@ -2065,7 +2065,7 @@ func TestErrorPropagation(t *testing.T) { sbc.MustFailUnauthenticated = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailUnauthenticated = 0 - }, vtrpcpb.ErrorCode_UNAUTHENTICATED) + }, vtrpcpb.ErrorCode_UNAUTHENTICATED_LEGACY) } // This test makes sure that if we start a transaction and hit a critical diff --git a/go/vt/worker/grpcvtworkerclient/client.go b/go/vt/worker/grpcvtworkerclient/client.go index 7e120dea354..5f3a020b8bd 100644 --- a/go/vt/worker/grpcvtworkerclient/client.go +++ b/go/vt/worker/grpcvtworkerclient/client.go @@ -43,7 +43,7 @@ func gRPCVtworkerClientFactory(addr string, dialTimeout time.Duration) (vtworker } cc, err := grpc.Dial(addr, opt, grpc.WithBlock(), grpc.WithTimeout(dialTimeout)) if err != nil { - return nil, vterrors.NewVitessError(vtrpcpb.ErrorCode_DEADLINE_EXCEEDED, err, "grpc.Dial() err: %v", err) + return nil, vterrors.NewVitessError(vtrpcpb.ErrorCode_DEADLINE_EXCEEDED_LEGACY, err, "grpc.Dial() err: %v", err) } c := vtworkerservicepb.NewVtworkerClient(cc) diff --git a/go/vt/worker/instance.go b/go/vt/worker/instance.go index 91a74879d42..aa1170c7649 100644 --- a/go/vt/worker/instance.go +++ b/go/vt/worker/instance.go @@ -141,7 +141,7 @@ func (wi *Instance) setAndStartWorker(ctx context.Context, wrk Worker, wr *wrang case <-wi.currentContext.Done(): // Context is done i.e. probably canceled. 
if wi.currentContext.Err() == context.Canceled { - err = vterrors.NewVitessError(vtrpcpb.ErrorCode_CANCELLED, err, "vtworker command was canceled: %v", err) + err = vterrors.NewVitessError(vtrpcpb.ErrorCode_CANCELLED_LEGACY, err, "vtworker command was canceled: %v", err) } default: } diff --git a/go/vt/worker/vtworkerclienttest/client_testsuite.go b/go/vt/worker/vtworkerclienttest/client_testsuite.go index 4a4ea767c24..f10b4c57eab 100644 --- a/go/vt/worker/vtworkerclienttest/client_testsuite.go +++ b/go/vt/worker/vtworkerclienttest/client_testsuite.go @@ -129,7 +129,7 @@ func commandErrorsBecauseBusy(t *testing.T, client vtworkerclient.Client, server if _, err := stream.Recv(); err != nil { // We see CANCELED from the RPC client (client side cancelation) or // from vtworker itself (server side cancelation). - if vterrors.RecoverVtErrorCode(err) != vtrpcpb.ErrorCode_CANCELLED { + if vterrors.RecoverVtErrorCode(err) != vtrpcpb.ErrorCode_CANCELLED_LEGACY { errorCodeCheck = fmt.Errorf("Block command should only error due to canceled context: %v", err) } // Stream has finished. diff --git a/proto/vtrpc.proto b/proto/vtrpc.proto index 581fe6b5c3c..9e62d469291 100644 --- a/proto/vtrpc.proto +++ b/proto/vtrpc.proto @@ -33,16 +33,142 @@ message CallerID { string subcomponent = 3; } -// ErrorCode is the enum values for Errors. Internally, errors should -// be created with one of these codes. These will then be translated over the wire -// by various RPC frameworks. +// Code represnts canonical error codes. The names and numbers must match +// the ones defined by grpc: https://godoc.org/google.golang.org/grpc/codes. +enum Code { + // OK is returned on success. + OK = 0; + + // CANCELED indicates the operation was cancelled (typically by the caller). + CANCELED = 1; + + // UNKNOWN error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. 
Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + UNKNOWN = 2; + + // INVALID_ARGUMENT indicates client specified an invalid argument. + // Note that this differs from FAILED_PRECONDITION. It indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + INVALID_ARGUMENT = 3; + + // DEADLINE_EXCEEDED means operation expired before completion. + // For operations that change the state of the system, this error may be + // returned even if the operation has completed successfully. For + // example, a successful response from a server could have been delayed + // long enough for the deadline to expire. + DEADLINE_EXCEEDED = 4; + + // NOT_FOUND means some requested entity (e.g., file or directory) was + // not found. + NOT_FOUND = 5; + + // ALREADY_EXISTS means an attempt to create an entity failed because one + // already exists. + ALREADY_EXISTS = 6; + + // PERMISSION_DENIED indicates the caller does not have permission to + // execute the specified operation. It must not be used for rejections + // caused by exhausting some resource (use RESOURCE_EXHAUSTED + // instead for those errors). It must not be + // used if the caller cannot be identified (use Unauthenticated + // instead for those errors). + PERMISSION_DENIED = 7; + + // UNAUTHENTICATED indicates the request does not have valid + // authentication credentials for the operation. + UNAUTHENTICATED = 16; + + // RESOURCE_EXHAUSTED indicates some resource has been exhausted, perhaps + // a per-user quota, or perhaps the entire file system is out of space. + RESOURCE_EXHAUSTED = 8; + + // FAILED_PRECONDITION indicates operation was rejected because the + // system is not in a state required for the operation's execution. + // For example, directory to be deleted may be non-empty, an rmdir + // operation is applied to a non-directory, etc. 
+ // + // A litmus test that may help a service implementor in deciding + // between FAILED_PRECONDITION, ABORTED, and UNAVAILABLE: + // (a) Use UNAVAILABLE if the client can retry just the failing call. + // (b) Use ABORTED if the client should retry at a higher-level + // (e.g., restarting a read-modify-write sequence). + // (c) Use FAILED_PRECONDITION if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, FAILED_PRECONDITION + // should be returned since the client should not retry unless + // they have first fixed up the directory by deleting files from it. + // (d) Use FAILED_PRECONDITION if the client performs conditional + // REST Get/Update/Delete on a resource and the resource on the + // server does not match the condition. E.g., conflicting + // read-modify-write on the same resource. + FAILED_PRECONDITION = 9; + + // ABORTED indicates the operation was aborted, typically due to a + // concurrency issue like sequencer check failures, transaction aborts, + // etc. + // + // See litmus test above for deciding between FAILED_PRECONDITION, + // ABORTED, and UNAVAILABLE. + ABORTED = 10; + + // OUT_OF_RANGE means operation was attempted past the valid range. + // E.g., seeking or reading past end of file. + // + // Unlike INVALID_ARGUMENT, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate INVALID_ARGUMENT if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // OUT_OF_RANGE if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between FAILED_PRECONDITION and + // OUT_OF_RANGE. We recommend using OUT_OF_RANGE (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an OUT_OF_RANGE error to detect when + // they are done. 
+ OUT_OF_RANGE = 11; + + // UNIMPLEMENTED indicates operation is not implemented or not + // supported/enabled in this service. + UNIMPLEMENTED = 12; + + // INTERNAL errors. Means some invariants expected by underlying + // system has been broken. If you see one of these errors, + // something is very broken. + INTERNAL = 13; + + // UNAVAILABLE indicates the service is currently unavailable. + // This is a most likely a transient condition and may be corrected + // by retrying with a backoff. + // + // See litmus test above for deciding between FAILED_PRECONDITION, + // ABORTED, and UNAVAILABLE. + UNAVAILABLE = 14; + + // DATA_LOSS indicates unrecoverable data loss or corruption. + DATA_LOSS = 15; +} + +// ErrorCode is the enum values for Errors. This type is deprecated. +// Use Code instead. Background: In the initial design, we thought +// that we may end up with a different list of canonical error codes +// than the ones defined by grpc. In hindisght, we realize that +// the grpc error codes are fairly generic and mostly sufficient. +// In order to avoid confusion, thie type will be deprecated in +// favor of the new Code that matches exactly what grpc defines. +// Some names below have a _LEGACY suffix. This is to prevent +// name collisions with Code. enum ErrorCode { // SUCCESS is returned from a successful call. SUCCESS = 0; - // CANCELLED means that the context was cancelled (and noticed in the app layer, + // CANCELLED_LEGACY means that the context was cancelled (and noticed in the app layer, // as opposed to the RPC layer). - CANCELLED = 1; + CANCELLED_LEGACY = 1; // UNKNOWN_ERROR includes: // 1. MySQL error codes that we don't explicitly handle. @@ -55,25 +181,25 @@ enum ErrorCode { // or tries a query that isn't supported by Vitess. BAD_INPUT = 3; - // DEADLINE_EXCEEDED is returned when an action is taking longer than a given timeout. - DEADLINE_EXCEEDED = 4; + // DEADLINE_EXCEEDED_LEGACY is returned when an action is taking longer than a given timeout. 
+ DEADLINE_EXCEEDED_LEGACY = 4; // INTEGRITY_ERROR is returned on integrity error from MySQL, usually due to // duplicate primary keys. INTEGRITY_ERROR = 5; - // PERMISSION_DENIED errors are returned when a user requests access to something + // PERMISSION_DENIED_LEGACY errors are returned when a user requests access to something // that they don't have permissions for. - PERMISSION_DENIED = 6; + PERMISSION_DENIED_LEGACY = 6; - // RESOURCE_EXHAUSTED is returned when a query exceeds its quota in some dimension + // RESOURCE_EXHAUSTED_LEGACY is returned when a query exceeds its quota in some dimension // and can't be completed due to that. Queries that return RESOURCE_EXHAUSTED // should not be retried, as it could be detrimental to the server's health. // Examples of errors that will cause the RESOURCE_EXHAUSTED code: // 1. TxPoolFull: this is retried server-side, and is only returned as an error // if the server-side retries failed. // 2. Query is killed due to it taking too long. - RESOURCE_EXHAUSTED = 7; + RESOURCE_EXHAUSTED_LEGACY = 7; // QUERY_NOT_SERVED means that a query could not be served right now. // Client can interpret it as: "the tablet that you sent this query to cannot @@ -112,9 +238,9 @@ enum ErrorCode { // 2. VtGate could have request backlog TRANSIENT_ERROR = 11; - // UNAUTHENTICATED errors are returned when a user requests access to something, + // UNAUTHENTICATED_LEGACY errors are returned when a user requests access to something, // and we're unable to verify the user's authentication. - UNAUTHENTICATED = 12; + UNAUTHENTICATED_LEGACY = 12; } // RPCError is an application-level error structure returned by @@ -122,6 +248,6 @@ enum ErrorCode { // We use this so the clients don't have to parse the error messages, // but instead can depend on the value of the code. 
message RPCError { - ErrorCode code = 1; + ErrorCode legacy_code = 1; string message = 2; } diff --git a/py/vtproto/vtrpc_pb2.py b/py/vtproto/vtrpc_pb2.py index 764ca0007a4..e1cc108489c 100644 --- a/py/vtproto/vtrpc_pb2.py +++ b/py/vtproto/vtrpc_pb2.py @@ -20,10 +20,93 @@ name='vtrpc.proto', package='vtrpc', syntax='proto3', - serialized_pb=_b('\n\x0bvtrpc.proto\x12\x05vtrpc\"F\n\x08\x43\x61llerID\x12\x11\n\tprincipal\x18\x01 \x01(\t\x12\x11\n\tcomponent\x18\x02 \x01(\t\x12\x14\n\x0csubcomponent\x18\x03 \x01(\t\";\n\x08RPCError\x12\x1e\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x10.vtrpc.ErrorCode\x12\x0f\n\x07message\x18\x02 \x01(\t*\x87\x02\n\tErrorCode\x12\x0b\n\x07SUCCESS\x10\x00\x12\r\n\tCANCELLED\x10\x01\x12\x11\n\rUNKNOWN_ERROR\x10\x02\x12\r\n\tBAD_INPUT\x10\x03\x12\x15\n\x11\x44\x45\x41\x44LINE_EXCEEDED\x10\x04\x12\x13\n\x0fINTEGRITY_ERROR\x10\x05\x12\x15\n\x11PERMISSION_DENIED\x10\x06\x12\x16\n\x12RESOURCE_EXHAUSTED\x10\x07\x12\x14\n\x10QUERY_NOT_SERVED\x10\x08\x12\r\n\tNOT_IN_TX\x10\t\x12\x12\n\x0eINTERNAL_ERROR\x10\n\x12\x13\n\x0fTRANSIENT_ERROR\x10\x0b\x12\x13\n\x0fUNAUTHENTICATED\x10\x0c\x42\x1a\n\x18\x63om.youtube.vitess.protob\x06proto3') + serialized_pb=_b('\n\x0bvtrpc.proto\x12\x05vtrpc\"F\n\x08\x43\x61llerID\x12\x11\n\tprincipal\x18\x01 \x01(\t\x12\x11\n\tcomponent\x18\x02 \x01(\t\x12\x14\n\x0csubcomponent\x18\x03 \x01(\t\"B\n\x08RPCError\x12%\n\x0blegacy_code\x18\x01 \x01(\x0e\x32\x10.vtrpc.ErrorCode\x12\x0f\n\x07message\x18\x02 
\x01(\t*\xb6\x02\n\x04\x43ode\x12\x06\n\x02OK\x10\x00\x12\x0c\n\x08\x43\x41NCELED\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x14\n\x10INVALID_ARGUMENT\x10\x03\x12\x15\n\x11\x44\x45\x41\x44LINE_EXCEEDED\x10\x04\x12\r\n\tNOT_FOUND\x10\x05\x12\x12\n\x0e\x41LREADY_EXISTS\x10\x06\x12\x15\n\x11PERMISSION_DENIED\x10\x07\x12\x13\n\x0fUNAUTHENTICATED\x10\x10\x12\x16\n\x12RESOURCE_EXHAUSTED\x10\x08\x12\x17\n\x13\x46\x41ILED_PRECONDITION\x10\t\x12\x0b\n\x07\x41\x42ORTED\x10\n\x12\x10\n\x0cOUT_OF_RANGE\x10\x0b\x12\x11\n\rUNIMPLEMENTED\x10\x0c\x12\x0c\n\x08INTERNAL\x10\r\x12\x0f\n\x0bUNAVAILABLE\x10\x0e\x12\r\n\tDATA_LOSS\x10\x0f*\xaa\x02\n\tErrorCode\x12\x0b\n\x07SUCCESS\x10\x00\x12\x14\n\x10\x43\x41NCELLED_LEGACY\x10\x01\x12\x11\n\rUNKNOWN_ERROR\x10\x02\x12\r\n\tBAD_INPUT\x10\x03\x12\x1c\n\x18\x44\x45\x41\x44LINE_EXCEEDED_LEGACY\x10\x04\x12\x13\n\x0fINTEGRITY_ERROR\x10\x05\x12\x1c\n\x18PERMISSION_DENIED_LEGACY\x10\x06\x12\x1d\n\x19RESOURCE_EXHAUSTED_LEGACY\x10\x07\x12\x14\n\x10QUERY_NOT_SERVED\x10\x08\x12\r\n\tNOT_IN_TX\x10\t\x12\x12\n\x0eINTERNAL_ERROR\x10\n\x12\x13\n\x0fTRANSIENT_ERROR\x10\x0b\x12\x1a\n\x16UNAUTHENTICATED_LEGACY\x10\x0c\x42\x1a\n\x18\x63om.youtube.vitess.protob\x06proto3') ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) +_CODE = _descriptor.EnumDescriptor( + name='Code', + full_name='vtrpc.Code', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='OK', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CANCELED', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='UNKNOWN', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='INVALID_ARGUMENT', index=3, number=3, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='DEADLINE_EXCEEDED', index=4, number=4, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='NOT_FOUND', index=5, number=5, + options=None, + type=None), + 
_descriptor.EnumValueDescriptor( + name='ALREADY_EXISTS', index=6, number=6, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='PERMISSION_DENIED', index=7, number=7, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='UNAUTHENTICATED', index=8, number=16, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='RESOURCE_EXHAUSTED', index=9, number=8, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='FAILED_PRECONDITION', index=10, number=9, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ABORTED', index=11, number=10, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='OUT_OF_RANGE', index=12, number=11, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='UNIMPLEMENTED', index=13, number=12, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='INTERNAL', index=14, number=13, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='UNAVAILABLE', index=15, number=14, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='DATA_LOSS', index=16, number=15, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=163, + serialized_end=473, +) +_sym_db.RegisterEnumDescriptor(_CODE) + +Code = enum_type_wrapper.EnumTypeWrapper(_CODE) _ERRORCODE = _descriptor.EnumDescriptor( name='ErrorCode', full_name='vtrpc.ErrorCode', @@ -35,7 +118,7 @@ options=None, type=None), _descriptor.EnumValueDescriptor( - name='CANCELLED', index=1, number=1, + name='CANCELLED_LEGACY', index=1, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( @@ -47,7 +130,7 @@ options=None, type=None), _descriptor.EnumValueDescriptor( - name='DEADLINE_EXCEEDED', index=4, number=4, + name='DEADLINE_EXCEEDED_LEGACY', index=4, number=4, options=None, type=None), _descriptor.EnumValueDescriptor( @@ -55,11 +138,11 @@ options=None, type=None), 
_descriptor.EnumValueDescriptor( - name='PERMISSION_DENIED', index=6, number=6, + name='PERMISSION_DENIED_LEGACY', index=6, number=6, options=None, type=None), _descriptor.EnumValueDescriptor( - name='RESOURCE_EXHAUSTED', index=7, number=7, + name='RESOURCE_EXHAUSTED_LEGACY', index=7, number=7, options=None, type=None), _descriptor.EnumValueDescriptor( @@ -79,31 +162,48 @@ options=None, type=None), _descriptor.EnumValueDescriptor( - name='UNAUTHENTICATED', index=12, number=12, + name='UNAUTHENTICATED_LEGACY', index=12, number=12, options=None, type=None), ], containing_type=None, options=None, - serialized_start=156, - serialized_end=419, + serialized_start=476, + serialized_end=774, ) _sym_db.RegisterEnumDescriptor(_ERRORCODE) ErrorCode = enum_type_wrapper.EnumTypeWrapper(_ERRORCODE) +OK = 0 +CANCELED = 1 +UNKNOWN = 2 +INVALID_ARGUMENT = 3 +DEADLINE_EXCEEDED = 4 +NOT_FOUND = 5 +ALREADY_EXISTS = 6 +PERMISSION_DENIED = 7 +UNAUTHENTICATED = 16 +RESOURCE_EXHAUSTED = 8 +FAILED_PRECONDITION = 9 +ABORTED = 10 +OUT_OF_RANGE = 11 +UNIMPLEMENTED = 12 +INTERNAL = 13 +UNAVAILABLE = 14 +DATA_LOSS = 15 SUCCESS = 0 -CANCELLED = 1 +CANCELLED_LEGACY = 1 UNKNOWN_ERROR = 2 BAD_INPUT = 3 -DEADLINE_EXCEEDED = 4 +DEADLINE_EXCEEDED_LEGACY = 4 INTEGRITY_ERROR = 5 -PERMISSION_DENIED = 6 -RESOURCE_EXHAUSTED = 7 +PERMISSION_DENIED_LEGACY = 6 +RESOURCE_EXHAUSTED_LEGACY = 7 QUERY_NOT_SERVED = 8 NOT_IN_TX = 9 INTERNAL_ERROR = 10 TRANSIENT_ERROR = 11 -UNAUTHENTICATED = 12 +UNAUTHENTICATED_LEGACY = 12 @@ -160,7 +260,7 @@ containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='code', full_name='vtrpc.RPCError.code', index=0, + name='legacy_code', full_name='vtrpc.RPCError.legacy_code', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, @@ -186,12 +286,13 @@ oneofs=[ ], serialized_start=94, - serialized_end=153, + serialized_end=160, ) -_RPCERROR.fields_by_name['code'].enum_type = 
_ERRORCODE +_RPCERROR.fields_by_name['legacy_code'].enum_type = _ERRORCODE DESCRIPTOR.message_types_by_name['CallerID'] = _CALLERID DESCRIPTOR.message_types_by_name['RPCError'] = _RPCERROR +DESCRIPTOR.enum_types_by_name['Code'] = _CODE DESCRIPTOR.enum_types_by_name['ErrorCode'] = _ERRORCODE CallerID = _reflection.GeneratedProtocolMessageType('CallerID', (_message.Message,), dict( From 9855595bdc28aca4d5c36795880829a2f88ed6e2 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Wed, 15 Feb 2017 23:32:00 -0800 Subject: [PATCH 014/108] vterrors: use new vtrpcpb.Code everywhere vtrpcpb.ErrorCode is used only at the grpc boundary for legacy purposes. --- .../vtgateclienttest/goclienttest/errors.go | 18 ++--- go/cmd/vtgateclienttest/services/errors.go | 20 ++--- go/sqltypes/proto3_test.go | 9 ++- go/vt/dtids/dtids.go | 8 +- go/vt/proto/vtrpc/vtrpc.pb.go | 75 +++++++++--------- go/vt/tabletserver/codex.go | 28 +++---- go/vt/tabletserver/codex_test.go | 10 +-- go/vt/tabletserver/connpool/dbconn.go | 10 +-- .../engines/schema/schema_engine.go | 24 +++--- go/vt/tabletserver/messager_engine.go | 2 +- go/vt/tabletserver/query_engine.go | 8 +- go/vt/tabletserver/query_executor.go | 32 ++++---- go/vt/tabletserver/query_executor_test.go | 28 +++---- go/vt/tabletserver/query_rules.go | 66 ++++++++-------- go/vt/tabletserver/query_rules_test.go | 4 +- go/vt/tabletserver/sandboxconn/sandboxconn.go | 38 +++++----- go/vt/tabletserver/tabletconn/grpc_error.go | 8 +- go/vt/tabletserver/tabletconn/tablet_conn.go | 4 +- .../tabletconntest/tabletconntest.go | 20 ++--- go/vt/tabletserver/tabletenv/logstats_test.go | 4 +- go/vt/tabletserver/tabletenv/tablet_error.go | 60 +++++++-------- .../tabletenv/tablet_error_test.go | 68 ++++++++--------- go/vt/tabletserver/tabletserver.go | 68 ++++++++--------- go/vt/tabletserver/tabletserver_test.go | 24 +++--- go/vt/tabletserver/testutils_test.go | 6 +- go/vt/tabletserver/twopc.go | 4 +- go/vt/tabletserver/tx_executor.go | 26 +++---- 
go/vt/tabletserver/tx_pool.go | 16 ++-- go/vt/tabletserver/tx_pool_test.go | 10 +-- go/vt/vterrors/aggregate.go | 34 ++++----- go/vt/vterrors/aggregate_test.go | 36 ++++----- go/vt/vterrors/grpc.go | 76 +++++++++++-------- go/vt/vterrors/proto3.go | 10 ++- go/vt/vterrors/vterrors.go | 16 ++-- go/vt/vtgate/buffer/buffer.go | 10 +-- go/vt/vtgate/buffer/buffer_test.go | 10 +-- go/vt/vtgate/gateway/discoverygateway.go | 10 +-- go/vt/vtgate/gateway/discoverygateway_test.go | 14 ++-- go/vt/vtgate/gateway/l2vtgategateway.go | 4 +- go/vt/vtgate/gateway/shard_error.go | 10 +-- go/vt/vtgate/l2vtgate/l2vtgate.go | 2 +- go/vt/vtgate/masterbuffer/masterbuffer.go | 2 +- go/vt/vtgate/resolver.go | 4 +- go/vt/vtgate/resolver_test.go | 8 +- go/vt/vtgate/safe_session.go | 2 +- go/vt/vtgate/scatter_conn.go | 10 +-- go/vt/vtgate/scatter_conn_test.go | 8 +- go/vt/vtgate/topo_utils.go | 14 ++-- go/vt/vtgate/tx_conn.go | 6 +- go/vt/vtgate/tx_conn_test.go | 20 ++--- go/vt/vtgate/vtgate.go | 12 +-- go/vt/vtgate/vtgate_test.go | 46 +++++------ go/vt/vtgate/vtgateconntest/client.go | 2 +- go/vt/worker/grpcvtworkerclient/client.go | 2 +- go/vt/worker/instance.go | 8 +- .../vtworkerclienttest/client_testsuite.go | 6 +- proto/vtrpc.proto | 1 + py/vtproto/vtrpc_pb2.py | 20 +++-- 58 files changed, 566 insertions(+), 535 deletions(-) diff --git a/go/cmd/vtgateclienttest/goclienttest/errors.go b/go/cmd/vtgateclienttest/goclienttest/errors.go index 490f68505bf..7682d4c70b6 100644 --- a/go/cmd/vtgateclienttest/goclienttest/errors.go +++ b/go/cmd/vtgateclienttest/goclienttest/errors.go @@ -24,14 +24,14 @@ var ( errorPrefix = "error://" partialErrorPrefix = "partialerror://" - executeErrors = map[string]vtrpcpb.ErrorCode{ - "bad input": vtrpcpb.ErrorCode_BAD_INPUT, - "deadline exceeded": vtrpcpb.ErrorCode_DEADLINE_EXCEEDED_LEGACY, - "integrity error": vtrpcpb.ErrorCode_INTEGRITY_ERROR, - "transient error": vtrpcpb.ErrorCode_TRANSIENT_ERROR, - "unauthenticated": vtrpcpb.ErrorCode_UNAUTHENTICATED_LEGACY, 
- "aborted": vtrpcpb.ErrorCode_NOT_IN_TX, - "unknown error": vtrpcpb.ErrorCode_UNKNOWN_ERROR, + executeErrors = map[string]vtrpcpb.Code{ + "bad input": vtrpcpb.Code_INVALID_ARGUMENT, + "deadline exceeded": vtrpcpb.Code_DEADLINE_EXCEEDED, + "integrity error": vtrpcpb.Code_ALREADY_EXISTS, + "transient error": vtrpcpb.Code_UNAVAILABLE, + "unauthenticated": vtrpcpb.Code_UNAUTHENTICATED, + "aborted": vtrpcpb.Code_ABORTED, + "unknown error": vtrpcpb.Code_UNKNOWN, } ) @@ -258,7 +258,7 @@ func checkTransactionExecuteErrors(t *testing.T, conn *vtgateconn.VTGateConn, ex } } -func checkError(t *testing.T, err error, query, errStr string, errCode vtrpcpb.ErrorCode) { +func checkError(t *testing.T, err error, query, errStr string, errCode vtrpcpb.Code) { if err == nil { t.Errorf("[%v] expected error, got nil", query) return diff --git a/go/cmd/vtgateclienttest/services/errors.go b/go/cmd/vtgateclienttest/services/errors.go index 277e10f95d2..511fd3dd310 100644 --- a/go/cmd/vtgateclienttest/services/errors.go +++ b/go/cmd/vtgateclienttest/services/errors.go @@ -81,53 +81,53 @@ func trimmedRequestToError(received string) error { switch received { case "bad input": return vterrors.FromError( - vtrpcpb.ErrorCode_BAD_INPUT, + vtrpcpb.Code_INVALID_ARGUMENT, errors.New("vtgate test client forced error: bad input"), ) case "deadline exceeded": return vterrors.FromError( - vtrpcpb.ErrorCode_DEADLINE_EXCEEDED_LEGACY, + vtrpcpb.Code_DEADLINE_EXCEEDED, errors.New("vtgate test client forced error: deadline exceeded"), ) case "integrity error": return vterrors.FromError( - vtrpcpb.ErrorCode_INTEGRITY_ERROR, + vtrpcpb.Code_ALREADY_EXISTS, errors.New("vtgate test client forced error: integrity error (errno 1062) (sqlstate 23000)"), ) // request backlog and general throttling type errors case "transient error": return vterrors.FromError( - vtrpcpb.ErrorCode_TRANSIENT_ERROR, + vtrpcpb.Code_UNAVAILABLE, errors.New("request_backlog: too many requests in flight: vtgate test client forced error: 
transient error"), ) case "throttled error": return vterrors.FromError( - vtrpcpb.ErrorCode_TRANSIENT_ERROR, + vtrpcpb.Code_UNAVAILABLE, errors.New("request_backlog: exceeded XXX quota, rate limiting: vtgate test client forced error: transient error"), ) case "unauthenticated": return vterrors.FromError( - vtrpcpb.ErrorCode_UNAUTHENTICATED_LEGACY, + vtrpcpb.Code_UNAUTHENTICATED, errors.New("vtgate test client forced error: unauthenticated"), ) case "aborted": return vterrors.FromError( - vtrpcpb.ErrorCode_NOT_IN_TX, + vtrpcpb.Code_ABORTED, errors.New("vtgate test client forced error: aborted"), ) case "query not served": return vterrors.FromError( - vtrpcpb.ErrorCode_QUERY_NOT_SERVED, + vtrpcpb.Code_FAILED_PRECONDITION, errors.New("vtgate test client forced error: query not served"), ) case "unknown error": return vterrors.FromError( - vtrpcpb.ErrorCode_UNKNOWN_ERROR, + vtrpcpb.Code_UNKNOWN, errors.New("vtgate test client forced error: unknown error"), ) default: return vterrors.FromError( - vtrpcpb.ErrorCode_UNKNOWN_ERROR, + vtrpcpb.Code_UNKNOWN, fmt.Errorf("vtgate test client error request unrecognized: %v", received), ) } diff --git a/go/sqltypes/proto3_test.go b/go/sqltypes/proto3_test.go index 634a9fa4471..129ce30427e 100644 --- a/go/sqltypes/proto3_test.go +++ b/go/sqltypes/proto3_test.go @@ -11,7 +11,7 @@ import ( "errors" querypb "github.com/youtube/vitess/go/vt/proto/query" - "github.com/youtube/vitess/go/vt/proto/vtrpc" + vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" "github.com/youtube/vitess/go/vt/vterrors" ) @@ -244,7 +244,7 @@ func TestQueryReponses(t *testing.T) { QueryError: nil, }, { QueryResult: nil, - QueryError: vterrors.FromError(vtrpc.ErrorCode_DEADLINE_EXCEEDED_LEGACY, errors.New("deadline exceeded")), + QueryError: vterrors.FromError(vtrpcpb.Code_DEADLINE_EXCEEDED, errors.New("deadline exceeded")), }, } @@ -286,9 +286,10 @@ func TestQueryReponses(t *testing.T) { }, }, }, { - Error: &vtrpc.RPCError{ - LegacyCode: 
vtrpc.ErrorCode_DEADLINE_EXCEEDED_LEGACY, + Error: &vtrpcpb.RPCError{ + LegacyCode: vtrpcpb.ErrorCode_DEADLINE_EXCEEDED_LEGACY, Message: "deadline exceeded", + Code: vtrpcpb.Code_DEADLINE_EXCEEDED, }, Result: nil, }, diff --git a/go/vt/dtids/dtids.go b/go/vt/dtids/dtids.go index cd387fe9013..7a602a1a373 100644 --- a/go/vt/dtids/dtids.go +++ b/go/vt/dtids/dtids.go @@ -27,7 +27,7 @@ func New(mmShard *vtgatepb.Session_ShardSession) string { func ShardSession(dtid string) (*vtgatepb.Session_ShardSession, error) { splits := strings.Split(dtid, ":") if len(splits) != 3 { - return nil, vterrors.FromError(vtrpcpb.ErrorCode_BAD_INPUT, fmt.Errorf("invalid parts in dtid: %s", dtid)) + return nil, vterrors.FromError(vtrpcpb.Code_INVALID_ARGUMENT, fmt.Errorf("invalid parts in dtid: %s", dtid)) } target := &querypb.Target{ Keyspace: splits[0], @@ -36,7 +36,7 @@ func ShardSession(dtid string) (*vtgatepb.Session_ShardSession, error) { } txid, err := strconv.ParseInt(splits[2], 10, 0) if err != nil { - return nil, vterrors.FromError(vtrpcpb.ErrorCode_BAD_INPUT, fmt.Errorf("invalid transaction id in dtid: %s", dtid)) + return nil, vterrors.FromError(vtrpcpb.Code_INVALID_ARGUMENT, fmt.Errorf("invalid transaction id in dtid: %s", dtid)) } return &vtgatepb.Session_ShardSession{ Target: target, @@ -48,11 +48,11 @@ func ShardSession(dtid string) (*vtgatepb.Session_ShardSession, error) { func TransactionID(dtid string) (int64, error) { splits := strings.Split(dtid, ":") if len(splits) != 3 { - return 0, vterrors.FromError(vtrpcpb.ErrorCode_BAD_INPUT, fmt.Errorf("invalid parts in dtid: %s", dtid)) + return 0, vterrors.FromError(vtrpcpb.Code_INVALID_ARGUMENT, fmt.Errorf("invalid parts in dtid: %s", dtid)) } txid, err := strconv.ParseInt(splits[2], 10, 0) if err != nil { - return 0, vterrors.FromError(vtrpcpb.ErrorCode_BAD_INPUT, fmt.Errorf("invalid transaction id in dtid: %s", dtid)) + return 0, vterrors.FromError(vtrpcpb.Code_INVALID_ARGUMENT, fmt.Errorf("invalid transaction id in dtid: 
%s", dtid)) } return txid, nil } diff --git a/go/vt/proto/vtrpc/vtrpc.pb.go b/go/vt/proto/vtrpc/vtrpc.pb.go index 06146f083eb..9835f06ccaa 100644 --- a/go/vt/proto/vtrpc/vtrpc.pb.go +++ b/go/vt/proto/vtrpc/vtrpc.pb.go @@ -332,6 +332,7 @@ func (*CallerID) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} type RPCError struct { LegacyCode ErrorCode `protobuf:"varint,1,opt,name=legacy_code,json=legacyCode,enum=vtrpc.ErrorCode" json:"legacy_code,omitempty"` Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` + Code Code `protobuf:"varint,3,opt,name=code,enum=vtrpc.Code" json:"code,omitempty"` } func (m *RPCError) Reset() { *m = RPCError{} } @@ -349,41 +350,41 @@ func init() { func init() { proto.RegisterFile("vtrpc.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 561 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x93, 0xdd, 0x4e, 0x1b, 0x3f, - 0x10, 0xc5, 0x49, 0x80, 0x7c, 0x4c, 0x02, 0x18, 0xf3, 0xff, 0xd3, 0x14, 0x51, 0xa9, 0xe2, 0xaa, - 0xe2, 0x22, 0x52, 0xdb, 0x27, 0x70, 0xd6, 0x43, 0xb0, 0x58, 0xc6, 0xa9, 0x3f, 0x80, 0x5c, 0x59, - 0x21, 0xac, 0x10, 0x55, 0x60, 0xa3, 0x4d, 0x40, 0xe2, 0x95, 0xfa, 0x00, 0x7d, 0xbe, 0xca, 0x9b, - 0x04, 0xd4, 0xe6, 0x2a, 0xf1, 0xf8, 0x8c, 0xcf, 0x99, 0xdf, 0x68, 0xa1, 0xf5, 0x32, 0x2f, 0xa6, - 0xe3, 0xee, 0xb4, 0xc8, 0xe7, 0x39, 0xdf, 0x2e, 0x0f, 0x27, 0x3f, 0xa1, 0x91, 0x8c, 0x26, 0x93, - 0xac, 0x50, 0x92, 0x1f, 0x43, 0x73, 0x5a, 0x3c, 0x3c, 0x8d, 0x1f, 0xa6, 0xa3, 0x49, 0xa7, 0xf2, - 0xb9, 0xf2, 0xa5, 0x69, 0xde, 0x0b, 0xf1, 0x76, 0x9c, 0x3f, 0x4e, 0xf3, 0xa7, 0xec, 0x69, 0xde, - 0xa9, 0x2e, 0x6e, 0xdf, 0x0a, 0xfc, 0x04, 0xda, 0xb3, 0xe7, 0xdb, 0x77, 0xc1, 0x66, 0x29, 0xf8, - 0xab, 0x76, 0x72, 0x0d, 0x0d, 0x33, 0x48, 0xb0, 0x28, 0xf2, 0x82, 0x7f, 0x85, 0xd6, 0x24, 0xbb, - 0x1f, 0x8d, 0x5f, 0xc3, 0x38, 0xbf, 0xcb, 0x4a, 0xb7, 0xdd, 0x6f, 0xac, 0xbb, 0x48, 0x58, 0x4a, - 0x92, 0xfc, 0x2e, 0x33, 0xb0, 0x10, 0xc5, 0xff, 0xbc, 
0x03, 0xf5, 0xc7, 0x6c, 0x36, 0x1b, 0xdd, - 0x67, 0x4b, 0xfb, 0xd5, 0xf1, 0xf4, 0x77, 0x15, 0xb6, 0x4a, 0x49, 0x0d, 0xaa, 0xfa, 0x82, 0x6d, - 0xf0, 0x36, 0x34, 0x12, 0x41, 0x09, 0xa6, 0x28, 0x59, 0x85, 0xb7, 0xa0, 0xee, 0xe9, 0x82, 0xf4, - 0x35, 0xb1, 0x2a, 0xff, 0x0f, 0x98, 0xa2, 0x2b, 0x91, 0x2a, 0x19, 0x84, 0xe9, 0xfb, 0x4b, 0x24, - 0xc7, 0x36, 0xf9, 0xff, 0xb0, 0x2f, 0x51, 0xc8, 0x54, 0x11, 0x06, 0xbc, 0x49, 0x10, 0x25, 0x4a, - 0xb6, 0xc5, 0x77, 0xa0, 0x49, 0xda, 0x85, 0x33, 0xed, 0x49, 0xb2, 0x6d, 0xce, 0x61, 0x57, 0xa4, - 0x06, 0x85, 0x1c, 0x06, 0xbc, 0x51, 0xd6, 0x59, 0x56, 0x8b, 0x9d, 0x03, 0x34, 0x97, 0xca, 0x5a, - 0xa5, 0x29, 0x48, 0x24, 0x85, 0x92, 0xd5, 0xf9, 0x01, 0xec, 0x79, 0x12, 0xde, 0x9d, 0x23, 0x39, - 0x95, 0x08, 0x87, 0x92, 0x31, 0x7e, 0x08, 0xdc, 0xa0, 0xd5, 0xde, 0x24, 0xd1, 0xe5, 0x5c, 0x78, - 0x1b, 0xeb, 0x0d, 0xfe, 0x01, 0x0e, 0xce, 0x84, 0x4a, 0x51, 0x86, 0x81, 0xc1, 0x44, 0x93, 0x54, - 0x4e, 0x69, 0x62, 0xcd, 0x98, 0x5c, 0xf4, 0xb4, 0x89, 0x2a, 0xe0, 0x0c, 0xda, 0xda, 0xbb, 0xa0, - 0xcf, 0x82, 0x11, 0xd4, 0x47, 0xd6, 0xe2, 0xfb, 0xb0, 0xe3, 0x49, 0x5d, 0x0e, 0x52, 0x8c, 0x63, - 0xa0, 0x64, 0xed, 0x38, 0xb9, 0x22, 0x87, 0x86, 0x44, 0xca, 0x76, 0xf8, 0x1e, 0xb4, 0x3c, 0x89, - 0x2b, 0xa1, 0x52, 0xd1, 0x4b, 0x91, 0xed, 0xc6, 0x81, 0xa4, 0x70, 0x22, 0xa4, 0xda, 0x5a, 0xb6, - 0x77, 0xfa, 0xab, 0x0a, 0xcd, 0x37, 0xd8, 0xd1, 0xcd, 0xfa, 0x24, 0x41, 0x6b, 0xd9, 0x46, 0xe4, - 0xb4, 0x40, 0x18, 0x63, 0xa5, 0xd8, 0x17, 0xc9, 0x90, 0x55, 0x16, 0x8e, 0x25, 0xca, 0x80, 0xc6, - 0x68, 0xc3, 0xaa, 0xf1, 0xc9, 0x9e, 0x90, 0x41, 0xd1, 0xc0, 0x47, 0x92, 0xc7, 0xd0, 0x59, 0x23, - 0xb9, 0xea, 0xdf, 0x8a, 0x58, 0x62, 0xbc, 0xbe, 0x51, 0x6e, 0xb8, 0x7c, 0x61, 0x3b, 0xb6, 0xac, - 0x21, 0x5c, 0xb5, 0xd4, 0xf8, 0x27, 0xf8, 0xb8, 0x0e, 0x6d, 0x75, 0x5d, 0x8f, 0x39, 0x7f, 0x78, - 0x34, 0xc3, 0x10, 0x17, 0x65, 0xd1, 0x5c, 0x95, 0x44, 0x97, 0x8b, 0x53, 0x14, 0xdc, 0x0d, 0x6b, - 0xc6, 0xc5, 0xad, 0xa8, 0x2c, 0x5d, 0x21, 0x46, 0x71, 0x46, 0x90, 0x55, 0x48, 0x6e, 0x59, 
0x6c, - 0xf1, 0x23, 0x38, 0xfc, 0x67, 0x6d, 0x2b, 0xa7, 0x76, 0xef, 0x08, 0x3a, 0xe3, 0xfc, 0xb1, 0xfb, - 0x9a, 0x3f, 0xcf, 0x9f, 0x6f, 0xb3, 0xee, 0xcb, 0xc3, 0x3c, 0x9b, 0xcd, 0x16, 0x5f, 0xd3, 0x6d, - 0xad, 0xfc, 0xf9, 0xfe, 0x27, 0x00, 0x00, 0xff, 0xff, 0xfc, 0xc4, 0x3c, 0x5f, 0x63, 0x03, 0x00, - 0x00, + // 574 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x53, 0xdb, 0x4e, 0x1b, 0x31, + 0x14, 0x24, 0x21, 0xe4, 0x72, 0x36, 0x80, 0x31, 0x2d, 0x4d, 0x11, 0x55, 0x2b, 0x9e, 0x2a, 0x1e, + 0x22, 0xb5, 0xfd, 0x02, 0x67, 0x7d, 0x08, 0x16, 0x8b, 0x9d, 0xfa, 0x42, 0xc9, 0x93, 0x15, 0xc2, + 0x0a, 0x51, 0x05, 0x36, 0xda, 0x04, 0x54, 0x7e, 0xa9, 0x1f, 0xd0, 0xef, 0xab, 0xec, 0x24, 0xa0, + 0x36, 0x4f, 0x89, 0xcf, 0x65, 0x66, 0xce, 0x8c, 0x16, 0x92, 0xa7, 0x79, 0x39, 0x1d, 0x77, 0xa7, + 0x65, 0x31, 0x2f, 0xe8, 0x56, 0x7c, 0x1c, 0xff, 0x84, 0x66, 0x3a, 0x9a, 0x4c, 0xf2, 0x52, 0x70, + 0x7a, 0x04, 0xad, 0x69, 0x79, 0xf7, 0x30, 0xbe, 0x9b, 0x8e, 0x26, 0x9d, 0xca, 0xa7, 0xca, 0xe7, + 0x96, 0x7e, 0x2d, 0x84, 0xee, 0xb8, 0xb8, 0x9f, 0x16, 0x0f, 0xf9, 0xc3, 0xbc, 0x53, 0x5d, 0x74, + 0x5f, 0x0a, 0xf4, 0x18, 0xda, 0xb3, 0xc7, 0xeb, 0xd7, 0x81, 0xcd, 0x38, 0xf0, 0x4f, 0xed, 0xf8, + 0x17, 0x34, 0xf5, 0x20, 0xc5, 0xb2, 0x2c, 0x4a, 0xfa, 0x05, 0x92, 0x49, 0x7e, 0x3b, 0x1a, 0x3f, + 0xfb, 0x71, 0x71, 0x93, 0x47, 0xb6, 0x9d, 0xaf, 0xa4, 0xbb, 0x50, 0x18, 0x47, 0xd2, 0xe2, 0x26, + 0xd7, 0xb0, 0x18, 0x0a, 0xff, 0x69, 0x07, 0x1a, 0xf7, 0xf9, 0x6c, 0x36, 0xba, 0xcd, 0x97, 0xf4, + 0xab, 0x27, 0xfd, 0x08, 0xb5, 0x88, 0xb2, 0x19, 0x51, 0x92, 0x25, 0x4a, 0x04, 0x88, 0x8d, 0x93, + 0x3f, 0x55, 0xa8, 0x45, 0x8c, 0x3a, 0x54, 0xd5, 0x39, 0xd9, 0xa0, 0x6d, 0x68, 0xa6, 0x4c, 0xa6, + 0x98, 0x21, 0x27, 0x15, 0x9a, 0x40, 0xc3, 0xc9, 0x73, 0xa9, 0x7e, 0x48, 0x52, 0xa5, 0x6f, 0x80, + 0x08, 0x79, 0xc9, 0x32, 0xc1, 0x3d, 0xd3, 0x7d, 0x77, 0x81, 0xd2, 0x92, 0x4d, 0xfa, 0x16, 0xf6, + 0x38, 0x32, 0x9e, 0x09, 0x89, 0x1e, 0xaf, 0x52, 0x44, 0x8e, 0x9c, 0xd4, 
0xe8, 0x36, 0xb4, 0xa4, + 0xb2, 0xfe, 0x54, 0x39, 0xc9, 0xc9, 0x16, 0xa5, 0xb0, 0xc3, 0x32, 0x8d, 0x8c, 0x0f, 0x3d, 0x5e, + 0x09, 0x63, 0x0d, 0xa9, 0x87, 0xcd, 0x01, 0xea, 0x0b, 0x61, 0x8c, 0x50, 0xd2, 0x73, 0x94, 0x02, + 0x39, 0x69, 0xd0, 0x7d, 0xd8, 0x75, 0x92, 0x39, 0x7b, 0x86, 0xd2, 0x8a, 0x94, 0x59, 0xe4, 0x84, + 0xd0, 0x03, 0xa0, 0x1a, 0x8d, 0x72, 0x3a, 0x0d, 0x2c, 0x67, 0xcc, 0x99, 0x50, 0x6f, 0xd2, 0x77, + 0xb0, 0x7f, 0xca, 0x44, 0x86, 0xdc, 0x0f, 0x34, 0xa6, 0x4a, 0x72, 0x61, 0x85, 0x92, 0xa4, 0x15, + 0x94, 0xb3, 0x9e, 0xd2, 0x61, 0x0a, 0x28, 0x81, 0xb6, 0x72, 0xd6, 0xab, 0x53, 0xaf, 0x99, 0xec, + 0x23, 0x49, 0xe8, 0x1e, 0x6c, 0x3b, 0x29, 0x2e, 0x06, 0x19, 0x86, 0x33, 0x90, 0x93, 0x76, 0xb8, + 0x5c, 0x48, 0x8b, 0x5a, 0xb2, 0x8c, 0x6c, 0xd3, 0x5d, 0x48, 0x9c, 0x64, 0x97, 0x4c, 0x64, 0xac, + 0x97, 0x21, 0xd9, 0x09, 0x07, 0x71, 0x66, 0x99, 0xcf, 0x94, 0x31, 0x64, 0xf7, 0xe4, 0x77, 0x15, + 0x5a, 0x2f, 0x69, 0x04, 0x36, 0xe3, 0xd2, 0x14, 0x8d, 0x21, 0x1b, 0xc1, 0xa7, 0x85, 0x85, 0x41, + 0x56, 0x86, 0x7d, 0x96, 0x0e, 0x49, 0x65, 0xc1, 0x18, 0xad, 0xf4, 0xa8, 0xb5, 0xd2, 0xa4, 0x1a, + 0x20, 0x7b, 0x8c, 0x7b, 0x21, 0x07, 0x2e, 0x38, 0x79, 0x04, 0x9d, 0x35, 0x27, 0x57, 0xfb, 0xb5, + 0x60, 0x4b, 0x90, 0xd7, 0xd7, 0xc2, 0x0e, 0x97, 0x08, 0x5b, 0x61, 0x65, 0xcd, 0xc2, 0xd5, 0x4a, + 0x9d, 0x7e, 0x80, 0xf7, 0xeb, 0xa6, 0xad, 0xda, 0x8d, 0xa0, 0xf3, 0xbb, 0x43, 0x3d, 0xf4, 0x21, + 0x28, 0x83, 0xfa, 0x32, 0x3a, 0xba, 0x0c, 0x4e, 0x48, 0x6f, 0xaf, 0x48, 0x2b, 0x04, 0xb7, 0x72, + 0x65, 0xc9, 0x0a, 0x41, 0x8a, 0xd5, 0x4c, 0x1a, 0x81, 0xd2, 0x2e, 0x8b, 0x09, 0x3d, 0x84, 0x83, + 0xff, 0x62, 0x5b, 0x31, 0xb5, 0x7b, 0x87, 0xd0, 0x19, 0x17, 0xf7, 0xdd, 0xe7, 0xe2, 0x71, 0xfe, + 0x78, 0x9d, 0x77, 0x9f, 0xee, 0xe6, 0xf9, 0x6c, 0xb6, 0xf8, 0xdc, 0xae, 0xeb, 0xf1, 0xe7, 0xdb, + 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7b, 0x18, 0x69, 0xdc, 0x84, 0x03, 0x00, 0x00, } diff --git a/go/vt/tabletserver/codex.go b/go/vt/tabletserver/codex.go index 6cb22d177db..a9bf11f5916 100644 --- 
a/go/vt/tabletserver/codex.go +++ b/go/vt/tabletserver/codex.go @@ -42,7 +42,7 @@ func resolvePKValues(table *schema.Table, pkValues []interface{}, bindVars map[s if length == -1 { length = len(list) } else if len(list) != length { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "mismatched lengths for values %v", pkValues) + return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "mismatched lengths for values %v", pkValues) } return nil } @@ -93,7 +93,7 @@ func resolvePKValues(table *schema.Table, pkValues []interface{}, bindVars map[s func resolveListArg(col *schema.TableColumn, key string, bindVars map[string]interface{}) ([]sqltypes.Value, error) { val, _, err := sqlparser.FetchBindVar(key, bindVars) if err != nil { - return nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "%v", err) + return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) } switch list := val.(type) { @@ -102,7 +102,7 @@ func resolveListArg(col *schema.TableColumn, key string, bindVars map[string]int for i, v := range list { sqlval, err := sqltypes.BuildConverted(col.Type, v) if err != nil { - return nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "%v", err) + return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) } if err = validateValue(col, sqlval); err != nil { return nil, err @@ -112,7 +112,7 @@ func resolveListArg(col *schema.TableColumn, key string, bindVars map[string]int return resolved, nil case *querypb.BindVariable: if list.Type != querypb.Type_TUPLE { - return nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "expecting list for bind var %s: %v", key, list) + return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "expecting list for bind var %s: %v", key, list) } resolved := make([]sqltypes.Value, len(list.Values)) for i, v := range list.Values { @@ -120,7 +120,7 @@ func resolveListArg(col *schema.TableColumn, key string, bindVars map[string]int sqlval := 
sqltypes.MakeTrusted(v.Type, v.Value) sqlval, err := sqltypes.BuildConverted(col.Type, sqlval) if err != nil { - return nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "%v", err) + return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) } if err = validateValue(col, sqlval); err != nil { return nil, err @@ -129,7 +129,7 @@ func resolveListArg(col *schema.TableColumn, key string, bindVars map[string]int } return resolved, nil default: - return nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "unknown type for bind variable %v", key) + return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "unknown type for bind variable %v", key) } } @@ -159,12 +159,12 @@ func resolveValue(col *schema.TableColumn, value interface{}, bindVars map[strin if v, ok := value.(string); ok { value, _, err = sqlparser.FetchBindVar(v, bindVars) if err != nil { - return result, tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "%v", err) + return result, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) } } result, err = sqltypes.BuildConverted(col.Type, value) if err != nil { - return result, tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "%v", err) + return result, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) } if err = validateValue(col, result); err != nil { return result, err @@ -178,23 +178,23 @@ func resolveNumber(value interface{}, bindVars map[string]interface{}) (int64, e if v, ok := value.(string); ok { value, _, err = sqlparser.FetchBindVar(v, bindVars) if err != nil { - return 0, tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "%v", err) + return 0, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) } } v, err := sqltypes.BuildValue(value) if err != nil { - return 0, tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "%v", err) + return 0, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) } ret, err := v.ParseInt64() if err != nil { - 
return 0, tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "%v", err) + return 0, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) } return ret, nil } func validateRow(table *schema.Table, columnNumbers []int, row []sqltypes.Value) error { if len(row) != len(columnNumbers) { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "data inconsistency %d vs %d", len(row), len(columnNumbers)) + return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "data inconsistency %d vs %d", len(row), len(columnNumbers)) } for j, value := range row { if err := validateValue(&table.Columns[columnNumbers[j]], value); err != nil { @@ -211,11 +211,11 @@ func validateValue(col *schema.TableColumn, value sqltypes.Value) error { } if sqltypes.IsIntegral(col.Type) { if !value.IsIntegral() { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "type mismatch, expecting numeric type for %v for column: %v", value, col) + return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "type mismatch, expecting numeric type for %v for column: %v", value, col) } } else if col.Type == sqltypes.VarBinary { if !value.IsQuoted() { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "type mismatch, expecting string type for %v for column: %v", value, col) + return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "type mismatch, expecting string type for %v for column: %v", value, col) } } return nil diff --git a/go/vt/tabletserver/codex_test.go b/go/vt/tabletserver/codex_test.go index 5f7d58afd6d..3ebb2893fc0 100644 --- a/go/vt/tabletserver/codex_test.go +++ b/go/vt/tabletserver/codex_test.go @@ -257,7 +257,7 @@ func TestCodexResolvePKValues(t *testing.T) { pkValues = make([]interface{}, 0, 10) pkValues = append(pkValues, sqltypes.MakeString([]byte("type_mismatch"))) _, _, err = resolvePKValues(table, pkValues, nil) - testUtils.checkTabletError(t, err, vtrpcpb.ErrorCode_BAD_INPUT, "strconv.ParseInt") + testUtils.checkTabletError(t, 
err, vtrpcpb.Code_INVALID_ARGUMENT, "strconv.ParseInt") // pkValues with different length bindVariables = make(map[string]interface{}) bindVariables[key] = 1 @@ -269,7 +269,7 @@ func TestCodexResolvePKValues(t *testing.T) { pkValues = append(pkValues, []interface{}{":" + key}) pkValues = append(pkValues, []interface{}{":" + key2, ":" + key3}) _, _, err = resolvePKValues(table, pkValues, bindVariables) - testUtils.checkTabletError(t, err, vtrpcpb.ErrorCode_BAD_INPUT, "mismatched lengths") + testUtils.checkTabletError(t, err, vtrpcpb.Code_INVALID_ARGUMENT, "mismatched lengths") } func TestCodexResolveListArg(t *testing.T) { @@ -284,7 +284,7 @@ func TestCodexResolveListArg(t *testing.T) { bindVariables[key] = []interface{}{fmt.Errorf("error is not supported")} _, err := resolveListArg(table.GetPKColumn(0), "::"+key, bindVariables) - testUtils.checkTabletError(t, err, vtrpcpb.ErrorCode_BAD_INPUT, "") + testUtils.checkTabletError(t, err, vtrpcpb.Code_INVALID_ARGUMENT, "") // This should successfully convert. 
bindVariables[key] = []interface{}{"1"} @@ -413,10 +413,10 @@ func TestCodexValidateRow(t *testing.T) { []string{"pk1", "pk2"}) // #columns and #rows do not match err := validateRow(table, []int{1}, []sqltypes.Value{}) - testUtils.checkTabletError(t, err, vtrpcpb.ErrorCode_BAD_INPUT, "data inconsistency") + testUtils.checkTabletError(t, err, vtrpcpb.Code_INVALID_ARGUMENT, "data inconsistency") // column 0 is int type but row is in string type err = validateRow(table, []int{0}, []sqltypes.Value{sqltypes.MakeString([]byte("str"))}) - testUtils.checkTabletError(t, err, vtrpcpb.ErrorCode_BAD_INPUT, "type mismatch") + testUtils.checkTabletError(t, err, vtrpcpb.Code_INVALID_ARGUMENT, "type mismatch") } func TestCodexApplyFilterWithPKDefaults(t *testing.T) { diff --git a/go/vt/tabletserver/connpool/dbconn.go b/go/vt/tabletserver/connpool/dbconn.go index 698cc6cddab..b25a2fdf78e 100644 --- a/go/vt/tabletserver/connpool/dbconn.go +++ b/go/vt/tabletserver/connpool/dbconn.go @@ -64,12 +64,12 @@ func (dbc *DBConn) Exec(ctx context.Context, query string, maxrows int, wantfiel return r, nil case !tabletenv.IsConnErr(err): // MySQL error that isn't due to a connection issue - return nil, tabletenv.NewTabletErrorSQL(vtrpcpb.ErrorCode_UNKNOWN_ERROR, err) + return nil, tabletenv.NewTabletErrorSQL(vtrpcpb.Code_UNKNOWN, err) case attempt == 2: // If the MySQL connection is bad, we assume that there is nothing wrong with // the query itself, and retrying it might succeed. The MySQL connection might // fix itself, or the query could succeed on a different VtTablet. - return nil, tabletenv.NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, err) + return nil, tabletenv.NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, err) } // Connection error. Try to reconnect. @@ -78,7 +78,7 @@ func (dbc *DBConn) Exec(ctx context.Context, query string, maxrows int, wantfiel dbc.pool.checker.CheckMySQL() // Return the error of the reconnect and not the original connection error. 
// NOTE: We return a tryable error code here. - return nil, tabletenv.NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, reconnectErr) + return nil, tabletenv.NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, reconnectErr) } // Reconnect succeeded. Retry query at second attempt. @@ -191,7 +191,7 @@ func (dbc *DBConn) Kill(reason string) error { if err != nil { log.Warningf("Failed to get conn from dba pool: %v", err) // TODO(aaijazi): Find the right error code for an internal error that we don't want to retry - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "Failed to get conn from dba pool: %v", err) + return tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "Failed to get conn from dba pool: %v", err) } defer killConn.Recycle() sql := fmt.Sprintf("kill %d", dbc.conn.ID()) @@ -199,7 +199,7 @@ func (dbc *DBConn) Kill(reason string) error { if err != nil { log.Errorf("Could not kill query %s: %v", dbc.Current(), err) // TODO(aaijazi): Find the right error code for an internal error that we don't want to retry - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "Could not kill query %s: %v", dbc.Current(), err) + return tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "Could not kill query %s: %v", dbc.Current(), err) } return nil } diff --git a/go/vt/tabletserver/engines/schema/schema_engine.go b/go/vt/tabletserver/engines/schema/schema_engine.go index e67d62ccc8c..f4002edfcba 100644 --- a/go/vt/tabletserver/engines/schema/schema_engine.go +++ b/go/vt/tabletserver/engines/schema/schema_engine.go @@ -96,7 +96,7 @@ func (se *Engine) Open(dbaParams *sqldb.ConnParams) error { conn, err := se.conns.Get(ctx) if err != nil { - return tabletenv.NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, err) + return tabletenv.NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, err) } defer conn.Recycle() @@ -107,13 +107,13 @@ func (se *Engine) Open(dbaParams *sqldb.ConnParams) error { if se.strictMode.Get() { if err := conn.VerifyMode(); err != nil { - return 
tabletenv.NewTabletError(vtrpcpb.ErrorCode_UNKNOWN_ERROR, err.Error()) + return tabletenv.NewTabletError(vtrpcpb.Code_UNKNOWN, err.Error()) } } tableData, err := conn.Exec(ctx, mysqlconn.BaseShowTables, maxTableCount, false) if err != nil { - return tabletenv.PrefixTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, err, "Could not get table list: ") + return tabletenv.PrefixTabletError(vtrpcpb.Code_INTERNAL, err, "Could not get table list: ") } tables := make(map[string]*Table, len(tableData.Rows)+1) @@ -155,7 +155,7 @@ func (se *Engine) Open(dbaParams *sqldb.ConnParams) error { // Fail if we can't load the schema for any tables, but we know that some tables exist. This points to a configuration problem. if len(tableData.Rows) != 0 && len(tables) == 1 { // len(tables) is always at least 1 because of the "dual" table - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_UNKNOWN_ERROR, "could not get schema for any tables") + return tabletenv.NewTabletError(vtrpcpb.Code_UNKNOWN, "could not get schema for any tables") } se.tables = tables se.lastChange = curTime @@ -198,7 +198,7 @@ func (se *Engine) Reload(ctx context.Context) error { curTime, tableData, err := func() (int64, *sqltypes.Result, error) { conn, err := se.conns.Get(ctx) if err != nil { - return 0, nil, tabletenv.NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, err) + return 0, nil, tabletenv.NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, err) } defer conn.Recycle() curTime, err := se.mysqlTime(ctx, conn) @@ -264,14 +264,14 @@ func (se *Engine) Reload(ctx context.Context) error { func (se *Engine) mysqlTime(ctx context.Context, conn *connpool.DBConn) (int64, error) { tm, err := conn.Exec(ctx, "select unix_timestamp()", 1, false) if err != nil { - return 0, tabletenv.PrefixTabletError(vtrpcpb.ErrorCode_UNKNOWN_ERROR, err, "Could not get MySQL time: ") + return 0, tabletenv.PrefixTabletError(vtrpcpb.Code_UNKNOWN, err, "Could not get MySQL time: ") } if len(tm.Rows) != 1 || len(tm.Rows[0]) != 1 || 
tm.Rows[0][0].IsNull() { - return 0, tabletenv.NewTabletError(vtrpcpb.ErrorCode_UNKNOWN_ERROR, "Unexpected result for MySQL time: %+v", tm.Rows) + return 0, tabletenv.NewTabletError(vtrpcpb.Code_UNKNOWN, "Unexpected result for MySQL time: %+v", tm.Rows) } t, err := strconv.ParseInt(tm.Rows[0][0].String(), 10, 64) if err != nil { - return 0, tabletenv.NewTabletError(vtrpcpb.ErrorCode_UNKNOWN_ERROR, "Could not parse time %+v: %v", tm, err) + return 0, tabletenv.NewTabletError(vtrpcpb.Code_UNKNOWN, "Could not parse time %+v: %v", tm, err) } return t, nil } @@ -281,18 +281,18 @@ func (se *Engine) TableWasCreatedOrAltered(ctx context.Context, tableName string se.mu.Lock() defer se.mu.Unlock() if !se.isOpen { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "DDL called on closed schema") + return tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "DDL called on closed schema") } conn, err := se.conns.Get(ctx) if err != nil { - return tabletenv.NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, err) + return tabletenv.NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, err) } defer conn.Recycle() tableData, err := conn.Exec(ctx, mysqlconn.BaseShowTablesForTable(tableName), 1, false) if err != nil { tabletenv.InternalErrors.Add("Schema", 1) - return tabletenv.PrefixTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, err, + return tabletenv.PrefixTabletError(vtrpcpb.Code_INTERNAL, err, fmt.Sprintf("TableWasCreatedOrAltered: information_schema query failed for table %s: ", tableName)) } if len(tableData.Rows) != 1 { @@ -308,7 +308,7 @@ func (se *Engine) TableWasCreatedOrAltered(ctx context.Context, tableName string ) if err != nil { tabletenv.InternalErrors.Add("Schema", 1) - return tabletenv.PrefixTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, err, + return tabletenv.PrefixTabletError(vtrpcpb.Code_INTERNAL, err, fmt.Sprintf("TableWasCreatedOrAltered: failed to load table %s: ", tableName)) } // table_rows, data_length, index_length, max_data_length diff --git 
a/go/vt/tabletserver/messager_engine.go b/go/vt/tabletserver/messager_engine.go index f840d62a318..17abf17dbb7 100644 --- a/go/vt/tabletserver/messager_engine.go +++ b/go/vt/tabletserver/messager_engine.go @@ -77,7 +77,7 @@ func (me *MessagerEngine) Subscribe(name string, rcv *messageReceiver) error { defer me.mu.Unlock() mm := me.managers[name] if mm == nil { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "message table %s not found", name) + return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "message table %s not found", name) } mm.Subscribe(rcv) return nil diff --git a/go/vt/tabletserver/query_engine.go b/go/vt/tabletserver/query_engine.go index 8f94175f5a4..bcfe4c92941 100644 --- a/go/vt/tabletserver/query_engine.go +++ b/go/vt/tabletserver/query_engine.go @@ -270,7 +270,7 @@ func (qe *QueryEngine) GetPlan(ctx context.Context, logStats *tabletenv.LogStats } splan, err := planbuilder.GetExecPlan(sql, GetTable) if err != nil { - return nil, tabletenv.PrefixTabletError(vtrpcpb.ErrorCode_UNKNOWN_ERROR, err, "") + return nil, tabletenv.PrefixTabletError(vtrpcpb.Code_UNKNOWN, err, "") } plan := &ExecPlan{ExecPlan: splan, Table: table} plan.Rules = qe.queryRuleSources.filterByPlan(sql, plan.PlanID, plan.TableName.String()) @@ -281,7 +281,7 @@ func (qe *QueryEngine) GetPlan(ctx context.Context, logStats *tabletenv.LogStats } else { conn, err := qe.conns.Get(ctx) if err != nil { - return nil, tabletenv.NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, err) + return nil, tabletenv.NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, err) } defer conn.Recycle() @@ -290,7 +290,7 @@ func (qe *QueryEngine) GetPlan(ctx context.Context, logStats *tabletenv.LogStats r, err := conn.Exec(ctx, sql, 1, true) logStats.AddRewrittenSQL(sql, start) if err != nil { - return nil, tabletenv.PrefixTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, err, "Error fetching fields: ") + return nil, tabletenv.PrefixTabletError(vtrpcpb.Code_INTERNAL, err, "Error fetching fields: ") } 
plan.Fields = r.Fields } @@ -316,7 +316,7 @@ func (qe *QueryEngine) GetStreamPlan(sql string) (*ExecPlan, error) { } splan, err := planbuilder.GetStreamExecPlan(sql, GetTable) if err != nil { - return nil, tabletenv.PrefixTabletError(vtrpcpb.ErrorCode_BAD_INPUT, err, "") + return nil, tabletenv.PrefixTabletError(vtrpcpb.Code_INVALID_ARGUMENT, err, "") } plan := &ExecPlan{ExecPlan: splan, Table: table} plan.Rules = qe.queryRuleSources.filterByPlan(sql, plan.PlanID, plan.TableName.String()) diff --git a/go/vt/tabletserver/query_executor.go b/go/vt/tabletserver/query_executor.go index 1e1fda1004d..0e62f3ad5f9 100644 --- a/go/vt/tabletserver/query_executor.go +++ b/go/vt/tabletserver/query_executor.go @@ -87,7 +87,7 @@ func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) { switch qre.plan.PlanID { case planbuilder.PlanPassDML: if qre.tsv.qe.strictMode.Get() { - return nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "DML too complex") + return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "DML too complex") } return qre.txFetch(conn, qre.plan.FullQuery, qre.bindVars, nil, false, true) case planbuilder.PlanInsertPK: @@ -114,7 +114,7 @@ func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) { case planbuilder.PlanPassSelect: return qre.execSelect() case planbuilder.PlanSelectLock: - return nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "Disallowed outside transaction") + return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "Disallowed outside transaction") case planbuilder.PlanSet: return qre.execSet() case planbuilder.PlanOther: @@ -126,7 +126,7 @@ func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) { return qre.execSQL(conn, qre.query, true) default: if !qre.tsv.qe.autoCommit.Get() { - return nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "Disallowed outside transaction") + return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, 
"Disallowed outside transaction") } return qre.execDmlAutoCommit() } @@ -165,7 +165,7 @@ func (qre *QueryExecutor) execDmlAutoCommit() (reply *sqltypes.Result, err error switch qre.plan.PlanID { case planbuilder.PlanPassDML: if qre.tsv.qe.strictMode.Get() { - return nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "DML too complex") + return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "DML too complex") } reply, err = qre.txFetch(conn, qre.plan.FullQuery, qre.bindVars, nil, false, true) case planbuilder.PlanInsertPK: @@ -181,7 +181,7 @@ func (qre *QueryExecutor) execDmlAutoCommit() (reply *sqltypes.Result, err error case planbuilder.PlanUpsertPK: reply, err = qre.execUpsertPK(conn) default: - return nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "unsupported query: %s", qre.query) + return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "unsupported query: %s", qre.query) } return reply, err }) @@ -229,9 +229,9 @@ func (qre *QueryExecutor) checkPermissions() error { action, desc := qre.plan.Rules.getAction(remoteAddr, username, qre.bindVars) switch action { case QRFail: - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "Query disallowed due to rule: %s", desc) + return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "Query disallowed due to rule: %s", desc) case QRFailRetry: - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_QUERY_NOT_SERVED, "Query disallowed due to rule: %s", desc) + return tabletenv.NewTabletError(vtrpcpb.Code_FAILED_PRECONDITION, "Query disallowed due to rule: %s", desc) } // Check for SuperUser calling directly to VTTablet (e.g. 
VTWorker) @@ -243,7 +243,7 @@ func (qre *QueryExecutor) checkPermissions() error { callerID := callerid.ImmediateCallerIDFromContext(qre.ctx) if callerID == nil { if qre.tsv.qe.strictTableACL { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_UNAUTHENTICATED_LEGACY, "missing caller id") + return tabletenv.NewTabletError(vtrpcpb.Code_UNAUTHENTICATED, "missing caller id") } return nil } @@ -260,7 +260,7 @@ func (qre *QueryExecutor) checkPermissions() error { } if qre.plan.Authorized == nil { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY, "table acl error: nil acl") + return tabletenv.NewTabletError(vtrpcpb.Code_PERMISSION_DENIED, "table acl error: nil acl") } tableACLStatsKey := []string{ qre.plan.TableName.String(), @@ -279,7 +279,7 @@ func (qre *QueryExecutor) checkPermissions() error { errStr := fmt.Sprintf("table acl error: %q cannot run %v on table %q", callerID.Username, qre.plan.PlanID, qre.plan.TableName) tabletenv.TableaclDenied.Add(tableACLStatsKey, 1) qre.tsv.qe.accessCheckerLogger.Infof("%s", errStr) - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY, "%s", errStr) + return tabletenv.NewTabletError(vtrpcpb.Code_PERMISSION_DENIED, "%s", errStr) } return nil } @@ -290,7 +290,7 @@ func (qre *QueryExecutor) checkPermissions() error { func (qre *QueryExecutor) execDDL() (*sqltypes.Result, error) { ddlPlan := planbuilder.DDLParse(qre.query) if ddlPlan.Action == "" { - return nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "DDL is not understood") + return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "DDL is not understood") } conn, err := qre.tsv.te.txPool.LocalBegin(qre.ctx) @@ -464,7 +464,7 @@ func (qre *QueryExecutor) execInsertSubquery(conn *TxConnection) (*sqltypes.Resu return &sqltypes.Result{RowsAffected: 0}, nil } if len(qre.plan.ColumnNumbers) != len(innerRows[0]) { - return nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "Subquery length does not match 
column list") + return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "Subquery length does not match column list") } pkRows := make([][]sqltypes.Value, len(innerRows)) for i, innerRow := range innerRows { @@ -600,7 +600,7 @@ func (qre *QueryExecutor) getConn(pool *connpool.Pool) (*connpool.DBConn, error) case tabletenv.ErrConnPoolClosed: return nil, err } - return nil, tabletenv.NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, err) + return nil, tabletenv.NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, err) } func (qre *QueryExecutor) qFetch(logStats *tabletenv.LogStats, parsedQuery *sqlparser.ParsedQuery, bindVars map[string]interface{}) (*sqltypes.Result, error) { @@ -615,7 +615,7 @@ func (qre *QueryExecutor) qFetch(logStats *tabletenv.LogStats, parsedQuery *sqlp conn, err := qre.tsv.qe.conns.Get(qre.ctx) logStats.WaitingForConnection += time.Now().Sub(waitingForConnectionStart) if err != nil { - q.Err = tabletenv.NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, err) + q.Err = tabletenv.NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, err) } else { defer conn.Recycle() q.Result, q.Err = qre.execSQL(conn, sql, false) @@ -671,7 +671,7 @@ func (qre *QueryExecutor) generateFinalSQL(parsedQuery *sqlparser.ParsedQuery, b bindVars["#maxLimit"] = qre.tsv.qe.maxResultSize.Get() + 1 sql, err := parsedQuery.GenerateQuery(bindVars) if err != nil { - return "", tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "%s", err) + return "", tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "%s", err) } if buildStreamComment != nil { sql = append(sql, buildStreamComment...) 
@@ -696,7 +696,7 @@ func (qre *QueryExecutor) execStreamSQL(conn *connpool.DBConn, sql string, inclu qre.logStats.AddRewrittenSQL(sql, start) if err != nil { // MySQL error that isn't due to a connection issue - return tabletenv.NewTabletErrorSQL(vtrpcpb.ErrorCode_UNKNOWN_ERROR, err) + return tabletenv.NewTabletErrorSQL(vtrpcpb.Code_UNKNOWN, err) } return nil } diff --git a/go/vt/tabletserver/query_executor_test.go b/go/vt/tabletserver/query_executor_test.go index 6dbea06ed00..d2cb9cdca1a 100644 --- a/go/vt/tabletserver/query_executor_test.go +++ b/go/vt/tabletserver/query_executor_test.go @@ -93,8 +93,8 @@ func TestQueryExecutorPlanPassDmlStrictMode(t *testing.T) { if !ok { t.Fatalf("got: %v, want: a tabletenv.TabletError", tabletError) } - if tabletError.ErrorCode != vtrpcpb.ErrorCode_BAD_INPUT { - t.Fatalf("got: %s, want: BAD_INPUT", tabletError.ErrorCode) + if tabletError.Code != vtrpcpb.Code_INVALID_ARGUMENT { + t.Fatalf("got: %s, want: BAD_INPUT", tabletError.Code) } } @@ -132,8 +132,8 @@ func TestQueryExecutorPlanPassDmlStrictModeAutoCommit(t *testing.T) { if !ok { t.Fatalf("got: %v, want: *tabletenv.TabletError", tabletError) } - if tabletError.ErrorCode != vtrpcpb.ErrorCode_BAD_INPUT { - t.Fatalf("got: %s, want: BAD_INPUT", tabletError.ErrorCode) + if tabletError.Code != vtrpcpb.Code_INVALID_ARGUMENT { + t.Fatalf("got: %s, want: BAD_INPUT", tabletError.Code) } } @@ -690,8 +690,8 @@ func TestQueryExecutorPlanPassSelectWithLockOutsideATransaction(t *testing.T) { if !ok { t.Fatalf("got: %v, want: *tabletenv.TabletError", err) } - if got.ErrorCode != vtrpcpb.ErrorCode_BAD_INPUT { - t.Fatalf("got: %s, want: BAD_INPUT", got.ErrorCode) + if got.Code != vtrpcpb.Code_INVALID_ARGUMENT { + t.Fatalf("got: %s, want: BAD_INPUT", got.Code) } } @@ -1035,8 +1035,8 @@ func TestQueryExecutorTableAclNoPermission(t *testing.T) { if !ok { t.Fatalf("got: %v, want: *tabletenv.TabletError", err) } - if tabletError.ErrorCode != vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY { - 
t.Fatalf("got: %s, want: PERMISSION_DENIED", tabletError.ErrorCode) + if tabletError.Code != vtrpcpb.Code_PERMISSION_DENIED { + t.Fatalf("got: %s, want: PERMISSION_DENIED", tabletError.Code) } } @@ -1089,8 +1089,8 @@ func TestQueryExecutorTableAclExemptACL(t *testing.T) { if !ok { t.Fatalf("got: %v, want: *tabletenv.TabletError", err) } - if tabletError.ErrorCode != vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY { - t.Fatalf("got: %s, want: PERMISSION_DENIED", tabletError.ErrorCode) + if tabletError.Code != vtrpcpb.Code_PERMISSION_DENIED { + t.Fatalf("got: %s, want: PERMISSION_DENIED", tabletError.Code) } if !strings.Contains(tabletError.Error(), "table acl error") { t.Fatalf("got %s, want tablet errorL table acl error", tabletError.Error()) @@ -1229,8 +1229,8 @@ func TestQueryExecutorBlacklistQRFail(t *testing.T) { if !ok { t.Fatalf("got: %v, want: *tabletenv.TabletError", err) } - if got.ErrorCode != vtrpcpb.ErrorCode_BAD_INPUT { - t.Fatalf("got: %s, want: BAD_INPUT", got.ErrorCode) + if got.Code != vtrpcpb.Code_INVALID_ARGUMENT { + t.Fatalf("got: %s, want: BAD_INPUT", got.Code) } } @@ -1289,8 +1289,8 @@ func TestQueryExecutorBlacklistQRRetry(t *testing.T) { if !ok { t.Fatalf("got: %v, want: *tabletenv.TabletError", err) } - if got.ErrorCode != vtrpcpb.ErrorCode_QUERY_NOT_SERVED { - t.Fatalf("got: %s, want: QUERY_NOT_SERVED", got.ErrorCode) + if got.Code != vtrpcpb.Code_FAILED_PRECONDITION { + t.Fatalf("got: %s, want: QUERY_NOT_SERVED", got.Code) } } diff --git a/go/vt/tabletserver/query_rules.go b/go/vt/tabletserver/query_rules.go index 692195029dd..409e8b47397 100644 --- a/go/vt/tabletserver/query_rules.go +++ b/go/vt/tabletserver/query_rules.go @@ -97,7 +97,7 @@ func (qrs *QueryRules) UnmarshalJSON(data []byte) (err error) { // Ideally, we should have an error code that means "This isn't the query's // fault, but don't retry either, as this will be a global problem". 
// (true for all INTERNAL_ERRORS in query_rules) - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "%v", err) + return tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "%v", err) } for _, ruleInfo := range rulesInfo { qr, err := BuildQueryRule(ruleInfo) @@ -333,7 +333,7 @@ func (qr *QueryRule) AddBindVarCond(name string, onAbsent, onMismatch bool, op O // Change the value to compiled regexp re, err := regexp.Compile(makeExact(v)) if err != nil { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "processing %s: %v", v, err) + return tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "processing %s: %v", v, err) } converted = bvcre{re} } else { @@ -346,13 +346,13 @@ func (qr *QueryRule) AddBindVarCond(name string, onAbsent, onMismatch bool, op O b := bvcKeyRange(*v) converted = &b default: - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "type %T not allowed as condition operand (%v)", value, value) + return tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "type %T not allowed as condition operand (%v)", value, value) } qr.bindVarConds = append(qr.bindVarConds, BindVarCond{name, onAbsent, onMismatch, op, converted}) return nil Error: - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "invalid operator %s for type %T (%v)", op, value, value) + return tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "invalid operator %s for type %T (%v)", op, value, value) } // filterByPlan returns a new QueryRule if the query and planid match. @@ -885,7 +885,7 @@ func MapStrOperator(strop string) (op Operator, err error) { if op, ok := opmap[strop]; ok { return op, nil } - return QRNoOp, tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "invalid Operator %s", strop) + return QRNoOp, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "invalid Operator %s", strop) } // BuildQueryRule builds a query rule from a ruleInfo. 
@@ -899,15 +899,15 @@ func BuildQueryRule(ruleInfo map[string]interface{}) (qr *QueryRule, err error) case "Name", "Description", "RequestIP", "User", "Query", "Action": sv, ok = v.(string) if !ok { - return nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "want string for %s", k) + return nil, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want string for %s", k) } case "Plans", "BindVarConds", "TableNames": lv, ok = v.([]interface{}) if !ok { - return nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "want list for %s", k) + return nil, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want list for %s", k) } default: - return nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "unrecognized tag %s", k) + return nil, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "unrecognized tag %s", k) } switch k { case "Name": @@ -917,27 +917,27 @@ func BuildQueryRule(ruleInfo map[string]interface{}) (qr *QueryRule, err error) case "RequestIP": err = qr.SetIPCond(sv) if err != nil { - return nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "could not set IP condition: %v", sv) + return nil, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "could not set IP condition: %v", sv) } case "User": err = qr.SetUserCond(sv) if err != nil { - return nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "could not set User condition: %v", sv) + return nil, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "could not set User condition: %v", sv) } case "Query": err = qr.SetQueryCond(sv) if err != nil { - return nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "could not set Query condition: %v", sv) + return nil, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "could not set Query condition: %v", sv) } case "Plans": for _, p := range lv { pv, ok := p.(string) if !ok { - return nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "want string for Plans") + return nil, 
tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want string for Plans") } pt, ok := planbuilder.PlanByName(pv) if !ok { - return nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "invalid plan name: %s", pv) + return nil, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "invalid plan name: %s", pv) } qr.AddPlanCond(pt) } @@ -945,7 +945,7 @@ func BuildQueryRule(ruleInfo map[string]interface{}) (qr *QueryRule, err error) for _, t := range lv { tableName, ok := t.(string) if !ok { - return nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "want string for TableNames") + return nil, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want string for TableNames") } qr.AddTableCond(tableName) } @@ -967,7 +967,7 @@ func BuildQueryRule(ruleInfo map[string]interface{}) (qr *QueryRule, err error) case "FAIL_RETRY": qr.act = QRFailRetry default: - return nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "invalid Action %s", sv) + return nil, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "invalid Action %s", sv) } } } @@ -977,41 +977,41 @@ func BuildQueryRule(ruleInfo map[string]interface{}) (qr *QueryRule, err error) func buildBindVarCondition(bvc interface{}) (name string, onAbsent, onMismatch bool, op Operator, value interface{}, err error) { bvcinfo, ok := bvc.(map[string]interface{}) if !ok { - err = tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "want json object for bind var conditions") + err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want json object for bind var conditions") return } var v interface{} v, ok = bvcinfo["Name"] if !ok { - err = tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "Name missing in BindVarConds") + err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "Name missing in BindVarConds") return } name, ok = v.(string) if !ok { - err = tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "want string for Name in BindVarConds") + err = 
tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want string for Name in BindVarConds") return } v, ok = bvcinfo["OnAbsent"] if !ok { - err = tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "OnAbsent missing in BindVarConds") + err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "OnAbsent missing in BindVarConds") return } onAbsent, ok = v.(bool) if !ok { - err = tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "want bool for OnAbsent") + err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want bool for OnAbsent") return } v, ok = bvcinfo["Operator"] if !ok { - err = tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "Operator missing in BindVarConds") + err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "Operator missing in BindVarConds") return } strop, ok := v.(string) if !ok { - err = tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "want string for Operator") + err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want string for Operator") return } op, err = MapStrOperator(strop) @@ -1023,7 +1023,7 @@ func buildBindVarCondition(bvc interface{}) (name string, onAbsent, onMismatch b } v, ok = bvcinfo["Value"] if !ok { - err = tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "Value missing in BindVarConds") + err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "Value missing in BindVarConds") return } if op >= QREqual && op <= QRLessEqual { @@ -1034,50 +1034,50 @@ func buildBindVarCondition(bvc interface{}) (name string, onAbsent, onMismatch b // Maybe uint64 value, err = strconv.ParseUint(string(v), 10, 64) if err != nil { - err = tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "want int64/uint64: %s", string(v)) + err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want int64/uint64: %s", string(v)) return } } case string: value = v default: - err = tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "want string or number: %v", v) + err = 
tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want string or number: %v", v) return } } else if op == QRMatch || op == QRNoMatch { strvalue, ok := v.(string) if !ok { - err = tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "want string: %v", v) + err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want string: %v", v) return } value = strvalue } else if op == QRIn || op == QRNotIn { kr, ok := v.(map[string]interface{}) if !ok { - err = tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "want keyrange for Value") + err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want keyrange for Value") return } keyrange := &topodatapb.KeyRange{} strstart, ok := kr["Start"] if !ok { - err = tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "Start missing in KeyRange") + err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "Start missing in KeyRange") return } start, ok := strstart.(string) if !ok { - err = tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "want string for Start") + err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want string for Start") return } keyrange.Start = []byte(start) strend, ok := kr["End"] if !ok { - err = tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "End missing in KeyRange") + err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "End missing in KeyRange") return } end, ok := strend.(string) if !ok { - err = tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "want string for End") + err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want string for End") return } keyrange.End = []byte(end) @@ -1086,12 +1086,12 @@ func buildBindVarCondition(bvc interface{}) (name string, onAbsent, onMismatch b v, ok = bvcinfo["OnMismatch"] if !ok { - err = tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "OnMismatch missing in BindVarConds") + err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "OnMismatch missing in BindVarConds") return } onMismatch, ok = v.(bool) if !ok { - 
err = tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "want bool for OnMismatch") + err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want bool for OnMismatch") return } return diff --git a/go/vt/tabletserver/query_rules_test.go b/go/vt/tabletserver/query_rules_test.go index dab9bd67b18..85e80b7f8c4 100644 --- a/go/vt/tabletserver/query_rules_test.go +++ b/go/vt/tabletserver/query_rules_test.go @@ -796,8 +796,8 @@ func TestInvalidJSON(t *testing.T) { if !ok { t.Fatalf("invalid json, should get a tablet error") } - if terr.ErrorCode != vtrpcpb.ErrorCode_INTERNAL_ERROR { - t.Fatalf("got: %v wanted: INTERNAL_ERROR", terr.ErrorCode) + if terr.Code != vtrpcpb.Code_INTERNAL { + t.Fatalf("got: %v wanted: INTERNAL_ERROR", terr.Code) } } diff --git a/go/vt/tabletserver/sandboxconn/sandboxconn.go b/go/vt/tabletserver/sandboxconn/sandboxconn.go index 94a6fde8895..3dad88a1414 100644 --- a/go/vt/tabletserver/sandboxconn/sandboxconn.go +++ b/go/vt/tabletserver/sandboxconn/sandboxconn.go @@ -104,21 +104,21 @@ func (sbc *SandboxConn) getError() error { sbc.MustFailRetry-- return &tabletconn.ServerError{ Err: "retry: err", - ServerCode: vtrpcpb.ErrorCode_QUERY_NOT_SERVED, + ServerCode: vtrpcpb.Code_FAILED_PRECONDITION, } } if sbc.MustFailFatal > 0 { sbc.MustFailFatal-- return &tabletconn.ServerError{ Err: "fatal: err", - ServerCode: vtrpcpb.ErrorCode_INTERNAL_ERROR, + ServerCode: vtrpcpb.Code_INTERNAL, } } if sbc.MustFailServer > 0 { sbc.MustFailServer-- return &tabletconn.ServerError{ Err: "error: err", - ServerCode: vtrpcpb.ErrorCode_BAD_INPUT, + ServerCode: vtrpcpb.Code_INVALID_ARGUMENT, } } if sbc.MustFailConn > 0 { @@ -129,63 +129,63 @@ func (sbc *SandboxConn) getError() error { sbc.MustFailTxPool-- return &tabletconn.ServerError{ Err: "tx_pool_full: err", - ServerCode: vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY, + ServerCode: vtrpcpb.Code_RESOURCE_EXHAUSTED, } } if sbc.MustFailNotTx > 0 { sbc.MustFailNotTx-- return &tabletconn.ServerError{ Err: "not_in_tx: 
err", - ServerCode: vtrpcpb.ErrorCode_NOT_IN_TX, + ServerCode: vtrpcpb.Code_ABORTED, } } if sbc.MustFailCanceled > 0 { sbc.MustFailCanceled-- return &tabletconn.ServerError{ Err: "canceled: err", - ServerCode: vtrpcpb.ErrorCode_CANCELLED_LEGACY, + ServerCode: vtrpcpb.Code_CANCELED, } } if sbc.MustFailUnknownError > 0 { sbc.MustFailUnknownError-- return &tabletconn.ServerError{ Err: "unknown error: err", - ServerCode: vtrpcpb.ErrorCode_UNKNOWN_ERROR, + ServerCode: vtrpcpb.Code_UNKNOWN, } } if sbc.MustFailDeadlineExceeded > 0 { sbc.MustFailDeadlineExceeded-- return &tabletconn.ServerError{ Err: "deadline exceeded: err", - ServerCode: vtrpcpb.ErrorCode_DEADLINE_EXCEEDED_LEGACY, + ServerCode: vtrpcpb.Code_DEADLINE_EXCEEDED, } } if sbc.MustFailIntegrityError > 0 { sbc.MustFailIntegrityError-- return &tabletconn.ServerError{ Err: "integrity error: err", - ServerCode: vtrpcpb.ErrorCode_INTEGRITY_ERROR, + ServerCode: vtrpcpb.Code_ALREADY_EXISTS, } } if sbc.MustFailPermissionDenied > 0 { sbc.MustFailPermissionDenied-- return &tabletconn.ServerError{ Err: "permission denied: err", - ServerCode: vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY, + ServerCode: vtrpcpb.Code_PERMISSION_DENIED, } } if sbc.MustFailTransientError > 0 { sbc.MustFailTransientError-- return &tabletconn.ServerError{ Err: "transient error: err", - ServerCode: vtrpcpb.ErrorCode_TRANSIENT_ERROR, + ServerCode: vtrpcpb.Code_UNAVAILABLE, } } if sbc.MustFailUnauthenticated > 0 { sbc.MustFailUnauthenticated-- return &tabletconn.ServerError{ Err: "unauthenticated: err", - ServerCode: vtrpcpb.ErrorCode_UNAUTHENTICATED_LEGACY, + ServerCode: vtrpcpb.Code_UNAUTHENTICATED, } } @@ -281,7 +281,7 @@ func (sbc *SandboxConn) Prepare(ctx context.Context, target *querypb.Target, tra sbc.MustFailPrepare-- return &tabletconn.ServerError{ Err: "error: err", - ServerCode: vtrpcpb.ErrorCode_QUERY_NOT_SERVED, + ServerCode: vtrpcpb.Code_FAILED_PRECONDITION, } } return sbc.getError() @@ -294,7 +294,7 @@ func (sbc *SandboxConn) 
CommitPrepared(ctx context.Context, target *querypb.Targ sbc.MustFailCommitPrepared-- return &tabletconn.ServerError{ Err: "error: err", - ServerCode: vtrpcpb.ErrorCode_QUERY_NOT_SERVED, + ServerCode: vtrpcpb.Code_FAILED_PRECONDITION, } } return sbc.getError() @@ -307,7 +307,7 @@ func (sbc *SandboxConn) RollbackPrepared(ctx context.Context, target *querypb.Ta sbc.MustFailRollbackPrepared-- return &tabletconn.ServerError{ Err: "error: err", - ServerCode: vtrpcpb.ErrorCode_QUERY_NOT_SERVED, + ServerCode: vtrpcpb.Code_FAILED_PRECONDITION, } } return sbc.getError() @@ -320,7 +320,7 @@ func (sbc *SandboxConn) CreateTransaction(ctx context.Context, target *querypb.T sbc.MustFailCreateTransaction-- return &tabletconn.ServerError{ Err: "error: err", - ServerCode: vtrpcpb.ErrorCode_QUERY_NOT_SERVED, + ServerCode: vtrpcpb.Code_FAILED_PRECONDITION, } } return sbc.getError() @@ -334,7 +334,7 @@ func (sbc *SandboxConn) StartCommit(ctx context.Context, target *querypb.Target, sbc.MustFailStartCommit-- return &tabletconn.ServerError{ Err: "error: err", - ServerCode: vtrpcpb.ErrorCode_QUERY_NOT_SERVED, + ServerCode: vtrpcpb.Code_FAILED_PRECONDITION, } } return sbc.getError() @@ -348,7 +348,7 @@ func (sbc *SandboxConn) SetRollback(ctx context.Context, target *querypb.Target, sbc.MustFailSetRollback-- return &tabletconn.ServerError{ Err: "error: err", - ServerCode: vtrpcpb.ErrorCode_QUERY_NOT_SERVED, + ServerCode: vtrpcpb.Code_FAILED_PRECONDITION, } } return sbc.getError() @@ -362,7 +362,7 @@ func (sbc *SandboxConn) ConcludeTransaction(ctx context.Context, target *querypb sbc.MustFailConcludeTransaction-- return &tabletconn.ServerError{ Err: "error: err", - ServerCode: vtrpcpb.ErrorCode_QUERY_NOT_SERVED, + ServerCode: vtrpcpb.Code_FAILED_PRECONDITION, } } return sbc.getError() diff --git a/go/vt/tabletserver/tabletconn/grpc_error.go b/go/vt/tabletserver/tabletconn/grpc_error.go index 0be34dfafe3..1e21efdb97d 100644 --- a/go/vt/tabletserver/tabletconn/grpc_error.go +++ 
b/go/vt/tabletserver/tabletconn/grpc_error.go @@ -29,7 +29,7 @@ func TabletErrorFromGRPC(err error) error { // server side error, convert it return &ServerError{ Err: fmt.Sprintf("vttablet: %v", err), - ServerCode: vterrors.GRPCCodeToErrorCode(grpc.Code(err)), + ServerCode: vterrors.GRPCToCode(grpc.Code(err)), } } @@ -38,10 +38,14 @@ func TabletErrorFromRPCError(err *vtrpcpb.RPCError) error { if err == nil { return nil } + code := err.Code + if code == vtrpcpb.Code_OK { + code = vterrors.LegacyErrorCodeToCode(err.LegacyCode) + } // server side error, convert it return &ServerError{ Err: fmt.Sprintf("vttablet: %v", err), - ServerCode: err.LegacyCode, + ServerCode: code, } } diff --git a/go/vt/tabletserver/tabletconn/tablet_conn.go b/go/vt/tabletserver/tabletconn/tablet_conn.go index 95b38ca7c6e..fbcb30c0cc8 100644 --- a/go/vt/tabletserver/tabletconn/tablet_conn.go +++ b/go/vt/tabletserver/tabletconn/tablet_conn.go @@ -31,14 +31,14 @@ var ( type ServerError struct { Err string // ServerCode is the error code that we got from the server. - ServerCode vtrpcpb.ErrorCode + ServerCode vtrpcpb.Code } func (e *ServerError) Error() string { return e.Err } // VtErrorCode returns the underlying Vitess error code. // This makes ServerError implement vterrors.VtError. -func (e *ServerError) VtErrorCode() vtrpcpb.ErrorCode { return e.ServerCode } +func (e *ServerError) VtErrorCode() vtrpcpb.Code { return e.ServerCode } // OperationalError represents an error due to a failure to // communicate with vttablet. 
diff --git a/go/vt/tabletserver/tabletconntest/tabletconntest.go b/go/vt/tabletserver/tabletconntest/tabletconntest.go index 9aacf8c4175..28e745e4350 100644 --- a/go/vt/tabletserver/tabletconntest/tabletconntest.go +++ b/go/vt/tabletserver/tabletconntest/tabletconntest.go @@ -31,22 +31,22 @@ import ( func testErrorHelper(t *testing.T, f *FakeQueryService, name string, ef func(context.Context) error) { errors := []*tabletenv.TabletError{ // A few generic errors - tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "generic error"), - tabletenv.NewTabletError(vtrpcpb.ErrorCode_UNKNOWN_ERROR, "uncaught panic"), - tabletenv.NewTabletError(vtrpcpb.ErrorCode_UNAUTHENTICATED_LEGACY, "missing caller id"), - tabletenv.NewTabletError(vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY, "table acl error: nil acl"), + tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "generic error"), + tabletenv.NewTabletError(vtrpcpb.Code_UNKNOWN, "uncaught panic"), + tabletenv.NewTabletError(vtrpcpb.Code_UNAUTHENTICATED, "missing caller id"), + tabletenv.NewTabletError(vtrpcpb.Code_PERMISSION_DENIED, "table acl error: nil acl"), // Client will retry on this specific error - tabletenv.NewTabletError(vtrpcpb.ErrorCode_QUERY_NOT_SERVED, "Query disallowed due to rule: %v", "cool rule"), + tabletenv.NewTabletError(vtrpcpb.Code_FAILED_PRECONDITION, "Query disallowed due to rule: %v", "cool rule"), // Client may retry on another server on this specific error - tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "Could not verify strict mode"), + tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "Could not verify strict mode"), // This is usually transaction pool full - tabletenv.NewTabletError(vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY, "Transaction pool connection limit exceeded"), + tabletenv.NewTabletError(vtrpcpb.Code_RESOURCE_EXHAUSTED, "Transaction pool connection limit exceeded"), // Transaction expired or was unknown - tabletenv.NewTabletError(vtrpcpb.ErrorCode_NOT_IN_TX, "Transaction 
12"), + tabletenv.NewTabletError(vtrpcpb.Code_ABORTED, "Transaction 12"), } for _, e := range errors { f.TabletError = e @@ -59,8 +59,8 @@ func testErrorHelper(t *testing.T, f *FakeQueryService, name string, ef func(con // First we check the recoverable vtrpc code is right. code := vterrors.RecoverVtErrorCode(err) - if code != e.ErrorCode { - t.Errorf("unexpected server code from %v: got %v, wanted %v", name, code, e.ErrorCode) + if code != e.Code { + t.Errorf("unexpected server code from %v: got %v, wanted %v", name, code, e.Code) } // Double-check we always get a ServerError, although diff --git a/go/vt/tabletserver/tabletenv/logstats_test.go b/go/vt/tabletserver/tabletenv/logstats_test.go index 9878e5ca502..cfab666bd28 100644 --- a/go/vt/tabletserver/tabletenv/logstats_test.go +++ b/go/vt/tabletserver/tabletenv/logstats_test.go @@ -107,8 +107,8 @@ func TestLogStatsErrorStr(t *testing.T) { } errStr := "unknown error" logStats.Error = &TabletError{ - ErrorCode: vtrpcpb.ErrorCode_UNKNOWN_ERROR, - Message: errStr, + Code: vtrpcpb.Code_UNKNOWN, + Message: errStr, } if !strings.Contains(logStats.ErrorStr(), errStr) { t.Fatalf("expect string '%s' in error message, but got: %s", errStr, logStats.ErrorStr()) diff --git a/go/vt/tabletserver/tabletenv/tablet_error.go b/go/vt/tabletserver/tabletenv/tablet_error.go index 8804a4d8c9e..eb7f8201047 100644 --- a/go/vt/tabletserver/tabletenv/tablet_error.go +++ b/go/vt/tabletserver/tabletenv/tablet_error.go @@ -27,7 +27,7 @@ const ( var ErrConnPoolClosed = NewTabletError( // connection pool being closed is not the query's fault, it can be retried on a // different VtTablet. - vtrpcpb.ErrorCode_INTERNAL_ERROR, + vtrpcpb.Code_INTERNAL, "connection pool is closed") // TabletError is the error type we use in this library. 
@@ -36,20 +36,20 @@ type TabletError struct { Message string SQLError int SQLState string - // ErrorCode will be used to transmit the error across RPC boundaries - ErrorCode vtrpcpb.ErrorCode + // Code will be used to transmit the error across RPC boundaries + Code vtrpcpb.Code } // NewTabletError returns a TabletError of the given type -func NewTabletError(errCode vtrpcpb.ErrorCode, format string, args ...interface{}) *TabletError { +func NewTabletError(errCode vtrpcpb.Code, format string, args ...interface{}) *TabletError { return &TabletError{ - Message: printable(fmt.Sprintf(format, args...)), - ErrorCode: errCode, + Message: printable(fmt.Sprintf(format, args...)), + Code: errCode, } } // NewTabletErrorSQL returns a TabletError based on the error -func NewTabletErrorSQL(errCode vtrpcpb.ErrorCode, err error) *TabletError { +func NewTabletErrorSQL(errCode vtrpcpb.Code, err error) *TabletError { var errnum int errstr := err.Error() sqlState := sqldb.SQLStateGeneral @@ -61,29 +61,29 @@ func NewTabletErrorSQL(errCode vtrpcpb.ErrorCode, err error) *TabletError { // Override error type if MySQL is in read-only mode. It's probably because // there was a remaster and there are old clients still connected. if strings.Contains(errstr, "read-only") { - errCode = vtrpcpb.ErrorCode_QUERY_NOT_SERVED + errCode = vtrpcpb.Code_FAILED_PRECONDITION } case mysqlconn.ERDupEntry: - errCode = vtrpcpb.ErrorCode_INTEGRITY_ERROR + errCode = vtrpcpb.Code_ALREADY_EXISTS case mysqlconn.ERDataTooLong, mysqlconn.ERDataOutOfRange: - errCode = vtrpcpb.ErrorCode_BAD_INPUT + errCode = vtrpcpb.Code_INVALID_ARGUMENT default: } } return &TabletError{ - Message: printable(errstr), - SQLError: errnum, - SQLState: sqlState, - ErrorCode: errCode, + Message: printable(errstr), + SQLError: errnum, + SQLState: sqlState, + Code: errCode, } } // PrefixTabletError attempts to add a string prefix to a TabletError, -// while preserving its ErrorCode. 
If the given error is not a -// TabletError, a new TabletError is returned with the desired ErrorCode. -func PrefixTabletError(errCode vtrpcpb.ErrorCode, err error, prefix string) error { +// while preserving its Code. If the given error is not a +// TabletError, a new TabletError is returned with the desired Code. +func PrefixTabletError(errCode vtrpcpb.Code, err error, prefix string) error { if terr, ok := err.(*TabletError); ok { - return NewTabletError(terr.ErrorCode, "%s%s", prefix, terr.Message) + return NewTabletError(terr.Code, "%s%s", prefix, terr.Message) } return NewTabletError(errCode, "%s%s", prefix, err) } @@ -132,21 +132,21 @@ func (te *TabletError) Error() string { } // VtErrorCode returns the underlying Vitess error code -func (te *TabletError) VtErrorCode() vtrpcpb.ErrorCode { - return te.ErrorCode +func (te *TabletError) VtErrorCode() vtrpcpb.Code { + return te.Code } // Prefix returns the prefix for the error, like error, fatal, etc. func (te *TabletError) Prefix() string { prefix := "error: " - switch te.ErrorCode { - case vtrpcpb.ErrorCode_QUERY_NOT_SERVED: + switch te.Code { + case vtrpcpb.Code_FAILED_PRECONDITION: prefix = "retry: " - case vtrpcpb.ErrorCode_INTERNAL_ERROR: + case vtrpcpb.Code_INTERNAL: prefix = "fatal: " - case vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY: + case vtrpcpb.Code_RESOURCE_EXHAUSTED: prefix = "tx_pool_full: " - case vtrpcpb.ErrorCode_NOT_IN_TX: + case vtrpcpb.Code_ABORTED: prefix = "not_in_tx: " } // Special case for killed queries. 
@@ -158,14 +158,14 @@ func (te *TabletError) Prefix() string { // RecordStats will record the error in the proper stat bucket func (te *TabletError) RecordStats() { - switch te.ErrorCode { - case vtrpcpb.ErrorCode_QUERY_NOT_SERVED: + switch te.Code { + case vtrpcpb.Code_FAILED_PRECONDITION: InfoErrors.Add("Retry", 1) - case vtrpcpb.ErrorCode_INTERNAL_ERROR: + case vtrpcpb.Code_INTERNAL: ErrorStats.Add("Fatal", 1) - case vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY: + case vtrpcpb.Code_RESOURCE_EXHAUSTED: ErrorStats.Add("TxPoolFull", 1) - case vtrpcpb.ErrorCode_NOT_IN_TX: + case vtrpcpb.Code_ABORTED: ErrorStats.Add("NotInTx", 1) default: switch te.SQLError { diff --git a/go/vt/tabletserver/tabletenv/tablet_error_test.go b/go/vt/tabletserver/tabletenv/tablet_error_test.go index b35ac3141b2..f007332b4ca 100644 --- a/go/vt/tabletserver/tabletenv/tablet_error_test.go +++ b/go/vt/tabletserver/tabletenv/tablet_error_test.go @@ -15,8 +15,8 @@ import ( ) func TestTabletErrorCode(t *testing.T) { - tErr := NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "error") - wantCode := vtrpcpb.ErrorCode_INTERNAL_ERROR + tErr := NewTabletError(vtrpcpb.Code_INTERNAL, "error") + wantCode := vtrpcpb.Code_INTERNAL code := tErr.VtErrorCode() if wantCode != code { t.Errorf("VtErrorCode() => %v, want %v", code, wantCode) @@ -25,27 +25,27 @@ func TestTabletErrorCode(t *testing.T) { func TestTabletErrorRetriableErrorTypeOverwrite(t *testing.T) { sqlErr := sqldb.NewSQLError(mysqlconn.EROptionPreventsStatement, mysqlconn.SSUnknownSQLState, "read-only") - tabletErr := NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, sqlErr) - if tabletErr.ErrorCode != vtrpcpb.ErrorCode_QUERY_NOT_SERVED { - t.Fatalf("got: %v wanted: QUERY_NOT_SERVED", tabletErr.ErrorCode) + tabletErr := NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, sqlErr) + if tabletErr.Code != vtrpcpb.Code_FAILED_PRECONDITION { + t.Fatalf("got: %v wanted: QUERY_NOT_SERVED", tabletErr.Code) } sqlErr = sqldb.NewSQLError(mysqlconn.ERDupEntry, 
mysqlconn.SSDupKey, "error") - tabletErr = NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, sqlErr) - if tabletErr.ErrorCode != vtrpcpb.ErrorCode_INTEGRITY_ERROR { - t.Fatalf("got: %v wanted: INTEGRITY_ERROR", tabletErr.ErrorCode) + tabletErr = NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, sqlErr) + if tabletErr.Code != vtrpcpb.Code_ALREADY_EXISTS { + t.Fatalf("got: %v wanted: INTEGRITY_ERROR", tabletErr.Code) } sqlErr = sqldb.NewSQLError(mysqlconn.ERDataTooLong, mysqlconn.SSDataTooLong, "error") - tabletErr = NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, sqlErr) - if tabletErr.ErrorCode != vtrpcpb.ErrorCode_BAD_INPUT { - t.Fatalf("got: %v wanted: BAD_INPUT", tabletErr.ErrorCode) + tabletErr = NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, sqlErr) + if tabletErr.Code != vtrpcpb.Code_INVALID_ARGUMENT { + t.Fatalf("got: %v wanted: BAD_INPUT", tabletErr.Code) } sqlErr = sqldb.NewSQLError(mysqlconn.ERDataOutOfRange, mysqlconn.SSDataOutOfRange, "error") - tabletErr = NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, sqlErr) - if tabletErr.ErrorCode != vtrpcpb.ErrorCode_BAD_INPUT { - t.Fatalf("got: %v wanted: BAD_INPUT", tabletErr.ErrorCode) + tabletErr = NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, sqlErr) + if tabletErr.Code != vtrpcpb.Code_INVALID_ARGUMENT { + t.Fatalf("got: %v wanted: BAD_INPUT", tabletErr.Code) } } @@ -59,9 +59,9 @@ func TestTabletErrorMsgTooLong(t *testing.T) { } msg := string(buf) sqlErr := sqldb.NewSQLError(mysqlconn.ERDupEntry, mysqlconn.SSDupKey, msg) - tabletErr := NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, sqlErr) - if tabletErr.ErrorCode != vtrpcpb.ErrorCode_INTEGRITY_ERROR { - t.Fatalf("got %v wanted INTEGRITY_ERROR", tabletErr.ErrorCode) + tabletErr := NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, sqlErr) + if tabletErr.Code != vtrpcpb.Code_ALREADY_EXISTS { + t.Fatalf("got %v wanted INTEGRITY_ERROR", tabletErr.Code) } if tabletErr.Message != string(buf[:maxErrLen]) { t.Fatalf("message should be capped, only %d character will be shown", 
maxErrLen) @@ -69,15 +69,15 @@ func TestTabletErrorMsgTooLong(t *testing.T) { } func TestTabletErrorConnError(t *testing.T) { - tabletErr := NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, sqldb.NewSQLError(1999, "HY000", "test")) + tabletErr := NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, sqldb.NewSQLError(1999, "HY000", "test")) if IsConnErr(tabletErr) { t.Fatalf("tablet error: %v is not a connection error", tabletErr) } - tabletErr = NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, sqldb.NewSQLError(2000, mysqlconn.SSUnknownSQLState, "test")) + tabletErr = NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, sqldb.NewSQLError(2000, mysqlconn.SSUnknownSQLState, "test")) if !IsConnErr(tabletErr) { t.Fatalf("tablet error: %v is a connection error", tabletErr) } - tabletErr = NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, sqldb.NewSQLError(mysqlconn.CRServerLost, mysqlconn.SSUnknownSQLState, "test")) + tabletErr = NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, sqldb.NewSQLError(mysqlconn.CRServerLost, mysqlconn.SSUnknownSQLState, "test")) if IsConnErr(tabletErr) { t.Fatalf("tablet error: %v is not a connection error", tabletErr) } @@ -106,26 +106,26 @@ func TestTabletErrorConnError(t *testing.T) { } func TestTabletErrorPrefix(t *testing.T) { - tabletErr := NewTabletErrorSQL(vtrpcpb.ErrorCode_QUERY_NOT_SERVED, sqldb.NewSQLError(2000, "HY000", "test")) + tabletErr := NewTabletErrorSQL(vtrpcpb.Code_FAILED_PRECONDITION, sqldb.NewSQLError(2000, "HY000", "test")) if tabletErr.Prefix() != "retry: " { t.Fatalf("tablet error with error code: QUERY_NOT_SERVED should has prefix: 'retry: '") } - tabletErr = NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, sqldb.NewSQLError(2000, "HY000", "test")) + tabletErr = NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, sqldb.NewSQLError(2000, "HY000", "test")) if tabletErr.Prefix() != "fatal: " { t.Fatalf("tablet error with error code: INTERNAL_ERROR should has prefix: 'fatal: '") } - tabletErr = 
NewTabletErrorSQL(vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY, sqldb.NewSQLError(2000, "HY000", "test")) + tabletErr = NewTabletErrorSQL(vtrpcpb.Code_RESOURCE_EXHAUSTED, sqldb.NewSQLError(2000, "HY000", "test")) if tabletErr.Prefix() != "tx_pool_full: " { t.Fatalf("tablet error with error code: RESOURCE_EXHAUSTED should has prefix: 'tx_pool_full: '") } - tabletErr = NewTabletErrorSQL(vtrpcpb.ErrorCode_NOT_IN_TX, sqldb.NewSQLError(2000, "HY000", "test")) + tabletErr = NewTabletErrorSQL(vtrpcpb.Code_ABORTED, sqldb.NewSQLError(2000, "HY000", "test")) if tabletErr.Prefix() != "not_in_tx: " { t.Fatalf("tablet error with error code: NOT_IN_TX should has prefix: 'not_in_tx: '") } } func TestTabletErrorRecordStats(t *testing.T) { - tabletErr := NewTabletErrorSQL(vtrpcpb.ErrorCode_QUERY_NOT_SERVED, sqldb.NewSQLError(2000, "HY000", "test")) + tabletErr := NewTabletErrorSQL(vtrpcpb.Code_FAILED_PRECONDITION, sqldb.NewSQLError(2000, "HY000", "test")) retryCounterBefore := InfoErrors.Counts()["Retry"] tabletErr.RecordStats() retryCounterAfter := InfoErrors.Counts()["Retry"] @@ -133,7 +133,7 @@ func TestTabletErrorRecordStats(t *testing.T) { t.Fatalf("tablet error with error code QUERY_NOT_SERVED should increase Retry error count by 1") } - tabletErr = NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, sqldb.NewSQLError(2000, "HY000", "test")) + tabletErr = NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, sqldb.NewSQLError(2000, "HY000", "test")) fatalCounterBefore := ErrorStats.Counts()["Fatal"] tabletErr.RecordStats() fatalCounterAfter := ErrorStats.Counts()["Fatal"] @@ -141,7 +141,7 @@ func TestTabletErrorRecordStats(t *testing.T) { t.Fatalf("tablet error with error code INTERNAL_ERROR should increase Fatal error count by 1") } - tabletErr = NewTabletErrorSQL(vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY, sqldb.NewSQLError(2000, "HY000", "test")) + tabletErr = NewTabletErrorSQL(vtrpcpb.Code_RESOURCE_EXHAUSTED, sqldb.NewSQLError(2000, "HY000", "test")) txPoolFullCounterBefore := 
ErrorStats.Counts()["TxPoolFull"] tabletErr.RecordStats() txPoolFullCounterAfter := ErrorStats.Counts()["TxPoolFull"] @@ -149,7 +149,7 @@ func TestTabletErrorRecordStats(t *testing.T) { t.Fatalf("tablet error with error code RESOURCE_EXHAUSTED should increase TxPoolFull error count by 1") } - tabletErr = NewTabletErrorSQL(vtrpcpb.ErrorCode_NOT_IN_TX, sqldb.NewSQLError(2000, "HY000", "test")) + tabletErr = NewTabletErrorSQL(vtrpcpb.Code_ABORTED, sqldb.NewSQLError(2000, "HY000", "test")) notInTxCounterBefore := ErrorStats.Counts()["NotInTx"] tabletErr.RecordStats() notInTxCounterAfter := ErrorStats.Counts()["NotInTx"] @@ -157,7 +157,7 @@ func TestTabletErrorRecordStats(t *testing.T) { t.Fatalf("tablet error with error code NOT_IN_TX should increase NotInTx error count by 1") } - tabletErr = NewTabletErrorSQL(vtrpcpb.ErrorCode_UNKNOWN_ERROR, sqldb.NewSQLError(mysqlconn.ERDupEntry, mysqlconn.SSDupKey, "test")) + tabletErr = NewTabletErrorSQL(vtrpcpb.Code_UNKNOWN, sqldb.NewSQLError(mysqlconn.ERDupEntry, mysqlconn.SSDupKey, "test")) dupKeyCounterBefore := InfoErrors.Counts()["DupKey"] tabletErr.RecordStats() dupKeyCounterAfter := InfoErrors.Counts()["DupKey"] @@ -165,7 +165,7 @@ func TestTabletErrorRecordStats(t *testing.T) { t.Fatalf("sql error with SQL error mysqlconn.ERDupEntry should increase DupKey error count by 1") } - tabletErr = NewTabletErrorSQL(vtrpcpb.ErrorCode_UNKNOWN_ERROR, sqldb.NewSQLError(mysqlconn.ERLockWaitTimeout, mysqlconn.SSUnknownSQLState, "test")) + tabletErr = NewTabletErrorSQL(vtrpcpb.Code_UNKNOWN, sqldb.NewSQLError(mysqlconn.ERLockWaitTimeout, mysqlconn.SSUnknownSQLState, "test")) lockWaitTimeoutCounterBefore := ErrorStats.Counts()["Deadlock"] tabletErr.RecordStats() lockWaitTimeoutCounterAfter := ErrorStats.Counts()["Deadlock"] @@ -173,7 +173,7 @@ func TestTabletErrorRecordStats(t *testing.T) { t.Fatalf("sql error with SQL error mysqlconn.ERLockWaitTimeout should increase Deadlock error count by 1") } - tabletErr = 
NewTabletErrorSQL(vtrpcpb.ErrorCode_UNKNOWN_ERROR, sqldb.NewSQLError(mysqlconn.ERLockDeadlock, mysqlconn.SSLockDeadlock, "test")) + tabletErr = NewTabletErrorSQL(vtrpcpb.Code_UNKNOWN, sqldb.NewSQLError(mysqlconn.ERLockDeadlock, mysqlconn.SSLockDeadlock, "test")) deadlockCounterBefore := ErrorStats.Counts()["Deadlock"] tabletErr.RecordStats() deadlockCounterAfter := ErrorStats.Counts()["Deadlock"] @@ -181,7 +181,7 @@ func TestTabletErrorRecordStats(t *testing.T) { t.Fatalf("sql error with SQL error mysqlconn.ERLockDeadlock should increase Deadlock error count by 1") } - tabletErr = NewTabletErrorSQL(vtrpcpb.ErrorCode_UNKNOWN_ERROR, sqldb.NewSQLError(mysqlconn.EROptionPreventsStatement, mysqlconn.SSUnknownSQLState, "test")) + tabletErr = NewTabletErrorSQL(vtrpcpb.Code_UNKNOWN, sqldb.NewSQLError(mysqlconn.EROptionPreventsStatement, mysqlconn.SSUnknownSQLState, "test")) failCounterBefore := ErrorStats.Counts()["Fail"] tabletErr.RecordStats() failCounterAfter := ErrorStats.Counts()["Fail"] @@ -203,7 +203,7 @@ func TestTabletErrorLogUncaughtErr(t *testing.T) { } func TestTabletErrorTxPoolFull(t *testing.T) { - tabletErr := NewTabletErrorSQL(vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY, sqldb.NewSQLError(1000, "HY000", "test")) + tabletErr := NewTabletErrorSQL(vtrpcpb.Code_RESOURCE_EXHAUSTED, sqldb.NewSQLError(1000, "HY000", "test")) defer func() { err := recover() if err != nil { @@ -215,7 +215,7 @@ func TestTabletErrorTxPoolFull(t *testing.T) { } func TestTabletErrorFatal(t *testing.T) { - tabletErr := NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, sqldb.NewSQLError(1000, "HY000", "test")) + tabletErr := NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, sqldb.NewSQLError(1000, "HY000", "test")) defer func() { err := recover() if err != nil { diff --git a/go/vt/tabletserver/tabletserver.go b/go/vt/tabletserver/tabletserver.go index 218a021a021..9c787f1b9a0 100644 --- a/go/vt/tabletserver/tabletserver.go +++ b/go/vt/tabletserver/tabletserver.go @@ -259,7 +259,7 @@ func (tsv 
*TabletServer) InitDBConfig(target querypb.Target, dbconfigs dbconfigs tsv.mu.Lock() defer tsv.mu.Unlock() if tsv.state != StateNotConnected { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_UNKNOWN_ERROR, "InitDBConfig failed, current state: %s", stateName[tsv.state]) + return tabletenv.NewTabletError(vtrpcpb.Code_UNKNOWN, "InitDBConfig failed, current state: %s", stateName[tsv.state]) } tsv.target = target tsv.dbconfigs = dbconfigs @@ -377,7 +377,7 @@ func (tsv *TabletServer) decideAction(tabletType topodatapb.TabletType, serving tsv.setState(StateTransitioning) return actionServeNewType, nil case StateTransitioning, StateShuttingDown: - return actionNone, tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "cannot SetServingType, current state: %s", stateName[tsv.state]) + return actionNone, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "cannot SetServingType, current state: %s", stateName[tsv.state]) default: panic("unreachable") } @@ -596,7 +596,7 @@ func (tsv *TabletServer) Begin(ctx context.Context, target *querypb.Target) (tra func(ctx context.Context, logStats *tabletenv.LogStats) error { defer tabletenv.QueryStats.Record("BEGIN", time.Now()) if tsv.txThrottler.Throttle() { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_TRANSIENT_ERROR, "Transaction throttled") + return tabletenv.NewTabletError(vtrpcpb.Code_UNAVAILABLE, "Transaction throttled") } transactionID, err = tsv.te.txPool.Begin(ctx) logStats.TransactionID = transactionID @@ -857,10 +857,10 @@ func (tsv *TabletServer) StreamExecute(ctx context.Context, target *querypb.Targ // transaction. If AsTransaction is true, TransactionId must be 0. 
func (tsv *TabletServer) ExecuteBatch(ctx context.Context, target *querypb.Target, queries []querytypes.BoundQuery, asTransaction bool, transactionID int64, options *querypb.ExecuteOptions) (results []sqltypes.Result, err error) { if len(queries) == 0 { - return nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "Empty query list") + return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "Empty query list") } if asTransaction && transactionID != 0 { - return nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "cannot start a new transaction in the scope of an existing one") + return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "cannot start a new transaction in the scope of an existing one") } allowOnShutdown := (transactionID != 0) @@ -955,7 +955,7 @@ func (tsv *TabletServer) MessageAck(ctx context.Context, target *querypb.Target, for _, val := range ids { v, err := sqltypes.BuildConverted(val.Type, val.Value) if err != nil { - return 0, tsv.handleError("message_ack", nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "invalid type: %v", err), nil) + return 0, tsv.handleError("message_ack", nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "invalid type: %v", err), nil) } sids = append(sids, v.String()) } @@ -989,7 +989,7 @@ func (tsv *TabletServer) execDML(ctx context.Context, target *querypb.Target, qu query, bv, err := queryGenerator() if err != nil { - return 0, tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "%v", err) + return 0, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) } transactionID, err := tsv.Begin(ctx, target) @@ -1121,7 +1121,7 @@ func (tsv *TabletServer) handlePanicAndSendLogStats( x, tb.Stack(4) /* Skip the last 4 boiler-plate frames. 
*/) log.Errorf(errorMessage) - terr := tabletenv.NewTabletError(vtrpcpb.ErrorCode_UNKNOWN_ERROR, "%s", errorMessage) + terr := tabletenv.NewTabletError(vtrpcpb.Code_UNKNOWN, "%s", errorMessage) *err = terr tabletenv.InternalErrors.Add("Panic", 1) if logStats != nil { @@ -1147,7 +1147,7 @@ func (tsv *TabletServer) handleError( }() terr, ok := err.(*tabletenv.TabletError) if !ok { - terr = tabletenv.NewTabletError(vtrpcpb.ErrorCode_UNKNOWN_ERROR, "%v", err) + terr = tabletenv.NewTabletError(vtrpcpb.Code_UNKNOWN, "%v", err) // We only want to see TabletError here. tabletenv.InternalErrors.Add("UnknownError", 1) } @@ -1172,10 +1172,10 @@ func (tsv *TabletServer) handleError( default: // Non-whitelisted error. Strip the error message. myError = &tabletenv.TabletError{ - SQLError: terr.SQLError, - SQLState: terr.SQLState, - ErrorCode: terr.ErrorCode, - Message: fmt.Sprintf("(errno %d) (sqlstate %s) during query: %s", terr.SQLError, terr.SQLState, sql), + SQLError: terr.SQLError, + SQLState: terr.SQLState, + Code: terr.Code, + Message: fmt.Sprintf("(errno %d) (sqlstate %s) during query: %s", terr.SQLError, terr.SQLState, sql), } } } else { @@ -1186,14 +1186,14 @@ func (tsv *TabletServer) handleError( logMethod := log.Infof // Suppress or demote some errors in logs. - switch terr.ErrorCode { - case vtrpcpb.ErrorCode_QUERY_NOT_SERVED: + switch terr.Code { + case vtrpcpb.Code_FAILED_PRECONDITION: return myError - case vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY: + case vtrpcpb.Code_RESOURCE_EXHAUSTED: logMethod = logTxPoolFull.Errorf - case vtrpcpb.ErrorCode_INTERNAL_ERROR: + case vtrpcpb.Code_INTERNAL: logMethod = log.Errorf - case vtrpcpb.ErrorCode_NOT_IN_TX: + case vtrpcpb.Code_ABORTED: logMethod = log.Warningf default: // We want to suppress/demote some MySQL error codes. @@ -1232,20 +1232,20 @@ func validateSplitQueryParameters( // Since we're called by VTGate this should not normally be violated. 
if target.TabletType != topodatapb.TabletType_RDONLY { return tabletenv.NewTabletError( - vtrpcpb.ErrorCode_BAD_INPUT, + vtrpcpb.Code_INVALID_ARGUMENT, "SplitQuery must be called with a RDONLY tablet. TableType passed is: %v", target.TabletType) } if numRowsPerQueryPart < 0 { return tabletenv.NewTabletError( - vtrpcpb.ErrorCode_BAD_INPUT, + vtrpcpb.Code_INVALID_ARGUMENT, "splitQuery: numRowsPerQueryPart must be non-negative. Got: %v. SQL: %v", numRowsPerQueryPart, querytypes.QueryAsString(query.Sql, query.BindVariables)) } if splitCount < 0 { return tabletenv.NewTabletError( - vtrpcpb.ErrorCode_BAD_INPUT, + vtrpcpb.Code_INVALID_ARGUMENT, "splitQuery: splitCount must be non-negative. Got: %v. SQL: %v", splitCount, querytypes.QueryAsString(query.Sql, query.BindVariables)) @@ -1253,7 +1253,7 @@ func validateSplitQueryParameters( if (splitCount == 0 && numRowsPerQueryPart == 0) || (splitCount != 0 && numRowsPerQueryPart != 0) { return tabletenv.NewTabletError( - vtrpcpb.ErrorCode_BAD_INPUT, + vtrpcpb.Code_INVALID_ARGUMENT, "splitQuery: exactly one of {numRowsPerQueryPart, splitCount} must be"+ " non zero. Got: numRowsPerQueryPart=%v, splitCount=%v. SQL: %v", numRowsPerQueryPart, @@ -1263,7 +1263,7 @@ func validateSplitQueryParameters( if algorithm != querypb.SplitQueryRequest_EQUAL_SPLITS && algorithm != querypb.SplitQueryRequest_FULL_SCAN { return tabletenv.NewTabletError( - vtrpcpb.ErrorCode_BAD_INPUT, + vtrpcpb.Code_INVALID_ARGUMENT, "splitquery: unsupported algorithm: %v. SQL: %v", algorithm, querytypes.QueryAsString(query.Sql, query.BindVariables)) @@ -1374,7 +1374,7 @@ func splitQueryToTabletError(err error) error { if err == nil { return nil } - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "splitquery: %v", err) + return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "splitquery: %v", err) } // StreamHealth streams the health status to callback. 
@@ -1462,11 +1462,11 @@ func (tsv *TabletServer) UpdateStream(ctx context.Context, target *querypb.Targe if position != "" { p, err = replication.DecodePosition(position) if err != nil { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "cannot parse position: %v", err) + return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "cannot parse position: %v", err) } } } else if position != "" { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "at most one of position and timestamp should be specified") + return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "at most one of position and timestamp should be specified") } // Validate proper target is used. @@ -1486,11 +1486,11 @@ func (tsv *TabletServer) UpdateStream(ctx context.Context, target *querypb.Targe err = s.Stream(streamCtx) switch err { case mysqlctl.ErrBinlogUnavailable: - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_QUERY_NOT_SERVED, "%v", err) + return tabletenv.NewTabletError(vtrpcpb.Code_FAILED_PRECONDITION, "%v", err) case nil, io.EOF: return nil default: - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "%v", err) + return tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "%v", err) } } @@ -1523,19 +1523,19 @@ func (tsv *TabletServer) startRequest(ctx context.Context, target *querypb.Targe if allowOnShutdown && tsv.state == StateShuttingDown { goto verifyTarget } - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_QUERY_NOT_SERVED, "operation not allowed in state %s", stateName[tsv.state]) + return tabletenv.NewTabletError(vtrpcpb.Code_FAILED_PRECONDITION, "operation not allowed in state %s", stateName[tsv.state]) verifyTarget: if target != nil { // a valid target needs to be used if target.Keyspace != tsv.target.Keyspace { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_QUERY_NOT_SERVED, "Invalid keyspace %v", target.Keyspace) + return tabletenv.NewTabletError(vtrpcpb.Code_FAILED_PRECONDITION, "Invalid keyspace %v", target.Keyspace) } 
if target.Shard != tsv.target.Shard { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_QUERY_NOT_SERVED, "Invalid shard %v", target.Shard) + return tabletenv.NewTabletError(vtrpcpb.Code_FAILED_PRECONDITION, "Invalid shard %v", target.Shard) } if isTx && tsv.target.TabletType != topodatapb.TabletType_MASTER { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_QUERY_NOT_SERVED, "transactional statement disallowed on non-master tablet: %v", tsv.target.TabletType) + return tabletenv.NewTabletError(vtrpcpb.Code_FAILED_PRECONDITION, "transactional statement disallowed on non-master tablet: %v", tsv.target.TabletType) } if target.TabletType != tsv.target.TabletType { for _, otherType := range tsv.alsoAllow { @@ -1543,10 +1543,10 @@ verifyTarget: goto ok } } - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_QUERY_NOT_SERVED, "Invalid tablet type: %v, want: %v or %v", target.TabletType, tsv.target.TabletType, tsv.alsoAllow) + return tabletenv.NewTabletError(vtrpcpb.Code_FAILED_PRECONDITION, "Invalid tablet type: %v, want: %v or %v", target.TabletType, tsv.target.TabletType, tsv.alsoAllow) } } else if !tabletenv.IsLocalContext(ctx) { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_QUERY_NOT_SERVED, "No target") + return tabletenv.NewTabletError(vtrpcpb.Code_FAILED_PRECONDITION, "No target") } ok: diff --git a/go/vt/tabletserver/tabletserver_test.go b/go/vt/tabletserver/tabletserver_test.go index cf31d851828..e64f00e11e3 100644 --- a/go/vt/tabletserver/tabletserver_test.go +++ b/go/vt/tabletserver/tabletserver_test.go @@ -355,7 +355,7 @@ func TestTabletServerAllSchemaFailure(t *testing.T) { err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) defer tsv.StopService() // tabletsever shouldn't start if it can't access schema for any tables - testUtils.checkTabletError(t, err, vtrpcpb.ErrorCode_UNKNOWN_ERROR, "could not get schema for any tables") + testUtils.checkTabletError(t, err, vtrpcpb.Code_UNKNOWN, "could not get schema for any tables") } 
func TestTabletServerCheckMysql(t *testing.T) { @@ -1800,7 +1800,7 @@ func TestHandleExecTabletError(t *testing.T) { err := tsv.handleError( "select * from test_table", nil, - tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "tablet error"), + tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "tablet error"), nil, ) want := "fatal: tablet error" @@ -1817,7 +1817,7 @@ func TestTerseErrorsNonSQLError(t *testing.T) { err := tsv.handleError( "select * from test_table", nil, - tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "tablet error"), + tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "tablet error"), nil, ) want := "fatal: tablet error" @@ -1835,10 +1835,10 @@ func TestTerseErrorsBindVars(t *testing.T) { "select * from test_table", map[string]interface{}{"a": 1}, &tabletenv.TabletError{ - ErrorCode: vtrpcpb.ErrorCode_DEADLINE_EXCEEDED_LEGACY, - Message: "msg", - SQLError: 10, - SQLState: "HY000", + Code: vtrpcpb.Code_DEADLINE_EXCEEDED, + Message: "msg", + SQLError: 10, + SQLState: "HY000", }, nil, ) @@ -1853,7 +1853,7 @@ func TestTerseErrorsNoBindVars(t *testing.T) { config := testUtils.newQueryServiceConfig() config.TerseErrors = true tsv := NewTabletServer(config) - err := tsv.handleError("", nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_DEADLINE_EXCEEDED_LEGACY, "msg"), nil) + err := tsv.handleError("", nil, tabletenv.NewTabletError(vtrpcpb.Code_DEADLINE_EXCEEDED, "msg"), nil) want := "error: msg" if err == nil || err.Error() != want { t.Errorf("Error: %v, want '%s'", err, want) @@ -1869,10 +1869,10 @@ func TestTerseErrorsIgnoreFailoverInProgress(t *testing.T) { err := tsv.handleError("select * from test_table where id = :a", map[string]interface{}{"a": 1}, &tabletenv.TabletError{ - ErrorCode: vtrpcpb.ErrorCode_INTERNAL_ERROR, - Message: "failover in progress (errno 1227) (sqlstate 42000)", - SQLError: 1227, - SQLState: "42000", + Code: vtrpcpb.Code_INTERNAL, + Message: "failover in progress (errno 1227) (sqlstate 42000)", + SQLError: 1227, 
+ SQLState: "42000", }, nil /* logStats */) if got, want := err.Error(), "fatal: failover in progress (errno 1227) (sqlstate 42000)"; got != want { diff --git a/go/vt/tabletserver/testutils_test.go b/go/vt/tabletserver/testutils_test.go index 7845c6ea10a..eccc9461851 100644 --- a/go/vt/tabletserver/testutils_test.go +++ b/go/vt/tabletserver/testutils_test.go @@ -41,13 +41,13 @@ func (util *testUtils) checkEqual(t *testing.T, expected interface{}, result int } } -func (util *testUtils) checkTabletError(t *testing.T, err interface{}, tabletErrCode vtrpcpb.ErrorCode, tabletErrStr string) { +func (util *testUtils) checkTabletError(t *testing.T, err interface{}, tabletErrCode vtrpcpb.Code, tabletErrStr string) { tabletError, ok := err.(*tabletenv.TabletError) if !ok { t.Fatalf("should return a TabletError, but got err: %v", err) } - if tabletError.ErrorCode != tabletErrCode { - t.Fatalf("got a TabletError with error code %s but wanted: %s", tabletError.ErrorCode, tabletErrCode) + if tabletError.Code != tabletErrCode { + t.Fatalf("got a TabletError with error code %s but wanted: %s", tabletError.Code, tabletErrCode) } if !strings.Contains(tabletError.Error(), tabletErrStr) { t.Fatalf("expect the tablet error should contain string: '%s', but it does not. 
Got tablet error: '%s'", tabletErrStr, tabletError.Error()) diff --git a/go/vt/tabletserver/twopc.go b/go/vt/tabletserver/twopc.go index 621b8cca3df..65a6b2ec331 100644 --- a/go/vt/tabletserver/twopc.go +++ b/go/vt/tabletserver/twopc.go @@ -137,7 +137,7 @@ func (tpc *TwoPC) Init(sidecarDBName string, dbaparams *sqldb.ConnParams) error } for _, s := range statements { if _, err := conn.ExecuteFetch(s, 0, false); err != nil { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, err.Error()) + return tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, err.Error()) } } tpc.insertRedoTx = buildParsedQuery( @@ -368,7 +368,7 @@ func (tpc *TwoPC) Transition(ctx context.Context, conn *TxConnection, dtid strin return err } if qr.RowsAffected != 1 { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "could not transition to %v: %s", state, dtid) + return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "could not transition to %v: %s", state, dtid) } return nil } diff --git a/go/vt/tabletserver/tx_executor.go b/go/vt/tabletserver/tx_executor.go index 09f08bc1cdf..c069c3fc1f2 100644 --- a/go/vt/tabletserver/tx_executor.go +++ b/go/vt/tabletserver/tx_executor.go @@ -32,7 +32,7 @@ type TxExecutor struct { // protocol, will perform all the cleanup. 
func (txe *TxExecutor) Prepare(transactionID int64, dtid string) error { if !txe.te.twopcEnabled { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "2pc is not enabled") + return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") } defer tabletenv.QueryStats.Record("PREPARE", time.Now()) txe.logStats.TransactionID = transactionID @@ -51,7 +51,7 @@ func (txe *TxExecutor) Prepare(transactionID int64, dtid string) error { err = txe.te.preparedPool.Put(conn, dtid) if err != nil { txe.te.txPool.localRollback(txe.ctx, conn) - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY, "prepare failed for transaction %d: %v", transactionID, err) + return tabletenv.NewTabletError(vtrpcpb.Code_RESOURCE_EXHAUSTED, "prepare failed for transaction %d: %v", transactionID, err) } localConn, err := txe.te.txPool.LocalBegin(txe.ctx) @@ -78,12 +78,12 @@ func (txe *TxExecutor) Prepare(transactionID int64, dtid string) error { // marked as failed in the redo log. func (txe *TxExecutor) CommitPrepared(dtid string) error { if !txe.te.twopcEnabled { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "2pc is not enabled") + return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") } defer tabletenv.QueryStats.Record("COMMIT_PREPARED", time.Now()) conn, err := txe.te.preparedPool.FetchForCommit(dtid) if err != nil { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "cannot commit dtid %s, state: %v", dtid, err) + return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "cannot commit dtid %s, state: %v", dtid, err) } if conn == nil { return nil @@ -153,7 +153,7 @@ func (txe *TxExecutor) markFailed(ctx context.Context, dtid string) { // killer will be the one to eventually roll it back. 
func (txe *TxExecutor) RollbackPrepared(dtid string, originalID int64) error { if !txe.te.twopcEnabled { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "2pc is not enabled") + return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") } defer tabletenv.QueryStats.Record("ROLLBACK_PREPARED", time.Now()) conn, err := txe.te.txPool.LocalBegin(txe.ctx) @@ -183,7 +183,7 @@ returnConn: // CreateTransaction creates the metadata for a 2PC transaction. func (txe *TxExecutor) CreateTransaction(dtid string, participants []*querypb.Target) error { if !txe.te.twopcEnabled { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "2pc is not enabled") + return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") } defer tabletenv.QueryStats.Record("CREATE_TRANSACTION", time.Now()) conn, err := txe.te.txPool.LocalBegin(txe.ctx) @@ -203,7 +203,7 @@ func (txe *TxExecutor) CreateTransaction(dtid string, participants []*querypb.Ta // decision to commit the associated 2pc transaction. func (txe *TxExecutor) StartCommit(transactionID int64, dtid string) error { if !txe.te.twopcEnabled { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "2pc is not enabled") + return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") } defer tabletenv.QueryStats.Record("START_COMMIT", time.Now()) txe.logStats.TransactionID = transactionID @@ -225,7 +225,7 @@ func (txe *TxExecutor) StartCommit(transactionID int64, dtid string) error { // If a transaction id is provided, that transaction is also rolled back. 
func (txe *TxExecutor) SetRollback(dtid string, transactionID int64) error { if !txe.te.twopcEnabled { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "2pc is not enabled") + return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") } defer tabletenv.QueryStats.Record("SET_ROLLBACK", time.Now()) txe.logStats.TransactionID = transactionID @@ -257,7 +257,7 @@ func (txe *TxExecutor) SetRollback(dtid string, transactionID int64) error { // essentially resolving it. func (txe *TxExecutor) ConcludeTransaction(dtid string) error { if !txe.te.twopcEnabled { - return tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "2pc is not enabled") + return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") } defer tabletenv.QueryStats.Record("RESOLVE", time.Now()) @@ -277,7 +277,7 @@ func (txe *TxExecutor) ConcludeTransaction(dtid string) error { // ReadTransaction returns the metadata for the sepcified dtid. func (txe *TxExecutor) ReadTransaction(dtid string) (*querypb.TransactionMetadata, error) { if !txe.te.twopcEnabled { - return nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "2pc is not enabled") + return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") } return txe.te.twoPC.ReadTransaction(txe.ctx, dtid) } @@ -285,15 +285,15 @@ func (txe *TxExecutor) ReadTransaction(dtid string) (*querypb.TransactionMetadat // ReadTwopcInflight returns info about all in-flight 2pc transactions. 
func (txe *TxExecutor) ReadTwopcInflight() (distributed []*DistributedTx, prepared, failed []*PreparedTx, err error) { if !txe.te.twopcEnabled { - return nil, nil, nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "2pc is not enabled") + return nil, nil, nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") } prepared, failed, err = txe.te.twoPC.ReadAllRedo(txe.ctx) if err != nil { - return nil, nil, nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "Could not read redo: %v", err) + return nil, nil, nil, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "Could not read redo: %v", err) } distributed, err = txe.te.twoPC.ReadAllTransactions(txe.ctx) if err != nil { - return nil, nil, nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "Could not read redo: %v", err) + return nil, nil, nil, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "Could not read redo: %v", err) } return distributed, prepared, failed, nil } diff --git a/go/vt/tabletserver/tx_pool.go b/go/vt/tabletserver/tx_pool.go index cea1eb06680..26fcd2eea64 100644 --- a/go/vt/tabletserver/tx_pool.go +++ b/go/vt/tabletserver/tx_pool.go @@ -146,9 +146,9 @@ func (axp *TxPool) Begin(ctx context.Context) (int64, error) { return 0, err case pools.ErrTimeout: axp.LogActive() - return 0, tabletenv.NewTabletError(vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY, "Transaction pool connection limit exceeded") + return 0, tabletenv.NewTabletError(vtrpcpb.Code_RESOURCE_EXHAUSTED, "Transaction pool connection limit exceeded") } - return 0, tabletenv.NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, err) + return 0, tabletenv.NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, err) } if _, err := conn.Exec(ctx, "begin", 1, false); err != nil { conn.Recycle() @@ -157,7 +157,7 @@ func (axp *TxPool) Begin(ctx context.Context) (int64, error) { // TabletError and instead preserve the error code. 
return 0, err } - return 0, tabletenv.NewTabletErrorSQL(vtrpcpb.ErrorCode_UNKNOWN_ERROR, err) + return 0, tabletenv.NewTabletErrorSQL(vtrpcpb.Code_UNKNOWN, err) } transactionID := axp.lastID.Add(1) axp.activePool.Register( @@ -196,7 +196,7 @@ func (axp *TxPool) Rollback(ctx context.Context, transactionID int64) error { func (axp *TxPool) Get(transactionID int64, reason string) (*TxConnection, error) { v, err := axp.activePool.Get(transactionID, reason) if err != nil { - return nil, tabletenv.NewTabletError(vtrpcpb.ErrorCode_NOT_IN_TX, "Transaction %d: %v", transactionID, err) + return nil, tabletenv.NewTabletError(vtrpcpb.Code_ABORTED, "Transaction %d: %v", transactionID, err) } return v.(*TxConnection), nil } @@ -219,7 +219,7 @@ func (axp *TxPool) LocalCommit(ctx context.Context, conn *TxConnection, messager txStats.Add("Completed", time.Now().Sub(conn.StartTime)) if _, err := conn.Exec(ctx, "commit", 1, false); err != nil { conn.Close() - return tabletenv.NewTabletErrorSQL(vtrpcpb.ErrorCode_UNKNOWN_ERROR, err) + return tabletenv.NewTabletErrorSQL(vtrpcpb.Code_UNKNOWN, err) } messager.UpdateCaches(conn.NewMessages, conn.ChangedMessages) return nil @@ -238,7 +238,7 @@ func (axp *TxPool) localRollback(ctx context.Context, conn *TxConnection) error txStats.Add("Aborted", time.Now().Sub(conn.StartTime)) if _, err := conn.Exec(ctx, "rollback", 1, false); err != nil { conn.Close() - return tabletenv.NewTabletErrorSQL(vtrpcpb.ErrorCode_UNKNOWN_ERROR, err) + return tabletenv.NewTabletErrorSQL(vtrpcpb.Code_UNKNOWN, err) } return nil } @@ -306,9 +306,9 @@ func (txc *TxConnection) Exec(ctx context.Context, query string, maxrows int, wa if err != nil { if tabletenv.IsConnErr(err) { txc.pool.checker.CheckMySQL() - return nil, tabletenv.NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, err) + return nil, tabletenv.NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, err) } - return nil, tabletenv.NewTabletErrorSQL(vtrpcpb.ErrorCode_UNKNOWN_ERROR, err) + return nil, 
tabletenv.NewTabletErrorSQL(vtrpcpb.Code_UNKNOWN, err) } return r, nil } diff --git a/go/vt/tabletserver/tx_pool_test.go b/go/vt/tabletserver/tx_pool_test.go index 391e3041c05..94130ebc6c2 100644 --- a/go/vt/tabletserver/tx_pool_test.go +++ b/go/vt/tabletserver/tx_pool_test.go @@ -190,7 +190,7 @@ func TestTxPoolBeginWithPoolConnectionError_Errno2006_Permanent(t *testing.T) { if err == nil || !strings.Contains(err.Error(), "Lost connection to MySQL server") || !strings.Contains(err.Error(), "(errno 2013)") { t.Fatalf("Begin did not return the reconnect error: %v", err) } - if got, want := vterrors.RecoverVtErrorCode(err), vtrpcpb.ErrorCode_INTERNAL_ERROR; got != want { + if got, want := vterrors.RecoverVtErrorCode(err), vtrpcpb.Code_INTERNAL; got != want { t.Errorf("wrong error code for reconnect error after Begin: got = %v, want = %v", got, want) } } @@ -212,7 +212,7 @@ func TestTxPoolBeginWithPoolConnectionError_Errno2013(t *testing.T) { if err == nil || !strings.Contains(err.Error(), "(errno 2013)") { t.Fatalf("Begin must return connection error with MySQL errno 2013: %v", err) } - if got, want := vterrors.RecoverVtErrorCode(err), vtrpcpb.ErrorCode_UNKNOWN_ERROR; got != want { + if got, want := vterrors.RecoverVtErrorCode(err), vtrpcpb.Code_UNKNOWN; got != want { t.Errorf("wrong error code for Begin error: got = %v, want = %v", got, want) } } @@ -253,7 +253,7 @@ func TestTxPoolBeginWithError(t *testing.T) { if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("Begin: %v, want %s", err, want) } - if got, want := vterrors.RecoverVtErrorCode(err), vtrpcpb.ErrorCode_UNKNOWN_ERROR; got != want { + if got, want := vterrors.RecoverVtErrorCode(err), vtrpcpb.Code_UNKNOWN; got != want { t.Errorf("wrong error code for Begin error: got = %v, want = %v", got, want) } } @@ -333,7 +333,7 @@ func TestTxPoolExecFailDueToConnFail_Errno2006(t *testing.T) { if err == nil || !strings.Contains(err.Error(), "(errno 2006)") { t.Fatalf("Exec must return connection error with 
MySQL errno 2006: %v", err) } - if got, want := vterrors.RecoverVtErrorCode(err), vtrpcpb.ErrorCode_INTERNAL_ERROR; got != want { + if got, want := vterrors.RecoverVtErrorCode(err), vtrpcpb.Code_INTERNAL; got != want { t.Errorf("wrong error code for Exec error: got = %v, want = %v", got, want) } } @@ -366,7 +366,7 @@ func TestTxPoolExecFailDueToConnFail_Errno2013(t *testing.T) { if err == nil || !strings.Contains(err.Error(), "(errno 2013)") { t.Fatalf("Exec must return connection error with MySQL errno 2013: %v", err) } - if got, want := vterrors.RecoverVtErrorCode(err), vtrpcpb.ErrorCode_UNKNOWN_ERROR; got != want { + if got, want := vterrors.RecoverVtErrorCode(err), vtrpcpb.Code_UNKNOWN; got != want { t.Errorf("wrong error code for Exec error: got = %v, want = %v", got, want) } } diff --git a/go/vt/vterrors/aggregate.go b/go/vt/vterrors/aggregate.go index df5d7a4bb28..ae235c19c0e 100644 --- a/go/vt/vterrors/aggregate.go +++ b/go/vt/vterrors/aggregate.go @@ -8,7 +8,7 @@ import ( vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) -// A list of all vtrpcpb.ErrorCodes, ordered by priority. These priorities are +// A list of all vtrpcpb.Code, ordered by priority. These priorities are // used when aggregating multiple errors in VtGate. // Higher priority error codes are more urgent for users to see. 
They are // prioritized based on the following question: assuming a scatter query produced multiple @@ -30,27 +30,27 @@ const ( PriorityBadInput ) -var errorPriorities = map[vtrpcpb.ErrorCode]int{ - vtrpcpb.ErrorCode_SUCCESS: PrioritySuccess, - vtrpcpb.ErrorCode_CANCELLED_LEGACY: PriorityCancelled, - vtrpcpb.ErrorCode_UNKNOWN_ERROR: PriorityUnknownError, - vtrpcpb.ErrorCode_BAD_INPUT: PriorityBadInput, - vtrpcpb.ErrorCode_DEADLINE_EXCEEDED_LEGACY: PriorityDeadlineExceeded, - vtrpcpb.ErrorCode_INTEGRITY_ERROR: PriorityIntegrityError, - vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY: PriorityPermissionDenied, - vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY: PriorityResourceExhausted, - vtrpcpb.ErrorCode_QUERY_NOT_SERVED: PriorityQueryNotServed, - vtrpcpb.ErrorCode_NOT_IN_TX: PriorityNotInTx, - vtrpcpb.ErrorCode_INTERNAL_ERROR: PriorityInternalError, - vtrpcpb.ErrorCode_TRANSIENT_ERROR: PriorityTransientError, - vtrpcpb.ErrorCode_UNAUTHENTICATED_LEGACY: PriorityUnauthenticated, +var errorPriorities = map[vtrpcpb.Code]int{ + vtrpcpb.Code_OK: PrioritySuccess, + vtrpcpb.Code_CANCELED: PriorityCancelled, + vtrpcpb.Code_UNKNOWN: PriorityUnknownError, + vtrpcpb.Code_INVALID_ARGUMENT: PriorityBadInput, + vtrpcpb.Code_DEADLINE_EXCEEDED: PriorityDeadlineExceeded, + vtrpcpb.Code_ALREADY_EXISTS: PriorityIntegrityError, + vtrpcpb.Code_PERMISSION_DENIED: PriorityPermissionDenied, + vtrpcpb.Code_RESOURCE_EXHAUSTED: PriorityResourceExhausted, + vtrpcpb.Code_FAILED_PRECONDITION: PriorityQueryNotServed, + vtrpcpb.Code_ABORTED: PriorityNotInTx, + vtrpcpb.Code_INTERNAL: PriorityInternalError, + vtrpcpb.Code_UNAVAILABLE: PriorityTransientError, + vtrpcpb.Code_UNAUTHENTICATED: PriorityUnauthenticated, } // AggregateVtGateErrorCodes aggregates a list of errors into a single // error code. It does so by finding the highest priority error code // in the list. 
-func AggregateVtGateErrorCodes(errors []error) vtrpcpb.ErrorCode { - highCode := vtrpcpb.ErrorCode_SUCCESS +func AggregateVtGateErrorCodes(errors []error) vtrpcpb.Code { + highCode := vtrpcpb.Code_OK for _, e := range errors { code := RecoverVtErrorCode(e) if errorPriorities[code] > errorPriorities[highCode] { diff --git a/go/vt/vterrors/aggregate_test.go b/go/vt/vterrors/aggregate_test.go index c8c931c8c8a..d76518fe51c 100644 --- a/go/vt/vterrors/aggregate_test.go +++ b/go/vt/vterrors/aggregate_test.go @@ -15,48 +15,48 @@ import ( var errGeneric = errors.New("generic error") -func errFromCode(c vtrpcpb.ErrorCode) error { +func errFromCode(c vtrpcpb.Code) error { return FromError(c, errGeneric) } func TestAggregateVtGateErrorCodes(t *testing.T) { var testcases = []struct { input []error - expected vtrpcpb.ErrorCode + expected vtrpcpb.Code }{ { // aggregation of no errors is a success code input: nil, - expected: vtrpcpb.ErrorCode_SUCCESS, + expected: vtrpcpb.Code_OK, }, { // single error code gets returned directly - input: []error{errFromCode(vtrpcpb.ErrorCode_BAD_INPUT)}, - expected: vtrpcpb.ErrorCode_BAD_INPUT, + input: []error{errFromCode(vtrpcpb.Code_INVALID_ARGUMENT)}, + expected: vtrpcpb.Code_INVALID_ARGUMENT, }, { // aggregate two codes to the highest priority input: []error{ - errFromCode(vtrpcpb.ErrorCode_SUCCESS), - errFromCode(vtrpcpb.ErrorCode_TRANSIENT_ERROR), + errFromCode(vtrpcpb.Code_OK), + errFromCode(vtrpcpb.Code_UNAVAILABLE), }, - expected: vtrpcpb.ErrorCode_TRANSIENT_ERROR, + expected: vtrpcpb.Code_UNAVAILABLE, }, { input: []error{ - errFromCode(vtrpcpb.ErrorCode_SUCCESS), - errFromCode(vtrpcpb.ErrorCode_TRANSIENT_ERROR), - errFromCode(vtrpcpb.ErrorCode_BAD_INPUT), + errFromCode(vtrpcpb.Code_OK), + errFromCode(vtrpcpb.Code_UNAVAILABLE), + errFromCode(vtrpcpb.Code_INVALID_ARGUMENT), }, - expected: vtrpcpb.ErrorCode_BAD_INPUT, + expected: vtrpcpb.Code_INVALID_ARGUMENT, }, { // unknown errors map to the unknown code input: []error{ - 
errFromCode(vtrpcpb.ErrorCode_SUCCESS), + errFromCode(vtrpcpb.Code_OK), fmt.Errorf("unknown error"), }, - expected: vtrpcpb.ErrorCode_UNKNOWN_ERROR, + expected: vtrpcpb.Code_UNKNOWN, }, } for _, tc := range testcases { @@ -79,12 +79,12 @@ func TestAggregateVtGateErrors(t *testing.T) { }, { input: []error{ - errFromCode(vtrpcpb.ErrorCode_SUCCESS), - errFromCode(vtrpcpb.ErrorCode_TRANSIENT_ERROR), - errFromCode(vtrpcpb.ErrorCode_BAD_INPUT), + errFromCode(vtrpcpb.Code_OK), + errFromCode(vtrpcpb.Code_UNAVAILABLE), + errFromCode(vtrpcpb.Code_INVALID_ARGUMENT), }, expected: FromError( - vtrpcpb.ErrorCode_BAD_INPUT, + vtrpcpb.Code_INVALID_ARGUMENT, ConcatenateErrors([]error{errGeneric, errGeneric, errGeneric}), ), }, diff --git a/go/vt/vterrors/grpc.go b/go/vt/vterrors/grpc.go index 0b44eea1c87..94e6334262b 100644 --- a/go/vt/vterrors/grpc.go +++ b/go/vt/vterrors/grpc.go @@ -24,81 +24,91 @@ import ( // See: https://github.com/grpc/grpc-go/issues/319 const GRPCServerErrPrefix = "gRPCServerError:" -// GRPCCodeToErrorCode maps a gRPC codes.Code to a vtrpcpb.ErrorCode. -func GRPCCodeToErrorCode(code codes.Code) vtrpcpb.ErrorCode { +// CodeToLegacyErrorCode maps a vtrpcpb.Code to a vtrpcpb.ErrorCode. 
+func CodeToLegacyErrorCode(code vtrpcpb.Code) vtrpcpb.ErrorCode { switch code { - case codes.OK: + case vtrpcpb.Code_OK: return vtrpcpb.ErrorCode_SUCCESS - case codes.Canceled: + case vtrpcpb.Code_CANCELED: return vtrpcpb.ErrorCode_CANCELLED_LEGACY - case codes.Unknown: + case vtrpcpb.Code_UNKNOWN: return vtrpcpb.ErrorCode_UNKNOWN_ERROR - case codes.InvalidArgument: + case vtrpcpb.Code_INVALID_ARGUMENT: return vtrpcpb.ErrorCode_BAD_INPUT - case codes.DeadlineExceeded: + case vtrpcpb.Code_DEADLINE_EXCEEDED: return vtrpcpb.ErrorCode_DEADLINE_EXCEEDED_LEGACY - case codes.AlreadyExists: + case vtrpcpb.Code_ALREADY_EXISTS: return vtrpcpb.ErrorCode_INTEGRITY_ERROR - case codes.PermissionDenied: + case vtrpcpb.Code_PERMISSION_DENIED: return vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY - case codes.ResourceExhausted: + case vtrpcpb.Code_RESOURCE_EXHAUSTED: return vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY - case codes.FailedPrecondition: + case vtrpcpb.Code_FAILED_PRECONDITION: return vtrpcpb.ErrorCode_QUERY_NOT_SERVED - case codes.Aborted: + case vtrpcpb.Code_ABORTED: return vtrpcpb.ErrorCode_NOT_IN_TX - case codes.Internal: + case vtrpcpb.Code_INTERNAL: return vtrpcpb.ErrorCode_INTERNAL_ERROR - case codes.Unavailable: + case vtrpcpb.Code_UNAVAILABLE: return vtrpcpb.ErrorCode_TRANSIENT_ERROR - case codes.Unauthenticated: + case vtrpcpb.Code_UNAUTHENTICATED: return vtrpcpb.ErrorCode_UNAUTHENTICATED_LEGACY default: return vtrpcpb.ErrorCode_UNKNOWN_ERROR } } -// ErrorCodeToGRPCCode maps a vtrpcpb.ErrorCode to a gRPC codes.Code. -func ErrorCodeToGRPCCode(code vtrpcpb.ErrorCode) codes.Code { +// LegacyErrorCodeToCode maps a vtrpcpb.ErrorCode to a gRPC vtrpcpb.Code. 
+func LegacyErrorCodeToCode(code vtrpcpb.ErrorCode) vtrpcpb.Code { switch code { case vtrpcpb.ErrorCode_SUCCESS: - return codes.OK + return vtrpcpb.Code_OK case vtrpcpb.ErrorCode_CANCELLED_LEGACY: - return codes.Canceled + return vtrpcpb.Code_CANCELED case vtrpcpb.ErrorCode_UNKNOWN_ERROR: - return codes.Unknown + return vtrpcpb.Code_UNKNOWN case vtrpcpb.ErrorCode_BAD_INPUT: - return codes.InvalidArgument + return vtrpcpb.Code_INVALID_ARGUMENT case vtrpcpb.ErrorCode_DEADLINE_EXCEEDED_LEGACY: - return codes.DeadlineExceeded + return vtrpcpb.Code_DEADLINE_EXCEEDED case vtrpcpb.ErrorCode_INTEGRITY_ERROR: - return codes.AlreadyExists + return vtrpcpb.Code_ALREADY_EXISTS case vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY: - return codes.PermissionDenied + return vtrpcpb.Code_PERMISSION_DENIED case vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY: - return codes.ResourceExhausted + return vtrpcpb.Code_RESOURCE_EXHAUSTED case vtrpcpb.ErrorCode_QUERY_NOT_SERVED: - return codes.FailedPrecondition + return vtrpcpb.Code_FAILED_PRECONDITION case vtrpcpb.ErrorCode_NOT_IN_TX: - return codes.Aborted + return vtrpcpb.Code_ABORTED case vtrpcpb.ErrorCode_INTERNAL_ERROR: - return codes.Internal + return vtrpcpb.Code_INTERNAL case vtrpcpb.ErrorCode_TRANSIENT_ERROR: - return codes.Unavailable + return vtrpcpb.Code_UNAVAILABLE case vtrpcpb.ErrorCode_UNAUTHENTICATED_LEGACY: - return codes.Unauthenticated + return vtrpcpb.Code_UNAUTHENTICATED default: - return codes.Unknown + return vtrpcpb.Code_UNKNOWN } } +// CodeToGRPC maps a vtrpcpb.Code to a grpc Code. +func CodeToGRPC(code vtrpcpb.Code) codes.Code { + return codes.Code(code) +} + +// GRPCToCode maps a grpc Code to a vtrpcpb.Code +func GRPCToCode(code codes.Code) vtrpcpb.Code { + return vtrpcpb.Code(code) +} + // toGRPCCode will attempt to determine the best gRPC code for a particular error. 
func toGRPCCode(err error) codes.Code { if err == nil { return codes.OK } if vtErr, ok := err.(VtError); ok { - return ErrorCodeToGRPCCode(vtErr.VtErrorCode()) + return CodeToGRPC(vtErr.VtErrorCode()) } // Returns the underlying gRPC Code, or codes.Unknown if one doesn't exist. return grpc.Code(err) @@ -140,7 +150,7 @@ func FromGRPCError(err error) error { return err } return &VitessError{ - Code: GRPCCodeToErrorCode(grpc.Code(err)), + Code: GRPCToCode(grpc.Code(err)), err: err, } } diff --git a/go/vt/vterrors/proto3.go b/go/vt/vterrors/proto3.go index 21199e15ec4..08597dcd96d 100644 --- a/go/vt/vterrors/proto3.go +++ b/go/vt/vterrors/proto3.go @@ -21,8 +21,12 @@ func FromVtRPCError(rpcErr *vtrpcpb.RPCError) error { if rpcErr == nil { return nil } + code := rpcErr.Code + if code == vtrpcpb.Code_OK { + code = LegacyErrorCodeToCode(rpcErr.LegacyCode) + } return &VitessError{ - Code: rpcErr.LegacyCode, + Code: code, err: errors.New(rpcErr.Message), } } @@ -32,8 +36,10 @@ func VtRPCErrorFromVtError(err error) *vtrpcpb.RPCError { if err == nil { return nil } + code := RecoverVtErrorCode(err) return &vtrpcpb.RPCError{ - LegacyCode: RecoverVtErrorCode(err), + LegacyCode: CodeToLegacyErrorCode(code), + Code: code, Message: err.Error(), } } diff --git a/go/vt/vterrors/vterrors.go b/go/vt/vterrors/vterrors.go index 6cae91cf20b..d6ad2b09aaa 100644 --- a/go/vt/vterrors/vterrors.go +++ b/go/vt/vterrors/vterrors.go @@ -21,21 +21,21 @@ func ConcatenateErrors(errors []error) error { // VtError is implemented by any type that exposes a vtrpcpb.ErrorCode. type VtError interface { - VtErrorCode() vtrpcpb.ErrorCode + VtErrorCode() vtrpcpb.Code } // RecoverVtErrorCode attempts to recover a vtrpcpb.ErrorCode from an error. 
-func RecoverVtErrorCode(err error) vtrpcpb.ErrorCode { +func RecoverVtErrorCode(err error) vtrpcpb.Code { if vtErr, ok := err.(VtError); ok { return vtErr.VtErrorCode() } - return vtrpcpb.ErrorCode_UNKNOWN_ERROR + return vtrpcpb.Code_UNKNOWN } // VitessError is the error type that we use internally for passing structured errors. type VitessError struct { // Error code of the Vitess error. - Code vtrpcpb.ErrorCode + Code vtrpcpb.Code // Error message that should be returned. This allows us to change an error message // without losing the underlying error. For example, if you have an error like // context.DeadlikeExceeded, you don't want to modify it - otherwise you would lose @@ -57,7 +57,7 @@ func (e *VitessError) Error() string { } // VtErrorCode returns the underlying Vitess error code. -func (e *VitessError) VtErrorCode() vtrpcpb.ErrorCode { +func (e *VitessError) VtErrorCode() vtrpcpb.Code { return e.Code } @@ -73,9 +73,9 @@ func (e *VitessError) AsString() string { // existing error. // Use this method also when you want to create a VitessError without a custom // message. For example: -// err := vterrors.FromError(vtrpcpb.ErrorCode_INTERNAL_ERROR, +// err := vterrors.FromError(vtrpcpb.Code_INTERNAL, // errors.New("no valid endpoint")) -func FromError(code vtrpcpb.ErrorCode, err error) error { +func FromError(code vtrpcpb.Code, err error) error { return &VitessError{ Code: code, err: err, @@ -84,7 +84,7 @@ func FromError(code vtrpcpb.ErrorCode, err error) error { // NewVitessError returns a VitessError backed error with the given arguments. // Useful for preserving an underlying error while creating a new error message. 
-func NewVitessError(code vtrpcpb.ErrorCode, err error, format string, args ...interface{}) error { +func NewVitessError(code vtrpcpb.Code, err error, format string, args ...interface{}) error { return &VitessError{ Code: code, Message: fmt.Sprintf(format, args...), diff --git a/go/vt/vtgate/buffer/buffer.go b/go/vt/vtgate/buffer/buffer.go index cf5ba44c61e..ff0defb2603 100644 --- a/go/vt/vtgate/buffer/buffer.go +++ b/go/vt/vtgate/buffer/buffer.go @@ -29,9 +29,9 @@ import ( ) var ( - bufferFullError = vterrors.FromError(vtrpcpb.ErrorCode_TRANSIENT_ERROR, errors.New("master buffer is full")) - entryEvictedError = vterrors.FromError(vtrpcpb.ErrorCode_TRANSIENT_ERROR, errors.New("buffer full: request evicted for newer request")) - contextCanceledError = vterrors.FromError(vtrpcpb.ErrorCode_TRANSIENT_ERROR, errors.New("context was canceled before failover finished")) + bufferFullError = vterrors.FromError(vtrpcpb.Code_UNAVAILABLE, errors.New("master buffer is full")) + entryEvictedError = vterrors.FromError(vtrpcpb.Code_UNAVAILABLE, errors.New("buffer full: request evicted for newer request")) + contextCanceledError = vterrors.FromError(vtrpcpb.Code_UNAVAILABLE, errors.New("context was canceled before failover finished")) ) // bufferMode specifies how the buffer is configured for a given shard. @@ -220,7 +220,7 @@ func causedByFailover(err error) bool { if vtErr, ok := err.(vterrors.VtError); ok { switch vtErr.VtErrorCode() { - case vtrpcpb.ErrorCode_QUERY_NOT_SERVED: + case vtrpcpb.Code_FAILED_PRECONDITION: // All flavors. 
if strings.Contains(err.Error(), "retry: operation not allowed in state NOT_SERVING") || strings.Contains(err.Error(), "retry: operation not allowed in state SHUTTING_DOWN") || @@ -237,7 +237,7 @@ func causedByFailover(err error) bool { if strings.Contains(err.Error(), "retry: The MySQL server is running with the --read-only option so it cannot execute this statement (errno 1290) (sqlstate HY000)") { return true } - case vtrpcpb.ErrorCode_INTERNAL_ERROR: + case vtrpcpb.Code_INTERNAL: // Google internal flavor. if strings.Contains(err.Error(), "fatal: failover in progress (errno 1227) (sqlstate 42000)") { return true diff --git a/go/vt/vtgate/buffer/buffer_test.go b/go/vt/vtgate/buffer/buffer_test.go index e64e809d4ea..e95221f7b5e 100644 --- a/go/vt/vtgate/buffer/buffer_test.go +++ b/go/vt/vtgate/buffer/buffer_test.go @@ -26,9 +26,9 @@ const ( ) var ( - failoverErr = vterrors.FromError(vtrpcpb.ErrorCode_QUERY_NOT_SERVED, + failoverErr = vterrors.FromError(vtrpcpb.Code_FAILED_PRECONDITION, errors.New("vttablet: rpc error: code = 9 desc = gRPCServerError: retry: operation not allowed in state SHUTTING_DOWN")) - nonFailoverErr = vterrors.FromError(vtrpcpb.ErrorCode_QUERY_NOT_SERVED, + nonFailoverErr = vterrors.FromError(vtrpcpb.Code_FAILED_PRECONDITION, errors.New("vttablet: rpc error: code = 9 desc = gRPCServerError: retry: TODO(mberlin): Insert here any realistic error not caused by a failover")) statsKeyJoined = fmt.Sprintf("%s.%s", keyspace, shard) @@ -517,7 +517,7 @@ func isCanceledError(err error) error { if err == nil { return fmt.Errorf("buffering should have stopped early and returned an error because the request was canceled from the outside") } - if got, want := vterrors.RecoverVtErrorCode(err), vtrpcpb.ErrorCode_TRANSIENT_ERROR; got != want { + if got, want := vterrors.RecoverVtErrorCode(err), vtrpcpb.Code_UNAVAILABLE; got != want { return fmt.Errorf("wrong error code for canceled buffered request. 
got = %v, want = %v", got, want) } if got, want := err.Error(), "context was canceled before failover finished: context canceled"; got != want { @@ -531,7 +531,7 @@ func isEvictedError(err error) error { if err == nil { return errors.New("request should have been evicted because the buffer was full") } - if got, want := vterrors.RecoverVtErrorCode(err), vtrpcpb.ErrorCode_TRANSIENT_ERROR; got != want { + if got, want := vterrors.RecoverVtErrorCode(err), vtrpcpb.Code_UNAVAILABLE; got != want { return fmt.Errorf("wrong error code for evicted buffered request. got = %v, want = %v full error: %v", got, want, err) } if got, want := err.Error(), entryEvictedError.Error(); !strings.Contains(got, want) { @@ -568,7 +568,7 @@ func TestEvictionNotPossible(t *testing.T) { if bufferErr == nil || retryDone != nil { t.Fatalf("buffer should have returned an error because it's full: err: %v retryDone: %v", bufferErr, retryDone) } - if got, want := vterrors.RecoverVtErrorCode(bufferErr), vtrpcpb.ErrorCode_TRANSIENT_ERROR; got != want { + if got, want := vterrors.RecoverVtErrorCode(bufferErr), vtrpcpb.Code_UNAVAILABLE; got != want { t.Fatalf("wrong error code for evicted buffered request. 
got = %v, want = %v", got, want) } if got, want := bufferErr.Error(), bufferFullError.Error(); !strings.Contains(got, want) { diff --git a/go/vt/vtgate/gateway/discoverygateway.go b/go/vt/vtgate/gateway/discoverygateway.go index 6f80fea5d8c..65485a7533c 100644 --- a/go/vt/vtgate/gateway/discoverygateway.go +++ b/go/vt/vtgate/gateway/discoverygateway.go @@ -199,7 +199,7 @@ func (dg *discoveryGateway) withRetry(ctx context.Context, target *querypb.Targe tablets := dg.tsc.GetHealthyTabletStats(target.Keyspace, target.Shard, target.TabletType) if len(tablets) == 0 { // fail fast if there is no tablet - err = vterrors.FromError(vtrpcpb.ErrorCode_INTERNAL_ERROR, fmt.Errorf("no valid tablet")) + err = vterrors.FromError(vtrpcpb.Code_INTERNAL, fmt.Errorf("no valid tablet")) break } shuffleTablets(tablets) @@ -215,7 +215,7 @@ func (dg *discoveryGateway) withRetry(ctx context.Context, target *querypb.Targe if ts == nil { if err == nil { // do not override error from last attempt. - err = vterrors.FromError(vtrpcpb.ErrorCode_INTERNAL_ERROR, fmt.Errorf("no available connection")) + err = vterrors.FromError(vtrpcpb.Code_INTERNAL, fmt.Errorf("no available connection")) } break } @@ -224,7 +224,7 @@ func (dg *discoveryGateway) withRetry(ctx context.Context, target *querypb.Targe tabletLastUsed = ts.Tablet conn := dg.hc.GetConnection(ts.Key) if conn == nil { - err = vterrors.FromError(vtrpcpb.ErrorCode_INTERNAL_ERROR, fmt.Errorf("no connection for key %v tablet %+v", ts.Key, ts.Tablet)) + err = vterrors.FromError(vtrpcpb.Code_INTERNAL, fmt.Errorf("no connection for key %v tablet %+v", ts.Key, ts.Tablet)) invalidTablets[ts.Key] = true continue } @@ -261,7 +261,7 @@ func (dg *discoveryGateway) canRetry(ctx context.Context, err error, inTransacti } if serverError, ok := err.(*tabletconn.ServerError); ok { switch serverError.ServerCode { - case vtrpcpb.ErrorCode_INTERNAL_ERROR: + case vtrpcpb.Code_INTERNAL: // Do not retry on fatal error for streaming query. 
// For streaming query, vttablet sends: // - QUERY_NOT_SERVED, if streaming is not started yet; @@ -271,7 +271,7 @@ func (dg *discoveryGateway) canRetry(ctx context.Context, err error, inTransacti return false } fallthrough - case vtrpcpb.ErrorCode_QUERY_NOT_SERVED: + case vtrpcpb.Code_FAILED_PRECONDITION: // Retry on QUERY_NOT_SERVED and // INTERNAL_ERROR if not in a transaction. return !inTransaction diff --git a/go/vt/vtgate/gateway/discoverygateway_test.go b/go/vt/vtgate/gateway/discoverygateway_test.go index a8196c9f1a0..830a4391709 100644 --- a/go/vt/vtgate/gateway/discoverygateway_test.go +++ b/go/vt/vtgate/gateway/discoverygateway_test.go @@ -129,7 +129,7 @@ func testDiscoveryGatewayGeneric(t *testing.T, streaming bool, f func(dg Gateway dg.tsc.ResetForTesting() want := "target: ks.0.replica, no valid tablet" err := f(dg, target) - verifyShardError(t, err, want, vtrpcpb.ErrorCode_INTERNAL_ERROR) + verifyShardError(t, err, want, vtrpcpb.Code_INTERNAL) // tablet with error hc.Reset() @@ -137,7 +137,7 @@ func testDiscoveryGatewayGeneric(t *testing.T, streaming bool, f func(dg Gateway hc.AddTestTablet("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, false, 10, fmt.Errorf("no connection")) want = "target: ks.0.replica, no valid tablet" err = f(dg, target) - verifyShardError(t, err, want, vtrpcpb.ErrorCode_INTERNAL_ERROR) + verifyShardError(t, err, want, vtrpcpb.Code_INTERNAL) // tablet without connection hc.Reset() @@ -145,7 +145,7 @@ func testDiscoveryGatewayGeneric(t *testing.T, streaming bool, f func(dg Gateway ep1 := hc.AddTestTablet("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, false, 10, nil).Tablet() want = fmt.Sprintf(`target: ks.0.replica, no valid tablet`) err = f(dg, target) - verifyShardError(t, err, want, vtrpcpb.ErrorCode_INTERNAL_ERROR) + verifyShardError(t, err, want, vtrpcpb.Code_INTERNAL) // retry error hc.Reset() @@ -191,7 +191,7 @@ func testDiscoveryGatewayGeneric(t *testing.T, streaming bool, f func(dg Gateway ep1 = sc1.Tablet() 
want = fmt.Sprintf(`target: ks.0.replica, used tablet: (%+v), error: err`, ep1) err = f(dg, target) - verifyShardError(t, err, want, vtrpcpb.ErrorCode_BAD_INPUT) + verifyShardError(t, err, want, vtrpcpb.Code_INVALID_ARGUMENT) // conn error - no retry hc.Reset() @@ -201,7 +201,7 @@ func testDiscoveryGatewayGeneric(t *testing.T, streaming bool, f func(dg Gateway ep1 = sc1.Tablet() want = fmt.Sprintf(`target: ks.0.replica, used tablet: (%+v), error: conn`, ep1) err = f(dg, target) - verifyShardError(t, err, want, vtrpcpb.ErrorCode_UNKNOWN_ERROR) + verifyShardError(t, err, want, vtrpcpb.Code_UNKNOWN) // no failure hc.Reset() @@ -251,10 +251,10 @@ func testDiscoveryGatewayTransact(t *testing.T, streaming bool, f func(dg Gatewa ep1 = sc1.Tablet() want := fmt.Sprintf(`target: ks.0.replica, used tablet: (%+v), error: conn`, ep1) err = f(dg, target) - verifyShardError(t, err, want, vtrpcpb.ErrorCode_UNKNOWN_ERROR) + verifyShardError(t, err, want, vtrpcpb.Code_UNKNOWN) } -func verifyShardError(t *testing.T, err error, wantErr string, wantCode vtrpcpb.ErrorCode) { +func verifyShardError(t *testing.T, err error, wantErr string, wantCode vtrpcpb.Code) { if err == nil || err.Error() != wantErr { t.Errorf("wanted error: %s, got error: %v", wantErr, err) } diff --git a/go/vt/vtgate/gateway/l2vtgategateway.go b/go/vt/vtgate/gateway/l2vtgategateway.go index 91e6e6a0a02..cbcf1568ebd 100644 --- a/go/vt/vtgate/gateway/l2vtgategateway.go +++ b/go/vt/vtgate/gateway/l2vtgategateway.go @@ -238,7 +238,7 @@ func (lg *l2VTGateGateway) canRetry(ctx context.Context, err error, inTransactio } if serverError, ok := err.(*tabletconn.ServerError); ok { switch serverError.ServerCode { - case vtrpcpb.ErrorCode_INTERNAL_ERROR: + case vtrpcpb.Code_INTERNAL: // Do not retry on fatal error for streaming query. 
// For streaming query, vttablet sends: // - QUERY_NOT_SERVED, if streaming is not started yet; @@ -248,7 +248,7 @@ func (lg *l2VTGateGateway) canRetry(ctx context.Context, err error, inTransactio return false } fallthrough - case vtrpcpb.ErrorCode_QUERY_NOT_SERVED: + case vtrpcpb.Code_FAILED_PRECONDITION: // Retry on QUERY_NOT_SERVED and // INTERNAL_ERROR if not in a transaction. return !inTransaction diff --git a/go/vt/vtgate/gateway/shard_error.go b/go/vt/vtgate/gateway/shard_error.go index 6ddff542d05..16cc3e0e9da 100644 --- a/go/vt/vtgate/gateway/shard_error.go +++ b/go/vt/vtgate/gateway/shard_error.go @@ -24,8 +24,8 @@ type ShardError struct { InTransaction bool // Err preserves the original error, so that we don't need to parse the error string. Err error - // ErrorCode is the error code to use for all the tablet errors in aggregate - ErrorCode vtrpcpb.ErrorCode + // Code is the error code to use for all the tablet errors in aggregate + Code vtrpcpb.Code } // Error returns the error string. @@ -38,8 +38,8 @@ func (e *ShardError) Error() string { // VtErrorCode returns the underlying Vitess error code. // This is part of vterrors.VtError interface. 
-func (e *ShardError) VtErrorCode() vtrpcpb.ErrorCode { - return e.ErrorCode +func (e *ShardError) VtErrorCode() vtrpcpb.Code { + return e.Code } // NewShardError returns a ShardError which preserves the original @@ -61,6 +61,6 @@ func NewShardError(in error, target *querypb.Target, tablet *topodatapb.Tablet, ShardIdentifier: shardIdentifier, InTransaction: inTransaction, Err: in, - ErrorCode: vterrors.RecoverVtErrorCode(in), + Code: vterrors.RecoverVtErrorCode(in), } } diff --git a/go/vt/vtgate/l2vtgate/l2vtgate.go b/go/vt/vtgate/l2vtgate/l2vtgate.go index 4da4fcd5410..0ade1a2a205 100644 --- a/go/vt/vtgate/l2vtgate/l2vtgate.go +++ b/go/vt/vtgate/l2vtgate/l2vtgate.go @@ -98,7 +98,7 @@ func (l *L2VTGate) endAction(startTime time.Time, statsKey []string, err *error) // keys or bad queries, as those errors are caused by // client queries and are not VTGate's fault. ec := vterrors.RecoverVtErrorCode(*err) - if ec != vtrpcpb.ErrorCode_INTEGRITY_ERROR && ec != vtrpcpb.ErrorCode_BAD_INPUT { + if ec != vtrpcpb.Code_ALREADY_EXISTS && ec != vtrpcpb.Code_INVALID_ARGUMENT { l.tabletCallErrorCount.Add(statsKey, 1) } } diff --git a/go/vt/vtgate/masterbuffer/masterbuffer.go b/go/vt/vtgate/masterbuffer/masterbuffer.go index 5b2801b2323..c69eff62b8c 100644 --- a/go/vt/vtgate/masterbuffer/masterbuffer.go +++ b/go/vt/vtgate/masterbuffer/masterbuffer.go @@ -48,7 +48,7 @@ var timeSleep = time.Sleep // errBufferFull is the error returned a buffer request is rejected because the buffer is full. 
var errBufferFull = vterrors.FromError( - vtrpcpb.ErrorCode_TRANSIENT_ERROR, + vtrpcpb.Code_UNAVAILABLE, errors.New("master request buffer full, rejecting request"), ) diff --git a/go/vt/vtgate/resolver.go b/go/vt/vtgate/resolver.go index 60da1398357..5e3a9db6197 100644 --- a/go/vt/vtgate/resolver.go +++ b/go/vt/vtgate/resolver.go @@ -58,7 +58,7 @@ func isRetryableError(err error) bool { case *ScatterConnError: return e.Retryable case *gateway.ShardError: - return e.ErrorCode == vtrpcpb.ErrorCode_QUERY_NOT_SERVED + return e.Code == vtrpcpb.Code_FAILED_PRECONDITION default: return false } @@ -71,7 +71,7 @@ func isRetryableError(err error) bool { func (res *Resolver) ExecuteKeyspaceIds(ctx context.Context, sql string, bindVariables map[string]interface{}, keyspace string, keyspaceIds [][]byte, tabletType topodatapb.TabletType, session *vtgatepb.Session, notInTransaction bool, options *querypb.ExecuteOptions) (*sqltypes.Result, error) { if sqlannotation.IsDML(sql) && len(keyspaceIds) > 1 { return nil, vterrors.FromError( - vtrpcpb.ErrorCode_BAD_INPUT, + vtrpcpb.Code_INVALID_ARGUMENT, fmt.Errorf("DML should not span multiple keyspace_ids"), ) } diff --git a/go/vt/vtgate/resolver_test.go b/go/vt/vtgate/resolver_test.go index 589cd5b16fc..234982d3a55 100644 --- a/go/vt/vtgate/resolver_test.go +++ b/go/vt/vtgate/resolver_test.go @@ -560,13 +560,13 @@ func TestIsRetryableError(t *testing.T) { {fmt.Errorf("generic error"), false}, {&ScatterConnError{Retryable: true}, true}, {&ScatterConnError{Retryable: false}, false}, - {&gateway.ShardError{ErrorCode: vtrpcpb.ErrorCode_QUERY_NOT_SERVED}, true}, - {&gateway.ShardError{ErrorCode: vtrpcpb.ErrorCode_INTERNAL_ERROR}, false}, + {&gateway.ShardError{Code: vtrpcpb.Code_FAILED_PRECONDITION}, true}, + {&gateway.ShardError{Code: vtrpcpb.Code_INTERNAL}, false}, // tabletconn.ServerError will not come directly here, // they'll be wrapped in ScatterConnError or ShardConnError. // So they can't be retried as is. 
- {&tabletconn.ServerError{ServerCode: vtrpcpb.ErrorCode_QUERY_NOT_SERVED}, false}, - {&tabletconn.ServerError{ServerCode: vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY}, false}, + {&tabletconn.ServerError{ServerCode: vtrpcpb.Code_FAILED_PRECONDITION}, false}, + {&tabletconn.ServerError{ServerCode: vtrpcpb.Code_PERMISSION_DENIED}, false}, } for _, tt := range connErrorTests { diff --git a/go/vt/vtgate/safe_session.go b/go/vt/vtgate/safe_session.go index b8df70a7893..c5ec9be60a4 100644 --- a/go/vt/vtgate/safe_session.go +++ b/go/vt/vtgate/safe_session.go @@ -63,7 +63,7 @@ func (session *SafeSession) Append(shardSession *vtgatepb.Session_ShardSession) session.ShardSessions = append(session.ShardSessions, shardSession) if session.SingleDb && len(session.ShardSessions) > 1 { session.mustRollback = true - return vterrors.FromError(vtrpcpb.ErrorCode_BAD_INPUT, fmt.Errorf("multi-db transaction attempted: %v", session.ShardSessions)) + return vterrors.FromError(vtrpcpb.Code_INVALID_ARGUMENT, fmt.Errorf("multi-db transaction attempted: %v", session.ShardSessions)) } return nil } diff --git a/go/vt/vtgate/scatter_conn.go b/go/vt/vtgate/scatter_conn.go index ad21b7c0390..678b40f93bc 100644 --- a/go/vt/vtgate/scatter_conn.go +++ b/go/vt/vtgate/scatter_conn.go @@ -79,10 +79,10 @@ func (stc *ScatterConn) endAction(startTime time.Time, allErrors *concurrency.Al // keys or bad queries, as those errors are caused by // client queries and are not VTGate's fault. 
ec := vterrors.RecoverVtErrorCode(*err) - if ec != vtrpcpb.ErrorCode_INTEGRITY_ERROR && ec != vtrpcpb.ErrorCode_BAD_INPUT { + if ec != vtrpcpb.Code_ALREADY_EXISTS && ec != vtrpcpb.Code_INVALID_ARGUMENT { stc.tabletCallErrorCount.Add(statsKey, 1) } - if ec == vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY || ec == vtrpcpb.ErrorCode_NOT_IN_TX { + if ec == vtrpcpb.Code_RESOURCE_EXHAUSTED || ec == vtrpcpb.Code_ABORTED { session.SetRollback() } } @@ -554,7 +554,7 @@ type ScatterConnError struct { // Preserve the original errors, so that we don't need to parse the error string. Errs []error // serverCode is the error code to use for all the server errors in aggregate - serverCode vtrpcpb.ErrorCode + serverCode vtrpcpb.Code } func (e *ScatterConnError) Error() string { @@ -563,7 +563,7 @@ func (e *ScatterConnError) Error() string { // VtErrorCode returns the underlying Vitess error code // This is part of vterrors.VtError interface. -func (e *ScatterConnError) VtErrorCode() vtrpcpb.ErrorCode { +func (e *ScatterConnError) VtErrorCode() vtrpcpb.Code { return e.serverCode } @@ -571,7 +571,7 @@ func (stc *ScatterConn) aggregateErrors(errors []error) error { allRetryableError := true for _, e := range errors { connError, ok := e.(*gateway.ShardError) - if !ok || (connError.ErrorCode != vtrpcpb.ErrorCode_QUERY_NOT_SERVED && connError.ErrorCode != vtrpcpb.ErrorCode_INTERNAL_ERROR) || connError.InTransaction { + if !ok || (connError.Code != vtrpcpb.Code_FAILED_PRECONDITION && connError.Code != vtrpcpb.Code_INTERNAL) || connError.InTransaction { allRetryableError = false break } diff --git a/go/vt/vtgate/scatter_conn_test.go b/go/vt/vtgate/scatter_conn_test.go index ece70d35d9b..6094915c4de 100644 --- a/go/vt/vtgate/scatter_conn_test.go +++ b/go/vt/vtgate/scatter_conn_test.go @@ -98,7 +98,7 @@ func TestScatterConnStreamExecuteMulti(t *testing.T) { // verifyScatterConnError checks that a returned error has the expected message, // type, and error code. 
-func verifyScatterConnError(t *testing.T, err error, wantErr string, wantCode vtrpcpb.ErrorCode) { +func verifyScatterConnError(t *testing.T, err error, wantErr string, wantCode vtrpcpb.Code) { if err == nil || err.Error() != wantErr { t.Errorf("wanted error: %s, got error: %v", wantErr, err) } @@ -152,7 +152,7 @@ func testScatterConnGeneric(t *testing.T, name string, f func(sc *ScatterConn, s _, err = f(sc, []string{"0", "1"}) // Verify server errors are consolidated. want = fmt.Sprintf("target: %v.0.replica, used tablet: (alias: hostname:\"0\" port_map: keyspace:\"%v\" shard:\"0\" type:REPLICA ), error: err\ntarget: %v.1.replica, used tablet: (alias: hostname:\"1\" port_map: keyspace:\"%v\" shard:\"1\" type:REPLICA ), error: err", name, name, name, name) - verifyScatterConnError(t, err, want, vtrpcpb.ErrorCode_BAD_INPUT) + verifyScatterConnError(t, err, want, vtrpcpb.Code_INVALID_ARGUMENT) // Ensure that we tried only once. if execCount := sbc0.ExecCount.Get(); execCount != 1 { t.Errorf("want 1, got %v", execCount) @@ -173,7 +173,7 @@ func testScatterConnGeneric(t *testing.T, name string, f func(sc *ScatterConn, s // Verify server errors are consolidated. want = fmt.Sprintf("target: %v.0.replica, used tablet: (alias: hostname:\"0\" port_map: keyspace:\"%v\" shard:\"0\" type:REPLICA ), error: err\ntarget: %v.1.replica, used tablet: (alias: hostname:\"1\" port_map: keyspace:\"%v\" shard:\"1\" type:REPLICA ), tx_pool_full: err", name, name, name, name) // We should only surface the higher priority error code - verifyScatterConnError(t, err, want, vtrpcpb.ErrorCode_BAD_INPUT) + verifyScatterConnError(t, err, want, vtrpcpb.Code_INVALID_ARGUMENT) // Ensure that we tried only once. 
if execCount := sbc0.ExecCount.Get(); execCount != 1 { t.Errorf("want 1, got %v", execCount) @@ -282,7 +282,7 @@ func TestScatterConnError(t *testing.T) { err := &ScatterConnError{ Retryable: false, Errs: []error{ - &gateway.ShardError{ErrorCode: vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY, Err: &tabletconn.ServerError{Err: "tabletconn error"}}, + &gateway.ShardError{Code: vtrpcpb.Code_PERMISSION_DENIED, Err: &tabletconn.ServerError{Err: "tabletconn error"}}, fmt.Errorf("generic error"), tabletconn.ConnClosed, }, diff --git a/go/vt/vtgate/topo_utils.go b/go/vt/vtgate/topo_utils.go index d6c440224c2..25cb0a91b60 100644 --- a/go/vt/vtgate/topo_utils.go +++ b/go/vt/vtgate/topo_utils.go @@ -47,7 +47,7 @@ func getAnyShard(ctx context.Context, topoServ topo.SrvTopoServer, cell, keyspac return "", "", err } if len(allShards) == 0 { - return "", "", vterrors.FromError(vtrpcpb.ErrorCode_BAD_INPUT, + return "", "", vterrors.FromError(vtrpcpb.Code_INVALID_ARGUMENT, fmt.Errorf("No shards found for this tabletType"), ) } @@ -58,7 +58,7 @@ func getKeyspaceShards(ctx context.Context, topoServ topo.SrvTopoServer, cell, k srvKeyspace, err := topoServ.GetSrvKeyspace(ctx, cell, keyspace) if err != nil { return "", nil, nil, vterrors.NewVitessError( - vtrpcpb.ErrorCode_INTERNAL_ERROR, err, + vtrpcpb.Code_INTERNAL, err, "keyspace %v fetch error: %v", keyspace, err, ) } @@ -70,7 +70,7 @@ func getKeyspaceShards(ctx context.Context, topoServ topo.SrvTopoServer, cell, k srvKeyspace, err = topoServ.GetSrvKeyspace(ctx, cell, keyspace) if err != nil { return "", nil, nil, vterrors.NewVitessError( - vtrpcpb.ErrorCode_INTERNAL_ERROR, err, + vtrpcpb.Code_INTERNAL, err, "keyspace %v fetch error: %v", keyspace, err, ) } @@ -80,7 +80,7 @@ func getKeyspaceShards(ctx context.Context, topoServ topo.SrvTopoServer, cell, k partition := topoproto.SrvKeyspaceGetPartition(srvKeyspace, tabletType) if partition == nil { return "", nil, nil, vterrors.NewVitessError( - vtrpcpb.ErrorCode_INTERNAL_ERROR, err, + 
vtrpcpb.Code_INTERNAL, err, "No partition found for tabletType %v in keyspace %v", topoproto.TabletTypeLString(tabletType), keyspace, ) } @@ -89,7 +89,7 @@ func getKeyspaceShards(ctx context.Context, topoServ topo.SrvTopoServer, cell, k func getShardForKeyspaceID(allShards []*topodatapb.ShardReference, keyspaceID []byte) (string, error) { if len(allShards) == 0 { - return "", vterrors.FromError(vtrpcpb.ErrorCode_BAD_INPUT, + return "", vterrors.FromError(vtrpcpb.Code_INVALID_ARGUMENT, fmt.Errorf("No shards found for this tabletType"), ) } @@ -99,7 +99,7 @@ func getShardForKeyspaceID(allShards []*topodatapb.ShardReference, keyspaceID [] return shardReference.Name, nil } } - return "", vterrors.FromError(vtrpcpb.ErrorCode_BAD_INPUT, + return "", vterrors.FromError(vtrpcpb.Code_INVALID_ARGUMENT, fmt.Errorf("KeyspaceId %v didn't match any shards %+v", hex.EncodeToString(keyspaceID), allShards), ) } @@ -179,7 +179,7 @@ func mapExactShards(ctx context.Context, topoServ topo.SrvTopoServer, cell, keys } shardnum++ } - return keyspace, nil, vterrors.FromError(vtrpcpb.ErrorCode_BAD_INPUT, + return keyspace, nil, vterrors.FromError(vtrpcpb.Code_INVALID_ARGUMENT, fmt.Errorf("keyrange %v does not exactly match shards", key.KeyRangeString(kr)), ) } diff --git a/go/vt/vtgate/tx_conn.go b/go/vt/vtgate/tx_conn.go index 53df3f4daf5..b64ce6560f7 100644 --- a/go/vt/vtgate/tx_conn.go +++ b/go/vt/vtgate/tx_conn.go @@ -37,10 +37,10 @@ func NewTxConn(gw gateway.Gateway) *TxConn { // is used to ensure atomicity. 
func (txc *TxConn) Commit(ctx context.Context, twopc bool, session *SafeSession) error { if session == nil { - return vterrors.FromError(vtrpcpb.ErrorCode_BAD_INPUT, errors.New("cannot commit: empty session")) + return vterrors.FromError(vtrpcpb.Code_INVALID_ARGUMENT, errors.New("cannot commit: empty session")) } if !session.InTransaction() { - return vterrors.FromError(vtrpcpb.ErrorCode_NOT_IN_TX, errors.New("cannot commit: not in transaction")) + return vterrors.FromError(vtrpcpb.Code_ABORTED, errors.New("cannot commit: not in transaction")) } if twopc { return txc.commit2PC(ctx, session) @@ -155,7 +155,7 @@ func (txc *TxConn) Resolve(ctx context.Context, dtid string) error { } default: // Should never happen. - return vterrors.FromError(vtrpcpb.ErrorCode_INTERNAL_ERROR, fmt.Errorf("invalid state: %v", transaction.State)) + return vterrors.FromError(vtrpcpb.Code_INTERNAL, fmt.Errorf("invalid state: %v", transaction.State)) } return nil } diff --git a/go/vt/vtgate/tx_conn_test.go b/go/vt/vtgate/tx_conn_test.go index da6a10931d1..bac7aaf294c 100644 --- a/go/vt/vtgate/tx_conn_test.go +++ b/go/vt/vtgate/tx_conn_test.go @@ -25,8 +25,8 @@ func TestTxConnCommitRollbackIncorrectSession(t *testing.T) { sc, _, _ := newTestTxConnEnv("TestTxConn") // nil session err := sc.txConn.Commit(context.Background(), false, nil) - if got := vterrors.RecoverVtErrorCode(err); got != vtrpcpb.ErrorCode_BAD_INPUT { - t.Errorf("Commit: %v, want %v", got, vtrpcpb.ErrorCode_BAD_INPUT) + if got := vterrors.RecoverVtErrorCode(err); got != vtrpcpb.Code_INVALID_ARGUMENT { + t.Errorf("Commit: %v, want %v", got, vtrpcpb.Code_INVALID_ARGUMENT) } err = sc.txConn.Rollback(context.Background(), nil) @@ -37,8 +37,8 @@ func TestTxConnCommitRollbackIncorrectSession(t *testing.T) { // not in transaction session := NewSafeSession(&vtgatepb.Session{}) err = sc.txConn.Commit(context.Background(), false, session) - if got := vterrors.RecoverVtErrorCode(err); got != vtrpcpb.ErrorCode_NOT_IN_TX { - 
t.Errorf("Commit: %v, want %v", got, vtrpcpb.ErrorCode_NOT_IN_TX) + if got := vterrors.RecoverVtErrorCode(err); got != vtrpcpb.Code_ABORTED { + t.Errorf("Commit: %v, want %v", got, vtrpcpb.Code_ABORTED) } } @@ -592,7 +592,7 @@ func TestTxConnMultiGoSessions(t *testing.T) { }, }} err := txc.runSessions(input, func(s *vtgatepb.Session_ShardSession) error { - return vterrors.FromError(vtrpcpb.ErrorCode_INTERNAL_ERROR, fmt.Errorf("err %s", s.Target.Keyspace)) + return vterrors.FromError(vtrpcpb.Code_INTERNAL, fmt.Errorf("err %s", s.Target.Keyspace)) }) want := "err 0" if err == nil || err.Error() != want { @@ -609,14 +609,14 @@ func TestTxConnMultiGoSessions(t *testing.T) { }, }} err = txc.runSessions(input, func(s *vtgatepb.Session_ShardSession) error { - return vterrors.FromError(vtrpcpb.ErrorCode_INTERNAL_ERROR, fmt.Errorf("err %s", s.Target.Keyspace)) + return vterrors.FromError(vtrpcpb.Code_INTERNAL, fmt.Errorf("err %s", s.Target.Keyspace)) }) want = "err 0\nerr 1" if err == nil || err.Error() != want { t.Errorf("runSessions(2): %v, want %s", err, want) } errCode := err.(*ScatterConnError).VtErrorCode() - wantCode := vtrpcpb.ErrorCode_INTERNAL_ERROR + wantCode := vtrpcpb.Code_INTERNAL if errCode != wantCode { t.Errorf("Error code: %v, want %v", errCode, wantCode) } @@ -635,7 +635,7 @@ func TestTxConnMultiGoTargets(t *testing.T) { Keyspace: "0", }} err := txc.runTargets(input, func(t *querypb.Target) error { - return vterrors.FromError(vtrpcpb.ErrorCode_INTERNAL_ERROR, fmt.Errorf("err %s", t.Keyspace)) + return vterrors.FromError(vtrpcpb.Code_INTERNAL, fmt.Errorf("err %s", t.Keyspace)) }) want := "err 0" if err == nil || err.Error() != want { @@ -648,14 +648,14 @@ func TestTxConnMultiGoTargets(t *testing.T) { Keyspace: "1", }} err = txc.runTargets(input, func(t *querypb.Target) error { - return vterrors.FromError(vtrpcpb.ErrorCode_INTERNAL_ERROR, fmt.Errorf("err %s", t.Keyspace)) + return vterrors.FromError(vtrpcpb.Code_INTERNAL, fmt.Errorf("err %s", t.Keyspace)) 
}) want = "err 0\nerr 1" if err == nil || err.Error() != want { t.Errorf("runTargets(2): %v, want %s", err, want) } errCode := err.(*ScatterConnError).VtErrorCode() - wantCode := vtrpcpb.ErrorCode_INTERNAL_ERROR + wantCode := vtrpcpb.Code_INTERNAL if errCode != wantCode { t.Errorf("Error code: %v, want %v", errCode, wantCode) } diff --git a/go/vt/vtgate/vtgate.go b/go/vt/vtgate/vtgate.go index dab6ed2ec9d..a240028c743 100644 --- a/go/vt/vtgate/vtgate.go +++ b/go/vt/vtgate/vtgate.go @@ -631,7 +631,7 @@ func (vtg *VTGate) StreamExecuteShards(ctx context.Context, sql string, bindVari // Begin begins a transaction. It has to be concluded by a Commit or Rollback. func (vtg *VTGate) Begin(ctx context.Context, singledb bool) (*vtgatepb.Session, error) { if !singledb && vtg.transactionMode == TxSingle { - return nil, vterrors.FromError(vtrpcpb.ErrorCode_BAD_INPUT, errors.New("multi-db transaction disallowed")) + return nil, vterrors.FromError(vtrpcpb.Code_INVALID_ARGUMENT, errors.New("multi-db transaction disallowed")) } return &vtgatepb.Session{ InTransaction: true, @@ -644,7 +644,7 @@ func (vtg *VTGate) Commit(ctx context.Context, twopc bool, session *vtgatepb.Ses if twopc && vtg.transactionMode != TxTwoPC { // Rollback the transaction to prevent future deadlocks. vtg.txConn.Rollback(ctx, NewSafeSession(session)) - return vterrors.FromError(vtrpcpb.ErrorCode_BAD_INPUT, errors.New("2pc transaction disallowed")) + return vterrors.FromError(vtrpcpb.Code_INVALID_ARGUMENT, errors.New("2pc transaction disallowed")) } return formatError(vtg.txConn.Commit(ctx, twopc, NewSafeSession(session))) } @@ -955,16 +955,16 @@ func handleExecuteError(err error, statsKey []string, query map[string]interface // First we log in the right category. ec := vterrors.RecoverVtErrorCode(err) switch ec { - case vtrpcpb.ErrorCode_INTEGRITY_ERROR: + case vtrpcpb.Code_ALREADY_EXISTS: // Duplicate key error, no need to log. 
infoErrors.Add("DupKey", 1) - case vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY, vtrpcpb.ErrorCode_BAD_INPUT: + case vtrpcpb.Code_RESOURCE_EXHAUSTED, vtrpcpb.Code_INVALID_ARGUMENT: // Tx pool full error, or bad input, no need to log. normalErrors.Add(statsKey, 1) - case vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY: + case vtrpcpb.Code_PERMISSION_DENIED: // User violated permissions (TableACL), no need to log. infoErrors.Add("PermissionDenied", 1) - case vtrpcpb.ErrorCode_TRANSIENT_ERROR: + case vtrpcpb.Code_UNAVAILABLE: // Temporary error which should be retried by user. Do not log. // As of 01/2017, only the vttablet transaction throttler and the vtgate // master buffer (if buffer full) return this error. diff --git a/go/vt/vtgate/vtgate_test.go b/go/vt/vtgate/vtgate_test.go index d3b30d7539f..a7576a3e39d 100644 --- a/go/vt/vtgate/vtgate_test.go +++ b/go/vt/vtgate/vtgate_test.go @@ -1193,7 +1193,7 @@ func TestVTGateSplitQueryUnsharded(t *testing.T) { func TestIsErrorCausedByVTGate(t *testing.T) { unknownError := fmt.Errorf("unknown error") serverError := &tabletconn.ServerError{ - ServerCode: vtrpcpb.ErrorCode_QUERY_NOT_SERVED, + ServerCode: vtrpcpb.Code_FAILED_PRECONDITION, Err: "vttablet: retry: error message", } shardConnUnknownErr := &gateway.ShardError{Err: unknownError} @@ -1602,7 +1602,7 @@ func verifyBoundQueriesAnnotatedAsUnfriendly(t *testing.T, expectedNumQueries in } } -func testErrorPropagation(t *testing.T, sbcs []*sandboxconn.SandboxConn, before func(sbc *sandboxconn.SandboxConn), after func(sbc *sandboxconn.SandboxConn), expected vtrpcpb.ErrorCode) { +func testErrorPropagation(t *testing.T, sbcs []*sandboxconn.SandboxConn, before func(sbc *sandboxconn.SandboxConn), after func(sbc *sandboxconn.SandboxConn), expected vtrpcpb.Code) { // Execute for _, sbc := range sbcs { @@ -1988,84 +1988,84 @@ func TestErrorPropagation(t *testing.T) { sbc.MustFailCanceled = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCanceled = 0 - }, 
vtrpcpb.ErrorCode_CANCELLED_LEGACY) + }, vtrpcpb.Code_CANCELED) - // ErrorCode_UNKNOWN_ERROR + // Code_UNKNOWN testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailUnknownError = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailUnknownError = 0 - }, vtrpcpb.ErrorCode_UNKNOWN_ERROR) + }, vtrpcpb.Code_UNKNOWN) - // ErrorCode_BAD_INPUT + // Code_INVALID_ARGUMENT testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailServer = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailServer = 0 - }, vtrpcpb.ErrorCode_BAD_INPUT) + }, vtrpcpb.Code_INVALID_ARGUMENT) // ErrorCode_DEADLINE_EXCEEDED testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailDeadlineExceeded = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailDeadlineExceeded = 0 - }, vtrpcpb.ErrorCode_DEADLINE_EXCEEDED_LEGACY) + }, vtrpcpb.Code_DEADLINE_EXCEEDED) - // ErrorCode_INTEGRITY_ERROR + // Code_ALREADY_EXISTS testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailIntegrityError = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailIntegrityError = 0 - }, vtrpcpb.ErrorCode_INTEGRITY_ERROR) + }, vtrpcpb.Code_ALREADY_EXISTS) // ErrorCode_PERMISSION_DENIED testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailPermissionDenied = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailPermissionDenied = 0 - }, vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY) + }, vtrpcpb.Code_PERMISSION_DENIED) // ErrorCode_RESOURCE_EXHAUSTED testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailTxPool = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailTxPool = 0 - }, vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY) + }, vtrpcpb.Code_RESOURCE_EXHAUSTED) - // ErrorCode_QUERY_NOT_SERVED + // Code_FAILED_PRECONDITION testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailRetry = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailRetry = 0 - }, 
vtrpcpb.ErrorCode_QUERY_NOT_SERVED) + }, vtrpcpb.Code_FAILED_PRECONDITION) - // ErrorCode_NOT_IN_TX + // Code_ABORTED testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailNotTx = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailNotTx = 0 - }, vtrpcpb.ErrorCode_NOT_IN_TX) + }, vtrpcpb.Code_ABORTED) - // ErrorCode_INTERNAL_ERROR + // Code_INTERNAL testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailFatal = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailFatal = 0 - }, vtrpcpb.ErrorCode_INTERNAL_ERROR) + }, vtrpcpb.Code_INTERNAL) - // ErrorCode_TRANSIENT_ERROR + // Code_UNAVAILABLE testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailTransientError = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailTransientError = 0 - }, vtrpcpb.ErrorCode_TRANSIENT_ERROR) + }, vtrpcpb.Code_UNAVAILABLE) // ErrorCode_UNAUTHENTICATED testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailUnauthenticated = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailUnauthenticated = 0 - }, vtrpcpb.ErrorCode_UNAUTHENTICATED_LEGACY) + }, vtrpcpb.Code_UNAUTHENTICATED) } // This test makes sure that if we start a transaction and hit a critical @@ -2077,7 +2077,7 @@ func TestErrorIssuesRollback(t *testing.T) { // Start a transaction, send one statement. // Simulate an error that should trigger a rollback: - // vtrpcpb.ErrorCode_NOT_IN_TX case. + // vtrpcpb.Code_ABORTED case. session, err := rpcVTGate.Begin(context.Background(), false) if err != nil { t.Fatalf("cannot start a transaction: %v", err) @@ -2155,7 +2155,7 @@ func TestErrorIssuesRollback(t *testing.T) { // Start a transaction, send one statement. // Simulate an error that should *not* trigger a rollback: - // vtrpcpb.ErrorCode_INTEGRITY_ERROR case. + // vtrpcpb.Code_ALREADY_EXISTS case. 
session, err = rpcVTGate.Begin(context.Background(), false) if err != nil { t.Fatalf("cannot start a transaction: %v", err) diff --git a/go/vt/vtgate/vtgateconntest/client.go b/go/vt/vtgate/vtgateconntest/client.go index 298f6d0f472..1124502df4f 100644 --- a/go/vt/vtgate/vtgateconntest/client.go +++ b/go/vt/vtgate/vtgateconntest/client.go @@ -42,7 +42,7 @@ type fakeVTGateService struct { } const expectedErrMatch string = "test vtgate error" -const expectedCode vtrpcpb.ErrorCode = vtrpcpb.ErrorCode_BAD_INPUT +const expectedCode vtrpcpb.Code = vtrpcpb.Code_INVALID_ARGUMENT var errTestVtGateError = vterrors.FromError(expectedCode, errors.New(expectedErrMatch)) diff --git a/go/vt/worker/grpcvtworkerclient/client.go b/go/vt/worker/grpcvtworkerclient/client.go index 5f3a020b8bd..e8a680c23bb 100644 --- a/go/vt/worker/grpcvtworkerclient/client.go +++ b/go/vt/worker/grpcvtworkerclient/client.go @@ -43,7 +43,7 @@ func gRPCVtworkerClientFactory(addr string, dialTimeout time.Duration) (vtworker } cc, err := grpc.Dial(addr, opt, grpc.WithBlock(), grpc.WithTimeout(dialTimeout)) if err != nil { - return nil, vterrors.NewVitessError(vtrpcpb.ErrorCode_DEADLINE_EXCEEDED_LEGACY, err, "grpc.Dial() err: %v", err) + return nil, vterrors.NewVitessError(vtrpcpb.Code_DEADLINE_EXCEEDED, err, "grpc.Dial() err: %v", err) } c := vtworkerservicepb.NewVtworkerClient(cc) diff --git a/go/vt/worker/instance.go b/go/vt/worker/instance.go index aa1170c7649..3054735791b 100644 --- a/go/vt/worker/instance.go +++ b/go/vt/worker/instance.go @@ -75,7 +75,7 @@ func (wi *Instance) setAndStartWorker(ctx context.Context, wrk Worker, wr *wrang defer wi.currentWorkerMutex.Unlock() if wi.currentContext != nil { - return nil, vterrors.FromError(vtrpcpb.ErrorCode_TRANSIENT_ERROR, + return nil, vterrors.FromError(vtrpcpb.Code_UNAVAILABLE, fmt.Errorf("A worker job is already in progress: %v", wi.currentWorker.StatusAsText())) } @@ -84,14 +84,14 @@ func (wi *Instance) setAndStartWorker(ctx context.Context, wrk 
Worker, wr *wrang const gracePeriod = 1 * time.Minute gracePeriodEnd := time.Now().Add(gracePeriod) if wi.lastRunStopTime.Before(gracePeriodEnd) { - return nil, vterrors.FromError(vtrpcpb.ErrorCode_TRANSIENT_ERROR, + return nil, vterrors.FromError(vtrpcpb.Code_UNAVAILABLE, fmt.Errorf("A worker job was recently stopped (%f seconds ago): %v", time.Now().Sub(wi.lastRunStopTime).Seconds(), wi.currentWorker)) } // QUERY_NOT_SERVED = FailedPrecondition => manual resolution required. - return nil, vterrors.FromError(vtrpcpb.ErrorCode_QUERY_NOT_SERVED, + return nil, vterrors.FromError(vtrpcpb.Code_FAILED_PRECONDITION, fmt.Errorf("The worker job was stopped %.1f minutes ago, but not reset. You have to reset it manually. Job: %v", time.Now().Sub(wi.lastRunStopTime).Minutes(), wi.currentWorker)) @@ -141,7 +141,7 @@ func (wi *Instance) setAndStartWorker(ctx context.Context, wrk Worker, wr *wrang case <-wi.currentContext.Done(): // Context is done i.e. probably canceled. if wi.currentContext.Err() == context.Canceled { - err = vterrors.NewVitessError(vtrpcpb.ErrorCode_CANCELLED_LEGACY, err, "vtworker command was canceled: %v", err) + err = vterrors.NewVitessError(vtrpcpb.Code_CANCELED, err, "vtworker command was canceled: %v", err) } default: } diff --git a/go/vt/worker/vtworkerclienttest/client_testsuite.go b/go/vt/worker/vtworkerclienttest/client_testsuite.go index f10b4c57eab..cda467ea4e0 100644 --- a/go/vt/worker/vtworkerclienttest/client_testsuite.go +++ b/go/vt/worker/vtworkerclienttest/client_testsuite.go @@ -129,7 +129,7 @@ func commandErrorsBecauseBusy(t *testing.T, client vtworkerclient.Client, server if _, err := stream.Recv(); err != nil { // We see CANCELED from the RPC client (client side cancelation) or // from vtworker itself (server side cancelation). 
- if vterrors.RecoverVtErrorCode(err) != vtrpcpb.ErrorCode_CANCELLED_LEGACY { + if vterrors.RecoverVtErrorCode(err) != vtrpcpb.Code_CANCELED { errorCodeCheck = fmt.Errorf("Block command should only error due to canceled context: %v", err) } // Stream has finished. @@ -150,7 +150,7 @@ func commandErrorsBecauseBusy(t *testing.T, client vtworkerclient.Client, server // vtworker should send an error back that it's busy and we should retry later. <-blockCommandStarted gotErr := runVtworkerCommand(client, []string{"Ping", "Are you busy?"}) - wantCode := vtrpcpb.ErrorCode_TRANSIENT_ERROR + wantCode := vtrpcpb.Code_UNAVAILABLE if gotCode := vterrors.RecoverVtErrorCode(gotErr); gotCode != wantCode { t.Fatalf("wrong error code for second cmd: got = %v, want = %v, err: %v", gotCode, wantCode, gotErr) } @@ -174,7 +174,7 @@ func commandErrorsBecauseBusy(t *testing.T, client vtworkerclient.Client, server // canceled but not reset yet. New commands are still failing with a // retryable error. gotErr2 := runVtworkerCommand(client, []string{"Ping", "canceled and still busy?"}) - wantCode2 := vtrpcpb.ErrorCode_TRANSIENT_ERROR + wantCode2 := vtrpcpb.Code_UNAVAILABLE if gotCode2 := vterrors.RecoverVtErrorCode(gotErr2); gotCode2 != wantCode2 { t.Fatalf("wrong error code for second cmd before reset: got = %v, want = %v, err: %v", gotCode2, wantCode2, gotErr2) } diff --git a/proto/vtrpc.proto b/proto/vtrpc.proto index 9e62d469291..2f2c9501975 100644 --- a/proto/vtrpc.proto +++ b/proto/vtrpc.proto @@ -250,4 +250,5 @@ enum ErrorCode { message RPCError { ErrorCode legacy_code = 1; string message = 2; + Code code = 3; } diff --git a/py/vtproto/vtrpc_pb2.py b/py/vtproto/vtrpc_pb2.py index e1cc108489c..3abc96acc9d 100644 --- a/py/vtproto/vtrpc_pb2.py +++ b/py/vtproto/vtrpc_pb2.py @@ -20,7 +20,7 @@ name='vtrpc.proto', package='vtrpc', syntax='proto3', - serialized_pb=_b('\n\x0bvtrpc.proto\x12\x05vtrpc\"F\n\x08\x43\x61llerID\x12\x11\n\tprincipal\x18\x01 \x01(\t\x12\x11\n\tcomponent\x18\x02 
\x01(\t\x12\x14\n\x0csubcomponent\x18\x03 \x01(\t\"B\n\x08RPCError\x12%\n\x0blegacy_code\x18\x01 \x01(\x0e\x32\x10.vtrpc.ErrorCode\x12\x0f\n\x07message\x18\x02 \x01(\t*\xb6\x02\n\x04\x43ode\x12\x06\n\x02OK\x10\x00\x12\x0c\n\x08\x43\x41NCELED\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x14\n\x10INVALID_ARGUMENT\x10\x03\x12\x15\n\x11\x44\x45\x41\x44LINE_EXCEEDED\x10\x04\x12\r\n\tNOT_FOUND\x10\x05\x12\x12\n\x0e\x41LREADY_EXISTS\x10\x06\x12\x15\n\x11PERMISSION_DENIED\x10\x07\x12\x13\n\x0fUNAUTHENTICATED\x10\x10\x12\x16\n\x12RESOURCE_EXHAUSTED\x10\x08\x12\x17\n\x13\x46\x41ILED_PRECONDITION\x10\t\x12\x0b\n\x07\x41\x42ORTED\x10\n\x12\x10\n\x0cOUT_OF_RANGE\x10\x0b\x12\x11\n\rUNIMPLEMENTED\x10\x0c\x12\x0c\n\x08INTERNAL\x10\r\x12\x0f\n\x0bUNAVAILABLE\x10\x0e\x12\r\n\tDATA_LOSS\x10\x0f*\xaa\x02\n\tErrorCode\x12\x0b\n\x07SUCCESS\x10\x00\x12\x14\n\x10\x43\x41NCELLED_LEGACY\x10\x01\x12\x11\n\rUNKNOWN_ERROR\x10\x02\x12\r\n\tBAD_INPUT\x10\x03\x12\x1c\n\x18\x44\x45\x41\x44LINE_EXCEEDED_LEGACY\x10\x04\x12\x13\n\x0fINTEGRITY_ERROR\x10\x05\x12\x1c\n\x18PERMISSION_DENIED_LEGACY\x10\x06\x12\x1d\n\x19RESOURCE_EXHAUSTED_LEGACY\x10\x07\x12\x14\n\x10QUERY_NOT_SERVED\x10\x08\x12\r\n\tNOT_IN_TX\x10\t\x12\x12\n\x0eINTERNAL_ERROR\x10\n\x12\x13\n\x0fTRANSIENT_ERROR\x10\x0b\x12\x1a\n\x16UNAUTHENTICATED_LEGACY\x10\x0c\x42\x1a\n\x18\x63om.youtube.vitess.protob\x06proto3') + serialized_pb=_b('\n\x0bvtrpc.proto\x12\x05vtrpc\"F\n\x08\x43\x61llerID\x12\x11\n\tprincipal\x18\x01 \x01(\t\x12\x11\n\tcomponent\x18\x02 \x01(\t\x12\x14\n\x0csubcomponent\x18\x03 \x01(\t\"]\n\x08RPCError\x12%\n\x0blegacy_code\x18\x01 \x01(\x0e\x32\x10.vtrpc.ErrorCode\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x19\n\x04\x63ode\x18\x03 
\x01(\x0e\x32\x0b.vtrpc.Code*\xb6\x02\n\x04\x43ode\x12\x06\n\x02OK\x10\x00\x12\x0c\n\x08\x43\x41NCELED\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x14\n\x10INVALID_ARGUMENT\x10\x03\x12\x15\n\x11\x44\x45\x41\x44LINE_EXCEEDED\x10\x04\x12\r\n\tNOT_FOUND\x10\x05\x12\x12\n\x0e\x41LREADY_EXISTS\x10\x06\x12\x15\n\x11PERMISSION_DENIED\x10\x07\x12\x13\n\x0fUNAUTHENTICATED\x10\x10\x12\x16\n\x12RESOURCE_EXHAUSTED\x10\x08\x12\x17\n\x13\x46\x41ILED_PRECONDITION\x10\t\x12\x0b\n\x07\x41\x42ORTED\x10\n\x12\x10\n\x0cOUT_OF_RANGE\x10\x0b\x12\x11\n\rUNIMPLEMENTED\x10\x0c\x12\x0c\n\x08INTERNAL\x10\r\x12\x0f\n\x0bUNAVAILABLE\x10\x0e\x12\r\n\tDATA_LOSS\x10\x0f*\xaa\x02\n\tErrorCode\x12\x0b\n\x07SUCCESS\x10\x00\x12\x14\n\x10\x43\x41NCELLED_LEGACY\x10\x01\x12\x11\n\rUNKNOWN_ERROR\x10\x02\x12\r\n\tBAD_INPUT\x10\x03\x12\x1c\n\x18\x44\x45\x41\x44LINE_EXCEEDED_LEGACY\x10\x04\x12\x13\n\x0fINTEGRITY_ERROR\x10\x05\x12\x1c\n\x18PERMISSION_DENIED_LEGACY\x10\x06\x12\x1d\n\x19RESOURCE_EXHAUSTED_LEGACY\x10\x07\x12\x14\n\x10QUERY_NOT_SERVED\x10\x08\x12\r\n\tNOT_IN_TX\x10\t\x12\x12\n\x0eINTERNAL_ERROR\x10\n\x12\x13\n\x0fTRANSIENT_ERROR\x10\x0b\x12\x1a\n\x16UNAUTHENTICATED_LEGACY\x10\x0c\x42\x1a\n\x18\x63om.youtube.vitess.protob\x06proto3') ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) @@ -101,8 +101,8 @@ ], containing_type=None, options=None, - serialized_start=163, - serialized_end=473, + serialized_start=190, + serialized_end=500, ) _sym_db.RegisterEnumDescriptor(_CODE) @@ -168,8 +168,8 @@ ], containing_type=None, options=None, - serialized_start=476, - serialized_end=774, + serialized_start=503, + serialized_end=801, ) _sym_db.RegisterEnumDescriptor(_ERRORCODE) @@ -273,6 +273,13 @@ message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), + _descriptor.FieldDescriptor( + name='code', full_name='vtrpc.RPCError.code', index=2, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + options=None), ], extensions=[ ], @@ -286,10 +293,11 @@ oneofs=[ ], serialized_start=94, - serialized_end=160, + serialized_end=187, ) _RPCERROR.fields_by_name['legacy_code'].enum_type = _ERRORCODE +_RPCERROR.fields_by_name['code'].enum_type = _CODE DESCRIPTOR.message_types_by_name['CallerID'] = _CALLERID DESCRIPTOR.message_types_by_name['RPCError'] = _RPCERROR DESCRIPTOR.enum_types_by_name['Code'] = _CODE From f91a867f04055a0662a4b940439ec2ba1c6805d8 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Thu, 16 Feb 2017 15:31:12 -0800 Subject: [PATCH 015/108] vterrors: misc * Renamed old error names to legacy. * Changed all clients to handle legacy and new codes. * Added tests for the conversions. --- go/sqltypes/proto3_test.go | 2 +- go/vt/proto/vtrpc/vtrpc.pb.go | 192 ++++++------ .../tabletconn/grpc_error_test.go | 43 +++ go/vt/vterrors/grpc.go | 62 ++-- go/vt/vterrors/proto3_test.go | 83 +++++ .../java/com/youtube/vitess/client/Proto.java | 30 +- .../vitess/client/cursor/CursorWithError.java | 2 +- .../BinlogTransaction/Statement/Category.php | 5 +- .../Vitess/Proto/Query/MessageAckRequest.php | 289 ++++++++++++++++++ .../Vitess/Proto/Query/MessageAckResponse.php | 74 +++++ .../Proto/Query/MessageStreamRequest.php | 220 +++++++++++++ .../Proto/Query/MessageStreamResponse.php | 74 +++++ .../Vitess/Proto/Queryservice/QueryClient.php | 12 + .../Proto/Throttlerdata/Configuration.php | 48 +++ .../Vitess/Proto/Vtgate/MessageAckRequest.php | 239 +++++++++++++++ .../Proto/Vtgate/MessageStreamRequest.php | 267 ++++++++++++++++ .../Proto/Vtgateservice/VitessClient.php | 12 + php/src/Vitess/Proto/Vtrpc/Code.php | 26 ++ php/src/Vitess/Proto/Vtrpc/ErrorCode.php | 22 -- .../Vitess/Proto/Vtrpc/LegacyErrorCode.php | 22 ++ php/src/Vitess/Proto/Vtrpc/RPCError.php | 79 ++++- php/src/Vitess/ProtoUtils.php | 28 +- proto/vtrpc.proto | 65 ++-- py/vtdb/proto3_encoding.py | 24 ++ py/vtdb/vtgate_utils.py | 12 +- 
py/vtproto/vtrpc_pb2.py | 58 ++-- 26 files changed, 1737 insertions(+), 253 deletions(-) create mode 100644 go/vt/tabletserver/tabletconn/grpc_error_test.go create mode 100644 go/vt/vterrors/proto3_test.go create mode 100644 php/src/Vitess/Proto/Query/MessageAckRequest.php create mode 100644 php/src/Vitess/Proto/Query/MessageAckResponse.php create mode 100644 php/src/Vitess/Proto/Query/MessageStreamRequest.php create mode 100644 php/src/Vitess/Proto/Query/MessageStreamResponse.php create mode 100644 php/src/Vitess/Proto/Vtgate/MessageAckRequest.php create mode 100644 php/src/Vitess/Proto/Vtgate/MessageStreamRequest.php create mode 100644 php/src/Vitess/Proto/Vtrpc/Code.php delete mode 100644 php/src/Vitess/Proto/Vtrpc/ErrorCode.php create mode 100644 php/src/Vitess/Proto/Vtrpc/LegacyErrorCode.php diff --git a/go/sqltypes/proto3_test.go b/go/sqltypes/proto3_test.go index 129ce30427e..14702e30434 100644 --- a/go/sqltypes/proto3_test.go +++ b/go/sqltypes/proto3_test.go @@ -287,7 +287,7 @@ func TestQueryReponses(t *testing.T) { }, }, { Error: &vtrpcpb.RPCError{ - LegacyCode: vtrpcpb.ErrorCode_DEADLINE_EXCEEDED_LEGACY, + LegacyCode: vtrpcpb.LegacyErrorCode_DEADLINE_EXCEEDED_LEGACY, Message: "deadline exceeded", Code: vtrpcpb.Code_DEADLINE_EXCEEDED, }, diff --git a/go/vt/proto/vtrpc/vtrpc.pb.go b/go/vt/proto/vtrpc/vtrpc.pb.go index 9835f06ccaa..db085834fea 100644 --- a/go/vt/proto/vtrpc/vtrpc.pb.go +++ b/go/vt/proto/vtrpc/vtrpc.pb.go @@ -29,8 +29,9 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package -// Code represnts canonical error codes. The names and numbers must match -// the ones defined by grpc: https://godoc.org/google.golang.org/grpc/codes. +// Code represnts canonical error codes. The names, numbers and comments +// must match the ones defined by grpc: +// https://godoc.org/google.golang.org/grpc/codes. 
type Code int32 const ( @@ -120,7 +121,7 @@ const ( // UNIMPLEMENTED indicates operation is not implemented or not // supported/enabled in this service. Code_UNIMPLEMENTED Code = 12 - // INTERNAL errors. Means some invariants expected by underlying + // INTERNAL errors. Means some invariants expected by underlying // system has been broken. If you see one of these errors, // something is very broken. Code_INTERNAL Code = 13 @@ -179,7 +180,7 @@ func (x Code) String() string { } func (Code) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -// ErrorCode is the enum values for Errors. This type is deprecated. +// LegacyErrorCode is the enum values for Errors. This type is deprecated. // Use Code instead. Background: In the initial design, we thought // that we may end up with a different list of canonical error codes // than the ones defined by grpc. In hindisght, we realize that @@ -188,31 +189,31 @@ func (Code) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} // favor of the new Code that matches exactly what grpc defines. // Some names below have a _LEGACY suffix. This is to prevent // name collisions with Code. -type ErrorCode int32 +type LegacyErrorCode int32 const ( - // SUCCESS is returned from a successful call. - ErrorCode_SUCCESS ErrorCode = 0 + // SUCCESS_LEGACY is returned from a successful call. + LegacyErrorCode_SUCCESS_LEGACY LegacyErrorCode = 0 // CANCELLED_LEGACY means that the context was cancelled (and noticed in the app layer, // as opposed to the RPC layer). - ErrorCode_CANCELLED_LEGACY ErrorCode = 1 - // UNKNOWN_ERROR includes: + LegacyErrorCode_CANCELLED_LEGACY LegacyErrorCode = 1 + // UNKNOWN_ERROR_LEGACY includes: // 1. MySQL error codes that we don't explicitly handle. // 2. MySQL response that wasn't as expected. For example, we might expect a MySQL // timestamp to be returned in a particular way, but it wasn't. // 3. Anything else that doesn't fall into a different bucket. 
- ErrorCode_UNKNOWN_ERROR ErrorCode = 2 - // BAD_INPUT is returned when an end-user either sends SQL that couldn't be parsed correctly, + LegacyErrorCode_UNKNOWN_ERROR_LEGACY LegacyErrorCode = 2 + // BAD_INPUT_LEGACY is returned when an end-user either sends SQL that couldn't be parsed correctly, // or tries a query that isn't supported by Vitess. - ErrorCode_BAD_INPUT ErrorCode = 3 + LegacyErrorCode_BAD_INPUT_LEGACY LegacyErrorCode = 3 // DEADLINE_EXCEEDED_LEGACY is returned when an action is taking longer than a given timeout. - ErrorCode_DEADLINE_EXCEEDED_LEGACY ErrorCode = 4 - // INTEGRITY_ERROR is returned on integrity error from MySQL, usually due to + LegacyErrorCode_DEADLINE_EXCEEDED_LEGACY LegacyErrorCode = 4 + // INTEGRITY_ERROR_LEGACY is returned on integrity error from MySQL, usually due to // duplicate primary keys. - ErrorCode_INTEGRITY_ERROR ErrorCode = 5 + LegacyErrorCode_INTEGRITY_ERROR_LEGACY LegacyErrorCode = 5 // PERMISSION_DENIED_LEGACY errors are returned when a user requests access to something // that they don't have permissions for. - ErrorCode_PERMISSION_DENIED_LEGACY ErrorCode = 6 + LegacyErrorCode_PERMISSION_DENIED_LEGACY LegacyErrorCode = 6 // RESOURCE_EXHAUSTED_LEGACY is returned when a query exceeds its quota in some dimension // and can't be completed due to that. Queries that return RESOURCE_EXHAUSTED // should not be retried, as it could be detrimental to the server's health. @@ -220,8 +221,8 @@ const ( // 1. TxPoolFull: this is retried server-side, and is only returned as an error // if the server-side retries failed. // 2. Query is killed due to it taking too long. - ErrorCode_RESOURCE_EXHAUSTED_LEGACY ErrorCode = 7 - // QUERY_NOT_SERVED means that a query could not be served right now. + LegacyErrorCode_RESOURCE_EXHAUSTED_LEGACY LegacyErrorCode = 7 + // QUERY_NOT_SERVED_LEGACY means that a query could not be served right now. 
// Client can interpret it as: "the tablet that you sent this query to cannot // serve the query right now, try a different tablet or try again later." // This could be due to various reasons: QueryService is not serving, should @@ -229,71 +230,61 @@ const ( // Clients that receive this error should usually retry the query, but after taking // the appropriate steps to make sure that the query will get sent to the correct // tablet. - ErrorCode_QUERY_NOT_SERVED ErrorCode = 8 - // NOT_IN_TX means that we're not currently in a transaction, but we should be. - ErrorCode_NOT_IN_TX ErrorCode = 9 - // INTERNAL_ERRORs are problems that only the server can fix, not the client. - // These errors are not due to a query itself, but rather due to the state of - // the system. - // Generally, we don't expect the errors to go away by themselves, but they - // may go away after human intervention. - // Examples of scenarios where INTERNAL_ERROR is returned: - // 1. Something is not configured correctly internally. - // 2. A necessary resource is not available, and we don't expect it to become available by itself. - // 3. A sanity check fails. - // 4. Some other internal error occurs. - // Clients should not retry immediately, as there is little chance of success. - // However, it's acceptable for retries to happen internally, for example to - // multiple backends, in case only a subset of backend are not functional. - ErrorCode_INTERNAL_ERROR ErrorCode = 10 - // TRANSIENT_ERROR is used for when there is some error that we expect we can + LegacyErrorCode_QUERY_NOT_SERVED_LEGACY LegacyErrorCode = 8 + // NOT_IN_TX_LEGACY means that we're not currently in a transaction, but we should be. + LegacyErrorCode_NOT_IN_TX_LEGACY LegacyErrorCode = 9 + // INTERNAL_ERROR_LEGACY means some invariants expected by underlying + // system has been broken. If you see one of these errors, + // something is very broken. 
+ LegacyErrorCode_INTERNAL_ERROR_LEGACY LegacyErrorCode = 10 + // TRANSIENT_ERROR_LEGACY is used for when there is some error that we expect we can // recover from automatically - often due to a resource limit temporarily being // reached. Retrying this error, with an exponential backoff, should succeed. // Clients should be able to successfully retry the query on the same backends. // Examples of things that can trigger this error: // 1. Query has been throttled // 2. VtGate could have request backlog - ErrorCode_TRANSIENT_ERROR ErrorCode = 11 + LegacyErrorCode_TRANSIENT_ERROR_LEGACY LegacyErrorCode = 11 // UNAUTHENTICATED_LEGACY errors are returned when a user requests access to something, // and we're unable to verify the user's authentication. - ErrorCode_UNAUTHENTICATED_LEGACY ErrorCode = 12 + LegacyErrorCode_UNAUTHENTICATED_LEGACY LegacyErrorCode = 12 ) -var ErrorCode_name = map[int32]string{ - 0: "SUCCESS", +var LegacyErrorCode_name = map[int32]string{ + 0: "SUCCESS_LEGACY", 1: "CANCELLED_LEGACY", - 2: "UNKNOWN_ERROR", - 3: "BAD_INPUT", + 2: "UNKNOWN_ERROR_LEGACY", + 3: "BAD_INPUT_LEGACY", 4: "DEADLINE_EXCEEDED_LEGACY", - 5: "INTEGRITY_ERROR", + 5: "INTEGRITY_ERROR_LEGACY", 6: "PERMISSION_DENIED_LEGACY", 7: "RESOURCE_EXHAUSTED_LEGACY", - 8: "QUERY_NOT_SERVED", - 9: "NOT_IN_TX", - 10: "INTERNAL_ERROR", - 11: "TRANSIENT_ERROR", + 8: "QUERY_NOT_SERVED_LEGACY", + 9: "NOT_IN_TX_LEGACY", + 10: "INTERNAL_ERROR_LEGACY", + 11: "TRANSIENT_ERROR_LEGACY", 12: "UNAUTHENTICATED_LEGACY", } -var ErrorCode_value = map[string]int32{ - "SUCCESS": 0, +var LegacyErrorCode_value = map[string]int32{ + "SUCCESS_LEGACY": 0, "CANCELLED_LEGACY": 1, - "UNKNOWN_ERROR": 2, - "BAD_INPUT": 3, + "UNKNOWN_ERROR_LEGACY": 2, + "BAD_INPUT_LEGACY": 3, "DEADLINE_EXCEEDED_LEGACY": 4, - "INTEGRITY_ERROR": 5, + "INTEGRITY_ERROR_LEGACY": 5, "PERMISSION_DENIED_LEGACY": 6, "RESOURCE_EXHAUSTED_LEGACY": 7, - "QUERY_NOT_SERVED": 8, - "NOT_IN_TX": 9, - "INTERNAL_ERROR": 10, - "TRANSIENT_ERROR": 11, + 
"QUERY_NOT_SERVED_LEGACY": 8, + "NOT_IN_TX_LEGACY": 9, + "INTERNAL_ERROR_LEGACY": 10, + "TRANSIENT_ERROR_LEGACY": 11, "UNAUTHENTICATED_LEGACY": 12, } -func (x ErrorCode) String() string { - return proto.EnumName(ErrorCode_name, int32(x)) +func (x LegacyErrorCode) String() string { + return proto.EnumName(LegacyErrorCode_name, int32(x)) } -func (ErrorCode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (LegacyErrorCode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } // CallerID is passed along RPCs to identify the originating client // for a request. It is not meant to be secure, but only @@ -330,9 +321,9 @@ func (*CallerID) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} // We use this so the clients don't have to parse the error messages, // but instead can depend on the value of the code. type RPCError struct { - LegacyCode ErrorCode `protobuf:"varint,1,opt,name=legacy_code,json=legacyCode,enum=vtrpc.ErrorCode" json:"legacy_code,omitempty"` - Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` - Code Code `protobuf:"varint,3,opt,name=code,enum=vtrpc.Code" json:"code,omitempty"` + LegacyCode LegacyErrorCode `protobuf:"varint,1,opt,name=legacy_code,json=legacyCode,enum=vtrpc.LegacyErrorCode" json:"legacy_code,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` + Code Code `protobuf:"varint,3,opt,name=code,enum=vtrpc.Code" json:"code,omitempty"` } func (m *RPCError) Reset() { *m = RPCError{} } @@ -344,47 +335,48 @@ func init() { proto.RegisterType((*CallerID)(nil), "vtrpc.CallerID") proto.RegisterType((*RPCError)(nil), "vtrpc.RPCError") proto.RegisterEnum("vtrpc.Code", Code_name, Code_value) - proto.RegisterEnum("vtrpc.ErrorCode", ErrorCode_name, ErrorCode_value) + proto.RegisterEnum("vtrpc.LegacyErrorCode", LegacyErrorCode_name, LegacyErrorCode_value) } func init() { proto.RegisterFile("vtrpc.proto", fileDescriptor0) } var 
fileDescriptor0 = []byte{ - // 574 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x53, 0xdb, 0x4e, 0x1b, 0x31, - 0x14, 0x24, 0x21, 0xe4, 0x72, 0x36, 0x80, 0x31, 0x2d, 0x4d, 0x11, 0x55, 0x2b, 0x9e, 0x2a, 0x1e, - 0x22, 0xb5, 0xfd, 0x02, 0x67, 0x7d, 0x08, 0x16, 0x8b, 0x9d, 0xfa, 0x42, 0xc9, 0x93, 0x15, 0xc2, - 0x0a, 0x51, 0x05, 0x36, 0xda, 0x04, 0x54, 0x7e, 0xa9, 0x1f, 0xd0, 0xef, 0xab, 0xec, 0x24, 0xa0, - 0x36, 0x4f, 0x89, 0xcf, 0x65, 0x66, 0xce, 0x8c, 0x16, 0x92, 0xa7, 0x79, 0x39, 0x1d, 0x77, 0xa7, - 0x65, 0x31, 0x2f, 0xe8, 0x56, 0x7c, 0x1c, 0xff, 0x84, 0x66, 0x3a, 0x9a, 0x4c, 0xf2, 0x52, 0x70, - 0x7a, 0x04, 0xad, 0x69, 0x79, 0xf7, 0x30, 0xbe, 0x9b, 0x8e, 0x26, 0x9d, 0xca, 0xa7, 0xca, 0xe7, - 0x96, 0x7e, 0x2d, 0x84, 0xee, 0xb8, 0xb8, 0x9f, 0x16, 0x0f, 0xf9, 0xc3, 0xbc, 0x53, 0x5d, 0x74, - 0x5f, 0x0a, 0xf4, 0x18, 0xda, 0xb3, 0xc7, 0xeb, 0xd7, 0x81, 0xcd, 0x38, 0xf0, 0x4f, 0xed, 0xf8, - 0x17, 0x34, 0xf5, 0x20, 0xc5, 0xb2, 0x2c, 0x4a, 0xfa, 0x05, 0x92, 0x49, 0x7e, 0x3b, 0x1a, 0x3f, - 0xfb, 0x71, 0x71, 0x93, 0x47, 0xb6, 0x9d, 0xaf, 0xa4, 0xbb, 0x50, 0x18, 0x47, 0xd2, 0xe2, 0x26, - 0xd7, 0xb0, 0x18, 0x0a, 0xff, 0x69, 0x07, 0x1a, 0xf7, 0xf9, 0x6c, 0x36, 0xba, 0xcd, 0x97, 0xf4, - 0xab, 0x27, 0xfd, 0x08, 0xb5, 0x88, 0xb2, 0x19, 0x51, 0x92, 0x25, 0x4a, 0x04, 0x88, 0x8d, 0x93, - 0x3f, 0x55, 0xa8, 0x45, 0x8c, 0x3a, 0x54, 0xd5, 0x39, 0xd9, 0xa0, 0x6d, 0x68, 0xa6, 0x4c, 0xa6, - 0x98, 0x21, 0x27, 0x15, 0x9a, 0x40, 0xc3, 0xc9, 0x73, 0xa9, 0x7e, 0x48, 0x52, 0xa5, 0x6f, 0x80, - 0x08, 0x79, 0xc9, 0x32, 0xc1, 0x3d, 0xd3, 0x7d, 0x77, 0x81, 0xd2, 0x92, 0x4d, 0xfa, 0x16, 0xf6, - 0x38, 0x32, 0x9e, 0x09, 0x89, 0x1e, 0xaf, 0x52, 0x44, 0x8e, 0x9c, 0xd4, 0xe8, 0x36, 0xb4, 0xa4, - 0xb2, 0xfe, 0x54, 0x39, 0xc9, 0xc9, 0x16, 0xa5, 0xb0, 0xc3, 0x32, 0x8d, 0x8c, 0x0f, 0x3d, 0x5e, - 0x09, 0x63, 0x0d, 0xa9, 0x87, 0xcd, 0x01, 0xea, 0x0b, 0x61, 0x8c, 0x50, 0xd2, 0x73, 0x94, 0x02, - 0x39, 0x69, 0xd0, 0x7d, 0xd8, 0x75, 0x92, 0x39, 0x7b, 0x86, 
0xd2, 0x8a, 0x94, 0x59, 0xe4, 0x84, - 0xd0, 0x03, 0xa0, 0x1a, 0x8d, 0x72, 0x3a, 0x0d, 0x2c, 0x67, 0xcc, 0x99, 0x50, 0x6f, 0xd2, 0x77, - 0xb0, 0x7f, 0xca, 0x44, 0x86, 0xdc, 0x0f, 0x34, 0xa6, 0x4a, 0x72, 0x61, 0x85, 0x92, 0xa4, 0x15, - 0x94, 0xb3, 0x9e, 0xd2, 0x61, 0x0a, 0x28, 0x81, 0xb6, 0x72, 0xd6, 0xab, 0x53, 0xaf, 0x99, 0xec, - 0x23, 0x49, 0xe8, 0x1e, 0x6c, 0x3b, 0x29, 0x2e, 0x06, 0x19, 0x86, 0x33, 0x90, 0x93, 0x76, 0xb8, - 0x5c, 0x48, 0x8b, 0x5a, 0xb2, 0x8c, 0x6c, 0xd3, 0x5d, 0x48, 0x9c, 0x64, 0x97, 0x4c, 0x64, 0xac, - 0x97, 0x21, 0xd9, 0x09, 0x07, 0x71, 0x66, 0x99, 0xcf, 0x94, 0x31, 0x64, 0xf7, 0xe4, 0x77, 0x15, - 0x5a, 0x2f, 0x69, 0x04, 0x36, 0xe3, 0xd2, 0x14, 0x8d, 0x21, 0x1b, 0xc1, 0xa7, 0x85, 0x85, 0x41, - 0x56, 0x86, 0x7d, 0x96, 0x0e, 0x49, 0x65, 0xc1, 0x18, 0xad, 0xf4, 0xa8, 0xb5, 0xd2, 0xa4, 0x1a, - 0x20, 0x7b, 0x8c, 0x7b, 0x21, 0x07, 0x2e, 0x38, 0x79, 0x04, 0x9d, 0x35, 0x27, 0x57, 0xfb, 0xb5, - 0x60, 0x4b, 0x90, 0xd7, 0xd7, 0xc2, 0x0e, 0x97, 0x08, 0x5b, 0x61, 0x65, 0xcd, 0xc2, 0xd5, 0x4a, - 0x9d, 0x7e, 0x80, 0xf7, 0xeb, 0xa6, 0xad, 0xda, 0x8d, 0xa0, 0xf3, 0xbb, 0x43, 0x3d, 0xf4, 0x21, - 0x28, 0x83, 0xfa, 0x32, 0x3a, 0xba, 0x0c, 0x4e, 0x48, 0x6f, 0xaf, 0x48, 0x2b, 0x04, 0xb7, 0x72, - 0x65, 0xc9, 0x0a, 0x41, 0x8a, 0xd5, 0x4c, 0x1a, 0x81, 0xd2, 0x2e, 0x8b, 0x09, 0x3d, 0x84, 0x83, - 0xff, 0x62, 0x5b, 0x31, 0xb5, 0x7b, 0x87, 0xd0, 0x19, 0x17, 0xf7, 0xdd, 0xe7, 0xe2, 0x71, 0xfe, - 0x78, 0x9d, 0x77, 0x9f, 0xee, 0xe6, 0xf9, 0x6c, 0xb6, 0xf8, 0xdc, 0xae, 0xeb, 0xf1, 0xe7, 0xdb, - 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7b, 0x18, 0x69, 0xdc, 0x84, 0x03, 0x00, 0x00, + // 590 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x93, 0x4d, 0x4f, 0xdb, 0x40, + 0x10, 0x86, 0xc9, 0x07, 0xf9, 0x18, 0x07, 0xb2, 0x0c, 0x5f, 0x81, 0x52, 0xb5, 0xe2, 0x54, 0x71, + 0xc8, 0xa1, 0x3d, 0xf4, 0xbc, 0xf1, 0x0e, 0x61, 0x85, 0x59, 0xa7, 0xeb, 0x35, 0x25, 0xa7, 0x55, + 0x08, 0x16, 0xa2, 0x0a, 0x38, 0x72, 0x02, 0x12, 0x97, 0xfe, 
0xac, 0xfe, 0xa6, 0xfe, 0x8c, 0x6a, + 0x9d, 0xb8, 0x28, 0xe4, 0x94, 0xec, 0xfb, 0xcc, 0xce, 0xbe, 0xf3, 0x8e, 0x0c, 0xde, 0xcb, 0x3c, + 0x9b, 0x8e, 0xbb, 0xd3, 0x2c, 0x9d, 0xa7, 0xb8, 0x99, 0x1f, 0x4e, 0x7f, 0x41, 0xc3, 0x1f, 0x4d, + 0x26, 0x49, 0x26, 0x05, 0x9e, 0x40, 0x73, 0x9a, 0x3d, 0x3c, 0x8d, 0x1f, 0xa6, 0xa3, 0x49, 0xa7, + 0xf4, 0xb9, 0xf4, 0xa5, 0xa9, 0xdf, 0x04, 0x47, 0xc7, 0xe9, 0xe3, 0x34, 0x7d, 0x4a, 0x9e, 0xe6, + 0x9d, 0xf2, 0x82, 0xfe, 0x17, 0xf0, 0x14, 0x5a, 0xb3, 0xe7, 0xdb, 0xb7, 0x82, 0x4a, 0x5e, 0xb0, + 0xa2, 0x9d, 0xfe, 0x86, 0x86, 0x1e, 0xf8, 0x94, 0x65, 0x69, 0x86, 0xdf, 0xc1, 0x9b, 0x24, 0xf7, + 0xa3, 0xf1, 0xab, 0x1d, 0xa7, 0x77, 0x49, 0xfe, 0xda, 0xf6, 0xd7, 0x83, 0xee, 0xc2, 0x61, 0x90, + 0x93, 0xbc, 0xd0, 0x4f, 0xef, 0x12, 0x0d, 0x8b, 0x52, 0xf7, 0x1f, 0x3b, 0x50, 0x7f, 0x4c, 0x66, + 0xb3, 0xd1, 0x7d, 0xb2, 0x34, 0x51, 0x1c, 0xf1, 0x13, 0x54, 0xf3, 0x5e, 0x95, 0xbc, 0x97, 0xb7, + 0xec, 0x95, 0x37, 0xc8, 0xc1, 0xd9, 0x9f, 0x32, 0x54, 0xf3, 0x1e, 0x35, 0x28, 0x87, 0x97, 0x6c, + 0x03, 0x5b, 0xd0, 0xf0, 0xb9, 0xf2, 0x29, 0x20, 0xc1, 0x4a, 0xe8, 0x41, 0x3d, 0x56, 0x97, 0x2a, + 0xfc, 0xa9, 0x58, 0x19, 0xf7, 0x80, 0x49, 0x75, 0xcd, 0x03, 0x29, 0x2c, 0xd7, 0xfd, 0xf8, 0x8a, + 0x94, 0x61, 0x15, 0xdc, 0x87, 0x1d, 0x41, 0x5c, 0x04, 0x52, 0x91, 0xa5, 0x1b, 0x9f, 0x48, 0x90, + 0x60, 0x55, 0xdc, 0x82, 0xa6, 0x0a, 0x8d, 0x3d, 0x0f, 0x63, 0x25, 0xd8, 0x26, 0x22, 0x6c, 0xf3, + 0x40, 0x13, 0x17, 0x43, 0x4b, 0x37, 0x32, 0x32, 0x11, 0xab, 0xb9, 0x9b, 0x03, 0xd2, 0x57, 0x32, + 0x8a, 0x64, 0xa8, 0xac, 0x20, 0x25, 0x49, 0xb0, 0x3a, 0xee, 0x42, 0x3b, 0x56, 0x3c, 0x36, 0x17, + 0xa4, 0x8c, 0xf4, 0xb9, 0x21, 0xc1, 0x18, 0x1e, 0x00, 0x6a, 0x8a, 0xc2, 0x58, 0xfb, 0xee, 0x95, + 0x0b, 0x1e, 0x47, 0x4e, 0x6f, 0xe0, 0x21, 0xec, 0x9e, 0x73, 0x19, 0x90, 0xb0, 0x03, 0x4d, 0x7e, + 0xa8, 0x84, 0x34, 0x32, 0x54, 0xac, 0xe9, 0x9c, 0xf3, 0x5e, 0xa8, 0x5d, 0x15, 0x20, 0x83, 0x56, + 0x18, 0x1b, 0x1b, 0x9e, 0x5b, 0xcd, 0x55, 0x9f, 0x98, 0x87, 0x3b, 0xb0, 0x15, 0x2b, 0x79, 0x35, + 
0x08, 0xc8, 0x8d, 0x41, 0x82, 0xb5, 0xdc, 0xe4, 0x52, 0x19, 0xd2, 0x8a, 0x07, 0x6c, 0x0b, 0xdb, + 0xe0, 0xc5, 0x8a, 0x5f, 0x73, 0x19, 0xf0, 0x5e, 0x40, 0x6c, 0xdb, 0x0d, 0x24, 0xb8, 0xe1, 0x36, + 0x08, 0xa3, 0x88, 0xb5, 0xcf, 0xfe, 0x96, 0xa1, 0xfd, 0x6e, 0x27, 0x6e, 0xc8, 0x28, 0xf6, 0x7d, + 0x8a, 0x22, 0x1b, 0x50, 0x9f, 0xfb, 0x43, 0xb6, 0xe1, 0x42, 0x5b, 0xe4, 0xe9, 0x3c, 0x2e, 0xd5, + 0x12, 0x76, 0x60, 0x6f, 0x99, 0xab, 0x25, 0xad, 0x43, 0x5d, 0x90, 0x3c, 0xe4, 0x1e, 0x17, 0x56, + 0xaa, 0x41, 0x6c, 0x0a, 0xb5, 0x82, 0x27, 0xd0, 0x59, 0x0b, 0xb9, 0xa0, 0x55, 0x3c, 0x86, 0x03, + 0xe7, 0xbc, 0xaf, 0xa5, 0x19, 0xae, 0xf6, 0xdb, 0x74, 0x37, 0xd7, 0x42, 0x2e, 0x68, 0x0d, 0x3f, + 0xc2, 0xd1, 0x7a, 0xac, 0x05, 0xae, 0xe3, 0x07, 0x38, 0xfc, 0x11, 0x93, 0x1e, 0x5a, 0xb7, 0xca, + 0x88, 0xf4, 0xf5, 0x1b, 0x6c, 0x38, 0xa7, 0x4e, 0x96, 0xca, 0x9a, 0x9b, 0x42, 0x6d, 0xe2, 0x11, + 0xec, 0x17, 0x29, 0xae, 0x5a, 0x01, 0x67, 0xd3, 0x68, 0xae, 0x22, 0x49, 0xca, 0xac, 0x32, 0xcf, + 0xb1, 0x77, 0x4b, 0x2f, 0x58, 0xab, 0x77, 0x0c, 0x9d, 0x71, 0xfa, 0xd8, 0x7d, 0x4d, 0x9f, 0xe7, + 0xcf, 0xb7, 0x49, 0xf7, 0xe5, 0x61, 0x9e, 0xcc, 0x66, 0x8b, 0x4f, 0xf6, 0xb6, 0x96, 0xff, 0x7c, + 0xfb, 0x17, 0x00, 0x00, 0xff, 0xff, 0x5c, 0x6b, 0x47, 0xf6, 0xc8, 0x03, 0x00, 0x00, } diff --git a/go/vt/tabletserver/tabletconn/grpc_error_test.go b/go/vt/tabletserver/tabletconn/grpc_error_test.go new file mode 100644 index 00000000000..5dee218c694 --- /dev/null +++ b/go/vt/tabletserver/tabletconn/grpc_error_test.go @@ -0,0 +1,43 @@ +// Copyright 2017, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package tabletconn + +import ( + "testing" + + vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" +) + +func TestTabletErrorFromRPCError(t *testing.T) { + testcases := []struct { + in *vtrpcpb.RPCError + want vtrpcpb.Code + }{{ + in: &vtrpcpb.RPCError{ + LegacyCode: vtrpcpb.LegacyErrorCode_BAD_INPUT_LEGACY, + Message: "bad input", + }, + want: vtrpcpb.Code_INVALID_ARGUMENT, + }, { + in: &vtrpcpb.RPCError{ + LegacyCode: vtrpcpb.LegacyErrorCode_BAD_INPUT_LEGACY, + Message: "bad input", + Code: vtrpcpb.Code_INVALID_ARGUMENT, + }, + want: vtrpcpb.Code_INVALID_ARGUMENT, + }, { + in: &vtrpcpb.RPCError{ + Message: "bad input", + Code: vtrpcpb.Code_INVALID_ARGUMENT, + }, + want: vtrpcpb.Code_INVALID_ARGUMENT, + }} + for _, tcase := range testcases { + got := TabletErrorFromRPCError(tcase.in).(*ServerError) + if got.ServerCode != tcase.want { + t.Errorf("FromVtRPCError(%v):\n%v, want\n%v", tcase.in, got.ServerCode, tcase.want) + } + } +} diff --git a/go/vt/vterrors/grpc.go b/go/vt/vterrors/grpc.go index 94e6334262b..255f1ae301e 100644 --- a/go/vt/vterrors/grpc.go +++ b/go/vt/vterrors/grpc.go @@ -24,68 +24,68 @@ import ( // See: https://github.com/grpc/grpc-go/issues/319 const GRPCServerErrPrefix = "gRPCServerError:" -// CodeToLegacyErrorCode maps a vtrpcpb.Code to a vtrpcpb.ErrorCode. -func CodeToLegacyErrorCode(code vtrpcpb.Code) vtrpcpb.ErrorCode { +// CodeToLegacyErrorCode maps a vtrpcpb.Code to a vtrpcpb.LegacyErrorCode. 
+func CodeToLegacyErrorCode(code vtrpcpb.Code) vtrpcpb.LegacyErrorCode { switch code { case vtrpcpb.Code_OK: - return vtrpcpb.ErrorCode_SUCCESS + return vtrpcpb.LegacyErrorCode_SUCCESS_LEGACY case vtrpcpb.Code_CANCELED: - return vtrpcpb.ErrorCode_CANCELLED_LEGACY + return vtrpcpb.LegacyErrorCode_CANCELLED_LEGACY case vtrpcpb.Code_UNKNOWN: - return vtrpcpb.ErrorCode_UNKNOWN_ERROR + return vtrpcpb.LegacyErrorCode_UNKNOWN_ERROR_LEGACY case vtrpcpb.Code_INVALID_ARGUMENT: - return vtrpcpb.ErrorCode_BAD_INPUT + return vtrpcpb.LegacyErrorCode_BAD_INPUT_LEGACY case vtrpcpb.Code_DEADLINE_EXCEEDED: - return vtrpcpb.ErrorCode_DEADLINE_EXCEEDED_LEGACY + return vtrpcpb.LegacyErrorCode_DEADLINE_EXCEEDED_LEGACY case vtrpcpb.Code_ALREADY_EXISTS: - return vtrpcpb.ErrorCode_INTEGRITY_ERROR + return vtrpcpb.LegacyErrorCode_INTEGRITY_ERROR_LEGACY case vtrpcpb.Code_PERMISSION_DENIED: - return vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY + return vtrpcpb.LegacyErrorCode_PERMISSION_DENIED_LEGACY case vtrpcpb.Code_RESOURCE_EXHAUSTED: - return vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY + return vtrpcpb.LegacyErrorCode_RESOURCE_EXHAUSTED_LEGACY case vtrpcpb.Code_FAILED_PRECONDITION: - return vtrpcpb.ErrorCode_QUERY_NOT_SERVED + return vtrpcpb.LegacyErrorCode_QUERY_NOT_SERVED_LEGACY case vtrpcpb.Code_ABORTED: - return vtrpcpb.ErrorCode_NOT_IN_TX + return vtrpcpb.LegacyErrorCode_NOT_IN_TX_LEGACY case vtrpcpb.Code_INTERNAL: - return vtrpcpb.ErrorCode_INTERNAL_ERROR + return vtrpcpb.LegacyErrorCode_INTERNAL_ERROR_LEGACY case vtrpcpb.Code_UNAVAILABLE: - return vtrpcpb.ErrorCode_TRANSIENT_ERROR + return vtrpcpb.LegacyErrorCode_TRANSIENT_ERROR_LEGACY case vtrpcpb.Code_UNAUTHENTICATED: - return vtrpcpb.ErrorCode_UNAUTHENTICATED_LEGACY + return vtrpcpb.LegacyErrorCode_UNAUTHENTICATED_LEGACY default: - return vtrpcpb.ErrorCode_UNKNOWN_ERROR + return vtrpcpb.LegacyErrorCode_UNKNOWN_ERROR_LEGACY } } -// LegacyErrorCodeToCode maps a vtrpcpb.ErrorCode to a gRPC vtrpcpb.Code. 
-func LegacyErrorCodeToCode(code vtrpcpb.ErrorCode) vtrpcpb.Code { +// LegacyErrorCodeToCode maps a vtrpcpb.LegacyErrorCode to a gRPC vtrpcpb.Code. +func LegacyErrorCodeToCode(code vtrpcpb.LegacyErrorCode) vtrpcpb.Code { switch code { - case vtrpcpb.ErrorCode_SUCCESS: + case vtrpcpb.LegacyErrorCode_SUCCESS_LEGACY: return vtrpcpb.Code_OK - case vtrpcpb.ErrorCode_CANCELLED_LEGACY: + case vtrpcpb.LegacyErrorCode_CANCELLED_LEGACY: return vtrpcpb.Code_CANCELED - case vtrpcpb.ErrorCode_UNKNOWN_ERROR: + case vtrpcpb.LegacyErrorCode_UNKNOWN_ERROR_LEGACY: return vtrpcpb.Code_UNKNOWN - case vtrpcpb.ErrorCode_BAD_INPUT: + case vtrpcpb.LegacyErrorCode_BAD_INPUT_LEGACY: return vtrpcpb.Code_INVALID_ARGUMENT - case vtrpcpb.ErrorCode_DEADLINE_EXCEEDED_LEGACY: + case vtrpcpb.LegacyErrorCode_DEADLINE_EXCEEDED_LEGACY: return vtrpcpb.Code_DEADLINE_EXCEEDED - case vtrpcpb.ErrorCode_INTEGRITY_ERROR: + case vtrpcpb.LegacyErrorCode_INTEGRITY_ERROR_LEGACY: return vtrpcpb.Code_ALREADY_EXISTS - case vtrpcpb.ErrorCode_PERMISSION_DENIED_LEGACY: + case vtrpcpb.LegacyErrorCode_PERMISSION_DENIED_LEGACY: return vtrpcpb.Code_PERMISSION_DENIED - case vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED_LEGACY: + case vtrpcpb.LegacyErrorCode_RESOURCE_EXHAUSTED_LEGACY: return vtrpcpb.Code_RESOURCE_EXHAUSTED - case vtrpcpb.ErrorCode_QUERY_NOT_SERVED: + case vtrpcpb.LegacyErrorCode_QUERY_NOT_SERVED_LEGACY: return vtrpcpb.Code_FAILED_PRECONDITION - case vtrpcpb.ErrorCode_NOT_IN_TX: + case vtrpcpb.LegacyErrorCode_NOT_IN_TX_LEGACY: return vtrpcpb.Code_ABORTED - case vtrpcpb.ErrorCode_INTERNAL_ERROR: + case vtrpcpb.LegacyErrorCode_INTERNAL_ERROR_LEGACY: return vtrpcpb.Code_INTERNAL - case vtrpcpb.ErrorCode_TRANSIENT_ERROR: + case vtrpcpb.LegacyErrorCode_TRANSIENT_ERROR_LEGACY: return vtrpcpb.Code_UNAVAILABLE - case vtrpcpb.ErrorCode_UNAUTHENTICATED_LEGACY: + case vtrpcpb.LegacyErrorCode_UNAUTHENTICATED_LEGACY: return vtrpcpb.Code_UNAUTHENTICATED default: return vtrpcpb.Code_UNKNOWN diff --git 
a/go/vt/vterrors/proto3_test.go b/go/vt/vterrors/proto3_test.go new file mode 100644 index 00000000000..d6c5fbaad24 --- /dev/null +++ b/go/vt/vterrors/proto3_test.go @@ -0,0 +1,83 @@ +// Copyright 2017, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vterrors + +import ( + "errors" + "reflect" + "testing" + + vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" +) + +func TestFromVtRPCError(t *testing.T) { + testcases := []struct { + in *vtrpcpb.RPCError + want error + }{{ + in: nil, + want: nil, + }, { + in: &vtrpcpb.RPCError{ + LegacyCode: vtrpcpb.LegacyErrorCode_BAD_INPUT_LEGACY, + Message: "bad input", + }, + want: &VitessError{ + Code: vtrpcpb.Code_INVALID_ARGUMENT, + err: errors.New("bad input"), + }, + }, { + in: &vtrpcpb.RPCError{ + LegacyCode: vtrpcpb.LegacyErrorCode_BAD_INPUT_LEGACY, + Message: "bad input", + Code: vtrpcpb.Code_INVALID_ARGUMENT, + }, + want: &VitessError{ + Code: vtrpcpb.Code_INVALID_ARGUMENT, + err: errors.New("bad input"), + }, + }, { + in: &vtrpcpb.RPCError{ + Message: "bad input", + Code: vtrpcpb.Code_INVALID_ARGUMENT, + }, + want: &VitessError{ + Code: vtrpcpb.Code_INVALID_ARGUMENT, + err: errors.New("bad input"), + }, + }} + for _, tcase := range testcases { + got := FromVtRPCError(tcase.in) + if !reflect.DeepEqual(got, tcase.want) { + t.Errorf("FromVtRPCError(%v): %v, want %v", tcase.in, got, tcase.want) + } + } +} + +func TestVtRPCErrorFromVtError(t *testing.T) { + testcases := []struct { + in error + want *vtrpcpb.RPCError + }{{ + in: nil, + want: nil, + }, { + in: &VitessError{ + Code: vtrpcpb.Code_INVALID_ARGUMENT, + err: errors.New("bad input"), + }, + want: &vtrpcpb.RPCError{ + LegacyCode: vtrpcpb.LegacyErrorCode_BAD_INPUT_LEGACY, + Message: "bad input", + Code: vtrpcpb.Code_INVALID_ARGUMENT, + }, + }} + for _, tcase := range testcases { + got := VtRPCErrorFromVtError(tcase.in) + if !reflect.DeepEqual(got, tcase.want) { + 
t.Errorf("VtRPCErrorFromVtError(%v): %v, want %v", tcase.in, got, tcase.want) + } + } +} diff --git a/java/client/src/main/java/com/youtube/vitess/client/Proto.java b/java/client/src/main/java/com/youtube/vitess/client/Proto.java index 3663ed48fb7..2cc85df1085 100644 --- a/java/client/src/main/java/com/youtube/vitess/client/Proto.java +++ b/java/client/src/main/java/com/youtube/vitess/client/Proto.java @@ -51,19 +51,39 @@ public static void checkError(RPCError error) throws SQLException { String sqlState = getSQLState(error.getMessage()); switch (error.getCode()) { - case SUCCESS: + case OK: break; - case BAD_INPUT: + case INVALID_ARGUMENT: throw new SQLSyntaxErrorException(error.toString(), sqlState, errno); case DEADLINE_EXCEEDED: throw new SQLTimeoutException(error.toString(), sqlState, errno); - case INTEGRITY_ERROR: + case ALREADY_EXISTS: throw new SQLIntegrityConstraintViolationException(error.toString(), sqlState, errno); - case TRANSIENT_ERROR: + case UNAVAILABLE: throw new SQLTransientException(error.toString(), sqlState, errno); case UNAUTHENTICATED: throw new SQLInvalidAuthorizationSpecException(error.toString(), sqlState, errno); - case NOT_IN_TX: + case ABORTED: + throw new SQLRecoverableException(error.toString(), sqlState, errno); + default: + throw new SQLNonTransientException("Vitess RPC error: " + error.toString(), sqlState, + errno); + } + + switch (error.getLegacyCode()) { + case SUCCESS_LEGACY: + break; + case BAD_INPUT_LEGACY: + throw new SQLSyntaxErrorException(error.toString(), sqlState, errno); + case DEADLINE_EXCEEDED_LEGACY: + throw new SQLTimeoutException(error.toString(), sqlState, errno); + case INTEGRITY_ERROR_LEGACY: + throw new SQLIntegrityConstraintViolationException(error.toString(), sqlState, errno); + case TRANSIENT_ERROR_LEGACY: + throw new SQLTransientException(error.toString(), sqlState, errno); + case UNAUTHENTICATED_LEGACY: + throw new SQLInvalidAuthorizationSpecException(error.toString(), sqlState, errno); + case 
NOT_IN_TX_LEGACY: throw new SQLRecoverableException(error.toString(), sqlState, errno); default: throw new SQLNonTransientException("Vitess RPC error: " + error.toString(), sqlState, diff --git a/java/client/src/main/java/com/youtube/vitess/client/cursor/CursorWithError.java b/java/client/src/main/java/com/youtube/vitess/client/cursor/CursorWithError.java index 4ca6589739e..21741772d6a 100644 --- a/java/client/src/main/java/com/youtube/vitess/client/cursor/CursorWithError.java +++ b/java/client/src/main/java/com/youtube/vitess/client/cursor/CursorWithError.java @@ -13,7 +13,7 @@ public class CursorWithError { public CursorWithError(Query.ResultWithError resultWithError) { if (!resultWithError.hasError() || - Vtrpc.ErrorCode.SUCCESS == resultWithError.getError().getCode()) { + Vtrpc.Code.OK == resultWithError.getError().getCode()) { this.cursor = new SimpleCursor(resultWithError.getResult()); this.error = null; } else { diff --git a/php/src/Vitess/Proto/Binlogdata/BinlogTransaction/Statement/Category.php b/php/src/Vitess/Proto/Binlogdata/BinlogTransaction/Statement/Category.php index ccd0b5bc650..2b2533db623 100644 --- a/php/src/Vitess/Proto/Binlogdata/BinlogTransaction/Statement/Category.php +++ b/php/src/Vitess/Proto/Binlogdata/BinlogTransaction/Statement/Category.php @@ -9,8 +9,11 @@ class Category extends \DrSlump\Protobuf\Enum { const BL_BEGIN = 1; const BL_COMMIT = 2; const BL_ROLLBACK = 3; - const BL_DML = 4; + const BL_DML_DEPRECATED = 4; const BL_DDL = 5; const BL_SET = 6; + const BL_INSERT = 7; + const BL_UPDATE = 8; + const BL_DELETE = 9; } } diff --git a/php/src/Vitess/Proto/Query/MessageAckRequest.php b/php/src/Vitess/Proto/Query/MessageAckRequest.php new file mode 100644 index 00000000000..4ba72e4f0d7 --- /dev/null +++ b/php/src/Vitess/Proto/Query/MessageAckRequest.php @@ -0,0 +1,289 @@ +number = 1; + $f->name = "effective_caller_id"; + $f->type = \DrSlump\Protobuf::TYPE_MESSAGE; + $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; + $f->reference = 
'\Vitess\Proto\Vtrpc\CallerID'; + $descriptor->addField($f); + + // OPTIONAL MESSAGE immediate_caller_id = 2 + $f = new \DrSlump\Protobuf\Field(); + $f->number = 2; + $f->name = "immediate_caller_id"; + $f->type = \DrSlump\Protobuf::TYPE_MESSAGE; + $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; + $f->reference = '\Vitess\Proto\Query\VTGateCallerID'; + $descriptor->addField($f); + + // OPTIONAL MESSAGE target = 3 + $f = new \DrSlump\Protobuf\Field(); + $f->number = 3; + $f->name = "target"; + $f->type = \DrSlump\Protobuf::TYPE_MESSAGE; + $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; + $f->reference = '\Vitess\Proto\Query\Target'; + $descriptor->addField($f); + + // OPTIONAL STRING name = 4 + $f = new \DrSlump\Protobuf\Field(); + $f->number = 4; + $f->name = "name"; + $f->type = \DrSlump\Protobuf::TYPE_STRING; + $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; + $descriptor->addField($f); + + // REPEATED MESSAGE ids = 5 + $f = new \DrSlump\Protobuf\Field(); + $f->number = 5; + $f->name = "ids"; + $f->type = \DrSlump\Protobuf::TYPE_MESSAGE; + $f->rule = \DrSlump\Protobuf::RULE_REPEATED; + $f->reference = '\Vitess\Proto\Query\Value'; + $descriptor->addField($f); + + foreach (self::$__extensions as $cb) { + $descriptor->addField($cb(), true); + } + + return $descriptor; + } + + /** + * Check if has a value + * + * @return boolean + */ + public function hasEffectiveCallerId(){ + return $this->_has(1); + } + + /** + * Clear value + * + * @return \Vitess\Proto\Query\MessageAckRequest + */ + public function clearEffectiveCallerId(){ + return $this->_clear(1); + } + + /** + * Get value + * + * @return \Vitess\Proto\Vtrpc\CallerID + */ + public function getEffectiveCallerId(){ + return $this->_get(1); + } + + /** + * Set value + * + * @param \Vitess\Proto\Vtrpc\CallerID $value + * @return \Vitess\Proto\Query\MessageAckRequest + */ + public function setEffectiveCallerId(\Vitess\Proto\Vtrpc\CallerID $value){ + return $this->_set(1, $value); + } + + /** + * Check if has a value + * + * 
@return boolean + */ + public function hasImmediateCallerId(){ + return $this->_has(2); + } + + /** + * Clear value + * + * @return \Vitess\Proto\Query\MessageAckRequest + */ + public function clearImmediateCallerId(){ + return $this->_clear(2); + } + + /** + * Get value + * + * @return \Vitess\Proto\Query\VTGateCallerID + */ + public function getImmediateCallerId(){ + return $this->_get(2); + } + + /** + * Set value + * + * @param \Vitess\Proto\Query\VTGateCallerID $value + * @return \Vitess\Proto\Query\MessageAckRequest + */ + public function setImmediateCallerId(\Vitess\Proto\Query\VTGateCallerID $value){ + return $this->_set(2, $value); + } + + /** + * Check if has a value + * + * @return boolean + */ + public function hasTarget(){ + return $this->_has(3); + } + + /** + * Clear value + * + * @return \Vitess\Proto\Query\MessageAckRequest + */ + public function clearTarget(){ + return $this->_clear(3); + } + + /** + * Get value + * + * @return \Vitess\Proto\Query\Target + */ + public function getTarget(){ + return $this->_get(3); + } + + /** + * Set value + * + * @param \Vitess\Proto\Query\Target $value + * @return \Vitess\Proto\Query\MessageAckRequest + */ + public function setTarget(\Vitess\Proto\Query\Target $value){ + return $this->_set(3, $value); + } + + /** + * Check if has a value + * + * @return boolean + */ + public function hasName(){ + return $this->_has(4); + } + + /** + * Clear value + * + * @return \Vitess\Proto\Query\MessageAckRequest + */ + public function clearName(){ + return $this->_clear(4); + } + + /** + * Get value + * + * @return string + */ + public function getName(){ + return $this->_get(4); + } + + /** + * Set value + * + * @param string $value + * @return \Vitess\Proto\Query\MessageAckRequest + */ + public function setName( $value){ + return $this->_set(4, $value); + } + + /** + * Check if has a value + * + * @return boolean + */ + public function hasIds(){ + return $this->_has(5); + } + + /** + * Clear value + * + * @return 
\Vitess\Proto\Query\MessageAckRequest + */ + public function clearIds(){ + return $this->_clear(5); + } + + /** + * Get value + * + * @param int $idx + * @return \Vitess\Proto\Query\Value + */ + public function getIds($idx = NULL){ + return $this->_get(5, $idx); + } + + /** + * Set value + * + * @param \Vitess\Proto\Query\Value $value + * @return \Vitess\Proto\Query\MessageAckRequest + */ + public function setIds(\Vitess\Proto\Query\Value $value, $idx = NULL){ + return $this->_set(5, $value, $idx); + } + + /** + * Get all elements of + * + * @return \Vitess\Proto\Query\Value[] + */ + public function getIdsList(){ + return $this->_get(5); + } + + /** + * Add a new element to + * + * @param \Vitess\Proto\Query\Value $value + * @return \Vitess\Proto\Query\MessageAckRequest + */ + public function addIds(\Vitess\Proto\Query\Value $value){ + return $this->_add(5, $value); + } + } +} + diff --git a/php/src/Vitess/Proto/Query/MessageAckResponse.php b/php/src/Vitess/Proto/Query/MessageAckResponse.php new file mode 100644 index 00000000000..63211139df8 --- /dev/null +++ b/php/src/Vitess/Proto/Query/MessageAckResponse.php @@ -0,0 +1,74 @@ +number = 1; + $f->name = "result"; + $f->type = \DrSlump\Protobuf::TYPE_MESSAGE; + $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; + $f->reference = '\Vitess\Proto\Query\QueryResult'; + $descriptor->addField($f); + + foreach (self::$__extensions as $cb) { + $descriptor->addField($cb(), true); + } + + return $descriptor; + } + + /** + * Check if has a value + * + * @return boolean + */ + public function hasResult(){ + return $this->_has(1); + } + + /** + * Clear value + * + * @return \Vitess\Proto\Query\MessageAckResponse + */ + public function clearResult(){ + return $this->_clear(1); + } + + /** + * Get value + * + * @return \Vitess\Proto\Query\QueryResult + */ + public function getResult(){ + return $this->_get(1); + } + + /** + * Set value + * + * @param \Vitess\Proto\Query\QueryResult $value + * @return 
\Vitess\Proto\Query\MessageAckResponse + */ + public function setResult(\Vitess\Proto\Query\QueryResult $value){ + return $this->_set(1, $value); + } + } +} + diff --git a/php/src/Vitess/Proto/Query/MessageStreamRequest.php b/php/src/Vitess/Proto/Query/MessageStreamRequest.php new file mode 100644 index 00000000000..defdaeff521 --- /dev/null +++ b/php/src/Vitess/Proto/Query/MessageStreamRequest.php @@ -0,0 +1,220 @@ +number = 1; + $f->name = "effective_caller_id"; + $f->type = \DrSlump\Protobuf::TYPE_MESSAGE; + $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; + $f->reference = '\Vitess\Proto\Vtrpc\CallerID'; + $descriptor->addField($f); + + // OPTIONAL MESSAGE immediate_caller_id = 2 + $f = new \DrSlump\Protobuf\Field(); + $f->number = 2; + $f->name = "immediate_caller_id"; + $f->type = \DrSlump\Protobuf::TYPE_MESSAGE; + $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; + $f->reference = '\Vitess\Proto\Query\VTGateCallerID'; + $descriptor->addField($f); + + // OPTIONAL MESSAGE target = 3 + $f = new \DrSlump\Protobuf\Field(); + $f->number = 3; + $f->name = "target"; + $f->type = \DrSlump\Protobuf::TYPE_MESSAGE; + $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; + $f->reference = '\Vitess\Proto\Query\Target'; + $descriptor->addField($f); + + // OPTIONAL STRING name = 4 + $f = new \DrSlump\Protobuf\Field(); + $f->number = 4; + $f->name = "name"; + $f->type = \DrSlump\Protobuf::TYPE_STRING; + $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; + $descriptor->addField($f); + + foreach (self::$__extensions as $cb) { + $descriptor->addField($cb(), true); + } + + return $descriptor; + } + + /** + * Check if has a value + * + * @return boolean + */ + public function hasEffectiveCallerId(){ + return $this->_has(1); + } + + /** + * Clear value + * + * @return \Vitess\Proto\Query\MessageStreamRequest + */ + public function clearEffectiveCallerId(){ + return $this->_clear(1); + } + + /** + * Get value + * + * @return \Vitess\Proto\Vtrpc\CallerID + */ + public function getEffectiveCallerId(){ + 
return $this->_get(1); + } + + /** + * Set value + * + * @param \Vitess\Proto\Vtrpc\CallerID $value + * @return \Vitess\Proto\Query\MessageStreamRequest + */ + public function setEffectiveCallerId(\Vitess\Proto\Vtrpc\CallerID $value){ + return $this->_set(1, $value); + } + + /** + * Check if has a value + * + * @return boolean + */ + public function hasImmediateCallerId(){ + return $this->_has(2); + } + + /** + * Clear value + * + * @return \Vitess\Proto\Query\MessageStreamRequest + */ + public function clearImmediateCallerId(){ + return $this->_clear(2); + } + + /** + * Get value + * + * @return \Vitess\Proto\Query\VTGateCallerID + */ + public function getImmediateCallerId(){ + return $this->_get(2); + } + + /** + * Set value + * + * @param \Vitess\Proto\Query\VTGateCallerID $value + * @return \Vitess\Proto\Query\MessageStreamRequest + */ + public function setImmediateCallerId(\Vitess\Proto\Query\VTGateCallerID $value){ + return $this->_set(2, $value); + } + + /** + * Check if has a value + * + * @return boolean + */ + public function hasTarget(){ + return $this->_has(3); + } + + /** + * Clear value + * + * @return \Vitess\Proto\Query\MessageStreamRequest + */ + public function clearTarget(){ + return $this->_clear(3); + } + + /** + * Get value + * + * @return \Vitess\Proto\Query\Target + */ + public function getTarget(){ + return $this->_get(3); + } + + /** + * Set value + * + * @param \Vitess\Proto\Query\Target $value + * @return \Vitess\Proto\Query\MessageStreamRequest + */ + public function setTarget(\Vitess\Proto\Query\Target $value){ + return $this->_set(3, $value); + } + + /** + * Check if has a value + * + * @return boolean + */ + public function hasName(){ + return $this->_has(4); + } + + /** + * Clear value + * + * @return \Vitess\Proto\Query\MessageStreamRequest + */ + public function clearName(){ + return $this->_clear(4); + } + + /** + * Get value + * + * @return string + */ + public function getName(){ + return $this->_get(4); + } + + /** + * Set 
value + * + * @param string $value + * @return \Vitess\Proto\Query\MessageStreamRequest + */ + public function setName( $value){ + return $this->_set(4, $value); + } + } +} + diff --git a/php/src/Vitess/Proto/Query/MessageStreamResponse.php b/php/src/Vitess/Proto/Query/MessageStreamResponse.php new file mode 100644 index 00000000000..5696c0fa0fb --- /dev/null +++ b/php/src/Vitess/Proto/Query/MessageStreamResponse.php @@ -0,0 +1,74 @@ +number = 1; + $f->name = "result"; + $f->type = \DrSlump\Protobuf::TYPE_MESSAGE; + $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; + $f->reference = '\Vitess\Proto\Query\QueryResult'; + $descriptor->addField($f); + + foreach (self::$__extensions as $cb) { + $descriptor->addField($cb(), true); + } + + return $descriptor; + } + + /** + * Check if has a value + * + * @return boolean + */ + public function hasResult(){ + return $this->_has(1); + } + + /** + * Clear value + * + * @return \Vitess\Proto\Query\MessageStreamResponse + */ + public function clearResult(){ + return $this->_clear(1); + } + + /** + * Get value + * + * @return \Vitess\Proto\Query\QueryResult + */ + public function getResult(){ + return $this->_get(1); + } + + /** + * Set value + * + * @param \Vitess\Proto\Query\QueryResult $value + * @return \Vitess\Proto\Query\MessageStreamResponse + */ + public function setResult(\Vitess\Proto\Query\QueryResult $value){ + return $this->_set(1, $value); + } + } +} + diff --git a/php/src/Vitess/Proto/Queryservice/QueryClient.php b/php/src/Vitess/Proto/Queryservice/QueryClient.php index a6821b1e3c5..396cec1a038 100644 --- a/php/src/Vitess/Proto/Queryservice/QueryClient.php +++ b/php/src/Vitess/Proto/Queryservice/QueryClient.php @@ -105,6 +105,18 @@ public function BeginExecute(\Vitess\Proto\Query\BeginExecuteRequest $argument, public function BeginExecuteBatch(\Vitess\Proto\Query\BeginExecuteBatchRequest $argument, $metadata = array(), $options = array()) { return $this->_simpleRequest('/queryservice.Query/BeginExecuteBatch', $argument, 
'\Vitess\Proto\Query\BeginExecuteBatchResponse::deserialize', $metadata, $options); } + /** + * @param Vitess\Proto\Query\MessageStreamRequest $input + */ + public function MessageStream($argument, $metadata = array(), $options = array()) { + return $this->_serverStreamRequest('/queryservice.Query/MessageStream', $argument, '\Vitess\Proto\Query\MessageStreamResponse::deserialize', $metadata, $options); + } + /** + * @param Vitess\Proto\Query\MessageAckRequest $input + */ + public function MessageAck(\Vitess\Proto\Query\MessageAckRequest $argument, $metadata = array(), $options = array()) { + return $this->_simpleRequest('/queryservice.Query/MessageAck', $argument, '\Vitess\Proto\Query\MessageAckResponse::deserialize', $metadata, $options); + } /** * @param Vitess\Proto\Query\SplitQueryRequest $input */ diff --git a/php/src/Vitess/Proto/Throttlerdata/Configuration.php b/php/src/Vitess/Proto/Throttlerdata/Configuration.php index 98eaff7c819..f90c3b8f5b6 100644 --- a/php/src/Vitess/Proto/Throttlerdata/Configuration.php +++ b/php/src/Vitess/Proto/Throttlerdata/Configuration.php @@ -45,6 +45,9 @@ class Configuration extends \DrSlump\Protobuf\Message { /** @var float */ public $bad_rate_increase = null; + /** @var float */ + public $max_rate_approach_threshold = null; + /** @var \Closure[] */ protected static $__extensions = array(); @@ -157,6 +160,14 @@ public static function descriptor() $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; $descriptor->addField($f); + // OPTIONAL DOUBLE max_rate_approach_threshold = 14 + $f = new \DrSlump\Protobuf\Field(); + $f->number = 14; + $f->name = "max_rate_approach_threshold"; + $f->type = \DrSlump\Protobuf::TYPE_DOUBLE; + $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; + $descriptor->addField($f); + foreach (self::$__extensions as $cb) { $descriptor->addField($cb(), true); } @@ -644,6 +655,43 @@ public function getBadRateIncrease(){ public function setBadRateIncrease( $value){ return $this->_set(13, $value); } + + /** + * Check if has a 
value + * + * @return boolean + */ + public function hasMaxRateApproachThreshold(){ + return $this->_has(14); + } + + /** + * Clear value + * + * @return \Vitess\Proto\Throttlerdata\Configuration + */ + public function clearMaxRateApproachThreshold(){ + return $this->_clear(14); + } + + /** + * Get value + * + * @return float + */ + public function getMaxRateApproachThreshold(){ + return $this->_get(14); + } + + /** + * Set value + * + * @param float $value + * @return \Vitess\Proto\Throttlerdata\Configuration + */ + public function setMaxRateApproachThreshold( $value){ + return $this->_set(14, $value); + } } } diff --git a/php/src/Vitess/Proto/Vtgate/MessageAckRequest.php b/php/src/Vitess/Proto/Vtgate/MessageAckRequest.php new file mode 100644 index 00000000000..77938d39391 --- /dev/null +++ b/php/src/Vitess/Proto/Vtgate/MessageAckRequest.php @@ -0,0 +1,239 @@ +number = 1; + $f->name = "caller_id"; + $f->type = \DrSlump\Protobuf::TYPE_MESSAGE; + $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; + $f->reference = '\Vitess\Proto\Vtrpc\CallerID'; + $descriptor->addField($f); + + // OPTIONAL STRING keyspace = 2 + $f = new \DrSlump\Protobuf\Field(); + $f->number = 2; + $f->name = "keyspace"; + $f->type = \DrSlump\Protobuf::TYPE_STRING; + $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; + $descriptor->addField($f); + + // OPTIONAL STRING name = 3 + $f = new \DrSlump\Protobuf\Field(); + $f->number = 3; + $f->name = "name"; + $f->type = \DrSlump\Protobuf::TYPE_STRING; + $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; + $descriptor->addField($f); + + // REPEATED MESSAGE ids = 4 + $f = new \DrSlump\Protobuf\Field(); + $f->number = 4; + $f->name = "ids"; + $f->type = \DrSlump\Protobuf::TYPE_MESSAGE; + $f->rule = \DrSlump\Protobuf::RULE_REPEATED; + $f->reference = '\Vitess\Proto\Query\Value'; + $descriptor->addField($f); + + foreach (self::$__extensions as $cb) { + $descriptor->addField($cb(), true); + } + + return $descriptor; + } + + /** + * Check if has a value + * + * @return boolean 
+ */ + public function hasCallerId(){ + return $this->_has(1); + } + + /** + * Clear value + * + * @return \Vitess\Proto\Vtgate\MessageAckRequest + */ + public function clearCallerId(){ + return $this->_clear(1); + } + + /** + * Get value + * + * @return \Vitess\Proto\Vtrpc\CallerID + */ + public function getCallerId(){ + return $this->_get(1); + } + + /** + * Set value + * + * @param \Vitess\Proto\Vtrpc\CallerID $value + * @return \Vitess\Proto\Vtgate\MessageAckRequest + */ + public function setCallerId(\Vitess\Proto\Vtrpc\CallerID $value){ + return $this->_set(1, $value); + } + + /** + * Check if has a value + * + * @return boolean + */ + public function hasKeyspace(){ + return $this->_has(2); + } + + /** + * Clear value + * + * @return \Vitess\Proto\Vtgate\MessageAckRequest + */ + public function clearKeyspace(){ + return $this->_clear(2); + } + + /** + * Get value + * + * @return string + */ + public function getKeyspace(){ + return $this->_get(2); + } + + /** + * Set value + * + * @param string $value + * @return \Vitess\Proto\Vtgate\MessageAckRequest + */ + public function setKeyspace( $value){ + return $this->_set(2, $value); + } + + /** + * Check if has a value + * + * @return boolean + */ + public function hasName(){ + return $this->_has(3); + } + + /** + * Clear value + * + * @return \Vitess\Proto\Vtgate\MessageAckRequest + */ + public function clearName(){ + return $this->_clear(3); + } + + /** + * Get value + * + * @return string + */ + public function getName(){ + return $this->_get(3); + } + + /** + * Set value + * + * @param string $value + * @return \Vitess\Proto\Vtgate\MessageAckRequest + */ + public function setName( $value){ + return $this->_set(3, $value); + } + + /** + * Check if has a value + * + * @return boolean + */ + public function hasIds(){ + return $this->_has(4); + } + + /** + * Clear value + * + * @return \Vitess\Proto\Vtgate\MessageAckRequest + */ + public function clearIds(){ + return $this->_clear(4); + } + + /** + * Get value + * 
+ * @param int $idx + * @return \Vitess\Proto\Query\Value + */ + public function getIds($idx = NULL){ + return $this->_get(4, $idx); + } + + /** + * Set value + * + * @param \Vitess\Proto\Query\Value $value + * @return \Vitess\Proto\Vtgate\MessageAckRequest + */ + public function setIds(\Vitess\Proto\Query\Value $value, $idx = NULL){ + return $this->_set(4, $value, $idx); + } + + /** + * Get all elements of + * + * @return \Vitess\Proto\Query\Value[] + */ + public function getIdsList(){ + return $this->_get(4); + } + + /** + * Add a new element to + * + * @param \Vitess\Proto\Query\Value $value + * @return \Vitess\Proto\Vtgate\MessageAckRequest + */ + public function addIds(\Vitess\Proto\Query\Value $value){ + return $this->_add(4, $value); + } + } +} + diff --git a/php/src/Vitess/Proto/Vtgate/MessageStreamRequest.php b/php/src/Vitess/Proto/Vtgate/MessageStreamRequest.php new file mode 100644 index 00000000000..34b8da745ab --- /dev/null +++ b/php/src/Vitess/Proto/Vtgate/MessageStreamRequest.php @@ -0,0 +1,267 @@ +number = 1; + $f->name = "caller_id"; + $f->type = \DrSlump\Protobuf::TYPE_MESSAGE; + $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; + $f->reference = '\Vitess\Proto\Vtrpc\CallerID'; + $descriptor->addField($f); + + // OPTIONAL STRING keyspace = 2 + $f = new \DrSlump\Protobuf\Field(); + $f->number = 2; + $f->name = "keyspace"; + $f->type = \DrSlump\Protobuf::TYPE_STRING; + $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; + $descriptor->addField($f); + + // OPTIONAL STRING shard = 3 + $f = new \DrSlump\Protobuf\Field(); + $f->number = 3; + $f->name = "shard"; + $f->type = \DrSlump\Protobuf::TYPE_STRING; + $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; + $descriptor->addField($f); + + // OPTIONAL MESSAGE key_range = 4 + $f = new \DrSlump\Protobuf\Field(); + $f->number = 4; + $f->name = "key_range"; + $f->type = \DrSlump\Protobuf::TYPE_MESSAGE; + $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; + $f->reference = '\Vitess\Proto\Topodata\KeyRange'; + 
$descriptor->addField($f); + + // OPTIONAL STRING name = 5 + $f = new \DrSlump\Protobuf\Field(); + $f->number = 5; + $f->name = "name"; + $f->type = \DrSlump\Protobuf::TYPE_STRING; + $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; + $descriptor->addField($f); + + foreach (self::$__extensions as $cb) { + $descriptor->addField($cb(), true); + } + + return $descriptor; + } + + /** + * Check if has a value + * + * @return boolean + */ + public function hasCallerId(){ + return $this->_has(1); + } + + /** + * Clear value + * + * @return \Vitess\Proto\Vtgate\MessageStreamRequest + */ + public function clearCallerId(){ + return $this->_clear(1); + } + + /** + * Get value + * + * @return \Vitess\Proto\Vtrpc\CallerID + */ + public function getCallerId(){ + return $this->_get(1); + } + + /** + * Set value + * + * @param \Vitess\Proto\Vtrpc\CallerID $value + * @return \Vitess\Proto\Vtgate\MessageStreamRequest + */ + public function setCallerId(\Vitess\Proto\Vtrpc\CallerID $value){ + return $this->_set(1, $value); + } + + /** + * Check if has a value + * + * @return boolean + */ + public function hasKeyspace(){ + return $this->_has(2); + } + + /** + * Clear value + * + * @return \Vitess\Proto\Vtgate\MessageStreamRequest + */ + public function clearKeyspace(){ + return $this->_clear(2); + } + + /** + * Get value + * + * @return string + */ + public function getKeyspace(){ + return $this->_get(2); + } + + /** + * Set value + * + * @param string $value + * @return \Vitess\Proto\Vtgate\MessageStreamRequest + */ + public function setKeyspace( $value){ + return $this->_set(2, $value); + } + + /** + * Check if has a value + * + * @return boolean + */ + public function hasShard(){ + return $this->_has(3); + } + + /** + * Clear value + * + * @return \Vitess\Proto\Vtgate\MessageStreamRequest + */ + public function clearShard(){ + return $this->_clear(3); + } + + /** + * Get value + * + * @return string + */ + public function getShard(){ + return $this->_get(3); + } + + /** + * Set value + * 
+ * @param string $value + * @return \Vitess\Proto\Vtgate\MessageStreamRequest + */ + public function setShard( $value){ + return $this->_set(3, $value); + } + + /** + * Check if has a value + * + * @return boolean + */ + public function hasKeyRange(){ + return $this->_has(4); + } + + /** + * Clear value + * + * @return \Vitess\Proto\Vtgate\MessageStreamRequest + */ + public function clearKeyRange(){ + return $this->_clear(4); + } + + /** + * Get value + * + * @return \Vitess\Proto\Topodata\KeyRange + */ + public function getKeyRange(){ + return $this->_get(4); + } + + /** + * Set value + * + * @param \Vitess\Proto\Topodata\KeyRange $value + * @return \Vitess\Proto\Vtgate\MessageStreamRequest + */ + public function setKeyRange(\Vitess\Proto\Topodata\KeyRange $value){ + return $this->_set(4, $value); + } + + /** + * Check if has a value + * + * @return boolean + */ + public function hasName(){ + return $this->_has(5); + } + + /** + * Clear value + * + * @return \Vitess\Proto\Vtgate\MessageStreamRequest + */ + public function clearName(){ + return $this->_clear(5); + } + + /** + * Get value + * + * @return string + */ + public function getName(){ + return $this->_get(5); + } + + /** + * Set value + * + * @param string $value + * @return \Vitess\Proto\Vtgate\MessageStreamRequest + */ + public function setName( $value){ + return $this->_set(5, $value); + } + } +} + diff --git a/php/src/Vitess/Proto/Vtgateservice/VitessClient.php b/php/src/Vitess/Proto/Vtgateservice/VitessClient.php index 5dfea161b28..8afc80da968 100644 --- a/php/src/Vitess/Proto/Vtgateservice/VitessClient.php +++ b/php/src/Vitess/Proto/Vtgateservice/VitessClient.php @@ -105,6 +105,18 @@ public function Rollback(\Vitess\Proto\Vtgate\RollbackRequest $argument, $metada public function ResolveTransaction(\Vitess\Proto\Vtgate\ResolveTransactionRequest $argument, $metadata = array(), $options = array()) { return $this->_simpleRequest('/vtgateservice.Vitess/ResolveTransaction', $argument, 
'\Vitess\Proto\Vtgate\ResolveTransactionResponse::deserialize', $metadata, $options); } + /** + * @param Vitess\Proto\Vtgate\MessageStreamRequest $input + */ + public function MessageStream($argument, $metadata = array(), $options = array()) { + return $this->_serverStreamRequest('/vtgateservice.Vitess/MessageStream', $argument, '\Vitess\Proto\Query\MessageStreamResponse::deserialize', $metadata, $options); + } + /** + * @param Vitess\Proto\Vtgate\MessageAckRequest $input + */ + public function MessageAck(\Vitess\Proto\Vtgate\MessageAckRequest $argument, $metadata = array(), $options = array()) { + return $this->_simpleRequest('/vtgateservice.Vitess/MessageAck', $argument, '\Vitess\Proto\Query\MessageAckResponse::deserialize', $metadata, $options); + } /** * @param Vitess\Proto\Vtgate\SplitQueryRequest $input */ diff --git a/php/src/Vitess/Proto/Vtrpc/Code.php b/php/src/Vitess/Proto/Vtrpc/Code.php new file mode 100644 index 00000000000..c5d4ebb4d99 --- /dev/null +++ b/php/src/Vitess/Proto/Vtrpc/Code.php @@ -0,0 +1,26 @@ +number = 1; - $f->name = "code"; + $f->name = "legacy_code"; $f->type = \DrSlump\Protobuf::TYPE_ENUM; $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; - $f->reference = '\Vitess\Proto\Vtrpc\ErrorCode'; + $f->reference = '\Vitess\Proto\Vtrpc\LegacyErrorCode'; $descriptor->addField($f); // OPTIONAL STRING message = 2 @@ -37,6 +40,15 @@ public static function descriptor() $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; $descriptor->addField($f); + // OPTIONAL ENUM code = 3 + $f = new \DrSlump\Protobuf\Field(); + $f->number = 3; + $f->name = "code"; + $f->type = \DrSlump\Protobuf::TYPE_ENUM; + $f->rule = \DrSlump\Protobuf::RULE_OPTIONAL; + $f->reference = '\Vitess\Proto\Vtrpc\Code'; + $descriptor->addField($f); + foreach (self::$__extensions as $cb) { $descriptor->addField($cb(), true); } @@ -45,39 +57,39 @@ public static function descriptor() } /** - * Check if has a value + * Check if has a value * * @return boolean */ - public function hasCode(){ + public 
function hasLegacyCode(){ return $this->_has(1); } /** - * Clear value + * Clear value * * @return \Vitess\Proto\Vtrpc\RPCError */ - public function clearCode(){ + public function clearLegacyCode(){ return $this->_clear(1); } /** - * Get value + * Get value * - * @return int - \Vitess\Proto\Vtrpc\ErrorCode + * @return int - \Vitess\Proto\Vtrpc\LegacyErrorCode */ - public function getCode(){ + public function getLegacyCode(){ return $this->_get(1); } /** - * Set value + * Set value * - * @param int - \Vitess\Proto\Vtrpc\ErrorCode $value + * @param int - \Vitess\Proto\Vtrpc\LegacyErrorCode $value * @return \Vitess\Proto\Vtrpc\RPCError */ - public function setCode( $value){ + public function setLegacyCode( $value){ return $this->_set(1, $value); } @@ -117,6 +129,43 @@ public function getMessage(){ public function setMessage( $value){ return $this->_set(2, $value); } + + /** + * Check if has a value + * + * @return boolean + */ + public function hasCode(){ + return $this->_has(3); + } + + /** + * Clear value + * + * @return \Vitess\Proto\Vtrpc\RPCError + */ + public function clearCode(){ + return $this->_clear(3); + } + + /** + * Get value + * + * @return int - \Vitess\Proto\Vtrpc\Code + */ + public function getCode(){ + return $this->_get(3); + } + + /** + * Set value + * + * @param int - \Vitess\Proto\Vtrpc\Code $value + * @return \Vitess\Proto\Vtrpc\RPCError + */ + public function setCode( $value){ + return $this->_set(3, $value); + } } } diff --git a/php/src/Vitess/ProtoUtils.php b/php/src/Vitess/ProtoUtils.php index 699827fae9c..608df1a3ede 100644 --- a/php/src/Vitess/ProtoUtils.php +++ b/php/src/Vitess/ProtoUtils.php @@ -36,19 +36,37 @@ public static function checkError($response) $error = $response->getError(); if ($error) { switch ($error->getCode()) { - case ErrorCode::SUCCESS: + case ErrorCode::OK: break; - case ErrorCode::BAD_INPUT: + case ErrorCode::INVALID_ARGUMENT: throw new Error\BadInput($error->getMessage()); case ErrorCode::DEADLINE_EXCEEDED: throw 
new Error\DeadlineExceeded($error->getMessage()); - case ErrorCode::INTEGRITY_ERROR: + case ErrorCode::ALREADY_EXISTS: throw new Error\Integrity($error->getMessage()); - case ErrorCode::TRANSIENT_ERROR: + case ErrorCode::UNAVAILABLE: throw new Error\Transient($error->getMessage()); case ErrorCode::UNAUTHENTICATED: throw new Error\Unauthenticated($error->getMessage()); - case ErrorCode::NOT_IN_TX: + case ErrorCode::ABORTED: + throw new Error\Aborted($error->getMessage()); + default: + throw new Exception($error->getCode() . ': ' . $error->getMessage()); + } + switch ($error->getLegacyCode()) { + case ErrorCode::SUCCESS: + break; + case ErrorCode::BAD_INPUT_LEGACY: + throw new Error\BadInput($error->getMessage()); + case ErrorCode::DEADLINE_EXCEEDED_LEGACY: + throw new Error\DeadlineExceeded($error->getMessage()); + case ErrorCode::INTEGRITY_ERROR_LEGACY: + throw new Error\Integrity($error->getMessage()); + case ErrorCode::TRANSIENT_ERROR_LEGACY: + throw new Error\Transient($error->getMessage()); + case ErrorCode::UNAUTHENTICATED_LEGACY: + throw new Error\Unauthenticated($error->getMessage()); + case ErrorCode::NOT_IN_TX_LEGACY: throw new Error\Aborted($error->getMessage()); default: throw new Exception($error->getCode() . ': ' . $error->getMessage()); diff --git a/proto/vtrpc.proto b/proto/vtrpc.proto index 2f2c9501975..ca516f20507 100644 --- a/proto/vtrpc.proto +++ b/proto/vtrpc.proto @@ -33,8 +33,9 @@ message CallerID { string subcomponent = 3; } -// Code represnts canonical error codes. The names and numbers must match -// the ones defined by grpc: https://godoc.org/google.golang.org/grpc/codes. +// Code represnts canonical error codes. The names, numbers and comments +// must match the ones defined by grpc: +// https://godoc.org/google.golang.org/grpc/codes. enum Code { // OK is returned on success. OK = 0; @@ -136,7 +137,7 @@ enum Code { // supported/enabled in this service. UNIMPLEMENTED = 12; - // INTERNAL errors. 
Means some invariants expected by underlying + // INTERNAL errors. Means some invariants expected by underlying // system has been broken. If you see one of these errors, // something is very broken. INTERNAL = 13; @@ -153,7 +154,7 @@ enum Code { DATA_LOSS = 15; } -// ErrorCode is the enum values for Errors. This type is deprecated. +// LegacyErrorCode is the enum values for Errors. This type is deprecated. // Use Code instead. Background: In the initial design, we thought // that we may end up with a different list of canonical error codes // than the ones defined by grpc. In hindisght, we realize that @@ -162,31 +163,31 @@ enum Code { // favor of the new Code that matches exactly what grpc defines. // Some names below have a _LEGACY suffix. This is to prevent // name collisions with Code. -enum ErrorCode { - // SUCCESS is returned from a successful call. - SUCCESS = 0; +enum LegacyErrorCode { + // SUCCESS_LEGACY is returned from a successful call. + SUCCESS_LEGACY = 0; // CANCELLED_LEGACY means that the context was cancelled (and noticed in the app layer, // as opposed to the RPC layer). CANCELLED_LEGACY = 1; - // UNKNOWN_ERROR includes: + // UNKNOWN_ERROR_LEGACY includes: // 1. MySQL error codes that we don't explicitly handle. // 2. MySQL response that wasn't as expected. For example, we might expect a MySQL // timestamp to be returned in a particular way, but it wasn't. // 3. Anything else that doesn't fall into a different bucket. - UNKNOWN_ERROR = 2; + UNKNOWN_ERROR_LEGACY = 2; - // BAD_INPUT is returned when an end-user either sends SQL that couldn't be parsed correctly, + // BAD_INPUT_LEGACY is returned when an end-user either sends SQL that couldn't be parsed correctly, // or tries a query that isn't supported by Vitess. - BAD_INPUT = 3; + BAD_INPUT_LEGACY = 3; // DEADLINE_EXCEEDED_LEGACY is returned when an action is taking longer than a given timeout. 
DEADLINE_EXCEEDED_LEGACY = 4; - // INTEGRITY_ERROR is returned on integrity error from MySQL, usually due to + // INTEGRITY_ERROR_LEGACY is returned on integrity error from MySQL, usually due to // duplicate primary keys. - INTEGRITY_ERROR = 5; + INTEGRITY_ERROR_LEGACY = 5; // PERMISSION_DENIED_LEGACY errors are returned when a user requests access to something // that they don't have permissions for. @@ -201,7 +202,7 @@ enum ErrorCode { // 2. Query is killed due to it taking too long. RESOURCE_EXHAUSTED_LEGACY = 7; - // QUERY_NOT_SERVED means that a query could not be served right now. + // QUERY_NOT_SERVED_LEGACY means that a query could not be served right now. // Client can interpret it as: "the tablet that you sent this query to cannot // serve the query right now, try a different tablet or try again later." // This could be due to various reasons: QueryService is not serving, should @@ -209,34 +210,24 @@ enum ErrorCode { // Clients that receive this error should usually retry the query, but after taking // the appropriate steps to make sure that the query will get sent to the correct // tablet. - QUERY_NOT_SERVED = 8; - - // NOT_IN_TX means that we're not currently in a transaction, but we should be. - NOT_IN_TX = 9; - - // INTERNAL_ERRORs are problems that only the server can fix, not the client. - // These errors are not due to a query itself, but rather due to the state of - // the system. - // Generally, we don't expect the errors to go away by themselves, but they - // may go away after human intervention. - // Examples of scenarios where INTERNAL_ERROR is returned: - // 1. Something is not configured correctly internally. - // 2. A necessary resource is not available, and we don't expect it to become available by itself. - // 3. A sanity check fails. - // 4. Some other internal error occurs. - // Clients should not retry immediately, as there is little chance of success. 
- // However, it's acceptable for retries to happen internally, for example to - // multiple backends, in case only a subset of backend are not functional. - INTERNAL_ERROR = 10; - - // TRANSIENT_ERROR is used for when there is some error that we expect we can + QUERY_NOT_SERVED_LEGACY = 8; + + // NOT_IN_TX_LEGACY means that we're not currently in a transaction, but we should be. + NOT_IN_TX_LEGACY = 9; + + // INTERNAL_ERROR_LEGACY means some invariants expected by underlying + // system has been broken. If you see one of these errors, + // something is very broken. + INTERNAL_ERROR_LEGACY = 10; + + // TRANSIENT_ERROR_LEGACY is used for when there is some error that we expect we can // recover from automatically - often due to a resource limit temporarily being // reached. Retrying this error, with an exponential backoff, should succeed. // Clients should be able to successfully retry the query on the same backends. // Examples of things that can trigger this error: // 1. Query has been throttled // 2. VtGate could have request backlog - TRANSIENT_ERROR = 11; + TRANSIENT_ERROR_LEGACY = 11; // UNAUTHENTICATED_LEGACY errors are returned when a user requests access to something, // and we're unable to verify the user's authentication. @@ -248,7 +239,7 @@ enum ErrorCode { // We use this so the clients don't have to parse the error messages, // but instead can depend on the value of the code. 
message RPCError { - ErrorCode legacy_code = 1; + LegacyErrorCode legacy_code = 1; string message = 2; Code code = 3; } diff --git a/py/vtdb/proto3_encoding.py b/py/vtdb/proto3_encoding.py index e537a6f0c5e..e734cc129e3 100644 --- a/py/vtdb/proto3_encoding.py +++ b/py/vtdb/proto3_encoding.py @@ -14,6 +14,7 @@ from vtproto import query_pb2 from vtproto import topodata_pb2 from vtproto import vtgate_pb2 +from vtproto import vtrpc_pb2 from vtdb import field_types from vtdb import keyrange_constants @@ -56,6 +57,24 @@ # query_pb2.TUPLE: no conversion } +# legacy_code_to_code_map maps legacy error codes +# to the new code that matches grpc's cannonical error codes. +legacy_code_to_code_map = { + vtrpc_pb2.SUCCESS_LEGACY: vtrpc_pb2.OK, + vtrpc_pb2.CANCELLED_LEGACY: vtrpc_pb2.CANCELED, + vtrpc_pb2.UNKNOWN_ERROR_LEGACY: vtrpc_pb2.UNKNOWN, + vtrpc_pb2.BAD_INPUT_LEGACY: vtrpc_pb2.INVALID_ARGUMENT, + vtrpc_pb2.DEADLINE_EXCEEDED_LEGACY: vtrpc_pb2.DEADLINE_EXCEEDED, + vtrpc_pb2.INTEGRITY_ERROR_LEGACY: vtrpc_pb2.ALREADY_EXISTS, + vtrpc_pb2.PERMISSION_DENIED_LEGACY: vtrpc_pb2.PERMISSION_DENIED, + vtrpc_pb2.RESOURCE_EXHAUSTED_LEGACY: vtrpc_pb2.RESOURCE_EXHAUSTED, + vtrpc_pb2.QUERY_NOT_SERVED_LEGACY: vtrpc_pb2.FAILED_PRECONDITION, + vtrpc_pb2.NOT_IN_TX_LEGACY: vtrpc_pb2.ABORTED, + vtrpc_pb2.INTERNAL_ERROR_LEGACY: vtrpc_pb2.INTERNAL, + vtrpc_pb2.TRANSIENT_ERROR_LEGACY: vtrpc_pb2.UNAVAILABLE, + vtrpc_pb2.UNAUTHENTICATED_LEGACY: vtrpc_pb2.UNAUTHENTICATED, +} + INT_UPPERBOUND_PLUS_ONE = 1<<63 @@ -257,6 +276,11 @@ def _extract_rpc_error(self, exec_method, error): """ if error.code: raise vtgate_utils.VitessError(exec_method, error.code, error.message) + elif error.legacy_code: + raise vtgate_utils.VitessError( + exec_method, + legacy_code_to_code_map[error.legacy_code], + error.message) def build_conversions(self, qr_fields): """Builds an array of fields and conversions from a result fields. 
diff --git a/py/vtdb/vtgate_utils.py b/py/vtdb/vtgate_utils.py index c6b81d6bf24..8b7ebede714 100644 --- a/py/vtdb/vtgate_utils.py +++ b/py/vtdb/vtgate_utils.py @@ -102,7 +102,7 @@ def __init__(self, method_name, code, message): Args: method_name: RPC method name, as a string, that was called. - code: integer that represents the error code. From vtrpc_pb2.ErrorCode. + code: integer that represents the error code. From vtrpc_pb2.Code. message: string representation of the error. """ self.method_name = method_name @@ -114,7 +114,7 @@ def __init__(self, method_name, code, message): def __str__(self): """Print the error nicely, converting the proto error enum to its name.""" return '%s returned %s with message: %s' % ( - self.method_name, vtrpc_pb2.ErrorCode.Name(self.code), self.message) + self.method_name, vtrpc_pb2.Code.Name(self.code), self.message) def convert_to_dbexception(self, args): """Converts from a VitessError to the appropriate dbexceptions class. @@ -128,13 +128,13 @@ def convert_to_dbexception(self, args): # FIXME(alainjobart): this is extremely confusing: self.message is only # used for integrity errors, and nothing else. The other cases # have to provide the message in the args. - if self.code == vtrpc_pb2.TRANSIENT_ERROR: + if self.code == vtrpc_pb2.UNAVAILABLE: if throttler_err_re.search(self.message): return dbexceptions.ThrottledError(args) return dbexceptions.TransientError(args) - if self.code == vtrpc_pb2.QUERY_NOT_SERVED: + if self.code == vtrpc_pb2.FAILED_PRECONDITION: return dbexceptions.QueryNotServed(args) - if self.code == vtrpc_pb2.INTEGRITY_ERROR: + if self.code == vtrpc_pb2.ALREADY_EXISTS: # Prune the error message to truncate after the mysql errno, since # the error message may contain the query string with bind variables. 
msg = self.message.lower() @@ -142,7 +142,7 @@ def convert_to_dbexception(self, args): pruned_msg = msg[:msg.find(parts[2])] new_args = (pruned_msg,) + tuple(args[1:]) return dbexceptions.IntegrityError(new_args) - if self.code == vtrpc_pb2.BAD_INPUT: + if self.code == vtrpc_pb2.INVALID_ARGUMENT: return dbexceptions.ProgrammingError(args) return dbexceptions.DatabaseError(args) diff --git a/py/vtproto/vtrpc_pb2.py b/py/vtproto/vtrpc_pb2.py index 3abc96acc9d..a23c682851c 100644 --- a/py/vtproto/vtrpc_pb2.py +++ b/py/vtproto/vtrpc_pb2.py @@ -20,7 +20,7 @@ name='vtrpc.proto', package='vtrpc', syntax='proto3', - serialized_pb=_b('\n\x0bvtrpc.proto\x12\x05vtrpc\"F\n\x08\x43\x61llerID\x12\x11\n\tprincipal\x18\x01 \x01(\t\x12\x11\n\tcomponent\x18\x02 \x01(\t\x12\x14\n\x0csubcomponent\x18\x03 \x01(\t\"]\n\x08RPCError\x12%\n\x0blegacy_code\x18\x01 \x01(\x0e\x32\x10.vtrpc.ErrorCode\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x19\n\x04\x63ode\x18\x03 \x01(\x0e\x32\x0b.vtrpc.Code*\xb6\x02\n\x04\x43ode\x12\x06\n\x02OK\x10\x00\x12\x0c\n\x08\x43\x41NCELED\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x14\n\x10INVALID_ARGUMENT\x10\x03\x12\x15\n\x11\x44\x45\x41\x44LINE_EXCEEDED\x10\x04\x12\r\n\tNOT_FOUND\x10\x05\x12\x12\n\x0e\x41LREADY_EXISTS\x10\x06\x12\x15\n\x11PERMISSION_DENIED\x10\x07\x12\x13\n\x0fUNAUTHENTICATED\x10\x10\x12\x16\n\x12RESOURCE_EXHAUSTED\x10\x08\x12\x17\n\x13\x46\x41ILED_PRECONDITION\x10\t\x12\x0b\n\x07\x41\x42ORTED\x10\n\x12\x10\n\x0cOUT_OF_RANGE\x10\x0b\x12\x11\n\rUNIMPLEMENTED\x10\x0c\x12\x0c\n\x08INTERNAL\x10\r\x12\x0f\n\x0bUNAVAILABLE\x10\x0e\x12\r\n\tDATA_LOSS\x10\x0f*\xaa\x02\n\tErrorCode\x12\x0b\n\x07SUCCESS\x10\x00\x12\x14\n\x10\x43\x41NCELLED_LEGACY\x10\x01\x12\x11\n\rUNKNOWN_ERROR\x10\x02\x12\r\n\tBAD_INPUT\x10\x03\x12\x1c\n\x18\x44\x45\x41\x44LINE_EXCEEDED_LEGACY\x10\x04\x12\x13\n\x0fINTEGRITY_ERROR\x10\x05\x12\x1c\n\x18PERMISSION_DENIED_LEGACY\x10\x06\x12\x1d\n\x19RESOURCE_EXHAUSTED_LEGACY\x10\x07\x12\x14\n\x10QUERY_NOT_SERVED\x10\x08\x12\r\n\tNOT_IN_TX\x10\t
\x12\x12\n\x0eINTERNAL_ERROR\x10\n\x12\x13\n\x0fTRANSIENT_ERROR\x10\x0b\x12\x1a\n\x16UNAUTHENTICATED_LEGACY\x10\x0c\x42\x1a\n\x18\x63om.youtube.vitess.protob\x06proto3') + serialized_pb=_b('\n\x0bvtrpc.proto\x12\x05vtrpc\"F\n\x08\x43\x61llerID\x12\x11\n\tprincipal\x18\x01 \x01(\t\x12\x11\n\tcomponent\x18\x02 \x01(\t\x12\x14\n\x0csubcomponent\x18\x03 \x01(\t\"c\n\x08RPCError\x12+\n\x0blegacy_code\x18\x01 \x01(\x0e\x32\x16.vtrpc.LegacyErrorCode\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x19\n\x04\x63ode\x18\x03 \x01(\x0e\x32\x0b.vtrpc.Code*\xb6\x02\n\x04\x43ode\x12\x06\n\x02OK\x10\x00\x12\x0c\n\x08\x43\x41NCELED\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x14\n\x10INVALID_ARGUMENT\x10\x03\x12\x15\n\x11\x44\x45\x41\x44LINE_EXCEEDED\x10\x04\x12\r\n\tNOT_FOUND\x10\x05\x12\x12\n\x0e\x41LREADY_EXISTS\x10\x06\x12\x15\n\x11PERMISSION_DENIED\x10\x07\x12\x13\n\x0fUNAUTHENTICATED\x10\x10\x12\x16\n\x12RESOURCE_EXHAUSTED\x10\x08\x12\x17\n\x13\x46\x41ILED_PRECONDITION\x10\t\x12\x0b\n\x07\x41\x42ORTED\x10\n\x12\x10\n\x0cOUT_OF_RANGE\x10\x0b\x12\x11\n\rUNIMPLEMENTED\x10\x0c\x12\x0c\n\x08INTERNAL\x10\r\x12\x0f\n\x0bUNAVAILABLE\x10\x0e\x12\r\n\tDATA_LOSS\x10\x0f*\xe8\x02\n\x0fLegacyErrorCode\x12\x12\n\x0eSUCCESS_LEGACY\x10\x00\x12\x14\n\x10\x43\x41NCELLED_LEGACY\x10\x01\x12\x18\n\x14UNKNOWN_ERROR_LEGACY\x10\x02\x12\x14\n\x10\x42\x41\x44_INPUT_LEGACY\x10\x03\x12\x1c\n\x18\x44\x45\x41\x44LINE_EXCEEDED_LEGACY\x10\x04\x12\x1a\n\x16INTEGRITY_ERROR_LEGACY\x10\x05\x12\x1c\n\x18PERMISSION_DENIED_LEGACY\x10\x06\x12\x1d\n\x19RESOURCE_EXHAUSTED_LEGACY\x10\x07\x12\x1b\n\x17QUERY_NOT_SERVED_LEGACY\x10\x08\x12\x14\n\x10NOT_IN_TX_LEGACY\x10\t\x12\x19\n\x15INTERNAL_ERROR_LEGACY\x10\n\x12\x1a\n\x16TRANSIENT_ERROR_LEGACY\x10\x0b\x12\x1a\n\x16UNAUTHENTICATED_LEGACY\x10\x0c\x42\x1a\n\x18\x63om.youtube.vitess.protob\x06proto3') ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) @@ -101,20 +101,20 @@ ], containing_type=None, options=None, - serialized_start=190, - serialized_end=500, + serialized_start=196, + 
serialized_end=506, ) _sym_db.RegisterEnumDescriptor(_CODE) Code = enum_type_wrapper.EnumTypeWrapper(_CODE) -_ERRORCODE = _descriptor.EnumDescriptor( - name='ErrorCode', - full_name='vtrpc.ErrorCode', +_LEGACYERRORCODE = _descriptor.EnumDescriptor( + name='LegacyErrorCode', + full_name='vtrpc.LegacyErrorCode', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( - name='SUCCESS', index=0, number=0, + name='SUCCESS_LEGACY', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( @@ -122,11 +122,11 @@ options=None, type=None), _descriptor.EnumValueDescriptor( - name='UNKNOWN_ERROR', index=2, number=2, + name='UNKNOWN_ERROR_LEGACY', index=2, number=2, options=None, type=None), _descriptor.EnumValueDescriptor( - name='BAD_INPUT', index=3, number=3, + name='BAD_INPUT_LEGACY', index=3, number=3, options=None, type=None), _descriptor.EnumValueDescriptor( @@ -134,7 +134,7 @@ options=None, type=None), _descriptor.EnumValueDescriptor( - name='INTEGRITY_ERROR', index=5, number=5, + name='INTEGRITY_ERROR_LEGACY', index=5, number=5, options=None, type=None), _descriptor.EnumValueDescriptor( @@ -146,19 +146,19 @@ options=None, type=None), _descriptor.EnumValueDescriptor( - name='QUERY_NOT_SERVED', index=8, number=8, + name='QUERY_NOT_SERVED_LEGACY', index=8, number=8, options=None, type=None), _descriptor.EnumValueDescriptor( - name='NOT_IN_TX', index=9, number=9, + name='NOT_IN_TX_LEGACY', index=9, number=9, options=None, type=None), _descriptor.EnumValueDescriptor( - name='INTERNAL_ERROR', index=10, number=10, + name='INTERNAL_ERROR_LEGACY', index=10, number=10, options=None, type=None), _descriptor.EnumValueDescriptor( - name='TRANSIENT_ERROR', index=11, number=11, + name='TRANSIENT_ERROR_LEGACY', index=11, number=11, options=None, type=None), _descriptor.EnumValueDescriptor( @@ -168,12 +168,12 @@ ], containing_type=None, options=None, - serialized_start=503, - serialized_end=801, + serialized_start=509, + serialized_end=869, ) 
-_sym_db.RegisterEnumDescriptor(_ERRORCODE) +_sym_db.RegisterEnumDescriptor(_LEGACYERRORCODE) -ErrorCode = enum_type_wrapper.EnumTypeWrapper(_ERRORCODE) +LegacyErrorCode = enum_type_wrapper.EnumTypeWrapper(_LEGACYERRORCODE) OK = 0 CANCELED = 1 UNKNOWN = 2 @@ -191,18 +191,18 @@ INTERNAL = 13 UNAVAILABLE = 14 DATA_LOSS = 15 -SUCCESS = 0 +SUCCESS_LEGACY = 0 CANCELLED_LEGACY = 1 -UNKNOWN_ERROR = 2 -BAD_INPUT = 3 +UNKNOWN_ERROR_LEGACY = 2 +BAD_INPUT_LEGACY = 3 DEADLINE_EXCEEDED_LEGACY = 4 -INTEGRITY_ERROR = 5 +INTEGRITY_ERROR_LEGACY = 5 PERMISSION_DENIED_LEGACY = 6 RESOURCE_EXHAUSTED_LEGACY = 7 -QUERY_NOT_SERVED = 8 -NOT_IN_TX = 9 -INTERNAL_ERROR = 10 -TRANSIENT_ERROR = 11 +QUERY_NOT_SERVED_LEGACY = 8 +NOT_IN_TX_LEGACY = 9 +INTERNAL_ERROR_LEGACY = 10 +TRANSIENT_ERROR_LEGACY = 11 UNAUTHENTICATED_LEGACY = 12 @@ -293,15 +293,15 @@ oneofs=[ ], serialized_start=94, - serialized_end=187, + serialized_end=193, ) -_RPCERROR.fields_by_name['legacy_code'].enum_type = _ERRORCODE +_RPCERROR.fields_by_name['legacy_code'].enum_type = _LEGACYERRORCODE _RPCERROR.fields_by_name['code'].enum_type = _CODE DESCRIPTOR.message_types_by_name['CallerID'] = _CALLERID DESCRIPTOR.message_types_by_name['RPCError'] = _RPCERROR DESCRIPTOR.enum_types_by_name['Code'] = _CODE -DESCRIPTOR.enum_types_by_name['ErrorCode'] = _ERRORCODE +DESCRIPTOR.enum_types_by_name['LegacyErrorCode'] = _LEGACYERRORCODE CallerID = _reflection.GeneratedProtocolMessageType('CallerID', (_message.Message,), dict( DESCRIPTOR = _CALLERID, From f922caca0eea4b23c4c62113730753ea530561d8 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Sat, 18 Feb 2017 14:18:56 -0800 Subject: [PATCH 016/108] vterrors: changed tests to use new codes Tests were using fatal/retry style error handling. Those are now updated to use vtrpcpb.Code. Also fixed error code priorities in the error aggregation function. 
BUG=32851872 --- go/vt/tabletserver/sandboxconn/sandboxconn.go | 108 ++---------------- go/vt/vterrors/aggregate.go | 52 +++++---- go/vt/vterrors/grpc.go | 6 +- go/vt/vtgate/gateway/discoverygateway_test.go | 46 +++----- go/vt/vtgate/resolver_test.go | 32 +++--- go/vt/vtgate/router_dml_test.go | 13 ++- go/vt/vtgate/router_select_test.go | 43 +++---- go/vt/vtgate/scatter_conn.go | 2 +- go/vt/vtgate/scatter_conn_test.go | 16 +-- go/vt/vtgate/tx_conn_test.go | 8 +- go/vt/vtgate/vtgate_test.go | 72 +++++------- 11 files changed, 151 insertions(+), 247 deletions(-) diff --git a/go/vt/tabletserver/sandboxconn/sandboxconn.go b/go/vt/tabletserver/sandboxconn/sandboxconn.go index 3dad88a1414..738cbf03de0 100644 --- a/go/vt/tabletserver/sandboxconn/sandboxconn.go +++ b/go/vt/tabletserver/sandboxconn/sandboxconn.go @@ -26,19 +26,7 @@ type SandboxConn struct { tablet *topodatapb.Tablet // These errors work for all functions. - MustFailRetry int - MustFailFatal int - MustFailServer int - MustFailConn int - MustFailTxPool int - MustFailNotTx int - MustFailCanceled int - MustFailUnknownError int - MustFailDeadlineExceeded int - MustFailIntegrityError int - MustFailPermissionDenied int - MustFailTransientError int - MustFailUnauthenticated int + MustFailCodes map[vtrpcpb.Code]int // These errors are triggered only for specific functions. // For now these are just for the 2PC functions. @@ -95,100 +83,22 @@ var _ queryservice.QueryService = (*SandboxConn)(nil) // compile-time interface // NewSandboxConn returns a new SandboxConn targeted to the provided tablet. 
func NewSandboxConn(t *topodatapb.Tablet) *SandboxConn { return &SandboxConn{ - tablet: t, + tablet: t, + MustFailCodes: make(map[vtrpcpb.Code]int), } } func (sbc *SandboxConn) getError() error { - if sbc.MustFailRetry > 0 { - sbc.MustFailRetry-- - return &tabletconn.ServerError{ - Err: "retry: err", - ServerCode: vtrpcpb.Code_FAILED_PRECONDITION, - } - } - if sbc.MustFailFatal > 0 { - sbc.MustFailFatal-- - return &tabletconn.ServerError{ - Err: "fatal: err", - ServerCode: vtrpcpb.Code_INTERNAL, - } - } - if sbc.MustFailServer > 0 { - sbc.MustFailServer-- - return &tabletconn.ServerError{ - Err: "error: err", - ServerCode: vtrpcpb.Code_INVALID_ARGUMENT, - } - } - if sbc.MustFailConn > 0 { - sbc.MustFailConn-- - return tabletconn.OperationalError(fmt.Sprintf("error: conn")) - } - if sbc.MustFailTxPool > 0 { - sbc.MustFailTxPool-- - return &tabletconn.ServerError{ - Err: "tx_pool_full: err", - ServerCode: vtrpcpb.Code_RESOURCE_EXHAUSTED, - } - } - if sbc.MustFailNotTx > 0 { - sbc.MustFailNotTx-- - return &tabletconn.ServerError{ - Err: "not_in_tx: err", - ServerCode: vtrpcpb.Code_ABORTED, - } - } - if sbc.MustFailCanceled > 0 { - sbc.MustFailCanceled-- - return &tabletconn.ServerError{ - Err: "canceled: err", - ServerCode: vtrpcpb.Code_CANCELED, - } - } - if sbc.MustFailUnknownError > 0 { - sbc.MustFailUnknownError-- - return &tabletconn.ServerError{ - Err: "unknown error: err", - ServerCode: vtrpcpb.Code_UNKNOWN, - } - } - if sbc.MustFailDeadlineExceeded > 0 { - sbc.MustFailDeadlineExceeded-- - return &tabletconn.ServerError{ - Err: "deadline exceeded: err", - ServerCode: vtrpcpb.Code_DEADLINE_EXCEEDED, - } - } - if sbc.MustFailIntegrityError > 0 { - sbc.MustFailIntegrityError-- - return &tabletconn.ServerError{ - Err: "integrity error: err", - ServerCode: vtrpcpb.Code_ALREADY_EXISTS, + for code, count := range sbc.MustFailCodes { + if count == 0 { + continue } - } - if sbc.MustFailPermissionDenied > 0 { - sbc.MustFailPermissionDenied-- - return 
&tabletconn.ServerError{ - Err: "permission denied: err", - ServerCode: vtrpcpb.Code_PERMISSION_DENIED, - } - } - if sbc.MustFailTransientError > 0 { - sbc.MustFailTransientError-- + sbc.MustFailCodes[code] = count - 1 return &tabletconn.ServerError{ - Err: "transient error: err", - ServerCode: vtrpcpb.Code_UNAVAILABLE, + Err: fmt.Sprintf("%v error", code), + ServerCode: code, } } - if sbc.MustFailUnauthenticated > 0 { - sbc.MustFailUnauthenticated-- - return &tabletconn.ServerError{ - Err: "unauthenticated: err", - ServerCode: vtrpcpb.Code_UNAUTHENTICATED, - } - } - return nil } diff --git a/go/vt/vterrors/aggregate.go b/go/vt/vterrors/aggregate.go index ae235c19c0e..d82214bdf1c 100644 --- a/go/vt/vterrors/aggregate.go +++ b/go/vt/vterrors/aggregate.go @@ -15,35 +15,47 @@ import ( // errors, which of the errors is the most likely to give the user useful information // about why the query failed and how they should proceed? const ( - PrioritySuccess = iota - PriorityTransientError - PriorityQueryNotServed - PriorityDeadlineExceeded - PriorityCancelled - PriorityIntegrityError - PriorityNotInTx - PriorityUnknownError - PriorityInternalError + // Informational errors. + PriorityOK = iota + PriorityCanceled + PriorityAlreadyExists + PriorityOutOfRange + // Potentially retryable errors. + PriorityUnavailable + PriorityFailedPrecondition PriorityResourceExhausted + PriorityDeadlineExceeded + PriorityAborted + // Permanent errors. + PriorityUnknown PriorityUnauthenticated PriorityPermissionDenied - PriorityBadInput + PriorityInvalidArgument + PriorityNotFound + PriorityUnimplemented + // Serious errors. 
+ PriorityInternal + PriorityDataLoss ) var errorPriorities = map[vtrpcpb.Code]int{ - vtrpcpb.Code_OK: PrioritySuccess, - vtrpcpb.Code_CANCELED: PriorityCancelled, - vtrpcpb.Code_UNKNOWN: PriorityUnknownError, - vtrpcpb.Code_INVALID_ARGUMENT: PriorityBadInput, + vtrpcpb.Code_OK: PriorityOK, + vtrpcpb.Code_CANCELED: PriorityCanceled, + vtrpcpb.Code_UNKNOWN: PriorityUnknown, + vtrpcpb.Code_INVALID_ARGUMENT: PriorityInvalidArgument, vtrpcpb.Code_DEADLINE_EXCEEDED: PriorityDeadlineExceeded, - vtrpcpb.Code_ALREADY_EXISTS: PriorityIntegrityError, + vtrpcpb.Code_NOT_FOUND: PriorityNotFound, + vtrpcpb.Code_ALREADY_EXISTS: PriorityAlreadyExists, vtrpcpb.Code_PERMISSION_DENIED: PriorityPermissionDenied, - vtrpcpb.Code_RESOURCE_EXHAUSTED: PriorityResourceExhausted, - vtrpcpb.Code_FAILED_PRECONDITION: PriorityQueryNotServed, - vtrpcpb.Code_ABORTED: PriorityNotInTx, - vtrpcpb.Code_INTERNAL: PriorityInternalError, - vtrpcpb.Code_UNAVAILABLE: PriorityTransientError, vtrpcpb.Code_UNAUTHENTICATED: PriorityUnauthenticated, + vtrpcpb.Code_RESOURCE_EXHAUSTED: PriorityResourceExhausted, + vtrpcpb.Code_FAILED_PRECONDITION: PriorityFailedPrecondition, + vtrpcpb.Code_ABORTED: PriorityAborted, + vtrpcpb.Code_OUT_OF_RANGE: PriorityOutOfRange, + vtrpcpb.Code_UNIMPLEMENTED: PriorityUnimplemented, + vtrpcpb.Code_INTERNAL: PriorityInternal, + vtrpcpb.Code_UNAVAILABLE: PriorityUnavailable, + vtrpcpb.Code_DATA_LOSS: PriorityDataLoss, } // AggregateVtGateErrorCodes aggregates a list of errors into a single diff --git a/go/vt/vterrors/grpc.go b/go/vt/vterrors/grpc.go index 255f1ae301e..a98da00e7ac 100644 --- a/go/vt/vterrors/grpc.go +++ b/go/vt/vterrors/grpc.go @@ -50,7 +50,8 @@ func CodeToLegacyErrorCode(code vtrpcpb.Code) vtrpcpb.LegacyErrorCode { case vtrpcpb.Code_INTERNAL: return vtrpcpb.LegacyErrorCode_INTERNAL_ERROR_LEGACY case vtrpcpb.Code_UNAVAILABLE: - return vtrpcpb.LegacyErrorCode_TRANSIENT_ERROR_LEGACY + // Legacy code assumes Unavailable errors are sent as Internal. 
+ return vtrpcpb.LegacyErrorCode_INTERNAL_ERROR_LEGACY case vtrpcpb.Code_UNAUTHENTICATED: return vtrpcpb.LegacyErrorCode_UNAUTHENTICATED_LEGACY default: @@ -82,7 +83,8 @@ func LegacyErrorCodeToCode(code vtrpcpb.LegacyErrorCode) vtrpcpb.Code { case vtrpcpb.LegacyErrorCode_NOT_IN_TX_LEGACY: return vtrpcpb.Code_ABORTED case vtrpcpb.LegacyErrorCode_INTERNAL_ERROR_LEGACY: - return vtrpcpb.Code_INTERNAL + // Legacy code sends internal error instead of Unavailable. + return vtrpcpb.Code_UNAVAILABLE case vtrpcpb.LegacyErrorCode_TRANSIENT_ERROR_LEGACY: return vtrpcpb.Code_UNAVAILABLE case vtrpcpb.LegacyErrorCode_UNAUTHENTICATED_LEGACY: diff --git a/go/vt/vtgate/gateway/discoverygateway_test.go b/go/vt/vtgate/gateway/discoverygateway_test.go index 830a4391709..95177217e62 100644 --- a/go/vt/vtgate/gateway/discoverygateway_test.go +++ b/go/vt/vtgate/gateway/discoverygateway_test.go @@ -152,13 +152,13 @@ func testDiscoveryGatewayGeneric(t *testing.T, streaming bool, f func(dg Gateway dg.tsc.ResetForTesting() sc1 := hc.AddTestTablet("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, true, 10, nil) sc2 := hc.AddTestTablet("cell", "1.1.1.1", 1002, keyspace, shard, tabletType, true, 10, nil) - sc1.MustFailRetry = 1 - sc2.MustFailRetry = 1 + sc1.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 + sc2.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 ep1 = sc1.Tablet() ep2 := sc2.Tablet() wants := map[string]int{ - fmt.Sprintf(`target: ks.0.replica, used tablet: (%+v), retry: err`, ep1): 0, - fmt.Sprintf(`target: ks.0.replica, used tablet: (%+v), retry: err`, ep2): 0, + fmt.Sprintf(`target: ks.0.replica, used tablet: (%+v), FAILED_PRECONDITION error`, ep1): 0, + fmt.Sprintf(`target: ks.0.replica, used tablet: (%+v), FAILED_PRECONDITION error`, ep2): 0, } err = f(dg, target) if _, ok := wants[fmt.Sprintf("%v", err)]; !ok { @@ -170,13 +170,13 @@ func testDiscoveryGatewayGeneric(t *testing.T, streaming bool, f func(dg Gateway dg.tsc.ResetForTesting() sc1 = 
hc.AddTestTablet("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, true, 10, nil) sc2 = hc.AddTestTablet("cell", "1.1.1.1", 1002, keyspace, shard, tabletType, true, 10, nil) - sc1.MustFailFatal = 1 - sc2.MustFailFatal = 1 + sc1.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 + sc2.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 ep1 = sc1.Tablet() ep2 = sc2.Tablet() wants = map[string]int{ - fmt.Sprintf(`target: ks.0.replica, used tablet: (%+v), fatal: err`, ep1): 0, - fmt.Sprintf(`target: ks.0.replica, used tablet: (%+v), fatal: err`, ep2): 0, + fmt.Sprintf(`target: ks.0.replica, used tablet: (%+v), FAILED_PRECONDITION error`, ep1): 0, + fmt.Sprintf(`target: ks.0.replica, used tablet: (%+v), FAILED_PRECONDITION error`, ep2): 0, } err = f(dg, target) if _, ok := wants[fmt.Sprintf("%v", err)]; !ok { @@ -187,22 +187,12 @@ func testDiscoveryGatewayGeneric(t *testing.T, streaming bool, f func(dg Gateway hc.Reset() dg.tsc.ResetForTesting() sc1 = hc.AddTestTablet("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, true, 10, nil) - sc1.MustFailServer = 1 + sc1.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 ep1 = sc1.Tablet() - want = fmt.Sprintf(`target: ks.0.replica, used tablet: (%+v), error: err`, ep1) + want = fmt.Sprintf(`target: ks.0.replica, used tablet: (%+v), INVALID_ARGUMENT error`, ep1) err = f(dg, target) verifyShardError(t, err, want, vtrpcpb.Code_INVALID_ARGUMENT) - // conn error - no retry - hc.Reset() - dg.tsc.ResetForTesting() - sc1 = hc.AddTestTablet("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, true, 10, nil) - sc1.MustFailConn = 1 - ep1 = sc1.Tablet() - want = fmt.Sprintf(`target: ks.0.replica, used tablet: (%+v), error: conn`, ep1) - err = f(dg, target) - verifyShardError(t, err, want, vtrpcpb.Code_UNKNOWN) - // no failure hc.Reset() dg.tsc.ResetForTesting() @@ -230,28 +220,28 @@ func testDiscoveryGatewayTransact(t *testing.T, streaming bool, f func(dg Gatewa dg.tsc.ResetForTesting() sc1 := hc.AddTestTablet("cell", "1.1.1.1", 
1001, keyspace, shard, tabletType, true, 10, nil) sc2 := hc.AddTestTablet("cell", "1.1.1.1", 1002, keyspace, shard, tabletType, true, 10, nil) - sc1.MustFailRetry = 1 - sc2.MustFailRetry = 1 + sc1.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 + sc2.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 ep1 := sc1.Tablet() ep2 := sc2.Tablet() wants := map[string]int{ - fmt.Sprintf(`target: ks.0.replica, used tablet: (%+v), retry: err`, ep1): 0, - fmt.Sprintf(`target: ks.0.replica, used tablet: (%+v), retry: err`, ep2): 0, + fmt.Sprintf(`target: ks.0.replica, used tablet: (%+v), FAILED_PRECONDITION error`, ep1): 0, + fmt.Sprintf(`target: ks.0.replica, used tablet: (%+v), FAILED_PRECONDITION error`, ep2): 0, } err := f(dg, target) if _, ok := wants[fmt.Sprintf("%v", err)]; !ok { t.Errorf("wanted error: %+v, got error: %v", wants, err) } - // conn error - no retry + // server error - no retry hc.Reset() dg.tsc.ResetForTesting() sc1 = hc.AddTestTablet("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, true, 10, nil) - sc1.MustFailConn = 1 + sc1.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 ep1 = sc1.Tablet() - want := fmt.Sprintf(`target: ks.0.replica, used tablet: (%+v), error: conn`, ep1) + want := fmt.Sprintf(`target: ks.0.replica, used tablet: (%+v), INVALID_ARGUMENT error`, ep1) err = f(dg, target) - verifyShardError(t, err, want, vtrpcpb.Code_UNKNOWN) + verifyShardError(t, err, want, vtrpcpb.Code_INVALID_ARGUMENT) } func verifyShardError(t *testing.T, err error, wantErr string, wantCode vtrpcpb.Code) { diff --git a/go/vt/vtgate/resolver_test.go b/go/vt/vtgate/resolver_test.go index 234982d3a55..321949f7c2e 100644 --- a/go/vt/vtgate/resolver_test.go +++ b/go/vt/vtgate/resolver_test.go @@ -201,11 +201,11 @@ func testResolverGeneric(t *testing.T, name string, action func(res *Resolver) ( hc.Reset() sbc0 = hc.AddTestTablet("aa", "-20", 1, name, "-20", topodatapb.TabletType_MASTER, true, 1, nil) sbc1 = hc.AddTestTablet("aa", "20-40", 1, name, "20-40", 
topodatapb.TabletType_MASTER, true, 1, nil) - sbc0.MustFailServer = 1 - sbc1.MustFailRetry = 1 + sbc0.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 + sbc1.MustFailCodes[vtrpcpb.Code_INTERNAL] = 1 _, err = action(res) - want1 := fmt.Sprintf("target: %s.-20.master, used tablet: (alias: hostname:\"-20\" port_map: keyspace:\"%s\" shard:\"-20\" type:MASTER ), error: err", name, name) - want2 := fmt.Sprintf("target: %s.20-40.master, used tablet: (alias: hostname:\"20-40\" port_map: keyspace:\"%s\" shard:\"20-40\" type:MASTER ), retry: err", name, name) + want1 := fmt.Sprintf("target: %s.-20.master, used tablet: (alias: hostname:\"-20\" port_map: keyspace:\"%s\" shard:\"-20\" type:MASTER ), INVALID_ARGUMENT error", name, name) + want2 := fmt.Sprintf("target: %s.20-40.master, used tablet: (alias: hostname:\"20-40\" port_map: keyspace:\"%s\" shard:\"20-40\" type:MASTER ), INTERNAL error", name, name) want := []string{want1, want2} sort.Strings(want) if err == nil { @@ -234,11 +234,11 @@ func testResolverGeneric(t *testing.T, name string, action func(res *Resolver) ( hc.Reset() sbc0 = hc.AddTestTablet("aa", "-20", 1, name, "-20", topodatapb.TabletType_MASTER, true, 1, nil) sbc1 = hc.AddTestTablet("aa", "20-40", 1, name, "20-40", topodatapb.TabletType_MASTER, true, 1, nil) - sbc0.MustFailRetry = 1 - sbc1.MustFailFatal = 1 + sbc0.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 + sbc1.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 _, err = action(res) - want1 = fmt.Sprintf("target: %s.-20.master, used tablet: (alias: hostname:\"-20\" port_map: keyspace:\"%s\" shard:\"-20\" type:MASTER ), retry: err", name, name) - want2 = fmt.Sprintf("target: %s.20-40.master, used tablet: (alias: hostname:\"20-40\" port_map: keyspace:\"%s\" shard:\"20-40\" type:MASTER ), fatal: err", name, name) + want1 = fmt.Sprintf("target: %s.-20.master, used tablet: (alias: hostname:\"-20\" port_map: keyspace:\"%s\" shard:\"-20\" type:MASTER ), FAILED_PRECONDITION error", name, name) + want2 = 
fmt.Sprintf("target: %s.20-40.master, used tablet: (alias: hostname:\"20-40\" port_map: keyspace:\"%s\" shard:\"20-40\" type:MASTER ), FAILED_PRECONDITION error", name, name) want = []string{want1, want2} sort.Strings(want) if err == nil { @@ -300,7 +300,7 @@ func testResolverGeneric(t *testing.T, name string, action func(res *Resolver) ( hc.Reset() sbc0 = hc.AddTestTablet("aa", "1.1.1.1", 1001, name, "-20", topodatapb.TabletType_MASTER, true, 1, nil) sbc1 = hc.AddTestTablet("aa", "1.1.1.1", 1002, name, "20-40", topodatapb.TabletType_MASTER, true, 1, nil) - sbc1.MustFailFatal = 1 + sbc1.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 i := 0 s.SrvKeyspaceCallback = func() { if i == 1 { @@ -332,7 +332,7 @@ func testResolverGeneric(t *testing.T, name string, action func(res *Resolver) ( hc.Reset() sbc0 = hc.AddTestTablet("aa", "1.1.1.1", 1001, name, "-20", topodatapb.TabletType_MASTER, true, 1, nil) sbc1 = hc.AddTestTablet("aa", "1.1.1.1", 1002, name, "20-40", topodatapb.TabletType_MASTER, true, 1, nil) - sbc1.MustFailRetry = 1 + sbc1.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 i = 0 s.SrvKeyspaceCallback = func() { if i == 1 { @@ -380,9 +380,9 @@ func testResolverStreamGeneric(t *testing.T, name string, action func(res *Resol hc.Reset() sbc0 = hc.AddTestTablet("aa", "-20", 1, name, "-20", topodatapb.TabletType_MASTER, true, 1, nil) hc.AddTestTablet("aa", "20-40", 1, name, "20-40", topodatapb.TabletType_MASTER, true, 1, nil) - sbc0.MustFailRetry = 1 + sbc0.MustFailCodes[vtrpcpb.Code_INTERNAL] = 1 _, err = action(res) - want := fmt.Sprintf("target: %s.-20.master, used tablet: (alias: hostname:\"-20\" port_map: keyspace:\"%s\" shard:\"-20\" type:MASTER ), retry: err", name, name) + want := fmt.Sprintf("target: %s.-20.master, used tablet: (alias: hostname:\"-20\" port_map: keyspace:\"%s\" shard:\"-20\" type:MASTER ), INTERNAL error", name, name) if err == nil || err.Error() != want { t.Errorf("want\n%s\ngot\n%v", want, err) } @@ -484,7 +484,7 @@ func 
TestResolverExecBatchReresolve(t *testing.T) { res := newTestResolver(hc, new(sandboxTopo), "aa") sbc := hc.AddTestTablet("aa", "0", 1, keyspace, "0", topodatapb.TabletType_MASTER, true, 1, nil) - sbc.MustFailRetry = 20 + sbc.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 20 callcount := 0 buildBatchRequest := func() (*scatterBatchRequest, error) { @@ -501,7 +501,7 @@ func TestResolverExecBatchReresolve(t *testing.T) { } _, err := res.ExecuteBatch(context.Background(), topodatapb.TabletType_MASTER, false, nil, nil, buildBatchRequest) - want := "target: TestResolverExecBatchReresolve.0.master, used tablet: (alias: hostname:\"0\" port_map: keyspace:\"TestResolverExecBatchReresolve\" shard:\"0\" type:MASTER ), retry: err" + want := "target: TestResolverExecBatchReresolve.0.master, used tablet: (alias: hostname:\"0\" port_map: keyspace:\"TestResolverExecBatchReresolve\" shard:\"0\" type:MASTER ), FAILED_PRECONDITION error" if err == nil || err.Error() != want { t.Errorf("want %s, got %v", want, err) } @@ -521,7 +521,7 @@ func TestResolverExecBatchAsTransaction(t *testing.T) { res := newTestResolver(hc, new(sandboxTopo), "aa") sbc := hc.AddTestTablet("aa", "0", 1, keyspace, "0", topodatapb.TabletType_MASTER, true, 1, nil) - sbc.MustFailRetry = 20 + sbc.MustFailCodes[vtrpcpb.Code_INTERNAL] = 20 callcount := 0 buildBatchRequest := func() (*scatterBatchRequest, error) { @@ -538,7 +538,7 @@ func TestResolverExecBatchAsTransaction(t *testing.T) { } _, err := res.ExecuteBatch(context.Background(), topodatapb.TabletType_MASTER, true, nil, nil, buildBatchRequest) - want := "target: TestResolverExecBatchAsTransaction.0.master, used tablet: (alias: hostname:\"0\" port_map: keyspace:\"TestResolverExecBatchAsTransaction\" shard:\"0\" type:MASTER ), retry: err" + want := "target: TestResolverExecBatchAsTransaction.0.master, used tablet: (alias: hostname:\"0\" port_map: keyspace:\"TestResolverExecBatchAsTransaction\" shard:\"0\" type:MASTER ), INTERNAL error" if err == nil || 
err.Error() != want { t.Errorf("want %v, got %v", want, err) } diff --git a/go/vt/vtgate/router_dml_test.go b/go/vt/vtgate/router_dml_test.go index ef7cf667d6e..10b1aca2198 100644 --- a/go/vt/vtgate/router_dml_test.go +++ b/go/vt/vtgate/router_dml_test.go @@ -15,6 +15,7 @@ import ( _ "github.com/youtube/vitess/go/vt/vtgate/vindexes" querypb "github.com/youtube/vitess/go/vt/proto/query" + vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) func TestUpdateEqual(t *testing.T) { @@ -623,14 +624,14 @@ func TestInsertFail(t *testing.T) { t.Errorf("routerExec: %v, want %v", err, want) } - sbclookup.MustFailServer = 1 + sbclookup.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 _, err = routerExec(router, "insert into user(id, v, name) values (null, 2, 'myname')", nil) want = "execInsertSharded: " if err == nil || !strings.HasPrefix(err.Error(), want) { t.Errorf("routerExec: %v, want prefix %v", err, want) } - sbclookup.MustFailServer = 1 + sbclookup.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 _, err = routerExec(router, "insert into user(id, v, name) values (1, 2, 'myname')", nil) want = "execInsertSharded: getInsertShardedRoute: lookup.Create: " if err == nil || !strings.HasPrefix(err.Error(), want) { @@ -643,7 +644,7 @@ func TestInsertFail(t *testing.T) { t.Errorf("routerExec: %v, want %v", err, want) } - sbclookup.MustFailServer = 1 + sbclookup.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 _, err = routerExec(router, "insert into music_extra_reversed(music_id, user_id) values (1, 1)", nil) want = "execInsertSharded: getInsertShardedRoute: lookup.Map" if err == nil || !strings.HasPrefix(err.Error(), want) { @@ -672,14 +673,14 @@ func TestInsertFail(t *testing.T) { } getSandbox("TestRouter").ShardSpec = DefaultShardSpec - sbclookup.MustFailServer = 1 + sbclookup.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 _, err = routerExec(router, "insert into music(user_id, id) values (1, null)", nil) want = "execInsertSharded:" if err == nil || 
!strings.HasPrefix(err.Error(), want) { t.Errorf("routerExec: %v, want prefix %v", err, want) } - sbclookup.MustFailServer = 1 + sbclookup.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 _, err = routerExec(router, "insert into music(user_id, id) values (1, 2)", nil) want = "execInsertSharded: getInsertShardedRoute: lookup.Create: execInsertUnsharded: target: TestUnsharded.0.master" if err == nil || !strings.HasPrefix(err.Error(), want) { @@ -704,7 +705,7 @@ func TestInsertFail(t *testing.T) { t.Errorf("routerExec: %v, want %v", err, want) } - sbc.MustFailServer = 1 + sbc.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 _, err = routerExec(router, "insert into user(id, v, name) values (1, 2, 'myname')", nil) want = "execInsertSharded: target: TestRouter.-20.master" if err == nil || !strings.HasPrefix(err.Error(), want) { diff --git a/go/vt/vtgate/router_select_test.go b/go/vt/vtgate/router_select_test.go index b561147fd63..88cda8b238e 100644 --- a/go/vt/vtgate/router_select_test.go +++ b/go/vt/vtgate/router_select_test.go @@ -20,6 +20,7 @@ import ( querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" + vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) func TestUnsharded(t *testing.T) { @@ -411,7 +412,7 @@ func TestSelectEqualFail(t *testing.T) { _, err := routerExec(router, "select id from user where id = (select count(*) from music)", nil) want := "unsupported" if err == nil || !strings.HasPrefix(err.Error(), want) { - t.Errorf("routerExec: %v, must start with %v", err, want) + t.Errorf("routerExec: %v, must contain %v", err, want) } _, err = routerExec(router, "select id from user where id = :aa", nil) @@ -427,7 +428,7 @@ func TestSelectEqualFail(t *testing.T) { t.Errorf("routerExec: %v, want %v", err, want) } - sbclookup.MustFailServer = 1 + sbclookup.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 _, err = routerExec(router, "select id from music where id = 1", nil) want = "paramsSelectEqual: 
lookup.Map" if err == nil || !strings.HasPrefix(err.Error(), want) { @@ -442,7 +443,7 @@ func TestSelectEqualFail(t *testing.T) { } s.ShardSpec = DefaultShardSpec - sbclookup.MustFailServer = 1 + sbclookup.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 _, err = routerExec(router, "select id from user where name = 'foo'", nil) want = "paramsSelectEqual: lookup.Map" if err == nil || !strings.HasPrefix(err.Error(), want) { @@ -1254,11 +1255,11 @@ func TestJoinErrors(t *testing.T) { router, sbc1, sbc2, _ := createRouterEnv() // First query fails - sbc1.MustFailServer = 1 + sbc1.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 _, err := routerExec(router, "select u1.id, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1", nil) - want := "error: err" + want := "INVALID_ARGUMENT error" if err == nil || !strings.Contains(err.Error(), want) { - t.Errorf("err: %v, must start with %s", err, want) + t.Errorf("err: %v, must contain %s", err, want) } // Field query fails @@ -1267,11 +1268,11 @@ func TestJoinErrors(t *testing.T) { {Name: "id", Type: sqltypes.Int32}, }, }}) - sbc1.MustFailServer = 1 + sbc1.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 _, err = routerExec(router, "select u1.id, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 3", nil) - want = "error: err" + want = "INVALID_ARGUMENT error" if err == nil || !strings.Contains(err.Error(), want) { - t.Errorf("err: %v, must start with %s", err, want) + t.Errorf("err: %v, must contain %s", err, want) } // Second query fails @@ -1285,11 +1286,11 @@ func TestJoinErrors(t *testing.T) { sqltypes.MakeTrusted(sqltypes.Int32, []byte("3")), }}, }}) - sbc2.MustFailServer = 1 + sbc2.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 _, err = routerExec(router, "select u1.id, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1", nil) - want = "error: err" + want = "INVALID_ARGUMENT error" if err == nil || !strings.Contains(err.Error(), want) { - t.Errorf("err: %v, must start with %s", 
err, want) + t.Errorf("err: %v, must contain %s", err, want) } // Nested join query fails on get fields @@ -1299,11 +1300,11 @@ func TestJoinErrors(t *testing.T) { {Name: "col", Type: sqltypes.Int32}, }, }}) - sbc1.MustFailServer = 1 + sbc1.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 _, err = routerExec(router, "select u1.id, u2.id from user u1 join (user u2 join user u3 on u3.id = u2.col) where u1.id = 3", nil) - want = "error: err" + want = "INVALID_ARGUMENT error" if err == nil || !strings.Contains(err.Error(), want) { - t.Errorf("err: %v, must start with %s", err, want) + t.Errorf("err: %v, must contain %s", err, want) } // Field query fails on stream join @@ -1312,11 +1313,11 @@ func TestJoinErrors(t *testing.T) { {Name: "id", Type: sqltypes.Int32}, }, }}) - sbc1.MustFailServer = 1 + sbc1.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 _, err = routerStream(router, "select u1.id, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 3") - want = "error: err" + want = "INVALID_ARGUMENT error" if err == nil || !strings.Contains(err.Error(), want) { - t.Errorf("err: %v, must start with %s", err, want) + t.Errorf("err: %v, must contain %s", err, want) } // Second query fails on stream join @@ -1330,10 +1331,10 @@ func TestJoinErrors(t *testing.T) { sqltypes.MakeTrusted(sqltypes.Int32, []byte("3")), }}, }}) - sbc2.MustFailServer = 1 + sbc2.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 _, err = routerStream(router, "select u1.id, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1") - want = "error: err" + want = "INVALID_ARGUMENT error" if err == nil || !strings.Contains(err.Error(), want) { - t.Errorf("err: %v, must start with %s", err, want) + t.Errorf("err: %v, must contain %s", err, want) } } diff --git a/go/vt/vtgate/scatter_conn.go b/go/vt/vtgate/scatter_conn.go index 678b40f93bc..e856a36c338 100644 --- a/go/vt/vtgate/scatter_conn.go +++ b/go/vt/vtgate/scatter_conn.go @@ -571,7 +571,7 @@ func (stc *ScatterConn) 
aggregateErrors(errors []error) error { allRetryableError := true for _, e := range errors { connError, ok := e.(*gateway.ShardError) - if !ok || (connError.Code != vtrpcpb.Code_FAILED_PRECONDITION && connError.Code != vtrpcpb.Code_INTERNAL) || connError.InTransaction { + if !ok || connError.Code != vtrpcpb.Code_FAILED_PRECONDITION || connError.InTransaction { allRetryableError = false break } diff --git a/go/vt/vtgate/scatter_conn_test.go b/go/vt/vtgate/scatter_conn_test.go index 6094915c4de..5012346254e 100644 --- a/go/vt/vtgate/scatter_conn_test.go +++ b/go/vt/vtgate/scatter_conn_test.go @@ -129,9 +129,9 @@ func testScatterConnGeneric(t *testing.T, name string, f func(sc *ScatterConn, s s.Reset() sc = newTestScatterConn(hc, new(sandboxTopo), "aa") sbc := hc.AddTestTablet("aa", "0", 1, name, "0", topodatapb.TabletType_REPLICA, true, 1, nil) - sbc.MustFailServer = 1 + sbc.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 qr, err = f(sc, []string{"0"}) - want := fmt.Sprintf("target: %v.0.replica, used tablet: (alias: hostname:\"0\" port_map: keyspace:\"%s\" shard:\"0\" type:REPLICA ), error: err", name, name) + want := fmt.Sprintf("target: %v.0.replica, used tablet: (alias: hostname:\"0\" port_map: keyspace:\"%s\" shard:\"0\" type:REPLICA ), INVALID_ARGUMENT error", name, name) // Verify server error string. if err == nil || err.Error() != want { t.Errorf("want %s, got %v", want, err) @@ -147,11 +147,11 @@ func testScatterConnGeneric(t *testing.T, name string, f func(sc *ScatterConn, s sc = newTestScatterConn(hc, new(sandboxTopo), "aa") sbc0 := hc.AddTestTablet("aa", "0", 1, name, "0", topodatapb.TabletType_REPLICA, true, 1, nil) sbc1 := hc.AddTestTablet("aa", "1", 1, name, "1", topodatapb.TabletType_REPLICA, true, 1, nil) - sbc0.MustFailServer = 1 - sbc1.MustFailServer = 1 + sbc0.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 + sbc1.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 _, err = f(sc, []string{"0", "1"}) // Verify server errors are consolidated. 
- want = fmt.Sprintf("target: %v.0.replica, used tablet: (alias: hostname:\"0\" port_map: keyspace:\"%v\" shard:\"0\" type:REPLICA ), error: err\ntarget: %v.1.replica, used tablet: (alias: hostname:\"1\" port_map: keyspace:\"%v\" shard:\"1\" type:REPLICA ), error: err", name, name, name, name) + want = fmt.Sprintf("target: %v.0.replica, used tablet: (alias: hostname:\"0\" port_map: keyspace:\"%v\" shard:\"0\" type:REPLICA ), INVALID_ARGUMENT error\ntarget: %v.1.replica, used tablet: (alias: hostname:\"1\" port_map: keyspace:\"%v\" shard:\"1\" type:REPLICA ), INVALID_ARGUMENT error", name, name, name, name) verifyScatterConnError(t, err, want, vtrpcpb.Code_INVALID_ARGUMENT) // Ensure that we tried only once. if execCount := sbc0.ExecCount.Get(); execCount != 1 { @@ -167,11 +167,11 @@ func testScatterConnGeneric(t *testing.T, name string, f func(sc *ScatterConn, s sc = newTestScatterConn(hc, new(sandboxTopo), "aa") sbc0 = hc.AddTestTablet("aa", "0", 1, name, "0", topodatapb.TabletType_REPLICA, true, 1, nil) sbc1 = hc.AddTestTablet("aa", "1", 1, name, "1", topodatapb.TabletType_REPLICA, true, 1, nil) - sbc0.MustFailServer = 1 - sbc1.MustFailTxPool = 1 + sbc0.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 + sbc1.MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 1 _, err = f(sc, []string{"0", "1"}) // Verify server errors are consolidated. 
- want = fmt.Sprintf("target: %v.0.replica, used tablet: (alias: hostname:\"0\" port_map: keyspace:\"%v\" shard:\"0\" type:REPLICA ), error: err\ntarget: %v.1.replica, used tablet: (alias: hostname:\"1\" port_map: keyspace:\"%v\" shard:\"1\" type:REPLICA ), tx_pool_full: err", name, name, name, name) + want = fmt.Sprintf("target: %v.0.replica, used tablet: (alias: hostname:\"0\" port_map: keyspace:\"%v\" shard:\"0\" type:REPLICA ), INVALID_ARGUMENT error\ntarget: %v.1.replica, used tablet: (alias: hostname:\"1\" port_map: keyspace:\"%v\" shard:\"1\" type:REPLICA ), RESOURCE_EXHAUSTED error", name, name, name, name) // We should only surface the higher priority error code verifyScatterConnError(t, err, want, vtrpcpb.Code_INVALID_ARGUMENT) // Ensure that we tried only once. diff --git a/go/vt/vtgate/tx_conn_test.go b/go/vt/vtgate/tx_conn_test.go index bac7aaf294c..298c49b6d9a 100644 --- a/go/vt/vtgate/tx_conn_test.go +++ b/go/vt/vtgate/tx_conn_test.go @@ -85,9 +85,9 @@ func TestTxConnCommitSuccess(t *testing.T) { t.Errorf("Session:\n%+v, want\n%+v", *session.Session, wantSession) } - sbc0.MustFailServer = 1 + sbc0.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 err := sc.txConn.Commit(context.Background(), false, session) - want := "error: err" + want := "INVALID_ARGUMENT error" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("Commit: %v, want %s", err, want) } @@ -423,9 +423,9 @@ func TestTxConnResolveReadTransactionFail(t *testing.T) { sc, sbc0, _ := newTestTxConnEnv("TestTxConn") dtid := "TestTxConn:0:1234" - sbc0.MustFailServer = 1 + sbc0.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 err := sc.txConn.Resolve(context.Background(), dtid) - want := "error: err" + want := "INVALID_ARGUMENT error" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("Resolve: %v, want %s", err, want) } diff --git a/go/vt/vtgate/vtgate_test.go b/go/vt/vtgate/vtgate_test.go index a7576a3e39d..56a5baadbef 100644 --- a/go/vt/vtgate/vtgate_test.go +++ 
b/go/vt/vtgate/vtgate_test.go @@ -1983,88 +1983,76 @@ func TestErrorPropagation(t *testing.T) { sbcrdonly, } - // ErrorCode_CANCELLED testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { - sbc.MustFailCanceled = 20 + sbc.MustFailCodes[vtrpcpb.Code_CANCELED] = 20 }, func(sbc *sandboxconn.SandboxConn) { - sbc.MustFailCanceled = 0 + sbc.MustFailCodes[vtrpcpb.Code_CANCELED] = 0 }, vtrpcpb.Code_CANCELED) - // Code_UNKNOWN testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { - sbc.MustFailUnknownError = 20 + sbc.MustFailCodes[vtrpcpb.Code_UNKNOWN] = 20 }, func(sbc *sandboxconn.SandboxConn) { - sbc.MustFailUnknownError = 0 + sbc.MustFailCodes[vtrpcpb.Code_UNKNOWN] = 0 }, vtrpcpb.Code_UNKNOWN) - // Code_INVALID_ARGUMENT testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { - sbc.MustFailServer = 20 + sbc.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 20 }, func(sbc *sandboxconn.SandboxConn) { - sbc.MustFailServer = 0 + sbc.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 0 }, vtrpcpb.Code_INVALID_ARGUMENT) - // ErrorCode_DEADLINE_EXCEEDED testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { - sbc.MustFailDeadlineExceeded = 20 + sbc.MustFailCodes[vtrpcpb.Code_DEADLINE_EXCEEDED] = 20 }, func(sbc *sandboxconn.SandboxConn) { - sbc.MustFailDeadlineExceeded = 0 + sbc.MustFailCodes[vtrpcpb.Code_DEADLINE_EXCEEDED] = 0 }, vtrpcpb.Code_DEADLINE_EXCEEDED) - // Code_ALREADY_EXISTS testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { - sbc.MustFailIntegrityError = 20 + sbc.MustFailCodes[vtrpcpb.Code_ALREADY_EXISTS] = 20 }, func(sbc *sandboxconn.SandboxConn) { - sbc.MustFailIntegrityError = 0 + sbc.MustFailCodes[vtrpcpb.Code_ALREADY_EXISTS] = 0 }, vtrpcpb.Code_ALREADY_EXISTS) - // ErrorCode_PERMISSION_DENIED testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { - sbc.MustFailPermissionDenied = 20 + sbc.MustFailCodes[vtrpcpb.Code_PERMISSION_DENIED] = 20 }, func(sbc *sandboxconn.SandboxConn) { - 
sbc.MustFailPermissionDenied = 0 + sbc.MustFailCodes[vtrpcpb.Code_PERMISSION_DENIED] = 0 }, vtrpcpb.Code_PERMISSION_DENIED) - // ErrorCode_RESOURCE_EXHAUSTED testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { - sbc.MustFailTxPool = 20 + sbc.MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 20 }, func(sbc *sandboxconn.SandboxConn) { - sbc.MustFailTxPool = 0 + sbc.MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 0 }, vtrpcpb.Code_RESOURCE_EXHAUSTED) - // Code_FAILED_PRECONDITION testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { - sbc.MustFailRetry = 20 + sbc.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 20 }, func(sbc *sandboxconn.SandboxConn) { - sbc.MustFailRetry = 0 + sbc.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 0 }, vtrpcpb.Code_FAILED_PRECONDITION) - // Code_ABORTED testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { - sbc.MustFailNotTx = 20 + sbc.MustFailCodes[vtrpcpb.Code_ABORTED] = 20 }, func(sbc *sandboxconn.SandboxConn) { - sbc.MustFailNotTx = 0 + sbc.MustFailCodes[vtrpcpb.Code_ABORTED] = 0 }, vtrpcpb.Code_ABORTED) - // Code_INTERNAL testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { - sbc.MustFailFatal = 20 + sbc.MustFailCodes[vtrpcpb.Code_INTERNAL] = 20 }, func(sbc *sandboxconn.SandboxConn) { - sbc.MustFailFatal = 0 + sbc.MustFailCodes[vtrpcpb.Code_INTERNAL] = 0 }, vtrpcpb.Code_INTERNAL) - // Code_UNAVAILABLE testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { - sbc.MustFailTransientError = 20 + sbc.MustFailCodes[vtrpcpb.Code_UNAVAILABLE] = 20 }, func(sbc *sandboxconn.SandboxConn) { - sbc.MustFailTransientError = 0 + sbc.MustFailCodes[vtrpcpb.Code_UNAVAILABLE] = 0 }, vtrpcpb.Code_UNAVAILABLE) - // ErrorCode_UNAUTHENTICATED testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { - sbc.MustFailUnauthenticated = 20 + sbc.MustFailCodes[vtrpcpb.Code_UNAUTHENTICATED] = 20 }, func(sbc *sandboxconn.SandboxConn) { - sbc.MustFailUnauthenticated = 0 + 
sbc.MustFailCodes[vtrpcpb.Code_UNAUTHENTICATED] = 0 }, vtrpcpb.Code_UNAUTHENTICATED) } @@ -2096,7 +2084,7 @@ func TestErrorIssuesRollback(t *testing.T) { if sbc.RollbackCount.Get() != 0 { t.Errorf("want 0, got %d", sbc.RollbackCount.Get()) } - sbc.MustFailNotTx = 20 + sbc.MustFailCodes[vtrpcpb.Code_ABORTED] = 20 _, err = rpcVTGate.Execute(context.Background(), "select id from t1", nil, @@ -2112,7 +2100,7 @@ func TestErrorIssuesRollback(t *testing.T) { t.Errorf("want 1, got %d", sbc.RollbackCount.Get()) } sbc.RollbackCount.Set(0) - sbc.MustFailNotTx = 0 + sbc.MustFailCodes[vtrpcpb.Code_ABORTED] = 0 // Start a transaction, send one statement. // Simulate an error that should trigger a rollback: @@ -2135,7 +2123,7 @@ func TestErrorIssuesRollback(t *testing.T) { if sbc.RollbackCount.Get() != 0 { t.Errorf("want 0, got %d", sbc.RollbackCount.Get()) } - sbc.MustFailTxPool = 20 + sbc.MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 20 _, err = rpcVTGate.Execute(context.Background(), "select id from t1", nil, @@ -2151,7 +2139,7 @@ func TestErrorIssuesRollback(t *testing.T) { t.Errorf("want 1, got %d", sbc.RollbackCount.Get()) } sbc.RollbackCount.Set(0) - sbc.MustFailTxPool = 0 + sbc.MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 0 // Start a transaction, send one statement. 
// Simulate an error that should *not* trigger a rollback: @@ -2174,7 +2162,7 @@ func TestErrorIssuesRollback(t *testing.T) { if sbc.RollbackCount.Get() != 0 { t.Errorf("want 0, got %d", sbc.RollbackCount.Get()) } - sbc.MustFailIntegrityError = 20 + sbc.MustFailCodes[vtrpcpb.Code_ALREADY_EXISTS] = 20 _, err = rpcVTGate.Execute(context.Background(), "select id from t1", nil, @@ -2189,5 +2177,5 @@ func TestErrorIssuesRollback(t *testing.T) { if sbc.RollbackCount.Get() != 0 { t.Errorf("want 0, got %d", sbc.RollbackCount.Get()) } - sbc.MustFailIntegrityError = 0 + sbc.MustFailCodes[vtrpcpb.Code_ALREADY_EXISTS] = 0 } From 51f3fdbdf46b1fae2922d1446bc8605d211d2cc1 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Sat, 18 Feb 2017 21:53:02 -0800 Subject: [PATCH 017/108] vterrors: consolidate vtgate errors All VTGate error types have been consolidated to use vterrors. Also, /debug/vars error reporting has been consolidated. Theres is now only one error var, but it has a fourth category which is the Code. We can consider dropping one of the other categories as the number of combinations may be too high. 
--- go/cmd/vtcombo/tablet_map.go | 3 +- go/vt/tabletserver/sandboxconn/sandboxconn.go | 42 ++--- go/vt/tabletserver/tabletconn/grpc_error.go | 23 +-- .../tabletconn/grpc_error_test.go | 7 +- go/vt/tabletserver/tabletconn/tablet_conn.go | 31 +--- .../tabletconntest/tabletconntest.go | 11 +- go/vt/tabletserver/tx_pool_test.go | 10 +- go/vt/vterrors/aggregate.go | 2 +- go/vt/vterrors/proto3.go | 2 +- go/vt/vterrors/vterrors.go | 53 +++---- go/vt/vtgate/buffer/buffer_test.go | 6 +- go/vt/vtgate/gateway/discoverygateway.go | 27 +--- go/vt/vtgate/gateway/discoverygateway_test.go | 7 +- go/vt/vtgate/gateway/l2vtgategateway.go | 27 +--- go/vt/vtgate/gateway/shard_error.go | 46 +----- go/vt/vtgate/l2vtgate/l2vtgate.go | 2 +- go/vt/vtgate/resolver.go | 9 +- go/vt/vtgate/resolver_test.go | 28 ---- go/vt/vtgate/sandbox_test.go | 6 +- go/vt/vtgate/scatter_conn.go | 53 +------ go/vt/vtgate/scatter_conn_test.go | 25 +-- go/vt/vtgate/tx_conn.go | 12 +- go/vt/vtgate/tx_conn_test.go | 14 +- go/vt/vtgate/vtgate.go | 148 +++++------------- go/vt/vtgate/vtgate_test.go | 75 ++------- go/vt/vtgate/vtgateconntest/client.go | 2 +- .../vtworkerclienttest/client_testsuite.go | 6 +- 27 files changed, 150 insertions(+), 527 deletions(-) diff --git a/go/cmd/vtcombo/tablet_map.go b/go/cmd/vtcombo/tablet_map.go index 347c1b10734..16cfef1f68d 100644 --- a/go/cmd/vtcombo/tablet_map.go +++ b/go/cmd/vtcombo/tablet_map.go @@ -35,6 +35,7 @@ import ( replicationdatapb "github.com/youtube/vitess/go/vt/proto/replicationdata" tabletmanagerdatapb "github.com/youtube/vitess/go/vt/proto/tabletmanagerdata" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" + vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" vttestpb "github.com/youtube/vitess/go/vt/proto/vttest" ) @@ -252,7 +253,7 @@ func initTabletMap(ts topo.Server, tpb *vttestpb.VTTestTopology, mysqld mysqlctl func dialer(tablet *topodatapb.Tablet, timeout time.Duration) (queryservice.QueryService, error) { t, ok := tabletMap[tablet.Alias.Uid] 
if !ok { - return nil, tabletconn.OperationalError("connection refused") + return nil, vterrors.New(vtrpcpb.Code_UNAVAILABLE, "connection refused") } return &internalTabletConn{ diff --git a/go/vt/tabletserver/sandboxconn/sandboxconn.go b/go/vt/tabletserver/sandboxconn/sandboxconn.go index 738cbf03de0..14093bafa9e 100644 --- a/go/vt/tabletserver/sandboxconn/sandboxconn.go +++ b/go/vt/tabletserver/sandboxconn/sandboxconn.go @@ -13,7 +13,7 @@ import ( "github.com/youtube/vitess/go/sync2" "github.com/youtube/vitess/go/vt/tabletserver/queryservice" "github.com/youtube/vitess/go/vt/tabletserver/querytypes" - "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" + "github.com/youtube/vitess/go/vt/vterrors" "golang.org/x/net/context" querypb "github.com/youtube/vitess/go/vt/proto/query" @@ -94,10 +94,7 @@ func (sbc *SandboxConn) getError() error { continue } sbc.MustFailCodes[code] = count - 1 - return &tabletconn.ServerError{ - Err: fmt.Sprintf("%v error", code), - ServerCode: code, - } + return vterrors.New(code, fmt.Sprintf("%v error", code)) } return nil } @@ -189,10 +186,7 @@ func (sbc *SandboxConn) Prepare(ctx context.Context, target *querypb.Target, tra sbc.PrepareCount.Add(1) if sbc.MustFailPrepare > 0 { sbc.MustFailPrepare-- - return &tabletconn.ServerError{ - Err: "error: err", - ServerCode: vtrpcpb.Code_FAILED_PRECONDITION, - } + return vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "error: err") } return sbc.getError() } @@ -202,10 +196,7 @@ func (sbc *SandboxConn) CommitPrepared(ctx context.Context, target *querypb.Targ sbc.CommitPreparedCount.Add(1) if sbc.MustFailCommitPrepared > 0 { sbc.MustFailCommitPrepared-- - return &tabletconn.ServerError{ - Err: "error: err", - ServerCode: vtrpcpb.Code_FAILED_PRECONDITION, - } + return vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "error: err") } return sbc.getError() } @@ -215,10 +206,7 @@ func (sbc *SandboxConn) RollbackPrepared(ctx context.Context, target *querypb.Ta sbc.RollbackPreparedCount.Add(1) if 
sbc.MustFailRollbackPrepared > 0 { sbc.MustFailRollbackPrepared-- - return &tabletconn.ServerError{ - Err: "error: err", - ServerCode: vtrpcpb.Code_FAILED_PRECONDITION, - } + return vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "error: err") } return sbc.getError() } @@ -228,10 +216,7 @@ func (sbc *SandboxConn) CreateTransaction(ctx context.Context, target *querypb.T sbc.CreateTransactionCount.Add(1) if sbc.MustFailCreateTransaction > 0 { sbc.MustFailCreateTransaction-- - return &tabletconn.ServerError{ - Err: "error: err", - ServerCode: vtrpcpb.Code_FAILED_PRECONDITION, - } + return vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "error: err") } return sbc.getError() } @@ -242,10 +227,7 @@ func (sbc *SandboxConn) StartCommit(ctx context.Context, target *querypb.Target, sbc.StartCommitCount.Add(1) if sbc.MustFailStartCommit > 0 { sbc.MustFailStartCommit-- - return &tabletconn.ServerError{ - Err: "error: err", - ServerCode: vtrpcpb.Code_FAILED_PRECONDITION, - } + return vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "error: err") } return sbc.getError() } @@ -256,10 +238,7 @@ func (sbc *SandboxConn) SetRollback(ctx context.Context, target *querypb.Target, sbc.SetRollbackCount.Add(1) if sbc.MustFailSetRollback > 0 { sbc.MustFailSetRollback-- - return &tabletconn.ServerError{ - Err: "error: err", - ServerCode: vtrpcpb.Code_FAILED_PRECONDITION, - } + return vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "error: err") } return sbc.getError() } @@ -270,10 +249,7 @@ func (sbc *SandboxConn) ConcludeTransaction(ctx context.Context, target *querypb sbc.ConcludeTransactionCount.Add(1) if sbc.MustFailConcludeTransaction > 0 { sbc.MustFailConcludeTransaction-- - return &tabletconn.ServerError{ - Err: "error: err", - ServerCode: vtrpcpb.Code_FAILED_PRECONDITION, - } + return vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "error: err") } return sbc.getError() } diff --git a/go/vt/tabletserver/tabletconn/grpc_error.go b/go/vt/tabletserver/tabletconn/grpc_error.go index 
1e21efdb97d..78ce341ef64 100644 --- a/go/vt/tabletserver/tabletconn/grpc_error.go +++ b/go/vt/tabletserver/tabletconn/grpc_error.go @@ -1,9 +1,7 @@ package tabletconn import ( - "fmt" "io" - "strings" "github.com/youtube/vitess/go/vt/vterrors" "google.golang.org/grpc" @@ -18,19 +16,7 @@ func TabletErrorFromGRPC(err error) error { if err == nil || err == io.EOF { return nil } - - // TODO(aaijazi): Unfortunately, there's no better way to check for - // a gRPC server error (vs a client error). - // See: https://github.com/grpc/grpc-go/issues/319 - if !strings.Contains(err.Error(), vterrors.GRPCServerErrPrefix) { - return OperationalError(fmt.Sprintf("vttablet: %v", err)) - } - - // server side error, convert it - return &ServerError{ - Err: fmt.Sprintf("vttablet: %v", err), - ServerCode: vterrors.GRPCToCode(grpc.Code(err)), - } + return vterrors.New(vterrors.GRPCToCode(grpc.Code(err)), "vttablet: "+err.Error()) } // TabletErrorFromRPCError returns a ServerError from a vtrpcpb.ServerError @@ -42,10 +28,5 @@ func TabletErrorFromRPCError(err *vtrpcpb.RPCError) error { if code == vtrpcpb.Code_OK { code = vterrors.LegacyErrorCodeToCode(err.LegacyCode) } - - // server side error, convert it - return &ServerError{ - Err: fmt.Sprintf("vttablet: %v", err), - ServerCode: code, - } + return vterrors.New(code, "vttablet: "+err.Message) } diff --git a/go/vt/tabletserver/tabletconn/grpc_error_test.go b/go/vt/tabletserver/tabletconn/grpc_error_test.go index 5dee218c694..82498ac9ade 100644 --- a/go/vt/tabletserver/tabletconn/grpc_error_test.go +++ b/go/vt/tabletserver/tabletconn/grpc_error_test.go @@ -8,6 +8,7 @@ import ( "testing" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" + "github.com/youtube/vitess/go/vt/vterrors" ) func TestTabletErrorFromRPCError(t *testing.T) { @@ -35,9 +36,9 @@ func TestTabletErrorFromRPCError(t *testing.T) { want: vtrpcpb.Code_INVALID_ARGUMENT, }} for _, tcase := range testcases { - got := TabletErrorFromRPCError(tcase.in).(*ServerError) - if 
got.ServerCode != tcase.want { - t.Errorf("FromVtRPCError(%v):\n%v, want\n%v", tcase.in, got.ServerCode, tcase.want) + got := vterrors.Code(TabletErrorFromRPCError(tcase.in)) + if got != tcase.want { + t.Errorf("FromVtRPCError(%v):\n%v, want\n%v", tcase.in, got, tcase.want) } } } diff --git a/go/vt/tabletserver/tabletconn/tablet_conn.go b/go/vt/tabletserver/tabletconn/tablet_conn.go index fbcb30c0cc8..eed669e453d 100644 --- a/go/vt/tabletserver/tabletconn/tablet_conn.go +++ b/go/vt/tabletserver/tabletconn/tablet_conn.go @@ -11,14 +11,15 @@ import ( log "github.com/golang/glog" "github.com/youtube/vitess/go/vt/tabletserver/queryservice" + "github.com/youtube/vitess/go/vt/vterrors" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) -const ( +var ( // ConnClosed is returned when the underlying connection was closed. - ConnClosed = OperationalError("vttablet: Connection Closed") + ConnClosed = vterrors.New(vtrpcpb.Code_UNAVAILABLE, "vttablet: Connection Closed") ) var ( @@ -26,32 +27,6 @@ var ( TabletProtocol = flag.String("tablet_protocol", "grpc", "how to talk to the vttablets") ) -// ServerError represents an error that was returned from -// a vttablet server. it implements vterrors.VtError. -type ServerError struct { - Err string - // ServerCode is the error code that we got from the server. - ServerCode vtrpcpb.Code -} - -func (e *ServerError) Error() string { return e.Err } - -// VtErrorCode returns the underlying Vitess error code. -// This makes ServerError implement vterrors.VtError. -func (e *ServerError) VtErrorCode() vtrpcpb.Code { return e.ServerCode } - -// OperationalError represents an error due to a failure to -// communicate with vttablet. -type OperationalError string - -func (e OperationalError) Error() string { return string(e) } - -// In all the following calls, context is an opaque structure that may -// carry data related to the call. 
For instance, if an incoming RPC -// call is responsible for these outgoing calls, and the incoming -// protocol and outgoing protocols support forwarding information, use -// context. - // TabletDialer represents a function that will return a QueryService // object that can communicate with a tablet. Only the tablet's // HostName and PortMap should be used (and maybe the alias for debug diff --git a/go/vt/tabletserver/tabletconntest/tabletconntest.go b/go/vt/tabletserver/tabletconntest/tabletconntest.go index 28e745e4350..5eef7d2112d 100644 --- a/go/vt/tabletserver/tabletconntest/tabletconntest.go +++ b/go/vt/tabletserver/tabletconntest/tabletconntest.go @@ -58,20 +58,11 @@ func testErrorHelper(t *testing.T, f *FakeQueryService, name string, ef func(con } // First we check the recoverable vtrpc code is right. - code := vterrors.RecoverVtErrorCode(err) + code := vterrors.Code(err) if code != e.Code { t.Errorf("unexpected server code from %v: got %v, wanted %v", name, code, e.Code) } - // Double-check we always get a ServerError, although - // we don't really care that much. 
- if !f.TestingGateway { - if _, ok := err.(*tabletconn.ServerError); !ok { - t.Errorf("error wasn't a tabletconn.ServerError for %v?", name) - continue - } - } - // and last we check we preserve the text, with the right prefix if !strings.Contains(err.Error(), e.Prefix()+e.Message) { t.Errorf("client error message '%v' for %v doesn't contain expected server text message '%v'", err.Error(), name, e.Prefix()+e.Message) diff --git a/go/vt/tabletserver/tx_pool_test.go b/go/vt/tabletserver/tx_pool_test.go index 94130ebc6c2..7632abdb318 100644 --- a/go/vt/tabletserver/tx_pool_test.go +++ b/go/vt/tabletserver/tx_pool_test.go @@ -190,7 +190,7 @@ func TestTxPoolBeginWithPoolConnectionError_Errno2006_Permanent(t *testing.T) { if err == nil || !strings.Contains(err.Error(), "Lost connection to MySQL server") || !strings.Contains(err.Error(), "(errno 2013)") { t.Fatalf("Begin did not return the reconnect error: %v", err) } - if got, want := vterrors.RecoverVtErrorCode(err), vtrpcpb.Code_INTERNAL; got != want { + if got, want := vterrors.Code(err), vtrpcpb.Code_INTERNAL; got != want { t.Errorf("wrong error code for reconnect error after Begin: got = %v, want = %v", got, want) } } @@ -212,7 +212,7 @@ func TestTxPoolBeginWithPoolConnectionError_Errno2013(t *testing.T) { if err == nil || !strings.Contains(err.Error(), "(errno 2013)") { t.Fatalf("Begin must return connection error with MySQL errno 2013: %v", err) } - if got, want := vterrors.RecoverVtErrorCode(err), vtrpcpb.Code_UNKNOWN; got != want { + if got, want := vterrors.Code(err), vtrpcpb.Code_UNKNOWN; got != want { t.Errorf("wrong error code for Begin error: got = %v, want = %v", got, want) } } @@ -253,7 +253,7 @@ func TestTxPoolBeginWithError(t *testing.T) { if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("Begin: %v, want %s", err, want) } - if got, want := vterrors.RecoverVtErrorCode(err), vtrpcpb.Code_UNKNOWN; got != want { + if got, want := vterrors.Code(err), vtrpcpb.Code_UNKNOWN; got != want { 
t.Errorf("wrong error code for Begin error: got = %v, want = %v", got, want) } } @@ -333,7 +333,7 @@ func TestTxPoolExecFailDueToConnFail_Errno2006(t *testing.T) { if err == nil || !strings.Contains(err.Error(), "(errno 2006)") { t.Fatalf("Exec must return connection error with MySQL errno 2006: %v", err) } - if got, want := vterrors.RecoverVtErrorCode(err), vtrpcpb.Code_INTERNAL; got != want { + if got, want := vterrors.Code(err), vtrpcpb.Code_INTERNAL; got != want { t.Errorf("wrong error code for Exec error: got = %v, want = %v", got, want) } } @@ -366,7 +366,7 @@ func TestTxPoolExecFailDueToConnFail_Errno2013(t *testing.T) { if err == nil || !strings.Contains(err.Error(), "(errno 2013)") { t.Fatalf("Exec must return connection error with MySQL errno 2013: %v", err) } - if got, want := vterrors.RecoverVtErrorCode(err), vtrpcpb.Code_UNKNOWN; got != want { + if got, want := vterrors.Code(err), vtrpcpb.Code_UNKNOWN; got != want { t.Errorf("wrong error code for Exec error: got = %v, want = %v", got, want) } } diff --git a/go/vt/vterrors/aggregate.go b/go/vt/vterrors/aggregate.go index d82214bdf1c..bc423b44057 100644 --- a/go/vt/vterrors/aggregate.go +++ b/go/vt/vterrors/aggregate.go @@ -64,7 +64,7 @@ var errorPriorities = map[vtrpcpb.Code]int{ func AggregateVtGateErrorCodes(errors []error) vtrpcpb.Code { highCode := vtrpcpb.Code_OK for _, e := range errors { - code := RecoverVtErrorCode(e) + code := Code(e) if errorPriorities[code] > errorPriorities[highCode] { highCode = code } diff --git a/go/vt/vterrors/proto3.go b/go/vt/vterrors/proto3.go index 08597dcd96d..fff4e15b406 100644 --- a/go/vt/vterrors/proto3.go +++ b/go/vt/vterrors/proto3.go @@ -36,7 +36,7 @@ func VtRPCErrorFromVtError(err error) *vtrpcpb.RPCError { if err == nil { return nil } - code := RecoverVtErrorCode(err) + code := Code(err) return &vtrpcpb.RPCError{ LegacyCode: CodeToLegacyErrorCode(code), Code: code, diff --git a/go/vt/vterrors/vterrors.go b/go/vt/vterrors/vterrors.go index 
d6ad2b09aaa..6049cbc8ec1 100644 --- a/go/vt/vterrors/vterrors.go +++ b/go/vt/vterrors/vterrors.go @@ -1,6 +1,7 @@ package vterrors import ( + "errors" "fmt" "sort" "strings" @@ -8,15 +9,27 @@ import ( vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) +// Code returns the error code if it's a VitessError. +// Otherwise, it returns unknown. +func Code(err error) vtrpcpb.Code { + if err, ok := err.(*VitessError); ok { + return err.Code + } + if err, ok := err.(VtError); ok { + return err.VtErrorCode() + } + return vtrpcpb.Code_UNKNOWN +} + // ConcatenateErrors aggregates an array of errors into a single error by string concatenation. -func ConcatenateErrors(errors []error) error { - errStrs := make([]string, 0, len(errors)) - for _, e := range errors { +func ConcatenateErrors(errs []error) error { + errStrs := make([]string, 0, len(errs)) + for _, e := range errs { errStrs = append(errStrs, fmt.Sprintf("%v", e)) } // sort the error strings so we always have deterministic ordering sort.Strings(errStrs) - return fmt.Errorf("%v", strings.Join(errStrs, "\n")) + return errors.New(strings.Join(errStrs, "\n")) } // VtError is implemented by any type that exposes a vtrpcpb.ErrorCode. @@ -24,14 +37,6 @@ type VtError interface { VtErrorCode() vtrpcpb.Code } -// RecoverVtErrorCode attempts to recover a vtrpcpb.ErrorCode from an error. -func RecoverVtErrorCode(err error) vtrpcpb.Code { - if vtErr, ok := err.(VtError); ok { - return vtErr.VtErrorCode() - } - return vtrpcpb.Code_UNKNOWN -} - // VitessError is the error type that we use internally for passing structured errors. type VitessError struct { // Error code of the Vitess error. @@ -47,6 +52,14 @@ type VitessError struct { err error } +// New creates a new error using the code and input string. +func New(code vtrpcpb.Code, in string) error { + return &VitessError{ + Code: code, + err: errors.New(in), + } +} + // Error implements the error interface. It will return the redefined error message, if there // is one. 
If there isn't, it will return the original error message. func (e *VitessError) Error() string { @@ -98,21 +111,7 @@ func NewVitessError(code vtrpcpb.Code, err error, format string, args ...interfa // doesn't wrap it in a new VitessError instance, but only changes the 'Message' field). // Otherwise, it returns a string prefixed with the given prefix. func WithPrefix(prefix string, in error) error { - if vitessError, ok := in.(*VitessError); ok { - return &VitessError{ - Code: vitessError.Code, - err: vitessError.err, - Message: fmt.Sprintf("%s%s", prefix, in.Error()), - } - } - if vtError, ok := in.(VtError); ok { - return &VitessError{ - Code: vtError.VtErrorCode(), - err: in, - Message: fmt.Sprintf("%s%s", prefix, in.Error()), - } - } - return fmt.Errorf("%s%s", prefix, in) + return New(Code(in), fmt.Sprintf("%s%v", prefix, in)) } // WithSuffix allows a string to be suffixed to an error. diff --git a/go/vt/vtgate/buffer/buffer_test.go b/go/vt/vtgate/buffer/buffer_test.go index e95221f7b5e..26d5c86e9f3 100644 --- a/go/vt/vtgate/buffer/buffer_test.go +++ b/go/vt/vtgate/buffer/buffer_test.go @@ -517,7 +517,7 @@ func isCanceledError(err error) error { if err == nil { return fmt.Errorf("buffering should have stopped early and returned an error because the request was canceled from the outside") } - if got, want := vterrors.RecoverVtErrorCode(err), vtrpcpb.Code_UNAVAILABLE; got != want { + if got, want := vterrors.Code(err), vtrpcpb.Code_UNAVAILABLE; got != want { return fmt.Errorf("wrong error code for canceled buffered request. 
got = %v, want = %v", got, want) } if got, want := err.Error(), "context was canceled before failover finished: context canceled"; got != want { @@ -531,7 +531,7 @@ func isEvictedError(err error) error { if err == nil { return errors.New("request should have been evicted because the buffer was full") } - if got, want := vterrors.RecoverVtErrorCode(err), vtrpcpb.Code_UNAVAILABLE; got != want { + if got, want := vterrors.Code(err), vtrpcpb.Code_UNAVAILABLE; got != want { return fmt.Errorf("wrong error code for evicted buffered request. got = %v, want = %v full error: %v", got, want, err) } if got, want := err.Error(), entryEvictedError.Error(); !strings.Contains(got, want) { @@ -568,7 +568,7 @@ func TestEvictionNotPossible(t *testing.T) { if bufferErr == nil || retryDone != nil { t.Fatalf("buffer should have returned an error because it's full: err: %v retryDone: %v", bufferErr, retryDone) } - if got, want := vterrors.RecoverVtErrorCode(bufferErr), vtrpcpb.Code_UNAVAILABLE; got != want { + if got, want := vterrors.Code(bufferErr), vtrpcpb.Code_UNAVAILABLE; got != want { t.Fatalf("wrong error code for evicted buffered request. 
got = %v, want = %v", got, want) } if got, want := bufferErr.Error(), bufferFullError.Error(); !strings.Contains(got, want) { diff --git a/go/vt/vtgate/gateway/discoverygateway.go b/go/vt/vtgate/gateway/discoverygateway.go index 65485a7533c..1e375c112c4 100644 --- a/go/vt/vtgate/gateway/discoverygateway.go +++ b/go/vt/vtgate/gateway/discoverygateway.go @@ -19,7 +19,6 @@ import ( "github.com/youtube/vitess/go/flagutil" "github.com/youtube/vitess/go/vt/discovery" "github.com/youtube/vitess/go/vt/tabletserver/queryservice" - "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/vterrors" "github.com/youtube/vitess/go/vt/vtgate/buffer" @@ -259,29 +258,15 @@ func (dg *discoveryGateway) canRetry(ctx context.Context, err error, inTransacti return false default: } - if serverError, ok := err.(*tabletconn.ServerError); ok { - switch serverError.ServerCode { - case vtrpcpb.Code_INTERNAL: - // Do not retry on fatal error for streaming query. - // For streaming query, vttablet sends: - // - QUERY_NOT_SERVED, if streaming is not started yet; - // - INTERNAL_ERROR, if streaming is broken halfway. - // For non-streaming query, handle as QUERY_NOT_SERVED. - if isStreaming { - return false - } - fallthrough - case vtrpcpb.Code_FAILED_PRECONDITION: - // Retry on QUERY_NOT_SERVED and - // INTERNAL_ERROR if not in a transaction. - return !inTransaction - default: - // Not retry for RESOURCE_EXHAUSTED and normal - // server errors. + switch vterrors.Code(err) { + case vtrpcpb.Code_INTERNAL: + if isStreaming { return false } + fallthrough + case vtrpcpb.Code_FAILED_PRECONDITION: + return !inTransaction } - // Do not retry on operational error. 
return false } diff --git a/go/vt/vtgate/gateway/discoverygateway_test.go b/go/vt/vtgate/gateway/discoverygateway_test.go index 95177217e62..4785177bea9 100644 --- a/go/vt/vtgate/gateway/discoverygateway_test.go +++ b/go/vt/vtgate/gateway/discoverygateway_test.go @@ -2,7 +2,6 @@ package gateway import ( "fmt" - "reflect" "testing" "golang.org/x/net/context" @@ -248,11 +247,7 @@ func verifyShardError(t *testing.T, err error, wantErr string, wantCode vtrpcpb. if err == nil || err.Error() != wantErr { t.Errorf("wanted error: %s, got error: %v", wantErr, err) } - if _, ok := err.(*ShardError); !ok { - t.Errorf("wanted error type *ShardConnError, got error type: %v", reflect.TypeOf(err)) - } - code := vterrors.RecoverVtErrorCode(err) - if code != wantCode { + if code := vterrors.Code(err); code != wantCode { t.Errorf("wanted error code: %s, got: %v", wantCode, code) } } diff --git a/go/vt/vtgate/gateway/l2vtgategateway.go b/go/vt/vtgate/gateway/l2vtgategateway.go index cbcf1568ebd..e18ce916577 100644 --- a/go/vt/vtgate/gateway/l2vtgategateway.go +++ b/go/vt/vtgate/gateway/l2vtgategateway.go @@ -22,6 +22,7 @@ import ( "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/topoproto" + "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" @@ -236,29 +237,15 @@ func (lg *l2VTGateGateway) canRetry(ctx context.Context, err error, inTransactio return false default: } - if serverError, ok := err.(*tabletconn.ServerError); ok { - switch serverError.ServerCode { - case vtrpcpb.Code_INTERNAL: - // Do not retry on fatal error for streaming query. - // For streaming query, vttablet sends: - // - QUERY_NOT_SERVED, if streaming is not started yet; - // - INTERNAL_ERROR, if streaming is broken halfway. - // For non-streaming query, handle as QUERY_NOT_SERVED. 
- if isStreaming { - return false - } - fallthrough - case vtrpcpb.Code_FAILED_PRECONDITION: - // Retry on QUERY_NOT_SERVED and - // INTERNAL_ERROR if not in a transaction. - return !inTransaction - default: - // Not retry for RESOURCE_EXHAUSTED and normal - // server errors. + switch vterrors.Code(err) { + case vtrpcpb.Code_INTERNAL: + if isStreaming { return false } + fallthrough + case vtrpcpb.Code_FAILED_PRECONDITION: + return !inTransaction } - // Do not retry on operational error. return false } diff --git a/go/vt/vtgate/gateway/shard_error.go b/go/vt/vtgate/gateway/shard_error.go index 16cc3e0e9da..e3dc332147e 100644 --- a/go/vt/vtgate/gateway/shard_error.go +++ b/go/vt/vtgate/gateway/shard_error.go @@ -12,55 +12,15 @@ import ( querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" - vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) -// ShardError is the error about a specific shard. -// It implements vterrors.VtError. -type ShardError struct { - // ShardIdentifier is the keyspace+shard. - ShardIdentifier string - // InTransaction indicates if it is inside a transaction. - InTransaction bool - // Err preserves the original error, so that we don't need to parse the error string. - Err error - // Code is the error code to use for all the tablet errors in aggregate - Code vtrpcpb.Code -} - -// Error returns the error string. -func (e *ShardError) Error() string { - if e.ShardIdentifier == "" { - return fmt.Sprintf("%v", e.Err) - } - return fmt.Sprintf("%s, %v", e.ShardIdentifier, e.Err) -} - -// VtErrorCode returns the underlying Vitess error code. -// This is part of vterrors.VtError interface. 
-func (e *ShardError) VtErrorCode() vtrpcpb.Code { - return e.Code -} - -// NewShardError returns a ShardError which preserves the original -// error code if possible, adds the connection context and adds a bit -// to determine whether the keyspace/shard needs to be re-resolved for -// a potential sharding event (namely, if we were in a transaction). +// NewShardError returns a new error with the shard info amended. func NewShardError(in error, target *querypb.Target, tablet *topodatapb.Tablet, inTransaction bool) error { if in == nil { return nil } - var shardIdentifier string if tablet != nil { - shardIdentifier = fmt.Sprintf("target: %s.%s.%s, used tablet: (%+v)", target.Keyspace, target.Shard, topoproto.TabletTypeLString(target.TabletType), tablet) - } else { - shardIdentifier = fmt.Sprintf("target: %s.%s.%s", target.Keyspace, target.Shard, topoproto.TabletTypeLString(target.TabletType)) - } - - return &ShardError{ - ShardIdentifier: shardIdentifier, - InTransaction: inTransaction, - Err: in, - Code: vterrors.RecoverVtErrorCode(in), + return vterrors.WithPrefix(fmt.Sprintf("target: %s.%s.%s, used tablet: (%+v), ", target.Keyspace, target.Shard, topoproto.TabletTypeLString(target.TabletType), tablet), in) } + return vterrors.WithPrefix(fmt.Sprintf("target: %s.%s.%s, ", target.Keyspace, target.Shard, topoproto.TabletTypeLString(target.TabletType)), in) } diff --git a/go/vt/vtgate/l2vtgate/l2vtgate.go b/go/vt/vtgate/l2vtgate/l2vtgate.go index 0ade1a2a205..1b443a23731 100644 --- a/go/vt/vtgate/l2vtgate/l2vtgate.go +++ b/go/vt/vtgate/l2vtgate/l2vtgate.go @@ -97,7 +97,7 @@ func (l *L2VTGate) endAction(startTime time.Time, statsKey []string, err *error) // Don't increment the error counter for duplicate // keys or bad queries, as those errors are caused by // client queries and are not VTGate's fault. 
- ec := vterrors.RecoverVtErrorCode(*err) + ec := vterrors.Code(*err) if ec != vtrpcpb.Code_ALREADY_EXISTS && ec != vtrpcpb.Code_INVALID_ARGUMENT { l.tabletCallErrorCount.Add(statsKey, 1) } diff --git a/go/vt/vtgate/resolver.go b/go/vt/vtgate/resolver.go index 5e3a9db6197..fcf3d801707 100644 --- a/go/vt/vtgate/resolver.go +++ b/go/vt/vtgate/resolver.go @@ -54,14 +54,7 @@ func NewResolver(serv topo.SrvTopoServer, cell string, sc *ScatterConn) *Resolve // isRetryableError will be true if the error should be retried. func isRetryableError(err error) bool { - switch e := err.(type) { - case *ScatterConnError: - return e.Retryable - case *gateway.ShardError: - return e.Code == vtrpcpb.Code_FAILED_PRECONDITION - default: - return false - } + return vterrors.Code(err) == vtrpcpb.Code_FAILED_PRECONDITION } // ExecuteKeyspaceIds executes a non-streaming query based on KeyspaceIds. diff --git a/go/vt/vtgate/resolver_test.go b/go/vt/vtgate/resolver_test.go index 321949f7c2e..b485f22e35c 100644 --- a/go/vt/vtgate/resolver_test.go +++ b/go/vt/vtgate/resolver_test.go @@ -15,9 +15,7 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/discovery" - "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" "github.com/youtube/vitess/go/vt/topo" - "github.com/youtube/vitess/go/vt/vtgate/gateway" "golang.org/x/net/context" querypb "github.com/youtube/vitess/go/vt/proto/query" @@ -552,32 +550,6 @@ func TestResolverExecBatchAsTransaction(t *testing.T) { } } -func TestIsRetryableError(t *testing.T) { - var connErrorTests = []struct { - in error - outBool bool - }{ - {fmt.Errorf("generic error"), false}, - {&ScatterConnError{Retryable: true}, true}, - {&ScatterConnError{Retryable: false}, false}, - {&gateway.ShardError{Code: vtrpcpb.Code_FAILED_PRECONDITION}, true}, - {&gateway.ShardError{Code: vtrpcpb.Code_INTERNAL}, false}, - // tabletconn.ServerError will not come directly here, - // they'll be wrapped in ScatterConnError or ShardConnError. 
- // So they can't be retried as is. - {&tabletconn.ServerError{ServerCode: vtrpcpb.Code_FAILED_PRECONDITION}, false}, - {&tabletconn.ServerError{ServerCode: vtrpcpb.Code_PERMISSION_DENIED}, false}, - } - - for _, tt := range connErrorTests { - gotBool := isRetryableError(tt.in) - if gotBool != tt.outBool { - t.Errorf("isConnError(%v) => %v, want %v", - tt.in, gotBool, tt.outBool) - } - } -} - func newTestResolver(hc discovery.HealthCheck, serv topo.SrvTopoServer, cell string) *Resolver { sc := newTestScatterConn(hc, serv, cell) return NewResolver(serv, cell, sc) diff --git a/go/vt/vtgate/sandbox_test.go b/go/vt/vtgate/sandbox_test.go index a842e8bacac..1c3324873bc 100644 --- a/go/vt/vtgate/sandbox_test.go +++ b/go/vt/vtgate/sandbox_test.go @@ -16,10 +16,12 @@ import ( "github.com/youtube/vitess/go/vt/tabletserver/sandboxconn" "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" "github.com/youtube/vitess/go/vt/topo" + "github.com/youtube/vitess/go/vt/vterrors" "golang.org/x/net/context" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" vschemapb "github.com/youtube/vitess/go/vt/proto/vschema" + vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) // sandbox_test.go provides a sandbox for unit testing VTGate. 
@@ -268,12 +270,12 @@ func sandboxDialer(tablet *topodatapb.Tablet, timeout time.Duration) (queryservi sand.DialCounter++ if sand.DialMustFail > 0 { sand.DialMustFail-- - return nil, tabletconn.OperationalError(fmt.Sprintf("conn error")) + return nil, vterrors.New(vtrpcpb.Code_UNAVAILABLE, "conn error") } if sand.DialMustTimeout > 0 { time.Sleep(timeout) sand.DialMustTimeout-- - return nil, tabletconn.OperationalError(fmt.Sprintf("conn unreachable")) + return nil, vterrors.New(vtrpcpb.Code_UNAVAILABLE, "conn unreachable") } sbc := sandboxconn.NewSandboxConn(tablet) return sbc, nil diff --git a/go/vt/vtgate/scatter_conn.go b/go/vt/vtgate/scatter_conn.go index e856a36c338..02d08551482 100644 --- a/go/vt/vtgate/scatter_conn.go +++ b/go/vt/vtgate/scatter_conn.go @@ -5,7 +5,6 @@ package vtgate import ( - "fmt" "math/rand" "sync" "time" @@ -78,7 +77,7 @@ func (stc *ScatterConn) endAction(startTime time.Time, allErrors *concurrency.Al // Don't increment the error counter for duplicate // keys or bad queries, as those errors are caused by // client queries and are not VTGate's fault. 
- ec := vterrors.RecoverVtErrorCode(*err) + ec := vterrors.Code(*err) if ec != vtrpcpb.Code_ALREADY_EXISTS && ec != vtrpcpb.Code_INVALID_ARGUMENT { stc.tabletCallErrorCount.Add(statsKey, 1) } @@ -322,7 +321,7 @@ func (stc *ScatterConn) ExecuteBatch( stc.txConn.Rollback(ctx, session) } if allErrors.HasErrors() { - return nil, allErrors.AggrError(stc.aggregateErrors) + return nil, allErrors.AggrError(vterrors.AggregateVtGateErrors) } return results, nil } @@ -359,7 +358,7 @@ func (stc *ScatterConn) StreamExecute( return stc.processOneStreamingResult(&mu, &fieldSent, qr, callback) }) }) - return allErrors.AggrError(stc.aggregateErrors) + return allErrors.AggrError(vterrors.AggregateVtGateErrors) } // StreamExecuteMulti is like StreamExecute, @@ -383,7 +382,7 @@ func (stc *ScatterConn) StreamExecuteMulti( return stc.processOneStreamingResult(&mu, &fieldSent, qr, callback) }) }) - return allErrors.AggrError(stc.aggregateErrors) + return allErrors.AggrError(vterrors.AggregateVtGateErrors) } // MessageStream streams messages from the specified shards. @@ -396,7 +395,7 @@ func (stc *ScatterConn) MessageStream(ctx context.Context, keyspace string, shar return stc.processOneStreamingResult(&mu, &fieldSent, qr, callback) }) }) - return allErrors.AggrError(stc.aggregateErrors) + return allErrors.AggrError(vterrors.AggregateVtGateErrors) } // MessageAck acks messages across multiple shards. @@ -417,7 +416,7 @@ func (stc *ScatterConn) MessageAck(ctx context.Context, keyspace string, shardID mu.Unlock() return nil }) - return totalCount, allErrors.AggrError(stc.aggregateErrors) + return totalCount, allErrors.AggrError(vterrors.AggregateVtGateErrors) } // UpdateStream just sends the query to the gateway, @@ -489,7 +488,7 @@ func (stc *ScatterConn) SplitQuery( ) if allErrors.HasErrors() { - err := allErrors.AggrError(stc.aggregateErrors) + err := allErrors.AggrError(vterrors.AggregateVtGateErrors) return nil, err } // We shuffle the query-parts here. 
External frameworks like MapReduce may @@ -547,42 +546,6 @@ func (stc *ScatterConn) GetGatewayCacheStatus() gateway.TabletCacheStatusList { return stc.gateway.CacheStatus() } -// ScatterConnError is the ScatterConn specific error. -// It implements vterrors.VtError. -type ScatterConnError struct { - Retryable bool - // Preserve the original errors, so that we don't need to parse the error string. - Errs []error - // serverCode is the error code to use for all the server errors in aggregate - serverCode vtrpcpb.Code -} - -func (e *ScatterConnError) Error() string { - return fmt.Sprintf("%v", vterrors.ConcatenateErrors(e.Errs)) -} - -// VtErrorCode returns the underlying Vitess error code -// This is part of vterrors.VtError interface. -func (e *ScatterConnError) VtErrorCode() vtrpcpb.Code { - return e.serverCode -} - -func (stc *ScatterConn) aggregateErrors(errors []error) error { - allRetryableError := true - for _, e := range errors { - connError, ok := e.(*gateway.ShardError) - if !ok || connError.Code != vtrpcpb.Code_FAILED_PRECONDITION || connError.InTransaction { - allRetryableError = false - break - } - } - return &ScatterConnError{ - Retryable: allRetryableError, - Errs: errors, - serverCode: vterrors.AggregateVtGateErrorCodes(errors), - } -} - // multiGo performs the requested 'action' on the specified // shards in parallel. This does not handle any transaction state. // The action function must match the shardActionFunc signature. 
@@ -699,7 +662,7 @@ end: stc.txConn.Rollback(ctx, session) } if allErrors.HasErrors() { - return allErrors.AggrError(stc.aggregateErrors) + return allErrors.AggrError(vterrors.AggregateVtGateErrors) } return nil } diff --git a/go/vt/vtgate/scatter_conn_test.go b/go/vt/vtgate/scatter_conn_test.go index 5012346254e..f83480190b7 100644 --- a/go/vt/vtgate/scatter_conn_test.go +++ b/go/vt/vtgate/scatter_conn_test.go @@ -12,7 +12,6 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/discovery" - "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/vterrors" "github.com/youtube/vitess/go/vt/vtgate/gateway" @@ -102,11 +101,7 @@ func verifyScatterConnError(t *testing.T, err error, wantErr string, wantCode vt if err == nil || err.Error() != wantErr { t.Errorf("wanted error: %s, got error: %v", wantErr, err) } - if _, ok := err.(*ScatterConnError); !ok { - t.Errorf("wanted error type *ScatterConnError, got error type: %v", reflect.TypeOf(err)) - } - code := vterrors.RecoverVtErrorCode(err) - if code != wantCode { + if code := vterrors.Code(err); code != wantCode { t.Errorf("wanted error code: %s, got: %v", wantCode, code) } } @@ -278,24 +273,6 @@ func TestScatterConnStreamExecuteSendError(t *testing.T) { } } -func TestScatterConnError(t *testing.T) { - err := &ScatterConnError{ - Retryable: false, - Errs: []error{ - &gateway.ShardError{Code: vtrpcpb.Code_PERMISSION_DENIED, Err: &tabletconn.ServerError{Err: "tabletconn error"}}, - fmt.Errorf("generic error"), - tabletconn.ConnClosed, - }, - } - - errString := err.Error() - wantErrString := "generic error\ntabletconn error\nvttablet: Connection Closed" - - if errString != wantErrString { - t.Errorf("got: %v, want: %v", errString, wantErrString) - } -} - func TestScatterConnQueryNotInTransaction(t *testing.T) { s := createSandbox("TestScatterConnQueryNotInTransaction") hc := discovery.NewFakeHealthCheck() diff --git 
a/go/vt/vtgate/tx_conn.go b/go/vt/vtgate/tx_conn.go index b64ce6560f7..907d6cbd490 100644 --- a/go/vt/vtgate/tx_conn.go +++ b/go/vt/vtgate/tx_conn.go @@ -199,7 +199,7 @@ func (txc *TxConn) runSessions(shardSessions []*vtgatepb.Session_ShardSession, a }(s) } wg.Wait() - return allErrors.AggrError(aggregateTxConnErrors) + return allErrors.AggrError(vterrors.AggregateVtGateErrors) } // runTargets executes the action for all targets in parallel and returns a consolildated error. @@ -220,13 +220,5 @@ func (txc *TxConn) runTargets(targets []*querypb.Target, action func(*querypb.Ta }(t) } wg.Wait() - return allErrors.AggrError(aggregateTxConnErrors) -} - -func aggregateTxConnErrors(errors []error) error { - return &ScatterConnError{ - Retryable: false, - Errs: errors, - serverCode: vterrors.AggregateVtGateErrorCodes(errors), - } + return allErrors.AggrError(vterrors.AggregateVtGateErrors) } diff --git a/go/vt/vtgate/tx_conn_test.go b/go/vt/vtgate/tx_conn_test.go index 298c49b6d9a..3312b859995 100644 --- a/go/vt/vtgate/tx_conn_test.go +++ b/go/vt/vtgate/tx_conn_test.go @@ -25,7 +25,7 @@ func TestTxConnCommitRollbackIncorrectSession(t *testing.T) { sc, _, _ := newTestTxConnEnv("TestTxConn") // nil session err := sc.txConn.Commit(context.Background(), false, nil) - if got := vterrors.RecoverVtErrorCode(err); got != vtrpcpb.Code_INVALID_ARGUMENT { + if got := vterrors.Code(err); got != vtrpcpb.Code_INVALID_ARGUMENT { t.Errorf("Commit: %v, want %v", got, vtrpcpb.Code_INVALID_ARGUMENT) } @@ -37,7 +37,7 @@ func TestTxConnCommitRollbackIncorrectSession(t *testing.T) { // not in transaction session := NewSafeSession(&vtgatepb.Session{}) err = sc.txConn.Commit(context.Background(), false, session) - if got := vterrors.RecoverVtErrorCode(err); got != vtrpcpb.Code_ABORTED { + if got := vterrors.Code(err); got != vtrpcpb.Code_ABORTED { t.Errorf("Commit: %v, want %v", got, vtrpcpb.Code_ABORTED) } } @@ -615,10 +615,9 @@ func TestTxConnMultiGoSessions(t *testing.T) { if err == nil || 
err.Error() != want { t.Errorf("runSessions(2): %v, want %s", err, want) } - errCode := err.(*ScatterConnError).VtErrorCode() wantCode := vtrpcpb.Code_INTERNAL - if errCode != wantCode { - t.Errorf("Error code: %v, want %v", errCode, wantCode) + if code := vterrors.Code(err); code != wantCode { + t.Errorf("Error code: %v, want %v", code, wantCode) } err = txc.runSessions(input, func(s *vtgatepb.Session_ShardSession) error { @@ -654,10 +653,9 @@ func TestTxConnMultiGoTargets(t *testing.T) { if err == nil || err.Error() != want { t.Errorf("runTargets(2): %v, want %s", err, want) } - errCode := err.(*ScatterConnError).VtErrorCode() wantCode := vtrpcpb.Code_INTERNAL - if errCode != wantCode { - t.Errorf("Error code: %v, want %v", errCode, wantCode) + if code := vterrors.Code(err); code != wantCode { + t.Errorf("Error code: %v, want %v", code, wantCode) } err = txc.runTargets(input, func(t *querypb.Target) error { diff --git a/go/vt/vtgate/vtgate.go b/go/vt/vtgate/vtgate.go index a240028c743..29d0bccba92 100644 --- a/go/vt/vtgate/vtgate.go +++ b/go/vt/vtgate/vtgate.go @@ -27,7 +27,6 @@ import ( "github.com/youtube/vitess/go/vt/servenv" "github.com/youtube/vitess/go/vt/sqlannotation" "github.com/youtube/vitess/go/vt/tabletserver/querytypes" - "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/topoproto" "github.com/youtube/vitess/go/vt/vterrors" @@ -82,11 +81,10 @@ var ( errorsByOperation *stats.Rates errorsByKeyspace *stats.Rates errorsByDbType *stats.Rates + errorsByCode *stats.Rates // Error counters should be global so they can be set from anywhere - normalErrors *stats.MultiCounters - infoErrors *stats.Counters - internalErrors *stats.Counters + errorCounts *stats.MultiCounters ) // VTGate is the rpc interface to vtgate. 
Only one instance @@ -181,17 +179,16 @@ func Init(ctx context.Context, hc discovery.HealthCheck, topoServer topo.Server, logMessageStream: logutil.NewThrottledLogger("MessageStream", 5*time.Second), } - normalErrors = stats.NewMultiCounters("VtgateApiErrorCounts", []string{"Operation", "Keyspace", "DbType"}) - infoErrors = stats.NewCounters("VtgateInfoErrorCounts") - internalErrors = stats.NewCounters("VtgateInternalErrorCounts") + errorCounts = stats.NewMultiCounters("VtgateApiErrorCounts", []string{"Operation", "Keyspace", "DbType", "Code"}) qpsByOperation = stats.NewRates("QPSByOperation", stats.CounterForDimension(rpcVTGate.timings, "Operation"), 15, 1*time.Minute) qpsByKeyspace = stats.NewRates("QPSByKeyspace", stats.CounterForDimension(rpcVTGate.timings, "Keyspace"), 15, 1*time.Minute) qpsByDbType = stats.NewRates("QPSByDbType", stats.CounterForDimension(rpcVTGate.timings, "DbType"), 15, 1*time.Minute) - errorsByOperation = stats.NewRates("ErrorsByOperation", stats.CounterForDimension(normalErrors, "Operation"), 15, 1*time.Minute) - errorsByKeyspace = stats.NewRates("ErrorsByKeyspace", stats.CounterForDimension(normalErrors, "Keyspace"), 15, 1*time.Minute) - errorsByDbType = stats.NewRates("ErrorsByDbType", stats.CounterForDimension(normalErrors, "DbType"), 15, 1*time.Minute) + errorsByOperation = stats.NewRates("ErrorsByOperation", stats.CounterForDimension(errorCounts, "Operation"), 15, 1*time.Minute) + errorsByKeyspace = stats.NewRates("ErrorsByKeyspace", stats.CounterForDimension(errorCounts, "Keyspace"), 15, 1*time.Minute) + errorsByDbType = stats.NewRates("ErrorsByDbType", stats.CounterForDimension(errorCounts, "DbType"), 15, 1*time.Minute) + errorsByCode = stats.NewRates("ErrorsByCode", stats.CounterForDimension(errorCounts, "Code"), 15, 1*time.Minute) servenv.OnRun(func() { for _, f := range RegisterVTGates { @@ -245,7 +242,7 @@ func (vtg *VTGate) Execute(ctx context.Context, sql string, bindVariables map[st "NotInTransaction": notInTransaction, 
"Options": options, } - err = handleExecuteError(err, statsKey, query, vtg.logExecute) + err = recordAndAnnotateError(err, statsKey, query, vtg.logExecute) return nil, err } @@ -286,7 +283,7 @@ func (vtg *VTGate) ExecuteShards(ctx context.Context, sql string, bindVariables "NotInTransaction": notInTransaction, "Options": options, } - err = handleExecuteError(err, statsKey, query, vtg.logExecuteShards) + err = recordAndAnnotateError(err, statsKey, query, vtg.logExecuteShards) return nil, err } @@ -315,7 +312,7 @@ func (vtg *VTGate) ExecuteKeyspaceIds(ctx context.Context, sql string, bindVaria "NotInTransaction": notInTransaction, "Options": options, } - err = handleExecuteError(err, statsKey, query, vtg.logExecuteKeyspaceIds) + err = recordAndAnnotateError(err, statsKey, query, vtg.logExecuteKeyspaceIds) return nil, err } @@ -344,7 +341,7 @@ func (vtg *VTGate) ExecuteKeyRanges(ctx context.Context, sql string, bindVariabl "NotInTransaction": notInTransaction, "Options": options, } - err = handleExecuteError(err, statsKey, query, vtg.logExecuteKeyRanges) + err = recordAndAnnotateError(err, statsKey, query, vtg.logExecuteKeyRanges) return nil, err } @@ -374,7 +371,7 @@ func (vtg *VTGate) ExecuteEntityIds(ctx context.Context, sql string, bindVariabl "NotInTransaction": notInTransaction, "Options": options, } - err = handleExecuteError(err, statsKey, query, vtg.logExecuteEntityIds) + err = recordAndAnnotateError(err, statsKey, query, vtg.logExecuteEntityIds) return nil, err } @@ -404,7 +401,7 @@ func (vtg *VTGate) ExecuteBatch(ctx context.Context, sqlList []string, bindVaria "AsTransaction": asTransaction, "Options": options, } - err = handleExecuteError(err, statsKey, query, vtg.logExecute) + err = recordAndAnnotateError(err, statsKey, query, vtg.logExecute) return nil, err } @@ -442,7 +439,7 @@ func (vtg *VTGate) ExecuteBatchShards(ctx context.Context, queries []*vtgatepb.B "Session": session, "Options": options, } - err = handleExecuteError(err, statsKey, query, 
vtg.logExecuteBatchShards) + err = recordAndAnnotateError(err, statsKey, query, vtg.logExecuteBatchShards) return nil, err } @@ -478,7 +475,7 @@ func (vtg *VTGate) ExecuteBatchKeyspaceIds(ctx context.Context, queries []*vtgat "Session": session, "Options": options, } - err = handleExecuteError(err, statsKey, query, vtg.logExecuteBatchKeyspaceIds) + err = recordAndAnnotateError(err, statsKey, query, vtg.logExecuteBatchKeyspaceIds) return nil, err } @@ -509,7 +506,7 @@ func (vtg *VTGate) StreamExecute(ctx context.Context, sql string, bindVariables "TabletType": ltt, "Options": options, } - return handleExecuteError(err, statsKey, query, vtg.logStreamExecute) + return recordAndAnnotateError(err, statsKey, query, vtg.logStreamExecute) } return nil } @@ -548,7 +545,7 @@ func (vtg *VTGate) StreamExecuteKeyspaceIds(ctx context.Context, sql string, bin "TabletType": ltt, "Options": options, } - return handleExecuteError(err, statsKey, query, vtg.logStreamExecuteKeyspaceIds) + return recordAndAnnotateError(err, statsKey, query, vtg.logStreamExecuteKeyspaceIds) } return nil } @@ -587,7 +584,7 @@ func (vtg *VTGate) StreamExecuteKeyRanges(ctx context.Context, sql string, bindV "TabletType": ltt, "Options": options, } - return handleExecuteError(err, statsKey, query, vtg.logStreamExecuteKeyRanges) + return recordAndAnnotateError(err, statsKey, query, vtg.logStreamExecuteKeyRanges) } return nil } @@ -623,7 +620,7 @@ func (vtg *VTGate) StreamExecuteShards(ctx context.Context, sql string, bindVari "TabletType": ltt, "Options": options, } - return handleExecuteError(err, statsKey, query, vtg.logStreamExecuteShards) + return recordAndAnnotateError(err, statsKey, query, vtg.logStreamExecuteShards) } return nil } @@ -823,15 +820,14 @@ func (vtg *VTGate) MessageStream(ctx context.Context, keyspace string, shard str callback, ) if err != nil { - normalErrors.Add(statsKey, 1) - query := map[string]interface{}{ + request := map[string]interface{}{ "Keyspace": keyspace, "Shard": shard, 
"KeyRange": keyRange, "TabletType": ltt, "MessageName": name, } - logError(err, query, vtg.logMessageStream) + recordAndAnnotateError(err, statsKey, request, vtg.logMessageStream) } return formatError(err) } @@ -866,15 +862,14 @@ func (vtg *VTGate) UpdateStream(ctx context.Context, keyspace string, shard stri callback, ) if err != nil { - normalErrors.Add(statsKey, 1) - query := map[string]interface{}{ + request := map[string]interface{}{ "Keyspace": keyspace, "Shard": shard, "KeyRange": keyRange, "TabletType": ltt, "Timestamp": timestamp, } - logError(err, query, vtg.logUpdateStream) + recordAndAnnotateError(err, statsKey, request, vtg.logUpdateStream) } return formatError(err) } @@ -889,93 +884,22 @@ func (vtg *VTGate) VSchemaStats() *VSchemaStats { return vtg.router.planner.VSchemaStats() } -// Any errors that are caused by VTGate dependencies (e.g, VtTablet) should be logged -// as errors in those components, but logged to Info in VTGate itself. -func logError(err error, query map[string]interface{}, logger *logutil.ThrottledLogger) { - if err == context.DeadlineExceeded { - // Count these but don't log them because they are very common and not - // likely to indicate a bug in VTGate - infoErrors.Add("TimeoutErrors", 1) - return - } - logMethod := logger.Errorf - if !isErrorCausedByVTGate(err) { - infoErrors.Add("NonVtgateErrors", 1) - // Log non-vtgate errors (e.g. a query failed on vttablet because a failover - // is in progress) on INFO only because vttablet already logs them as ERROR. - logMethod = logger.Infof - } - logMethod("%v, query: %+v", err, query) -} - -// Returns true if a given error is caused entirely due to VTGate, and not any -// of the components that it depends on. -// If the error is an aggregation of multiple errors e.g. in case of a scatter -// query, the function returns true if *any* error is caused by vtgate. -// Consequently, the function returns false if *all* errors are caused by -// vttablet (actual errors) or the client (e.g. 
context canceled). -func isErrorCausedByVTGate(err error) bool { - var errQueue []error - errQueue = append(errQueue, err) - for len(errQueue) > 0 { - // pop the first item from the queue - e := errQueue[0] - errQueue = errQueue[1:] - - switch e := e.(type) { - case *ScatterConnError: - errQueue = append(errQueue, e.Errs...) - case *gateway.ShardError: - errQueue = append(errQueue, e.Err) - case tabletconn.OperationalError: - // Communication with vttablet failed i.e. the error is caused by vtgate. - // (For actual vttablet errors, see the next case "ServerError".) - return true - case *tabletconn.ServerError: - // The query failed on vttablet and it returned this error. - // Ignore it and check the next error in the queue. - default: - if e == context.Canceled { - // Caused by the client and not vtgate. - // Ignore it and check the next error in the queue. - continue - } - - // Return true if even a single error within - // the error queue was caused by VTGate. If - // we're not certain what caused the error, we - // default to assuming that VTGate was at fault. - return true - } +func recordAndAnnotateError(err error, statsKey []string, request map[string]interface{}, logger *logutil.ThrottledLogger) error { + ec := vterrors.Code(err) + fullkey := []string{ + statsKey[0], + statsKey[1], + statsKey[2], + ec.String(), } - return false -} - -func handleExecuteError(err error, statsKey []string, query map[string]interface{}, logger *logutil.ThrottledLogger) error { - // First we log in the right category. - ec := vterrors.RecoverVtErrorCode(err) + errorCounts.Add(fullkey, 1) + // Most errors are not logged by vtgate because they're either too spammy or logged elsewhere. switch ec { - case vtrpcpb.Code_ALREADY_EXISTS: - // Duplicate key error, no need to log. - infoErrors.Add("DupKey", 1) - case vtrpcpb.Code_RESOURCE_EXHAUSTED, vtrpcpb.Code_INVALID_ARGUMENT: - // Tx pool full error, or bad input, no need to log.
- normalErrors.Add(statsKey, 1) - case vtrpcpb.Code_PERMISSION_DENIED: - // User violated permissions (TableACL), no need to log. - infoErrors.Add("PermissionDenied", 1) - case vtrpcpb.Code_UNAVAILABLE: - // Temporary error which should be retried by user. Do not log. - // As of 01/2017, only the vttablet transaction throttler and the vtgate - // master buffer (if buffer full) return this error. - infoErrors.Add("TransientError", 1) - default: - // Regular error, we will log if caused by vtgate. - normalErrors.Add(statsKey, 1) - logError(err, query, logger) + case vtrpcpb.Code_UNKNOWN, vtrpcpb.Code_INTERNAL, vtrpcpb.Code_DATA_LOSS: + logger.Errorf("%v, request: %+v", err, request) } - // Then we suffix the error with our address. + // Suffix the error with our address. s := fmt.Sprintf(", vtgate: %v", servenv.ListeningURL.String()) return vterrors.WithSuffix(err, s) } @@ -993,7 +917,7 @@ func (vtg *VTGate) HandlePanic(err *error) { if x := recover(); x != nil { log.Errorf("Uncaught panic:\n%v\n%s", x, tb.Stack(4)) *err = fmt.Errorf("uncaught panic: %v, vtgate: %v", x, servenv.ListeningURL.String()) - internalErrors.Add("Panic", 1) + errorCounts.Add([]string{"Panic", "Unknown", "Unknown", vtrpcpb.Code_INTERNAL.String()}, 1) } } diff --git a/go/vt/vtgate/vtgate_test.go b/go/vt/vtgate/vtgate_test.go index 56a5baadbef..11dccd54b69 100644 --- a/go/vt/vtgate/vtgate_test.go +++ b/go/vt/vtgate/vtgate_test.go @@ -18,10 +18,8 @@ import ( "github.com/youtube/vitess/go/vt/key" "github.com/youtube/vitess/go/vt/tabletserver/querytypes" "github.com/youtube/vitess/go/vt/tabletserver/sandboxconn" - "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/vterrors" - "github.com/youtube/vitess/go/vt/vtgate/gateway" "golang.org/x/net/context" querypb "github.com/youtube/vitess/go/vt/proto/query" @@ -1190,53 +1188,6 @@ func TestVTGateSplitQueryUnsharded(t *testing.T) { } } -func TestIsErrorCausedByVTGate(t 
*testing.T) { - unknownError := fmt.Errorf("unknown error") - serverError := &tabletconn.ServerError{ - ServerCode: vtrpcpb.Code_FAILED_PRECONDITION, - Err: "vttablet: retry: error message", - } - shardConnUnknownErr := &gateway.ShardError{Err: unknownError} - shardConnServerErr := &gateway.ShardError{Err: serverError} - shardConnCancelledErr := &gateway.ShardError{Err: context.Canceled} - scatterConnErrAllUnknownErrs := &ScatterConnError{ - Errs: []error{unknownError, unknownError, unknownError}, - } - scatterConnErrMixed := &ScatterConnError{ - Errs: []error{unknownError, shardConnServerErr, shardConnCancelledErr}, - } - scatterConnErrAllNonVTGateErrs := &ScatterConnError{ - Errs: []error{shardConnServerErr, shardConnServerErr, shardConnCancelledErr}, - } - - inputToWant := map[error]bool{ - unknownError: true, - serverError: false, - context.Canceled: false, - // OperationalErrors that are not tabletconn.Cancelled might be from VTGate - tabletconn.ConnClosed: true, - // Errors wrapped in ShardConnError should get unwrapped - shardConnUnknownErr: true, - shardConnServerErr: false, - shardConnCancelledErr: false, - // We consider a ScatterConnErr with all unknown errors to be from VTGate - scatterConnErrAllUnknownErrs: true, - // We consider a ScatterConnErr with a mix of errors to be from VTGate - scatterConnErrMixed: true, - // If every error in ScatterConnErr list is caused by external components, we shouldn't - // consider the error to be from VTGate - scatterConnErrAllNonVTGateErrs: false, - } - - for input, want := range inputToWant { - got := isErrorCausedByVTGate(input) - if got != want { - t.Errorf("isErrorCausedByVTGate(%v) => %v, want %v", - input, got, want) - } - } -} - // Functions for testing // keyspace_id and 'filtered_replication_unfriendly' // annotations. 
@@ -1619,7 +1570,7 @@ func testErrorPropagation(t *testing.T, sbcs []*sandboxconn.SandboxConn, before if err == nil { t.Errorf("error %v not propagated for Execute", expected) } else { - ec := vterrors.RecoverVtErrorCode(err) + ec := vterrors.Code(err) if ec != expected { t.Errorf("unexpected error, got %v want %v: %v", ec, expected, err) } @@ -1644,7 +1595,7 @@ func testErrorPropagation(t *testing.T, sbcs []*sandboxconn.SandboxConn, before if err == nil { t.Errorf("error %v not propagated for ExecuteShards", expected) } else { - ec := vterrors.RecoverVtErrorCode(err) + ec := vterrors.Code(err) if ec != expected { t.Errorf("unexpected error, got %v want %v: %v", ec, expected, err) } @@ -1669,7 +1620,7 @@ func testErrorPropagation(t *testing.T, sbcs []*sandboxconn.SandboxConn, before if err == nil { t.Errorf("error %v not propagated for ExecuteKeyspaceIds", expected) } else { - ec := vterrors.RecoverVtErrorCode(err) + ec := vterrors.Code(err) if ec != expected { t.Errorf("unexpected error, got %v want %v: %v", ec, expected, err) } @@ -1694,7 +1645,7 @@ func testErrorPropagation(t *testing.T, sbcs []*sandboxconn.SandboxConn, before if err == nil { t.Errorf("error %v not propagated for ExecuteKeyRanges", expected) } else { - ec := vterrors.RecoverVtErrorCode(err) + ec := vterrors.Code(err) if ec != expected { t.Errorf("unexpected error, got %v want %v: %v", ec, expected, err) } @@ -1726,7 +1677,7 @@ func testErrorPropagation(t *testing.T, sbcs []*sandboxconn.SandboxConn, before if err == nil { t.Errorf("error %v not propagated for ExecuteEntityIds", expected) } else { - ec := vterrors.RecoverVtErrorCode(err) + ec := vterrors.Code(err) if ec != expected { t.Errorf("unexpected error, got %v want %v: %v", ec, expected, err) } @@ -1762,7 +1713,7 @@ func testErrorPropagation(t *testing.T, sbcs []*sandboxconn.SandboxConn, before if err == nil { t.Errorf("error %v not propagated for ExecuteBatchShards", expected) } else { - ec := vterrors.RecoverVtErrorCode(err) + ec := 
vterrors.Code(err) if ec != expected { t.Errorf("unexpected error, got %v want %v: %v", ec, expected, err) } @@ -1800,7 +1751,7 @@ func testErrorPropagation(t *testing.T, sbcs []*sandboxconn.SandboxConn, before if err == nil { t.Errorf("error %v not propagated for ExecuteBatchShards", expected) } else { - ec := vterrors.RecoverVtErrorCode(err) + ec := vterrors.Code(err) if ec != expected { t.Errorf("unexpected error, got %v want %v: %v", ec, expected, err) } @@ -1825,7 +1776,7 @@ func testErrorPropagation(t *testing.T, sbcs []*sandboxconn.SandboxConn, before if err == nil { t.Errorf("error %v not propagated for StreamExecute", expected) } else { - ec := vterrors.RecoverVtErrorCode(err) + ec := vterrors.Code(err) if ec != expected { t.Errorf("unexpected error, got %v want %v: %v", ec, expected, err) } @@ -1851,7 +1802,7 @@ func testErrorPropagation(t *testing.T, sbcs []*sandboxconn.SandboxConn, before if err == nil { t.Errorf("error %v not propagated for StreamExecuteShards", expected) } else { - ec := vterrors.RecoverVtErrorCode(err) + ec := vterrors.Code(err) if ec != expected { t.Errorf("unexpected error, got %v want %v: %v", ec, expected, err) } @@ -1877,7 +1828,7 @@ func testErrorPropagation(t *testing.T, sbcs []*sandboxconn.SandboxConn, before if err == nil { t.Errorf("error %v not propagated for StreamExecuteKeyspaceIds", expected) } else { - ec := vterrors.RecoverVtErrorCode(err) + ec := vterrors.Code(err) if ec != expected { t.Errorf("unexpected error, got %v want %v: %v", ec, expected, err) } @@ -1903,7 +1854,7 @@ func testErrorPropagation(t *testing.T, sbcs []*sandboxconn.SandboxConn, before if err == nil { t.Errorf("error %v not propagated for StreamExecuteKeyRanges", expected) } else { - ec := vterrors.RecoverVtErrorCode(err) + ec := vterrors.Code(err) if ec != expected { t.Errorf("unexpected error, got %v want %v: %v", ec, expected, err) } @@ -1933,7 +1884,7 @@ func testErrorPropagation(t *testing.T, sbcs []*sandboxconn.SandboxConn, before if err == 
nil { t.Errorf("error %v not propagated for Commit", expected) } else { - ec := vterrors.RecoverVtErrorCode(err) + ec := vterrors.Code(err) if ec != expected { t.Errorf("unexpected error, got %v want %v: %v", ec, expected, err) } @@ -1959,7 +1910,7 @@ func testErrorPropagation(t *testing.T, sbcs []*sandboxconn.SandboxConn, before if err == nil { t.Errorf("error %v not propagated for SplitQuery", expected) } else { - ec := vterrors.RecoverVtErrorCode(err) + ec := vterrors.Code(err) if ec != expected { t.Errorf("unexpected error, got %v want %v: %v", ec, expected, err) } diff --git a/go/vt/vtgate/vtgateconntest/client.go b/go/vt/vtgate/vtgateconntest/client.go index 1124502df4f..f161744231f 100644 --- a/go/vt/vtgate/vtgateconntest/client.go +++ b/go/vt/vtgate/vtgateconntest/client.go @@ -985,7 +985,7 @@ func verifyError(t *testing.T, err error, method string) { return } // verify error code - code := vterrors.RecoverVtErrorCode(err) + code := vterrors.Code(err) if code != expectedCode { t.Errorf("Unexpected error code from %s: got %v, wanted %v", method, code, expectedCode) } diff --git a/go/vt/worker/vtworkerclienttest/client_testsuite.go b/go/vt/worker/vtworkerclienttest/client_testsuite.go index cda467ea4e0..769d3ed4fab 100644 --- a/go/vt/worker/vtworkerclienttest/client_testsuite.go +++ b/go/vt/worker/vtworkerclienttest/client_testsuite.go @@ -129,7 +129,7 @@ func commandErrorsBecauseBusy(t *testing.T, client vtworkerclient.Client, server if _, err := stream.Recv(); err != nil { // We see CANCELED from the RPC client (client side cancelation) or // from vtworker itself (server side cancelation). - if vterrors.RecoverVtErrorCode(err) != vtrpcpb.Code_CANCELED { + if vterrors.Code(err) != vtrpcpb.Code_CANCELED { errorCodeCheck = fmt.Errorf("Block command should only error due to canceled context: %v", err) } // Stream has finished. 
@@ -151,7 +151,7 @@ func commandErrorsBecauseBusy(t *testing.T, client vtworkerclient.Client, server <-blockCommandStarted gotErr := runVtworkerCommand(client, []string{"Ping", "Are you busy?"}) wantCode := vtrpcpb.Code_UNAVAILABLE - if gotCode := vterrors.RecoverVtErrorCode(gotErr); gotCode != wantCode { + if gotCode := vterrors.Code(gotErr); gotCode != wantCode { t.Fatalf("wrong error code for second cmd: got = %v, want = %v, err: %v", gotCode, wantCode, gotErr) } @@ -175,7 +175,7 @@ func commandErrorsBecauseBusy(t *testing.T, client vtworkerclient.Client, server // retryable error. gotErr2 := runVtworkerCommand(client, []string{"Ping", "canceled and still busy?"}) wantCode2 := vtrpcpb.Code_UNAVAILABLE - if gotCode2 := vterrors.RecoverVtErrorCode(gotErr2); gotCode2 != wantCode2 { + if gotCode2 := vterrors.Code(gotErr2); gotCode2 != wantCode2 { t.Fatalf("wrong error code for second cmd before reset: got = %v, want = %v, err: %v", gotCode2, wantCode2, gotErr2) } From ae7b2be938df982fefa61da9a573081f18a401a6 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Sun, 19 Feb 2017 14:04:54 -0800 Subject: [PATCH 018/108] vterrors: improve retry in gateways The wrapper functionality now takes over the retry decision because it's better suited to make it. For example, a streaming query should not be retried if it's already sent results. 
--- .../queryservice/fakes/error_query_service.go | 2 +- go/vt/tabletserver/queryservice/wrapped.go | 144 ++++++++++++------ go/vt/vterrors/vterrors.go | 5 +- go/vt/vtgate/gateway/discoverygateway.go | 32 +--- go/vt/vtgate/gateway/l2vtgategateway.go | 34 +---- go/vt/vtgate/l2vtgate/l2vtgate.go | 5 +- 6 files changed, 115 insertions(+), 107 deletions(-) diff --git a/go/vt/tabletserver/queryservice/fakes/error_query_service.go b/go/vt/tabletserver/queryservice/fakes/error_query_service.go index a221e3d84ea..f2ad434d6eb 100644 --- a/go/vt/tabletserver/queryservice/fakes/error_query_service.go +++ b/go/vt/tabletserver/queryservice/fakes/error_query_service.go @@ -12,7 +12,7 @@ import ( // ErrorQueryService is an object that returns an error for all methods. var ErrorQueryService = queryservice.Wrap( nil, - func(ctx context.Context, target *querypb.Target, conn queryservice.QueryService, name string, inTransaction, isStreaming bool, inner func(context.Context, *querypb.Target, queryservice.QueryService) error) error { + func(ctx context.Context, target *querypb.Target, conn queryservice.QueryService, name string, inTransaction bool, inner func(context.Context, *querypb.Target, queryservice.QueryService) (error, bool)) error { return fmt.Errorf("ErrorQueryService does not implement any method") }, ) diff --git a/go/vt/tabletserver/queryservice/wrapped.go b/go/vt/tabletserver/queryservice/wrapped.go index b2d02115bd3..096a3ce6e73 100644 --- a/go/vt/tabletserver/queryservice/wrapped.go +++ b/go/vt/tabletserver/queryservice/wrapped.go @@ -9,15 +9,20 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" + vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) var _ QueryService = &wrappedService{} // WrapperFunc defines the signature for the wrapper function used by Wrap. 
// Parameter ordering is as follows: original parameters, connection, method name, additional parameters and inner func. -type WrapperFunc func(ctx context.Context, target *querypb.Target, conn QueryService, name string, inTransaction, isStreaming bool, inner func(context.Context, *querypb.Target, QueryService) error) error +// The inner function returns err and canRetry. +// If canRetry is true, the error is specific to the current vttablet and can be retried elsewhere. +// The flag will be false if there was no error. +type WrapperFunc func(ctx context.Context, target *querypb.Target, conn QueryService, name string, inTransaction bool, inner func(context.Context, *querypb.Target, QueryService) (err error, canRetry bool)) error // Wrap returns a wrapped version of the original QueryService implementation. // This lets you avoid repeating boiler-plate code by consolidating it in the @@ -38,6 +43,25 @@ func Wrap(impl QueryService, wrapper WrapperFunc) QueryService { } } +// canRetry returns true if the error is retryable on a different vttablet. +// Nil error or a canceled context make it return +// false. Otherwise, the error code determines the outcome. +func canRetry(ctx context.Context, err error) bool { + if err == nil { + return false + } + select { + case <-ctx.Done(): + return false + default: + } + switch vterrors.Code(err) { + case vtrpcpb.Code_UNAVAILABLE, vtrpcpb.Code_FAILED_PRECONDITION, vtrpcpb.Code_RESOURCE_EXHAUSTED: + return true + } + return false +} + // wrappedService wraps an existing QueryService with // a decorator function. 
type wrappedService struct { @@ -46,152 +70,181 @@ type wrappedService struct { } func (ws *wrappedService) Begin(ctx context.Context, target *querypb.Target) (transactionID int64, err error) { - err = ws.wrapper(ctx, target, ws.impl, "Begin", false, false, func(ctx context.Context, target *querypb.Target, conn QueryService) error { + err = ws.wrapper(ctx, target, ws.impl, "Begin", false, func(ctx context.Context, target *querypb.Target, conn QueryService) (error, bool) { var innerErr error transactionID, innerErr = conn.Begin(ctx, target) - return innerErr + return innerErr, canRetry(ctx, innerErr) }) return transactionID, err } func (ws *wrappedService) Commit(ctx context.Context, target *querypb.Target, transactionID int64) error { - return ws.wrapper(ctx, target, ws.impl, "Commit", true, false, func(ctx context.Context, target *querypb.Target, conn QueryService) error { - return conn.Commit(ctx, target, transactionID) + return ws.wrapper(ctx, target, ws.impl, "Commit", true, func(ctx context.Context, target *querypb.Target, conn QueryService) (error, bool) { + innerErr := conn.Commit(ctx, target, transactionID) + return innerErr, canRetry(ctx, innerErr) }) } func (ws *wrappedService) Rollback(ctx context.Context, target *querypb.Target, transactionID int64) error { - return ws.wrapper(ctx, target, ws.impl, "Rollback", true, false, func(ctx context.Context, target *querypb.Target, conn QueryService) error { - return conn.Rollback(ctx, target, transactionID) + return ws.wrapper(ctx, target, ws.impl, "Rollback", true, func(ctx context.Context, target *querypb.Target, conn QueryService) (error, bool) { + innerErr := conn.Rollback(ctx, target, transactionID) + return innerErr, canRetry(ctx, innerErr) }) } func (ws *wrappedService) Prepare(ctx context.Context, target *querypb.Target, transactionID int64, dtid string) error { - return ws.wrapper(ctx, target, ws.impl, "Prepare", true, false, func(ctx context.Context, target *querypb.Target, conn QueryService) error { - 
return conn.Prepare(ctx, target, transactionID, dtid) + return ws.wrapper(ctx, target, ws.impl, "Prepare", true, func(ctx context.Context, target *querypb.Target, conn QueryService) (error, bool) { + innerErr := conn.Prepare(ctx, target, transactionID, dtid) + return innerErr, canRetry(ctx, innerErr) }) } func (ws *wrappedService) CommitPrepared(ctx context.Context, target *querypb.Target, dtid string) (err error) { - return ws.wrapper(ctx, target, ws.impl, "CommitPrepared", true, false, func(ctx context.Context, target *querypb.Target, conn QueryService) error { - return conn.CommitPrepared(ctx, target, dtid) + return ws.wrapper(ctx, target, ws.impl, "CommitPrepared", true, func(ctx context.Context, target *querypb.Target, conn QueryService) (error, bool) { + innerErr := conn.CommitPrepared(ctx, target, dtid) + return innerErr, canRetry(ctx, innerErr) }) } func (ws *wrappedService) RollbackPrepared(ctx context.Context, target *querypb.Target, dtid string, originalID int64) (err error) { - return ws.wrapper(ctx, target, ws.impl, "RollbackPrepared", true, false, func(ctx context.Context, target *querypb.Target, conn QueryService) error { - return conn.RollbackPrepared(ctx, target, dtid, originalID) + return ws.wrapper(ctx, target, ws.impl, "RollbackPrepared", true, func(ctx context.Context, target *querypb.Target, conn QueryService) (error, bool) { + innerErr := conn.RollbackPrepared(ctx, target, dtid, originalID) + return innerErr, canRetry(ctx, innerErr) }) } func (ws *wrappedService) CreateTransaction(ctx context.Context, target *querypb.Target, dtid string, participants []*querypb.Target) (err error) { - return ws.wrapper(ctx, target, ws.impl, "CreateTransaction", true, false, func(ctx context.Context, target *querypb.Target, conn QueryService) error { - return conn.CreateTransaction(ctx, target, dtid, participants) + return ws.wrapper(ctx, target, ws.impl, "CreateTransaction", true, func(ctx context.Context, target *querypb.Target, conn QueryService) (error, 
bool) { + innerErr := conn.CreateTransaction(ctx, target, dtid, participants) + return innerErr, canRetry(ctx, innerErr) }) } func (ws *wrappedService) StartCommit(ctx context.Context, target *querypb.Target, transactionID int64, dtid string) (err error) { - return ws.wrapper(ctx, target, ws.impl, "StartCommit", true, false, func(ctx context.Context, target *querypb.Target, conn QueryService) error { - return conn.StartCommit(ctx, target, transactionID, dtid) + return ws.wrapper(ctx, target, ws.impl, "StartCommit", true, func(ctx context.Context, target *querypb.Target, conn QueryService) (error, bool) { + innerErr := conn.StartCommit(ctx, target, transactionID, dtid) + return innerErr, canRetry(ctx, innerErr) }) } func (ws *wrappedService) SetRollback(ctx context.Context, target *querypb.Target, dtid string, transactionID int64) (err error) { - return ws.wrapper(ctx, target, ws.impl, "SetRollback", true, false, func(ctx context.Context, target *querypb.Target, conn QueryService) error { - return conn.SetRollback(ctx, target, dtid, transactionID) + return ws.wrapper(ctx, target, ws.impl, "SetRollback", true, func(ctx context.Context, target *querypb.Target, conn QueryService) (error, bool) { + innerErr := conn.SetRollback(ctx, target, dtid, transactionID) + return innerErr, canRetry(ctx, innerErr) }) } func (ws *wrappedService) ConcludeTransaction(ctx context.Context, target *querypb.Target, dtid string) (err error) { - return ws.wrapper(ctx, target, ws.impl, "ConcludeTransaction", true, false, func(ctx context.Context, target *querypb.Target, conn QueryService) error { - return conn.ConcludeTransaction(ctx, target, dtid) + return ws.wrapper(ctx, target, ws.impl, "ConcludeTransaction", true, func(ctx context.Context, target *querypb.Target, conn QueryService) (error, bool) { + innerErr := conn.ConcludeTransaction(ctx, target, dtid) + return innerErr, canRetry(ctx, innerErr) }) } func (ws *wrappedService) ReadTransaction(ctx context.Context, target *querypb.Target, 
dtid string) (metadata *querypb.TransactionMetadata, err error) { - err = ws.wrapper(ctx, target, ws.impl, "ReadTransaction", false, false, func(ctx context.Context, target *querypb.Target, conn QueryService) error { + err = ws.wrapper(ctx, target, ws.impl, "ReadTransaction", false, func(ctx context.Context, target *querypb.Target, conn QueryService) (error, bool) { var innerErr error metadata, innerErr = conn.ReadTransaction(ctx, target, dtid) - return innerErr + return innerErr, canRetry(ctx, innerErr) }) return metadata, err } func (ws *wrappedService) Execute(ctx context.Context, target *querypb.Target, query string, bindVars map[string]interface{}, transactionID int64, options *querypb.ExecuteOptions) (qr *sqltypes.Result, err error) { - err = ws.wrapper(ctx, target, ws.impl, "Execute", transactionID != 0, false, func(ctx context.Context, target *querypb.Target, conn QueryService) error { + inTransaction := (transactionID != 0) + err = ws.wrapper(ctx, target, ws.impl, "Execute", inTransaction, func(ctx context.Context, target *querypb.Target, conn QueryService) (error, bool) { var innerErr error qr, innerErr = conn.Execute(ctx, target, query, bindVars, transactionID, options) - return innerErr + // You cannot retry if you're in a transaction. 
+ retryable := canRetry(ctx, innerErr) && (!inTransaction) + return innerErr, retryable }) return qr, err } func (ws *wrappedService) StreamExecute(ctx context.Context, target *querypb.Target, query string, bindVars map[string]interface{}, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) error { - return ws.wrapper(ctx, target, ws.impl, "StreamExecute", false, true, func(ctx context.Context, target *querypb.Target, conn QueryService) error { - return conn.StreamExecute(ctx, target, query, bindVars, options, callback) + return ws.wrapper(ctx, target, ws.impl, "StreamExecute", false, func(ctx context.Context, target *querypb.Target, conn QueryService) (error, bool) { + streamingStarted := false + innerErr := conn.StreamExecute(ctx, target, query, bindVars, options, func(qr *sqltypes.Result) error { + streamingStarted = true + return callback(qr) + }) + // You cannot restart a stream once it's sent results. + retryable := canRetry(ctx, innerErr) && (!streamingStarted) + return innerErr, retryable }) } func (ws *wrappedService) ExecuteBatch(ctx context.Context, target *querypb.Target, queries []querytypes.BoundQuery, asTransaction bool, transactionID int64, options *querypb.ExecuteOptions) (qrs []sqltypes.Result, err error) { - err = ws.wrapper(ctx, target, ws.impl, "ExecuteBatch", transactionID != 0, false, func(ctx context.Context, target *querypb.Target, conn QueryService) error { + inTransaction := (transactionID != 0) + err = ws.wrapper(ctx, target, ws.impl, "ExecuteBatch", inTransaction, func(ctx context.Context, target *querypb.Target, conn QueryService) (error, bool) { var innerErr error qrs, innerErr = conn.ExecuteBatch(ctx, target, queries, asTransaction, transactionID, options) - return innerErr + // You cannot retry if you're in a transaction. 
+ retryable := canRetry(ctx, innerErr) && (!inTransaction) + return innerErr, retryable }) return qrs, err } func (ws *wrappedService) BeginExecute(ctx context.Context, target *querypb.Target, query string, bindVars map[string]interface{}, options *querypb.ExecuteOptions) (qr *sqltypes.Result, transactionID int64, err error) { - err = ws.wrapper(ctx, target, ws.impl, "BeginExecute", false, false, func(ctx context.Context, target *querypb.Target, conn QueryService) error { + err = ws.wrapper(ctx, target, ws.impl, "BeginExecute", false, func(ctx context.Context, target *querypb.Target, conn QueryService) (error, bool) { var innerErr error qr, transactionID, innerErr = conn.BeginExecute(ctx, target, query, bindVars, options) - return innerErr + // If a transaction was started, we cannot retry. + retryable := canRetry(ctx, innerErr) && (transactionID == 0) + return innerErr, retryable }) return qr, transactionID, err } func (ws *wrappedService) BeginExecuteBatch(ctx context.Context, target *querypb.Target, queries []querytypes.BoundQuery, asTransaction bool, options *querypb.ExecuteOptions) (qrs []sqltypes.Result, transactionID int64, err error) { - err = ws.wrapper(ctx, target, ws.impl, "BeginExecuteBatch", false, false, func(ctx context.Context, target *querypb.Target, conn QueryService) error { + err = ws.wrapper(ctx, target, ws.impl, "BeginExecuteBatch", false, func(ctx context.Context, target *querypb.Target, conn QueryService) (error, bool) { var innerErr error qrs, transactionID, innerErr = conn.BeginExecuteBatch(ctx, target, queries, asTransaction, options) - return innerErr + // If a transaction was started, we cannot retry. 
+ retryable := canRetry(ctx, innerErr) && (transactionID == 0) + return innerErr, retryable }) return qrs, transactionID, err } func (ws *wrappedService) MessageStream(ctx context.Context, target *querypb.Target, name string, callback func(*sqltypes.Result) error) error { - return ws.wrapper(ctx, target, ws.impl, "MessageStream", false, true, func(ctx context.Context, target *querypb.Target, conn QueryService) error { - return conn.MessageStream(ctx, target, name, callback) + return ws.wrapper(ctx, target, ws.impl, "MessageStream", false, func(ctx context.Context, target *querypb.Target, conn QueryService) (error, bool) { + innerErr := conn.MessageStream(ctx, target, name, callback) + return innerErr, canRetry(ctx, innerErr) }) } func (ws *wrappedService) MessageAck(ctx context.Context, target *querypb.Target, name string, ids []*querypb.Value) (count int64, err error) { - err = ws.wrapper(ctx, target, ws.impl, "MessageAck", false, false, func(ctx context.Context, target *querypb.Target, conn QueryService) error { + err = ws.wrapper(ctx, target, ws.impl, "MessageAck", false, func(ctx context.Context, target *querypb.Target, conn QueryService) (error, bool) { var innerErr error count, innerErr = conn.MessageAck(ctx, target, name, ids) - return innerErr + return innerErr, canRetry(ctx, innerErr) }) return count, err } func (ws *wrappedService) SplitQuery(ctx context.Context, target *querypb.Target, query querytypes.BoundQuery, splitColumns []string, splitCount int64, numRowsPerQueryPart int64, algorithm querypb.SplitQueryRequest_Algorithm) (queries []querytypes.QuerySplit, err error) { - err = ws.wrapper(ctx, target, ws.impl, "SplitQuery", false, false, func(ctx context.Context, target *querypb.Target, conn QueryService) error { + err = ws.wrapper(ctx, target, ws.impl, "SplitQuery", false, func(ctx context.Context, target *querypb.Target, conn QueryService) (error, bool) { var innerErr error queries, innerErr = conn.SplitQuery(ctx, target, query, splitColumns, 
splitCount, numRowsPerQueryPart, algorithm) - return innerErr + return innerErr, canRetry(ctx, innerErr) }) return queries, err } func (ws *wrappedService) UpdateStream(ctx context.Context, target *querypb.Target, position string, timestamp int64, callback func(*querypb.StreamEvent) error) error { - return ws.wrapper(ctx, target, ws.impl, "UpdateStream", false, true, func(ctx context.Context, target *querypb.Target, conn QueryService) error { - return conn.UpdateStream(ctx, target, position, timestamp, callback) + return ws.wrapper(ctx, target, ws.impl, "UpdateStream", false, func(ctx context.Context, target *querypb.Target, conn QueryService) (error, bool) { + innerErr := conn.UpdateStream(ctx, target, position, timestamp, callback) + return innerErr, canRetry(ctx, innerErr) }) } func (ws *wrappedService) StreamHealth(ctx context.Context, callback func(*querypb.StreamHealthResponse) error) error { - return ws.wrapper(ctx, nil, ws.impl, "StreamHealth", false, true, func(ctx context.Context, target *querypb.Target, conn QueryService) error { - return conn.StreamHealth(ctx, callback) + return ws.wrapper(ctx, nil, ws.impl, "StreamHealth", false, func(ctx context.Context, target *querypb.Target, conn QueryService) (error, bool) { + innerErr := conn.StreamHealth(ctx, callback) + return innerErr, canRetry(ctx, innerErr) }) } @@ -200,7 +253,8 @@ func (ws *wrappedService) HandlePanic(err *error) { } func (ws *wrappedService) Close(ctx context.Context) error { - return ws.wrapper(ctx, nil, ws.impl, "Close", false, false, func(ctx context.Context, target *querypb.Target, conn QueryService) error { - return conn.Close(ctx) + return ws.wrapper(ctx, nil, ws.impl, "Close", false, func(ctx context.Context, target *querypb.Target, conn QueryService) (error, bool) { + // No point retrying Close. 
+ return conn.Close(ctx), false }) } diff --git a/go/vt/vterrors/vterrors.go b/go/vt/vterrors/vterrors.go index 6049cbc8ec1..b989cc7b295 100644 --- a/go/vt/vterrors/vterrors.go +++ b/go/vt/vterrors/vterrors.go @@ -10,8 +10,11 @@ import ( ) // Code returns the error code if it's a VitessError. -// Otherwise, it returns unknown. +// If err is nil, it returns ok. Otherwise, it returns unknown. func Code(err error) vtrpcpb.Code { + if err == nil { + return vtrpcpb.Code_OK + } if err, ok := err.(*VitessError); ok { return err.Code } diff --git a/go/vt/vtgate/gateway/discoverygateway.go b/go/vt/vtgate/gateway/discoverygateway.go index 1e375c112c4..221848aae1a 100644 --- a/go/vt/vtgate/gateway/discoverygateway.go +++ b/go/vt/vtgate/gateway/discoverygateway.go @@ -161,7 +161,7 @@ func (dg *discoveryGateway) CacheStatus() TabletCacheStatusList { // the middle of a transaction. While returning the error check if it maybe a result of // a resharding event, and set the re-resolve bit and let the upper layers // re-resolve and retry. 
-func (dg *discoveryGateway) withRetry(ctx context.Context, target *querypb.Target, conn queryservice.QueryService, name string, inTransaction, isStreaming bool, inner func(ctx context.Context, target *querypb.Target, conn queryservice.QueryService) error) error { +func (dg *discoveryGateway) withRetry(ctx context.Context, target *querypb.Target, conn queryservice.QueryService, name string, inTransaction bool, inner func(ctx context.Context, target *querypb.Target, conn queryservice.QueryService) (error, bool)) error { var tabletLastUsed *topodatapb.Tablet var err error invalidTablets := make(map[string]bool) @@ -234,9 +234,10 @@ func (dg *discoveryGateway) withRetry(ctx context.Context, target *querypb.Targe } startTime := time.Now() - err = inner(ctx, ts.Target, conn) + var canRetry bool + err, canRetry = inner(ctx, ts.Target, conn) dg.updateStats(target, startTime, err) - if dg.canRetry(ctx, err, inTransaction, isStreaming) { + if canRetry { invalidTablets[ts.Key] = true continue } @@ -245,31 +246,6 @@ func (dg *discoveryGateway) withRetry(ctx context.Context, target *querypb.Targe return NewShardError(err, target, tabletLastUsed, inTransaction) } -// canRetry determines whether a query can be retried or not. -// OperationalErrors like retry/fatal are retryable if query is not in a txn. -// All other errors are non-retryable. -func (dg *discoveryGateway) canRetry(ctx context.Context, err error, inTransaction, isStreaming bool) bool { - if err == nil { - return false - } - // Do not retry if ctx.Done() is closed. 
- select { - case <-ctx.Done(): - return false - default: - } - switch vterrors.Code(err) { - case vtrpcpb.Code_INTERNAL: - if isStreaming { - return false - } - fallthrough - case vtrpcpb.Code_FAILED_PRECONDITION: - return !inTransaction - } - return false -} - func shuffleTablets(tablets []discovery.TabletStats) { index := 0 length := len(tablets) diff --git a/go/vt/vtgate/gateway/l2vtgategateway.go b/go/vt/vtgate/gateway/l2vtgategateway.go index e18ce916577..1a69200a106 100644 --- a/go/vt/vtgate/gateway/l2vtgategateway.go +++ b/go/vt/vtgate/gateway/l2vtgategateway.go @@ -22,11 +22,9 @@ import ( "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/topoproto" - "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" - vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) const ( @@ -206,7 +204,7 @@ func (lg *l2VTGateGateway) getConn(keyspace, shard string) (*l2VTGateConn, error // the middle of a transaction. While returning the error check if it maybe a result of // a resharding event, and set the re-resolve bit and let the upper layers // re-resolve and retry. 
-func (lg *l2VTGateGateway) withRetry(ctx context.Context, target *querypb.Target, conn queryservice.QueryService, name string, inTransaction, isStreaming bool, inner func(context.Context, *querypb.Target, queryservice.QueryService) error) error { +func (lg *l2VTGateGateway) withRetry(ctx context.Context, target *querypb.Target, conn queryservice.QueryService, name string, inTransaction bool, inner func(context.Context, *querypb.Target, queryservice.QueryService) (error, bool)) error { l2conn, err := lg.getConn(target.Keyspace, target.Shard) if err != nil { return fmt.Errorf("no configured destination for %v/%v: %v", target.Keyspace, target.Shard, err) @@ -214,9 +212,10 @@ func (lg *l2VTGateGateway) withRetry(ctx context.Context, target *querypb.Target for i := 0; i < lg.retryCount+1; i++ { startTime := time.Now() - err = inner(ctx, target, l2conn.conn) + var canRetry bool + err, canRetry = inner(ctx, target, l2conn.conn) lg.updateStats(l2conn, target.TabletType, startTime, err) - if lg.canRetry(ctx, err, inTransaction, isStreaming) { + if canRetry { continue } break @@ -224,31 +223,6 @@ func (lg *l2VTGateGateway) withRetry(ctx context.Context, target *querypb.Target return NewShardError(err, target, nil, inTransaction) } -// canRetry determines whether a query can be retried or not. -// OperationalErrors like retry/fatal are retryable if query is not in a txn. -// All other errors are non-retryable. -func (lg *l2VTGateGateway) canRetry(ctx context.Context, err error, inTransaction, isStreaming bool) bool { - if err == nil { - return false - } - // Do not retry if ctx.Done() is closed. 
- select { - case <-ctx.Done(): - return false - default: - } - switch vterrors.Code(err) { - case vtrpcpb.Code_INTERNAL: - if isStreaming { - return false - } - fallthrough - case vtrpcpb.Code_FAILED_PRECONDITION: - return !inTransaction - } - return false -} - func (lg *l2VTGateGateway) updateStats(conn *l2VTGateConn, tabletType topodatapb.TabletType, startTime time.Time, err error) { elapsed := time.Now().Sub(startTime) aggr := lg.getStatsAggregator(conn, tabletType) diff --git a/go/vt/vtgate/l2vtgate/l2vtgate.go b/go/vt/vtgate/l2vtgate/l2vtgate.go index 1b443a23731..4e8859c3c4e 100644 --- a/go/vt/vtgate/l2vtgate/l2vtgate.go +++ b/go/vt/vtgate/l2vtgate/l2vtgate.go @@ -65,12 +65,13 @@ func Init(hc discovery.HealthCheck, topoServer topo.Server, serv topo.SrvTopoSer } l2VTGate.QueryService = queryservice.Wrap( gw, - func(ctx context.Context, target *querypb.Target, conn queryservice.QueryService, name string, inTransaction, isStreaming bool, inner func(context.Context, *querypb.Target, queryservice.QueryService) error) (err error) { + func(ctx context.Context, target *querypb.Target, conn queryservice.QueryService, name string, inTransaction bool, inner func(context.Context, *querypb.Target, queryservice.QueryService) (error, bool)) (err error) { if target != nil { startTime, statsKey := l2VTGate.startAction(name, target) defer l2VTGate.endAction(startTime, statsKey, &err) } - return inner(ctx, target, conn) + err, _ = inner(ctx, target, conn) + return err }, ) servenv.OnRun(func() { From c002282c4f6f5c33f4abb6d83c922621c4950999 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Sun, 19 Feb 2017 16:23:20 -0800 Subject: [PATCH 019/108] vterrors: abolish TabletError --- go/mysqlconn/constants.go | 19 ++ go/vt/mysqlctl/schema.go | 4 +- go/vt/tabletserver/codex.go | 30 +-- go/vt/tabletserver/codex_test.go | 41 ++-- go/vt/tabletserver/connpool/dbconn.go | 18 +- go/vt/tabletserver/connpool/dbconn_test.go | 4 +- go/vt/tabletserver/endtoend/acl_test.go | 6 +- 
go/vt/tabletserver/endtoend/batch_test.go | 2 +- .../endtoend/compatibility_test.go | 22 +- go/vt/tabletserver/endtoend/config_test.go | 16 +- go/vt/tabletserver/endtoend/misc_test.go | 10 +- go/vt/tabletserver/endtoend/stream_test.go | 9 +- .../tabletserver/endtoend/transaction_test.go | 27 +-- .../engines/schema/schema_engine.go | 27 ++- .../engines/schema/schema_engine_test.go | 10 +- go/vt/tabletserver/messager_engine.go | 3 +- go/vt/tabletserver/messager_engine_test.go | 2 +- go/vt/tabletserver/query_engine.go | 14 +- go/vt/tabletserver/query_executor.go | 40 ++-- go/vt/tabletserver/query_executor_test.go | 98 +++----- go/vt/tabletserver/query_rules.go | 73 +++--- go/vt/tabletserver/query_rules_test.go | 10 +- go/vt/tabletserver/queryservice/wrapped.go | 8 +- .../tabletconntest/fakequeryservice.go | 3 +- .../tabletconntest/tabletconntest.go | 29 ++- go/vt/tabletserver/tabletenv/logstats.go | 2 +- go/vt/tabletserver/tabletenv/logstats_test.go | 8 +- go/vt/tabletserver/tabletenv/tablet_error.go | 9 +- go/vt/tabletserver/tabletserver.go | 212 +++++++++--------- go/vt/tabletserver/tabletserver_test.go | 88 ++++---- go/vt/tabletserver/testutils_test.go | 16 -- go/vt/tabletserver/twopc.go | 6 +- go/vt/tabletserver/tx_executor.go | 27 +-- go/vt/tabletserver/tx_executor_test.go | 16 +- go/vt/tabletserver/tx_pool.go | 24 +- go/vt/tabletserver/tx_pool_test.go | 39 ++-- go/vt/vterrors/grpc.go | 2 +- go/vt/vterrors/proto3.go | 7 +- go/vt/vterrors/proto3_test.go | 21 +- go/vt/vterrors/vterrors.go | 29 ++- go/vt/vtgate/buffer/buffer.go | 45 ++-- test/update_stream.py | 2 +- test/vertical_split.py | 2 +- test/vtgatev2_test.py | 24 -- 44 files changed, 497 insertions(+), 607 deletions(-) diff --git a/go/mysqlconn/constants.go b/go/mysqlconn/constants.go index 9c0ce2e96ef..e698d4ae680 100644 --- a/go/mysqlconn/constants.go +++ b/go/mysqlconn/constants.go @@ -1,5 +1,7 @@ package mysqlconn +import "github.com/youtube/vitess/go/sqldb" + const ( // MaxPacketSize is the maximum 
payload length of a packet // the server supports. @@ -183,6 +185,10 @@ const ( // Sent when the streaming calls are not done in the right order. CRCommandsOutOfSync = 2014 + // CRNamedPipeStateError is CR_NAMEDPIPESETSTATE_ERROR. + // This is the highest possible number for a connection error. + CRNamedPipeStateError = 2018 + // CRCantReadCharset is CR_CANT_READ_CHARSET CRCantReadCharset = 2019 @@ -340,3 +346,16 @@ var CharacterSetMap = map[string]uint8{ func IsNum(typ uint8) bool { return ((typ <= 9 /* MYSQL_TYPE_INT24 */ && typ != 7 /* MYSQL_TYPE_TIMESTAMP */) || typ == 13 /* MYSQL_TYPE_YEAR */ || typ == 246 /* MYSQL_TYPE_NEWDECIMAL */) } + +// IsConnErr returns true if the error is a connection error. +func IsConnErr(err error) bool { + if sqlErr, ok := err.(*sqldb.SQLError); ok { + num := sqlErr.Number() + // Don't count query kill as connection error. + if num == CRServerLost { + return false + } + return num >= CRUnknownError && num <= CRNamedPipeStateError + } + return false +} diff --git a/go/vt/mysqlctl/schema.go b/go/vt/mysqlctl/schema.go index 16e932431de..4a270ec097f 100644 --- a/go/vt/mysqlctl/schema.go +++ b/go/vt/mysqlctl/schema.go @@ -189,7 +189,7 @@ func (mysqld *Mysqld) GetPrimaryKeyColumns(dbName, table string) ([]string, erro } } if keyNameIndex == -1 || seqInIndexIndex == -1 || columnNameIndex == -1 { - return nil, fmt.Errorf("Unknown columns in 'show index' result: %v", qr.Fields) + return nil, fmt.Errorf("unknown columns in 'show index' result: %v", qr.Fields) } columns := make([]string, 0, 5) @@ -206,7 +206,7 @@ func (mysqld *Mysqld) GetPrimaryKeyColumns(dbName, table string) ([]string, erro return nil, err } if seqInIndex != expectedIndex { - return nil, fmt.Errorf("Unexpected index: %v != %v", seqInIndex, expectedIndex) + return nil, fmt.Errorf("unexpected index: %v != %v", seqInIndex, expectedIndex) } expectedIndex++ diff --git a/go/vt/tabletserver/codex.go b/go/vt/tabletserver/codex.go index a9bf11f5916..ae3ea261d2f 100644 --- 
a/go/vt/tabletserver/codex.go +++ b/go/vt/tabletserver/codex.go @@ -8,7 +8,7 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/sqlparser" "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" @@ -42,7 +42,7 @@ func resolvePKValues(table *schema.Table, pkValues []interface{}, bindVars map[s if length == -1 { length = len(list) } else if len(list) != length { - return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "mismatched lengths for values %v", pkValues) + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "mismatched lengths for values %v", pkValues) } return nil } @@ -93,7 +93,7 @@ func resolvePKValues(table *schema.Table, pkValues []interface{}, bindVars map[s func resolveListArg(col *schema.TableColumn, key string, bindVars map[string]interface{}) ([]sqltypes.Value, error) { val, _, err := sqlparser.FetchBindVar(key, bindVars) if err != nil { - return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) } switch list := val.(type) { @@ -102,7 +102,7 @@ func resolveListArg(col *schema.TableColumn, key string, bindVars map[string]int for i, v := range list { sqlval, err := sqltypes.BuildConverted(col.Type, v) if err != nil { - return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) } if err = validateValue(col, sqlval); err != nil { return nil, err @@ -112,7 +112,7 @@ func resolveListArg(col *schema.TableColumn, key string, bindVars map[string]int return resolved, nil case *querypb.BindVariable: if list.Type != querypb.Type_TUPLE { - return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "expecting list 
for bind var %s: %v", key, list) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "expecting list for bind var %s: %v", key, list) } resolved := make([]sqltypes.Value, len(list.Values)) for i, v := range list.Values { @@ -120,7 +120,7 @@ func resolveListArg(col *schema.TableColumn, key string, bindVars map[string]int sqlval := sqltypes.MakeTrusted(v.Type, v.Value) sqlval, err := sqltypes.BuildConverted(col.Type, sqlval) if err != nil { - return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) } if err = validateValue(col, sqlval); err != nil { return nil, err @@ -129,7 +129,7 @@ func resolveListArg(col *schema.TableColumn, key string, bindVars map[string]int } return resolved, nil default: - return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "unknown type for bind variable %v", key) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unknown type for bind variable %v", key) } } @@ -159,12 +159,12 @@ func resolveValue(col *schema.TableColumn, value interface{}, bindVars map[strin if v, ok := value.(string); ok { value, _, err = sqlparser.FetchBindVar(v, bindVars) if err != nil { - return result, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) + return result, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) } } result, err = sqltypes.BuildConverted(col.Type, value) if err != nil { - return result, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) + return result, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) } if err = validateValue(col, result); err != nil { return result, err @@ -178,23 +178,23 @@ func resolveNumber(value interface{}, bindVars map[string]interface{}) (int64, e if v, ok := value.(string); ok { value, _, err = sqlparser.FetchBindVar(v, bindVars) if err != nil { - return 0, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) + return 0, 
vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) } } v, err := sqltypes.BuildValue(value) if err != nil { - return 0, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) + return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) } ret, err := v.ParseInt64() if err != nil { - return 0, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) + return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) } return ret, nil } func validateRow(table *schema.Table, columnNumbers []int, row []sqltypes.Value) error { if len(row) != len(columnNumbers) { - return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "data inconsistency %d vs %d", len(row), len(columnNumbers)) + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "data inconsistency %d vs %d", len(row), len(columnNumbers)) } for j, value := range row { if err := validateValue(&table.Columns[columnNumbers[j]], value); err != nil { @@ -211,11 +211,11 @@ func validateValue(col *schema.TableColumn, value sqltypes.Value) error { } if sqltypes.IsIntegral(col.Type) { if !value.IsIntegral() { - return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "type mismatch, expecting numeric type for %v for column: %v", value, col) + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "type mismatch, expecting numeric type for %v for column: %v", value, col) } } else if col.Type == sqltypes.VarBinary { if !value.IsQuoted() { - return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "type mismatch, expecting string type for %v for column: %v", value, col) + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "type mismatch, expecting string type for %v for column: %v", value, col) } } return nil diff --git a/go/vt/tabletserver/codex_test.go b/go/vt/tabletserver/codex_test.go index 3ebb2893fc0..976544ca58c 100644 --- a/go/vt/tabletserver/codex_test.go +++ b/go/vt/tabletserver/codex_test.go @@ -15,6 +15,7 @@ import ( querypb 
"github.com/youtube/vitess/go/vt/proto/query" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" + "github.com/youtube/vitess/go/vt/vterrors" ) func TestCodexBuildValuesList(t *testing.T) { @@ -57,7 +58,7 @@ func TestCodexBuildValuesList(t *testing.T) { // invalid value bindVars["pk1"] = struct{}{} pkValues = []interface{}{":pk1"} - wantErr := "error: unexpected type struct {}: {}" + wantErr := "unexpected type struct {}: {}" got, err := buildValueList(table, pkValues, bindVars) @@ -68,7 +69,7 @@ func TestCodexBuildValuesList(t *testing.T) { // type mismatch int bindVars["pk1"] = "str" pkValues = []interface{}{":pk1"} - wantErr = "error: strconv.ParseInt" + wantErr = "strconv.ParseInt" got, err = buildValueList(table, pkValues, bindVars) if err == nil || !strings.Contains(err.Error(), wantErr) { @@ -79,7 +80,7 @@ func TestCodexBuildValuesList(t *testing.T) { bindVars["pk1"] = 1 bindVars["pk2"] = 1 pkValues = []interface{}{":pk1", ":pk2"} - wantErr = "error: type mismatch, expecting string type for 1" + wantErr = "type mismatch, expecting string type for 1" got, err = buildValueList(table, pkValues, bindVars) if err == nil || !strings.Contains(err.Error(), wantErr) { @@ -211,7 +212,7 @@ func TestCodexBuildValuesList(t *testing.T) { pk1Val, "::list", } - wantErr = "error: empty list supplied for list" + wantErr = "empty list supplied for list" got, err = buildValueList(table, pkValues, bindVars) if err == nil || !strings.Contains(err.Error(), wantErr) { t.Fatalf("got %v, want %v", err, wantErr) @@ -225,7 +226,7 @@ func TestCodexBuildValuesList(t *testing.T) { pk1Val, ":list", } - wantErr = "error: unexpected arg type []interface {} for key list" + wantErr = "unexpected arg type []interface {} for key list" got, err = buildValueList(table, pkValues, bindVars) if err == nil || !strings.Contains(err.Error(), wantErr) { t.Fatalf("got %v, want %v", err, wantErr) @@ -233,7 +234,6 @@ func 
TestCodexBuildValuesList(t *testing.T) { } func TestCodexResolvePKValues(t *testing.T) { - testUtils := newTestUtils() table := createTable("Table", []string{"pk1", "pk2", "col1"}, []querypb.Type{sqltypes.Int64, sqltypes.VarBinary, sqltypes.Int32}, @@ -257,7 +257,9 @@ func TestCodexResolvePKValues(t *testing.T) { pkValues = make([]interface{}, 0, 10) pkValues = append(pkValues, sqltypes.MakeString([]byte("type_mismatch"))) _, _, err = resolvePKValues(table, pkValues, nil) - testUtils.checkTabletError(t, err, vtrpcpb.Code_INVALID_ARGUMENT, "strconv.ParseInt") + if code := vterrors.Code(err); code != vtrpcpb.Code_INVALID_ARGUMENT { + t.Errorf("resolvePKValues: %v, want %v", code, vtrpcpb.Code_INVALID_ARGUMENT) + } // pkValues with different length bindVariables = make(map[string]interface{}) bindVariables[key] = 1 @@ -269,7 +271,9 @@ func TestCodexResolvePKValues(t *testing.T) { pkValues = append(pkValues, []interface{}{":" + key}) pkValues = append(pkValues, []interface{}{":" + key2, ":" + key3}) _, _, err = resolvePKValues(table, pkValues, bindVariables) - testUtils.checkTabletError(t, err, vtrpcpb.Code_INVALID_ARGUMENT, "mismatched lengths") + if code := vterrors.Code(err); code != vtrpcpb.Code_INVALID_ARGUMENT { + t.Errorf("resolvePKValues: %v, want %v", code, vtrpcpb.Code_INVALID_ARGUMENT) + } } func TestCodexResolveListArg(t *testing.T) { @@ -284,7 +288,9 @@ func TestCodexResolveListArg(t *testing.T) { bindVariables[key] = []interface{}{fmt.Errorf("error is not supported")} _, err := resolveListArg(table.GetPKColumn(0), "::"+key, bindVariables) - testUtils.checkTabletError(t, err, vtrpcpb.Code_INVALID_ARGUMENT, "") + if code := vterrors.Code(err); code != vtrpcpb.Code_INVALID_ARGUMENT { + t.Errorf("resolvePKValues: %v, want %v", code, vtrpcpb.Code_INVALID_ARGUMENT) + } // This should successfully convert. 
bindVariables[key] = []interface{}{"1"} @@ -322,19 +328,19 @@ func TestResolveNumber(t *testing.T) { bv: map[string]interface{}{ "a": []interface{}{10}, }, - outErr: "error: unexpected type []interface {}: [10]", + outErr: "unexpected type []interface {}: [10]", }, { v: ":a", - outErr: "error: missing bind var a", + outErr: "missing bind var a", }, { v: make(chan int), - outErr: "error: unexpected type chan int", + outErr: "unexpected type chan int", }, { v: int64(1), out: int64(1), }, { v: 1.2, - outErr: "error: strconv.ParseInt", + outErr: "strconv.ParseInt", }} for _, tc := range testcases { got, err := resolveNumber(tc.v, tc.bv) @@ -406,17 +412,20 @@ func TestCodexBuildStreamComment(t *testing.T) { } func TestCodexValidateRow(t *testing.T) { - testUtils := newTestUtils() table := createTable("Table", []string{"pk1", "pk2", "col1"}, []querypb.Type{sqltypes.Int64, sqltypes.VarBinary, sqltypes.Int32}, []string{"pk1", "pk2"}) // #columns and #rows do not match err := validateRow(table, []int{1}, []sqltypes.Value{}) - testUtils.checkTabletError(t, err, vtrpcpb.Code_INVALID_ARGUMENT, "data inconsistency") + if code := vterrors.Code(err); code != vtrpcpb.Code_INVALID_ARGUMENT { + t.Errorf("validateRow: %v, want %v", code, vtrpcpb.Code_INVALID_ARGUMENT) + } // column 0 is int type but row is in string type err = validateRow(table, []int{0}, []sqltypes.Value{sqltypes.MakeString([]byte("str"))}) - testUtils.checkTabletError(t, err, vtrpcpb.Code_INVALID_ARGUMENT, "type mismatch") + if code := vterrors.Code(err); code != vtrpcpb.Code_INVALID_ARGUMENT { + t.Errorf("validateRow: %v, want %v", code, vtrpcpb.Code_INVALID_ARGUMENT) + } } func TestCodexApplyFilterWithPKDefaults(t *testing.T) { diff --git a/go/vt/tabletserver/connpool/dbconn.go b/go/vt/tabletserver/connpool/dbconn.go index b25a2fdf78e..0a17e7c2abe 100644 --- a/go/vt/tabletserver/connpool/dbconn.go +++ b/go/vt/tabletserver/connpool/dbconn.go @@ -10,13 +10,13 @@ import ( "time" log "github.com/golang/glog" + 
"github.com/youtube/vitess/go/mysqlconn" "github.com/youtube/vitess/go/sqldb" "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/sync2" "github.com/youtube/vitess/go/trace" "github.com/youtube/vitess/go/vt/dbconnpool" querypb "github.com/youtube/vitess/go/vt/proto/query" - vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" "golang.org/x/net/context" ) @@ -62,14 +62,14 @@ func (dbc *DBConn) Exec(ctx context.Context, query string, maxrows int, wantfiel switch { case err == nil: return r, nil - case !tabletenv.IsConnErr(err): + case !mysqlconn.IsConnErr(err): // MySQL error that isn't due to a connection issue - return nil, tabletenv.NewTabletErrorSQL(vtrpcpb.Code_UNKNOWN, err) + return nil, err case attempt == 2: // If the MySQL connection is bad, we assume that there is nothing wrong with // the query itself, and retrying it might succeed. The MySQL connection might // fix itself, or the query could succeed on a different VtTablet. - return nil, tabletenv.NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, err) + return nil, err } // Connection error. Try to reconnect. @@ -78,7 +78,7 @@ func (dbc *DBConn) Exec(ctx context.Context, query string, maxrows int, wantfiel dbc.pool.checker.CheckMySQL() // Return the error of the reconnect and not the original connection error. // NOTE: We return a tryable error code here. - return nil, tabletenv.NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, reconnectErr) + return nil, reconnectErr } // Reconnect succeeded. Retry query at second attempt. 
@@ -130,7 +130,7 @@ func (dbc *DBConn) Stream(ctx context.Context, query string, callback func(*sqlt switch { case err == nil: return nil - case !tabletenv.IsConnErr(err) || resultSent || attempt == 2: + case !mysqlconn.IsConnErr(err) || resultSent || attempt == 2: // MySQL error that isn't due to a connection issue return err } @@ -190,16 +190,14 @@ func (dbc *DBConn) Kill(reason string) error { killConn, err := dbc.pool.dbaPool.Get(context.TODO()) if err != nil { log.Warningf("Failed to get conn from dba pool: %v", err) - // TODO(aaijazi): Find the right error code for an internal error that we don't want to retry - return tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "Failed to get conn from dba pool: %v", err) + return err } defer killConn.Recycle() sql := fmt.Sprintf("kill %d", dbc.conn.ID()) _, err = killConn.ExecuteFetch(sql, 10000, false) if err != nil { log.Errorf("Could not kill query %s: %v", dbc.Current(), err) - // TODO(aaijazi): Find the right error code for an internal error that we don't want to retry - return tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "Could not kill query %s: %v", dbc.Current(), err) + return err } return nil } diff --git a/go/vt/tabletserver/connpool/dbconn_test.go b/go/vt/tabletserver/connpool/dbconn_test.go index 31ea3a648cd..6b44cc6f90b 100644 --- a/go/vt/tabletserver/connpool/dbconn_test.go +++ b/go/vt/tabletserver/connpool/dbconn_test.go @@ -80,7 +80,7 @@ func TestDBConnKill(t *testing.T) { // Kill failed because we are not able to connect to the database db.EnableConnFail() err = dbConn.Kill("test kill") - want := "Failed to get conn from dba pool" + want := "Lost connection" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("Exec: %v, want %s", err, want) } @@ -100,7 +100,7 @@ func TestDBConnKill(t *testing.T) { // Kill failed because "kill query_id" failed db.AddRejectedQuery(newKillQuery, errors.New("rejected")) err = dbConn.Kill("test kill") - want = "Could not kill query" + want = "rejected" if 
err == nil || !strings.Contains(err.Error(), want) { t.Errorf("Exec: %v, want %s", err, want) } diff --git a/go/vt/tabletserver/endtoend/acl_test.go b/go/vt/tabletserver/endtoend/acl_test.go index b09891fd1d6..3a9c204c440 100644 --- a/go/vt/tabletserver/endtoend/acl_test.go +++ b/go/vt/tabletserver/endtoend/acl_test.go @@ -17,7 +17,7 @@ import ( func TestTableACLNoAccess(t *testing.T) { client := framework.NewClient() - aclErr := "error: table acl error" + aclErr := "table acl error" execCases := []struct { query string err string @@ -163,12 +163,12 @@ func TestQueryRules(t *testing.T) { query := "select * from vitess_test where intval=:asdfg" bv := map[string]interface{}{"asdfg": 1} _, err = client.Execute(query, bv) - want = "error: Query disallowed due to rule: disallow bindvar 'asdfg'" + want = "disallowed due to rule: disallow bindvar 'asdfg'" if err == nil || err.Error() != want { t.Errorf("Error: %v, want %s", err, want) } _, err = client.StreamExecute(query, bv) - want = "error: Query disallowed due to rule: disallow bindvar 'asdfg'" + want = "disallowed due to rule: disallow bindvar 'asdfg'" if err == nil || err.Error() != want { t.Errorf("Error: %v, want %s", err, want) } diff --git a/go/vt/tabletserver/endtoend/batch_test.go b/go/vt/tabletserver/endtoend/batch_test.go index c6db1ef0ac6..c39def952e5 100644 --- a/go/vt/tabletserver/endtoend/batch_test.go +++ b/go/vt/tabletserver/endtoend/batch_test.go @@ -182,7 +182,7 @@ func TestBatchTransaction(t *testing.T) { } defer client.Rollback() qrl, err = client.ExecuteBatch(queries, true) - want := "error: cannot start a new transaction in the scope of an existing one" + want := "cannot start a new transaction in the scope of an existing one" if err == nil || err.Error() != want { t.Errorf("Error: %v, want %s", err, want) } diff --git a/go/vt/tabletserver/endtoend/compatibility_test.go b/go/vt/tabletserver/endtoend/compatibility_test.go index d07682ae3a2..581416a37d4 100644 --- 
a/go/vt/tabletserver/endtoend/compatibility_test.go +++ b/go/vt/tabletserver/endtoend/compatibility_test.go @@ -680,39 +680,39 @@ func TestTypeLimits(t *testing.T) { }{{ query: "insert into vitess_ints(tiny) values('str')", bv: nil, - out: "error: strconv.ParseInt", + out: "strconv.ParseInt", }, { query: "insert into vitess_ints(tiny) values(:str)", bv: map[string]interface{}{"str": "str"}, - out: "error: strconv.ParseInt", + out: "strconv.ParseInt", }, { query: "insert into vitess_ints(tiny) values(1.2)", bv: nil, - out: "error: DML too complex", + out: "DML too complex", }, { query: "insert into vitess_ints(tiny) values(:fl)", bv: map[string]interface{}{"fl": 1.2}, - out: "error: type mismatch", + out: "type mismatch", }, { query: "insert into vitess_strings(vb) values(1)", bv: nil, - out: "error: type mismatch", + out: "type mismatch", }, { query: "insert into vitess_strings(vb) values(:id)", bv: map[string]interface{}{"id": 1}, - out: "error: type mismatch", + out: "type mismatch", }, { query: "insert into vitess_strings(vb) select tiny from vitess_ints", bv: nil, - out: "error: type mismatch", + out: "type mismatch", }, { query: "insert into vitess_ints(tiny) select num from vitess_fracts", bv: nil, - out: "error: type mismatch", + out: "type mismatch", }, { query: "insert into vitess_ints(tiny) select vb from vitess_strings", bv: nil, - out: "error: type mismatch", + out: "type mismatch", }} for _, tcase := range mismatchCases { _, err := client.Execute(tcase.query, tcase.bv) @@ -721,7 +721,7 @@ func TestTypeLimits(t *testing.T) { } } - want := "error: Out of range" + want := "Out of range" for _, query := range []string{ "insert into vitess_ints(tiny) values(-129)", "insert into vitess_ints(tiny) select medium from vitess_ints", @@ -732,7 +732,7 @@ func TestTypeLimits(t *testing.T) { } } - want = "error: Data too long" + want = "Data too long" _, err := client.Execute("insert into vitess_strings(vb) values('12345678901234567')", nil) if err == nil || 
!strings.HasPrefix(err.Error(), want) { t.Errorf("Error: %v, want %s", err, want) diff --git a/go/vt/tabletserver/endtoend/config_test.go b/go/vt/tabletserver/endtoend/config_test.go index 296cd6c5737..594d477b2cd 100644 --- a/go/vt/tabletserver/endtoend/config_test.go +++ b/go/vt/tabletserver/endtoend/config_test.go @@ -11,8 +11,10 @@ import ( "testing" "time" + vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" "github.com/youtube/vitess/go/vt/tabletserver/endtoend/framework" "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vterrors" ) // compareIntDiff returns an error if end[tag] != start[tag]+diff. @@ -175,7 +177,7 @@ func TestMexResultSize(t *testing.T) { client := framework.NewClient() query := "select * from vitess_test" _, err := client.Execute(query, nil) - want := "error: Row count exceeded" + want := "Row count exceeded" if err == nil || !strings.HasPrefix(err.Error(), want) { t.Errorf("Error: %v, must start with %s", err, want) } @@ -305,14 +307,12 @@ func TestQueryTimeout(t *testing.T) { return } _, err = client.Execute("select sleep(1) from vitess_test", nil) - want := "error: the query was killed" - if err == nil || !strings.HasPrefix(err.Error(), want) { - t.Errorf("Error: %v, must start with %s", err, want) + if code := vterrors.Code(err); code != vtrpcpb.Code_DEADLINE_EXCEEDED { + t.Errorf("Error code: %v, want %v", code, vtrpcpb.Code_DEADLINE_EXCEEDED) } _, err = client.Execute("select 1 from dual", nil) - want = "not_in_tx: Transaction" - if err == nil || !strings.HasPrefix(err.Error(), want) { - t.Errorf("Error: %v, must start with %s", err, want) + if code := vterrors.Code(err); code != vtrpcpb.Code_ABORTED { + t.Errorf("Error code: %v, want %v", code, vtrpcpb.Code_ABORTED) } vend := framework.DebugVars() if err := verifyIntValue(vend, "QueryTimeout", int(100*time.Millisecond)); err != nil { @@ -347,7 +347,7 @@ func TestStrictMode(t *testing.T) { } defer client.Rollback() - want := "error: DML too 
complex" + want := "DML too complex" for _, query := range queries { _, err = client.Execute(query, nil) if err == nil || err.Error() != want { diff --git a/go/vt/tabletserver/endtoend/misc_test.go b/go/vt/tabletserver/endtoend/misc_test.go index ba2019f5991..e9a8d515996 100644 --- a/go/vt/tabletserver/endtoend/misc_test.go +++ b/go/vt/tabletserver/endtoend/misc_test.go @@ -157,7 +157,7 @@ func TestNocacheListArgs(t *testing.T) { "list": []interface{}{}, }, ) - want := "error: empty list supplied for list" + want := "empty list supplied for list" if err == nil || err.Error() != want { t.Errorf("Error: %v, want %s", err, want) return @@ -168,11 +168,11 @@ func TestIntegrityError(t *testing.T) { vstart := framework.DebugVars() client := framework.NewClient() _, err := client.Execute("insert into vitess_test values(1, null, null, null)", nil) - want := "error: Duplicate entry '1'" + want := "Duplicate entry '1'" if err == nil || !strings.HasPrefix(err.Error(), want) { t.Errorf("Error: %v, want prefix %s", err, want) } - if err := compareIntDiff(framework.DebugVars(), "InfoErrors/DupKey", vstart, 1); err != nil { + if err := compareIntDiff(framework.DebugVars(), "Errors/ALREADY_EXISTS", vstart, 1); err != nil { t.Error(err) } } @@ -220,7 +220,7 @@ func TestUpsertNonPKHit(t *testing.T) { "(2, 1) on duplicate key update id2 = 2", nil, ) - want := "error: Duplicate entry '1' for key 'id2_idx'" + want := "Duplicate entry '1' for key 'id2_idx'" if err == nil || !strings.HasPrefix(err.Error(), want) { t.Errorf("Execute: %v, must start with %s", err, want) } @@ -251,7 +251,7 @@ func TestSchemaReload(t *testing.T) { if err == nil { return } - want := "error: table vitess_temp not found in schema" + want := "table vitess_temp not found in schema" if err.Error() != want { t.Errorf("Error: %v, want %s", err, want) return diff --git a/go/vt/tabletserver/endtoend/stream_test.go b/go/vt/tabletserver/endtoend/stream_test.go index 486d04d4de3..18d6a171e3f 100644 --- 
a/go/vt/tabletserver/endtoend/stream_test.go +++ b/go/vt/tabletserver/endtoend/stream_test.go @@ -13,7 +13,9 @@ import ( "time" "github.com/youtube/vitess/go/sqltypes" + vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" "github.com/youtube/vitess/go/vt/tabletserver/endtoend/framework" + "github.com/youtube/vitess/go/vt/vterrors" ) func TestStreamUnion(t *testing.T) { @@ -102,9 +104,8 @@ func TestStreamTerminate(t *testing.T) { return nil }, ) - want := "error: the query was killed" - if err == nil || !strings.HasPrefix(err.Error(), want) { - t.Errorf("Error: %v, must start with %s", err, want) + if code := vterrors.Code(err); code != vtrpcpb.Code_DEADLINE_EXCEEDED { + t.Errorf("Errorcode: %v, want %v", code, vtrpcpb.Code_DEADLINE_EXCEEDED) } } @@ -140,7 +141,7 @@ func populateBigData(client *framework.QueryClient) error { func TestStreamError(t *testing.T) { _, err := framework.NewClient().StreamExecute("select count(abcd) from vitess_big", nil) - want := "error: Unknown column" + want := "Unknown column" if err == nil || !strings.HasPrefix(err.Error(), want) { t.Errorf("Error: %v, must start with %s", err, want) } diff --git a/go/vt/tabletserver/endtoend/transaction_test.go b/go/vt/tabletserver/endtoend/transaction_test.go index b84fe36f79f..7ba9919a89c 100644 --- a/go/vt/tabletserver/endtoend/transaction_test.go +++ b/go/vt/tabletserver/endtoend/transaction_test.go @@ -15,9 +15,11 @@ import ( "github.com/youtube/vitess/go/vt/tabletserver" "github.com/youtube/vitess/go/vt/tabletserver/endtoend/framework" "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" + vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) func TestCommit(t *testing.T) { @@ -290,9 +292,9 @@ func TestAutoCommitOff(t *testing.T) { defer framework.Server.SetAutoCommit(true) _, err := framework.NewClient().Execute("insert into 
vitess_test values(4, null, null, null)", nil) - want := "error: Disallowed outside transaction" + want := "disallowed outside transaction" if err == nil || !strings.HasPrefix(err.Error(), want) { - t.Errorf("Error: %v, must start with %s", err, want) + t.Errorf("%v, must start with %s", err, want) } } @@ -328,11 +330,11 @@ func TestTxPoolSize(t *testing.T) { client2 := framework.NewClient() err = client2.Begin() - want := "tx_pool_full" + want := "connection limit exceeded" if err == nil || !strings.Contains(err.Error(), want) { - t.Errorf("Error: %v, must contain %s", err, want) + t.Errorf("%v, must contain %s", err, want) } - if err := compareIntDiff(framework.DebugVars(), "Errors/TxPoolFull", vstart, 1); err != nil { + if err := compareIntDiff(framework.DebugVars(), "Errors/RESOURCE_EXHAUSTED", vstart, 1); err != nil { t.Error(err) } } @@ -368,9 +370,8 @@ func TestTxTimeout(t *testing.T) { // Ensure commit fails. err = client.Commit() - want := "not_in_tx: Transaction" - if err == nil || !strings.HasPrefix(err.Error(), want) { - t.Errorf("Error: %v, must contain %s", err, want) + if code := vterrors.Code(err); code != vtrpcpb.Code_ABORTED { + t.Errorf("Commit code: %v, want %v", code, vtrpcpb.Code_ABORTED) } } @@ -379,9 +380,9 @@ func TestForUpdate(t *testing.T) { client := framework.NewClient() query := fmt.Sprintf("select * from vitess_test where intval=2 %s", mode) _, err := client.Execute(query, nil) - want := "error: Disallowed" + want := "disallowed" if err == nil || !strings.HasPrefix(err.Error(), want) { - t.Errorf("Error: %v, must have prefix %s", err, want) + t.Errorf("%v, must have prefix %s", err, want) } // We should not get errors here @@ -555,7 +556,7 @@ func TestMMCommitFlow(t *testing.T) { err = client.CreateTransaction("aa", []*querypb.Target{}) want := "Duplicate entry" if err == nil || !strings.Contains(err.Error(), want) { - t.Errorf("Error: %v, must contain %s", err, want) + t.Errorf("%v, must contain %s", err, want) } err = 
client.StartCommit("aa") @@ -564,9 +565,9 @@ func TestMMCommitFlow(t *testing.T) { } err = client.SetRollback("aa", 0) - want = "error: could not transition to ROLLBACK: aa" + want = "could not transition to ROLLBACK: aa" if err == nil || err.Error() != want { - t.Errorf("Error: %v, must contain %s", err, want) + t.Errorf("%v, must contain %s", err, want) } info, err := client.ReadTransaction("aa") diff --git a/go/vt/tabletserver/engines/schema/schema_engine.go b/go/vt/tabletserver/engines/schema/schema_engine.go index f4002edfcba..bed1980f147 100644 --- a/go/vt/tabletserver/engines/schema/schema_engine.go +++ b/go/vt/tabletserver/engines/schema/schema_engine.go @@ -27,6 +27,7 @@ import ( "github.com/youtube/vitess/go/vt/sqlparser" "github.com/youtube/vitess/go/vt/tabletserver/connpool" "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vterrors" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) @@ -96,7 +97,7 @@ func (se *Engine) Open(dbaParams *sqldb.ConnParams) error { conn, err := se.conns.Get(ctx) if err != nil { - return tabletenv.NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, err) + return err } defer conn.Recycle() @@ -107,13 +108,13 @@ func (se *Engine) Open(dbaParams *sqldb.ConnParams) error { if se.strictMode.Get() { if err := conn.VerifyMode(); err != nil { - return tabletenv.NewTabletError(vtrpcpb.Code_UNKNOWN, err.Error()) + return vterrors.Errorf(vtrpcpb.Code_UNKNOWN, err.Error()) } } tableData, err := conn.Exec(ctx, mysqlconn.BaseShowTables, maxTableCount, false) if err != nil { - return tabletenv.PrefixTabletError(vtrpcpb.Code_INTERNAL, err, "Could not get table list: ") + return vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "could not get table list: %v", err) } tables := make(map[string]*Table, len(tableData.Rows)+1) @@ -155,7 +156,7 @@ func (se *Engine) Open(dbaParams *sqldb.ConnParams) error { // Fail if we can't load the schema for any tables, but we know that some tables exist. 
This points to a configuration problem. if len(tableData.Rows) != 0 && len(tables) == 1 { // len(tables) is always at least 1 because of the "dual" table - return tabletenv.NewTabletError(vtrpcpb.Code_UNKNOWN, "could not get schema for any tables") + return vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "could not get schema for any tables") } se.tables = tables se.lastChange = curTime @@ -198,7 +199,7 @@ func (se *Engine) Reload(ctx context.Context) error { curTime, tableData, err := func() (int64, *sqltypes.Result, error) { conn, err := se.conns.Get(ctx) if err != nil { - return 0, nil, tabletenv.NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, err) + return 0, nil, err } defer conn.Recycle() curTime, err := se.mysqlTime(ctx, conn) @@ -264,14 +265,14 @@ func (se *Engine) Reload(ctx context.Context) error { func (se *Engine) mysqlTime(ctx context.Context, conn *connpool.DBConn) (int64, error) { tm, err := conn.Exec(ctx, "select unix_timestamp()", 1, false) if err != nil { - return 0, tabletenv.PrefixTabletError(vtrpcpb.Code_UNKNOWN, err, "Could not get MySQL time: ") + return 0, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "could not get MySQL time: %v", err) } if len(tm.Rows) != 1 || len(tm.Rows[0]) != 1 || tm.Rows[0][0].IsNull() { - return 0, tabletenv.NewTabletError(vtrpcpb.Code_UNKNOWN, "Unexpected result for MySQL time: %+v", tm.Rows) + return 0, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "unexpected result for MySQL time: %+v", tm.Rows) } t, err := strconv.ParseInt(tm.Rows[0][0].String(), 10, 64) if err != nil { - return 0, tabletenv.NewTabletError(vtrpcpb.Code_UNKNOWN, "Could not parse time %+v: %v", tm, err) + return 0, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "could not parse time %+v: %v", tm, err) } return t, nil } @@ -281,19 +282,18 @@ func (se *Engine) TableWasCreatedOrAltered(ctx context.Context, tableName string se.mu.Lock() defer se.mu.Unlock() if !se.isOpen { - return tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "DDL called on closed schema") + return 
vterrors.Errorf(vtrpcpb.Code_INTERNAL, "DDL called on closed schema") } conn, err := se.conns.Get(ctx) if err != nil { - return tabletenv.NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, err) + return err } defer conn.Recycle() tableData, err := conn.Exec(ctx, mysqlconn.BaseShowTablesForTable(tableName), 1, false) if err != nil { tabletenv.InternalErrors.Add("Schema", 1) - return tabletenv.PrefixTabletError(vtrpcpb.Code_INTERNAL, err, - fmt.Sprintf("TableWasCreatedOrAltered: information_schema query failed for table %s: ", tableName)) + return vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "TableWasCreatedOrAltered: information_schema query failed for table %s: %v", tableName, err) } if len(tableData.Rows) != 1 { // This can happen if DDLs race with each other. @@ -308,8 +308,7 @@ func (se *Engine) TableWasCreatedOrAltered(ctx context.Context, tableName string ) if err != nil { tabletenv.InternalErrors.Add("Schema", 1) - return tabletenv.PrefixTabletError(vtrpcpb.Code_INTERNAL, err, - fmt.Sprintf("TableWasCreatedOrAltered: failed to load table %s: ", tableName)) + return vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "TableWasCreatedOrAltered: failed to load table %s: %v", tableName, err) } // table_rows, data_length, index_length, max_data_length table.SetMysqlStats(row[4], row[5], row[6], row[7], row[8]) diff --git a/go/vt/tabletserver/engines/schema/schema_engine_test.go b/go/vt/tabletserver/engines/schema/schema_engine_test.go index b198f3e3b46..9ea39383d2b 100644 --- a/go/vt/tabletserver/engines/schema/schema_engine_test.go +++ b/go/vt/tabletserver/engines/schema/schema_engine_test.go @@ -36,7 +36,7 @@ func TestStrictMode(t *testing.T) { se := newEngine(10, 1*time.Second, 1*time.Second, true) t.Log(se) err := se.Open(db.ConnParams()) - want := "error: could not verify mode" + want := "could not verify mode" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("se.Open: %v, must contain %s", err, want) } @@ -57,7 +57,7 @@ func TestOpenFailedDueToMissMySQLTime(t *testing.T) { 
}) se := newEngine(10, 1*time.Second, 1*time.Second, false) err := se.Open(db.ConnParams()) - want := "Could not get MySQL time" + want := "could not get MySQL time" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("se.Open: %v, want %s", err, want) } @@ -77,7 +77,7 @@ func TestOpenFailedDueToIncorrectMysqlRowNum(t *testing.T) { }) se := newEngine(10, 1*time.Second, 1*time.Second, false) err := se.Open(db.ConnParams()) - want := "Unexpected result for MySQL time" + want := "unexpected result for MySQL time" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("se.Open: %v, want %s", err, want) } @@ -97,7 +97,7 @@ func TestOpenFailedDueToInvalidTimeFormat(t *testing.T) { }) se := newEngine(10, 1*time.Second, 1*time.Second, false) err := se.Open(db.ConnParams()) - want := "Could not parse time" + want := "could not parse time" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("se.Open: %v, want %s", err, want) } @@ -112,7 +112,7 @@ func TestOpenFailedDueToExecErr(t *testing.T) { db.AddRejectedQuery(mysqlconn.BaseShowTables, fmt.Errorf("injected error")) se := newEngine(10, 1*time.Second, 1*time.Second, false) err := se.Open(db.ConnParams()) - want := "Could not get table list" + want := "could not get table list" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("se.Open: %v, want %s", err, want) } diff --git a/go/vt/tabletserver/messager_engine.go b/go/vt/tabletserver/messager_engine.go index 17abf17dbb7..9b11bad6999 100644 --- a/go/vt/tabletserver/messager_engine.go +++ b/go/vt/tabletserver/messager_engine.go @@ -15,6 +15,7 @@ import ( "github.com/youtube/vitess/go/vt/tabletserver/connpool" "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vterrors" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) @@ -77,7 +78,7 @@ func (me *MessagerEngine) Subscribe(name string, rcv *messageReceiver) error { defer 
me.mu.Unlock() mm := me.managers[name] if mm == nil { - return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "message table %s not found", name) + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "message table %s not found", name) } mm.Subscribe(rcv) return nil diff --git a/go/vt/tabletserver/messager_engine_test.go b/go/vt/tabletserver/messager_engine_test.go index 6bbdfa9f737..3b4cce3f5a7 100644 --- a/go/vt/tabletserver/messager_engine_test.go +++ b/go/vt/tabletserver/messager_engine_test.go @@ -121,7 +121,7 @@ func TestSubscribe(t *testing.T) { <-r2.ch // Error case. - want := "error: message table t3 not found" + want := "message table t3 not found" err = me.Subscribe("t3", r1.rcv) if err == nil || err.Error() != want { t.Errorf("Subscribe: %v, want %s", err, want) diff --git a/go/vt/tabletserver/query_engine.go b/go/vt/tabletserver/query_engine.go index bcfe4c92941..cf9e162633a 100644 --- a/go/vt/tabletserver/query_engine.go +++ b/go/vt/tabletserver/query_engine.go @@ -17,6 +17,7 @@ import ( "github.com/youtube/vitess/go/acl" "github.com/youtube/vitess/go/cache" + "github.com/youtube/vitess/go/mysqlconn" "github.com/youtube/vitess/go/stats" "github.com/youtube/vitess/go/sync2" "github.com/youtube/vitess/go/trace" @@ -30,6 +31,7 @@ import ( "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" "github.com/youtube/vitess/go/vt/tabletserver/planbuilder" "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" @@ -270,7 +272,8 @@ func (qe *QueryEngine) GetPlan(ctx context.Context, logStats *tabletenv.LogStats } splan, err := planbuilder.GetExecPlan(sql, GetTable) if err != nil { - return nil, tabletenv.PrefixTabletError(vtrpcpb.Code_UNKNOWN, err, "") + // TODO(sougou): Inspect to see if GetExecPlan can return coded error. 
+ return nil, vterrors.New(vtrpcpb.Code_UNKNOWN, err.Error()) } plan := &ExecPlan{ExecPlan: splan, Table: table} plan.Rules = qe.queryRuleSources.filterByPlan(sql, plan.PlanID, plan.TableName.String()) @@ -281,7 +284,7 @@ func (qe *QueryEngine) GetPlan(ctx context.Context, logStats *tabletenv.LogStats } else { conn, err := qe.conns.Get(ctx) if err != nil { - return nil, tabletenv.NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, err) + return nil, err } defer conn.Recycle() @@ -290,7 +293,7 @@ func (qe *QueryEngine) GetPlan(ctx context.Context, logStats *tabletenv.LogStats r, err := conn.Exec(ctx, sql, 1, true) logStats.AddRewrittenSQL(sql, start) if err != nil { - return nil, tabletenv.PrefixTabletError(vtrpcpb.Code_INTERNAL, err, "Error fetching fields: ") + return nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "error fetching fields: %v", err) } plan.Fields = r.Fields } @@ -316,7 +319,8 @@ func (qe *QueryEngine) GetStreamPlan(sql string) (*ExecPlan, error) { } splan, err := planbuilder.GetStreamExecPlan(sql, GetTable) if err != nil { - return nil, tabletenv.PrefixTabletError(vtrpcpb.Code_INVALID_ARGUMENT, err, "") + // TODO(sougou): Inspect to see if GetStreamExecPlan can return coded error. 
+ return nil, vterrors.New(vtrpcpb.Code_UNKNOWN, err.Error()) } plan := &ExecPlan{ExecPlan: splan, Table: table} plan.Rules = qe.queryRuleSources.filterByPlan(sql, plan.PlanID, plan.TableName.String()) @@ -333,7 +337,7 @@ func (qe *QueryEngine) ClearQueryPlanCache() { func (qe *QueryEngine) IsMySQLReachable() bool { conn, err := dbconnpool.NewDBConnection(&qe.dbconfigs.App, tabletenv.MySQLStats) if err != nil { - if tabletenv.IsConnErr(err) { + if mysqlconn.IsConnErr(err) { return false } log.Warningf("checking MySQL, unexpected error: %v", err) diff --git a/go/vt/tabletserver/query_executor.go b/go/vt/tabletserver/query_executor.go index 0e62f3ad5f9..a176c3cd1e2 100644 --- a/go/vt/tabletserver/query_executor.go +++ b/go/vt/tabletserver/query_executor.go @@ -14,6 +14,7 @@ import ( "github.com/youtube/vitess/go/hack" "github.com/youtube/vitess/go/mysqlconn" + "github.com/youtube/vitess/go/sqldb" "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/trace" "github.com/youtube/vitess/go/vt/callerid" @@ -23,6 +24,7 @@ import ( "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" "github.com/youtube/vitess/go/vt/tabletserver/planbuilder" "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" @@ -87,7 +89,7 @@ func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) { switch qre.plan.PlanID { case planbuilder.PlanPassDML: if qre.tsv.qe.strictMode.Get() { - return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "DML too complex") + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "DML too complex") } return qre.txFetch(conn, qre.plan.FullQuery, qre.bindVars, nil, false, true) case planbuilder.PlanInsertPK: @@ -114,7 +116,7 @@ func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) { case planbuilder.PlanPassSelect: return qre.execSelect() 
case planbuilder.PlanSelectLock: - return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "Disallowed outside transaction") + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "disallowed outside transaction") case planbuilder.PlanSet: return qre.execSet() case planbuilder.PlanOther: @@ -126,7 +128,7 @@ func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) { return qre.execSQL(conn, qre.query, true) default: if !qre.tsv.qe.autoCommit.Get() { - return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "Disallowed outside transaction") + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "disallowed outside transaction") } return qre.execDmlAutoCommit() } @@ -165,7 +167,7 @@ func (qre *QueryExecutor) execDmlAutoCommit() (reply *sqltypes.Result, err error switch qre.plan.PlanID { case planbuilder.PlanPassDML: if qre.tsv.qe.strictMode.Get() { - return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "DML too complex") + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "DML too complex") } reply, err = qre.txFetch(conn, qre.plan.FullQuery, qre.bindVars, nil, false, true) case planbuilder.PlanInsertPK: @@ -181,7 +183,7 @@ func (qre *QueryExecutor) execDmlAutoCommit() (reply *sqltypes.Result, err error case planbuilder.PlanUpsertPK: reply, err = qre.execUpsertPK(conn) default: - return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "unsupported query: %s", qre.query) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unsupported query: %s", qre.query) } return reply, err }) @@ -229,9 +231,9 @@ func (qre *QueryExecutor) checkPermissions() error { action, desc := qre.plan.Rules.getAction(remoteAddr, username, qre.bindVars) switch action { case QRFail: - return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "Query disallowed due to rule: %s", desc) + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "disallowed due to rule: %s", desc) case QRFailRetry: - 
return tabletenv.NewTabletError(vtrpcpb.Code_FAILED_PRECONDITION, "Query disallowed due to rule: %s", desc) + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "disallowed due to rule: %s", desc) } // Check for SuperUser calling directly to VTTablet (e.g. VTWorker) @@ -243,7 +245,7 @@ func (qre *QueryExecutor) checkPermissions() error { callerID := callerid.ImmediateCallerIDFromContext(qre.ctx) if callerID == nil { if qre.tsv.qe.strictTableACL { - return tabletenv.NewTabletError(vtrpcpb.Code_UNAUTHENTICATED, "missing caller id") + return vterrors.Errorf(vtrpcpb.Code_UNAUTHENTICATED, "missing caller id") } return nil } @@ -260,7 +262,7 @@ func (qre *QueryExecutor) checkPermissions() error { } if qre.plan.Authorized == nil { - return tabletenv.NewTabletError(vtrpcpb.Code_PERMISSION_DENIED, "table acl error: nil acl") + return vterrors.Errorf(vtrpcpb.Code_PERMISSION_DENIED, "table acl error: nil acl") } tableACLStatsKey := []string{ qre.plan.TableName.String(), @@ -279,7 +281,7 @@ func (qre *QueryExecutor) checkPermissions() error { errStr := fmt.Sprintf("table acl error: %q cannot run %v on table %q", callerID.Username, qre.plan.PlanID, qre.plan.TableName) tabletenv.TableaclDenied.Add(tableACLStatsKey, 1) qre.tsv.qe.accessCheckerLogger.Infof("%s", errStr) - return tabletenv.NewTabletError(vtrpcpb.Code_PERMISSION_DENIED, "%s", errStr) + return vterrors.Errorf(vtrpcpb.Code_PERMISSION_DENIED, "%s", errStr) } return nil } @@ -290,7 +292,7 @@ func (qre *QueryExecutor) checkPermissions() error { func (qre *QueryExecutor) execDDL() (*sqltypes.Result, error) { ddlPlan := planbuilder.DDLParse(qre.query) if ddlPlan.Action == "" { - return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "DDL is not understood") + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "DDL is not understood") } conn, err := qre.tsv.te.txPool.LocalBegin(qre.ctx) @@ -464,7 +466,7 @@ func (qre *QueryExecutor) execInsertSubquery(conn *TxConnection) (*sqltypes.Resu return 
&sqltypes.Result{RowsAffected: 0}, nil } if len(qre.plan.ColumnNumbers) != len(innerRows[0]) { - return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "Subquery length does not match column list") + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Subquery length does not match column list") } pkRows := make([][]sqltypes.Value, len(innerRows)) for i, innerRow := range innerRows { @@ -494,15 +496,15 @@ func (qre *QueryExecutor) execUpsertPK(conn *TxConnection) (*sqltypes.Result, er if err == nil { return result, nil } - terr, ok := err.(*tabletenv.TabletError) + sqlErr, ok := err.(*sqldb.SQLError) if !ok { return result, err } - if terr.SQLError != mysqlconn.ERDupEntry { + if sqlErr.Number() != mysqlconn.ERDupEntry { return nil, err } // If the error didn't match pk, just return the error without updating. - if !strings.Contains(terr.Message, "'PRIMARY'") { + if !strings.Contains(sqlErr.Error(), "'PRIMARY'") { return nil, err } // At this point, we know the insert failed due to a duplicate pk row. 
@@ -600,7 +602,7 @@ func (qre *QueryExecutor) getConn(pool *connpool.Pool) (*connpool.DBConn, error) case tabletenv.ErrConnPoolClosed: return nil, err } - return nil, tabletenv.NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, err) + return nil, err } func (qre *QueryExecutor) qFetch(logStats *tabletenv.LogStats, parsedQuery *sqlparser.ParsedQuery, bindVars map[string]interface{}) (*sqltypes.Result, error) { @@ -615,7 +617,7 @@ func (qre *QueryExecutor) qFetch(logStats *tabletenv.LogStats, parsedQuery *sqlp conn, err := qre.tsv.qe.conns.Get(qre.ctx) logStats.WaitingForConnection += time.Now().Sub(waitingForConnectionStart) if err != nil { - q.Err = tabletenv.NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, err) + q.Err = err } else { defer conn.Recycle() q.Result, q.Err = qre.execSQL(conn, sql, false) @@ -671,7 +673,7 @@ func (qre *QueryExecutor) generateFinalSQL(parsedQuery *sqlparser.ParsedQuery, b bindVars["#maxLimit"] = qre.tsv.qe.maxResultSize.Get() + 1 sql, err := parsedQuery.GenerateQuery(bindVars) if err != nil { - return "", tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "%s", err) + return "", vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%s", err) } if buildStreamComment != nil { sql = append(sql, buildStreamComment...) 
@@ -696,7 +698,7 @@ func (qre *QueryExecutor) execStreamSQL(conn *connpool.DBConn, sql string, inclu qre.logStats.AddRewrittenSQL(sql, start) if err != nil { // MySQL error that isn't due to a connection issue - return tabletenv.NewTabletErrorSQL(vtrpcpb.Code_UNKNOWN, err) + return err } return nil } diff --git a/go/vt/tabletserver/query_executor_test.go b/go/vt/tabletserver/query_executor_test.go index d2cb9cdca1a..a1d2c87d0e0 100644 --- a/go/vt/tabletserver/query_executor_test.go +++ b/go/vt/tabletserver/query_executor_test.go @@ -25,6 +25,7 @@ import ( "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" "github.com/youtube/vitess/go/vt/tabletserver/planbuilder" "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" tableaclpb "github.com/youtube/vitess/go/vt/proto/tableacl" @@ -85,16 +86,9 @@ func TestQueryExecutorPlanPassDmlStrictMode(t *testing.T) { defer tsv.StopService() defer testCommitHelper(t, tsv, qre) checkPlanID(t, planbuilder.PlanPassDML, qre.plan.PlanID) - got, err = qre.Execute() - if err == nil { - t.Fatal("qre.Execute() = nil, want error") - } - tabletError, ok := err.(*tabletenv.TabletError) - if !ok { - t.Fatalf("got: %v, want: a tabletenv.TabletError", tabletError) - } - if tabletError.Code != vtrpcpb.Code_INVALID_ARGUMENT { - t.Fatalf("got: %s, want: BAD_INPUT", tabletError.Code) + _, err = qre.Execute() + if code := vterrors.Code(err); code != vtrpcpb.Code_INVALID_ARGUMENT { + t.Fatalf("qre.Execute: %v, want %v", code, vtrpcpb.Code_INVALID_ARGUMENT) } } @@ -125,15 +119,8 @@ func TestQueryExecutorPlanPassDmlStrictModeAutoCommit(t *testing.T) { defer tsv.StopService() checkPlanID(t, planbuilder.PlanPassDML, qre.plan.PlanID) _, err = qre.Execute() - if err == nil { - t.Fatal("got: nil, want: error") - } - tabletError, ok := err.(*tabletenv.TabletError) - if !ok { - t.Fatalf("got: %v, want: *tabletenv.TabletError", tabletError) - } - if 
tabletError.Code != vtrpcpb.Code_INVALID_ARGUMENT { - t.Fatalf("got: %s, want: BAD_INPUT", tabletError.Code) + if code := vterrors.Code(err); code != vtrpcpb.Code_INVALID_ARGUMENT { + t.Fatalf("qre.Execute: %v, want %v", code, vtrpcpb.Code_INVALID_ARGUMENT) } } @@ -313,6 +300,7 @@ func TestQueryExecutorPlanUpsertPk(t *testing.T) { txid := newTransaction(tsv) qre := newTestQueryExecutor(ctx, tsv, query, txid) defer tsv.StopService() + defer testCommitHelper(t, tsv, qre) checkPlanID(t, planbuilder.PlanUpsertPK, qre.plan.PlanID) got, err := qre.Execute() if err != nil { @@ -326,20 +314,19 @@ func TestQueryExecutorPlanUpsertPk(t *testing.T) { if !reflect.DeepEqual(gotqueries, wantqueries) { t.Errorf("queries: %v, want %v", gotqueries, wantqueries) } - testCommitHelper(t, tsv, qre) db.AddRejectedQuery("insert into test_table values (1) /* _stream test_table (pk ) (1 ); */", errRejected) txid = newTransaction(tsv) qre = newTestQueryExecutor(ctx, tsv, query, txid) + defer testCommitHelper(t, tsv, qre) _, err = qre.Execute() - wantErr := "error: rejected" + wantErr := "rejected" if err == nil || !strings.Contains(err.Error(), wantErr) { t.Errorf("qre.Execute() = %v, want %v", err, wantErr) } if gotqueries = fetchRecordedQueries(qre); gotqueries != nil { t.Errorf("queries: %v, want nil", gotqueries) } - testCommitHelper(t, tsv, qre) db.AddRejectedQuery( "insert into test_table values (1) /* _stream test_table (pk ) (1 ); */", @@ -348,8 +335,9 @@ func TestQueryExecutorPlanUpsertPk(t *testing.T) { db.AddQuery("update test_table set val = 1 where pk in (1) /* _stream test_table (pk ) (1 ); */", &sqltypes.Result{}) txid = newTransaction(tsv) qre = newTestQueryExecutor(ctx, tsv, query, txid) + defer testCommitHelper(t, tsv, qre) _, err = qre.Execute() - wantErr = "error: err (errno 1062) (sqlstate 23000)" + wantErr = "err (errno 1062) (sqlstate 23000)" if err == nil || !strings.Contains(err.Error(), wantErr) { t.Errorf("qre.Execute() = %v, want %v", err, wantErr) } @@ -357,7 
+345,6 @@ func TestQueryExecutorPlanUpsertPk(t *testing.T) { if gotqueries = fetchRecordedQueries(qre); gotqueries != nil { t.Errorf("queries: %v, want nil", gotqueries) } - testCommitHelper(t, tsv, qre) db.AddRejectedQuery( "insert into test_table values (1) /* _stream test_table (pk ) (1 ); */", @@ -369,6 +356,7 @@ func TestQueryExecutorPlanUpsertPk(t *testing.T) { ) txid = newTransaction(tsv) qre = newTestQueryExecutor(ctx, tsv, query, txid) + defer testCommitHelper(t, tsv, qre) got, err = qre.Execute() if err != nil { t.Fatalf("qre.Execute() = %v, want nil", err) @@ -384,7 +372,6 @@ func TestQueryExecutorPlanUpsertPk(t *testing.T) { if !reflect.DeepEqual(gotqueries, wantqueries) { t.Errorf("queries: %v, want %v", gotqueries, wantqueries) } - testCommitHelper(t, tsv, qre) } func TestQueryExecutorPlanUpsertPkAutoCommit(t *testing.T) { @@ -408,7 +395,7 @@ func TestQueryExecutorPlanUpsertPkAutoCommit(t *testing.T) { db.AddRejectedQuery("insert into test_table values (1) /* _stream test_table (pk ) (1 ); */", errRejected) _, err = qre.Execute() - wantErr := "error: rejected" + wantErr := "rejected" if err == nil || !strings.Contains(err.Error(), wantErr) { t.Fatalf("qre.Execute() = %v, want %v", err, wantErr) } @@ -419,7 +406,7 @@ func TestQueryExecutorPlanUpsertPkAutoCommit(t *testing.T) { ) db.AddQuery("update test_table set val = 1 where pk in (1) /* _stream test_table (pk ) (1 ); */", &sqltypes.Result{}) _, err = qre.Execute() - wantErr = "error: err (errno 1062) (sqlstate 23000)" + wantErr = "err (errno 1062) (sqlstate 23000)" if err == nil || !strings.Contains(err.Error(), wantErr) { t.Fatalf("qre.Execute() = %v, want %v", err, wantErr) } @@ -683,15 +670,8 @@ func TestQueryExecutorPlanPassSelectWithLockOutsideATransaction(t *testing.T) { defer tsv.StopService() checkPlanID(t, planbuilder.PlanSelectLock, qre.plan.PlanID) _, err := qre.Execute() - if err == nil { - t.Fatal("got: nil, want: error") - } - got, ok := err.(*tabletenv.TabletError) - if !ok { - 
t.Fatalf("got: %v, want: *tabletenv.TabletError", err) - } - if got.Code != vtrpcpb.Code_INVALID_ARGUMENT { - t.Fatalf("got: %s, want: BAD_INPUT", got.Code) + if code := vterrors.Code(err); code != vtrpcpb.Code_INVALID_ARGUMENT { + t.Fatalf("qre.Execute: %v, want %v", code, vtrpcpb.Code_INVALID_ARGUMENT) } } @@ -1031,12 +1011,8 @@ func TestQueryExecutorTableAclNoPermission(t *testing.T) { if err == nil { t.Fatal("got: nil, want: error") } - tabletError, ok := err.(*tabletenv.TabletError) - if !ok { - t.Fatalf("got: %v, want: *tabletenv.TabletError", err) - } - if tabletError.Code != vtrpcpb.Code_PERMISSION_DENIED { - t.Fatalf("got: %s, want: PERMISSION_DENIED", tabletError.Code) + if code := vterrors.Code(err); code != vtrpcpb.Code_PERMISSION_DENIED { + t.Fatalf("qre.Execute: %v, want %v", code, vtrpcpb.Code_PERMISSION_DENIED) } } @@ -1082,18 +1058,12 @@ func TestQueryExecutorTableAclExemptACL(t *testing.T) { checkPlanID(t, planbuilder.PlanPassSelect, qre.plan.PlanID) // query should fail because current user do not have read permissions _, err := qre.Execute() - if err == nil { - t.Fatal("got: nil, want: error") - } - tabletError, ok := err.(*tabletenv.TabletError) - if !ok { - t.Fatalf("got: %v, want: *tabletenv.TabletError", err) - } - if tabletError.Code != vtrpcpb.Code_PERMISSION_DENIED { - t.Fatalf("got: %s, want: PERMISSION_DENIED", tabletError.Code) + if code := vterrors.Code(err); code != vtrpcpb.Code_PERMISSION_DENIED { + t.Fatalf("qre.Execute: %v, want %v", code, vtrpcpb.Code_PERMISSION_DENIED) } - if !strings.Contains(tabletError.Error(), "table acl error") { - t.Fatalf("got %s, want tablet errorL table acl error", tabletError.Error()) + wanterr := "table acl error" + if !strings.Contains(err.Error(), wanterr) { + t.Fatalf("qre.Execute: %v, want %s", err, wanterr) } // table acl should be ignored since this is an exempt user. 
@@ -1222,15 +1192,8 @@ func TestQueryExecutorBlacklistQRFail(t *testing.T) { checkPlanID(t, planbuilder.PlanPassSelect, qre.plan.PlanID) // execute should fail because query has been blacklisted _, err := qre.Execute() - if err == nil { - t.Fatal("got: nil, want: error") - } - got, ok := err.(*tabletenv.TabletError) - if !ok { - t.Fatalf("got: %v, want: *tabletenv.TabletError", err) - } - if got.Code != vtrpcpb.Code_INVALID_ARGUMENT { - t.Fatalf("got: %s, want: BAD_INPUT", got.Code) + if code := vterrors.Code(err); code != vtrpcpb.Code_INVALID_ARGUMENT { + t.Fatalf("qre.Execute: %v, want %v", code, vtrpcpb.Code_INVALID_ARGUMENT) } } @@ -1282,15 +1245,8 @@ func TestQueryExecutorBlacklistQRRetry(t *testing.T) { checkPlanID(t, planbuilder.PlanPassSelect, qre.plan.PlanID) _, err := qre.Execute() - if err == nil { - t.Fatal("got: nil, want: error") - } - got, ok := err.(*tabletenv.TabletError) - if !ok { - t.Fatalf("got: %v, want: *tabletenv.TabletError", err) - } - if got.Code != vtrpcpb.Code_FAILED_PRECONDITION { - t.Fatalf("got: %s, want: QUERY_NOT_SERVED", got.Code) + if code := vterrors.Code(err); code != vtrpcpb.Code_FAILED_PRECONDITION { + t.Fatalf("tsv.qe.queryRuleSources.SetRules: %v, want %v", code, vtrpcpb.Code_FAILED_PRECONDITION) } } diff --git a/go/vt/tabletserver/query_rules.go b/go/vt/tabletserver/query_rules.go index 409e8b47397..03508c79906 100644 --- a/go/vt/tabletserver/query_rules.go +++ b/go/vt/tabletserver/query_rules.go @@ -14,7 +14,7 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/key" "github.com/youtube/vitess/go/vt/tabletserver/planbuilder" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" @@ -92,12 +92,7 @@ func (qrs *QueryRules) UnmarshalJSON(data []byte) (err error) { dec.UseNumber() err = dec.Decode(&rulesInfo) if err != nil { - // 
TODO(aaijazi): There doesn't seem to be a better error code for this, but - // we consider InternalErrors to be retriable (which this error shouldn't be). - // Ideally, we should have an error code that means "This isn't the query's - // fault, but don't retry either, as this will be a global problem". - // (true for all INTERNAL_ERRORS in query_rules) - return tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "%v", err) + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) } for _, ruleInfo := range rulesInfo { qr, err := BuildQueryRule(ruleInfo) @@ -333,7 +328,7 @@ func (qr *QueryRule) AddBindVarCond(name string, onAbsent, onMismatch bool, op O // Change the value to compiled regexp re, err := regexp.Compile(makeExact(v)) if err != nil { - return tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "processing %s: %v", v, err) + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "processing %s: %v", v, err) } converted = bvcre{re} } else { @@ -346,13 +341,13 @@ func (qr *QueryRule) AddBindVarCond(name string, onAbsent, onMismatch bool, op O b := bvcKeyRange(*v) converted = &b default: - return tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "type %T not allowed as condition operand (%v)", value, value) + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "type %T not allowed as condition operand (%v)", value, value) } qr.bindVarConds = append(qr.bindVarConds, BindVarCond{name, onAbsent, onMismatch, op, converted}) return nil Error: - return tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "invalid operator %s for type %T (%v)", op, value, value) + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid operator %v for type %T (%v)", op, value, value) } // filterByPlan returns a new QueryRule if the query and planid match. 
@@ -885,7 +880,7 @@ func MapStrOperator(strop string) (op Operator, err error) { if op, ok := opmap[strop]; ok { return op, nil } - return QRNoOp, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "invalid Operator %s", strop) + return QRNoOp, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid Operator %s", strop) } // BuildQueryRule builds a query rule from a ruleInfo. @@ -899,15 +894,15 @@ func BuildQueryRule(ruleInfo map[string]interface{}) (qr *QueryRule, err error) case "Name", "Description", "RequestIP", "User", "Query", "Action": sv, ok = v.(string) if !ok { - return nil, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want string for %s", k) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "want string for %s", k) } case "Plans", "BindVarConds", "TableNames": lv, ok = v.([]interface{}) if !ok { - return nil, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want list for %s", k) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "want list for %s", k) } default: - return nil, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "unrecognized tag %s", k) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unrecognized tag %s", k) } switch k { case "Name": @@ -917,27 +912,27 @@ func BuildQueryRule(ruleInfo map[string]interface{}) (qr *QueryRule, err error) case "RequestIP": err = qr.SetIPCond(sv) if err != nil { - return nil, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "could not set IP condition: %v", sv) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "could not set IP condition: %v", sv) } case "User": err = qr.SetUserCond(sv) if err != nil { - return nil, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "could not set User condition: %v", sv) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "could not set User condition: %v", sv) } case "Query": err = qr.SetQueryCond(sv) if err != nil { - return nil, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "could not set Query condition: %v", sv) + return 
nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "could not set Query condition: %v", sv) } case "Plans": for _, p := range lv { pv, ok := p.(string) if !ok { - return nil, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want string for Plans") + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "want string for Plans") } pt, ok := planbuilder.PlanByName(pv) if !ok { - return nil, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "invalid plan name: %s", pv) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid plan name: %s", pv) } qr.AddPlanCond(pt) } @@ -945,7 +940,7 @@ func BuildQueryRule(ruleInfo map[string]interface{}) (qr *QueryRule, err error) for _, t := range lv { tableName, ok := t.(string) if !ok { - return nil, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want string for TableNames") + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "want string for TableNames") } qr.AddTableCond(tableName) } @@ -967,7 +962,7 @@ func BuildQueryRule(ruleInfo map[string]interface{}) (qr *QueryRule, err error) case "FAIL_RETRY": qr.act = QRFailRetry default: - return nil, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "invalid Action %s", sv) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid Action %s", sv) } } } @@ -977,41 +972,41 @@ func BuildQueryRule(ruleInfo map[string]interface{}) (qr *QueryRule, err error) func buildBindVarCondition(bvc interface{}) (name string, onAbsent, onMismatch bool, op Operator, value interface{}, err error) { bvcinfo, ok := bvc.(map[string]interface{}) if !ok { - err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want json object for bind var conditions") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "want json object for bind var conditions") return } var v interface{} v, ok = bvcinfo["Name"] if !ok { - err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "Name missing in BindVarConds") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Name missing in 
BindVarConds") return } name, ok = v.(string) if !ok { - err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want string for Name in BindVarConds") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "want string for Name in BindVarConds") return } v, ok = bvcinfo["OnAbsent"] if !ok { - err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "OnAbsent missing in BindVarConds") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "OnAbsent missing in BindVarConds") return } onAbsent, ok = v.(bool) if !ok { - err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want bool for OnAbsent") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "want bool for OnAbsent") return } v, ok = bvcinfo["Operator"] if !ok { - err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "Operator missing in BindVarConds") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Operator missing in BindVarConds") return } strop, ok := v.(string) if !ok { - err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want string for Operator") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "want string for Operator") return } op, err = MapStrOperator(strop) @@ -1023,7 +1018,7 @@ func buildBindVarCondition(bvc interface{}) (name string, onAbsent, onMismatch b } v, ok = bvcinfo["Value"] if !ok { - err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "Value missing in BindVarConds") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Value missing in BindVarConds") return } if op >= QREqual && op <= QRLessEqual { @@ -1034,50 +1029,50 @@ func buildBindVarCondition(bvc interface{}) (name string, onAbsent, onMismatch b // Maybe uint64 value, err = strconv.ParseUint(string(v), 10, 64) if err != nil { - err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want int64/uint64: %s", string(v)) + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "want int64/uint64: %s", string(v)) return } } case string: value = v default: - err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want 
string or number: %v", v) + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "want string or number: %v", v) return } } else if op == QRMatch || op == QRNoMatch { strvalue, ok := v.(string) if !ok { - err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want string: %v", v) + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "want string: %v", v) return } value = strvalue } else if op == QRIn || op == QRNotIn { kr, ok := v.(map[string]interface{}) if !ok { - err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want keyrange for Value") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "want keyrange for Value") return } keyrange := &topodatapb.KeyRange{} strstart, ok := kr["Start"] if !ok { - err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "Start missing in KeyRange") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Start missing in KeyRange") return } start, ok := strstart.(string) if !ok { - err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want string for Start") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "want string for Start") return } keyrange.Start = []byte(start) strend, ok := kr["End"] if !ok { - err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "End missing in KeyRange") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "End missing in KeyRange") return } end, ok := strend.(string) if !ok { - err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want string for End") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "want string for End") return } keyrange.End = []byte(end) @@ -1086,12 +1081,12 @@ func buildBindVarCondition(bvc interface{}) (name string, onAbsent, onMismatch b v, ok = bvcinfo["OnMismatch"] if !ok { - err = tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "OnMismatch missing in BindVarConds") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "OnMismatch missing in BindVarConds") return } onMismatch, ok = v.(bool) if !ok { - err = 
tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "want bool for OnMismatch") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "want bool for OnMismatch") return } return diff --git a/go/vt/tabletserver/query_rules_test.go b/go/vt/tabletserver/query_rules_test.go index 85e80b7f8c4..c276a81a006 100644 --- a/go/vt/tabletserver/query_rules_test.go +++ b/go/vt/tabletserver/query_rules_test.go @@ -14,7 +14,7 @@ import ( "github.com/youtube/vitess/go/vt/key" "github.com/youtube/vitess/go/vt/tabletserver/planbuilder" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" @@ -792,12 +792,8 @@ func TestInvalidJSON(t *testing.T) { } qrs := NewQueryRules() err := qrs.UnmarshalJSON([]byte(`{`)) - terr, ok := err.(*tabletenv.TabletError) - if !ok { - t.Fatalf("invalid json, should get a tablet error") - } - if terr.Code != vtrpcpb.Code_INTERNAL { - t.Fatalf("got: %v wanted: INTERNAL_ERROR", terr.Code) + if code := vterrors.Code(err); code != vtrpcpb.Code_INVALID_ARGUMENT { + t.Errorf("qrs.UnmarshalJSON: %v, want %v", code, vtrpcpb.Code_INVALID_ARGUMENT) } } diff --git a/go/vt/tabletserver/queryservice/wrapped.go b/go/vt/tabletserver/queryservice/wrapped.go index 096a3ce6e73..c137a783dc2 100644 --- a/go/vt/tabletserver/queryservice/wrapped.go +++ b/go/vt/tabletserver/queryservice/wrapped.go @@ -191,9 +191,7 @@ func (ws *wrappedService) BeginExecute(ctx context.Context, target *querypb.Targ err = ws.wrapper(ctx, target, ws.impl, "BeginExecute", false, func(ctx context.Context, target *querypb.Target, conn QueryService) (error, bool) { var innerErr error qr, transactionID, innerErr = conn.BeginExecute(ctx, target, query, bindVars, options) - // If a transaction was started, we cannot retry. 
- retryable := canRetry(ctx, innerErr) && (transactionID == 0) - return innerErr, retryable + return innerErr, canRetry(ctx, innerErr) }) return qr, transactionID, err } @@ -202,9 +200,7 @@ func (ws *wrappedService) BeginExecuteBatch(ctx context.Context, target *querypb err = ws.wrapper(ctx, target, ws.impl, "BeginExecuteBatch", false, func(ctx context.Context, target *querypb.Target, conn QueryService) (error, bool) { var innerErr error qrs, transactionID, innerErr = conn.BeginExecuteBatch(ctx, target, queries, asTransaction, options) - // If a transaction was started, we cannot retry. - retryable := canRetry(ctx, innerErr) && (transactionID == 0) - return innerErr, retryable + return innerErr, canRetry(ctx, innerErr) }) return qrs, transactionID, err } diff --git a/go/vt/tabletserver/tabletconntest/fakequeryservice.go b/go/vt/tabletserver/tabletconntest/fakequeryservice.go index 2e9c3e09a79..50a35748fc2 100644 --- a/go/vt/tabletserver/tabletconntest/fakequeryservice.go +++ b/go/vt/tabletserver/tabletconntest/fakequeryservice.go @@ -12,7 +12,6 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/callerid" "github.com/youtube/vitess/go/vt/tabletserver/querytypes" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" @@ -28,7 +27,7 @@ type FakeQueryService struct { // these fields are used to simulate and synchronize on errors HasError bool HasBeginError bool - TabletError *tabletenv.TabletError + TabletError error ErrorWait chan struct{} // these fields are used to simulate and synchronize on panics diff --git a/go/vt/tabletserver/tabletconntest/tabletconntest.go b/go/vt/tabletserver/tabletconntest/tabletconntest.go index 5eef7d2112d..bd9dbacd2de 100644 --- a/go/vt/tabletserver/tabletconntest/tabletconntest.go +++ b/go/vt/tabletserver/tabletconntest/tabletconntest.go @@ -17,7 +17,6 @@ import ( 
"github.com/youtube/vitess/go/vt/callerid" "github.com/youtube/vitess/go/vt/tabletserver/queryservice" "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/vterrors" "golang.org/x/net/context" @@ -29,24 +28,24 @@ import ( // testErrorHelper will check one instance of each error type, // to make sure we propagate the errors properly. func testErrorHelper(t *testing.T, f *FakeQueryService, name string, ef func(context.Context) error) { - errors := []*tabletenv.TabletError{ + errors := []error{ // A few generic errors - tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "generic error"), - tabletenv.NewTabletError(vtrpcpb.Code_UNKNOWN, "uncaught panic"), - tabletenv.NewTabletError(vtrpcpb.Code_UNAUTHENTICATED, "missing caller id"), - tabletenv.NewTabletError(vtrpcpb.Code_PERMISSION_DENIED, "table acl error: nil acl"), + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "generic error"), + vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "uncaught panic"), + vterrors.Errorf(vtrpcpb.Code_UNAUTHENTICATED, "missing caller id"), + vterrors.Errorf(vtrpcpb.Code_PERMISSION_DENIED, "table acl error: nil acl"), // Client will retry on this specific error - tabletenv.NewTabletError(vtrpcpb.Code_FAILED_PRECONDITION, "Query disallowed due to rule: %v", "cool rule"), + vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "query disallowed due to rule: %v", "cool rule"), // Client may retry on another server on this specific error - tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "Could not verify strict mode"), + vterrors.Errorf(vtrpcpb.Code_INTERNAL, "could not verify strict mode"), // This is usually transaction pool full - tabletenv.NewTabletError(vtrpcpb.Code_RESOURCE_EXHAUSTED, "Transaction pool connection limit exceeded"), + vterrors.Errorf(vtrpcpb.Code_RESOURCE_EXHAUSTED, "transaction pool connection limit exceeded"), // Transaction expired or was unknown - 
tabletenv.NewTabletError(vtrpcpb.Code_ABORTED, "Transaction 12"), + vterrors.Errorf(vtrpcpb.Code_ABORTED, "transaction 12"), } for _, e := range errors { f.TabletError = e @@ -59,13 +58,13 @@ func testErrorHelper(t *testing.T, f *FakeQueryService, name string, ef func(con // First we check the recoverable vtrpc code is right. code := vterrors.Code(err) - if code != e.Code { - t.Errorf("unexpected server code from %v: got %v, wanted %v", name, code, e.Code) + wantcode := vterrors.Code(e) + if code != wantcode { + t.Errorf("unexpected server code from %v: got %v, wanted %v", name, code, wantcode) } - // and last we check we preserve the text, with the right prefix - if !strings.Contains(err.Error(), e.Prefix()+e.Message) { - t.Errorf("client error message '%v' for %v doesn't contain expected server text message '%v'", err.Error(), name, e.Prefix()+e.Message) + if !strings.Contains(err.Error(), e.Error()) { + t.Errorf("client error message '%v' for %v doesn't contain expected server text message '%v'", err.Error(), name, e) } } f.TabletError = nil diff --git a/go/vt/tabletserver/tabletenv/logstats.go b/go/vt/tabletserver/tabletenv/logstats.go index 5c87ebcba1d..db30558d6df 100644 --- a/go/vt/tabletserver/tabletenv/logstats.go +++ b/go/vt/tabletserver/tabletenv/logstats.go @@ -47,7 +47,7 @@ type LogStats struct { QuerySources byte Rows [][]sqltypes.Value TransactionID int64 - Error *TabletError + Error error } // NewLogStats constructs a new LogStats with supplied Method and ctx diff --git a/go/vt/tabletserver/tabletenv/logstats_test.go b/go/vt/tabletserver/tabletenv/logstats_test.go index cfab666bd28..60175e8d2a1 100644 --- a/go/vt/tabletserver/tabletenv/logstats_test.go +++ b/go/vt/tabletserver/tabletenv/logstats_test.go @@ -5,6 +5,7 @@ package tabletenv import ( + "errors" "net/url" "strings" "testing" @@ -15,8 +16,6 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/callinfo" "github.com/youtube/vitess/go/vt/callinfo/fakecallinfo" 
- - vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) func TestLogStats(t *testing.T) { @@ -106,10 +105,7 @@ func TestLogStatsErrorStr(t *testing.T) { t.Fatalf("should not get error in stats, but got: %s", logStats.ErrorStr()) } errStr := "unknown error" - logStats.Error = &TabletError{ - Code: vtrpcpb.Code_UNKNOWN, - Message: errStr, - } + logStats.Error = errors.New(errStr) if !strings.Contains(logStats.ErrorStr(), errStr) { t.Fatalf("expect string '%s' in error message, but got: %s", errStr, logStats.ErrorStr()) } diff --git a/go/vt/tabletserver/tabletenv/tablet_error.go b/go/vt/tabletserver/tabletenv/tablet_error.go index eb7f8201047..940e43951af 100644 --- a/go/vt/tabletserver/tabletenv/tablet_error.go +++ b/go/vt/tabletserver/tabletenv/tablet_error.go @@ -15,6 +15,7 @@ import ( "github.com/youtube/vitess/go/mysqlconn" "github.com/youtube/vitess/go/sqldb" "github.com/youtube/vitess/go/tb" + "github.com/youtube/vitess/go/vt/vterrors" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) @@ -23,12 +24,8 @@ const ( maxErrLen = 5000 ) -// ErrConnPoolClosed is returned / panicked when the connection pool is closed. -var ErrConnPoolClosed = NewTabletError( - // connection pool being closed is not the query's fault, it can be retried on a - // different VtTablet. - vtrpcpb.Code_INTERNAL, - "connection pool is closed") +// ErrConnPoolClosed is returned when the connection pool is closed. +var ErrConnPoolClosed = vterrors.New(vtrpcpb.Code_UNAVAILABLE, "connection pool is closed") // TabletError is the error type we use in this library. // It implements vterrors.VtError interface. 
diff --git a/go/vt/tabletserver/tabletserver.go b/go/vt/tabletserver/tabletserver.go index 9c787f1b9a0..028a4661410 100644 --- a/go/vt/tabletserver/tabletserver.go +++ b/go/vt/tabletserver/tabletserver.go @@ -20,6 +20,7 @@ import ( "github.com/youtube/vitess/go/history" "github.com/youtube/vitess/go/mysqlconn" "github.com/youtube/vitess/go/mysqlconn/replication" + "github.com/youtube/vitess/go/sqldb" "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/stats" "github.com/youtube/vitess/go/sync2" @@ -38,6 +39,7 @@ import ( "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/tabletserver/txthrottler" "github.com/youtube/vitess/go/vt/utils" + "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" @@ -259,7 +261,7 @@ func (tsv *TabletServer) InitDBConfig(target querypb.Target, dbconfigs dbconfigs tsv.mu.Lock() defer tsv.mu.Unlock() if tsv.state != StateNotConnected { - return tabletenv.NewTabletError(vtrpcpb.Code_UNKNOWN, "InitDBConfig failed, current state: %s", stateName[tsv.state]) + return vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "InitDBConfig failed, current state: %s", stateName[tsv.state]) } tsv.target = target tsv.dbconfigs = dbconfigs @@ -377,7 +379,7 @@ func (tsv *TabletServer) decideAction(tabletType topodatapb.TabletType, serving tsv.setState(StateTransitioning) return actionServeNewType, nil case StateTransitioning, StateShuttingDown: - return actionNone, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "cannot SetServingType, current state: %s", stateName[tsv.state]) + return actionNone, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "cannot SetServingType, current state: %s", stateName[tsv.state]) default: panic("unreachable") } @@ -596,7 +598,8 @@ func (tsv *TabletServer) Begin(ctx context.Context, target *querypb.Target) (tra func(ctx context.Context, logStats *tabletenv.LogStats) error { defer 
tabletenv.QueryStats.Record("BEGIN", time.Now()) if tsv.txThrottler.Throttle() { - return tabletenv.NewTabletError(vtrpcpb.Code_UNAVAILABLE, "Transaction throttled") + // TODO(erez): I think this should be RESOURCE_EXHAUSTED. + return vterrors.Errorf(vtrpcpb.Code_UNAVAILABLE, "Transaction throttled") } transactionID, err = tsv.te.txPool.Begin(ctx) logStats.TransactionID = transactionID @@ -857,10 +860,10 @@ func (tsv *TabletServer) StreamExecute(ctx context.Context, target *querypb.Targ // transaction. If AsTransaction is true, TransactionId must be 0. func (tsv *TabletServer) ExecuteBatch(ctx context.Context, target *querypb.Target, queries []querytypes.BoundQuery, asTransaction bool, transactionID int64, options *querypb.ExecuteOptions) (results []sqltypes.Result, err error) { if len(queries) == 0 { - return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "Empty query list") + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Empty query list") } if asTransaction && transactionID != 0 { - return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "cannot start a new transaction in the scope of an existing one") + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "cannot start a new transaction in the scope of an existing one") } allowOnShutdown := (transactionID != 0) @@ -873,7 +876,7 @@ func (tsv *TabletServer) ExecuteBatch(ctx context.Context, target *querypb.Targe if asTransaction { transactionID, err = tsv.Begin(ctx, target) if err != nil { - return nil, tsv.handleError("batch", nil, err, nil) + return nil, tsv.convertAndLogError("batch", nil, err, nil) } // If transaction was not committed by the end, it means // that there was an error, roll it back. 
@@ -887,14 +890,14 @@ func (tsv *TabletServer) ExecuteBatch(ctx context.Context, target *querypb.Targe for _, bound := range queries { localReply, err := tsv.Execute(ctx, target, bound.Sql, bound.BindVariables, transactionID, options) if err != nil { - return nil, tsv.handleError("batch", nil, err, nil) + return nil, tsv.convertAndLogError("batch", nil, err, nil) } results = append(results, *localReply) } if asTransaction { if err = tsv.Commit(ctx, target, transactionID); err != nil { transactionID = 0 - return nil, tsv.handleError("batch", nil, err, nil) + return nil, tsv.convertAndLogError("batch", nil, err, nil) } transactionID = 0 } @@ -955,7 +958,7 @@ func (tsv *TabletServer) MessageAck(ctx context.Context, target *querypb.Target, for _, val := range ids { v, err := sqltypes.BuildConverted(val.Type, val.Value) if err != nil { - return 0, tsv.handleError("message_ack", nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "invalid type: %v", err), nil) + return 0, tsv.convertAndLogError("message_ack", nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid type: %v", err), nil) } sids = append(sids, v.String()) } @@ -989,7 +992,7 @@ func (tsv *TabletServer) execDML(ctx context.Context, target *querypb.Target, qu query, bv, err := queryGenerator() if err != nil { - return 0, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) + return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) } transactionID, err := tsv.Begin(ctx, target) @@ -1103,7 +1106,7 @@ func (tsv *TabletServer) execRequest( err = exec(ctx, logStats) if err != nil { - return tsv.handleError(sql, bindVariables, err, logStats) + return tsv.convertAndLogError(sql, bindVariables, err, logStats) } return nil } @@ -1121,7 +1124,7 @@ func (tsv *TabletServer) handlePanicAndSendLogStats( x, tb.Stack(4) /* Skip the last 4 boiler-plate frames. 
*/) log.Errorf(errorMessage) - terr := tabletenv.NewTabletError(vtrpcpb.Code_UNKNOWN, "%s", errorMessage) + terr := vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "%s", errorMessage) *err = terr tabletenv.InternalErrors.Add("Panic", 1) if logStats != nil { @@ -1133,89 +1136,86 @@ func (tsv *TabletServer) handlePanicAndSendLogStats( } } -func (tsv *TabletServer) handleError( - sql string, - bindVariables map[string]interface{}, - err error, - logStats *tabletenv.LogStats, -) error { - var terr *tabletenv.TabletError - defer func() { - if logStats != nil { - logStats.Error = terr - } - }() - terr, ok := err.(*tabletenv.TabletError) - if !ok { - terr = tabletenv.NewTabletError(vtrpcpb.Code_UNKNOWN, "%v", err) - // We only want to see TabletError here. - tabletenv.InternalErrors.Add("UnknownError", 1) +func (tsv *TabletServer) convertAndLogError(sql string, bindVariables map[string]interface{}, err error, logStats *tabletenv.LogStats) error { + if err == nil { + return nil } + err = tsv.convertError(sql, bindVariables, err) - // If TerseErrors is on, strip the error message returned by MySQL and only - // keep the error number and sql state. - // This avoids leaking PII which may be contained in the bind variables: Since - // vttablet has to rewrite and include the bind variables in the query for - // MySQL, the bind variables data would show up in the error message. - // - // If no bind variables are specified, we do not strip the error message and - // the full user query may be included. We do this on purpose for use cases - // where users manually write queries and need the error message to debug - // e.g. syntax errors on the rewritten query. - var myError error - if tsv.TerseErrors && terr.SQLError != 0 && len(bindVariables) != 0 { - switch { - // Google internal flavor error only. Do not strip it because the vtgate - // buffer starts buffering master traffic when it sees the full error. 
- case terr.SQLError == 1227 && terr.Message == "failover in progress (errno 1227) (sqlstate 42000)": - myError = terr - default: - // Non-whitelisted error. Strip the error message. - myError = &tabletenv.TabletError{ - SQLError: terr.SQLError, - SQLState: terr.SQLState, - Code: terr.Code, - Message: fmt.Sprintf("(errno %d) (sqlstate %s) during query: %s", terr.SQLError, terr.SQLState, sql), - } - } - } else { - myError = terr + if logStats != nil { + logStats.Error = err } + errCode := vterrors.Code(err) + tabletenv.ErrorStats.Add(errCode.String(), 1) - terr.RecordStats() - - logMethod := log.Infof + logMethod := log.Errorf // Suppress or demote some errors in logs. - switch terr.Code { - case vtrpcpb.Code_FAILED_PRECONDITION: - return myError + switch errCode { + case vtrpcpb.Code_FAILED_PRECONDITION, vtrpcpb.Code_ALREADY_EXISTS: + return err case vtrpcpb.Code_RESOURCE_EXHAUSTED: logMethod = logTxPoolFull.Errorf - case vtrpcpb.Code_INTERNAL: - logMethod = log.Errorf case vtrpcpb.Code_ABORTED: logMethod = log.Warningf - default: - // We want to suppress/demote some MySQL error codes. 
- switch terr.SQLError { - case mysqlconn.ERDupEntry: - return myError - case mysqlconn.ERLockWaitTimeout, - mysqlconn.ERLockDeadlock, - mysqlconn.ERDataTooLong, - mysqlconn.ERDataOutOfRange, - mysqlconn.ERBadNullError: - logMethod = log.Infof - case 0: - if !strings.Contains(terr.Error(), "Row count exceeded") { - logMethod = log.Errorf - } - default: - logMethod = log.Errorf + case vtrpcpb.Code_INVALID_ARGUMENT, vtrpcpb.Code_DEADLINE_EXCEEDED: + logMethod = log.Infof + } + logMethod("%v: %v", err, querytypes.QueryAsString(sql, bindVariables)) + return err +} + +func (tsv *TabletServer) convertError(sql string, bindVariables map[string]interface{}, err error) error { + sqlErr, ok := err.(*sqldb.SQLError) + if !ok { + return err + } + + errCode := vterrors.Code(err) + errstr := err.Error() + errnum := sqlErr.Number() + sqlState := sqlErr.SQLState() + switch errnum { + case mysqlconn.EROptionPreventsStatement: + // Special-case this error code. It's probably because + // there was a failover and there are old clients still connected. + if strings.Contains(errstr, "read-only") { + errCode = vtrpcpb.Code_FAILED_PRECONDITION + } + case 1227: // Google internal overloaded error code. + if strings.Contains(errstr, "failover in progress") { + errCode = vtrpcpb.Code_FAILED_PRECONDITION } + case mysqlconn.ERDupEntry: + errCode = vtrpcpb.Code_ALREADY_EXISTS + case mysqlconn.ERDataTooLong, mysqlconn.ERDataOutOfRange, mysqlconn.ERBadNullError: + errCode = vtrpcpb.Code_INVALID_ARGUMENT + case mysqlconn.ERLockWaitTimeout: + errCode = vtrpcpb.Code_DEADLINE_EXCEEDED + case mysqlconn.ERLockDeadlock: + // A deadlock rollsback the transaction. + errCode = vtrpcpb.Code_ABORTED + case mysqlconn.CRServerLost: + // Query was killed. + errCode = vtrpcpb.Code_DEADLINE_EXCEEDED + case mysqlconn.CRServerGone: + errCode = vtrpcpb.Code_UNAVAILABLE + } + + // If TerseErrors is on, strip the error message returned by MySQL and only + // keep the error number and sql state. 
+ // We assume that bind variable have PII, which are included in the MySQL + // query and come back as part of the error message. Removing the MySQL + // error helps us avoid leaking PII. + // There are two exceptions: + // 1. If no bind vars were specified, it's likely that the query was issued + // by someone manually. So, we don't suppress the error. + // 2. FAILED_PRECONDITION errors. These are caused when a failover is in progress. + // If so, we don't want to suppress the error. This will allow VTGate to + // detect and perform buffering during failovers. + if tsv.TerseErrors && len(bindVariables) != 0 && errCode != vtrpcpb.Code_FAILED_PRECONDITION { + errstr = fmt.Sprintf("(errno %d) (sqlstate %s) during query: %s", errnum, sqlState, sql) } - logMethod("%v: %v", terr, querytypes.QueryAsString(sql, bindVariables)) - return myError + return vterrors.New(errCode, errstr) } // validateSplitQueryParameters perform some validations on the SplitQuery parameters @@ -1231,20 +1231,20 @@ func validateSplitQueryParameters( // Check that the caller requested a RDONLY tablet. // Since we're called by VTGate this should not normally be violated. if target.TabletType != topodatapb.TabletType_RDONLY { - return tabletenv.NewTabletError( + return vterrors.Errorf( vtrpcpb.Code_INVALID_ARGUMENT, "SplitQuery must be called with a RDONLY tablet. TableType passed is: %v", target.TabletType) } if numRowsPerQueryPart < 0 { - return tabletenv.NewTabletError( + return vterrors.Errorf( vtrpcpb.Code_INVALID_ARGUMENT, "splitQuery: numRowsPerQueryPart must be non-negative. Got: %v. SQL: %v", numRowsPerQueryPart, querytypes.QueryAsString(query.Sql, query.BindVariables)) } if splitCount < 0 { - return tabletenv.NewTabletError( + return vterrors.Errorf( vtrpcpb.Code_INVALID_ARGUMENT, "splitQuery: splitCount must be non-negative. Got: %v. 
SQL: %v", splitCount, @@ -1252,7 +1252,7 @@ func validateSplitQueryParameters( } if (splitCount == 0 && numRowsPerQueryPart == 0) || (splitCount != 0 && numRowsPerQueryPart != 0) { - return tabletenv.NewTabletError( + return vterrors.Errorf( vtrpcpb.Code_INVALID_ARGUMENT, "splitQuery: exactly one of {numRowsPerQueryPart, splitCount} must be"+ " non zero. Got: numRowsPerQueryPart=%v, splitCount=%v. SQL: %v", @@ -1262,7 +1262,7 @@ func validateSplitQueryParameters( } if algorithm != querypb.SplitQueryRequest_EQUAL_SPLITS && algorithm != querypb.SplitQueryRequest_FULL_SCAN { - return tabletenv.NewTabletError( + return vterrors.Errorf( vtrpcpb.Code_INVALID_ARGUMENT, "splitquery: unsupported algorithm: %v. SQL: %v", algorithm, @@ -1374,7 +1374,7 @@ func splitQueryToTabletError(err error) error { if err == nil { return nil } - return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "splitquery: %v", err) + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "splitquery: %v", err) } // StreamHealth streams the health status to callback. @@ -1462,11 +1462,11 @@ func (tsv *TabletServer) UpdateStream(ctx context.Context, target *querypb.Targe if position != "" { p, err = replication.DecodePosition(position) if err != nil { - return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "cannot parse position: %v", err) + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "cannot parse position: %v", err) } } } else if position != "" { - return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "at most one of position and timestamp should be specified") + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "at most one of position and timestamp should be specified") } // Validate proper target is used. 
@@ -1486,11 +1486,11 @@ func (tsv *TabletServer) UpdateStream(ctx context.Context, target *querypb.Targe err = s.Stream(streamCtx) switch err { case mysqlctl.ErrBinlogUnavailable: - return tabletenv.NewTabletError(vtrpcpb.Code_FAILED_PRECONDITION, "%v", err) + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "%v", err) case nil, io.EOF: return nil default: - return tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "%v", err) + return vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "%v", err) } } @@ -1523,30 +1523,28 @@ func (tsv *TabletServer) startRequest(ctx context.Context, target *querypb.Targe if allowOnShutdown && tsv.state == StateShuttingDown { goto verifyTarget } - return tabletenv.NewTabletError(vtrpcpb.Code_FAILED_PRECONDITION, "operation not allowed in state %s", stateName[tsv.state]) + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "operation not allowed in state %s", stateName[tsv.state]) verifyTarget: if target != nil { // a valid target needs to be used - if target.Keyspace != tsv.target.Keyspace { - return tabletenv.NewTabletError(vtrpcpb.Code_FAILED_PRECONDITION, "Invalid keyspace %v", target.Keyspace) - } - if target.Shard != tsv.target.Shard { - return tabletenv.NewTabletError(vtrpcpb.Code_FAILED_PRECONDITION, "Invalid shard %v", target.Shard) - } - if isTx && tsv.target.TabletType != topodatapb.TabletType_MASTER { - return tabletenv.NewTabletError(vtrpcpb.Code_FAILED_PRECONDITION, "transactional statement disallowed on non-master tablet: %v", tsv.target.TabletType) - } - if target.TabletType != tsv.target.TabletType { + switch { + case target.Keyspace != tsv.target.Keyspace: + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "invalid keyspace %v", target.Keyspace) + case target.Shard != tsv.target.Shard: + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "invalid shard %v", target.Shard) + case isTx && tsv.target.TabletType != topodatapb.TabletType_MASTER: + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, 
"transactional statement disallowed on non-master tablet: %v", tsv.target.TabletType) + case target.TabletType != tsv.target.TabletType: for _, otherType := range tsv.alsoAllow { if target.TabletType == otherType { goto ok } } - return tabletenv.NewTabletError(vtrpcpb.Code_FAILED_PRECONDITION, "Invalid tablet type: %v, want: %v or %v", target.TabletType, tsv.target.TabletType, tsv.alsoAllow) + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "invalid tablet type: %v, want: %v or %v", target.TabletType, tsv.target.TabletType, tsv.alsoAllow) } } else if !tabletenv.IsLocalContext(ctx) { - return tabletenv.NewTabletError(vtrpcpb.Code_FAILED_PRECONDITION, "No target") + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "No target") } ok: diff --git a/go/vt/tabletserver/tabletserver_test.go b/go/vt/tabletserver/tabletserver_test.go index e64f00e11e3..e42d7ac145e 100644 --- a/go/vt/tabletserver/tabletserver_test.go +++ b/go/vt/tabletserver/tabletserver_test.go @@ -21,9 +21,11 @@ import ( "github.com/golang/protobuf/proto" "github.com/youtube/vitess/go/mysqlconn" "github.com/youtube/vitess/go/mysqlconn/fakesqldb" + "github.com/youtube/vitess/go/sqldb" "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/tabletserver/querytypes" "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" @@ -355,7 +357,10 @@ func TestTabletServerAllSchemaFailure(t *testing.T) { err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) defer tsv.StopService() // tabletsever shouldn't start if it can't access schema for any tables - testUtils.checkTabletError(t, err, vtrpcpb.Code_UNKNOWN, "could not get schema for any tables") + wanterr := "could not get schema for any tables" + if err == nil || err.Error() != wanterr { + t.Errorf("tsv.StartService: %v, want %s", err, wanterr) + } } func 
TestTabletServerCheckMysql(t *testing.T) { @@ -512,7 +517,7 @@ func TestTabletServerTarget(t *testing.T) { target2 := proto.Clone(&target1).(*querypb.Target) target2.TabletType = topodatapb.TabletType_REPLICA _, err = tsv.Execute(ctx, target2, "select * from test_table limit 1000", nil, 0, nil) - want := "Invalid tablet type" + want := "invalid tablet type" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("err: %v, must contain %s", err, want) } @@ -532,7 +537,7 @@ func TestTabletServerTarget(t *testing.T) { target2 = proto.Clone(&target1).(*querypb.Target) target2.Keyspace = "bad" _, err = tsv.Execute(ctx, target2, "select * from test_table limit 1000", nil, 0, nil) - want = "Invalid keyspace bad" + want = "invalid keyspace bad" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("err: %v, must contain %s", err, want) } @@ -541,7 +546,7 @@ func TestTabletServerTarget(t *testing.T) { target2 = proto.Clone(&target1).(*querypb.Target) target2.Shard = "bad" _, err = tsv.Execute(ctx, target2, "select * from test_table limit 1000", nil, 0, nil) - want = "Invalid shard bad" + want = "invalid shard bad" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("err: %v, must contain %s", err, want) } @@ -786,7 +791,7 @@ func TestTabletServerStartCommit(t *testing.T) { db.AddQuery(commitTransition, &sqltypes.Result{}) txid = newTxForPrep(tsv) err = tsv.StartCommit(ctx, &target, txid, "aa") - want := "error: could not transition to COMMIT: aa" + want := "could not transition to COMMIT: aa" if err == nil || err.Error() != want { t.Errorf("Prepare err: %v, want %s", err, want) } @@ -810,7 +815,7 @@ func TestTabletserverSetRollback(t *testing.T) { db.AddQuery(rollbackTransition, &sqltypes.Result{}) txid = newTxForPrep(tsv) err = tsv.SetRollback(ctx, &target, "aa", txid) - want := "error: could not transition to ROLLBACK: aa" + want := "could not transition to ROLLBACK: aa" if err == nil || err.Error() != want { t.Errorf("Prepare err: %v, 
want %s", err, want) } @@ -959,7 +964,7 @@ func TestTabletServerBeginFail(t *testing.T) { defer cancel() tsv.Begin(ctx, &target) _, err = tsv.Begin(ctx, &target) - want := "tx_pool_full: Transaction pool connection limit exceeded" + want := "transaction pool connection limit exceeded" if err == nil || err.Error() != want { t.Fatalf("Begin err: %v, want %v", err, want) } @@ -1018,7 +1023,7 @@ func TestTabletServerCommiRollbacktFail(t *testing.T) { defer tsv.StopService() ctx := context.Background() err = tsv.Commit(ctx, &target, -1) - want := "not_in_tx: Transaction -1: not found" + want := "transaction -1: not found" if err == nil || err.Error() != want { t.Fatalf("Commit err: %v, want %v", err, want) } @@ -1455,7 +1460,7 @@ func TestMessageStream(t *testing.T) { ctx := context.Background() target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} - wanterr := "error: message table nomsg not found" + wanterr := "message table nomsg not found" if err := tsv.MessageStream(ctx, &target, "nomsg", func(qr *sqltypes.Result) error { return nil }); err == nil || err.Error() != wanterr { @@ -1525,13 +1530,13 @@ func TestMessageAck(t *testing.T) { Value: []byte("2"), }} _, err := tsv.MessageAck(ctx, &target, "nonmsg", ids) - want := "error: message table nonmsg not found in schema" + want := "message table nonmsg not found in schema" if err == nil || err.Error() != want { t.Errorf("tsv.MessageAck(invalid): %v, want %s", err, want) } _, err = tsv.MessageAck(ctx, &target, "msg", ids) - want = "error: query: select time_scheduled, id from msg where id in ('1', '2') and time_acked is null limit 10001 for update is not supported on fakesqldb" + want = "query: select time_scheduled, id from msg where id in ('1', '2') and time_acked is null limit 10001 for update is not supported on fakesqldb" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("tsv.MessageAck(invalid): %v, want %s", err, want) } @@ -1568,13 +1573,13 @@ func TestRescheduleMessages(t 
*testing.T) { target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} _, err := tsv.PostponeMessages(ctx, &target, "nonmsg", []string{"1", "2"}) - want := "error: message table nonmsg not found in schema" + want := "message table nonmsg not found in schema" if err == nil || err.Error() != want { t.Errorf("tsv.PostponeMessages(invalid): %v, want %s", err, want) } _, err = tsv.PostponeMessages(ctx, &target, "msg", []string{"1", "2"}) - want = "error: query: select time_scheduled, id from msg where id in ('1', '2') and time_acked is null limit 10001 for update is not supported" + want = "query: select time_scheduled, id from msg where id in ('1', '2') and time_acked is null limit 10001 for update is not supported" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("tsv.PostponeMessages(invalid):\n%v, want\n%s", err, want) } @@ -1611,13 +1616,13 @@ func TestPurgeMessages(t *testing.T) { target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} _, err := tsv.PurgeMessages(ctx, &target, "nonmsg", 0) - want := "error: message table nonmsg not found in schema" + want := "message table nonmsg not found in schema" if err == nil || err.Error() != want { t.Errorf("tsv.PurgeMessages(invalid): %v, want %s", err, want) } _, err = tsv.PurgeMessages(ctx, &target, "msg", 0) - want = "error: query: select time_scheduled, id from msg where time_scheduled < 0 and time_acked is not null limit 500 for update is not supported" + want = "query: select time_scheduled, id from msg where time_scheduled < 0 and time_acked is not null limit 500 for update is not supported" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("tsv.PurgeMessages(invalid):\n%v, want\n%s", err, want) } @@ -1774,7 +1779,7 @@ func TestTabletServerSplitQueryEqualSplitsOnStringColumn(t *testing.T) { 0, /* numRowsPerQueryPart */ querypb.SplitQueryRequest_EQUAL_SPLITS) want := - "error: splitquery: using the EQUAL_SPLITS algorithm in SplitQuery" + + "splitquery: using the 
EQUAL_SPLITS algorithm in SplitQuery" + " requires having a numeric (integral or float) split-column." + " Got type: {Name: 'name_string', Type: VARCHAR}" if err.Error() != want { @@ -1797,15 +1802,14 @@ func TestHandleExecTabletError(t *testing.T) { testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() tsv := NewTabletServer(config) - err := tsv.handleError( + err := tsv.convertError( "select * from test_table", nil, - tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "tablet error"), - nil, + vterrors.Errorf(vtrpcpb.Code_INTERNAL, "tablet error"), ) - want := "fatal: tablet error" + want := "tablet error" if err == nil || err.Error() != want { - t.Errorf("Error: %v, want '%s'", err, want) + t.Errorf("%v, want '%s'", err, want) } } @@ -1814,15 +1818,14 @@ func TestTerseErrorsNonSQLError(t *testing.T) { config := testUtils.newQueryServiceConfig() config.TerseErrors = true tsv := NewTabletServer(config) - err := tsv.handleError( + err := tsv.convertError( "select * from test_table", nil, - tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "tablet error"), - nil, + vterrors.Errorf(vtrpcpb.Code_INTERNAL, "tablet error"), ) - want := "fatal: tablet error" + want := "tablet error" if err == nil || err.Error() != want { - t.Errorf("Error: %v, want '%s'", err, want) + t.Errorf("%v, want '%s'", err, want) } } @@ -1831,20 +1834,14 @@ func TestTerseErrorsBindVars(t *testing.T) { config := testUtils.newQueryServiceConfig() config.TerseErrors = true tsv := NewTabletServer(config) - err := tsv.handleError( + err := tsv.convertError( "select * from test_table", map[string]interface{}{"a": 1}, - &tabletenv.TabletError{ - Code: vtrpcpb.Code_DEADLINE_EXCEEDED, - Message: "msg", - SQLError: 10, - SQLState: "HY000", - }, - nil, + sqldb.NewSQLError(10, "HY000", "msg"), ) - want := "error: (errno 10) (sqlstate HY000) during query: select * from test_table" + want := "(errno 10) (sqlstate HY000) during query: select * from test_table" if err == nil || err.Error() != want { 
- t.Errorf("Error: %v, want '%s'", err, want) + t.Errorf("%v, want '%s'", err, want) } } @@ -1853,10 +1850,10 @@ func TestTerseErrorsNoBindVars(t *testing.T) { config := testUtils.newQueryServiceConfig() config.TerseErrors = true tsv := NewTabletServer(config) - err := tsv.handleError("", nil, tabletenv.NewTabletError(vtrpcpb.Code_DEADLINE_EXCEEDED, "msg"), nil) - want := "error: msg" + err := tsv.convertError("", nil, vterrors.Errorf(vtrpcpb.Code_DEADLINE_EXCEEDED, "msg")) + want := "msg" if err == nil || err.Error() != want { - t.Errorf("Error: %v, want '%s'", err, want) + t.Errorf("%v, want '%s'", err, want) } } @@ -1866,16 +1863,11 @@ func TestTerseErrorsIgnoreFailoverInProgress(t *testing.T) { config.TerseErrors = true tsv := NewTabletServer(config) - err := tsv.handleError("select * from test_table where id = :a", + err := tsv.convertError("select * from test_table where id = :a", map[string]interface{}{"a": 1}, - &tabletenv.TabletError{ - Code: vtrpcpb.Code_INTERNAL, - Message: "failover in progress (errno 1227) (sqlstate 42000)", - SQLError: 1227, - SQLState: "42000", - }, - nil /* logStats */) - if got, want := err.Error(), "fatal: failover in progress (errno 1227) (sqlstate 42000)"; got != want { + sqldb.NewSQLError(1227, "42000", "failover in progress"), + ) + if got, want := err.Error(), "failover in progress (errno 1227) (sqlstate 42000)"; got != want { t.Fatalf("'failover in progress' text must never be stripped: got = %v, want = %v", got, want) } } diff --git a/go/vt/tabletserver/testutils_test.go b/go/vt/tabletserver/testutils_test.go index eccc9461851..9a870f3ef8e 100644 --- a/go/vt/tabletserver/testutils_test.go +++ b/go/vt/tabletserver/testutils_test.go @@ -9,15 +9,12 @@ import ( "fmt" "math/rand" "reflect" - "strings" "testing" "github.com/youtube/vitess/go/mysqlconn/fakesqldb" "github.com/youtube/vitess/go/vt/dbconfigs" "github.com/youtube/vitess/go/vt/mysqlctl" "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" - - vtrpcpb 
"github.com/youtube/vitess/go/vt/proto/vtrpc" ) var errRejected = errors.New("rejected") @@ -41,19 +38,6 @@ func (util *testUtils) checkEqual(t *testing.T, expected interface{}, result int } } -func (util *testUtils) checkTabletError(t *testing.T, err interface{}, tabletErrCode vtrpcpb.Code, tabletErrStr string) { - tabletError, ok := err.(*tabletenv.TabletError) - if !ok { - t.Fatalf("should return a TabletError, but got err: %v", err) - } - if tabletError.Code != tabletErrCode { - t.Fatalf("got a TabletError with error code %s but wanted: %s", tabletError.Code, tabletErrCode) - } - if !strings.Contains(tabletError.Error(), tabletErrStr) { - t.Fatalf("expect the tablet error should contain string: '%s', but it does not. Got tablet error: '%s'", tabletErrStr, tabletError.Error()) - } -} - func (util *testUtils) newMysqld(dbcfgs *dbconfigs.DBConfigs) mysqlctl.MysqlDaemon { cnf := mysqlctl.NewMycnf(11111, 6802) // Assigning ServerID to be different from tablet UID to make sure that there are no diff --git a/go/vt/tabletserver/twopc.go b/go/vt/tabletserver/twopc.go index 65a6b2ec331..36de7d2d335 100644 --- a/go/vt/tabletserver/twopc.go +++ b/go/vt/tabletserver/twopc.go @@ -20,7 +20,7 @@ import ( "github.com/youtube/vitess/go/vt/dbconnpool" "github.com/youtube/vitess/go/vt/sqlparser" "github.com/youtube/vitess/go/vt/tabletserver/connpool" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" @@ -137,7 +137,7 @@ func (tpc *TwoPC) Init(sidecarDBName string, dbaparams *sqldb.ConnParams) error } for _, s := range statements { if _, err := conn.ExecuteFetch(s, 0, false); err != nil { - return tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, err.Error()) + return err } } tpc.insertRedoTx = buildParsedQuery( @@ -368,7 +368,7 @@ func (tpc *TwoPC) Transition(ctx context.Context, conn *TxConnection, dtid strin return 
err } if qr.RowsAffected != 1 { - return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "could not transition to %v: %s", state, dtid) + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "could not transition to %v: %s", state, dtid) } return nil } diff --git a/go/vt/tabletserver/tx_executor.go b/go/vt/tabletserver/tx_executor.go index c069c3fc1f2..67d6077157f 100644 --- a/go/vt/tabletserver/tx_executor.go +++ b/go/vt/tabletserver/tx_executor.go @@ -15,6 +15,7 @@ import ( querypb "github.com/youtube/vitess/go/vt/proto/query" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vterrors" ) // TxExecutor is used for executing a transactional request. @@ -32,7 +33,7 @@ type TxExecutor struct { // protocol, will perform all the cleanup. func (txe *TxExecutor) Prepare(transactionID int64, dtid string) error { if !txe.te.twopcEnabled { - return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "2pc is not enabled") } defer tabletenv.QueryStats.Record("PREPARE", time.Now()) txe.logStats.TransactionID = transactionID @@ -51,7 +52,7 @@ func (txe *TxExecutor) Prepare(transactionID int64, dtid string) error { err = txe.te.preparedPool.Put(conn, dtid) if err != nil { txe.te.txPool.localRollback(txe.ctx, conn) - return tabletenv.NewTabletError(vtrpcpb.Code_RESOURCE_EXHAUSTED, "prepare failed for transaction %d: %v", transactionID, err) + return vterrors.Errorf(vtrpcpb.Code_RESOURCE_EXHAUSTED, "prepare failed for transaction %d: %v", transactionID, err) } localConn, err := txe.te.txPool.LocalBegin(txe.ctx) @@ -78,12 +79,12 @@ func (txe *TxExecutor) Prepare(transactionID int64, dtid string) error { // marked as failed in the redo log. 
func (txe *TxExecutor) CommitPrepared(dtid string) error { if !txe.te.twopcEnabled { - return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "2pc is not enabled") } defer tabletenv.QueryStats.Record("COMMIT_PREPARED", time.Now()) conn, err := txe.te.preparedPool.FetchForCommit(dtid) if err != nil { - return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "cannot commit dtid %s, state: %v", dtid, err) + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "cannot commit dtid %s, state: %v", dtid, err) } if conn == nil { return nil @@ -153,7 +154,7 @@ func (txe *TxExecutor) markFailed(ctx context.Context, dtid string) { // killer will be the one to eventually roll it back. func (txe *TxExecutor) RollbackPrepared(dtid string, originalID int64) error { if !txe.te.twopcEnabled { - return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "2pc is not enabled") } defer tabletenv.QueryStats.Record("ROLLBACK_PREPARED", time.Now()) conn, err := txe.te.txPool.LocalBegin(txe.ctx) @@ -183,7 +184,7 @@ returnConn: // CreateTransaction creates the metadata for a 2PC transaction. func (txe *TxExecutor) CreateTransaction(dtid string, participants []*querypb.Target) error { if !txe.te.twopcEnabled { - return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "2pc is not enabled") } defer tabletenv.QueryStats.Record("CREATE_TRANSACTION", time.Now()) conn, err := txe.te.txPool.LocalBegin(txe.ctx) @@ -203,7 +204,7 @@ func (txe *TxExecutor) CreateTransaction(dtid string, participants []*querypb.Ta // decision to commit the associated 2pc transaction. 
func (txe *TxExecutor) StartCommit(transactionID int64, dtid string) error { if !txe.te.twopcEnabled { - return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "2pc is not enabled") } defer tabletenv.QueryStats.Record("START_COMMIT", time.Now()) txe.logStats.TransactionID = transactionID @@ -225,7 +226,7 @@ func (txe *TxExecutor) StartCommit(transactionID int64, dtid string) error { // If a transaction id is provided, that transaction is also rolled back. func (txe *TxExecutor) SetRollback(dtid string, transactionID int64) error { if !txe.te.twopcEnabled { - return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "2pc is not enabled") } defer tabletenv.QueryStats.Record("SET_ROLLBACK", time.Now()) txe.logStats.TransactionID = transactionID @@ -257,7 +258,7 @@ func (txe *TxExecutor) SetRollback(dtid string, transactionID int64) error { // essentially resolving it. func (txe *TxExecutor) ConcludeTransaction(dtid string) error { if !txe.te.twopcEnabled { - return tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "2pc is not enabled") } defer tabletenv.QueryStats.Record("RESOLVE", time.Now()) @@ -277,7 +278,7 @@ func (txe *TxExecutor) ConcludeTransaction(dtid string) error { // ReadTransaction returns the metadata for the sepcified dtid. 
func (txe *TxExecutor) ReadTransaction(dtid string) (*querypb.TransactionMetadata, error) { if !txe.te.twopcEnabled { - return nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") + return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "2pc is not enabled") } return txe.te.twoPC.ReadTransaction(txe.ctx, dtid) } @@ -285,15 +286,15 @@ func (txe *TxExecutor) ReadTransaction(dtid string) (*querypb.TransactionMetadat // ReadTwopcInflight returns info about all in-flight 2pc transactions. func (txe *TxExecutor) ReadTwopcInflight() (distributed []*DistributedTx, prepared, failed []*PreparedTx, err error) { if !txe.te.twopcEnabled { - return nil, nil, nil, tabletenv.NewTabletError(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "2pc is not enabled") } prepared, failed, err = txe.te.twoPC.ReadAllRedo(txe.ctx) if err != nil { - return nil, nil, nil, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "Could not read redo: %v", err) + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "Could not read redo: %v", err) } distributed, err = txe.te.twoPC.ReadAllTransactions(txe.ctx) if err != nil { - return nil, nil, nil, tabletenv.NewTabletError(vtrpcpb.Code_INTERNAL, "Could not read redo: %v", err) + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "Could not read redo: %v", err) } return distributed, prepared, failed, nil } diff --git a/go/vt/tabletserver/tx_executor_test.go b/go/vt/tabletserver/tx_executor_test.go index 0cc84bb8596..51b032db756 100644 --- a/go/vt/tabletserver/tx_executor_test.go +++ b/go/vt/tabletserver/tx_executor_test.go @@ -69,7 +69,7 @@ func TestTxExecutorPrepareNotInTx(t *testing.T) { defer db.Close() defer tsv.StopService() err := txe.Prepare(0, "aa") - want := "not_in_tx: Transaction 0: not found" + want := "transaction 0: not found" if err == nil || err.Error() != want { t.Errorf("Prepare err: %v, want %s", err, want) } 
@@ -101,7 +101,7 @@ func TestTxExecutorPrepareRedoBeginFail(t *testing.T) { db.AddRejectedQuery("begin", errors.New("begin fail")) err := txe.Prepare(txid, "aa") defer txe.RollbackPrepared("aa", 0) - want := "error: begin fail" + want := "begin fail" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("Prepare err: %v, want %s", err, want) } @@ -128,7 +128,7 @@ func TestTxExecutorPrepareRedoCommitFail(t *testing.T) { db.AddRejectedQuery("commit", errors.New("commit fail")) err := txe.Prepare(txid, "aa") defer txe.RollbackPrepared("aa", 0) - want := "error: commit fail" + want := "commit fail" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("Prepare err: %v, want %s", err, want) } @@ -192,7 +192,7 @@ func TestTxExecutorCommitRedoCommitFail(t *testing.T) { defer txe.RollbackPrepared("aa", 0) db.AddRejectedQuery("commit", errors.New("commit fail")) err = txe.CommitPrepared("aa") - want := "error: commit fail" + want := "commit fail" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("Prepare err: %v, want %s", err, want) } @@ -209,7 +209,7 @@ func TestTxExecutorRollbackBeginFail(t *testing.T) { } db.AddRejectedQuery("begin", errors.New("begin fail")) err = txe.RollbackPrepared("aa", txid) - want := "error: begin fail" + want := "begin fail" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("Prepare err: %v, want %s", err, want) } @@ -265,7 +265,7 @@ func TestExecutorStartCommit(t *testing.T) { db.AddQuery(commitTransition, &sqltypes.Result{}) txid = newTxForPrep(tsv) err = txe.StartCommit(txid, "aa") - want := "error: could not transition to COMMIT: aa" + want := "could not transition to COMMIT: aa" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("Prepare err: %v, want %s", err, want) } @@ -287,7 +287,7 @@ func TestExecutorSetRollback(t *testing.T) { db.AddQuery(rollbackTransition, &sqltypes.Result{}) txid = newTxForPrep(tsv) err = txe.SetRollback("aa", txid) - want := "error: could not 
transition to ROLLBACK: aa" + want := "could not transition to ROLLBACK: aa" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("Prepare err: %v, want %s", err, want) } @@ -541,7 +541,7 @@ func TestNoTwopc(t *testing.T) { }, }} - want := "error: 2pc is not enabled" + want := "2pc is not enabled" for _, tc := range testcases { err := tc.fun() if err == nil || err.Error() != want { diff --git a/go/vt/tabletserver/tx_pool.go b/go/vt/tabletserver/tx_pool.go index 26fcd2eea64..3ae726260eb 100644 --- a/go/vt/tabletserver/tx_pool.go +++ b/go/vt/tabletserver/tx_pool.go @@ -14,6 +14,7 @@ import ( log "github.com/golang/glog" "golang.org/x/net/context" + "github.com/youtube/vitess/go/mysqlconn" "github.com/youtube/vitess/go/pools" "github.com/youtube/vitess/go/sqldb" "github.com/youtube/vitess/go/sqltypes" @@ -23,6 +24,7 @@ import ( "github.com/youtube/vitess/go/vt/callerid" "github.com/youtube/vitess/go/vt/tabletserver/connpool" "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" @@ -146,18 +148,13 @@ func (axp *TxPool) Begin(ctx context.Context) (int64, error) { return 0, err case pools.ErrTimeout: axp.LogActive() - return 0, tabletenv.NewTabletError(vtrpcpb.Code_RESOURCE_EXHAUSTED, "Transaction pool connection limit exceeded") + return 0, vterrors.Errorf(vtrpcpb.Code_RESOURCE_EXHAUSTED, "transaction pool connection limit exceeded") } - return 0, tabletenv.NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, err) + return 0, err } if _, err := conn.Exec(ctx, "begin", 1, false); err != nil { conn.Recycle() - if _, ok := err.(*tabletenv.TabletError); ok { - // Exec() already returned a TabletError. Don't wrap err into another - // TabletError and instead preserve the error code. 
- return 0, err - } - return 0, tabletenv.NewTabletErrorSQL(vtrpcpb.Code_UNKNOWN, err) + return 0, err } transactionID := axp.lastID.Add(1) axp.activePool.Register( @@ -196,7 +193,7 @@ func (axp *TxPool) Rollback(ctx context.Context, transactionID int64) error { func (axp *TxPool) Get(transactionID int64, reason string) (*TxConnection, error) { v, err := axp.activePool.Get(transactionID, reason) if err != nil { - return nil, tabletenv.NewTabletError(vtrpcpb.Code_ABORTED, "Transaction %d: %v", transactionID, err) + return nil, vterrors.Errorf(vtrpcpb.Code_ABORTED, "transaction %d: %v", transactionID, err) } return v.(*TxConnection), nil } @@ -219,7 +216,7 @@ func (axp *TxPool) LocalCommit(ctx context.Context, conn *TxConnection, messager txStats.Add("Completed", time.Now().Sub(conn.StartTime)) if _, err := conn.Exec(ctx, "commit", 1, false); err != nil { conn.Close() - return tabletenv.NewTabletErrorSQL(vtrpcpb.Code_UNKNOWN, err) + return err } messager.UpdateCaches(conn.NewMessages, conn.ChangedMessages) return nil @@ -238,7 +235,7 @@ func (axp *TxPool) localRollback(ctx context.Context, conn *TxConnection) error txStats.Add("Aborted", time.Now().Sub(conn.StartTime)) if _, err := conn.Exec(ctx, "rollback", 1, false); err != nil { conn.Close() - return tabletenv.NewTabletErrorSQL(vtrpcpb.Code_UNKNOWN, err) + return err } return nil } @@ -304,11 +301,10 @@ func newTxConnection(conn *connpool.DBConn, transactionID int64, pool *TxPool, i func (txc *TxConnection) Exec(ctx context.Context, query string, maxrows int, wantfields bool) (*sqltypes.Result, error) { r, err := txc.DBConn.ExecOnce(ctx, query, maxrows, wantfields) if err != nil { - if tabletenv.IsConnErr(err) { + if mysqlconn.IsConnErr(err) { txc.pool.checker.CheckMySQL() - return nil, tabletenv.NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, err) } - return nil, tabletenv.NewTabletErrorSQL(vtrpcpb.Code_UNKNOWN, err) + return nil, err } return r, nil } diff --git a/go/vt/tabletserver/tx_pool_test.go 
b/go/vt/tabletserver/tx_pool_test.go index 7632abdb318..b853f89de8b 100644 --- a/go/vt/tabletserver/tx_pool_test.go +++ b/go/vt/tabletserver/tx_pool_test.go @@ -13,7 +13,9 @@ import ( "golang.org/x/net/context" + "github.com/youtube/vitess/go/mysqlconn" "github.com/youtube/vitess/go/mysqlconn/fakesqldb" + "github.com/youtube/vitess/go/sqldb" "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/vterrors" @@ -117,25 +119,6 @@ func TestTxPoolTransactionKiller(t *testing.T) { } } -func TestTxPoolBeginAfterConnPoolClosed(t *testing.T) { - db := fakesqldb.New(t) - defer db.Close() - txPool := newTxPool() - txPool.SetTimeout(time.Duration(10)) - txPool.Open(db.ConnParams(), db.ConnParams()) - - txPool.Close() - - _, err := txPool.Begin(context.Background()) - if err == nil { - t.Fatalf("expect to get an error") - } - terr, ok := err.(*tabletenv.TabletError) - if !ok || terr != tabletenv.ErrConnPoolClosed { - t.Fatalf("get error: %v, but expect: %v", terr, tabletenv.ErrConnPoolClosed) - } -} - // TestTxPoolBeginWithPoolConnectionError_TransientErrno2006 tests the case // where we see a transient errno 2006 e.g. because MySQL killed the // db connection. 
DBConn.Exec() is going to reconnect and retry automatically @@ -190,8 +173,12 @@ func TestTxPoolBeginWithPoolConnectionError_Errno2006_Permanent(t *testing.T) { if err == nil || !strings.Contains(err.Error(), "Lost connection to MySQL server") || !strings.Contains(err.Error(), "(errno 2013)") { t.Fatalf("Begin did not return the reconnect error: %v", err) } - if got, want := vterrors.Code(err), vtrpcpb.Code_INTERNAL; got != want { - t.Errorf("wrong error code for reconnect error after Begin: got = %v, want = %v", got, want) + sqlErr, ok := err.(*sqldb.SQLError) + if !ok { + t.Fatalf("Unexpected error type: %T, want %T", err, &sqldb.SQLError{}) + } + if num := sqlErr.Number(); num != mysqlconn.CRServerLost { + t.Errorf("Unexpected error code: %d, want %d", num, mysqlconn.CRServerLost) } } @@ -298,7 +285,7 @@ func TestTxPoolGetConnNonExistentTransaction(t *testing.T) { txPool.Open(db.ConnParams(), db.ConnParams()) defer txPool.Close() _, err := txPool.Get(12345, "for query") - want := "not_in_tx: Transaction 12345: not found" + want := "transaction 12345: not found" if err == nil || err.Error() != want { t.Errorf("Get: %v, want %s", err, want) } @@ -333,8 +320,12 @@ func TestTxPoolExecFailDueToConnFail_Errno2006(t *testing.T) { if err == nil || !strings.Contains(err.Error(), "(errno 2006)") { t.Fatalf("Exec must return connection error with MySQL errno 2006: %v", err) } - if got, want := vterrors.Code(err), vtrpcpb.Code_INTERNAL; got != want { - t.Errorf("wrong error code for Exec error: got = %v, want = %v", got, want) + sqlErr, ok := err.(*sqldb.SQLError) + if !ok { + t.Fatalf("Unexpected error type: %T, want %T", err, &sqldb.SQLError{}) + } + if num := sqlErr.Number(); num != mysqlconn.CRServerGone { + t.Errorf("Unexpected error code: %d, want %d", num, mysqlconn.CRServerGone) } } diff --git a/go/vt/vterrors/grpc.go b/go/vt/vterrors/grpc.go index a98da00e7ac..92c430c9a14 100644 --- a/go/vt/vterrors/grpc.go +++ b/go/vt/vterrors/grpc.go @@ -152,7 +152,7 @@ func 
FromGRPCError(err error) error { return err } return &VitessError{ - Code: GRPCToCode(grpc.Code(err)), + code: GRPCToCode(grpc.Code(err)), err: err, } } diff --git a/go/vt/vterrors/proto3.go b/go/vt/vterrors/proto3.go index fff4e15b406..91051ac0d11 100644 --- a/go/vt/vterrors/proto3.go +++ b/go/vt/vterrors/proto3.go @@ -5,8 +5,6 @@ package vterrors import ( - "errors" - vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) @@ -25,10 +23,7 @@ func FromVtRPCError(rpcErr *vtrpcpb.RPCError) error { if code == vtrpcpb.Code_OK { code = LegacyErrorCodeToCode(rpcErr.LegacyCode) } - return &VitessError{ - Code: code, - err: errors.New(rpcErr.Message), - } + return New(code, rpcErr.Message) } // VtRPCErrorFromVtError converts from a VtError to a vtrpcpb.RPCError. diff --git a/go/vt/vterrors/proto3_test.go b/go/vt/vterrors/proto3_test.go index d6c5fbaad24..e40c0107564 100644 --- a/go/vt/vterrors/proto3_test.go +++ b/go/vt/vterrors/proto3_test.go @@ -5,7 +5,6 @@ package vterrors import ( - "errors" "reflect" "testing" @@ -24,29 +23,20 @@ func TestFromVtRPCError(t *testing.T) { LegacyCode: vtrpcpb.LegacyErrorCode_BAD_INPUT_LEGACY, Message: "bad input", }, - want: &VitessError{ - Code: vtrpcpb.Code_INVALID_ARGUMENT, - err: errors.New("bad input"), - }, + want: New(vtrpcpb.Code_INVALID_ARGUMENT, "bad input"), }, { in: &vtrpcpb.RPCError{ LegacyCode: vtrpcpb.LegacyErrorCode_BAD_INPUT_LEGACY, Message: "bad input", Code: vtrpcpb.Code_INVALID_ARGUMENT, }, - want: &VitessError{ - Code: vtrpcpb.Code_INVALID_ARGUMENT, - err: errors.New("bad input"), - }, + want: New(vtrpcpb.Code_INVALID_ARGUMENT, "bad input"), }, { in: &vtrpcpb.RPCError{ Message: "bad input", Code: vtrpcpb.Code_INVALID_ARGUMENT, }, - want: &VitessError{ - Code: vtrpcpb.Code_INVALID_ARGUMENT, - err: errors.New("bad input"), - }, + want: New(vtrpcpb.Code_INVALID_ARGUMENT, "bad input"), }} for _, tcase := range testcases { got := FromVtRPCError(tcase.in) @@ -64,10 +54,7 @@ func TestVtRPCErrorFromVtError(t *testing.T) { 
in: nil, want: nil, }, { - in: &VitessError{ - Code: vtrpcpb.Code_INVALID_ARGUMENT, - err: errors.New("bad input"), - }, + in: New(vtrpcpb.Code_INVALID_ARGUMENT, "bad input"), want: &vtrpcpb.RPCError{ LegacyCode: vtrpcpb.LegacyErrorCode_BAD_INPUT_LEGACY, Message: "bad input", diff --git a/go/vt/vterrors/vterrors.go b/go/vt/vterrors/vterrors.go index b989cc7b295..a92d5ff33db 100644 --- a/go/vt/vterrors/vterrors.go +++ b/go/vt/vterrors/vterrors.go @@ -16,7 +16,7 @@ func Code(err error) vtrpcpb.Code { return vtrpcpb.Code_OK } if err, ok := err.(*VitessError); ok { - return err.Code + return err.code } if err, ok := err.(VtError); ok { return err.VtErrorCode() @@ -42,8 +42,7 @@ type VtError interface { // VitessError is the error type that we use internally for passing structured errors. type VitessError struct { - // Error code of the Vitess error. - Code vtrpcpb.Code + code vtrpcpb.Code // Error message that should be returned. This allows us to change an error message // without losing the underlying error. For example, if you have an error like // context.DeadlikeExceeded, you don't want to modify it - otherwise you would lose @@ -58,11 +57,19 @@ type VitessError struct { // New creates a new error using the code and input string. func New(code vtrpcpb.Code, in string) error { return &VitessError{ - Code: code, + code: code, err: errors.New(in), } } +// Errorf returns a new error built using Printf style arguments. +func Errorf(code vtrpcpb.Code, format string, args ...interface{}) error { + return &VitessError{ + code: code, + err: errors.New(fmt.Sprintf(format, args...)), + } +} + // Error implements the error interface. It will return the redefined error message, if there // is one. If there isn't, it will return the original error message. func (e *VitessError) Error() string { @@ -74,15 +81,15 @@ func (e *VitessError) Error() string { // VtErrorCode returns the underlying Vitess error code. 
func (e *VitessError) VtErrorCode() vtrpcpb.Code { - return e.Code + return e.code } // AsString returns a VitessError as a string, with more detailed information than Error(). func (e *VitessError) AsString() string { if e.Message != "" { - return fmt.Sprintf("Code: %v, Message: %v, err: %v", e.Code, e.Message, e.err) + return fmt.Sprintf("Code: %v, Message: %v, err: %v", e.code, e.Message, e.err) } - return fmt.Sprintf("Code: %v, err: %v", e.Code, e.err) + return fmt.Sprintf("Code: %v, err: %v", e.code, e.err) } // FromError returns a VitessError with the supplied error code by wrapping an @@ -93,7 +100,7 @@ func (e *VitessError) AsString() string { // errors.New("no valid endpoint")) func FromError(code vtrpcpb.Code, err error) error { return &VitessError{ - Code: code, + code: code, err: err, } } @@ -102,7 +109,7 @@ func FromError(code vtrpcpb.Code, err error) error { // Useful for preserving an underlying error while creating a new error message. func NewVitessError(code vtrpcpb.Code, err error, format string, args ...interface{}) error { return &VitessError{ - Code: code, + code: code, Message: fmt.Sprintf(format, args...), err: err, } @@ -125,14 +132,14 @@ func WithPrefix(prefix string, in error) error { func WithSuffix(in error, suffix string) error { if vitessError, ok := in.(*VitessError); ok { return &VitessError{ - Code: vitessError.Code, + code: vitessError.code, err: vitessError.err, Message: fmt.Sprintf("%s%s", in.Error(), suffix), } } if vtError, ok := in.(VtError); ok { return &VitessError{ - Code: vtError.VtErrorCode(), + code: vtError.VtErrorCode(), err: in, Message: fmt.Sprintf("%s%s", in.Error(), suffix), } diff --git a/go/vt/vtgate/buffer/buffer.go b/go/vt/vtgate/buffer/buffer.go index ff0defb2603..8557d788673 100644 --- a/go/vt/vtgate/buffer/buffer.go +++ b/go/vt/vtgate/buffer/buffer.go @@ -218,31 +218,26 @@ func (b *Buffer) StatsUpdate(ts *discovery.TabletStats) { func causedByFailover(err error) bool { log.V(2).Infof("Checking error (type: 
%T) if it is caused by a failover. err: %v", err, err) - if vtErr, ok := err.(vterrors.VtError); ok { - switch vtErr.VtErrorCode() { - case vtrpcpb.Code_FAILED_PRECONDITION: - // All flavors. - if strings.Contains(err.Error(), "retry: operation not allowed in state NOT_SERVING") || - strings.Contains(err.Error(), "retry: operation not allowed in state SHUTTING_DOWN") || - // Match 1290 if -queryserver-config-terse-errors explicitly hid the error message - // (which it does to avoid logging the original query including any PII). - strings.Contains(err.Error(), "retry: (errno 1290) (sqlstate HY000) during query:") { - return true - } - // MariaDB flavor. - if strings.Contains(err.Error(), "retry: The MariaDB server is running with the --read-only option so it cannot execute this statement (errno 1290) (sqlstate HY000)") { - return true - } - // MySQL flavor. - if strings.Contains(err.Error(), "retry: The MySQL server is running with the --read-only option so it cannot execute this statement (errno 1290) (sqlstate HY000)") { - return true - } - case vtrpcpb.Code_INTERNAL: - // Google internal flavor. - if strings.Contains(err.Error(), "fatal: failover in progress (errno 1227) (sqlstate 42000)") { - return true - } - } + if vterrors.Code(err) != vtrpcpb.Code_FAILED_PRECONDITION { + return false + } + switch { + // All flavors. + case strings.Contains(err.Error(), "operation not allowed in state NOT_SERVING") || + strings.Contains(err.Error(), "operation not allowed in state SHUTTING_DOWN") || + // Match 1290 if -queryserver-config-terse-errors explicitly hid the error message + // (which it does to avoid logging the original query including any PII). + strings.Contains(err.Error(), "(errno 1290) (sqlstate HY000) during query:"): + return true + // MariaDB flavor. + case strings.Contains(err.Error(), "The MariaDB server is running with the --read-only option so it cannot execute this statement (errno 1290) (sqlstate HY000)"): + return true + // MySQL flavor. 
+ case strings.Contains(err.Error(), "The MySQL server is running with the --read-only option so it cannot execute this statement (errno 1290) (sqlstate HY000)"): + return true + // Google internal flavor. + case strings.Contains(err.Error(), "failover in progress (errno 1227) (sqlstate 42000)"): + return true } return false } diff --git a/test/update_stream.py b/test/update_stream.py index 62e2d5d793d..e78f010d7a6 100755 --- a/test/update_stream.py +++ b/test/update_stream.py @@ -636,7 +636,7 @@ def test_timestamp_start_too_old(self): shard='0'): self.assertFail('got an event: %s %d' % (str(event), resume_timestamp)) except dbexceptions.QueryNotServed as e: - self.assertIn('retry: cannot find relevant binlogs on this server', + self.assertIn('cannot find relevant binlogs on this server', str(e)) diff --git a/test/vertical_split.py b/test/vertical_split.py index 98cf0ed0df6..96f590ed637 100755 --- a/test/vertical_split.py +++ b/test/vertical_split.py @@ -337,7 +337,7 @@ def _check_blacklisted_tables(self, t, expected): 'select count(1) from %s' % table], expect_fail=True) self.assertIn( - 'retry: Query disallowed due to rule: enforce blacklisted tables', + 'disallowed due to rule: enforce blacklisted tables', stderr) else: # table is not blacklisted, should just work diff --git a/test/vtgatev2_test.py b/test/vtgatev2_test.py index 2d227c19e4b..2a614375262 100755 --- a/test/vtgatev2_test.py +++ b/test/vtgatev2_test.py @@ -1103,30 +1103,6 @@ def _get_non_vtgate_errors(self): return 0 return v['VtgateInfoErrorCounts']['NonVtgateErrors'] - def test_vttablet_errors_not_logged(self): - """Verifies that errors from VtTablet aren't logged as such in VTGate. - - Instead of making assertions by reading the log stream, we read a debug - vars that is incremented by VTGate whenever it chooses to log exceptions - to Infof instead of Errorf. 
- """ - before = self._get_non_vtgate_errors() - - vtgate_conn = get_connection() - keyspace_id = SHARD_KID_MAP[SHARD_NAMES[self.shard_index]][0] - cursor = vtgate_conn.cursor( - tablet_type='master', keyspace=KEYSPACE_NAME, - keyspace_ids=[pack_kid(keyspace_id)], - writable=True) - with self.assertRaises(dbexceptions.DatabaseError): - cursor.execute('this is not valid syntax, throw an error', {}) - - after = self._get_non_vtgate_errors() - self.assertEqual(after - before, 1, - 'No errors in VTGate that were not logged as exceptions' - ' (%d to %d)' % (before, after)) - vtgate_conn.close() - def test_error_on_dml(self): vtgate_conn = get_connection() vtgate_conn.begin() From ceaddb1f125412b9c6deccf5f763ea419dc43efd Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Tue, 21 Feb 2017 19:35:44 -0800 Subject: [PATCH 020/108] vterrors: simplify API Also delete TabletError --- go/cmd/vtcombo/tablet_map.go | 38 +-- .../vtgateclienttest/goclienttest/errors.go | 11 +- go/cmd/vtgateclienttest/services/errors.go | 52 +--- go/mysqlconn/constants_test.go | 32 +++ go/sqltypes/proto3.go | 4 +- go/sqltypes/proto3_test.go | 4 +- go/vt/dtids/dtids.go | 8 +- go/vt/proto/vtrpc/vtrpc.pb.go | 4 +- go/vt/tabletmanager/grpctmserver/server.go | 6 +- go/vt/tabletserver/connpool/pool.go | 9 +- go/vt/tabletserver/grpcqueryservice/server.go | 60 ++--- go/vt/tabletserver/grpctabletconn/conn.go | 58 ++--- go/vt/tabletserver/query_executor.go | 2 +- go/vt/tabletserver/querytypes/bound_query.go | 20 +- go/vt/tabletserver/tabletconn/grpc_error.go | 13 +- .../tabletconn/grpc_error_test.go | 2 +- go/vt/tabletserver/tabletenv/tablet_error.go | 185 -------------- .../tabletenv/tablet_error_test.go | 227 ------------------ go/vt/tabletserver/tabletenv/tabletenv.go | 11 + go/vt/tabletserver/tabletserver.go | 6 +- go/vt/tabletserver/twopc.go | 2 +- go/vt/tabletserver/tx_executor.go | 18 +- go/vt/tabletserver/tx_pool.go | 2 +- .../grpcthrottlerclient.go | 10 +- go/vt/vterrors/aggregate.go | 34 ++- 
go/vt/vterrors/aggregate_test.go | 16 +- go/vt/vterrors/doc.go | 33 +-- go/vt/vterrors/grpc.go | 49 +--- go/vt/vterrors/proto3.go | 14 +- go/vt/vterrors/proto3_test.go | 4 +- go/vt/vterrors/vterrors.go | 136 ++--------- go/vt/vtgate/buffer/buffer.go | 10 +- go/vt/vtgate/buffer/buffer_test.go | 8 +- go/vt/vtgate/buffer/shard_buffer.go | 2 +- go/vt/vtgate/gateway/discoverygateway.go | 15 +- go/vt/vtgate/gateway/discoverygateway_test.go | 6 +- go/vt/vtgate/gateway/shard_error.go | 6 +- go/vt/vtgate/grpcvtgateconn/conn.go | 70 +++--- go/vt/vtgate/grpcvtgateservice/server.go | 64 ++--- go/vt/vtgate/masterbuffer/masterbuffer.go | 6 +- go/vt/vtgate/resolver.go | 5 +- go/vt/vtgate/safe_session.go | 3 +- go/vt/vtgate/scatter_conn.go | 14 +- go/vt/vtgate/topo_utils.go | 32 +-- go/vt/vtgate/tx_conn.go | 12 +- go/vt/vtgate/tx_conn_test.go | 9 +- go/vt/vtgate/vtgate.go | 13 +- go/vt/vtgate/vtgate_test.go | 2 +- go/vt/vtgate/vtgateconntest/client.go | 6 +- go/vt/worker/command.go | 2 +- go/vt/worker/grpcvtworkerclient/client.go | 6 +- go/vt/worker/grpcvtworkerserver/server.go | 2 +- go/vt/worker/instance.go | 18 +- go/vt/worker/vtworkerclient/wrapper.go | 6 +- .../vtworkerclienttest/client_testsuite.go | 2 +- proto/vtrpc.proto | 4 +- py/vtdb/grpc_vtgate_client.py | 2 +- 57 files changed, 399 insertions(+), 996 deletions(-) create mode 100644 go/mysqlconn/constants_test.go delete mode 100644 go/vt/tabletserver/tabletenv/tablet_error.go delete mode 100644 go/vt/tabletserver/tabletenv/tablet_error_test.go diff --git a/go/cmd/vtcombo/tablet_map.go b/go/cmd/vtcombo/tablet_map.go index 16cfef1f68d..00744a116f3 100644 --- a/go/cmd/vtcombo/tablet_map.go +++ b/go/cmd/vtcombo/tablet_map.go @@ -282,7 +282,7 @@ func (itc *internalTabletConn) Execute(ctx context.Context, target *querypb.Targ } reply, err := itc.tablet.qsc.QueryService().Execute(ctx, target, query, bindVars, transactionID, options) if err != nil { - return nil, tabletconn.TabletErrorFromGRPC(vterrors.ToGRPCError(err)) + 
return nil, tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) } return reply, nil } @@ -305,7 +305,7 @@ func (itc *internalTabletConn) ExecuteBatch(ctx context.Context, target *querypb } results, err := itc.tablet.qsc.QueryService().ExecuteBatch(ctx, target, q, asTransaction, transactionID, options) if err != nil { - return nil, tabletconn.TabletErrorFromGRPC(vterrors.ToGRPCError(err)) + return nil, tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) } return results, nil } @@ -323,14 +323,14 @@ func (itc *internalTabletConn) StreamExecute(ctx context.Context, target *queryp } err = itc.tablet.qsc.QueryService().StreamExecute(ctx, target, query, bindVars, options, callback) - return tabletconn.TabletErrorFromGRPC(vterrors.ToGRPCError(err)) + return tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) } // Begin is part of queryservice.QueryService func (itc *internalTabletConn) Begin(ctx context.Context, target *querypb.Target) (int64, error) { transactionID, err := itc.tablet.qsc.QueryService().Begin(ctx, target) if err != nil { - return 0, tabletconn.TabletErrorFromGRPC(vterrors.ToGRPCError(err)) + return 0, tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) } return transactionID, nil } @@ -338,61 +338,61 @@ func (itc *internalTabletConn) Begin(ctx context.Context, target *querypb.Target // Commit is part of queryservice.QueryService func (itc *internalTabletConn) Commit(ctx context.Context, target *querypb.Target, transactionID int64) error { err := itc.tablet.qsc.QueryService().Commit(ctx, target, transactionID) - return tabletconn.TabletErrorFromGRPC(vterrors.ToGRPCError(err)) + return tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) } // Rollback is part of queryservice.QueryService func (itc *internalTabletConn) Rollback(ctx context.Context, target *querypb.Target, transactionID int64) error { err := itc.tablet.qsc.QueryService().Rollback(ctx, target, transactionID) - return tabletconn.TabletErrorFromGRPC(vterrors.ToGRPCError(err)) + return 
tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) } // Prepare is part of queryservice.QueryService func (itc *internalTabletConn) Prepare(ctx context.Context, target *querypb.Target, transactionID int64, dtid string) error { err := itc.tablet.qsc.QueryService().Prepare(ctx, target, transactionID, dtid) - return tabletconn.TabletErrorFromGRPC(vterrors.ToGRPCError(err)) + return tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) } // CommitPrepared is part of queryservice.QueryService func (itc *internalTabletConn) CommitPrepared(ctx context.Context, target *querypb.Target, dtid string) error { err := itc.tablet.qsc.QueryService().CommitPrepared(ctx, target, dtid) - return tabletconn.TabletErrorFromGRPC(vterrors.ToGRPCError(err)) + return tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) } // RollbackPrepared is part of queryservice.QueryService func (itc *internalTabletConn) RollbackPrepared(ctx context.Context, target *querypb.Target, dtid string, originalID int64) error { err := itc.tablet.qsc.QueryService().RollbackPrepared(ctx, target, dtid, originalID) - return tabletconn.TabletErrorFromGRPC(vterrors.ToGRPCError(err)) + return tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) } // CreateTransaction is part of queryservice.QueryService func (itc *internalTabletConn) CreateTransaction(ctx context.Context, target *querypb.Target, dtid string, participants []*querypb.Target) error { err := itc.tablet.qsc.QueryService().CreateTransaction(ctx, target, dtid, participants) - return tabletconn.TabletErrorFromGRPC(vterrors.ToGRPCError(err)) + return tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) } // StartCommit is part of queryservice.QueryService func (itc *internalTabletConn) StartCommit(ctx context.Context, target *querypb.Target, transactionID int64, dtid string) error { err := itc.tablet.qsc.QueryService().StartCommit(ctx, target, transactionID, dtid) - return tabletconn.TabletErrorFromGRPC(vterrors.ToGRPCError(err)) + return tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) } // 
SetRollback is part of queryservice.QueryService func (itc *internalTabletConn) SetRollback(ctx context.Context, target *querypb.Target, dtid string, transactionID int64) error { err := itc.tablet.qsc.QueryService().SetRollback(ctx, target, dtid, transactionID) - return tabletconn.TabletErrorFromGRPC(vterrors.ToGRPCError(err)) + return tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) } // ConcludeTransaction is part of queryservice.QueryService func (itc *internalTabletConn) ConcludeTransaction(ctx context.Context, target *querypb.Target, dtid string) error { err := itc.tablet.qsc.QueryService().ConcludeTransaction(ctx, target, dtid) - return tabletconn.TabletErrorFromGRPC(vterrors.ToGRPCError(err)) + return tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) } // ReadTransaction is part of queryservice.QueryService func (itc *internalTabletConn) ReadTransaction(ctx context.Context, target *querypb.Target, dtid string) (metadata *querypb.TransactionMetadata, err error) { metadata, err = itc.tablet.qsc.QueryService().ReadTransaction(ctx, target, dtid) - return metadata, tabletconn.TabletErrorFromGRPC(vterrors.ToGRPCError(err)) + return metadata, tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) } // BeginExecute is part of queryservice.QueryService @@ -418,13 +418,13 @@ func (itc *internalTabletConn) BeginExecuteBatch(ctx context.Context, target *qu // MessageStream is part of queryservice.QueryService func (itc *internalTabletConn) MessageStream(ctx context.Context, target *querypb.Target, name string, callback func(*sqltypes.Result) error) error { err := itc.tablet.qsc.QueryService().MessageStream(ctx, target, name, callback) - return tabletconn.TabletErrorFromGRPC(vterrors.ToGRPCError(err)) + return tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) } // MessageAck is part of queryservice.QueryService func (itc *internalTabletConn) MessageAck(ctx context.Context, target *querypb.Target, name string, ids []*querypb.Value) (int64, error) { count, err := 
itc.tablet.qsc.QueryService().MessageAck(ctx, target, name, ids) - return count, tabletconn.TabletErrorFromGRPC(vterrors.ToGRPCError(err)) + return count, tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) } // Handle panic is part of the QueryService interface. @@ -460,7 +460,7 @@ func (itc *internalTabletConn) SplitQuery( numRowsPerQueryPart, algorithm) if err != nil { - return nil, tabletconn.TabletErrorFromGRPC(vterrors.ToGRPCError(err)) + return nil, tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) } return splits, nil } @@ -468,13 +468,13 @@ func (itc *internalTabletConn) SplitQuery( // StreamHealth is part of queryservice.QueryService func (itc *internalTabletConn) StreamHealth(ctx context.Context, callback func(*querypb.StreamHealthResponse) error) error { err := itc.tablet.qsc.QueryService().StreamHealth(ctx, callback) - return tabletconn.TabletErrorFromGRPC(vterrors.ToGRPCError(err)) + return tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) } // UpdateStream is part of queryservice.QueryService. 
func (itc *internalTabletConn) UpdateStream(ctx context.Context, target *querypb.Target, position string, timestamp int64, callback func(*querypb.StreamEvent) error) error { err := itc.tablet.qsc.QueryService().UpdateStream(ctx, target, position, timestamp, callback) - return tabletconn.TabletErrorFromGRPC(vterrors.ToGRPCError(err)) + return tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) } // diff --git a/go/cmd/vtgateclienttest/goclienttest/errors.go b/go/cmd/vtgateclienttest/goclienttest/errors.go index 7682d4c70b6..125806fecb8 100644 --- a/go/cmd/vtgateclienttest/goclienttest/errors.go +++ b/go/cmd/vtgateclienttest/goclienttest/errors.go @@ -263,14 +263,7 @@ func checkError(t *testing.T, err error, query, errStr string, errCode vtrpcpb.C t.Errorf("[%v] expected error, got nil", query) return } - switch vtErr := err.(type) { - case *vterrors.VitessError: - if got, want := vtErr.VtErrorCode(), errCode; got != want { - t.Errorf("[%v] error code = %v, want %v", query, got, want) - } - default: - t.Errorf("[%v] unrecognized error type: %T, error: %#v", query, err, err) - return + if got, want := vterrors.Code(err), errCode; got != want { + t.Errorf("[%v] error code = %v, want %v", query, got, want) } - } diff --git a/go/cmd/vtgateclienttest/services/errors.go b/go/cmd/vtgateclienttest/services/errors.go index 511fd3dd310..fbe5d2eb7f2 100644 --- a/go/cmd/vtgateclienttest/services/errors.go +++ b/go/cmd/vtgateclienttest/services/errors.go @@ -5,8 +5,6 @@ package services import ( - "errors" - "fmt" "strings" "golang.org/x/net/context" @@ -80,56 +78,26 @@ func requestToPartialError(request string, session *vtgatepb.Session) error { func trimmedRequestToError(received string) error { switch received { case "bad input": - return vterrors.FromError( - vtrpcpb.Code_INVALID_ARGUMENT, - errors.New("vtgate test client forced error: bad input"), - ) + return vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "vtgate test client forced error: bad input") case "deadline exceeded": - 
return vterrors.FromError( - vtrpcpb.Code_DEADLINE_EXCEEDED, - errors.New("vtgate test client forced error: deadline exceeded"), - ) + return vterrors.New(vtrpcpb.Code_DEADLINE_EXCEEDED, "vtgate test client forced error: deadline exceeded") case "integrity error": - return vterrors.FromError( - vtrpcpb.Code_ALREADY_EXISTS, - errors.New("vtgate test client forced error: integrity error (errno 1062) (sqlstate 23000)"), - ) + return vterrors.New(vtrpcpb.Code_ALREADY_EXISTS, "vtgate test client forced error: integrity error (errno 1062) (sqlstate 23000)") // request backlog and general throttling type errors case "transient error": - return vterrors.FromError( - vtrpcpb.Code_UNAVAILABLE, - errors.New("request_backlog: too many requests in flight: vtgate test client forced error: transient error"), - ) + return vterrors.New(vtrpcpb.Code_UNAVAILABLE, "request_backlog: too many requests in flight: vtgate test client forced error: transient error") case "throttled error": - return vterrors.FromError( - vtrpcpb.Code_UNAVAILABLE, - errors.New("request_backlog: exceeded XXX quota, rate limiting: vtgate test client forced error: transient error"), - ) + return vterrors.New(vtrpcpb.Code_UNAVAILABLE, "request_backlog: exceeded XXX quota, rate limiting: vtgate test client forced error: transient error") case "unauthenticated": - return vterrors.FromError( - vtrpcpb.Code_UNAUTHENTICATED, - errors.New("vtgate test client forced error: unauthenticated"), - ) + return vterrors.New(vtrpcpb.Code_UNAUTHENTICATED, "vtgate test client forced error: unauthenticated") case "aborted": - return vterrors.FromError( - vtrpcpb.Code_ABORTED, - errors.New("vtgate test client forced error: aborted"), - ) + return vterrors.New(vtrpcpb.Code_ABORTED, "vtgate test client forced error: aborted") case "query not served": - return vterrors.FromError( - vtrpcpb.Code_FAILED_PRECONDITION, - errors.New("vtgate test client forced error: query not served"), - ) + return 
vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "vtgate test client forced error: query not served") case "unknown error": - return vterrors.FromError( - vtrpcpb.Code_UNKNOWN, - errors.New("vtgate test client forced error: unknown error"), - ) + return vterrors.New(vtrpcpb.Code_UNKNOWN, "vtgate test client forced error: unknown error") default: - return vterrors.FromError( - vtrpcpb.Code_UNKNOWN, - fmt.Errorf("vtgate test client error request unrecognized: %v", received), - ) + return vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "vtgate test client error request unrecognized: %v", received) } } diff --git a/go/mysqlconn/constants_test.go b/go/mysqlconn/constants_test.go new file mode 100644 index 00000000000..b7f474bd076 --- /dev/null +++ b/go/mysqlconn/constants_test.go @@ -0,0 +1,32 @@ +package mysqlconn + +import ( + "errors" + "testing" + + "github.com/youtube/vitess/go/sqldb" +) + +func TestIsConnErr(t *testing.T) { + testcases := []struct { + in error + want bool + }{{ + in: errors.New("t"), + }, { + in: sqldb.NewSQLError(5, "", ""), + }, { + in: sqldb.NewSQLError(CRServerGone, "", ""), + want: true, + }, { + in: sqldb.NewSQLError(CRServerLost, "", ""), + }, { + in: sqldb.NewSQLError(CRCantReadCharset, "", ""), + }} + for _, tcase := range testcases { + got := IsConnErr(tcase.in) + if got != tcase.want { + t.Errorf("IsConnErr(%#v): %v, want %v", tcase.in, got, tcase.want) + } + } +} diff --git a/go/sqltypes/proto3.go b/go/sqltypes/proto3.go index fa4f6139d61..146692061a5 100644 --- a/go/sqltypes/proto3.go +++ b/go/sqltypes/proto3.go @@ -136,7 +136,7 @@ func QueryResponsesToProto3(qr []QueryResponse) []*querypb.ResultWithError { for i, q := range qr { result[i] = &querypb.ResultWithError{ Result: ResultToProto3(q.QueryResult), - Error: vterrors.VtRPCErrorFromVtError(q.QueryError), + Error: vterrors.ToVTRPC(q.QueryError), } } return result @@ -151,7 +151,7 @@ func Proto3ToQueryReponses(qr []*querypb.ResultWithError) []QueryResponse { for i, q := range qr { result[i] = 
QueryResponse{ QueryResult: Proto3ToResult(q.Result), - QueryError: vterrors.FromVtRPCError(q.Error), + QueryError: vterrors.FromVTRPC(q.Error), } } return result diff --git a/go/sqltypes/proto3_test.go b/go/sqltypes/proto3_test.go index 14702e30434..e09e4c2b5da 100644 --- a/go/sqltypes/proto3_test.go +++ b/go/sqltypes/proto3_test.go @@ -8,8 +8,6 @@ import ( "reflect" "testing" - "errors" - querypb "github.com/youtube/vitess/go/vt/proto/query" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" "github.com/youtube/vitess/go/vt/vterrors" @@ -244,7 +242,7 @@ func TestQueryReponses(t *testing.T) { QueryError: nil, }, { QueryResult: nil, - QueryError: vterrors.FromError(vtrpcpb.Code_DEADLINE_EXCEEDED, errors.New("deadline exceeded")), + QueryError: vterrors.New(vtrpcpb.Code_DEADLINE_EXCEEDED, "deadline exceeded"), }, } diff --git a/go/vt/dtids/dtids.go b/go/vt/dtids/dtids.go index 7a602a1a373..03aa48c82cb 100644 --- a/go/vt/dtids/dtids.go +++ b/go/vt/dtids/dtids.go @@ -27,7 +27,7 @@ func New(mmShard *vtgatepb.Session_ShardSession) string { func ShardSession(dtid string) (*vtgatepb.Session_ShardSession, error) { splits := strings.Split(dtid, ":") if len(splits) != 3 { - return nil, vterrors.FromError(vtrpcpb.Code_INVALID_ARGUMENT, fmt.Errorf("invalid parts in dtid: %s", dtid)) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid parts in dtid: %s", dtid) } target := &querypb.Target{ Keyspace: splits[0], @@ -36,7 +36,7 @@ func ShardSession(dtid string) (*vtgatepb.Session_ShardSession, error) { } txid, err := strconv.ParseInt(splits[2], 10, 0) if err != nil { - return nil, vterrors.FromError(vtrpcpb.Code_INVALID_ARGUMENT, fmt.Errorf("invalid transaction id in dtid: %s", dtid)) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid transaction id in dtid: %s", dtid) } return &vtgatepb.Session_ShardSession{ Target: target, @@ -48,11 +48,11 @@ func ShardSession(dtid string) (*vtgatepb.Session_ShardSession, error) { func TransactionID(dtid 
string) (int64, error) { splits := strings.Split(dtid, ":") if len(splits) != 3 { - return 0, vterrors.FromError(vtrpcpb.Code_INVALID_ARGUMENT, fmt.Errorf("invalid parts in dtid: %s", dtid)) + return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid parts in dtid: %s", dtid) } txid, err := strconv.ParseInt(splits[2], 10, 0) if err != nil { - return 0, vterrors.FromError(vtrpcpb.Code_INVALID_ARGUMENT, fmt.Errorf("invalid transaction id in dtid: %s", dtid)) + return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid transaction id in dtid: %s", dtid) } return txid, nil } diff --git a/go/vt/proto/vtrpc/vtrpc.pb.go b/go/vt/proto/vtrpc/vtrpc.pb.go index db085834fea..c560989b2a6 100644 --- a/go/vt/proto/vtrpc/vtrpc.pb.go +++ b/go/vt/proto/vtrpc/vtrpc.pb.go @@ -29,7 +29,7 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package -// Code represnts canonical error codes. The names, numbers and comments +// Code represents canonical error codes. The names, numbers and comments // must match the ones defined by grpc: // https://godoc.org/google.golang.org/grpc/codes. type Code int32 @@ -185,7 +185,7 @@ func (Code) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} // that we may end up with a different list of canonical error codes // than the ones defined by grpc. In hindisght, we realize that // the grpc error codes are fairly generic and mostly sufficient. -// In order to avoid confusion, thie type will be deprecated in +// In order to avoid confusion, this type will be deprecated in // favor of the new Code that matches exactly what grpc defines. // Some names below have a _LEGACY suffix. This is to prevent // name collisions with Code. 
diff --git a/go/vt/tabletmanager/grpctmserver/server.go b/go/vt/tabletmanager/grpctmserver/server.go index c4000988750..b530c8c888c 100644 --- a/go/vt/tabletmanager/grpctmserver/server.go +++ b/go/vt/tabletmanager/grpctmserver/server.go @@ -172,7 +172,7 @@ func (s *server) ExecuteFetchAsDba(ctx context.Context, request *tabletmanagerda response = &tabletmanagerdatapb.ExecuteFetchAsDbaResponse{} qr, err := s.agent.ExecuteFetchAsDba(ctx, request.Query, request.DbName, int(request.MaxRows), request.DisableBinlogs, request.ReloadSchema) if err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } response.Result = qr return response, nil @@ -184,7 +184,7 @@ func (s *server) ExecuteFetchAsAllPrivs(ctx context.Context, request *tabletmana response = &tabletmanagerdatapb.ExecuteFetchAsAllPrivsResponse{} qr, err := s.agent.ExecuteFetchAsAllPrivs(ctx, request.Query, request.DbName, int(request.MaxRows), request.ReloadSchema) if err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } response.Result = qr return response, nil @@ -196,7 +196,7 @@ func (s *server) ExecuteFetchAsApp(ctx context.Context, request *tabletmanagerda response = &tabletmanagerdatapb.ExecuteFetchAsAppResponse{} qr, err := s.agent.ExecuteFetchAsApp(ctx, request.Query, int(request.MaxRows)) if err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } response.Result = qr return response, nil diff --git a/go/vt/tabletserver/connpool/pool.go b/go/vt/tabletserver/connpool/pool.go index 37369f74fbf..84c99e8b712 100644 --- a/go/vt/tabletserver/connpool/pool.go +++ b/go/vt/tabletserver/connpool/pool.go @@ -12,10 +12,15 @@ import ( "github.com/youtube/vitess/go/sqldb" "github.com/youtube/vitess/go/stats" "github.com/youtube/vitess/go/vt/dbconnpool" + vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vterrors" 
"golang.org/x/net/context" ) +// ErrConnPoolClosed is returned when the connection pool is closed. +var ErrConnPoolClosed = vterrors.New(vtrpcpb.Code_UNAVAILABLE, "connection pool is closed") + // usedNames is for preventing expvar from panicking. Tests // create pool objects multiple time. If a name was previously // used, expvar initialization is skipped. @@ -108,7 +113,7 @@ func (cp *Pool) Close() { func (cp *Pool) Get(ctx context.Context) (*DBConn, error) { p := cp.pool() if p == nil { - return nil, tabletenv.ErrConnPoolClosed + return nil, ErrConnPoolClosed } r, err := p.Get(ctx) if err != nil { @@ -121,7 +126,7 @@ func (cp *Pool) Get(ctx context.Context) (*DBConn, error) { func (cp *Pool) Put(conn *DBConn) { p := cp.pool() if p == nil { - panic(tabletenv.ErrConnPoolClosed) + panic(ErrConnPoolClosed) } if conn == nil { p.Put(nil) diff --git a/go/vt/tabletserver/grpcqueryservice/server.go b/go/vt/tabletserver/grpcqueryservice/server.go index 05217d6ce9f..92c6a4ab113 100644 --- a/go/vt/tabletserver/grpcqueryservice/server.go +++ b/go/vt/tabletserver/grpcqueryservice/server.go @@ -34,11 +34,11 @@ func (q *query) Execute(ctx context.Context, request *querypb.ExecuteRequest) (r ) bv, err := querytypes.Proto3ToBindVariables(request.Query.BindVariables) if err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } result, err := q.server.Execute(ctx, request.Target, request.Query.Sql, bv, request.TransactionId, request.Options) if err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } return &querypb.ExecuteResponse{ Result: sqltypes.ResultToProto3(result), @@ -54,11 +54,11 @@ func (q *query) ExecuteBatch(ctx context.Context, request *querypb.ExecuteBatchR ) bql, err := querytypes.Proto3ToBoundQueryList(request.Queries) if err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } results, err := q.server.ExecuteBatch(ctx, request.Target, bql, request.AsTransaction, 
request.TransactionId, request.Options) if err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } return &querypb.ExecuteBatchResponse{ Results: sqltypes.ResultsToProto3(results), @@ -74,14 +74,14 @@ func (q *query) StreamExecute(request *querypb.StreamExecuteRequest, stream quer ) bv, err := querytypes.Proto3ToBindVariables(request.Query.BindVariables) if err != nil { - return vterrors.ToGRPCError(err) + return vterrors.ToGRPC(err) } if err := q.server.StreamExecute(ctx, request.Target, request.Query.Sql, bv, request.Options, func(reply *sqltypes.Result) error { return stream.Send(&querypb.StreamExecuteResponse{ Result: sqltypes.ResultToProto3(reply), }) }); err != nil { - return vterrors.ToGRPCError(err) + return vterrors.ToGRPC(err) } return nil } @@ -95,7 +95,7 @@ func (q *query) Begin(ctx context.Context, request *querypb.BeginRequest) (respo ) transactionID, err := q.server.Begin(ctx, request.Target) if err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } return &querypb.BeginResponse{ @@ -111,7 +111,7 @@ func (q *query) Commit(ctx context.Context, request *querypb.CommitRequest) (res request.ImmediateCallerId, ) if err := q.server.Commit(ctx, request.Target, request.TransactionId); err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } return &querypb.CommitResponse{}, nil } @@ -124,7 +124,7 @@ func (q *query) Rollback(ctx context.Context, request *querypb.RollbackRequest) request.ImmediateCallerId, ) if err := q.server.Rollback(ctx, request.Target, request.TransactionId); err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } return &querypb.RollbackResponse{}, nil @@ -138,7 +138,7 @@ func (q *query) Prepare(ctx context.Context, request *querypb.PrepareRequest) (r request.ImmediateCallerId, ) if err := q.server.Prepare(ctx, request.Target, request.TransactionId, request.Dtid); err != nil { - return nil, 
vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } return &querypb.PrepareResponse{}, nil @@ -152,7 +152,7 @@ func (q *query) CommitPrepared(ctx context.Context, request *querypb.CommitPrepa request.ImmediateCallerId, ) if err := q.server.CommitPrepared(ctx, request.Target, request.Dtid); err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } return &querypb.CommitPreparedResponse{}, nil @@ -166,7 +166,7 @@ func (q *query) RollbackPrepared(ctx context.Context, request *querypb.RollbackP request.ImmediateCallerId, ) if err := q.server.RollbackPrepared(ctx, request.Target, request.Dtid, request.TransactionId); err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } return &querypb.RollbackPreparedResponse{}, nil @@ -180,7 +180,7 @@ func (q *query) CreateTransaction(ctx context.Context, request *querypb.CreateTr request.ImmediateCallerId, ) if err := q.server.CreateTransaction(ctx, request.Target, request.Dtid, request.Participants); err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } return &querypb.CreateTransactionResponse{}, nil @@ -194,7 +194,7 @@ func (q *query) StartCommit(ctx context.Context, request *querypb.StartCommitReq request.ImmediateCallerId, ) if err := q.server.StartCommit(ctx, request.Target, request.TransactionId, request.Dtid); err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } return &querypb.StartCommitResponse{}, nil @@ -208,7 +208,7 @@ func (q *query) SetRollback(ctx context.Context, request *querypb.SetRollbackReq request.ImmediateCallerId, ) if err := q.server.SetRollback(ctx, request.Target, request.Dtid, request.TransactionId); err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } return &querypb.SetRollbackResponse{}, nil @@ -222,7 +222,7 @@ func (q *query) ConcludeTransaction(ctx context.Context, request *querypb.Conclu request.ImmediateCallerId, 
) if err := q.server.ConcludeTransaction(ctx, request.Target, request.Dtid); err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } return &querypb.ConcludeTransactionResponse{}, nil @@ -237,7 +237,7 @@ func (q *query) ReadTransaction(ctx context.Context, request *querypb.ReadTransa ) result, err := q.server.ReadTransaction(ctx, request.Target, request.Dtid) if err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } return &querypb.ReadTransactionResponse{Metadata: result}, nil @@ -252,7 +252,7 @@ func (q *query) BeginExecute(ctx context.Context, request *querypb.BeginExecuteR ) bv, err := querytypes.Proto3ToBindVariables(request.Query.BindVariables) if err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } result, transactionID, err := q.server.BeginExecute(ctx, request.Target, request.Query.Sql, bv, request.Options) @@ -260,11 +260,11 @@ func (q *query) BeginExecute(ctx context.Context, request *querypb.BeginExecuteR // if we have a valid transactionID, return the error in-band if transactionID != 0 { return &querypb.BeginExecuteResponse{ - Error: vterrors.VtRPCErrorFromVtError(err), + Error: vterrors.ToVTRPC(err), TransactionId: transactionID, }, nil } - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } return &querypb.BeginExecuteResponse{ Result: sqltypes.ResultToProto3(result), @@ -281,7 +281,7 @@ func (q *query) BeginExecuteBatch(ctx context.Context, request *querypb.BeginExe ) bql, err := querytypes.Proto3ToBoundQueryList(request.Queries) if err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } results, transactionID, err := q.server.BeginExecuteBatch(ctx, request.Target, bql, request.AsTransaction, request.Options) @@ -289,11 +289,11 @@ func (q *query) BeginExecuteBatch(ctx context.Context, request *querypb.BeginExe // if we have a valid transactionID, return the error in-band if transactionID 
!= 0 { return &querypb.BeginExecuteBatchResponse{ - Error: vterrors.VtRPCErrorFromVtError(err), + Error: vterrors.ToVTRPC(err), TransactionId: transactionID, }, nil } - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } return &querypb.BeginExecuteBatchResponse{ Results: sqltypes.ResultsToProto3(results), @@ -313,7 +313,7 @@ func (q *query) MessageStream(request *querypb.MessageStreamRequest, stream quer Result: sqltypes.ResultToProto3(qr), }) }); err != nil { - return vterrors.ToGRPCError(err) + return vterrors.ToGRPC(err) } return nil } @@ -327,7 +327,7 @@ func (q *query) MessageAck(ctx context.Context, request *querypb.MessageAckReque ) count, err := q.server.MessageAck(ctx, request.Target, request.Name, request.Ids) if err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } return &querypb.MessageAckResponse{ Result: &querypb.QueryResult{ @@ -346,7 +346,7 @@ func (q *query) SplitQuery(ctx context.Context, request *querypb.SplitQueryReque bq, err := querytypes.Proto3ToBoundQuery(request.Query) if err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } splits := []querytypes.QuerySplit{} splits, err = q.server.SplitQuery( @@ -358,11 +358,11 @@ func (q *query) SplitQuery(ctx context.Context, request *querypb.SplitQueryReque request.NumRowsPerQueryPart, request.Algorithm) if err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } qs, err := querytypes.QuerySplitsToProto3(splits) if err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } return &querypb.SplitQueryResponse{Queries: qs}, nil } @@ -371,7 +371,7 @@ func (q *query) SplitQuery(ctx context.Context, request *querypb.SplitQueryReque func (q *query) StreamHealth(request *querypb.StreamHealthRequest, stream queryservicepb.Query_StreamHealthServer) (err error) { defer q.server.HandlePanic(&err) if err = q.server.StreamHealth(stream.Context(), 
stream.Send); err != nil { - return vterrors.ToGRPCError(err) + return vterrors.ToGRPC(err) } return nil } @@ -388,7 +388,7 @@ func (q *query) UpdateStream(request *querypb.UpdateStreamRequest, stream querys Event: reply, }) }); err != nil { - return vterrors.ToGRPCError(err) + return vterrors.ToGRPC(err) } return nil } diff --git a/go/vt/tabletserver/grpctabletconn/conn.go b/go/vt/tabletserver/grpctabletconn/conn.go index a9d240a3c5d..44ca989b4de 100644 --- a/go/vt/tabletserver/grpctabletconn/conn.go +++ b/go/vt/tabletserver/grpctabletconn/conn.go @@ -104,7 +104,7 @@ func (conn *gRPCQueryClient) Execute(ctx context.Context, target *querypb.Target } er, err := conn.c.Execute(ctx, req) if err != nil { - return nil, tabletconn.TabletErrorFromGRPC(err) + return nil, tabletconn.ErrorFromGRPC(err) } return sqltypes.Proto3ToResult(er.Result), nil } @@ -135,7 +135,7 @@ func (conn *gRPCQueryClient) ExecuteBatch(ctx context.Context, target *querypb.T } ebr, err := conn.c.ExecuteBatch(ctx, req) if err != nil { - return nil, tabletconn.TabletErrorFromGRPC(err) + return nil, tabletconn.ErrorFromGRPC(err) } return sqltypes.Proto3ToResults(ebr.Results), nil } @@ -173,7 +173,7 @@ func (conn *gRPCQueryClient) StreamExecute(ctx context.Context, target *querypb. } stream, err := conn.c.StreamExecute(ctx, req) if err != nil { - return nil, tabletconn.TabletErrorFromGRPC(err) + return nil, tabletconn.ErrorFromGRPC(err) } return stream, nil }() @@ -184,7 +184,7 @@ func (conn *gRPCQueryClient) StreamExecute(ctx context.Context, target *querypb. 
for { ser, err := stream.Recv() if err != nil { - return tabletconn.TabletErrorFromGRPC(err) + return tabletconn.ErrorFromGRPC(err) } if fields == nil { fields = ser.Result.Fields @@ -213,7 +213,7 @@ func (conn *gRPCQueryClient) Begin(ctx context.Context, target *querypb.Target) } br, err := conn.c.Begin(ctx, req) if err != nil { - return 0, tabletconn.TabletErrorFromGRPC(err) + return 0, tabletconn.ErrorFromGRPC(err) } return br.TransactionId, nil } @@ -234,7 +234,7 @@ func (conn *gRPCQueryClient) Commit(ctx context.Context, target *querypb.Target, } _, err := conn.c.Commit(ctx, req) if err != nil { - return tabletconn.TabletErrorFromGRPC(err) + return tabletconn.ErrorFromGRPC(err) } return nil } @@ -255,7 +255,7 @@ func (conn *gRPCQueryClient) Rollback(ctx context.Context, target *querypb.Targe } _, err := conn.c.Rollback(ctx, req) if err != nil { - return tabletconn.TabletErrorFromGRPC(err) + return tabletconn.ErrorFromGRPC(err) } return nil } @@ -277,7 +277,7 @@ func (conn *gRPCQueryClient) Prepare(ctx context.Context, target *querypb.Target } _, err := conn.c.Prepare(ctx, req) if err != nil { - return tabletconn.TabletErrorFromGRPC(err) + return tabletconn.ErrorFromGRPC(err) } return nil } @@ -298,7 +298,7 @@ func (conn *gRPCQueryClient) CommitPrepared(ctx context.Context, target *querypb } _, err := conn.c.CommitPrepared(ctx, req) if err != nil { - return tabletconn.TabletErrorFromGRPC(err) + return tabletconn.ErrorFromGRPC(err) } return nil } @@ -320,7 +320,7 @@ func (conn *gRPCQueryClient) RollbackPrepared(ctx context.Context, target *query } _, err := conn.c.RollbackPrepared(ctx, req) if err != nil { - return tabletconn.TabletErrorFromGRPC(err) + return tabletconn.ErrorFromGRPC(err) } return nil } @@ -342,7 +342,7 @@ func (conn *gRPCQueryClient) CreateTransaction(ctx context.Context, target *quer } _, err := conn.c.CreateTransaction(ctx, req) if err != nil { - return tabletconn.TabletErrorFromGRPC(err) + return tabletconn.ErrorFromGRPC(err) } return nil } 
@@ -365,7 +365,7 @@ func (conn *gRPCQueryClient) StartCommit(ctx context.Context, target *querypb.Ta } _, err := conn.c.StartCommit(ctx, req) if err != nil { - return tabletconn.TabletErrorFromGRPC(err) + return tabletconn.ErrorFromGRPC(err) } return nil } @@ -388,7 +388,7 @@ func (conn *gRPCQueryClient) SetRollback(ctx context.Context, target *querypb.Ta } _, err := conn.c.SetRollback(ctx, req) if err != nil { - return tabletconn.TabletErrorFromGRPC(err) + return tabletconn.ErrorFromGRPC(err) } return nil } @@ -410,7 +410,7 @@ func (conn *gRPCQueryClient) ConcludeTransaction(ctx context.Context, target *qu } _, err := conn.c.ConcludeTransaction(ctx, req) if err != nil { - return tabletconn.TabletErrorFromGRPC(err) + return tabletconn.ErrorFromGRPC(err) } return nil } @@ -431,7 +431,7 @@ func (conn *gRPCQueryClient) ReadTransaction(ctx context.Context, target *queryp } response, err := conn.c.ReadTransaction(ctx, req) if err != nil { - return nil, tabletconn.TabletErrorFromGRPC(err) + return nil, tabletconn.ErrorFromGRPC(err) } return response.Metadata, nil } @@ -458,10 +458,10 @@ func (conn *gRPCQueryClient) BeginExecute(ctx context.Context, target *querypb.T } reply, err := conn.c.BeginExecute(ctx, req) if err != nil { - return nil, 0, tabletconn.TabletErrorFromGRPC(err) + return nil, 0, tabletconn.ErrorFromGRPC(err) } if reply.Error != nil { - return nil, reply.TransactionId, tabletconn.TabletErrorFromRPCError(reply.Error) + return nil, reply.TransactionId, tabletconn.ErrorFromVTRPC(reply.Error) } return sqltypes.Proto3ToResult(reply.Result), reply.TransactionId, nil } @@ -492,10 +492,10 @@ func (conn *gRPCQueryClient) BeginExecuteBatch(ctx context.Context, target *quer reply, err := conn.c.BeginExecuteBatch(ctx, req) if err != nil { - return nil, 0, tabletconn.TabletErrorFromGRPC(err) + return nil, 0, tabletconn.ErrorFromGRPC(err) } if reply.Error != nil { - return nil, reply.TransactionId, tabletconn.TabletErrorFromRPCError(reply.Error) + return nil, 
reply.TransactionId, tabletconn.ErrorFromVTRPC(reply.Error) } return sqltypes.Proto3ToResults(reply.Results), reply.TransactionId, nil } @@ -521,7 +521,7 @@ func (conn *gRPCQueryClient) MessageStream(ctx context.Context, target *querypb. } stream, err := conn.c.MessageStream(ctx, req) if err != nil { - return nil, tabletconn.TabletErrorFromGRPC(err) + return nil, tabletconn.ErrorFromGRPC(err) } return stream, nil }() @@ -532,7 +532,7 @@ func (conn *gRPCQueryClient) MessageStream(ctx context.Context, target *querypb. for { msr, err := stream.Recv() if err != nil { - return tabletconn.TabletErrorFromGRPC(err) + return tabletconn.ErrorFromGRPC(err) } if fields == nil { fields = msr.Result.Fields @@ -562,7 +562,7 @@ func (conn *gRPCQueryClient) MessageAck(ctx context.Context, target *querypb.Tar } reply, err := conn.c.MessageAck(ctx, req) if err != nil { - return 0, tabletconn.TabletErrorFromGRPC(err) + return 0, tabletconn.ErrorFromGRPC(err) } return int64(reply.Result.RowsAffected), nil } @@ -586,7 +586,7 @@ func (conn *gRPCQueryClient) SplitQuery( q, err := querytypes.BoundQueryToProto3(query.Sql, query.BindVariables) if err != nil { - return nil, tabletconn.TabletErrorFromGRPC(err) + return nil, tabletconn.ErrorFromGRPC(err) } req := &querypb.SplitQueryRequest{ Target: target, @@ -600,11 +600,11 @@ func (conn *gRPCQueryClient) SplitQuery( } sqr, err := conn.c.SplitQuery(ctx, req) if err != nil { - return nil, tabletconn.TabletErrorFromGRPC(err) + return nil, tabletconn.ErrorFromGRPC(err) } split, err := querytypes.Proto3ToQuerySplits(sqr.Queries) if err != nil { - return nil, tabletconn.TabletErrorFromGRPC(err) + return nil, tabletconn.ErrorFromGRPC(err) } return split, nil } @@ -624,7 +624,7 @@ func (conn *gRPCQueryClient) StreamHealth(ctx context.Context, callback func(*qu stream, err := conn.c.StreamHealth(ctx, &querypb.StreamHealthRequest{}) if err != nil { - return nil, tabletconn.TabletErrorFromGRPC(err) + return nil, tabletconn.ErrorFromGRPC(err) } return 
stream, nil }() @@ -634,7 +634,7 @@ func (conn *gRPCQueryClient) StreamHealth(ctx context.Context, callback func(*qu for { shr, err := stream.Recv() if err != nil { - return tabletconn.TabletErrorFromGRPC(err) + return tabletconn.ErrorFromGRPC(err) } if err := callback(shr); err != nil { if err == nil || err == io.EOF { @@ -667,7 +667,7 @@ func (conn *gRPCQueryClient) UpdateStream(ctx context.Context, target *querypb.T } stream, err := conn.c.UpdateStream(ctx, req) if err != nil { - return nil, tabletconn.TabletErrorFromGRPC(err) + return nil, tabletconn.ErrorFromGRPC(err) } return stream, nil }() @@ -677,7 +677,7 @@ func (conn *gRPCQueryClient) UpdateStream(ctx context.Context, target *querypb.T for { r, err := stream.Recv() if err != nil { - return tabletconn.TabletErrorFromGRPC(err) + return tabletconn.ErrorFromGRPC(err) } if err := callback(r.Event); err != nil { if err == nil || err == io.EOF { diff --git a/go/vt/tabletserver/query_executor.go b/go/vt/tabletserver/query_executor.go index a176c3cd1e2..ab8179672d3 100644 --- a/go/vt/tabletserver/query_executor.go +++ b/go/vt/tabletserver/query_executor.go @@ -599,7 +599,7 @@ func (qre *QueryExecutor) getConn(pool *connpool.Pool) (*connpool.DBConn, error) case nil: qre.logStats.WaitingForConnection += time.Now().Sub(start) return conn, nil - case tabletenv.ErrConnPoolClosed: + case connpool.ErrConnPoolClosed: return nil, err } return nil, err diff --git a/go/vt/tabletserver/querytypes/bound_query.go b/go/vt/tabletserver/querytypes/bound_query.go index c687658c440..92e57d90b64 100644 --- a/go/vt/tabletserver/querytypes/bound_query.go +++ b/go/vt/tabletserver/querytypes/bound_query.go @@ -7,9 +7,8 @@ package querytypes import ( + "bytes" "fmt" - - "github.com/youtube/vitess/go/bytes2" ) // This file defines the BoundQuery type. 
@@ -34,14 +33,14 @@ type BoundQuery struct { // QueryAsString prints a readable version of query+bind variables, // and also truncates data if it's too long func QueryAsString(sql string, bindVariables map[string]interface{}) string { - buf := bytes2.NewChunkedWriter(1024) - fmt.Fprintf(buf, "Sql: %#v, BindVars: {", sql) + buf := &bytes.Buffer{} + fmt.Fprintf(buf, "Sql: %q, BindVars: {", slimit(sql, 5000)) for k, v := range bindVariables { switch val := v.(type) { case []byte: - fmt.Fprintf(buf, "%s: %#v, ", k, slimit(string(val))) + fmt.Fprintf(buf, "%s: %q, ", k, slimit(string(val), 256)) case string: - fmt.Fprintf(buf, "%s: %#v, ", k, slimit(val)) + fmt.Fprintf(buf, "%s: %q, ", k, slimit(val, 256)) default: fmt.Fprintf(buf, "%s: %v, ", k, v) } @@ -50,10 +49,9 @@ func QueryAsString(sql string, bindVariables map[string]interface{}) string { return string(buf.Bytes()) } -func slimit(s string) string { - l := len(s) - if l > 256 { - l = 256 +func slimit(s string, max int) string { + if l := len(s); l > max { + return s[:max] } - return s[:l] + return s } diff --git a/go/vt/tabletserver/tabletconn/grpc_error.go b/go/vt/tabletserver/tabletconn/grpc_error.go index 78ce341ef64..98764523df0 100644 --- a/go/vt/tabletserver/tabletconn/grpc_error.go +++ b/go/vt/tabletserver/tabletconn/grpc_error.go @@ -9,18 +9,19 @@ import ( vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) -// TabletErrorFromGRPC returns a ServerError or a -// OperationalError from the gRPC error. -func TabletErrorFromGRPC(err error) error { +// ErrorFromGRPC converts a GRPC error to vtError for +// tabletserver calls. +func ErrorFromGRPC(err error) error { // io.EOF is end of stream. Don't treat it as an error. 
if err == nil || err == io.EOF { return nil } - return vterrors.New(vterrors.GRPCToCode(grpc.Code(err)), "vttablet: "+err.Error()) + return vterrors.New(vtrpcpb.Code(grpc.Code(err)), "vttablet: "+err.Error()) } -// TabletErrorFromRPCError returns a ServerError from a vtrpcpb.ServerError -func TabletErrorFromRPCError(err *vtrpcpb.RPCError) error { +// ErrorFromVTRPC converts a *vtrpcpb.RPCError to vtError for +// tabletserver calls. +func ErrorFromVTRPC(err *vtrpcpb.RPCError) error { if err == nil { return nil } diff --git a/go/vt/tabletserver/tabletconn/grpc_error_test.go b/go/vt/tabletserver/tabletconn/grpc_error_test.go index 82498ac9ade..3369ce49200 100644 --- a/go/vt/tabletserver/tabletconn/grpc_error_test.go +++ b/go/vt/tabletserver/tabletconn/grpc_error_test.go @@ -36,7 +36,7 @@ func TestTabletErrorFromRPCError(t *testing.T) { want: vtrpcpb.Code_INVALID_ARGUMENT, }} for _, tcase := range testcases { - got := vterrors.Code(TabletErrorFromRPCError(tcase.in)) + got := vterrors.Code(ErrorFromVTRPC(tcase.in)) if got != tcase.want { t.Errorf("FromVtRPCError(%v):\n%v, want\n%v", tcase.in, got, tcase.want) } diff --git a/go/vt/tabletserver/tabletenv/tablet_error.go b/go/vt/tabletserver/tabletenv/tablet_error.go deleted file mode 100644 index 940e43951af..00000000000 --- a/go/vt/tabletserver/tabletenv/tablet_error.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2012, Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tabletenv - -import ( - "fmt" - "regexp" - "strconv" - "strings" - - log "github.com/golang/glog" - - "github.com/youtube/vitess/go/mysqlconn" - "github.com/youtube/vitess/go/sqldb" - "github.com/youtube/vitess/go/tb" - "github.com/youtube/vitess/go/vt/vterrors" - - vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" -) - -const ( - maxErrLen = 5000 -) - -// ErrConnPoolClosed is returned when the connection pool is closed. 
-var ErrConnPoolClosed = vterrors.New(vtrpcpb.Code_UNAVAILABLE, "connection pool is closed") - -// TabletError is the error type we use in this library. -// It implements vterrors.VtError interface. -type TabletError struct { - Message string - SQLError int - SQLState string - // Code will be used to transmit the error across RPC boundaries - Code vtrpcpb.Code -} - -// NewTabletError returns a TabletError of the given type -func NewTabletError(errCode vtrpcpb.Code, format string, args ...interface{}) *TabletError { - return &TabletError{ - Message: printable(fmt.Sprintf(format, args...)), - Code: errCode, - } -} - -// NewTabletErrorSQL returns a TabletError based on the error -func NewTabletErrorSQL(errCode vtrpcpb.Code, err error) *TabletError { - var errnum int - errstr := err.Error() - sqlState := sqldb.SQLStateGeneral - if sqlErr, ok := err.(*sqldb.SQLError); ok { - errnum = sqlErr.Number() - sqlState = sqlErr.SQLState() - switch errnum { - case mysqlconn.EROptionPreventsStatement: - // Override error type if MySQL is in read-only mode. It's probably because - // there was a remaster and there are old clients still connected. - if strings.Contains(errstr, "read-only") { - errCode = vtrpcpb.Code_FAILED_PRECONDITION - } - case mysqlconn.ERDupEntry: - errCode = vtrpcpb.Code_ALREADY_EXISTS - case mysqlconn.ERDataTooLong, mysqlconn.ERDataOutOfRange: - errCode = vtrpcpb.Code_INVALID_ARGUMENT - default: - } - } - return &TabletError{ - Message: printable(errstr), - SQLError: errnum, - SQLState: sqlState, - Code: errCode, - } -} - -// PrefixTabletError attempts to add a string prefix to a TabletError, -// while preserving its Code. If the given error is not a -// TabletError, a new TabletError is returned with the desired Code. 
-func PrefixTabletError(errCode vtrpcpb.Code, err error, prefix string) error { - if terr, ok := err.(*TabletError); ok { - return NewTabletError(terr.Code, "%s%s", prefix, terr.Message) - } - return NewTabletError(errCode, "%s%s", prefix, err) -} - -func printable(in string) string { - if len(in) > maxErrLen { - in = in[:maxErrLen] - } - in = fmt.Sprintf("%q", in) - return in[1 : len(in)-1] -} - -var errExtract = regexp.MustCompile(`.*\(errno ([0-9]*)\).*`) - -// IsConnErr returns true if the error is a connection error. If -// the error is of type TabletError or hasNumber, it checks the error -// code. Otherwise, it parses the string looking for (errno xxxx) -// and uses the extracted value to determine if it's a conn error. -func IsConnErr(err error) bool { - var sqlError int - switch err := err.(type) { - case *TabletError: - sqlError = err.SQLError - case *sqldb.SQLError: - sqlError = err.Number() - default: - match := errExtract.FindStringSubmatch(err.Error()) - if len(match) < 2 { - return false - } - var convErr error - sqlError, convErr = strconv.Atoi(match[1]) - if convErr != nil { - return false - } - } - // CRServerLost means that someone sniped the query. - if sqlError == mysqlconn.CRServerLost { - return false - } - return sqlError >= 2000 && sqlError <= 2018 -} - -func (te *TabletError) Error() string { - return te.Prefix() + te.Message -} - -// VtErrorCode returns the underlying Vitess error code -func (te *TabletError) VtErrorCode() vtrpcpb.Code { - return te.Code -} - -// Prefix returns the prefix for the error, like error, fatal, etc. -func (te *TabletError) Prefix() string { - prefix := "error: " - switch te.Code { - case vtrpcpb.Code_FAILED_PRECONDITION: - prefix = "retry: " - case vtrpcpb.Code_INTERNAL: - prefix = "fatal: " - case vtrpcpb.Code_RESOURCE_EXHAUSTED: - prefix = "tx_pool_full: " - case vtrpcpb.Code_ABORTED: - prefix = "not_in_tx: " - } - // Special case for killed queries. 
- if te.SQLError == mysqlconn.CRServerLost { - prefix = prefix + "the query was killed either because it timed out or was canceled: " - } - return prefix -} - -// RecordStats will record the error in the proper stat bucket -func (te *TabletError) RecordStats() { - switch te.Code { - case vtrpcpb.Code_FAILED_PRECONDITION: - InfoErrors.Add("Retry", 1) - case vtrpcpb.Code_INTERNAL: - ErrorStats.Add("Fatal", 1) - case vtrpcpb.Code_RESOURCE_EXHAUSTED: - ErrorStats.Add("TxPoolFull", 1) - case vtrpcpb.Code_ABORTED: - ErrorStats.Add("NotInTx", 1) - default: - switch te.SQLError { - case mysqlconn.ERDupEntry: - InfoErrors.Add("DupKey", 1) - case mysqlconn.ERLockWaitTimeout, mysqlconn.ERLockDeadlock: - ErrorStats.Add("Deadlock", 1) - default: - ErrorStats.Add("Fail", 1) - } - } -} - -// LogError logs panics and increments InternalErrors. -func LogError() { - if x := recover(); x != nil { - log.Errorf("Uncaught panic:\n%v\n%s", x, tb.Stack(4)) - InternalErrors.Add("Panic", 1) - } -} diff --git a/go/vt/tabletserver/tabletenv/tablet_error_test.go b/go/vt/tabletserver/tabletenv/tablet_error_test.go deleted file mode 100644 index f007332b4ca..00000000000 --- a/go/vt/tabletserver/tabletenv/tablet_error_test.go +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright 2015, Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package tabletenv - -import ( - "fmt" - "testing" - - "github.com/youtube/vitess/go/mysqlconn" - "github.com/youtube/vitess/go/sqldb" - - vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" -) - -func TestTabletErrorCode(t *testing.T) { - tErr := NewTabletError(vtrpcpb.Code_INTERNAL, "error") - wantCode := vtrpcpb.Code_INTERNAL - code := tErr.VtErrorCode() - if wantCode != code { - t.Errorf("VtErrorCode() => %v, want %v", code, wantCode) - } -} - -func TestTabletErrorRetriableErrorTypeOverwrite(t *testing.T) { - sqlErr := sqldb.NewSQLError(mysqlconn.EROptionPreventsStatement, mysqlconn.SSUnknownSQLState, "read-only") - tabletErr := NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, sqlErr) - if tabletErr.Code != vtrpcpb.Code_FAILED_PRECONDITION { - t.Fatalf("got: %v wanted: QUERY_NOT_SERVED", tabletErr.Code) - } - - sqlErr = sqldb.NewSQLError(mysqlconn.ERDupEntry, mysqlconn.SSDupKey, "error") - tabletErr = NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, sqlErr) - if tabletErr.Code != vtrpcpb.Code_ALREADY_EXISTS { - t.Fatalf("got: %v wanted: INTEGRITY_ERROR", tabletErr.Code) - } - - sqlErr = sqldb.NewSQLError(mysqlconn.ERDataTooLong, mysqlconn.SSDataTooLong, "error") - tabletErr = NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, sqlErr) - if tabletErr.Code != vtrpcpb.Code_INVALID_ARGUMENT { - t.Fatalf("got: %v wanted: BAD_INPUT", tabletErr.Code) - } - - sqlErr = sqldb.NewSQLError(mysqlconn.ERDataOutOfRange, mysqlconn.SSDataOutOfRange, "error") - tabletErr = NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, sqlErr) - if tabletErr.Code != vtrpcpb.Code_INVALID_ARGUMENT { - t.Fatalf("got: %v wanted: BAD_INPUT", tabletErr.Code) - } -} - -func TestTabletErrorRetriableErrorTypeOverwrite2(t *testing.T) { -} - -func TestTabletErrorMsgTooLong(t *testing.T) { - buf := make([]byte, 2*maxErrLen) - for i := 0; i < 2*maxErrLen; i++ { - buf[i] = 'a' - } - msg := string(buf) - sqlErr := sqldb.NewSQLError(mysqlconn.ERDupEntry, mysqlconn.SSDupKey, msg) - tabletErr := NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, 
sqlErr) - if tabletErr.Code != vtrpcpb.Code_ALREADY_EXISTS { - t.Fatalf("got %v wanted INTEGRITY_ERROR", tabletErr.Code) - } - if tabletErr.Message != string(buf[:maxErrLen]) { - t.Fatalf("message should be capped, only %d character will be shown", maxErrLen) - } -} - -func TestTabletErrorConnError(t *testing.T) { - tabletErr := NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, sqldb.NewSQLError(1999, "HY000", "test")) - if IsConnErr(tabletErr) { - t.Fatalf("tablet error: %v is not a connection error", tabletErr) - } - tabletErr = NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, sqldb.NewSQLError(2000, mysqlconn.SSUnknownSQLState, "test")) - if !IsConnErr(tabletErr) { - t.Fatalf("tablet error: %v is a connection error", tabletErr) - } - tabletErr = NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, sqldb.NewSQLError(mysqlconn.CRServerLost, mysqlconn.SSUnknownSQLState, "test")) - if IsConnErr(tabletErr) { - t.Fatalf("tablet error: %v is not a connection error", tabletErr) - } - want := "fatal: the query was killed either because it timed out or was canceled: test (errno 2013) (sqlstate HY000)" - if tabletErr.Error() != want { - t.Fatalf("tablet error: %v, want %s", tabletErr, want) - } - sqlErr := sqldb.NewSQLError(1998, "HY000", "test") - if IsConnErr(sqlErr) { - t.Fatalf("sql error: %v is not a connection error", sqlErr) - } - sqlErr = sqldb.NewSQLError(2001, "HY000", "test") - if !IsConnErr(sqlErr) { - t.Fatalf("sql error: %v is a connection error", sqlErr) - } - - err := fmt.Errorf("(errno 2005)") - if !IsConnErr(err) { - t.Fatalf("error: %v is a connection error", err) - } - - err = fmt.Errorf("(errno 123456789012345678901234567890)") - if IsConnErr(err) { - t.Fatalf("error: %v is not a connection error", err) - } -} - -func TestTabletErrorPrefix(t *testing.T) { - tabletErr := NewTabletErrorSQL(vtrpcpb.Code_FAILED_PRECONDITION, sqldb.NewSQLError(2000, "HY000", "test")) - if tabletErr.Prefix() != "retry: " { - t.Fatalf("tablet error with error code: QUERY_NOT_SERVED should has prefix: 
'retry: '") - } - tabletErr = NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, sqldb.NewSQLError(2000, "HY000", "test")) - if tabletErr.Prefix() != "fatal: " { - t.Fatalf("tablet error with error code: INTERNAL_ERROR should has prefix: 'fatal: '") - } - tabletErr = NewTabletErrorSQL(vtrpcpb.Code_RESOURCE_EXHAUSTED, sqldb.NewSQLError(2000, "HY000", "test")) - if tabletErr.Prefix() != "tx_pool_full: " { - t.Fatalf("tablet error with error code: RESOURCE_EXHAUSTED should has prefix: 'tx_pool_full: '") - } - tabletErr = NewTabletErrorSQL(vtrpcpb.Code_ABORTED, sqldb.NewSQLError(2000, "HY000", "test")) - if tabletErr.Prefix() != "not_in_tx: " { - t.Fatalf("tablet error with error code: NOT_IN_TX should has prefix: 'not_in_tx: '") - } -} - -func TestTabletErrorRecordStats(t *testing.T) { - tabletErr := NewTabletErrorSQL(vtrpcpb.Code_FAILED_PRECONDITION, sqldb.NewSQLError(2000, "HY000", "test")) - retryCounterBefore := InfoErrors.Counts()["Retry"] - tabletErr.RecordStats() - retryCounterAfter := InfoErrors.Counts()["Retry"] - if retryCounterAfter-retryCounterBefore != 1 { - t.Fatalf("tablet error with error code QUERY_NOT_SERVED should increase Retry error count by 1") - } - - tabletErr = NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, sqldb.NewSQLError(2000, "HY000", "test")) - fatalCounterBefore := ErrorStats.Counts()["Fatal"] - tabletErr.RecordStats() - fatalCounterAfter := ErrorStats.Counts()["Fatal"] - if fatalCounterAfter-fatalCounterBefore != 1 { - t.Fatalf("tablet error with error code INTERNAL_ERROR should increase Fatal error count by 1") - } - - tabletErr = NewTabletErrorSQL(vtrpcpb.Code_RESOURCE_EXHAUSTED, sqldb.NewSQLError(2000, "HY000", "test")) - txPoolFullCounterBefore := ErrorStats.Counts()["TxPoolFull"] - tabletErr.RecordStats() - txPoolFullCounterAfter := ErrorStats.Counts()["TxPoolFull"] - if txPoolFullCounterAfter-txPoolFullCounterBefore != 1 { - t.Fatalf("tablet error with error code RESOURCE_EXHAUSTED should increase TxPoolFull error count by 1") - } - - tabletErr 
= NewTabletErrorSQL(vtrpcpb.Code_ABORTED, sqldb.NewSQLError(2000, "HY000", "test")) - notInTxCounterBefore := ErrorStats.Counts()["NotInTx"] - tabletErr.RecordStats() - notInTxCounterAfter := ErrorStats.Counts()["NotInTx"] - if notInTxCounterAfter-notInTxCounterBefore != 1 { - t.Fatalf("tablet error with error code NOT_IN_TX should increase NotInTx error count by 1") - } - - tabletErr = NewTabletErrorSQL(vtrpcpb.Code_UNKNOWN, sqldb.NewSQLError(mysqlconn.ERDupEntry, mysqlconn.SSDupKey, "test")) - dupKeyCounterBefore := InfoErrors.Counts()["DupKey"] - tabletErr.RecordStats() - dupKeyCounterAfter := InfoErrors.Counts()["DupKey"] - if dupKeyCounterAfter-dupKeyCounterBefore != 1 { - t.Fatalf("sql error with SQL error mysqlconn.ERDupEntry should increase DupKey error count by 1") - } - - tabletErr = NewTabletErrorSQL(vtrpcpb.Code_UNKNOWN, sqldb.NewSQLError(mysqlconn.ERLockWaitTimeout, mysqlconn.SSUnknownSQLState, "test")) - lockWaitTimeoutCounterBefore := ErrorStats.Counts()["Deadlock"] - tabletErr.RecordStats() - lockWaitTimeoutCounterAfter := ErrorStats.Counts()["Deadlock"] - if lockWaitTimeoutCounterAfter-lockWaitTimeoutCounterBefore != 1 { - t.Fatalf("sql error with SQL error mysqlconn.ERLockWaitTimeout should increase Deadlock error count by 1") - } - - tabletErr = NewTabletErrorSQL(vtrpcpb.Code_UNKNOWN, sqldb.NewSQLError(mysqlconn.ERLockDeadlock, mysqlconn.SSLockDeadlock, "test")) - deadlockCounterBefore := ErrorStats.Counts()["Deadlock"] - tabletErr.RecordStats() - deadlockCounterAfter := ErrorStats.Counts()["Deadlock"] - if deadlockCounterAfter-deadlockCounterBefore != 1 { - t.Fatalf("sql error with SQL error mysqlconn.ERLockDeadlock should increase Deadlock error count by 1") - } - - tabletErr = NewTabletErrorSQL(vtrpcpb.Code_UNKNOWN, sqldb.NewSQLError(mysqlconn.EROptionPreventsStatement, mysqlconn.SSUnknownSQLState, "test")) - failCounterBefore := ErrorStats.Counts()["Fail"] - tabletErr.RecordStats() - failCounterAfter := ErrorStats.Counts()["Fail"] - if 
failCounterAfter-failCounterBefore != 1 { - t.Fatalf("sql error with SQL error mysqlconn.EROptionPreventsStatement should increase Fail error count by 1") - } -} - -func TestTabletErrorLogUncaughtErr(t *testing.T) { - panicCountBefore := InternalErrors.Counts()["Panic"] - defer func() { - panicCountAfter := InternalErrors.Counts()["Panic"] - if panicCountAfter-panicCountBefore != 1 { - t.Fatalf("Panic count should increase by 1 for uncaught panic") - } - }() - defer LogError() - panic("unknown error") -} - -func TestTabletErrorTxPoolFull(t *testing.T) { - tabletErr := NewTabletErrorSQL(vtrpcpb.Code_RESOURCE_EXHAUSTED, sqldb.NewSQLError(1000, "HY000", "test")) - defer func() { - err := recover() - if err != nil { - t.Fatalf("error should have been handled already") - } - }() - defer LogError() - panic(tabletErr) -} - -func TestTabletErrorFatal(t *testing.T) { - tabletErr := NewTabletErrorSQL(vtrpcpb.Code_INTERNAL, sqldb.NewSQLError(1000, "HY000", "test")) - defer func() { - err := recover() - if err != nil { - t.Fatalf("error should have been handled already") - } - }() - defer LogError() - panic(tabletErr) -} diff --git a/go/vt/tabletserver/tabletenv/tabletenv.go b/go/vt/tabletserver/tabletenv/tabletenv.go index 3c1c24df1ad..f000734fdcd 100644 --- a/go/vt/tabletserver/tabletenv/tabletenv.go +++ b/go/vt/tabletserver/tabletenv/tabletenv.go @@ -10,7 +10,10 @@ import ( "context" "time" + log "github.com/golang/glog" + "github.com/youtube/vitess/go/stats" + "github.com/youtube/vitess/go/tb" "github.com/youtube/vitess/go/vt/callerid" "github.com/youtube/vitess/go/vt/sqlparser" ) @@ -61,3 +64,11 @@ func RecordUserQuery(ctx context.Context, tableName sqlparser.TableIdent, queryT UserTableQueryCount.Add([]string{tableName.String(), username, queryType}, 1) UserTableQueryTimesNs.Add([]string{tableName.String(), username, queryType}, int64(duration)) } + +// LogError logs panics and increments InternalErrors. 
+func LogError() { + if x := recover(); x != nil { + log.Errorf("Uncaught panic:\n%v\n%s", x, tb.Stack(4)) + InternalErrors.Add("Panic", 1) + } +} diff --git a/go/vt/tabletserver/tabletserver.go b/go/vt/tabletserver/tabletserver.go index 028a4661410..9fa05bcc4f7 100644 --- a/go/vt/tabletserver/tabletserver.go +++ b/go/vt/tabletserver/tabletserver.go @@ -1530,9 +1530,9 @@ verifyTarget: // a valid target needs to be used switch { case target.Keyspace != tsv.target.Keyspace: - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "invalid keyspace %v", target.Keyspace) + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid keyspace %v", target.Keyspace) case target.Shard != tsv.target.Shard: - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "invalid shard %v", target.Shard) + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid shard %v", target.Shard) case isTx && tsv.target.TabletType != topodatapb.TabletType_MASTER: return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "transactional statement disallowed on non-master tablet: %v", tsv.target.TabletType) case target.TabletType != tsv.target.TabletType: @@ -1544,7 +1544,7 @@ verifyTarget: return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "invalid tablet type: %v, want: %v or %v", target.TabletType, tsv.target.TabletType, tsv.alsoAllow) } } else if !tabletenv.IsLocalContext(ctx) { - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "No target") + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "No target") } ok: diff --git a/go/vt/tabletserver/twopc.go b/go/vt/tabletserver/twopc.go index 36de7d2d335..38fdc58ad49 100644 --- a/go/vt/tabletserver/twopc.go +++ b/go/vt/tabletserver/twopc.go @@ -368,7 +368,7 @@ func (tpc *TwoPC) Transition(ctx context.Context, conn *TxConnection, dtid strin return err } if qr.RowsAffected != 1 { - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "could not transition to %v: %s", state, dtid) + return 
vterrors.Errorf(vtrpcpb.Code_NOT_FOUND, "could not transition to %v: %s", state, dtid) } return nil } diff --git a/go/vt/tabletserver/tx_executor.go b/go/vt/tabletserver/tx_executor.go index 67d6077157f..4ca1acc7ab4 100644 --- a/go/vt/tabletserver/tx_executor.go +++ b/go/vt/tabletserver/tx_executor.go @@ -33,7 +33,7 @@ type TxExecutor struct { // protocol, will perform all the cleanup. func (txe *TxExecutor) Prepare(transactionID int64, dtid string) error { if !txe.te.twopcEnabled { - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "2pc is not enabled") + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") } defer tabletenv.QueryStats.Record("PREPARE", time.Now()) txe.logStats.TransactionID = transactionID @@ -79,7 +79,7 @@ func (txe *TxExecutor) Prepare(transactionID int64, dtid string) error { // marked as failed in the redo log. func (txe *TxExecutor) CommitPrepared(dtid string) error { if !txe.te.twopcEnabled { - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "2pc is not enabled") + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") } defer tabletenv.QueryStats.Record("COMMIT_PREPARED", time.Now()) conn, err := txe.te.preparedPool.FetchForCommit(dtid) @@ -154,7 +154,7 @@ func (txe *TxExecutor) markFailed(ctx context.Context, dtid string) { // killer will be the one to eventually roll it back. func (txe *TxExecutor) RollbackPrepared(dtid string, originalID int64) error { if !txe.te.twopcEnabled { - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "2pc is not enabled") + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") } defer tabletenv.QueryStats.Record("ROLLBACK_PREPARED", time.Now()) conn, err := txe.te.txPool.LocalBegin(txe.ctx) @@ -184,7 +184,7 @@ returnConn: // CreateTransaction creates the metadata for a 2PC transaction. 
func (txe *TxExecutor) CreateTransaction(dtid string, participants []*querypb.Target) error { if !txe.te.twopcEnabled { - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "2pc is not enabled") + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") } defer tabletenv.QueryStats.Record("CREATE_TRANSACTION", time.Now()) conn, err := txe.te.txPool.LocalBegin(txe.ctx) @@ -204,7 +204,7 @@ func (txe *TxExecutor) CreateTransaction(dtid string, participants []*querypb.Ta // decision to commit the associated 2pc transaction. func (txe *TxExecutor) StartCommit(transactionID int64, dtid string) error { if !txe.te.twopcEnabled { - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "2pc is not enabled") + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") } defer tabletenv.QueryStats.Record("START_COMMIT", time.Now()) txe.logStats.TransactionID = transactionID @@ -226,7 +226,7 @@ func (txe *TxExecutor) StartCommit(transactionID int64, dtid string) error { // If a transaction id is provided, that transaction is also rolled back. func (txe *TxExecutor) SetRollback(dtid string, transactionID int64) error { if !txe.te.twopcEnabled { - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "2pc is not enabled") + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") } defer tabletenv.QueryStats.Record("SET_ROLLBACK", time.Now()) txe.logStats.TransactionID = transactionID @@ -258,7 +258,7 @@ func (txe *TxExecutor) SetRollback(dtid string, transactionID int64) error { // essentially resolving it. 
func (txe *TxExecutor) ConcludeTransaction(dtid string) error { if !txe.te.twopcEnabled { - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "2pc is not enabled") + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") } defer tabletenv.QueryStats.Record("RESOLVE", time.Now()) @@ -278,7 +278,7 @@ func (txe *TxExecutor) ConcludeTransaction(dtid string) error { // ReadTransaction returns the metadata for the sepcified dtid. func (txe *TxExecutor) ReadTransaction(dtid string) (*querypb.TransactionMetadata, error) { if !txe.te.twopcEnabled { - return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "2pc is not enabled") + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") } return txe.te.twoPC.ReadTransaction(txe.ctx, dtid) } @@ -286,7 +286,7 @@ func (txe *TxExecutor) ReadTransaction(dtid string) (*querypb.TransactionMetadat // ReadTwopcInflight returns info about all in-flight 2pc transactions. func (txe *TxExecutor) ReadTwopcInflight() (distributed []*DistributedTx, prepared, failed []*PreparedTx, err error) { if !txe.te.twopcEnabled { - return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "2pc is not enabled") + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") } prepared, failed, err = txe.te.twoPC.ReadAllRedo(txe.ctx) if err != nil { diff --git a/go/vt/tabletserver/tx_pool.go b/go/vt/tabletserver/tx_pool.go index 3ae726260eb..ac4c40ffbe0 100644 --- a/go/vt/tabletserver/tx_pool.go +++ b/go/vt/tabletserver/tx_pool.go @@ -144,7 +144,7 @@ func (axp *TxPool) Begin(ctx context.Context) (int64, error) { conn, err := axp.conns.Get(ctx) if err != nil { switch err { - case tabletenv.ErrConnPoolClosed: + case connpool.ErrConnPoolClosed: return 0, err case pools.ErrTimeout: axp.LogActive() diff --git a/go/vt/throttler/grpcthrottlerclient/grpcthrottlerclient.go b/go/vt/throttler/grpcthrottlerclient/grpcthrottlerclient.go index 17c1fde7702..1fa12332235 
100644 --- a/go/vt/throttler/grpcthrottlerclient/grpcthrottlerclient.go +++ b/go/vt/throttler/grpcthrottlerclient/grpcthrottlerclient.go @@ -49,7 +49,7 @@ func factory(addr string) (throttlerclient.Client, error) { func (c *client) MaxRates(ctx context.Context) (map[string]int64, error) { response, err := c.gRPCClient.MaxRates(ctx, &throttlerdata.MaxRatesRequest{}) if err != nil { - return nil, vterrors.FromGRPCError(err) + return nil, vterrors.FromGRPC(err) } return response.Rates, nil } @@ -63,7 +63,7 @@ func (c *client) SetMaxRate(ctx context.Context, rate int64) ([]string, error) { response, err := c.gRPCClient.SetMaxRate(ctx, request) if err != nil { - return nil, vterrors.FromGRPCError(err) + return nil, vterrors.FromGRPC(err) } return response.Names, nil } @@ -74,7 +74,7 @@ func (c *client) GetConfiguration(ctx context.Context, throttlerName string) (ma ThrottlerName: throttlerName, }) if err != nil { - return nil, vterrors.FromGRPCError(err) + return nil, vterrors.FromGRPC(err) } return response.Configurations, nil } @@ -87,7 +87,7 @@ func (c *client) UpdateConfiguration(ctx context.Context, throttlerName string, CopyZeroValues: copyZeroValues, }) if err != nil { - return nil, vterrors.FromGRPCError(err) + return nil, vterrors.FromGRPC(err) } return response.Names, nil } @@ -98,7 +98,7 @@ func (c *client) ResetConfiguration(ctx context.Context, throttlerName string) ( ThrottlerName: throttlerName, }) if err != nil { - return nil, vterrors.FromGRPCError(err) + return nil, vterrors.FromGRPC(err) } return response.Names, nil } diff --git a/go/vt/vterrors/aggregate.go b/go/vt/vterrors/aggregate.go index bc423b44057..41606ac46f2 100644 --- a/go/vt/vterrors/aggregate.go +++ b/go/vt/vterrors/aggregate.go @@ -5,6 +5,9 @@ package vterrors import ( + "sort" + "strings" + vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) @@ -58,10 +61,17 @@ var errorPriorities = map[vtrpcpb.Code]int{ vtrpcpb.Code_DATA_LOSS: PriorityDataLoss, } -// AggregateVtGateErrorCodes 
aggregates a list of errors into a single -// error code. It does so by finding the highest priority error code -// in the list. -func AggregateVtGateErrorCodes(errors []error) vtrpcpb.Code { +// Aggregate aggregates several errors into a single one. +// The resulting error code will be the one with the highest +// priority as defined by the priority constants in this package. +func Aggregate(errors []error) error { + if len(errors) == 0 { + return nil + } + return New(aggregateCodes(errors), aggregateErrors(errors)) +} + +func aggregateCodes(errors []error) vtrpcpb.Code { highCode := vtrpcpb.Code_OK for _, e := range errors { code := Code(e) @@ -72,13 +82,13 @@ return highCode } -// AggregateVtGateErrors aggregates several errors into a single one. -func AggregateVtGateErrors(errors []error) error { - if len(errors) == 0 { - return nil +// aggregateErrors aggregates an array of errors into a single error by string concatenation. 
+func aggregateErrors(errs []error) string { + errStrs := make([]string, 0, len(errs)) + for _, e := range errs { + errStrs = append(errStrs, e.Error()) } - return FromError( - AggregateVtGateErrorCodes(errors), - ConcatenateErrors(errors), - ) + // sort the error strings so we always have deterministic ordering + sort.Strings(errStrs) + return strings.Join(errStrs, "\n") } diff --git a/go/vt/vterrors/aggregate_test.go b/go/vt/vterrors/aggregate_test.go index d76518fe51c..4ef8f431f01 100644 --- a/go/vt/vterrors/aggregate_test.go +++ b/go/vt/vterrors/aggregate_test.go @@ -13,10 +13,10 @@ import ( vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) -var errGeneric = errors.New("generic error") +var errGeneric = "generic error" func errFromCode(c vtrpcpb.Code) error { - return FromError(c, errGeneric) + return New(c, errGeneric) } func TestAggregateVtGateErrorCodes(t *testing.T) { @@ -60,7 +60,7 @@ func TestAggregateVtGateErrorCodes(t *testing.T) { }, } for _, tc := range testcases { - out := AggregateVtGateErrorCodes(tc.input) + out := aggregateCodes(tc.input) if out != tc.expected { t.Errorf("AggregateVtGateErrorCodes(%v) = %v \nwant: %v", tc.input, out, tc.expected) @@ -83,14 +83,18 @@ func TestAggregateVtGateErrors(t *testing.T) { errFromCode(vtrpcpb.Code_UNAVAILABLE), errFromCode(vtrpcpb.Code_INVALID_ARGUMENT), }, - expected: FromError( + expected: New( vtrpcpb.Code_INVALID_ARGUMENT, - ConcatenateErrors([]error{errGeneric, errGeneric, errGeneric}), + aggregateErrors([]error{ + errors.New(errGeneric), + errors.New(errGeneric), + errors.New(errGeneric), + }), ), }, } for _, tc := range testcases { - out := AggregateVtGateErrors(tc.input) + out := Aggregate(tc.input) if !reflect.DeepEqual(out, tc.expected) { t.Errorf("AggregateVtGateErrors(%+v) = %+v \nwant: %+v", tc.input, out, tc.expected) diff --git a/go/vt/vterrors/doc.go b/go/vt/vterrors/doc.go index c3a8a2a7355..f0120dfd52e 100644 --- a/go/vt/vterrors/doc.go +++ b/go/vt/vterrors/doc.go @@ -13,37 +13,16 @@ 
errors are passed around (even through RPCs), the code is propagated. To handle errors, only the code should be looked at (and not string-matching on the error message). -For instance, see this document for the Google Cloud Error Codes. -https://cloud.google.com/datastore/docs/concepts/errors - Vitess defines the error codes in /proto/vtrpc.proto. Along with an RPCError message that can be used to transmit errors through RPCs, in -the message payloads. - -Vitess then defines the VtError interface, for all errors that have a code. -See vterrors.go in this library. +the message payloads. These codes match the names and numbers defined +by gRPC. -Vitess also defines a VitessError error implementation, that can wrap +Vitess also defines a VitessError error implementation, that can convert any error and add a code to it. -To easily transmit these codes through gRPC, we map these codes to -gRPC error codes in grpc.go, in this library. So if a gRPC call only -returns an error, we return a gRPC error with the right gRPC error -code. If a gRPC call needs to return both an error and some data (like -vtgateservice.Execute that can return an updated Session along with -the error), we can just return an RPCError in the result. - -Some libraries define their own error structures that implement the -VtError interface. Usually, it is to add extra data to it. For an -example, see ../tabletserver/tablet_error.go that adds the SQL error -codes to the error structure. These SQL errors however are all mapped -to their appropriate canonical error code, see the function NewTabletErrorSQL -in that file for the mapping. - -When transmitting any error through RPC boundaries, we are careful to -always preserve the error code. When augmenting / aggregating errors, -we also preserve the error codes: -- See WithPrefix and WithSuffix in this package for augmentation. -- See aggregate.go in this package for aggregation. 
+While sending an error through gRPC, these codes are transmitted +using gRPC's error propagation mechanism and decoded back to +the original code on the other end. */ diff --git a/go/vt/vterrors/grpc.go b/go/vt/vterrors/grpc.go index 92c430c9a14..827e04fabee 100644 --- a/go/vt/vterrors/grpc.go +++ b/go/vt/vterrors/grpc.go @@ -18,12 +18,6 @@ import ( // Use these methods to return an error through gRPC and still // retain its code. -// GRPCServerErrPrefix is the string we prefix gRPC server errors with. This is -// necessary because there is currently no good way, in gRPC, to differentiate -// between an error from a server vs the client. -// See: https://github.com/grpc/grpc-go/issues/319 -const GRPCServerErrPrefix = "gRPCServerError:" - // CodeToLegacyErrorCode maps a vtrpcpb.Code to a vtrpcpb.LegacyErrorCode. func CodeToLegacyErrorCode(code vtrpcpb.Code) vtrpcpb.LegacyErrorCode { switch code { @@ -94,56 +88,34 @@ func LegacyErrorCodeToCode(code vtrpcpb.LegacyErrorCode) vtrpcpb.Code { } } -// CodeToGRPC maps a vtrpcpb.Code to a grpc Code. -func CodeToGRPC(code vtrpcpb.Code) codes.Code { - return codes.Code(code) -} - -// GRPCToCode maps a grpc Code to a vtrpcpb.Code -func GRPCToCode(code codes.Code) vtrpcpb.Code { - return vtrpcpb.Code(code) -} - -// toGRPCCode will attempt to determine the best gRPC code for a particular error. -func toGRPCCode(err error) codes.Code { - if err == nil { - return codes.OK - } - if vtErr, ok := err.(VtError); ok { - return CodeToGRPC(vtErr.VtErrorCode()) - } - // Returns the underlying gRPC Code, or codes.Unknown if one doesn't exist. - return grpc.Code(err) -} - // truncateError shortens errors because gRPC has a size restriction on them. -func truncateError(err error) error { +func truncateError(err error) string { // For more details see: https://github.com/grpc/grpc-go/issues/443 // The gRPC spec says "Clients may limit the size of Response-Headers, // Trailers, and Trailers-Only, with a default of 8 KiB each suggested." 
// Therefore, we assume 8 KiB minus some headroom. GRPCErrorLimit := 8*1024 - 512 if len(err.Error()) <= GRPCErrorLimit { - return err + return err.Error() } truncateInfo := "[...] [remainder of the error is truncated because gRPC has a size limit on errors.]" truncatedErr := err.Error()[:GRPCErrorLimit] - return fmt.Errorf("%v %v", truncatedErr, truncateInfo) + return fmt.Sprintf("%v %v", truncatedErr, truncateInfo) } -// ToGRPCError returns an error as a gRPC error, with the appropriate error code. -func ToGRPCError(err error) error { +// ToGRPC returns an error as a gRPC error, with the appropriate error code. +func ToGRPC(err error) error { if err == nil { return nil } - return grpc.Errorf(toGRPCCode(err), "%v %v", GRPCServerErrPrefix, truncateError(err)) + return grpc.Errorf(codes.Code(Code(err)), "%v", truncateError(err)) } -// FromGRPCError returns a gRPC error as a VitessError, translating between error codes. +// FromGRPC returns a gRPC error as a vtError, translating between error codes. // However, there are a few errors which are not translated and passed as they // are. For example, io.EOF since our code base checks for this error to find // out that a stream has finished. -func FromGRPCError(err error) error { +func FromGRPC(err error) error { if err == nil { return nil } @@ -151,8 +123,5 @@ func FromGRPCError(err error) error { // Do not wrap io.EOF because we compare against it for finished streams. return err } - return &VitessError{ - code: GRPCToCode(grpc.Code(err)), - err: err, - } + return New(vtrpcpb.Code(grpc.Code(err)), err.Error()) } diff --git a/go/vt/vterrors/proto3.go b/go/vt/vterrors/proto3.go index 91051ac0d11..417aaf4b26b 100644 --- a/go/vt/vterrors/proto3.go +++ b/go/vt/vterrors/proto3.go @@ -9,13 +9,13 @@ import ( ) // This file contains the necessary methods to send and receive errors -// as payloads of proto3 structures. It converts VitessError to and from -// vtrpcpb.Error. 
Use these methods when a RPC call can return both +// as payloads of proto3 structures. It converts vtError to and from +// *vtrpcpb.RPCError. Use these methods when a RPC call can return both // data and an error. -// FromVtRPCError recovers a VitessError from a *vtrpcpb.RPCError (which is how VitessErrors -// are transmitted across proto3 RPC boundaries). -func FromVtRPCError(rpcErr *vtrpcpb.RPCError) error { +// FromVTRPC recovers a vtError from a *vtrpcpb.RPCError (which is how vtError +// is transmitted across proto3 RPC boundaries). +func FromVTRPC(rpcErr *vtrpcpb.RPCError) error { if rpcErr == nil { return nil } @@ -26,8 +26,8 @@ func FromVtRPCError(rpcErr *vtrpcpb.RPCError) error { return New(code, rpcErr.Message) } -// VtRPCErrorFromVtError converts from a VtError to a vtrpcpb.RPCError. -func VtRPCErrorFromVtError(err error) *vtrpcpb.RPCError { +// ToVTRPC converts from vtError to a vtrpcpb.RPCError. +func ToVTRPC(err error) *vtrpcpb.RPCError { if err == nil { return nil } diff --git a/go/vt/vterrors/proto3_test.go b/go/vt/vterrors/proto3_test.go index e40c0107564..7716b3f7610 100644 --- a/go/vt/vterrors/proto3_test.go +++ b/go/vt/vterrors/proto3_test.go @@ -39,7 +39,7 @@ func TestFromVtRPCError(t *testing.T) { want: New(vtrpcpb.Code_INVALID_ARGUMENT, "bad input"), }} for _, tcase := range testcases { - got := FromVtRPCError(tcase.in) + got := FromVTRPC(tcase.in) if !reflect.DeepEqual(got, tcase.want) { t.Errorf("FromVtRPCError(%v): %v, want %v", tcase.in, got, tcase.want) } @@ -62,7 +62,7 @@ func TestVtRPCErrorFromVtError(t *testing.T) { }, }} for _, tcase := range testcases { - got := VtRPCErrorFromVtError(tcase.in) + got := ToVTRPC(tcase.in) if !reflect.DeepEqual(got, tcase.want) { t.Errorf("VtRPCErrorFromVtError(%v): %v, want %v", tcase.in, got, tcase.want) } diff --git a/go/vt/vterrors/vterrors.go b/go/vt/vterrors/vterrors.go index a92d5ff33db..d9cd7b6d98f 100644 --- a/go/vt/vterrors/vterrors.go +++ b/go/vt/vterrors/vterrors.go @@ -1,148 +1,44 @@ 
package vterrors import ( - "errors" "fmt" - "sort" - "strings" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) -// Code returns the error code if it's a VitessError. -// If err is nil, it returns ok. Otherwise, it returns unknown. -func Code(err error) vtrpcpb.Code { - if err == nil { - return vtrpcpb.Code_OK - } - if err, ok := err.(*VitessError); ok { - return err.code - } - if err, ok := err.(VtError); ok { - return err.VtErrorCode() - } - return vtrpcpb.Code_UNKNOWN -} - -// ConcatenateErrors aggregates an array of errors into a single error by string concatenation. -func ConcatenateErrors(errs []error) error { - errStrs := make([]string, 0, len(errs)) - for _, e := range errs { - errStrs = append(errStrs, fmt.Sprintf("%v", e)) - } - // sort the error strings so we always have deterministic ordering - sort.Strings(errStrs) - return errors.New(strings.Join(errStrs, "\n")) -} - -// VtError is implemented by any type that exposes a vtrpcpb.ErrorCode. -type VtError interface { - VtErrorCode() vtrpcpb.Code -} - -// VitessError is the error type that we use internally for passing structured errors. -type VitessError struct { +type vtError struct { code vtrpcpb.Code - // Error message that should be returned. This allows us to change an error message - // without losing the underlying error. For example, if you have an error like - // context.DeadlikeExceeded, you don't want to modify it - otherwise you would lose - // the ability to programatically check for that error. However, you might want to - // add some context to the error, giving you a message like "command failed: deadline exceeded". - // To do that, you can create a NewVitessError to wrap the original error, but redefine - // the error message. - Message string - err error + err string } // New creates a new error using the code and input string. 
func New(code vtrpcpb.Code, in string) error { - return &VitessError{ + return &vtError{ code: code, - err: errors.New(in), + err: in, } } // Errorf returns a new error built using Printf style arguments. func Errorf(code vtrpcpb.Code, format string, args ...interface{}) error { - return &VitessError{ + return &vtError{ code: code, - err: errors.New(fmt.Sprintf(format, args...)), - } -} - -// Error implements the error interface. It will return the redefined error message, if there -// is one. If there isn't, it will return the original error message. -func (e *VitessError) Error() string { - if e.Message == "" { - return fmt.Sprintf("%v", e.err) + err: fmt.Sprintf(format, args...), } - return e.Message } -// VtErrorCode returns the underlying Vitess error code. -func (e *VitessError) VtErrorCode() vtrpcpb.Code { - return e.code +func (e *vtError) Error() string { + return e.err } -// AsString returns a VitessError as a string, with more detailed information than Error(). -func (e *VitessError) AsString() string { - if e.Message != "" { - return fmt.Sprintf("Code: %v, Message: %v, err: %v", e.code, e.Message, e.err) - } - return fmt.Sprintf("Code: %v, err: %v", e.code, e.err) -} - -// FromError returns a VitessError with the supplied error code by wrapping an -// existing error. -// Use this method also when you want to create a VitessError without a custom -// message. For example: -// err := vterrors.FromError(vtrpcpb.Code_INTERNAL, -// errors.New("no valid endpoint")) -func FromError(code vtrpcpb.Code, err error) error { - return &VitessError{ - code: code, - err: err, - } -} - -// NewVitessError returns a VitessError backed error with the given arguments. -// Useful for preserving an underlying error while creating a new error message. 
-func NewVitessError(code vtrpcpb.Code, err error, format string, args ...interface{}) error { - return &VitessError{ - code: code, - Message: fmt.Sprintf(format, args...), - err: err, - } -} - -// WithPrefix allows a string to be prefixed to an error. -// If the original error implements the VtError interface it returns a VitessError wrapping the -// original error (with one exception: if the original error is an instance of VitessError it -// doesn't wrap it in a new VitessError instance, but only changes the 'Message' field). -// Otherwise, it returns a string prefixed with the given prefix. -func WithPrefix(prefix string, in error) error { - return New(Code(in), fmt.Sprintf("%s%v", prefix, in)) -} - -// WithSuffix allows a string to be suffixed to an error. -// If the original error implements the VtError interface it returns a VitessError wrapping the -// original error (with one exception: if the original error is an instance of VitessError -// it doesn't wrap it in a new VitessError instance, but only changes the 'Message' field). -// Otherwise, it returns a string suffixed with the given suffix. -func WithSuffix(in error, suffix string) error { - if vitessError, ok := in.(*VitessError); ok { - return &VitessError{ - code: vitessError.code, - err: vitessError.err, - Message: fmt.Sprintf("%s%s", in.Error(), suffix), - } +// Code returns the error code if it's a vtError. +// If err is nil, it returns ok. Otherwise, it returns unknown. 
+func Code(err error) vtrpcpb.Code { + if err == nil { + return vtrpcpb.Code_OK } - if vtError, ok := in.(VtError); ok { - return &VitessError{ - code: vtError.VtErrorCode(), - err: in, - Message: fmt.Sprintf("%s%s", in.Error(), suffix), - } + if err, ok := err.(*vtError); ok { + return err.code } - return fmt.Errorf("%s%s", in, suffix) + return vtrpcpb.Code_UNKNOWN } diff --git a/go/vt/vtgate/buffer/buffer.go b/go/vt/vtgate/buffer/buffer.go index 8557d788673..33b267cdbeb 100644 --- a/go/vt/vtgate/buffer/buffer.go +++ b/go/vt/vtgate/buffer/buffer.go @@ -12,7 +12,6 @@ package buffer import ( "context" - "errors" "fmt" "strings" "sync" @@ -29,9 +28,9 @@ import ( ) var ( - bufferFullError = vterrors.FromError(vtrpcpb.Code_UNAVAILABLE, errors.New("master buffer is full")) - entryEvictedError = vterrors.FromError(vtrpcpb.Code_UNAVAILABLE, errors.New("buffer full: request evicted for newer request")) - contextCanceledError = vterrors.FromError(vtrpcpb.Code_UNAVAILABLE, errors.New("context was canceled before failover finished")) + bufferFullError = vterrors.New(vtrpcpb.Code_UNAVAILABLE, "master buffer is full") + entryEvictedError = vterrors.New(vtrpcpb.Code_UNAVAILABLE, "buffer full: request evicted for newer request") + contextCanceledError = vterrors.New(vtrpcpb.Code_UNAVAILABLE, "context was canceled before failover finished") ) // bufferMode specifies how the buffer is configured for a given shard. @@ -215,6 +214,9 @@ func (b *Buffer) StatsUpdate(ts *discovery.TabletStats) { // causedByFailover returns true if "err" was supposedly caused by a failover. // To simplify things, we've merged the detection for different MySQL flavors // in one function. Supported flavors: MariaDB, MySQL, Google internal. +// TODO(mberlin): This function does not have to check the specific error messages. +// The previous error revamp ensures that FAILED_PRECONDITION is returned only +// during failover. 
func causedByFailover(err error) bool { log.V(2).Infof("Checking error (type: %T) if it is caused by a failover. err: %v", err, err) diff --git a/go/vt/vtgate/buffer/buffer_test.go b/go/vt/vtgate/buffer/buffer_test.go index 26d5c86e9f3..5c27caa0450 100644 --- a/go/vt/vtgate/buffer/buffer_test.go +++ b/go/vt/vtgate/buffer/buffer_test.go @@ -26,10 +26,10 @@ const ( ) var ( - failoverErr = vterrors.FromError(vtrpcpb.Code_FAILED_PRECONDITION, - errors.New("vttablet: rpc error: code = 9 desc = gRPCServerError: retry: operation not allowed in state SHUTTING_DOWN")) - nonFailoverErr = vterrors.FromError(vtrpcpb.Code_FAILED_PRECONDITION, - errors.New("vttablet: rpc error: code = 9 desc = gRPCServerError: retry: TODO(mberlin): Insert here any realistic error not caused by a failover")) + failoverErr = vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, + "vttablet: rpc error: code = 9 desc = gRPCServerError: retry: operation not allowed in state SHUTTING_DOWN") + nonFailoverErr = vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, + "vttablet: rpc error: code = 9 desc = gRPCServerError: retry: TODO(mberlin): Insert here any realistic error not caused by a failover") statsKeyJoined = fmt.Sprintf("%s.%s", keyspace, shard) diff --git a/go/vt/vtgate/buffer/shard_buffer.go b/go/vt/vtgate/buffer/shard_buffer.go index e0fd1160131..5bd7647d4a2 100644 --- a/go/vt/vtgate/buffer/shard_buffer.go +++ b/go/vt/vtgate/buffer/shard_buffer.go @@ -349,7 +349,7 @@ func (sb *shardBuffer) wait(ctx context.Context, e *entry) error { select { case <-ctx.Done(): sb.remove(e) - return vterrors.WithSuffix(contextCanceledError, fmt.Sprintf(": %v", ctx.Err())) + return vterrors.Errorf(vterrors.Code(contextCanceledError), "%v: %v", contextCanceledError, ctx.Err()) case <-e.done: return e.err } diff --git a/go/vt/vtgate/gateway/discoverygateway.go b/go/vt/vtgate/gateway/discoverygateway.go index 221848aae1a..31b8f6c4af5 100644 --- a/go/vt/vtgate/gateway/discoverygateway.go +++ 
b/go/vt/vtgate/gateway/discoverygateway.go @@ -178,11 +178,10 @@ func (dg *discoveryGateway) withRetry(ctx context.Context, target *querypb.Targe retryDone, bufferErr := dg.buffer.WaitForFailoverEnd(ctx, target.Keyspace, target.Shard, err) if bufferErr != nil { // Buffering failed e.g. buffer is already full. Do not retry. - err = vterrors.WithSuffix( - vterrors.WithPrefix( - "failed to automatically buffer and retry failed request during failover: ", - bufferErr), - fmt.Sprintf(" original err (type=%T): %v", err, err)) + err = vterrors.Errorf( + vterrors.Code(err), + "failed to automatically buffer and retry failed request during failover: %v original err (type=%T): %v", + bufferErr, err, err) break } @@ -198,7 +197,7 @@ func (dg *discoveryGateway) withRetry(ctx context.Context, target *querypb.Targe tablets := dg.tsc.GetHealthyTabletStats(target.Keyspace, target.Shard, target.TabletType) if len(tablets) == 0 { // fail fast if there is no tablet - err = vterrors.FromError(vtrpcpb.Code_INTERNAL, fmt.Errorf("no valid tablet")) + err = vterrors.New(vtrpcpb.Code_UNAVAILABLE, "no valid tablet") break } shuffleTablets(tablets) @@ -214,7 +213,7 @@ func (dg *discoveryGateway) withRetry(ctx context.Context, target *querypb.Targe if ts == nil { if err == nil { // do not override error from last attempt. 
- err = vterrors.FromError(vtrpcpb.Code_INTERNAL, fmt.Errorf("no available connection")) + err = vterrors.New(vtrpcpb.Code_UNAVAILABLE, "no available connection") } break } @@ -223,7 +222,7 @@ func (dg *discoveryGateway) withRetry(ctx context.Context, target *querypb.Targe tabletLastUsed = ts.Tablet conn := dg.hc.GetConnection(ts.Key) if conn == nil { - err = vterrors.FromError(vtrpcpb.Code_INTERNAL, fmt.Errorf("no connection for key %v tablet %+v", ts.Key, ts.Tablet)) + err = vterrors.Errorf(vtrpcpb.Code_UNAVAILABLE, "no connection for key %v tablet %+v", ts.Key, ts.Tablet) invalidTablets[ts.Key] = true continue } diff --git a/go/vt/vtgate/gateway/discoverygateway_test.go b/go/vt/vtgate/gateway/discoverygateway_test.go index 4785177bea9..9df2ae102f1 100644 --- a/go/vt/vtgate/gateway/discoverygateway_test.go +++ b/go/vt/vtgate/gateway/discoverygateway_test.go @@ -128,7 +128,7 @@ func testDiscoveryGatewayGeneric(t *testing.T, streaming bool, f func(dg Gateway dg.tsc.ResetForTesting() want := "target: ks.0.replica, no valid tablet" err := f(dg, target) - verifyShardError(t, err, want, vtrpcpb.Code_INTERNAL) + verifyShardError(t, err, want, vtrpcpb.Code_UNAVAILABLE) // tablet with error hc.Reset() @@ -136,7 +136,7 @@ func testDiscoveryGatewayGeneric(t *testing.T, streaming bool, f func(dg Gateway hc.AddTestTablet("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, false, 10, fmt.Errorf("no connection")) want = "target: ks.0.replica, no valid tablet" err = f(dg, target) - verifyShardError(t, err, want, vtrpcpb.Code_INTERNAL) + verifyShardError(t, err, want, vtrpcpb.Code_UNAVAILABLE) // tablet without connection hc.Reset() @@ -144,7 +144,7 @@ func testDiscoveryGatewayGeneric(t *testing.T, streaming bool, f func(dg Gateway ep1 := hc.AddTestTablet("cell", "1.1.1.1", 1001, keyspace, shard, tabletType, false, 10, nil).Tablet() want = fmt.Sprintf(`target: ks.0.replica, no valid tablet`) err = f(dg, target) - verifyShardError(t, err, want, vtrpcpb.Code_INTERNAL) + 
verifyShardError(t, err, want, vtrpcpb.Code_UNAVAILABLE) // retry error hc.Reset() diff --git a/go/vt/vtgate/gateway/shard_error.go b/go/vt/vtgate/gateway/shard_error.go index e3dc332147e..99515b3a99b 100644 --- a/go/vt/vtgate/gateway/shard_error.go +++ b/go/vt/vtgate/gateway/shard_error.go @@ -5,8 +5,6 @@ package gateway import ( - "fmt" - "github.com/youtube/vitess/go/vt/topo/topoproto" "github.com/youtube/vitess/go/vt/vterrors" @@ -20,7 +18,7 @@ func NewShardError(in error, target *querypb.Target, tablet *topodatapb.Tablet, return nil } if tablet != nil { - return vterrors.WithPrefix(fmt.Sprintf("target: %s.%s.%s, used tablet: (%+v), ", target.Keyspace, target.Shard, topoproto.TabletTypeLString(target.TabletType), tablet), in) + return vterrors.Errorf(vterrors.Code(in), "target: %s.%s.%s, used tablet: (%+v), %v", target.Keyspace, target.Shard, topoproto.TabletTypeLString(target.TabletType), tablet, in) } - return vterrors.WithPrefix(fmt.Sprintf("target: %s.%s.%s, ", target.Keyspace, target.Shard, topoproto.TabletTypeLString(target.TabletType)), in) + return vterrors.Errorf(vterrors.Code(in), "target: %s.%s.%s, %v", target.Keyspace, target.Shard, topoproto.TabletTypeLString(target.TabletType), in) } diff --git a/go/vt/vtgate/grpcvtgateconn/conn.go b/go/vt/vtgate/grpcvtgateconn/conn.go index 2f701f039fa..80c303b0846 100644 --- a/go/vt/vtgate/grpcvtgateconn/conn.go +++ b/go/vt/vtgate/grpcvtgateconn/conn.go @@ -77,10 +77,10 @@ func (conn *vtgateConn) Execute(ctx context.Context, query string, bindVars map[ } response, err := conn.c.Execute(ctx, request) if err != nil { - return nil, session, vterrors.FromGRPCError(err) + return nil, session, vterrors.FromGRPC(err) } if response.Error != nil { - return nil, response.Session, vterrors.FromVtRPCError(response.Error) + return nil, response.Session, vterrors.FromVTRPC(response.Error) } return sqltypes.Proto3ToResult(response.Result), response.Session, nil } @@ -105,10 +105,10 @@ func (conn *vtgateConn) ExecuteShards(ctx 
context.Context, query string, keyspac } response, err := conn.c.ExecuteShards(ctx, request) if err != nil { - return nil, session, vterrors.FromGRPCError(err) + return nil, session, vterrors.FromGRPC(err) } if response.Error != nil { - return nil, response.Session, vterrors.FromVtRPCError(response.Error) + return nil, response.Session, vterrors.FromVTRPC(response.Error) } return sqltypes.Proto3ToResult(response.Result), response.Session, nil } @@ -133,10 +133,10 @@ func (conn *vtgateConn) ExecuteKeyspaceIds(ctx context.Context, query string, ke } response, err := conn.c.ExecuteKeyspaceIds(ctx, request) if err != nil { - return nil, session, vterrors.FromGRPCError(err) + return nil, session, vterrors.FromGRPC(err) } if response.Error != nil { - return nil, response.Session, vterrors.FromVtRPCError(response.Error) + return nil, response.Session, vterrors.FromVTRPC(response.Error) } return sqltypes.Proto3ToResult(response.Result), response.Session, nil } @@ -161,10 +161,10 @@ func (conn *vtgateConn) ExecuteKeyRanges(ctx context.Context, query string, keys } response, err := conn.c.ExecuteKeyRanges(ctx, request) if err != nil { - return nil, session, vterrors.FromGRPCError(err) + return nil, session, vterrors.FromGRPC(err) } if response.Error != nil { - return nil, response.Session, vterrors.FromVtRPCError(response.Error) + return nil, response.Session, vterrors.FromVTRPC(response.Error) } return sqltypes.Proto3ToResult(response.Result), response.Session, nil } @@ -190,10 +190,10 @@ func (conn *vtgateConn) ExecuteEntityIds(ctx context.Context, query string, keys } response, err := conn.c.ExecuteEntityIds(ctx, request) if err != nil { - return nil, session, vterrors.FromGRPCError(err) + return nil, session, vterrors.FromGRPC(err) } if response.Error != nil { - return nil, response.Session, vterrors.FromVtRPCError(response.Error) + return nil, response.Session, vterrors.FromVTRPC(response.Error) } return sqltypes.Proto3ToResult(response.Result), response.Session, nil } 
@@ -218,10 +218,10 @@ func (conn *vtgateConn) ExecuteBatch(ctx context.Context, queryList []string, bi } response, err := conn.c.ExecuteBatch(ctx, request) if err != nil { - return nil, session, vterrors.FromGRPCError(err) + return nil, session, vterrors.FromGRPC(err) } if response.Error != nil { - return nil, response.Session, vterrors.FromVtRPCError(response.Error) + return nil, response.Session, vterrors.FromVTRPC(response.Error) } return sqltypes.Proto3ToQueryReponses(response.Results), response.Session, nil } @@ -241,10 +241,10 @@ func (conn *vtgateConn) ExecuteBatchShards(ctx context.Context, queries []*vtgat } response, err := conn.c.ExecuteBatchShards(ctx, request) if err != nil { - return nil, session, vterrors.FromGRPCError(err) + return nil, session, vterrors.FromGRPC(err) } if response.Error != nil { - return nil, response.Session, vterrors.FromVtRPCError(response.Error) + return nil, response.Session, vterrors.FromVTRPC(response.Error) } return sqltypes.Proto3ToResults(response.Results), response.Session, nil } @@ -264,10 +264,10 @@ func (conn *vtgateConn) ExecuteBatchKeyspaceIds(ctx context.Context, queries []* } response, err := conn.c.ExecuteBatchKeyspaceIds(ctx, request) if err != nil { - return nil, session, vterrors.FromGRPCError(err) + return nil, session, vterrors.FromGRPC(err) } if response.Error != nil { - return nil, response.Session, vterrors.FromVtRPCError(response.Error) + return nil, response.Session, vterrors.FromVTRPC(response.Error) } return sqltypes.Proto3ToResults(response.Results), response.Session, nil } @@ -280,10 +280,7 @@ type streamExecuteAdapter struct { func (a *streamExecuteAdapter) Recv() (*sqltypes.Result, error) { qr, err := a.recv() if err != nil { - if err != io.EOF { - err = vterrors.FromGRPCError(err) - } - return nil, err + return nil, vterrors.FromGRPC(err) } if a.fields == nil { a.fields = qr.Fields @@ -305,7 +302,7 @@ func (conn *vtgateConn) StreamExecute(ctx context.Context, query string, bindVar } stream, err 
:= conn.c.StreamExecute(ctx, req) if err != nil { - return nil, vterrors.FromGRPCError(err) + return nil, vterrors.FromGRPC(err) } return &streamExecuteAdapter{ recv: func() (*querypb.QueryResult, error) { @@ -333,7 +330,7 @@ func (conn *vtgateConn) StreamExecuteShards(ctx context.Context, query string, k } stream, err := conn.c.StreamExecuteShards(ctx, req) if err != nil { - return nil, vterrors.FromGRPCError(err) + return nil, vterrors.FromGRPC(err) } return &streamExecuteAdapter{ recv: func() (*querypb.QueryResult, error) { @@ -361,7 +358,7 @@ func (conn *vtgateConn) StreamExecuteKeyRanges(ctx context.Context, query string } stream, err := conn.c.StreamExecuteKeyRanges(ctx, req) if err != nil { - return nil, vterrors.FromGRPCError(err) + return nil, vterrors.FromGRPC(err) } return &streamExecuteAdapter{ recv: func() (*querypb.QueryResult, error) { @@ -389,7 +386,7 @@ func (conn *vtgateConn) StreamExecuteKeyspaceIds(ctx context.Context, query stri } stream, err := conn.c.StreamExecuteKeyspaceIds(ctx, req) if err != nil { - return nil, vterrors.FromGRPCError(err) + return nil, vterrors.FromGRPC(err) } return &streamExecuteAdapter{ recv: func() (*querypb.QueryResult, error) { @@ -409,7 +406,7 @@ func (conn *vtgateConn) Begin(ctx context.Context, singledb bool) (interface{}, } response, err := conn.c.Begin(ctx, request) if err != nil { - return nil, vterrors.FromGRPCError(err) + return nil, vterrors.FromGRPC(err) } return response.Session, nil } @@ -421,7 +418,7 @@ func (conn *vtgateConn) Commit(ctx context.Context, session interface{}, twopc b Atomic: twopc, } _, err := conn.c.Commit(ctx, request) - return vterrors.FromGRPCError(err) + return vterrors.FromGRPC(err) } func (conn *vtgateConn) Rollback(ctx context.Context, session interface{}) error { @@ -430,7 +427,7 @@ func (conn *vtgateConn) Rollback(ctx context.Context, session interface{}) error Session: session.(*vtgatepb.Session), } _, err := conn.c.Rollback(ctx, request) - return vterrors.FromGRPCError(err) + 
return vterrors.FromGRPC(err) } func (conn *vtgateConn) ResolveTransaction(ctx context.Context, dtid string) error { @@ -439,7 +436,7 @@ func (conn *vtgateConn) ResolveTransaction(ctx context.Context, dtid string) err Dtid: dtid, } _, err := conn.c.ResolveTransaction(ctx, request) - return vterrors.FromGRPCError(err) + return vterrors.FromGRPC(err) } func (conn *vtgateConn) MessageStream(ctx context.Context, keyspace string, shard string, keyRange *topodatapb.KeyRange, name string, callback func(*sqltypes.Result) error) error { @@ -452,14 +449,14 @@ func (conn *vtgateConn) MessageStream(ctx context.Context, keyspace string, shar } stream, err := conn.c.MessageStream(ctx, request) if err != nil { - return vterrors.FromGRPCError(err) + return vterrors.FromGRPC(err) } var fields []*querypb.Field for { r, err := stream.Recv() if err != nil { if err != io.EOF { - return vterrors.FromGRPCError(err) + return vterrors.FromGRPC(err) } return nil } @@ -482,7 +479,7 @@ func (conn *vtgateConn) MessageAck(ctx context.Context, keyspace string, name st } r, err := conn.c.MessageAck(ctx, request) if err != nil { - return 0, vterrors.FromGRPCError(err) + return 0, vterrors.FromGRPC(err) } return int64(r.Result.RowsAffected), nil } @@ -512,7 +509,7 @@ func (conn *vtgateConn) SplitQuery( } response, err := conn.c.SplitQuery(ctx, request) if err != nil { - return nil, vterrors.FromGRPCError(err) + return nil, vterrors.FromGRPC(err) } return response.Splits, nil } @@ -523,7 +520,7 @@ func (conn *vtgateConn) GetSrvKeyspace(ctx context.Context, keyspace string) (*t } response, err := conn.c.GetSrvKeyspace(ctx, request) if err != nil { - return nil, vterrors.FromGRPCError(err) + return nil, vterrors.FromGRPC(err) } return response.SrvKeyspace, nil } @@ -535,10 +532,7 @@ type updateStreamAdapter struct { func (a *updateStreamAdapter) Recv() (*querypb.StreamEvent, int64, error) { r, err := a.stream.Recv() if err != nil { - if err != io.EOF { - err = vterrors.FromGRPCError(err) - } - return 
nil, 0, err + return nil, 0, vterrors.FromGRPC(err) } return r.Event, r.ResumeTimestamp, nil } @@ -555,7 +549,7 @@ func (conn *vtgateConn) UpdateStream(ctx context.Context, keyspace string, shard } stream, err := conn.c.UpdateStream(ctx, req) if err != nil { - return nil, vterrors.FromGRPCError(err) + return nil, vterrors.FromGRPC(err) } return &updateStreamAdapter{ stream: stream, diff --git a/go/vt/vtgate/grpcvtgateservice/server.go b/go/vt/vtgate/grpcvtgateservice/server.go index e67c5335ef3..91728004067 100644 --- a/go/vt/vtgate/grpcvtgateservice/server.go +++ b/go/vt/vtgate/grpcvtgateservice/server.go @@ -89,13 +89,13 @@ func (vtg *VTGate) Execute(ctx context.Context, request *vtgatepb.ExecuteRequest ctx = withCallerIDContext(ctx, request.CallerId) bv, err := querytypes.Proto3ToBindVariables(request.Query.BindVariables) if err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } result, err := vtg.server.Execute(ctx, string(request.Query.Sql), bv, request.Keyspace, request.TabletType, request.Session, request.NotInTransaction, request.Options) return &vtgatepb.ExecuteResponse{ Result: sqltypes.ResultToProto3(result), Session: request.Session, - Error: vterrors.VtRPCErrorFromVtError(err), + Error: vterrors.ToVTRPC(err), }, nil } @@ -105,7 +105,7 @@ func (vtg *VTGate) ExecuteShards(ctx context.Context, request *vtgatepb.ExecuteS ctx = withCallerIDContext(ctx, request.CallerId) bv, err := querytypes.Proto3ToBindVariables(request.Query.BindVariables) if err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } result, err := vtg.server.ExecuteShards(ctx, string(request.Query.Sql), @@ -119,7 +119,7 @@ func (vtg *VTGate) ExecuteShards(ctx context.Context, request *vtgatepb.ExecuteS return &vtgatepb.ExecuteShardsResponse{ Result: sqltypes.ResultToProto3(result), Session: request.Session, - Error: vterrors.VtRPCErrorFromVtError(err), + Error: vterrors.ToVTRPC(err), }, nil } @@ -129,7 +129,7 @@ func (vtg 
*VTGate) ExecuteKeyspaceIds(ctx context.Context, request *vtgatepb.Exe ctx = withCallerIDContext(ctx, request.CallerId) bv, err := querytypes.Proto3ToBindVariables(request.Query.BindVariables) if err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } result, err := vtg.server.ExecuteKeyspaceIds(ctx, string(request.Query.Sql), @@ -143,7 +143,7 @@ func (vtg *VTGate) ExecuteKeyspaceIds(ctx context.Context, request *vtgatepb.Exe return &vtgatepb.ExecuteKeyspaceIdsResponse{ Result: sqltypes.ResultToProto3(result), Session: request.Session, - Error: vterrors.VtRPCErrorFromVtError(err), + Error: vterrors.ToVTRPC(err), }, nil } @@ -153,7 +153,7 @@ func (vtg *VTGate) ExecuteKeyRanges(ctx context.Context, request *vtgatepb.Execu ctx = withCallerIDContext(ctx, request.CallerId) bv, err := querytypes.Proto3ToBindVariables(request.Query.BindVariables) if err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } result, err := vtg.server.ExecuteKeyRanges(ctx, string(request.Query.Sql), @@ -167,7 +167,7 @@ func (vtg *VTGate) ExecuteKeyRanges(ctx context.Context, request *vtgatepb.Execu return &vtgatepb.ExecuteKeyRangesResponse{ Result: sqltypes.ResultToProto3(result), Session: request.Session, - Error: vterrors.VtRPCErrorFromVtError(err), + Error: vterrors.ToVTRPC(err), }, nil } @@ -177,7 +177,7 @@ func (vtg *VTGate) ExecuteEntityIds(ctx context.Context, request *vtgatepb.Execu ctx = withCallerIDContext(ctx, request.CallerId) bv, err := querytypes.Proto3ToBindVariables(request.Query.BindVariables) if err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } result, err := vtg.server.ExecuteEntityIds(ctx, string(request.Query.Sql), @@ -192,7 +192,7 @@ func (vtg *VTGate) ExecuteEntityIds(ctx context.Context, request *vtgatepb.Execu return &vtgatepb.ExecuteEntityIdsResponse{ Result: sqltypes.ResultToProto3(result), Session: request.Session, - Error: vterrors.VtRPCErrorFromVtError(err), 
+ Error: vterrors.ToVTRPC(err), }, nil } @@ -206,7 +206,7 @@ func (vtg *VTGate) ExecuteBatch(ctx context.Context, request *vtgatepb.ExecuteBa for queryNum, query := range request.Queries { bv, err := querytypes.Proto3ToBindVariables(query.BindVariables) if err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } sqlQueries[queryNum] = query.Sql bindVars[queryNum] = bv @@ -215,7 +215,7 @@ func (vtg *VTGate) ExecuteBatch(ctx context.Context, request *vtgatepb.ExecuteBa return &vtgatepb.ExecuteBatchResponse{ Results: sqltypes.QueryResponsesToProto3(results), Session: request.Session, - Error: vterrors.VtRPCErrorFromVtError(err), + Error: vterrors.ToVTRPC(err), }, nil } @@ -232,7 +232,7 @@ func (vtg *VTGate) ExecuteBatchShards(ctx context.Context, request *vtgatepb.Exe return &vtgatepb.ExecuteBatchShardsResponse{ Results: sqltypes.ResultsToProto3(result), Session: request.Session, - Error: vterrors.VtRPCErrorFromVtError(err), + Error: vterrors.ToVTRPC(err), }, nil } @@ -250,7 +250,7 @@ func (vtg *VTGate) ExecuteBatchKeyspaceIds(ctx context.Context, request *vtgatep return &vtgatepb.ExecuteBatchKeyspaceIdsResponse{ Results: sqltypes.ResultsToProto3(result), Session: request.Session, - Error: vterrors.VtRPCErrorFromVtError(err), + Error: vterrors.ToVTRPC(err), }, nil } @@ -260,7 +260,7 @@ func (vtg *VTGate) StreamExecute(request *vtgatepb.StreamExecuteRequest, stream ctx := withCallerIDContext(stream.Context(), request.CallerId) bv, err := querytypes.Proto3ToBindVariables(request.Query.BindVariables) if err != nil { - return vterrors.ToGRPCError(err) + return vterrors.ToGRPC(err) } vtgErr := vtg.server.StreamExecute(ctx, string(request.Query.Sql), @@ -273,7 +273,7 @@ func (vtg *VTGate) StreamExecute(request *vtgatepb.StreamExecuteRequest, stream Result: sqltypes.ResultToProto3(value), }) }) - return vterrors.ToGRPCError(vtgErr) + return vterrors.ToGRPC(vtgErr) } // StreamExecuteShards is the RPC version of vtgateservice.VTGateService 
method @@ -282,7 +282,7 @@ func (vtg *VTGate) StreamExecuteShards(request *vtgatepb.StreamExecuteShardsRequ ctx := withCallerIDContext(stream.Context(), request.CallerId) bv, err := querytypes.Proto3ToBindVariables(request.Query.BindVariables) if err != nil { - return vterrors.ToGRPCError(err) + return vterrors.ToGRPC(err) } vtgErr := vtg.server.StreamExecuteShards(ctx, string(request.Query.Sql), @@ -296,7 +296,7 @@ func (vtg *VTGate) StreamExecuteShards(request *vtgatepb.StreamExecuteShardsRequ Result: sqltypes.ResultToProto3(value), }) }) - return vterrors.ToGRPCError(vtgErr) + return vterrors.ToGRPC(vtgErr) } // StreamExecuteKeyspaceIds is the RPC version of @@ -306,7 +306,7 @@ func (vtg *VTGate) StreamExecuteKeyspaceIds(request *vtgatepb.StreamExecuteKeysp ctx := withCallerIDContext(stream.Context(), request.CallerId) bv, err := querytypes.Proto3ToBindVariables(request.Query.BindVariables) if err != nil { - return vterrors.ToGRPCError(err) + return vterrors.ToGRPC(err) } vtgErr := vtg.server.StreamExecuteKeyspaceIds(ctx, string(request.Query.Sql), @@ -320,7 +320,7 @@ func (vtg *VTGate) StreamExecuteKeyspaceIds(request *vtgatepb.StreamExecuteKeysp Result: sqltypes.ResultToProto3(value), }) }) - return vterrors.ToGRPCError(vtgErr) + return vterrors.ToGRPC(vtgErr) } // StreamExecuteKeyRanges is the RPC version of @@ -330,7 +330,7 @@ func (vtg *VTGate) StreamExecuteKeyRanges(request *vtgatepb.StreamExecuteKeyRang ctx := withCallerIDContext(stream.Context(), request.CallerId) bv, err := querytypes.Proto3ToBindVariables(request.Query.BindVariables) if err != nil { - return vterrors.ToGRPCError(err) + return vterrors.ToGRPC(err) } vtgErr := vtg.server.StreamExecuteKeyRanges(ctx, string(request.Query.Sql), @@ -344,7 +344,7 @@ func (vtg *VTGate) StreamExecuteKeyRanges(request *vtgatepb.StreamExecuteKeyRang Result: sqltypes.ResultToProto3(value), }) }) - return vterrors.ToGRPCError(vtgErr) + return vterrors.ToGRPC(vtgErr) } // Begin is the RPC version of 
vtgateservice.VTGateService method @@ -357,7 +357,7 @@ func (vtg *VTGate) Begin(ctx context.Context, request *vtgatepb.BeginRequest) (r Session: session, }, nil } - return nil, vterrors.ToGRPCError(vtgErr) + return nil, vterrors.ToGRPC(vtgErr) } // Commit is the RPC version of vtgateservice.VTGateService method @@ -369,7 +369,7 @@ func (vtg *VTGate) Commit(ctx context.Context, request *vtgatepb.CommitRequest) if vtgErr == nil { return response, nil } - return nil, vterrors.ToGRPCError(vtgErr) + return nil, vterrors.ToGRPC(vtgErr) } // Rollback is the RPC version of vtgateservice.VTGateService method @@ -381,7 +381,7 @@ func (vtg *VTGate) Rollback(ctx context.Context, request *vtgatepb.RollbackReque if vtgErr == nil { return response, nil } - return nil, vterrors.ToGRPCError(vtgErr) + return nil, vterrors.ToGRPC(vtgErr) } // ResolveTransaction is the RPC version of vtgateservice.VTGateService method @@ -393,7 +393,7 @@ func (vtg *VTGate) ResolveTransaction(ctx context.Context, request *vtgatepb.Res if vtgErr == nil { return response, nil } - return nil, vterrors.ToGRPCError(vtgErr) + return nil, vterrors.ToGRPC(vtgErr) } // MessageStream is the RPC version of vtgateservice.VTGateService method @@ -405,7 +405,7 @@ func (vtg *VTGate) MessageStream(request *vtgatepb.MessageStreamRequest, stream Result: sqltypes.ResultToProto3(qr), }) }) - return vterrors.ToGRPCError(vtgErr) + return vterrors.ToGRPC(vtgErr) } // MessageAck is the RPC version of vtgateservice.VTGateService method @@ -414,7 +414,7 @@ func (vtg *VTGate) MessageAck(ctx context.Context, request *vtgatepb.MessageAckR ctx = withCallerIDContext(ctx, request.CallerId) count, vtgErr := vtg.server.MessageAck(ctx, request.Keyspace, request.Name, request.Ids) if vtgErr != nil { - return nil, vterrors.ToGRPCError(vtgErr) + return nil, vterrors.ToGRPC(vtgErr) } return &querypb.MessageAckResponse{ Result: &querypb.QueryResult{ @@ -430,7 +430,7 @@ func (vtg *VTGate) SplitQuery(ctx context.Context, request 
*vtgatepb.SplitQueryR ctx = withCallerIDContext(ctx, request.CallerId) bv, err := querytypes.Proto3ToBindVariables(request.Query.BindVariables) if err != nil { - return nil, vterrors.ToGRPCError(err) + return nil, vterrors.ToGRPC(err) } splits, vtgErr := vtg.server.SplitQuery( ctx, @@ -442,7 +442,7 @@ func (vtg *VTGate) SplitQuery(ctx context.Context, request *vtgatepb.SplitQueryR request.NumRowsPerQueryPart, request.Algorithm) if vtgErr != nil { - return nil, vterrors.ToGRPCError(vtgErr) + return nil, vterrors.ToGRPC(vtgErr) } return &vtgatepb.SplitQueryResponse{ Splits: splits, @@ -454,7 +454,7 @@ func (vtg *VTGate) GetSrvKeyspace(ctx context.Context, request *vtgatepb.GetSrvK defer vtg.server.HandlePanic(&err) sk, vtgErr := vtg.server.GetSrvKeyspace(ctx, request.Keyspace) if vtgErr != nil { - return nil, vterrors.ToGRPCError(vtgErr) + return nil, vterrors.ToGRPC(vtgErr) } return &vtgatepb.GetSrvKeyspaceResponse{ SrvKeyspace: sk, @@ -478,7 +478,7 @@ func (vtg *VTGate) UpdateStream(request *vtgatepb.UpdateStreamRequest, stream vt ResumeTimestamp: resumeTimestamp, }) }) - return vterrors.ToGRPCError(vtgErr) + return vterrors.ToGRPC(vtgErr) } func init() { diff --git a/go/vt/vtgate/masterbuffer/masterbuffer.go b/go/vt/vtgate/masterbuffer/masterbuffer.go index c69eff62b8c..c96569b2918 100644 --- a/go/vt/vtgate/masterbuffer/masterbuffer.go +++ b/go/vt/vtgate/masterbuffer/masterbuffer.go @@ -16,7 +16,6 @@ but will not return transient errors during the buffering time. package masterbuffer import ( - "errors" "flag" "sync" "time" @@ -47,10 +46,7 @@ var ( var timeSleep = time.Sleep // errBufferFull is the error returned a buffer request is rejected because the buffer is full. 
-var errBufferFull = vterrors.FromError( - vtrpcpb.Code_UNAVAILABLE, - errors.New("master request buffer full, rejecting request"), -) +var errBufferFull = vterrors.New(vtrpcpb.Code_UNAVAILABLE, "master request buffer full, rejecting request") // FakeBuffer will pretend to buffer master requests in VTGate. // Requests *will NOT actually be buffered*, they will just be delayed. diff --git a/go/vt/vtgate/resolver.go b/go/vt/vtgate/resolver.go index fcf3d801707..8afc2f99838 100644 --- a/go/vt/vtgate/resolver.go +++ b/go/vt/vtgate/resolver.go @@ -63,10 +63,7 @@ func isRetryableError(err error) bool { // on being able to uniquely route a write. func (res *Resolver) ExecuteKeyspaceIds(ctx context.Context, sql string, bindVariables map[string]interface{}, keyspace string, keyspaceIds [][]byte, tabletType topodatapb.TabletType, session *vtgatepb.Session, notInTransaction bool, options *querypb.ExecuteOptions) (*sqltypes.Result, error) { if sqlannotation.IsDML(sql) && len(keyspaceIds) > 1 { - return nil, vterrors.FromError( - vtrpcpb.Code_INVALID_ARGUMENT, - fmt.Errorf("DML should not span multiple keyspace_ids"), - ) + return nil, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "DML should not span multiple keyspace_ids") } mapToShards := func(k string) (string, []string, error) { return mapKeyspaceIdsToShards( diff --git a/go/vt/vtgate/safe_session.go b/go/vt/vtgate/safe_session.go index c5ec9be60a4..e773474e1ce 100644 --- a/go/vt/vtgate/safe_session.go +++ b/go/vt/vtgate/safe_session.go @@ -5,7 +5,6 @@ package vtgate import ( - "fmt" "sync" "github.com/youtube/vitess/go/vt/vterrors" @@ -63,7 +62,7 @@ func (session *SafeSession) Append(shardSession *vtgatepb.Session_ShardSession) session.ShardSessions = append(session.ShardSessions, shardSession) if session.SingleDb && len(session.ShardSessions) > 1 { session.mustRollback = true - return vterrors.FromError(vtrpcpb.Code_INVALID_ARGUMENT, fmt.Errorf("multi-db transaction attempted: %v", session.ShardSessions)) + return 
vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "multi-db transaction attempted: %v", session.ShardSessions) } return nil } diff --git a/go/vt/vtgate/scatter_conn.go b/go/vt/vtgate/scatter_conn.go index 02d08551482..ed1c8659156 100644 --- a/go/vt/vtgate/scatter_conn.go +++ b/go/vt/vtgate/scatter_conn.go @@ -321,7 +321,7 @@ func (stc *ScatterConn) ExecuteBatch( stc.txConn.Rollback(ctx, session) } if allErrors.HasErrors() { - return nil, allErrors.AggrError(vterrors.AggregateVtGateErrors) + return nil, allErrors.AggrError(vterrors.Aggregate) } return results, nil } @@ -358,7 +358,7 @@ func (stc *ScatterConn) StreamExecute( return stc.processOneStreamingResult(&mu, &fieldSent, qr, callback) }) }) - return allErrors.AggrError(vterrors.AggregateVtGateErrors) + return allErrors.AggrError(vterrors.Aggregate) } // StreamExecuteMulti is like StreamExecute, @@ -382,7 +382,7 @@ func (stc *ScatterConn) StreamExecuteMulti( return stc.processOneStreamingResult(&mu, &fieldSent, qr, callback) }) }) - return allErrors.AggrError(vterrors.AggregateVtGateErrors) + return allErrors.AggrError(vterrors.Aggregate) } // MessageStream streams messages from the specified shards. @@ -395,7 +395,7 @@ func (stc *ScatterConn) MessageStream(ctx context.Context, keyspace string, shar return stc.processOneStreamingResult(&mu, &fieldSent, qr, callback) }) }) - return allErrors.AggrError(vterrors.AggregateVtGateErrors) + return allErrors.AggrError(vterrors.Aggregate) } // MessageAck acks messages across multiple shards. 
@@ -416,7 +416,7 @@ func (stc *ScatterConn) MessageAck(ctx context.Context, keyspace string, shardID mu.Unlock() return nil }) - return totalCount, allErrors.AggrError(vterrors.AggregateVtGateErrors) + return totalCount, allErrors.AggrError(vterrors.Aggregate) } // UpdateStream just sends the query to the gateway, @@ -488,7 +488,7 @@ func (stc *ScatterConn) SplitQuery( ) if allErrors.HasErrors() { - err := allErrors.AggrError(vterrors.AggregateVtGateErrors) + err := allErrors.AggrError(vterrors.Aggregate) return nil, err } // We shuffle the query-parts here. External frameworks like MapReduce may @@ -662,7 +662,7 @@ end: stc.txConn.Rollback(ctx, session) } if allErrors.HasErrors() { - return allErrors.AggrError(vterrors.AggregateVtGateErrors) + return allErrors.AggrError(vterrors.Aggregate) } return nil } diff --git a/go/vt/vtgate/topo_utils.go b/go/vt/vtgate/topo_utils.go index 25cb0a91b60..29ef91b0668 100644 --- a/go/vt/vtgate/topo_utils.go +++ b/go/vt/vtgate/topo_utils.go @@ -6,7 +6,6 @@ package vtgate import ( "encoding/hex" - "fmt" "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/key" @@ -47,9 +46,7 @@ func getAnyShard(ctx context.Context, topoServ topo.SrvTopoServer, cell, keyspac return "", "", err } if len(allShards) == 0 { - return "", "", vterrors.FromError(vtrpcpb.Code_INVALID_ARGUMENT, - fmt.Errorf("No shards found for this tabletType"), - ) + return "", "", vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "No shards found for this tabletType") } return keyspace, allShards[0].Name, nil } @@ -57,10 +54,7 @@ func getAnyShard(ctx context.Context, topoServ topo.SrvTopoServer, cell, keyspac func getKeyspaceShards(ctx context.Context, topoServ topo.SrvTopoServer, cell, keyspace string, tabletType topodatapb.TabletType) (string, *topodatapb.SrvKeyspace, []*topodatapb.ShardReference, error) { srvKeyspace, err := topoServ.GetSrvKeyspace(ctx, cell, keyspace) if err != nil { - return "", nil, nil, vterrors.NewVitessError( - 
vtrpcpb.Code_INTERNAL, err, - "keyspace %v fetch error: %v", keyspace, err, - ) + return "", nil, nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "keyspace %v fetch error: %v", keyspace, err) } // check if the keyspace has been redirected for this tabletType. @@ -69,29 +63,21 @@ func getKeyspaceShards(ctx context.Context, topoServ topo.SrvTopoServer, cell, k keyspace = sf.Keyspace srvKeyspace, err = topoServ.GetSrvKeyspace(ctx, cell, keyspace) if err != nil { - return "", nil, nil, vterrors.NewVitessError( - vtrpcpb.Code_INTERNAL, err, - "keyspace %v fetch error: %v", keyspace, err, - ) + return "", nil, nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "keyspace %v fetch error: %v", keyspace, err) } } } partition := topoproto.SrvKeyspaceGetPartition(srvKeyspace, tabletType) if partition == nil { - return "", nil, nil, vterrors.NewVitessError( - vtrpcpb.Code_INTERNAL, err, - "No partition found for tabletType %v in keyspace %v", topoproto.TabletTypeLString(tabletType), keyspace, - ) + return "", nil, nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "No partition found for tabletType %v in keyspace %v", topoproto.TabletTypeLString(tabletType), keyspace) } return keyspace, srvKeyspace, partition.ShardReferences, nil } func getShardForKeyspaceID(allShards []*topodatapb.ShardReference, keyspaceID []byte) (string, error) { if len(allShards) == 0 { - return "", vterrors.FromError(vtrpcpb.Code_INVALID_ARGUMENT, - fmt.Errorf("No shards found for this tabletType"), - ) + return "", vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "No shards found for this tabletType") } for _, shardReference := range allShards { @@ -99,9 +85,7 @@ func getShardForKeyspaceID(allShards []*topodatapb.ShardReference, keyspaceID [] return shardReference.Name, nil } } - return "", vterrors.FromError(vtrpcpb.Code_INVALID_ARGUMENT, - fmt.Errorf("KeyspaceId %v didn't match any shards %+v", hex.EncodeToString(keyspaceID), allShards), - ) + return "", vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "KeyspaceId %v didn't match 
any shards %+v", hex.EncodeToString(keyspaceID), allShards) } func mapEntityIdsToShards(ctx context.Context, topoServ topo.SrvTopoServer, cell, keyspace string, entityIds []*vtgatepb.ExecuteEntityIdsRequest_EntityId, tabletType topodatapb.TabletType) (string, map[string][]interface{}, error) { @@ -179,9 +163,7 @@ func mapExactShards(ctx context.Context, topoServ topo.SrvTopoServer, cell, keys } shardnum++ } - return keyspace, nil, vterrors.FromError(vtrpcpb.Code_INVALID_ARGUMENT, - fmt.Errorf("keyrange %v does not exactly match shards", key.KeyRangeString(kr)), - ) + return keyspace, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "keyrange %v does not exactly match shards", key.KeyRangeString(kr)) } func boundShardQueriesToScatterBatchRequest(boundQueries []*vtgatepb.BoundShardQuery) (*scatterBatchRequest, error) { diff --git a/go/vt/vtgate/tx_conn.go b/go/vt/vtgate/tx_conn.go index 907d6cbd490..9335f23bcb2 100644 --- a/go/vt/vtgate/tx_conn.go +++ b/go/vt/vtgate/tx_conn.go @@ -5,8 +5,6 @@ package vtgate import ( - "errors" - "fmt" "sync" "golang.org/x/net/context" @@ -37,10 +35,10 @@ func NewTxConn(gw gateway.Gateway) *TxConn { // is used to ensure atomicity. func (txc *TxConn) Commit(ctx context.Context, twopc bool, session *SafeSession) error { if session == nil { - return vterrors.FromError(vtrpcpb.Code_INVALID_ARGUMENT, errors.New("cannot commit: empty session")) + return vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "cannot commit: empty session") } if !session.InTransaction() { - return vterrors.FromError(vtrpcpb.Code_ABORTED, errors.New("cannot commit: not in transaction")) + return vterrors.New(vtrpcpb.Code_ABORTED, "cannot commit: not in transaction") } if twopc { return txc.commit2PC(ctx, session) @@ -155,7 +153,7 @@ func (txc *TxConn) Resolve(ctx context.Context, dtid string) error { } default: // Should never happen. 
- return vterrors.FromError(vtrpcpb.Code_INTERNAL, fmt.Errorf("invalid state: %v", transaction.State)) + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid state: %v", transaction.State) } return nil } @@ -199,7 +197,7 @@ func (txc *TxConn) runSessions(shardSessions []*vtgatepb.Session_ShardSession, a }(s) } wg.Wait() - return allErrors.AggrError(vterrors.AggregateVtGateErrors) + return allErrors.AggrError(vterrors.Aggregate) } // runTargets executes the action for all targets in parallel and returns a consolildated error. @@ -220,5 +218,5 @@ func (txc *TxConn) runTargets(targets []*querypb.Target, action func(*querypb.Ta }(t) } wg.Wait() - return allErrors.AggrError(vterrors.AggregateVtGateErrors) + return allErrors.AggrError(vterrors.Aggregate) } diff --git a/go/vt/vtgate/tx_conn_test.go b/go/vt/vtgate/tx_conn_test.go index 3312b859995..4566cd95c5c 100644 --- a/go/vt/vtgate/tx_conn_test.go +++ b/go/vt/vtgate/tx_conn_test.go @@ -6,7 +6,6 @@ package vtgate import ( "context" - "fmt" "reflect" "strings" "testing" @@ -592,7 +591,7 @@ func TestTxConnMultiGoSessions(t *testing.T) { }, }} err := txc.runSessions(input, func(s *vtgatepb.Session_ShardSession) error { - return vterrors.FromError(vtrpcpb.Code_INTERNAL, fmt.Errorf("err %s", s.Target.Keyspace)) + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "err %s", s.Target.Keyspace) }) want := "err 0" if err == nil || err.Error() != want { @@ -609,7 +608,7 @@ func TestTxConnMultiGoSessions(t *testing.T) { }, }} err = txc.runSessions(input, func(s *vtgatepb.Session_ShardSession) error { - return vterrors.FromError(vtrpcpb.Code_INTERNAL, fmt.Errorf("err %s", s.Target.Keyspace)) + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "err %s", s.Target.Keyspace) }) want = "err 0\nerr 1" if err == nil || err.Error() != want { @@ -634,7 +633,7 @@ func TestTxConnMultiGoTargets(t *testing.T) { Keyspace: "0", }} err := txc.runTargets(input, func(t *querypb.Target) error { - return vterrors.FromError(vtrpcpb.Code_INTERNAL, 
fmt.Errorf("err %s", t.Keyspace)) + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "err %s", t.Keyspace) }) want := "err 0" if err == nil || err.Error() != want { @@ -647,7 +646,7 @@ func TestTxConnMultiGoTargets(t *testing.T) { Keyspace: "1", }} err = txc.runTargets(input, func(t *querypb.Target) error { - return vterrors.FromError(vtrpcpb.Code_INTERNAL, fmt.Errorf("err %s", t.Keyspace)) + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "err %s", t.Keyspace) }) want = "err 0\nerr 1" if err == nil || err.Error() != want { diff --git a/go/vt/vtgate/vtgate.go b/go/vt/vtgate/vtgate.go index 29d0bccba92..884a8abbf30 100644 --- a/go/vt/vtgate/vtgate.go +++ b/go/vt/vtgate/vtgate.go @@ -7,7 +7,6 @@ package vtgate import ( - "errors" "flag" "fmt" "math" @@ -628,7 +627,7 @@ func (vtg *VTGate) StreamExecuteShards(ctx context.Context, sql string, bindVari // Begin begins a transaction. It has to be concluded by a Commit or Rollback. func (vtg *VTGate) Begin(ctx context.Context, singledb bool) (*vtgatepb.Session, error) { if !singledb && vtg.transactionMode == TxSingle { - return nil, vterrors.FromError(vtrpcpb.Code_INVALID_ARGUMENT, errors.New("multi-db transaction disallowed")) + return nil, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "multi-db transaction disallowed") } return &vtgatepb.Session{ InTransaction: true, @@ -641,7 +640,7 @@ func (vtg *VTGate) Commit(ctx context.Context, twopc bool, session *vtgatepb.Ses if twopc && vtg.transactionMode != TxTwoPC { // Rollback the transaction to prevent future deadlocks. 
vtg.txConn.Rollback(ctx, NewSafeSession(session)) - return vterrors.FromError(vtrpcpb.Code_INVALID_ARGUMENT, errors.New("2pc transaction disallowed")) + return vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "2pc transaction disallowed") } return formatError(vtg.txConn.Commit(ctx, twopc, NewSafeSession(session))) } @@ -898,18 +897,14 @@ func recordAndAnnotateError(err error, statsKey []string, request map[string]int case vtrpcpb.Code_UNKNOWN, vtrpcpb.Code_INTERNAL, vtrpcpb.Code_DATA_LOSS: logger.Errorf("%v, request: %+v", err, request) } - - // Suffix the error with our address. - s := fmt.Sprintf(", vtgate: %v", servenv.ListeningURL.String()) - return vterrors.WithSuffix(err, s) + return vterrors.Errorf(vterrors.Code(err), "vtgate: %s: %v", servenv.ListeningURL.String(), err) } func formatError(err error) error { if err == nil { return nil } - s := fmt.Sprintf(", vtgate: %v", servenv.ListeningURL.String()) - return vterrors.WithSuffix(err, s) + return vterrors.Errorf(vterrors.Code(err), "vtgate: %s: %v", servenv.ListeningURL.String(), err) } // HandlePanic recovers from panics, and logs / increment counters diff --git a/go/vt/vtgate/vtgate_test.go b/go/vt/vtgate/vtgate_test.go index 11dccd54b69..f477fa91be6 100644 --- a/go/vt/vtgate/vtgate_test.go +++ b/go/vt/vtgate/vtgate_test.go @@ -286,7 +286,7 @@ func TestVTGateExecuteWithKeyspace(t *testing.T) { nil, false, nil) - want := "keyspace aa not found in vschema, vtgate: " + want := "vtgate: : keyspace aa not found in vschema" if err == nil || err.Error() != want { t.Errorf("Execute: %v, want %s", err, want) } diff --git a/go/vt/vtgate/vtgateconntest/client.go b/go/vt/vtgate/vtgateconntest/client.go index f161744231f..d7103844f8c 100644 --- a/go/vt/vtgate/vtgateconntest/client.go +++ b/go/vt/vtgate/vtgateconntest/client.go @@ -44,7 +44,7 @@ type fakeVTGateService struct { const expectedErrMatch string = "test vtgate error" const expectedCode vtrpcpb.Code = vtrpcpb.Code_INVALID_ARGUMENT -var errTestVtGateError = 
vterrors.FromError(expectedCode, errors.New(expectedErrMatch)) +var errTestVtGateError = vterrors.New(expectedCode, expectedErrMatch) func newContext() context.Context { ctx := context.Background() @@ -989,10 +989,6 @@ func verifyError(t *testing.T, err error, method string) { if code != expectedCode { t.Errorf("Unexpected error code from %s: got %v, wanted %v", method, code, expectedCode) } - // verify error type - if _, ok := err.(*vterrors.VitessError); !ok { - t.Errorf("Unexpected error type from %s: got %v, wanted *vterrors.VitessError", method, reflect.TypeOf(err)) - } verifyErrorString(t, err, method) } diff --git a/go/vt/worker/command.go b/go/vt/worker/command.go index 222e4a9b455..981d1066042 100644 --- a/go/vt/worker/command.go +++ b/go/vt/worker/command.go @@ -126,7 +126,7 @@ func (wi *Instance) RunCommand(ctx context.Context, args []string, wr *wrangler. } done, err := wi.setAndStartWorker(ctx, wrk, wr) if err != nil { - return nil, nil, vterrors.WithPrefix("cannot set worker: ", err) + return nil, nil, vterrors.Errorf(vterrors.Code(err), "cannot set worker: %v", err) } return wrk, done, nil } diff --git a/go/vt/worker/grpcvtworkerclient/client.go b/go/vt/worker/grpcvtworkerclient/client.go index e8a680c23bb..afbc5b8cd79 100644 --- a/go/vt/worker/grpcvtworkerclient/client.go +++ b/go/vt/worker/grpcvtworkerclient/client.go @@ -43,7 +43,7 @@ func gRPCVtworkerClientFactory(addr string, dialTimeout time.Duration) (vtworker } cc, err := grpc.Dial(addr, opt, grpc.WithBlock(), grpc.WithTimeout(dialTimeout)) if err != nil { - return nil, vterrors.NewVitessError(vtrpcpb.Code_DEADLINE_EXCEEDED, err, "grpc.Dial() err: %v", err) + return nil, vterrors.Errorf(vtrpcpb.Code_DEADLINE_EXCEEDED, "grpc.Dial() err: %v", err) } c := vtworkerservicepb.NewVtworkerClient(cc) @@ -60,7 +60,7 @@ type eventStreamAdapter struct { func (e *eventStreamAdapter) Recv() (*logutilpb.Event, error) { le, err := e.stream.Recv() if err != nil { - return nil, vterrors.FromGRPCError(err) + 
return nil, vterrors.FromGRPC(err) } return le.Event, nil } @@ -73,7 +73,7 @@ func (client *gRPCVtworkerClient) ExecuteVtworkerCommand(ctx context.Context, ar stream, err := client.c.ExecuteVtworkerCommand(ctx, query) if err != nil { - return nil, vterrors.FromGRPCError(err) + return nil, vterrors.FromGRPC(err) } return &eventStreamAdapter{stream}, nil } diff --git a/go/vt/worker/grpcvtworkerserver/server.go b/go/vt/worker/grpcvtworkerserver/server.go index 5206c45d592..9331564f299 100644 --- a/go/vt/worker/grpcvtworkerserver/server.go +++ b/go/vt/worker/grpcvtworkerserver/server.go @@ -57,7 +57,7 @@ func (s *VtworkerServer) ExecuteVtworkerCommand(args *vtworkerdatapb.ExecuteVtwo err = s.wi.WaitForCommand(worker, done) } - return vterrors.ToGRPCError(err) + return vterrors.ToGRPC(err) } // StartServer registers the VtworkerServer for RPCs diff --git a/go/vt/worker/instance.go b/go/vt/worker/instance.go index 3054735791b..1c5af73af23 100644 --- a/go/vt/worker/instance.go +++ b/go/vt/worker/instance.go @@ -75,8 +75,7 @@ func (wi *Instance) setAndStartWorker(ctx context.Context, wrk Worker, wr *wrang defer wi.currentWorkerMutex.Unlock() if wi.currentContext != nil { - return nil, vterrors.FromError(vtrpcpb.Code_UNAVAILABLE, - fmt.Errorf("A worker job is already in progress: %v", wi.currentWorker.StatusAsText())) + return nil, vterrors.Errorf(vtrpcpb.Code_UNAVAILABLE, "A worker job is already in progress: %v", wi.currentWorker.StatusAsText()) } if wi.currentWorker != nil { @@ -84,17 +83,14 @@ func (wi *Instance) setAndStartWorker(ctx context.Context, wrk Worker, wr *wrang const gracePeriod = 1 * time.Minute gracePeriodEnd := time.Now().Add(gracePeriod) if wi.lastRunStopTime.Before(gracePeriodEnd) { - return nil, vterrors.FromError(vtrpcpb.Code_UNAVAILABLE, - fmt.Errorf("A worker job was recently stopped (%f seconds ago): %v", - time.Now().Sub(wi.lastRunStopTime).Seconds(), - wi.currentWorker)) + return nil, vterrors.Errorf(vtrpcpb.Code_UNAVAILABLE, "A worker job was 
recently stopped (%f seconds ago): %v", time.Now().Sub(wi.lastRunStopTime).Seconds(), wi.currentWorker) } // QUERY_NOT_SERVED = FailedPrecondition => manual resolution required. - return nil, vterrors.FromError(vtrpcpb.Code_FAILED_PRECONDITION, - fmt.Errorf("The worker job was stopped %.1f minutes ago, but not reset. You have to reset it manually. Job: %v", - time.Now().Sub(wi.lastRunStopTime).Minutes(), - wi.currentWorker)) + return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, + "The worker job was stopped %.1f minutes ago, but not reset. You have to reset it manually. Job: %v", + time.Now().Sub(wi.lastRunStopTime).Minutes(), + wi.currentWorker) } wi.currentWorker = wrk @@ -141,7 +137,7 @@ func (wi *Instance) setAndStartWorker(ctx context.Context, wrk Worker, wr *wrang case <-wi.currentContext.Done(): // Context is done i.e. probably canceled. if wi.currentContext.Err() == context.Canceled { - err = vterrors.NewVitessError(vtrpcpb.Code_CANCELED, err, "vtworker command was canceled: %v", err) + err = vterrors.Errorf(vtrpcpb.Code_CANCELED, "vtworker command was canceled: %v", err) } default: } diff --git a/go/vt/worker/vtworkerclient/wrapper.go b/go/vt/worker/vtworkerclient/wrapper.go index f7f13990c57..963f787f417 100644 --- a/go/vt/worker/vtworkerclient/wrapper.go +++ b/go/vt/worker/vtworkerclient/wrapper.go @@ -25,14 +25,14 @@ func RunCommandAndWait(ctx context.Context, server string, args []string, recv f // TODO(mberlin): vtctlclient exposes dialTimeout as flag. If there are no use cases, remove it there as well to be consistent? 
client, err := New(server, 30*time.Second /* dialTimeout */) if err != nil { - return vterrors.WithPrefix("cannot dial to server "+server+": ", err) + return vterrors.Errorf(vterrors.Code(err), "cannot dial to server "+server+": %v", err) } defer client.Close() // run the command stream, err := client.ExecuteVtworkerCommand(ctx, args) if err != nil { - return vterrors.WithPrefix("cannot execute remote command: ", err) + return vterrors.Errorf(vterrors.Code(err), "cannot execute remote command: %v", err) } for { @@ -43,7 +43,7 @@ func RunCommandAndWait(ctx context.Context, server string, args []string, recv f case io.EOF: return nil default: - return vterrors.WithPrefix("stream error: ", err) + return vterrors.Errorf(vterrors.Code(err), "stream error: %v", err) } } } diff --git a/go/vt/worker/vtworkerclienttest/client_testsuite.go b/go/vt/worker/vtworkerclienttest/client_testsuite.go index 769d3ed4fab..ffbf9d662f7 100644 --- a/go/vt/worker/vtworkerclienttest/client_testsuite.go +++ b/go/vt/worker/vtworkerclienttest/client_testsuite.go @@ -101,7 +101,7 @@ func runVtworkerCommand(client vtworkerclient.Client, args []string) error { case io.EOF: return nil default: - return vterrors.WithPrefix("unexpected error when reading the stream: ", err) + return vterrors.Errorf(vterrors.Code(err), "unexpected error when reading the stream: %v", err) } } } diff --git a/proto/vtrpc.proto b/proto/vtrpc.proto index ca516f20507..d8ceb548955 100644 --- a/proto/vtrpc.proto +++ b/proto/vtrpc.proto @@ -33,7 +33,7 @@ message CallerID { string subcomponent = 3; } -// Code represnts canonical error codes. The names, numbers and comments +// Code represents canonical error codes. The names, numbers and comments // must match the ones defined by grpc: // https://godoc.org/google.golang.org/grpc/codes. enum Code { @@ -159,7 +159,7 @@ enum Code { // that we may end up with a different list of canonical error codes // than the ones defined by grpc. 
In hindisght, we realize that // the grpc error codes are fairly generic and mostly sufficient. -// In order to avoid confusion, thie type will be deprecated in +// In order to avoid confusion, this type will be deprecated in // favor of the new Code that matches exactly what grpc defines. // Some names below have a _LEGACY suffix. This is to prevent // name collisions with Code. diff --git a/py/vtdb/grpc_vtgate_client.py b/py/vtdb/grpc_vtgate_client.py index 7037c9e19d2..7c7ac23fa80 100644 --- a/py/vtdb/grpc_vtgate_client.py +++ b/py/vtdb/grpc_vtgate_client.py @@ -334,7 +334,7 @@ def _convert_exception(exc, *args, **kwargs): if vtgate_utils.throttler_err_re.search(details): return dbexceptions.ThrottledError(new_args) else: - return dbexceptions.TransientError(new_args) + return dbexceptions.TransientError(details, new_args) elif code == grpc.StatusCode.ALREADY_EXISTS: new_exc = _prune_integrity_error(details, new_args) elif code == grpc.StatusCode.FAILED_PRECONDITION: From 6410c4422935c21de7be9dde77d4fdf5f59cacfe Mon Sep 17 00:00:00 2001 From: dengchao12 Date: Thu, 23 Feb 2017 09:57:58 +0800 Subject: [PATCH 021/108] Cipher must be a legal UTF8 string. --- go/mysqlconn/server.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/go/mysqlconn/server.go b/go/mysqlconn/server.go index 72098b062ba..93e76fffaff 100644 --- a/go/mysqlconn/server.go +++ b/go/mysqlconn/server.go @@ -268,6 +268,14 @@ func (c *Conn) writeHandshakeV10(serverVersion string) ([]byte, error) { if _, err := rand.Read(cipher); err != nil { return nil, err } + + // Cipher must be a legal UTF8 string. + for i := 0; i < len(cipher); i++ { + cipher[i] &= 0x7f + if cipher[i] == '\x00' || cipher[i] == '$' { + cipher[i] += 1 + } + } pos += copy(data[pos:], cipher[:8]) // One filler byte, always 0. 
From b1b0a9a887fb610e66b447f775766c53a3496a1d Mon Sep 17 00:00:00 2001 From: Yipei Wang Date: Wed, 22 Feb 2017 18:56:29 -0800 Subject: [PATCH 022/108] workflow: clean the design for parallelrunner and horizontal resharding workflow. First round comments resolved. (addressing race condition warnings) --- go/vt/workflow/manager.go | 2 + go/vt/workflow/node.go | 32 +- go/vt/workflow/resharding/checkpoint.go | 4 - .../horizontal_resharding_workflow.go | 402 +++++++++--------- .../horizontal_resharding_workflow_test.go | 65 +-- go/vt/workflow/resharding/parallel_runner.go | 133 +++--- .../resharding/parallel_runner_test.go | 42 +- .../workflow/resharding/{task.go => tasks.go} | 75 ++-- go/vt/workflow/resharding/test_workflow.go | 181 ++++++++ 9 files changed, 550 insertions(+), 386 deletions(-) rename go/vt/workflow/resharding/{task.go => tasks.go} (56%) create mode 100644 go/vt/workflow/resharding/test_workflow.go diff --git a/go/vt/workflow/manager.go b/go/vt/workflow/manager.go index 1d48516912c..0910894192b 100644 --- a/go/vt/workflow/manager.go +++ b/go/vt/workflow/manager.go @@ -39,6 +39,8 @@ type Factory interface { // variable filled it. This Init method should fill in the // Name and Data attributes, based on the provided args. // This is called during the Manager.Create phase. + // TODO(yipeiw): We should extend the interface to pass the topology server + // as well. The topology server is needed in the resarding workflow. 
Init(w *workflowpb.Workflow, args []string) error // Instantiate loads a workflow from the proto representation diff --git a/go/vt/workflow/node.go b/go/vt/workflow/node.go index 01cb3c4a358..11aaa3bf951 100644 --- a/go/vt/workflow/node.go +++ b/go/vt/workflow/node.go @@ -191,13 +191,13 @@ func (n *Node) deepCopyFrom(otherNode *Node, copyChildren bool) error { *n = *otherNode n.Children = oldChildren - /*n.Actions = []*Action{} + n.Actions = []*Action{} for _, otherAction := range otherNode.Actions { action := &Action{} *action = *otherAction n.Actions = append(n.Actions, action) } - */ + if !copyChildren { return nil } @@ -220,6 +220,30 @@ func (n *Node) deepCopyFrom(otherNode *Node, copyChildren bool) error { return nil } +// GetChildByPath returns the child node given the relative path to this node. +// The caller must ensure that the node tree is not modified during the call. +func (n *Node) GetChildByPath(subPath string) (*Node, error) { + // Find the subnode if needed. + parts := strings.Split(subPath, "/") + + currentNode := n + for i := 0; i < len(parts); i++ { + childPathName := parts[i] + found := false + for _, child := range currentNode.Children { + if child.PathName == childPathName { + found = true + currentNode = child + break + } + } + if !found { + return nil, fmt.Errorf("node %v has no children named %v", currentNode.Path, childPathName) + } + } + return currentNode, nil +} + // ActionParameters describe an action initiated by the user. type ActionParameters struct { // Path is the path of the Node the action was performed on. 
@@ -397,10 +421,6 @@ func (m *NodeManager) Action(ctx context.Context, ap *ActionParameters) error { return n.Listener.Action(ctx, ap.Path, ap.Name) } -func (m *NodeManager) GetNodeByPath(nodePath string) (*Node, error) { - return m.getNodeByPath(nodePath) -} - func (m *NodeManager) getNodeByPath(nodePath string) (*Node, error) { m.mu.Lock() defer m.mu.Unlock() diff --git a/go/vt/workflow/resharding/checkpoint.go b/go/vt/workflow/resharding/checkpoint.go index 892030ccac3..7392675cb82 100644 --- a/go/vt/workflow/resharding/checkpoint.go +++ b/go/vt/workflow/resharding/checkpoint.go @@ -2,7 +2,6 @@ package resharding import ( "context" - "fmt" "sync" "github.com/golang/protobuf/proto" @@ -42,9 +41,6 @@ func (c *CheckpointWriter) UpdateTask(taskID string, status workflowpb.TaskState } t := c.checkpoint.Tasks[taskID] - - fmt.Printf("error message send to task %v: %v\n", t.Id, errorMessage) - t.State = status t.Error = errorMessage return c.saveLocked() diff --git a/go/vt/workflow/resharding/horizontal_resharding_workflow.go b/go/vt/workflow/resharding/horizontal_resharding_workflow.go index b7f66031456..61eb6ce18b6 100644 --- a/go/vt/workflow/resharding/horizontal_resharding_workflow.go +++ b/go/vt/workflow/resharding/horizontal_resharding_workflow.go @@ -3,7 +3,6 @@ package resharding // Package resharding contains a workflow for automatic horizontal resharding. // The workflow assumes that there are as many vtworker processes running as source shards. // Plus, these vtworker processes must be reachable via RPC. -// TO DO: it can be used to save checkpointer import ( "flag" @@ -32,6 +31,7 @@ const ( horizontalReshardingFactoryName = "horizontal_resharding" ) +// PhaseType is used to store the phase name in a workflow. type PhaseType string const ( @@ -44,110 +44,14 @@ const ( phaseMigrateMaster PhaseType = "migrate_master" ) -// HorizontalReshardingWorkflow contains meta-information and methods to -// control the horizontal resharding workflow. 
-type HorizontalReshardingWorkflow struct { - ctx context.Context - wr ReshardingWrangler - manager *workflow.Manager - topoServer topo.Server - wi *topo.WorkflowInfo - // logger is the logger we export UI logs from. - logger *logutil.MemoryLogger - - // rootUINode is the root node representing the workflow in the UI. - rootUINode *workflow.Node - copySchemaUINode *workflow.Node - cloneUINode *workflow.Node - waitForFilteredReplicationUINode *workflow.Node - diffUINode *workflow.Node - migrateRdonlyUINode *workflow.Node - migrateReplicaUINode *workflow.Node - migrateMasterUINode *workflow.Node - - checkpoint *workflowpb.WorkflowCheckpoint - checkpointWriter *CheckpointWriter -} - -// Run executes the horizontal resharding process. -// It implements the workflow.Workflow interface. -func (hw *HorizontalReshardingWorkflow) Run(ctx context.Context, manager *workflow.Manager, wi *topo.WorkflowInfo) error { - hw.ctx = ctx - hw.topoServer = manager.TopoServer() - hw.manager = manager - hw.wr = wrangler.New(logutil.NewConsoleLogger(), manager.TopoServer(), tmclient.NewTabletManagerClient()) - hw.wi = wi - hw.checkpointWriter = NewCheckpointWriter(hw.topoServer, hw.checkpoint, hw.wi) - - hw.rootUINode.Display = workflow.NodeDisplayDeterminate - hw.rootUINode.BroadcastChanges(true /* updateChildren */) - - if err := hw.runWorkflow(); err != nil { - return err - } - hw.setUIMessage(fmt.Sprintf("Horizontal Resharding is finished sucessfully.")) - return nil -} - -func (hw *HorizontalReshardingWorkflow) runWorkflow() error { - copySchemaTasks := hw.GetTasks(hw.checkpoint, phaseCopySchema) - copySchemaRunner := NewParallelRunner(hw.ctx, hw.manager.NodeManager(), hw.copySchemaUINode, hw.checkpointWriter, copySchemaTasks, hw.runCopySchema, PARALLEL) - if err := copySchemaRunner.Run(); err != nil { - return err - } - - cloneTasks := hw.GetTasks(hw.checkpoint, phaseClone) - cloneRunner := NewParallelRunner(hw.ctx, hw.manager.NodeManager(), hw.cloneUINode, hw.checkpointWriter, 
cloneTasks, hw.runSplitClone, PARALLEL) - if err := cloneRunner.Run(); err != nil { - return err - } - - waitForFilteredReplicationTasks := hw.GetTasks(hw.checkpoint, phaseWaitForFilteredReplication) - waitForFilteredReplicationRunner := NewParallelRunner(hw.ctx, hw.manager.NodeManager(), hw.waitForFilteredReplicationUINode, hw.checkpointWriter, waitForFilteredReplicationTasks, hw.runWaitForFilteredReplication, PARALLEL) - if err := waitForFilteredReplicationRunner.Run(); err != nil { - return err - } - - diffTasks := hw.GetTasks(hw.checkpoint, phaseDiff) - diffRunner := NewParallelRunner(hw.ctx, hw.manager.NodeManager(), hw.diffUINode, hw.checkpointWriter, diffTasks, hw.runSplitDiff, SEQUENTIAL) - if err := diffRunner.Run(); err != nil { - return err - } - - migrateRdonlyTasks := hw.GetTasks(hw.checkpoint, phaseMigrateRdonly) - migrateRdonlyRunner := NewParallelRunner(hw.ctx, hw.manager.NodeManager(), hw.migrateRdonlyUINode, hw.checkpointWriter, migrateRdonlyTasks, hw.runMigrate, SEQUENTIAL) - if err := migrateRdonlyRunner.Run(); err != nil { - return err - } - - migrateReplicaTasks := hw.GetTasks(hw.checkpoint, phaseMigrateRdonly) - migrateReplicaRunner := NewParallelRunner(hw.ctx, hw.manager.NodeManager(), hw.migrateReplicaUINode, hw.checkpointWriter, migrateReplicaTasks, hw.runMigrate, SEQUENTIAL) - if err := migrateReplicaRunner.Run(); err != nil { - return err - } - - migrateMasterTasks := hw.GetTasks(hw.checkpoint, phaseMigrateMaster) - migrateMasterRunner := NewParallelRunner(hw.ctx, hw.manager.NodeManager(), hw.migrateMasterUINode, hw.checkpointWriter, migrateMasterTasks, hw.runMigrate, SEQUENTIAL) - if err := migrateMasterRunner.Run(); err != nil { - return err - } - - return nil -} - -func (hw *HorizontalReshardingWorkflow) setUIMessage(message string) { - log.Infof("Horizontal resharding : %v.", message) - hw.rootUINode.Log = hw.logger.String() - hw.rootUINode.Message = message - hw.rootUINode.BroadcastChanges(false /* updateChildren */) -} - +// 
Register registers the HorizontalReshardingWorkflowFactory as a factory +// in the workflow framework. func Register() { workflow.Register(horizontalReshardingFactoryName, &HorizontalReshardingWorkflowFactory{}) } -// HorizontalReshardingWorkflowFactory is the factory to register -// the HorizontalReshardingWorkflow. +// HorizontalReshardingWorkflowFactory is the factory to create +// a horizontal resharding workflow. type HorizontalReshardingWorkflowFactory struct{} // Init is part of the workflow.Factory interface. @@ -166,9 +70,7 @@ func (*HorizontalReshardingWorkflowFactory) Init(w *workflowpb.Workflow, args [] vtworkers := strings.Split(*vtworkersStr, ",") w.Name = fmt.Sprintf("Horizontal resharding on keyspace %s", *keyspace) - ts := topo.Open() - defer ts.Close() - checkpoint, err := initCheckpoint(*keyspace, vtworkers, ts) + checkpoint, err := initCheckpoint(*keyspace, vtworkers) if err != nil { return err } @@ -192,179 +94,293 @@ func (*HorizontalReshardingWorkflowFactory) Instantiate(w *workflowpb.Workflow, hw := &HorizontalReshardingWorkflow{ checkpoint: checkpoint, rootUINode: rootNode, - copySchemaUINode: &workflow.Node{ - Name: "CopySchemaShard", - PathName: string(phaseCopySchema), - }, - cloneUINode: &workflow.Node{ - Name: "SplitClone", - PathName: string(phaseClone), - }, - waitForFilteredReplicationUINode: &workflow.Node{ - Name: "WaitForFilteredReplication", - PathName: string(phaseWaitForFilteredReplication), - }, - diffUINode: &workflow.Node{ - Name: "SplitDiff", - PathName: string(phaseDiff), - }, - migrateRdonlyUINode: &workflow.Node{ - Name: "MigrateServedTypeRDONLY", - PathName: string(phaseMigrateRdonly), - }, - migrateReplicaUINode: &workflow.Node{ - Name: "MigrateServedTypeREPLICA", - PathName: string(phaseMigrateReplica), - }, - migrateMasterUINode: &workflow.Node{ - Name: "MigrateServedTypeMASTER", - PathName: string(phaseMigrateMaster), - }, - logger: logutil.NewMemoryLogger(), + logger: logutil.NewMemoryLogger(), + } + 
copySchemaUINode := &workflow.Node{ + Name: "CopySchemaShard", + PathName: string(phaseCopySchema), + } + cloneUINode := &workflow.Node{ + Name: "SplitClone", + PathName: string(phaseClone), + } + waitForFilteredReplicationUINode := &workflow.Node{ + Name: "WaitForFilteredReplication", + PathName: string(phaseWaitForFilteredReplication), + } + diffUINode := &workflow.Node{ + Name: "SplitDiff", + PathName: string(phaseDiff), + } + migrateRdonlyUINode := &workflow.Node{ + Name: "MigrateServedTypeRDONLY", + PathName: string(phaseMigrateRdonly), + } + migrateReplicaUINode := &workflow.Node{ + Name: "MigrateServedTypeREPLICA", + PathName: string(phaseMigrateReplica), } + migrateMasterUINode := &workflow.Node{ + Name: "MigrateServedTypeMASTER", + PathName: string(phaseMigrateMaster), + } + hw.rootUINode.Children = []*workflow.Node{ - hw.copySchemaUINode, - hw.cloneUINode, - hw.waitForFilteredReplicationUINode, - hw.diffUINode, - hw.migrateRdonlyUINode, - hw.migrateReplicaUINode, - hw.migrateMasterUINode, + copySchemaUINode, + cloneUINode, + waitForFilteredReplicationUINode, + diffUINode, + migrateRdonlyUINode, + migrateReplicaUINode, + migrateMasterUINode, } - destinationShards := strings.Split(hw.checkpoint.Settings["destination_shards"], ",") sourceShards := strings.Split(hw.checkpoint.Settings["source_shards"], ",") + destinationShards := strings.Split(hw.checkpoint.Settings["destination_shards"], ",") - createUINodes(phaseCopySchema, destinationShards, hw.copySchemaUINode) - createUINodes(phaseClone, sourceShards, hw.cloneUINode) - createUINodes(phaseWaitForFilteredReplication, destinationShards, hw.waitForFilteredReplicationUINode) - createUINodes(phaseDiff, destinationShards, hw.diffUINode) - createUINodes(phaseMigrateRdonly, sourceShards, hw.migrateRdonlyUINode) - createUINodes(phaseMigrateReplica, sourceShards, hw.migrateReplicaUINode) - createUINodes(phaseMigrateMaster, sourceShards, hw.migrateMasterUINode) + if err := createUINodes(hw.rootUINode, 
phaseCopySchema, destinationShards); err != nil { + return hw, err + } + if err := createUINodes(hw.rootUINode, phaseClone, sourceShards); err != nil { + return hw, err + } + if err := createUINodes(hw.rootUINode, phaseWaitForFilteredReplication, destinationShards); err != nil { + return hw, err + } + if err := createUINodes(hw.rootUINode, phaseDiff, destinationShards); err != nil { + return hw, err + } + if err := createUINodes(hw.rootUINode, phaseMigrateRdonly, sourceShards); err != nil { + return hw, err + } + if err := createUINodes(hw.rootUINode, phaseMigrateReplica, sourceShards); err != nil { + return hw, err + } + if err := createUINodes(hw.rootUINode, phaseMigrateMaster, sourceShards); err != nil { + return hw, err + } return hw, nil } -func createUINodes(phaseName PhaseType, shards []string, rootNode *workflow.Node) { +func createUINodes(rootNode *workflow.Node, phaseName PhaseType, shards []string) error { + phaseNode, err := rootNode.GetChildByPath(string(phaseName)) + if err != nil { + return fmt.Errorf("fails to find phase node for: %v", phaseName) + } + for _, shard := range shards { - taskID := createTaskID(phaseName, shard) taskUINode := &workflow.Node{ Name: "Shard " + shard, - PathName: taskID, + PathName: shard, } - rootNode.Children = append(rootNode.Children, taskUINode) + phaseNode.Children = append(phaseNode.Children, taskUINode) } + return nil } // initCheckpoint initialize the checkpoint for the horizontal workflow. 
-func initCheckpoint(keyspace string, vtworkers []string, ts topo.Server) (*workflowpb.WorkflowCheckpoint, error) { - sourceShardList, destinationShardList, err := findSourceAndDestinationShards(ts, keyspace) +func initCheckpoint(keyspace string, vtworkers []string) (*workflowpb.WorkflowCheckpoint, error) { + sourceShards, destinationShards, err := findSourceAndDestinationShards(keyspace) if err != nil { return nil, err } - return initCheckpointFromShards(keyspace, vtworkers, sourceShardList, destinationShardList) + return initCheckpointFromShards(keyspace, vtworkers, sourceShards, destinationShards) } -func findSourceAndDestinationShards(ts topo.Server, keyspace string) ([]string, []string, error) { +func findSourceAndDestinationShards(keyspace string) ([]string, []string, error) { + ts := topo.Open() + defer ts.Close() + overlappingShards, err := topotools.FindOverlappingShards(context.Background(), ts, keyspace) if err != nil { return nil, nil, err } - var sourceShardList, destinationShardList []string + var sourceShards, destinationShards []string for _, os := range overlappingShards { - var sourceShard *topo.ShardInfo - var destinationShards []*topo.ShardInfo + var sourceShardInfo *topo.ShardInfo + var destinationShardInfos []*topo.ShardInfo // Judge which side is source shard by checking the number of servedTypes. 
if len(os.Left[0].ServedTypes) > 0 { - sourceShard = os.Left[0] - destinationShards = os.Right + sourceShardInfo = os.Left[0] + destinationShardInfos = os.Right } else { - sourceShard = os.Right[0] - destinationShards = os.Left + sourceShardInfo = os.Right[0] + destinationShardInfos = os.Left } - sourceShardList = append(sourceShardList, sourceShard.ShardName()) - for _, d := range destinationShards { - destinationShardList = append(destinationShardList, d.ShardName()) + sourceShards = append(sourceShards, sourceShardInfo.ShardName()) + for _, d := range destinationShardInfos { + destinationShards = append(destinationShards, d.ShardName()) } } - return sourceShardList, destinationShardList, nil + return sourceShards, destinationShards, nil } -func initCheckpointFromShards(keyspace string, vtworkers, sourceShardList, destinationShardList []string) (*workflowpb.WorkflowCheckpoint, error) { - taskMap := make(map[string]*workflowpb.Task) - initTasks(phaseCopySchema, destinationShardList, taskMap, func(i int, shard string) map[string]string { +func initCheckpointFromShards(keyspace string, vtworkers, sourceShards, destinationShards []string) (*workflowpb.WorkflowCheckpoint, error) { + if len(vtworkers) != len(sourceShards) { + return nil, fmt.Errorf("there are %v vtworkers, %v source shards: the number should be same", len(vtworkers), len(sourceShards)) + } + + tasks := make(map[string]*workflowpb.Task) + initTasks(tasks, phaseCopySchema, destinationShards, func(i int, shard string) map[string]string { return map[string]string{ - "source_shard": sourceShardList[0], - "destination_shard": shard, "keyspace": keyspace, + "source_shard": sourceShards[0], + "destination_shard": shard, } }) - - initTasks(phaseClone, sourceShardList, taskMap, func(i int, shard string) map[string]string { + initTasks(tasks, phaseClone, sourceShards, func(i int, shard string) map[string]string { return map[string]string{ + "keyspace": keyspace, "source_shard": shard, "vtworker": vtworkers[i], - 
"keyspace": keyspace, } }) - - initTasks(phaseWaitForFilteredReplication, destinationShardList, taskMap, func(i int, shard string) map[string]string { + initTasks(tasks, phaseWaitForFilteredReplication, destinationShards, func(i int, shard string) map[string]string { return map[string]string{ - "destination_shard": shard, "keyspace": keyspace, + "destination_shard": shard, } }) - - initTasks(phaseDiff, destinationShardList, taskMap, func(i int, shard string) map[string]string { + initTasks(tasks, phaseDiff, destinationShards, func(i int, shard string) map[string]string { return map[string]string{ - "destination_shard": shard, "keyspace": keyspace, + "destination_shard": shard, "vtworker": vtworkers[0], } }) - - initTasks(phaseMigrateRdonly, sourceShardList, taskMap, func(i int, shard string) map[string]string { + initTasks(tasks, phaseMigrateRdonly, sourceShards, func(i int, shard string) map[string]string { return map[string]string{ - "source_shard": shard, "keyspace": keyspace, + "source_shard": shard, "served_type": topodatapb.TabletType_RDONLY.String(), } }) - initTasks(phaseMigrateReplica, sourceShardList, taskMap, func(i int, shard string) map[string]string { + initTasks(tasks, phaseMigrateReplica, sourceShards, func(i int, shard string) map[string]string { return map[string]string{ - "source_shard": shard, "keyspace": keyspace, + "source_shard": shard, "served_type": topodatapb.TabletType_REPLICA.String(), } }) - initTasks(phaseMigrateMaster, sourceShardList, taskMap, func(i int, shard string) map[string]string { + initTasks(tasks, phaseMigrateMaster, sourceShards, func(i int, shard string) map[string]string { return map[string]string{ - "source_shard": shard, "keyspace": keyspace, + "source_shard": shard, "served_type": topodatapb.TabletType_MASTER.String(), } }) return &workflowpb.WorkflowCheckpoint{ CodeVersion: codeVersion, - Tasks: taskMap, + Tasks: tasks, Settings: map[string]string{ - "source_shards": strings.Join(sourceShardList, ","), - 
"destination_shards": strings.Join(destinationShardList, ","), + "source_shards": strings.Join(sourceShards, ","), + "destination_shards": strings.Join(destinationShards, ","), }, }, nil } -func initTasks(phase PhaseType, shards []string, taskMap map[string]*workflowpb.Task, getAttributes func(int, string) map[string]string) { +func initTasks(tasks map[string]*workflowpb.Task, phase PhaseType, shards []string, getAttributes func(int, string) map[string]string) { for i, shard := range shards { taskID := createTaskID(phase, shard) - taskMap[taskID] = &workflowpb.Task{ + tasks[taskID] = &workflowpb.Task{ Id: taskID, State: workflowpb.TaskState_TaskNotStarted, Attributes: getAttributes(i, shard), } } } + +// HorizontalReshardingWorkflow contains meta-information and methods to +// control the horizontal resharding workflow. +type HorizontalReshardingWorkflow struct { + ctx context.Context + wr ReshardingWrangler + manager *workflow.Manager + topoServer topo.Server + wi *topo.WorkflowInfo + // logger is the logger we export UI logs from. + logger *logutil.MemoryLogger + + // rootUINode is the root node representing the workflow in the UI. + rootUINode *workflow.Node + + checkpoint *workflowpb.WorkflowCheckpoint + checkpointWriter *CheckpointWriter +} + +// Run executes the horizontal resharding process. +// It implements the workflow.Workflow interface. 
+func (hw *HorizontalReshardingWorkflow) Run(ctx context.Context, manager *workflow.Manager, wi *topo.WorkflowInfo) error { + hw.ctx = ctx + hw.topoServer = manager.TopoServer() + hw.manager = manager + hw.wr = wrangler.New(logutil.NewConsoleLogger(), manager.TopoServer(), tmclient.NewTabletManagerClient()) + hw.wi = wi + hw.checkpointWriter = NewCheckpointWriter(hw.topoServer, hw.checkpoint, hw.wi) + + hw.rootUINode.Display = workflow.NodeDisplayDeterminate + hw.rootUINode.BroadcastChanges(true /* updateChildren */) + + if err := hw.runWorkflow(); err != nil { + return err + } + hw.setUIMessage(fmt.Sprintf("Horizontal Resharding is finished sucessfully.")) + return nil +} + +func (hw *HorizontalReshardingWorkflow) runWorkflow() error { + copySchemaTasks := hw.GetTasks(phaseCopySchema) + copySchemaRunner := NewParallelRunner(hw.ctx, hw.rootUINode, hw.checkpointWriter, copySchemaTasks, hw.runCopySchema, Parallel) + if err := copySchemaRunner.Run(); err != nil { + return err + } + + cloneTasks := hw.GetTasks(phaseClone) + cloneRunner := NewParallelRunner(hw.ctx, hw.rootUINode, hw.checkpointWriter, cloneTasks, hw.runSplitClone, Parallel) + if err := cloneRunner.Run(); err != nil { + return err + } + + waitForFilteredReplicationTasks := hw.GetTasks(phaseWaitForFilteredReplication) + waitForFilteredReplicationRunner := NewParallelRunner(hw.ctx, hw.rootUINode, hw.checkpointWriter, waitForFilteredReplicationTasks, hw.runWaitForFilteredReplication, Parallel) + if err := waitForFilteredReplicationRunner.Run(); err != nil { + return err + } + + diffTasks := hw.GetTasks(phaseDiff) + diffRunner := NewParallelRunner(hw.ctx, hw.rootUINode, hw.checkpointWriter, diffTasks, hw.runSplitDiff, Sequential) + if err := diffRunner.Run(); err != nil { + return err + } + + migrateRdonlyTasks := hw.GetTasks(phaseMigrateRdonly) + migrateRdonlyRunner := NewParallelRunner(hw.ctx, hw.rootUINode, hw.checkpointWriter, migrateRdonlyTasks, hw.runMigrate, Sequential) + if err := 
migrateRdonlyRunner.Run(); err != nil { + return err + } + + migrateReplicaTasks := hw.GetTasks(phaseMigrateReplica) + migrateReplicaRunner := NewParallelRunner(hw.ctx, hw.rootUINode, hw.checkpointWriter, migrateReplicaTasks, hw.runMigrate, Sequential) + if err := migrateReplicaRunner.Run(); err != nil { + return err + } + + migrateMasterTasks := hw.GetTasks(phaseMigrateMaster) + migrateMasterRunner := NewParallelRunner(hw.ctx, hw.rootUINode, hw.checkpointWriter, migrateMasterTasks, hw.runMigrate, Sequential) + if err := migrateMasterRunner.Run(); err != nil { + return err + } + + return nil +} + +func (hw *HorizontalReshardingWorkflow) setUIMessage(message string) { + log.Infof("Horizontal resharding : %v.", message) + hw.rootUINode.Log = hw.logger.String() + hw.rootUINode.Message = message + hw.rootUINode.BroadcastChanges(false /* updateChildren */) +} diff --git a/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go b/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go index 17fa0d525e5..fe5096e737d 100644 --- a/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go +++ b/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go @@ -39,28 +39,10 @@ func TestHorizontalResharding(t *testing.T) { t.Errorf("initialize checkpoint fails: %v", err) } - // Create the workflow. 
- ts := memorytopo.NewServer("cell") - w := &workflowpb.Workflow{ - Uuid: "test_hw", - FactoryName: horizontalReshardingFactoryName, - State: workflowpb.WorkflowState_NotStarted, - } - wi, err := ts.CreateWorkflow(ctx, w) + hw, err := createWorkflow(ctx, mockWranglerInterface, checkpoint) if err != nil { - t.Errorf("initialize WorkflowInfo fails: %v", err) + t.Errorf("initialize Workflow fails: %v", err) } - hw := &HorizontalReshardingWorkflow{ - ctx: ctx, - wr: mockWranglerInterface, - manager: workflow.NewManager(ts), - wi: wi, - topoServer: ts, - logger: logutil.NewMemoryLogger(), - checkpoint: checkpoint, - checkpointWriter: NewCheckpointWriter(ts, checkpoint, wi), - } - if err := hw.runWorkflow(); err != nil { t.Errorf("%s: Horizontal resharding workflow should not fail", err) } @@ -75,7 +57,7 @@ func TestHorizontalReshardingRetry(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() ctx := context.Background() - mockWranglerInterface := setupMockWrangler(ctx, ctrl) + mockWranglerInterface := setupMockWranglerForRetry(ctx, ctrl) // Set up fakeworkerclient. It is used at SplitClone and SplitDiff phase. fakeVtworkerClient := setupFakeVtworker() @@ -92,19 +74,13 @@ func TestHorizontalReshardingRetry(t *testing.T) { if err != nil { t.Errorf("initialize checkpoint fails: %v", err) } - setTaskSuccessOrFailure(checkpoint, createTaskID(PhaseCopySchema, "80-"), true /* isSuccess*/) - setTaskSuccessOrFailure(checkpoint, createTaskID(PhaseCopySchema, "-80"), false /* isSuccess*/) + setTaskSuccessOrFailure(checkpoint, createTaskID(phaseCopySchema, "80-"), true /* isSuccess*/) + setTaskSuccessOrFailure(checkpoint, createTaskID(phaseCopySchema, "-80"), false /* isSuccess*/) - // Create the workflow. 
- ts := memorytopo.NewServer("cell") - hw := &HorizontalReshardingWorkflow{ - ctx: ctx, - wr: mockWranglerInterface, - topoServer: ts, - logger: logutil.NewMemoryLogger(), - checkpoint: checkpoint, + hw, err := createWorkflow(ctx, mockWranglerInterface, checkpoint) + if err != nil { + t.Errorf("initialize Workflow fails: %v", err) } - // Rerunning the workflow. if err := hw.runWorkflow(); err != nil { t.Errorf("%s: Horizontal resharding workflow should not fail", err) @@ -123,6 +99,30 @@ func setTaskSuccessOrFailure(checkpoint *workflowpb.WorkflowCheckpoint, taskID s } } +func createWorkflow(ctx context.Context, mockWranglerInterface *MockReshardingWrangler, checkpoint *workflowpb.WorkflowCheckpoint) (*HorizontalReshardingWorkflow, error) { + ts := memorytopo.NewServer("cell") + w := &workflowpb.Workflow{ + Uuid: "test_hw", + FactoryName: horizontalReshardingFactoryName, + State: workflowpb.WorkflowState_NotStarted, + } + wi, err := ts.CreateWorkflow(ctx, w) + if err != nil { + return nil, err + } + hw := &HorizontalReshardingWorkflow{ + ctx: ctx, + wr: mockWranglerInterface, + manager: workflow.NewManager(ts), + wi: wi, + topoServer: ts, + logger: logutil.NewMemoryLogger(), + checkpoint: checkpoint, + checkpointWriter: NewCheckpointWriter(ts, checkpoint, wi), + } + return hw, nil +} + func setupFakeVtworker() *fakevtworkerclient.FakeVtworkerClient { flag.Set("vtworker_client_protocol", "fake") fakeVtworkerClient := fakevtworkerclient.NewFakeVtworkerClient() @@ -211,6 +211,7 @@ func setupMockWrangler(ctx context.Context, ctrl *gomock.Controller) *MockReshar } return mockWranglerInterface } + func verifySuccess(t *testing.T, checkpoint *workflowpb.WorkflowCheckpoint) { for _, task := range checkpoint.Tasks { if task.State != workflowpb.TaskState_TaskDone || task.Error != "" { diff --git a/go/vt/workflow/resharding/parallel_runner.go b/go/vt/workflow/resharding/parallel_runner.go index a6a8e7a5f4d..ac7e504d375 100644 --- a/go/vt/workflow/resharding/parallel_runner.go 
+++ b/go/vt/workflow/resharding/parallel_runner.go
@@ -2,7 +2,6 @@ package resharding
 
 import (
 	"fmt"
-	"path"
 	"sync"
 
 	log "github.com/golang/glog"
@@ -16,46 +15,48 @@ import (
 type level int
 
 const (
-	// SEQUENTIAL means that the tasks will run sequentially.
-	SEQUENTIAL level = iota
-	//PARALLEL means that the tasks will run in parallel.
-	PARALLEL
+	// Sequential means that the tasks will run sequentially.
+	Sequential level = iota
+	// Parallel means that the tasks will run in parallel.
+	Parallel
 )
 
 // ParallelRunner is used to control executing tasks concurrently.
 // Each phase has its own ParallelRunner object.
 type ParallelRunner struct {
 	ctx              context.Context
-	nodeManager      *workflow.NodeManager
-	phaseUINode      *workflow.Node
+	rootUINode       *workflow.Node
 	checkpointWriter *CheckpointWriter
 	// tasks stores selected tasks for the phase with expected execution order.
 	tasks            []*workflowpb.Task
 	concurrencyLevel level
 	executeFunc      func(context.Context, *workflowpb.Task) error
-	// mu is used to protect the retryActionRegistery.
+
+	// mu is used to protect the retryActionRegistry.
 	mu sync.Mutex
 	// retryAtionRegistry stores the data for retry actions.
 	// Each task can retrieve its RetryController through its UI node path.
 	retryActionRegistry map[string]*RetryController
-	// reportTaskStatus gives the worklfow debug option to output the task status through UI.
+
+	// reportTaskStatus gives the workflow debug option to output the task
+	// status through UI.
+	// TODO(yipeiw): We will remove this option and make it always report task
+	// status, once we can unit test resharding workflow through manager
+	// (we ignore creating UI nodes when manually creating the workflow now).
 	reportTaskStatus bool
-	// taskFinished stores the channels for synchroizing the finish of tasks.
- taskFinished map[string]chan struct{} } -func NewParallelRunner(ctx context.Context, nodeManager *workflow.NodeManager, phaseUINode *workflow.Node, cp *CheckpointWriter, tasks []*workflowpb.Task, executeFunc func(context.Context, *workflowpb.Task) error, concurrencyLevel level) *ParallelRunner { +// NewParallelRunner returns a new ParallelRunner. +func NewParallelRunner(ctx context.Context, rootUINode *workflow.Node, cp *CheckpointWriter, tasks []*workflowpb.Task, executeFunc func(context.Context, *workflowpb.Task) error, concurrencyLevel level) *ParallelRunner { return &ParallelRunner{ ctx: ctx, - nodeManager: nodeManager, - phaseUINode: phaseUINode, + rootUINode: rootUINode, checkpointWriter: cp, tasks: tasks, executeFunc: executeFunc, concurrencyLevel: concurrencyLevel, retryActionRegistry: make(map[string]*RetryController), reportTaskStatus: false, - taskFinished: make(map[string]chan struct{}), } } @@ -63,9 +64,9 @@ func NewParallelRunner(ctx context.Context, nodeManager *workflow.NodeManager, p func (p *ParallelRunner) Run() error { var parallelNum int // default value is 0. The task will not run in this case. switch p.concurrencyLevel { - case SEQUENTIAL: + case Sequential: parallelNum = 1 - case PARALLEL: + case Parallel: parallelNum = len(p.tasks) default: panic(fmt.Sprintf("BUG: Invalid concurrency level: %v", p.concurrencyLevel)) @@ -79,37 +80,38 @@ func (p *ParallelRunner) Run() error { } sem <- true - p.taskFinished[task.Id] = make(chan struct{}) go func(t *workflowpb.Task) { defer func() { <-sem }() - defer close(p.taskFinished[t.Id]) + defer p.setFinishUIMessage(t.Id) taskID := t.Id for { - err := p.executeFunc(p.ctx, t) - // Update the task status in the checkpoint. - if updateErr := p.checkpointWriter.UpdateTask(taskID, workflowpb.TaskState_TaskDone, err); updateErr != nil { + // Update the task status to running in the checkpoint. 
+ if updateErr := p.checkpointWriter.UpdateTask(taskID, workflowpb.TaskState_TaskRunning, nil); updateErr != nil { // Only logging the error rather then passing it to ErrorRecorder. // Errors in ErrorRecorder will lead to the stop of a workflow. We // don't want to stop the workflow if only checkpointing fails. log.Errorf("%v", updateErr) } + err := p.executeFunc(p.ctx, t) + // Update the task status to done in the checkpoint. + if updateErr := p.checkpointWriter.UpdateTask(taskID, workflowpb.TaskState_TaskDone, err); updateErr != nil { + log.Errorf("%v", updateErr) + } // The function returns if the task is executed successfully. if err == nil { + log.Infof("task %v has finished.", taskID) return } - // When task fails, first check whether the context is cancelled. + // When task fails, first check whether the context is canceled. // If so, return right away. If not, enable the retry action. select { case <-p.ctx.Done(): return default: } - - fmt.Printf("enabling retry action for task: %v", taskID) - - retryChannel, registerID := p.addRetryAction(taskID) + retryChannel, nodePath := p.addRetryAction(taskID) // Block the task execution until the retry action is triggered // or the context is canceled. @@ -117,16 +119,11 @@ func (p *ParallelRunner) Run() error { case <-retryChannel: continue case <-p.ctx.Done(): - p.unregisterRetryController(registerID) + p.unregisterRetryController(nodePath) return } } }(task) - - // Update task finish information on the UI. - if p.reportTaskStatus { - go p.setFinishUIMessage(task.Id) - } } // Wait until all running jobs are done. @@ -138,78 +135,72 @@ func (p *ParallelRunner) Run() error { } // Action handles the retry action. It implements the interface ActionListener. 
-func (p *ParallelRunner) Action(ctx context.Context, pathName, name string) error { +func (p *ParallelRunner) Action(ctx context.Context, path, name string) error { switch name { case "Retry": - return p.triggerRetry(pathName) + return p.triggerRetry(path) default: return fmt.Errorf("Unknown action: %v", name) } } -func (p *ParallelRunner) addRetryAction(taskID string) (chan struct{}, string) { - taskNodePath := path.Join(p.phaseUINode.Path, taskID) - node, err := p.nodeManager.GetNodeByPath(taskNodePath) - if err != nil { - panic(fmt.Errorf("nodepath %v not found", taskNodePath)) - } - - retryController := CreateRetryController(node, p /* actionListener */) - p.registerRetryController(node.Path, retryController) - node.BroadcastChanges(false /* updateChildren */) - return retryController.retryChannel, node.PathName -} - func (p *ParallelRunner) triggerRetry(nodePath string) error { p.mu.Lock() + defer p.mu.Unlock() c, ok := p.retryActionRegistry[nodePath] if !ok { - p.mu.Unlock() - return fmt.Errorf("Unknown node path for the action: %v", nodePath) + return fmt.Errorf("Unregistered action for node: %v", nodePath) } - p.mu.Unlock() - - p.unregisterRetryController(nodePath) + p.unregisterRetryControllerLocked(nodePath) c.triggerRetry() return nil } -func (p *ParallelRunner) registerRetryController(nodePath string, c *RetryController) { +func (p *ParallelRunner) addRetryAction(taskID string) (chan struct{}, string) { + node, err := p.rootUINode.GetChildByPath(taskID) + if err != nil { + panic(fmt.Errorf("node on child path %v not found", taskID)) + } + p.mu.Lock() defer p.mu.Unlock() + retryController := CreateRetryController(node, p /* actionListener */) + p.registerRetryControllerLocked(node.Path, retryController) + node.BroadcastChanges(false /* updateChildren */) + return retryController.retryChannel, node.Path +} + +func (p *ParallelRunner) registerRetryControllerLocked(nodePath string, c *RetryController) { if _, ok := p.retryActionRegistry[nodePath]; ok { - 
panic(fmt.Errorf("duplicate retry action on node: %v", nodePath)) + panic(fmt.Errorf("duplicate retry action for node: %v", nodePath)) } p.retryActionRegistry[nodePath] = c } func (p *ParallelRunner) unregisterRetryController(nodePath string) { p.mu.Lock() - defer p.mu.Unlock() + p.mu.Unlock() + p.unregisterRetryControllerLocked(nodePath) +} + +func (p *ParallelRunner) unregisterRetryControllerLocked(nodePath string) { if _, ok := p.retryActionRegistry[nodePath]; !ok { - log.Warningf("retry action on node: %v doesn't exist, cannot unregister it", nodePath) + log.Warningf("retry action for node: %v doesn't exist, cannot unregister it", nodePath) } else { delete(p.retryActionRegistry, nodePath) } } func (p *ParallelRunner) setFinishUIMessage(taskID string) { - done, ok := p.taskFinished[taskID] - if !ok { - panic(fmt.Errorf("the finish channl for task %v not found", taskID)) - } - - taskNodePath := path.Join(p.phaseUINode.Path, taskID) - taskNode, err := p.nodeManager.GetNodeByPath(taskNodePath) - if err != nil { - panic(fmt.Errorf("nodepath %v not found", taskNodePath)) - } + if p.reportTaskStatus { + taskNode, err := p.rootUINode.GetChildByPath(taskID) + if err != nil { + panic(fmt.Errorf("nodepath %v not found", taskID)) + } - select { - case <-done: + p.mu.Lock() + defer p.mu.Unlock() taskNode.Message = fmt.Sprintf("task %v finished", taskID) taskNode.BroadcastChanges(false /* updateChildren */) - case <-p.ctx.Done(): - return } } diff --git a/go/vt/workflow/resharding/parallel_runner_test.go b/go/vt/workflow/resharding/parallel_runner_test.go index fea4fc1e9a1..7c83c51fae2 100644 --- a/go/vt/workflow/resharding/parallel_runner_test.go +++ b/go/vt/workflow/resharding/parallel_runner_test.go @@ -71,17 +71,8 @@ func TestParallelRunnerRetryAction(t *testing.T) { defer m.NodeManager().CloseWatcher(index) go func() { // This goroutine is used to detect and trigger the retry actions. 
- task1ID := createTestTaskID(PhaseSimple, 0) - task2ID := createTestTaskID(PhaseSimple, 1) - - task1Node, err := m.NodeManager().GetNodeByPath(path.Join("/"+uuid, task1ID)) - if err != nil { - t.Errorf("fail to find node for task %v: %v", task1ID, err) - } - task2Node, err := m.NodeManager().GetNodeByPath(path.Join("/"+uuid, task2ID)) - if err != nil { - t.Errorf("fail to find node for task %v: %v", task2ID, err) - } + task1ID := createTestTaskID(phaseSimple, 0) + task2ID := createTestTaskID(phaseSimple, 1) retry1 := false retry2 := false @@ -95,24 +86,22 @@ func TestParallelRunnerRetryAction(t *testing.T) { if strings.Contains(monitorStr, "Retry") { if strings.Contains(monitorStr, task1ID) { verifyTaskSuccessOrFailure(context.Background(), t, ts, uuid, task1ID, false /* isSuccess*/) - verifyRetryAction(t, task1Node) retry1 = true } if strings.Contains(monitorStr, task2ID) { verifyTaskSuccessOrFailure(context.Background(), t, ts, uuid, task2ID, false /* isSuccess*/) - verifyRetryAction(t, task2Node) retry2 = true } } // After detecting both tasks have enabled retry actions after failure, // retry task1, check its success, then retry task2, check its success. 
if retry1 && retry2 { - clickRetry(ctx, t, m, task1Node.Path) - waitForFinished(ctx, t, notifications, task1ID, task1Node) + clickRetry(ctx, t, m, path.Join("/"+uuid, task1ID)) + waitForFinished(ctx, t, notifications, task1ID) verifyTaskSuccessOrFailure(context.Background(), t, ts, uuid, task1ID, true /* isSuccess*/) - clickRetry(ctx, t, m, task2Node.Path) - waitForFinished(ctx, t, notifications, task2ID, task2Node) + clickRetry(ctx, t, m, path.Join("/"+uuid, task2ID)) + waitForFinished(ctx, t, notifications, task2ID) verifyTaskSuccessOrFailure(context.Background(), t, ts, uuid, task2ID, true /* isSuccess*/) return } @@ -163,18 +152,19 @@ func clickRetry(ctx context.Context, t *testing.T, m *workflow.Manager, nodePath } } -func waitForFinished(ctx context.Context, t *testing.T, notifications chan []byte, taskID string, node *workflow.Node) { +func waitForFinished(ctx context.Context, t *testing.T, notifications chan []byte, taskID string) { for { select { case monitor, ok := <-notifications: + monitorStr := string(monitor) if !ok { - t.Errorf("unexpected notification: %v, %v", ok, string(monitor)) + t.Errorf("unexpected notification: %v, %v", ok, monitorStr) } finishMessage := fmt.Sprintf(`"message":"task %v finished"`, taskID) - if strings.Contains(string(monitor), finishMessage) { - if len(node.Actions) != 0 { - t.Fatalf("the node actions should be empty after triggering retry: %v", node.Actions) + if strings.Contains(monitorStr, finishMessage) { + if strings.Contains(monitorStr, `"actions":[{"name:`) { + t.Fatalf("the node actions should be empty after triggering retry: %v", monitorStr) } return } @@ -218,12 +208,6 @@ func verifyTaskSuccessOrFailure(ctx context.Context, t *testing.T, ts topo.Serve taskError = errMessage } if task.State != workflowpb.TaskState_TaskDone || task.Error != taskError { - t.Errorf("task: %v should succeed. 
Task status: %v, %v", task.State, task.Error) - } -} - -func verifyRetryAction(t *testing.T, node *workflow.Node) { - if len(node.Actions) != 1 || node.Actions[0].Name != "Retry" { - t.Errorf("unexpected Ation values: %v", node.Actions) + t.Errorf("task: %v should succeed. Task status: %v, %v", task.Id, task.State, task.Error) } } diff --git a/go/vt/workflow/resharding/task.go b/go/vt/workflow/resharding/tasks.go similarity index 56% rename from go/vt/workflow/resharding/task.go rename to go/vt/workflow/resharding/tasks.go index 7a8e47ce90c..7c624dfe70c 100644 --- a/go/vt/workflow/resharding/task.go +++ b/go/vt/workflow/resharding/tasks.go @@ -15,47 +15,44 @@ import ( ) func createTaskID(phase PhaseType, shardName string) string { - return fmt.Sprintf("%s_%s", phase, shardName) + return fmt.Sprintf("%s/%s", phase, shardName) } // GetTasks returns selected tasks for a phase from the checkpoint // with expected execution order. -func (hw *HorizontalReshardingWorkflow) GetTasks(checkpoint *workflowpb.WorkflowCheckpoint, phase PhaseType) []*workflowpb.Task { +func (hw *HorizontalReshardingWorkflow) GetTasks(phase PhaseType) []*workflowpb.Task { var shards []string switch phase { case phaseCopySchema, phaseWaitForFilteredReplication, phaseDiff: - shards = strings.Split(checkpoint.Settings["destination_shards"], ",") + shards = strings.Split(hw.checkpoint.Settings["destination_shards"], ",") case phaseClone, phaseMigrateRdonly, phaseMigrateReplica, phaseMigrateMaster: - shards = strings.Split(checkpoint.Settings["source_shards"], ",") + shards = strings.Split(hw.checkpoint.Settings["source_shards"], ",") + default: + panic(fmt.Sprintf("BUG: unknown phase type: %v", phase)) } var tasks []*workflowpb.Task for _, s := range shards { taskID := createTaskID(phase, s) - tasks = append(tasks, checkpoint.Tasks[taskID]) + tasks = append(tasks, hw.checkpoint.Tasks[taskID]) } return tasks } func (hw *HorizontalReshardingWorkflow) runCopySchema(ctx context.Context, t 
*workflowpb.Task) error { - s := t.Attributes["source_shard"] - d := t.Attributes["destination_shard"] keyspace := t.Attributes["keyspace"] - err := hw.wr.CopySchemaShardFromShard(ctx, nil /* tableArray*/, nil /* excludeTableArray */, true, /*includeViews*/ - keyspace, s, keyspace, d, wrangler.DefaultWaitSlaveTimeout) - if err != nil { - hw.logger.Infof("Horizontal Resharding: error in CopySchemaShard from %s to %s: %v.", s, d, err) - } - hw.logger.Infof("Horizontal Resharding: CopySchemaShard from %s to %s is finished.", s, d) - return err + sourceShard := t.Attributes["source_shard"] + destShard := t.Attributes["destination_shard"] + return hw.wr.CopySchemaShardFromShard(ctx, nil /* tableArray*/, nil /* excludeTableArray */, true, /*includeViews*/ + keyspace, sourceShard, keyspace, destShard, wrangler.DefaultWaitSlaveTimeout) } func (hw *HorizontalReshardingWorkflow) runSplitClone(ctx context.Context, t *workflowpb.Task) error { - s := t.Attributes["source_shard"] - worker := t.Attributes["vtworker"] keyspace := t.Attributes["keyspace"] + sourceShard := t.Attributes["source_shard"] + worker := t.Attributes["vtworker"] - sourceKeyspaceShard := topoproto.KeyspaceShardString(keyspace, s) + sourceKeyspaceShard := topoproto.KeyspaceShardString(keyspace, sourceShard) // Reset the vtworker to avoid error if vtworker command has been called elsewhere. // This is because vtworker class doesn't cleanup the environment after execution. automation.ExecuteVtworker(ctx, worker, []string{"Reset"}) @@ -63,46 +60,30 @@ func (hw *HorizontalReshardingWorkflow) runSplitClone(ctx context.Context, t *wo // Therefore, we can reuse the normal end to end test setting, which has only 1 rdonly tablet. // TODO(yipeiw): Add min_healthy_rdonly_tablets as an input argument in UI. 
args := []string{"SplitClone", "--min_healthy_rdonly_tablets=1", sourceKeyspaceShard} - if _, err := automation.ExecuteVtworker(hw.ctx, worker, args); err != nil { - hw.logger.Infof("Horizontal resharding: error in SplitClone in keyspace %s: %v.", keyspace, err) - return err - } - hw.logger.Infof("Horizontal resharding: SplitClone is finished.") - - return nil + _, err := automation.ExecuteVtworker(hw.ctx, worker, args) + return err } func (hw *HorizontalReshardingWorkflow) runWaitForFilteredReplication(ctx context.Context, t *workflowpb.Task) error { - d := t.Attributes["destination_shard"] keyspace := t.Attributes["keyspace"] - - if err := hw.wr.WaitForFilteredReplication(ctx, keyspace, d, wrangler.DefaultWaitForFilteredReplicationMaxDelay); err != nil { - hw.logger.Infof("Horizontal Resharding: error in WaitForFilteredReplication: %v.", err) - return err - } - hw.logger.Infof("Horizontal Resharding:WaitForFilteredReplication is finished on " + d) - return nil + destShard := t.Attributes["destination_shard"] + return hw.wr.WaitForFilteredReplication(ctx, keyspace, destShard, wrangler.DefaultWaitForFilteredReplicationMaxDelay) } func (hw *HorizontalReshardingWorkflow) runSplitDiff(ctx context.Context, t *workflowpb.Task) error { - d := t.Attributes["destination_shard"] - worker := t.Attributes["vtworker"] keyspace := t.Attributes["keyspace"] + destShard := t.Attributes["destination_shard"] + worker := t.Attributes["vtworker"] automation.ExecuteVtworker(hw.ctx, worker, []string{"Reset"}) - args := []string{"SplitDiff", "--min_healthy_rdonly_tablets=1", topoproto.KeyspaceShardString(keyspace, d)} + args := []string{"SplitDiff", "--min_healthy_rdonly_tablets=1", topoproto.KeyspaceShardString(keyspace, destShard)} _, err := automation.ExecuteVtworker(ctx, worker, args) - if err != nil { - return err - } - - hw.logger.Infof("Horizontal resharding: SplitDiff is finished.") - return nil + return err } func (hw *HorizontalReshardingWorkflow) runMigrate(ctx 
context.Context, t *workflowpb.Task) error { - s := t.Attributes["source_shard"] keyspace := t.Attributes["keyspace"] + sourceShard := t.Attributes["source_shard"] servedTypeStr := t.Attributes["served_type"] servedType, err := topoproto.ParseTabletType(servedTypeStr) @@ -116,13 +97,5 @@ func (hw *HorizontalReshardingWorkflow) runMigrate(ctx context.Context, t *workf return fmt.Errorf("wrong served type to be migrated: %v", servedTypeStr) } - sourceKeyspaceShard := topoproto.KeyspaceShardString(keyspace, s) - err = hw.wr.MigrateServedTypes(ctx, keyspace, s, nil /* cells */, servedType, false /* reverse */, false /* skipReFreshState */, wrangler.DefaultFilteredReplicationWaitTime) - if err != nil { - hw.logger.Infof("Horizontal Resharding: error in MigrateServedTypes on servedType %s: %v.", servedType, err) - return err - } - hw.logger.Infof("Horizontal Resharding: MigrateServedTypes is finished on tablet %s serve type %s.", sourceKeyspaceShard, servedType) - - return nil + return hw.wr.MigrateServedTypes(ctx, keyspace, sourceShard, nil /* cells */, servedType, false /* reverse */, false /* skipReFreshState */, wrangler.DefaultFilteredReplicationWaitTime) } diff --git a/go/vt/workflow/resharding/test_workflow.go b/go/vt/workflow/resharding/test_workflow.go new file mode 100644 index 00000000000..0a332e36d13 --- /dev/null +++ b/go/vt/workflow/resharding/test_workflow.go @@ -0,0 +1,181 @@ +package resharding + +import ( + "errors" + "flag" + "fmt" + "strconv" + "sync" + + log "github.com/golang/glog" + "golang.org/x/net/context" + + "github.com/golang/protobuf/proto" + "github.com/youtube/vitess/go/vt/logutil" + "github.com/youtube/vitess/go/vt/topo" + "github.com/youtube/vitess/go/vt/workflow" + + workflowpb "github.com/youtube/vitess/go/vt/proto/workflow" +) + +const ( + testWorkflowFactoryName = "test_workflow" + phaseSimple PhaseType = "simple" + errMessage = "fake error for testing retry" +) + +func createTestTaskID(phase PhaseType, count int) string { + return 
fmt.Sprintf("%s/%v", phase, count) +} + +func init() { + workflow.Register(testWorkflowFactoryName, &TestWorkflowFactory{}) +} + +// TestWorkflow is created to simplfy the unit test of ParallelRunner. +type TestWorkflow struct { + ctx context.Context + manager *workflow.Manager + topoServer topo.Server + wi *topo.WorkflowInfo + logger *logutil.MemoryLogger + + retryMu sync.Mutex + // retryFlags stores the retry flag for all the tasks. + retryFlags map[string]bool + + rootUINode *workflow.Node + + checkpoint *workflowpb.WorkflowCheckpoint + checkpointWriter *CheckpointWriter +} + +// Run implements the worklfow.Workflow interface. +func (tw *TestWorkflow) Run(ctx context.Context, manager *workflow.Manager, wi *topo.WorkflowInfo) error { + tw.ctx = ctx + tw.topoServer = manager.TopoServer() + tw.manager = manager + tw.wi = wi + tw.checkpointWriter = NewCheckpointWriter(tw.topoServer, tw.checkpoint, tw.wi) + tw.rootUINode.Display = workflow.NodeDisplayDeterminate + tw.rootUINode.BroadcastChanges(true /* updateChildren */) + + simpleTasks := tw.getTasks(phaseSimple) + simpleRunner := NewParallelRunner(tw.ctx, tw.rootUINode, tw.checkpointWriter, simpleTasks, tw.runSimple, Parallel) + simpleRunner.reportTaskStatus = true + if err := simpleRunner.Run(); err != nil { + return err + } + + log.Infof("Horizontal resharding is finished successfully.") + return nil +} + +func (tw *TestWorkflow) getTasks(phaseName PhaseType) []*workflowpb.Task { + count, err := strconv.Atoi(tw.checkpoint.Settings["count"]) + if err != nil { + log.Info("converting count in checkpoint.Settings to int fails: %v \n", tw.checkpoint.Settings["count"]) + return nil + } + var tasks []*workflowpb.Task + for i := 0; i < count; i++ { + taskID := createTestTaskID(phaseName, i) + tasks = append(tasks, tw.checkpoint.Tasks[taskID]) + } + return tasks +} + +func (tw *TestWorkflow) runSimple(ctx context.Context, t *workflowpb.Task) error { + log.Info("The number passed to me is %v \n", t.Attributes["number"]) + 
+ tw.retryMu.Lock() + defer tw.retryMu.Unlock() + if tw.retryFlags[t.Id] { + log.Info("I will fail at this time since retry flag is true.") + tw.retryFlags[t.Id] = false + return errors.New(errMessage) + } + return nil +} + +// TestWorkflowFactory is the factory to create a test workflow. +type TestWorkflowFactory struct{} + +// Init is part of the workflow.Factory interface. +func (*TestWorkflowFactory) Init(w *workflowpb.Workflow, args []string) error { + subFlags := flag.NewFlagSet(testWorkflowFactoryName, flag.ContinueOnError) + retryFlag := subFlags.Bool("retry", false, "The retry flag should be true if the retry action need to be tested") + count := subFlags.Int("count", 0, "The number of simple tasks") + if err := subFlags.Parse(args); err != nil { + return err + } + + // Initialize the checkpoint. + taskMap := make(map[string]*workflowpb.Task) + for i := 0; i < *count; i++ { + taskID := createTestTaskID(phaseSimple, i) + taskMap[taskID] = &workflowpb.Task{ + Id: taskID, + State: workflowpb.TaskState_TaskNotStarted, + Attributes: map[string]string{"number": fmt.Sprintf("%v", i)}, + } + } + checkpoint := &workflowpb.WorkflowCheckpoint{ + CodeVersion: 0, + Tasks: taskMap, + Settings: map[string]string{"count": fmt.Sprintf("%v", *count), "retry": fmt.Sprintf("%v", *retryFlag)}, + } + var err error + w.Data, err = proto.Marshal(checkpoint) + if err != nil { + return err + } + return nil +} + +// Instantiate is part the workflow.Factory interface. +func (*TestWorkflowFactory) Instantiate(w *workflowpb.Workflow, rootNode *workflow.Node) (workflow.Workflow, error) { + checkpoint := &workflowpb.WorkflowCheckpoint{} + if err := proto.Unmarshal(w.Data, checkpoint); err != nil { + return nil, err + } + // Get the retry flags for all tasks from the checkpoint. 
+ retry, err := strconv.ParseBool(checkpoint.Settings["retry"]) + if err != nil { + log.Errorf("converting retry in checkpoint.Settings to bool fails: %v \n", checkpoint.Settings["retry"]) + return nil, err + } + retryFlags := make(map[string]bool) + for _, task := range checkpoint.Tasks { + retryFlags[task.Id] = retry + } + + tw := &TestWorkflow{ + checkpoint: checkpoint, + rootUINode: rootNode, + logger: logutil.NewMemoryLogger(), + retryFlags: retryFlags, + } + + count, err := strconv.Atoi(checkpoint.Settings["count"]) + if err != nil { + log.Errorf("converting count in checkpoint.Settings to int fails: %v \n", checkpoint.Settings["count"]) + return nil, err + } + + phaseNode := &workflow.Node{ + Name: string(phaseSimple), + PathName: string(phaseSimple), + } + tw.rootUINode.Children = append(tw.rootUINode.Children, phaseNode) + + for i := 0; i < count; i++ { + taskName := fmt.Sprintf("%v", i) + taskUINode := &workflow.Node{ + Name: taskName, + PathName: taskName, + } + phaseNode.Children = append(phaseNode.Children, taskUINode) + } + return tw, nil +} From aa8634e7029b8ddd75b556acc2447d927c0a1e7b Mon Sep 17 00:00:00 2001 From: Michael Berlin Date: Wed, 22 Feb 2017 19:26:05 -0800 Subject: [PATCH 023/108] proto: Regenerate protobuf files. This is a follow-up fix for https://github.com/youtube/vitess/pull/2495. We were not able to fix Yipei's workstation such that it would use the same protobuf generator as Travis and our setups does. Therefore, I'm re-generating the files separately on my machine. 
--- go/vt/proto/automation/automation.pb.go | 2 +- .../automationservice/automationservice.pb.go | 2 +- go/vt/proto/binlogdata/binlogdata.pb.go | 2 +- go/vt/proto/binlogservice/binlogservice.pb.go | 2 +- go/vt/proto/logutil/logutil.pb.go | 2 +- go/vt/proto/mysqlctl/mysqlctl.pb.go | 2 +- go/vt/proto/query/query.pb.go | 2 +- go/vt/proto/queryservice/queryservice.pb.go | 2 +- .../replicationdata/replicationdata.pb.go | 2 +- go/vt/proto/tableacl/tableacl.pb.go | 2 +- .../tabletmanagerdata/tabletmanagerdata.pb.go | 2 +- .../tabletmanagerservice.pb.go | 2 +- go/vt/proto/throttlerdata/throttlerdata.pb.go | 2 +- .../throttlerservice/throttlerservice.pb.go | 2 +- go/vt/proto/topodata/topodata.pb.go | 2 +- go/vt/proto/vschema/vschema.pb.go | 2 +- go/vt/proto/vtctldata/vtctldata.pb.go | 2 +- go/vt/proto/vtctlservice/vtctlservice.pb.go | 2 +- go/vt/proto/vtgate/vtgate.pb.go | 2 +- go/vt/proto/vtgateservice/vtgateservice.pb.go | 2 +- go/vt/proto/vtrpc/vtrpc.pb.go | 2 +- go/vt/proto/vttest/vttest.pb.go | 2 +- go/vt/proto/vtworkerdata/vtworkerdata.pb.go | 2 +- .../vtworkerservice/vtworkerservice.pb.go | 2 +- go/vt/proto/workflow/workflow.pb.go | 84 ------------------- 25 files changed, 24 insertions(+), 108 deletions(-) diff --git a/go/vt/proto/automation/automation.pb.go b/go/vt/proto/automation/automation.pb.go index 317772d05f7..c63e1a8151b 100644 --- a/go/vt/proto/automation/automation.pb.go +++ b/go/vt/proto/automation/automation.pb.go @@ -251,7 +251,7 @@ func init() { proto.RegisterFile("automation.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 562 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0xdb, 0x6a, 0xdb, 0x4c, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x94, 0xdb, 0x6a, 0xdb, 0x4c, 0x14, 0x85, 0x7f, 0xc9, 0x87, 0xbf, 0xde, 0x6a, 0x1c, 0x31, 0x34, 0x46, 0x09, 0x4d, 0x23, 0xab, 0x37, 0x26, 0x05, 0x43, 0x9d, 0x8b, 0x94, 0xb4, 0x85, 0x1a, 0x5b, 0x84, 0xe0, 0x22, 0x85, 
0xb1, 0x4c, 0xa1, 0xbd, 0x30, 0x53, 0x67, 0x2e, 0x54, 0xcb, 0x92, 0x32, 0x33, 0x2a, 0xf8, 0x05, 0xfa, diff --git a/go/vt/proto/automationservice/automationservice.pb.go b/go/vt/proto/automationservice/automationservice.pb.go index d856f86f0dc..08be3dbb684 100644 --- a/go/vt/proto/automationservice/automationservice.pb.go +++ b/go/vt/proto/automationservice/automationservice.pb.go @@ -148,7 +148,7 @@ func init() { proto.RegisterFile("automationservice.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 150 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0x2c, 0x2d, 0xc9, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0x2c, 0x2d, 0xc9, 0xcf, 0x4d, 0x2c, 0xc9, 0xcc, 0xcf, 0x2b, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xc4, 0x90, 0x90, 0x12, 0x40, 0x08, 0x41, 0x14, 0x19, 0x35, 0x32, 0x71, 0x71, 0x39, 0xc2, 0x05, 0x85, 0x4a, 0xb8, 0xc4, 0x5d, 0xf3, 0x0a, 0x4b, 0x53, 0x4b, 0x53, diff --git a/go/vt/proto/binlogdata/binlogdata.pb.go b/go/vt/proto/binlogdata/binlogdata.pb.go index 46c45fb7550..57c00efc7ff 100644 --- a/go/vt/proto/binlogdata/binlogdata.pb.go +++ b/go/vt/proto/binlogdata/binlogdata.pb.go @@ -247,7 +247,7 @@ func init() { proto.RegisterFile("binlogdata.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 540 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x54, 0x5d, 0x6e, 0xda, 0x40, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x54, 0x5d, 0x6e, 0xda, 0x40, 0x10, 0xae, 0xb1, 0x43, 0xec, 0x71, 0x9a, 0x2c, 0x9b, 0x26, 0xb2, 0x90, 0x2a, 0x21, 0xbf, 0x94, 0x97, 0xba, 0x95, 0x7b, 0x02, 0x6c, 0xaf, 0x10, 0xc9, 0x02, 0xd1, 0xe2, 0xbc, 0xf4, 0xc5, 0x32, 0x64, 0x4b, 0x11, 0xc4, 0x06, 0xef, 0x26, 0x2a, 0xe7, 0xe8, 0x29, 0x7a, 0x91, 0xde, 0xa4, 0xf7, diff --git a/go/vt/proto/binlogservice/binlogservice.pb.go 
b/go/vt/proto/binlogservice/binlogservice.pb.go index b5eac227ece..451617a6f5d 100644 --- a/go/vt/proto/binlogservice/binlogservice.pb.go +++ b/go/vt/proto/binlogservice/binlogservice.pb.go @@ -204,7 +204,7 @@ func init() { proto.RegisterFile("binlogservice.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 149 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4e, 0xca, 0xcc, 0xcb, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0x4e, 0xca, 0xcc, 0xcb, 0xc9, 0x4f, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x45, 0x11, 0x94, 0x12, 0x80, 0x70, 0x53, 0x12, 0x4b, 0x12, 0x21, 0x0a, 0x8c, 0x0e, 0x31, 0x72, 0xf1, 0x84, 0x16, 0xa4, 0x24, 0x96, 0xa4, 0x06, 0x97, 0x14, 0xa5, 0x26, 0xe6, 0x0a, 0x45, diff --git a/go/vt/proto/logutil/logutil.pb.go b/go/vt/proto/logutil/logutil.pb.go index a3e7a31b2bf..43dbdd597d3 100644 --- a/go/vt/proto/logutil/logutil.pb.go +++ b/go/vt/proto/logutil/logutil.pb.go @@ -104,7 +104,7 @@ func init() { proto.RegisterFile("logutil.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 235 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x90, 0x41, 0x4b, 0xc3, 0x40, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x4c, 0x90, 0x41, 0x4b, 0xc3, 0x40, 0x10, 0x85, 0xdd, 0x66, 0xd7, 0xd8, 0x09, 0x2d, 0x61, 0xf0, 0xb0, 0xc7, 0x58, 0x3c, 0x04, 0x0f, 0x3d, 0x54, 0xf0, 0xae, 0x12, 0xa5, 0x50, 0x12, 0x18, 0x05, 0xcf, 0x55, 0x47, 0x59, 0xd8, 0xee, 0x8a, 0x4d, 0xf3, 0x33, 0xfc, 0xcd, 0x92, 0x89, 0x91, 0xde, 0xe6, 0x7d, 0xef, 0xf1, 0xde, 0xb2, diff --git a/go/vt/proto/mysqlctl/mysqlctl.pb.go b/go/vt/proto/mysqlctl/mysqlctl.pb.go index d7e00e1ed55..c6b3947c555 100644 --- a/go/vt/proto/mysqlctl/mysqlctl.pb.go +++ b/go/vt/proto/mysqlctl/mysqlctl.pb.go @@ -292,7 +292,7 @@ func init() { proto.RegisterFile("mysqlctl.proto", fileDescriptor0) } var 
fileDescriptor0 = []byte{ // 289 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x4b, 0xfb, 0x30, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x4b, 0xfb, 0x30, 0x1c, 0xc5, 0x7f, 0xfd, 0x89, 0x52, 0xbf, 0x6e, 0x56, 0xa2, 0x76, 0x5d, 0x41, 0xad, 0x39, 0xc8, 0x4e, 0x13, 0xf4, 0xa4, 0x37, 0x29, 0x78, 0x13, 0x21, 0x43, 0xf0, 0x56, 0xaa, 0xcd, 0x6a, 0xa1, 0x26, 0x5d, 0x92, 0x32, 0xfc, 0xc7, 0xfc, 0xfb, 0xc4, 0x34, 0xe9, 0x3a, 0x3b, 0x3d, 0xf6, 0x7d, diff --git a/go/vt/proto/query/query.pb.go b/go/vt/proto/query/query.pb.go index 188b4ac6d10..1dfb9d76cf4 100644 --- a/go/vt/proto/query/query.pb.go +++ b/go/vt/proto/query/query.pb.go @@ -2090,7 +2090,7 @@ func init() { proto.RegisterFile("query.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 2797 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x3a, 0x49, 0x73, 0x1b, 0xc7, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x3a, 0x49, 0x73, 0x1b, 0xc7, 0xd5, 0x1a, 0x2c, 0x24, 0xf0, 0x40, 0x80, 0xcd, 0x06, 0x69, 0xc1, 0x94, 0x17, 0x7e, 0x63, 0xcb, 0xd6, 0x27, 0x3b, 0x8c, 0x4c, 0x29, 0x8a, 0xcb, 0xce, 0xa2, 0x21, 0x38, 0x94, 0x61, 0x61, 0x53, 0x63, 0x20, 0x87, 0x2e, 0x57, 0x4d, 0x0d, 0x81, 0x16, 0x39, 0x45, 0x00, 0x03, 0xcd, 0x34, 0x28, diff --git a/go/vt/proto/queryservice/queryservice.pb.go b/go/vt/proto/queryservice/queryservice.pb.go index d7eb26e6354..9c005868159 100644 --- a/go/vt/proto/queryservice/queryservice.pb.go +++ b/go/vt/proto/queryservice/queryservice.pb.go @@ -934,7 +934,7 @@ func init() { proto.RegisterFile("queryservice.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 491 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x95, 0xdb, 0x6e, 0xd4, 0x40, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x95, 0xdb, 0x6e, 0xd4, 0x40, 
0x0c, 0x86, 0xe1, 0xa2, 0x2d, 0x72, 0xc3, 0x69, 0x4a, 0x81, 0xa6, 0xa5, 0x2d, 0x7d, 0x80, 0x0a, 0x01, 0x12, 0x52, 0x25, 0x2e, 0xda, 0x08, 0x04, 0xaa, 0x38, 0x65, 0x59, 0x89, 0x2b, 0xa4, 0xd9, 0xc4, 0x5a, 0xa2, 0xcd, 0x26, 0xd9, 0xc9, 0x04, 0xc1, 0x13, 0xf1, 0x9a, 0x88, 0x4c, 0xec, 0xcc, diff --git a/go/vt/proto/replicationdata/replicationdata.pb.go b/go/vt/proto/replicationdata/replicationdata.pb.go index 9abeae1d35a..6733adef566 100644 --- a/go/vt/proto/replicationdata/replicationdata.pb.go +++ b/go/vt/proto/replicationdata/replicationdata.pb.go @@ -53,7 +53,7 @@ func init() { proto.RegisterFile("replicationdata.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 241 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd0, 0xc1, 0x4a, 0x03, 0x31, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x5c, 0xd0, 0xc1, 0x4a, 0x03, 0x31, 0x10, 0xc6, 0x71, 0x52, 0xed, 0x5a, 0x47, 0xb4, 0x1a, 0x2d, 0x04, 0x2f, 0x2e, 0x9e, 0x82, 0x07, 0x11, 0x7d, 0x03, 0xbd, 0xe8, 0x41, 0x90, 0xf4, 0x01, 0x42, 0xba, 0x1b, 0x6c, 0x60, 0xcd, 0x6c, 0x33, 0x53, 0xc1, 0xd7, 0xf1, 0x49, 0xa5, 0x49, 0xbb, 0x48, 0x8f, 0xf9, 0xfe, 0xbf, 0x43, 0x18, diff --git a/go/vt/proto/tableacl/tableacl.pb.go b/go/vt/proto/tableacl/tableacl.pb.go index 6f2ce2dd42a..5840705e279 100644 --- a/go/vt/proto/tableacl/tableacl.pb.go +++ b/go/vt/proto/tableacl/tableacl.pb.go @@ -69,7 +69,7 @@ func init() { proto.RegisterFile("tableacl.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 207 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2b, 0x49, 0x4c, 0xca, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2b, 0x49, 0x4c, 0xca, 0x49, 0x4d, 0x4c, 0xce, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x80, 0xf1, 0x95, 0x96, 0x33, 0x72, 0xf1, 0x85, 0x80, 0x38, 0xee, 0x45, 0xf9, 0xa5, 0x05, 0xc1, 0x05, 0xa9, 0xc9, 0x42, 0x42, 0x5c, 0x2c, 0x79, 0x89, 
0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x60, 0xb6, diff --git a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go index 5333cc3dc96..b0faa63f945 100644 --- a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go +++ b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go @@ -1291,7 +1291,7 @@ func init() { proto.RegisterFile("tabletmanagerdata.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 2049 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0x5b, 0x6f, 0x1b, 0xc7, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x59, 0x5b, 0x6f, 0x1b, 0xc7, 0xf5, 0x07, 0x45, 0x49, 0x96, 0x0e, 0x2f, 0x22, 0x97, 0xba, 0x50, 0x0a, 0xfe, 0xba, 0xac, 0x9d, 0x7f, 0x54, 0x17, 0x55, 0x6a, 0x25, 0x0d, 0x82, 0x04, 0x29, 0xaa, 0xab, 0xed, 0xc4, 0x89, 0x95, 0x95, 0x2f, 0x45, 0x5f, 0x16, 0x43, 0xee, 0x11, 0xb9, 0xd0, 0x72, 0x77, 0x3d, 0x33, 0x2b, 0x89, diff --git a/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go b/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go index e8ea7feaa58..b248d39f21d 100644 --- a/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go +++ b/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go @@ -1633,7 +1633,7 @@ func init() { proto.RegisterFile("tabletmanagerservice.proto", fileDescriptor0) var fileDescriptor0 = []byte{ // 969 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x6d, 0x6f, 0x1c, 0x35, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x98, 0x6d, 0x6f, 0x1c, 0x35, 0x10, 0xc7, 0x39, 0x09, 0x0a, 0x98, 0xc7, 0x5a, 0x88, 0xa2, 0x20, 0x01, 0x4d, 0x5a, 0x1e, 0x52, 0x54, 0xf5, 0x81, 0xf2, 0xfe, 0x2e, 0xbd, 0xb6, 0x41, 0x44, 0x1c, 0x77, 0x8d, 0x82, 0x84, 0x84, 0xe4, 0xec, 0x4d, 0x6f, 0x97, 0x78, 0x6d, 0x63, 0x7b, 0xa3, 0xe4, 0x15, 0x12, 0x12, 0xaf, 0x90, diff --git 
a/go/vt/proto/throttlerdata/throttlerdata.pb.go b/go/vt/proto/throttlerdata/throttlerdata.pb.go index 796485739df..c0e9a9fbf43 100644 --- a/go/vt/proto/throttlerdata/throttlerdata.pb.go +++ b/go/vt/proto/throttlerdata/throttlerdata.pb.go @@ -282,7 +282,7 @@ func init() { proto.RegisterFile("throttlerdata.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 711 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xdd, 0x4e, 0xdb, 0x4a, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x55, 0xdd, 0x4e, 0xdb, 0x4a, 0x10, 0x96, 0x09, 0xe1, 0xc0, 0x84, 0x00, 0x59, 0x38, 0x60, 0xc2, 0xd1, 0x51, 0x8e, 0xa5, 0xa3, 0x46, 0x48, 0xcd, 0x45, 0x50, 0x55, 0x5a, 0x54, 0x09, 0x52, 0xaa, 0xaa, 0x55, 0xcb, 0x85, 0x69, 0x7b, 0xd1, 0x9b, 0xd5, 0xc6, 0x1e, 0x1c, 0x0b, 0xdb, 0xeb, 0xee, 0x2e, 0x25, 0xe9, 0x43, 0xf4, diff --git a/go/vt/proto/throttlerservice/throttlerservice.pb.go b/go/vt/proto/throttlerservice/throttlerservice.pb.go index 2dee35d9275..66841caef2c 100644 --- a/go/vt/proto/throttlerservice/throttlerservice.pb.go +++ b/go/vt/proto/throttlerservice/throttlerservice.pb.go @@ -267,7 +267,7 @@ func init() { proto.RegisterFile("throttlerservice.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 214 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2b, 0xc9, 0x28, 0xca, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0x2b, 0xc9, 0x28, 0xca, 0x2f, 0x29, 0xc9, 0x49, 0x2d, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x40, 0x17, 0x97, 0x12, 0x86, 0x8b, 0xa4, 0x24, 0x96, 0x24, 0x42, 0x94, 0x19, 0x7d, 0x66, 0xe6, 0xe2, 0x0c, 0x81, 0x89, 0x0b, 0xf9, 0x72, 0x71, 0xf8, 0x26, 0x56, 0x04, diff --git a/go/vt/proto/topodata/topodata.pb.go b/go/vt/proto/topodata/topodata.pb.go index 75cbf7ec352..fe4098946c3 100644 --- a/go/vt/proto/topodata/topodata.pb.go +++ 
b/go/vt/proto/topodata/topodata.pb.go @@ -548,7 +548,7 @@ func init() { proto.RegisterFile("topodata.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 1096 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x6f, 0x6f, 0xe3, 0xc4, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x56, 0x6f, 0x6f, 0xe3, 0xc4, 0x13, 0xfe, 0xd9, 0x71, 0xd2, 0x64, 0x9c, 0xe6, 0x7c, 0xfb, 0xbb, 0x43, 0x96, 0x11, 0xa2, 0x8a, 0x84, 0xa8, 0x0e, 0x11, 0x50, 0x8e, 0x83, 0xea, 0x24, 0xa4, 0xa6, 0xa9, 0x0f, 0xd2, 0x3f, 0x69, 0xd8, 0xa4, 0x82, 0xbe, 0xb2, 0x9c, 0x78, 0xdb, 0xb3, 0xea, 0x64, 0xcd, 0xee, 0xa6, 0x52, 0x3e, diff --git a/go/vt/proto/vschema/vschema.pb.go b/go/vt/proto/vschema/vschema.pb.go index 0086cb0dcb9..dc15d87b4e9 100644 --- a/go/vt/proto/vschema/vschema.pb.go +++ b/go/vt/proto/vschema/vschema.pb.go @@ -175,7 +175,7 @@ func init() { proto.RegisterFile("vschema.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 436 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x93, 0xd1, 0x6a, 0xd4, 0x40, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x93, 0xd1, 0x6a, 0xd4, 0x40, 0x14, 0x86, 0x99, 0xc4, 0x4d, 0xb3, 0x27, 0x26, 0xd5, 0xa1, 0x96, 0x10, 0x11, 0x97, 0xa0, 0xb8, 0x57, 0xb9, 0xd8, 0x22, 0x68, 0x45, 0x51, 0x8a, 0x17, 0x45, 0x41, 0x49, 0xa5, 0xb7, 0x65, 0x9a, 0x3d, 0xd0, 0xd2, 0xcd, 0x24, 0x66, 0x92, 0x68, 0x5e, 0xc5, 0x1b, 0xc1, 0x37, 0xf0, 0x0d, 0xa5, diff --git a/go/vt/proto/vtctldata/vtctldata.pb.go b/go/vt/proto/vtctldata/vtctldata.pb.go index c762ac11e48..a112ca33c52 100644 --- a/go/vt/proto/vtctldata/vtctldata.pb.go +++ b/go/vt/proto/vtctldata/vtctldata.pb.go @@ -68,7 +68,7 @@ func init() { proto.RegisterFile("vtctldata.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 175 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 
0x2f, 0x2b, 0x49, 0x2e, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2f, 0x2b, 0x49, 0x2e, 0xc9, 0x49, 0x49, 0x2c, 0x49, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x84, 0x0b, 0x48, 0xf1, 0xe6, 0xe4, 0xa7, 0x97, 0x96, 0x64, 0xe6, 0x40, 0x64, 0x94, 0xc2, 0xb9, 0xa4, 0x5c, 0x2b, 0x52, 0x93, 0x4b, 0x4b, 0x52, 0xc3, 0x40, 0x4a, 0x9c, 0xf3, 0x73, 0x73, 0x13, 0xf3, 0x52, 0x82, diff --git a/go/vt/proto/vtctlservice/vtctlservice.pb.go b/go/vt/proto/vtctlservice/vtctlservice.pb.go index 16c84edc29f..094c4197ff9 100644 --- a/go/vt/proto/vtctlservice/vtctlservice.pb.go +++ b/go/vt/proto/vtctlservice/vtctlservice.pb.go @@ -136,7 +136,7 @@ func init() { proto.RegisterFile("vtctlservice.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 118 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2a, 0x2b, 0x49, 0x2e, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0x2a, 0x2b, 0x49, 0x2e, 0xc9, 0x29, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x41, 0x16, 0x93, 0xe2, 0x07, 0xf3, 0x52, 0x12, 0x4b, 0x12, 0x21, 0xd2, 0x46, 0x85, 0x5c, 0xac, 0x61, 0x20, 0x21, 0xa1, 0x0c, 0x2e, 0x61, 0xd7, 0x8a, 0xd4, 0xe4, 0xd2, 0x92, 0x54, 0x30, 0xdf, diff --git a/go/vt/proto/vtgate/vtgate.pb.go b/go/vt/proto/vtgate/vtgate.pb.go index 7bd4ef3ff0a..c0a6a28df6e 100644 --- a/go/vt/proto/vtgate/vtgate.pb.go +++ b/go/vt/proto/vtgate/vtgate.pb.go @@ -1772,7 +1772,7 @@ func init() { proto.RegisterFile("vtgate.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 1652 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x5a, 0xcb, 0x6e, 0x1b, 0x47, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x5a, 0xcb, 0x6e, 0x1b, 0x47, 0x16, 0x45, 0x77, 0xf3, 0x79, 0x49, 0x51, 0x52, 0x89, 0x92, 0x69, 0x5a, 0x63, 0xc9, 0x8d, 0x11, 0x4c, 0x8f, 0x05, 0x7a, 0x2c, 0xcf, 0x0b, 
0xb3, 0x99, 0xb1, 0x64, 0x61, 0x20, 0x78, 0xec, 0x38, 0x25, 0xc5, 0x49, 0x80, 0x18, 0x8d, 0x16, 0x59, 0x90, 0x3a, 0x24, 0xbb, 0xe9, 0xae, 0x6a, 0x3a, diff --git a/go/vt/proto/vtgateservice/vtgateservice.pb.go b/go/vt/proto/vtgateservice/vtgateservice.pb.go index a3acda6d7c8..af61880068e 100644 --- a/go/vt/proto/vtgateservice/vtgateservice.pb.go +++ b/go/vt/proto/vtgateservice/vtgateservice.pb.go @@ -1045,7 +1045,7 @@ func init() { proto.RegisterFile("vtgateservice.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 551 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x95, 0x5f, 0x6f, 0xd3, 0x30, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x95, 0x5f, 0x6f, 0xd3, 0x30, 0x14, 0xc5, 0xe1, 0x81, 0x82, 0x2e, 0x2d, 0x42, 0x1e, 0x74, 0x5b, 0xd9, 0x18, 0x2b, 0x62, 0xe3, 0x29, 0x42, 0x20, 0x21, 0x21, 0x21, 0xa1, 0x16, 0x2a, 0x84, 0xa6, 0x01, 0x6b, 0xf9, 0xf3, 0xc4, 0x83, 0x9b, 0x5e, 0x65, 0x51, 0xd3, 0x24, 0x8d, 0x9d, 0x88, 0x7e, 0x65, 0x3e, 0x05, 0x5a, 0x62, diff --git a/go/vt/proto/vtrpc/vtrpc.pb.go b/go/vt/proto/vtrpc/vtrpc.pb.go index c560989b2a6..33fc6351b56 100644 --- a/go/vt/proto/vtrpc/vtrpc.pb.go +++ b/go/vt/proto/vtrpc/vtrpc.pb.go @@ -342,7 +342,7 @@ func init() { proto.RegisterFile("vtrpc.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 590 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x93, 0x4d, 0x4f, 0xdb, 0x40, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x64, 0x93, 0x4d, 0x4f, 0xdb, 0x40, 0x10, 0x86, 0xc9, 0x07, 0xf9, 0x18, 0x07, 0xb2, 0x0c, 0x5f, 0x81, 0x52, 0xb5, 0xe2, 0x54, 0x71, 0xc8, 0xa1, 0x3d, 0xf4, 0xbc, 0xf1, 0x0e, 0x61, 0x85, 0x59, 0xa7, 0xeb, 0x35, 0x25, 0xa7, 0x55, 0x08, 0x16, 0xa2, 0x0a, 0x38, 0x72, 0x02, 0x12, 0x97, 0xfe, 0xac, 0xfe, 0xa6, 0xfe, 0x8c, 0x6a, diff --git a/go/vt/proto/vttest/vttest.pb.go b/go/vt/proto/vttest/vttest.pb.go index 98ecfd79e41..4457ce4edb4 
100644 --- a/go/vt/proto/vttest/vttest.pb.go +++ b/go/vt/proto/vttest/vttest.pb.go @@ -107,7 +107,7 @@ func init() { proto.RegisterFile("vttest.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 297 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x51, 0xcd, 0x6a, 0xf3, 0x30, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x51, 0xcd, 0x6a, 0xf3, 0x30, 0x10, 0xc4, 0x49, 0xec, 0xef, 0xcb, 0xe6, 0x87, 0x20, 0x72, 0xd0, 0xad, 0x69, 0x4a, 0xc1, 0xa7, 0x50, 0xda, 0x47, 0x08, 0xed, 0xa5, 0xd0, 0x82, 0x6b, 0x72, 0x35, 0x8e, 0xb5, 0x4d, 0x4d, 0x65, 0x4b, 0x48, 0x8a, 0xc1, 0xaf, 0xd1, 0x27, 0x2e, 0x5e, 0xcb, 0xf4, 0xe2, 0xdb, 0x68, 0x66, 0x76, diff --git a/go/vt/proto/vtworkerdata/vtworkerdata.pb.go b/go/vt/proto/vtworkerdata/vtworkerdata.pb.go index a0e55e41ee9..71559a88fbe 100644 --- a/go/vt/proto/vtworkerdata/vtworkerdata.pb.go +++ b/go/vt/proto/vtworkerdata/vtworkerdata.pb.go @@ -66,7 +66,7 @@ func init() { proto.RegisterFile("vtworkerdata.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 147 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2a, 0x2b, 0x29, 0xcf, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0x2a, 0x2b, 0x29, 0xcf, 0x2f, 0xca, 0x4e, 0x2d, 0x4a, 0x49, 0x2c, 0x49, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x41, 0x16, 0x93, 0xe2, 0xcd, 0xc9, 0x4f, 0x2f, 0x2d, 0xc9, 0xcc, 0x81, 0x48, 0x2a, 0x19, 0x73, 0xc9, 0xba, 0x56, 0xa4, 0x26, 0x97, 0x96, 0xa4, 0x86, 0x41, 0x55, 0x39, 0xe7, 0xe7, 0xe6, 0x26, diff --git a/go/vt/proto/vtworkerservice/vtworkerservice.pb.go b/go/vt/proto/vtworkerservice/vtworkerservice.pb.go index 75cf04ee7f9..1ffc24c794b 100644 --- a/go/vt/proto/vtworkerservice/vtworkerservice.pb.go +++ b/go/vt/proto/vtworkerservice/vtworkerservice.pb.go @@ -140,7 +140,7 @@ func init() { proto.RegisterFile("vtworkerservice.proto", fileDescriptor0) } var 
fileDescriptor0 = []byte{ // 123 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2d, 0x2b, 0x29, 0xcf, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0x2d, 0x2b, 0x29, 0xcf, 0x2f, 0xca, 0x4e, 0x2d, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x47, 0x13, 0x96, 0x12, 0x82, 0x09, 0xa4, 0x24, 0x96, 0x24, 0x42, 0x14, 0x19, 0x35, 0x33, 0x72, 0x71, 0x84, 0x41, 0x85, 0x85, 0xca, 0xb9, 0xc4, 0x5c, 0x2b, 0x52, 0x93, 0x4b, diff --git a/go/vt/proto/workflow/workflow.pb.go b/go/vt/proto/workflow/workflow.pb.go index cb3cf7a64d6..cf5eeccff26 100644 --- a/go/vt/proto/workflow/workflow.pb.go +++ b/go/vt/proto/workflow/workflow.pb.go @@ -123,62 +123,6 @@ func (m *Workflow) String() string { return proto.CompactTextString(m func (*Workflow) ProtoMessage() {} func (*Workflow) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (m *Workflow) GetUuid() string { - if m != nil { - return m.Uuid - } - return "" -} - -func (m *Workflow) GetFactoryName() string { - if m != nil { - return m.FactoryName - } - return "" -} - -func (m *Workflow) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Workflow) GetState() WorkflowState { - if m != nil { - return m.State - } - return WorkflowState_NotStarted -} - -func (m *Workflow) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m *Workflow) GetError() string { - if m != nil { - return m.Error - } - return "" -} - -func (m *Workflow) GetStartTime() int64 { - if m != nil { - return m.StartTime - } - return 0 -} - -func (m *Workflow) GetEndTime() int64 { - if m != nil { - return m.EndTime - } - return 0 -} - type WorkflowCheckpoint struct { // code_version is used to detect incompabilities between the version of the // running workflow and the one which wrote the checkpoint. 
If they don't @@ -198,13 +142,6 @@ func (m *WorkflowCheckpoint) String() string { return proto.CompactTe func (*WorkflowCheckpoint) ProtoMessage() {} func (*WorkflowCheckpoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } -func (m *WorkflowCheckpoint) GetCodeVersion() int32 { - if m != nil { - return m.CodeVersion - } - return 0 -} - func (m *WorkflowCheckpoint) GetTasks() map[string]*Task { if m != nil { return m.Tasks @@ -232,20 +169,6 @@ func (m *Task) String() string { return proto.CompactTextString(m) } func (*Task) ProtoMessage() {} func (*Task) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } -func (m *Task) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *Task) GetState() TaskState { - if m != nil { - return m.State - } - return TaskState_TaskNotStarted -} - func (m *Task) GetAttributes() map[string]string { if m != nil { return m.Attributes @@ -253,13 +176,6 @@ func (m *Task) GetAttributes() map[string]string { return nil } -func (m *Task) GetError() string { - if m != nil { - return m.Error - } - return "" -} - func init() { proto.RegisterType((*Workflow)(nil), "workflow.Workflow") proto.RegisterType((*WorkflowCheckpoint)(nil), "workflow.WorkflowCheckpoint") From a8e29355cb96b8627b5e6276e6def6346ea51886 Mon Sep 17 00:00:00 2001 From: Michael Berlin Date: Wed, 22 Feb 2017 20:30:47 -0800 Subject: [PATCH 024/108] vendor: Pin the Protobuf Go generator to the latest version. I've also regenerated all protobuf files. Note that the new version has added getters for primitive types. Do NOT use these getters and instead always use the field names instead. 
--- go/vt/proto/automation/automation.pb.go | 98 ++++ go/vt/proto/binlogdata/binlogdata.pb.go | 56 ++ go/vt/proto/logutil/logutil.pb.go | 42 ++ go/vt/proto/mysqlctl/mysqlctl.pb.go | 14 + go/vt/proto/query/query.pb.go | 511 ++++++++++++++++++ .../replicationdata/replicationdata.pb.go | 49 ++ go/vt/proto/tableacl/tableacl.pb.go | 35 ++ .../tabletmanagerdata/tabletmanagerdata.pb.go | 476 ++++++++++++++++ go/vt/proto/throttlerdata/throttlerdata.pb.go | 154 ++++++ go/vt/proto/topodata/topodata.pb.go | 238 ++++++++ go/vt/proto/vschema/vschema.pb.go | 56 ++ go/vt/proto/vtctldata/vtctldata.pb.go | 14 + go/vt/proto/vtgate/vtgate.pb.go | 483 +++++++++++++++++ go/vt/proto/vtrpc/vtrpc.pb.go | 42 ++ go/vt/proto/vttest/vttest.pb.go | 63 +++ go/vt/proto/vtworkerdata/vtworkerdata.pb.go | 7 + go/vt/proto/workflow/workflow.pb.go | 84 +++ vendor/vendor.json | 24 +- 18 files changed, 2434 insertions(+), 12 deletions(-) diff --git a/go/vt/proto/automation/automation.pb.go b/go/vt/proto/automation/automation.pb.go index c63e1a8151b..38dbf1e67c9 100644 --- a/go/vt/proto/automation/automation.pb.go +++ b/go/vt/proto/automation/automation.pb.go @@ -105,6 +105,13 @@ func (m *ClusterOperation) String() string { return proto.CompactText func (*ClusterOperation) ProtoMessage() {} func (*ClusterOperation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *ClusterOperation) GetId() string { + if m != nil { + return m.Id + } + return "" +} + func (m *ClusterOperation) GetSerialTasks() []*TaskContainer { if m != nil { return m.SerialTasks @@ -112,6 +119,20 @@ func (m *ClusterOperation) GetSerialTasks() []*TaskContainer { return nil } +func (m *ClusterOperation) GetState() ClusterOperationState { + if m != nil { + return m.State + } + return ClusterOperationState_UNKNOWN_CLUSTER_OPERATION_STATE +} + +func (m *ClusterOperation) GetError() string { + if m != nil { + return m.Error + } + return "" +} + // TaskContainer holds one or more task which may be executed in parallel. 
// "concurrency", if > 0, limits the amount of concurrently executed tasks. type TaskContainer struct { @@ -131,6 +152,13 @@ func (m *TaskContainer) GetParallelTasks() []*Task { return nil } +func (m *TaskContainer) GetConcurrency() int32 { + if m != nil { + return m.Concurrency + } + return 0 +} + // Task represents a specific task which should be automatically executed. type Task struct { // Task specification. @@ -150,6 +178,13 @@ func (m *Task) String() string { return proto.CompactTextString(m) } func (*Task) ProtoMessage() {} func (*Task) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (m *Task) GetName() string { + if m != nil { + return m.Name + } + return "" +} + func (m *Task) GetParameters() map[string]string { if m != nil { return m.Parameters @@ -157,6 +192,34 @@ func (m *Task) GetParameters() map[string]string { return nil } +func (m *Task) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Task) GetState() TaskState { + if m != nil { + return m.State + } + return TaskState_UNKNOWN_TASK_STATE +} + +func (m *Task) GetOutput() string { + if m != nil { + return m.Output + } + return "" +} + +func (m *Task) GetError() string { + if m != nil { + return m.Error + } + return "" +} + type EnqueueClusterOperationRequest struct { Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` Parameters map[string]string `protobuf:"bytes,2,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` @@ -167,6 +230,13 @@ func (m *EnqueueClusterOperationRequest) String() string { return pro func (*EnqueueClusterOperationRequest) ProtoMessage() {} func (*EnqueueClusterOperationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (m *EnqueueClusterOperationRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + func (m *EnqueueClusterOperationRequest) GetParameters() map[string]string { if m != 
nil { return m.Parameters @@ -183,6 +253,13 @@ func (m *EnqueueClusterOperationResponse) String() string { return pr func (*EnqueueClusterOperationResponse) ProtoMessage() {} func (*EnqueueClusterOperationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (m *EnqueueClusterOperationResponse) GetId() string { + if m != nil { + return m.Id + } + return "" +} + type GetClusterOperationStateRequest struct { Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` } @@ -192,6 +269,13 @@ func (m *GetClusterOperationStateRequest) String() string { return pr func (*GetClusterOperationStateRequest) ProtoMessage() {} func (*GetClusterOperationStateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (m *GetClusterOperationStateRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + type GetClusterOperationStateResponse struct { State ClusterOperationState `protobuf:"varint,1,opt,name=state,enum=automation.ClusterOperationState" json:"state,omitempty"` } @@ -203,6 +287,13 @@ func (*GetClusterOperationStateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (m *GetClusterOperationStateResponse) GetState() ClusterOperationState { + if m != nil { + return m.State + } + return ClusterOperationState_UNKNOWN_CLUSTER_OPERATION_STATE +} + type GetClusterOperationDetailsRequest struct { Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` } @@ -214,6 +305,13 @@ func (*GetClusterOperationDetailsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (m *GetClusterOperationDetailsRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + type GetClusterOperationDetailsResponse struct { // Full snapshot of the execution e.g. including output of each task. 
ClusterOp *ClusterOperation `protobuf:"bytes,2,opt,name=cluster_op,json=clusterOp" json:"cluster_op,omitempty"` diff --git a/go/vt/proto/binlogdata/binlogdata.pb.go b/go/vt/proto/binlogdata/binlogdata.pb.go index 57c00efc7ff..c9cbcf14543 100644 --- a/go/vt/proto/binlogdata/binlogdata.pb.go +++ b/go/vt/proto/binlogdata/binlogdata.pb.go @@ -98,6 +98,27 @@ func (m *Charset) String() string { return proto.CompactTextString(m) func (*Charset) ProtoMessage() {} func (*Charset) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *Charset) GetClient() int32 { + if m != nil { + return m.Client + } + return 0 +} + +func (m *Charset) GetConn() int32 { + if m != nil { + return m.Conn + } + return 0 +} + +func (m *Charset) GetServer() int32 { + if m != nil { + return m.Server + } + return 0 +} + // BinlogTransaction describes a transaction inside the binlogs. // It is streamed by vttablet for filtered replication, used during resharding. type BinlogTransaction struct { @@ -140,6 +161,13 @@ func (m *BinlogTransaction_Statement) String() string { return proto. 
func (*BinlogTransaction_Statement) ProtoMessage() {} func (*BinlogTransaction_Statement) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} } +func (m *BinlogTransaction_Statement) GetCategory() BinlogTransaction_Statement_Category { + if m != nil { + return m.Category + } + return BinlogTransaction_Statement_BL_UNRECOGNIZED +} + func (m *BinlogTransaction_Statement) GetCharset() *Charset { if m != nil { return m.Charset @@ -147,6 +175,13 @@ func (m *BinlogTransaction_Statement) GetCharset() *Charset { return nil } +func (m *BinlogTransaction_Statement) GetSql() []byte { + if m != nil { + return m.Sql + } + return nil +} + // StreamKeyRangeRequest is the payload to StreamKeyRange type StreamKeyRangeRequest struct { // where to start @@ -162,6 +197,13 @@ func (m *StreamKeyRangeRequest) String() string { return proto.Compac func (*StreamKeyRangeRequest) ProtoMessage() {} func (*StreamKeyRangeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (m *StreamKeyRangeRequest) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + func (m *StreamKeyRangeRequest) GetKeyRange() *topodata.KeyRange { if m != nil { return m.KeyRange @@ -208,6 +250,20 @@ func (m *StreamTablesRequest) String() string { return proto.CompactT func (*StreamTablesRequest) ProtoMessage() {} func (*StreamTablesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (m *StreamTablesRequest) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + +func (m *StreamTablesRequest) GetTables() []string { + if m != nil { + return m.Tables + } + return nil +} + func (m *StreamTablesRequest) GetCharset() *Charset { if m != nil { return m.Charset diff --git a/go/vt/proto/logutil/logutil.pb.go b/go/vt/proto/logutil/logutil.pb.go index 43dbdd597d3..98596199609 100644 --- a/go/vt/proto/logutil/logutil.pb.go +++ b/go/vt/proto/logutil/logutil.pb.go @@ -73,6 +73,20 @@ func (m *Time) String() string { 
return proto.CompactTextString(m) } func (*Time) ProtoMessage() {} func (*Time) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *Time) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Time) GetNanoseconds() int32 { + if m != nil { + return m.Nanoseconds + } + return 0 +} + // Event is a single logging event type Event struct { Time *Time `protobuf:"bytes,1,opt,name=time" json:"time,omitempty"` @@ -94,6 +108,34 @@ func (m *Event) GetTime() *Time { return nil } +func (m *Event) GetLevel() Level { + if m != nil { + return m.Level + } + return Level_INFO +} + +func (m *Event) GetFile() string { + if m != nil { + return m.File + } + return "" +} + +func (m *Event) GetLine() int64 { + if m != nil { + return m.Line + } + return 0 +} + +func (m *Event) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + func init() { proto.RegisterType((*Time)(nil), "logutil.Time") proto.RegisterType((*Event)(nil), "logutil.Event") diff --git a/go/vt/proto/mysqlctl/mysqlctl.pb.go b/go/vt/proto/mysqlctl/mysqlctl.pb.go index c6b3947c555..68ea55ec20a 100644 --- a/go/vt/proto/mysqlctl/mysqlctl.pb.go +++ b/go/vt/proto/mysqlctl/mysqlctl.pb.go @@ -49,6 +49,13 @@ func (m *StartRequest) String() string { return proto.CompactTextStri func (*StartRequest) ProtoMessage() {} func (*StartRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *StartRequest) GetMysqldArgs() []string { + if m != nil { + return m.MysqldArgs + } + return nil +} + type StartResponse struct { } @@ -66,6 +73,13 @@ func (m *ShutdownRequest) String() string { return proto.CompactTextS func (*ShutdownRequest) ProtoMessage() {} func (*ShutdownRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (m *ShutdownRequest) GetWaitForMysqld() bool { + if m != nil { + return m.WaitForMysqld + } + return false +} + type ShutdownResponse struct { } diff --git a/go/vt/proto/query/query.pb.go 
b/go/vt/proto/query/query.pb.go index 1dfb9d76cf4..bc54e6429a9 100644 --- a/go/vt/proto/query/query.pb.go +++ b/go/vt/proto/query/query.pb.go @@ -490,6 +490,27 @@ func (m *Target) String() string { return proto.CompactTextString(m) func (*Target) ProtoMessage() {} func (*Target) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *Target) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *Target) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} + +func (m *Target) GetTabletType() topodata.TabletType { + if m != nil { + return m.TabletType + } + return topodata.TabletType_UNKNOWN +} + // VTGateCallerID is sent by VTGate to VTTablet to describe the // caller. If possible, this information is secure. For instance, // if using unique certificates that guarantee that VTGate->VTTablet @@ -507,6 +528,13 @@ func (m *VTGateCallerID) String() string { return proto.CompactTextSt func (*VTGateCallerID) ProtoMessage() {} func (*VTGateCallerID) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (m *VTGateCallerID) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + // EventToken is a structure that describes a point in time in a // replication stream on one shard. The most recent known replication // position can be retrieved from vttablet when executing a query. It @@ -527,6 +555,27 @@ func (m *EventToken) String() string { return proto.CompactTextString func (*EventToken) ProtoMessage() {} func (*EventToken) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (m *EventToken) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *EventToken) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} + +func (m *EventToken) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + // Value represents a typed value. 
type Value struct { Type Type `protobuf:"varint,1,opt,name=type,enum=query.Type" json:"type,omitempty"` @@ -538,6 +587,20 @@ func (m *Value) String() string { return proto.CompactTextString(m) } func (*Value) ProtoMessage() {} func (*Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (m *Value) GetType() Type { + if m != nil { + return m.Type + } + return Type_NULL_TYPE +} + +func (m *Value) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + // BindVariable represents a single bind variable in a Query. type BindVariable struct { Type Type `protobuf:"varint,1,opt,name=type,enum=query.Type" json:"type,omitempty"` @@ -551,6 +614,20 @@ func (m *BindVariable) String() string { return proto.CompactTextStri func (*BindVariable) ProtoMessage() {} func (*BindVariable) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (m *BindVariable) GetType() Type { + if m != nil { + return m.Type + } + return Type_NULL_TYPE +} + +func (m *BindVariable) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + func (m *BindVariable) GetValues() []*Value { if m != nil { return m.Values @@ -571,6 +648,13 @@ func (m *BoundQuery) String() string { return proto.CompactTextString func (*BoundQuery) ProtoMessage() {} func (*BoundQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (m *BoundQuery) GetSql() string { + if m != nil { + return m.Sql + } + return "" +} + func (m *BoundQuery) GetBindVariables() map[string]*BindVariable { if m != nil { return m.BindVariables @@ -597,6 +681,13 @@ func (m *ExecuteOptions) String() string { return proto.CompactTextSt func (*ExecuteOptions) ProtoMessage() {} func (*ExecuteOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (m *ExecuteOptions) GetIncludeEventToken() bool { + if m != nil { + return m.IncludeEventToken + } + return false +} + func (m *ExecuteOptions) GetCompareEventToken() *EventToken { if m != nil { 
return m.CompareEventToken @@ -604,6 +695,13 @@ func (m *ExecuteOptions) GetCompareEventToken() *EventToken { return nil } +func (m *ExecuteOptions) GetIncludedFields() ExecuteOptions_IncludedFields { + if m != nil { + return m.IncludedFields + } + return ExecuteOptions_TYPE_AND_NAME +} + // Field describes a single column returned by a query type Field struct { // name of the field as returned by mysql C API @@ -632,6 +730,76 @@ func (m *Field) String() string { return proto.CompactTextString(m) } func (*Field) ProtoMessage() {} func (*Field) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (m *Field) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Field) GetType() Type { + if m != nil { + return m.Type + } + return Type_NULL_TYPE +} + +func (m *Field) GetTable() string { + if m != nil { + return m.Table + } + return "" +} + +func (m *Field) GetOrgTable() string { + if m != nil { + return m.OrgTable + } + return "" +} + +func (m *Field) GetDatabase() string { + if m != nil { + return m.Database + } + return "" +} + +func (m *Field) GetOrgName() string { + if m != nil { + return m.OrgName + } + return "" +} + +func (m *Field) GetColumnLength() uint32 { + if m != nil { + return m.ColumnLength + } + return 0 +} + +func (m *Field) GetCharset() uint32 { + if m != nil { + return m.Charset + } + return 0 +} + +func (m *Field) GetDecimals() uint32 { + if m != nil { + return m.Decimals + } + return 0 +} + +func (m *Field) GetFlags() uint32 { + if m != nil { + return m.Flags + } + return 0 +} + // Row is a database row. type Row struct { // lengths contains the length of each value in values. 
@@ -648,6 +816,20 @@ func (m *Row) String() string { return proto.CompactTextString(m) } func (*Row) ProtoMessage() {} func (*Row) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (m *Row) GetLengths() []int64 { + if m != nil { + return m.Lengths + } + return nil +} + +func (m *Row) GetValues() []byte { + if m != nil { + return m.Values + } + return nil +} + // ResultExtras contains optional out-of-band information. Usually the // extras are requested by adding ExecuteOptions flags. type ResultExtras struct { @@ -671,6 +853,13 @@ func (m *ResultExtras) GetEventToken() *EventToken { return nil } +func (m *ResultExtras) GetFresher() bool { + if m != nil { + return m.Fresher + } + return false +} + // QueryResult is returned by Execute and ExecuteStream. // // As returned by Execute, len(fields) is always equal to len(row) @@ -700,6 +889,20 @@ func (m *QueryResult) GetFields() []*Field { return nil } +func (m *QueryResult) GetRowsAffected() uint64 { + if m != nil { + return m.RowsAffected + } + return 0 +} + +func (m *QueryResult) GetInsertId() uint64 { + if m != nil { + return m.InsertId + } + return 0 +} + func (m *QueryResult) GetRows() []*Row { if m != nil { return m.Rows @@ -760,6 +963,20 @@ func (m *StreamEvent_Statement) String() string { return proto.Compac func (*StreamEvent_Statement) ProtoMessage() {} func (*StreamEvent_Statement) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11, 0} } +func (m *StreamEvent_Statement) GetCategory() StreamEvent_Statement_Category { + if m != nil { + return m.Category + } + return StreamEvent_Statement_Error +} + +func (m *StreamEvent_Statement) GetTableName() string { + if m != nil { + return m.TableName + } + return "" +} + func (m *StreamEvent_Statement) GetPrimaryKeyFields() []*Field { if m != nil { return m.PrimaryKeyFields @@ -774,6 +991,13 @@ func (m *StreamEvent_Statement) GetPrimaryKeyValues() []*Row { return nil } +func (m *StreamEvent_Statement) GetSql() []byte { + if m != nil { 
+ return m.Sql + } + return nil +} + // ExecuteRequest is the payload to Execute type ExecuteRequest struct { EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"` @@ -817,6 +1041,13 @@ func (m *ExecuteRequest) GetQuery() *BoundQuery { return nil } +func (m *ExecuteRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + func (m *ExecuteRequest) GetOptions() *ExecuteOptions { if m != nil { return m.Options @@ -914,6 +1145,20 @@ func (m *ExecuteBatchRequest) GetQueries() []*BoundQuery { return nil } +func (m *ExecuteBatchRequest) GetAsTransaction() bool { + if m != nil { + return m.AsTransaction + } + return false +} + +func (m *ExecuteBatchRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + func (m *ExecuteBatchRequest) GetOptions() *ExecuteOptions { if m != nil { return m.Options @@ -1047,6 +1292,13 @@ func (m *BeginResponse) String() string { return proto.CompactTextStr func (*BeginResponse) ProtoMessage() {} func (*BeginResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } +func (m *BeginResponse) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + // CommitRequest is the payload to Commit type CommitRequest struct { EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"` @@ -1081,6 +1333,13 @@ func (m *CommitRequest) GetTarget() *Target { return nil } +func (m *CommitRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + // CommitResponse is the returned value from Commit type CommitResponse struct { } @@ -1124,6 +1383,13 @@ func (m *RollbackRequest) GetTarget() *Target { return nil } +func (m *RollbackRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + // 
RollbackResponse is the returned value from Rollback type RollbackResponse struct { } @@ -1168,6 +1434,20 @@ func (m *PrepareRequest) GetTarget() *Target { return nil } +func (m *PrepareRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +func (m *PrepareRequest) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + // PrepareResponse is the returned value from Prepare type PrepareResponse struct { } @@ -1211,6 +1491,13 @@ func (m *CommitPreparedRequest) GetTarget() *Target { return nil } +func (m *CommitPreparedRequest) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + // CommitPreparedResponse is the returned value from CommitPrepared type CommitPreparedResponse struct { } @@ -1255,6 +1542,20 @@ func (m *RollbackPreparedRequest) GetTarget() *Target { return nil } +func (m *RollbackPreparedRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +func (m *RollbackPreparedRequest) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + // RollbackPreparedResponse is the returned value from RollbackPrepared type RollbackPreparedResponse struct { } @@ -1299,6 +1600,13 @@ func (m *CreateTransactionRequest) GetTarget() *Target { return nil } +func (m *CreateTransactionRequest) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + func (m *CreateTransactionRequest) GetParticipants() []*Target { if m != nil { return m.Participants @@ -1350,6 +1658,20 @@ func (m *StartCommitRequest) GetTarget() *Target { return nil } +func (m *StartCommitRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +func (m *StartCommitRequest) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + // StartCommitResponse is the returned value from StartCommit type StartCommitResponse struct { } @@ -1394,6 +1716,20 @@ func (m *SetRollbackRequest) GetTarget() *Target { return nil } 
+func (m *SetRollbackRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +func (m *SetRollbackRequest) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + // SetRollbackResponse is the returned value from SetRollback type SetRollbackResponse struct { } @@ -1437,6 +1773,13 @@ func (m *ConcludeTransactionRequest) GetTarget() *Target { return nil } +func (m *ConcludeTransactionRequest) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + // ConcludeTransactionResponse is the returned value from ConcludeTransaction type ConcludeTransactionResponse struct { } @@ -1480,6 +1823,13 @@ func (m *ReadTransactionRequest) GetTarget() *Target { return nil } +func (m *ReadTransactionRequest) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + // ReadTransactionResponse is the returned value from ReadTransaction type ReadTransactionResponse struct { Metadata *TransactionMetadata `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` @@ -1576,6 +1926,13 @@ func (m *BeginExecuteResponse) GetResult() *QueryResult { return nil } +func (m *BeginExecuteResponse) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + // BeginExecuteBatchRequest is the payload to BeginExecuteBatch type BeginExecuteBatchRequest struct { EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"` @@ -1619,6 +1976,13 @@ func (m *BeginExecuteBatchRequest) GetQueries() []*BoundQuery { return nil } +func (m *BeginExecuteBatchRequest) GetAsTransaction() bool { + if m != nil { + return m.AsTransaction + } + return false +} + func (m *BeginExecuteBatchRequest) GetOptions() *ExecuteOptions { if m != nil { return m.Options @@ -1656,6 +2020,13 @@ func (m *BeginExecuteBatchResponse) GetResults() []*QueryResult { return nil } +func (m *BeginExecuteBatchResponse) GetTransactionId() int64 { + 
if m != nil { + return m.TransactionId + } + return 0 +} + // MessageStreamRequest is the request payload for MessageStream. type MessageStreamRequest struct { EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"` @@ -1691,6 +2062,13 @@ func (m *MessageStreamRequest) GetTarget() *Target { return nil } +func (m *MessageStreamRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + // MessageStreamResponse is a response for MessageStream. type MessageStreamResponse struct { Result *QueryResult `protobuf:"bytes,1,opt,name=result" json:"result,omitempty"` @@ -1744,6 +2122,13 @@ func (m *MessageAckRequest) GetTarget() *Target { return nil } +func (m *MessageAckRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + func (m *MessageAckRequest) GetIds() []*Value { if m != nil { return m.Ids @@ -1818,6 +2203,34 @@ func (m *SplitQueryRequest) GetQuery() *BoundQuery { return nil } +func (m *SplitQueryRequest) GetSplitColumn() []string { + if m != nil { + return m.SplitColumn + } + return nil +} + +func (m *SplitQueryRequest) GetSplitCount() int64 { + if m != nil { + return m.SplitCount + } + return 0 +} + +func (m *SplitQueryRequest) GetNumRowsPerQueryPart() int64 { + if m != nil { + return m.NumRowsPerQueryPart + } + return 0 +} + +func (m *SplitQueryRequest) GetAlgorithm() SplitQueryRequest_Algorithm { + if m != nil { + return m.Algorithm + } + return SplitQueryRequest_EQUAL_SPLITS +} + // QuerySplit represents one query to execute on the tablet type QuerySplit struct { // query is the query to execute @@ -1838,6 +2251,13 @@ func (m *QuerySplit) GetQuery() *BoundQuery { return nil } +func (m *QuerySplit) GetRowCount() int64 { + if m != nil { + return m.RowCount + } + return 0 +} + // SplitQueryResponse is returned by SplitQuery and represents all the queries // to execute in order to get the entire data set. 
type SplitQueryResponse struct { @@ -1901,6 +2321,48 @@ func (m *RealtimeStats) String() string { return proto.CompactTextStr func (*RealtimeStats) ProtoMessage() {} func (*RealtimeStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } +func (m *RealtimeStats) GetHealthError() string { + if m != nil { + return m.HealthError + } + return "" +} + +func (m *RealtimeStats) GetSecondsBehindMaster() uint32 { + if m != nil { + return m.SecondsBehindMaster + } + return 0 +} + +func (m *RealtimeStats) GetBinlogPlayersCount() int32 { + if m != nil { + return m.BinlogPlayersCount + } + return 0 +} + +func (m *RealtimeStats) GetSecondsBehindMasterFilteredReplication() int64 { + if m != nil { + return m.SecondsBehindMasterFilteredReplication + } + return 0 +} + +func (m *RealtimeStats) GetCpuUsage() float64 { + if m != nil { + return m.CpuUsage + } + return 0 +} + +func (m *RealtimeStats) GetQps() float64 { + if m != nil { + return m.Qps + } + return 0 +} + // StreamHealthResponse is streamed by StreamHealth on a regular basis type StreamHealthResponse struct { // target is the current server type. 
Only queries with that exact Target @@ -1932,6 +2394,20 @@ func (m *StreamHealthResponse) GetTarget() *Target { return nil } +func (m *StreamHealthResponse) GetServing() bool { + if m != nil { + return m.Serving + } + return false +} + +func (m *StreamHealthResponse) GetTabletExternallyReparentedTimestamp() int64 { + if m != nil { + return m.TabletExternallyReparentedTimestamp + } + return 0 +} + func (m *StreamHealthResponse) GetRealtimeStats() *RealtimeStats { if m != nil { return m.RealtimeStats @@ -1980,6 +2456,20 @@ func (m *UpdateStreamRequest) GetTarget() *Target { return nil } +func (m *UpdateStreamRequest) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + +func (m *UpdateStreamRequest) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + // UpdateStreamResponse is returned by UpdateStream type UpdateStreamResponse struct { Event *StreamEvent `protobuf:"bytes,1,opt,name=event" json:"event,omitempty"` @@ -2010,6 +2500,27 @@ func (m *TransactionMetadata) String() string { return proto.CompactT func (*TransactionMetadata) ProtoMessage() {} func (*TransactionMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } +func (m *TransactionMetadata) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + +func (m *TransactionMetadata) GetState() TransactionState { + if m != nil { + return m.State + } + return TransactionState_UNKNOWN +} + +func (m *TransactionMetadata) GetTimeCreated() int64 { + if m != nil { + return m.TimeCreated + } + return 0 +} + func (m *TransactionMetadata) GetParticipants() []*Target { if m != nil { return m.Participants diff --git a/go/vt/proto/replicationdata/replicationdata.pb.go b/go/vt/proto/replicationdata/replicationdata.pb.go index 6733adef566..667bd10090c 100644 --- a/go/vt/proto/replicationdata/replicationdata.pb.go +++ b/go/vt/proto/replicationdata/replicationdata.pb.go @@ -45,6 +45,55 @@ func (m *Status) String() string { return 
proto.CompactTextString(m) func (*Status) ProtoMessage() {} func (*Status) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *Status) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + +func (m *Status) GetSlaveIoRunning() bool { + if m != nil { + return m.SlaveIoRunning + } + return false +} + +func (m *Status) GetSlaveSqlRunning() bool { + if m != nil { + return m.SlaveSqlRunning + } + return false +} + +func (m *Status) GetSecondsBehindMaster() uint32 { + if m != nil { + return m.SecondsBehindMaster + } + return 0 +} + +func (m *Status) GetMasterHost() string { + if m != nil { + return m.MasterHost + } + return "" +} + +func (m *Status) GetMasterPort() int32 { + if m != nil { + return m.MasterPort + } + return 0 +} + +func (m *Status) GetMasterConnectRetry() int32 { + if m != nil { + return m.MasterConnectRetry + } + return 0 +} + func init() { proto.RegisterType((*Status)(nil), "replicationdata.Status") } diff --git a/go/vt/proto/tableacl/tableacl.pb.go b/go/vt/proto/tableacl/tableacl.pb.go index 5840705e279..2baa4ecbf7c 100644 --- a/go/vt/proto/tableacl/tableacl.pb.go +++ b/go/vt/proto/tableacl/tableacl.pb.go @@ -44,6 +44,41 @@ func (m *TableGroupSpec) String() string { return proto.CompactTextSt func (*TableGroupSpec) ProtoMessage() {} func (*TableGroupSpec) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *TableGroupSpec) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *TableGroupSpec) GetTableNamesOrPrefixes() []string { + if m != nil { + return m.TableNamesOrPrefixes + } + return nil +} + +func (m *TableGroupSpec) GetReaders() []string { + if m != nil { + return m.Readers + } + return nil +} + +func (m *TableGroupSpec) GetWriters() []string { + if m != nil { + return m.Writers + } + return nil +} + +func (m *TableGroupSpec) GetAdmins() []string { + if m != nil { + return m.Admins + } + return nil +} + type Config struct { TableGroups 
[]*TableGroupSpec `protobuf:"bytes,1,rep,name=table_groups,json=tableGroups" json:"table_groups,omitempty"` } diff --git a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go index b0faa63f945..280956aa4cd 100644 --- a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go +++ b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go @@ -144,6 +144,55 @@ func (m *TableDefinition) String() string { return proto.CompactTextS func (*TableDefinition) ProtoMessage() {} func (*TableDefinition) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *TableDefinition) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *TableDefinition) GetSchema() string { + if m != nil { + return m.Schema + } + return "" +} + +func (m *TableDefinition) GetColumns() []string { + if m != nil { + return m.Columns + } + return nil +} + +func (m *TableDefinition) GetPrimaryKeyColumns() []string { + if m != nil { + return m.PrimaryKeyColumns + } + return nil +} + +func (m *TableDefinition) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *TableDefinition) GetDataLength() uint64 { + if m != nil { + return m.DataLength + } + return 0 +} + +func (m *TableDefinition) GetRowCount() uint64 { + if m != nil { + return m.RowCount + } + return 0 +} + type SchemaDefinition struct { DatabaseSchema string `protobuf:"bytes,1,opt,name=database_schema,json=databaseSchema" json:"database_schema,omitempty"` TableDefinitions []*TableDefinition `protobuf:"bytes,2,rep,name=table_definitions,json=tableDefinitions" json:"table_definitions,omitempty"` @@ -155,6 +204,13 @@ func (m *SchemaDefinition) String() string { return proto.CompactText func (*SchemaDefinition) ProtoMessage() {} func (*SchemaDefinition) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (m *SchemaDefinition) GetDatabaseSchema() string { + if m != nil { + return m.DatabaseSchema + } + return "" 
+} + func (m *SchemaDefinition) GetTableDefinitions() []*TableDefinition { if m != nil { return m.TableDefinitions @@ -162,6 +218,13 @@ func (m *SchemaDefinition) GetTableDefinitions() []*TableDefinition { return nil } +func (m *SchemaDefinition) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + type SchemaChangeResult struct { // before_schema holds the schema before each change. BeforeSchema *SchemaDefinition `protobuf:"bytes,1,opt,name=before_schema,json=beforeSchema" json:"before_schema,omitempty"` @@ -203,6 +266,27 @@ func (m *UserPermission) String() string { return proto.CompactTextSt func (*UserPermission) ProtoMessage() {} func (*UserPermission) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (m *UserPermission) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *UserPermission) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *UserPermission) GetPasswordChecksum() uint64 { + if m != nil { + return m.PasswordChecksum + } + return 0 +} + func (m *UserPermission) GetPrivileges() map[string]string { if m != nil { return m.Privileges @@ -224,6 +308,27 @@ func (m *DbPermission) String() string { return proto.CompactTextStri func (*DbPermission) ProtoMessage() {} func (*DbPermission) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (m *DbPermission) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *DbPermission) GetDb() string { + if m != nil { + return m.Db + } + return "" +} + +func (m *DbPermission) GetUser() string { + if m != nil { + return m.User + } + return "" +} + func (m *DbPermission) GetPrivileges() map[string]string { if m != nil { return m.Privileges @@ -268,6 +373,20 @@ func (m *BlpPosition) String() string { return proto.CompactTextStrin func (*BlpPosition) ProtoMessage() {} func (*BlpPosition) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (m 
*BlpPosition) GetUid() uint32 { + if m != nil { + return m.Uid + } + return 0 +} + +func (m *BlpPosition) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + type PingRequest struct { Payload string `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` } @@ -277,6 +396,13 @@ func (m *PingRequest) String() string { return proto.CompactTextStrin func (*PingRequest) ProtoMessage() {} func (*PingRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (m *PingRequest) GetPayload() string { + if m != nil { + return m.Payload + } + return "" +} + type PingResponse struct { Payload string `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` } @@ -286,6 +412,13 @@ func (m *PingResponse) String() string { return proto.CompactTextStri func (*PingResponse) ProtoMessage() {} func (*PingResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (m *PingResponse) GetPayload() string { + if m != nil { + return m.Payload + } + return "" +} + type SleepRequest struct { // duration is in nanoseconds Duration int64 `protobuf:"varint,1,opt,name=duration" json:"duration,omitempty"` @@ -296,6 +429,13 @@ func (m *SleepRequest) String() string { return proto.CompactTextStri func (*SleepRequest) ProtoMessage() {} func (*SleepRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +func (m *SleepRequest) GetDuration() int64 { + if m != nil { + return m.Duration + } + return 0 +} + type SleepResponse struct { } @@ -315,6 +455,20 @@ func (m *ExecuteHookRequest) String() string { return proto.CompactTe func (*ExecuteHookRequest) ProtoMessage() {} func (*ExecuteHookRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (m *ExecuteHookRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ExecuteHookRequest) GetParameters() []string { + if m != nil { + return m.Parameters + } + return nil +} + func (m *ExecuteHookRequest) 
GetExtraEnv() map[string]string { if m != nil { return m.ExtraEnv @@ -333,6 +487,27 @@ func (m *ExecuteHookResponse) String() string { return proto.CompactT func (*ExecuteHookResponse) ProtoMessage() {} func (*ExecuteHookResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (m *ExecuteHookResponse) GetExitStatus() int64 { + if m != nil { + return m.ExitStatus + } + return 0 +} + +func (m *ExecuteHookResponse) GetStdout() string { + if m != nil { + return m.Stdout + } + return "" +} + +func (m *ExecuteHookResponse) GetStderr() string { + if m != nil { + return m.Stderr + } + return "" +} + type GetSchemaRequest struct { Tables []string `protobuf:"bytes,1,rep,name=tables" json:"tables,omitempty"` IncludeViews bool `protobuf:"varint,2,opt,name=include_views,json=includeViews" json:"include_views,omitempty"` @@ -344,6 +519,27 @@ func (m *GetSchemaRequest) String() string { return proto.CompactText func (*GetSchemaRequest) ProtoMessage() {} func (*GetSchemaRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (m *GetSchemaRequest) GetTables() []string { + if m != nil { + return m.Tables + } + return nil +} + +func (m *GetSchemaRequest) GetIncludeViews() bool { + if m != nil { + return m.IncludeViews + } + return false +} + +func (m *GetSchemaRequest) GetExcludeTables() []string { + if m != nil { + return m.ExcludeTables + } + return nil +} + type GetSchemaResponse struct { SchemaDefinition *SchemaDefinition `protobuf:"bytes,1,opt,name=schema_definition,json=schemaDefinition" json:"schema_definition,omitempty"` } @@ -425,6 +621,13 @@ func (m *ChangeTypeRequest) String() string { return proto.CompactTex func (*ChangeTypeRequest) ProtoMessage() {} func (*ChangeTypeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } +func (m *ChangeTypeRequest) GetTabletType() topodata.TabletType { + if m != nil { + return m.TabletType + } + return topodata.TabletType_UNKNOWN +} + type ChangeTypeResponse struct 
{ } @@ -474,6 +677,13 @@ func (m *IgnoreHealthErrorRequest) String() string { return proto.Com func (*IgnoreHealthErrorRequest) ProtoMessage() {} func (*IgnoreHealthErrorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } +func (m *IgnoreHealthErrorRequest) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + type IgnoreHealthErrorResponse struct { } @@ -494,6 +704,13 @@ func (m *ReloadSchemaRequest) String() string { return proto.CompactT func (*ReloadSchemaRequest) ProtoMessage() {} func (*ReloadSchemaRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } +func (m *ReloadSchemaRequest) GetWaitPosition() string { + if m != nil { + return m.WaitPosition + } + return "" +} + type ReloadSchemaResponse struct { } @@ -511,6 +728,13 @@ func (m *PreflightSchemaRequest) String() string { return proto.Compa func (*PreflightSchemaRequest) ProtoMessage() {} func (*PreflightSchemaRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } +func (m *PreflightSchemaRequest) GetChanges() []string { + if m != nil { + return m.Changes + } + return nil +} + type PreflightSchemaResponse struct { // change_results has for each change the schema before and after it. // The number of elements is identical to the length of "changes" in the request. 
@@ -542,6 +766,27 @@ func (m *ApplySchemaRequest) String() string { return proto.CompactTe func (*ApplySchemaRequest) ProtoMessage() {} func (*ApplySchemaRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } +func (m *ApplySchemaRequest) GetSql() string { + if m != nil { + return m.Sql + } + return "" +} + +func (m *ApplySchemaRequest) GetForce() bool { + if m != nil { + return m.Force + } + return false +} + +func (m *ApplySchemaRequest) GetAllowReplication() bool { + if m != nil { + return m.AllowReplication + } + return false +} + func (m *ApplySchemaRequest) GetBeforeSchema() *SchemaDefinition { if m != nil { return m.BeforeSchema @@ -593,6 +838,41 @@ func (m *ExecuteFetchAsDbaRequest) String() string { return proto.Com func (*ExecuteFetchAsDbaRequest) ProtoMessage() {} func (*ExecuteFetchAsDbaRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } +func (m *ExecuteFetchAsDbaRequest) GetQuery() []byte { + if m != nil { + return m.Query + } + return nil +} + +func (m *ExecuteFetchAsDbaRequest) GetDbName() string { + if m != nil { + return m.DbName + } + return "" +} + +func (m *ExecuteFetchAsDbaRequest) GetMaxRows() uint64 { + if m != nil { + return m.MaxRows + } + return 0 +} + +func (m *ExecuteFetchAsDbaRequest) GetDisableBinlogs() bool { + if m != nil { + return m.DisableBinlogs + } + return false +} + +func (m *ExecuteFetchAsDbaRequest) GetReloadSchema() bool { + if m != nil { + return m.ReloadSchema + } + return false +} + type ExecuteFetchAsDbaResponse struct { Result *query.QueryResult `protobuf:"bytes,1,opt,name=result" json:"result,omitempty"` } @@ -621,6 +901,34 @@ func (m *ExecuteFetchAsAllPrivsRequest) String() string { return prot func (*ExecuteFetchAsAllPrivsRequest) ProtoMessage() {} func (*ExecuteFetchAsAllPrivsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } +func (m *ExecuteFetchAsAllPrivsRequest) GetQuery() []byte { + if m != nil { + return m.Query + } + return nil +} + +func 
(m *ExecuteFetchAsAllPrivsRequest) GetDbName() string { + if m != nil { + return m.DbName + } + return "" +} + +func (m *ExecuteFetchAsAllPrivsRequest) GetMaxRows() uint64 { + if m != nil { + return m.MaxRows + } + return 0 +} + +func (m *ExecuteFetchAsAllPrivsRequest) GetReloadSchema() bool { + if m != nil { + return m.ReloadSchema + } + return false +} + type ExecuteFetchAsAllPrivsResponse struct { Result *query.QueryResult `protobuf:"bytes,1,opt,name=result" json:"result,omitempty"` } @@ -647,6 +955,20 @@ func (m *ExecuteFetchAsAppRequest) String() string { return proto.Com func (*ExecuteFetchAsAppRequest) ProtoMessage() {} func (*ExecuteFetchAsAppRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } +func (m *ExecuteFetchAsAppRequest) GetQuery() []byte { + if m != nil { + return m.Query + } + return nil +} + +func (m *ExecuteFetchAsAppRequest) GetMaxRows() uint64 { + if m != nil { + return m.MaxRows + } + return 0 +} + type ExecuteFetchAsAppResponse struct { Result *query.QueryResult `protobuf:"bytes,1,opt,name=result" json:"result,omitempty"` } @@ -704,6 +1026,13 @@ func (m *MasterPositionResponse) String() string { return proto.Compa func (*MasterPositionResponse) ProtoMessage() {} func (*MasterPositionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } +func (m *MasterPositionResponse) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + type StopSlaveRequest struct { } @@ -730,6 +1059,20 @@ func (m *StopSlaveMinimumRequest) String() string { return proto.Comp func (*StopSlaveMinimumRequest) ProtoMessage() {} func (*StopSlaveMinimumRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } +func (m *StopSlaveMinimumRequest) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + +func (m *StopSlaveMinimumRequest) GetWaitTimeout() int64 { + if m != nil { + return m.WaitTimeout + } + return 0 +} + type StopSlaveMinimumResponse struct { Position 
string `protobuf:"bytes,1,opt,name=position" json:"position,omitempty"` } @@ -739,6 +1082,13 @@ func (m *StopSlaveMinimumResponse) String() string { return proto.Com func (*StopSlaveMinimumResponse) ProtoMessage() {} func (*StopSlaveMinimumResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } +func (m *StopSlaveMinimumResponse) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + type StartSlaveRequest struct { } @@ -769,6 +1119,13 @@ func (*TabletExternallyReparentedRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } +func (m *TabletExternallyReparentedRequest) GetExternalId() string { + if m != nil { + return m.ExternalId + } + return "" +} + type TabletExternallyReparentedResponse struct { } @@ -814,6 +1171,13 @@ func (m *GetSlavesResponse) String() string { return proto.CompactTex func (*GetSlavesResponse) ProtoMessage() {} func (*GetSlavesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } +func (m *GetSlavesResponse) GetAddrs() []string { + if m != nil { + return m.Addrs + } + return nil +} + type WaitBlpPositionRequest struct { BlpPosition *BlpPosition `protobuf:"bytes,1,opt,name=blp_position,json=blpPosition" json:"blp_position,omitempty"` WaitTimeout int64 `protobuf:"varint,2,opt,name=wait_timeout,json=waitTimeout" json:"wait_timeout,omitempty"` @@ -831,6 +1195,13 @@ func (m *WaitBlpPositionRequest) GetBlpPosition() *BlpPosition { return nil } +func (m *WaitBlpPositionRequest) GetWaitTimeout() int64 { + if m != nil { + return m.WaitTimeout + } + return 0 +} + type WaitBlpPositionResponse struct { } @@ -896,6 +1267,13 @@ func (m *RunBlpUntilRequest) GetBlpPositions() []*BlpPosition { return nil } +func (m *RunBlpUntilRequest) GetWaitTimeout() int64 { + if m != nil { + return m.WaitTimeout + } + return 0 +} + type RunBlpUntilResponse struct { Position string `protobuf:"bytes,1,opt,name=position" json:"position,omitempty"` } @@ -905,6 +1283,13 @@ func (m 
*RunBlpUntilResponse) String() string { return proto.CompactT func (*RunBlpUntilResponse) ProtoMessage() {} func (*RunBlpUntilResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{64} } +func (m *RunBlpUntilResponse) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + type ResetReplicationRequest struct { } @@ -938,6 +1323,13 @@ func (m *InitMasterResponse) String() string { return proto.CompactTe func (*InitMasterResponse) ProtoMessage() {} func (*InitMasterResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{68} } +func (m *InitMasterResponse) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + type PopulateReparentJournalRequest struct { TimeCreatedNs int64 `protobuf:"varint,1,opt,name=time_created_ns,json=timeCreatedNs" json:"time_created_ns,omitempty"` ActionName string `protobuf:"bytes,2,opt,name=action_name,json=actionName" json:"action_name,omitempty"` @@ -950,6 +1342,20 @@ func (m *PopulateReparentJournalRequest) String() string { return pro func (*PopulateReparentJournalRequest) ProtoMessage() {} func (*PopulateReparentJournalRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{69} } +func (m *PopulateReparentJournalRequest) GetTimeCreatedNs() int64 { + if m != nil { + return m.TimeCreatedNs + } + return 0 +} + +func (m *PopulateReparentJournalRequest) GetActionName() string { + if m != nil { + return m.ActionName + } + return "" +} + func (m *PopulateReparentJournalRequest) GetMasterAlias() *topodata.TabletAlias { if m != nil { return m.MasterAlias @@ -957,6 +1363,13 @@ func (m *PopulateReparentJournalRequest) GetMasterAlias() *topodata.TabletAlias return nil } +func (m *PopulateReparentJournalRequest) GetReplicationPosition() string { + if m != nil { + return m.ReplicationPosition + } + return "" +} + type PopulateReparentJournalResponse struct { } @@ -985,6 +1398,20 @@ func (m *InitSlaveRequest) GetParent() *topodata.TabletAlias { return nil } 
+func (m *InitSlaveRequest) GetReplicationPosition() string { + if m != nil { + return m.ReplicationPosition + } + return "" +} + +func (m *InitSlaveRequest) GetTimeCreatedNs() int64 { + if m != nil { + return m.TimeCreatedNs + } + return 0 +} + type InitSlaveResponse struct { } @@ -1010,6 +1437,13 @@ func (m *DemoteMasterResponse) String() string { return proto.Compact func (*DemoteMasterResponse) ProtoMessage() {} func (*DemoteMasterResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{74} } +func (m *DemoteMasterResponse) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + type PromoteSlaveWhenCaughtUpRequest struct { Position string `protobuf:"bytes,1,opt,name=position" json:"position,omitempty"` } @@ -1021,6 +1455,13 @@ func (*PromoteSlaveWhenCaughtUpRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{75} } +func (m *PromoteSlaveWhenCaughtUpRequest) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + type PromoteSlaveWhenCaughtUpResponse struct { Position string `protobuf:"bytes,1,opt,name=position" json:"position,omitempty"` } @@ -1032,6 +1473,13 @@ func (*PromoteSlaveWhenCaughtUpResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{76} } +func (m *PromoteSlaveWhenCaughtUpResponse) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + type SlaveWasPromotedRequest struct { } @@ -1066,6 +1514,20 @@ func (m *SetMasterRequest) GetParent() *topodata.TabletAlias { return nil } +func (m *SetMasterRequest) GetTimeCreatedNs() int64 { + if m != nil { + return m.TimeCreatedNs + } + return 0 +} + +func (m *SetMasterRequest) GetForceStartSlave() bool { + if m != nil { + return m.ForceStartSlave + } + return false +} + type SetMasterResponse struct { } @@ -1144,6 +1606,13 @@ func (m *PromoteSlaveResponse) String() string { return proto.Compact func (*PromoteSlaveResponse) ProtoMessage() {} func (*PromoteSlaveResponse) Descriptor() ([]byte, 
[]int) { return fileDescriptor0, []int{86} } +func (m *PromoteSlaveResponse) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + type BackupRequest struct { Concurrency int64 `protobuf:"varint,1,opt,name=concurrency" json:"concurrency,omitempty"` } @@ -1153,6 +1622,13 @@ func (m *BackupRequest) String() string { return proto.CompactTextStr func (*BackupRequest) ProtoMessage() {} func (*BackupRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{87} } +func (m *BackupRequest) GetConcurrency() int64 { + if m != nil { + return m.Concurrency + } + return 0 +} + type BackupResponse struct { Event *logutil.Event `protobuf:"bytes,1,opt,name=event" json:"event,omitempty"` } diff --git a/go/vt/proto/throttlerdata/throttlerdata.pb.go b/go/vt/proto/throttlerdata/throttlerdata.pb.go index c0e9a9fbf43..3e7c25463c1 100644 --- a/go/vt/proto/throttlerdata/throttlerdata.pb.go +++ b/go/vt/proto/throttlerdata/throttlerdata.pb.go @@ -76,6 +76,13 @@ func (m *SetMaxRateRequest) String() string { return proto.CompactTex func (*SetMaxRateRequest) ProtoMessage() {} func (*SetMaxRateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (m *SetMaxRateRequest) GetRate() int64 { + if m != nil { + return m.Rate + } + return 0 +} + // SetMaxRateResponse is returned by the SetMaxRate RPC. type SetMaxRateResponse struct { // names is the list of throttler names which were updated. @@ -87,6 +94,13 @@ func (m *SetMaxRateResponse) String() string { return proto.CompactTe func (*SetMaxRateResponse) ProtoMessage() {} func (*SetMaxRateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (m *SetMaxRateResponse) GetNames() []string { + if m != nil { + return m.Names + } + return nil +} + // Configuration holds the configuration parameters for the // MaxReplicationLagModule which adaptively adjusts the throttling rate based on // the observed replication lag across all replicas. 
@@ -175,6 +189,104 @@ func (m *Configuration) String() string { return proto.CompactTextStr func (*Configuration) ProtoMessage() {} func (*Configuration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (m *Configuration) GetTargetReplicationLagSec() int64 { + if m != nil { + return m.TargetReplicationLagSec + } + return 0 +} + +func (m *Configuration) GetMaxReplicationLagSec() int64 { + if m != nil { + return m.MaxReplicationLagSec + } + return 0 +} + +func (m *Configuration) GetInitialRate() int64 { + if m != nil { + return m.InitialRate + } + return 0 +} + +func (m *Configuration) GetMaxIncrease() float64 { + if m != nil { + return m.MaxIncrease + } + return 0 +} + +func (m *Configuration) GetEmergencyDecrease() float64 { + if m != nil { + return m.EmergencyDecrease + } + return 0 +} + +func (m *Configuration) GetMinDurationBetweenIncreasesSec() int64 { + if m != nil { + return m.MinDurationBetweenIncreasesSec + } + return 0 +} + +func (m *Configuration) GetMaxDurationBetweenIncreasesSec() int64 { + if m != nil { + return m.MaxDurationBetweenIncreasesSec + } + return 0 +} + +func (m *Configuration) GetMinDurationBetweenDecreasesSec() int64 { + if m != nil { + return m.MinDurationBetweenDecreasesSec + } + return 0 +} + +func (m *Configuration) GetSpreadBacklogAcrossSec() int64 { + if m != nil { + return m.SpreadBacklogAcrossSec + } + return 0 +} + +func (m *Configuration) GetIgnoreNSlowestReplicas() int32 { + if m != nil { + return m.IgnoreNSlowestReplicas + } + return 0 +} + +func (m *Configuration) GetIgnoreNSlowestRdonlys() int32 { + if m != nil { + return m.IgnoreNSlowestRdonlys + } + return 0 +} + +func (m *Configuration) GetAgeBadRateAfterSec() int64 { + if m != nil { + return m.AgeBadRateAfterSec + } + return 0 +} + +func (m *Configuration) GetBadRateIncrease() float64 { + if m != nil { + return m.BadRateIncrease + } + return 0 +} + +func (m *Configuration) GetMaxRateApproachThreshold() float64 { + if m != nil { + return 
m.MaxRateApproachThreshold + } + return 0 +} + // GetConfigurationRequest is the payload for the GetConfiguration RPC. type GetConfigurationRequest struct { // throttler_name specifies which throttler to select. If empty, all active @@ -187,6 +299,13 @@ func (m *GetConfigurationRequest) String() string { return proto.Comp func (*GetConfigurationRequest) ProtoMessage() {} func (*GetConfigurationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (m *GetConfigurationRequest) GetThrottlerName() string { + if m != nil { + return m.ThrottlerName + } + return "" +} + // GetConfigurationResponse is returned by the GetConfiguration RPC. type GetConfigurationResponse struct { // max_rates returns the configurations for each throttler. @@ -223,6 +342,13 @@ func (m *UpdateConfigurationRequest) String() string { return proto.C func (*UpdateConfigurationRequest) ProtoMessage() {} func (*UpdateConfigurationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (m *UpdateConfigurationRequest) GetThrottlerName() string { + if m != nil { + return m.ThrottlerName + } + return "" +} + func (m *UpdateConfigurationRequest) GetConfiguration() *Configuration { if m != nil { return m.Configuration @@ -230,6 +356,13 @@ func (m *UpdateConfigurationRequest) GetConfiguration() *Configuration { return nil } +func (m *UpdateConfigurationRequest) GetCopyZeroValues() bool { + if m != nil { + return m.CopyZeroValues + } + return false +} + // UpdateConfigurationResponse is returned by the UpdateConfiguration RPC. type UpdateConfigurationResponse struct { // names is the list of throttler names which were updated. @@ -241,6 +374,13 @@ func (m *UpdateConfigurationResponse) String() string { return proto. 
func (*UpdateConfigurationResponse) ProtoMessage() {} func (*UpdateConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (m *UpdateConfigurationResponse) GetNames() []string { + if m != nil { + return m.Names + } + return nil +} + // ResetConfigurationRequest is the payload for the ResetConfiguration RPC. type ResetConfigurationRequest struct { // throttler_name specifies which throttler to reset. If empty, all active @@ -253,6 +393,13 @@ func (m *ResetConfigurationRequest) String() string { return proto.Co func (*ResetConfigurationRequest) ProtoMessage() {} func (*ResetConfigurationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +func (m *ResetConfigurationRequest) GetThrottlerName() string { + if m != nil { + return m.ThrottlerName + } + return "" +} + // ResetConfigurationResponse is returned by the ResetConfiguration RPC. type ResetConfigurationResponse struct { // names is the list of throttler names which were updated. 
@@ -264,6 +411,13 @@ func (m *ResetConfigurationResponse) String() string { return proto.C func (*ResetConfigurationResponse) ProtoMessage() {} func (*ResetConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (m *ResetConfigurationResponse) GetNames() []string { + if m != nil { + return m.Names + } + return nil +} + func init() { proto.RegisterType((*MaxRatesRequest)(nil), "throttlerdata.MaxRatesRequest") proto.RegisterType((*MaxRatesResponse)(nil), "throttlerdata.MaxRatesResponse") diff --git a/go/vt/proto/topodata/topodata.pb.go b/go/vt/proto/topodata/topodata.pb.go index fe4098946c3..38ba2c01d78 100644 --- a/go/vt/proto/topodata/topodata.pb.go +++ b/go/vt/proto/topodata/topodata.pb.go @@ -146,6 +146,20 @@ func (m *KeyRange) String() string { return proto.CompactTextString(m func (*KeyRange) ProtoMessage() {} func (*KeyRange) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *KeyRange) GetStart() []byte { + if m != nil { + return m.Start + } + return nil +} + +func (m *KeyRange) GetEnd() []byte { + if m != nil { + return m.End + } + return nil +} + // TabletAlias is a globally unique tablet identifier. type TabletAlias struct { // cell is the cell (or datacenter) the tablet is in @@ -160,6 +174,20 @@ func (m *TabletAlias) String() string { return proto.CompactTextStrin func (*TabletAlias) ProtoMessage() {} func (*TabletAlias) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (m *TabletAlias) GetCell() string { + if m != nil { + return m.Cell + } + return "" +} + +func (m *TabletAlias) GetUid() uint32 { + if m != nil { + return m.Uid + } + return 0 +} + // Tablet represents information about a running instance of vttablet. type Tablet struct { // alias is the unique name of the tablet. 
@@ -198,6 +226,20 @@ func (m *Tablet) GetAlias() *TabletAlias { return nil } +func (m *Tablet) GetHostname() string { + if m != nil { + return m.Hostname + } + return "" +} + +func (m *Tablet) GetIp() string { + if m != nil { + return m.Ip + } + return "" +} + func (m *Tablet) GetPortMap() map[string]int32 { if m != nil { return m.PortMap @@ -205,6 +247,20 @@ func (m *Tablet) GetPortMap() map[string]int32 { return nil } +func (m *Tablet) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *Tablet) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} + func (m *Tablet) GetKeyRange() *KeyRange { if m != nil { return m.KeyRange @@ -212,6 +268,20 @@ func (m *Tablet) GetKeyRange() *KeyRange { return nil } +func (m *Tablet) GetType() TabletType { + if m != nil { + return m.Type + } + return TabletType_UNKNOWN +} + +func (m *Tablet) GetDbNameOverride() string { + if m != nil { + return m.DbNameOverride + } + return "" +} + func (m *Tablet) GetTags() map[string]string { if m != nil { return m.Tags @@ -282,6 +352,13 @@ func (m *Shard) GetSourceShards() []*Shard_SourceShard { return nil } +func (m *Shard) GetCells() []string { + if m != nil { + return m.Cells + } + return nil +} + func (m *Shard) GetTabletControls() []*Shard_TabletControl { if m != nil { return m.TabletControls @@ -300,6 +377,20 @@ func (m *Shard_ServedType) String() string { return proto.CompactText func (*Shard_ServedType) ProtoMessage() {} func (*Shard_ServedType) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} } +func (m *Shard_ServedType) GetTabletType() TabletType { + if m != nil { + return m.TabletType + } + return TabletType_UNKNOWN +} + +func (m *Shard_ServedType) GetCells() []string { + if m != nil { + return m.Cells + } + return nil +} + // SourceShard represents a data source for filtered replication // accross shards. When this is used in a destination shard, the master // of that shard will run filtered replication. 
@@ -321,6 +412,27 @@ func (m *Shard_SourceShard) String() string { return proto.CompactTex func (*Shard_SourceShard) ProtoMessage() {} func (*Shard_SourceShard) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 1} } +func (m *Shard_SourceShard) GetUid() uint32 { + if m != nil { + return m.Uid + } + return 0 +} + +func (m *Shard_SourceShard) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *Shard_SourceShard) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} + func (m *Shard_SourceShard) GetKeyRange() *KeyRange { if m != nil { return m.KeyRange @@ -328,6 +440,13 @@ func (m *Shard_SourceShard) GetKeyRange() *KeyRange { return nil } +func (m *Shard_SourceShard) GetTables() []string { + if m != nil { + return m.Tables + } + return nil +} + // TabletControl controls tablet's behavior type Shard_TabletControl struct { // which tablet type is affected @@ -343,6 +462,34 @@ func (m *Shard_TabletControl) String() string { return proto.CompactT func (*Shard_TabletControl) ProtoMessage() {} func (*Shard_TabletControl) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 2} } +func (m *Shard_TabletControl) GetTabletType() TabletType { + if m != nil { + return m.TabletType + } + return TabletType_UNKNOWN +} + +func (m *Shard_TabletControl) GetCells() []string { + if m != nil { + return m.Cells + } + return nil +} + +func (m *Shard_TabletControl) GetDisableQueryService() bool { + if m != nil { + return m.DisableQueryService + } + return false +} + +func (m *Shard_TabletControl) GetBlacklistedTables() []string { + if m != nil { + return m.BlacklistedTables + } + return nil +} + // A Keyspace contains data about a keyspace. 
type Keyspace struct { // name of the column used for sharding @@ -361,6 +508,20 @@ func (m *Keyspace) String() string { return proto.CompactTextString(m func (*Keyspace) ProtoMessage() {} func (*Keyspace) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (m *Keyspace) GetShardingColumnName() string { + if m != nil { + return m.ShardingColumnName + } + return "" +} + +func (m *Keyspace) GetShardingColumnType() KeyspaceIdType { + if m != nil { + return m.ShardingColumnType + } + return KeyspaceIdType_UNSET +} + func (m *Keyspace) GetServedFroms() []*Keyspace_ServedFrom { if m != nil { return m.ServedFroms @@ -384,6 +545,27 @@ func (m *Keyspace_ServedFrom) String() string { return proto.CompactT func (*Keyspace_ServedFrom) ProtoMessage() {} func (*Keyspace_ServedFrom) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} } +func (m *Keyspace_ServedFrom) GetTabletType() TabletType { + if m != nil { + return m.TabletType + } + return TabletType_UNKNOWN +} + +func (m *Keyspace_ServedFrom) GetCells() []string { + if m != nil { + return m.Cells + } + return nil +} + +func (m *Keyspace_ServedFrom) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + // ShardReplication describes the MySQL replication relationships // whithin a cell. 
type ShardReplication struct { @@ -433,6 +615,13 @@ func (m *ShardReference) String() string { return proto.CompactTextSt func (*ShardReference) ProtoMessage() {} func (*ShardReference) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (m *ShardReference) GetName() string { + if m != nil { + return m.Name + } + return "" +} + func (m *ShardReference) GetKeyRange() *KeyRange { if m != nil { return m.KeyRange @@ -462,6 +651,20 @@ func (m *SrvKeyspace) GetPartitions() []*SrvKeyspace_KeyspacePartition { return nil } +func (m *SrvKeyspace) GetShardingColumnName() string { + if m != nil { + return m.ShardingColumnName + } + return "" +} + +func (m *SrvKeyspace) GetShardingColumnType() KeyspaceIdType { + if m != nil { + return m.ShardingColumnType + } + return KeyspaceIdType_UNSET +} + func (m *SrvKeyspace) GetServedFrom() []*SrvKeyspace_ServedFrom { if m != nil { return m.ServedFrom @@ -483,6 +686,13 @@ func (*SrvKeyspace_KeyspacePartition) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} } +func (m *SrvKeyspace_KeyspacePartition) GetServedType() TabletType { + if m != nil { + return m.ServedType + } + return TabletType_UNKNOWN +} + func (m *SrvKeyspace_KeyspacePartition) GetShardReferences() []*ShardReference { if m != nil { return m.ShardReferences @@ -504,6 +714,20 @@ func (m *SrvKeyspace_ServedFrom) String() string { return proto.Compa func (*SrvKeyspace_ServedFrom) ProtoMessage() {} func (*SrvKeyspace_ServedFrom) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 1} } +func (m *SrvKeyspace_ServedFrom) GetTabletType() TabletType { + if m != nil { + return m.TabletType + } + return TabletType_UNKNOWN +} + +func (m *SrvKeyspace_ServedFrom) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + // CellInfo contains information about a cell. CellInfo objects are // stored in the global topology server, and describe how to reach // local topology servers. 
@@ -523,6 +747,20 @@ func (m *CellInfo) String() string { return proto.CompactTextString(m func (*CellInfo) ProtoMessage() {} func (*CellInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (m *CellInfo) GetServerAddress() string { + if m != nil { + return m.ServerAddress + } + return "" +} + +func (m *CellInfo) GetRoot() string { + if m != nil { + return m.Root + } + return "" +} + func init() { proto.RegisterType((*KeyRange)(nil), "topodata.KeyRange") proto.RegisterType((*TabletAlias)(nil), "topodata.TabletAlias") diff --git a/go/vt/proto/vschema/vschema.pb.go b/go/vt/proto/vschema/vschema.pb.go index dc15d87b4e9..7e269a2bd35 100644 --- a/go/vt/proto/vschema/vschema.pb.go +++ b/go/vt/proto/vschema/vschema.pb.go @@ -46,6 +46,13 @@ func (m *Keyspace) String() string { return proto.CompactTextString(m func (*Keyspace) ProtoMessage() {} func (*Keyspace) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *Keyspace) GetSharded() bool { + if m != nil { + return m.Sharded + } + return false +} + func (m *Keyspace) GetVindexes() map[string]*Vindex { if m != nil { return m.Vindexes @@ -82,6 +89,13 @@ func (m *Vindex) String() string { return proto.CompactTextString(m) func (*Vindex) ProtoMessage() {} func (*Vindex) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (m *Vindex) GetType() string { + if m != nil { + return m.Type + } + return "" +} + func (m *Vindex) GetParams() map[string]string { if m != nil { return m.Params @@ -89,6 +103,13 @@ func (m *Vindex) GetParams() map[string]string { return nil } +func (m *Vindex) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + // Table is the table info for a Keyspace. 
type Table struct { // If the table is a sequence, type must be @@ -106,6 +127,13 @@ func (m *Table) String() string { return proto.CompactTextString(m) } func (*Table) ProtoMessage() {} func (*Table) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (m *Table) GetType() string { + if m != nil { + return m.Type + } + return "" +} + func (m *Table) GetColumnVindexes() []*ColumnVindex { if m != nil { return m.ColumnVindexes @@ -132,6 +160,20 @@ func (m *ColumnVindex) String() string { return proto.CompactTextStri func (*ColumnVindex) ProtoMessage() {} func (*ColumnVindex) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (m *ColumnVindex) GetColumn() string { + if m != nil { + return m.Column + } + return "" +} + +func (m *ColumnVindex) GetName() string { + if m != nil { + return m.Name + } + return "" +} + // Autoincrement is used to designate a column as auto-inc. type AutoIncrement struct { Column string `protobuf:"bytes,1,opt,name=column" json:"column,omitempty"` @@ -144,6 +186,20 @@ func (m *AutoIncrement) String() string { return proto.CompactTextStr func (*AutoIncrement) ProtoMessage() {} func (*AutoIncrement) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (m *AutoIncrement) GetColumn() string { + if m != nil { + return m.Column + } + return "" +} + +func (m *AutoIncrement) GetSequence() string { + if m != nil { + return m.Sequence + } + return "" +} + // SrvVSchema is the roll-up of all the Keyspace schema for a cell. type SrvVSchema struct { // keyspaces is a map of keyspace name -> Keyspace object. 
diff --git a/go/vt/proto/vtctldata/vtctldata.pb.go b/go/vt/proto/vtctldata/vtctldata.pb.go index a112ca33c52..3a9025a0954 100644 --- a/go/vt/proto/vtctldata/vtctldata.pb.go +++ b/go/vt/proto/vtctldata/vtctldata.pb.go @@ -42,6 +42,20 @@ func (m *ExecuteVtctlCommandRequest) String() string { return proto.C func (*ExecuteVtctlCommandRequest) ProtoMessage() {} func (*ExecuteVtctlCommandRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *ExecuteVtctlCommandRequest) GetArgs() []string { + if m != nil { + return m.Args + } + return nil +} + +func (m *ExecuteVtctlCommandRequest) GetActionTimeout() int64 { + if m != nil { + return m.ActionTimeout + } + return 0 +} + // ExecuteVtctlCommandResponse is streamed back by ExecuteVtctlCommand. type ExecuteVtctlCommandResponse struct { Event *logutil.Event `protobuf:"bytes,1,opt,name=event" json:"event,omitempty"` diff --git a/go/vt/proto/vtgate/vtgate.pb.go b/go/vt/proto/vtgate/vtgate.pb.go index c0a6a28df6e..179b0591340 100644 --- a/go/vt/proto/vtgate/vtgate.pb.go +++ b/go/vt/proto/vtgate/vtgate.pb.go @@ -89,6 +89,13 @@ func (m *Session) String() string { return proto.CompactTextString(m) func (*Session) ProtoMessage() {} func (*Session) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *Session) GetInTransaction() bool { + if m != nil { + return m.InTransaction + } + return false +} + func (m *Session) GetShardSessions() []*Session_ShardSession { if m != nil { return m.ShardSessions @@ -96,6 +103,13 @@ func (m *Session) GetShardSessions() []*Session_ShardSession { return nil } +func (m *Session) GetSingleDb() bool { + if m != nil { + return m.SingleDb + } + return false +} + type Session_ShardSession struct { Target *query.Target `protobuf:"bytes,1,opt,name=target" json:"target,omitempty"` TransactionId int64 `protobuf:"varint,2,opt,name=transaction_id,json=transactionId" json:"transaction_id,omitempty"` @@ -113,6 +127,13 @@ func (m *Session_ShardSession) GetTarget() 
*query.Target { return nil } +func (m *Session_ShardSession) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + // ExecuteRequest is the payload to Execute. type ExecuteRequest struct { // caller_id identifies the caller. This is the effective caller ID, @@ -159,6 +180,27 @@ func (m *ExecuteRequest) GetQuery() *query.BoundQuery { return nil } +func (m *ExecuteRequest) GetTabletType() topodata.TabletType { + if m != nil { + return m.TabletType + } + return topodata.TabletType_UNKNOWN +} + +func (m *ExecuteRequest) GetNotInTransaction() bool { + if m != nil { + return m.NotInTransaction + } + return false +} + +func (m *ExecuteRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + func (m *ExecuteRequest) GetOptions() *query.ExecuteOptions { if m != nil { return m.Options @@ -252,6 +294,34 @@ func (m *ExecuteShardsRequest) GetQuery() *query.BoundQuery { return nil } +func (m *ExecuteShardsRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *ExecuteShardsRequest) GetShards() []string { + if m != nil { + return m.Shards + } + return nil +} + +func (m *ExecuteShardsRequest) GetTabletType() topodata.TabletType { + if m != nil { + return m.TabletType + } + return topodata.TabletType_UNKNOWN +} + +func (m *ExecuteShardsRequest) GetNotInTransaction() bool { + if m != nil { + return m.NotInTransaction + } + return false +} + func (m *ExecuteShardsRequest) GetOptions() *query.ExecuteOptions { if m != nil { return m.Options @@ -346,6 +416,34 @@ func (m *ExecuteKeyspaceIdsRequest) GetQuery() *query.BoundQuery { return nil } +func (m *ExecuteKeyspaceIdsRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *ExecuteKeyspaceIdsRequest) GetKeyspaceIds() [][]byte { + if m != nil { + return m.KeyspaceIds + } + return nil +} + +func (m *ExecuteKeyspaceIdsRequest) GetTabletType() topodata.TabletType { + if m != nil { + return 
m.TabletType + } + return topodata.TabletType_UNKNOWN +} + +func (m *ExecuteKeyspaceIdsRequest) GetNotInTransaction() bool { + if m != nil { + return m.NotInTransaction + } + return false +} + func (m *ExecuteKeyspaceIdsRequest) GetOptions() *query.ExecuteOptions { if m != nil { return m.Options @@ -440,6 +538,13 @@ func (m *ExecuteKeyRangesRequest) GetQuery() *query.BoundQuery { return nil } +func (m *ExecuteKeyRangesRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + func (m *ExecuteKeyRangesRequest) GetKeyRanges() []*topodata.KeyRange { if m != nil { return m.KeyRanges @@ -447,6 +552,20 @@ func (m *ExecuteKeyRangesRequest) GetKeyRanges() []*topodata.KeyRange { return nil } +func (m *ExecuteKeyRangesRequest) GetTabletType() topodata.TabletType { + if m != nil { + return m.TabletType + } + return topodata.TabletType_UNKNOWN +} + +func (m *ExecuteKeyRangesRequest) GetNotInTransaction() bool { + if m != nil { + return m.NotInTransaction + } + return false +} + func (m *ExecuteKeyRangesRequest) GetOptions() *query.ExecuteOptions { if m != nil { return m.Options @@ -543,6 +662,20 @@ func (m *ExecuteEntityIdsRequest) GetQuery() *query.BoundQuery { return nil } +func (m *ExecuteEntityIdsRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *ExecuteEntityIdsRequest) GetEntityColumnName() string { + if m != nil { + return m.EntityColumnName + } + return "" +} + func (m *ExecuteEntityIdsRequest) GetEntityKeyspaceIds() []*ExecuteEntityIdsRequest_EntityId { if m != nil { return m.EntityKeyspaceIds @@ -550,6 +683,20 @@ func (m *ExecuteEntityIdsRequest) GetEntityKeyspaceIds() []*ExecuteEntityIdsRequ return nil } +func (m *ExecuteEntityIdsRequest) GetTabletType() topodata.TabletType { + if m != nil { + return m.TabletType + } + return topodata.TabletType_UNKNOWN +} + +func (m *ExecuteEntityIdsRequest) GetNotInTransaction() bool { + if m != nil { + return m.NotInTransaction + } + return false +} + 
func (m *ExecuteEntityIdsRequest) GetOptions() *query.ExecuteOptions { if m != nil { return m.Options @@ -573,6 +720,27 @@ func (*ExecuteEntityIdsRequest_EntityId) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 0} } +func (m *ExecuteEntityIdsRequest_EntityId) GetType() query.Type { + if m != nil { + return m.Type + } + return query.Type_NULL_TYPE +} + +func (m *ExecuteEntityIdsRequest_EntityId) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *ExecuteEntityIdsRequest_EntityId) GetKeyspaceId() []byte { + if m != nil { + return m.KeyspaceId + } + return nil +} + // ExecuteEntityIdsResponse is the returned value from ExecuteEntityIds. type ExecuteEntityIdsResponse struct { // error contains an application level error if necessary. Note the @@ -660,6 +828,27 @@ func (m *ExecuteBatchRequest) GetQueries() []*query.BoundQuery { return nil } +func (m *ExecuteBatchRequest) GetTabletType() topodata.TabletType { + if m != nil { + return m.TabletType + } + return topodata.TabletType_UNKNOWN +} + +func (m *ExecuteBatchRequest) GetAsTransaction() bool { + if m != nil { + return m.AsTransaction + } + return false +} + +func (m *ExecuteBatchRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + func (m *ExecuteBatchRequest) GetOptions() *query.ExecuteOptions { if m != nil { return m.Options @@ -729,6 +918,20 @@ func (m *BoundShardQuery) GetQuery() *query.BoundQuery { return nil } +func (m *BoundShardQuery) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *BoundShardQuery) GetShards() []string { + if m != nil { + return m.Shards + } + return nil +} + // ExecuteBatchShardsRequest is the payload to ExecuteBatchShards type ExecuteBatchShardsRequest struct { // caller_id identifies the caller. 
This is the effective caller ID, @@ -775,6 +978,20 @@ func (m *ExecuteBatchShardsRequest) GetQueries() []*BoundShardQuery { return nil } +func (m *ExecuteBatchShardsRequest) GetTabletType() topodata.TabletType { + if m != nil { + return m.TabletType + } + return topodata.TabletType_UNKNOWN +} + +func (m *ExecuteBatchShardsRequest) GetAsTransaction() bool { + if m != nil { + return m.AsTransaction + } + return false +} + func (m *ExecuteBatchShardsRequest) GetOptions() *query.ExecuteOptions { if m != nil { return m.Options @@ -845,6 +1062,20 @@ func (m *BoundKeyspaceIdQuery) GetQuery() *query.BoundQuery { return nil } +func (m *BoundKeyspaceIdQuery) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *BoundKeyspaceIdQuery) GetKeyspaceIds() [][]byte { + if m != nil { + return m.KeyspaceIds + } + return nil +} + // ExecuteBatchKeyspaceIdsRequest is the payload to ExecuteBatchKeyspaceId. type ExecuteBatchKeyspaceIdsRequest struct { // caller_id identifies the caller. 
This is the effective caller ID, @@ -890,6 +1121,20 @@ func (m *ExecuteBatchKeyspaceIdsRequest) GetQueries() []*BoundKeyspaceIdQuery { return nil } +func (m *ExecuteBatchKeyspaceIdsRequest) GetTabletType() topodata.TabletType { + if m != nil { + return m.TabletType + } + return topodata.TabletType_UNKNOWN +} + +func (m *ExecuteBatchKeyspaceIdsRequest) GetAsTransaction() bool { + if m != nil { + return m.AsTransaction + } + return false +} + func (m *ExecuteBatchKeyspaceIdsRequest) GetOptions() *query.ExecuteOptions { if m != nil { return m.Options @@ -971,6 +1216,20 @@ func (m *StreamExecuteRequest) GetQuery() *query.BoundQuery { return nil } +func (m *StreamExecuteRequest) GetTabletType() topodata.TabletType { + if m != nil { + return m.TabletType + } + return topodata.TabletType_UNKNOWN +} + +func (m *StreamExecuteRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + func (m *StreamExecuteRequest) GetOptions() *query.ExecuteOptions { if m != nil { return m.Options @@ -1034,6 +1293,27 @@ func (m *StreamExecuteShardsRequest) GetQuery() *query.BoundQuery { return nil } +func (m *StreamExecuteShardsRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *StreamExecuteShardsRequest) GetShards() []string { + if m != nil { + return m.Shards + } + return nil +} + +func (m *StreamExecuteShardsRequest) GetTabletType() topodata.TabletType { + if m != nil { + return m.TabletType + } + return topodata.TabletType_UNKNOWN +} + func (m *StreamExecuteShardsRequest) GetOptions() *query.ExecuteOptions { if m != nil { return m.Options @@ -1100,6 +1380,27 @@ func (m *StreamExecuteKeyspaceIdsRequest) GetQuery() *query.BoundQuery { return nil } +func (m *StreamExecuteKeyspaceIdsRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *StreamExecuteKeyspaceIdsRequest) GetKeyspaceIds() [][]byte { + if m != nil { + return m.KeyspaceIds + } + return nil +} + +func (m 
*StreamExecuteKeyspaceIdsRequest) GetTabletType() topodata.TabletType { + if m != nil { + return m.TabletType + } + return topodata.TabletType_UNKNOWN +} + func (m *StreamExecuteKeyspaceIdsRequest) GetOptions() *query.ExecuteOptions { if m != nil { return m.Options @@ -1166,6 +1467,13 @@ func (m *StreamExecuteKeyRangesRequest) GetQuery() *query.BoundQuery { return nil } +func (m *StreamExecuteKeyRangesRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + func (m *StreamExecuteKeyRangesRequest) GetKeyRanges() []*topodata.KeyRange { if m != nil { return m.KeyRanges @@ -1173,6 +1481,13 @@ func (m *StreamExecuteKeyRangesRequest) GetKeyRanges() []*topodata.KeyRange { return nil } +func (m *StreamExecuteKeyRangesRequest) GetTabletType() topodata.TabletType { + if m != nil { + return m.TabletType + } + return topodata.TabletType_UNKNOWN +} + func (m *StreamExecuteKeyRangesRequest) GetOptions() *query.ExecuteOptions { if m != nil { return m.Options @@ -1222,6 +1537,13 @@ func (m *BeginRequest) GetCallerId() *vtrpc.CallerID { return nil } +func (m *BeginRequest) GetSingleDb() bool { + if m != nil { + return m.SingleDb + } + return false +} + // BeginResponse is the returned value from Begin. type BeginResponse struct { // session is the initial session information to use for subsequent queries. @@ -1271,6 +1593,13 @@ func (m *CommitRequest) GetSession() *Session { return nil } +func (m *CommitRequest) GetAtomic() bool { + if m != nil { + return m.Atomic + } + return false +} + // CommitResponse is the returned value from Commit. type CommitResponse struct { } @@ -1338,6 +1667,13 @@ func (m *ResolveTransactionRequest) GetCallerId() *vtrpc.CallerID { return nil } +func (m *ResolveTransactionRequest) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + // MessageStreamRequest is the request payload for MessageStream. type MessageStreamRequest struct { // caller_id identifies the caller. 
This is the effective caller ID, @@ -1365,6 +1701,20 @@ func (m *MessageStreamRequest) GetCallerId() *vtrpc.CallerID { return nil } +func (m *MessageStreamRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *MessageStreamRequest) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} + func (m *MessageStreamRequest) GetKeyRange() *topodata.KeyRange { if m != nil { return m.KeyRange @@ -1372,6 +1722,13 @@ func (m *MessageStreamRequest) GetKeyRange() *topodata.KeyRange { return nil } +func (m *MessageStreamRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + // MessageAckRequest is the request payload for MessageAck. type MessageAckRequest struct { // caller_id identifies the caller. This is the effective caller ID, @@ -1397,6 +1754,20 @@ func (m *MessageAckRequest) GetCallerId() *vtrpc.CallerID { return nil } +func (m *MessageAckRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *MessageAckRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + func (m *MessageAckRequest) GetIds() []*query.Value { if m != nil { return m.Ids @@ -1516,6 +1887,13 @@ func (m *SplitQueryRequest) GetCallerId() *vtrpc.CallerID { return nil } +func (m *SplitQueryRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + func (m *SplitQueryRequest) GetQuery() *query.BoundQuery { if m != nil { return m.Query @@ -1523,6 +1901,41 @@ func (m *SplitQueryRequest) GetQuery() *query.BoundQuery { return nil } +func (m *SplitQueryRequest) GetSplitColumn() []string { + if m != nil { + return m.SplitColumn + } + return nil +} + +func (m *SplitQueryRequest) GetSplitCount() int64 { + if m != nil { + return m.SplitCount + } + return 0 +} + +func (m *SplitQueryRequest) GetNumRowsPerQueryPart() int64 { + if m != nil { + return m.NumRowsPerQueryPart + } + return 0 +} + +func (m *SplitQueryRequest) GetAlgorithm() 
query.SplitQueryRequest_Algorithm { + if m != nil { + return m.Algorithm + } + return query.SplitQueryRequest_EQUAL_SPLITS +} + +func (m *SplitQueryRequest) GetUseSplitQueryV2() bool { + if m != nil { + return m.UseSplitQueryV2 + } + return false +} + // SplitQueryResponse is the returned value from SplitQuery. type SplitQueryResponse struct { // splits contains the queries to run to fetch the entire data set. @@ -1555,6 +1968,13 @@ func (*SplitQueryResponse_KeyRangePart) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38, 0} } +func (m *SplitQueryResponse_KeyRangePart) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + func (m *SplitQueryResponse_KeyRangePart) GetKeyRanges() []*topodata.KeyRange { if m != nil { return m.KeyRanges @@ -1576,6 +1996,20 @@ func (*SplitQueryResponse_ShardPart) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38, 1} } +func (m *SplitQueryResponse_ShardPart) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *SplitQueryResponse_ShardPart) GetShards() []string { + if m != nil { + return m.Shards + } + return nil +} + type SplitQueryResponse_Part struct { // query is the query and bind variables to execute. Query *query.BoundQuery `protobuf:"bytes,1,opt,name=query" json:"query,omitempty"` @@ -1614,6 +2048,13 @@ func (m *SplitQueryResponse_Part) GetShardPart() *SplitQueryResponse_ShardPart { return nil } +func (m *SplitQueryResponse_Part) GetSize() int64 { + if m != nil { + return m.Size + } + return 0 +} + // GetSrvKeyspaceRequest is the payload to GetSrvKeyspace. type GetSrvKeyspaceRequest struct { // keyspace name to fetch. 
@@ -1625,6 +2066,13 @@ func (m *GetSrvKeyspaceRequest) String() string { return proto.Compac func (*GetSrvKeyspaceRequest) ProtoMessage() {} func (*GetSrvKeyspaceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } +func (m *GetSrvKeyspaceRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + // GetSrvKeyspaceResponse is the returned value from GetSrvKeyspace. type GetSrvKeyspaceResponse struct { // srv_keyspace is the topology object for the SrvKeyspace. @@ -1679,6 +2127,20 @@ func (m *UpdateStreamRequest) GetCallerId() *vtrpc.CallerID { return nil } +func (m *UpdateStreamRequest) GetKeyspace() string { + if m != nil { + return m.Keyspace + } + return "" +} + +func (m *UpdateStreamRequest) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} + func (m *UpdateStreamRequest) GetKeyRange() *topodata.KeyRange { if m != nil { return m.KeyRange @@ -1686,6 +2148,20 @@ func (m *UpdateStreamRequest) GetKeyRange() *topodata.KeyRange { return nil } +func (m *UpdateStreamRequest) GetTabletType() topodata.TabletType { + if m != nil { + return m.TabletType + } + return topodata.TabletType_UNKNOWN +} + +func (m *UpdateStreamRequest) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + func (m *UpdateStreamRequest) GetEvent() *query.EventToken { if m != nil { return m.Event @@ -1717,6 +2193,13 @@ func (m *UpdateStreamResponse) GetEvent() *query.StreamEvent { return nil } +func (m *UpdateStreamResponse) GetResumeTimestamp() int64 { + if m != nil { + return m.ResumeTimestamp + } + return 0 +} + func init() { proto.RegisterType((*Session)(nil), "vtgate.Session") proto.RegisterType((*Session_ShardSession)(nil), "vtgate.Session.ShardSession") diff --git a/go/vt/proto/vtrpc/vtrpc.pb.go b/go/vt/proto/vtrpc/vtrpc.pb.go index 33fc6351b56..f88edca9057 100644 --- a/go/vt/proto/vtrpc/vtrpc.pb.go +++ b/go/vt/proto/vtrpc/vtrpc.pb.go @@ -316,6 +316,27 @@ func (m *CallerID) String() string { 
return proto.CompactTextString(m func (*CallerID) ProtoMessage() {} func (*CallerID) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *CallerID) GetPrincipal() string { + if m != nil { + return m.Principal + } + return "" +} + +func (m *CallerID) GetComponent() string { + if m != nil { + return m.Component + } + return "" +} + +func (m *CallerID) GetSubcomponent() string { + if m != nil { + return m.Subcomponent + } + return "" +} + // RPCError is an application-level error structure returned by // VtTablet (and passed along by VtGate if appropriate). // We use this so the clients don't have to parse the error messages, @@ -331,6 +352,27 @@ func (m *RPCError) String() string { return proto.CompactTextString(m func (*RPCError) ProtoMessage() {} func (*RPCError) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (m *RPCError) GetLegacyCode() LegacyErrorCode { + if m != nil { + return m.LegacyCode + } + return LegacyErrorCode_SUCCESS_LEGACY +} + +func (m *RPCError) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *RPCError) GetCode() Code { + if m != nil { + return m.Code + } + return Code_OK +} + func init() { proto.RegisterType((*CallerID)(nil), "vtrpc.CallerID") proto.RegisterType((*RPCError)(nil), "vtrpc.RPCError") diff --git a/go/vt/proto/vttest/vttest.pb.go b/go/vt/proto/vttest/vttest.pb.go index 4457ce4edb4..18bb04df568 100644 --- a/go/vt/proto/vttest/vttest.pb.go +++ b/go/vt/proto/vttest/vttest.pb.go @@ -47,6 +47,20 @@ func (m *Shard) String() string { return proto.CompactTextString(m) } func (*Shard) ProtoMessage() {} func (*Shard) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *Shard) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Shard) GetDbNameOverride() string { + if m != nil { + return m.DbNameOverride + } + return "" +} + // Keyspace describes a single keyspace. 
type Keyspace struct { // name has to be unique in a VTTestTopology. @@ -70,6 +84,13 @@ func (m *Keyspace) String() string { return proto.CompactTextString(m func (*Keyspace) ProtoMessage() {} func (*Keyspace) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (m *Keyspace) GetName() string { + if m != nil { + return m.Name + } + return "" +} + func (m *Keyspace) GetShards() []*Shard { if m != nil { return m.Shards @@ -77,6 +98,41 @@ func (m *Keyspace) GetShards() []*Shard { return nil } +func (m *Keyspace) GetShardingColumnName() string { + if m != nil { + return m.ShardingColumnName + } + return "" +} + +func (m *Keyspace) GetShardingColumnType() string { + if m != nil { + return m.ShardingColumnType + } + return "" +} + +func (m *Keyspace) GetServedFrom() string { + if m != nil { + return m.ServedFrom + } + return "" +} + +func (m *Keyspace) GetReplicaCount() int32 { + if m != nil { + return m.ReplicaCount + } + return 0 +} + +func (m *Keyspace) GetRdonlyCount() int32 { + if m != nil { + return m.RdonlyCount + } + return 0 +} + // VTTestTopology describes the keyspaces in the topology. type VTTestTopology struct { // all keyspaces in the topology. 
@@ -97,6 +153,13 @@ func (m *VTTestTopology) GetKeyspaces() []*Keyspace { return nil } +func (m *VTTestTopology) GetCells() []string { + if m != nil { + return m.Cells + } + return nil +} + func init() { proto.RegisterType((*Shard)(nil), "vttest.Shard") proto.RegisterType((*Keyspace)(nil), "vttest.Keyspace") diff --git a/go/vt/proto/vtworkerdata/vtworkerdata.pb.go b/go/vt/proto/vtworkerdata/vtworkerdata.pb.go index 71559a88fbe..c15c4236931 100644 --- a/go/vt/proto/vtworkerdata/vtworkerdata.pb.go +++ b/go/vt/proto/vtworkerdata/vtworkerdata.pb.go @@ -40,6 +40,13 @@ func (m *ExecuteVtworkerCommandRequest) String() string { return prot func (*ExecuteVtworkerCommandRequest) ProtoMessage() {} func (*ExecuteVtworkerCommandRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *ExecuteVtworkerCommandRequest) GetArgs() []string { + if m != nil { + return m.Args + } + return nil +} + // ExecuteVtworkerCommandResponse is streamed back by ExecuteVtworkerCommand. type ExecuteVtworkerCommandResponse struct { Event *logutil.Event `protobuf:"bytes,1,opt,name=event" json:"event,omitempty"` diff --git a/go/vt/proto/workflow/workflow.pb.go b/go/vt/proto/workflow/workflow.pb.go index cf5eeccff26..cb3cf7a64d6 100644 --- a/go/vt/proto/workflow/workflow.pb.go +++ b/go/vt/proto/workflow/workflow.pb.go @@ -123,6 +123,62 @@ func (m *Workflow) String() string { return proto.CompactTextString(m func (*Workflow) ProtoMessage() {} func (*Workflow) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *Workflow) GetUuid() string { + if m != nil { + return m.Uuid + } + return "" +} + +func (m *Workflow) GetFactoryName() string { + if m != nil { + return m.FactoryName + } + return "" +} + +func (m *Workflow) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Workflow) GetState() WorkflowState { + if m != nil { + return m.State + } + return WorkflowState_NotStarted +} + +func (m *Workflow) GetData() []byte { + if m != 
nil { + return m.Data + } + return nil +} + +func (m *Workflow) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func (m *Workflow) GetStartTime() int64 { + if m != nil { + return m.StartTime + } + return 0 +} + +func (m *Workflow) GetEndTime() int64 { + if m != nil { + return m.EndTime + } + return 0 +} + type WorkflowCheckpoint struct { // code_version is used to detect incompabilities between the version of the // running workflow and the one which wrote the checkpoint. If they don't @@ -142,6 +198,13 @@ func (m *WorkflowCheckpoint) String() string { return proto.CompactTe func (*WorkflowCheckpoint) ProtoMessage() {} func (*WorkflowCheckpoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (m *WorkflowCheckpoint) GetCodeVersion() int32 { + if m != nil { + return m.CodeVersion + } + return 0 +} + func (m *WorkflowCheckpoint) GetTasks() map[string]*Task { if m != nil { return m.Tasks @@ -169,6 +232,20 @@ func (m *Task) String() string { return proto.CompactTextString(m) } func (*Task) ProtoMessage() {} func (*Task) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (m *Task) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Task) GetState() TaskState { + if m != nil { + return m.State + } + return TaskState_TaskNotStarted +} + func (m *Task) GetAttributes() map[string]string { if m != nil { return m.Attributes @@ -176,6 +253,13 @@ func (m *Task) GetAttributes() map[string]string { return nil } +func (m *Task) GetError() string { + if m != nil { + return m.Error + } + return "" +} + func init() { proto.RegisterType((*Workflow)(nil), "workflow.Workflow") proto.RegisterType((*WorkflowCheckpoint)(nil), "workflow.WorkflowCheckpoint") diff --git a/vendor/vendor.json b/vendor/vendor.json index 025960a2531..90b6ee03194 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -385,32 +385,32 @@ { "checksumSHA1": "ZIqcmeEc/EU9HsKJHuJ1Iy+ZNgE=", "path": 
"github.com/golang/protobuf/protoc-gen-go", - "revision": "2bc9827a78f95c6665b5fe0abd1fd66b496ae2d8", - "revisionTime": "2016-11-03T22:44:32Z" + "revision": "69b215d01a5606c843240eab4937eab3acee6530", + "revisionTime": "2017-02-17T23:44:32Z" }, { - "checksumSHA1": "juNiTc9bfhQYo4BkWc83sW4Z5gw=", + "checksumSHA1": "AjyXQ5eohrCPS/jSWZFPn5E8wnQ=", "path": "github.com/golang/protobuf/protoc-gen-go/descriptor", - "revision": "2bc9827a78f95c6665b5fe0abd1fd66b496ae2d8", - "revisionTime": "2016-11-03T22:44:32Z" + "revision": "69b215d01a5606c843240eab4937eab3acee6530", + "revisionTime": "2017-02-17T23:44:32Z" }, { - "checksumSHA1": "lPJ5a2uV2CPHch++4zKkJ1au0sw=", + "checksumSHA1": "T/EqMkqzvjQUL1c+yN32kketgfE=", "path": "github.com/golang/protobuf/protoc-gen-go/generator", - "revision": "2bc9827a78f95c6665b5fe0abd1fd66b496ae2d8", - "revisionTime": "2016-11-03T22:44:32Z" + "revision": "69b215d01a5606c843240eab4937eab3acee6530", + "revisionTime": "2017-02-17T23:44:32Z" }, { "checksumSHA1": "u5V5OglAZoibucYHK3OtIFYM+w0=", "path": "github.com/golang/protobuf/protoc-gen-go/grpc", - "revision": "2bc9827a78f95c6665b5fe0abd1fd66b496ae2d8", - "revisionTime": "2016-11-03T22:44:32Z" + "revision": "69b215d01a5606c843240eab4937eab3acee6530", + "revisionTime": "2017-02-17T23:44:32Z" }, { "checksumSHA1": "zps2+aJoFhpFf2F8TsU9zCGXL2c=", "path": "github.com/golang/protobuf/protoc-gen-go/plugin", - "revision": "2bc9827a78f95c6665b5fe0abd1fd66b496ae2d8", - "revisionTime": "2016-11-03T22:44:32Z" + "revision": "69b215d01a5606c843240eab4937eab3acee6530", + "revisionTime": "2017-02-17T23:44:32Z" }, { "checksumSHA1": "/vLtyN6HK5twSZIFerD199YTmjk=", From 60e76d88faaec72849f057a96031529de2607103 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Wed, 22 Feb 2017 22:27:52 -0800 Subject: [PATCH 025/108] Add a TLS mysqlconn -> real MySQL test. In the process, allow a vttest user to specify extra my.cnf. 
--- go/mysqlconn/client.go | 11 ++++-- go/mysqlconn/client_test.go | 69 ++++++++++++++++++++++++++++++++- go/vt/vttest/local_cluster.go | 11 ++++++ py/vttest/run_local_database.py | 10 ++++- 4 files changed, 95 insertions(+), 6 deletions(-) diff --git a/go/mysqlconn/client.go b/go/mysqlconn/client.go index 318cf70864d..342f8cdecee 100644 --- a/go/mysqlconn/client.go +++ b/go/mysqlconn/client.go @@ -208,13 +208,18 @@ func (c *Conn) clientHandshake(characterSet uint8, params *sqldb.ConnParams) err } // The ServerName to verify depends on what the hostname is. + // - If using a socket, we use "localhost". // - If it is an IP address, we need to prefix it with 'IP:'. // - If not, we can just use it as is. // We may need to add a ServerName field to ConnParams to // make this more explicit. - serverName := params.Host - if net.ParseIP(params.Host) != nil { - serverName = "IP:" + params.Host + serverName := "localhost" + if params.Host != "" { + if net.ParseIP(params.Host) != nil { + serverName = "IP:" + params.Host + } else { + serverName = params.Host + } } // Build the TLS config. diff --git a/go/mysqlconn/client_test.go b/go/mysqlconn/client_test.go index cb83fc98ca7..f1b3ec259fb 100644 --- a/go/mysqlconn/client_test.go +++ b/go/mysqlconn/client_test.go @@ -5,6 +5,7 @@ import ( "io/ioutil" "net" "os" + "path" "strings" "sync" "testing" @@ -13,6 +14,7 @@ import ( "golang.org/x/net/context" "github.com/youtube/vitess/go/sqldb" + "github.com/youtube/vitess/go/vt/tlstest" "github.com/youtube/vitess/go/vt/vttest" ) @@ -169,13 +171,65 @@ func testDupEntryWithRealDatabase(t *testing.T, params *sqldb.ConnParams) { assertSQLError(t, err, ERDupEntry, SSDupKey, "Duplicate entry") } +// testTLS tests our client can connect via SSL. +func testTLS(t *testing.T, params *sqldb.ConnParams) { + // First make sure the official 'mysql' client can connect. 
+ output, ok := runMysql(t, params, "status") + if !ok { + t.Fatalf("'mysql -e status' failed: %v", output) + } + if !strings.Contains(output, "Cipher in use is") { + t.Fatalf("cannot connect via SSL: %v", output) + } + + // Now connect with our client. + ctx := context.Background() + conn, err := Connect(ctx, params) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + result, err := conn.ExecuteFetch("SHOW STATUS LIKE 'Ssl_cipher'", 10, true) + if err != nil { + t.Fatalf("SHOW STATUS LIKE 'Ssl_cipher' failed: %v", err) + } + if len(result.Rows) != 1 || result.Rows[0][0].String() != "Ssl_cipher" || + result.Rows[0][1].String() == "" { + t.Fatalf("SHOW STATUS LIKE 'Ssl_cipher' returned unexpected result: %v", result) + } +} + // TestWithRealDatabase runs a real MySQL database, and runs all kinds // of tests on it. To minimize overhead, we only run one database, and // run all the tests on it. func TestWithRealDatabase(t *testing.T) { + // Create the certs. + root, err := ioutil.TempDir("", "TestTLSServer") + if err != nil { + t.Fatalf("TempDir failed: %v", err) + } + defer os.RemoveAll(root) + tlstest.CreateCA(root) + tlstest.CreateSignedCert(root, tlstest.CA, "01", "server", "localhost") + tlstest.CreateSignedCert(root, tlstest.CA, "02", "client", "Client Cert") + + // Create the extra SSL my.cnf lines. + cnf := fmt.Sprintf(` +ssl-ca=%v/ca-cert.pem +ssl-cert=%v/server-cert.pem +ssl-key=%v/server-key.pem +`, root, root, root) + extraMyCnf := path.Join(root, "ssl_my.cnf") + if err := ioutil.WriteFile(extraMyCnf, []byte(cnf), os.ModePerm); err != nil { + t.Fatalf("ioutil.WriteFile(%v) failed: %v", extraMyCnf, err) + } + + // Launch MySQL. hdl, err := vttest.LaunchVitess( vttest.MySQLOnly("vttest"), - vttest.NoStderr()) + vttest.NoStderr(), + vttest.ExtraMyCnf(extraMyCnf)) if err != nil { t.Fatal(err) } @@ -224,4 +278,17 @@ func TestWithRealDatabase(t *testing.T) { t.Run("Schema", func(t *testing.T) { testSchema(t, ¶ms) }) + + // Test SSL. 
First we make sure a real 'mysql' client gets it. + params.Flags = CapabilityClientSSL + params.SslCa = path.Join(root, "ca-cert.pem") + params.SslCert = path.Join(root, "client-cert.pem") + params.SslKey = path.Join(root, "client-key.pem") + t.Run("TLS", func(t *testing.T) { + testTLS(t, ¶ms) + }) + + // Uncomment to sleep and be able to connect to MySQL + // fmt.Printf("Connect to MySQL using parameters: %v\n", params) + // time.Sleep(10 * time.Minute) } diff --git a/go/vt/vttest/local_cluster.go b/go/vt/vttest/local_cluster.go index ec650ccd8c3..df7a14cf9af 100644 --- a/go/vt/vttest/local_cluster.go +++ b/go/vt/vttest/local_cluster.go @@ -176,6 +176,17 @@ func Schema(schema string) VitessOption { } } +// ExtraMyCnf adds one or more 'my.cnf'-style config files to MySQL. +// (if more than one, the ':' separator should be used). +func ExtraMyCnf(extraMyCnf string) VitessOption { + return VitessOption{ + beforeRun: func(hdl *Handle) error { + hdl.cmd.Args = append(hdl.cmd.Args, "--extra_my_cnf", extraMyCnf) + return nil + }, + } +} + // InitDataOptions contain the command line arguments that configure // initialization of vttest with random data. 
See the documentation of // the corresponding command line flags in py/vttest/run_local_database.py diff --git a/py/vttest/run_local_database.py b/py/vttest/run_local_database.py index 95a5e0fe2ee..a3901bd76d0 100755 --- a/py/vttest/run_local_database.py +++ b/py/vttest/run_local_database.py @@ -84,6 +84,10 @@ def main(cmdline_options): init_data_opts.max_table_shard_size = cmdline_options.max_table_shard_size init_data_opts.null_probability = cmdline_options.null_probability + extra_my_cnf = os.path.join(os.environ['VTTOP'], 'config/mycnf/vtcombo.cnf') + if cmdline_options.extra_my_cnf: + extra_my_cnf += ':' + cmdline_options.extra_my_cnf + with local_database.LocalDatabase( topology, cmdline_options.schema_dir, @@ -92,8 +96,7 @@ def main(cmdline_options): web_dir=cmdline_options.web_dir, web_dir2=cmdline_options.web_dir2, default_schema_dir=cmdline_options.default_schema_dir, - extra_my_cnf=os.path.join( - os.environ['VTTOP'], 'config/mycnf/vtcombo.cnf')) as local_db: + extra_my_cnf=extra_my_cnf) as local_db: print json.dumps(local_db.config()) sys.stdout.flush() try: @@ -167,6 +170,9 @@ def main(cmdline_options): parser.add_option( '--web_dir2', help='location of the vtctld2 web server files.') + parser.add_option( + '-f', '--extra_my_cnf', + help='extra files to add to the config, separated by ":"') parser.add_option( '-v', '--verbose', action='store_true', help='Display extra error messages.') From be9568f7ca754ec8cad2053824afd96c93d7156c Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Thu, 23 Feb 2017 09:33:35 -0500 Subject: [PATCH 026/108] just skip any unparseable rbr events --- .../replication/binlog_event_common.go | 36 ++++++++++--------- .../replication/binlog_event_make_test.go | 6 ++-- go/vt/binlog/binlog_streamer.go | 12 +++++-- 3 files changed, 33 insertions(+), 21 deletions(-) diff --git a/go/mysqlconn/replication/binlog_event_common.go b/go/mysqlconn/replication/binlog_event_common.go index d84c18ce8bf..c420276cca0 100644 --- 
a/go/mysqlconn/replication/binlog_event_common.go +++ b/go/mysqlconn/replication/binlog_event_common.go @@ -397,28 +397,28 @@ func cellLength(data []byte, pos int, tmc *TableMapColumn) (int, error) { // FIXME(alainjobart) are the ints signed? It seems Tiny is unsigned, // but the others are. -func cellData(data []byte, pos int, tmc *TableMapColumn) (string, int) { +func cellData(data []byte, pos int, tmc *TableMapColumn) (string, int, error) { switch tmc.Type { case TypeTiny: - return fmt.Sprintf("%v", data[pos]), 1 + return fmt.Sprintf("%v", data[pos]), 1, nil case TypeShort, TypeYear: val := binary.LittleEndian.Uint16(data[pos : pos+2]) - return fmt.Sprintf("%v", val), 2 + return fmt.Sprintf("%v", val), 2, nil case TypeLong, TypeInt24: val := binary.LittleEndian.Uint32(data[pos : pos+4]) - return fmt.Sprintf("%v", val), 4 + return fmt.Sprintf("%v", val), 4, nil case TypeLongLong: val := binary.LittleEndian.Uint64(data[pos : pos+8]) - return fmt.Sprintf("%v", val), 8 + return fmt.Sprintf("%v", val), 8, nil case TypeTimestamp, TypeDate, TypeTime, TypeDateTime: - panic(fmt.Errorf("NYI")) + panic(fmt.Errorf("Not yet implemented type %v", tmc.Type)) case TypeVarchar: // Varchar length is two bytes here. l := int(uint64(data[pos]) | uint64(data[pos+1])<<8) - return string(data[pos+2 : pos+2+l]), 2 + l + return string(data[pos+2 : pos+2+l]), 2 + l, nil default: - panic(fmt.Errorf("Unsupported type %v", tmc.Type)) + return "", 0, fmt.Errorf("Unsupported type %v", tmc.Type) } } @@ -554,8 +554,7 @@ func (ev binlogEvent) Rows(f BinlogFormat, tm *TableMap) (Rows, error) { } // StringValues is a helper method to return the string value of all columns in a row in a Row. -// Will panic if anything goes wrong, this is meant for tests for now. 
-func (rs *Rows) StringValues(tm *TableMap, rowIndex int) []string { +func (rs *Rows) StringValues(tm *TableMap, rowIndex int) ([]string, error) { var result []string valueIndex := 0 @@ -574,18 +573,20 @@ func (rs *Rows) StringValues(tm *TableMap, rowIndex int) []string { } // We have real data - value, l := cellData(data, pos, &tm.Columns[c]) + value, l, err := cellData(data, pos, &tm.Columns[c]) + if err != nil { + return nil, err + } result = append(result, value) pos += l valueIndex++ } - return result + return result, nil } // StringIdentifies is a helper method to return the string identify of all columns in a row in a Row. -// Will panic if anything goes wrong, this is meant for tests for now. -func (rs *Rows) StringIdentifies(tm *TableMap, rowIndex int) []string { +func (rs *Rows) StringIdentifies(tm *TableMap, rowIndex int) ([]string, error) { var result []string valueIndex := 0 @@ -604,11 +605,14 @@ func (rs *Rows) StringIdentifies(tm *TableMap, rowIndex int) []string { } // We have real data - value, l := cellData(data, pos, &tm.Columns[c]) + value, l, err := cellData(data, pos, &tm.Columns[c]) + if err != nil { + return nil, err + } result = append(result, value) pos += l valueIndex++ } - return result + return result, nil } diff --git a/go/mysqlconn/replication/binlog_event_make_test.go b/go/mysqlconn/replication/binlog_event_make_test.go index 24fc1bc5ccc..7ea027edcf3 100644 --- a/go/mysqlconn/replication/binlog_event_make_test.go +++ b/go/mysqlconn/replication/binlog_event_make_test.go @@ -317,11 +317,11 @@ func TestRowsEvent(t *testing.T) { // Test the Rows we just created, to be sure. // 1076895760 is 0x40302010. 
- identifies := rows.StringIdentifies(tm, 0) + identifies, err := rows.StringIdentifies(tm, 0) if expected := []string{"1076895760", "abc"}; !reflect.DeepEqual(identifies, expected) { t.Fatalf("bad Rows idenfity, got %v expected %v", identifies, expected) } - values := rows.StringValues(tm, 0) + values, err := rows.StringValues(tm, 0) if expected := []string{"1076895760", "abcd"}; !reflect.DeepEqual(values, expected) { t.Fatalf("bad Rows data, got %v expected %v", values, expected) } @@ -334,7 +334,7 @@ func TestRowsEvent(t *testing.T) { t.Fatalf("NewRowsEvent().IsUpdateRows() if false") } - event, _, err := event.StripChecksum(f) + event, _, err = event.StripChecksum(f) if err != nil { t.Fatalf("StripChecksum failed: %v", err) } diff --git a/go/vt/binlog/binlog_streamer.go b/go/vt/binlog/binlog_streamer.go index 5ea5421aa65..8c9d19758bb 100644 --- a/go/vt/binlog/binlog_streamer.go +++ b/go/vt/binlog/binlog_streamer.go @@ -388,8 +388,16 @@ func (bls *Streamer) parseEvents(ctx context.Context, events <-chan replication. 
} statements = append(statements, setTimestamp) for i := range rows.Rows { - identifies := rows.StringIdentifies(tm, i) - values := rows.StringValues(tm, i) + identifies, err := rows.StringIdentifies(tm, i) + if err != nil { + log.Warningf("Failed to parse UPDATE due to error %v", err) + continue + } + values, err := rows.StringValues(tm, i) + if err != nil { + log.Warningf("Failed to parse UPDATE due to error %v", err) + continue + } update := &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_UPDATE, Sql: []byte(fmt.Sprintf("WIP: update table %v set values = %v where identifies = %v", tm.Name, values, identifies)), From 61f482f9095d48de58ccb862a5c8d5a15b84cf2f Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Thu, 23 Feb 2017 10:12:39 -0500 Subject: [PATCH 027/108] fix test --- go/mysqlconn/replication_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/go/mysqlconn/replication_test.go b/go/mysqlconn/replication_test.go index 8cf0370d20e..7e6cef5cfd5 100644 --- a/go/mysqlconn/replication_test.go +++ b/go/mysqlconn/replication_test.go @@ -543,7 +543,7 @@ func testRowReplicationWithRealDatabase(t *testing.T, params *sqldb.ConnParams) } // Check it has 2 rows, and first value is '10', second value is 'nice name'. - values := wr.StringValues(tableMap, 0) + values, _ := wr.StringValues(tableMap, 0) t.Logf("Got WriteRows event data: %v %v", wr, values) if expected := []string{"10", "nice name"}; !reflect.DeepEqual(values, expected) { t.Fatalf("StringValues returned %v, expected %v", values, expected) @@ -560,14 +560,14 @@ func testRowReplicationWithRealDatabase(t *testing.T, params *sqldb.ConnParams) } // Check it has 2 identify rows, and first value is '10', second value is 'nice name'. 
- values := ur.StringIdentifies(tableMap, 0) + values, _ := ur.StringIdentifies(tableMap, 0) t.Logf("Got UpdateRows event identify: %v %v", ur, values) if expected := []string{"10", "nice name"}; !reflect.DeepEqual(values, expected) { t.Fatalf("StringIdentifies returned %v, expected %v", values, expected) } // Check it has 2 values rows, and first value is '10', second value is 'nicer name'. - values = ur.StringValues(tableMap, 0) + values, _ = ur.StringValues(tableMap, 0) t.Logf("Got UpdateRows event data: %v %v", ur, values) if expected := []string{"10", "nicer name"}; !reflect.DeepEqual(values, expected) { t.Fatalf("StringValues returned %v, expected %v", values, expected) @@ -584,7 +584,7 @@ func testRowReplicationWithRealDatabase(t *testing.T, params *sqldb.ConnParams) } // Check it has 2 rows, and first value is '10', second value is 'nicer name'. - values := dr.StringIdentifies(tableMap, 0) + values, _ := dr.StringIdentifies(tableMap, 0) t.Logf("Got DeleteRows event identify: %v %v", dr, values) if expected := []string{"10", "nicer name"}; !reflect.DeepEqual(values, expected) { t.Fatalf("StringIdentifies returned %v, expected %v", values, expected) From 0bdc2d206a67237f7a43da220ba43e52a7a6c96b Mon Sep 17 00:00:00 2001 From: Erez Louidor Date: Thu, 16 Feb 2017 13:31:03 -0800 Subject: [PATCH 028/108] Moved creation of topo.Server outside of tx_throttler. 
--- go/cmd/vtcombo/tablet_map.go | 2 +- go/cmd/vttablet/vttablet.go | 4 +- .../tabletserver/endtoend/framework/server.go | 2 +- go/vt/tabletserver/message_manager_test.go | 20 ++--- go/vt/tabletserver/messager_engine_test.go | 10 +-- go/vt/tabletserver/query_executor_test.go | 2 +- go/vt/tabletserver/tabletserver.go | 17 ++++- go/vt/tabletserver/tabletserver_test.go | 76 +++++++++---------- .../tabletserver/txthrottler/tx_throttler.go | 20 ++--- .../txthrottler/tx_throttler_test.go | 8 +- .../wait_for_filtered_replication_test.go | 2 +- 11 files changed, 83 insertions(+), 80 deletions(-) diff --git a/go/cmd/vtcombo/tablet_map.go b/go/cmd/vtcombo/tablet_map.go index 00744a116f3..940403638e1 100644 --- a/go/cmd/vtcombo/tablet_map.go +++ b/go/cmd/vtcombo/tablet_map.go @@ -65,7 +65,7 @@ func createTablet(ctx context.Context, ts topo.Server, cell string, uid uint32, log.Infof("Creating %v tablet %v for %v/%v", tabletType, topoproto.TabletAliasString(alias), keyspace, shard) flag.Set("debug-url-prefix", fmt.Sprintf("/debug-%d", uid)) - controller := tabletserver.NewServer() + controller := tabletserver.NewServer(ts) initTabletType := tabletType if tabletType == topodatapb.TabletType_MASTER { initTabletType = topodatapb.TabletType_REPLICA diff --git a/go/cmd/vttablet/vttablet.go b/go/cmd/vttablet/vttablet.go index 75347b53184..9a2e79f1d1c 100644 --- a/go/cmd/vttablet/vttablet.go +++ b/go/cmd/vttablet/vttablet.go @@ -74,7 +74,8 @@ func main() { } // creates and registers the query service - qsc := tabletserver.NewServer() + ts := topo.Open() + qsc := tabletserver.NewServer(ts) servenv.OnRun(func() { qsc.Register() addStatusParts(qsc) @@ -118,7 +119,6 @@ func main() { if servenv.GRPCPort != nil { gRPCPort = int32(*servenv.GRPCPort) } - ts := topo.Open() agent, err = tabletmanager.NewActionAgent(context.Background(), ts, mysqld, qsc, tabletAlias, *dbcfgs, mycnf, int32(*servenv.Port), gRPCPort) if err != nil { log.Error(err) diff --git 
a/go/vt/tabletserver/endtoend/framework/server.go b/go/vt/tabletserver/endtoend/framework/server.go index 0c7c0069a93..a672597debf 100644 --- a/go/vt/tabletserver/endtoend/framework/server.go +++ b/go/vt/tabletserver/endtoend/framework/server.go @@ -71,7 +71,7 @@ func StartServer(connParams sqldb.ConnParams) error { TabletType: topodatapb.TabletType_MASTER, } - Server = tabletserver.NewTabletServer(config) + Server = tabletserver.NewTabletServerWithNilTopoServer(config) Server.Register() err := Server.StartService(Target, dbcfgs, mysqld) if err != nil { diff --git a/go/vt/tabletserver/message_manager_test.go b/go/vt/tabletserver/message_manager_test.go index 2466e0df233..cd0b861b1b6 100644 --- a/go/vt/tabletserver/message_manager_test.go +++ b/go/vt/tabletserver/message_manager_test.go @@ -110,7 +110,7 @@ func TestReceiverEOF(t *testing.T) { testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() config.TransactionCap = 1 - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -150,7 +150,7 @@ func TestMessageManagerState(t *testing.T) { testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() config.TransactionCap = 1 - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -188,7 +188,7 @@ func TestMessageManagerAdd(t *testing.T) { testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() config.TransactionCap = 1 - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} 
err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -241,7 +241,7 @@ func TestMessageManagerSend(t *testing.T) { testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() config.TransactionCap = 1 - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -303,7 +303,7 @@ func TestMessageManagerBatchSend(t *testing.T) { testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() config.TransactionCap = 1 - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -361,7 +361,7 @@ func TestMessageManagerPoller(t *testing.T) { testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() config.TransactionCap = 1 - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -450,7 +450,7 @@ func TestMessagesPending1(t *testing.T) { testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() config.TransactionCap = 1 - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -530,7 +530,7 @@ func TestMessagesPending2(t *testing.T) { testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() config.TransactionCap = 1 - tsv := NewTabletServer(config) + tsv := 
NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -596,7 +596,7 @@ func TestMMGenerate(t *testing.T) { testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() config.TransactionCap = 1 - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -662,7 +662,7 @@ func TestMMGenerate(t *testing.T) { func newMMConnPool(db *fakesqldb.DB) *connpool.Pool { testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) pool := connpool.New( config.PoolNamePrefix+"MesasgeConnPool", diff --git a/go/vt/tabletserver/messager_engine_test.go b/go/vt/tabletserver/messager_engine_test.go index 3b4cce3f5a7..f9a7cb10bf2 100644 --- a/go/vt/tabletserver/messager_engine_test.go +++ b/go/vt/tabletserver/messager_engine_test.go @@ -28,7 +28,7 @@ func TestMESchemaChanged(t *testing.T) { testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() config.TransactionCap = 1 - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -93,7 +93,7 @@ func TestSubscribe(t *testing.T) { testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() config.TransactionCap = 1 - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: 
topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -134,7 +134,7 @@ func TestLockDB(t *testing.T) { testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() config.TransactionCap = 1 - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -206,7 +206,7 @@ func TestMESendDiscard(t *testing.T) { testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() config.TransactionCap = 1 - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -253,7 +253,7 @@ func TestMEGenerate(t *testing.T) { testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() config.TransactionCap = 1 - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) diff --git a/go/vt/tabletserver/query_executor_test.go b/go/vt/tabletserver/query_executor_test.go index a1d2c87d0e0..8c5e781b12e 100644 --- a/go/vt/tabletserver/query_executor_test.go +++ b/go/vt/tabletserver/query_executor_test.go @@ -1295,7 +1295,7 @@ func newTestTabletServer(ctx context.Context, flags executorFlags, db *fakesqldb } else { config.TwoPCAbandonAge = 10 } - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) testUtils := newTestUtils() dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} diff --git 
a/go/vt/tabletserver/tabletserver.go b/go/vt/tabletserver/tabletserver.go index 9fa05bcc4f7..8f0985de090 100644 --- a/go/vt/tabletserver/tabletserver.go +++ b/go/vt/tabletserver/tabletserver.go @@ -38,6 +38,7 @@ import ( "github.com/youtube/vitess/go/vt/tabletserver/splitquery" "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/tabletserver/txthrottler" + "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/utils" "github.com/youtube/vitess/go/vt/vterrors" @@ -122,6 +123,7 @@ type TabletServer struct { // txThrottler is used to throttle transactions based on the observed replication lag. txThrottler *txthrottler.TxThrottler + topoServer topo.Server // streamHealthMutex protects all the following fields streamHealthMutex sync.Mutex @@ -150,15 +152,21 @@ type MySQLChecker interface { } // NewServer creates a new TabletServer based on the command line flags. -func NewServer() *TabletServer { - return NewTabletServer(tabletenv.Config) +func NewServer(topoServer topo.Server) *TabletServer { + return NewTabletServer(tabletenv.Config, topoServer) } var tsOnce sync.Once +// NewTabletServerWithNilTopoServer is typically used in tests that don't need a topoSever +// member. +func NewTabletServerWithNilTopoServer(config tabletenv.TabletConfig) *TabletServer { + return NewTabletServer(config, topo.Server{}) +} + // NewTabletServer creates an instance of TabletServer. Only one instance // of TabletServer can be created per process. 
-func NewTabletServer(config tabletenv.TabletConfig) *TabletServer { +func NewTabletServer(config tabletenv.TabletConfig, topoServer topo.Server) *TabletServer { tsv := &TabletServer{ QueryTimeout: sync2.NewAtomicDuration(time.Duration(config.QueryTimeout * 1e9)), BeginTimeout: sync2.NewAtomicDuration(time.Duration(config.TxPoolTimeout * 1e9)), @@ -166,11 +174,12 @@ func NewTabletServer(config tabletenv.TabletConfig) *TabletServer { checkMySQLThrottler: sync2.NewSemaphore(1, 0), streamHealthMap: make(map[int]chan<- *querypb.StreamHealthResponse), history: history.New(10), + topoServer: topoServer, } tsv.se = schema.NewEngine(tsv, config) tsv.qe = NewQueryEngine(tsv, tsv.se, config) tsv.te = NewTxEngine(tsv, config) - tsv.txThrottler = txthrottler.CreateTxThrottlerFromTabletConfig() + tsv.txThrottler = txthrottler.CreateTxThrottlerFromTabletConfig(topoServer) tsv.messager = NewMessagerEngine(tsv, config) tsv.watcher = NewReplicationWatcher(tsv.se, config) tsv.updateStreamList = &binlog.StreamList{} diff --git a/go/vt/tabletserver/tabletserver_test.go b/go/vt/tabletserver/tabletserver_test.go index e42d7ac145e..c0889f24562 100644 --- a/go/vt/tabletserver/tabletserver_test.go +++ b/go/vt/tabletserver/tabletserver_test.go @@ -50,7 +50,7 @@ func TestTabletServerGetState(t *testing.T) { } testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) for i, state := range states { tsv.setState(state) if stateName := tsv.GetState(); stateName != names[i] { @@ -69,7 +69,7 @@ func TestTabletServerAllowQueriesFailBadConn(t *testing.T) { db.EnableConnFail() testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) checkTabletServerState(t, tsv, StateNotConnected) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} @@ -85,7 +85,7 @@ func 
TestTabletServerAllowQueries(t *testing.T) { defer db.Close() testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) checkTabletServerState(t, tsv, StateNotConnected) dbconfigs := testUtils.newDBConfigs(db) tsv.setState(StateServing) @@ -109,7 +109,7 @@ func TestTabletServerInitDBConfig(t *testing.T) { defer db.Close() testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) tsv.setState(StateServing) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} dbconfigs := testUtils.newDBConfigs(db) @@ -130,7 +130,7 @@ func TestDecideAction(t *testing.T) { defer db.Close() testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} dbconfigs := testUtils.newDBConfigs(db) err := tsv.InitDBConfig(target, dbconfigs, nil) @@ -237,7 +237,7 @@ func TestSetServingType(t *testing.T) { defer db.Close() testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.InitDBConfig(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -319,7 +319,7 @@ func TestTabletServerSingleSchemaFailure(t *testing.T) { testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} originalSchemaErrorCount := tabletenv.InternalErrors.Counts()["Schema"] @@ -351,7 +351,7 @@ func TestTabletServerAllSchemaFailure(t *testing.T) { 
testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -368,7 +368,7 @@ func TestTabletServerCheckMysql(t *testing.T) { defer db.Close() testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -397,7 +397,7 @@ func TestTabletServerCheckMysqlFailInvalidConn(t *testing.T) { defer db.Close() testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -415,7 +415,7 @@ func TestTabletServerCheckMysqlFailInvalidConn(t *testing.T) { func TestTabletServerCheckMysqlInUnintialized(t *testing.T) { testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) // TabletServer start request fail because we are in StateNotConnected; // however, isMySQLReachable should return true. Here, we always assume // MySQL is healthy unless we've verified it is not. 
@@ -444,7 +444,7 @@ func TestTabletServerReconnect(t *testing.T) { db.AddQuery("select addr from test_table where 1 != 1", &sqltypes.Result{}) testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -492,7 +492,7 @@ func TestTabletServerTarget(t *testing.T) { defer db.Close() testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target1 := querypb.Target{ Keyspace: "test_keyspace", @@ -952,7 +952,7 @@ func TestTabletServerBeginFail(t *testing.T) { testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() config.TransactionCap = 1 - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -987,7 +987,7 @@ func TestTabletServerCommitTransaction(t *testing.T) { } db.AddQuery(executeSQL, executeSQLResult) config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -1013,7 +1013,7 @@ func TestTabletServerCommiRollbacktFail(t *testing.T) { defer db.Close() testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: 
topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -1050,7 +1050,7 @@ func TestTabletServerRollback(t *testing.T) { } db.AddQuery(executeSQL, executeSQLResult) config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -1154,7 +1154,7 @@ func TestTabletServerStreamExecute(t *testing.T) { db.AddQuery(executeSQL, executeSQLResult) config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -1181,7 +1181,7 @@ func TestTabletServerExecuteBatch(t *testing.T) { db.AddQuery(sql, sqlResult) db.AddQuery(expanedSQL, sqlResult) config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -1206,7 +1206,7 @@ func TestTabletServerExecuteBatchFailEmptyQueryList(t *testing.T) { defer db.Close() testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -1227,7 +1227,7 @@ func TestTabletServerExecuteBatchFailAsTransaction(t *testing.T) { defer db.Close() testUtils := newTestUtils() config := 
testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -1255,7 +1255,7 @@ func TestTabletServerExecuteBatchBeginFail(t *testing.T) { // make "begin" query fail db.AddRejectedQuery("begin", errRejected) config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -1281,7 +1281,7 @@ func TestTabletServerExecuteBatchCommitFail(t *testing.T) { // make "commit" query fail db.AddRejectedQuery("commit", errRejected) config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -1320,7 +1320,7 @@ func TestTabletServerExecuteBatchSqlExecFailInTransaction(t *testing.T) { db.AddRejectedQuery(expanedSQL, errRejected) config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -1363,7 +1363,7 @@ func TestTabletServerExecuteBatchSqlSucceedInTransaction(t *testing.T) { config := testUtils.newQueryServiceConfig() config.EnableAutoCommit = true - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := 
querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -1387,7 +1387,7 @@ func TestTabletServerExecuteBatchCallCommitWithoutABegin(t *testing.T) { defer db.Close() testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -1417,7 +1417,7 @@ func TestExecuteBatchNestedTransaction(t *testing.T) { db.AddQuery(sql, sqlResult) db.AddQuery(expanedSQL, sqlResult) config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -1669,7 +1669,7 @@ func TestTabletServerSplitQuery(t *testing.T) { }) testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_RDONLY} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -1700,7 +1700,7 @@ func TestTabletServerSplitQueryInvalidQuery(t *testing.T) { defer db.Close() testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_RDONLY} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -1730,7 +1730,7 @@ func TestTabletServerSplitQueryInvalidParams(t *testing.T) { defer db.Close() testUtils := 
newTestUtils() config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_RDONLY} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -1759,7 +1759,7 @@ func TestTabletServerSplitQueryEqualSplitsOnStringColumn(t *testing.T) { defer db.Close() testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_RDONLY} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) @@ -1793,7 +1793,7 @@ func TestHandleExecUnknownError(t *testing.T) { var err error testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) defer tsv.handlePanicAndSendLogStats("select * from test_table", nil, &err, logStats) panic("unknown exec error") } @@ -1801,7 +1801,7 @@ func TestHandleExecUnknownError(t *testing.T) { func TestHandleExecTabletError(t *testing.T) { testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) err := tsv.convertError( "select * from test_table", nil, @@ -1817,7 +1817,7 @@ func TestTerseErrorsNonSQLError(t *testing.T) { testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() config.TerseErrors = true - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) err := tsv.convertError( "select * from test_table", nil, @@ -1833,7 +1833,7 @@ func TestTerseErrorsBindVars(t *testing.T) { testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() config.TerseErrors = true - tsv := NewTabletServer(config) + tsv := 
NewTabletServerWithNilTopoServer(config) err := tsv.convertError( "select * from test_table", map[string]interface{}{"a": 1}, @@ -1849,7 +1849,7 @@ func TestTerseErrorsNoBindVars(t *testing.T) { testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() config.TerseErrors = true - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) err := tsv.convertError("", nil, vterrors.Errorf(vtrpcpb.Code_DEADLINE_EXCEEDED, "msg")) want := "msg" if err == nil || err.Error() != want { @@ -1861,7 +1861,7 @@ func TestTerseErrorsIgnoreFailoverInProgress(t *testing.T) { testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() config.TerseErrors = true - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) err := tsv.convertError("select * from test_table where id = :a", map[string]interface{}{"a": 1}, @@ -1877,7 +1877,7 @@ func TestConfigChanges(t *testing.T) { defer db.Close() testUtils := newTestUtils() config := testUtils.newQueryServiceConfig() - tsv := NewTabletServer(config) + tsv := NewTabletServerWithNilTopoServer(config) dbconfigs := testUtils.newDBConfigs(db) target := querypb.Target{TabletType: topodatapb.TabletType_MASTER} err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) diff --git a/go/vt/tabletserver/txthrottler/tx_throttler.go b/go/vt/tabletserver/txthrottler/tx_throttler.go index 30d567c1ab2..e46e7ed8a22 100644 --- a/go/vt/tabletserver/txthrottler/tx_throttler.go +++ b/go/vt/tabletserver/txthrottler/tx_throttler.go @@ -23,7 +23,8 @@ import ( // It uses a discovery.HealthCheck to send replication-lag updates to the wrapped throttler. // // Intended Usage: -// t := CreateTxThrottlerFromTabletConfig() +// // Assuming topoServer is a topo.Server variable pointing to a Vitess topology server. 
+// t := CreateTxThrottlerFromTabletConfig(topoServer) // // // A transaction throttler must be opened before its first use: // if err := t.Open(keyspace, shard); err != nil { @@ -59,8 +60,8 @@ type TxThrottler struct { // any error occurs. // This function calls tryCreateTxThrottler that does the actual creation work // and returns an error if one occurred. -func CreateTxThrottlerFromTabletConfig() *TxThrottler { - txThrottler, err := tryCreateTxThrottler() +func CreateTxThrottlerFromTabletConfig(topoServer topo.Server) *TxThrottler { + txThrottler, err := tryCreateTxThrottler(topoServer) if err != nil { log.Errorf("Error creating transaction throttler. Transaction throttling will"+ " be disabled. Error: %v", err) @@ -74,7 +75,7 @@ func CreateTxThrottlerFromTabletConfig() *TxThrottler { return txThrottler } -func tryCreateTxThrottler() (*TxThrottler, error) { +func tryCreateTxThrottler(topoServer topo.Server) (*TxThrottler, error) { if !tabletenv.Config.EnableTxThrottler { return newTxThrottler(&txThrottlerConfig{enabled: false}) } @@ -91,6 +92,7 @@ func tryCreateTxThrottler() (*TxThrottler, error) { return newTxThrottler(&txThrottlerConfig{ enabled: true, + topoServer: topoServer, throttlerConfig: &throttlerConfig, healthCheckCells: healthCheckCells, }) @@ -104,6 +106,7 @@ type txThrottlerConfig struct { // returns false. enabled bool + topoServer topo.Server throttlerConfig *throttlerdatapb.Configuration // healthCheckCells stores the cell names in which running vttablets will be monitored for // replication lag. @@ -139,7 +142,6 @@ type txThrottlerState struct { throttleMu sync.Mutex throttler ThrottlerInterface - topoServer topo.Server healthCheck discovery.HealthCheck topologyWatchers []TopologyWatcherInterface } @@ -147,13 +149,11 @@ type txThrottlerState struct { // These vars store the functions used to create the topo server, healthcheck, // topology watchers and go/vt/throttler. 
These are provided here so that they can be overridden // in tests to generate mocks. -type topoServerFactoryFunc func() topo.Server type healthCheckFactoryFunc func() discovery.HealthCheck type topologyWatcherFactoryFunc func(topoServer topo.Server, tr discovery.TabletRecorder, cell, keyspace, shard string, refreshInterval time.Duration, topoReadConcurrency int) TopologyWatcherInterface type throttlerFactoryFunc func(name, unit string, threadCount int, maxRate, maxReplicationLag int64) (ThrottlerInterface, error) var ( - topoServerFactory topoServerFactoryFunc healthCheckFactory healthCheckFactoryFunc topologyWatcherFactory topologyWatcherFactoryFunc throttlerFactory throttlerFactoryFunc @@ -164,7 +164,6 @@ func init() { } func resetTxThrottlerFactories() { - topoServerFactory = topo.Open healthCheckFactory = discovery.NewDefaultHealthCheck topologyWatcherFactory = func(topoServer topo.Server, tr discovery.TabletRecorder, cell, keyspace, shard string, refreshInterval time.Duration, topoReadConcurrency int) TopologyWatcherInterface { return discovery.NewShardReplicationWatcher( @@ -255,7 +254,6 @@ func newTxThrottlerState(config *txThrottlerConfig, keyspace, shard string, result := &txThrottlerState{ throttler: t, } - result.topoServer = topoServerFactory() result.healthCheck = healthCheckFactory() result.healthCheck.SetListener(result, false /* sendDownEvents */) result.topologyWatchers = make( @@ -264,7 +262,7 @@ func newTxThrottlerState(config *txThrottlerConfig, keyspace, shard string, result.topologyWatchers = append( result.topologyWatchers, topologyWatcherFactory( - result.topoServer, + config.topoServer, result.healthCheck, /* TabletRecorder */ cell, keyspace, @@ -297,8 +295,6 @@ func (ts *txThrottlerState) deallocateResources() { ts.healthCheck.Close() ts.healthCheck = nil - ts.topoServer.Close() - // After ts.healthCheck is closed txThrottlerState.StatsUpdate() is guaranteed not // to be executing, so we can safely close the throttler. 
ts.throttler.Close() diff --git a/go/vt/tabletserver/txthrottler/tx_throttler_test.go b/go/vt/tabletserver/txthrottler/tx_throttler_test.go index 047f09b74fd..169b65b9ff9 100644 --- a/go/vt/tabletserver/txthrottler/tx_throttler_test.go +++ b/go/vt/tabletserver/txthrottler/tx_throttler_test.go @@ -17,7 +17,7 @@ func TestDisabledThrottler(t *testing.T) { oldConfig := tabletenv.Config defer func() { tabletenv.Config = oldConfig }() tabletenv.Config.EnableTxThrottler = false - throttler := CreateTxThrottlerFromTabletConfig() + throttler := CreateTxThrottlerFromTabletConfig(topo.Server{}) if err := throttler.Open("keyspace", "shard"); err != nil { t.Fatalf("want: nil, got: %v", err) } @@ -32,9 +32,7 @@ func TestEnabledThrottler(t *testing.T) { defer mockCtrl.Finish() defer resetTxThrottlerFactories() - mockTopoServer, mockImpl := NewMockServer(mockCtrl) - mockImpl.EXPECT().Close() - topoServerFactory = func() topo.Server { return mockTopoServer } + mockTopoServer, _ := NewMockServer(mockCtrl) mockHealthCheck := NewMockHealthCheck(mockCtrl) var hcListener discovery.HealthCheckStatsListener @@ -95,7 +93,7 @@ func TestEnabledThrottler(t *testing.T) { tabletenv.Config.EnableTxThrottler = true tabletenv.Config.TxThrottlerHealthCheckCells = []string{"cell1", "cell2"} - throttler, err := tryCreateTxThrottler() + throttler, err := tryCreateTxThrottler(mockTopoServer) if err != nil { t.Fatalf("want: nil, got: %v", err) } diff --git a/go/vt/wrangler/testlib/wait_for_filtered_replication_test.go b/go/vt/wrangler/testlib/wait_for_filtered_replication_test.go index 14ea0dccb32..371de0ceb0a 100644 --- a/go/vt/wrangler/testlib/wait_for_filtered_replication_test.go +++ b/go/vt/wrangler/testlib/wait_for_filtered_replication_test.go @@ -111,7 +111,7 @@ func waitForFilteredReplication(t *testing.T, expectedErr string, initialStats * dest.Agent.BinlogPlayerMap = tabletmanager.NewBinlogPlayerMap(ts, nil, nil) // Use real, but trimmed down QueryService. 
- qs := tabletserver.NewTabletServer(tabletenv.DefaultQsConfig) + qs := tabletserver.NewTabletServerWithNilTopoServer(tabletenv.DefaultQsConfig) grpcqueryservice.Register(dest.RPCServer, qs) qs.BroadcastHealth(42, initialStats) From 2dfccb4d947b6c87236141784f2ac7b250c9a5c5 Mon Sep 17 00:00:00 2001 From: Erez Louidor Date: Wed, 22 Feb 2017 16:06:28 -0800 Subject: [PATCH 029/108] Added documentation for the replication-lag-based-throttler --- ...icatoinLagBasedThrottlingOfTransactions.md | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 doc/ReplicatoinLagBasedThrottlingOfTransactions.md diff --git a/doc/ReplicatoinLagBasedThrottlingOfTransactions.md b/doc/ReplicatoinLagBasedThrottlingOfTransactions.md new file mode 100644 index 00000000000..ee70279d877 --- /dev/null +++ b/doc/ReplicatoinLagBasedThrottlingOfTransactions.md @@ -0,0 +1,42 @@ +# Replication Lag Based Throttling of Transactions +Vitess supports throttling of transactions based on replication lag. When this +feature is turned on, each VTTablet master monitors the replication lag from +the replicas, and based on the observed replication lag tries to rate-limit the +received transactions to keep the replication lag under a configured limit. + +The decision of whether to throttle a transaction is done in the "BEGIN" +statement rather than in the "COMMIT" statement to avoid having a transaction +perform a lot of work just to eventually be throttled and potentially +rolled-back if the open-transaction timeout is exceeded. + +If a BEGIN statement is throttled the client receives the gRPC UNAVAILABLE +error code. + +The following VTTablet command line flags control the replication-lag based +throttler: + +* *enable-tx-throttler* + +A boolean flag controlling whether the replication-lag-based throttling is enabled. 
 + +* *tx-throttler-config* + +A text-format representation of the [throttlerdata.Configuration](https://github.com/youtube/vitess/blob/master/proto/throttlerdata.proto) protocol buffer +that contains configuration options for the throttler. +The most important fields in that message are *target_replication_lag_sec* and +*max_replication_lag_sec* that specify the desired limits on the replication lag. See the comments in the protocol definition file for more details. +If this is not specified a [default](https://github.com/youtube/vitess/blob/master/go/vt/tabletserver/tabletenv/config.go) configuration will be used. + +* *tx-throttler-healthcheck-cells* + +A comma separated list of datacenter cells. The throttler will only monitor +the non-RDONLY replicas found in these cells for replication lag. + +# Caveats and Known Issues +* The throttler keeps trying to explore the maximum rate possible while keeping +the replication lag under the desired limit; as such the desired replication +lag limit may occasionally be slightly violated. + +* Transactions are considered homogeneous. There is currently no support +for specifying how "expensive" a transaction is. + From 4202a2d562e99fa0720537fdd2694ea2b4454aa4 Mon Sep 17 00:00:00 2001 From: Yipei Wang Date: Thu, 23 Feb 2017 18:14:41 -0800 Subject: [PATCH 030/108] workflow: Remove RetryController object and simplify ParallelRunner retry logic. 
--- go/vt/workflow/node.go | 1 + go/vt/workflow/node_test.go | 2 +- .../horizontal_resharding_workflow_test.go | 55 ++---------- ...er.go => mock_resharding_wrangler_test.go} | 0 go/vt/workflow/resharding/parallel_runner.go | 85 +++++++++++-------- go/vt/workflow/resharding/retry_controller.go | 37 -------- 6 files changed, 55 insertions(+), 125 deletions(-) rename go/vt/workflow/resharding/{mock_resharding_wrangler.go => mock_resharding_wrangler_test.go} (100%) delete mode 100644 go/vt/workflow/resharding/retry_controller.go diff --git a/go/vt/workflow/node.go b/go/vt/workflow/node.go index 11aaa3bf951..f3f6c1ca4f7 100644 --- a/go/vt/workflow/node.go +++ b/go/vt/workflow/node.go @@ -418,6 +418,7 @@ func (m *NodeManager) Action(ctx context.Context, ap *ActionParameters) error { if n.Listener == nil { return fmt.Errorf("Action %v is invoked on a node without listener (node path is %v)", ap.Name, ap.Path) } + return n.Listener.Action(ctx, ap.Path, ap.Name) } diff --git a/go/vt/workflow/node_test.go b/go/vt/workflow/node_test.go index 12a667b442d..5c8152070c8 100644 --- a/go/vt/workflow/node_test.go +++ b/go/vt/workflow/node_test.go @@ -85,7 +85,7 @@ func TestNodeManagerWithRoot(t *testing.T) { t.Errorf("unexpected Action error: %v", err) } if len(tw.actions) != 1 || tw.actions[0].Path != n.Path || tw.actions[0].Name != "action" { - t.Errorf("unexpected Ation callback values: %v", tw.actions) + t.Errorf("unexpected Action callback values: %v", tw.actions) } // Delete root node, make sure we get notified. 
diff --git a/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go b/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go index fe5096e737d..1d9c151eebb 100644 --- a/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go +++ b/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go @@ -136,17 +136,7 @@ func setupMockWranglerForRetry(ctx context.Context, ctrl *gomock.Controller) *Mo mockWranglerInterface := NewMockReshardingWrangler(ctrl) // Set the expected behaviors for mock wrangler. copy schema to shard 80- // should not be called. - mockWranglerInterface.EXPECT().CopySchemaShardFromShard( - ctx, - nil, /* tableArray*/ - nil, /* excludeTableArray */ - true, /*includeViews*/ - "test_keyspace", - "0", - "test_keyspace", - "-80", - wrangler.DefaultWaitSlaveTimeout).Return(nil) - + mockWranglerInterface.EXPECT().CopySchemaShardFromShard(ctx, nil /* tableArray*/, nil /* excludeTableArray */, true /*includeViews*/, "test_keyspace", "0", "test_keyspace", "-80", wrangler.DefaultWaitSlaveTimeout).Return(nil) mockWranglerInterface.EXPECT().WaitForFilteredReplication(ctx, "test_keyspace", "-80", wrangler.DefaultWaitForFilteredReplicationMaxDelay).Return(nil) mockWranglerInterface.EXPECT().WaitForFilteredReplication(ctx, "test_keyspace", "80-", wrangler.DefaultWaitForFilteredReplicationMaxDelay).Return(nil) @@ -154,15 +144,7 @@ func setupMockWranglerForRetry(ctx context.Context, ctrl *gomock.Controller) *Mo topodatapb.TabletType_REPLICA, topodatapb.TabletType_MASTER} for _, servedType := range servedTypeParams { - mockWranglerInterface.EXPECT().MigrateServedTypes( - ctx, - "test_keyspace", - "0", - nil, /* cells */ - servedType, - false, /* reverse */ - false, /* skipReFreshState */ - wrangler.DefaultFilteredReplicationWaitTime).Return(nil) + mockWranglerInterface.EXPECT().MigrateServedTypes(ctx, "test_keyspace", "0", nil /* cells */, servedType, false /* reverse */, false /* skipReFreshState */, 
wrangler.DefaultFilteredReplicationWaitTime).Return(nil) } return mockWranglerInterface } @@ -170,27 +152,8 @@ func setupMockWranglerForRetry(ctx context.Context, ctrl *gomock.Controller) *Mo func setupMockWrangler(ctx context.Context, ctrl *gomock.Controller) *MockReshardingWrangler { mockWranglerInterface := NewMockReshardingWrangler(ctrl) // Set the expected behaviors for mock wrangler. - mockWranglerInterface.EXPECT().CopySchemaShardFromShard( - ctx, - nil, /* tableArray*/ - nil, /* excludeTableArray */ - true, /*includeViews*/ - "test_keyspace", - "0", - "test_keyspace", - "-80", - wrangler.DefaultWaitSlaveTimeout).Return(nil) - - mockWranglerInterface.EXPECT().CopySchemaShardFromShard( - ctx, - nil, /* tableArray*/ - nil, /* excludeTableArray */ - true, /*includeViews*/ - "test_keyspace", - "0", - "test_keyspace", - "80-", - wrangler.DefaultWaitSlaveTimeout).Return(nil) + mockWranglerInterface.EXPECT().CopySchemaShardFromShard(ctx, nil /* tableArray*/, nil /* excludeTableArray */, true /*includeViews*/, "test_keyspace", "0", "test_keyspace", "-80", wrangler.DefaultWaitSlaveTimeout).Return(nil) + mockWranglerInterface.EXPECT().CopySchemaShardFromShard(ctx, nil /* tableArray*/, nil /* excludeTableArray */, true /*includeViews*/, "test_keyspace", "0", "test_keyspace", "80-", wrangler.DefaultWaitSlaveTimeout).Return(nil) mockWranglerInterface.EXPECT().WaitForFilteredReplication(ctx, "test_keyspace", "-80", wrangler.DefaultWaitForFilteredReplicationMaxDelay).Return(nil) mockWranglerInterface.EXPECT().WaitForFilteredReplication(ctx, "test_keyspace", "80-", wrangler.DefaultWaitForFilteredReplicationMaxDelay).Return(nil) @@ -199,15 +162,7 @@ func setupMockWrangler(ctx context.Context, ctrl *gomock.Controller) *MockReshar topodatapb.TabletType_REPLICA, topodatapb.TabletType_MASTER} for _, servedType := range servedTypeParams { - mockWranglerInterface.EXPECT().MigrateServedTypes( - ctx, - "test_keyspace", - "0", - nil, /* cells */ - servedType, - false, /* reverse */ - 
false, /* skipReFreshState */ - wrangler.DefaultFilteredReplicationWaitTime).Return(nil) + mockWranglerInterface.EXPECT().MigrateServedTypes(ctx, "test_keyspace", "0", nil /* cells */, servedType, false /* reverse */, false /* skipReFreshState */, wrangler.DefaultFilteredReplicationWaitTime).Return(nil) } return mockWranglerInterface } diff --git a/go/vt/workflow/resharding/mock_resharding_wrangler.go b/go/vt/workflow/resharding/mock_resharding_wrangler_test.go similarity index 100% rename from go/vt/workflow/resharding/mock_resharding_wrangler.go rename to go/vt/workflow/resharding/mock_resharding_wrangler_test.go diff --git a/go/vt/workflow/resharding/parallel_runner.go b/go/vt/workflow/resharding/parallel_runner.go index ac7e504d375..0e76e32c38c 100644 --- a/go/vt/workflow/resharding/parallel_runner.go +++ b/go/vt/workflow/resharding/parallel_runner.go @@ -2,6 +2,7 @@ package resharding import ( "fmt" + "strings" "sync" log "github.com/golang/glog" @@ -32,11 +33,13 @@ type ParallelRunner struct { concurrencyLevel level executeFunc func(context.Context, *workflowpb.Task) error - // mu is used to protect the retryActionRegistry. + // mu is used to protect the access to retryActionRegistry and + // serialize UI node changes. mu sync.Mutex - // retryAtionRegistry stores the data for retry actions. - // Each task can retrieve its RetryController through its UI node path. - retryActionRegistry map[string]*RetryController + // retryActionRegistry stores the data for retry actions. + // Each task can retrieve the channel for synchronizing retrying + // through its UI node path. + retryActionRegistry map[string]chan struct{} // reportTaskStatus gives the worklflow debug option to output the task // status through UI. 
@@ -55,7 +58,7 @@ func NewParallelRunner(ctx context.Context, rootUINode *workflow.Node, cp *Check tasks: tasks, executeFunc: executeFunc, concurrencyLevel: concurrencyLevel, - retryActionRegistry: make(map[string]*RetryController), + retryActionRegistry: make(map[string]chan struct{}), reportTaskStatus: false, } } @@ -111,7 +114,7 @@ func (p *ParallelRunner) Run() error { return default: } - retryChannel, nodePath := p.addRetryAction(taskID) + retryChannel := p.addRetryAction(taskID) // Block the task execution until the retry action is triggered // or the context is canceled. @@ -119,7 +122,6 @@ func (p *ParallelRunner) Run() error { case <-retryChannel: continue case <-p.ctx.Done(): - p.unregisterRetryController(nodePath) return } } @@ -138,57 +140,66 @@ func (p *ParallelRunner) Run() error { func (p *ParallelRunner) Action(ctx context.Context, path, name string) error { switch name { case "Retry": - return p.triggerRetry(path) + // Extract the path relative to the root node. + parts := strings.Split(path, "/") + taskID := strings.Join(parts[2:], "/") + return p.triggerRetry(taskID) default: return fmt.Errorf("Unknown action: %v", name) } } -func (p *ParallelRunner) triggerRetry(nodePath string) error { +func (p *ParallelRunner) triggerRetry(taskID string) error { p.mu.Lock() defer p.mu.Unlock() - c, ok := p.retryActionRegistry[nodePath] + + // Unregister the retry channel. + retryChannel, ok := p.retryActionRegistry[taskID] if !ok { - return fmt.Errorf("Unregistered action for node: %v", nodePath) + return fmt.Errorf("Unregistered action for node: %v", taskID) } - p.unregisterRetryControllerLocked(nodePath) - c.triggerRetry() - return nil -} + delete(p.retryActionRegistry, taskID) -func (p *ParallelRunner) addRetryAction(taskID string) (chan struct{}, string) { + // Disable the retry action and synchronize for retrying the job. 
node, err := p.rootUINode.GetChildByPath(taskID) if err != nil { - panic(fmt.Errorf("node on child path %v not found", taskID)) + panic(fmt.Sprintf("BUG: node on child path %v not found", taskID)) } - - p.mu.Lock() - defer p.mu.Unlock() - retryController := CreateRetryController(node, p /* actionListener */) - p.registerRetryControllerLocked(node.Path, retryController) + if len(node.Actions) == 0 { + panic(fmt.Sprintf("BUG: node actions should not be empty")) + } + node.Actions = []*workflow.Action{} node.BroadcastChanges(false /* updateChildren */) - return retryController.retryChannel, node.Path + close(retryChannel) + return nil } -func (p *ParallelRunner) registerRetryControllerLocked(nodePath string, c *RetryController) { - if _, ok := p.retryActionRegistry[nodePath]; ok { - panic(fmt.Errorf("duplicate retry action for node: %v", nodePath)) +func (p *ParallelRunner) addRetryAction(taskID string) chan struct{} { + node, err := p.rootUINode.GetChildByPath(taskID) + if err != nil { + panic(fmt.Sprintf("BUG: node on child path %v not found", taskID)) } - p.retryActionRegistry[nodePath] = c -} -func (p *ParallelRunner) unregisterRetryController(nodePath string) { p.mu.Lock() - p.mu.Unlock() - p.unregisterRetryControllerLocked(nodePath) -} + defer p.mu.Unlock() -func (p *ParallelRunner) unregisterRetryControllerLocked(nodePath string) { - if _, ok := p.retryActionRegistry[nodePath]; !ok { - log.Warningf("retry action for node: %v doesn't exist, cannot unregister it", nodePath) - } else { - delete(p.retryActionRegistry, nodePath) + // Register the channel for synchronizing retrying job. + if _, ok := p.retryActionRegistry[taskID]; ok { + panic(fmt.Sprintf("BUG: duplicate retry action for node: %v", taskID)) + } + retryChannel := make(chan struct{}) + p.retryActionRegistry[taskID] = retryChannel + + // Enable retry action on the node. 
+ retryAction := &workflow.Action{ + Name: "Retry", + State: workflow.ActionStateEnabled, + Style: workflow.ActionStyleWaiting, } + node.Actions = []*workflow.Action{retryAction} + node.Listener = p + node.BroadcastChanges(false /* updateChildren */) + return retryChannel } func (p *ParallelRunner) setFinishUIMessage(taskID string) { diff --git a/go/vt/workflow/resharding/retry_controller.go b/go/vt/workflow/resharding/retry_controller.go deleted file mode 100644 index 6ae0482d7d6..00000000000 --- a/go/vt/workflow/resharding/retry_controller.go +++ /dev/null @@ -1,37 +0,0 @@ -package resharding - -import "github.com/youtube/vitess/go/vt/workflow" - -// RetryController stores the data for controlling the retry action. -type RetryController struct { - node *workflow.Node - // retryChannel is used to trigger the retrying of task - // when pressing the button. - retryChannel chan struct{} -} - -// CreateRetryController create a RetryController for a specific node and -// enable the retry action on the node. -func CreateRetryController(node *workflow.Node, actionListener workflow.ActionListener) *RetryController { - retryAction := &workflow.Action{ - Name: "Retry", - State: workflow.ActionStateEnabled, - Style: workflow.ActionStyleWaiting, - } - node.Actions = []*workflow.Action{retryAction} - node.Listener = actionListener - return &RetryController{ - node: node, - retryChannel: make(chan struct{}), - } -} - -// triggerRetry closes the retryChannel and empties the Actions list -// in the UI Node. This disables the retry action. 
-func (c *RetryController) triggerRetry() { - if len(c.node.Actions) != 0 { - c.node.Actions = []*workflow.Action{} - close(c.retryChannel) - } - c.node.BroadcastChanges(false /* updateChildren */) -} From e3eae98bb1c068496428830a889ea4a6ce1f3e24 Mon Sep 17 00:00:00 2001 From: wangyipei01 Date: Thu, 23 Feb 2017 18:45:28 -0800 Subject: [PATCH 031/108] Update node.go --- go/vt/workflow/node.go | 1 - 1 file changed, 1 deletion(-) diff --git a/go/vt/workflow/node.go b/go/vt/workflow/node.go index f3f6c1ca4f7..11aaa3bf951 100644 --- a/go/vt/workflow/node.go +++ b/go/vt/workflow/node.go @@ -418,7 +418,6 @@ func (m *NodeManager) Action(ctx context.Context, ap *ActionParameters) error { if n.Listener == nil { return fmt.Errorf("Action %v is invoked on a node without listener (node path is %v)", ap.Name, ap.Path) } - return n.Listener.Action(ctx, ap.Path, ap.Name) } From bac6783ebe2250bf2dac39f1b1fcb525e04866a0 Mon Sep 17 00:00:00 2001 From: Michael Berlin Date: Thu, 23 Feb 2017 21:21:24 -0800 Subject: [PATCH 032/108] tabletserver/txthrottler: Fix bug that config update error was not checked. 
--- go/vt/tabletserver/txthrottler/tx_throttler.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/vt/tabletserver/txthrottler/tx_throttler.go b/go/vt/tabletserver/txthrottler/tx_throttler.go index e46e7ed8a22..79f452d8c1e 100644 --- a/go/vt/tabletserver/txthrottler/tx_throttler.go +++ b/go/vt/tabletserver/txthrottler/tx_throttler.go @@ -247,7 +247,7 @@ func newTxThrottlerState(config *txThrottlerConfig, keyspace, shard string, if err != nil { return nil, err } - if t.UpdateConfiguration(config.throttlerConfig, true /* copyZeroValues */); err != nil { + if err := t.UpdateConfiguration(config.throttlerConfig, true /* copyZeroValues */); err != nil { t.Close() return nil, err } From 69aaae93170b4cfde7283338138e899c5e85c863 Mon Sep 17 00:00:00 2001 From: Yipei Wang Date: Fri, 24 Feb 2017 11:43:50 -0800 Subject: [PATCH 033/108] workflow: Modify Factory.Init to pass Manager object. --- go/vt/schemamanager/schemaswap/schema_swap.go | 2 +- go/vt/workflow/manager.go | 11 ++++++----- .../resharding/horizontal_resharding_workflow.go | 13 +++++-------- go/vt/workflow/resharding/test_workflow.go | 2 +- go/vt/workflow/sleep_workflow.go | 2 +- go/vt/workflow/topovalidator/validator.go | 2 +- 6 files changed, 15 insertions(+), 17 deletions(-) diff --git a/go/vt/schemamanager/schemaswap/schema_swap.go b/go/vt/schemamanager/schemaswap/schema_swap.go index 9a750af896f..21bb06afb5d 100644 --- a/go/vt/schemamanager/schemaswap/schema_swap.go +++ b/go/vt/schemamanager/schemaswap/schema_swap.go @@ -161,7 +161,7 @@ func RegisterWorkflowFactory() { } // Init is a part of workflow.Factory interface. It initializes a Workflow protobuf object. 
-func (*SwapWorkflowFactory) Init(workflowProto *workflowpb.Workflow, args []string) error { +func (*SwapWorkflowFactory) Init(_ *workflow.Manager, workflowProto *workflowpb.Workflow, args []string) error { subFlags := flag.NewFlagSet(workflowFactoryName, flag.ContinueOnError) keyspace := subFlags.String("keyspace", "", "Name of a keyspace to perform schema swap on") diff --git a/go/vt/workflow/manager.go b/go/vt/workflow/manager.go index 0910894192b..c366270444b 100644 --- a/go/vt/workflow/manager.go +++ b/go/vt/workflow/manager.go @@ -38,10 +38,11 @@ type Factory interface { // The passed in workflow will have its Uuid, FactoryName and State // variable filled it. This Init method should fill in the // Name and Data attributes, based on the provided args. - // This is called during the Manager.Create phase. - // TODO(yipeiw): We should extend the interface to pass the topology server - // as well. The topology server is needed in the resarding workflow. - Init(w *workflowpb.Workflow, args []string) error + // This is called during the Manager.Create phase and will initially + // checkpoint the workflow in the topology. + // The Manager object is passed to Init method since the resharding workflow + // will use the topology server in Manager. + Init(m *Manager, w *workflowpb.Workflow, args []string) error // Instantiate loads a workflow from the proto representation // into an in-memory Workflow object. rootNode is the root UI node @@ -249,7 +250,7 @@ func (m *Manager) Create(ctx context.Context, factoryName string, args []string) // Let the factory parse the parameters and initialize the // object. 
- if err := factory.Init(w, args); err != nil { + if err := factory.Init(m, w, args); err != nil { return "", err } rw, err := m.instantiateWorkflow(w) diff --git a/go/vt/workflow/resharding/horizontal_resharding_workflow.go b/go/vt/workflow/resharding/horizontal_resharding_workflow.go index 61eb6ce18b6..ca397240845 100644 --- a/go/vt/workflow/resharding/horizontal_resharding_workflow.go +++ b/go/vt/workflow/resharding/horizontal_resharding_workflow.go @@ -55,7 +55,7 @@ func Register() { type HorizontalReshardingWorkflowFactory struct{} // Init is part of the workflow.Factory interface. -func (*HorizontalReshardingWorkflowFactory) Init(w *workflowpb.Workflow, args []string) error { +func (*HorizontalReshardingWorkflowFactory) Init(m *workflow.Manager, w *workflowpb.Workflow, args []string) error { subFlags := flag.NewFlagSet(horizontalReshardingFactoryName, flag.ContinueOnError) keyspace := subFlags.String("keyspace", "", "Name of keyspace to perform horizontal resharding") vtworkersStr := subFlags.String("vtworkers", "", "A comma-separated list of vtworker addresses") @@ -70,7 +70,7 @@ func (*HorizontalReshardingWorkflowFactory) Init(w *workflowpb.Workflow, args [] vtworkers := strings.Split(*vtworkersStr, ",") w.Name = fmt.Sprintf("Horizontal resharding on keyspace %s", *keyspace) - checkpoint, err := initCheckpoint(*keyspace, vtworkers) + checkpoint, err := initCheckpoint(m.TopoServer(), *keyspace, vtworkers) if err != nil { return err } @@ -180,18 +180,15 @@ func createUINodes(rootNode *workflow.Node, phaseName PhaseType, shards []string } // initCheckpoint initialize the checkpoint for the horizontal workflow. 
-func initCheckpoint(keyspace string, vtworkers []string) (*workflowpb.WorkflowCheckpoint, error) { - sourceShards, destinationShards, err := findSourceAndDestinationShards(keyspace) +func initCheckpoint(ts topo.Server, keyspace string, vtworkers []string) (*workflowpb.WorkflowCheckpoint, error) { + sourceShards, destinationShards, err := findSourceAndDestinationShards(ts, keyspace) if err != nil { return nil, err } return initCheckpointFromShards(keyspace, vtworkers, sourceShards, destinationShards) } -func findSourceAndDestinationShards(keyspace string) ([]string, []string, error) { - ts := topo.Open() - defer ts.Close() - +func findSourceAndDestinationShards(ts topo.Server, keyspace string) ([]string, []string, error) { overlappingShards, err := topotools.FindOverlappingShards(context.Background(), ts, keyspace) if err != nil { return nil, nil, err diff --git a/go/vt/workflow/resharding/test_workflow.go b/go/vt/workflow/resharding/test_workflow.go index 0a332e36d13..a8dbc6b9238 100644 --- a/go/vt/workflow/resharding/test_workflow.go +++ b/go/vt/workflow/resharding/test_workflow.go @@ -102,7 +102,7 @@ func (tw *TestWorkflow) runSimple(ctx context.Context, t *workflowpb.Task) error type TestWorkflowFactory struct{} // Init is part of the workflow.Factory interface. 
-func (*TestWorkflowFactory) Init(w *workflowpb.Workflow, args []string) error { +func (*TestWorkflowFactory) Init(_ *workflow.Manager, w *workflowpb.Workflow, args []string) error { subFlags := flag.NewFlagSet(testWorkflowFactoryName, flag.ContinueOnError) retryFlag := subFlags.Bool("retry", false, "The retry flag should be true if the retry action need to be tested") count := subFlags.Int("count", 0, "The number of simple tasks") diff --git a/go/vt/workflow/sleep_workflow.go b/go/vt/workflow/sleep_workflow.go index a3c4b4c3955..7aa0686732f 100644 --- a/go/vt/workflow/sleep_workflow.go +++ b/go/vt/workflow/sleep_workflow.go @@ -190,7 +190,7 @@ func (sw *SleepWorkflow) checkpointLocked(ctx context.Context) error { type SleepWorkflowFactory struct{} // Init is part of the workflow.Factory interface. -func (f *SleepWorkflowFactory) Init(w *workflowpb.Workflow, args []string) error { +func (f *SleepWorkflowFactory) Init(_ *Manager, w *workflowpb.Workflow, args []string) error { // Parse the flags. subFlags := flag.NewFlagSet(sleepFactoryName, flag.ContinueOnError) duration := subFlags.Int("duration", 30, "How long to sleep") diff --git a/go/vt/workflow/topovalidator/validator.go b/go/vt/workflow/topovalidator/validator.go index bf15060f551..a5fd6f80c74 100644 --- a/go/vt/workflow/topovalidator/validator.go +++ b/go/vt/workflow/topovalidator/validator.go @@ -193,7 +193,7 @@ func (f *workflowFixer) Action(ctx context.Context, path, name string) error { type WorkflowFactory struct{} // Init is part of the workflow.Factory interface. -func (f *WorkflowFactory) Init(w *workflowpb.Workflow, args []string) error { +func (f *WorkflowFactory) Init(_ *workflow.Manager, w *workflowpb.Workflow, args []string) error { // No parameters to parse. 
if len(args) > 0 { return fmt.Errorf("%v doesn't take any parameter", topoValidatorFactoryName) From 1ff6d011c6018d14b156bfe1a16ae5553debc9c0 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 24 Feb 2017 12:37:53 -0800 Subject: [PATCH 034/108] Adding interface for custom auth plugin. And a config-file based implementation. Using it in all tests. --- .../vtgate-controller-template.yaml | 1 + examples/local/vtgate-up.sh | 1 + go/mysqlconn/auth_server.go | 111 ++++++++++++++++++ go/mysqlconn/auth_server_config.go | 73 ++++++++++++ go/mysqlconn/client.go | 38 +----- go/mysqlconn/conn.go | 10 +- go/mysqlconn/constants.go | 8 +- go/mysqlconn/fakesqldb/server.go | 8 +- go/mysqlconn/handshake_test.go | 8 +- go/mysqlconn/query_benchmark_test.go | 8 +- go/mysqlconn/server.go | 76 ++++++------ go/mysqlconn/server_test.go | 42 ++++++- go/vt/vtgate/plugin_mysql_server.go | 51 ++++++-- 13 files changed, 339 insertions(+), 96 deletions(-) create mode 100644 go/mysqlconn/auth_server.go create mode 100644 go/mysqlconn/auth_server_config.go diff --git a/examples/kubernetes/vtgate-controller-template.yaml b/examples/kubernetes/vtgate-controller-template.yaml index ccb5376c86a..1c303a8ad5f 100644 --- a/examples/kubernetes/vtgate-controller-template.yaml +++ b/examples/kubernetes/vtgate-controller-template.yaml @@ -43,6 +43,7 @@ spec: -port 15001 -grpc_port 15991 -mysql_server_port {{mysql_server_port}} + -mysql_auth_server_config_string '{\"mysql_user\":{\"Password\":\"mysql_password\"}}' -service_map 'grpc-vtgateservice' -cells_to_watch {{cell}} -tablet_types_to_wait MASTER,REPLICA diff --git a/examples/local/vtgate-up.sh b/examples/local/vtgate-up.sh index e5e98eaceae..d19c4bccbf3 100755 --- a/examples/local/vtgate-up.sh +++ b/examples/local/vtgate-up.sh @@ -46,6 +46,7 @@ $VTROOT/bin/vtgate \ -port $web_port \ -grpc_port $grpc_port \ -mysql_server_port $mysql_server_port \ + -mysql_auth_server_config_string '{"mysql_user":{"Password":"mysql_password"}}' \ -cell $cell \ 
-cells_to_watch $cell \ -tablet_types_to_wait MASTER,REPLICA \ diff --git a/go/mysqlconn/auth_server.go b/go/mysqlconn/auth_server.go new file mode 100644 index 00000000000..de837bedd6b --- /dev/null +++ b/go/mysqlconn/auth_server.go @@ -0,0 +1,111 @@ +package mysqlconn + +import ( + "crypto/rand" + "crypto/sha1" + + log "github.com/golang/glog" +) + +// AuthServer is the interface that servers must implement to validate +// users and passwords. It has two modes: +// +// 1. using salt the way MySQL native auth does it. In that case, the +// password is not sent in the clear, but the salt is used to hash the +// password both on the client and server side, and the result is sent +// and compared. +// +// 2. sending the user / password in the clear (using MySQL Cleartext +// method). The server then gets access to both user and password, and +// can authenticate using any method. If SSL is not used, it means the +// password is sent in the clear. That may not be suitable for some +// use cases. +type AuthServer interface { + // UseClearText returns true is Cleartext auth is used. + // - If it is not set, Salt() and ValidateHash() are called. + // The server starts up in mysql_native_password mode. + // (but ValidateClearText can also be called, if client + // switched to Cleartext). + // - If it is set, ValidateClearText() is called. + // The server starts up in mysql_clear_password mode. + UseClearText() bool + + // Salt returns the salt to use for a connection. + // It should be 20 bytes of data. + Salt() ([]byte, error) + + // ValidateHash validates the data sent by the client matches + // what the server computes. It also returns the user data. + ValidateHash(salt []byte, user string, authResponse []byte) (string, error) + + // ValidateClearText validates a user / password is correct. + // It also returns the user data. + ValidateClearText(user, password string) (string, error) +} + +// authServers is a registry of AuthServer implementations. 
+var authServers = make(map[string]AuthServer) + +// RegisterAuthServerImpl registers an implementations of AuthServer. +func RegisterAuthServerImpl(name string, authServer AuthServer) { + if _, ok := authServers[name]; ok { + log.Fatalf("AuthServer named %v already exists", name) + } + authServers[name] = authServer +} + +// GetAuthServer returns an AuthServer by name, or log.Fatalf. +func GetAuthServer(name string) AuthServer { + authServer, ok := authServers[name] + if !ok { + log.Fatalf("no AuthServer name %v registered", name) + } + return authServer +} + +// newSalt returns a 20 character salt. +func newSalt() ([]byte, error) { + salt := make([]byte, 20) + if _, err := rand.Read(salt); err != nil { + return nil, err + } + + // Salt must be a legal UTF8 string. + for i := 0; i < len(salt); i++ { + salt[i] &= 0x7f + if salt[i] == '\x00' || salt[i] == '$' { + salt[i]++ + } + } + + return salt, nil +} + +// scramblePassword computes the hash of the password using 4.1+ method. +func scramblePassword(salt, password []byte) []byte { + if len(password) == 0 { + return nil + } + + // stage1Hash = SHA1(password) + crypt := sha1.New() + crypt.Write(password) + stage1 := crypt.Sum(nil) + + // scrambleHash = SHA1(salt + SHA1(stage1Hash)) + // inner Hash + crypt.Reset() + crypt.Write(stage1) + hash := crypt.Sum(nil) + // outer Hash + crypt.Reset() + crypt.Write(salt) + crypt.Write(hash) + scramble := crypt.Sum(nil) + + // token = scrambleHash XOR stage1Hash + for i := range scramble { + scramble[i] ^= stage1[i] + } + return scramble +} diff --git a/go/mysqlconn/auth_server_config.go b/go/mysqlconn/auth_server_config.go new file mode 100644 index 00000000000..f7513c481a7 --- /dev/null +++ b/go/mysqlconn/auth_server_config.go @@ -0,0 +1,73 @@ +package mysqlconn + +import ( + "bytes" + + "github.com/youtube/vitess/go/sqldb" +) + +// AuthServerConfig implements AuthServer using a static configuration. 
+type AuthServerConfig struct { + // ClearText can be set to force the use of ClearText auth. + ClearText bool + + // Entries contains the users, passwords and user data. + Entries map[string]*AuthServerConfigEntry +} + +// AuthServerConfigEntry stores the values for a given user. +type AuthServerConfigEntry struct { + Password string + UserData string +} + +// NewAuthServerConfig returns a new empty AuthServerConfig. +func NewAuthServerConfig() *AuthServerConfig { + return &AuthServerConfig{ + ClearText: false, + Entries: make(map[string]*AuthServerConfigEntry), + } +} + +// UseClearText is part of the AuthServer interface. +func (a *AuthServerConfig) UseClearText() bool { + return a.ClearText +} + +// Salt is part of the AuthServer interface. +func (a *AuthServerConfig) Salt() ([]byte, error) { + return newSalt() +} + +// ValidateHash is part of the AuthServer interface. +func (a *AuthServerConfig) ValidateHash(salt []byte, user string, authResponse []byte) (string, error) { + // Find the entry. + entry, ok := a.Entries[user] + if !ok { + return "", sqldb.NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + } + + // Validate the password. + computedAuthResponse := scramblePassword(salt, []byte(entry.Password)) + if bytes.Compare(authResponse, computedAuthResponse) != 0 { + return "", sqldb.NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + } + + return entry.UserData, nil +} + +// ValidateClearText is part of the AuthServer interface. +func (a *AuthServerConfig) ValidateClearText(user, password string) (string, error) { + // Find the entry. + entry, ok := a.Entries[user] + if !ok { + return "", sqldb.NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + } + + // Validate the password. 
+ if entry.Password != password { + return "", sqldb.NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + } + + return entry.UserData, nil +} diff --git a/go/mysqlconn/client.go b/go/mysqlconn/client.go index 342f8cdecee..d98596d4f61 100644 --- a/go/mysqlconn/client.go +++ b/go/mysqlconn/client.go @@ -1,7 +1,6 @@ package mysqlconn import ( - "crypto/sha1" "crypto/tls" "fmt" "net" @@ -185,7 +184,7 @@ func (c *Conn) clientHandshake(characterSet uint8, params *sqldb.ConnParams) err if err != nil { return sqldb.NewSQLError(CRServerLost, "", "initial packet read failed: %v", err) } - capabilities, cipher, err := c.parseInitialHandshakePacket(data) + capabilities, salt, err := c.parseInitialHandshakePacket(data) if err != nil { return err } @@ -243,7 +242,7 @@ func (c *Conn) clientHandshake(characterSet uint8, params *sqldb.ConnParams) err // Build and send our handshake response 41. // Note this one will never have SSL flag on. - if err := c.writeHandshakeResponse41(capabilities, cipher, characterSet, params); err != nil { + if err := c.writeHandshakeResponse41(capabilities, salt, characterSet, params); err != nil { return err } @@ -467,7 +466,7 @@ func (c *Conn) writeSSLRequest(capabilities uint32, characterSet uint8, params * // writeHandshakeResponse41 writes the handshake response. // Returns a sqldb.SQLError. -func (c *Conn) writeHandshakeResponse41(capabilities uint32, cipher []byte, characterSet uint8, params *sqldb.ConnParams) error { +func (c *Conn) writeHandshakeResponse41(capabilities uint32, salt []byte, characterSet uint8, params *sqldb.ConnParams) error { // Build our flags. var flags uint32 = CapabilityClientLongPassword | CapabilityClientLongFlag | @@ -483,7 +482,7 @@ func (c *Conn) writeHandshakeResponse41(capabilities uint32, cipher []byte, char // FIXME(alainjobart) add multi statement, client found rows. // Password encryption. 
- scrambledPassword := scramblePassword(cipher, []byte(params.Pass)) + scrambledPassword := scramblePassword(salt, []byte(params.Pass)) length := 4 + // Client capability flags. @@ -558,32 +557,3 @@ func (c *Conn) writeHandshakeResponse41(capabilities uint32, cipher []byte, char } return nil } - -// Encrypt password using 4.1+ method -func scramblePassword(scramble, password []byte) []byte { - if len(password) == 0 { - return nil - } - - // stage1Hash = SHA1(password) - crypt := sha1.New() - crypt.Write(password) - stage1 := crypt.Sum(nil) - - // scrambleHash = SHA1(scramble + SHA1(stage1Hash)) - // inner Hash - crypt.Reset() - crypt.Write(stage1) - hash := crypt.Sum(nil) - // outer Hash - crypt.Reset() - crypt.Write(scramble) - crypt.Write(hash) - scramble = crypt.Sum(nil) - - // token = scrambleHash XOR stage1Hash - for i := range scramble { - scramble[i] ^= stage1[i] - } - return scramble -} diff --git a/go/mysqlconn/conn.go b/go/mysqlconn/conn.go index 61bf6003126..ef70d641bf0 100644 --- a/go/mysqlconn/conn.go +++ b/go/mysqlconn/conn.go @@ -60,7 +60,7 @@ type Conn struct { // Capabilities is the current set of features this connection // is using. It is the features that are both supported by // the client and the server, and currently in use. - // It is set after the initial handshake. + // It is set during the initial handshake. // // It is only used for CapabilityClientDeprecateEOF. Capabilities uint32 @@ -71,6 +71,14 @@ type Conn struct { // See the values in constants.go. CharacterSet uint8 + // User is the name used by the client to connect. + // It is set during the initial handshake. + User string + + // UserData is custom data returned by the AuthServer module. + // It is set during the initial handshake. + UserData string + // SchemaName is the default database name to use. It is set // during handshake, and by ComInitDb packets. Both client and // servers maintain it. 
diff --git a/go/mysqlconn/constants.go b/go/mysqlconn/constants.go index 7bf514c4fe5..7701e7ed4be 100644 --- a/go/mysqlconn/constants.go +++ b/go/mysqlconn/constants.go @@ -10,9 +10,15 @@ const ( // protocolVersion is the current version of the protocol. // Always 10. protocolVersion = 10 +) - // mysqlNativePassword is the auth form we use. +// Supported auth forms. +const ( + // mysqlNativePassword uses a salt and transmits a hash on the wire. mysqlNativePassword = "mysql_native_password" + + // mysqlClearPassword transmits the password in the clear. + mysqlClearPassword = "mysql_clear_password" ) // Capability flags. diff --git a/go/mysqlconn/fakesqldb/server.go b/go/mysqlconn/fakesqldb/server.go index cdece9830ba..b2c4bc12530 100644 --- a/go/mysqlconn/fakesqldb/server.go +++ b/go/mysqlconn/fakesqldb/server.go @@ -73,14 +73,18 @@ func New(t *testing.T) *DB { connections: make(map[uint32]*mysqlconn.Conn), } + authServer := mysqlconn.NewAuthServerConfig() + authServer.Entries["user1"] = &mysqlconn.AuthServerConfigEntry{ + Password: "password1", + } + // Start listening. var err error - db.listener, err = mysqlconn.NewListener("tcp", ":0", db) + db.listener, err = mysqlconn.NewListener("tcp", ":0", authServer, db) if err != nil { t.Fatalf("NewListener failed: %v", err) } - db.listener.PasswordMap["user1"] = "password1" db.acceptWG.Add(1) go func() { defer db.acceptWG.Done() diff --git a/go/mysqlconn/handshake_test.go b/go/mysqlconn/handshake_test.go index cc690fa12b5..040b6a0345d 100644 --- a/go/mysqlconn/handshake_test.go +++ b/go/mysqlconn/handshake_test.go @@ -21,8 +21,13 @@ import ( func TestSSLConnection(t *testing.T) { th := &testHandler{} + authServer := NewAuthServerConfig() + authServer.Entries["user1"] = &AuthServerConfigEntry{ + Password: "password1", + } + // Create the listener, so we can get its host. 
- l, err := NewListener("tcp", ":0", th) + l, err := NewListener("tcp", ":0", authServer, th) if err != nil { t.Fatalf("NewListener failed: %v", err) } @@ -49,7 +54,6 @@ func TestSSLConnection(t *testing.T) { t.Fatalf("TLSServerConfig failed: %v", err) } l.TLSConfig = serverConfig - l.PasswordMap["user1"] = "password1" go func() { l.Accept() }() diff --git a/go/mysqlconn/query_benchmark_test.go b/go/mysqlconn/query_benchmark_test.go index 2c149f30585..46252c48f60 100644 --- a/go/mysqlconn/query_benchmark_test.go +++ b/go/mysqlconn/query_benchmark_test.go @@ -128,12 +128,16 @@ func benchmarkOldParallelReads(b *testing.B, params sqldb.ConnParams, parallelCo func BenchmarkParallelShortQueries(b *testing.B) { th := &testHandler{} - l, err := NewListener("tcp", ":0", th) + authServer := NewAuthServerConfig() + authServer.Entries["user1"] = &AuthServerConfigEntry{ + Password: "password1", + } + + l, err := NewListener("tcp", ":0", authServer, th) if err != nil { b.Fatalf("NewListener failed: %v", err) } defer l.Close() - l.PasswordMap["user1"] = "password1" go func() { l.Accept() diff --git a/go/mysqlconn/server.go b/go/mysqlconn/server.go index 6c2cda513d8..55163b65c2b 100644 --- a/go/mysqlconn/server.go +++ b/go/mysqlconn/server.go @@ -1,8 +1,6 @@ package mysqlconn import ( - "bytes" - "crypto/rand" "crypto/tls" "fmt" "net" @@ -47,6 +45,9 @@ type Handler interface { type Listener struct { // Construction parameters, set by NewListener. + // authServer is the AuthServer object to use for authentication. + authServer AuthServer + // handler is the data handler. handler Handler @@ -65,9 +66,6 @@ type Listener struct { // that we support SSL. TLSConfig *tls.Config - // PasswordMap maps users to passwords. - PasswordMap map[string]string - // The following parameters are changed by the Accept routine. // Incrementing ID for connection id. @@ -75,17 +73,18 @@ type Listener struct { } // NewListener creates a new Listener. 
-func NewListener(protocol, address string, handler Handler) (*Listener, error) { +func NewListener(protocol, address string, authServer AuthServer, handler Handler) (*Listener, error) { listener, err := net.Listen(protocol, address) if err != nil { return nil, err } return &Listener{ + authServer: authServer, + handler: handler, + listener: listener, + ServerVersion: DefaultServerVersion, - handler: handler, - PasswordMap: make(map[string]string), - listener: listener, connectionID: 1, }, nil } @@ -131,7 +130,7 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32) { defer l.handler.ConnectionClosed(c) // First build and send the server handshake packet. - cipher, err := c.writeHandshakeV10(l.ServerVersion, l.TLSConfig != nil) + salt, err := c.writeHandshakeV10(l.ServerVersion, l.authServer, l.TLSConfig != nil) if err != nil { log.Errorf("Cannot send HandshakeV10 packet: %v", err) return @@ -144,7 +143,7 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32) { log.Errorf("Cannot read client handshake response: %v", err) return } - username, authResponse, err := l.parseClientHandshakePacket(c, true, response) + user, authResponse, err := l.parseClientHandshakePacket(c, true, response) if err != nil { log.Errorf("Cannot parse client handshake response: %v", err) return @@ -157,28 +156,21 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32) { return } - username, authResponse, err = l.parseClientHandshakePacket(c, false, response) + user, authResponse, err = l.parseClientHandshakePacket(c, false, response) if err != nil { log.Errorf("Cannot parse post-SSL client handshake response: %v", err) return } } - // Find the user in our map - password, ok := l.PasswordMap[username] - if !ok { - log.Errorf("Invalid user: %v", username) - c.writeErrorPacket(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", username) - return - } - - // Validate the password. 
- computedAuthResponse := scramblePassword(cipher, []byte(password)) - if bytes.Compare(authResponse, computedAuthResponse) != 0 { - log.Errorf("Invalid password for user %v", username) - c.writeErrorPacket(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", username) + // See if the user is authenticated. + userData, err := l.authServer.ValidateHash(salt, user, authResponse) + if err != nil { + c.writeErrorPacketFromError(err) return } + c.User = user + c.UserData = userData // Send an OK packet. if err := c.writeOKPacket(0, 0, c.StatusFlags, 0); err != nil { @@ -244,8 +236,8 @@ func (l *Listener) Close() { } // writeHandshakeV10 writes the Initial Handshake Packet, server side. -// It returns the cipher data. -func (c *Conn) writeHandshakeV10(serverVersion string, enableTLS bool) ([]byte, error) { +// It returns the salt data. +func (c *Conn) writeHandshakeV10(serverVersion string, authServer AuthServer, enableTLS bool) ([]byte, error) { capabilities := CapabilityClientLongPassword | CapabilityClientLongFlag | CapabilityClientConnectWithDB | @@ -263,7 +255,7 @@ func (c *Conn) writeHandshakeV10(serverVersion string, enableTLS bool) ([]byte, 1 + // protocol version lenNullString(serverVersion) + 4 + // connection ID - 8 + // first part of cipher data + 8 + // first part of salt data 1 + // filler byte 2 + // capability flags (lower 2 bytes) 1 + // character set @@ -286,20 +278,20 @@ func (c *Conn) writeHandshakeV10(serverVersion string, enableTLS bool) ([]byte, // Add connectionID in. pos = writeUint32(data, pos, c.ConnectionID) - // Generate the cipher, put 8 bytes in. - cipher := make([]byte, 20) - if _, err := rand.Read(cipher); err != nil { - return nil, err - } - - // Cipher must be a legal UTF8 string. - for i := 0; i < len(cipher); i++ { - cipher[i] &= 0x7f - if cipher[i] == '\x00' || cipher[i] == '$' { - cipher[i] += 1 + // Generate the salt if needed, put 8 bytes in. 
+ var salt []byte + if authServer.UseClearText() { + // salt is unused. + salt = make([]byte, 20) + } else { + var err error + salt, err = authServer.Salt() + if err != nil { + return nil, err } } - pos += copy(data[pos:], cipher[:8]) + + pos += copy(data[pos:], salt[:8]) // One filler byte, always 0. pos = writeByte(data, pos, 0) @@ -324,7 +316,7 @@ func (c *Conn) writeHandshakeV10(serverVersion string, enableTLS bool) ([]byte, pos += 10 // Second part of auth plugin data. - pos += copy(data[pos:], cipher[8:]) + pos += copy(data[pos:], salt[8:]) data[pos] = 0 pos++ @@ -340,7 +332,7 @@ func (c *Conn) writeHandshakeV10(serverVersion string, enableTLS bool) ([]byte, return nil, err } - return cipher, nil + return salt, nil } // parseClientHandshakePacket parses the handshake sent by the client. diff --git a/go/mysqlconn/server_test.go b/go/mysqlconn/server_test.go index ffec12d360f..271b0b184e3 100644 --- a/go/mysqlconn/server_test.go +++ b/go/mysqlconn/server_test.go @@ -114,13 +114,15 @@ func (th *testHandler) ComQuery(c *Conn, query string) (*sqltypes.Result, error) func TestServer(t *testing.T) { th := &testHandler{} - l, err := NewListener("tcp", ":0", th) + authServer := NewAuthServerConfig() + authServer.Entries["user1"] = &AuthServerConfigEntry{ + Password: "password1", + } + l, err := NewListener("tcp", ":0", authServer, th) if err != nil { t.Fatalf("NewListener failed: %v", err) } defer l.Close() - l.PasswordMap["user1"] = "password1" - go func() { l.Accept() }() @@ -197,6 +199,32 @@ func TestServer(t *testing.T) { !strings.Contains(output, "1 row in set") { t.Errorf("Unexpected output for 'ssl echo': %v", output) } + + // Permissions check: check a bad password is rejected. 
+ params.Pass = "bad" + output, ok = runMysql(t, params, "select rows") + if ok { + t.Fatalf("mysql should have failed: %v", output) + } + if !strings.Contains(output, "1045") || + !strings.Contains(output, "28000") || + !strings.Contains(output, "Access denied") { + t.Errorf("Unexpected output for invalid password: %v", output) + } + + // Permissions check: check an unknown user is rejected. + params.Pass = "password1" + params.Uname = "user2" + output, ok = runMysql(t, params, "select rows") + if ok { + t.Fatalf("mysql should have failed: %v", output) + } + if !strings.Contains(output, "1045") || + !strings.Contains(output, "28000") || + !strings.Contains(output, "Access denied") { + t.Errorf("Unexpected output for invalid password: %v", output) + } + // Uncomment to leave setup up for a while, to run tests manually. // fmt.Printf("Listening to server on host '%v' port '%v'.\n", host, port) // time.Sleep(60 * time.Minute) @@ -207,11 +235,16 @@ func TestServer(t *testing.T) { func TestTLSServer(t *testing.T) { th := &testHandler{} + authServer := NewAuthServerConfig() + authServer.Entries["user1"] = &AuthServerConfigEntry{ + Password: "password1", + } + // Create the listener, so we can get its host. // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. 
- l, err := NewListener("tcp", ":0", th) + l, err := NewListener("tcp", ":0", authServer, th) if err != nil { t.Fatalf("NewListener failed: %v", err) } @@ -238,7 +271,6 @@ func TestTLSServer(t *testing.T) { t.Fatalf("TLSServerConfig failed: %v", err) } l.TLSConfig = serverConfig - l.PasswordMap["user1"] = "password1" go func() { l.Accept() }() diff --git a/go/vt/vtgate/plugin_mysql_server.go b/go/vt/vtgate/plugin_mysql_server.go index 6a0d2e5c2f7..f9532b2213a 100644 --- a/go/vt/vtgate/plugin_mysql_server.go +++ b/go/vt/vtgate/plugin_mysql_server.go @@ -1,8 +1,10 @@ package vtgate import ( + "encoding/json" "flag" "fmt" + "io/ioutil" "net" "strings" @@ -20,9 +22,45 @@ import ( ) var ( - mysqlServerPort = flag.Int("mysql_server_port", 0, "If set, also listen for MySQL binary protocol connections on this port.") + mysqlServerPort = flag.Int("mysql_server_port", 0, "If set, also listen for MySQL binary protocol connections on this port.") + mysqlAuthServerImpl = flag.String("mysql_auth_server_impl", "config", "Which auth server implementation to use.") + mysqlAuthServerConfigFile = flag.String("mysql_auth_server_config_file", "", "JSON File to read the users/passwords from.") + mysqlAuthServerConfigString = flag.String("mysql_auth_server_config_string", "", "JSON representation of the users/passwords config.") ) +// Handles initializing the AuthServerConfig if necessary. +func initAuthServerConfig() { + // Check parameters. + if *mysqlAuthServerConfigFile == "" && *mysqlAuthServerConfigString == "" { + // Not configured, nothing to do. + log.Infof("Not configuring AuthServerConfig, as mysql_auth_server_config_file and mysql_auth_server_config_string are empty") + return + } + if *mysqlAuthServerConfigFile != "" && *mysqlAuthServerConfigString != "" { + // Both parameters specified, can only use on. + log.Fatalf("Both mysql_auth_server_config_file and mysql_auth_server_config_string specified, can only use one.") + } + + // Read file if necessary. 
+ authServerConfig := mysqlconn.NewAuthServerConfig() + jsonConfig := []byte(*mysqlAuthServerConfigString) + if *mysqlAuthServerConfigFile != "" { + data, err := ioutil.ReadFile(*mysqlAuthServerConfigFile) + if err != nil { + log.Fatalf("Failed to read mysql_auth_server_config_file file: %v", err) + } + jsonConfig = data + } + + // Parse JSON config. + if err := json.Unmarshal(jsonConfig, &authServerConfig.Entries); err != nil { + log.Fatalf("Error parsing auth server config: %v", err) + } + + // And register the server. + mysqlconn.RegisterAuthServerImpl("config", authServerConfig) +} + // vtgateHandler implements the Listener interface. // It stores the Session in the ClientData of a Connection, if a transaction // is in progress. @@ -146,19 +184,18 @@ func init() { return } + // Initialize the config AuthServer if necessary. + initAuthServerConfig() + authServer := mysqlconn.GetAuthServer(*mysqlAuthServerImpl) + // Create a Listener. var err error vh := newVtgateHandler(rpcVTGate) - listener, err = mysqlconn.NewListener("tcp", net.JoinHostPort("", fmt.Sprintf("%v", *mysqlServerPort)), vh) + listener, err = mysqlconn.NewListener("tcp", net.JoinHostPort("", fmt.Sprintf("%v", *mysqlServerPort)), authServer, vh) if err != nil { log.Fatalf("mysqlconn.NewListener failed: %v", err) } - // Add fake users for now. - // FIXME(alainjobart): add a config file with users - // and passwords. - listener.PasswordMap["mysql_user"] = "mysql_password" - // And starts listening. 
go func() { listener.Accept() From ae37ea05e06ac51043ef4279656df5d8987b65b7 Mon Sep 17 00:00:00 2001 From: Jon Tirsen Date: Fri, 24 Feb 2017 22:25:21 +0100 Subject: [PATCH 035/108] Ignore any IDs generated by the DB (#2576) --- go/vt/vtgate/engine/route.go | 10 +--------- go/vt/vtgate/router_dml_test.go | 29 ----------------------------- 2 files changed, 1 insertion(+), 38 deletions(-) diff --git a/go/vt/vtgate/engine/route.go b/go/vt/vtgate/engine/route.go index ed107a15a76..18ac0e89ae9 100644 --- a/go/vt/vtgate/engine/route.go +++ b/go/vt/vtgate/engine/route.go @@ -446,12 +446,7 @@ func (route *Route) execInsertUnsharded(vcursor VCursor, queryConstruct *queryin if err != nil { return nil, fmt.Errorf("execInsertUnsharded: %v", err) } - if insertid != 0 { - if result.InsertID != 0 { - return nil, fmt.Errorf("sequence and db generated a value each for insert") - } - result.InsertID = uint64(insertid) - } + result.InsertID = uint64(insertid) return result, nil } @@ -472,9 +467,6 @@ func (route *Route) execInsertSharded(vcursor VCursor, queryConstruct *queryinfo } if insertid != 0 { - if result.InsertID != 0 { - return nil, fmt.Errorf("sequence and db generated a value each for insert") - } result.InsertID = uint64(insertid) } diff --git a/go/vt/vtgate/router_dml_test.go b/go/vt/vtgate/router_dml_test.go index 10b1aca2198..8d3fe078d9e 100644 --- a/go/vt/vtgate/router_dml_test.go +++ b/go/vt/vtgate/router_dml_test.go @@ -723,35 +723,6 @@ func TestInsertFail(t *testing.T) { if err == nil || !strings.HasPrefix(err.Error(), want) { t.Errorf("routerExec: %v, want prefix %v", err, want) } - - sbc.SetResults([]*sqltypes.Result{{RowsAffected: 1, InsertID: 1}}) - sbclookup.SetResults([]*sqltypes.Result{{ - Rows: [][]sqltypes.Value{{ - sqltypes.MakeTrusted(sqltypes.Int64, []byte("1")), - }}, - RowsAffected: 1, - InsertID: 1, - }}) - _, err = routerExec(router, "insert into user(id, v, name) values (null, 2, 'myname')", nil) - want = "sequence and db generated a value each for 
insert" - if err == nil || !strings.HasPrefix(err.Error(), want) { - t.Errorf("routerExec: %v, want prefix %v", err, want) - } - sbclookup.SetResults([]*sqltypes.Result{{ - Rows: [][]sqltypes.Value{{ - sqltypes.MakeTrusted(sqltypes.Int64, []byte("1")), - }}, - RowsAffected: 1, - InsertID: 1, - }, { - RowsAffected: 1, - InsertID: 1, - }}) - _, err = routerExec(router, "insert into main1(id, v, name) values (null, 2, 'myname')", nil) - want = "sequence and db generated a value each for insert" - if err == nil || !strings.HasPrefix(err.Error(), want) { - t.Errorf("routerExec: %v, want prefix %v", err, want) - } } func TestMultiInsertSharded(t *testing.T) { From 0f843d4c8d7b92983f8803f6a52506c21333c2bc Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 24 Feb 2017 13:25:45 -0800 Subject: [PATCH 036/108] Fixing etcd watch versions. Old files could not be watched if the server had compacted its data. --- go/vt/etcdtopo/watch.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/go/vt/etcdtopo/watch.go b/go/vt/etcdtopo/watch.go index a4a09570782..8fdf5b0b8b6 100644 --- a/go/vt/etcdtopo/watch.go +++ b/go/vt/etcdtopo/watch.go @@ -76,7 +76,11 @@ func (s *Server) Watch(ctx context.Context, cellName, filePath string) (*topo.Wa watchChannel := make(chan *etcd.Response) watchError := make(chan error) go func(stop chan bool) { - versionToWatch := initial.Node.ModifiedIndex + 1 + // We start watching from the etcd version we got + // during the get, and not from the ModifiedIndex of + // the node, as the node might be older than the + // retention period of the server. + versionToWatch := initial.EtcdIndex + 1 _, err := cell.Client.Watch(filePath, versionToWatch, false /* recursive */, watchChannel, stop) // Watch will only return a non-nil error, otherwise // it keeps on watching. Send the error down. 
From 81ee83046f772ad3ba6bdee8882efa0e771f1f4b Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 24 Feb 2017 13:37:08 -0800 Subject: [PATCH 037/108] etcd2topo watch fix. Use the Get version as a start, not the object version. --- go/vt/topo/etcd2topo/watch.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/go/vt/topo/etcd2topo/watch.go b/go/vt/topo/etcd2topo/watch.go index 569e6b67e05..e3964c13183 100644 --- a/go/vt/topo/etcd2topo/watch.go +++ b/go/vt/topo/etcd2topo/watch.go @@ -37,8 +37,10 @@ func (s *Server) Watch(ctx context.Context, cell, filePath string) (*topo.WatchD // Create a context, will be used to cancel the watch. watchCtx, watchCancel := context.WithCancel(context.Background()) - // Create the Watcher. - watcher := s.global.cli.Watch(watchCtx, nodePath, clientv3.WithRev(initial.Kvs[0].ModRevision)) + // Create the Watcher. We start watching from the response we + // got, not from the file original version, as the server may + // not have that much history. + watcher := s.global.cli.Watch(watchCtx, nodePath, clientv3.WithRev(initial.Header.Revision)) if watcher == nil { return &topo.WatchData{Err: fmt.Errorf("Watch failed")}, nil, nil } From 1d672c3ed518d2200ad634d3e2109cd37da2f04a Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 24 Feb 2017 13:58:27 -0800 Subject: [PATCH 038/108] Set and test conn.User{,Name}. --- go/mysqlconn/client.go | 3 ++- go/mysqlconn/handshake_test.go | 3 +++ go/mysqlconn/server_test.go | 34 +++++++++++++++++++++++++++++++++- 3 files changed, 38 insertions(+), 2 deletions(-) diff --git a/go/mysqlconn/client.go b/go/mysqlconn/client.go index d98596d4f61..2688c37c133 100644 --- a/go/mysqlconn/client.go +++ b/go/mysqlconn/client.go @@ -253,7 +253,8 @@ func (c *Conn) clientHandshake(characterSet uint8, params *sqldb.ConnParams) err } switch response[0] { case OKPacket: - // OK packet, we are authenticated. We keep going. + // OK packet, we are authenticated. Save the user, keep going. 
+ c.User = params.Uname case ErrPacket: return parseErrorPacket(response) default: diff --git a/go/mysqlconn/handshake_test.go b/go/mysqlconn/handshake_test.go index 040b6a0345d..06b1d1aac18 100644 --- a/go/mysqlconn/handshake_test.go +++ b/go/mysqlconn/handshake_test.go @@ -78,6 +78,9 @@ func TestSSLConnection(t *testing.T) { t.Fatalf("Connect failed: %v", err) } defer conn.Close() + if conn.User != "user1" { + t.Errorf("Invalid conn.User, got %v was expecting user1", conn.User) + } // Run a 'select rows' command with results. result, err := conn.ExecuteFetch("select rows", 10000, true) diff --git a/go/mysqlconn/server_test.go b/go/mysqlconn/server_test.go index 271b0b184e3..0c7718473ae 100644 --- a/go/mysqlconn/server_test.go +++ b/go/mysqlconn/server_test.go @@ -93,7 +93,6 @@ func (th *testHandler) ComQuery(c *Conn, query string) (*sqltypes.Result, error) value = "ON" } return &sqltypes.Result{ - Fields: []*querypb.Field{ { Name: "ssl_flag", @@ -108,6 +107,27 @@ func (th *testHandler) ComQuery(c *Conn, query string) (*sqltypes.Result, error) }, nil } + if query == "userData echo" { + return &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "user", + Type: querypb.Type_VARCHAR, + }, + { + Name: "user_data", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(c.User)), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(c.UserData)), + }, + }, + }, nil + } + return &sqltypes.Result{}, nil } @@ -117,6 +137,7 @@ func TestServer(t *testing.T) { authServer := NewAuthServerConfig() authServer.Entries["user1"] = &AuthServerConfigEntry{ Password: "password1", + UserData: "userData1", } l, err := NewListener("tcp", ":0", authServer, th) if err != nil { @@ -200,6 +221,17 @@ func TestServer(t *testing.T) { t.Errorf("Unexpected output for 'ssl echo': %v", output) } + // UserData check: checks the server user data is correct. 
+ output, ok = runMysql(t, params, "userData echo") + if !ok { + t.Fatalf("mysql failed: %v", output) + } + if !strings.Contains(output, "user1") || + !strings.Contains(output, "user_data") || + !strings.Contains(output, "userData1") { + t.Errorf("Unexpected output for 'userData echo': %v", output) + } + // Permissions check: check a bad password is rejected. params.Pass = "bad" output, ok = runMysql(t, params, "select rows") From 4236de736bc31c7da8c039c8ef1335dfe16c4f6b Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Fri, 24 Feb 2017 15:42:19 -0800 Subject: [PATCH 039/108] vterrors: addres review comments (#2593) Addressed review comments. I've changed the error codes for cases we've agreed on. I've left the more contentious ones unchanged for now. BUG=32851872 --- go/mysqlconn/constants.go | 5 ++++- go/mysqlconn/constants_test.go | 12 +++++++---- go/vt/tabletserver/connpool/pool.go | 2 +- go/vt/tabletserver/query_engine.go | 4 ++-- go/vt/tabletserver/queryservice/wrapped.go | 4 +++- go/vt/tabletserver/tabletconn/grpc_error.go | 4 ++-- go/vt/tabletserver/tabletenv/tabletenv.go | 24 ++++++++++++++++++--- go/vt/tabletserver/tabletserver.go | 4 ++-- go/vt/tabletserver/tabletserver_test.go | 6 +++--- go/vt/tabletserver/tx_pool_test.go | 4 ++-- go/vt/vterrors/aggregate.go | 4 ++-- go/vt/vterrors/doc.go | 4 ++-- go/vt/vtgate/buffer/buffer.go | 6 ++---- go/vt/vtgate/router_select_test.go | 2 +- go/vt/vtgate/vtgate.go | 4 ++-- 15 files changed, 57 insertions(+), 32 deletions(-) diff --git a/go/mysqlconn/constants.go b/go/mysqlconn/constants.go index 7bf514c4fe5..ae959eb8350 100644 --- a/go/mysqlconn/constants.go +++ b/go/mysqlconn/constants.go @@ -350,7 +350,10 @@ func IsNum(typ uint8) bool { func IsConnErr(err error) bool { if sqlErr, ok := err.(*sqldb.SQLError); ok { num := sqlErr.Number() - // Don't count query kill as connection error. + // ServerLost means that the query has already been + // received by MySQL and may have already been executed. 
+ // Since we don't know if the query is idempotent, we don't + // count this error as connection error which could be retried. if num == CRServerLost { return false } diff --git a/go/mysqlconn/constants_test.go b/go/mysqlconn/constants_test.go index b7f474bd076..c5c26d5484f 100644 --- a/go/mysqlconn/constants_test.go +++ b/go/mysqlconn/constants_test.go @@ -12,16 +12,20 @@ func TestIsConnErr(t *testing.T) { in error want bool }{{ - in: errors.New("t"), + in: errors.New("t"), + want: false, }, { - in: sqldb.NewSQLError(5, "", ""), + in: sqldb.NewSQLError(5, "", ""), + want: false, }, { in: sqldb.NewSQLError(CRServerGone, "", ""), want: true, }, { - in: sqldb.NewSQLError(CRServerLost, "", ""), + in: sqldb.NewSQLError(CRServerLost, "", ""), + want: false, }, { - in: sqldb.NewSQLError(CRCantReadCharset, "", ""), + in: sqldb.NewSQLError(CRCantReadCharset, "", ""), + want: false, }} for _, tcase := range testcases { got := IsConnErr(tcase.in) diff --git a/go/vt/tabletserver/connpool/pool.go b/go/vt/tabletserver/connpool/pool.go index 84c99e8b712..a9f7f0ee770 100644 --- a/go/vt/tabletserver/connpool/pool.go +++ b/go/vt/tabletserver/connpool/pool.go @@ -19,7 +19,7 @@ import ( ) // ErrConnPoolClosed is returned when the connection pool is closed. -var ErrConnPoolClosed = vterrors.New(vtrpcpb.Code_UNAVAILABLE, "connection pool is closed") +var ErrConnPoolClosed = vterrors.New(vtrpcpb.Code_INTERNAL, "internal error: unexpected: conn pool is closed") // usedNames is for preventing expvar from panicking. Tests // create pool objects multiple time. 
If a name was previously diff --git a/go/vt/tabletserver/query_engine.go b/go/vt/tabletserver/query_engine.go index cf9e162633a..1d37c657ec3 100644 --- a/go/vt/tabletserver/query_engine.go +++ b/go/vt/tabletserver/query_engine.go @@ -293,7 +293,7 @@ func (qe *QueryEngine) GetPlan(ctx context.Context, logStats *tabletenv.LogStats r, err := conn.Exec(ctx, sql, 1, true) logStats.AddRewrittenSQL(sql, start) if err != nil { - return nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "error fetching fields: %v", err) + return nil, err } plan.Fields = r.Fields } @@ -320,7 +320,7 @@ func (qe *QueryEngine) GetStreamPlan(sql string) (*ExecPlan, error) { splan, err := planbuilder.GetStreamExecPlan(sql, GetTable) if err != nil { // TODO(sougou): Inspect to see if GetStreamExecPlan can return coded error. - return nil, vterrors.New(vtrpcpb.Code_UNKNOWN, err.Error()) + return nil, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, err.Error()) } plan := &ExecPlan{ExecPlan: splan, Table: table} plan.Rules = qe.queryRuleSources.filterByPlan(sql, plan.PlanID, plan.TableName.String()) diff --git a/go/vt/tabletserver/queryservice/wrapped.go b/go/vt/tabletserver/queryservice/wrapped.go index c137a783dc2..5d886746b4a 100644 --- a/go/vt/tabletserver/queryservice/wrapped.go +++ b/go/vt/tabletserver/queryservice/wrapped.go @@ -50,13 +50,15 @@ func canRetry(ctx context.Context, err error) bool { if err == nil { return false } + select { case <-ctx.Done(): return false default: } + switch vterrors.Code(err) { - case vtrpcpb.Code_UNAVAILABLE, vtrpcpb.Code_FAILED_PRECONDITION, vtrpcpb.Code_RESOURCE_EXHAUSTED: + case vtrpcpb.Code_UNAVAILABLE, vtrpcpb.Code_FAILED_PRECONDITION: return true } return false diff --git a/go/vt/tabletserver/tabletconn/grpc_error.go b/go/vt/tabletserver/tabletconn/grpc_error.go index 98764523df0..e10a1449178 100644 --- a/go/vt/tabletserver/tabletconn/grpc_error.go +++ b/go/vt/tabletserver/tabletconn/grpc_error.go @@ -16,7 +16,7 @@ func ErrorFromGRPC(err error) error { if err == nil 
|| err == io.EOF { return nil } - return vterrors.New(vtrpcpb.Code(grpc.Code(err)), "vttablet: "+err.Error()) + return vterrors.Errorf(vtrpcpb.Code(grpc.Code(err)), "vttablet: %v", err) } // ErrorFromVTRPC converts a *vtrpcpb.RPCError to vtError for @@ -29,5 +29,5 @@ func ErrorFromVTRPC(err *vtrpcpb.RPCError) error { if code == vtrpcpb.Code_OK { code = vterrors.LegacyErrorCodeToCode(err.LegacyCode) } - return vterrors.New(code, "vttablet: "+err.Message) + return vterrors.Errorf(code, "vttablet: %s", err.Message) } diff --git a/go/vt/tabletserver/tabletenv/tabletenv.go b/go/vt/tabletserver/tabletenv/tabletenv.go index f000734fdcd..251dd87cf87 100644 --- a/go/vt/tabletserver/tabletenv/tabletenv.go +++ b/go/vt/tabletserver/tabletenv/tabletenv.go @@ -15,6 +15,7 @@ import ( "github.com/youtube/vitess/go/stats" "github.com/youtube/vitess/go/tb" "github.com/youtube/vitess/go/vt/callerid" + vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" "github.com/youtube/vitess/go/vt/sqlparser" ) @@ -29,10 +30,27 @@ var ( WaitStats = stats.NewTimings("Waits") // KillStats shows number of connections being killed. KillStats = stats.NewCounters("Kills", "Transactions", "Queries") - // InfoErrors shows number of various non critical errors happened. - InfoErrors = stats.NewCounters("InfoErrors", "Retry", "DupKey") // ErrorStats shows number of critial erros happened. 
- ErrorStats = stats.NewCounters("Errors", "Fail", "TxPoolFull", "NotInTx", "Deadlock", "Fatal") + ErrorStats = stats.NewCounters( + "Errors", + vtrpcpb.Code_OK.String(), + vtrpcpb.Code_CANCELED.String(), + vtrpcpb.Code_UNKNOWN.String(), + vtrpcpb.Code_INVALID_ARGUMENT.String(), + vtrpcpb.Code_DEADLINE_EXCEEDED.String(), + vtrpcpb.Code_NOT_FOUND.String(), + vtrpcpb.Code_ALREADY_EXISTS.String(), + vtrpcpb.Code_PERMISSION_DENIED.String(), + vtrpcpb.Code_UNAUTHENTICATED.String(), + vtrpcpb.Code_RESOURCE_EXHAUSTED.String(), + vtrpcpb.Code_FAILED_PRECONDITION.String(), + vtrpcpb.Code_ABORTED.String(), + vtrpcpb.Code_OUT_OF_RANGE.String(), + vtrpcpb.Code_UNIMPLEMENTED.String(), + vtrpcpb.Code_INTERNAL.String(), + vtrpcpb.Code_UNAVAILABLE.String(), + vtrpcpb.Code_DATA_LOSS.String(), + ) // InternalErrors shows number of errors from internal components. InternalErrors = stats.NewCounters("InternalErrors", "Task", "StrayTransactions", "Panic", "HungQuery", "Schema", "TwopcCommit", "TwopcResurrection", "WatchdogFail") // Unresolved tracks unresolved items. For now it's just Prepares. diff --git a/go/vt/tabletserver/tabletserver.go b/go/vt/tabletserver/tabletserver.go index 8f0985de090..5d6c8eaf229 100644 --- a/go/vt/tabletserver/tabletserver.go +++ b/go/vt/tabletserver/tabletserver.go @@ -1190,7 +1190,7 @@ func (tsv *TabletServer) convertError(sql string, bindVariables map[string]inter if strings.Contains(errstr, "read-only") { errCode = vtrpcpb.Code_FAILED_PRECONDITION } - case 1227: // Google internal overloaded error code. + case 1227: // Google internal failover error code. if strings.Contains(errstr, "failover in progress") { errCode = vtrpcpb.Code_FAILED_PRECONDITION } @@ -1201,7 +1201,7 @@ func (tsv *TabletServer) convertError(sql string, bindVariables map[string]inter case mysqlconn.ERLockWaitTimeout: errCode = vtrpcpb.Code_DEADLINE_EXCEEDED case mysqlconn.ERLockDeadlock: - // A deadlock rollsback the transaction. + // A deadlock rolls back the transaction. 
errCode = vtrpcpb.Code_ABORTED case mysqlconn.CRServerLost: // Query was killed. diff --git a/go/vt/tabletserver/tabletserver_test.go b/go/vt/tabletserver/tabletserver_test.go index c0889f24562..bdb2490950f 100644 --- a/go/vt/tabletserver/tabletserver_test.go +++ b/go/vt/tabletserver/tabletserver_test.go @@ -357,9 +357,9 @@ func TestTabletServerAllSchemaFailure(t *testing.T) { err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs)) defer tsv.StopService() // tabletsever shouldn't start if it can't access schema for any tables - wanterr := "could not get schema for any tables" - if err == nil || err.Error() != wanterr { - t.Errorf("tsv.StartService: %v, want %s", err, wanterr) + wantErr := "could not get schema for any tables" + if err == nil || err.Error() != wantErr { + t.Errorf("tsv.StartService: %v, want %s", err, wantErr) } } diff --git a/go/vt/tabletserver/tx_pool_test.go b/go/vt/tabletserver/tx_pool_test.go index b853f89de8b..513b27b793a 100644 --- a/go/vt/tabletserver/tx_pool_test.go +++ b/go/vt/tabletserver/tx_pool_test.go @@ -177,8 +177,8 @@ func TestTxPoolBeginWithPoolConnectionError_Errno2006_Permanent(t *testing.T) { if !ok { t.Fatalf("Unexpected error type: %T, want %T", err, &sqldb.SQLError{}) } - if num := sqlErr.Number(); num != mysqlconn.CRServerLost { - t.Errorf("Unexpected error code: %d, want %d", num, mysqlconn.CRServerLost) + if got, want := sqlErr.Number(), mysqlconn.CRServerLost; got != want { + t.Errorf("Unexpected error code: %d, want %d", got, want) } } diff --git a/go/vt/vterrors/aggregate.go b/go/vt/vterrors/aggregate.go index 41606ac46f2..d168ede93b9 100644 --- a/go/vt/vterrors/aggregate.go +++ b/go/vt/vterrors/aggregate.go @@ -25,11 +25,11 @@ const ( PriorityOutOfRange // Potentially retryable errors. PriorityUnavailable - PriorityFailedPrecondition - PriorityResourceExhausted PriorityDeadlineExceeded PriorityAborted + PriorityFailedPrecondition // Permanent errors. 
+ PriorityResourceExhausted PriorityUnknown PriorityUnauthenticated PriorityPermissionDenied diff --git a/go/vt/vterrors/doc.go b/go/vt/vterrors/doc.go index f0120dfd52e..6b6feada228 100644 --- a/go/vt/vterrors/doc.go +++ b/go/vt/vterrors/doc.go @@ -18,8 +18,8 @@ RPCError message that can be used to transmit errors through RPCs, in the message payloads. These codes match the names and numbers defined by gRPC. -Vitess also defines a VitessError error implementation, that can convert -any error and add a code to it. +Vitess also defines a standardized error implementation that allows +you to build an error with an associated canonical code. While sending an error through gRPC, these codes are transmitted using gRPC's error propagation mechanism and decoded back to diff --git a/go/vt/vtgate/buffer/buffer.go b/go/vt/vtgate/buffer/buffer.go index 94882eb16a5..89075ed2c31 100644 --- a/go/vt/vtgate/buffer/buffer.go +++ b/go/vt/vtgate/buffer/buffer.go @@ -214,13 +214,11 @@ func (b *Buffer) StatsUpdate(ts *discovery.TabletStats) { // causedByFailover returns true if "err" was supposedly caused by a failover. // To simplify things, we've merged the detection for different MySQL flavors // in one function. Supported flavors: MariaDB, MySQL, Google internal. -// TODO(mberlin): This function does not have to check the specific error messages. -// The previous error revamp ensures that FAILED_PRECONDITION is returned only -// during failover. func causedByFailover(err error) bool { log.V(2).Infof("Checking error (type: %T) if it is caused by a failover. err: %v", err, err) - if vterrors.Code(err) != vtrpcpb.Code_FAILED_PRECONDITION { + // TODO(sougou): Remove the INTERNAL check after rollout. 
+ if code := vterrors.Code(err); code != vtrpcpb.Code_FAILED_PRECONDITION && code != vtrpcpb.Code_INTERNAL { return false } switch { diff --git a/go/vt/vtgate/router_select_test.go b/go/vt/vtgate/router_select_test.go index 88cda8b238e..bf4e1a89ef1 100644 --- a/go/vt/vtgate/router_select_test.go +++ b/go/vt/vtgate/router_select_test.go @@ -412,7 +412,7 @@ func TestSelectEqualFail(t *testing.T) { _, err := routerExec(router, "select id from user where id = (select count(*) from music)", nil) want := "unsupported" if err == nil || !strings.HasPrefix(err.Error(), want) { - t.Errorf("routerExec: %v, must contain %v", err, want) + t.Errorf("routerExec: %v, must start with %v", err, want) } _, err = routerExec(router, "select id from user where id = :aa", nil) diff --git a/go/vt/vtgate/vtgate.go b/go/vt/vtgate/vtgate.go index 884a8abbf30..a4a6f395499 100644 --- a/go/vt/vtgate/vtgate.go +++ b/go/vt/vtgate/vtgate.go @@ -885,13 +885,13 @@ func (vtg *VTGate) VSchemaStats() *VSchemaStats { func recordAndAnnotateError(err error, statsKey []string, request map[string]interface{}, logger *logutil.ThrottledLogger) error { ec := vterrors.Code(err) - fullkey := []string{ + fullKey := []string{ statsKey[0], statsKey[1], statsKey[2], ec.String(), } - errorCounts.Add(fullkey, 1) + errorCounts.Add(fullKey, 1) // Most errors are not logged by vtgate beecause they're either too spammy or logged elsewhere. switch ec { case vtrpcpb.Code_UNKNOWN, vtrpcpb.Code_INTERNAL, vtrpcpb.Code_DATA_LOSS: From d1b357c581a72a7d0fddb11628366f2755cbd8b9 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 24 Feb 2017 16:59:33 -0800 Subject: [PATCH 040/108] Adding clear text auth support. And connecting it to AuthServer. Unit tests cover almost all cases (but not all pre-MySQL 4.1 cases). 
--- go/mysqlconn/client.go | 60 +++++++++- go/mysqlconn/constants.go | 3 + go/mysqlconn/doc.go | 23 +++- go/mysqlconn/handshake_test.go | 95 +++++++++++++++ go/mysqlconn/server.go | 173 ++++++++++++++++++++++------ go/mysqlconn/server_test.go | 104 ++++++++++++++--- go/vt/vtgate/plugin_mysql_server.go | 10 +- 7 files changed, 405 insertions(+), 63 deletions(-) diff --git a/go/mysqlconn/client.go b/go/mysqlconn/client.go index 2688c37c133..fe2305bb7c3 100644 --- a/go/mysqlconn/client.go +++ b/go/mysqlconn/client.go @@ -255,11 +255,40 @@ func (c *Conn) clientHandshake(characterSet uint8, params *sqldb.ConnParams) err case OKPacket: // OK packet, we are authenticated. Save the user, keep going. c.User = params.Uname + case AuthSwitchRequestPacket: + // Server is asking to use a different auth method. We + // only support cleartext plugin. + pluginName, _, err := parseAuthSwitchRequest(response) + if err != nil { + return sqldb.NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "cannot parse auth switch request: %v", err) + } + if pluginName != mysqlClearPassword { + return sqldb.NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "server asked for unsupported auth method: %v", pluginName) + } + + // Write the password packet. + if err := c.writeClearTextPassword(params); err != nil { + return err + } + + // Wait for OK packet. + response, err = c.readPacket() + if err != nil { + return sqldb.NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + } + switch response[0] { + case OKPacket: + // OK packet, we are authenticated. Save the user, keep going. + c.User = params.Uname + case ErrPacket: + return parseErrorPacket(response) + default: + return sqldb.NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "initial server response cannot be parsed: %v", response) + } case ErrPacket: return parseErrorPacket(response) default: - // FIXME(alainjobart) handle extra auth cases and so on. 
- return fmt.Errorf("initial server response is asking for more information, not implemented yet: %v", response) + return sqldb.NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "initial server response cannot be parsed: %v", response) } // If the server didn't support DbName in its handshake, set @@ -558,3 +587,30 @@ func (c *Conn) writeHandshakeResponse41(capabilities uint32, salt []byte, charac } return nil } + +func parseAuthSwitchRequest(data []byte) (string, []byte, error) { + pos := 1 + pluginName, pos, ok := readNullString(data, pos) + if !ok { + return "", nil, fmt.Errorf("cannot get plugin name from AuthSwitchRequest: %v", data) + } + + return pluginName, data[pos:], nil +} + +// writeClearTextPassword writes the clear text password. +// Returns a sqldb.SQLError. +func (c *Conn) writeClearTextPassword(params *sqldb.ConnParams) error { + length := len(params.Pass) + 1 + data := c.startEphemeralPacket(length) + pos := 0 + pos = writeNullString(data, pos, params.Pass) + // Sanity check. + if pos != len(data) { + return fmt.Errorf("error building ClearTextPassword packet: got %v bytes expected %v", pos, len(data)) + } + if err := c.writeEphemeralPacket(true); err != nil { + return err + } + return nil +} diff --git a/go/mysqlconn/constants.go b/go/mysqlconn/constants.go index 7701e7ed4be..2f58b43a8d5 100644 --- a/go/mysqlconn/constants.go +++ b/go/mysqlconn/constants.go @@ -147,6 +147,9 @@ const ( // EOFPacket is the header of the EOF packet. EOFPacket = 0xfe + // AuthSwitchRequestPacket is used to switch auth method. + AuthSwitchRequestPacket = 0xfe + // ErrPacket is the header of the error packet. ErrPacket = 0xff diff --git a/go/mysqlconn/doc.go b/go/mysqlconn/doc.go index 8d603a5acc1..fa35e3ff91d 100644 --- a/go/mysqlconn/doc.go +++ b/go/mysqlconn/doc.go @@ -36,10 +36,27 @@ message to set the database. -- PLUGABLE AUTHENTICATION: -We only support mysql_native_password for now, both client and server -side. 
It wouldn't be a lot of work to add SHA256 for instance, or clear text -authentication. +See https://dev.mysql.com/doc/internals/en/authentication-method-mismatch.html +for more information on this. +Our server side always starts by using mysql_native_password, like a +real MySQL server. + +Our client will expect the server to always use mysql_native_password +in its initial handshake. This is what a real server always does, even though +it's not technically mandatory. + +Our server can then use the client's auth methods right away: +- mysql_native_password +- mysql_clear_password + +If our server's AuthServer UseClearText() returns true, and the +client's auth method is not mysql_clear_password, we will +re-negotiate. + +If any of these methods doesn't work for the server, it will re-negociate +by sending an Authentication Method Switch Request Packet. +The client will then handle that if it can. -- Maximum Packet Size: diff --git a/go/mysqlconn/handshake_test.go b/go/mysqlconn/handshake_test.go index 06b1d1aac18..93ffb260bc8 100644 --- a/go/mysqlconn/handshake_test.go +++ b/go/mysqlconn/handshake_test.go @@ -7,6 +7,7 @@ import ( "os" "path" "reflect" + "strings" "testing" "github.com/youtube/vitess/go/sqldb" @@ -16,6 +17,63 @@ import ( // This file tests the handshake scenarios between our client and our server. +func TestClearTextClientAuth(t *testing.T) { + th := &testHandler{} + + authServer := NewAuthServerConfig() + authServer.Entries["user1"] = &AuthServerConfigEntry{ + Password: "password1", + } + authServer.ClearText = true + + // Create the listener. + l, err := NewListener("tcp", ":0", authServer, th) + if err != nil { + t.Fatalf("NewListener failed: %v", err) + } + defer l.Close() + host := l.Addr().(*net.TCPAddr).IP.String() + port := l.Addr().(*net.TCPAddr).Port + go func() { + l.Accept() + }() + + // Setup the right parameters. 
+ params := &sqldb.ConnParams{ + Host: host, + Port: port, + Uname: "user1", + Pass: "password1", + } + + // Connection should fail, as server requires SSL for clear text auth. + ctx := context.Background() + conn, err := Connect(ctx, params) + if err == nil || !strings.Contains(err.Error(), "Cannot use clear text authentication over non-SSL connections") { + t.Fatalf("unexpected connection error: %v", err) + } + + // Change server side to allow clear text without auth. + l.AllowClearTextWithoutTLS = true + conn, err = Connect(ctx, params) + if err != nil { + t.Fatalf("unexpected connection error: %v", err) + } + defer conn.Close() + + // Run a 'select rows' command with results. + result, err := conn.ExecuteFetch("select rows", 10000, true) + if err != nil { + t.Fatalf("ExecuteFetch failed: %v", err) + } + if !reflect.DeepEqual(result, selectRowsResult) { + t.Errorf("Got wrong result from ExecuteFetch(select rows): %v", result) + } + + // Send a ComQuit to avoid the error message on the server side. + conn.writeComQuit() +} + // TestSSLConnection creates a server with TLS support, a client that // also has SSL support, and connects them. func TestSSLConnection(t *testing.T) { @@ -71,6 +129,43 @@ func TestSSLConnection(t *testing.T) { SslKey: path.Join(root, "client-key.pem"), } + t.Run("Basics", func(t *testing.T) { + testSSLConnectionBasics(t, params) + }) + + // Make sure clear text auth works over SSL. + t.Run("ClearText", func(t *testing.T) { + l.authServer.(*AuthServerConfig).ClearText = true + testSSLConnectionClearText(t, params) + }) +} + +func testSSLConnectionClearText(t *testing.T, params *sqldb.ConnParams) { + // Create a client connection, connect. + ctx := context.Background() + conn, err := Connect(ctx, params) + if err != nil { + t.Fatalf("Connect failed: %v", err) + } + defer conn.Close() + if conn.User != "user1" { + t.Errorf("Invalid conn.User, got %v was expecting user1", conn.User) + } + + // Make sure this went through SSL. 
+ result, err := conn.ExecuteFetch("ssl echo", 10000, true) + if err != nil { + t.Fatalf("ExecuteFetch failed: %v", err) + } + if result.Rows[0][0].String() != "ON" { + t.Errorf("Got wrong result from ExecuteFetch(ssl echo): %v", result) + } + + // Send a ComQuit to avoid the error message on the server side. + conn.writeComQuit() +} + +func testSSLConnectionBasics(t *testing.T, params *sqldb.ConnParams) { // Create a client connection, connect. ctx := context.Background() conn, err := Connect(ctx, params) diff --git a/go/mysqlconn/server.go b/go/mysqlconn/server.go index 55163b65c2b..b374f244f3e 100644 --- a/go/mysqlconn/server.go +++ b/go/mysqlconn/server.go @@ -66,6 +66,11 @@ type Listener struct { // that we support SSL. TLSConfig *tls.Config + // AllowClearTextWithoutTLS needs to be set for the + // mysql_clear_password authentication method to be accepted + // by the server when TLS is not in use. + AllowClearTextWithoutTLS bool + // The following parameters are changed by the Accept routine. // Incrementing ID for connection id. @@ -143,7 +148,7 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32) { log.Errorf("Cannot read client handshake response: %v", err) return } - user, authResponse, err := l.parseClientHandshakePacket(c, true, response) + user, authMethod, authResponse, err := l.parseClientHandshakePacket(c, true, response) if err != nil { log.Errorf("Cannot parse client handshake response: %v", err) return @@ -156,21 +161,86 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32) { return } - user, authResponse, err = l.parseClientHandshakePacket(c, false, response) + user, authMethod, authResponse, err = l.parseClientHandshakePacket(c, false, response) if err != nil { log.Errorf("Cannot parse post-SSL client handshake response: %v", err) return } } - // See if the user is authenticated. 
- userData, err := l.authServer.ValidateHash(salt, user, authResponse) - if err != nil { - c.writeErrorPacketFromError(err) - return + // See what method the client used. + renegotiateWithClearText := false + switch authMethod { + case mysqlNativePassword: + // This is what the server started with. Let's use it if we can. + if !l.authServer.UseClearText() { + userData, err := l.authServer.ValidateHash(salt, user, authResponse) + if err != nil { + c.writeErrorPacketFromError(err) + return + } + c.User = user + c.UserData = userData + // We're good. + break + } + + // Our AuthServer cannot use mysql_native_password, it + // needs the real password. Let's request that. + renegotiateWithClearText = true + case mysqlClearPassword: + // Client sent us a clear text password. Let's use it if we can. + if !l.AllowClearTextWithoutTLS && c.Capabilities&CapabilityClientSSL == 0 { + c.writeErrorPacket(CRServerHandshakeErr, SSUnknownSQLState, "Cannot use clear text authentication over non-SSL connections.") + return + } + userData, err := l.authServer.ValidateClearText(user, string(authResponse)) + if err != nil { + c.writeErrorPacketFromError(err) + return + } + c.User = user + c.UserData = userData + break + default: + // Client decided to use something we don't understand. + // Let's try again with clear text password. + renegotiateWithClearText = true + } + + // If we need to re-negociate with clear text, do it. + if renegotiateWithClearText { + // Check error conditions. + if !l.AllowClearTextWithoutTLS && c.Capabilities&CapabilityClientSSL == 0 { + c.writeErrorPacket(CRServerHandshakeErr, SSUnknownSQLState, "Cannot use clear text authentication over non-SSL connections.") + return + } + + if err := c.writeAuthSwitchRequest(mysqlClearPassword, nil); err != nil { + log.Errorf("Error write auth switch packet for client %v: %v", c.ConnectionID, err) + return + } + + // The client is supposed to just send the data in a single packet. + // It is a zero-terminated string. 
+ data, err := c.readEphemeralPacket() + if err != nil { + log.Warningf("Error reading auth switch response packet from client %v: %v", c.ConnectionID, err) + return + } + password, pos, ok := readNullString(data, 0) + if !ok || pos != len(data) { + c.writeErrorPacket(CRServerHandshakeErr, SSUnknownSQLState, "Error parsing packet with password: %v", data) + return + } + userData, err := l.authServer.ValidateClearText(user, password) + if err != nil { + c.writeErrorPacketFromError(err) + return + } + c.User = user + c.UserData = userData } - c.User = user - c.UserData = userData // Send an OK packet. if err := c.writeOKPacket(0, 0, c.StatusFlags, 0); err != nil { @@ -280,15 +350,17 @@ func (c *Conn) writeHandshakeV10(serverVersion string, authServer AuthServer, en // Generate the salt if needed, put 8 bytes in. var salt []byte + var err error if authServer.UseClearText() { - // salt is unused. - salt = make([]byte, 20) + // salt will end up being unused, but we can't send + // just zero, as the client will still use it, and + // that may leak crypto information. + salt, err = newSalt() } else { - var err error salt, err = authServer.Salt() - if err != nil { - return nil, err - } + } + if err != nil { + return nil, err } pos += copy(data[pos:], salt[:8]) @@ -320,7 +392,7 @@ func (c *Conn) writeHandshakeV10(serverVersion string, authServer AuthServer, en data[pos] = 0 pos++ - // Copy authPluginName. + // Copy authPluginName. We always start with mysql_native_password. pos = writeNullString(data, pos, mysqlNativePassword) // Sanity check. @@ -336,17 +408,17 @@ func (c *Conn) writeHandshakeV10(serverVersion string, authServer AuthServer, en } // parseClientHandshakePacket parses the handshake sent by the client. -// Returns the username, auth-data, error. -func (l *Listener) parseClientHandshakePacket(c *Conn, firstTime bool, data []byte) (string, []byte, error) { +// Returns the username, auth method, auth data, error. 
+func (l *Listener) parseClientHandshakePacket(c *Conn, firstTime bool, data []byte) (string, string, []byte, error) { pos := 0 // Client flags, 4 bytes. clientFlags, pos, ok := readUint32(data, pos) if !ok { - return "", nil, fmt.Errorf("parseClientHandshakePacket: can't read client flags") + return "", "", nil, fmt.Errorf("parseClientHandshakePacket: can't read client flags") } if clientFlags&CapabilityClientProtocol41 == 0 { - return "", nil, fmt.Errorf("parseClientHandshakePacket: only support protocol 4.1") + return "", "", nil, fmt.Errorf("parseClientHandshakePacket: only support protocol 4.1") } // Remember a subset of the capabilities, so we can use them @@ -360,13 +432,13 @@ func (l *Listener) parseClientHandshakePacket(c *Conn, firstTime bool, data []by // See doc.go for more information. _, pos, ok = readUint32(data, pos) if !ok { - return "", nil, fmt.Errorf("parseClientHandshakePacket: can't read maxPacketSize") + return "", "", nil, fmt.Errorf("parseClientHandshakePacket: can't read maxPacketSize") } // Character set. Need to handle it. characterSet, pos, ok := readByte(data, pos) if !ok { - return "", nil, fmt.Errorf("parseClientHandshakePacket: can't read characterSet") + return "", "", nil, fmt.Errorf("parseClientHandshakePacket: can't read characterSet") } c.CharacterSet = characterSet @@ -381,13 +453,13 @@ func (l *Listener) parseClientHandshakePacket(c *Conn, firstTime bool, data []by c.reader.Reset(conn) c.writer.Reset(conn) c.Capabilities |= CapabilityClientSSL - return "", nil, nil + return "", "", nil, nil } // username username, pos, ok := readNullString(data, pos) if !ok { - return "", nil, fmt.Errorf("parseClientHandshakePacket: can't read username") + return "", "", nil, fmt.Errorf("parseClientHandshakePacket: can't read username") } // auth-response can have three forms. 
@@ -396,29 +468,29 @@ func (l *Listener) parseClientHandshakePacket(c *Conn, firstTime bool, data []by var l uint64 l, pos, ok = readLenEncInt(data, pos) if !ok { - return "", nil, fmt.Errorf("parseClientHandshakePacket: can't read auth-response variable length") + return "", "", nil, fmt.Errorf("parseClientHandshakePacket: can't read auth-response variable length") } authResponse, pos, ok = readBytes(data, pos, int(l)) if !ok { - return "", nil, fmt.Errorf("parseClientHandshakePacket: can't read auth-response") + return "", "", nil, fmt.Errorf("parseClientHandshakePacket: can't read auth-response") } } else if clientFlags&CapabilityClientSecureConnection != 0 { var l byte l, pos, ok = readByte(data, pos) if !ok { - return "", nil, fmt.Errorf("parseClientHandshakePacket: can't read auth-response length") + return "", "", nil, fmt.Errorf("parseClientHandshakePacket: can't read auth-response length") } authResponse, pos, ok = readBytes(data, pos, int(l)) if !ok { - return "", nil, fmt.Errorf("parseClientHandshakePacket: can't read auth-response") + return "", "", nil, fmt.Errorf("parseClientHandshakePacket: can't read auth-response") } } else { a := "" a, pos, ok = readNullString(data, pos) if !ok { - return "", nil, fmt.Errorf("parseClientHandshakePacket: can't read auth-response") + return "", "", nil, fmt.Errorf("parseClientHandshakePacket: can't read auth-response") } authResponse = []byte(a) } @@ -428,24 +500,49 @@ func (l *Listener) parseClientHandshakePacket(c *Conn, firstTime bool, data []by dbname := "" dbname, pos, ok = readNullString(data, pos) if !ok { - return "", nil, fmt.Errorf("parseClientHandshakePacket: can't read dbname") + return "", "", nil, fmt.Errorf("parseClientHandshakePacket: can't read dbname") } c.SchemaName = dbname } - // auth plugin name - authPluginName := "mysql_native_password" + // authMethod (with default) + authMethod := mysqlNativePassword if clientFlags&CapabilityClientPluginAuth != 0 { - authPluginName, pos, ok = 
readNullString(data, pos) + authMethod, pos, ok = readNullString(data, pos) if !ok { - return "", nil, fmt.Errorf("parseClientHandshakePacket: can't read authPluginName") + return "", "", nil, fmt.Errorf("parseClientHandshakePacket: can't read authMethod") } } - if authPluginName != mysqlNativePassword { - return "", nil, fmt.Errorf("invalid authPluginName, got %v but only support %v", authPluginName, mysqlNativePassword) - } // FIXME(alainjobart) Add CLIENT_CONNECT_ATTRS parsing if we need it. - return username, authResponse, nil + return username, authMethod, authResponse, nil +} + +// writeAuthSwitchRequest writes an auth switch request packet. +func (c *Conn) writeAuthSwitchRequest(pluginName string, pluginData []byte) error { + length := 1 + // AuthSwitchRequestPacket + len(pluginName) + 1 + // 0-terminated pluginName + len(pluginData) + + data := c.startEphemeralPacket(length) + pos := 0 + + // Packet header. + pos = writeByte(data, pos, AuthSwitchRequestPacket) + + // Copy server version. + pos = writeNullString(data, pos, pluginName) + + // Copy auth data. + pos += copy(data[pos:], pluginData) + + // Sanity check. + if pos != len(data) { + return fmt.Errorf("error building AuthSwitchRequestPacket packet: got %v bytes expected %v", pos, len(data)) + } + if err := c.writeEphemeralPacket(true); err != nil { + return err + } + return nil } diff --git a/go/mysqlconn/server_test.go b/go/mysqlconn/server_test.go index 0c7718473ae..d933a1e52b8 100644 --- a/go/mysqlconn/server_test.go +++ b/go/mysqlconn/server_test.go @@ -262,6 +262,80 @@ func TestServer(t *testing.T) { // time.Sleep(60 * time.Minute) } +// TestClearTextServer creates a Server that needs clear text passwords from the client. 
+func TestClearTextServer(t *testing.T) { + th := &testHandler{} + + authServer := NewAuthServerConfig() + authServer.Entries["user1"] = &AuthServerConfigEntry{ + Password: "password1", + UserData: "userData1", + } + authServer.ClearText = true + l, err := NewListener("tcp", ":0", authServer, th) + if err != nil { + t.Fatalf("NewListener failed: %v", err) + } + defer l.Close() + go func() { + l.Accept() + }() + + host := l.Addr().(*net.TCPAddr).IP.String() + port := l.Addr().(*net.TCPAddr).Port + + // Setup the right parameters. + params := &sqldb.ConnParams{ + Host: host, + Port: port, + Uname: "user1", + Pass: "password1", + } + + // Run a 'select rows' command with results. + // This should fail as clear text is not enabled by default on the client. + l.AllowClearTextWithoutTLS = true + output, ok := runMysql(t, params, "select rows") + if ok { + t.Fatalf("mysql should have failed but returned: %v", output) + } + if !strings.Contains(output, "plugin not enabled") { + t.Errorf("Unexpected output for 'select rows': %v", output) + } + + // Now enable clear text plugin in client, but server requires SSL. + l.AllowClearTextWithoutTLS = false + output, ok = runMysql(t, params, enableCleartextPluginPrefix+"select rows") + if ok { + t.Fatalf("mysql should have failed but returned: %v", output) + } + if !strings.Contains(output, "Cannot use clear text authentication over non-SSL connections") { + t.Errorf("Unexpected output for 'select rows': %v", output) + } + + // Now enable clear text plugin, it should now work. + l.AllowClearTextWithoutTLS = true + output, ok = runMysql(t, params, enableCleartextPluginPrefix+"select rows") + if !ok { + t.Fatalf("mysql failed: %v", output) + } + if !strings.Contains(output, "nice name") || + !strings.Contains(output, "nicer name") || + !strings.Contains(output, "2 rows in set") { + t.Errorf("Unexpected output for 'select rows'") + } + + // Change password, make sure server rejects us. 
+ params.Pass = "" + output, ok = runMysql(t, params, enableCleartextPluginPrefix+"select rows") + if ok { + t.Fatalf("mysql should have failed but returned: %v", output) + } + if !strings.Contains(output, "Access denied for user 'user1'") { + t.Errorf("Unexpected output for 'select rows': %v", output) + } +} + // TestTLSServer creates a Server with TLS support, then uses mysql // client to connect to it. func TestTLSServer(t *testing.T) { @@ -343,6 +417,8 @@ func TestTLSServer(t *testing.T) { } } +const enableCleartextPluginPrefix = "enable-cleartext-plugin: " + // runMysql forks a mysql command line process connecting to the provided server. func runMysql(t *testing.T, params *sqldb.ConnParams, command string) (string, bool) { dir, err := vtenv.VtMysqlRoot() @@ -357,40 +433,36 @@ func runMysql(t *testing.T, params *sqldb.ConnParams, command string) (string, b // In particular, it has the message: // Query OK, 1 row affected (0.00 sec) args := []string{ - "-e", command, "-v", "-v", "-v", } + if strings.HasPrefix(command, enableCleartextPluginPrefix) { + command = command[len(enableCleartextPluginPrefix):] + args = append(args, "--enable-cleartext-plugin") + } + args = append(args, "-e", command) if params.UnixSocket != "" { - args = append(args, []string{ - "-S", params.UnixSocket, - }...) + args = append(args, "-S", params.UnixSocket) } else { - args = append(args, []string{ + args = append(args, "-h", params.Host, - "-P", fmt.Sprintf("%v", params.Port), - }...) + "-P", fmt.Sprintf("%v", params.Port)) } if params.Uname != "" { - args = append(args, []string{ - "-u", params.Uname, - }...) + args = append(args, "-u", params.Uname) } if params.Pass != "" { args = append(args, "-p"+params.Pass) } if params.DbName != "" { - args = append(args, []string{ - "-D", params.DbName, - }...) 
+ args = append(args, "-D", params.DbName) } if params.Flags&CapabilityClientSSL > 0 { - args = append(args, []string{ + args = append(args, "--ssl", "--ssl-ca", params.SslCa, "--ssl-cert", params.SslCert, "--ssl-key", params.SslKey, - "--ssl-verify-server-cert", - }...) + "--ssl-verify-server-cert") } env := []string{ "LD_LIBRARY_PATH=" + path.Join(dir, "lib/mysql"), diff --git a/go/vt/vtgate/plugin_mysql_server.go b/go/vt/vtgate/plugin_mysql_server.go index f9532b2213a..e796d0c9204 100644 --- a/go/vt/vtgate/plugin_mysql_server.go +++ b/go/vt/vtgate/plugin_mysql_server.go @@ -22,10 +22,11 @@ import ( ) var ( - mysqlServerPort = flag.Int("mysql_server_port", 0, "If set, also listen for MySQL binary protocol connections on this port.") - mysqlAuthServerImpl = flag.String("mysql_auth_server_impl", "config", "Which auth server implementation to use.") - mysqlAuthServerConfigFile = flag.String("mysql_auth_server_config_file", "", "JSON File to read the users/passwords from.") - mysqlAuthServerConfigString = flag.String("mysql_auth_server_config_string", "", "JSON representation of the users/passwords config.") + mysqlServerPort = flag.Int("mysql_server_port", 0, "If set, also listen for MySQL binary protocol connections on this port.") + mysqlAuthServerImpl = flag.String("mysql_auth_server_impl", "config", "Which auth server implementation to use.") + mysqlAuthServerConfigFile = flag.String("mysql_auth_server_config_file", "", "JSON File to read the users/passwords from.") + mysqlAuthServerConfigString = flag.String("mysql_auth_server_config_string", "", "JSON representation of the users/passwords config.") + mysqlAllowClearTextWithoutTLS = flag.Bool("mysql_allow_clear_text_without_tls", false, "If set, the server will allow the use of a clear text password over non-SSL connections.") ) // Handles initializing the AuthServerConfig if necessary. 
@@ -195,6 +196,7 @@ func init() { if err != nil { log.Fatalf("mysqlconn.NewListener failed: %v", err) } + listener.AllowClearTextWithoutTLS = *mysqlAllowClearTextWithoutTLS // And starts listening. go func() { From 1b5e47035205afb9d6443e0dba56b685b926104b Mon Sep 17 00:00:00 2001 From: Dean Yasuda Date: Fri, 24 Feb 2017 17:49:40 -0800 Subject: [PATCH 041/108] Pass MoreExecutors.directExecutor to Futures.transformAsync. --- .../com/youtube/vitess/client/VTGateConn.java | 126 +++++---- .../com/youtube/vitess/client/VTGateTx.java | 266 ++++++++++-------- 2 files changed, 223 insertions(+), 169 deletions(-) diff --git a/java/client/src/main/java/com/youtube/vitess/client/VTGateConn.java b/java/client/src/main/java/com/youtube/vitess/client/VTGateConn.java index 1427cfc0dbd..91fdd08f054 100644 --- a/java/client/src/main/java/com/youtube/vitess/client/VTGateConn.java +++ b/java/client/src/main/java/com/youtube/vitess/client/VTGateConn.java @@ -1,5 +1,9 @@ package com.youtube.vitess.client; +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.util.concurrent.Futures.transformAsync; +import static com.google.common.util.concurrent.MoreExecutors.directExecutor; + import com.google.common.collect.Iterables; import com.google.common.util.concurrent.AsyncFunction; import com.google.common.util.concurrent.Futures; @@ -39,8 +43,6 @@ import com.youtube.vitess.proto.Vtgate.StreamExecuteKeyspaceIdsRequest; import com.youtube.vitess.proto.Vtgate.StreamExecuteRequest; import com.youtube.vitess.proto.Vtgate.StreamExecuteShardsRequest; - -import javax.annotation.Nullable; import java.io.Closeable; import java.io.IOException; import java.sql.SQLDataException; @@ -48,8 +50,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; - -import static com.google.common.base.Preconditions.checkNotNull; +import javax.annotation.Nullable; /** * An asynchronous VTGate connection. 
@@ -108,14 +109,17 @@ public SQLFuture execute(Context ctx, String query, @Nullable Map(Futures.transformAsync(client.execute(ctx, requestBuilder.build()), - new AsyncFunction() { - @Override - public ListenableFuture apply(ExecuteResponse response) throws Exception { - Proto.checkError(response.getError()); - return Futures.immediateFuture(new SimpleCursor(response.getResult())); - } - })); + return new SQLFuture( + transformAsync( + client.execute(ctx, requestBuilder.build()), + new AsyncFunction() { + @Override + public ListenableFuture apply(ExecuteResponse response) throws Exception { + Proto.checkError(response.getError()); + return Futures.immediateFuture(new SimpleCursor(response.getResult())); + } + }, + directExecutor())); } public SQLFuture executeShards(Context ctx, String query, String keyspace, @@ -136,7 +140,8 @@ public SQLFuture executeShards(Context ctx, String query, String keyspac } return new SQLFuture( - Futures.transformAsync(client.executeShards(ctx, requestBuilder.build()), + transformAsync( + client.executeShards(ctx, requestBuilder.build()), new AsyncFunction() { @Override public ListenableFuture apply(ExecuteShardsResponse response) @@ -144,7 +149,8 @@ public ListenableFuture apply(ExecuteShardsResponse response) Proto.checkError(response.getError()); return Futures.immediateFuture(new SimpleCursor(response.getResult())); } - })); + }, + directExecutor())); } public SQLFuture executeKeyspaceIds(Context ctx, String query, String keyspace, @@ -165,7 +171,8 @@ public SQLFuture executeKeyspaceIds(Context ctx, String query, String ke } return new SQLFuture( - Futures.transformAsync(client.executeKeyspaceIds(ctx, requestBuilder.build()), + transformAsync( + client.executeKeyspaceIds(ctx, requestBuilder.build()), new AsyncFunction() { @Override public ListenableFuture apply(ExecuteKeyspaceIdsResponse response) @@ -173,7 +180,8 @@ public ListenableFuture apply(ExecuteKeyspaceIdsResponse response) Proto.checkError(response.getError()); return 
Futures.immediateFuture(new SimpleCursor(response.getResult())); } - })); + }, + directExecutor())); } public SQLFuture executeKeyRanges(Context ctx, String query, String keyspace, @@ -192,7 +200,8 @@ public SQLFuture executeKeyRanges(Context ctx, String query, String keys } return new SQLFuture( - Futures.transformAsync(client.executeKeyRanges(ctx, requestBuilder.build()), + transformAsync( + client.executeKeyRanges(ctx, requestBuilder.build()), new AsyncFunction() { @Override public ListenableFuture apply(ExecuteKeyRangesResponse response) @@ -200,7 +209,8 @@ public ListenableFuture apply(ExecuteKeyRangesResponse response) Proto.checkError(response.getError()); return Futures.immediateFuture(new SimpleCursor(response.getResult())); } - })); + }, + directExecutor())); } public SQLFuture executeEntityIds(Context ctx, String query, String keyspace, @@ -221,7 +231,8 @@ public SQLFuture executeEntityIds(Context ctx, String query, String keys } return new SQLFuture( - Futures.transformAsync(client.executeEntityIds(ctx, requestBuilder.build()), + transformAsync( + client.executeEntityIds(ctx, requestBuilder.build()), new AsyncFunction() { @Override public ListenableFuture apply(ExecuteEntityIdsResponse response) @@ -229,7 +240,8 @@ public ListenableFuture apply(ExecuteEntityIdsResponse response) Proto.checkError(response.getError()); return Futures.immediateFuture(new SimpleCursor(response.getResult())); } - })); + }, + directExecutor())); } public SQLFuture> executeBatch(Context ctx, List queryList, @@ -266,16 +278,19 @@ public SQLFuture> executeBatch(Context ctx, List q requestBuilder.setCallerId(ctx.getCallerId()); } - return new SQLFuture<>(Futures - .transformAsync(client.executeBatch(ctx, requestBuilder.build()), - new AsyncFunction>() { - @Override public ListenableFuture> apply( - Vtgate.ExecuteBatchResponse response) throws Exception { - Proto.checkError(response.getError()); - return Futures.immediateFuture( - 
Proto.fromQueryResponsesToCursorList(response.getResultsList())); - } - })); + return new SQLFuture<>( + transformAsync( + client.executeBatch(ctx, requestBuilder.build()), + new AsyncFunction>() { + @Override + public ListenableFuture> apply( + Vtgate.ExecuteBatchResponse response) throws Exception { + Proto.checkError(response.getError()); + return Futures.immediateFuture( + Proto.fromQueryResponsesToCursorList(response.getResultsList())); + } + }, + directExecutor())); } /** @@ -301,16 +316,18 @@ public SQLFuture> executeBatchShards(Context ctx, } return new SQLFuture>( - Futures.transformAsync(client.executeBatchShards(ctx, requestBuilder.build()), + transformAsync( + client.executeBatchShards(ctx, requestBuilder.build()), new AsyncFunction>() { @Override public ListenableFuture> apply(ExecuteBatchShardsResponse response) throws Exception { Proto.checkError(response.getError()); - return Futures - .>immediateFuture(Proto.toCursorList(response.getResultsList())); + return Futures.>immediateFuture( + Proto.toCursorList(response.getResultsList())); } - })); + }, + directExecutor())); } /** @@ -335,16 +352,18 @@ public SQLFuture> executeBatchKeyspaceIds(Context ctx, } return new SQLFuture>( - Futures.transformAsync(client.executeBatchKeyspaceIds(ctx, requestBuilder.build()), + transformAsync( + client.executeBatchKeyspaceIds(ctx, requestBuilder.build()), new AsyncFunction>() { @Override public ListenableFuture> apply(ExecuteBatchKeyspaceIdsResponse response) throws Exception { Proto.checkError(response.getError()); - return Futures - .>immediateFuture(Proto.toCursorList(response.getResultsList())); + return Futures.>immediateFuture( + Proto.toCursorList(response.getResultsList())); } - })); + }, + directExecutor())); } public Cursor streamExecute(Context ctx, String query, @Nullable Map bindVars, @@ -432,14 +451,17 @@ public SQLFuture begin(Context ctx, boolean singleDB) throws SQLExcept if (ctx.getCallerId() != null) { 
requestBuilder.setCallerId(ctx.getCallerId()); } - return new SQLFuture(Futures.transformAsync(client.begin(ctx, requestBuilder.build()), - new AsyncFunction() { - @Override - public ListenableFuture apply(BeginResponse response) throws Exception { - return Futures - .immediateFuture(new VTGateTx(client, response.getSession(), keyspace)); - } - })); + return new SQLFuture( + transformAsync( + client.begin(ctx, requestBuilder.build()), + new AsyncFunction() { + @Override + public ListenableFuture apply(BeginResponse response) throws Exception { + return Futures.immediateFuture( + new VTGateTx(client, response.getSession(), keyspace)); + } + }, + directExecutor())); } public SQLFuture> splitQuery(Context ctx, String keyspace, @@ -458,7 +480,8 @@ public SQLFuture> splitQuery(Context ctx, String k requestBuilder.setCallerId(ctx.getCallerId()); } return new SQLFuture>( - Futures.transformAsync(client.splitQuery(ctx, requestBuilder.build()), + transformAsync( + client.splitQuery(ctx, requestBuilder.build()), new AsyncFunction>() { @Override public ListenableFuture> apply( @@ -466,21 +489,24 @@ public ListenableFuture> apply( return Futures.>immediateFuture( response.getSplitsList()); } - })); + }, + directExecutor())); } public SQLFuture getSrvKeyspace(Context ctx, String keyspace) throws SQLException { GetSrvKeyspaceRequest.Builder requestBuilder = GetSrvKeyspaceRequest.newBuilder().setKeyspace(checkNotNull(keyspace)); return new SQLFuture( - Futures.transformAsync(client.getSrvKeyspace(ctx, requestBuilder.build()), + transformAsync( + client.getSrvKeyspace(ctx, requestBuilder.build()), new AsyncFunction() { @Override public ListenableFuture apply(GetSrvKeyspaceResponse response) throws Exception { return Futures.immediateFuture(response.getSrvKeyspace()); } - })); + }, + directExecutor())); } @Override diff --git a/java/client/src/main/java/com/youtube/vitess/client/VTGateTx.java b/java/client/src/main/java/com/youtube/vitess/client/VTGateTx.java index 
5def7381d73..023089672ee 100644 --- a/java/client/src/main/java/com/youtube/vitess/client/VTGateTx.java +++ b/java/client/src/main/java/com/youtube/vitess/client/VTGateTx.java @@ -1,5 +1,9 @@ package com.youtube.vitess.client; +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.util.concurrent.Futures.transformAsync; +import static com.google.common.util.concurrent.MoreExecutors.directExecutor; + import com.google.common.collect.Iterables; import com.google.common.util.concurrent.AsyncFunction; import com.google.common.util.concurrent.Futures; @@ -32,37 +36,31 @@ import com.youtube.vitess.proto.Vtgate.RollbackRequest; import com.youtube.vitess.proto.Vtgate.RollbackResponse; import com.youtube.vitess.proto.Vtgate.Session; - -import javax.annotation.Nullable; import java.sql.SQLDataException; import java.sql.SQLException; import java.util.ArrayList; import java.util.List; import java.util.Map; - -import static com.google.common.base.Preconditions.checkNotNull; +import javax.annotation.Nullable; /** * An asynchronous VTGate transaction session. * - *

- * Because {@code VTGateTx} manages a session cookie, only one operation can be in flight at a time - * on a given instance. The methods are {@code synchronized} only because the session cookie is + *

Because {@code VTGateTx} manages a session cookie, only one operation can be in flight at a + * time on a given instance. The methods are {@code synchronized} only because the session cookie is * updated asynchronously when the RPC response comes back. * - *

- * After calling any method that returns a {@link SQLFuture}, you must wait for that future to - * complete before calling any other methods on that {@code VTGateTx} instance. An - * {@link IllegalStateException} will be thrown if this constraint is violated. + *

After calling any method that returns a {@link SQLFuture}, you must wait for that future to + * complete before calling any other methods on that {@code VTGateTx} instance. An {@link + * IllegalStateException} will be thrown if this constraint is violated. * - *

- * All operations on {@code VTGateTx} are asynchronous, including those whose ultimate return type - * is {@link Void}, such as {@link #commit(Context)} and {@link #rollback(Context)}. You must still - * wait for the futures returned by these methods to complete and check the error on them (such as - * by calling {@code checkedGet()} before you can assume the operation has finished successfully. + *

All operations on {@code VTGateTx} are asynchronous, including those whose ultimate return + * type is {@link Void}, such as {@link #commit(Context)} and {@link #rollback(Context)}. You must + * still wait for the futures returned by these methods to complete and check the error on them + * (such as by calling {@code checkedGet()} before you can assume the operation has finished + * successfully. * - *

- * If you prefer a synchronous API, you can use {@link VTGateBlockingConn#begin(Context)}, which + *

If you prefer a synchronous API, you can use {@link VTGateBlockingConn#begin(Context)}, which * returns a {@link VTGateBlockingTx} instead. */ public class VTGateTx { @@ -94,15 +92,18 @@ public synchronized SQLFuture execute(Context ctx, String query, Map call = - new SQLFuture<>(Futures.transformAsync(client.execute(ctx, requestBuilder.build()), - new AsyncFunction() { - @Override - public ListenableFuture apply(ExecuteResponse response) throws Exception { - setSession(response.getSession()); - Proto.checkError(response.getError()); - return Futures.immediateFuture(new SimpleCursor(response.getResult())); - } - })); + new SQLFuture<>( + transformAsync( + client.execute(ctx, requestBuilder.build()), + new AsyncFunction() { + @Override + public ListenableFuture apply(ExecuteResponse response) throws Exception { + setSession(response.getSession()); + Proto.checkError(response.getError()); + return Futures.immediateFuture(new SimpleCursor(response.getResult())); + } + }, + directExecutor())); lastCall = call; return call; } @@ -125,16 +126,19 @@ public synchronized SQLFuture executeShards(Context ctx, String query, S } SQLFuture call = - new SQLFuture<>(Futures.transformAsync(client.executeShards(ctx, requestBuilder.build()), - new AsyncFunction() { - @Override - public ListenableFuture apply(ExecuteShardsResponse response) - throws Exception { - setSession(response.getSession()); - Proto.checkError(response.getError()); - return Futures.immediateFuture(new SimpleCursor(response.getResult())); - } - })); + new SQLFuture<>( + transformAsync( + client.executeShards(ctx, requestBuilder.build()), + new AsyncFunction() { + @Override + public ListenableFuture apply(ExecuteShardsResponse response) + throws Exception { + setSession(response.getSession()); + Proto.checkError(response.getError()); + return Futures.immediateFuture(new SimpleCursor(response.getResult())); + } + }, + directExecutor())); lastCall = call; return call; } @@ -157,17 +161,20 @@ public synchronized 
SQLFuture executeKeyspaceIds(Context ctx, String que requestBuilder.setCallerId(ctx.getCallerId()); } - SQLFuture call = new SQLFuture<>( - Futures.transformAsync(client.executeKeyspaceIds(ctx, requestBuilder.build()), - new AsyncFunction() { - @Override - public ListenableFuture apply(ExecuteKeyspaceIdsResponse response) - throws Exception { - setSession(response.getSession()); - Proto.checkError(response.getError()); - return Futures.immediateFuture(new SimpleCursor(response.getResult())); - } - })); + SQLFuture call = + new SQLFuture<>( + transformAsync( + client.executeKeyspaceIds(ctx, requestBuilder.build()), + new AsyncFunction() { + @Override + public ListenableFuture apply(ExecuteKeyspaceIdsResponse response) + throws Exception { + setSession(response.getSession()); + Proto.checkError(response.getError()); + return Futures.immediateFuture(new SimpleCursor(response.getResult())); + } + }, + directExecutor())); lastCall = call; return call; } @@ -191,16 +198,19 @@ public synchronized SQLFuture executeKeyRanges(Context ctx, String query } SQLFuture call = - new SQLFuture<>(Futures.transformAsync(client.executeKeyRanges(ctx, requestBuilder.build()), - new AsyncFunction() { - @Override - public ListenableFuture apply(ExecuteKeyRangesResponse response) - throws Exception { - setSession(response.getSession()); - Proto.checkError(response.getError()); - return Futures.immediateFuture(new SimpleCursor(response.getResult())); - } - })); + new SQLFuture<>( + transformAsync( + client.executeKeyRanges(ctx, requestBuilder.build()), + new AsyncFunction() { + @Override + public ListenableFuture apply(ExecuteKeyRangesResponse response) + throws Exception { + setSession(response.getSession()); + Proto.checkError(response.getError()); + return Futures.immediateFuture(new SimpleCursor(response.getResult())); + } + }, + directExecutor())); lastCall = call; return call; } @@ -225,16 +235,19 @@ public synchronized SQLFuture executeEntityIds(Context ctx, String query } SQLFuture 
call = - new SQLFuture<>(Futures.transformAsync(client.executeEntityIds(ctx, requestBuilder.build()), - new AsyncFunction() { - @Override - public ListenableFuture apply(ExecuteEntityIdsResponse response) - throws Exception { - setSession(response.getSession()); - Proto.checkError(response.getError()); - return Futures.immediateFuture(new SimpleCursor(response.getResult())); - } - })); + new SQLFuture<>( + transformAsync( + client.executeEntityIds(ctx, requestBuilder.build()), + new AsyncFunction() { + @Override + public ListenableFuture apply(ExecuteEntityIdsResponse response) + throws Exception { + setSession(response.getSession()); + Proto.checkError(response.getError()); + return Futures.immediateFuture(new SimpleCursor(response.getResult())); + } + }, + directExecutor())); lastCall = call; return call; } @@ -268,17 +281,20 @@ public SQLFuture> executeBatch(Context ctx, List q requestBuilder.setCallerId(ctx.getCallerId()); } - return new SQLFuture<>(Futures - .transformAsync(client.executeBatch(ctx, requestBuilder.build()), - new AsyncFunction>() { - @Override public ListenableFuture> apply( - Vtgate.ExecuteBatchResponse response) throws Exception { - setSession(response.getSession()); - Proto.checkError(response.getError()); - return Futures.immediateFuture( - Proto.fromQueryResponsesToCursorList(response.getResultsList())); - } - })); + return new SQLFuture<>( + transformAsync( + client.executeBatch(ctx, requestBuilder.build()), + new AsyncFunction>() { + @Override + public ListenableFuture> apply( + Vtgate.ExecuteBatchResponse response) throws Exception { + setSession(response.getSession()); + Proto.checkError(response.getError()); + return Futures.immediateFuture( + Proto.fromQueryResponsesToCursorList(response.getResultsList())); + } + }, + directExecutor())); } public synchronized SQLFuture> executeBatchShards(Context ctx, @@ -296,18 +312,21 @@ public synchronized SQLFuture> executeBatchShards(Context ctx, requestBuilder.setCallerId(ctx.getCallerId()); } 
- SQLFuture> call = new SQLFuture<>( - Futures.transformAsync(client.executeBatchShards(ctx, requestBuilder.build()), - new AsyncFunction>() { - @Override - public ListenableFuture> apply(ExecuteBatchShardsResponse response) - throws Exception { - setSession(response.getSession()); - Proto.checkError(response.getError()); - return Futures - .>immediateFuture(Proto.toCursorList(response.getResultsList())); - } - })); + SQLFuture> call = + new SQLFuture<>( + transformAsync( + client.executeBatchShards(ctx, requestBuilder.build()), + new AsyncFunction>() { + @Override + public ListenableFuture> apply(ExecuteBatchShardsResponse response) + throws Exception { + setSession(response.getSession()); + Proto.checkError(response.getError()); + return Futures.>immediateFuture( + Proto.toCursorList(response.getResultsList())); + } + }, + directExecutor())); lastCall = call; return call; } @@ -328,18 +347,21 @@ public synchronized SQLFuture> executeBatchKeyspaceIds(Context ctx, requestBuilder.setCallerId(ctx.getCallerId()); } - SQLFuture> call = new SQLFuture<>( - Futures.transformAsync(client.executeBatchKeyspaceIds(ctx, requestBuilder.build()), - new AsyncFunction>() { - @Override - public ListenableFuture> apply(ExecuteBatchKeyspaceIdsResponse response) - throws Exception { - setSession(response.getSession()); - Proto.checkError(response.getError()); - return Futures - .>immediateFuture(Proto.toCursorList(response.getResultsList())); - } - })); + SQLFuture> call = + new SQLFuture<>( + transformAsync( + client.executeBatchKeyspaceIds(ctx, requestBuilder.build()), + new AsyncFunction>() { + @Override + public ListenableFuture> apply( + ExecuteBatchKeyspaceIdsResponse response) throws Exception { + setSession(response.getSession()); + Proto.checkError(response.getError()); + return Futures.>immediateFuture( + Proto.toCursorList(response.getResultsList())); + } + }, + directExecutor())); lastCall = call; return call; } @@ -356,14 +378,17 @@ public synchronized SQLFuture 
commit(Context ctx, boolean atomic) throws S requestBuilder.setCallerId(ctx.getCallerId()); } SQLFuture call = - new SQLFuture<>(Futures.transformAsync(client.commit(ctx, requestBuilder.build()), - new AsyncFunction() { - @Override - public ListenableFuture apply(CommitResponse response) throws Exception { - setSession(null); - return Futures.immediateFuture(null); - } - })); + new SQLFuture<>( + transformAsync( + client.commit(ctx, requestBuilder.build()), + new AsyncFunction() { + @Override + public ListenableFuture apply(CommitResponse response) throws Exception { + setSession(null); + return Futures.immediateFuture(null); + } + }, + directExecutor())); lastCall = call; return call; } @@ -375,14 +400,17 @@ public synchronized SQLFuture rollback(Context ctx) throws SQLException { requestBuilder.setCallerId(ctx.getCallerId()); } SQLFuture call = - new SQLFuture<>(Futures.transformAsync(client.rollback(ctx, requestBuilder.build()), - new AsyncFunction() { - @Override - public ListenableFuture apply(RollbackResponse response) throws Exception { - setSession(null); - return Futures.immediateFuture(null); - } - })); + new SQLFuture<>( + transformAsync( + client.rollback(ctx, requestBuilder.build()), + new AsyncFunction() { + @Override + public ListenableFuture apply(RollbackResponse response) throws Exception { + setSession(null); + return Futures.immediateFuture(null); + } + }, + directExecutor())); lastCall = call; return call; } From 96660b802a2428f47371ab2907388fb0c87f6dbe Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Sat, 25 Feb 2017 15:58:09 -0800 Subject: [PATCH 042/108] Handling missing clear text auth plugin. Seems to happen on Travis. 
--- go/mysqlconn/server_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/go/mysqlconn/server_test.go b/go/mysqlconn/server_test.go index d933a1e52b8..abcb527e032 100644 --- a/go/mysqlconn/server_test.go +++ b/go/mysqlconn/server_test.go @@ -299,6 +299,10 @@ func TestClearTextServer(t *testing.T) { if ok { t.Fatalf("mysql should have failed but returned: %v", output) } + if strings.Contains(output, "No such file or directory") { + t.Logf("skipping mysql clear text tests, as the clear text plugin cannot be loaded: %v", err) + return + } if !strings.Contains(output, "plugin not enabled") { t.Errorf("Unexpected output for 'select rows': %v", output) } From ab84dc818dac71be1d1179f10d94d506cd6f2f05 Mon Sep 17 00:00:00 2001 From: Michael Berlin Date: Sat, 25 Feb 2017 22:18:35 -0800 Subject: [PATCH 043/108] vtgate/buffer: Fix leakage of buffer pool slots due to canceled requests. Canceled requests, e.g. those with a deadline shorter than the failover duration, would not return their buffer pool slot. Eventually, all slots would have leaked and the buffer would start and stop buffering as usual but reject all requests immediately because it assumed there are other pending failovers which are holding the needed slots. Previously, the code path for canceled requests differed from the common method unblockAndWait() which would unblock a request and also release its buffer pool slot after the request finished its retry. This made this oversight possible. I've changed this now: canceled requests also use unblockAndWait() now. The code was also out of sync with the documentation for WaitForFailoverEnd(): The RetryDoneFunc() must not be returned when the function returns an error as well. However, for canceled requests both were returned. 
--- go/sync2/semaphore.go | 5 +++ go/vt/vtgate/buffer/buffer_test.go | 61 +++++++++++++++++++++++++++-- go/vt/vtgate/buffer/flags.go | 1 + go/vt/vtgate/buffer/shard_buffer.go | 32 ++++++++++----- 4 files changed, 87 insertions(+), 12 deletions(-) diff --git a/go/sync2/semaphore.go b/go/sync2/semaphore.go index 6629042e5fe..cd75e72dd1e 100644 --- a/go/sync2/semaphore.go +++ b/go/sync2/semaphore.go @@ -66,3 +66,8 @@ func (sem *Semaphore) TryAcquire() bool { func (sem *Semaphore) Release() { sem.slots <- struct{}{} } + +// Size returns the current number of available slots. +func (sem *Semaphore) Size() int { + return len(sem.slots) +} diff --git a/go/vt/vtgate/buffer/buffer_test.go b/go/vt/vtgate/buffer/buffer_test.go index 5c27caa0450..b52c4b4e4e5 100644 --- a/go/vt/vtgate/buffer/buffer_test.go +++ b/go/vt/vtgate/buffer/buffer_test.go @@ -148,6 +148,9 @@ func TestBuffer(t *testing.T) { if err := waitForState(b, stateIdle); err != nil { t.Fatal(err) } + if err := waitForPoolSlots(b, *size); err != nil { + t.Fatal(err) + } // Stop counter must have been increased for the second failover. if got, want := stops.Counts()[statsKeyJoinedFailoverEndDetected], int64(2); got != want { @@ -180,7 +183,9 @@ func issueRequestAndBlockRetry(ctx context.Context, t *testing.T, b *Buffer, err // Wait for the test's signal before we tell the buffer that we retried. <-markRetryDone } - defer retryDone() + if retryDone != nil { + defer retryDone() + } defer close(bufferingStopped) }() @@ -188,7 +193,7 @@ func issueRequestAndBlockRetry(ctx context.Context, t *testing.T, b *Buffer, err } // waitForRequestsInFlight blocks until the buffer queue has reached "count". -// This check is potentially racy and therefore retried up to a timeout of 2s. +// This check is potentially racy and therefore retried up to a timeout of 10s. 
func waitForRequestsInFlight(b *Buffer, count int) error { start := time.Now() sb := b.getOrCreateBuffer(keyspace, shard) @@ -204,7 +209,7 @@ func waitForRequestsInFlight(b *Buffer, count int) error { } } -// waitForState polls the buffer data for up to 2 seconds and returns an error +// waitForState polls the buffer data for up to 10 seconds and returns an error // if shardBuffer doesn't have the wanted state by then. func waitForState(b *Buffer, want bufferState) error { sb := b.getOrCreateBuffer(keyspace, shard) @@ -221,6 +226,24 @@ func waitForState(b *Buffer, want bufferState) error { } } +// waitForPoolSlots waits up to 10s that all buffer pool slots have been +// returned. The wait is necessary because in some cases the buffer code +// does not block itself on the wait. But in any case, the slot should be +// returned when the request has finished. See also shardBuffer.unblockAndWait(). +func waitForPoolSlots(b *Buffer, want int) error { + start := time.Now() + for { + got := b.bufferSizeSema.Size() + if got == want { + return nil + } + + if time.Since(start) > 10*time.Second { + return fmt.Errorf("not all pool slots were returned: got = %v, want = %v", got, want) + } + } +} + // TestDryRun tests the case when only the dry-run mode is enabled globally. func TestDryRun(t *testing.T) { resetVariables() @@ -237,6 +260,9 @@ func TestDryRun(t *testing.T) { if err := waitForState(b, stateBuffering); err != nil { t.Fatal(err) } + if err := waitForPoolSlots(b, *size); err != nil { + t.Fatal(err) + } if got, want := starts.Counts()[statsKeyJoined], int64(1); got != want { t.Fatalf("buffering start was not tracked: got = %v, want = %v", got, want) } @@ -274,6 +300,10 @@ func TestPassthrough(t *testing.T) { if retryDone, err := b.WaitForFailoverEnd(context.Background(), keyspace, shard, nonFailoverErr); err != nil || retryDone != nil { t.Fatalf("requests with non-failover errors must never be buffered. 
err: %v retryDone: %v", err, retryDone) } + + if err := waitForPoolSlots(b, *size); err != nil { + t.Fatal(err) + } } // TestPassThroughLastReparentTooRecent tests that buffering is skipped if @@ -305,6 +335,9 @@ func TestPassThroughLastReparentTooRecent(t *testing.T) { if retryDone, err := b.WaitForFailoverEnd(context.Background(), keyspace, shard, failoverErr); err != nil || retryDone != nil { t.Fatalf("requests where the failover end was recently detected before the start must not be buffered. err: %v retryDone: %v", err, retryDone) } + if err := waitForPoolSlots(b, *size); err != nil { + t.Fatal(err) + } if got, want := requestsSkipped.Counts()[statsKeyJoinedLastReparentTooRecent], int64(1); got != want { t.Fatalf("skipped request was not tracked: got = %v, want = %v", got, want) } @@ -353,6 +386,9 @@ func TestPassthroughDuringDrain(t *testing.T) { if err := waitForState(b, stateIdle); err != nil { t.Fatal(err) } + if err := waitForPoolSlots(b, *size); err != nil { + t.Fatal(err) + } } // TestPassthroughIgnoredKeyspaceOrShard tests that the explicit whitelisting @@ -376,6 +412,9 @@ func TestPassthroughIgnoredKeyspaceOrShard(t *testing.T) { if retryDone, err := b.WaitForFailoverEnd(context.Background(), keyspace, ignoredShard, failoverErr); err != nil || retryDone != nil { t.Fatalf("requests for ignored shards must not be buffered. 
err: %v retryDone: %v", err, retryDone) } + if err := waitForPoolSlots(b, *size); err != nil { + t.Fatal(err) + } statsKeyJoined = strings.Join([]string{keyspace, ignoredShard, skippedDisabled}, ".") if got, want := requestsSkipped.Counts()[statsKeyJoined], int64(1); got != want { t.Fatalf("request was not skipped as disabled: got = %v, want = %v", got, want) @@ -463,6 +502,9 @@ func testRequestCanceled(t *testing.T, explicitEnd bool) { if err := waitForState(b, stateIdle); err != nil { t.Fatal(err) } + if err := waitForPoolSlots(b, *size); err != nil { + t.Fatal(err) + } } func TestEviction(t *testing.T) { @@ -511,6 +553,9 @@ func TestEviction(t *testing.T) { if err := waitForState(b, stateIdle); err != nil { t.Fatal(err) } + if err := waitForPoolSlots(b, 2); err != nil { + t.Fatal(err) + } } func isCanceledError(err error) error { @@ -587,6 +632,9 @@ func TestEvictionNotPossible(t *testing.T) { if err := waitForState(b, stateIdle); err != nil { t.Fatal(err) } + if err := waitForPoolSlots(b, 1); err != nil { + t.Fatal(err) + } statsKeyJoined := strings.Join([]string{keyspace, shard2, string(skippedBufferFull)}, ".") if got, want := requestsSkipped.Counts()[statsKeyJoined], int64(1); got != want { t.Fatalf("skipped request was not tracked: got = %v, want = %v", got, want) @@ -676,6 +724,9 @@ func TestWindow(t *testing.T) { if err := waitForState(b, stateIdle); err != nil { t.Fatal(err) } + if err := waitForPoolSlots(b, 1); err != nil { + t.Fatal(err) + } } func waitForRequestsExceededWindow(count int) error { @@ -715,6 +766,10 @@ func TestShutdown(t *testing.T) { if err := <-stopped1; err != nil { t.Fatalf("request should have been buffered and not returned an error: %v", err) } + + if err := waitForPoolSlots(b, *size); err != nil { + t.Fatal(err) + } } // resetVariables resets the task level variables. 
The code does not reset these diff --git a/go/vt/vtgate/buffer/flags.go b/go/vt/vtgate/buffer/flags.go index 570484afd12..508c9f9d5b0 100644 --- a/go/vt/vtgate/buffer/flags.go +++ b/go/vt/vtgate/buffer/flags.go @@ -28,6 +28,7 @@ func resetFlagsForTesting() { // Set all flags to their default value. flag.Set("enable_buffer", "false") flag.Set("enable_buffer_dry_run", "false") + flag.Set("buffer_size", "10") flag.Set("buffer_window", "10s") flag.Set("buffer_keyspace_shards", "") flag.Set("buffer_max_failover_duration", "20s") diff --git a/go/vt/vtgate/buffer/shard_buffer.go b/go/vt/vtgate/buffer/shard_buffer.go index 5bd7647d4a2..199b4dcf97d 100644 --- a/go/vt/vtgate/buffer/shard_buffer.go +++ b/go/vt/vtgate/buffer/shard_buffer.go @@ -197,7 +197,7 @@ func (sb *shardBuffer) waitForFailoverEnd(ctx context.Context, keyspace, shard s if err != nil { return nil, err } - return entry.bufferCancel, sb.wait(ctx, entry) + return sb.wait(ctx, entry) } // shouldBufferLocked returns true if the current request should be buffered @@ -311,8 +311,9 @@ func (sb *shardBuffer) bufferRequestLocked(ctx context.Context) (*entry, error) // the request retried and finished. // If blockingWait is true, this call will block until the request retried and // finished. This mode is used during the drain (to avoid flooding the master) -// while the non-blocking mode is used when evicting a request e.g. because the -// buffer is full or it exceeded the buffering window +// while the non-blocking mode is used when a) evicting a request (e.g. because +// the buffer is full or it exceeded the buffering window) or b) when the +// request was canceled from outside and we removed it. func (sb *shardBuffer) unblockAndWait(e *entry, err error, releaseSlot, blockingWait bool) { // Set error such that the request will see it. e.err = err @@ -345,13 +346,14 @@ func (sb *shardBuffer) waitForRequestFinish(e *entry, releaseSlot, async bool) { } // wait blocks while the request is buffered during the failover. 
-func (sb *shardBuffer) wait(ctx context.Context, e *entry) error { +// See Buffer.WaitForFailoverEnd() for the API contract of the return values. +func (sb *shardBuffer) wait(ctx context.Context, e *entry) (RetryDoneFunc, error) { select { case <-ctx.Done(): sb.remove(e) - return vterrors.Errorf(vterrors.Code(contextCanceledError), "%v: %v", contextCanceledError, ctx.Err()) + return nil, vterrors.Errorf(vterrors.Code(contextCanceledError), "%v: %v", contextCanceledError, ctx.Err()) case <-e.done: - return e.err + return e.bufferCancel, e.err } } @@ -406,9 +408,21 @@ func (sb *shardBuffer) remove(toRemove *entry) { if e == toRemove { // Delete entry at index "i" from slice. sb.queue = append(sb.queue[:i], sb.queue[i+1:]...) - // Entry was not canceled internally yet. Finish it explicitly. This way, - // timeoutThread will find out about it as well. - close(toRemove.done) + + // Cancel the entry's "bufferCtx". + // The usual drain or eviction code would unblock the request and then + // wait for the "bufferCtx" to be done. + // But this code path is different because it's going to return an error + // to the request and not the "e.bufferCancel" function i.e. the request + // cannot cancel the "bufferCtx" itself. + // Therefore, we call "e.bufferCancel". This also avoids that the + // context's Go routine could leak. + e.bufferCancel() + // Release the buffer slot and close the "e.done" channel. + // By closing "e.done", we finish it explicitly and timeoutThread will + // find out about it as well. + sb.unblockAndWait(e, nil /* err */, true /* releaseSlot */, false /* blockingWait */) + // Track it as "ContextDone" eviction. 
statsKeyWithReason := append(sb.statsKey, string(evictedContextDone)) requestsEvicted.Add(statsKeyWithReason, 1) From 2fd74dc74a8e2874624a24f7c5b8ca6289140bd1 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Sun, 26 Feb 2017 23:31:51 -0800 Subject: [PATCH 044/108] vterrors: handle special cases (#2600) * Disallow creation of error with OK as status code. Auto-convert to INTERNAL and log error. * Handle context errors as special-case. Return more appropriate error code instead of UNKNOWN. * Also log UNAVAILABLE errors in vtgate because they're often due to unreachable vttablets. --- go/vt/logutil/throttled.go | 2 +- go/vt/logutil/throttled_test.go | 4 +-- go/vt/vterrors/aggregate_test.go | 9 ++--- go/vt/vterrors/vterrors.go | 23 +++++++++--- go/vt/vterrors/vterrors_test.go | 61 ++++++++++++++++++++++++++++++++ go/vt/vtgate/vtgate.go | 2 ++ 6 files changed, 88 insertions(+), 13 deletions(-) create mode 100644 go/vt/vterrors/vterrors_test.go diff --git a/go/vt/logutil/throttled.go b/go/vt/logutil/throttled.go index 867071f7b88..9fbbf73e37b 100644 --- a/go/vt/logutil/throttled.go +++ b/go/vt/logutil/throttled.go @@ -46,7 +46,7 @@ func (tl *ThrottledLogger) log(logF logFunc, format string, v ...interface{}) { logWaitTime := tl.maxInterval - (now.Sub(tl.lastlogTime)) if logWaitTime < 0 { tl.lastlogTime = now - logF(2, fmt.Sprintf(tl.name+":"+format, v...)) + logF(2, fmt.Sprintf(tl.name+": "+format, v...)) return } // If this is the first message to be skipped, start a goroutine diff --git a/go/vt/logutil/throttled_test.go b/go/vt/logutil/throttled_test.go index decfc465e4a..593c6692cda 100644 --- a/go/vt/logutil/throttled_test.go +++ b/go/vt/logutil/throttled_test.go @@ -24,7 +24,7 @@ func TestThrottledLogger(t *testing.T) { start := time.Now() go tl.Infof("test %v", 1) - if got, want := <-log, "name:test 1"; got != want { + if got, want := <-log, "name: test 1"; got != want { t.Errorf("got %q, want %q", got, want) } @@ -40,7 +40,7 @@ func TestThrottledLogger(t 
*testing.T) { } go tl.Infof("test %v", 3) - if got, want := <-log, "name:test 3"; got != want { + if got, want := <-log, "name: test 3"; got != want { t.Errorf("got %q, want %q", got, want) } if got, want := skippedCount(tl), 0; got != want { diff --git a/go/vt/vterrors/aggregate_test.go b/go/vt/vterrors/aggregate_test.go index 4ef8f431f01..f36521c0f3e 100644 --- a/go/vt/vterrors/aggregate_test.go +++ b/go/vt/vterrors/aggregate_test.go @@ -35,16 +35,16 @@ func TestAggregateVtGateErrorCodes(t *testing.T) { expected: vtrpcpb.Code_INVALID_ARGUMENT, }, { - // aggregate two codes to the highest priority + // OK should be converted to INTERNAL input: []error{ errFromCode(vtrpcpb.Code_OK), errFromCode(vtrpcpb.Code_UNAVAILABLE), }, - expected: vtrpcpb.Code_UNAVAILABLE, + expected: vtrpcpb.Code_INTERNAL, }, { + // aggregate two codes to the highest priority input: []error{ - errFromCode(vtrpcpb.Code_OK), errFromCode(vtrpcpb.Code_UNAVAILABLE), errFromCode(vtrpcpb.Code_INVALID_ARGUMENT), }, @@ -53,7 +53,6 @@ func TestAggregateVtGateErrorCodes(t *testing.T) { { // unknown errors map to the unknown code input: []error{ - errFromCode(vtrpcpb.Code_OK), fmt.Errorf("unknown error"), }, expected: vtrpcpb.Code_UNKNOWN, @@ -79,7 +78,6 @@ func TestAggregateVtGateErrors(t *testing.T) { }, { input: []error{ - errFromCode(vtrpcpb.Code_OK), errFromCode(vtrpcpb.Code_UNAVAILABLE), errFromCode(vtrpcpb.Code_INVALID_ARGUMENT), }, @@ -88,7 +86,6 @@ func TestAggregateVtGateErrors(t *testing.T) { aggregateErrors([]error{ errors.New(errGeneric), errors.New(errGeneric), - errors.New(errGeneric), }), ), }, diff --git a/go/vt/vterrors/vterrors.go b/go/vt/vterrors/vterrors.go index d9cd7b6d98f..3f627749b99 100644 --- a/go/vt/vterrors/vterrors.go +++ b/go/vt/vterrors/vterrors.go @@ -2,10 +2,17 @@ package vterrors import ( "fmt" + "time" + "golang.org/x/net/context" + + "github.com/youtube/vitess/go/tb" + "github.com/youtube/vitess/go/vt/logutil" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) 
+var logger = logutil.NewThrottledLogger("vterror", 5*time.Second) + type vtError struct { code vtrpcpb.Code err string @@ -13,6 +20,10 @@ type vtError struct { // New creates a new error using the code and input string. func New(code vtrpcpb.Code, in string) error { + if code == vtrpcpb.Code_OK { + logger.Errorf("OK is an invalid code, using INTERNAL instead: %s\n%s", in, tb.Stack(2)) + code = vtrpcpb.Code_INTERNAL + } return &vtError{ code: code, err: in, @@ -21,10 +32,7 @@ func New(code vtrpcpb.Code, in string) error { // Errorf returns a new error built using Printf style arguments. func Errorf(code vtrpcpb.Code, format string, args ...interface{}) error { - return &vtError{ - code: code, - err: fmt.Sprintf(format, args...), - } + return New(code, fmt.Sprintf(format, args...)) } func (e *vtError) Error() string { @@ -40,5 +48,12 @@ func Code(err error) vtrpcpb.Code { if err, ok := err.(*vtError); ok { return err.code } + // Handle some special cases. + switch err { + case context.Canceled: + return vtrpcpb.Code_CANCELED + case context.DeadlineExceeded: + return vtrpcpb.Code_DEADLINE_EXCEEDED + } return vtrpcpb.Code_UNKNOWN } diff --git a/go/vt/vterrors/vterrors_test.go b/go/vt/vterrors/vterrors_test.go new file mode 100644 index 00000000000..d3e23b879f6 --- /dev/null +++ b/go/vt/vterrors/vterrors_test.go @@ -0,0 +1,61 @@ +package vterrors + +import ( + "errors" + "testing" + + "golang.org/x/net/context" + + vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" +) + +func TestCreation(t *testing.T) { + testcases := []struct { + in, want vtrpcpb.Code + }{{ + in: vtrpcpb.Code_CANCELED, + want: vtrpcpb.Code_CANCELED, + }, { + in: vtrpcpb.Code_UNKNOWN, + want: vtrpcpb.Code_UNKNOWN, + }, { + // Invalid code OK should be converted to INTERNAL. 
+ in: vtrpcpb.Code_OK, + want: vtrpcpb.Code_INTERNAL, + }} + for _, tcase := range testcases { + if got := Code(New(tcase.in, "")); got != tcase.want { + t.Errorf("Code(New(%v)): %v, want %v", tcase.in, got, tcase.want) + } + if got := Code(Errorf(tcase.in, "")); got != tcase.want { + t.Errorf("Code(Errorf(%v)): %v, want %v", tcase.in, got, tcase.want) + } + } +} + +func TestCode(t *testing.T) { + testcases := []struct { + in error + want vtrpcpb.Code + }{{ + in: nil, + want: vtrpcpb.Code_OK, + }, { + in: errors.New("generic"), + want: vtrpcpb.Code_UNKNOWN, + }, { + in: New(vtrpcpb.Code_CANCELED, "generic"), + want: vtrpcpb.Code_CANCELED, + }, { + in: context.Canceled, + want: vtrpcpb.Code_CANCELED, + }, { + in: context.DeadlineExceeded, + want: vtrpcpb.Code_DEADLINE_EXCEEDED, + }} + for _, tcase := range testcases { + if got := Code(tcase.in); got != tcase.want { + t.Errorf("Code(%v): %v, want %v", tcase.in, got, tcase.want) + } + } +} diff --git a/go/vt/vtgate/vtgate.go b/go/vt/vtgate/vtgate.go index a4a6f395499..e128781f212 100644 --- a/go/vt/vtgate/vtgate.go +++ b/go/vt/vtgate/vtgate.go @@ -896,6 +896,8 @@ func recordAndAnnotateError(err error, statsKey []string, request map[string]int switch ec { case vtrpcpb.Code_UNKNOWN, vtrpcpb.Code_INTERNAL, vtrpcpb.Code_DATA_LOSS: logger.Errorf("%v, request: %+v", err, request) + case vtrpcpb.Code_UNAVAILABLE: + logger.Infof("%v, request: %+v", err, request) } return vterrors.Errorf(vterrors.Code(err), "vtgate: %s: %v", servenv.ListeningURL.String(), err) } From d222e7a2e31e12301dd5f18b202d22cd1ea67828 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Mon, 27 Feb 2017 07:21:44 -0800 Subject: [PATCH 045/108] Addressing review comments. 
--- go/mysqlconn/auth_server_config.go | 28 ++++++++++++++++++++++++++++ go/mysqlconn/conn.go | 2 +- go/mysqlconn/doc.go | 2 +- go/mysqlconn/server.go | 4 ++-- go/vt/vtgate/plugin_mysql_server.go | 22 ++-------------------- 5 files changed, 34 insertions(+), 24 deletions(-) diff --git a/go/mysqlconn/auth_server_config.go b/go/mysqlconn/auth_server_config.go index f7513c481a7..3fca5d71608 100644 --- a/go/mysqlconn/auth_server_config.go +++ b/go/mysqlconn/auth_server_config.go @@ -2,6 +2,10 @@ package mysqlconn import ( "bytes" + "encoding/json" + "io/ioutil" + + log "github.com/golang/glog" "github.com/youtube/vitess/go/sqldb" ) @@ -29,6 +33,30 @@ func NewAuthServerConfig() *AuthServerConfig { } } +// RegisterAuthServerConfigFromParams creates and registers a new +// AuthServerConfig, loaded for a JSON file or string. If file is set, +// it uses file. Otherwise, load the string. It log.Fatals out in case +// of error. +func RegisterAuthServerConfigFromParams(file, str string) { + authServerConfig := NewAuthServerConfig() + jsonConfig := []byte(str) + if file != "" { + data, err := ioutil.ReadFile(file) + if err != nil { + log.Fatalf("Failed to read mysql_auth_server_config_file file: %v", err) + } + jsonConfig = data + } + + // Parse JSON config. + if err := json.Unmarshal(jsonConfig, &authServerConfig.Entries); err != nil { + log.Fatalf("Error parsing auth server config: %v", err) + } + + // And register the server. + RegisterAuthServerImpl("config", authServerConfig) +} + // UseClearText is part of the AuthServer interface. func (a *AuthServerConfig) UseClearText() bool { return a.ClearText diff --git a/go/mysqlconn/conn.go b/go/mysqlconn/conn.go index ef70d641bf0..1ccd040f950 100644 --- a/go/mysqlconn/conn.go +++ b/go/mysqlconn/conn.go @@ -155,7 +155,7 @@ func newConn(conn net.Conn) *Conn { // readPacketDirect attempts to read a packet from the socket directly. 
// It needs to be used for the first handshake packet the server receives, -// so we do't buffer the SSL negociation packet. As a shortcut, only +// so we do't buffer the SSL negotiation packet. As a shortcut, only // packets smaller than MaxPacketSize can be read here. func (c *Conn) readPacketDirect() ([]byte, error) { var header [4]byte diff --git a/go/mysqlconn/doc.go b/go/mysqlconn/doc.go index fa35e3ff91d..236783ffbc9 100644 --- a/go/mysqlconn/doc.go +++ b/go/mysqlconn/doc.go @@ -54,7 +54,7 @@ If our server's AuthServer UseClearText() returns true, and the client's auth method is not mysql_clear_password, we will re-negotiate. -If any of these methods doesn't work for the server, it will re-negociate +If any of these methods doesn't work for the server, it will re-negotiate by sending an Authentication Method Switch Request Packet. The client will then handle that if it can. -- diff --git a/go/mysqlconn/server.go b/go/mysqlconn/server.go index b374f244f3e..824427cef10 100644 --- a/go/mysqlconn/server.go +++ b/go/mysqlconn/server.go @@ -142,7 +142,7 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32) { } // Wait for the client response. This has to be a direct read, - // so we don't buffer the TLS negociation packets. + // so we don't buffer the TLS negotiation packets. response, err := c.readPacketDirect() if err != nil { log.Errorf("Cannot read client handshake response: %v", err) @@ -208,7 +208,7 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32) { renegotiateWithClearText = true } - // If we need to re-negociate with clear text, do it. + // If we need to re-negotiate with clear text, do it. if renegotiateWithClearText { // Check error conditions. 
if !l.AllowClearTextWithoutTLS && c.Capabilities&CapabilityClientSSL == 0 { diff --git a/go/vt/vtgate/plugin_mysql_server.go b/go/vt/vtgate/plugin_mysql_server.go index e796d0c9204..5270025e21f 100644 --- a/go/vt/vtgate/plugin_mysql_server.go +++ b/go/vt/vtgate/plugin_mysql_server.go @@ -1,10 +1,8 @@ package vtgate import ( - "encoding/json" "flag" "fmt" - "io/ioutil" "net" "strings" @@ -42,24 +40,8 @@ func initAuthServerConfig() { log.Fatalf("Both mysql_auth_server_config_file and mysql_auth_server_config_string specified, can only use one.") } - // Read file if necessary. - authServerConfig := mysqlconn.NewAuthServerConfig() - jsonConfig := []byte(*mysqlAuthServerConfigString) - if *mysqlAuthServerConfigFile != "" { - data, err := ioutil.ReadFile(*mysqlAuthServerConfigFile) - if err != nil { - log.Fatalf("Failed to read mysql_auth_server_config_file file: %v", err) - } - jsonConfig = data - } - - // Parse JSON config. - if err := json.Unmarshal(jsonConfig, &authServerConfig.Entries); err != nil { - log.Fatalf("Error parsing auth server config: %v", err) - } - - // And register the server. - mysqlconn.RegisterAuthServerImpl("config", authServerConfig) + // Create and register auth server. + mysqlconn.RegisterAuthServerConfigFromParams(*mysqlAuthServerConfigFile, *mysqlAuthServerConfigString) } // vtgateHandler implements the Listener interface. 
From 2ee53b491025aa6c17af614ac43e9b37153731e3 Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Thu, 19 Jan 2017 10:49:57 -0500 Subject: [PATCH 046/108] Adds decoding of strings and string-like blobs based on the charset returned by MySQL --- .../vitess/jdbc/ConnectionProperties.java | 99 ++++ .../vitess/jdbc/FieldWithMetadata.java | 159 +++++- .../vitess/jdbc/VitessConnection.java | 24 + .../flipkart/vitess/jdbc/VitessResultSet.java | 57 +- .../vitess/jdbc/VitessResultSetMetaData.java | 37 +- .../com/flipkart/vitess/util/MysqlDefs.java | 8 +- .../com/flipkart/vitess/util/StringUtils.java | 82 +++ .../vitess/util/charset/CharsetMapping.java | 536 ++++++++++++++++++ .../vitess/util/charset/Collation.java | 33 ++ .../vitess/util/charset/MysqlCharset.java | 118 ++++ .../vitess/jdbc/ConnectionPropertiesTest.java | 65 ++- .../vitess/jdbc/FieldWithMetadataTest.java | 372 +++++++++++- .../vitess/jdbc/VitessConnectionTest.java | 47 +- .../jdbc/VitessResultSetMetadataTest.java | 176 ++++-- .../vitess/jdbc/VitessResultSetTest.java | 227 ++++++++ 15 files changed, 1967 insertions(+), 73 deletions(-) create mode 100644 java/jdbc/src/main/java/com/flipkart/vitess/util/charset/CharsetMapping.java create mode 100644 java/jdbc/src/main/java/com/flipkart/vitess/util/charset/Collation.java create mode 100644 java/jdbc/src/main/java/com/flipkart/vitess/util/charset/MysqlCharset.java diff --git a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/ConnectionProperties.java b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/ConnectionProperties.java index cf2ae45a6f4..d94a7c93763 100644 --- a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/ConnectionProperties.java +++ b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/ConnectionProperties.java @@ -1,9 +1,11 @@ package com.flipkart.vitess.jdbc; import com.flipkart.vitess.util.Constants; +import com.flipkart.vitess.util.StringUtils; import com.youtube.vitess.proto.Query; import com.youtube.vitess.proto.Topodata; +import 
java.io.UnsupportedEncodingException; import java.lang.reflect.Field; import java.sql.DriverPropertyInfo; import java.sql.SQLException; @@ -30,6 +32,16 @@ public class ConnectionProperties { } } + // Configs for handling deserialization of blobs + private BooleanConnectionProperty blobsAreStrings = new BooleanConnectionProperty( + "blobsAreStrings", + "Should the driver always treat BLOBs as Strings - specifically to work around dubious metadata returned by the server for GROUP BY clauses?", + false); + private BooleanConnectionProperty functionsNeverReturnBlobs = new BooleanConnectionProperty( + "functionsNeverReturnBlobs", + "Should the driver always treat data from functions returning BLOBs as Strings - specifically to work around dubious metadata returned by the server for GROUP BY clauses?", + false); + // Configs for handing tinyint(1) private BooleanConnectionProperty tinyInt1isBit = new BooleanConnectionProperty( "tinyInt1isBit", @@ -40,6 +52,29 @@ public class ConnectionProperties { "Should the JDBC driver treat the MySQL type \"YEAR\" as a java.sql.Date, or as a SHORT?", true); + // Configs for handling irregular blobs, those with characters outside the typical 4-byte encodings + private BooleanConnectionProperty useBlobToStoreUTF8OutsideBMP = new BooleanConnectionProperty( + "useBlobToStoreUTF8OutsideBMP", + "Tells the driver to treat [MEDIUM/LONG]BLOB columns as [LONG]VARCHAR columns holding text encoded in UTF-8 that has characters outside the BMP (4-byte encodings), which MySQL server can't handle natively.", + false); + private StringConnectionProperty utf8OutsideBmpIncludedColumnNamePattern = new StringConnectionProperty( + "utf8OutsideBmpIncludedColumnNamePattern", + "Used to specify exclusion rules to \"utf8OutsideBmpExcludedColumnNamePattern\". 
The regex must follow the patterns used for the java.util.regex package.", + null, + null); + private StringConnectionProperty utf8OutsideBmpExcludedColumnNamePattern = new StringConnectionProperty( + "utf8OutsideBmpExcludedColumnNamePattern", + "When \"useBlobToStoreUTF8OutsideBMP\" is set to \"true\", column names matching the given regex will still be treated as BLOBs unless they match the regex specified for \"utf8OutsideBmpIncludedColumnNamePattern\". The regex must follow the patterns used for the java.util.regex package.", + null, + null); + + // Default encodings, for when one cannot be determined from field metadata + private StringConnectionProperty characterEncoding = new StringConnectionProperty( + "characterEncoding", + "If a character encoding cannot be detected, which fallback should be used when dealing with strings? (defaults is to 'autodetect')", + null, + null); + // Vitess-specific configs private EnumConnectionProperty executeType = new EnumConnectionProperty<>( Constants.Property.EXECUTE_TYPE, @@ -64,6 +99,7 @@ public class ConnectionProperties { private boolean includeAllFieldsCache = true; private boolean twopcEnabledCache = false; private boolean simpleExecuteTypeCache = true; + private String characterEncodingAsString = null; void initializeProperties(Properties props) throws SQLException { Properties propsCopy = (Properties) props.clone(); @@ -84,6 +120,16 @@ private void postInitialization() throws SQLException { this.includeAllFieldsCache = this.includedFieldsCache == Query.ExecuteOptions.IncludedFields.ALL; this.twopcEnabledCache = this.twopcEnabled.getValueAsBoolean(); this.simpleExecuteTypeCache = this.executeType.getValueAsEnum() == Constants.QueryExecuteType.SIMPLE; + this.characterEncodingAsString = this.characterEncoding.getValueAsString(); + if (characterEncodingAsString != null) { + // Attempt to use the encoding, and bail out if it can't be used + try { + String testString = "abc"; + StringUtils.getBytes(testString, 
characterEncodingAsString); + } catch (UnsupportedEncodingException UE) { + throw new SQLException("Unsupported character encoding: " + characterEncodingAsString); + } + } } static DriverPropertyInfo[] exposeAsDriverPropertyInfo(Properties info, int slotsToReserve) throws SQLException { @@ -111,6 +157,22 @@ protected DriverPropertyInfo[] exposeAsDriverPropertyInfoInternal(Properties inf return driverProperties; } + public boolean getBlobsAreStrings() { + return blobsAreStrings.getValueAsBoolean(); + } + + public void setBlobsAreStrings(boolean blobsAreStrings) { + this.blobsAreStrings.setValue(blobsAreStrings); + } + + public boolean getUseBlobToStoreUTF8OutsideBMP() { + return useBlobToStoreUTF8OutsideBMP.getValueAsBoolean(); + } + + public void setUseBlobToStoreUTF8OutsideBMP(boolean useBlobToStoreUTF8OutsideBMP) { + this.useBlobToStoreUTF8OutsideBMP.setValue(useBlobToStoreUTF8OutsideBMP); + } + public boolean getTinyInt1isBit() { return tinyInt1isBit.getValueAsBoolean(); } @@ -119,6 +181,30 @@ public void setTinyInt1isBit(boolean tinyInt1isBit) { this.tinyInt1isBit.setValue(tinyInt1isBit); } + public boolean getFunctionsNeverReturnBlobs() { + return functionsNeverReturnBlobs.getValueAsBoolean(); + } + + public void setFunctionsNeverReturnBlobs(boolean functionsNeverReturnBlobs) { + this.functionsNeverReturnBlobs.setValue(functionsNeverReturnBlobs); + } + + public String getUtf8OutsideBmpIncludedColumnNamePattern() { + return utf8OutsideBmpIncludedColumnNamePattern.getValueAsString(); + } + + public void setUtf8OutsideBmpIncludedColumnNamePattern(String pattern) { + this.utf8OutsideBmpIncludedColumnNamePattern.setValue(pattern); + } + + public String getUtf8OutsideBmpExcludedColumnNamePattern() { + return utf8OutsideBmpExcludedColumnNamePattern.getValueAsString(); + } + + public void setUtf8OutsideBmpExcludedColumnNamePattern(String pattern) { + this.utf8OutsideBmpExcludedColumnNamePattern.setValue(pattern); + } + public boolean getYearIsDateType() { return 
yearIsDateType.getValueAsBoolean(); } @@ -127,6 +213,15 @@ public void setYearIsDateType(boolean yearIsDateType) { this.yearIsDateType.setValue(yearIsDateType); } + public String getEncoding() { + return characterEncodingAsString; + } + + public void setEncoding(String encoding) { + this.characterEncoding.setValue(encoding); + this.characterEncodingAsString = this.characterEncoding.getValueAsString(); + } + public Query.ExecuteOptions.IncludedFields getIncludedFields() { return this.includedFieldsCache; } @@ -267,6 +362,10 @@ void initializeFrom(String extractedValue) { } } + public String getValueAsString() { + return (String) valueAsObject; + } + @Override String[] getAllowableValues() { return allowableValues; diff --git a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/FieldWithMetadata.java b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/FieldWithMetadata.java index bf07fd56799..a9721e80731 100644 --- a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/FieldWithMetadata.java +++ b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/FieldWithMetadata.java @@ -2,16 +2,20 @@ import com.flipkart.vitess.util.Constants; import com.flipkart.vitess.util.MysqlDefs; +import com.flipkart.vitess.util.StringUtils; +import com.flipkart.vitess.util.charset.CharsetMapping; import com.youtube.vitess.proto.Query; import java.sql.SQLException; import java.sql.Types; +import java.util.regex.PatternSyntaxException; public class FieldWithMetadata { private final VitessConnection connection; private final Query.Field field; private final Query.Type vitessType; + private final boolean isImplicitTempTable; private final boolean isSingleBit; private final int precisionAdjustFactor; @@ -41,18 +45,61 @@ public FieldWithMetadata(VitessConnection connection, Query.Field field) throws // All of the below remapping and metadata fields require the extra // fields included when includeFields=IncludedFields.ALL if (connection != null && connection.isIncludeAllFields()) { + 
this.isImplicitTempTable = field.getTable().length() > 5 && field.getTable().startsWith("#sql_"); + // Re-map BLOB to 'real' blob type + if (this.javaType == Types.BLOB) { + boolean isFromFunction = field.getOrgTable().isEmpty(); + if (connection.getBlobsAreStrings() || (connection.getFunctionsNeverReturnBlobs() && isFromFunction)) { + this.javaType = Types.VARCHAR; + } else if (collationIndex == CharsetMapping.MYSQL_COLLATION_INDEX_binary) { + if (connection.getUseBlobToStoreUTF8OutsideBMP() && shouldSetupForUtf8StringInBlob()) { + if (this.getColumnLength() == MysqlDefs.LENGTH_TINYBLOB || this.getColumnLength() == MysqlDefs.LENGTH_BLOB) { + this.javaType = Types.VARCHAR; + } else { + this.javaType = Types.LONGVARCHAR; + } + this.collationIndex = CharsetMapping.MYSQL_COLLATION_INDEX_utf8; + } else { + if (this.getColumnLength() == MysqlDefs.LENGTH_TINYBLOB) { + this.javaType = Types.VARBINARY; + } else if (this.getColumnLength() == MysqlDefs.LENGTH_BLOB || this.getColumnLength() == MysqlDefs.LENGTH_MEDIUMBLOB + || this.getColumnLength() == MysqlDefs.LENGTH_LONGBLOB) { + this.javaType = Types.LONGVARBINARY; + } + } + } else { + // *TEXT masquerading as blob + this.javaType = Types.LONGVARCHAR; + } + } + // Re-map TINYINT(1) as bit or pseudo-boolean if (this.javaType == Types.TINYINT && this.field.getColumnLength() == 1 && connection.getTinyInt1isBit()) { this.javaType = Types.BIT; } if (!isNativeNumericType() && !isNativeDateTimeType()) { - if (this.javaType == Types.BIT) { - this.isSingleBit = field.getColumnLength() == 0 || field.getColumnLength() == 1; - } else { - this.isSingleBit = false; + this.encoding = connection.getEncodingForIndex(this.collationIndex); + // ucs2, utf16, and utf32 cannot be used as a client character set, but if it was received from server + // under some circumstances we can parse them as utf16 + if ("UnicodeBig".equals(this.encoding)) { + this.encoding = "UTF-16"; + } + // MySQL encodes JSON data with utf8mb4. 
+ if (vitessType == Query.Type.JSON) { + this.encoding = "UTF-8"; + } + this.isSingleBit = this.javaType == Types.BIT && (field.getColumnLength() == 0 || field.getColumnLength() == 1); + // Re-map improperly typed binary types as non-binary counterparts if BINARY flag not set + boolean isBinary = isBinary(); + if (javaType == Types.LONGVARBINARY && !isBinary) { + this.javaType = Types.LONGVARCHAR; + } else if (javaType == Types.VARBINARY && !isBinary) { + this.javaType = Types.VARCHAR; } } else { + // Default encoding for number-types and date-types + this.encoding = "US-ASCII"; this.isSingleBit = false; } @@ -88,6 +135,7 @@ public FieldWithMetadata(VitessConnection connection, Query.Field field) throws } } else { // Defaults to appease final variables when not including all fields + this.isImplicitTempTable = false; this.isSingleBit = false; this.precisionAdjustFactor = 0; } @@ -125,12 +173,46 @@ public VitessConnection getConnection() throws SQLException { return connection; } + public boolean hasConnection() { + return connection != null; + } + private void checkConnection() throws SQLException { - if (connection == null) { + if (!hasConnection()) { throw new SQLException(Constants.SQLExceptionMessages.CONN_UNAVAILABLE); } } + private boolean shouldSetupForUtf8StringInBlob() throws SQLException { + String includePattern = connection.getUtf8OutsideBmpIncludedColumnNamePattern(); + String excludePattern = connection.getUtf8OutsideBmpExcludedColumnNamePattern(); + + // When UseBlobToStoreUTF8OutsideBMP is set, we by default set blobs to UTF-8. 
So we first + // look for fields to exclude from that remapping (blacklist) + if (excludePattern != null && !StringUtils.isNullOrEmptyWithoutWS(excludePattern)) { + try { + if (getOrgName().matches(excludePattern)) { + // If we want to include more specific patters that were inadvertently covered by the exclude pattern, + // we set the includePattern (whitelist) + if (includePattern != null && !StringUtils.isNullOrEmptyWithoutWS(includePattern)) { + try { + if (getOrgName().matches(includePattern)) { + return true; + } + } catch (PatternSyntaxException pse) { + throw new SQLException("Illegal regex specified for \"utf8OutsideBmpIncludedColumnNamePattern\"", pse); + } + } + return false; + } + } catch (PatternSyntaxException pse) { + throw new SQLException("Illegal regex specified for \"utf8OutsideBmpExcludedColumnNamePattern\"", pse); + } + } + + return true; + } + public boolean isAutoIncrement() throws SQLException { checkConnection(); if (!connection.isIncludeAllFields()) { @@ -210,7 +292,20 @@ public boolean isSigned() throws SQLException { boolean isOpaqueBinary() throws SQLException { checkConnection(); - return false; + if (!connection.isIncludeAllFields()) { + return false; + } + + // Detect CHAR(n) CHARACTER SET BINARY which is a synonym for fixed-length binary types + if (this.collationIndex == CharsetMapping.MYSQL_COLLATION_INDEX_binary && isBinary() + && (this.javaType == Types.CHAR || this.javaType == Types.VARCHAR)) { + // Okay, queries resolved by temp tables also have this 'signature', check for that + return !isImplicitTemporaryTable(); + } + + // this is basically always false unless a valid charset is not found and someone explicitly sets a fallback + // using ConnectionProperties, as binary defaults to ISO8859-1 per mysql-connector-j implementation + return "binary".equalsIgnoreCase(getEncoding()); } /** @@ -229,6 +324,34 @@ boolean isReadOnly() throws SQLException { return !(orgColumnName != null && orgColumnName.length() > 0 && orgTableName != 
null && orgTableName.length() > 0); } + public synchronized String getCollation() throws SQLException { + if (!connection.isIncludeAllFields()) { + return null; + } + + if (this.collationName == null) { + int collationIndex = getCollationIndex(); + try { + this.collationName = CharsetMapping.COLLATION_INDEX_TO_COLLATION_NAME[collationIndex]; + } catch (ArrayIndexOutOfBoundsException ex) { + throw new SQLException("CollationIndex '" + collationIndex + "' out of bounds for collationName lookup, should be within 0 and " + CharsetMapping.COLLATION_INDEX_TO_COLLATION_NAME.length, ex); + } + } + return this.collationName; + } + + + public synchronized int getMaxBytesPerCharacter() throws SQLException { + if (!connection.isIncludeAllFields()) { + return 0; + } + + if (this.maxBytesPerChar == 0) { + this.maxBytesPerChar = this.connection.getMaxBytesPerChar(getCollationIndex(), getEncoding()); + } + return this.maxBytesPerChar; + } + public String getName() { return field.getName(); } @@ -293,6 +416,20 @@ public int getVitessTypeValue() { return field.getTypeValue(); } + public boolean isImplicitTemporaryTable() { + if (!connection.isIncludeAllFields()) { + return false; + } + return isImplicitTempTable; + } + + public String getEncoding() { + if (!connection.isIncludeAllFields()) { + return null; + } + return encoding; + } + /** * Precision can be calculated from column length, but needs * to be adjusted for the extra values that can be included for the various @@ -314,6 +451,10 @@ public boolean isSingleBit() throws SQLException { return isSingleBit; } + private int getCollationIndex() { + return collationIndex; + } + @Override public String toString() { try { @@ -360,6 +501,12 @@ public String toString() { if (isZeroFill()) { asString.append(" ZEROFILL"); } + + asString.append(", charsetIndex="); + asString.append(this.collationIndex); + asString.append(", charsetName="); + asString.append(this.encoding); + asString.append("]"); return asString.toString(); } catch 
(Throwable t) { return super.toString(); diff --git a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessConnection.java b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessConnection.java index 591de78173a..34c61405932 100644 --- a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessConnection.java +++ b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessConnection.java @@ -3,6 +3,7 @@ import com.flipkart.vitess.util.CommonUtils; import com.flipkart.vitess.util.Constants; import com.flipkart.vitess.util.MysqlDefs; +import com.flipkart.vitess.util.charset.CharsetMapping; import com.youtube.vitess.client.Context; import com.youtube.vitess.client.VTGateConn; import com.youtube.vitess.client.VTGateTx; @@ -842,4 +843,27 @@ public Context createContext(long deadlineAfter) { public String getUsername() { return this.vitessJDBCUrl.getUsername(); } + + public String getEncodingForIndex(int charsetIndex) throws SQLException { + String javaEncoding = null; + if (charsetIndex != MysqlDefs.NO_CHARSET_INFO) { + javaEncoding = CharsetMapping.getJavaEncodingForCollationIndex(charsetIndex, getEncoding()); + } + // If nothing, get default based on configuration, may still be null + if (javaEncoding == null) { + javaEncoding = getEncoding(); + } + return javaEncoding; + } + + public int getMaxBytesPerChar(Integer charsetIndex, String javaCharsetName) { + // if we can get it by charsetIndex just doing it + String charset = CharsetMapping.getMysqlCharsetNameForCollationIndex(charsetIndex); + // if we didn't find charset name by its full name + if (charset == null) { + charset = CharsetMapping.getMysqlCharsetForJavaEncoding(javaCharsetName); + } + // checking against static maps + return CharsetMapping.getMblen(charset); + } } diff --git a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessResultSet.java b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessResultSet.java index e28ace3ae6e..833634996aa 100644 --- 
a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessResultSet.java +++ b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessResultSet.java @@ -1,6 +1,7 @@ package com.flipkart.vitess.jdbc; import com.flipkart.vitess.util.Constants; +import com.flipkart.vitess.util.StringUtils; import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.ByteString; import com.youtube.vitess.client.cursor.Cursor; @@ -10,6 +11,7 @@ import java.io.InputStream; import java.io.Reader; +import java.io.UnsupportedEncodingException; import java.math.BigDecimal; import java.net.URL; import java.sql.Array; @@ -28,6 +30,7 @@ import java.sql.Statement; import java.sql.Time; import java.sql.Timestamp; +import java.sql.Types; import java.util.ArrayList; import java.util.Calendar; import java.util.List; @@ -217,13 +220,16 @@ public String getString(int columnIndex) throws SQLException { } object = this.row.getObject(columnIndex); - if (object instanceof byte[]) { - columnValue = new String((byte[]) object); + FieldWithMetadata field = this.fields.get(columnIndex - 1); + if (field.hasConnection() && field.getConnection().isIncludeAllFields()) { + columnValue = convertBytesToString((byte[]) object, field.getEncoding()); + } else { + columnValue = new String((byte[]) object); + } } else { columnValue = String.valueOf(object); } - return columnValue; } @@ -549,7 +555,50 @@ public Object getObject(int columnIndex) throws SQLException { return null; } - return this.row.getObject(columnIndex); + Object retVal = this.row.getObject(columnIndex); + + FieldWithMetadata field = this.fields.get(columnIndex - 1); + if (field.hasConnection() && field.getConnection().isIncludeAllFields() && retVal instanceof byte[]) { + retVal = convertBytesIfPossible((byte[]) retVal, field); + } + + return retVal; + } + + private Object convertBytesIfPossible(byte[] bytes, FieldWithMetadata field) throws SQLException { + String encoding = field.getEncoding(); + switch (field.getJavaType()) { + 
case Types.BIT: + if (!field.isSingleBit()) { + return bytes; + } + return byteArrayToBoolean(bytes); + case Types.CHAR: + case Types.VARCHAR: + case Types.LONGVARCHAR: + if (!field.isOpaqueBinary()) { + return convertBytesToString(bytes, encoding); + } + return bytes; + case Types.BINARY: + case Types.VARBINARY: + case Types.LONGVARBINARY: + return bytes; + default: + return convertBytesToString(bytes, encoding); + } + } + + private String convertBytesToString(byte[] bytes, String encoding) throws SQLException { + if (encoding == null) { + return StringUtils.toString(bytes); + } else { + try { + return StringUtils.toString(bytes, 0, bytes.length, encoding); + } catch (UnsupportedEncodingException e) { + throw new SQLException("Unsupported character encoding: " + encoding, e); + } + } } public Object getObject(String columnLabel) throws SQLException { diff --git a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessResultSetMetaData.java b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessResultSetMetaData.java index 1ea8b175666..5976e609edf 100644 --- a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessResultSetMetaData.java +++ b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessResultSetMetaData.java @@ -49,6 +49,22 @@ public boolean isCaseSensitive(int column) throws SQLException { case Types.TIME: case Types.TIMESTAMP: return false; + case Types.CHAR: + case Types.VARCHAR: + case Types.LONGVARCHAR: + if (field.isBinary() || !field.getConnection().isIncludeAllFields()) { + return true; + } + try { + String collationName = field.getCollation(); + return collationName != null && !collationName.endsWith("_ci"); + } catch (SQLException e) { + if (e.getCause() instanceof ArrayIndexOutOfBoundsException) { + return false; + } else { + throw e; + } + } default: return true; } @@ -83,7 +99,11 @@ public boolean isSigned(int column) throws SQLException { } public int getColumnDisplaySize(int column) throws SQLException { - return 0; + FieldWithMetadata 
field = getField(column); + if (!field.getConnection().isIncludeAllFields()) { + return 0; + } + return field.getColumnLength() / field.getMaxBytesPerCharacter(); } public String getColumnLabel(int column) throws SQLException { @@ -99,7 +119,20 @@ public String getSchemaName(int column) throws SQLException { } public int getPrecision(int column) throws SQLException { - return 0; + FieldWithMetadata field = getField(column); + if (!field.getConnection().isIncludeAllFields()) { + return 0; + } + if (isDecimalType(field.getJavaType(), field.getVitessTypeValue())) { + return field.getColumnLength() + field.getPrecisionAdjustFactor(); + } + switch (field.getJavaType()) { + case Types.VARBINARY: + case Types.LONGVARBINARY: + return field.getColumnLength(); + default: + return field.getColumnLength() / field.getMaxBytesPerCharacter(); + } } private static boolean isDecimalType(int javaType, int vitessType) { diff --git a/java/jdbc/src/main/java/com/flipkart/vitess/util/MysqlDefs.java b/java/jdbc/src/main/java/com/flipkart/vitess/util/MysqlDefs.java index 90566c674f1..00dcdf383d6 100644 --- a/java/jdbc/src/main/java/com/flipkart/vitess/util/MysqlDefs.java +++ b/java/jdbc/src/main/java/com/flipkart/vitess/util/MysqlDefs.java @@ -72,10 +72,10 @@ public final class MysqlDefs { static final int FIELD_TYPE_YEAR = 13; static final int FIELD_TYPE_JSON = 245; static final int INIT_DB = 2; - static final long LENGTH_BLOB = 65535; - static final long LENGTH_LONGBLOB = 4294967295L; - static final long LENGTH_MEDIUMBLOB = 16777215; - static final long LENGTH_TINYBLOB = 255; + public static final int LENGTH_BLOB = 65535; + public static final int LENGTH_LONGBLOB = Integer.MAX_VALUE; + public static final int LENGTH_MEDIUMBLOB = 16777215; + public static final int LENGTH_TINYBLOB = 255; // Limitations static final int MAX_ROWS = 50000000; // From the MySQL FAQ static final byte OPEN_CURSOR_FLAG = 1; diff --git a/java/jdbc/src/main/java/com/flipkart/vitess/util/StringUtils.java 
b/java/jdbc/src/main/java/com/flipkart/vitess/util/StringUtils.java index 342870a15d2..dae2ae4b425 100644 --- a/java/jdbc/src/main/java/com/flipkart/vitess/util/StringUtils.java +++ b/java/jdbc/src/main/java/com/flipkart/vitess/util/StringUtils.java @@ -1,14 +1,22 @@ package com.flipkart.vitess.util; import java.io.StringReader; +import java.io.UnsupportedEncodingException; +import java.nio.ByteBuffer; +import java.nio.CharBuffer; +import java.nio.charset.Charset; import java.util.ArrayList; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; /** * Created by naveen.nahata on 05/02/16. */ public class StringUtils { + private static final String platformEncoding = System.getProperty("file.encoding"); + private static final ConcurrentHashMap charsetsByAlias = new ConcurrentHashMap(); + private StringUtils() { } @@ -374,4 +382,78 @@ public static int findStartOfStatement(String sql) { return statementStartPos; } + + public static String toString(byte[] value, int offset, int length, String encoding) throws UnsupportedEncodingException { + Charset cs = findCharset(encoding); + + return cs.decode(ByteBuffer.wrap(value, offset, length)).toString(); + } + + public static String toString(byte[] value, String encoding) throws UnsupportedEncodingException { + return findCharset(encoding) + .decode(ByteBuffer.wrap(value)) + .toString(); + } + + public static String toString(byte[] value, int offset, int length) { + try { + return findCharset(platformEncoding) + .decode(ByteBuffer.wrap(value, offset, length)) + .toString(); + } catch (UnsupportedEncodingException e) { + // can't happen, emulating new String(byte[]) + } + + return null; + } + + public static String toString(byte[] value) { + try { + return findCharset(platformEncoding) + .decode(ByteBuffer.wrap(value)) + .toString(); + } catch (UnsupportedEncodingException e) { + // can't happen, emulating new String(byte[]) + } + + return null; + } + + public static byte[] getBytes(String value, String 
encoding) throws UnsupportedEncodingException { + return getBytes(value, 0, value.length(), encoding); + } + + public static byte[] getBytes(String value, int offset, int length, String encoding) throws UnsupportedEncodingException { + Charset cs = findCharset(encoding); + + ByteBuffer buf = cs.encode(CharBuffer.wrap(value.toCharArray(), offset, length)); + + // can't simply .array() this to get the bytes especially with variable-length charsets the buffer is sometimes larger than the actual encoded data + int encodedLen = buf.limit(); + byte[] asBytes = new byte[encodedLen]; + buf.get(asBytes, 0, encodedLen); + + return asBytes; + } + + private static Charset findCharset(String alias) throws UnsupportedEncodingException { + try { + Charset cs = charsetsByAlias.get(alias); + + if (cs == null) { + cs = Charset.forName(alias); + Charset oldCs = charsetsByAlias.putIfAbsent(alias, cs); + if (oldCs != null) { + // if the previous value was recently set by another thread we return it instead of value we found here + cs = oldCs; + } + } + + return cs; + + // We re-throw these runtimes for compatibility with java.io + } catch (IllegalArgumentException iae) { + throw new UnsupportedEncodingException(alias); + } + } } diff --git a/java/jdbc/src/main/java/com/flipkart/vitess/util/charset/CharsetMapping.java b/java/jdbc/src/main/java/com/flipkart/vitess/util/charset/CharsetMapping.java new file mode 100644 index 00000000000..ddb914fbc0e --- /dev/null +++ b/java/jdbc/src/main/java/com/flipkart/vitess/util/charset/CharsetMapping.java @@ -0,0 +1,536 @@ +package com.flipkart.vitess.util.charset; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +/** + * These classes were pulled from mysql-connector-java and simplified to just the parts supporting the statically available + * charsets + */ +public class CharsetMapping { + private static final int MAP_SIZE = 255; // 
Size of static maps + public static final String[] COLLATION_INDEX_TO_COLLATION_NAME; + private static final MysqlCharset[] COLLATION_INDEX_TO_CHARSET; + + static final Map CHARSET_NAME_TO_CHARSET; + private static final Map> JAVA_ENCODING_UC_TO_MYSQL_CHARSET; + + private static final String MYSQL_CHARSET_NAME_armscii8 = "armscii8"; + private static final String MYSQL_CHARSET_NAME_ascii = "ascii"; + private static final String MYSQL_CHARSET_NAME_big5 = "big5"; + private static final String MYSQL_CHARSET_NAME_binary = "binary"; + private static final String MYSQL_CHARSET_NAME_cp1250 = "cp1250"; + private static final String MYSQL_CHARSET_NAME_cp1251 = "cp1251"; + private static final String MYSQL_CHARSET_NAME_cp1256 = "cp1256"; + private static final String MYSQL_CHARSET_NAME_cp1257 = "cp1257"; + private static final String MYSQL_CHARSET_NAME_cp850 = "cp850"; + private static final String MYSQL_CHARSET_NAME_cp852 = "cp852"; + private static final String MYSQL_CHARSET_NAME_cp866 = "cp866"; + private static final String MYSQL_CHARSET_NAME_cp932 = "cp932"; + private static final String MYSQL_CHARSET_NAME_dec8 = "dec8"; + private static final String MYSQL_CHARSET_NAME_eucjpms = "eucjpms"; + private static final String MYSQL_CHARSET_NAME_euckr = "euckr"; + private static final String MYSQL_CHARSET_NAME_gb18030 = "gb18030"; + private static final String MYSQL_CHARSET_NAME_gb2312 = "gb2312"; + private static final String MYSQL_CHARSET_NAME_gbk = "gbk"; + private static final String MYSQL_CHARSET_NAME_geostd8 = "geostd8"; + private static final String MYSQL_CHARSET_NAME_greek = "greek"; + private static final String MYSQL_CHARSET_NAME_hebrew = "hebrew"; + private static final String MYSQL_CHARSET_NAME_hp8 = "hp8"; + private static final String MYSQL_CHARSET_NAME_keybcs2 = "keybcs2"; + private static final String MYSQL_CHARSET_NAME_koi8r = "koi8r"; + private static final String MYSQL_CHARSET_NAME_koi8u = "koi8u"; + private static final String MYSQL_CHARSET_NAME_latin1 = 
"latin1"; + private static final String MYSQL_CHARSET_NAME_latin2 = "latin2"; + private static final String MYSQL_CHARSET_NAME_latin5 = "latin5"; + private static final String MYSQL_CHARSET_NAME_latin7 = "latin7"; + private static final String MYSQL_CHARSET_NAME_macce = "macce"; + private static final String MYSQL_CHARSET_NAME_macroman = "macroman"; + private static final String MYSQL_CHARSET_NAME_sjis = "sjis"; + private static final String MYSQL_CHARSET_NAME_swe7 = "swe7"; + private static final String MYSQL_CHARSET_NAME_tis620 = "tis620"; + private static final String MYSQL_CHARSET_NAME_ucs2 = "ucs2"; + private static final String MYSQL_CHARSET_NAME_ujis = "ujis"; + private static final String MYSQL_CHARSET_NAME_utf16 = "utf16"; + private static final String MYSQL_CHARSET_NAME_utf16le = "utf16le"; + private static final String MYSQL_CHARSET_NAME_utf32 = "utf32"; + private static final String MYSQL_CHARSET_NAME_utf8 = "utf8"; + private static final String MYSQL_CHARSET_NAME_utf8mb4 = "utf8mb4"; + + private static final String MYSQL_4_0_CHARSET_NAME_cp1251cias = "cp1251cias"; + private static final String MYSQL_4_0_CHARSET_NAME_cp1251csas = "cp1251csas"; + private static final String MYSQL_4_0_CHARSET_NAME_croat = "croat"; // 4.1 => 27 latin2 latin2_croatian_ci + private static final String MYSQL_4_0_CHARSET_NAME_czech = "czech"; // 4.1 => 2 latin2 latin2_czech_ci + private static final String MYSQL_4_0_CHARSET_NAME_danish = "danish"; // 4.1 => 15 latin1 latin1_danish_ci + private static final String MYSQL_4_0_CHARSET_NAME_dos = "dos"; // 4.1 => 4 cp850 cp850_general_ci + private static final String MYSQL_4_0_CHARSET_NAME_estonia = "estonia"; // 4.1 => 20 latin7 latin7_estonian_ci + private static final String MYSQL_4_0_CHARSET_NAME_euc_kr = "euc_kr"; // 4.1 => 19 euckr euckr_korean_ci + private static final String MYSQL_4_0_CHARSET_NAME_german1 = "german1"; // 4.1 => 5 latin1 latin1_german1_ci + private static final String MYSQL_4_0_CHARSET_NAME_hungarian = 
"hungarian"; // 4.1 => 21 latin2 latin2_hungarian_ci + private static final String MYSQL_4_0_CHARSET_NAME_koi8_ru = "koi8_ru"; // 4.1 => 7 koi8r koi8r_general_ci + private static final String MYSQL_4_0_CHARSET_NAME_koi8_ukr = "koi8_ukr"; // 4.1 => 22 koi8u koi8u_ukrainian_ci + private static final String MYSQL_4_0_CHARSET_NAME_latin1_de = "latin1_de"; // 4.1 => 31 latin1 latin1_german2_ci + private static final String MYSQL_4_0_CHARSET_NAME_latvian = "latvian"; + private static final String MYSQL_4_0_CHARSET_NAME_latvian1 = "latvian1"; + private static final String MYSQL_4_0_CHARSET_NAME_usa7 = "usa7"; // 4.1 => 11 ascii ascii_general_ci + private static final String MYSQL_4_0_CHARSET_NAME_win1250 = "win1250"; // 4.1 => 26 cp1250 cp1250_general_ci + private static final String MYSQL_4_0_CHARSET_NAME_win1251 = "win1251"; // 4.1 => 17 (removed) + private static final String MYSQL_4_0_CHARSET_NAME_win1251ukr = "win1251ukr"; // 4.1 => 23 cp1251 cp1251_ukrainian_ci + + private static final String NOT_USED = MYSQL_CHARSET_NAME_latin1; // punting for not-used character sets + + public static final int MYSQL_COLLATION_INDEX_utf8 = 33; + public static final int MYSQL_COLLATION_INDEX_binary = 63; + + static { + // complete list of mysql character sets and their corresponding java encoding names + MysqlCharset[] charset = new MysqlCharset[]{new MysqlCharset(MYSQL_4_0_CHARSET_NAME_usa7, 1, 0, new String[]{"US-ASCII"}, 4, 0), + new MysqlCharset(MYSQL_CHARSET_NAME_ascii, 1, 0, new String[]{"US-ASCII", "ASCII"}), + + new MysqlCharset(MYSQL_CHARSET_NAME_big5, 2, 0, new String[]{"Big5"}), + new MysqlCharset(MYSQL_CHARSET_NAME_gbk, 2, 0, new String[]{"GBK"}), + + new MysqlCharset(MYSQL_CHARSET_NAME_sjis, 2, 0, new String[]{"SHIFT_JIS", "Cp943", "WINDOWS-31J"}), // SJIS is alias for SHIFT_JIS, Cp943 is rather a cp932 but we map it to sjis for years + new MysqlCharset(MYSQL_CHARSET_NAME_cp932, 2, 1, new String[]{"WINDOWS-31J"}), // MS932 is alias for WINDOWS-31J + + new 
MysqlCharset(MYSQL_CHARSET_NAME_gb2312, 2, 0, new String[]{"GB2312"}), + new MysqlCharset(MYSQL_CHARSET_NAME_ujis, 3, 0, new String[]{"EUC_JP"}), + new MysqlCharset(MYSQL_CHARSET_NAME_eucjpms, 3, 0, new String[]{"EUC_JP_Solaris"}, 5, 0, 3), // "EUC_JP_Solaris = >5.0.3 eucjpms," + + new MysqlCharset(MYSQL_CHARSET_NAME_gb18030, 4, 0, new String[]{"GB18030"}, 5, 7, 4), + + new MysqlCharset(MYSQL_4_0_CHARSET_NAME_euc_kr, 2, 0, new String[]{"EUC_KR"}, 4, 0), + new MysqlCharset(MYSQL_CHARSET_NAME_euckr, 2, 0, new String[]{"EUC-KR"}), + + new MysqlCharset(MYSQL_CHARSET_NAME_latin1, 1, 1, new String[]{"Cp1252", "ISO8859_1"}), + new MysqlCharset(MYSQL_CHARSET_NAME_swe7, 1, 0, new String[]{"Cp1252"}), // new mapping, Cp1252 ? + new MysqlCharset(MYSQL_CHARSET_NAME_hp8, 1, 0, new String[]{"Cp1252"}), // new mapping, Cp1252 ? + new MysqlCharset(MYSQL_CHARSET_NAME_dec8, 1, 0, new String[]{"Cp1252"}), // new mapping, Cp1252 ? + new MysqlCharset(MYSQL_CHARSET_NAME_armscii8, 1, 0, new String[]{"Cp1252"}), // new mapping, Cp1252 ? + new MysqlCharset(MYSQL_CHARSET_NAME_geostd8, 1, 0, new String[]{"Cp1252"}), // new mapping, Cp1252 ? 
+ + new MysqlCharset(MYSQL_CHARSET_NAME_latin2, 1, 0, new String[]{"ISO8859_2"}), // latin2 is an alias + new MysqlCharset(MYSQL_4_0_CHARSET_NAME_czech, 1, 0, new String[]{"ISO8859_2"}, 4, 0), + new MysqlCharset(MYSQL_4_0_CHARSET_NAME_hungarian, 1, 0, new String[]{"ISO8859_2"}, 4, 0), + new MysqlCharset(MYSQL_4_0_CHARSET_NAME_croat, 1, 0, new String[]{"ISO8859_2"}, 4, 0), + + new MysqlCharset(MYSQL_CHARSET_NAME_greek, 1, 0, new String[]{"ISO8859_7", "greek"}), + new MysqlCharset(MYSQL_CHARSET_NAME_latin7, 1, 0, new String[]{"ISO-8859-13"}), // was ISO8859_7, that's incorrect; also + "LATIN7 = latin7," is wrong java encoding name + + new MysqlCharset(MYSQL_CHARSET_NAME_hebrew, 1, 0, new String[]{"ISO8859_8"}), // hebrew is an alias + new MysqlCharset(MYSQL_CHARSET_NAME_latin5, 1, 0, new String[]{"ISO8859_9"}), // LATIN5 is an alias + + new MysqlCharset(MYSQL_4_0_CHARSET_NAME_latvian, 1, 0, new String[]{"ISO8859_13"}, 4, 0), + new MysqlCharset(MYSQL_4_0_CHARSET_NAME_latvian1, 1, 0, new String[]{"ISO8859_13"}, 4, 0), + new MysqlCharset(MYSQL_4_0_CHARSET_NAME_estonia, 1, 1, new String[]{"ISO8859_13"}, 4, 0), //, "ISO8859_13"); // punting for "estonia"; + + new MysqlCharset(MYSQL_CHARSET_NAME_cp850, 1, 0, new String[]{"Cp850", "Cp437"}), + new MysqlCharset(MYSQL_4_0_CHARSET_NAME_dos, 1, 0, new String[]{"Cp850", "Cp437"}, 4, 0), + + new MysqlCharset(MYSQL_CHARSET_NAME_cp852, 1, 0, new String[]{"Cp852"}), + new MysqlCharset(MYSQL_CHARSET_NAME_keybcs2, 1, 0, new String[]{"Cp852"}), // new, Kamenicky encoding usually known as Cp895 but there is no official cp895 specification; close to Cp852, see http://ftp.muni.cz/pub/localization/charsets/cs-encodings-faq + + new MysqlCharset(MYSQL_CHARSET_NAME_cp866, 1, 0, new String[]{"Cp866"}), + + new MysqlCharset(MYSQL_4_0_CHARSET_NAME_koi8_ru, 1, 0, new String[]{"KOI8_R"}, 4, 0), + new MysqlCharset(MYSQL_CHARSET_NAME_koi8r, 1, 1, new String[]{"KOI8_R"}), + new MysqlCharset(MYSQL_CHARSET_NAME_koi8u, 1, 0, new String[]{"KOI8_R"}), + 
new MysqlCharset(MYSQL_4_0_CHARSET_NAME_koi8_ukr, 1, 0, new String[]{"KOI8_R"}, 4, 0), + + new MysqlCharset(MYSQL_CHARSET_NAME_tis620, 1, 0, new String[]{"TIS620"}), + + new MysqlCharset(MYSQL_CHARSET_NAME_cp1250, 1, 0, new String[]{"Cp1250"}), + new MysqlCharset(MYSQL_4_0_CHARSET_NAME_win1250, 1, 0, new String[]{"Cp1250"}, 4, 0), + + new MysqlCharset(MYSQL_CHARSET_NAME_cp1251, 1, 1, new String[]{"Cp1251"}), + new MysqlCharset(MYSQL_4_0_CHARSET_NAME_win1251, 1, 0, new String[]{"Cp1251"}, 4, 0), + new MysqlCharset(MYSQL_4_0_CHARSET_NAME_cp1251cias, 1, 0, new String[]{"Cp1251"}, 4, 0), + new MysqlCharset(MYSQL_4_0_CHARSET_NAME_cp1251csas, 1, 0, new String[]{"Cp1251"}, 4, 0), + new MysqlCharset(MYSQL_4_0_CHARSET_NAME_win1251ukr, 1, 0, new String[]{"Cp1251"}, 4, 0), + + new MysqlCharset(MYSQL_CHARSET_NAME_cp1256, 1, 0, new String[]{"Cp1256"}), + new MysqlCharset(MYSQL_CHARSET_NAME_cp1257, 1, 0, new String[]{"Cp1257"}), + new MysqlCharset(MYSQL_CHARSET_NAME_macroman, 1, 0, new String[]{"MacRoman"}), + new MysqlCharset(MYSQL_CHARSET_NAME_macce, 1, 0, new String[]{"MacCentralEurope"}), + + new MysqlCharset(MYSQL_CHARSET_NAME_utf8, 3, 1, new String[]{"UTF-8"}), + new MysqlCharset(MYSQL_CHARSET_NAME_utf8mb4, 4, 0, new String[]{"UTF-8"}), // "UTF-8 = *> 5.5.2 utf8mb4," + + new MysqlCharset(MYSQL_CHARSET_NAME_ucs2, 2, 0, new String[]{"UnicodeBig"}), + + new MysqlCharset(MYSQL_CHARSET_NAME_binary, 1, 1, new String[]{"ISO8859_1"}), // US-ASCII ? 
+ new MysqlCharset(MYSQL_4_0_CHARSET_NAME_latin1_de, 1, 0, new String[]{"ISO8859_1"}, 4, 0), + new MysqlCharset(MYSQL_4_0_CHARSET_NAME_german1, 1, 0, new String[]{"ISO8859_1"}, 4, 0), + new MysqlCharset(MYSQL_4_0_CHARSET_NAME_danish, 1, 0, new String[]{"ISO8859_1"}, 4, 0), + + new MysqlCharset(MYSQL_CHARSET_NAME_utf16, 4, 0, new String[]{"UTF-16"}), + new MysqlCharset(MYSQL_CHARSET_NAME_utf16le, 4, 0, new String[]{"UTF-16LE"}), + new MysqlCharset(MYSQL_CHARSET_NAME_utf32, 4, 0, new String[]{"UTF-32"}) + }; + + HashMap charsetNameToMysqlCharsetMap = new HashMap(); + HashMap> javaUcToMysqlCharsetMap = new HashMap>(); + + for (int i = 0; i < charset.length; i++) { + String charsetName = charset[i].charsetName; + + charsetNameToMysqlCharsetMap.put(charsetName, charset[i]); + + for (String encUC : charset[i].javaEncodingsUc) { + + // fill javaUcToMysqlCharsetMap + List charsets = javaUcToMysqlCharsetMap.get(encUC); + if (charsets == null) { + charsets = new ArrayList(); + javaUcToMysqlCharsetMap.put(encUC, charsets); + } + charsets.add(charset[i]); + } + } + + CHARSET_NAME_TO_CHARSET = Collections.unmodifiableMap(charsetNameToMysqlCharsetMap); + JAVA_ENCODING_UC_TO_MYSQL_CHARSET = Collections.unmodifiableMap(javaUcToMysqlCharsetMap); + + // complete list of mysql collations and their corresponding character sets each element of collation[1]..collation[MAP_SIZE-1] must not be null + Collation[] collation = new Collation[MAP_SIZE]; + collation[1] = new Collation(1, "big5_chinese_ci", 1, MYSQL_CHARSET_NAME_big5); + collation[84] = new Collation(84, "big5_bin", 0, MYSQL_CHARSET_NAME_big5); + + collation[2] = new Collation(2, "latin2_czech_cs", 0, MYSQL_CHARSET_NAME_latin2); + collation[9] = new Collation(9, "latin2_general_ci", 1, MYSQL_CHARSET_NAME_latin2); + collation[21] = new Collation(21, "latin2_hungarian_ci", 0, MYSQL_CHARSET_NAME_latin2); + collation[27] = new Collation(27, "latin2_croatian_ci", 0, MYSQL_CHARSET_NAME_latin2); + collation[77] = new Collation(77, 
"latin2_bin", 0, MYSQL_CHARSET_NAME_latin2); + + collation[4] = new Collation(4, "cp850_general_ci", 1, MYSQL_CHARSET_NAME_cp850); + collation[80] = new Collation(80, "cp850_bin", 0, MYSQL_CHARSET_NAME_cp850); + + collation[5] = new Collation(5, "latin1_german1_ci", 1, MYSQL_CHARSET_NAME_latin1); + collation[8] = new Collation(8, "latin1_swedish_ci", 0, MYSQL_CHARSET_NAME_latin1); + collation[15] = new Collation(15, "latin1_danish_ci", 0, MYSQL_CHARSET_NAME_latin1); + collation[31] = new Collation(31, "latin1_german2_ci", 0, MYSQL_CHARSET_NAME_latin1); + collation[47] = new Collation(47, "latin1_bin", 0, MYSQL_CHARSET_NAME_latin1); + collation[48] = new Collation(48, "latin1_general_ci", 0, MYSQL_CHARSET_NAME_latin1); + collation[49] = new Collation(49, "latin1_general_cs", 0, MYSQL_CHARSET_NAME_latin1); + collation[76] = new Collation(76, "not_implemented", 0, NOT_USED); + collation[94] = new Collation(94, "latin1_spanish_ci", 0, MYSQL_CHARSET_NAME_latin1); + collation[100] = new Collation(100, "not_implemented", 0, NOT_USED); + collation[125] = new Collation(125, "not_implemented", 0, NOT_USED); + collation[126] = new Collation(126, "not_implemented", 0, NOT_USED); + collation[127] = new Collation(127, "not_implemented", 0, NOT_USED); + collation[152] = new Collation(152, "not_implemented", 0, NOT_USED); + collation[153] = new Collation(153, "not_implemented", 0, NOT_USED); + collation[154] = new Collation(154, "not_implemented", 0, NOT_USED); + collation[155] = new Collation(155, "not_implemented", 0, NOT_USED); + collation[156] = new Collation(156, "not_implemented", 0, NOT_USED); + collation[157] = new Collation(157, "not_implemented", 0, NOT_USED); + collation[158] = new Collation(158, "not_implemented", 0, NOT_USED); + collation[184] = new Collation(184, "not_implemented", 0, NOT_USED); + collation[185] = new Collation(185, "not_implemented", 0, NOT_USED); + collation[186] = new Collation(186, "not_implemented", 0, NOT_USED); + collation[187] = new 
Collation(187, "not_implemented", 0, NOT_USED); + collation[188] = new Collation(188, "not_implemented", 0, NOT_USED); + collation[189] = new Collation(189, "not_implemented", 0, NOT_USED); + collation[190] = new Collation(190, "not_implemented", 0, NOT_USED); + collation[191] = new Collation(191, "not_implemented", 0, NOT_USED); + collation[216] = new Collation(216, "not_implemented", 0, NOT_USED); + collation[217] = new Collation(217, "not_implemented", 0, NOT_USED); + collation[218] = new Collation(218, "not_implemented", 0, NOT_USED); + collation[219] = new Collation(219, "not_implemented", 0, NOT_USED); + collation[220] = new Collation(220, "not_implemented", 0, NOT_USED); + collation[221] = new Collation(221, "not_implemented", 0, NOT_USED); + collation[222] = new Collation(222, "not_implemented", 0, NOT_USED); + collation[248] = new Collation(248, "gb18030_chinese_ci", 1, MYSQL_CHARSET_NAME_gb18030); + collation[249] = new Collation(249, "gb18030_bin", 0, MYSQL_CHARSET_NAME_gb18030); + collation[250] = new Collation(250, "gb18030_unicode_520_ci", 0, MYSQL_CHARSET_NAME_gb18030); + collation[251] = new Collation(251, "not_implemented", 0, NOT_USED); + collation[252] = new Collation(252, "not_implemented", 0, NOT_USED); + collation[253] = new Collation(253, "not_implemented", 0, NOT_USED); + collation[254] = new Collation(254, "not_implemented", 0, NOT_USED); + collation[10] = new Collation(10, "swe7_swedish_ci", 0, MYSQL_CHARSET_NAME_swe7); + collation[82] = new Collation(82, "swe7_bin", 0, MYSQL_CHARSET_NAME_swe7); + collation[6] = new Collation(6, "hp8_english_ci", 0, MYSQL_CHARSET_NAME_hp8); + collation[72] = new Collation(72, "hp8_bin", 0, MYSQL_CHARSET_NAME_hp8); + collation[3] = new Collation(3, "dec8_swedish_ci", 0, MYSQL_CHARSET_NAME_dec8); + collation[69] = new Collation(69, "dec8_bin", 0, MYSQL_CHARSET_NAME_dec8); + collation[32] = new Collation(32, "armscii8_general_ci", 0, MYSQL_CHARSET_NAME_armscii8); + collation[64] = new Collation(64, 
"armscii8_bin", 0, MYSQL_CHARSET_NAME_armscii8); + collation[92] = new Collation(92, "geostd8_general_ci", 0, MYSQL_CHARSET_NAME_geostd8); + collation[93] = new Collation(93, "geostd8_bin", 0, MYSQL_CHARSET_NAME_geostd8); + + collation[7] = new Collation(7, "koi8r_general_ci", 0, MYSQL_CHARSET_NAME_koi8r); + collation[74] = new Collation(74, "koi8r_bin", 0, MYSQL_CHARSET_NAME_koi8r); + + collation[11] = new Collation(11, "ascii_general_ci", 0, MYSQL_CHARSET_NAME_ascii); + collation[65] = new Collation(65, "ascii_bin", 0, MYSQL_CHARSET_NAME_ascii); + + collation[12] = new Collation(12, "ujis_japanese_ci", 0, MYSQL_CHARSET_NAME_ujis); + collation[91] = new Collation(91, "ujis_bin", 0, MYSQL_CHARSET_NAME_ujis); + + collation[13] = new Collation(13, "sjis_japanese_ci", 0, MYSQL_CHARSET_NAME_sjis); + collation[14] = new Collation(14, "cp1251_bulgarian_ci", 0, MYSQL_CHARSET_NAME_cp1251); + collation[16] = new Collation(16, "hebrew_general_ci", 0, MYSQL_CHARSET_NAME_hebrew); + collation[17] = new Collation(17, "latin1_german1_ci", 0, MYSQL_4_0_CHARSET_NAME_win1251); // removed since 4.1 + collation[18] = new Collation(18, "tis620_thai_ci", 0, MYSQL_CHARSET_NAME_tis620); + collation[19] = new Collation(19, "euckr_korean_ci", 0, MYSQL_CHARSET_NAME_euckr); + collation[20] = new Collation(20, "latin7_estonian_cs", 0, MYSQL_CHARSET_NAME_latin7); + collation[22] = new Collation(22, "koi8u_general_ci", 0, MYSQL_CHARSET_NAME_koi8u); + collation[23] = new Collation(23, "cp1251_ukrainian_ci", 0, MYSQL_CHARSET_NAME_cp1251); + collation[24] = new Collation(24, "gb2312_chinese_ci", 0, MYSQL_CHARSET_NAME_gb2312); + collation[25] = new Collation(25, "greek_general_ci", 0, MYSQL_CHARSET_NAME_greek); + collation[26] = new Collation(26, "cp1250_general_ci", 1, MYSQL_CHARSET_NAME_cp1250); + collation[28] = new Collation(28, "gbk_chinese_ci", 1, MYSQL_CHARSET_NAME_gbk); + collation[29] = new Collation(29, "cp1257_lithuanian_ci", 0, MYSQL_CHARSET_NAME_cp1257); + collation[30] = new 
Collation(30, "latin5_turkish_ci", 1, MYSQL_CHARSET_NAME_latin5); + collation[33] = new Collation(33, "utf8_general_ci", 1, MYSQL_CHARSET_NAME_utf8); + collation[34] = new Collation(34, "cp1250_czech_cs", 0, MYSQL_CHARSET_NAME_cp1250); + collation[35] = new Collation(35, "ucs2_general_ci", 1, MYSQL_CHARSET_NAME_ucs2); + collation[36] = new Collation(36, "cp866_general_ci", 1, MYSQL_CHARSET_NAME_cp866); + collation[37] = new Collation(37, "keybcs2_general_ci", 1, MYSQL_CHARSET_NAME_keybcs2); + collation[38] = new Collation(38, "macce_general_ci", 1, MYSQL_CHARSET_NAME_macce); + collation[39] = new Collation(39, "macroman_general_ci", 1, MYSQL_CHARSET_NAME_macroman); + collation[40] = new Collation(40, "cp852_general_ci", 1, MYSQL_CHARSET_NAME_cp852); + collation[41] = new Collation(41, "latin7_general_ci", 1, MYSQL_CHARSET_NAME_latin7); + collation[42] = new Collation(42, "latin7_general_cs", 0, MYSQL_CHARSET_NAME_latin7); + collation[43] = new Collation(43, "macce_bin", 0, MYSQL_CHARSET_NAME_macce); + collation[44] = new Collation(44, "cp1250_croatian_ci", 0, MYSQL_CHARSET_NAME_cp1250); + collation[45] = new Collation(45, "utf8mb4_general_ci", 1, MYSQL_CHARSET_NAME_utf8mb4); + collation[46] = new Collation(46, "utf8mb4_bin", 0, MYSQL_CHARSET_NAME_utf8mb4); + collation[50] = new Collation(50, "cp1251_bin", 0, MYSQL_CHARSET_NAME_cp1251); + collation[51] = new Collation(51, "cp1251_general_ci", 1, MYSQL_CHARSET_NAME_cp1251); + collation[52] = new Collation(52, "cp1251_general_cs", 0, MYSQL_CHARSET_NAME_cp1251); + collation[53] = new Collation(53, "macroman_bin", 0, MYSQL_CHARSET_NAME_macroman); + collation[54] = new Collation(54, "utf16_general_ci", 1, MYSQL_CHARSET_NAME_utf16); + collation[55] = new Collation(55, "utf16_bin", 0, MYSQL_CHARSET_NAME_utf16); + collation[56] = new Collation(56, "utf16le_general_ci", 1, MYSQL_CHARSET_NAME_utf16le); + collation[57] = new Collation(57, "cp1256_general_ci", 1, MYSQL_CHARSET_NAME_cp1256); + collation[58] = new Collation(58, 
"cp1257_bin", 0, MYSQL_CHARSET_NAME_cp1257); + collation[59] = new Collation(59, "cp1257_general_ci", 1, MYSQL_CHARSET_NAME_cp1257); + collation[60] = new Collation(60, "utf32_general_ci", 1, MYSQL_CHARSET_NAME_utf32); + collation[61] = new Collation(61, "utf32_bin", 0, MYSQL_CHARSET_NAME_utf32); + collation[62] = new Collation(62, "utf16le_bin", 0, MYSQL_CHARSET_NAME_utf16le); + collation[63] = new Collation(63, "binary", 1, MYSQL_CHARSET_NAME_binary); + collation[66] = new Collation(66, "cp1250_bin", 0, MYSQL_CHARSET_NAME_cp1250); + collation[67] = new Collation(67, "cp1256_bin", 0, MYSQL_CHARSET_NAME_cp1256); + collation[68] = new Collation(68, "cp866_bin", 0, MYSQL_CHARSET_NAME_cp866); + collation[70] = new Collation(70, "greek_bin", 0, MYSQL_CHARSET_NAME_greek); + collation[71] = new Collation(71, "hebrew_bin", 0, MYSQL_CHARSET_NAME_hebrew); + collation[73] = new Collation(73, "keybcs2_bin", 0, MYSQL_CHARSET_NAME_keybcs2); + collation[75] = new Collation(75, "koi8u_bin", 0, MYSQL_CHARSET_NAME_koi8u); + collation[78] = new Collation(78, "latin5_bin", 0, MYSQL_CHARSET_NAME_latin5); + collation[79] = new Collation(79, "latin7_bin", 0, MYSQL_CHARSET_NAME_latin7); + collation[81] = new Collation(81, "cp852_bin", 0, MYSQL_CHARSET_NAME_cp852); + collation[83] = new Collation(83, "utf8_bin", 0, MYSQL_CHARSET_NAME_utf8); + collation[85] = new Collation(85, "euckr_bin", 0, MYSQL_CHARSET_NAME_euckr); + collation[86] = new Collation(86, "gb2312_bin", 0, MYSQL_CHARSET_NAME_gb2312); + collation[87] = new Collation(87, "gbk_bin", 0, MYSQL_CHARSET_NAME_gbk); + collation[88] = new Collation(88, "sjis_bin", 0, MYSQL_CHARSET_NAME_sjis); + collation[89] = new Collation(89, "tis620_bin", 0, MYSQL_CHARSET_NAME_tis620); + collation[90] = new Collation(90, "ucs2_bin", 0, MYSQL_CHARSET_NAME_ucs2); + collation[95] = new Collation(95, "cp932_japanese_ci", 1, MYSQL_CHARSET_NAME_cp932); + collation[96] = new Collation(96, "cp932_bin", 0, MYSQL_CHARSET_NAME_cp932); + collation[97] = new 
Collation(97, "eucjpms_japanese_ci", 1, MYSQL_CHARSET_NAME_eucjpms); + collation[98] = new Collation(98, "eucjpms_bin", 0, MYSQL_CHARSET_NAME_eucjpms); + collation[99] = new Collation(99, "cp1250_polish_ci", 0, MYSQL_CHARSET_NAME_cp1250); + collation[101] = new Collation(101, "utf16_unicode_ci", 0, MYSQL_CHARSET_NAME_utf16); + collation[102] = new Collation(102, "utf16_icelandic_ci", 0, MYSQL_CHARSET_NAME_utf16); + collation[103] = new Collation(103, "utf16_latvian_ci", 0, MYSQL_CHARSET_NAME_utf16); + collation[104] = new Collation(104, "utf16_romanian_ci", 0, MYSQL_CHARSET_NAME_utf16); + collation[105] = new Collation(105, "utf16_slovenian_ci", 0, MYSQL_CHARSET_NAME_utf16); + collation[106] = new Collation(106, "utf16_polish_ci", 0, MYSQL_CHARSET_NAME_utf16); + collation[107] = new Collation(107, "utf16_estonian_ci", 0, MYSQL_CHARSET_NAME_utf16); + collation[108] = new Collation(108, "utf16_spanish_ci", 0, MYSQL_CHARSET_NAME_utf16); + collation[109] = new Collation(109, "utf16_swedish_ci", 0, MYSQL_CHARSET_NAME_utf16); + collation[110] = new Collation(110, "utf16_turkish_ci", 0, MYSQL_CHARSET_NAME_utf16); + collation[111] = new Collation(111, "utf16_czech_ci", 0, MYSQL_CHARSET_NAME_utf16); + collation[112] = new Collation(112, "utf16_danish_ci", 0, MYSQL_CHARSET_NAME_utf16); + collation[113] = new Collation(113, "utf16_lithuanian_ci", 0, MYSQL_CHARSET_NAME_utf16); + collation[114] = new Collation(114, "utf16_slovak_ci", 0, MYSQL_CHARSET_NAME_utf16); + collation[115] = new Collation(115, "utf16_spanish2_ci", 0, MYSQL_CHARSET_NAME_utf16); + collation[116] = new Collation(116, "utf16_roman_ci", 0, MYSQL_CHARSET_NAME_utf16); + collation[117] = new Collation(117, "utf16_persian_ci", 0, MYSQL_CHARSET_NAME_utf16); + collation[118] = new Collation(118, "utf16_esperanto_ci", 0, MYSQL_CHARSET_NAME_utf16); + collation[119] = new Collation(119, "utf16_hungarian_ci", 0, MYSQL_CHARSET_NAME_utf16); + collation[120] = new Collation(120, "utf16_sinhala_ci", 0, 
MYSQL_CHARSET_NAME_utf16); + collation[121] = new Collation(121, "utf16_german2_ci", 0, MYSQL_CHARSET_NAME_utf16); + collation[122] = new Collation(122, "utf16_croatian_ci", 0, MYSQL_CHARSET_NAME_utf16); + collation[123] = new Collation(123, "utf16_unicode_520_ci", 0, MYSQL_CHARSET_NAME_utf16); + collation[124] = new Collation(124, "utf16_vietnamese_ci", 0, MYSQL_CHARSET_NAME_utf16); + collation[128] = new Collation(128, "ucs2_unicode_ci", 0, MYSQL_CHARSET_NAME_ucs2); + collation[129] = new Collation(129, "ucs2_icelandic_ci", 0, MYSQL_CHARSET_NAME_ucs2); + collation[130] = new Collation(130, "ucs2_latvian_ci", 0, MYSQL_CHARSET_NAME_ucs2); + collation[131] = new Collation(131, "ucs2_romanian_ci", 0, MYSQL_CHARSET_NAME_ucs2); + collation[132] = new Collation(132, "ucs2_slovenian_ci", 0, MYSQL_CHARSET_NAME_ucs2); + collation[133] = new Collation(133, "ucs2_polish_ci", 0, MYSQL_CHARSET_NAME_ucs2); + collation[134] = new Collation(134, "ucs2_estonian_ci", 0, MYSQL_CHARSET_NAME_ucs2); + collation[135] = new Collation(135, "ucs2_spanish_ci", 0, MYSQL_CHARSET_NAME_ucs2); + collation[136] = new Collation(136, "ucs2_swedish_ci", 0, MYSQL_CHARSET_NAME_ucs2); + collation[137] = new Collation(137, "ucs2_turkish_ci", 0, MYSQL_CHARSET_NAME_ucs2); + collation[138] = new Collation(138, "ucs2_czech_ci", 0, MYSQL_CHARSET_NAME_ucs2); + collation[139] = new Collation(139, "ucs2_danish_ci", 0, MYSQL_CHARSET_NAME_ucs2); + collation[140] = new Collation(140, "ucs2_lithuanian_ci", 0, MYSQL_CHARSET_NAME_ucs2); + collation[141] = new Collation(141, "ucs2_slovak_ci", 0, MYSQL_CHARSET_NAME_ucs2); + collation[142] = new Collation(142, "ucs2_spanish2_ci", 0, MYSQL_CHARSET_NAME_ucs2); + collation[143] = new Collation(143, "ucs2_roman_ci", 0, MYSQL_CHARSET_NAME_ucs2); + collation[144] = new Collation(144, "ucs2_persian_ci", 0, MYSQL_CHARSET_NAME_ucs2); + collation[145] = new Collation(145, "ucs2_esperanto_ci", 0, MYSQL_CHARSET_NAME_ucs2); + collation[146] = new Collation(146, "ucs2_hungarian_ci", 
0, MYSQL_CHARSET_NAME_ucs2); + collation[147] = new Collation(147, "ucs2_sinhala_ci", 0, MYSQL_CHARSET_NAME_ucs2); + collation[148] = new Collation(148, "ucs2_german2_ci", 0, MYSQL_CHARSET_NAME_ucs2); + collation[149] = new Collation(149, "ucs2_croatian_ci", 0, MYSQL_CHARSET_NAME_ucs2); + collation[150] = new Collation(150, "ucs2_unicode_520_ci", 0, MYSQL_CHARSET_NAME_ucs2); + collation[151] = new Collation(151, "ucs2_vietnamese_ci", 0, MYSQL_CHARSET_NAME_ucs2); + collation[159] = new Collation(159, "ucs2_general_mysql500_ci", 0, MYSQL_CHARSET_NAME_ucs2); + collation[160] = new Collation(160, "utf32_unicode_ci", 0, MYSQL_CHARSET_NAME_utf32); + collation[161] = new Collation(161, "utf32_icelandic_ci", 0, MYSQL_CHARSET_NAME_utf32); + collation[162] = new Collation(162, "utf32_latvian_ci", 0, MYSQL_CHARSET_NAME_utf32); + collation[163] = new Collation(163, "utf32_romanian_ci", 0, MYSQL_CHARSET_NAME_utf32); + collation[164] = new Collation(164, "utf32_slovenian_ci", 0, MYSQL_CHARSET_NAME_utf32); + collation[165] = new Collation(165, "utf32_polish_ci", 0, MYSQL_CHARSET_NAME_utf32); + collation[166] = new Collation(166, "utf32_estonian_ci", 0, MYSQL_CHARSET_NAME_utf32); + collation[167] = new Collation(167, "utf32_spanish_ci", 0, MYSQL_CHARSET_NAME_utf32); + collation[168] = new Collation(168, "utf32_swedish_ci", 0, MYSQL_CHARSET_NAME_utf32); + collation[169] = new Collation(169, "utf32_turkish_ci", 0, MYSQL_CHARSET_NAME_utf32); + collation[170] = new Collation(170, "utf32_czech_ci", 0, MYSQL_CHARSET_NAME_utf32); + collation[171] = new Collation(171, "utf32_danish_ci", 0, MYSQL_CHARSET_NAME_utf32); + collation[172] = new Collation(172, "utf32_lithuanian_ci", 0, MYSQL_CHARSET_NAME_utf32); + collation[173] = new Collation(173, "utf32_slovak_ci", 0, MYSQL_CHARSET_NAME_utf32); + collation[174] = new Collation(174, "utf32_spanish2_ci", 0, MYSQL_CHARSET_NAME_utf32); + collation[175] = new Collation(175, "utf32_roman_ci", 0, MYSQL_CHARSET_NAME_utf32); + collation[176] = new 
Collation(176, "utf32_persian_ci", 0, MYSQL_CHARSET_NAME_utf32); + collation[177] = new Collation(177, "utf32_esperanto_ci", 0, MYSQL_CHARSET_NAME_utf32); + collation[178] = new Collation(178, "utf32_hungarian_ci", 0, MYSQL_CHARSET_NAME_utf32); + collation[179] = new Collation(179, "utf32_sinhala_ci", 0, MYSQL_CHARSET_NAME_utf32); + collation[180] = new Collation(180, "utf32_german2_ci", 0, MYSQL_CHARSET_NAME_utf32); + collation[181] = new Collation(181, "utf32_croatian_ci", 0, MYSQL_CHARSET_NAME_utf32); + collation[182] = new Collation(182, "utf32_unicode_520_ci", 0, MYSQL_CHARSET_NAME_utf32); + collation[183] = new Collation(183, "utf32_vietnamese_ci", 0, MYSQL_CHARSET_NAME_utf32); + collation[192] = new Collation(192, "utf8_unicode_ci", 0, MYSQL_CHARSET_NAME_utf8); + collation[193] = new Collation(193, "utf8_icelandic_ci", 0, MYSQL_CHARSET_NAME_utf8); + collation[194] = new Collation(194, "utf8_latvian_ci", 0, MYSQL_CHARSET_NAME_utf8); + collation[195] = new Collation(195, "utf8_romanian_ci", 0, MYSQL_CHARSET_NAME_utf8); + collation[196] = new Collation(196, "utf8_slovenian_ci", 0, MYSQL_CHARSET_NAME_utf8); + collation[197] = new Collation(197, "utf8_polish_ci", 0, MYSQL_CHARSET_NAME_utf8); + collation[198] = new Collation(198, "utf8_estonian_ci", 0, MYSQL_CHARSET_NAME_utf8); + collation[199] = new Collation(199, "utf8_spanish_ci", 0, MYSQL_CHARSET_NAME_utf8); + collation[200] = new Collation(200, "utf8_swedish_ci", 0, MYSQL_CHARSET_NAME_utf8); + collation[201] = new Collation(201, "utf8_turkish_ci", 0, MYSQL_CHARSET_NAME_utf8); + collation[202] = new Collation(202, "utf8_czech_ci", 0, MYSQL_CHARSET_NAME_utf8); + collation[203] = new Collation(203, "utf8_danish_ci", 0, MYSQL_CHARSET_NAME_utf8); + collation[204] = new Collation(204, "utf8_lithuanian_ci", 0, MYSQL_CHARSET_NAME_utf8); + collation[205] = new Collation(205, "utf8_slovak_ci", 0, MYSQL_CHARSET_NAME_utf8); + collation[206] = new Collation(206, "utf8_spanish2_ci", 0, MYSQL_CHARSET_NAME_utf8); + 
collation[207] = new Collation(207, "utf8_roman_ci", 0, MYSQL_CHARSET_NAME_utf8); + collation[208] = new Collation(208, "utf8_persian_ci", 0, MYSQL_CHARSET_NAME_utf8); + collation[209] = new Collation(209, "utf8_esperanto_ci", 0, MYSQL_CHARSET_NAME_utf8); + collation[210] = new Collation(210, "utf8_hungarian_ci", 0, MYSQL_CHARSET_NAME_utf8); + collation[211] = new Collation(211, "utf8_sinhala_ci", 0, MYSQL_CHARSET_NAME_utf8); + collation[212] = new Collation(212, "utf8_german2_ci", 0, MYSQL_CHARSET_NAME_utf8); + collation[213] = new Collation(213, "utf8_croatian_ci", 0, MYSQL_CHARSET_NAME_utf8); + collation[214] = new Collation(214, "utf8_unicode_520_ci", 0, MYSQL_CHARSET_NAME_utf8); + collation[215] = new Collation(215, "utf8_vietnamese_ci", 0, MYSQL_CHARSET_NAME_utf8); + collation[223] = new Collation(223, "utf8_general_mysql500_ci", 0, MYSQL_CHARSET_NAME_utf8); + collation[224] = new Collation(224, "utf8mb4_unicode_ci", 0, MYSQL_CHARSET_NAME_utf8mb4); + collation[225] = new Collation(225, "utf8mb4_icelandic_ci", 0, MYSQL_CHARSET_NAME_utf8mb4); + collation[226] = new Collation(226, "utf8mb4_latvian_ci", 0, MYSQL_CHARSET_NAME_utf8mb4); + collation[227] = new Collation(227, "utf8mb4_romanian_ci", 0, MYSQL_CHARSET_NAME_utf8mb4); + collation[228] = new Collation(228, "utf8mb4_slovenian_ci", 0, MYSQL_CHARSET_NAME_utf8mb4); + collation[229] = new Collation(229, "utf8mb4_polish_ci", 0, MYSQL_CHARSET_NAME_utf8mb4); + collation[230] = new Collation(230, "utf8mb4_estonian_ci", 0, MYSQL_CHARSET_NAME_utf8mb4); + collation[231] = new Collation(231, "utf8mb4_spanish_ci", 0, MYSQL_CHARSET_NAME_utf8mb4); + collation[232] = new Collation(232, "utf8mb4_swedish_ci", 0, MYSQL_CHARSET_NAME_utf8mb4); + collation[233] = new Collation(233, "utf8mb4_turkish_ci", 0, MYSQL_CHARSET_NAME_utf8mb4); + collation[234] = new Collation(234, "utf8mb4_czech_ci", 0, MYSQL_CHARSET_NAME_utf8mb4); + collation[235] = new Collation(235, "utf8mb4_danish_ci", 0, MYSQL_CHARSET_NAME_utf8mb4); + collation[236] 
= new Collation(236, "utf8mb4_lithuanian_ci", 0, MYSQL_CHARSET_NAME_utf8mb4); + collation[237] = new Collation(237, "utf8mb4_slovak_ci", 0, MYSQL_CHARSET_NAME_utf8mb4); + collation[238] = new Collation(238, "utf8mb4_spanish2_ci", 0, MYSQL_CHARSET_NAME_utf8mb4); + collation[239] = new Collation(239, "utf8mb4_roman_ci", 0, MYSQL_CHARSET_NAME_utf8mb4); + collation[240] = new Collation(240, "utf8mb4_persian_ci", 0, MYSQL_CHARSET_NAME_utf8mb4); + collation[241] = new Collation(241, "utf8mb4_esperanto_ci", 0, MYSQL_CHARSET_NAME_utf8mb4); + collation[242] = new Collation(242, "utf8mb4_hungarian_ci", 0, MYSQL_CHARSET_NAME_utf8mb4); + collation[243] = new Collation(243, "utf8mb4_sinhala_ci", 0, MYSQL_CHARSET_NAME_utf8mb4); + collation[244] = new Collation(244, "utf8mb4_german2_ci", 0, MYSQL_CHARSET_NAME_utf8mb4); + collation[245] = new Collation(245, "utf8mb4_croatian_ci", 0, MYSQL_CHARSET_NAME_utf8mb4); + collation[246] = new Collation(246, "utf8mb4_unicode_520_ci", 0, MYSQL_CHARSET_NAME_utf8mb4); + collation[247] = new Collation(247, "utf8mb4_vietnamese_ci", 0, MYSQL_CHARSET_NAME_utf8mb4); + + COLLATION_INDEX_TO_COLLATION_NAME = new String[MAP_SIZE]; + COLLATION_INDEX_TO_CHARSET = new MysqlCharset[MAP_SIZE]; + + // Add all collations to lookup maps for easy indexing + for (int i = 1; i < MAP_SIZE; i++) { + COLLATION_INDEX_TO_COLLATION_NAME[i] = collation[i].collationName; + COLLATION_INDEX_TO_CHARSET[i] = collation[i].mysqlCharset; + } + + // Sanity check + for (int i = 1; i < MAP_SIZE; i++) { + if (COLLATION_INDEX_TO_COLLATION_NAME[i] == null) { + throw new RuntimeException("Assertion failure: No mapping from charset index " + i + " to a mysql collation"); + } + if (COLLATION_INDEX_TO_COLLATION_NAME[i] == null) { + throw new RuntimeException("Assertion failure: No mapping from charset index " + i + " to a Java character set"); + } + } + } + + /** + * MySQL charset could map to several Java encodings. + * So here we choose the one according to next rules: + *

  • if there is no static mapping for this charset then return javaEncoding value as is because this + * could be a custom charset for example + *
  • if static mapping exists and javaEncoding equals to one of Java encoding canonical names or aliases available + * for this mapping then javaEncoding value as is; this is required when result should match to connection encoding, for example if connection encoding is + * Cp943 we must avoid getting SHIFT_JIS for sjis mysql charset + *
  • if static mapping exists and javaEncoding doesn't match any Java encoding canonical + * names or aliases available for this mapping then return default Java encoding (the first in mapping list) + * + * @param collationIndex + * @param javaEncoding + */ + public static String getJavaEncodingForCollationIndex(Integer collationIndex, String javaEncoding) { + String res = javaEncoding; + if (collationIndex != null && collationIndex > 0 && collationIndex < MAP_SIZE) { + MysqlCharset cs = COLLATION_INDEX_TO_CHARSET[collationIndex]; + if (cs != null) { + res = cs.getMatchingJavaEncoding(javaEncoding); + } + } + return res; + } + + public static String getMysqlCharsetNameForCollationIndex(Integer collationIndex) { + if (collationIndex != null && collationIndex > 0 && collationIndex < MAP_SIZE) { + return COLLATION_INDEX_TO_CHARSET[collationIndex].charsetName; + } + return null; + } + + public static String getMysqlCharsetForJavaEncoding(String javaEncoding) { + if (javaEncoding != null) { + List mysqlCharsets = CharsetMapping.JAVA_ENCODING_UC_TO_MYSQL_CHARSET.get(javaEncoding.toUpperCase(Locale.ENGLISH)); + + if (mysqlCharsets != null && !mysqlCharsets.isEmpty()) { + return mysqlCharsets.get(0).charsetName; + } + } + + return null; + } + + public static int getMblen(String charsetName) { + if (charsetName != null) { + MysqlCharset cs = CHARSET_NAME_TO_CHARSET.get(charsetName); + if (cs != null) { + return cs.mblen; + } + } + return 0; + } +} + diff --git a/java/jdbc/src/main/java/com/flipkart/vitess/util/charset/Collation.java b/java/jdbc/src/main/java/com/flipkart/vitess/util/charset/Collation.java new file mode 100644 index 00000000000..561f314f417 --- /dev/null +++ b/java/jdbc/src/main/java/com/flipkart/vitess/util/charset/Collation.java @@ -0,0 +1,33 @@ +package com.flipkart.vitess.util.charset; + +/** + * These classes were pulled from mysql-connector-java and simplified to just the parts supporting the statically available + * charsets + */ +class Collation { + 
public final int index; + public final String collationName; + public final int priority; + public final MysqlCharset mysqlCharset; + + public Collation(int index, String collationName, int priority, String charsetName) { + this.index = index; + this.collationName = collationName; + this.priority = priority; + this.mysqlCharset = CharsetMapping.CHARSET_NAME_TO_CHARSET.get(charsetName); + } + + @Override + public String toString() { + return "[" + + "index=" + + this.index + + ",collationName=" + + this.collationName + + ",charsetName=" + + this.mysqlCharset.charsetName + + ",javaCharsetName=" + + this.mysqlCharset.getMatchingJavaEncoding(null) + + "]"; + } +} diff --git a/java/jdbc/src/main/java/com/flipkart/vitess/util/charset/MysqlCharset.java b/java/jdbc/src/main/java/com/flipkart/vitess/util/charset/MysqlCharset.java new file mode 100644 index 00000000000..d172de55713 --- /dev/null +++ b/java/jdbc/src/main/java/com/flipkart/vitess/util/charset/MysqlCharset.java @@ -0,0 +1,118 @@ +package com.flipkart.vitess.util.charset; + +import java.nio.charset.Charset; +import java.sql.SQLException; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Locale; +import java.util.Set; + +/** + * These classes were pulled from mysql-connector-java and simplified to just the parts supporting the statically available + * charsets + */ +class MysqlCharset { + public final String charsetName; + public final int mblen; + public final int priority; + public final Set javaEncodingsUc = new HashSet<>(); + private String defaultEncoding = null; + + public int major = 4; + public int minor = 1; + public int subminor = 0; + + /** + * Constructs MysqlCharset object + * + * @param charsetName MySQL charset name + * @param mblen Max number of bytes per character + * @param priority MysqlCharset with highest lever of this param will be used for Java encoding --> Mysql charsets conversion. 
+ * @param javaEncodings List of Java encodings corresponding to this MySQL charset; the first name in list is the default for mysql --> java data conversion + */ + public MysqlCharset(String charsetName, int mblen, int priority, String[] javaEncodings) { + this.charsetName = charsetName; + this.mblen = mblen; + this.priority = priority; + + for (int i = 0; i < javaEncodings.length; i++) { + String encoding = javaEncodings[i]; + try { + Charset cs = Charset.forName(encoding); + addEncodingMapping(cs.name()); + + Set als = cs.aliases(); + Iterator ali = als.iterator(); + while (ali.hasNext()) { + addEncodingMapping(ali.next()); + } + } catch (Exception e) { + // if there is no support of this charset in JVM it's still possible to use our converter for 1-byte charsets + if (mblen == 1) { + addEncodingMapping(encoding); + } + } + } + + if (this.javaEncodingsUc.size() == 0) { + if (mblen > 1) { + addEncodingMapping("UTF-8"); + } else { + addEncodingMapping("Cp1252"); + } + } + } + + private void addEncodingMapping(String encoding) { + String encodingUc = encoding.toUpperCase(Locale.ENGLISH); + + if (this.defaultEncoding == null) { + this.defaultEncoding = encodingUc; + } + + if (!this.javaEncodingsUc.contains(encodingUc)) { + this.javaEncodingsUc.add(encodingUc); + } + } + + public MysqlCharset(String charsetName, int mblen, int priority, String[] javaEncodings, int major, int minor) { + this(charsetName, mblen, priority, javaEncodings); + this.major = major; + this.minor = minor; + } + + public MysqlCharset(String charsetName, int mblen, int priority, String[] javaEncodings, int major, int minor, int subminor) { + this(charsetName, mblen, priority, javaEncodings); + this.major = major; + this.minor = minor; + this.subminor = subminor; + } + + @Override + public String toString() { + StringBuilder asString = new StringBuilder(); + asString.append("["); + asString.append("charsetName="); + asString.append(this.charsetName); + asString.append(",mblen="); + 
asString.append(this.mblen); + // asString.append(",javaEncoding="); + // asString.append(this.javaEncodings.toString()); + asString.append("]"); + return asString.toString(); + } + + /** + * If javaEncoding parameter value is one of available java encodings for this charset + * then returns javaEncoding value as is. Otherwise returns first available java encoding name. + * + * @param javaEncoding + * @throws SQLException + */ + String getMatchingJavaEncoding(String javaEncoding) { + if (javaEncoding != null && this.javaEncodingsUc.contains(javaEncoding.toUpperCase(Locale.ENGLISH))) { + return javaEncoding; + } + return this.defaultEncoding; + } +} diff --git a/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/ConnectionPropertiesTest.java b/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/ConnectionPropertiesTest.java index 4710193c8b4..57000bd32ad 100644 --- a/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/ConnectionPropertiesTest.java +++ b/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/ConnectionPropertiesTest.java @@ -15,7 +15,7 @@ public class ConnectionPropertiesTest { - private static final int NUM_PROPS = 6; + private static final int NUM_PROPS = 12; @Test public void testReflection() throws Exception { @@ -36,8 +36,14 @@ public void testDefaults() throws SQLException { ConnectionProperties props = new ConnectionProperties(); props.initializeProperties(new Properties()); + Assert.assertEquals("blobsAreStrings", false, props.getBlobsAreStrings()); + Assert.assertEquals("functionsNeverReturnBlobs", false, props.getFunctionsNeverReturnBlobs()); Assert.assertEquals("tinyInt1isBit", true, props.getTinyInt1isBit()); Assert.assertEquals("yearIsDateType", true, props.getYearIsDateType()); + Assert.assertEquals("useBlobToStoreUTF8OutsideBMP", false, props.getUseBlobToStoreUTF8OutsideBMP()); + Assert.assertEquals("utf8OutsideBmpIncludedColumnNamePattern", null, props.getUtf8OutsideBmpIncludedColumnNamePattern()); + 
Assert.assertEquals("utf8OutsideBmpExcludedColumnNamePattern", null, props.getUtf8OutsideBmpExcludedColumnNamePattern()); + Assert.assertEquals("characterEncoding", null, props.getEncoding()); Assert.assertEquals("executeType", Constants.DEFAULT_EXECUTE_TYPE, props.getExecuteType()); Assert.assertEquals("twopcEnabled", false, props.getTwopcEnabled()); Assert.assertEquals("includedFields", Constants.DEFAULT_INCLUDED_FIELDS, props.getIncludedFields()); @@ -50,8 +56,14 @@ public void testInitializeFromProperties() throws SQLException { ConnectionProperties props = new ConnectionProperties(); Properties info = new Properties(); + info.setProperty("blobsAreStrings", "yes"); + info.setProperty("functionsNeverReturnBlobs", "yes"); info.setProperty("tinyInt1isBit", "yes"); info.setProperty("yearIsDateType", "yes"); + info.setProperty("useBlobToStoreUTF8OutsideBMP", "yes"); + info.setProperty("utf8OutsideBmpIncludedColumnNamePattern", "(foo|bar)?baz"); + info.setProperty("utf8OutsideBmpExcludedColumnNamePattern", "(foo|bar)?baz"); + info.setProperty("characterEncoding", "utf-8"); info.setProperty("executeType", Constants.QueryExecuteType.STREAM.name()); info.setProperty("twopcEnabled", "yes"); info.setProperty("includedFields", Query.ExecuteOptions.IncludedFields.TYPE_ONLY.name()); @@ -59,8 +71,14 @@ public void testInitializeFromProperties() throws SQLException { props.initializeProperties(info); + Assert.assertEquals("blobsAreStrings", true, props.getBlobsAreStrings()); + Assert.assertEquals("functionsNeverReturnBlobs", true, props.getFunctionsNeverReturnBlobs()); Assert.assertEquals("tinyInt1isBit", true, props.getTinyInt1isBit()); Assert.assertEquals("yearIsDateType", true, props.getYearIsDateType()); + Assert.assertEquals("useBlobToStoreUTF8OutsideBMP", true, props.getUseBlobToStoreUTF8OutsideBMP()); + Assert.assertEquals("utf8OutsideBmpIncludedColumnNamePattern", "(foo|bar)?baz", props.getUtf8OutsideBmpIncludedColumnNamePattern()); + 
Assert.assertEquals("utf8OutsideBmpExcludedColumnNamePattern", "(foo|bar)?baz", props.getUtf8OutsideBmpExcludedColumnNamePattern()); + Assert.assertEquals("characterEncoding", "utf-8", props.getEncoding()); Assert.assertEquals("executeType", Constants.QueryExecuteType.STREAM, props.getExecuteType()); Assert.assertEquals("twopcEnabled", true, props.getTwopcEnabled()); Assert.assertEquals("includedFields", Query.ExecuteOptions.IncludedFields.TYPE_ONLY, props.getIncludedFields()); @@ -68,6 +86,22 @@ public void testInitializeFromProperties() throws SQLException { Assert.assertEquals("tabletType", Topodata.TabletType.BACKUP, props.getTabletType()); } + @Test(expected = SQLException.class) + public void testEncodingValidation() throws SQLException { + ConnectionProperties props = new ConnectionProperties(); + Properties info = new Properties(); + + String fakeEncoding = "utf-12345"; + info.setProperty("characterEncoding", fakeEncoding); + try { + props.initializeProperties(info); + Assert.fail("should have failed to parse encoding " + fakeEncoding); + } catch (SQLException e) { + Assert.assertEquals("Unsupported character encoding: " + fakeEncoding, e.getMessage()); + throw e; + } + } + @Test public void testDriverPropertiesOutput() throws SQLException { Properties info = new Properties(); @@ -75,7 +109,7 @@ public void testDriverPropertiesOutput() throws SQLException { Assert.assertEquals(NUM_PROPS, infos.length); // Test the expected fields for just 1 - int indexForFullTest = 2; + int indexForFullTest = 8; Assert.assertEquals("executeType", infos[indexForFullTest].name); Assert.assertEquals("Query execution type: simple or stream", infos[indexForFullTest].description); @@ -88,11 +122,16 @@ public void testDriverPropertiesOutput() throws SQLException { Assert.assertArrayEquals(allowed, infos[indexForFullTest].choices); // Test that name exists for the others, as a sanity check - Assert.assertEquals("tinyInt1isBit", infos[0].name); - 
Assert.assertEquals("yearIsDateType", infos[1].name); - Assert.assertEquals(Constants.Property.TWOPC_ENABLED, infos[3].name); - Assert.assertEquals(Constants.Property.INCLUDED_FIELDS, infos[4].name); - Assert.assertEquals(Constants.Property.TABLET_TYPE, infos[5].name); + Assert.assertEquals("functionsNeverReturnBlobs", infos[1].name); + Assert.assertEquals("tinyInt1isBit", infos[2].name); + Assert.assertEquals("yearIsDateType", infos[3].name); + Assert.assertEquals("useBlobToStoreUTF8OutsideBMP", infos[4].name); + Assert.assertEquals("utf8OutsideBmpIncludedColumnNamePattern", infos[5].name); + Assert.assertEquals("utf8OutsideBmpExcludedColumnNamePattern", infos[6].name); + Assert.assertEquals("characterEncoding", infos[7].name); + Assert.assertEquals(Constants.Property.TWOPC_ENABLED, infos[9].name); + Assert.assertEquals(Constants.Property.INCLUDED_FIELDS, infos[10].name); + Assert.assertEquals(Constants.Property.TABLET_TYPE, infos[11].name); } @Test @@ -100,15 +139,11 @@ public void testValidBooleanValues() throws SQLException { ConnectionProperties props = new ConnectionProperties(); Properties info = new Properties(); - info.setProperty(Constants.Property.TWOPC_ENABLED, "true"); - props.initializeProperties(info); - Assert.assertEquals(true, props.getTwopcEnabled()); - info.setProperty(Constants.Property.TWOPC_ENABLED, "yes"); - props.initializeProperties(info); - Assert.assertEquals(true, props.getTwopcEnabled()); - info.setProperty(Constants.Property.TWOPC_ENABLED, "no"); + info.setProperty("blobsAreStrings", "true"); + info.setProperty("functionsNeverReturnBlobs", "yes"); + info.setProperty("tinyInt1isBit", "no"); + props.initializeProperties(info); - Assert.assertEquals(false, props.getTwopcEnabled()); info.setProperty(Constants.Property.TWOPC_ENABLED, "false-ish"); try { diff --git a/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/FieldWithMetadataTest.java b/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/FieldWithMetadataTest.java index 
79aecf30279..4667b5445e5 100644 --- a/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/FieldWithMetadataTest.java +++ b/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/FieldWithMetadataTest.java @@ -1,10 +1,14 @@ package com.flipkart.vitess.jdbc; +import com.flipkart.vitess.util.MysqlDefs; +import com.flipkart.vitess.util.charset.CharsetMapping; import com.youtube.vitess.proto.Query; import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; +import org.mockito.internal.verification.VerificationModeFactory; +import org.powermock.api.mockito.PowerMockito; import org.powermock.core.classloader.annotations.PrepareForTest; import org.powermock.modules.junit4.PowerMockRunner; @@ -15,6 +19,101 @@ @RunWith(PowerMockRunner.class) public class FieldWithMetadataTest extends BaseTest { + @Test + public void testImplicitTempTable() throws SQLException { + Query.Field raw = Query.Field.newBuilder() + .setTable("#sql_my_temptable") + .setCharset(CharsetMapping.MYSQL_COLLATION_INDEX_binary) + .setFlags(Query.MySqlFlag.BINARY_FLAG_VALUE) + .setType(Query.Type.VARCHAR) + .setName("foo") + .build(); + + FieldWithMetadata fieldWithMetadata = new FieldWithMetadata(getVitessConnection(), raw); + + Assert.assertEquals(true, fieldWithMetadata.isImplicitTemporaryTable()); + Assert.assertEquals(false, fieldWithMetadata.isOpaqueBinary()); + + VitessConnection conn = getVitessConnection(); + conn.setIncludedFields(Query.ExecuteOptions.IncludedFields.TYPE_AND_NAME); + + raw = Query.Field.newBuilder() + .setType(Query.Type.VARCHAR) + .setName("foo") + .build(); + + fieldWithMetadata = new FieldWithMetadata(conn, raw); + + Assert.assertEquals(false, fieldWithMetadata.isImplicitTemporaryTable()); + Assert.assertEquals(false, fieldWithMetadata.isOpaqueBinary()); + } + + @Test + public void testBlobRemapping() throws SQLException { + VitessConnection conn = getVitessConnection(); + conn.setBlobsAreStrings(true); + + Query.Field raw = Query.Field.newBuilder() + 
.setTable("#sql_my_temptable") + .setCharset(/* latin1, doesn't matter just dont want utf8 for now */ 5) + .setFlags(Query.MySqlFlag.BINARY_FLAG_VALUE) + .setType(Query.Type.BLOB) + .setName("foo") + .setOrgName("foo") + .build(); + + FieldWithMetadata fieldWithMetadata = new FieldWithMetadata(conn, raw); + Assert.assertEquals(Types.VARCHAR, fieldWithMetadata.getJavaType()); + + conn.setBlobsAreStrings(false); + fieldWithMetadata = new FieldWithMetadata(conn, raw); + Assert.assertEquals(Types.LONGVARCHAR, fieldWithMetadata.getJavaType()); + + conn.setFunctionsNeverReturnBlobs(true); + fieldWithMetadata = new FieldWithMetadata(conn, raw); + Assert.assertEquals(Types.VARCHAR, fieldWithMetadata.getJavaType()); + + conn.setFunctionsNeverReturnBlobs(false); + conn.setUseBlobToStoreUTF8OutsideBMP(true); + fieldWithMetadata = new FieldWithMetadata(conn, raw); + Assert.assertEquals(Types.LONGVARCHAR, fieldWithMetadata.getJavaType()); + + raw = raw.toBuilder() + .setCharset(CharsetMapping.MYSQL_COLLATION_INDEX_binary) + .setColumnLength(MysqlDefs.LENGTH_BLOB) + .build(); + fieldWithMetadata = new FieldWithMetadata(conn, raw); + Assert.assertEquals(Types.VARCHAR, fieldWithMetadata.getJavaType()); + Assert.assertEquals("utf8_general_ci", fieldWithMetadata.getCollation()); + + conn.setUtf8OutsideBmpExcludedColumnNamePattern("^fo.*$"); + fieldWithMetadata = new FieldWithMetadata(conn, raw); + Assert.assertEquals(Types.LONGVARBINARY, fieldWithMetadata.getJavaType()); + Assert.assertNotEquals("utf8_general_ci", fieldWithMetadata.getCollation()); + + conn.setUtf8OutsideBmpIncludedColumnNamePattern("^foo$"); + fieldWithMetadata = new FieldWithMetadata(conn, raw); + Assert.assertEquals(Types.VARCHAR, fieldWithMetadata.getJavaType()); + Assert.assertEquals("utf8_general_ci", fieldWithMetadata.getCollation()); + + raw = raw.toBuilder() + .setColumnLength(MysqlDefs.LENGTH_LONGBLOB) + .build(); + fieldWithMetadata = new FieldWithMetadata(conn, raw); + 
Assert.assertEquals(Types.LONGVARCHAR, fieldWithMetadata.getJavaType()); + Assert.assertEquals("utf8_general_ci", fieldWithMetadata.getCollation()); + + conn.setUseBlobToStoreUTF8OutsideBMP(false); + fieldWithMetadata = new FieldWithMetadata(conn, raw); + Assert.assertEquals(Types.LONGVARBINARY, fieldWithMetadata.getJavaType()); + + conn.setIncludedFields(Query.ExecuteOptions.IncludedFields.TYPE_AND_NAME); + fieldWithMetadata = new FieldWithMetadata(conn, raw); + Assert.assertEquals(Types.BLOB, fieldWithMetadata.getJavaType()); + Assert.assertEquals(null, fieldWithMetadata.getEncoding()); + Assert.assertEquals(null, fieldWithMetadata.getCollation()); + } + @Test public void testTinyIntAsBit() throws SQLException { VitessConnection conn = getVitessConnection(); @@ -40,6 +139,149 @@ public void testTinyIntAsBit() throws SQLException { Assert.assertEquals(Types.TINYINT, fieldWithMetadata.getJavaType()); } + @Test + public void testNonNumericNotDateTimeRemapping() throws SQLException { + VitessConnection conn = getVitessConnection(); + + Query.Field raw = Query.Field.newBuilder() + .setTable("foo") + .setColumnLength(3) + .setType(Query.Type.VARBINARY) + .setName("foo") + .setOrgName("foo") + .setCharset(/* utf-16 UnicodeBig */35) + .build(); + + FieldWithMetadata fieldWithMetadata = new FieldWithMetadata(conn, raw); + Assert.assertEquals(/* remapped by TEXT special case */Types.VARCHAR, fieldWithMetadata.getJavaType()); + Assert.assertEquals("UTF-16", fieldWithMetadata.getEncoding()); + Assert.assertEquals(false, fieldWithMetadata.isSingleBit()); + + conn.setIncludedFields(Query.ExecuteOptions.IncludedFields.TYPE_AND_NAME); + fieldWithMetadata = new FieldWithMetadata(conn, raw); + Assert.assertEquals(Types.VARBINARY, fieldWithMetadata.getJavaType()); + Assert.assertEquals(null, fieldWithMetadata.getEncoding()); + Assert.assertEquals(false, fieldWithMetadata.isSingleBit()); + + conn = getVitessConnection(); + raw = raw.toBuilder() + .setType(Query.Type.JSON) + 
.setColumnLength(MysqlDefs.LENGTH_LONGBLOB) + .setCharset(CharsetMapping.MYSQL_COLLATION_INDEX_binary) + .build(); + + fieldWithMetadata = new FieldWithMetadata(conn, raw); + Assert.assertEquals(Types.BINARY, fieldWithMetadata.getJavaType()); + Assert.assertEquals("UTF-8", fieldWithMetadata.getEncoding()); + Assert.assertEquals(false, fieldWithMetadata.isSingleBit()); + + conn.setIncludedFields(Query.ExecuteOptions.IncludedFields.TYPE_AND_NAME); + fieldWithMetadata = new FieldWithMetadata(conn, raw); + Assert.assertEquals(Types.BINARY, fieldWithMetadata.getJavaType()); + Assert.assertEquals(null, fieldWithMetadata.getEncoding()); + Assert.assertEquals(false, fieldWithMetadata.isSingleBit()); + + conn = getVitessConnection(); + raw = raw.toBuilder() + .setType(Query.Type.BIT) + .build(); + fieldWithMetadata = new FieldWithMetadata(conn, raw); + Assert.assertEquals(Types.BIT, fieldWithMetadata.getJavaType()); + Assert.assertEquals("ISO-8859-1", fieldWithMetadata.getEncoding()); + Assert.assertEquals(false, fieldWithMetadata.isSingleBit()); + Assert.assertEquals(false, fieldWithMetadata.isBlob()); + Assert.assertEquals(false, fieldWithMetadata.isBinary()); + + conn.setIncludedFields(Query.ExecuteOptions.IncludedFields.TYPE_AND_NAME); + fieldWithMetadata = new FieldWithMetadata(conn, raw); + Assert.assertEquals(Types.BIT, fieldWithMetadata.getJavaType()); + Assert.assertEquals(null, fieldWithMetadata.getEncoding()); + Assert.assertEquals(false, fieldWithMetadata.isSingleBit()); + Assert.assertEquals(false, fieldWithMetadata.isBlob()); + Assert.assertEquals(false, fieldWithMetadata.isBinary()); + + conn = getVitessConnection(); + raw = raw.toBuilder() + .setColumnLength(1) + .build(); + fieldWithMetadata = new FieldWithMetadata(conn, raw); + Assert.assertEquals(Types.BIT, fieldWithMetadata.getJavaType()); + Assert.assertEquals("ISO-8859-1", fieldWithMetadata.getEncoding()); + Assert.assertEquals(true, fieldWithMetadata.isSingleBit()); + Assert.assertEquals(false, 
fieldWithMetadata.isBlob()); + Assert.assertEquals(false, fieldWithMetadata.isBinary()); + + conn.setIncludedFields(Query.ExecuteOptions.IncludedFields.TYPE_AND_NAME); + fieldWithMetadata = new FieldWithMetadata(conn, raw); + Assert.assertEquals(Types.BIT, fieldWithMetadata.getJavaType()); + Assert.assertEquals(null, fieldWithMetadata.getEncoding()); + Assert.assertEquals(false, fieldWithMetadata.isSingleBit()); + Assert.assertEquals(false, fieldWithMetadata.isBlob()); + Assert.assertEquals(false, fieldWithMetadata.isBinary()); + } + + @Test + public void testNumericAndDateTimeEncoding() throws SQLException{ + VitessConnection conn = getVitessConnection(); + + Query.Type[] types = new Query.Type[]{ + Query.Type.INT8, + Query.Type.UINT8, + Query.Type.INT16, + Query.Type.UINT16, + Query.Type.INT24, + Query.Type.UINT24, + Query.Type.INT32, + Query.Type.UINT32, + Query.Type.INT64, + Query.Type.UINT64, + Query.Type.DECIMAL, + Query.Type.UINT24, + Query.Type.INT32, + Query.Type.UINT32, + Query.Type.FLOAT32, + Query.Type.FLOAT64, + Query.Type.DATE, + Query.Type.DATETIME, + Query.Type.TIME, + Query.Type.TIMESTAMP, + Query.Type.YEAR + }; + + + for (Query.Type type : types) { + Query.Field raw = Query.Field.newBuilder() + .setTable("foo") + .setColumnLength(3) + .setType(type) + .setName("foo") + .setOrgName("foo") + .setCharset(/* utf-16 UnicodeBig */35) + .build(); + + FieldWithMetadata fieldWithMetadata = new FieldWithMetadata(conn, raw); + Assert.assertEquals(type.name(),"US-ASCII", fieldWithMetadata.getEncoding()); + Assert.assertEquals(type.name(),false, fieldWithMetadata.isSingleBit()); + } + + conn.setIncludedFields(Query.ExecuteOptions.IncludedFields.TYPE_AND_NAME); + + for (Query.Type type : types) { + Query.Field raw = Query.Field.newBuilder() + .setTable("foo") + .setColumnLength(3) + .setType(type) + .setName("foo") + .setOrgName("foo") + .setCharset(/* utf-16 UnicodeBig */35) + .build(); + + FieldWithMetadata fieldWithMetadata = new FieldWithMetadata(conn, 
raw); + Assert.assertEquals(type.name(),null, fieldWithMetadata.getEncoding()); + Assert.assertEquals(type.name(),false, fieldWithMetadata.isSingleBit()); + } + } + @Test public void testPrecisionAdjustFactor() throws SQLException { VitessConnection conn = getVitessConnection(); @@ -144,6 +386,45 @@ public void testFlags() throws SQLException { } + @Test + public void testOpaqueBinary() throws SQLException { + VitessConnection conn = getVitessConnection(); + + Query.Field raw = Query.Field.newBuilder() + .setTable("foo") + .setColumnLength(3) + .setType(Query.Type.CHAR) + .setName("foo") + .setOrgName("foo") + .setCharset(CharsetMapping.MYSQL_COLLATION_INDEX_binary) + .setFlags(Query.MySqlFlag.BINARY_FLAG_VALUE) + .build(); + + FieldWithMetadata fieldWithMetadata = new FieldWithMetadata(conn, raw); + Assert.assertEquals(true, fieldWithMetadata.isOpaqueBinary()); + + raw = raw.toBuilder() + .setTable("#sql_foo_bar") + .build(); + fieldWithMetadata = new FieldWithMetadata(conn, raw); + Assert.assertEquals(false, fieldWithMetadata.isOpaqueBinary()); + + raw = raw.toBuilder() + .setCharset(/* short circuits collation -> encoding lookup, resulting in null */-1) + .build(); + + fieldWithMetadata = new FieldWithMetadata(conn, raw); + Assert.assertEquals(false, fieldWithMetadata.isOpaqueBinary()); + + conn.setEncoding("binary"); + fieldWithMetadata = new FieldWithMetadata(conn, raw); + Assert.assertEquals(true, fieldWithMetadata.isOpaqueBinary()); + + conn.setIncludedFields(Query.ExecuteOptions.IncludedFields.TYPE_AND_NAME); + fieldWithMetadata = new FieldWithMetadata(conn, raw); + Assert.assertEquals(false, fieldWithMetadata.isOpaqueBinary()); + } + @Test public void testReadOnly() throws SQLException { VitessConnection conn = getVitessConnection(); @@ -253,7 +534,96 @@ public void testToString() throws SQLException { "columnName=foo,originalColumnName=foo," + "vitessType=" + Query.Type.CHAR.toString() + "(1)," + "flags=AUTO_INCREMENT PRIMARY_KEY UNIQUE_KEY BINARY " + - 
"BLOB MULTI_KEY UNSIGNED ZEROFILL"; + "BLOB MULTI_KEY UNSIGNED ZEROFILL, charsetIndex=0, charsetName=null]"; Assert.assertEquals(result, field.toString()); } + + public void testCollations() throws Exception { + VitessConnection conn = getVitessConnection(); + + Query.Field raw = Query.Field.newBuilder() + .setTable("foo") + .setType(Query.Type.CHAR) + .setName("foo") + .setOrgName("foo") + .setCharset(33) + .build(); + + FieldWithMetadata fieldWithMetadata = PowerMockito.spy(new FieldWithMetadata(conn, raw)); + String first = fieldWithMetadata.getCollation(); + String second = fieldWithMetadata.getCollation(); + + Assert.assertEquals("utf8_general_ci", first); + Assert.assertEquals("cached response is same as first", first, second); + + PowerMockito.verifyPrivate(fieldWithMetadata, VerificationModeFactory.times(1)).invoke("getCollationIndex"); + + try { + raw = raw.toBuilder() + // value chosen because it's obviously out of bounds for the underlying array + .setCharset(Integer.MAX_VALUE) + .build(); + + fieldWithMetadata = PowerMockito.spy(new FieldWithMetadata(conn, raw)); + fieldWithMetadata.getCollation(); + Assert.fail("Should have received an array index out of bounds because " + + "charset/collationIndex of Int.MAX is well above size of charset array"); + } catch (SQLException e) { + if (e.getCause() instanceof ArrayIndexOutOfBoundsException) { + Assert.assertEquals("CollationIndex '" + Integer.MAX_VALUE + "' out of bounds for " + + "collationName lookup, should be within 0 and " + + CharsetMapping.COLLATION_INDEX_TO_COLLATION_NAME.length, + e.getMessage()); + } else { + // just rethrow so we fail that way + throw e; + } + } + + PowerMockito.verifyPrivate(fieldWithMetadata, VerificationModeFactory.times(1)).invoke("getCollationIndex"); + //Mockito.verify(fieldWithMetadata, Mockito.times(1)).getCollationIndex(); + + conn.setIncludedFields(Query.ExecuteOptions.IncludedFields.TYPE_AND_NAME); + fieldWithMetadata = PowerMockito.spy(new FieldWithMetadata(conn, 
raw)); + Assert.assertEquals("null response when not including all fields", null, fieldWithMetadata.getCollation()); + + // We should not call this at all, because we're short circuiting due to included fields + //Mockito.verify(fieldWithMetadata, Mockito.never()).getCollationIndex(); + PowerMockito.verifyPrivate(fieldWithMetadata, VerificationModeFactory.times(0)).invoke("getCollationIndex"); + } + + @Test + public void testMaxBytesPerChar() throws Exception { + VitessConnection conn = PowerMockito.spy(getVitessConnection()); + + Query.Field raw = Query.Field.newBuilder() + .setTable("foo") + .setType(Query.Type.CHAR) + .setName("foo") + .setOrgName("foo") + .setCharset(33) + .build(); + + FieldWithMetadata fieldWithMetadata = PowerMockito.spy(new FieldWithMetadata(conn, raw)); + + int first = fieldWithMetadata.getMaxBytesPerCharacter(); + int second = fieldWithMetadata.getMaxBytesPerCharacter(); + + Assert.assertEquals("cached response is same as first", first, second); + PowerMockito.verifyPrivate(fieldWithMetadata, VerificationModeFactory.times(1)).invoke("getCollationIndex"); + + conn.setIncludedFields(Query.ExecuteOptions.IncludedFields.TYPE_AND_NAME); + fieldWithMetadata = PowerMockito.spy(new FieldWithMetadata(conn, raw)); + Assert.assertEquals("0 return value when not including all fields", 0, fieldWithMetadata.getMaxBytesPerCharacter()); + + // We called getMaxBytesPerCharacter 3 times above, but should only have made 1 call to conn.getMaxBytesPerChar: + // first - call conn + // second - returne cached + // third - short circuit because not including all fields + // Will test the actual implementation/return value in VitessConnection + PowerMockito.verifyPrivate(conn, VerificationModeFactory.times(1)).invoke("getMaxBytesPerChar", 33, "UTF-8"); + + // Should not be called at all, because it's new for just this test + PowerMockito.verifyPrivate(fieldWithMetadata, VerificationModeFactory.times(0)).invoke("getCollationIndex"); + } } diff --git 
a/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessConnectionTest.java b/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessConnectionTest.java index aaef46b224d..02ac1338050 100644 --- a/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessConnectionTest.java +++ b/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessConnectionTest.java @@ -1,6 +1,8 @@ package com.flipkart.vitess.jdbc; import com.flipkart.vitess.util.Constants; +import com.flipkart.vitess.util.MysqlDefs; +import com.flipkart.vitess.util.charset.CharsetMapping; import com.google.common.util.concurrent.Futures; import com.youtube.vitess.client.Context; import com.youtube.vitess.client.SQLFuture; @@ -194,12 +196,55 @@ public class VitessConnectionTest extends BaseTest { } @Test public void testPropertiesFromJdbcUrl() throws SQLException { - String url = "jdbc:vitess://locahost:9000/vt_keyspace/keyspace?TABLET_TYPE=replica&includedFields=type_and_name"; + String url = "jdbc:vitess://locahost:9000/vt_keyspace/keyspace?TABLET_TYPE=replica&includedFields=type_and_name&blobsAreStrings=yes"; VitessConnection conn = new VitessConnection(url, new Properties()); // Properties from the url should be passed into the connection properties, and override whatever defaults we've defined Assert.assertEquals(Query.ExecuteOptions.IncludedFields.TYPE_AND_NAME, conn.getIncludedFields()); Assert.assertEquals(false, conn.isIncludeAllFields()); Assert.assertEquals(Topodata.TabletType.REPLICA, conn.getTabletType()); + Assert.assertEquals(true, conn.getBlobsAreStrings()); + } + + @Test public void testGetEncodingForIndex() throws SQLException { + VitessConnection conn = getVitessConnection(); + + // No default encoding configured, and passing NO_CHARSET_INFO basically says "mysql doesn't know" + // which means don't try looking it up + Assert.assertEquals(null, conn.getEncodingForIndex(MysqlDefs.NO_CHARSET_INFO)); + // Similarly, a null index or one landing out of bounds for the charset index should return null 
+ Assert.assertEquals(null, conn.getEncodingForIndex(Integer.MAX_VALUE)); + Assert.assertEquals(null, conn.getEncodingForIndex(-123)); + + // charsetIndex 25 is MYSQL_CHARSET_NAME_greek, which is a charset with multiple names, ISO8859_7 and greek + // Without an encoding configured in the connection, we should return the first (default) encoding for a charset, + // in this case ISO8859_7 + Assert.assertEquals("ISO-8859-7", conn.getEncodingForIndex(25)); + conn.setEncoding("greek"); + // With an encoding configured, we should return that because it matches one of the names for the charset + Assert.assertEquals("greek", conn.getEncodingForIndex(25)); + + conn.setEncoding(null); + Assert.assertEquals("UTF-8", conn.getEncodingForIndex(33)); + Assert.assertEquals("ISO-8859-1", conn.getEncodingForIndex(63)); + + conn.setEncoding("NOT_REAL"); + // Same tests as the first one, but testing that when there is a default configured, it falls back to that regardless + Assert.assertEquals("NOT_REAL", conn.getEncodingForIndex(MysqlDefs.NO_CHARSET_INFO)); + Assert.assertEquals("NOT_REAL", conn.getEncodingForIndex(Integer.MAX_VALUE)); + Assert.assertEquals("NOT_REAL", conn.getEncodingForIndex(-123)); + } + + @Test public void testGetMaxBytesPerChar() throws SQLException { + VitessConnection conn = getVitessConnection(); + + // Default state when no good info is passed in + Assert.assertEquals(0, conn.getMaxBytesPerChar(MysqlDefs.NO_CHARSET_INFO, null)); + // use passed collation index + Assert.assertEquals(3, conn.getMaxBytesPerChar(CharsetMapping.MYSQL_COLLATION_INDEX_utf8, null)); + // use first, if both are passed and valid + Assert.assertEquals(3, conn.getMaxBytesPerChar(CharsetMapping.MYSQL_COLLATION_INDEX_utf8, "UnicodeBig")); + // use passed default charset + Assert.assertEquals(2, conn.getMaxBytesPerChar(MysqlDefs.NO_CHARSET_INFO, "UnicodeBig")); } } diff --git a/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessResultSetMetadataTest.java 
b/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessResultSetMetadataTest.java index 23d5ea3d02a..124c3964a43 100644 --- a/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessResultSetMetadataTest.java +++ b/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessResultSetMetadataTest.java @@ -1,6 +1,7 @@ package com.flipkart.vitess.jdbc; import com.flipkart.vitess.util.Constants; +import com.flipkart.vitess.util.charset.CharsetMapping; import com.youtube.vitess.proto.Query; import org.junit.Assert; @@ -22,49 +23,50 @@ public class VitessResultSetMetadataTest extends BaseTest { private List generateFieldList() { List fieldList = new ArrayList<>(); - fieldList.add(field("col1", "tbl", Query.Type.INT8, 4) + fieldList.add(field("col1", "tbl", Query.Type.INT8, 4, CharsetMapping.MYSQL_COLLATION_INDEX_binary) .setFlags(Query.MySqlFlag.NOT_NULL_FLAG_VALUE).setOrgName("foo").setOrgTable("foo").build()); - fieldList.add(field("col2", "tbl", Query.Type.UINT8, 3).setFlags(Query.MySqlFlag.UNSIGNED_FLAG_VALUE).build()); - fieldList.add(field("col3", "tbl", Query.Type.INT16, 6).build()); - fieldList.add(field("col4", "tbl", Query.Type.UINT16, 5).setFlags(Query.MySqlFlag.UNSIGNED_FLAG_VALUE).build()); - fieldList.add(field("col5", "tbl", Query.Type.INT24, 9).build()); - fieldList.add(field("col6", "tbl", Query.Type.UINT24, 8).setFlags(Query.MySqlFlag.UNSIGNED_FLAG_VALUE).build()); - fieldList.add(field("col7", "tbl", Query.Type.INT32, 11).build()); - fieldList.add(field("col8", "tbl", Query.Type.UINT32, 10).setFlags(Query.MySqlFlag.UNSIGNED_FLAG_VALUE).build()); - fieldList.add(field("col9", "tbl", Query.Type.INT64, 20).build()); - fieldList.add(field("col10", "tbl", Query.Type.UINT64, 20).setFlags(Query.MySqlFlag.UNSIGNED_FLAG_VALUE).build()); - fieldList.add(field("col11", "tbl", Query.Type.FLOAT32, 12).setDecimals(31).build()); - fieldList.add(field("col12", "tbl", Query.Type.FLOAT64, 22).setDecimals(31).build()); - fieldList.add(field("col13", "tbl", 
Query.Type.TIMESTAMP, 10).build()); - fieldList.add(field("col14", "tbl", Query.Type.DATE, 10).build()); - fieldList.add(field("col15", "tbl", Query.Type.TIME, 10).build()); - fieldList.add(field("col16", "tbl", Query.Type.DATETIME, 19).build()); - fieldList.add(field("col17", "tbl", Query.Type.YEAR, 4).build()); - fieldList.add(field("col18", "tbl", Query.Type.DECIMAL, 7).setDecimals(2).build()); - fieldList.add(field("col19", "tbl", Query.Type.TEXT, 765).build()); - fieldList.add(field("col20", "tbl", Query.Type.BLOB, 65535) + fieldList.add(field("col2", "tbl", Query.Type.UINT8, 3, CharsetMapping.MYSQL_COLLATION_INDEX_binary).setFlags(Query.MySqlFlag.UNSIGNED_FLAG_VALUE).build()); + fieldList.add(field("col3", "tbl", Query.Type.INT16, 6, CharsetMapping.MYSQL_COLLATION_INDEX_binary).build()); + fieldList.add(field("col4", "tbl", Query.Type.UINT16, 5, CharsetMapping.MYSQL_COLLATION_INDEX_binary).setFlags(Query.MySqlFlag.UNSIGNED_FLAG_VALUE).build()); + fieldList.add(field("col5", "tbl", Query.Type.INT24, 9, CharsetMapping.MYSQL_COLLATION_INDEX_binary).build()); + fieldList.add(field("col6", "tbl", Query.Type.UINT24, 8, CharsetMapping.MYSQL_COLLATION_INDEX_binary).setFlags(Query.MySqlFlag.UNSIGNED_FLAG_VALUE).build()); + fieldList.add(field("col7", "tbl", Query.Type.INT32, 11, CharsetMapping.MYSQL_COLLATION_INDEX_binary).build()); + fieldList.add(field("col8", "tbl", Query.Type.UINT32, 10, CharsetMapping.MYSQL_COLLATION_INDEX_binary).setFlags(Query.MySqlFlag.UNSIGNED_FLAG_VALUE).build()); + fieldList.add(field("col9", "tbl", Query.Type.INT64, 20, CharsetMapping.MYSQL_COLLATION_INDEX_binary).build()); + fieldList.add(field("col10", "tbl", Query.Type.UINT64, 20, CharsetMapping.MYSQL_COLLATION_INDEX_binary).setFlags(Query.MySqlFlag.UNSIGNED_FLAG_VALUE).build()); + fieldList.add(field("col11", "tbl", Query.Type.FLOAT32, 12, CharsetMapping.MYSQL_COLLATION_INDEX_binary).setDecimals(31).build()); + fieldList.add(field("col12", "tbl", Query.Type.FLOAT64, 22, 
CharsetMapping.MYSQL_COLLATION_INDEX_binary).setDecimals(31).build()); + fieldList.add(field("col13", "tbl", Query.Type.TIMESTAMP, 10, CharsetMapping.MYSQL_COLLATION_INDEX_binary).build()); + fieldList.add(field("col14", "tbl", Query.Type.DATE, 10, CharsetMapping.MYSQL_COLLATION_INDEX_binary).build()); + fieldList.add(field("col15", "tbl", Query.Type.TIME, 10, CharsetMapping.MYSQL_COLLATION_INDEX_binary).build()); + fieldList.add(field("col16", "tbl", Query.Type.DATETIME, 19, CharsetMapping.MYSQL_COLLATION_INDEX_binary).build()); + fieldList.add(field("col17", "tbl", Query.Type.YEAR, 4, CharsetMapping.MYSQL_COLLATION_INDEX_binary).build()); + fieldList.add(field("col18", "tbl", Query.Type.DECIMAL, 7, CharsetMapping.MYSQL_COLLATION_INDEX_binary).setDecimals(2).build()); + fieldList.add(field("col19", "tbl", Query.Type.TEXT, 765, /* utf8_bin -- not case insensitive */ 83).build()); + fieldList.add(field("col20", "tbl", Query.Type.BLOB, 65535, CharsetMapping.MYSQL_COLLATION_INDEX_binary) .setFlags(Query.MySqlFlag.BINARY_FLAG_VALUE | Query.MySqlFlag.BLOB_FLAG_VALUE) .setDecimals(/* this is set to facilitate testing of getScale, since this is non-numeric */2).build()); - fieldList.add(field("col21", "tbl", Query.Type.VARCHAR, 768).build()); - fieldList.add(field("col22", "tbl", Query.Type.VARBINARY, 256).setFlags(Query.MySqlFlag.BINARY_FLAG_VALUE).build()); - fieldList.add(field("col23", "tbl", Query.Type.CHAR, 48).build()); - fieldList.add(field("col24", "tbl", Query.Type.BINARY, 4).setFlags(Query.MySqlFlag.BINARY_FLAG_VALUE).build()); - fieldList.add(field("col25", "tbl", Query.Type.BIT, 8).build()); - fieldList.add(field("col26", "tbl", Query.Type.ENUM, 3).build()); - fieldList.add(field("col27", "tbl", Query.Type.SET, 9).build()); - fieldList.add(field("col28", "tbl", Query.Type.TUPLE, 0).build()); - fieldList.add(field("col29", "tbl", Query.Type.VARBINARY, 256).setFlags(Query.MySqlFlag.BINARY_FLAG_VALUE).build()); - fieldList.add(field("col30", "tbl", 
Query.Type.BLOB, 65535) + fieldList.add(field("col21", "tbl", Query.Type.VARCHAR, 768, CharsetMapping.MYSQL_COLLATION_INDEX_utf8).build()); + fieldList.add(field("col22", "tbl", Query.Type.VARBINARY, 256, CharsetMapping.MYSQL_COLLATION_INDEX_binary).setFlags(Query.MySqlFlag.BINARY_FLAG_VALUE).build()); + fieldList.add(field("col23", "tbl", Query.Type.CHAR, 48, CharsetMapping.MYSQL_COLLATION_INDEX_utf8).build()); + fieldList.add(field("col24", "tbl", Query.Type.BINARY, 4, CharsetMapping.MYSQL_COLLATION_INDEX_binary).setFlags(Query.MySqlFlag.BINARY_FLAG_VALUE).build()); + fieldList.add(field("col25", "tbl", Query.Type.BIT, 8, CharsetMapping.MYSQL_COLLATION_INDEX_binary).build()); + fieldList.add(field("col26", "tbl", Query.Type.ENUM, 3, CharsetMapping.MYSQL_COLLATION_INDEX_utf8).build()); + fieldList.add(field("col27", "tbl", Query.Type.SET, 9, CharsetMapping.MYSQL_COLLATION_INDEX_utf8).build()); + fieldList.add(field("col28", "tbl", Query.Type.TUPLE, 0, CharsetMapping.MYSQL_COLLATION_INDEX_utf8).build()); + fieldList.add(field("col29", "tbl", Query.Type.VARBINARY, 256, CharsetMapping.MYSQL_COLLATION_INDEX_binary).setFlags(Query.MySqlFlag.BINARY_FLAG_VALUE).build()); + fieldList.add(field("col30", "tbl", Query.Type.BLOB, 65535, CharsetMapping.MYSQL_COLLATION_INDEX_utf8) .setFlags(Query.MySqlFlag.BLOB_FLAG_VALUE).build()); return fieldList; } - private Query.Field.Builder field(String name, String table, Query.Type type, int length) { + private Query.Field.Builder field(String name, String table, Query.Type type, int length, int charset) { return Query.Field.newBuilder() .setName(name) .setTable(table) .setType(type) - .setColumnLength(length); + .setColumnLength(length) + .setCharset(charset); } private void initializeFieldList(VitessConnection connection) throws SQLException { @@ -159,7 +161,7 @@ public List getFieldList(VitessConnection conn) throws SQLExc Assert.assertEquals("SMALLINT", Types.SMALLINT, vitessResultSetMetadata.getColumnType(17)); 
Assert.assertEquals("DECIMAL", Types.DECIMAL, vitessResultSetMetadata.getColumnType(18)); Assert.assertEquals("VARCHAR", Types.VARCHAR, vitessResultSetMetadata.getColumnType(19)); - Assert.assertEquals("BLOB", Types.BLOB, vitessResultSetMetadata.getColumnType(20)); + Assert.assertEquals("LONGVARBINARY", Types.LONGVARBINARY, vitessResultSetMetadata.getColumnType(20)); Assert.assertEquals("VARCHAR", Types.VARCHAR, vitessResultSetMetadata.getColumnType(21)); Assert.assertEquals("VARBINARY", Types.VARBINARY, vitessResultSetMetadata.getColumnType(22)); Assert.assertEquals("CHAR", Types.CHAR, vitessResultSetMetadata.getColumnType(23)); @@ -168,7 +170,7 @@ public List getFieldList(VitessConnection conn) throws SQLExc Assert.assertEquals("CHAR", Types.CHAR, vitessResultSetMetadata.getColumnType(26)); Assert.assertEquals("CHAR", Types.CHAR, vitessResultSetMetadata.getColumnType(27)); Assert.assertEquals("VARBINARY", Types.VARBINARY, vitessResultSetMetadata.getColumnType(29)); - Assert.assertEquals("BLOB", Types.BLOB, vitessResultSetMetadata.getColumnType(30)); + Assert.assertEquals("LONGVARCHAR", Types.LONGVARCHAR, vitessResultSetMetadata.getColumnType(30)); try { int type = vitessResultSetMetadata.getColumnType(28); } catch (SQLException ex) { @@ -252,9 +254,9 @@ public List getFieldList(VitessConnection conn) throws SQLExc VitessResultSetMetaData vitessResultSetMetaData = new VitessResultSetMetaData(fieldList); Assert.assertEquals(vitessResultSetMetaData.getSchemaName(1), ""); Assert.assertEquals(vitessResultSetMetaData.getCatalogName(1), ""); - Assert.assertEquals(vitessResultSetMetaData.getPrecision(1), 0); + Assert.assertEquals(vitessResultSetMetaData.getPrecision(1), 3); Assert.assertEquals(vitessResultSetMetaData.getScale(1), 0); - Assert.assertEquals(vitessResultSetMetaData.getColumnDisplaySize(1), 0); + Assert.assertEquals(vitessResultSetMetaData.getColumnDisplaySize(1), 4); Assert.assertEquals(vitessResultSetMetaData.isCurrency(1), false); } @@ -282,9 +284,20 @@ 
public List getFieldList(VitessConnection conn) throws SQLExc Assert.assertEquals("datetime case sensitivity", false, md.isCaseSensitive(16)); Assert.assertEquals("year case sensitivity", false, md.isCaseSensitive(17)); Assert.assertEquals("decimal case sensitivity", false, md.isCaseSensitive(18)); - for (int i = 18; i < fieldList.size(); i++) { - Assert.assertEquals(fieldList.get(i).getName() + " - non-numeric case insensitive", i != 24, md.isCaseSensitive(i + 1)); - } + + // These are handled on a case-by-case basis + Assert.assertEquals("text cases sensitivity", /* due to binary */true, md.isCaseSensitive(19)); + Assert.assertEquals("blob case sensitivity", /* due to binary */true, md.isCaseSensitive(20)); + Assert.assertEquals("varchar case sensitivity", /* due to utf-8_ci */ false, md.isCaseSensitive(21)); + Assert.assertEquals("varbinary case sensitivity", /* due to binary */true, md.isCaseSensitive(22)); + Assert.assertEquals("char case sensitivity", /* due to utf-8_ci */ false, md.isCaseSensitive(23)); + Assert.assertEquals("binary case sensitivity", /* due to binary */true, md.isCaseSensitive(24)); + Assert.assertEquals("bit case sensitivity", /* due to numeric type */false, md.isCaseSensitive(25)); + Assert.assertEquals("enum case sensitivity", /* due to utf-8_ci */ false, md.isCaseSensitive(26)); + Assert.assertEquals("set case sensitivity", /* due to utf-8_ci, SET == CHAR */ false, md.isCaseSensitive(27)); + Assert.assertEquals("tuple case sensitivity", /* due to default case */ true, md.isCaseSensitive(28)); + Assert.assertEquals("varbinary case sensitivity", /* due to binary */ true, md.isCaseSensitive(29)); + Assert.assertEquals("text cases sensitivity", /* due to utf8_bin (not case insensitive) encoding */false, md.isCaseSensitive(30)); connection.setIncludedFields(Query.ExecuteOptions.IncludedFields.TYPE_AND_NAME); // with limited included fields, we can really only know about numeric types -- those should return false. 
the rest should return true @@ -308,6 +321,89 @@ public List getFieldList(VitessConnection conn) throws SQLExc } } + @Test public void testDisplaySize() throws SQLException { + VitessConnection conn = getVitessConnection(); + List fieldList = getFieldList(conn); + VitessResultSetMetaData md = new VitessResultSetMetaData(fieldList); + + Assert.assertEquals("int8 display size", 4, md.getColumnDisplaySize(1)); + Assert.assertEquals("uint8 display size", 3, md.getColumnDisplaySize(2)); + Assert.assertEquals("int16 display size", 6, md.getColumnDisplaySize(3)); + Assert.assertEquals("uint16 display size", 5, md.getColumnDisplaySize(4)); + Assert.assertEquals("int24 display size", 9, md.getColumnDisplaySize(5)); + Assert.assertEquals("uint24 display size", 8, md.getColumnDisplaySize(6)); + Assert.assertEquals("int32 display size", 11, md.getColumnDisplaySize(7)); + Assert.assertEquals("uint32 display size", 10, md.getColumnDisplaySize(8)); + Assert.assertEquals("int64 display size", 20, md.getColumnDisplaySize(9)); + // unsigned long gets an extra digit of precision over signed, so display sizes are the same + Assert.assertEquals("uint64 display size", 20, md.getColumnDisplaySize(10)); + Assert.assertEquals("float32 display size", 12, md.getColumnDisplaySize(11)); + Assert.assertEquals("float64 display size", 22, md.getColumnDisplaySize(12)); + Assert.assertEquals("timestamp display size", 10, md.getColumnDisplaySize(13)); + Assert.assertEquals("date display size", 10, md.getColumnDisplaySize(14)); + Assert.assertEquals("time display size", 10, md.getColumnDisplaySize(15)); + Assert.assertEquals("datetime display size", 19, md.getColumnDisplaySize(16)); + Assert.assertEquals("year display size", 4, md.getColumnDisplaySize(17)); + Assert.assertEquals("decimal display size", 7, md.getColumnDisplaySize(18)); + Assert.assertEquals("text display size", 255, md.getColumnDisplaySize(19)); + Assert.assertEquals("blob display size", 65535, md.getColumnDisplaySize(20)); + 
Assert.assertEquals("varchar display size", 256, md.getColumnDisplaySize(21)); + Assert.assertEquals("varbinary display size", 256, md.getColumnDisplaySize(22)); + Assert.assertEquals("char display size", 16, md.getColumnDisplaySize(23)); + Assert.assertEquals("binary display size", 4, md.getColumnDisplaySize(24)); + Assert.assertEquals("bit display size", 8, md.getColumnDisplaySize(25)); + Assert.assertEquals("enum display size", 1, md.getColumnDisplaySize(26)); + Assert.assertEquals("set display size", 3, md.getColumnDisplaySize(27)); + Assert.assertEquals("tuple display size", 0, md.getColumnDisplaySize(28)); + Assert.assertEquals("varbinary display size", 256, md.getColumnDisplaySize(29)); + + conn.setIncludedFields(Query.ExecuteOptions.IncludedFields.TYPE_AND_NAME); + for (int i = 0; i < fieldList.size(); i++) { + Assert.assertEquals(fieldList.get(i).getName() + " - getColumnDisplaySize is 0 for all when lack of included fields", 0, md.getColumnDisplaySize(i + 1)); + } + } + + @Test public void testGetPrecision() throws SQLException { + VitessConnection conn = getVitessConnection(); + List fieldList = getFieldList(conn); + VitessResultSetMetaData md = new VitessResultSetMetaData(fieldList); + + Assert.assertEquals("int8 precision", 3, md.getPrecision(1)); + Assert.assertEquals("uint8 precision", 3, md.getPrecision(2)); + Assert.assertEquals("int16 precision", 5, md.getPrecision(3)); + Assert.assertEquals("uint16 precision", 5, md.getPrecision(4)); + Assert.assertEquals("int24 precision", 8, md.getPrecision(5)); + Assert.assertEquals("uint24 precision", 8, md.getPrecision(6)); + Assert.assertEquals("int32 precision", 10, md.getPrecision(7)); + Assert.assertEquals("uint32 precision", 10, md.getPrecision(8)); + Assert.assertEquals("int64 precision", 19, md.getPrecision(9)); + Assert.assertEquals("uint64 precision", 20, md.getPrecision(10)); + Assert.assertEquals("float32 precision", 12, md.getPrecision(11)); + Assert.assertEquals("float64 precision", 22, 
md.getPrecision(12)); + Assert.assertEquals("timestamp precision", 10, md.getPrecision(13)); + Assert.assertEquals("date precision", 10, md.getPrecision(14)); + Assert.assertEquals("time precision", 10, md.getPrecision(15)); + Assert.assertEquals("datetime precision", 19, md.getPrecision(16)); + Assert.assertEquals("year precision", 4, md.getPrecision(17)); + Assert.assertEquals("decimal precision", 5, md.getPrecision(18)); // 7 - decimal - sign + Assert.assertEquals("text precision", 255, md.getPrecision(19)); + Assert.assertEquals("blob precision", 65535, md.getPrecision(20)); + Assert.assertEquals("varchar precision", 256, md.getPrecision(21)); + Assert.assertEquals("varbinary precision", 256, md.getPrecision(22)); + Assert.assertEquals("char precision", 16, md.getPrecision(23)); + Assert.assertEquals("binary precision", 4, md.getPrecision(24)); + Assert.assertEquals("bit precision", 8, md.getPrecision(25)); + Assert.assertEquals("enum precision", 1, md.getPrecision(26)); + Assert.assertEquals("set precision", 3, md.getPrecision(27)); + Assert.assertEquals("tuple precision", 0, md.getPrecision(28)); + Assert.assertEquals("varbinary precision", 256, md.getPrecision(29)); + + conn.setIncludedFields(Query.ExecuteOptions.IncludedFields.TYPE_AND_NAME); + for (int i = 0; i < fieldList.size(); i++) { + Assert.assertEquals(fieldList.get(i).getName() + " - getPrecision is 0 for all when lack of included fields", 0, md.getPrecision(i + 1)); + } + } + @Test public void testGetScale() throws SQLException { VitessConnection conn = getVitessConnection(); List fieldList = getFieldList(conn); @@ -372,7 +468,7 @@ public List getFieldList(VitessConnection conn) throws SQLExc Assert.assertEquals("java.sql.Date", md.getColumnClassName(17)); Assert.assertEquals("java.math.BigDecimal", md.getColumnClassName(18)); Assert.assertEquals("java.lang.String", md.getColumnClassName(19)); - Assert.assertEquals("java.lang.Object", md.getColumnClassName(20)); + Assert.assertEquals("[B", 
md.getColumnClassName(20)); Assert.assertEquals("java.lang.String", md.getColumnClassName(21)); Assert.assertEquals("[B", md.getColumnClassName(22)); Assert.assertEquals("java.lang.String", md.getColumnClassName(23)); diff --git a/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessResultSetTest.java b/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessResultSetTest.java index 69d6e1f6cc5..27edd80549c 100644 --- a/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessResultSetTest.java +++ b/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessResultSetTest.java @@ -1,5 +1,8 @@ package com.flipkart.vitess.jdbc; +import com.flipkart.vitess.util.MysqlDefs; +import com.flipkart.vitess.util.StringUtils; +import com.flipkart.vitess.util.charset.CharsetMapping; import com.google.protobuf.ByteString; import com.youtube.vitess.client.cursor.Cursor; import com.youtube.vitess.client.cursor.SimpleCursor; @@ -7,6 +10,12 @@ import org.junit.Assert; import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Matchers; +import org.mockito.internal.verification.VerificationModeFactory; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; import java.io.UnsupportedEncodingException; import java.math.BigDecimal; @@ -18,6 +27,8 @@ /** * Created by harshit.gangal on 19/01/16. 
*/ +@RunWith(PowerMockRunner.class) +@PrepareForTest(VitessResultSet.class) public class VitessResultSetTest extends BaseTest { public Cursor getCursorWithRows() { @@ -474,4 +485,220 @@ public Cursor getCursorWithRowsAsNull() { VitessResultSet vitessResultSet = new VitessResultSet(cursor, getVitessStatement()); Assert.assertEquals(cursor.getFields().size(), vitessResultSet.getFields().size()); } + + @Test public void testGetStringUsesEncoding() throws Exception { + VitessConnection conn = getVitessConnection(); + VitessResultSet resultOne = PowerMockito.spy(new VitessResultSet(getCursorWithRows(), new VitessStatement(conn))); + resultOne.next(); + // test all ways to get to convertBytesToString + + // Verify that we're going through convertBytesToString for column types that return bytes (string-like), + // but not for those that return a real object + resultOne.getString("col21"); // is a string, should go through convert bytes + resultOne.getString("col13"); // is a datetime, should not + PowerMockito.verifyPrivate(resultOne, VerificationModeFactory.times(1)).invoke("convertBytesToString", Matchers.any(byte[].class), Matchers.anyString()); + + conn.setIncludedFields(Query.ExecuteOptions.IncludedFields.TYPE_AND_NAME); + VitessResultSet resultTwo = PowerMockito.spy(new VitessResultSet(getCursorWithRows(), new VitessStatement(conn))); + resultTwo.next(); + + // neither of these should go through convertBytesToString, because we didn't include all fields + resultTwo.getString("col21"); + resultTwo.getString("col13"); + PowerMockito.verifyPrivate(resultTwo, VerificationModeFactory.times(0)).invoke("convertBytesToString", Matchers.any(byte[].class), Matchers.anyString()); + } + + @Test public void testGetObjectForBitValues() throws Exception { + VitessConnection conn = getVitessConnection(); + + ByteString.Output value = ByteString.newOutput(); + value.write(new byte[] {1}); + value.write(new byte[] {0}); + value.write(new byte[] {1,2,3,4}); + + Query.QueryResult 
result = Query.QueryResult.newBuilder() + .addFields(Query.Field.newBuilder().setName("col1").setColumnLength(1).setType(Query.Type.BIT)) + .addFields(Query.Field.newBuilder().setName("col2").setColumnLength(1).setType(Query.Type.BIT)) + .addFields(Query.Field.newBuilder().setName("col3").setColumnLength(4).setType(Query.Type.BIT)) + .addRows(Query.Row.newBuilder() + .addLengths(1) + .addLengths(1) + .addLengths(4) + .setValues(value.toByteString())) + .build(); + + VitessResultSet vitessResultSet = PowerMockito.spy(new VitessResultSet(new SimpleCursor(result), new VitessStatement(conn))); + vitessResultSet.next(); + + Assert.assertEquals(true, vitessResultSet.getObject(1)); + Assert.assertEquals(false, vitessResultSet.getObject(2)); + Assert.assertArrayEquals(new byte[] {1,2,3,4}, (byte[]) vitessResultSet.getObject(3)); + + PowerMockito.verifyPrivate(vitessResultSet, VerificationModeFactory.times(3)).invoke("convertBytesIfPossible", Matchers.any(byte[].class), Matchers.any(FieldWithMetadata.class)); + + conn.setIncludedFields(Query.ExecuteOptions.IncludedFields.TYPE_AND_NAME); + vitessResultSet = PowerMockito.spy(new VitessResultSet(new SimpleCursor(result), new VitessStatement(conn))); + vitessResultSet.next(); + + Assert.assertArrayEquals(new byte[] { 1 }, (byte[]) vitessResultSet.getObject(1)); + Assert.assertArrayEquals(new byte[] { 0 }, (byte[]) vitessResultSet.getObject(2)); + Assert.assertArrayEquals(new byte[] {1,2,3,4}, (byte[]) vitessResultSet.getObject(3)); + + PowerMockito.verifyPrivate(vitessResultSet, VerificationModeFactory.times(0)).invoke("convertBytesIfPossible", Matchers.any(byte[].class), Matchers.any(FieldWithMetadata.class)); + } + + @Test public void testGetObjectForVarBinLikeValues() throws Exception { + VitessConnection conn = getVitessConnection(); + + ByteString.Output value = ByteString.newOutput(); + + byte[] binary = new byte[] {1,2,3,4}; + byte[] varbinary = new byte[] {1,2,3,4,5,6,7,8,9,10,11,12,13}; + byte[] blob = new 
byte[MysqlDefs.LENGTH_BLOB]; + for (int i = 0; i < blob.length; i++) { + blob[i] = 1; + } + byte[] fakeGeometry = new byte[] {2,3,4}; + + value.write(binary); + value.write(varbinary); + value.write(blob); + value.write(fakeGeometry); + + Query.QueryResult result = Query.QueryResult.newBuilder() + .addFields(Query.Field.newBuilder().setName("col1") + .setColumnLength(4) + .setCharset(CharsetMapping.MYSQL_COLLATION_INDEX_binary) + .setType(Query.Type.BINARY) + .setFlags(Query.MySqlFlag.BINARY_FLAG_VALUE)) + .addFields(Query.Field.newBuilder().setName("col2") + .setColumnLength(13) + .setCharset(CharsetMapping.MYSQL_COLLATION_INDEX_binary) + .setType(Query.Type.VARBINARY) + .setFlags(Query.MySqlFlag.BINARY_FLAG_VALUE)) + .addFields(Query.Field.newBuilder().setName("col3") // should go to LONGVARBINARY due to below settings + .setColumnLength(MysqlDefs.LENGTH_BLOB) + .setCharset(CharsetMapping.MYSQL_COLLATION_INDEX_binary) + .setFlags(Query.MySqlFlag.BINARY_FLAG_VALUE) + .setType(Query.Type.BLOB)) + .addFields(Query.Field.newBuilder().setName("col4") + .setType(Query.Type.GEOMETRY) + .setCharset(CharsetMapping.MYSQL_COLLATION_INDEX_binary) + .setType(Query.Type.BINARY) + .setFlags(Query.MySqlFlag.BINARY_FLAG_VALUE)) + .addRows(Query.Row.newBuilder() + .addLengths(4) + .addLengths(13) + .addLengths(MysqlDefs.LENGTH_BLOB) + .addLengths(3) + .setValues(value.toByteString())) + .build(); + + VitessResultSet vitessResultSet = PowerMockito.spy(new VitessResultSet(new SimpleCursor(result), new VitessStatement(conn))); + vitessResultSet.next(); + + // All of these types should pass straight through, returning the direct bytes + Assert.assertArrayEquals(binary, (byte[]) vitessResultSet.getObject(1)); + Assert.assertArrayEquals(varbinary, (byte[]) vitessResultSet.getObject(2)); + Assert.assertArrayEquals(blob, (byte[]) vitessResultSet.getObject(3)); + Assert.assertArrayEquals(fakeGeometry, (byte[]) vitessResultSet.getObject(4)); + + // We should still call the function 4 times 
+ PowerMockito.verifyPrivate(vitessResultSet, VerificationModeFactory.times(4)).invoke("convertBytesIfPossible", Matchers.any(byte[].class), Matchers.any(FieldWithMetadata.class)); + + conn.setIncludedFields(Query.ExecuteOptions.IncludedFields.TYPE_AND_NAME); + vitessResultSet = PowerMockito.spy(new VitessResultSet(new SimpleCursor(result), new VitessStatement(conn))); + vitessResultSet.next(); + + // Same as above since this doesn't really do much but pass right through for the varbinary type + Assert.assertArrayEquals(binary, (byte[]) vitessResultSet.getObject(1)); + Assert.assertArrayEquals(varbinary, (byte[]) vitessResultSet.getObject(2)); + Assert.assertArrayEquals(blob, (byte[]) vitessResultSet.getObject(3)); + Assert.assertArrayEquals(fakeGeometry, (byte[]) vitessResultSet.getObject(4)); + + // Never call because not including all + PowerMockito.verifyPrivate(vitessResultSet, VerificationModeFactory.times(0)).invoke("convertBytesIfPossible", Matchers.any(byte[].class), Matchers.any(FieldWithMetadata.class)); + } + + @Test public void testGetObjectForStringLikeValues() throws Exception { + ByteString.Output value = ByteString.newOutput(); + + String trimmedCharStr = "wasting space"; + String varcharStr = "i have a variable length!"; + String masqueradingBlobStr = "look at me, im a blob"; + String textStr = "an enthralling string of TEXT in some foreign language"; + + int paddedCharColLength = 20; + byte[] trimmedChar = StringUtils.getBytes(trimmedCharStr, "UTF-16"); + byte[] varchar = StringUtils.getBytes(varcharStr, "UTF-8"); + byte[] masqueradingBlob = StringUtils.getBytes(masqueradingBlobStr, "US-ASCII"); + byte[] text = StringUtils.getBytes(textStr, "ISO8859_8"); + byte[] opaqueBinary = new byte[] { 1,2,3,4,5,6,7,8,9}; + + value.write(trimmedChar); + value.write(varchar); + value.write(opaqueBinary); + value.write(masqueradingBlob); + value.write(text); + + Query.QueryResult result = Query.QueryResult.newBuilder() + // This tests CHAR + 
.addFields(Query.Field.newBuilder().setName("col1") + .setColumnLength(paddedCharColLength) + .setCharset(/* utf-16 collation index from CharsetMapping */ 54) + .setType(Query.Type.CHAR)) + // This tests VARCHAR + .addFields(Query.Field.newBuilder().setName("col2") + .setColumnLength(varchar.length) + .setCharset(CharsetMapping.MYSQL_COLLATION_INDEX_utf8) + .setType(Query.Type.VARCHAR)) + // This tests VARCHAR that is an opaque binary + .addFields(Query.Field.newBuilder().setName("col2") + .setColumnLength(opaqueBinary.length) + .setCharset(CharsetMapping.MYSQL_COLLATION_INDEX_binary) + .setFlags(Query.MySqlFlag.BINARY_FLAG_VALUE) + .setType(Query.Type.VARCHAR)) + // This tests LONGVARCHAR + .addFields(Query.Field.newBuilder().setName("col3") + .setColumnLength(masqueradingBlob.length) + .setCharset(/* us-ascii collation index from CharsetMapping */11) + .setType(Query.Type.BLOB)) + // This tests TEXT, which falls through the default case of the switch + .addFields(Query.Field.newBuilder().setName("col4") + .setColumnLength(text.length) + .setCharset(/* corresponds to greek, from CharsetMapping */25) + .setType(Query.Type.TEXT)) + .addRows(Query.Row.newBuilder() + .addLengths(trimmedChar.length) + .addLengths(varchar.length) + .addLengths(opaqueBinary.length) + .addLengths(masqueradingBlob.length) + .addLengths(text.length) + .setValues(value.toByteString())) + .build(); + + VitessConnection conn = getVitessConnection(); + VitessResultSet vitessResultSet = PowerMockito.spy(new VitessResultSet(new SimpleCursor(result), new VitessStatement(conn))); + vitessResultSet.next(); + + Assert.assertEquals(trimmedCharStr, vitessResultSet.getObject(1)); + Assert.assertEquals(varcharStr, vitessResultSet.getObject(2)); + Assert.assertArrayEquals(opaqueBinary, (byte[]) vitessResultSet.getObject(3)); + Assert.assertEquals(masqueradingBlobStr, vitessResultSet.getObject(4)); + Assert.assertEquals(textStr, vitessResultSet.getObject(5)); + + PowerMockito.verifyPrivate(vitessResultSet, 
VerificationModeFactory.times(5)).invoke("convertBytesIfPossible", Matchers.any(byte[].class), Matchers.any(FieldWithMetadata.class)); + + conn.setIncludedFields(Query.ExecuteOptions.IncludedFields.TYPE_AND_NAME); + vitessResultSet = PowerMockito.spy(new VitessResultSet(new SimpleCursor(result), new VitessStatement(conn))); + vitessResultSet.next(); + + Assert.assertArrayEquals(trimmedChar, (byte[]) vitessResultSet.getObject(1)); + Assert.assertArrayEquals(varchar, (byte[]) vitessResultSet.getObject(2)); + Assert.assertArrayEquals(opaqueBinary, (byte[]) vitessResultSet.getObject(3)); + Assert.assertArrayEquals(masqueradingBlob, (byte[]) vitessResultSet.getObject(4)); + Assert.assertArrayEquals(text, (byte[]) vitessResultSet.getObject(5)); + + PowerMockito.verifyPrivate(vitessResultSet, VerificationModeFactory.times(0)).invoke("convertBytesIfPossible", Matchers.any(byte[].class), Matchers.any(FieldWithMetadata.class)); + } } From 16bc66f2c428acc9527f8f51b154608efb5c4a07 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Mon, 27 Feb 2017 09:40:36 -0800 Subject: [PATCH 047/108] Linking mysql connector user to callerid. Also adding an end-to-end test to validate the connector and the caller id. --- go/mysqlconn/conn.go | 5 + go/vt/callerid/callerid.go | 4 +- go/vt/vtgate/plugin_mysql_server.go | 24 +++- test/config.json | 9 ++ test/mysql_server_test.py | 163 ++++++++++++++++++++++++++++ test/utils.py | 7 +- 6 files changed, 205 insertions(+), 7 deletions(-) create mode 100755 test/mysql_server_test.py diff --git a/go/mysqlconn/conn.go b/go/mysqlconn/conn.go index 1ccd040f950..3757095238a 100644 --- a/go/mysqlconn/conn.go +++ b/go/mysqlconn/conn.go @@ -505,6 +505,11 @@ func (c *Conn) writeComQuit() error { return nil } +// RemoteAddr returns the underlying socket RemoteAddr(). +func (c *Conn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + // Close closes the connection. It can be called from a different go // routine to interrupt the current connection. 
func (c *Conn) Close() { diff --git a/go/vt/callerid/callerid.go b/go/vt/callerid/callerid.go index 799d660369f..d3677ee9310 100644 --- a/go/vt/callerid/callerid.go +++ b/go/vt/callerid/callerid.go @@ -18,7 +18,7 @@ type callerIDKey int var ( // internal Context key for immediate CallerID - immediateCallerIDKey callerIDKey = 0 + immediateCallerIDKey callerIDKey // internal Context key for effective CallerID effectiveCallerIDKey callerIDKey = 1 ) @@ -65,7 +65,7 @@ func GetComponent(ef *vtrpcpb.CallerID) string { return ef.Component } -// GetSubcomponent returns a component inisde the process of effective caller, +// GetSubcomponent returns a component inside the process of effective caller, // which is responsible for generating this request. Suggested values are a // servlet name or an API endpoint name. func GetSubcomponent(ef *vtrpcpb.CallerID) string { diff --git a/go/vt/vtgate/plugin_mysql_server.go b/go/vt/vtgate/plugin_mysql_server.go index 5270025e21f..fd9973c0d30 100644 --- a/go/vt/vtgate/plugin_mysql_server.go +++ b/go/vt/vtgate/plugin_mysql_server.go @@ -12,6 +12,7 @@ import ( "github.com/youtube/vitess/go/mysqlconn" "github.com/youtube/vitess/go/sqldb" "github.com/youtube/vitess/go/sqltypes" + "github.com/youtube/vitess/go/vt/callerid" "github.com/youtube/vitess/go/vt/servenv" querypb "github.com/youtube/vitess/go/vt/proto/query" @@ -124,12 +125,24 @@ func (vh *vtgateHandler) rollback(ctx context.Context, c *mysqlconn.Conn) (*sqlt } func (vh *vtgateHandler) ComQuery(c *mysqlconn.Conn, query string) (*sqltypes.Result, error) { - // FIXME(alainjobart): do something better for context. - // Include some kind of callerid reference, using the - // authenticated user. - // Add some kind of timeout too. + // FIXME(alainjobart): Add some kind of timeout to the context. ctx := context.Background() + // Fill in the ImmediateCallerID with the UserData returned by + // the AuthServer plugin for that user. If nothing was + // returned, use the User. 
This lets the plugin map a MySQL + // user used for authentication to a Vitess User used for + // Table ACLs and Vitess authentication in general. + im := callerid.NewImmediateCallerID(c.UserData) + if c.UserData == "" { + im.Username = c.User + } + ef := callerid.NewEffectiveCallerID( + c.User, /* principal: who */ + c.RemoteAddr().String(), /* component: running client process */ + "VTGate MySQL Connector" /* subcomponent: part of the client */) + ctx = callerid.NewContext(ctx, ef, im) + // FIXME(alainjobart) would be good to have the parser understand this. switch { case strings.EqualFold(query, "begin"): @@ -138,6 +151,9 @@ func (vh *vtgateHandler) ComQuery(c *mysqlconn.Conn, query string) (*sqltypes.Re return vh.commit(ctx, c) case strings.EqualFold(query, "rollback"): return vh.rollback(ctx, c) + case strings.EqualFold(query, "set autocommit=0"): + // This is done by the python MySQL connector, we ignore it. + return &sqltypes.Result{}, nil default: // Grab the current session, if any. var session *vtgatepb.Session diff --git a/test/config.json b/test/config.json index 451806c004e..c41f9939a53 100644 --- a/test/config.json +++ b/test/config.json @@ -219,6 +219,15 @@ "RetryMax": 0, "Tags": [] }, + "mysql_server": { + "File": "mysql_server_test.py", + "Args": [], + "Command": [], + "Manual": false, + "Shard": 1, + "RetryMax": 0, + "Tags": [] + }, "mysqlctl": { "File": "mysqlctl.py", "Args": [], diff --git a/test/mysql_server_test.py b/test/mysql_server_test.py new file mode 100755 index 00000000000..7b1e4f33ddf --- /dev/null +++ b/test/mysql_server_test.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python +# +# Copyright 2013, Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can +# be found in the LICENSE file. + +"""Ensures the vtgate MySQL server protocol plugin works as expected. + +We use table ACLs to verify the user name authenticated by the connector is +set properly. 
+""" + + +import unittest + +import MySQLdb + +import environment +import utils +import tablet + +# single shard / 2 tablets +shard_0_master = tablet.Tablet() +shard_0_slave = tablet.Tablet() + +table_acl_config = environment.tmproot + '/table_acl_config.json' +mysql_auth_server_config = (environment.tmproot + + '/mysql_auth_server_config.json') + + +def setUpModule(): + try: + environment.topo_server().setup() + + # setup all processes + setup_procs = [ + shard_0_master.init_mysql(), + shard_0_slave.init_mysql(), + ] + utils.wait_procs(setup_procs) + + utils.run_vtctl(['CreateKeyspace', 'test_keyspace']) + + shard_0_master.init_tablet('replica', 'test_keyspace', '0') + shard_0_slave.init_tablet('replica', 'test_keyspace', '0') + + # create databases so vttablet can start behaving normally + shard_0_master.create_db('vt_test_keyspace') + shard_0_slave.create_db('vt_test_keyspace') + + except: + tearDownModule() + raise + + +def tearDownModule(): + utils.required_teardown() + if utils.options.skip_teardown: + return + + shard_0_master.kill_vttablet() + shard_0_slave.kill_vttablet() + + teardown_procs = [ + shard_0_master.teardown_mysql(), + shard_0_slave.teardown_mysql(), + ] + utils.wait_procs(teardown_procs, raise_on_error=False) + + environment.topo_server().teardown() + utils.kill_sub_processes() + utils.remove_tmp_files() + + shard_0_master.remove_tree() + shard_0_slave.remove_tree() + + +create_vt_insert_test = '''create table vt_insert_test ( +id bigint auto_increment, +msg varchar(64), +keyspace_id bigint(20) unsigned NOT NULL, +primary key (id) +) Engine=InnoDB''' + + +class TestMySQL(unittest.TestCase): + """This test makes sure the MySQL server connector is correct. 
+ """ + + def test_mysql_connector(self): + with open(table_acl_config, 'w') as fd: + fd.write("""{ + "table_groups": [ + { + "table_names_or_prefixes": ["vt_insert_test"], + "readers": ["vtgate client 1"], + "writers": ["vtgate client 1"], + "admins": ["vtgate client 1"] + } + ] +} +""") + + with open(mysql_auth_server_config, 'w') as fd: + fd.write("""{ + "testuser1": { + "Password": "testpassword1", + "UserData": "vtgate client 1" + }, + "testuser2": { + "Password": "testpassword2", + "UserData": "vtgate client 2" + } +} +""") + + # start the tablets + shard_0_master.start_vttablet(wait_for_state='NOT_SERVING', + table_acl_config=table_acl_config) + shard_0_slave.start_vttablet(wait_for_state='NOT_SERVING', + table_acl_config=table_acl_config) + + # setup replication + utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0', + shard_0_master.tablet_alias], auto_log=True) + utils.run_vtctl(['ApplySchema', '-sql', create_vt_insert_test, + 'test_keyspace']) + for t in [shard_0_master, shard_0_slave]: + utils.run_vtctl(['RunHealthCheck', t.tablet_alias]) + + # start vtgate + utils.VtGate(mysql_server=True).start( + extra_args=['-mysql_auth_server_impl', 'config', + '-mysql_auth_server_config_file', mysql_auth_server_config]) + params = dict(host='::', + port=utils.vtgate.mysql_port, + user='testuser1', + passwd='testpassword1', + db='test_keyspace') + + # 'vtgate client 1' is authorized to access vt_insert_test + conn = MySQLdb.Connect(**params) + cursor = conn.cursor() + cursor.execute('select * from vt_insert_test', {}) + conn.close() + + # 'vtgate client 2' is not authorized to access vt_insert_test + params['user'] = 'testuser2' + params['passwd'] = 'testpassword2' + conn = MySQLdb.Connect(**params) + try: + cursor = conn.cursor() + cursor.execute('select * from vt_insert_test', {}) + self.fail('Execute went through') + except MySQLdb.OperationalError, e: + s = str(e) + self.assertIn('table acl error', s) + self.assertIn('cannot run PASS_SELECT on 
table', s) + conn.close() + + +if __name__ == '__main__': + utils.main() diff --git a/test/utils.py b/test/utils.py index fd628edbd9b..d930eed6aab 100644 --- a/test/utils.py +++ b/test/utils.py @@ -527,12 +527,15 @@ def wait_for_replication_pos(tablet_a, tablet_b, timeout=60.0): class VtGate(object): """VtGate object represents a vtgate process.""" - def __init__(self, port=None): + def __init__(self, port=None, mysql_server=False): """Creates the Vtgate instance and reserve the ports if necessary.""" self.port = port or environment.reserve_ports(1) if protocols_flavor().vtgate_protocol() == 'grpc': self.grpc_port = environment.reserve_ports(1) self.proc = None + self.mysql_port = None + if mysql_server: + self.mysql_port = environment.reserve_ports(1) def start(self, cell='test_nj', retry_count=2, topo_impl=None, cache_ttl='1s', @@ -576,6 +579,8 @@ def start(self, cell='test_nj', retry_count=2, args.extend(environment.topo_server().flags()) if extra_args: args.extend(extra_args) + if self.mysql_port: + args.extend(['-mysql_server_port', str(self.mysql_port)]) self.proc = run_bg(args) wait_for_vars('vtgate', self.port) From 1a6c0684921a09ea9476cd2114622c8f01f1e728 Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Mon, 27 Feb 2017 13:25:48 -0500 Subject: [PATCH 048/108] review comments -- fix potential DivByZero, remove unnecessary throws, add comments and clean up some helper methods --- .../vitess/jdbc/ConnectionProperties.java | 6 ++++- .../vitess/jdbc/FieldWithMetadata.java | 22 +++++++++++++++---- .../vitess/jdbc/VitessConnection.java | 2 +- .../flipkart/vitess/jdbc/VitessResultSet.java | 6 ++--- .../vitess/jdbc/VitessResultSetMetaData.java | 3 ++- 5 files changed, 29 insertions(+), 10 deletions(-) diff --git a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/ConnectionProperties.java b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/ConnectionProperties.java index d94a7c93763..6e5592c71dd 100644 --- 
a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/ConnectionProperties.java +++ b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/ConnectionProperties.java @@ -112,15 +112,19 @@ void initializeProperties(Properties props) throws SQLException { } } postInitialization(); + checkConfiguredEncodingSupport(); } - private void postInitialization() throws SQLException { + private void postInitialization() { this.tabletTypeCache = this.tabletType.getValueAsEnum(); this.includedFieldsCache = this.includedFields.getValueAsEnum(); this.includeAllFieldsCache = this.includedFieldsCache == Query.ExecuteOptions.IncludedFields.ALL; this.twopcEnabledCache = this.twopcEnabled.getValueAsBoolean(); this.simpleExecuteTypeCache = this.executeType.getValueAsEnum() == Constants.QueryExecuteType.SIMPLE; this.characterEncodingAsString = this.characterEncoding.getValueAsString(); + } + + private void checkConfiguredEncodingSupport() throws SQLException { if (characterEncodingAsString != null) { // Attempt to use the encoding, and bail out if it can't be used try { diff --git a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/FieldWithMetadata.java b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/FieldWithMetadata.java index a9721e80731..79f4cb32944 100644 --- a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/FieldWithMetadata.java +++ b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/FieldWithMetadata.java @@ -33,7 +33,9 @@ public FieldWithMetadata(VitessConnection connection, Query.Field field) throws this.vitessType = field.getType(); this.collationIndex = field.getCharset(); - // Map MySqlTypes to java.sql Types + // Map MySqlTypes to an initial java.sql Type + // Afterwards, below we will sometimes re-map the javaType based on other + // information we receive from the server, such as flags and encodings. 
if (MysqlDefs.vitesstoMySqlType.containsKey(vitessType)) { this.javaType = MysqlDefs.vitesstoMySqlType.get(vitessType); } else if (field.getType().equals(Query.Type.TUPLE)) { @@ -45,7 +47,7 @@ public FieldWithMetadata(VitessConnection connection, Query.Field field) throws // All of the below remapping and metadata fields require the extra // fields included when includeFields=IncludedFields.ALL if (connection != null && connection.isIncludeAllFields()) { - this.isImplicitTempTable = field.getTable().length() > 5 && field.getTable().startsWith("#sql_"); + this.isImplicitTempTable = checkForImplicitTemporaryTable(); // Re-map BLOB to 'real' blob type if (this.javaType == Types.BLOB) { boolean isFromFunction = field.getOrgTable().isEmpty(); @@ -79,13 +81,15 @@ public FieldWithMetadata(VitessConnection connection, Query.Field field) throws } if (!isNativeNumericType() && !isNativeDateTimeType()) { + // For non-numeric types, try to pull the encoding from the passed collationIndex + // We will do some fixup afterwards this.encoding = connection.getEncodingForIndex(this.collationIndex); // ucs2, utf16, and utf32 cannot be used as a client character set, but if it was received from server // under some circumstances we can parse them as utf16 if ("UnicodeBig".equals(this.encoding)) { this.encoding = "UTF-16"; } - // MySQL encodes JSON data with utf8mb4. + // MySQL always encodes JSON data with utf8mb4. 
Discard whatever else we've found, if the type is JSON if (vitessType == Query.Type.JSON) { this.encoding = "UTF-8"; } @@ -99,6 +103,7 @@ public FieldWithMetadata(VitessConnection connection, Query.Field field) throws } } else { // Default encoding for number-types and date-types + // We keep the default javaType as passed from the server, and just set the encoding this.encoding = "US-ASCII"; this.isSingleBit = false; } @@ -141,6 +146,15 @@ public FieldWithMetadata(VitessConnection connection, Query.Field field) throws } } + /** + * Implicit temp tables are temporary tables created internally by MySQL for certain operations. + * For those types of tables, the table name is always prefixed with #sql_, typically followed by a numeric + * or other unique identifier. + */ + private boolean checkForImplicitTemporaryTable() { + return field.getTable().length() > 5 && field.getTable().startsWith("#sql_"); + } + private boolean isNativeNumericType() { switch (this.javaType) { case Types.TINYINT: @@ -341,7 +355,7 @@ public synchronized String getCollation() throws SQLException { } - public synchronized int getMaxBytesPerCharacter() throws SQLException { + public synchronized int getMaxBytesPerCharacter() { if (!connection.isIncludeAllFields()) { return 0; } diff --git a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessConnection.java b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessConnection.java index 34c61405932..c17e151f06a 100644 --- a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessConnection.java +++ b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessConnection.java @@ -844,7 +844,7 @@ public String getUsername() { return this.vitessJDBCUrl.getUsername(); } - public String getEncodingForIndex(int charsetIndex) throws SQLException { + public String getEncodingForIndex(int charsetIndex) { String javaEncoding = null; if (charsetIndex != MysqlDefs.NO_CHARSET_INFO) { javaEncoding = CharsetMapping.getJavaEncodingForCollationIndex(charsetIndex, 
getEncoding()); diff --git a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessResultSet.java b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessResultSet.java index 833634996aa..81f8f69fed7 100644 --- a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessResultSet.java +++ b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessResultSet.java @@ -757,8 +757,6 @@ private void checkOpen() throws SQLException { throw new SQLException(Constants.SQLExceptionMessages.CLOSED_RESULT_SET); } - //Unsupported Methods - private void preAccessor(int columnIndex) throws SQLException { checkOpen(); @@ -775,6 +773,8 @@ private boolean isNull(int columnIndex) throws SQLException { return null == this.row.getObject(columnIndex); } + //Unsupported Methods + public InputStream getAsciiStream(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException( Constants.SQLExceptionMessages.SQL_FEATURE_NOT_SUPPORTED); @@ -1469,7 +1469,7 @@ private boolean byteArrayToBoolean(int columnIndex) throws SQLException { return byteArrayToBoolean(this.row.getObject(columnIndex)); } - private boolean byteArrayToBoolean(Object value) throws SQLException { + private boolean byteArrayToBoolean(Object value) { if (value == null) { return false; } diff --git a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessResultSetMetaData.java b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessResultSetMetaData.java index 5976e609edf..8355c222f64 100644 --- a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessResultSetMetaData.java +++ b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessResultSetMetaData.java @@ -103,7 +103,8 @@ public int getColumnDisplaySize(int column) throws SQLException { if (!field.getConnection().isIncludeAllFields()) { return 0; } - return field.getColumnLength() / field.getMaxBytesPerCharacter(); + // If we can't find a charset, we'll return 0. 
In that case assume 1 byte per char + return field.getColumnLength() / Math.max(field.getMaxBytesPerCharacter(), 1); } public String getColumnLabel(int column) throws SQLException { From 857089de7cce9745c172444bdd4a8846f9c65900 Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Mon, 27 Feb 2017 13:34:13 -0500 Subject: [PATCH 049/108] add comment as to why we diverge from mysql-connector-j on types for length --- .../src/main/java/com/flipkart/vitess/util/MysqlDefs.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/java/jdbc/src/main/java/com/flipkart/vitess/util/MysqlDefs.java b/java/jdbc/src/main/java/com/flipkart/vitess/util/MysqlDefs.java index 00dcdf383d6..509713d8ac2 100644 --- a/java/jdbc/src/main/java/com/flipkart/vitess/util/MysqlDefs.java +++ b/java/jdbc/src/main/java/com/flipkart/vitess/util/MysqlDefs.java @@ -72,10 +72,15 @@ public final class MysqlDefs { static final int FIELD_TYPE_YEAR = 13; static final int FIELD_TYPE_JSON = 245; static final int INIT_DB = 2; + + // Unlike mysql-vanilla, vtgate returns ints for Field.getLength(). To ensure no type conversion issues, + // we diverge from mysql-connector-j here, who instead have these fields as longs, and have a function clampedGetLength + // to convert field lengths to ints after comparison. public static final int LENGTH_BLOB = 65535; public static final int LENGTH_LONGBLOB = Integer.MAX_VALUE; public static final int LENGTH_MEDIUMBLOB = 16777215; public static final int LENGTH_TINYBLOB = 255; + // Limitations static final int MAX_ROWS = 50000000; // From the MySQL FAQ static final byte OPEN_CURSOR_FLAG = 1; From 2cdce288946eac3152b0ffb4f8ef50ca3c448533 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Mon, 27 Feb 2017 13:23:32 -0800 Subject: [PATCH 050/108] messager: Fix a deadlock bug (#2594) The root cause of the deadlock is that message manager calls into tabletserver, which calls back into it. This can cause deadlocks and Close can hang forever. 
BUG=35763775 --- go/vt/tabletserver/message_manager.go | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/go/vt/tabletserver/message_manager.go b/go/vt/tabletserver/message_manager.go index 7956890b7d8..0074e627103 100644 --- a/go/vt/tabletserver/message_manager.go +++ b/go/vt/tabletserver/message_manager.go @@ -107,6 +107,8 @@ type MessageManager struct { } // NewMessageManager creates a new message manager. +// Calls into tsv have to be made asynchronously. Otherwise, +// it can lead to deadlocks. func NewMessageManager(tsv *TabletServer, table *schema.Table, conns *connpool.Pool) *MessageManager { mm := &MessageManager{ tsv: tsv, @@ -326,18 +328,18 @@ func (mm *MessageManager) send(receiver *receiverWithStatus, qr *sqltypes.Result for i, row := range qr.Rows { ids[i] = row[0].String() } - // Postpone the messages for resend before discarding - // from cache. If no timely ack is received, it will be resent. - mm.postpone(ids) // postpone should discard, but this is a safety measure // in case it fails. mm.cache.Discard(ids) + go postpone(mm.tsv, mm.name.String(), mm.ackWaitTime, ids) } -func (mm *MessageManager) postpone(ids []string) { - ctx, cancel := context.WithTimeout(tabletenv.LocalContext(), mm.ackWaitTime) +// postpone is a non-member because it should be called asynchronously and should +// not rely on members of MessageManager. +func postpone(tsv *TabletServer, name string, ackWaitTime time.Duration, ids []string) { + ctx, cancel := context.WithTimeout(tabletenv.LocalContext(), ackWaitTime) defer cancel() - _, err := mm.tsv.PostponeMessages(ctx, nil, mm.name.String(), ids) + _, err := tsv.PostponeMessages(ctx, nil, name, ids) if err != nil { // TODO(sougou): increment internal error. 
log.Errorf("Unable to postpone messages %v: %v", ids, err) @@ -406,10 +408,16 @@ func (mm *MessageManager) runPoller() { } func (mm *MessageManager) runPurge() { - ctx, cancel := context.WithTimeout(tabletenv.LocalContext(), mm.purgeTicks.Interval()) + go purge(mm.tsv, mm.name.String(), mm.purgeAfter, mm.purgeTicks.Interval()) +} + +// purge is a non-member because it should be called asynchronously and should +// not rely on members of MessageManager. +func purge(tsv *TabletServer, name string, purgeAfter, purgeInterval time.Duration) { + ctx, cancel := context.WithTimeout(tabletenv.LocalContext(), purgeInterval) defer cancel() for { - count, err := mm.tsv.PurgeMessages(ctx, nil, mm.name.String(), time.Now().Add(-mm.purgeAfter).UnixNano()) + count, err := tsv.PurgeMessages(ctx, nil, name, time.Now().Add(-purgeAfter).UnixNano()) if err != nil { // TODO(sougou): increment internal error. log.Errorf("Unable to delete messages: %v", err) From a77653032deba97194f52fd579477d936da23384 Mon Sep 17 00:00:00 2001 From: Yipei Wang Date: Mon, 27 Feb 2017 13:59:34 -0800 Subject: [PATCH 051/108] workflow: Modify the workflow library API and modify the unit tests of resharding workflow. --- go/vt/schemamanager/schemaswap/schema_swap.go | 2 +- go/vt/workflow/manager.go | 14 +- .../horizontal_resharding_workflow.go | 9 +- .../horizontal_resharding_workflow_test.go | 181 ++++++------------ .../resharding/parallel_runner_test.go | 68 ++++--- go/vt/workflow/resharding/test_workflow.go | 2 +- go/vt/workflow/sleep_workflow.go | 2 +- go/vt/workflow/topovalidator/validator.go | 2 +- 8 files changed, 121 insertions(+), 159 deletions(-) diff --git a/go/vt/schemamanager/schemaswap/schema_swap.go b/go/vt/schemamanager/schemaswap/schema_swap.go index 21bb06afb5d..db8c0847dff 100644 --- a/go/vt/schemamanager/schemaswap/schema_swap.go +++ b/go/vt/schemamanager/schemaswap/schema_swap.go @@ -188,7 +188,7 @@ func (*SwapWorkflowFactory) Init(_ *workflow.Manager, workflowProto *workflowpb. 
// Instantiate is a part of workflow.Factory interface. It instantiates workflow.Workflow object from // workflowpb.Workflow protobuf object. -func (*SwapWorkflowFactory) Instantiate(workflowProto *workflowpb.Workflow, rootNode *workflow.Node) (workflow.Workflow, error) { +func (*SwapWorkflowFactory) Instantiate(_ *workflow.Manager, workflowProto *workflowpb.Workflow, rootNode *workflow.Node) (workflow.Workflow, error) { data := &swapWorkflowData{} if err := json.Unmarshal(workflowProto.Data, data); err != nil { return nil, err diff --git a/go/vt/workflow/manager.go b/go/vt/workflow/manager.go index c366270444b..ffcf78e8d49 100644 --- a/go/vt/workflow/manager.go +++ b/go/vt/workflow/manager.go @@ -47,7 +47,7 @@ type Factory interface { // Instantiate loads a workflow from the proto representation // into an in-memory Workflow object. rootNode is the root UI node // representing the workflow. - Instantiate(w *workflowpb.Workflow, rootNode *Node) (Workflow, error) + Instantiate(m *Manager, w *workflowpb.Workflow, rootNode *Node) (Workflow, error) } // Manager is the main Workflow manager object. @@ -287,7 +287,7 @@ func (m *Manager) instantiateWorkflow(w *workflowpb.Workflow) (*runningWorkflow, return nil, fmt.Errorf("no factory named %v is registered", w.FactoryName) } var err error - rw.workflow, err = factory.Instantiate(w, rw.rootNode) + rw.workflow, err = factory.Instantiate(m, w, rw.rootNode) if err != nil { return nil, err } @@ -459,6 +459,16 @@ func (m *Manager) Wait(ctx context.Context, uuid string) error { return nil } +// GetWorkflowForTesting returns the Workflow object of the running workflow +// identified by uuid. The method is used in unit tests to inject mocks. +func (m *Manager) GetWorkflowForTesting(uuid string) (Workflow, error) { + rw, err := m.getRunningWorkflow(uuid) + if err != nil { + return nil, err + } + return rw.workflow, nil +} + // getRunningWorkflow returns a runningWorkflow by uuid. 
func (m *Manager) getRunningWorkflow(uuid string) (*runningWorkflow, error) { m.mu.Lock() diff --git a/go/vt/workflow/resharding/horizontal_resharding_workflow.go b/go/vt/workflow/resharding/horizontal_resharding_workflow.go index ca397240845..c488b76556d 100644 --- a/go/vt/workflow/resharding/horizontal_resharding_workflow.go +++ b/go/vt/workflow/resharding/horizontal_resharding_workflow.go @@ -83,7 +83,7 @@ func (*HorizontalReshardingWorkflowFactory) Init(m *workflow.Manager, w *workflo } // Instantiate is part the workflow.Factory interface. -func (*HorizontalReshardingWorkflowFactory) Instantiate(w *workflowpb.Workflow, rootNode *workflow.Node) (workflow.Workflow, error) { +func (*HorizontalReshardingWorkflowFactory) Instantiate(m *workflow.Manager, w *workflowpb.Workflow, rootNode *workflow.Node) (workflow.Workflow, error) { rootNode.Message = "This is a workflow to execute horizontal resharding automatically." checkpoint := &workflowpb.WorkflowCheckpoint{} @@ -95,6 +95,9 @@ func (*HorizontalReshardingWorkflowFactory) Instantiate(w *workflowpb.Workflow, checkpoint: checkpoint, rootUINode: rootNode, logger: logutil.NewMemoryLogger(), + wr: wrangler.New(logutil.NewConsoleLogger(), m.TopoServer(), tmclient.NewTabletManagerClient()), + topoServer: m.TopoServer(), + manager: m, } copySchemaUINode := &workflow.Node{ Name: "CopySchemaShard", @@ -313,12 +316,8 @@ type HorizontalReshardingWorkflow struct { // It implements the workflow.Workflow interface. 
func (hw *HorizontalReshardingWorkflow) Run(ctx context.Context, manager *workflow.Manager, wi *topo.WorkflowInfo) error { hw.ctx = ctx - hw.topoServer = manager.TopoServer() - hw.manager = manager - hw.wr = wrangler.New(logutil.NewConsoleLogger(), manager.TopoServer(), tmclient.NewTabletManagerClient()) hw.wi = wi hw.checkpointWriter = NewCheckpointWriter(hw.topoServer, hw.checkpoint, hw.wi) - hw.rootUINode.Display = workflow.NodeDisplayDeterminate hw.rootUINode.BroadcastChanges(true /* updateChildren */) diff --git a/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go b/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go index 1d9c151eebb..3c566c34c2e 100644 --- a/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go +++ b/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go @@ -6,171 +6,114 @@ import ( "testing" "github.com/golang/mock/gomock" - "github.com/youtube/vitess/go/vt/logutil" + "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/memorytopo" "github.com/youtube/vitess/go/vt/worker/fakevtworkerclient" "github.com/youtube/vitess/go/vt/worker/vtworkerclient" "github.com/youtube/vitess/go/vt/workflow" "github.com/youtube/vitess/go/vt/wrangler" + // import the gRPC client implementation for tablet manager + _ "github.com/youtube/vitess/go/vt/tabletmanager/grpctmclient" + topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" - workflowpb "github.com/youtube/vitess/go/vt/proto/workflow" ) +var ( + testKeyspace = "test_keyspace" + testVtworkers = "localhost:15032" +) + +func init() { + Register() +} + // TestHorizontalResharding runs the happy path of HorizontalReshardingWorkflow. func TestHorizontalResharding(t *testing.T) { - // Set up the mock wrangler. It is used for the CopySchema and Migrate phase. - ctrl := gomock.NewController(t) - defer ctrl.Finish() ctx := context.Background() - mockWranglerInterface := setupMockWrangler(ctx, ctrl) - - // Set up the fakeworkerclient. 
It is used at SplitClone and SplitDiff phase. - fakeVtworkerClient := setupFakeVtworker() - vtworkerclient.RegisterFactory("fake", fakeVtworkerClient.FakeVtworkerClientFactory) - defer vtworkerclient.UnregisterFactoryForTest("fake") - // Create a checkpoint with initialized tasks. - sourceShards := []string{"0"} - destinationShards := []string{"-80", "80-"} - vtworkers := []string{"localhost:15032"} - checkpoint, err := initCheckpointFromShards("test_keyspace", vtworkers, sourceShards, destinationShards) - if err != nil { - t.Errorf("initialize checkpoint fails: %v", err) - } - - hw, err := createWorkflow(ctx, mockWranglerInterface, checkpoint) - if err != nil { - t.Errorf("initialize Workflow fails: %v", err) - } - if err := hw.runWorkflow(); err != nil { - t.Errorf("%s: Horizontal resharding workflow should not fail", err) - } - - verifySuccess(t, hw.checkpoint) -} - -// TestHorizontalReshardingRetry retries a stopped workflow, -// which the tasks are partially finished. -func TestHorizontalReshardingRetry(t *testing.T) { - // Set up mock wrangler. It is used for the CopySchema and Migrate phase. + // Set up the mock wrangler. It is used for the CopySchema, + // WaitforFilteredReplication and Migrate phase. ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() - mockWranglerInterface := setupMockWranglerForRetry(ctx, ctrl) + mockWranglerInterface := setupMockWrangler(ctrl, testKeyspace) - // Set up fakeworkerclient. It is used at SplitClone and SplitDiff phase. - fakeVtworkerClient := setupFakeVtworker() + // Set up the fakeworkerclient. It is used at SplitClone and SplitDiff phase. + fakeVtworkerClient := setupFakeVtworker(testKeyspace, testVtworkers) vtworkerclient.RegisterFactory("fake", fakeVtworkerClient.FakeVtworkerClientFactory) defer vtworkerclient.UnregisterFactoryForTest("fake") - // Create a checkpoint for the stopped workflow. 
For the stopped workflow, - // the task of copying schema to shard 80- succeed while the task of copying - // schema to shard -80 failed. The rest of tasks haven't been executed. - sourceShards := []string{"0"} - destinationShards := []string{"-80", "80-"} - vtworkers := []string{"localhost:15032"} - checkpoint, err := initCheckpointFromShards("test_keyspace", vtworkers, sourceShards, destinationShards) + // Initialize the topology. + ts := setupTopology(ctx, t, testKeyspace) + m := workflow.NewManager(ts) + // Run the manager in the background. + wg, _, cancel := startManager(m) + // Create the workflow. + uuid, err := m.Create(ctx, horizontalReshardingFactoryName, []string{"-keyspace=" + testKeyspace, "-vtworkers=" + testVtworkers}) if err != nil { - t.Errorf("initialize checkpoint fails: %v", err) + t.Fatalf("cannot create resharding workflow: %v", err) } - setTaskSuccessOrFailure(checkpoint, createTaskID(phaseCopySchema, "80-"), true /* isSuccess*/) - setTaskSuccessOrFailure(checkpoint, createTaskID(phaseCopySchema, "-80"), false /* isSuccess*/) - - hw, err := createWorkflow(ctx, mockWranglerInterface, checkpoint) + // Inject the mock wranger into the workflow. + w, err := m.GetWorkflowForTesting(uuid) if err != nil { - t.Errorf("initialize Workflow fails: %v", err) - } - // Rerunning the workflow. - if err := hw.runWorkflow(); err != nil { - t.Errorf("%s: Horizontal resharding workflow should not fail", err) + t.Fatalf("fail to get workflow from manager: %v", err) } + hw := w.(*HorizontalReshardingWorkflow) + hw.wr = mockWranglerInterface - verifySuccess(t, hw.checkpoint) -} - -func setTaskSuccessOrFailure(checkpoint *workflowpb.WorkflowCheckpoint, taskID string, isSuccess bool) { - t := checkpoint.Tasks[taskID] - t.State = workflowpb.TaskState_TaskDone - if !isSuccess { - t.Error = "failed" - } else { - t.Error = "" + // Start the job. 
+ if err := m.Start(ctx, uuid); err != nil { + t.Fatalf("cannot start resharding workflow: %v", err) } -} -func createWorkflow(ctx context.Context, mockWranglerInterface *MockReshardingWrangler, checkpoint *workflowpb.WorkflowCheckpoint) (*HorizontalReshardingWorkflow, error) { - ts := memorytopo.NewServer("cell") - w := &workflowpb.Workflow{ - Uuid: "test_hw", - FactoryName: horizontalReshardingFactoryName, - State: workflowpb.WorkflowState_NotStarted, - } - wi, err := ts.CreateWorkflow(ctx, w) - if err != nil { - return nil, err + // Wait for the workflow to end. + m.Wait(ctx, uuid) + if err := verifyAllTasksDone(ctx, ts, uuid); err != nil { + t.Fatal(err) } - hw := &HorizontalReshardingWorkflow{ - ctx: ctx, - wr: mockWranglerInterface, - manager: workflow.NewManager(ts), - wi: wi, - topoServer: ts, - logger: logutil.NewMemoryLogger(), - checkpoint: checkpoint, - checkpointWriter: NewCheckpointWriter(ts, checkpoint, wi), + + // Stop the manager. + if err := m.Stop(ctx, uuid); err != nil { + t.Fatalf("cannot stop resharding workflow: %v", err) } - return hw, nil + cancel() + wg.Wait() } -func setupFakeVtworker() *fakevtworkerclient.FakeVtworkerClient { +func setupFakeVtworker(keyspace, vtworkers string) *fakevtworkerclient.FakeVtworkerClient { flag.Set("vtworker_client_protocol", "fake") fakeVtworkerClient := fakevtworkerclient.NewFakeVtworkerClient() - fakeVtworkerClient.RegisterResultForAddr("localhost:15032", []string{"SplitClone", "--min_healthy_rdonly_tablets=1", "test_keyspace/0"}, "", nil) - fakeVtworkerClient.RegisterResultForAddr("localhost:15032", []string{"SplitDiff", "--min_healthy_rdonly_tablets=1", "test_keyspace/-80"}, "", nil) - fakeVtworkerClient.RegisterResultForAddr("localhost:15032", []string{"SplitDiff", "--min_healthy_rdonly_tablets=1", "test_keyspace/80-"}, "", nil) + fakeVtworkerClient.RegisterResultForAddr(vtworkers, []string{"SplitClone", "--min_healthy_rdonly_tablets=1", keyspace + "/0"}, "", nil) + 
fakeVtworkerClient.RegisterResultForAddr(vtworkers, []string{"SplitDiff", "--min_healthy_rdonly_tablets=1", keyspace + "/-80"}, "", nil) + fakeVtworkerClient.RegisterResultForAddr(vtworkers, []string{"SplitDiff", "--min_healthy_rdonly_tablets=1", keyspace + "/80-"}, "", nil) return fakeVtworkerClient } -func setupMockWranglerForRetry(ctx context.Context, ctrl *gomock.Controller) *MockReshardingWrangler { - mockWranglerInterface := NewMockReshardingWrangler(ctrl) - // Set the expected behaviors for mock wrangler. copy schema to shard 80- - // should not be called. - mockWranglerInterface.EXPECT().CopySchemaShardFromShard(ctx, nil /* tableArray*/, nil /* excludeTableArray */, true /*includeViews*/, "test_keyspace", "0", "test_keyspace", "-80", wrangler.DefaultWaitSlaveTimeout).Return(nil) - mockWranglerInterface.EXPECT().WaitForFilteredReplication(ctx, "test_keyspace", "-80", wrangler.DefaultWaitForFilteredReplicationMaxDelay).Return(nil) - mockWranglerInterface.EXPECT().WaitForFilteredReplication(ctx, "test_keyspace", "80-", wrangler.DefaultWaitForFilteredReplicationMaxDelay).Return(nil) - - servedTypeParams := []topodatapb.TabletType{topodatapb.TabletType_RDONLY, - topodatapb.TabletType_REPLICA, - topodatapb.TabletType_MASTER} - for _, servedType := range servedTypeParams { - mockWranglerInterface.EXPECT().MigrateServedTypes(ctx, "test_keyspace", "0", nil /* cells */, servedType, false /* reverse */, false /* skipReFreshState */, wrangler.DefaultFilteredReplicationWaitTime).Return(nil) - } - return mockWranglerInterface -} - -func setupMockWrangler(ctx context.Context, ctrl *gomock.Controller) *MockReshardingWrangler { +func setupMockWrangler(ctrl *gomock.Controller, keyspace string) *MockReshardingWrangler { mockWranglerInterface := NewMockReshardingWrangler(ctrl) // Set the expected behaviors for mock wrangler. 
- mockWranglerInterface.EXPECT().CopySchemaShardFromShard(ctx, nil /* tableArray*/, nil /* excludeTableArray */, true /*includeViews*/, "test_keyspace", "0", "test_keyspace", "-80", wrangler.DefaultWaitSlaveTimeout).Return(nil) - mockWranglerInterface.EXPECT().CopySchemaShardFromShard(ctx, nil /* tableArray*/, nil /* excludeTableArray */, true /*includeViews*/, "test_keyspace", "0", "test_keyspace", "80-", wrangler.DefaultWaitSlaveTimeout).Return(nil) + mockWranglerInterface.EXPECT().CopySchemaShardFromShard(gomock.Any(), nil /* tableArray*/, nil /* excludeTableArray */, true /*includeViews*/, keyspace, "0", keyspace, "-80", wrangler.DefaultWaitSlaveTimeout).Return(nil) + mockWranglerInterface.EXPECT().CopySchemaShardFromShard(gomock.Any(), nil /* tableArray*/, nil /* excludeTableArray */, true /*includeViews*/, keyspace, "0", keyspace, "80-", wrangler.DefaultWaitSlaveTimeout).Return(nil) - mockWranglerInterface.EXPECT().WaitForFilteredReplication(ctx, "test_keyspace", "-80", wrangler.DefaultWaitForFilteredReplicationMaxDelay).Return(nil) - mockWranglerInterface.EXPECT().WaitForFilteredReplication(ctx, "test_keyspace", "80-", wrangler.DefaultWaitForFilteredReplicationMaxDelay).Return(nil) + mockWranglerInterface.EXPECT().WaitForFilteredReplication(gomock.Any(), keyspace, "-80", wrangler.DefaultWaitForFilteredReplicationMaxDelay).Return(nil) + mockWranglerInterface.EXPECT().WaitForFilteredReplication(gomock.Any(), keyspace, "80-", wrangler.DefaultWaitForFilteredReplicationMaxDelay).Return(nil) servedTypeParams := []topodatapb.TabletType{topodatapb.TabletType_RDONLY, topodatapb.TabletType_REPLICA, topodatapb.TabletType_MASTER} for _, servedType := range servedTypeParams { - mockWranglerInterface.EXPECT().MigrateServedTypes(ctx, "test_keyspace", "0", nil /* cells */, servedType, false /* reverse */, false /* skipReFreshState */, wrangler.DefaultFilteredReplicationWaitTime).Return(nil) + mockWranglerInterface.EXPECT().MigrateServedTypes(gomock.Any(), keyspace, "0", nil 
/* cells */, servedType, false /* reverse */, false /* skipReFreshState */, wrangler.DefaultFilteredReplicationWaitTime).Return(nil) } return mockWranglerInterface } -func verifySuccess(t *testing.T, checkpoint *workflowpb.WorkflowCheckpoint) { - for _, task := range checkpoint.Tasks { - if task.State != workflowpb.TaskState_TaskDone || task.Error != "" { - t.Fatalf("task: %v should succeed: task status: %v, %v", task.Id, task.State, task.Error) - } +func setupTopology(ctx context.Context, t *testing.T, keyspace string) topo.Server { + ts := memorytopo.NewServer("cell") + if err := ts.CreateKeyspace(ctx, keyspace, &topodatapb.Keyspace{}); err != nil { + t.Fatalf("CreateKeyspace: %v", err) } + ts.CreateShard(ctx, keyspace, "0") + ts.CreateShard(ctx, keyspace, "-80") + ts.CreateShard(ctx, keyspace, "80-") + return ts } diff --git a/go/vt/workflow/resharding/parallel_runner_test.go b/go/vt/workflow/resharding/parallel_runner_test.go index 7c83c51fae2..3885f449efc 100644 --- a/go/vt/workflow/resharding/parallel_runner_test.go +++ b/go/vt/workflow/resharding/parallel_runner_test.go @@ -19,28 +19,30 @@ import ( func TestParallelRunner(t *testing.T) { ts := memorytopo.NewServer("cell") m := workflow.NewManager(ts) + ctx := context.Background() // Run the manager in the background. - wg, cancel, _ := startManager(t, m) + wg, _, cancel := startManager(m) // Create a testworkflow. - uuid, err := m.Create(context.Background(), testWorkflowFactoryName, []string{"-retry=false", "-count=2"}) + uuid, err := m.Create(ctx, testWorkflowFactoryName, []string{"-retry=false", "-count=2"}) if err != nil { t.Fatalf("cannot create testworkflow: %v", err) } // Start the job - if err := m.Start(context.Background(), uuid); err != nil { + if err := m.Start(ctx, uuid); err != nil { t.Fatalf("cannot start testworkflow: %v", err) } // Wait for the workflow to end. 
- m.Wait(context.Background(), uuid) - - verifyWorkflowSuccess(context.Background(), t, ts, uuid) + m.Wait(ctx, uuid) + if err := verifyAllTasksDone(ctx, ts, uuid); err != nil { + t.Fatal(err) + } // Stop the manager. - if err := m.Stop(context.Background(), uuid); err != nil { + if err := m.Stop(ctx, uuid); err != nil { t.Fatalf("cannot stop testworkflow: %v", err) } cancel() @@ -52,12 +54,12 @@ func TestParallelRunnerRetryAction(t *testing.T) { // retry task1, after it is finished successfully, we retry task2. ts := memorytopo.NewServer("cell") m := workflow.NewManager(ts) - + ctx := context.Background() // Run the manager in the background. - wg, cancel, ctx := startManager(t, m) + wg, _, cancel := startManager(m) // Create a testworkflow. - uuid, err := m.Create(context.Background(), testWorkflowFactoryName, []string{"-retry=true", "-count=2"}) + uuid, err := m.Create(ctx, testWorkflowFactoryName, []string{"-retry=true", "-count=2"}) if err != nil { t.Fatalf("cannot create testworkflow: %v", err) } @@ -85,11 +87,11 @@ func TestParallelRunnerRetryAction(t *testing.T) { } if strings.Contains(monitorStr, "Retry") { if strings.Contains(monitorStr, task1ID) { - verifyTaskSuccessOrFailure(context.Background(), t, ts, uuid, task1ID, false /* isSuccess*/) + verifyTaskSuccessOrFailure(context.Background(), ts, uuid, task1ID, false /* isSuccess*/) retry1 = true } if strings.Contains(monitorStr, task2ID) { - verifyTaskSuccessOrFailure(context.Background(), t, ts, uuid, task2ID, false /* isSuccess*/) + verifyTaskSuccessOrFailure(context.Background(), ts, uuid, task2ID, false /* isSuccess*/) retry2 = true } } @@ -98,11 +100,15 @@ func TestParallelRunnerRetryAction(t *testing.T) { if retry1 && retry2 { clickRetry(ctx, t, m, path.Join("/"+uuid, task1ID)) waitForFinished(ctx, t, notifications, task1ID) - verifyTaskSuccessOrFailure(context.Background(), t, ts, uuid, task1ID, true /* isSuccess*/) + if err := verifyTaskSuccessOrFailure(context.Background(), ts, uuid, task1ID, 
true /* isSuccess*/); err != nil { + t.Errorf("verify task %v success failed: %v", task1ID, err) + } clickRetry(ctx, t, m, path.Join("/"+uuid, task2ID)) waitForFinished(ctx, t, notifications, task2ID) - verifyTaskSuccessOrFailure(context.Background(), t, ts, uuid, task2ID, true /* isSuccess*/) + if err := verifyTaskSuccessOrFailure(context.Background(), ts, uuid, task2ID, true /* isSuccess*/); err != nil { + t.Errorf("verify task %v success failed: %v", task2ID, err) + } return } case <-ctx.Done(): @@ -113,33 +119,35 @@ func TestParallelRunnerRetryAction(t *testing.T) { }() // Start the job - if err := m.Start(context.Background(), uuid); err != nil { + if err := m.Start(ctx, uuid); err != nil { t.Fatalf("cannot start testworkflow: %v", err) } // Wait for the workflow to end. - m.Wait(context.Background(), uuid) + m.Wait(ctx, uuid) - verifyWorkflowSuccess(context.Background(), t, ts, uuid) + if err := verifyAllTasksDone(ctx, ts, uuid); err != nil { + t.Fatal(err) + } // Stop the manager. - if err := m.Stop(context.Background(), uuid); err != nil { + if err := m.Stop(ctx, uuid); err != nil { t.Fatalf("cannot stop testworkflow: %v", err) } cancel() wg.Wait() } -func startManager(t *testing.T, m *workflow.Manager) (*sync.WaitGroup, context.CancelFunc, context.Context) { +func startManager(m *workflow.Manager) (*sync.WaitGroup, context.Context, context.CancelFunc) { // Run the manager in the background. 
ctx, cancel := context.WithCancel(context.Background()) wg := &sync.WaitGroup{} wg.Add(1) go func() { + defer wg.Done() m.Run(ctx) - wg.Done() }() m.WaitUntilRunning() - return wg, cancel, ctx + return wg, ctx, cancel } func clickRetry(ctx context.Context, t *testing.T, m *workflow.Manager, nodePath string) { @@ -174,32 +182,33 @@ func waitForFinished(ctx context.Context, t *testing.T, notifications chan []byt } } -func verifyWorkflowSuccess(ctx context.Context, t *testing.T, ts topo.Server, uuid string) { +func verifyAllTasksDone(ctx context.Context, ts topo.Server, uuid string) error { wi, err := ts.GetWorkflow(ctx, uuid) if err != nil { - t.Errorf("fail to get workflow for: %v", uuid) + return fmt.Errorf("fail to get workflow for: %v", uuid) } checkpoint := &workflowpb.WorkflowCheckpoint{} if err := proto.Unmarshal(wi.Workflow.Data, checkpoint); err != nil { - t.Errorf("fails to get checkpoint for the workflow: %v", err) + return fmt.Errorf("fails to get checkpoint for the workflow: %v", err) } for _, task := range checkpoint.Tasks { if task.State != workflowpb.TaskState_TaskDone || task.Error != "" { - t.Fatalf("task: %v should succeed: task status: %v, %v", task.Id, task.State, task.Attributes) + return fmt.Errorf("task: %v should succeed: task status: %v, %v", task.Id, task.State, task.Attributes) } } + return nil } -func verifyTaskSuccessOrFailure(ctx context.Context, t *testing.T, ts topo.Server, uuid, taskID string, isSuccess bool) { +func verifyTaskSuccessOrFailure(ctx context.Context, ts topo.Server, uuid, taskID string, isSuccess bool) error { wi, err := ts.GetWorkflow(ctx, uuid) if err != nil { - t.Errorf("fail to get workflow for: %v", uuid) + return fmt.Errorf("fail to get workflow for: %v", uuid) } checkpoint := &workflowpb.WorkflowCheckpoint{} if err := proto.Unmarshal(wi.Workflow.Data, checkpoint); err != nil { - t.Errorf("fails to get checkpoint for the workflow: %v", err) + return fmt.Errorf("fails to get checkpoint for the workflow: %v", err) } 
task := checkpoint.Tasks[taskID] @@ -208,6 +217,7 @@ func verifyTaskSuccessOrFailure(ctx context.Context, t *testing.T, ts topo.Serve taskError = errMessage } if task.State != workflowpb.TaskState_TaskDone || task.Error != taskError { - t.Errorf("task: %v should succeed. Task status: %v, %v", task.Id, task.State, task.Error) + return fmt.Errorf("task: %v should succeed. Task status: %v, %v", task.Id, task.State, task.Error) } + return nil } diff --git a/go/vt/workflow/resharding/test_workflow.go b/go/vt/workflow/resharding/test_workflow.go index a8dbc6b9238..edb689d1295 100644 --- a/go/vt/workflow/resharding/test_workflow.go +++ b/go/vt/workflow/resharding/test_workflow.go @@ -134,7 +134,7 @@ func (*TestWorkflowFactory) Init(_ *workflow.Manager, w *workflowpb.Workflow, ar } // Instantiate is part the workflow.Factory interface. -func (*TestWorkflowFactory) Instantiate(w *workflowpb.Workflow, rootNode *workflow.Node) (workflow.Workflow, error) { +func (*TestWorkflowFactory) Instantiate(_ *workflow.Manager, w *workflowpb.Workflow, rootNode *workflow.Node) (workflow.Workflow, error) { checkpoint := &workflowpb.WorkflowCheckpoint{} if err := proto.Unmarshal(w.Data, checkpoint); err != nil { return nil, err diff --git a/go/vt/workflow/sleep_workflow.go b/go/vt/workflow/sleep_workflow.go index 7aa0686732f..bd5eba54925 100644 --- a/go/vt/workflow/sleep_workflow.go +++ b/go/vt/workflow/sleep_workflow.go @@ -215,7 +215,7 @@ func (f *SleepWorkflowFactory) Init(_ *Manager, w *workflowpb.Workflow, args []s } // Instantiate is part of the workflow.Factory interface. -func (f *SleepWorkflowFactory) Instantiate(w *workflowpb.Workflow, rootNode *Node) (Workflow, error) { +func (f *SleepWorkflowFactory) Instantiate(_ *Manager, w *workflowpb.Workflow, rootNode *Node) (Workflow, error) { rootNode.Message = "This workflow is a test workflow that just sleeps for the provided amount of time." 
data := &SleepWorkflowData{} diff --git a/go/vt/workflow/topovalidator/validator.go b/go/vt/workflow/topovalidator/validator.go index a5fd6f80c74..3cc98e91570 100644 --- a/go/vt/workflow/topovalidator/validator.go +++ b/go/vt/workflow/topovalidator/validator.go @@ -203,7 +203,7 @@ func (f *WorkflowFactory) Init(_ *workflow.Manager, w *workflowpb.Workflow, args } // Instantiate is part of the workflow.Factory interface. -func (f *WorkflowFactory) Instantiate(w *workflowpb.Workflow, rootNode *workflow.Node) (workflow.Workflow, error) { +func (f *WorkflowFactory) Instantiate(_ *workflow.Manager, w *workflowpb.Workflow, rootNode *workflow.Node) (workflow.Workflow, error) { rootNode.Message = "Validates the Topology and proposes fixes for known issues." return &Workflow{ From 5bd6a7fa0fa158b37d09c014524c9b044017a09d Mon Sep 17 00:00:00 2001 From: Michael Berlin Date: Mon, 27 Feb 2017 15:13:46 -0800 Subject: [PATCH 052/108] Do not return OK error code for buffer errors. NOTE: This is an automated export. Changes were already LGTM'd internally. --- go/vt/vtgate/gateway/discoverygateway.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/vt/vtgate/gateway/discoverygateway.go b/go/vt/vtgate/gateway/discoverygateway.go index 31b8f6c4af5..40000db5a05 100644 --- a/go/vt/vtgate/gateway/discoverygateway.go +++ b/go/vt/vtgate/gateway/discoverygateway.go @@ -179,7 +179,7 @@ func (dg *discoveryGateway) withRetry(ctx context.Context, target *querypb.Targe if bufferErr != nil { // Buffering failed e.g. buffer is already full. Do not retry. 
err = vterrors.Errorf( - vterrors.Code(err), + vterrors.Code(bufferErr), "failed to automatically buffer and retry failed request during failover: %v original err (type=%T): %v", bufferErr, err, err) break From 626eea150f24af5b1d9a4cac19e7d19c1e454130 Mon Sep 17 00:00:00 2001 From: Yipei Wang Date: Tue, 28 Feb 2017 10:59:48 -0800 Subject: [PATCH 053/108] workflow: Change methods' name based on go-readability reviewer's suggestion. --- go/vt/workflow/manager.go | 12 ++++++------ .../horizontal_resharding_workflow_test.go | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/go/vt/workflow/manager.go b/go/vt/workflow/manager.go index ffcf78e8d49..eefe13a6be6 100644 --- a/go/vt/workflow/manager.go +++ b/go/vt/workflow/manager.go @@ -444,7 +444,7 @@ func (m *Manager) Delete(ctx context.Context, uuid string) error { // Wait waits for the provided workflow to end. func (m *Manager) Wait(ctx context.Context, uuid string) error { // Find the workflow. - rw, err := m.getRunningWorkflow(uuid) + rw, err := m.runningWorkflow(uuid) if err != nil { return err } @@ -459,18 +459,18 @@ func (m *Manager) Wait(ctx context.Context, uuid string) error { return nil } -// GetWorkflowForTesting returns the Workflow object of the running workflow +// WorkflowForTesting returns the Workflow object of the running workflow // identified by uuid. The method is used in unit tests to inject mocks. -func (m *Manager) GetWorkflowForTesting(uuid string) (Workflow, error) { - rw, err := m.getRunningWorkflow(uuid) +func (m *Manager) WorkflowForTesting(uuid string) (Workflow, error) { + rw, err := m.runningWorkflow(uuid) if err != nil { return nil, err } return rw.workflow, nil } -// getRunningWorkflow returns a runningWorkflow by uuid. -func (m *Manager) getRunningWorkflow(uuid string) (*runningWorkflow, error) { +// runningWorkflow returns a runningWorkflow by uuid. 
+func (m *Manager) runningWorkflow(uuid string) (*runningWorkflow, error) { m.mu.Lock() defer m.mu.Unlock() diff --git a/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go b/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go index 3c566c34c2e..b0d92258fba 100644 --- a/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go +++ b/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go @@ -54,7 +54,7 @@ func TestHorizontalResharding(t *testing.T) { t.Fatalf("cannot create resharding workflow: %v", err) } // Inject the mock wranger into the workflow. - w, err := m.GetWorkflowForTesting(uuid) + w, err := m.WorkflowForTesting(uuid) if err != nil { t.Fatalf("fail to get workflow from manager: %v", err) } From 678ea9f449214ce86a01360fca16c2abc6697acf Mon Sep 17 00:00:00 2001 From: thompsonja Date: Tue, 28 Feb 2017 14:05:38 -0800 Subject: [PATCH 054/108] Add flags to kubernetes guestbook app. (#2605) Add flags to kubernetes guestbook app. --- ....yaml => guestbook-controller-template.yaml} | 1 + examples/kubernetes/guestbook-down.sh | 2 -- examples/kubernetes/guestbook-up.sh | 11 ++++++++++- examples/kubernetes/guestbook/main.py | 17 +++++++++++++---- 4 files changed, 24 insertions(+), 7 deletions(-) rename examples/kubernetes/{guestbook-controller.yaml => guestbook-controller-template.yaml} (86%) diff --git a/examples/kubernetes/guestbook-controller.yaml b/examples/kubernetes/guestbook-controller-template.yaml similarity index 86% rename from examples/kubernetes/guestbook-controller.yaml rename to examples/kubernetes/guestbook-controller-template.yaml index 308dbd7ef46..79f8a22b7a6 100644 --- a/examples/kubernetes/guestbook-controller.yaml +++ b/examples/kubernetes/guestbook-controller-template.yaml @@ -26,3 +26,4 @@ spec: limits: memory: "128Mi" cpu: "100m" + args: ["--port", "{{port}}", "--cell", "{{cell}}", "--vtgate_port", "{{vtgate_port}}"] diff --git a/examples/kubernetes/guestbook-down.sh 
b/examples/kubernetes/guestbook-down.sh index 81cbe54dc52..045229dee75 100755 --- a/examples/kubernetes/guestbook-down.sh +++ b/examples/kubernetes/guestbook-down.sh @@ -2,8 +2,6 @@ # This is an example script that stops guestbook. -set -e - script_root=`dirname "${BASH_SOURCE}"` source $script_root/env.sh diff --git a/examples/kubernetes/guestbook-up.sh b/examples/kubernetes/guestbook-up.sh index b280764d5f9..e962ae766a6 100755 --- a/examples/kubernetes/guestbook-up.sh +++ b/examples/kubernetes/guestbook-up.sh @@ -4,11 +4,20 @@ set -e +port=${GUESTBOOK_PORT:-8080} +cell=${GUESTBOOK_CELL:-"test"} +vtgate_port=${VTGATE_PORT:-15991} + script_root=`dirname "${BASH_SOURCE}"` source $script_root/env.sh echo "Creating guestbook service..." $KUBECTL create --namespace=$VITESS_NAME -f guestbook-service.yaml +sed_script="" +for var in port cell vtgate_port; do + sed_script+="s,{{$var}},${!var},g;" +done + echo "Creating guestbook replicationcontroller..." -$KUBECTL create --namespace=$VITESS_NAME -f guestbook-controller.yaml +sed -e "$sed_script" guestbook-controller-template.yaml | $KUBECTL create --namespace=$VITESS_NAME -f - diff --git a/examples/kubernetes/guestbook/main.py b/examples/kubernetes/guestbook/main.py index 8038b58ceee..b9b3daaef6d 100644 --- a/examples/kubernetes/guestbook/main.py +++ b/examples/kubernetes/guestbook/main.py @@ -1,5 +1,6 @@ """Main python file.""" +import argparse import os import time import json @@ -77,13 +78,21 @@ def add_entry(page, value): def env(): return json.dumps(dict(os.environ)) + if __name__ == '__main__': - timeout = 10 # connect timeout in seconds + parser = argparse.ArgumentParser(description='Run guestbook app') + parser.add_argument('--port', help='Port', default=8080, type=int) + parser.add_argument('--cell', help='Cell', default='test', type=str) + parser.add_argument( + '--timeout', help='Connect timeout (s)', default=10, type=int) + parser.add_argument( + '--vtgate_port', help='Vtgate Port', default=15991, type=int) + 
guestbook_args = parser.parse_args() # Get vtgate service address from Kubernetes DNS. - addr = 'vtgate-test:15991' + addr = 'vtgate-%s:%d' % (guestbook_args.cell, guestbook_args.vtgate_port) # Connect to vtgate. - conn = vtgate_client.connect('grpc', addr, timeout) + conn = vtgate_client.connect('grpc', addr, guestbook_args.timeout) - app.run(host='0.0.0.0', port=8080, debug=True) + app.run(host='0.0.0.0', port=guestbook_args.port, debug=True) From 29e535634200eacde12a675f7bab16e26487cb1b Mon Sep 17 00:00:00 2001 From: Michael Berlin Date: Tue, 28 Feb 2017 16:52:22 -0800 Subject: [PATCH 055/108] vtgate/buffer: Remove redundant "reason" from constant name. --- go/vt/vtgate/buffer/buffer_test.go | 2 +- go/vt/vtgate/buffer/shard_buffer.go | 4 ++-- go/vt/vtgate/buffer/variables.go | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go/vt/vtgate/buffer/buffer_test.go b/go/vt/vtgate/buffer/buffer_test.go index b52c4b4e4e5..de5d0a55d67 100644 --- a/go/vt/vtgate/buffer/buffer_test.go +++ b/go/vt/vtgate/buffer/buffer_test.go @@ -33,7 +33,7 @@ var ( statsKeyJoined = fmt.Sprintf("%s.%s", keyspace, shard) - statsKeyJoinedFailoverEndDetected = statsKeyJoined + "." + string(stopReasonFailoverEndDetected) + statsKeyJoinedFailoverEndDetected = statsKeyJoined + "." + string(stopFailoverEndDetected) statsKeyJoinedWindowExceeded = statsKeyJoined + "." + string(evictedWindowExceeded) diff --git a/go/vt/vtgate/buffer/shard_buffer.go b/go/vt/vtgate/buffer/shard_buffer.go index 199b4dcf97d..93d7be42997 100644 --- a/go/vt/vtgate/buffer/shard_buffer.go +++ b/go/vt/vtgate/buffer/shard_buffer.go @@ -459,14 +459,14 @@ func (sb *shardBuffer) recordExternallyReparentedTimestamp(timestamp int64) { // First non-zero value after startup. Remember it. 
sb.externallyReparentedAfterStart = timestamp } - sb.stopBufferingLocked(stopReasonFailoverEndDetected, "failover end detected") + sb.stopBufferingLocked(stopFailoverEndDetected, "failover end detected") } func (sb *shardBuffer) stopBufferingDueToMaxDuration() { sb.mu.Lock() defer sb.mu.Unlock() - sb.stopBufferingLocked(stopReasonMaxFailoverDurationExceeded, + sb.stopBufferingLocked(stopMaxFailoverDurationExceeded, fmt.Sprintf("stopping buffering because failover did not finish in time (%v)", *maxFailoverDuration)) } diff --git a/go/vt/vtgate/buffer/variables.go b/go/vt/vtgate/buffer/variables.go index b998f96975c..92771217776 100644 --- a/go/vt/vtgate/buffer/variables.go +++ b/go/vt/vtgate/buffer/variables.go @@ -58,9 +58,9 @@ var ( type stopReason string const ( - stopReasonFailoverEndDetected stopReason = "NewMasterSeen" - stopReasonMaxFailoverDurationExceeded = "MaxDurationExceeded" - stopShutdown = "Shutdown" + stopFailoverEndDetected stopReason = "NewMasterSeen" + stopMaxFailoverDurationExceeded = "MaxDurationExceeded" + stopShutdown = "Shutdown" ) // evictedReason is used in "requestsEvicted" as "Reason" label. From 9245b2bd0e5e913fa223d7ecaab397a92835b284 Mon Sep 17 00:00:00 2001 From: Michael Berlin Date: Tue, 28 Feb 2017 16:58:25 -0800 Subject: [PATCH 056/108] vtgate/buffer: Always initialize all stat variables. If we don't do this, monitoring frameworks may not correctly calculate rates for the first failover of the shard because they see a transition from "no value for this label set (NaN)" to "a value". 
--- go/vt/vtgate/buffer/shard_buffer.go | 5 ++- go/vt/vtgate/buffer/variables.go | 36 +++++++++++++++ go/vt/vtgate/buffer/variables_test.go | 64 +++++++++++++++++++++++++++ 3 files changed, 104 insertions(+), 1 deletion(-) diff --git a/go/vt/vtgate/buffer/shard_buffer.go b/go/vt/vtgate/buffer/shard_buffer.go index 93d7be42997..51348cc3ba9 100644 --- a/go/vt/vtgate/buffer/shard_buffer.go +++ b/go/vt/vtgate/buffer/shard_buffer.go @@ -97,12 +97,15 @@ type entry struct { } func newShardBuffer(mode bufferMode, keyspace, shard string, bufferSizeSema *sync2.Semaphore) *shardBuffer { + statsKey := []string{keyspace, shard} + initVariablesForShard(statsKey) + return &shardBuffer{ mode: mode, keyspace: keyspace, shard: shard, bufferSizeSema: bufferSizeSema, - statsKey: []string{keyspace, shard}, + statsKey: statsKey, statsKeyJoined: fmt.Sprintf("%s.%s", keyspace, shard), logTooRecent: logutil.NewThrottledLogger(fmt.Sprintf("FailoverTooRecent-%v", topoproto.KeyspaceShardString(keyspace, shard)), 5*time.Second), state: stateIdle, diff --git a/go/vt/vtgate/buffer/variables.go b/go/vt/vtgate/buffer/variables.go index 92771217776..5a4c9ecfc9c 100644 --- a/go/vt/vtgate/buffer/variables.go +++ b/go/vt/vtgate/buffer/variables.go @@ -57,6 +57,8 @@ var ( // stopReason is used in "stopsByReason" as "Reason" label. type stopReason string +var stopReasons = []stopReason{stopFailoverEndDetected, stopMaxFailoverDurationExceeded, stopShutdown} + const ( stopFailoverEndDetected stopReason = "NewMasterSeen" stopMaxFailoverDurationExceeded = "MaxDurationExceeded" @@ -66,6 +68,8 @@ const ( // evictedReason is used in "requestsEvicted" as "Reason" label. type evictedReason string +var evictReasons = []evictedReason{evictedContextDone, evictedBufferFull, evictedWindowExceeded} + const ( evictedContextDone evictedReason = "ContextDone" evictedBufferFull = "BufferFull" @@ -75,6 +79,8 @@ const ( // skippedReason is used in "requestsSkipped" as "Reason" label. 
type skippedReason string +var skippedReasons = []skippedReason{skippedBufferFull, skippedDisabled, skippedShutdown, skippedLastReparentTooRecent, skippedLastFailoverTooRecent} + const ( // skippedBufferFull occurs when all slots in the buffer are occupied by one // or more concurrent failovers. Unlike "evictedBufferFull", no request could @@ -88,6 +94,36 @@ const ( skippedLastFailoverTooRecent = "LastFailoverTooRecent" ) +// initVariablesForShard is used to initialize all shard variables to 0. +// If we don't do this, monitoring frameworks may not correctly calculate rates +// for the first failover of the shard because they see a transition from +// "no value for this label set (NaN)" to "a value". +// "statsKey" should have two members for keyspace and shard. +func initVariablesForShard(statsKey []string) { + starts.Set(statsKey, 0) + for _, reason := range stopReasons { + key := append(statsKey, string(reason)) + stops.Set(key, 0) + } + + failoverDurationSumMs.Set(statsKey, 0) + + utilizationSum.Set(statsKey, 0) + utilizationDryRunSum.Set(statsKey, 0) + + requestsBuffered.Set(statsKey, 0) + requestsBufferedDryRun.Set(statsKey, 0) + requestsDrained.Set(statsKey, 0) + for _, reason := range evictReasons { + key := append(statsKey, string(reason)) + requestsEvicted.Set(key, 0) + } + for _, reason := range skippedReasons { + key := append(statsKey, string(reason)) + requestsSkipped.Set(key, 0) + } +} + // TODO(mberlin): Remove the gauge values below once we store them // internally and have a /bufferz page where we can show this. 
var ( diff --git a/go/vt/vtgate/buffer/variables_test.go b/go/vt/vtgate/buffer/variables_test.go index 56152257a0d..d9c04f61f17 100644 --- a/go/vt/vtgate/buffer/variables_test.go +++ b/go/vt/vtgate/buffer/variables_test.go @@ -1,8 +1,13 @@ package buffer import ( + "context" "flag" + "fmt" + "strings" "testing" + + "github.com/youtube/vitess/go/stats" ) func TestVariables(t *testing.T) { @@ -16,3 +21,62 @@ func TestVariables(t *testing.T) { t.Fatalf("BufferSize variable not set during initilization: got = %v, want = %v", got, want) } } + +func TestVariablesAreInitialized(t *testing.T) { + // Create a new buffer and make a call which will create the shardBuffer object. + // After that, the variables should be initialized for that shard. + b := New() + _, err := b.WaitForFailoverEnd(context.Background(), "init_test", "0", nil /* err */) + if err != nil { + t.Fatalf("buffer should just passthrough and not return an error: %v", err) + } + + statsKey := []string{"init_test", "0"} + type testCase struct { + desc string + counter *stats.MultiCounters + statsKey []string + } + testCases := []testCase{ + {"starts", starts, statsKey}, + {"failoverDurationSumMs", failoverDurationSumMs, statsKey}, + {"utilizationSum", utilizationSum, statsKey}, + {"utilizationDryRunSum", utilizationDryRunSum, statsKey}, + {"requestsBuffered", requestsBuffered, statsKey}, + {"requestsBufferedDryRun", requestsBufferedDryRun, statsKey}, + {"requestsDrained", requestsDrained, statsKey}, + } + for _, r := range stopReasons { + testCases = append(testCases, testCase{"stops", stops, append(statsKey, string(r))}) + } + for _, r := range evictReasons { + testCases = append(testCases, testCase{"evicted", requestsEvicted, append(statsKey, string(r))}) + } + for _, r := range skippedReasons { + testCases = append(testCases, testCase{"skipped", requestsSkipped, append(statsKey, string(r))}) + } + + for _, tc := range testCases { + wantValue := 0 + if len(tc.statsKey) == 3 && tc.statsKey[2] == 
string(skippedDisabled) { + // The request passed through above was registered as skipped. + wantValue = 1 + } + if err := checkEntry(tc.counter, tc.statsKey, wantValue); err != nil { + t.Fatalf("variable: %v not correctly initialized: %v", tc.desc, err) + } + } +} + +func checkEntry(counters *stats.MultiCounters, statsKey []string, want int) error { + name := strings.Join(statsKey, ".") + got, ok := counters.Counts()[name] + if !ok { + return fmt.Errorf("no entry for: %v", name) + } + if got != int64(want) { + return fmt.Errorf("wrong value for entry: %v got = %v, want = %v", name, got, want) + } + + return nil +} From ffe34d096366f69831a429a064f295c7404a63ba Mon Sep 17 00:00:00 2001 From: thompsonja Date: Tue, 28 Feb 2017 17:11:35 -0800 Subject: [PATCH 057/108] Sandbox reliability updates and cleanup. (#2604) * Sandbox reliability updates and cleanup. * Address comments. * Address more comments. --- test/cluster/k8s_environment.py | 28 +++-------- test/cluster/sandbox/gke.py | 17 +++++-- test/cluster/sandbox/initial_reparent.py | 16 ++++-- test/cluster/sandbox/kubernetes_components.py | 50 +++++++++++++------ .../sandbox/vitess_kubernetes_sandbox.py | 39 ++++++++------- test/cluster/sandbox/wait_for_mysql.py | 18 +++++-- 6 files changed, 104 insertions(+), 64 deletions(-) diff --git a/test/cluster/k8s_environment.py b/test/cluster/k8s_environment.py index a240883a27b..9e7441e334e 100644 --- a/test/cluster/k8s_environment.py +++ b/test/cluster/k8s_environment.py @@ -8,6 +8,7 @@ import tempfile import time +from sandbox import kubernetes_components from vtproto import topodata_pb2 from vtdb import vtgate_client import base_environment @@ -30,21 +31,9 @@ def use_named(self, instance_name): 'kubectl not found, please install by visiting kubernetes.io or ' 'running gcloud components update kubectl if using compute engine.') - get_address_template = ( - '{{if ge (len .status.loadBalancer) 1}}' - '{{index (index .status.loadBalancer.ingress 0) "ip"}}' - '{{end}}') - - 
get_address_params = ['kubectl', 'get', '-o', 'template', '--template', - get_address_template, 'service', '--namespace', - instance_name] - - start_time = time.time() - vtctld_addr = '' - while time.time() - start_time < 60 and not vtctld_addr: - vtctld_addr = subprocess.check_output( - get_address_params + ['vtctld'], stderr=subprocess.STDOUT) - self.vtctl_addr = '%s:15999' % vtctld_addr + vtctld_ip = kubernetes_components.get_forwarded_ip( + 'vtctld', instance_name) + self.vtctl_addr = '%s:15999' % vtctld_ip self.vtctl_helper = vtctl_helper.VtctlHelper('grpc', self.vtctl_addr) self.cluster_name = instance_name @@ -101,14 +90,11 @@ def use_named(self, instance_name): 'rdonly': int(self.rdonly_instances[index]) } - start_time = time.time() self.vtgate_addrs = {} for cell in self.cells: - vtgate_addr = '' - while time.time() - start_time < 60 and not vtgate_addr: - vtgate_addr = subprocess.check_output( - get_address_params + ['vtgate-%s' % cell], stderr=subprocess.STDOUT) - self.vtgate_addrs[cell] = '%s:15991' % vtgate_addr + vtgate_ip = kubernetes_components.get_forwarded_ip( + 'vtgate-%s' % cell, instance_name) + self.vtgate_addrs[cell] = '%s:15991' % vtgate_ip super(K8sEnvironment, self).use_named(instance_name) def create(self, **kwargs): diff --git a/test/cluster/sandbox/gke.py b/test/cluster/sandbox/gke.py index 4331570c3cd..05673eee355 100755 --- a/test/cluster/sandbox/gke.py +++ b/test/cluster/sandbox/gke.py @@ -5,6 +5,7 @@ import os import subprocess +import sandbox import sandlet @@ -19,14 +20,22 @@ def __init__(self, params): self.params = params def start(self): + """Start the GKE cluster.""" zone = self.params.get('gke_zone', self._DEFAULT_ZONE) machine_type = self.params.get('machine_type', self._DEFAULT_MACHINE_TYPE) node_count = str(self.params.get('node_count', self._DEFAULT_NODE_COUNT)) subprocess.call(['gcloud', 'config', 'set', 'compute/zone', zone]) - subprocess.call( - ['gcloud', 'container', 'clusters', 'create', self.params['name'], - 
'--machine-type', machine_type, '--num-nodes', node_count, '--scopes', - 'storage-rw']) + cluster_create_args = [ + 'gcloud', 'container', 'clusters', 'create', self.params['name'], + '--machine-type', machine_type, '--num-nodes', node_count, '--scopes', + 'storage-rw'] + if 'cluster_version' in self.params: + cluster_create_args += [ + '--cluster-version=%s' % self.params['cluster_version']] + try: + subprocess.check_call(cluster_create_args) + except subprocess.CalledProcessError as e: + raise sandbox.SandboxError('Failed to create GKE cluster: %s', e.output) def stop(self): zone = self.params.get('gke_zone', self._DEFAULT_ZONE) diff --git a/test/cluster/sandbox/initial_reparent.py b/test/cluster/sandbox/initial_reparent.py index 0e6528fd317..532176672db 100755 --- a/test/cluster/sandbox/initial_reparent.py +++ b/test/cluster/sandbox/initial_reparent.py @@ -4,6 +4,7 @@ import json import logging import optparse +import time from vtproto import topodata_pb2 from vttest import sharding_utils import sandbox_utils @@ -18,7 +19,7 @@ def is_master(tablet, namespace): return True -def initial_reparent(keyspace, master_cell, num_shards, namespace): +def initial_reparent(keyspace, master_cell, num_shards, namespace, timeout_s): """Performs the first reparent.""" successfully_reparented = [] master_tablets = {} @@ -36,7 +37,8 @@ def initial_reparent(keyspace, master_cell, num_shards, namespace): if potential_masters: master_tablets[shard_name] = potential_masters[0] - while len(successfully_reparented) < num_shards: + start_time = time.time() + while time.time() - start_time < timeout_s: for shard_name in sharding_utils.get_shard_names(num_shards): shard_name = sandbox_utils.fix_shard_name(shard_name) master_tablet_id = master_tablets[shard_name] @@ -51,7 +53,10 @@ def initial_reparent(keyspace, master_cell, num_shards, namespace): vtctl_sandbox.execute_vtctl_command( ['InitShardMaster', '-force', '%s/%s' % (keyspace, shard_name), master_tablet_id], namespace=namespace, 
timeout_s=5) - logging.info('Done with initial reparent.') + if len(successfully_reparented) == num_shards: + logging.info('Done with initial reparent.') + return + logging.fatal('Timed out waiting for initial reparent.') def main(): @@ -63,11 +68,14 @@ def main(): parser.add_option('-m', '--master_cell', help='Master cell') parser.add_option('-s', '--shard_count', help='Number of shards', default=2, type=int) + parser.add_option('-t', '--timeout', help='Reparent timeout (s)', default=300, + type=int) logging.getLogger().setLevel(logging.INFO) options, _ = parser.parse_args() initial_reparent(options.keyspace, options.master_cell, - options.shard_count, options.namespace) + options.shard_count, options.namespace, + options.timeout) if __name__ == '__main__': diff --git a/test/cluster/sandbox/kubernetes_components.py b/test/cluster/sandbox/kubernetes_components.py index dd9602b6d61..3d5ecc8b142 100755 --- a/test/cluster/sandbox/kubernetes_components.py +++ b/test/cluster/sandbox/kubernetes_components.py @@ -6,6 +6,7 @@ import os import re import subprocess +import tempfile import time import sandbox @@ -32,15 +33,25 @@ class HelmComponent(sandlet.SandletComponent): def __init__(self, name, sandbox_name, helm_config): super(HelmComponent, self).__init__(name, sandbox_name) self.helm_config = helm_config + try: + subprocess.check_output(['helm'], stderr=subprocess.STDOUT) + except OSError: + raise sandbox.SandboxError( + 'Could not find helm binary. Please visit ' + 'https://github.com/kubernetes/helm to download helm.') def start(self): logging.info('Initializing helm.') + try: + subprocess.check_output(['helm', 'init'], stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + raise sandbox.SandboxError('Failed to initialize helm: %s', e.output) + + + # helm init on a fresh cluster takes a while to be ready. + # Wait until 'helm list' returns cleanly. 
with open(os.devnull, 'w') as devnull: - subprocess.call(['helm', 'init'], stdout=devnull) start_time = time.time() - - # helm init on a fresh cluster takes a while to be ready. - # Wait until 'helm list' returns cleanly. while time.time() - start_time < 120: try: subprocess.check_call(['helm', 'list'], stdout=devnull, @@ -52,12 +63,17 @@ def start(self): else: raise sandbox.SandboxError( 'Timed out waiting for helm to become ready.') - logging.info('Installing helm.') - subprocess.call( + + logging.info('Installing helm.') + try: + subprocess.check_output( ['helm', 'install', os.path.join(os.environ['VTTOP'], 'helm/vitess'), '-n', self.sandbox_name, '--namespace', self.sandbox_name, - '--replace', '--values', self.helm_config], stdout=devnull) - logging.info('Finished installing helm.') + '--replace', '--values', self.helm_config], + stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + raise sandbox.SandboxError('Failed to install helm: %s' % e.output) + logging.info('Finished installing helm.') def stop(self): subprocess.call(['helm', 'delete', self.sandbox_name, '--purge']) @@ -83,17 +99,23 @@ def start(self): with open(self.template_file, 'r') as template_file: template = template_file.read() for name, value in self.template_params.items(): - template = re.sub('{{%s}}' % name, value, template) - os.system('echo "%s" | kubectl create -f - --namespace %s' % ( - template, self.sandbox_name)) + template = re.sub('{{%s}}' % name, str(value), template) + with tempfile.NamedTemporaryFile() as f: + f.write(template) + f.flush() + os.system('kubectl create --namespace %s -f %s' % ( + self.sandbox_name, f.name)) def stop(self): with open(self.template_file, 'r') as template_file: template = template_file.read() for name, value in self.template_params.items(): - template = re.sub('{{%s}}' % name, value, template) - os.system('echo "%s" | kubectl delete -f - --namespace %s' % ( - template, self.sandbox_name)) + template = re.sub('{{%s}}' % name, 
str(value), template) + with tempfile.NamedTemporaryFile() as f: + f.write(template) + f.flush() + os.system('kubectl delete --namespace %s -f %s' % ( + self.sandbox_name, f.name)) super(KubernetesResource, self).stop() diff --git a/test/cluster/sandbox/vitess_kubernetes_sandbox.py b/test/cluster/sandbox/vitess_kubernetes_sandbox.py index b3862eb257a..d3eff9092a1 100755 --- a/test/cluster/sandbox/vitess_kubernetes_sandbox.py +++ b/test/cluster/sandbox/vitess_kubernetes_sandbox.py @@ -62,13 +62,12 @@ def generate_guestbook_sandlet(self): guestbook_sandlet.components.add_component( kubernetes_components.KubernetesResource( 'guestbook-service', self.name, - os.path.join(template_dir, 'guestbook-service.yaml'), - namespace=self.name)) + os.path.join(template_dir, 'guestbook-service.yaml'))) guestbook_sandlet.components.add_component( kubernetes_components.KubernetesResource( 'guestbook-controller', self.name, - os.path.join(template_dir, 'guestbook-controller.yaml'), - namespace=self.name)) + os.path.join(template_dir, 'guestbook-controller-template.yaml'), + port=8080, cell=self.app_options.cells[0], vtgate_port=15991)) self.sandlets.add_component(guestbook_sandlet) def _generate_helm_keyspaces(self): @@ -110,7 +109,9 @@ def _generate_helm_keyspaces(self): shard['tablets'].append(dict( type='rdonly', uidBase=uid_base + ks['replica_count'], - replicas=ks['rdonly_count'])) + vttablet=dict( + replicas=ks['rdonly_count'], + ))) keyspace['shards'].append(shard) return keyspaces @@ -144,6 +145,7 @@ def _generate_helm_values_config(self): cpu=self.app_options.mysql_cpu, ), ), + controllerType='None', ), vtgate=dict( serviceType='LoadBalancer', # Allows port forwarding. 
@@ -199,6 +201,7 @@ def _generate_helm_values_config(self): with tempfile.NamedTemporaryFile(delete=False) as f: f.write(yaml.dump(yaml_values, default_flow_style=False)) yaml_filename = f.name + logging.info('Helm config generated at %s', yaml_filename) return yaml_filename def generate_helm_sandlet(self): @@ -216,7 +219,9 @@ def generate_helm_sandlet(self): wait_for_mysql_subprocess = subprocess_component.Subprocess( 'wait_for_mysql_%s' % name, self.name, 'wait_for_mysql.py', self.log_dir, namespace=self.name, - cells=','.join(self.app_options.cells)) + cells=','.join(self.app_options.cells), + tablet_count=(shard_count * ( + keyspace['replica_count'] + keyspace['rdonly_count']))) wait_for_mysql_subprocess.dependencies = ['helm'] initial_reparent_subprocess = subprocess_component.Subprocess( 'initial_reparent_%s' % name, self.name, @@ -244,23 +249,21 @@ def generate_from_config(self): def print_banner(self): logging.info('Fetching forwarded ports.') - vtctld_addr = '' + banner = '\nVitess Sandbox Info:\n' vtctld_port = self.app_options.port_forwarding['vtctld'] vtgate_port = self.app_options.port_forwarding['vtgate'] - vtgate_addrs = [] - vtctld_addr = kubernetes_components.get_forwarded_ip( + vtctld_ip = kubernetes_components.get_forwarded_ip( 'vtctld', self.name) + banner += ' vtctld: http://%s:%d\n' % (vtctld_ip, vtctld_port) for cell in self.app_options.cells: - vtgate_addr = kubernetes_components.get_forwarded_ip( + vtgate_ip = kubernetes_components.get_forwarded_ip( 'vtgate-%s' % cell, self.name) - vtgate_addrs.append('%s %s:%d' % (cell, vtgate_addr, vtgate_port)) - banner = """ - Vitess Sandbox Info: - vtctld: %s:%d - vtgate: %s - logs dir: %s""" % ( - vtctld_addr, vtctld_port, ', '.join(vtgate_addrs), - self.log_dir) + banner += ' vtgate-%s: http://%s:%d\n' % (cell, vtgate_ip, vtgate_port) + if 'guestbook' in self.app_options.port_forwarding: + guestbook_ip = kubernetes_components.get_forwarded_ip( + 'guestbook', self.name) + banner += ' guestbook: 
http://%s:80\n' % guestbook_ip + banner += ' logs dir: %s\n' % self.log_dir logging.info(banner) diff --git a/test/cluster/sandbox/wait_for_mysql.py b/test/cluster/sandbox/wait_for_mysql.py index d4089a7e369..041640c83d5 100755 --- a/test/cluster/sandbox/wait_for_mysql.py +++ b/test/cluster/sandbox/wait_for_mysql.py @@ -3,6 +3,7 @@ import logging import optparse +import re import time import vtctl_sandbox @@ -16,7 +17,8 @@ def get_all_tablets(cells, namespace): ['ListAllTablets', cell], namespace=namespace)[0].split('\n') for t in cell_tablets: tablets.append(t.split(' ')[0]) - tablets = filter(None, tablets) + r = re.compile('.*-.*') + tablets = filter(r.match, tablets) logging.info('Tablets: %s.', ', '.join(tablets)) return tablets @@ -26,6 +28,8 @@ def main(): parser.add_option('-n', '--namespace', help='Kubernetes namespace', default='vitess') parser.add_option('-c', '--cells', help='Comma separated list of cells') + parser.add_option('-t', '--tablet_count', + help='Total number of expected tablets', type=int) logging.getLogger().setLevel(logging.INFO) options, _ = parser.parse_args() @@ -34,9 +38,17 @@ def main(): start_time = time.time() good_tablets = [] + tablets = [] + + # Do this in a loop as the output of ListAllTablets may not be parseable + # until all tablets have been started. 
+ while time.time() - start_time < 300 and len(tablets) < options.tablet_count: + tablets = get_all_tablets(options.cells, options.namespace) + logging.info('Expecting %d tablets, found %d tablets', + options.tablet_count, len(tablets)) + + start_time = time.time() while time.time() - start_time < 300: - if not good_tablets: - tablets = get_all_tablets(options.cells, options.namespace) for tablet in [t for t in tablets if t not in good_tablets]: _, success = vtctl_sandbox.execute_vtctl_command( ['ExecuteFetchAsDba', tablet, 'show databases'], From fe7261d0912028405276f266aef0cc6364e255ad Mon Sep 17 00:00:00 2001 From: Ashudeep Sharma Date: Wed, 1 Mar 2017 20:21:37 +0530 Subject: [PATCH 058/108] Updating the example with New Vschema format --- docs/user-guide/horizontal-sharding.html | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/user-guide/horizontal-sharding.html b/docs/user-guide/horizontal-sharding.html index bf0462a9377..34925a97c2b 100644 --- a/docs/user-guide/horizontal-sharding.html +++ b/docs/user-guide/horizontal-sharding.html @@ -319,18 +319,18 @@

    Configure sharding information

    The first step is to tell Vitess how we want to partition the data. We do this by providing a VSchema definition as follows:

    {
    -  "Sharded": true,
    -  "Vindexes": {
    +  "sharded": true,
    +  "vindexes": {
         "hash": {
    -      "Type": "hash"
    +      "type": "hash"
         }
       },
    -  "Tables": {
    +  "tables": {
         "messages": {
    -      "ColVindexes": [
    +      "column_vindexes": [
             {
    -          "Col": "page",
    -          "Name": "hash"
    +          "column": "page",
    +          "name": "hash"
             }
           ]
         }
    
    From 9d63dc408e15275ebad005cfaf70dc0c334830bd Mon Sep 17 00:00:00 2001
    From: Ashudeep Sharma 
    Date: Wed, 1 Mar 2017 21:01:03 +0530
    Subject: [PATCH 059/108] Making VSchema changes in sharding-kubernetes.html
    
    ---
     docs/user-guide/sharding-kubernetes.html | 14 +++++++-------
     1 file changed, 7 insertions(+), 7 deletions(-)
    
    diff --git a/docs/user-guide/sharding-kubernetes.html b/docs/user-guide/sharding-kubernetes.html
    index 330d78cc6b9..5acafa7c412 100644
    --- a/docs/user-guide/sharding-kubernetes.html
    +++ b/docs/user-guide/sharding-kubernetes.html
    @@ -315,18 +315,18 @@ 

    Configure sharding information

    The first step is to tell Vitess how we want to partition the data. We do this by providing a VSchema definition as follows:

    {
    -  "Sharded": true,
    -  "Vindexes": {
    +  "sharded": true,
    +  "vindexes": {
         "hash": {
    -      "Type": "hash"
    +      "type": "hash"
         }
       },
    -  "Tables": {
    +  "tables": {
         "messages": {
    -      "ColVindexes": [
    +      "column_vindexes": [
             {
    -          "Col": "page",
    -          "Name": "hash"
    +          "column": "page",
    +          "name": "hash"
             }
           ]
         }
    
    From c97d97e6540fbbcb67fe4e53f216f227e13e8fba Mon Sep 17 00:00:00 2001
    From: thompsonja 
    Date: Wed, 1 Mar 2017 14:13:14 -0800
    Subject: [PATCH 060/108] Add keyspace parameter to guestbook app. (#2612)
    
    ---
     .../kubernetes/guestbook-controller-template.yaml     |  2 +-
     examples/kubernetes/guestbook-up.sh                   |  3 ++-
     examples/kubernetes/guestbook/main.py                 | 11 +++++++----
     test/cluster/sandbox/kubernetes_components.py         |  1 -
     test/cluster/sandbox/vitess_kubernetes_sandbox.py     |  3 ++-
     5 files changed, 12 insertions(+), 8 deletions(-)
    
    diff --git a/examples/kubernetes/guestbook-controller-template.yaml b/examples/kubernetes/guestbook-controller-template.yaml
    index 79f8a22b7a6..2897b3f5929 100644
    --- a/examples/kubernetes/guestbook-controller-template.yaml
    +++ b/examples/kubernetes/guestbook-controller-template.yaml
    @@ -26,4 +26,4 @@ spec:
                 limits:
                   memory: "128Mi"
                   cpu: "100m"
    -          args: ["--port", "{{port}}", "--cell", "{{cell}}", "--vtgate_port", "{{vtgate_port}}"]
    +          args: ["--port", "{{port}}", "--cell", "{{cell}}", "--keyspace", "{{keyspace}}", "--vtgate_port", "{{vtgate_port}}"]
    diff --git a/examples/kubernetes/guestbook-up.sh b/examples/kubernetes/guestbook-up.sh
    index e962ae766a6..3f77637ce43 100755
    --- a/examples/kubernetes/guestbook-up.sh
    +++ b/examples/kubernetes/guestbook-up.sh
    @@ -6,6 +6,7 @@ set -e
     
     port=${GUESTBOOK_PORT:-8080}
     cell=${GUESTBOOK_CELL:-"test"}
    +keyspace=${GUESTBOOK_KEYSPACE:-"test_keyspace"}
     vtgate_port=${VTGATE_PORT:-15991}
     
     script_root=`dirname "${BASH_SOURCE}"`
    @@ -15,7 +16,7 @@ echo "Creating guestbook service..."
     $KUBECTL create --namespace=$VITESS_NAME -f guestbook-service.yaml
     
     sed_script=""
    -for var in port cell vtgate_port; do
    +for var in port cell keyspace vtgate_port; do
       sed_script+="s,{{$var}},${!var},g;"
     done
     
    diff --git a/examples/kubernetes/guestbook/main.py b/examples/kubernetes/guestbook/main.py
    index b9b3daaef6d..2ce83de2960 100644
    --- a/examples/kubernetes/guestbook/main.py
    +++ b/examples/kubernetes/guestbook/main.py
    @@ -16,6 +16,7 @@
     
     # conn is the connection to vtgate.
     conn = None
    +keyspace = None
     
     
     @app.route('/')
    @@ -32,8 +33,7 @@ def view(page):
     @app.route('/lrange/guestbook/')
     def list_guestbook(page):
       """Read the list from a replica."""
    -  cursor = conn.cursor(
    -      tablet_type='replica', keyspace='test_keyspace')
    +  cursor = conn.cursor(tablet_type='replica', keyspace=keyspace)
     
       cursor.execute(
           'SELECT message, time_created_ns FROM messages WHERE page=:page'
    @@ -48,8 +48,7 @@ def list_guestbook(page):
     @app.route('/rpush/guestbook//')
     def add_entry(page, value):
       """Insert a row on the master."""
    -  cursor = conn.cursor(
    -      tablet_type='master', keyspace='test_keyspace', writable=True)
    +  cursor = conn.cursor(tablet_type='master', keyspace=keyspace, writable=True)
     
       cursor.begin()
       cursor.execute(
    @@ -83,6 +82,8 @@ def env():
       parser = argparse.ArgumentParser(description='Run guestbook app')
       parser.add_argument('--port', help='Port', default=8080, type=int)
       parser.add_argument('--cell', help='Cell', default='test', type=str)
    +  parser.add_argument(
    +      '--keyspace', help='Keyspace', default='test_keyspace', type=str)
       parser.add_argument(
           '--timeout', help='Connect timeout (s)', default=10, type=int)
       parser.add_argument(
    @@ -95,4 +96,6 @@ def env():
       # Connect to vtgate.
       conn = vtgate_client.connect('grpc', addr, guestbook_args.timeout)
     
    +  keyspace = guestbook_args.keyspace
    +
       app.run(host='0.0.0.0', port=guestbook_args.port, debug=True)
    diff --git a/test/cluster/sandbox/kubernetes_components.py b/test/cluster/sandbox/kubernetes_components.py
    index 3d5ecc8b142..864943b230d 100755
    --- a/test/cluster/sandbox/kubernetes_components.py
    +++ b/test/cluster/sandbox/kubernetes_components.py
    @@ -47,7 +47,6 @@ def start(self):
         except subprocess.CalledProcessError as e:
           raise sandbox.SandboxError('Failed to initialize helm: %s', e.output)
     
    -
         # helm init on a fresh cluster takes a while to be ready.
         # Wait until 'helm list' returns cleanly.
         with open(os.devnull, 'w') as devnull:
    diff --git a/test/cluster/sandbox/vitess_kubernetes_sandbox.py b/test/cluster/sandbox/vitess_kubernetes_sandbox.py
    index d3eff9092a1..3cd61f1da5c 100755
    --- a/test/cluster/sandbox/vitess_kubernetes_sandbox.py
    +++ b/test/cluster/sandbox/vitess_kubernetes_sandbox.py
    @@ -67,7 +67,8 @@ def generate_guestbook_sandlet(self):
             kubernetes_components.KubernetesResource(
                 'guestbook-controller', self.name,
                 os.path.join(template_dir, 'guestbook-controller-template.yaml'),
    -            port=8080, cell=self.app_options.cells[0], vtgate_port=15991))
    +            port=8080, cell=self.app_options.cells[0], vtgate_port=15991,
    +            keyspace=self.app_options.keyspaces[0]['name']))
         self.sandlets.add_component(guestbook_sandlet)
     
       def _generate_helm_keyspaces(self):
    
    From 0585ada6fe2fa2d990ef49c9ec5ce07e96ef7cbe Mon Sep 17 00:00:00 2001
    From: Ashu 
    Date: Thu, 2 Mar 2017 03:43:25 +0530
    Subject: [PATCH 061/108] Updating the example with New Vschema format (#2610)
    
    * Updating the example with New Vschema format
    
    * Making VSchema changes in sharding-kubernetes.html
    ---
     docs/user-guide/horizontal-sharding.html | 14 +++++++-------
     docs/user-guide/sharding-kubernetes.html | 14 +++++++-------
     2 files changed, 14 insertions(+), 14 deletions(-)
    
    diff --git a/docs/user-guide/horizontal-sharding.html b/docs/user-guide/horizontal-sharding.html
    index bf0462a9377..34925a97c2b 100644
    --- a/docs/user-guide/horizontal-sharding.html
    +++ b/docs/user-guide/horizontal-sharding.html
    @@ -319,18 +319,18 @@ 

    Configure sharding information

    The first step is to tell Vitess how we want to partition the data. We do this by providing a VSchema definition as follows:

    {
    -  "Sharded": true,
    -  "Vindexes": {
    +  "sharded": true,
    +  "vindexes": {
         "hash": {
    -      "Type": "hash"
    +      "type": "hash"
         }
       },
    -  "Tables": {
    +  "tables": {
         "messages": {
    -      "ColVindexes": [
    +      "column_vindexes": [
             {
    -          "Col": "page",
    -          "Name": "hash"
    +          "column": "page",
    +          "name": "hash"
             }
           ]
         }
    diff --git a/docs/user-guide/sharding-kubernetes.html b/docs/user-guide/sharding-kubernetes.html
    index 330d78cc6b9..5acafa7c412 100644
    --- a/docs/user-guide/sharding-kubernetes.html
    +++ b/docs/user-guide/sharding-kubernetes.html
    @@ -315,18 +315,18 @@ 

    Configure sharding information

    The first step is to tell Vitess how we want to partition the data. We do this by providing a VSchema definition as follows:

    {
    -  "Sharded": true,
    -  "Vindexes": {
    +  "sharded": true,
    +  "vindexes": {
         "hash": {
    -      "Type": "hash"
    +      "type": "hash"
         }
       },
    -  "Tables": {
    +  "tables": {
         "messages": {
    -      "ColVindexes": [
    +      "column_vindexes": [
             {
    -          "Col": "page",
    -          "Name": "hash"
    +          "column": "page",
    +          "name": "hash"
             }
           ]
         }
    
    From 7c5b91aab2e384569a95252d833fac4dbcdfd90b Mon Sep 17 00:00:00 2001
    From: Ashudeep Sharma 
    Date: Thu, 2 Mar 2017 07:51:13 +0530
    Subject: [PATCH 062/108] Making the Vschema Changes to the markdown files
    
    ---
     doc/HorizontalReshardingGuide.md | 14 +++++++-------
     doc/ShardingKubernetes.md        | 14 +++++++-------
     2 files changed, 14 insertions(+), 14 deletions(-)
    
    diff --git a/doc/HorizontalReshardingGuide.md b/doc/HorizontalReshardingGuide.md
    index 152d510c419..3d6e8d1d2ad 100644
    --- a/doc/HorizontalReshardingGuide.md
    +++ b/doc/HorizontalReshardingGuide.md
    @@ -36,18 +36,18 @@ We do this by providing a VSchema definition as follows:
     
     ``` json
     {
    -  "Sharded": true,
    -  "Vindexes": {
    +  "sharded": true,
    +  "vindexes": {
         "hash": {
    -      "Type": "hash"
    +      "type": "hash"
         }
       },
    -  "Tables": {
    +  "tables": {
         "messages": {
    -      "ColVindexes": [
    +      "column_vindexes": [
             {
    -          "Col": "page",
    -          "Name": "hash"
    +          "column": "page",
    +          "name": "hash"
             }
           ]
         }
    diff --git a/doc/ShardingKubernetes.md b/doc/ShardingKubernetes.md
    index a4625c5bf75..39787dd636b 100644
    --- a/doc/ShardingKubernetes.md
    +++ b/doc/ShardingKubernetes.md
    @@ -29,18 +29,18 @@ We do this by providing a VSchema definition as follows:
     
     ``` json
     {
    -  "Sharded": true,
    -  "Vindexes": {
    +  "sharded": true,
    +  "vindexes": {
         "hash": {
    -      "Type": "hash"
    +      "type": "hash"
         }
       },
    -  "Tables": {
    +  "tables": {
         "messages": {
    -      "ColVindexes": [
    +      "column_vindexes": [
             {
    -          "Col": "page",
    -          "Name": "hash"
    +          "column": "page",
    +          "name": "hash"
             }
           ]
         }
    
    From 7251e3b3ca58abf9b871a906986179f9c2cadbfe Mon Sep 17 00:00:00 2001
    From: Michael Berlin 
    Date: Wed, 1 Mar 2017 21:10:22 -0800
    Subject: [PATCH 063/108] publish site Wed Mar  1 21:10:22 PST 2017
    
    ---
     docs/sitemap.xml | 14 +++++++-------
     1 file changed, 7 insertions(+), 7 deletions(-)
    
    diff --git a/docs/sitemap.xml b/docs/sitemap.xml
    index fd41d21405a..66269a7b005 100644
    --- a/docs/sitemap.xml
    +++ b/docs/sitemap.xml
    @@ -22,25 +22,25 @@
         http://vitess.io/user-guide/horizontal-sharding.html
       
       
    -    http://vitess.io/overview/
    +    http://vitess.io/about/
       
       
    -    http://vitess.io/about/
    +    http://vitess.io/search/
       
       
    -    http://vitess.io/terms/
    +    http://vitess.io/overview/
       
       
    -    http://vitess.io/
    +    http://vitess.io/getting-started/
       
       
    -    http://vitess.io/search/
    +    http://vitess.io/contributing/
       
       
    -    http://vitess.io/getting-started/
    +    http://vitess.io/terms/
       
       
    -    http://vitess.io/contributing/
    +    http://vitess.io/
       
       
         http://vitess.io/user-guide/introduction.html
    
    From 0fdb0cb670ec8473b8000a3b4b8a12a987ed84af Mon Sep 17 00:00:00 2001
    From: c-wind 
    Date: Thu, 2 Mar 2017 16:55:23 +0800
    Subject: [PATCH 064/108] Makefile: fix docker build mysql56 bug
    
    ---
     Makefile | 2 +-
     1 file changed, 1 insertion(+), 1 deletion(-)
    
    diff --git a/Makefile b/Makefile
    index adfd7b46313..721075164a4 100644
    --- a/Makefile
    +++ b/Makefile
    @@ -176,7 +176,7 @@ docker_base:
     
     docker_base_mysql56:
     	chmod -R o=g *
    -	docker build -f Dockerfile.percona -t vitess/base:mysql56 .
    +	docker build -f Dockerfile.mysql56 -t vitess/base:mysql56 .
     
     docker_base_mariadb:
     	chmod -R o=g *
    
    From 70c68d67b6391368ea8c139e46840ae8bb88f6f0 Mon Sep 17 00:00:00 2001
    From: Bryan Beaudreault 
    Date: Thu, 2 Mar 2017 12:19:37 -0500
    Subject: [PATCH 065/108] Review comment fixes
    
    ---
     .../flipkart/vitess/jdbc/ConnectionProperties.java    |  7 +++++--
     .../com/flipkart/vitess/jdbc/FieldWithMetadata.java   |  4 ++--
     .../java/com/flipkart/vitess/util/StringUtils.java    | 11 -----------
     3 files changed, 7 insertions(+), 15 deletions(-)
    
    diff --git a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/ConnectionProperties.java b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/ConnectionProperties.java
    index 6e5592c71dd..110bf4abe2a 100644
    --- a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/ConnectionProperties.java
    +++ b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/ConnectionProperties.java
    @@ -124,9 +124,12 @@ private void postInitialization() {
             this.characterEncodingAsString = this.characterEncoding.getValueAsString();
         }
     
    +    /**
    +     * Attempt to use the encoding, and bail out if it can't be used
    +     * @throws SQLException if exception occurs while attempting to use the encoding
    +     */
         private void checkConfiguredEncodingSupport() throws SQLException {
             if (characterEncodingAsString != null) {
    -            // Attempt to use the encoding, and bail out if it can't be used
                 try {
                     String testString = "abc";
                     StringUtils.getBytes(testString, characterEncodingAsString);
    @@ -140,7 +143,7 @@ static DriverPropertyInfo[] exposeAsDriverPropertyInfo(Properties info, int slot
             return new ConnectionProperties().exposeAsDriverPropertyInfoInternal(info, slotsToReserve);
         }
     
    -    protected DriverPropertyInfo[] exposeAsDriverPropertyInfoInternal(Properties info, int slotsToReserve) throws SQLException {
    +    private DriverPropertyInfo[] exposeAsDriverPropertyInfoInternal(Properties info, int slotsToReserve) throws SQLException {
             initializeProperties(info);
             int numProperties = PROPERTY_LIST.size();
             int listSize = numProperties + slotsToReserve;
    diff --git a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/FieldWithMetadata.java b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/FieldWithMetadata.java
    index 79f4cb32944..808af359418 100644
    --- a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/FieldWithMetadata.java
    +++ b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/FieldWithMetadata.java
    @@ -422,7 +422,7 @@ public int getJavaType() {
             return javaType;
         }
     
    -    public Query.Type getVitessType() {
    +    private Query.Type getVitessType() {
             return vitessType;
         }
     
    @@ -430,7 +430,7 @@ public int getVitessTypeValue() {
             return field.getTypeValue();
         }
     
    -    public boolean isImplicitTemporaryTable() {
    +    boolean isImplicitTemporaryTable() {
             if (!connection.isIncludeAllFields()) {
                 return false;
             }
    diff --git a/java/jdbc/src/main/java/com/flipkart/vitess/util/StringUtils.java b/java/jdbc/src/main/java/com/flipkart/vitess/util/StringUtils.java
    index dae2ae4b425..2b0fc619854 100644
    --- a/java/jdbc/src/main/java/com/flipkart/vitess/util/StringUtils.java
    +++ b/java/jdbc/src/main/java/com/flipkart/vitess/util/StringUtils.java
    @@ -359,7 +359,6 @@ private static char getSuccessor(char c, int n) {
          */
         public static int findStartOfStatement(String sql) {
             int statementStartPos = 0;
    -
             if (StringUtils.startsWithIgnoreCaseAndWs(sql, "/*")) {
                 statementStartPos = sql.indexOf("*/");
     
    @@ -379,13 +378,11 @@ public static int findStartOfStatement(String sql) {
                     }
                 }
             }
    -
             return statementStartPos;
         }
     
         public static String toString(byte[] value, int offset, int length, String encoding) throws UnsupportedEncodingException {
             Charset cs = findCharset(encoding);
    -
             return cs.decode(ByteBuffer.wrap(value, offset, length)).toString();
         }
     
    @@ -403,7 +400,6 @@ public static String toString(byte[] value, int offset, int length) {
             } catch (UnsupportedEncodingException e) {
                 // can't happen, emulating new String(byte[])
             }
    -
             return null;
         }
     
    @@ -415,7 +411,6 @@ public static String toString(byte[] value) {
             } catch (UnsupportedEncodingException e) {
                 // can't happen, emulating new String(byte[])
             }
    -
             return null;
         }
     
    @@ -425,21 +420,17 @@ public static byte[] getBytes(String value, String encoding) throws UnsupportedE
     
         public static byte[] getBytes(String value, int offset, int length, String encoding) throws UnsupportedEncodingException {
             Charset cs = findCharset(encoding);
    -
             ByteBuffer buf = cs.encode(CharBuffer.wrap(value.toCharArray(), offset, length));
    -
             // can't simply .array() this to get the bytes especially with variable-length charsets the buffer is sometimes larger than the actual encoded data
             int encodedLen = buf.limit();
             byte[] asBytes = new byte[encodedLen];
             buf.get(asBytes, 0, encodedLen);
    -
             return asBytes;
         }
     
         private static Charset findCharset(String alias) throws UnsupportedEncodingException {
             try {
                 Charset cs = charsetsByAlias.get(alias);
    -
                 if (cs == null) {
                     cs = Charset.forName(alias);
                     Charset oldCs = charsetsByAlias.putIfAbsent(alias, cs);
    @@ -448,9 +439,7 @@ private static Charset findCharset(String alias) throws UnsupportedEncodingExcep
                         cs = oldCs;
                     }
                 }
    -
                 return cs;
    -
                 // We re-throw these runtimes for compatibility with java.io
             } catch (IllegalArgumentException iae) {
                 throw new UnsupportedEncodingException(alias);
    
    From 3937b0409ec06007636f03fa4dba042bc8071f6e Mon Sep 17 00:00:00 2001
    From: Michael Berlin 
    Date: Thu, 2 Mar 2017 10:34:28 -0800
    Subject: [PATCH 066/108] travis: Stop publishing coverage to coveralls.io.
    
    It has not been working for a while and I was not able to fix it.
    
    Additionally, it recently started to flake e.g. we saw this error:
    
    > Bad response status from coveralls: 422 - {"message":"Couldn't find a
    repository matching this job.","error":true}
    
    https://travis-ci.org/youtube/vitess/jobs/206464990
    
    Let's remove it and replace it with something else at a later point.
    ---
     .travis.yml         |  5 -----
     Makefile            |  5 -----
     bootstrap.sh        |  2 --
     test/config.json    | 11 -----------
     travis/goveralls.sh | 16 ----------------
     5 files changed, 39 deletions(-)
     delete mode 100755 travis/goveralls.sh
    
    diff --git a/.travis.yml b/.travis.yml
    index f2b8cf110a4..5d6a0a81457 100644
    --- a/.travis.yml
    +++ b/.travis.yml
    @@ -115,11 +115,6 @@ before_script:
     script:
       # Log GOMAXPROCS (should be 2 as of 07/2015).
       - go run travis/log_gomaxprocs.go
    -  # Shard 0: Run unit tests and post coverage to coveralls.io (will show up on pull request).
    -  - |
    -    if [[ $TEST_MATRIX = *"-shard 0"* ]]; then
    -      go run test.go $TEST_FLAGS goveralls
    -    fi
       - |
         if [[ $TRAVIS_PULL_REQUEST = "false" ]]; then
           go run test.go $TEST_FLAGS $TEST_MATRIX
    diff --git a/Makefile b/Makefile
    index adfd7b46313..a18f912de9c 100644
    --- a/Makefile
    +++ b/Makefile
    @@ -70,11 +70,6 @@ unit_test_cover: build
     unit_test_race: build
     	tools/unit_test_race.sh
     
    -# Run coverage and upload to coveralls.io.
    -# Requires the secret COVERALLS_TOKEN env variable to be set.
    -unit_test_goveralls: build
    -	travis/goveralls.sh
    -
     .ONESHELL:
     SHELL = /bin/bash
     
    diff --git a/bootstrap.sh b/bootstrap.sh
    index 1fcbb84a7f4..38212a4eb16 100755
    --- a/bootstrap.sh
    +++ b/bootstrap.sh
    @@ -137,8 +137,6 @@ gotools=" \
            honnef.co/go/unused/cmd/unused \
     "
     
    -# Tools for uploading code coverage to coveralls.io (used by Travis CI).
    -gotools+=" github.com/modocache/gover github.com/mattn/goveralls"
     # The cover tool needs to be installed into the Go toolchain, so it will fail
     # if Go is installed somewhere that requires root access.
     source tools/shell_functions.inc
    diff --git a/test/config.json b/test/config.json
    index c41f9939a53..b7c778584b3 100644
    --- a/test/config.json
    +++ b/test/config.json
    @@ -86,17 +86,6 @@
     			"RetryMax": 0,
     			"Tags": []
     		},
    -		"goveralls": {
    -			"File": "",
    -			"Args": [],
    -			"Command": [
    -				"travis/goveralls.sh"
    -			],
    -			"Manual": true,
    -			"Shard": 0,
    -			"RetryMax": 0,
    -			"Tags": []
    -		},
     		"initial_sharding": {
     			"File": "initial_sharding.py",
     			"Args": [],
    diff --git a/travis/goveralls.sh b/travis/goveralls.sh
    deleted file mode 100755
    index 3bbde61fac6..00000000000
    --- a/travis/goveralls.sh
    +++ /dev/null
    @@ -1,16 +0,0 @@
    -#!/bin/bash
    -
    -# Run coverage and upload to coveralls.io.
    -# Requires the secret COVERALLS_TOKEN env variable to be set.
    -
    -set -e
    -
    -go list -f '{{if len .TestGoFiles}}go test $(VT_GO_PARALLEL) -coverprofile={{.Dir}}/.coverprofile {{.ImportPath}}{{end    }}' ./go/... | xargs -i sh -c {} | tee unit_test_goveralls.txt
    -gover ./go/
    -# -shallow ensures that goveralls does not return with a failure \
    -# if Coveralls returns a 500 http error or higher (e.g. when the site is in read-only mode). \
    -goveralls -shallow -coverprofile=gover.coverprofile -service=travis-ci
    -echo
    -echo "Top 10 of Go packages with worst coverage:"
    -sort -n -k 5 unit_test_goveralls.txt | head -n10
    -[ -f unit_test_goveralls.txt ] && rm unit_test_goveralls.txt
    
    From a405923e2b82a2d6a9d93438a25feb5879189f6b Mon Sep 17 00:00:00 2001
    From: Erez Louidor 
    Date: Thu, 2 Mar 2017 11:10:38 -0800
    Subject: [PATCH 067/108] Removed the old unused query_splitter.go and related
     test
    
    ---
     go/vt/tabletserver/query_splitter.go      | 319 ------------
     go/vt/tabletserver/query_splitter_test.go | 568 ----------------------
     2 files changed, 887 deletions(-)
     delete mode 100644 go/vt/tabletserver/query_splitter.go
     delete mode 100644 go/vt/tabletserver/query_splitter_test.go
    
    diff --git a/go/vt/tabletserver/query_splitter.go b/go/vt/tabletserver/query_splitter.go
    deleted file mode 100644
    index 9105396f3ce..00000000000
    --- a/go/vt/tabletserver/query_splitter.go
    +++ /dev/null
    @@ -1,319 +0,0 @@
    -package tabletserver
    -
    -import (
    -	"encoding/binary"
    -	"fmt"
    -	"strconv"
    -
    -	"github.com/youtube/vitess/go/sqltypes"
    -	querypb "github.com/youtube/vitess/go/vt/proto/query"
    -	"github.com/youtube/vitess/go/vt/sqlparser"
    -	"github.com/youtube/vitess/go/vt/tabletserver/engines/schema"
    -	"github.com/youtube/vitess/go/vt/tabletserver/querytypes"
    -)
    -
    -// QuerySplitter splits a BoundQuery into equally sized smaller queries.
    -// QuerySplits are generated by adding primary key range clauses to the
    -// original query. Only a limited set of queries are supported, see
    -// QuerySplitter.validateQuery() for details. Also, the table must have at least
    -// one primary key and the leading primary key must be numeric, see
    -// QuerySplitter.splitBoundaries()
    -type QuerySplitter struct {
    -	sql           string
    -	bindVariables map[string]interface{}
    -	splitCount    int64
    -	se            *schema.Engine
    -	sel           *sqlparser.Select
    -	tableName     sqlparser.TableIdent
    -	splitColumn   sqlparser.ColIdent
    -	rowCount      int64
    -}
    -
    -const (
    -	startBindVarName = "_splitquery_start"
    -	endBindVarName   = "_splitquery_end"
    -)
    -
    -// NewQuerySplitter creates a new QuerySplitter. query is the original query
    -// to split and splitCount is the desired number of splits. splitCount must
    -// be a positive int, if not it will be set to 1.
    -func NewQuerySplitter(
    -	sql string,
    -	bindVariables map[string]interface{},
    -	splitColumn string,
    -	splitCount int64,
    -	se *schema.Engine) *QuerySplitter {
    -	if splitCount < 1 {
    -		splitCount = 1
    -	}
    -	return &QuerySplitter{
    -		sql:           sql,
    -		bindVariables: bindVariables,
    -		splitCount:    splitCount,
    -		se:            se,
    -		splitColumn:   sqlparser.NewColIdent(splitColumn),
    -	}
    -}
    -
    -// Ensure that the input query is a Select statement that contains no Join,
    -// GroupBy, OrderBy, Limit or Distinct operations. Also ensure that the
    -// source table is present in the schema and has at least one primary key.
    -func (qs *QuerySplitter) validateQuery() error {
    -	statement, err := sqlparser.Parse(qs.sql)
    -	if err != nil {
    -		return err
    -	}
    -	var ok bool
    -	qs.sel, ok = statement.(*sqlparser.Select)
    -	if !ok {
    -		return fmt.Errorf("not a select statement")
    -	}
    -	if qs.sel.Distinct != "" || qs.sel.GroupBy != nil ||
    -		qs.sel.Having != nil || len(qs.sel.From) != 1 ||
    -		qs.sel.OrderBy != nil || qs.sel.Limit != nil ||
    -		qs.sel.Lock != "" {
    -		return fmt.Errorf("unsupported query")
    -	}
    -	node, ok := qs.sel.From[0].(*sqlparser.AliasedTableExpr)
    -	if !ok {
    -		return fmt.Errorf("unsupported query")
    -	}
    -	qs.tableName = sqlparser.GetTableName(node.Expr)
    -	if qs.tableName.IsEmpty() {
    -		return fmt.Errorf("not a simple table expression")
    -	}
    -	table := qs.se.GetTable(qs.tableName)
    -	if table == nil {
    -		return fmt.Errorf("can't find table in schema")
    -	}
    -	if len(table.PKColumns) == 0 {
    -		return fmt.Errorf("no primary keys")
    -	}
    -	if !qs.splitColumn.IsEmpty() {
    -		for _, index := range table.Indexes {
    -			for _, column := range index.Columns {
    -				if qs.splitColumn.Equal(column) {
    -					return nil
    -				}
    -			}
    -		}
    -		return fmt.Errorf("split column is not indexed or does not exist in table schema, SplitColumn: %v, Table: %v", qs.splitColumn, table)
    -	}
    -	qs.splitColumn = table.GetPKColumn(0).Name
    -	return nil
    -}
    -
    -// split splits the query into multiple queries. validateQuery() must return
    -// nil error before split() is called.
    -func (qs *QuerySplitter) split(columnType querypb.Type, pkMinMax *sqltypes.Result) ([]querytypes.QuerySplit, error) {
    -	boundaries, err := qs.splitBoundaries(columnType, pkMinMax)
    -	if err != nil {
    -		return nil, err
    -	}
    -	splits := []querytypes.QuerySplit{}
    -	// No splits, return the original query as a single split
    -	if len(boundaries) == 0 {
    -		splits = append(splits, querytypes.QuerySplit{
    -			Sql:           qs.sql,
    -			BindVariables: qs.bindVariables,
    -		})
    -	} else {
    -		boundaries = append(boundaries, sqltypes.Value{})
    -		whereClause := qs.sel.Where
    -		// Loop through the boundaries and generated modified where clauses
    -		start := sqltypes.Value{}
    -		for _, end := range boundaries {
    -			bindVars := make(map[string]interface{}, len(qs.bindVariables))
    -			for k, v := range qs.bindVariables {
    -				bindVars[k] = v
    -			}
    -			qs.sel.Where = qs.getWhereClause(whereClause, bindVars, start, end)
    -			split := &querytypes.QuerySplit{
    -				Sql:           sqlparser.String(qs.sel),
    -				BindVariables: bindVars,
    -				RowCount:      qs.rowCount,
    -			}
    -			splits = append(splits, *split)
    -			start = end
    -		}
    -		qs.sel.Where = whereClause // reset where clause
    -	}
    -	return splits, err
    -}
    -
    -// getWhereClause returns a whereClause based on desired upper and lower
    -// bounds for primary key.
    -func (qs *QuerySplitter) getWhereClause(whereClause *sqlparser.Where, bindVars map[string]interface{}, start, end sqltypes.Value) *sqlparser.Where {
    -	var startClause *sqlparser.ComparisonExpr
    -	var endClause *sqlparser.ComparisonExpr
    -	var clauses sqlparser.Expr
    -	// No upper or lower bound, just return the where clause of original query
    -	if start.IsNull() && end.IsNull() {
    -		return whereClause
    -	}
    -	pk := &sqlparser.ColName{
    -		Name: qs.splitColumn,
    -	}
    -	if !start.IsNull() {
    -		startClause = &sqlparser.ComparisonExpr{
    -			Operator: sqlparser.GreaterEqualStr,
    -			Left:     pk,
    -			Right:    sqlparser.NewValArg([]byte(":" + startBindVarName)),
    -		}
    -		bindVars[startBindVarName] = start.ToNative()
    -	}
    -	// splitColumn < end
    -	if !end.IsNull() {
    -		endClause = &sqlparser.ComparisonExpr{
    -			Operator: sqlparser.LessThanStr,
    -			Left:     pk,
    -			Right:    sqlparser.NewValArg([]byte(":" + endBindVarName)),
    -		}
    -		bindVars[endBindVarName] = end.ToNative()
    -	}
    -	if startClause == nil {
    -		clauses = endClause
    -	} else {
    -		if endClause == nil {
    -			clauses = startClause
    -		} else {
    -			// splitColumn >= start AND splitColumn < end
    -			clauses = &sqlparser.AndExpr{
    -				Left:  startClause,
    -				Right: endClause,
    -			}
    -		}
    -	}
    -	if whereClause != nil {
    -		clauses = &sqlparser.AndExpr{
    -			Left:  &sqlparser.ParenExpr{Expr: whereClause.Expr},
    -			Right: &sqlparser.ParenExpr{Expr: clauses},
    -		}
    -	}
    -	return &sqlparser.Where{
    -		Type: sqlparser.WhereStr,
    -		Expr: clauses,
    -	}
    -}
    -
    -func (qs *QuerySplitter) splitBoundaries(columnType querypb.Type, pkMinMax *sqltypes.Result) ([]sqltypes.Value, error) {
    -	switch {
    -	case sqltypes.IsSigned(columnType):
    -		return qs.splitBoundariesIntColumn(pkMinMax)
    -	case sqltypes.IsUnsigned(columnType):
    -		return qs.splitBoundariesUintColumn(pkMinMax)
    -	case sqltypes.IsFloat(columnType):
    -		return qs.splitBoundariesFloatColumn(pkMinMax)
    -	case sqltypes.IsBinary(columnType):
    -		return qs.splitBoundariesStringColumn()
    -	}
    -	return []sqltypes.Value{}, nil
    -}
    -
    -func (qs *QuerySplitter) splitBoundariesIntColumn(pkMinMax *sqltypes.Result) ([]sqltypes.Value, error) {
    -	boundaries := []sqltypes.Value{}
    -	if pkMinMax == nil || len(pkMinMax.Rows) != 1 || pkMinMax.Rows[0][0].IsNull() || pkMinMax.Rows[0][1].IsNull() {
    -		return boundaries, nil
    -	}
    -	minNumeric := pkMinMax.Rows[0][0]
    -	maxNumeric := pkMinMax.Rows[0][1]
    -	min, err := minNumeric.ParseInt64()
    -	if err != nil {
    -		return nil, err
    -	}
    -	max, err := maxNumeric.ParseInt64()
    -	if err != nil {
    -		return nil, err
    -	}
    -	interval := (max - min) / qs.splitCount
    -	if interval == 0 {
    -		return nil, err
    -	}
    -	qs.rowCount = interval
    -	for i := int64(1); i < qs.splitCount; i++ {
    -		v, err := sqltypes.BuildValue(min + interval*i)
    -		if err != nil {
    -			return nil, err
    -		}
    -		boundaries = append(boundaries, v)
    -	}
    -	return boundaries, nil
    -}
    -
    -func (qs *QuerySplitter) splitBoundariesUintColumn(pkMinMax *sqltypes.Result) ([]sqltypes.Value, error) {
    -	boundaries := []sqltypes.Value{}
    -	if pkMinMax == nil || len(pkMinMax.Rows) != 1 || pkMinMax.Rows[0][0].IsNull() || pkMinMax.Rows[0][1].IsNull() {
    -		return boundaries, nil
    -	}
    -	minNumeric := pkMinMax.Rows[0][0]
    -	maxNumeric := pkMinMax.Rows[0][1]
    -	min, err := minNumeric.ParseUint64()
    -	if err != nil {
    -		return nil, err
    -	}
    -	max, err := maxNumeric.ParseUint64()
    -	if err != nil {
    -		return nil, err
    -	}
    -	interval := (max - min) / uint64(qs.splitCount)
    -	if interval == 0 {
    -		return nil, err
    -	}
    -	qs.rowCount = int64(interval)
    -	for i := uint64(1); i < uint64(qs.splitCount); i++ {
    -		v, err := sqltypes.BuildValue(min + interval*i)
    -		if err != nil {
    -			return nil, err
    -		}
    -		boundaries = append(boundaries, v)
    -	}
    -	return boundaries, nil
    -}
    -
    -func (qs *QuerySplitter) splitBoundariesFloatColumn(pkMinMax *sqltypes.Result) ([]sqltypes.Value, error) {
    -	boundaries := []sqltypes.Value{}
    -	if pkMinMax == nil || len(pkMinMax.Rows) != 1 || pkMinMax.Rows[0][0].IsNull() || pkMinMax.Rows[0][1].IsNull() {
    -		return boundaries, nil
    -	}
    -	min, err := strconv.ParseFloat(pkMinMax.Rows[0][0].String(), 64)
    -	if err != nil {
    -		return nil, err
    -	}
    -	max, err := strconv.ParseFloat(pkMinMax.Rows[0][1].String(), 64)
    -	if err != nil {
    -		return nil, err
    -	}
    -	interval := (max - min) / float64(qs.splitCount)
    -	if interval == 0 {
    -		return nil, err
    -	}
    -	qs.rowCount = int64(interval)
    -	for i := 1; i < int(qs.splitCount); i++ {
    -		boundary := min + interval*float64(i)
    -		v, err := sqltypes.BuildValue(boundary)
    -		if err != nil {
    -			return nil, err
    -		}
    -		boundaries = append(boundaries, v)
    -	}
    -	return boundaries, nil
    -}
    -
    -// TODO(shengzhe): support split based on min, max from the string column.
    -func (qs *QuerySplitter) splitBoundariesStringColumn() ([]sqltypes.Value, error) {
    -	splitRange := int64(0xFFFFFFFF) + 1
    -	splitSize := splitRange / int64(qs.splitCount)
    -	//TODO(shengzhe): have a better estimated row count based on table size.
    -	qs.rowCount = int64(splitSize)
    -	var boundaries []sqltypes.Value
    -	for i := 1; i < int(qs.splitCount); i++ {
    -		buf := make([]byte, 4)
    -		binary.BigEndian.PutUint32(buf, uint32(splitSize)*uint32(i))
    -		val, err := sqltypes.BuildValue(buf)
    -		if err != nil {
    -			return nil, err
    -		}
    -		boundaries = append(boundaries, val)
    -	}
    -	return boundaries, nil
    -}
    diff --git a/go/vt/tabletserver/query_splitter_test.go b/go/vt/tabletserver/query_splitter_test.go
    deleted file mode 100644
    index 629f4514a09..00000000000
    --- a/go/vt/tabletserver/query_splitter_test.go
    +++ /dev/null
    @@ -1,568 +0,0 @@
    -package tabletserver
    -
    -import (
    -	"encoding/binary"
    -	"fmt"
    -	"reflect"
    -	"strings"
    -	"testing"
    -
    -	"github.com/youtube/vitess/go/mysqlconn"
    -	"github.com/youtube/vitess/go/mysqlconn/fakesqldb"
    -	"github.com/youtube/vitess/go/sqltypes"
    -	"github.com/youtube/vitess/go/vt/sqlparser"
    -	"github.com/youtube/vitess/go/vt/tabletserver/engines/schema"
    -	"github.com/youtube/vitess/go/vt/tabletserver/querytypes"
    -	"github.com/youtube/vitess/go/vt/tabletserver/tabletenv"
    -
    -	querypb "github.com/youtube/vitess/go/vt/proto/query"
    -)
    -
    -func getSchemaEngine(t *testing.T) *schema.Engine {
    -	db := fakesqldb.New(t)
    -	defer db.Close()
    -	for query, result := range getQueriesForSplitter() {
    -		db.AddQuery(query, result)
    -	}
    -	se := schema.NewEngine(DummyChecker, tabletenv.DefaultQsConfig)
    -	se.Open(db.ConnParams())
    -	return se
    -}
    -
    -func getQueriesForSplitter() map[string]*sqltypes.Result {
    -	return map[string]*sqltypes.Result{
    -		"select unix_timestamp()": {
    -			Fields: []*querypb.Field{{
    -				Type: sqltypes.Uint64,
    -			}},
    -			RowsAffected: 1,
    -			Rows: [][]sqltypes.Value{
    -				{sqltypes.MakeTrusted(sqltypes.Int32, []byte("1427325875"))},
    -			},
    -		},
    -		"select @@global.sql_mode": {
    -			Fields: []*querypb.Field{{
    -				Type: sqltypes.VarChar,
    -			}},
    -			RowsAffected: 1,
    -			Rows: [][]sqltypes.Value{
    -				{sqltypes.MakeString([]byte("STRICT_TRANS_TABLES"))},
    -			},
    -		},
    -		"select @@autocommit": {
    -			Fields: []*querypb.Field{{
    -				Type: sqltypes.Uint64,
    -			}},
    -			RowsAffected: 1,
    -			Rows: [][]sqltypes.Value{
    -				{sqltypes.MakeString([]byte("1"))},
    -			},
    -		},
    -		mysqlconn.BaseShowTables: {
    -			Fields:       mysqlconn.BaseShowTablesFields,
    -			RowsAffected: 3,
    -			Rows: [][]sqltypes.Value{
    -				mysqlconn.BaseShowTablesRow("test_table", false, ""),
    -				mysqlconn.BaseShowTablesRow("test_table_no_pk", false, ""),
    -			},
    -		},
    -		"select * from test_table where 1 != 1": {
    -			Fields: []*querypb.Field{{
    -				Name: "id",
    -				Type: sqltypes.Int64,
    -			}, {
    -				Name: "id2",
    -				Type: sqltypes.Int64,
    -			}, {
    -				Name: "count",
    -				Type: sqltypes.Int64,
    -			}},
    -		},
    -		"describe test_table": {
    -			Fields:       mysqlconn.DescribeTableFields,
    -			RowsAffected: 1,
    -			Rows: [][]sqltypes.Value{
    -				mysqlconn.DescribeTableRow("id", "int(20)", false, "PRI", "0"),
    -				mysqlconn.DescribeTableRow("id2", "int(20)", false, "", "0"),
    -				mysqlconn.DescribeTableRow("count", "int(20)", false, "", "0"),
    -			},
    -		},
    -		"show index from test_table": {
    -			Fields:       mysqlconn.ShowIndexFromTableFields,
    -			RowsAffected: 2,
    -			Rows: [][]sqltypes.Value{
    -				mysqlconn.ShowIndexFromTableRow("test_table", true, "PRIMARY", 1, "id", false),
    -				mysqlconn.ShowIndexFromTableRow("test_table", true, "idx_id2", 1, "id2", false),
    -			},
    -		},
    -		"select * from test_table_no_pk where 1 != 1": {
    -			Fields: []*querypb.Field{{
    -				Name: "id",
    -				Type: sqltypes.Int64,
    -			}},
    -		},
    -		"describe test_table_no_pk": {
    -			Fields:       mysqlconn.DescribeTableFields,
    -			RowsAffected: 0,
    -			Rows:         [][]sqltypes.Value{},
    -		},
    -		"show index from test_table_no_pk": {
    -			Fields:       mysqlconn.ShowIndexFromTableFields,
    -			RowsAffected: 0,
    -			Rows:         [][]sqltypes.Value{},
    -		},
    -	}
    -}
    -
    -func TestValidateQuery(t *testing.T) {
    -	se := getSchemaEngine(t)
    -
    -	splitter := NewQuerySplitter("delete from test_table", nil, "", 3, se)
    -	got := splitter.validateQuery()
    -	want := fmt.Errorf("not a select statement")
    -	if !reflect.DeepEqual(got, want) {
    -		t.Errorf("non-select validation failed, got:%v, want:%v", got, want)
    -	}
    -
    -	splitter = NewQuerySplitter("select * from test_table order by id", nil, "", 3, se)
    -	got = splitter.validateQuery()
    -	want = fmt.Errorf("unsupported query")
    -	if !reflect.DeepEqual(got, want) {
    -		t.Errorf("order by query validation failed, got:%v, want:%v", got, want)
    -	}
    -
    -	splitter = NewQuerySplitter("select * from test_table group by id", nil, "", 3, se)
    -	got = splitter.validateQuery()
    -	want = fmt.Errorf("unsupported query")
    -	if !reflect.DeepEqual(got, want) {
    -		t.Errorf("group by query validation failed, got:%v, want:%v", got, want)
    -	}
    -
    -	splitter = NewQuerySplitter("select A.* from test_table A JOIN test_table B", nil, "", 3, se)
    -	got = splitter.validateQuery()
    -	want = fmt.Errorf("unsupported query")
    -	if !reflect.DeepEqual(got, want) {
    -		t.Errorf("join query validation failed, got:%v, want:%v", got, want)
    -	}
    -
    -	splitter = NewQuerySplitter("select * from test_table_no_pk", nil, "", 3, se)
    -	got = splitter.validateQuery()
    -	want = fmt.Errorf("no primary keys")
    -	if !reflect.DeepEqual(got, want) {
    -		t.Errorf("no PK table validation failed, got:%v, want:%v", got, want)
    -	}
    -
    -	splitter = NewQuerySplitter("select * from unknown_table", nil, "", 3, se)
    -	got = splitter.validateQuery()
    -	want = fmt.Errorf("can't find table in schema")
    -	if !reflect.DeepEqual(got, want) {
    -		t.Errorf("unknown table validation failed, got:%v, want:%v", got, want)
    -	}
    -
    -	splitter = NewQuerySplitter("select * from test_table", nil, "", 3, se)
    -	got = splitter.validateQuery()
    -	want = nil
    -	if !reflect.DeepEqual(got, want) {
    -		t.Errorf("valid query validation failed, got:%v, want:%v", got, want)
    -	}
    -
    -	splitter = NewQuerySplitter("select * from test_table where count > :count", nil, "", 3, se)
    -	got = splitter.validateQuery()
    -	want = nil
    -	if !reflect.DeepEqual(got, want) {
    -		t.Errorf("valid query validation failed, got:%v, want:%v", got, want)
    -	}
    -
    -	splitter = NewQuerySplitter("select * from test_table where count > :count", nil, "id2", 0, se)
    -	got = splitter.validateQuery()
    -	want = nil
    -	if !reflect.DeepEqual(got, want) {
    -		t.Errorf("valid query validation failed, got:%v, want:%v", got, want)
    -	}
    -
    -	splitter = NewQuerySplitter("invalid select * from test_table where count > :count", nil, "id2", 0, se)
    -	if err := splitter.validateQuery(); err == nil {
    -		t.Fatalf("validateQuery() = %v, want: nil", err)
    -	}
    -
    -	// column id2 is indexed
    -	splitter = NewQuerySplitter("select * from test_table where count > :count", nil, "id2", 3, se)
    -	got = splitter.validateQuery()
    -	want = nil
    -	if !reflect.DeepEqual(got, want) {
    -		t.Errorf("valid query validation failed, got:%v, want:%v", got, want)
    -	}
    -
    -	// column does not exist
    -	splitter = NewQuerySplitter("select * from test_table where count > :count", nil, "unknown_column", 3, se)
    -	got = splitter.validateQuery()
    -	wantStr := "split column is not indexed or does not exist in table schema"
    -	if !strings.Contains(got.Error(), wantStr) {
    -		t.Errorf("unknown table validation failed, got:%v, want:%v", got, wantStr)
    -	}
    -
    -	// column is not indexed
    -	splitter = NewQuerySplitter("select * from test_table where count > :count", nil, "count", 3, se)
    -	got = splitter.validateQuery()
    -	wantStr = "split column is not indexed or does not exist in table schema"
    -	if !strings.Contains(got.Error(), wantStr) {
    -		t.Errorf("unknown table validation failed, got:%v, want:%v", got, wantStr)
    -	}
    -}
    -
    -func TestGetWhereClause(t *testing.T) {
    -	splitter := &QuerySplitter{}
    -	sql := "select * from test_table where count > :count"
    -	statement, _ := sqlparser.Parse(sql)
    -	splitter.sel, _ = statement.(*sqlparser.Select)
    -	splitter.splitColumn = sqlparser.NewColIdent("id")
    -	bindVars := make(map[string]interface{})
    -	// no boundary case, start = end = nil, should not change the where clause
    -	nilValue := sqltypes.Value{}
    -	clause := splitter.getWhereClause(splitter.sel.Where, bindVars, nilValue, nilValue)
    -	want := " where count > :count"
    -	got := sqlparser.String(clause)
    -	if !reflect.DeepEqual(got, want) {
    -		t.Errorf("incorrect where clause for nil ranges, got:%v, want:%v", got, want)
    -	}
    -
    -	// Set lower bound, should add the lower bound condition to where clause
    -	startVal := int64(20)
    -	start, _ := sqltypes.BuildValue(startVal)
    -	bindVars = make(map[string]interface{})
    -	bindVars[":count"] = 300
    -	clause = splitter.getWhereClause(splitter.sel.Where, bindVars, start, nilValue)
    -	want = " where (count > :count) and (id >= :" + startBindVarName + ")"
    -	got = sqlparser.String(clause)
    -	if !reflect.DeepEqual(got, want) {
    -		t.Errorf("incorrect where clause, got:%v, want:%v", got, want)
    -	}
    -	v, ok := bindVars[startBindVarName]
    -	if !ok {
    -		t.Fatalf("bind var: %s not found got: nil, want: %v", startBindVarName, startVal)
    -	}
    -	if v != startVal {
    -		t.Fatalf("bind var: %s not found got: %v, want: %v", startBindVarName, v, startVal)
    -	}
    -	// Set upper bound, should add the upper bound condition to where clause
    -	endVal := int64(40)
    -	end, _ := sqltypes.BuildValue(endVal)
    -	bindVars = make(map[string]interface{})
    -	clause = splitter.getWhereClause(splitter.sel.Where, bindVars, nilValue, end)
    -	want = " where (count > :count) and (id < :" + endBindVarName + ")"
    -	got = sqlparser.String(clause)
    -	if !reflect.DeepEqual(got, want) {
    -		t.Errorf("incorrect where clause, got:%v, want:%v", got, want)
    -	}
    -	v, ok = bindVars[endBindVarName]
    -	if !ok {
    -		t.Fatalf("bind var: %s not found got: nil, want: %v", endBindVarName, endVal)
    -	}
    -	if v != endVal {
    -		t.Fatalf("bind var: %s not found got: %v, want: %v", endBindVarName, v, endVal)
    -	}
    -
    -	// Set both bounds, should add two conditions to where clause
    -	bindVars = make(map[string]interface{})
    -	clause = splitter.getWhereClause(splitter.sel.Where, bindVars, start, end)
    -	want = fmt.Sprintf(" where (count > :count) and (id >= :%s and id < :%s)", startBindVarName, endBindVarName)
    -	got = sqlparser.String(clause)
    -	if !reflect.DeepEqual(got, want) {
    -		t.Errorf("incorrect where clause, got:%v, want:%v", got, want)
    -	}
    -
    -	// Original query with no where clause
    -	sql = "select * from test_table"
    -	statement, _ = sqlparser.Parse(sql)
    -	splitter.sel, _ = statement.(*sqlparser.Select)
    -	bindVars = make(map[string]interface{})
    -	// no boundary case, start = end = nil should return no where clause
    -	clause = splitter.getWhereClause(splitter.sel.Where, bindVars, nilValue, nilValue)
    -	want = ""
    -	got = sqlparser.String(clause)
    -	if !reflect.DeepEqual(got, want) {
    -		t.Errorf("incorrect where clause for nil ranges, got:%v, want:%v", got, want)
    -	}
    -	bindVars = make(map[string]interface{})
    -	// Set both bounds, should add two conditions to where clause
    -	clause = splitter.getWhereClause(splitter.sel.Where, bindVars, start, end)
    -	want = fmt.Sprintf(" where id >= :%s and id < :%s", startBindVarName, endBindVarName)
    -	got = sqlparser.String(clause)
    -	if !reflect.DeepEqual(got, want) {
    -		t.Errorf("incorrect where clause, got:%v, want:%v", got, want)
    -	}
    -	v, ok = bindVars[startBindVarName]
    -	if !ok {
    -		t.Fatalf("bind var: %s not found got: nil, want: %v", startBindVarName, startVal)
    -	}
    -	if v != startVal {
    -		t.Fatalf("bind var: %s not found got: %v, want: %v", startBindVarName, v, startVal)
    -	}
    -	v, ok = bindVars[endBindVarName]
    -	if !ok {
    -		t.Fatalf("bind var: %s not found got: nil, want: %v", endBindVarName, endVal)
    -	}
    -	if v != endVal {
    -		t.Fatalf("bind var: %s not found got: %v, want: %v", endBindVarName, v, endVal)
    -	}
    -}
    -
    -func TestSplitBoundaries(t *testing.T) {
    -	min, _ := sqltypes.BuildValue(10)
    -	max, _ := sqltypes.BuildValue(60)
    -	row := []sqltypes.Value{min, max}
    -	rows := [][]sqltypes.Value{row}
    -
    -	minField := &querypb.Field{Name: "min", Type: sqltypes.Int64}
    -	maxField := &querypb.Field{Name: "max", Type: sqltypes.Int64}
    -	fields := []*querypb.Field{minField, maxField}
    -
    -	pkMinMax := &sqltypes.Result{
    -		Fields: fields,
    -		Rows:   rows,
    -	}
    -
    -	splitter := &QuerySplitter{}
    -	splitter.splitCount = 5
    -	boundaries, err := splitter.splitBoundaries(sqltypes.Int64, pkMinMax)
    -	if err != nil {
    -		t.Fatalf("unexpected error: %v", err)
    -	}
    -	if len(boundaries) != int(splitter.splitCount-1) {
    -		t.Errorf("wrong number of boundaries got: %v, want: %v", len(boundaries), splitter.splitCount-1)
    -	}
    -	got, err := splitter.splitBoundaries(sqltypes.Int64, pkMinMax)
    -	if err != nil {
    -		t.Fatalf("unexpected error: %v", err)
    -	}
    -	want := []sqltypes.Value{buildVal(20), buildVal(30), buildVal(40), buildVal(50)}
    -	if !reflect.DeepEqual(got, want) {
    -		t.Errorf("incorrect boundaries, got: %v, want: %v", got, want)
    -	}
    -
    -	// Test negative min value
    -	min, _ = sqltypes.BuildValue(-100)
    -	max, _ = sqltypes.BuildValue(100)
    -	row = []sqltypes.Value{min, max}
    -	rows = [][]sqltypes.Value{row}
    -	pkMinMax.Rows = rows
    -	got, err = splitter.splitBoundaries(sqltypes.Int64, pkMinMax)
    -	if err != nil {
    -		t.Fatalf("unexpected error: %v", err)
    -	}
    -	want = []sqltypes.Value{buildVal(-60), buildVal(-20), buildVal(20), buildVal(60)}
    -	if !reflect.DeepEqual(got, want) {
    -		t.Errorf("incorrect boundaries, got: %v, want: %v", got, want)
    -	}
    -
    -	// Test float min max
    -	min, _ = sqltypes.BuildValue(10.5)
    -	max, _ = sqltypes.BuildValue(60.5)
    -	row = []sqltypes.Value{min, max}
    -	rows = [][]sqltypes.Value{row}
    -	minField = &querypb.Field{Name: "min", Type: sqltypes.Float64}
    -	maxField = &querypb.Field{Name: "max", Type: sqltypes.Float64}
    -	fields = []*querypb.Field{minField, maxField}
    -	pkMinMax.Rows = rows
    -	pkMinMax.Fields = fields
    -	got, err = splitter.splitBoundaries(sqltypes.Float64, pkMinMax)
    -	if err != nil {
    -		t.Fatalf("unexpected error: %v", err)
    -	}
    -	want = []sqltypes.Value{buildVal(20.5), buildVal(30.5), buildVal(40.5), buildVal(50.5)}
    -	if !reflect.DeepEqual(got, want) {
    -		t.Errorf("incorrect boundaries, got: %v, want: %v", got, want)
    -	}
    -}
    -
    -func buildVal(val interface{}) sqltypes.Value {
    -	v, _ := sqltypes.BuildValue(val)
    -	return v
    -}
    -
    -func TestSplitQuery(t *testing.T) {
    -	se := getSchemaEngine(t)
    -	splitter := NewQuerySplitter("select * from test_table where count > :count", nil, "", 3, se)
    -	splitter.validateQuery()
    -	min, _ := sqltypes.BuildValue(0)
    -	max, _ := sqltypes.BuildValue(300)
    -	minField := &querypb.Field{
    -		Name: "min",
    -		Type: sqltypes.Int64,
    -	}
    -	maxField := &querypb.Field{
    -		Name: "max",
    -		Type: sqltypes.Int64,
    -	}
    -	fields := []*querypb.Field{minField, maxField}
    -	pkMinMax := &sqltypes.Result{
    -		Fields: fields,
    -	}
    -
    -	// Ensure that empty min max does not cause panic or return any error
    -	splits, err := splitter.split(sqltypes.Int64, pkMinMax)
    -	if err != nil {
    -		t.Errorf("unexpected error while splitting on empty pkMinMax, %s", err)
    -	}
    -
    -	pkMinMax.Rows = [][]sqltypes.Value{{min, max}}
    -	splits, err = splitter.split(sqltypes.Int64, pkMinMax)
    -	if err != nil {
    -		t.Fatalf("unexpected error: %v", err)
    -	}
    -	got := []querytypes.BoundQuery{}
    -	for _, split := range splits {
    -		if split.RowCount != 100 {
    -			t.Errorf("wrong RowCount, got: %v, want: %v", split.RowCount, 100)
    -		}
    -		got = append(got, querytypes.BoundQuery{
    -			Sql:           split.Sql,
    -			BindVariables: split.BindVariables,
    -		})
    -	}
    -	want := []querytypes.BoundQuery{
    -		{
    -			Sql:           "select * from test_table where (count > :count) and (id < :" + endBindVarName + ")",
    -			BindVariables: map[string]interface{}{endBindVarName: int64(100)},
    -		},
    -		{
    -			Sql: fmt.Sprintf("select * from test_table where (count > :count) and (id >= :%s and id < :%s)", startBindVarName, endBindVarName),
    -			BindVariables: map[string]interface{}{
    -				startBindVarName: int64(100),
    -				endBindVarName:   int64(200),
    -			},
    -		},
    -		{
    -			Sql:           "select * from test_table where (count > :count) and (id >= :" + startBindVarName + ")",
    -			BindVariables: map[string]interface{}{startBindVarName: int64(200)},
    -		},
    -	}
    -	if !reflect.DeepEqual(got, want) {
    -		t.Errorf("wrong splits, got: %v, want: %v", got, want)
    -	}
    -}
    -
    -func TestSplitQueryFractionalColumn(t *testing.T) {
    -	se := getSchemaEngine(t)
    -	splitter := NewQuerySplitter("select * from test_table where count > :count", nil, "", 3, se)
    -	splitter.validateQuery()
    -	min, _ := sqltypes.BuildValue(10.5)
    -	max, _ := sqltypes.BuildValue(490.5)
    -	minField := &querypb.Field{
    -		Name: "min",
    -		Type: sqltypes.Float32,
    -	}
    -	maxField := &querypb.Field{
    -		Name: "max",
    -		Type: sqltypes.Float32,
    -	}
    -	fields := []*querypb.Field{minField, maxField}
    -	pkMinMax := &sqltypes.Result{
    -		Fields: fields,
    -		Rows:   [][]sqltypes.Value{{min, max}},
    -	}
    -
    -	splits, err := splitter.split(sqltypes.Float32, pkMinMax)
    -	if err != nil {
    -		t.Fatalf("unexpected error: %v", err)
    -	}
    -	got := []querytypes.BoundQuery{}
    -	for _, split := range splits {
    -		if split.RowCount != 160 {
    -			t.Errorf("wrong RowCount, got: %v, want: %v", split.RowCount, 160)
    -		}
    -		got = append(got, querytypes.BoundQuery{
    -			Sql:           split.Sql,
    -			BindVariables: split.BindVariables,
    -		})
    -	}
    -	want := []querytypes.BoundQuery{
    -		{
    -			Sql:           "select * from test_table where (count > :count) and (id < :" + endBindVarName + ")",
    -			BindVariables: map[string]interface{}{endBindVarName: 170.5},
    -		},
    -		{
    -			Sql: fmt.Sprintf("select * from test_table where (count > :count) and (id >= :%s and id < :%s)", startBindVarName, endBindVarName),
    -			BindVariables: map[string]interface{}{
    -				startBindVarName: 170.5,
    -				endBindVarName:   330.5,
    -			},
    -		},
    -		{
    -			Sql:           "select * from test_table where (count > :count) and (id >= :" + startBindVarName + ")",
    -			BindVariables: map[string]interface{}{startBindVarName: 330.5},
    -		},
    -	}
    -	if !reflect.DeepEqual(got, want) {
    -		t.Errorf("wrong splits, got: %v, want: %v", got, want)
    -	}
    -}
    -
    -func TestSplitQueryVarBinaryColumn(t *testing.T) {
    -	se := getSchemaEngine(t)
    -	splitter := NewQuerySplitter("select * from test_table where count > :count", nil, "", 3, se)
    -	splitter.validateQuery()
    -	splits, err := splitter.split(sqltypes.VarBinary, nil)
    -	if err != nil {
    -		t.Fatalf("unexpected error: %v", err)
    -	}
    -	got := []querytypes.BoundQuery{}
    -	for _, split := range splits {
    -		got = append(got, querytypes.BoundQuery{
    -			Sql:           split.Sql,
    -			BindVariables: split.BindVariables,
    -		})
    -	}
    -	want := []querytypes.BoundQuery{
    -		{
    -			Sql:           "select * from test_table where (count > :count) and (id < :" + endBindVarName + ")",
    -			BindVariables: map[string]interface{}{endBindVarName: hexToByteUInt32(0x55555555)},
    -		},
    -		{
    -			Sql: fmt.Sprintf("select * from test_table where (count > :count) and (id >= :%s and id < :%s)", startBindVarName, endBindVarName),
    -			BindVariables: map[string]interface{}{
    -				startBindVarName: hexToByteUInt32(0x55555555),
    -				endBindVarName:   hexToByteUInt32(0xAAAAAAAA),
    -			},
    -		},
    -		{
    -			Sql:           "select * from test_table where (count > :count) and (id >= :" + startBindVarName + ")",
    -			BindVariables: map[string]interface{}{startBindVarName: hexToByteUInt32(0xAAAAAAAA)},
    -		},
    -	}
    -	if !reflect.DeepEqual(got, want) {
    -		t.Errorf("wrong splits, got: %v, want: %v", got, want)
    -	}
    -}
    -
    -func TestSplitQueryVarCharColumn(t *testing.T) {
    -	se := getSchemaEngine(t)
    -	splitter := NewQuerySplitter("select * from test_table where count > :count", map[string]interface{}{"count": 123}, "", 3, se)
    -	splitter.validateQuery()
    -	splits, err := splitter.split(sqltypes.VarChar, nil)
    -	if err != nil {
    -		t.Fatalf("unexpected error: %v", err)
    -	}
    -	got := []querytypes.BoundQuery{}
    -	for _, split := range splits {
    -		got = append(got, querytypes.BoundQuery{
    -			Sql:           split.Sql,
    -			BindVariables: split.BindVariables,
    -		})
    -	}
    -	want := []querytypes.BoundQuery{
    -		{
    -			Sql:           "select * from test_table where count > :count",
    -			BindVariables: map[string]interface{}{"count": 123},
    -		},
    -	}
    -	if !reflect.DeepEqual(got, want) {
    -		t.Errorf("wrong splits, got: %v, want: %v", got, want)
    -	}
    -}
    -
    -func hexToByteUInt32(val uint32) []byte {
    -	buf := make([]byte, 4)
    -	binary.BigEndian.PutUint32(buf, val)
    -	return buf
    -}
    
    From 825a065a3141b2ab66211be02e58cc5f66676a85 Mon Sep 17 00:00:00 2001
    From: Michael Berlin 
    Date: Thu, 2 Mar 2017 11:57:03 -0800
    Subject: [PATCH 068/108] test: vtgate_buffer.py: Test for the varz value and
     not just existence.
    
    Since go/vt-pull/2608 it is always initialized, which broke this test.
    ---
     test/vtgate_buffer.py | 6 +++---
     1 file changed, 3 insertions(+), 3 deletions(-)
    
    diff --git a/test/vtgate_buffer.py b/test/vtgate_buffer.py
    index f741ca3c5ac..051074371f0 100755
    --- a/test/vtgate_buffer.py
    +++ b/test/vtgate_buffer.py
    @@ -320,10 +320,10 @@ def _test_buffer(self, reparent_func, enable_read_thread=True,
         master_promoted_count = v['HealthcheckMasterPromoted'].get(labels, 0)
         self.assertGreater(master_promoted_count, 0)
     
    -    if labels in v['BufferFailoverDurationSumMs']:
    +    duration_ms = v['BufferFailoverDurationSumMs'].get(labels, 0)
    +    if duration_ms > 0:
           # Buffering was actually started.
    -      logging.debug('Failover was buffered for %d milliseconds.',
    -                    v['BufferFailoverDurationSumMs'][labels])
    +      logging.debug('Failover was buffered for %d milliseconds.', duration_ms)
           # Number of buffering stops must be equal to the number of seen failovers.
           buffering_stops = v['BufferStops'].get('%s.NewMasterSeen' % labels, 0)
           self.assertEqual(master_promoted_count, buffering_stops)
    
    From 06ab24a730e4ec1886af01044e58188a000beb3e Mon Sep 17 00:00:00 2001
    From: Alouane Nour-Eddine 
    Date: Thu, 2 Mar 2017 23:25:48 +0000
    Subject: [PATCH 069/108] update success flag
    
    ---
     php/src/Vitess/ProtoUtils.php | 2 +-
     1 file changed, 1 insertion(+), 1 deletion(-)
    
    diff --git a/php/src/Vitess/ProtoUtils.php b/php/src/Vitess/ProtoUtils.php
    index 608df1a3ede..cea73a082ba 100644
    --- a/php/src/Vitess/ProtoUtils.php
    +++ b/php/src/Vitess/ProtoUtils.php
    @@ -54,7 +54,7 @@ public static function checkError($response)
                         throw new Exception($error->getCode() . ': ' . $error->getMessage());
                 }
                 switch ($error->getLegacyCode()) {
    -                case ErrorCode::SUCCESS:
    +                case ErrorCode::SUCCESS_LEGACY:
                         break;
                     case ErrorCode::BAD_INPUT_LEGACY:
                         throw new Error\BadInput($error->getMessage());
    
    From 104bee01992681b957124607bbbdb80aff1208be Mon Sep 17 00:00:00 2001
    From: davygeek 
    Date: Fri, 3 Mar 2017 09:34:56 +0800
    Subject: [PATCH 070/108] Add kubernetes api server config (#2603)
    
    ---
     examples/kubernetes/env.sh               | 16 ++++++++++++----
     examples/kubernetes/etcd-down.sh         |  6 +++---
     examples/kubernetes/etcd-up.sh           |  4 ++--
     examples/kubernetes/guestbook-down.sh    |  4 ++--
     examples/kubernetes/guestbook-up.sh      |  4 ++--
     examples/kubernetes/namespace-down.sh    |  2 +-
     examples/kubernetes/namespace-up.sh      |  2 +-
     examples/kubernetes/orchestrator-down.sh |  6 +++---
     examples/kubernetes/orchestrator-up.sh   |  4 ++--
     examples/kubernetes/sharded-vtworker.sh  | 12 ++++++------
     examples/kubernetes/vtctld-down.sh       |  4 ++--
     examples/kubernetes/vtctld-up.sh         |  4 ++--
     examples/kubernetes/vtgate-down.sh       |  4 ++--
     examples/kubernetes/vtgate-up.sh         |  4 ++--
     examples/kubernetes/vttablet-down.sh     |  2 +-
     examples/kubernetes/vttablet-up.sh       |  2 +-
     16 files changed, 44 insertions(+), 36 deletions(-)
    
    diff --git a/examples/kubernetes/env.sh b/examples/kubernetes/env.sh
    index 60ef6c8c6a0..4474dbcd42b 100644
    --- a/examples/kubernetes/env.sh
    +++ b/examples/kubernetes/env.sh
    @@ -6,9 +6,17 @@
     # use cases just need KUBECTL=kubectl, we'll make that the default.
     KUBECTL=${KUBECTL:-kubectl}
     
    -# Kuberentes namespace for Vitess and components.
    +# Kubernetes API server address used by $KUBECTL.
    +# The default value is 127.0.0.1:8080.
    +# When the Kubernetes API server is not local, it can be reached by editing KUBERNETES_API_SERVER's value.
    +KUBERNETES_API_SERVER=${KUBERNETES_API_SERVER:-'127.0.0.1:8080'}
    +
    +# Kubernetes namespace for Vitess and components.
     VITESS_NAME=${VITESS_NAME:-'default'}
     
    +# Kubernetes options config
    +KUBECTL_OPTIONS="--namespace=$VITESS_NAME --server=$KUBERNETES_API_SERVER"
    +
     # CELLS should be a comma separated list of cells
     # the first cell listed will become local to vtctld.
     CELLS=${CELLS:-'test'}
    @@ -18,7 +26,7 @@ VTCTLD_PORT=${VTCTLD_PORT:-30001}
     
     # Get the ExternalIP of any node.
     get_node_ip() {
    -  $KUBECTL get -o template --template '{{range (index .items 0).status.addresses}}{{if eq .type "ExternalIP" "LegacyHostIP"}}{{.address}}{{end}}{{end}}' nodes --namespace=$VITESS_NAME
    +  $KUBECTL $KUBECTL_OPTIONS get -o template --template '{{range (index .items 0).status.addresses}}{{if eq .type "ExternalIP" "LegacyHostIP"}}{{.address}}{{end}}{{end}}' nodes
     }
     
     # Try to find vtctld address if not provided.
    @@ -33,7 +41,7 @@ get_vtctld_addr() {
     
     # Find the name of a vtctld pod.
     get_vtctld_pod() {
    -  $KUBECTL get -o template --template "{{if ge (len .items) 1 }}{{(index .items 0).metadata.name}}{{end}}" -l 'app=vitess,component=vtctld' pods --namespace=$VITESS_NAME
    +  $KUBECTL $KUBECTL_OPTIONS get -o template --template "{{if ge (len .items) 1 }}{{(index .items 0).metadata.name}}{{end}}" -l 'app=vitess,component=vtctld' pods
     }
     
     start_vtctld_forward() {
    @@ -44,7 +52,7 @@ start_vtctld_forward() {
       fi
     
       tmpfile=`mktemp`
    -  $KUBECTL port-forward -p $pod 0:15999 --namespace=$VITESS_NAME &> $tmpfile &
    +  $KUBECTL $KUBECTL_OPTIONS port-forward -p $pod 0:15999 &> $tmpfile &
       vtctld_forward_pid=$!
     
       until [[ `cat $tmpfile` =~ :([0-9]+)\ -\> ]]; do :; done
    diff --git a/examples/kubernetes/etcd-down.sh b/examples/kubernetes/etcd-down.sh
    index f0e2103551e..18b1a824422 100755
    --- a/examples/kubernetes/etcd-down.sh
    +++ b/examples/kubernetes/etcd-down.sh
    @@ -13,10 +13,10 @@ cells=`echo $CELLS | tr ',' ' '`
     # Delete replication controllers
     for cell in 'global' $cells; do
       echo "Stopping etcd replicationcontroller for $cell cell..."
    -  $KUBECTL delete replicationcontroller etcd-$cell --namespace=$VITESS_NAME
    +  $KUBECTL $KUBECTL_OPTIONS delete replicationcontroller etcd-$cell
     
       echo "Deleting etcd service for $cell cell..."
    -  $KUBECTL delete service etcd-$cell --namespace=$VITESS_NAME
    -  $KUBECTL delete service etcd-$cell-srv --namespace=$VITESS_NAME
    +  $KUBECTL $KUBECTL_OPTIONS delete service etcd-$cell
    +  $KUBECTL $KUBECTL_OPTIONS delete service etcd-$cell-srv
     done
     
    diff --git a/examples/kubernetes/etcd-up.sh b/examples/kubernetes/etcd-up.sh
    index 3fc1c0b9c11..38718dc5a11 100755
    --- a/examples/kubernetes/etcd-up.sh
    +++ b/examples/kubernetes/etcd-up.sh
    @@ -21,7 +21,7 @@ for cell in 'global' $cells; do
       echo "Creating etcd service for $cell cell..."
       cat etcd-service-template.yaml | \
         sed -e "s/{{cell}}/$cell/g" | \
    -    $KUBECTL create --namespace=$VITESS_NAME -f -
    +    $KUBECTL $KUBECTL_OPTIONS create -f -
     
       # Expand template variables
       sed_script=""
    @@ -31,6 +31,6 @@ for cell in 'global' $cells; do
     
       # Create the replication controller.
       echo "Creating etcd replicationcontroller for $cell cell..."
    -  cat etcd-controller-template.yaml | sed -e "$sed_script" | $KUBECTL create --namespace=$VITESS_NAME -f -
    +  cat etcd-controller-template.yaml | sed -e "$sed_script" | $KUBECTL $KUBECTL_OPTIONS create -f -
     done
     
    diff --git a/examples/kubernetes/guestbook-down.sh b/examples/kubernetes/guestbook-down.sh
    index 045229dee75..f7c41413abb 100755
    --- a/examples/kubernetes/guestbook-down.sh
    +++ b/examples/kubernetes/guestbook-down.sh
    @@ -6,7 +6,7 @@ script_root=`dirname "${BASH_SOURCE}"`
     source $script_root/env.sh
     
     echo "Stopping guestbook replicationcontroller..."
    -$KUBECTL delete replicationcontroller guestbook --namespace=$VITESS_NAME
    +$KUBECTL $KUBECTL_OPTIONS delete replicationcontroller guestbook
     
     echo "Deleting guestbook service..."
    -$KUBECTL delete service guestbook --namespace=$VITESS_NAME
    +$KUBECTL $KUBECTL_OPTIONS delete service guestbook
    diff --git a/examples/kubernetes/guestbook-up.sh b/examples/kubernetes/guestbook-up.sh
    index 3f77637ce43..cea846ce4d4 100755
    --- a/examples/kubernetes/guestbook-up.sh
    +++ b/examples/kubernetes/guestbook-up.sh
    @@ -13,7 +13,7 @@ script_root=`dirname "${BASH_SOURCE}"`
     source $script_root/env.sh
     
     echo "Creating guestbook service..."
    -$KUBECTL create --namespace=$VITESS_NAME -f guestbook-service.yaml
    +$KUBECTL $KUBECTL_OPTIONS create -f guestbook-service.yaml
     
     sed_script=""
     for var in port cell keyspace vtgate_port; do
    @@ -21,4 +21,4 @@ for var in port cell keyspace vtgate_port; do
     done
     
     echo "Creating guestbook replicationcontroller..."
    -sed -e "$sed_script" guestbook-controller-template.yaml | $KUBECTL create --namespace=$VITESS_NAME -f -
    +sed -e "$sed_script" guestbook-controller-template.yaml | $KUBECTL $KUBECTL_OPTIONS create -f -
    diff --git a/examples/kubernetes/namespace-down.sh b/examples/kubernetes/namespace-down.sh
    index e35c96e6868..6ffbf762a40 100755
    --- a/examples/kubernetes/namespace-down.sh
    +++ b/examples/kubernetes/namespace-down.sh
    @@ -10,4 +10,4 @@ source $script_root/env.sh
     namespace=${VITESS_NAME:-'vitess'}
     
     echo "Deleting namespace $namespace..."
    -$KUBECTL delete namespace $namespace
    +$KUBECTL $KUBECTL_OPTIONS delete namespace $namespace
    diff --git a/examples/kubernetes/namespace-up.sh b/examples/kubernetes/namespace-up.sh
    index 2ca34aedbe1..f585c3989fd 100755
    --- a/examples/kubernetes/namespace-up.sh
    +++ b/examples/kubernetes/namespace-up.sh
    @@ -14,5 +14,5 @@ sed_script=""
     for var in namespace; do
       sed_script+="s,{{$var}},${!var},g;"
     done
    -cat namespace-template.yaml | sed -e "$sed_script" | $KUBECTL create -f -
    +cat namespace-template.yaml | sed -e "$sed_script" | $KUBECTL $KUBECTL_OPTIONS create -f -
     
    diff --git a/examples/kubernetes/orchestrator-down.sh b/examples/kubernetes/orchestrator-down.sh
    index ddad5fb9140..957ddfd4a96 100755
    --- a/examples/kubernetes/orchestrator-down.sh
    +++ b/examples/kubernetes/orchestrator-down.sh
    @@ -8,10 +8,10 @@ script_root=`dirname "${BASH_SOURCE}"`
     source $script_root/env.sh
     
     echo "Stopping orchestrator replicationcontroller..."
    -$KUBECTL delete replicationcontroller orchestrator --namespace=$VITESS_NAME
    +$KUBECTL $KUBECTL_OPTIONS delete replicationcontroller orchestrator
     
     echo "Deleting orchestrator service..."
    -$KUBECTL delete service orchestrator --namespace=$VITESS_NAME
    +$KUBECTL $KUBECTL_OPTIONS delete service orchestrator
     
     echo "Deleting orchestrator configmap..."
    -$KUBECTL delete --namespace=$VITESS_NAME configmap orchestrator-conf
    +$KUBECTL $KUBECTL_OPTIONS delete configmap orchestrator-conf
    diff --git a/examples/kubernetes/orchestrator-up.sh b/examples/kubernetes/orchestrator-up.sh
    index 893209af3fe..cdfa4425f9d 100755
    --- a/examples/kubernetes/orchestrator-up.sh
    +++ b/examples/kubernetes/orchestrator-up.sh
    @@ -14,9 +14,9 @@ for var in service_type; do
     done
     
     # Create configmap from orchestrator docker config file
    -$KUBECTL create --namespace=$VITESS_NAME configmap orchestrator-conf --from-file="${script_root}/../../docker/orchestrator/orchestrator.conf.json"
    +$KUBECTL $KUBECTL_OPTIONS create configmap orchestrator-conf --from-file="${script_root}/../../docker/orchestrator/orchestrator.conf.json"
     
    -cat orchestrator-template.yaml | sed -e "$sed_script" | $KUBECTL create --namespace=$VITESS_NAME -f -
    +cat orchestrator-template.yaml | sed -e "$sed_script" | $KUBECTL $KUBECTL_OPTIONS create -f -
     
     echo
     echo "To access orchestrator web UI, start kubectl proxy in another terminal:"
    diff --git a/examples/kubernetes/sharded-vtworker.sh b/examples/kubernetes/sharded-vtworker.sh
    index 9da3c8a886c..9afa922eaa2 100755
    --- a/examples/kubernetes/sharded-vtworker.sh
    +++ b/examples/kubernetes/sharded-vtworker.sh
    @@ -19,29 +19,29 @@ done
     
     # Instantiate template and send to kubectl.
     echo "Creating vtworker pod in cell $cell..."
    -cat vtworker-pod-template.yaml | sed -e "$sed_script" | $KUBECTL create -f -
    +cat vtworker-pod-template.yaml | sed -e "$sed_script" | $KUBECTL $KUBECTL_OPTIONS create -f -
     
     set +e
     
     # Wait for vtworker pod to show up.
    -until [ $($KUBECTL get pod -o template --template '{{.status.phase}}' vtworker 2> /dev/null) = "Running" ]; do
    +until [ $($KUBECTL $KUBECTL_OPTIONS get pod -o template --template '{{.status.phase}}' vtworker 2> /dev/null) = "Running" ]; do
       echo "Waiting for vtworker pod to be created..."
     	sleep 1
     done
     
     echo "Following vtworker logs until termination..."
    -$KUBECTL logs -f vtworker
    +$KUBECTL $KUBECTL_OPTIONS logs -f vtworker
     
     # Get vtworker exit code. Wait for complete shutdown.
     # (Although logs -f exited, the pod isn't fully shutdown yet and the exit code is not available yet.)
    -until [ $($KUBECTL get pod -o template --template '{{.status.phase}}' vtworker 2> /dev/null) != "Running" ]; do
    +until [ $($KUBECTL $KUBECTL_OPTIONS get pod -o template --template '{{.status.phase}}' vtworker 2> /dev/null) != "Running" ]; do
       echo "Waiting for vtworker pod to shutdown completely..."
       sleep 1
     done
    -exit_code=$($KUBECTL get -o template --template '{{(index .status.containerStatuses 0).state.terminated.exitCode}}' pods vtworker)
    +exit_code=$($KUBECTL $KUBECTL_OPTIONS get -o template --template '{{(index .status.containerStatuses 0).state.terminated.exitCode}}' pods vtworker)
     
     echo "Deleting vtworker pod..."
    -$KUBECTL delete pod vtworker
    +$KUBECTL $KUBECTL_OPTIONS delete pod vtworker
     
     if [ "$exit_code" != "0" ]; then
       echo
    diff --git a/examples/kubernetes/vtctld-down.sh b/examples/kubernetes/vtctld-down.sh
    index 0b888ba8ff7..0191f6f295f 100755
    --- a/examples/kubernetes/vtctld-down.sh
    +++ b/examples/kubernetes/vtctld-down.sh
    @@ -8,7 +8,7 @@ script_root=`dirname "${BASH_SOURCE}"`
     source $script_root/env.sh
     
     echo "Stopping vtctld replicationcontroller..."
    -$KUBECTL delete replicationcontroller vtctld --namespace=$VITESS_NAME
    +$KUBECTL $KUBECTL_OPTIONS delete replicationcontroller vtctld
     
     echo "Deleting vtctld service..."
    -$KUBECTL delete service vtctld --namespace=$VITESS_NAME
    +$KUBECTL $KUBECTL_OPTIONS delete service vtctld
    diff --git a/examples/kubernetes/vtctld-up.sh b/examples/kubernetes/vtctld-up.sh
    index b5041d2efeb..306e367f2cb 100755
    --- a/examples/kubernetes/vtctld-up.sh
    +++ b/examples/kubernetes/vtctld-up.sh
    @@ -18,7 +18,7 @@ sed_script=""
     for var in service_type; do
       sed_script+="s,{{$var}},${!var},g;"
     done
    -cat vtctld-service-template.yaml | sed -e "$sed_script" | $KUBECTL create --namespace=$VITESS_NAME -f -
    +cat vtctld-service-template.yaml | sed -e "$sed_script" | $KUBECTL $KUBECTL_OPTIONS create -f -
     
     echo "Creating vtctld replicationcontroller..."
     # Expand template variables
    @@ -28,7 +28,7 @@ for var in vitess_image backup_flags test_flags cell; do
     done
     
     # Instantiate template and send to kubectl.
    -cat vtctld-controller-template.yaml | sed -e "$sed_script" | $KUBECTL create --namespace=$VITESS_NAME -f -
    +cat vtctld-controller-template.yaml | sed -e "$sed_script" | $KUBECTL $KUBECTL_OPTIONS create -f -
     
     echo
     echo "To access vtctld web UI, start kubectl proxy in another terminal:"
    diff --git a/examples/kubernetes/vtgate-down.sh b/examples/kubernetes/vtgate-down.sh
    index 26b87ac8380..fadd2a0073c 100755
    --- a/examples/kubernetes/vtgate-down.sh
    +++ b/examples/kubernetes/vtgate-down.sh
    @@ -11,9 +11,9 @@ cells=`echo $CELLS | tr ',' ' '`
     
     for cell in $cells; do
       echo "Stopping vtgate replicationcontroller in cell $cell..."
    -  $KUBECTL delete replicationcontroller vtgate-$cell --namespace=$VITESS_NAME
    +  $KUBECTL $KUBECTL_OPTIONS delete replicationcontroller vtgate-$cell
     
       echo "Deleting vtgate service in cell $cell..."
    -  $KUBECTL delete service vtgate-$cell --namespace=$VITESS_NAME
    +  $KUBECTL $KUBECTL_OPTIONS delete service vtgate-$cell
     done
     
    diff --git a/examples/kubernetes/vtgate-up.sh b/examples/kubernetes/vtgate-up.sh
    index 46965608398..c5a754dd7e1 100755
    --- a/examples/kubernetes/vtgate-up.sh
    +++ b/examples/kubernetes/vtgate-up.sh
    @@ -31,7 +31,7 @@ for cell in $cells; do
       sed_script+="s,{{mysql_server_port}},$mysql_server_port,g;"
     
       echo "Creating vtgate service in cell $cell..."
    -  cat vtgate-service-template.yaml | sed -e "$sed_script" | $KUBECTL create --namespace=$VITESS_NAME -f -
    +  cat vtgate-service-template.yaml | sed -e "$sed_script" | $KUBECTL $KUBECTL_OPTIONS create -f -
     
       sed_script=""
       for var in vitess_image replicas vtdataroot_volume cell mysql_server_port; do
    @@ -39,5 +39,5 @@ for cell in $cells; do
       done
     
       echo "Creating vtgate replicationcontroller in cell $cell..."
    -  cat $VTGATE_TEMPLATE | sed -e "$sed_script" | $KUBECTL create --namespace=$VITESS_NAME -f -
    +  cat $VTGATE_TEMPLATE | sed -e "$sed_script" | $KUBECTL $KUBECTL_OPTIONS create -f -
     done
    diff --git a/examples/kubernetes/vttablet-down.sh b/examples/kubernetes/vttablet-down.sh
    index 205730de21f..1ea3c6e48a1 100755
    --- a/examples/kubernetes/vttablet-down.sh
    +++ b/examples/kubernetes/vttablet-down.sh
    @@ -32,7 +32,7 @@ for shard in `seq 1 $num_shards`; do
           printf -v alias '%s-%010d' $cell $uid
     
           echo "Deleting pod for tablet $alias..."
    -      $KUBECTL delete pod vttablet-$uid --namespace=$VITESS_NAME
    +      $KUBECTL $KUBECTL_OPTIONS delete pod vttablet-$uid
         done
         let cell_index=cell_index+100000000
       done
    diff --git a/examples/kubernetes/vttablet-up.sh b/examples/kubernetes/vttablet-up.sh
    index 7cc84b5435c..22202996a86 100755
    --- a/examples/kubernetes/vttablet-up.sh
    +++ b/examples/kubernetes/vttablet-up.sh
    @@ -55,7 +55,7 @@ for shard in $(echo $SHARDS | tr "," " "); do
           done
     
           # Instantiate template and send to kubectl.
    -      cat $VTTABLET_TEMPLATE | sed -e "$sed_script" | $KUBECTL create --namespace=$VITESS_NAME -f -
    +      cat $VTTABLET_TEMPLATE | sed -e "$sed_script" | $KUBECTL $KUBECTL_OPTIONS create -f -
         done
         let cell_index=cell_index+100000000
       done
    
    From 176336a0701dbc8c616938f2da449068dde5f541 Mon Sep 17 00:00:00 2001
    From: Martin Fris 
    Date: Fri, 3 Mar 2017 07:32:24 +0100
    Subject: [PATCH 071/108] PHP client - ProtoUtils fix after last vterrors
     changes (#2621)
    
    ---
     php/src/Vitess/ProtoUtils.php | 31 ++++++++++++++++---------------
     1 file changed, 16 insertions(+), 15 deletions(-)
    
    diff --git a/php/src/Vitess/ProtoUtils.php b/php/src/Vitess/ProtoUtils.php
    index cea73a082ba..025344aaffb 100644
    --- a/php/src/Vitess/ProtoUtils.php
    +++ b/php/src/Vitess/ProtoUtils.php
    @@ -1,7 +1,8 @@
     getError();
             if ($error) {
                 switch ($error->getCode()) {
    -                case ErrorCode::OK:
    +                case Code::OK:
                         break;
    -                case ErrorCode::INVALID_ARGUMENT:
    +                case Code::INVALID_ARGUMENT:
                         throw new Error\BadInput($error->getMessage());
    -                case ErrorCode::DEADLINE_EXCEEDED:
    +                case Code::DEADLINE_EXCEEDED:
                         throw new Error\DeadlineExceeded($error->getMessage());
    -                case ErrorCode::ALREADY_EXISTS:
    +                case Code::ALREADY_EXISTS:
                         throw new Error\Integrity($error->getMessage());
    -                case ErrorCode::UNAVAILABLE:
    +                case Code::UNAVAILABLE:
                         throw new Error\Transient($error->getMessage());
    -                case ErrorCode::UNAUTHENTICATED:
    +                case Code::UNAUTHENTICATED:
                         throw new Error\Unauthenticated($error->getMessage());
    -                case ErrorCode::ABORTED:
    +                case Code::ABORTED:
                         throw new Error\Aborted($error->getMessage());
                     default:
                         throw new Exception($error->getCode() . ': ' . $error->getMessage());
                 }
                 switch ($error->getLegacyCode()) {
    -                case ErrorCode::SUCCESS_LEGACY:
    +                case LegacyErrorCode::SUCCESS_LEGACY:
                         break;
    -                case ErrorCode::BAD_INPUT_LEGACY:
    +                case LegacyErrorCode::BAD_INPUT_LEGACY:
                         throw new Error\BadInput($error->getMessage());
    -                case ErrorCode::DEADLINE_EXCEEDED_LEGACY:
    +                case LegacyErrorCode::DEADLINE_EXCEEDED_LEGACY:
                         throw new Error\DeadlineExceeded($error->getMessage());
    -                case ErrorCode::INTEGRITY_ERROR_LEGACY:
    +                case LegacyErrorCode::INTEGRITY_ERROR_LEGACY:
                         throw new Error\Integrity($error->getMessage());
    -                case ErrorCode::TRANSIENT_ERROR_LEGACY:
    +                case LegacyErrorCode::TRANSIENT_ERROR_LEGACY:
                         throw new Error\Transient($error->getMessage());
    -                case ErrorCode::UNAUTHENTICATED_LEGACY:
    +                case LegacyErrorCode::UNAUTHENTICATED_LEGACY:
                         throw new Error\Unauthenticated($error->getMessage());
    -                case ErrorCode::NOT_IN_TX_LEGACY:
    +                case LegacyErrorCode::NOT_IN_TX_LEGACY:
                         throw new Error\Aborted($error->getMessage());
                     default:
                         throw new Exception($error->getCode() . ': ' . $error->getMessage());
    
    From bcfaabb55d60995c9ea2be3f06e26d6d4e98d4e3 Mon Sep 17 00:00:00 2001
    From: Michael Berlin 
    Date: Thu, 2 Mar 2017 22:55:37 -0800
    Subject: [PATCH 072/108] java/client: Use immutable instead of interface type.
    
    This is recommended by the Guava docs:
    
    "For field types and method return types, you should generally use the immutable type (such as ImmutableList) instead of the general collection interface type (such as List). This communicates to your callers all of the semantic guarantees listed above, which is almost always very useful information."
    
    From: https://google.github.io/guava/releases/21.0/api/docs/com/google/common/collect/ImmutableCollection.html
    ---
     .../src/test/java/com/youtube/vitess/mysql/DateTimeTest.java    | 2 +-
     1 file changed, 1 insertion(+), 1 deletion(-)
    
    diff --git a/java/client/src/test/java/com/youtube/vitess/mysql/DateTimeTest.java b/java/client/src/test/java/com/youtube/vitess/mysql/DateTimeTest.java
    index caebcb6fad7..eda3f4affe7 100644
    --- a/java/client/src/test/java/com/youtube/vitess/mysql/DateTimeTest.java
    +++ b/java/client/src/test/java/com/youtube/vitess/mysql/DateTimeTest.java
    @@ -23,7 +23,7 @@ public class DateTimeTest {
       private static final Calendar PST = Calendar.getInstance(TimeZone.getTimeZone("GMT-8"));
       private static final Calendar IST = Calendar.getInstance(TimeZone.getTimeZone("GMT+0530"));
     
    -  private static final Map TEST_DATES =
    +  private static final ImmutableMap TEST_DATES =
           new ImmutableMap.Builder()
               .put("1970-01-01", new Date(0L))
               .put("2008-01-02", new Date(1199232000000L))
    
    From d24a3e51dd195c46be97dca33e66ede196fce750 Mon Sep 17 00:00:00 2001
    From: Michael Berlin 
    Date: Thu, 2 Mar 2017 22:56:42 -0800
    Subject: [PATCH 073/108] java/grpc-client: Remove unused import.
    
    ---
     .../test/java/com/youtube/vitess/client/grpc/GrpcClientTest.java | 1 -
     1 file changed, 1 deletion(-)
    
    diff --git a/java/grpc-client/src/test/java/com/youtube/vitess/client/grpc/GrpcClientTest.java b/java/grpc-client/src/test/java/com/youtube/vitess/client/grpc/GrpcClientTest.java
    index 4affd468e95..b46ea162c9a 100644
    --- a/java/grpc-client/src/test/java/com/youtube/vitess/client/grpc/GrpcClientTest.java
    +++ b/java/grpc-client/src/test/java/com/youtube/vitess/client/grpc/GrpcClientTest.java
    @@ -12,7 +12,6 @@
     import java.net.InetSocketAddress;
     import java.net.ServerSocket;
     import java.util.Arrays;
    -import java.util.concurrent.TimeUnit;
     
     /**
      * This tests GrpcClient with a mock vtgate server (go/cmd/vtgateclienttest).
    
    From b60da2fa062ba6ff82cfb4907098e59e8e344708 Mon Sep 17 00:00:00 2001
    From: Steve Perkins 
    Date: Fri, 3 Feb 2017 14:16:36 -0500
    Subject: [PATCH 074/108] Refactors JDBC driver to parse TLS options in
     VitessConnection class
    
    Fixes broken unit tests
    
    Bumps version of Java client artifacts from 1.0-SNAPSHOT to 1.1.0-SNAPSHOT
    
    Adds null check
    
    Removes redundant fields
    
    Removes duplicate method
    
    Removes redundant unit tests
    
    Fixes broken unit tests
    
    Restores missing test
    ---
     java/client/pom.xml                           |  2 +-
     java/example/pom.xml                          |  8 +-
     java/grpc-client/pom.xml                      |  6 +-
     java/hadoop/pom.xml                           |  8 +-
     java/jdbc/pom.xml                             |  6 +-
     .../vitess/jdbc/ConnectionProperties.java     | 84 ++++++++++++++++++-
     .../vitess/jdbc/VitessConnection.java         |  9 +-
     .../vitess/jdbc/VitessDatabaseMetaData.java   |  5 +-
     .../flipkart/vitess/jdbc/VitessDriver.java    |  7 +-
     .../flipkart/vitess/jdbc/VitessJDBCUrl.java   | 52 ------------
     .../vitess/jdbc/VitessVTGateManager.java      | 56 ++++++-------
     .../com/flipkart/vitess/util/Constants.java   |  9 +-
     .../vitess/jdbc/ConnectionPropertiesTest.java |  3 +-
     .../jdbc/VitessDatabaseMetadataTest.java      |  9 +-
     .../vitess/jdbc/VitessDriverTest.java         |  2 +-
     .../vitess/jdbc/VitessJDBCUrlTest.java        | 56 -------------
     .../vitess/jdbc/VitessVTGateManagerTest.java  | 16 ++--
     java/pom.xml                                  |  2 +-
     18 files changed, 160 insertions(+), 180 deletions(-)
    
    diff --git a/java/client/pom.xml b/java/client/pom.xml
    index e84540e7a45..242ba2904bb 100644
    --- a/java/client/pom.xml
    +++ b/java/client/pom.xml
    @@ -4,7 +4,7 @@
       
         com.youtube.vitess
         vitess-parent
    -    1.0-SNAPSHOT
    +    1.1.0-SNAPSHOT
         ../pom.xml
       
       client
    diff --git a/java/example/pom.xml b/java/example/pom.xml
    index a1c5b3c09ad..cc817157344 100644
    --- a/java/example/pom.xml
    +++ b/java/example/pom.xml
    @@ -5,7 +5,7 @@
       
         com.youtube.vitess
         vitess-parent
    -    1.0-SNAPSHOT
    +    1.1.0-SNAPSHOT
         ../pom.xml
       
       example
    @@ -14,17 +14,17 @@
         
           com.flipkart.vitess
           vitess-connector-java
    -      1.0-SNAPSHOT
    +      1.1.0-SNAPSHOT
         
         
           com.youtube.vitess
           client
    -      1.0-SNAPSHOT
    +      1.1.0-SNAPSHOT
         
         
           com.youtube.vitess
           grpc-client
    -      1.0-SNAPSHOT
    +      1.1.0-SNAPSHOT
         
         
           io.grpc
    diff --git a/java/grpc-client/pom.xml b/java/grpc-client/pom.xml
    index 0fccd13db61..906c8b24a59 100644
    --- a/java/grpc-client/pom.xml
    +++ b/java/grpc-client/pom.xml
    @@ -5,7 +5,7 @@
       
         com.youtube.vitess
         vitess-parent
    -    1.0-SNAPSHOT
    +    1.1.0-SNAPSHOT
         ../pom.xml
       
       grpc-client
    @@ -14,12 +14,12 @@
         
           com.youtube.vitess
           client
    -      1.0-SNAPSHOT
    +      1.1.0-SNAPSHOT
         
         
           com.youtube.vitess
           client
    -      1.0-SNAPSHOT
    +      1.1.0-SNAPSHOT
           test-jar
           test
         
    diff --git a/java/hadoop/pom.xml b/java/hadoop/pom.xml
    index c2d09ac2ac5..7fc1cc21b4c 100644
    --- a/java/hadoop/pom.xml
    +++ b/java/hadoop/pom.xml
    @@ -5,7 +5,7 @@
         
             vitess-parent
             com.youtube.vitess
    -        1.0-SNAPSHOT
    +        1.1.0-SNAPSHOT
         
         4.0.0
     
    @@ -15,19 +15,19 @@
           
             com.youtube.vitess
             client
    -        1.0-SNAPSHOT
    +        1.1.0-SNAPSHOT
           
           
             com.youtube.vitess
             client
    -        1.0-SNAPSHOT
    +        1.1.0-SNAPSHOT
             test-jar
             test
           
           
             com.youtube.vitess
             grpc-client
    -        1.0-SNAPSHOT
    +        1.1.0-SNAPSHOT
           
           
             org.apache.hadoop
    diff --git a/java/jdbc/pom.xml b/java/jdbc/pom.xml
    index 3eb80ec5f7d..38d835d7c23 100644
    --- a/java/jdbc/pom.xml
    +++ b/java/jdbc/pom.xml
    @@ -5,7 +5,7 @@
         
             vitess-parent
             com.youtube.vitess
    -        1.0-SNAPSHOT
    +        1.1.0-SNAPSHOT
             ../pom.xml
         
         4.0.0
    @@ -32,12 +32,12 @@
             
                 com.youtube.vitess
                 client
    -            1.0-SNAPSHOT
    +            1.1.0-SNAPSHOT
             
             
                 com.youtube.vitess
                 grpc-client
    -            1.0-SNAPSHOT
    +            1.1.0-SNAPSHOT
             
             
                 io.grpc
    diff --git a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/ConnectionProperties.java b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/ConnectionProperties.java
    index 110bf4abe2a..b9300a9b6ad 100644
    --- a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/ConnectionProperties.java
    +++ b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/ConnectionProperties.java
    @@ -93,6 +93,50 @@ public class ConnectionProperties {
             "Tablet Type to which Vitess will connect(master, replica, rdonly)",
             Constants.DEFAULT_TABLET_TYPE);
     
    +    // TLS-related configs
    +    private BooleanConnectionProperty useSSL = new BooleanConnectionProperty(
    +        Constants.Property.USE_SSL,
    +        "Whether this connection should use transport-layer security",
    +        false);
    +    private StringConnectionProperty keyStore = new StringConnectionProperty(
    +        Constants.Property.KEYSTORE,
    +        "The Java .JKS keystore file to use when TLS is enabled",
    +        null,
    +        null);
    +    private StringConnectionProperty keyStorePassword = new StringConnectionProperty(
    +        Constants.Property.KEYSTORE_PASSWORD,
    +        "The password protecting the keystore file (if a password is set)",
    +        null,
    +        null);
    +    private StringConnectionProperty keyAlias = new StringConnectionProperty(
    +        Constants.Property.KEY_ALIAS,
    +        "Alias under which the private key is stored in the keystore file (if not specified, then the "
    +            + "first valid `PrivateKeyEntry` will be used)",
    +        null,
    +        null);
    +    private StringConnectionProperty keyPassword = new StringConnectionProperty(
    +        Constants.Property.KEY_PASSWORD,
    +        "The additional password protecting the private key entry within the keystore file (if not "
    +                + "specified, then the logic will fallback to the keystore password and then to no password at all)",
    +        null,
    +        null);
    +    private StringConnectionProperty trustStore = new StringConnectionProperty(
    +        Constants.Property.TRUSTSTORE,
    +        "The Java .JKS truststore file to use when TLS is enabled",
    +        null,
    +        null);
    +    private StringConnectionProperty trustStorePassword = new StringConnectionProperty(
    +        Constants.Property.TRUSTSTORE_PASSWORD,
    +        "The password protecting the truststore file (if a password is set)",
    +        null,
    +        null);
    +    private StringConnectionProperty trustAlias = new StringConnectionProperty(
    +        Constants.Property.TRUST_ALIAS,
    +        "Alias under which the certificate chain is stored in the truststore file (if not specified, then "
    +                + "the first valid `X509Certificate` will be used)",
    +        null,
    +        null);
    +
         // Caching of some hot properties to avoid casting over and over
         private Topodata.TabletType tabletTypeCache;
         private Query.ExecuteOptions.IncludedFields includedFieldsCache;
    @@ -274,6 +318,38 @@ public void setTabletType(Topodata.TabletType tabletType) {
             this.tabletTypeCache = this.tabletType.getValueAsEnum();
         }
     
    +    public boolean getUseSSL() {
    +        return useSSL.getValueAsBoolean();
    +    }
    +
    +    public String getKeyStore() {
    +        return keyStore.getValueAsString();
    +    }
    +
    +    public String getKeyStorePassword() {
    +        return keyStorePassword.getValueAsString();
    +    }
    +
    +    public String getKeyAlias() {
    +        return keyAlias.getValueAsString();
    +    }
    +
    +    public String getKeyPassword() {
    +        return keyPassword.getValueAsString();
    +    }
    +
    +    public String getTrustStore() {
    +        return trustStore.getValueAsString();
    +    }
    +
    +    public String getTrustStorePassword() {
    +        return trustStorePassword.getValueAsString();
    +    }
    +
    +    public String getTrustAlias() {
    +        return trustAlias.getValueAsString();
    +    }
    +
         abstract static class ConnectionProperty {
     
             private final String name;
    @@ -369,10 +445,6 @@ void initializeFrom(String extractedValue) {
                 }
             }
     
    -        public String getValueAsString() {
    -            return (String) valueAsObject;
    -        }
    -
             @Override
             String[] getAllowableValues() {
                 return allowableValues;
    @@ -381,6 +453,10 @@ String[] getAllowableValues() {
             public void setValue(String value) {
                 this.valueAsObject = value;
             }
    +
    +        String getValueAsString() {
    +            return valueAsObject == null ? null : valueAsObject.toString();
    +        }
         }
     
         private static class EnumConnectionProperty> extends ConnectionProperty {
    diff --git a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessConnection.java b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessConnection.java
    index c17e151f06a..9f2814f11a7 100644
    --- a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessConnection.java
    +++ b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessConnection.java
    @@ -69,7 +69,6 @@ public class VitessConnection extends ConnectionProperties implements Connection
         public VitessConnection(String url, Properties connectionProperties) throws SQLException {
             try {
                 this.vitessJDBCUrl = new VitessJDBCUrl(url, connectionProperties);
    -            this.vTGateConnections = new VitessVTGateManager.VTGateConnections(vitessJDBCUrl);
                 this.closed = false;
                 this.dbProperties = null;
             } catch (Exception e) {
    @@ -80,6 +79,10 @@ public VitessConnection(String url, Properties connectionProperties) throws SQLE
             initializeProperties(vitessJDBCUrl.getProperties());
         }
     
    +    public void connect() {
    +        this.vTGateConnections = new VitessVTGateManager.VTGateConnections(this);
    +    }
    +
         /**
          * Creates statement for the given connection
          *
    @@ -585,8 +588,8 @@ public void setVtGateTx(VTGateTx vtGateTx) {
             this.vtGateTx = vtGateTx;
         }
     
    -    public String getUrl() {
    -        return this.vitessJDBCUrl.getUrl();
    +    public VitessJDBCUrl getUrl() {
    +        return this.vitessJDBCUrl;
         }
     
         /**
    diff --git a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessDatabaseMetaData.java b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessDatabaseMetaData.java
    index 98ac21c0d03..7918f92f0b0 100644
    --- a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessDatabaseMetaData.java
    +++ b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessDatabaseMetaData.java
    @@ -26,7 +26,10 @@ public abstract class VitessDatabaseMetaData implements DatabaseMetaData {
         protected VitessConnection connection = null;
     
         public String getURL() throws SQLException {
    -        return this.connection.getUrl();
    +        if (this.connection == null || this.connection.getUrl() == null) {
    +            return null;
    +        }
    +        return this.connection.getUrl().getUrl();
         }
     
         public String getUserName() throws SQLException {
    diff --git a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessDriver.java b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessDriver.java
    index c1439da98e8..40c65b9d050 100644
    --- a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessDriver.java
    +++ b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessDriver.java
    @@ -31,7 +31,12 @@ public class VitessDriver implements Driver {
     
         @Override
         public Connection connect(String url, Properties info) throws SQLException {
    -        return acceptsURL(url) ? new VitessConnection(url, info) : null;
    +        if (!acceptsURL(url)) {
    +            return null;
    +        }
    +        final VitessConnection connection = new VitessConnection(url, info);
    +        connection.connect();
    +        return connection;
         }
     
         /**
    diff --git a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessJDBCUrl.java b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessJDBCUrl.java
    index 433a5db226c..b8a609f7208 100644
    --- a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessJDBCUrl.java
    +++ b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessJDBCUrl.java
    @@ -36,16 +36,6 @@ public class VitessJDBCUrl {
         private final Properties info;
         private String catalog;
     
    -    private final boolean useSSL;
    -    private final String keyStore;
    -    private final String keyStorePassword;
    -    private final String keyAlias;
    -    private final String keyPassword;
    -    private final String trustStore;
    -    private final String trustStorePassword;
    -    private final String trustAlias;
    -
    -
         /*
          Assuming List of vtGate ips could be given in url, separated by ","
         */
    @@ -152,16 +142,6 @@ public VitessJDBCUrl(String url, Properties info) throws SQLException {
             this.catalog =
                 StringUtils.isNullOrEmptyWithoutWS(m.group(10)) ? this.keyspace : m.group(10);
             this.hostInfos = getURLHostInfos(postUrl);
    -
    -        this.useSSL = "true".equalsIgnoreCase(caseInsensitiveKeyLookup(info, Constants.Property.USE_SSL));
    -        this.keyStore = caseInsensitiveKeyLookup(info, Constants.Property.KEYSTORE);
    -        this.keyStorePassword = caseInsensitiveKeyLookup(info, Constants.Property.KEYSTORE_PASSWORD);
    -        this.keyAlias = caseInsensitiveKeyLookup(info, Constants.Property.KEY_ALIAS);
    -        this.keyPassword = caseInsensitiveKeyLookup(info, Constants.Property.KEY_PASSWORD);
    -        this.trustStore = caseInsensitiveKeyLookup(info, Constants.Property.TRUSTSTORE);
    -        this.trustStorePassword = caseInsensitiveKeyLookup(info, Constants.Property.TRUSTSTORE_PASSWORD);
    -        this.trustAlias = caseInsensitiveKeyLookup(info, Constants.Property.TRUSTSTORE_ALIAS);
    -
             this.url = url;
             this.info = info;
         }
    @@ -190,38 +170,6 @@ public void setCatalog(String catalog) {
             this.catalog = catalog;
         }
     
    -    public boolean isUseSSL() {
    -        return useSSL;
    -    }
    -
    -    public String getKeyStore() {
    -        return keyStore;
    -    }
    -
    -    public String getKeyStorePassword() {
    -        return keyStorePassword;
    -    }
    -
    -    public String getKeyAlias() {
    -        return keyAlias;
    -    }
    -
    -    public String getKeyPassword() {
    -        return keyPassword;
    -    }
    -
    -    public String getTrustStore() {
    -        return trustStore;
    -    }
    -
    -    public String getTrustStorePassword() {
    -        return trustStorePassword;
    -    }
    -
    -    public String getTrustAlias() {
    -        return trustAlias;
    -    }
    -
         /**
          * Get Properties object for params after ? in url.
          *
    diff --git a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessVTGateManager.java b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessVTGateManager.java
    index 3da0dd4ce41..a38c9529c87 100644
    --- a/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessVTGateManager.java
    +++ b/java/jdbc/src/main/java/com/flipkart/vitess/jdbc/VitessVTGateManager.java
    @@ -37,16 +37,14 @@ public static class VTGateConnections {
             /**
              * Constructor
              *
    -         * @param vitessJDBCUrl
    +         * @param connection
              */
    -        public VTGateConnections(VitessJDBCUrl vitessJDBCUrl) {
    -            for (VitessJDBCUrl.HostInfo hostInfo : vitessJDBCUrl.getHostInfos()) {
    -                String identifier = getIdentifer(hostInfo.getHostname(), hostInfo.getPort(),
    -                    vitessJDBCUrl.getUsername());
    +        public VTGateConnections(VitessConnection connection) {
    +            for (VitessJDBCUrl.HostInfo hostInfo : connection.getUrl().getHostInfos()) {
    +                String identifier = getIdentifer(hostInfo.getHostname(), hostInfo.getPort(), connection.getUsername());
                     synchronized (VitessVTGateManager.class) {
                         if (!vtGateConnHashMap.containsKey(identifier)) {
    -                        updateVtGateConnHashMap(identifier, hostInfo.getHostname(),
    -                            hostInfo.getPort(), vitessJDBCUrl);
    +                        updateVtGateConnHashMap(identifier, hostInfo.getHostname(), hostInfo.getPort(), connection);
                         }
                     }
                     vtGateIdentifiers.add(identifier);
    @@ -78,11 +76,11 @@ private static String getIdentifer(String hostname, int port, String userIdentif
          * @param identifier
          * @param hostname
          * @param port
    -     * @param jdbcUrl
    +     * @param connection
          */
         private static void updateVtGateConnHashMap(String identifier, String hostname, int port,
    -                                                VitessJDBCUrl jdbcUrl) {
    -        vtGateConnHashMap.put(identifier, getVtGateConn(hostname, port, jdbcUrl));
    +                                                VitessConnection connection) {
    +        vtGateConnHashMap.put(identifier, getVtGateConn(hostname, port, connection));
         }
     
         /**
    @@ -105,30 +103,30 @@ private static VTGateConn getVtGateConn(String hostname, int port, String userna
          *
          * @param hostname
          * @param port
    -     * @param jdbcUrl
    +     * @param connection
          * @return
          */
    -    private static VTGateConn getVtGateConn(String hostname, int port, VitessJDBCUrl jdbcUrl) {
    -        final String username = jdbcUrl.getUsername();
    -        final String keyspace = jdbcUrl.getKeyspace();
    +    private static VTGateConn getVtGateConn(String hostname, int port, VitessConnection connection) {
    +        final String username = connection.getUsername();
    +        final String keyspace = connection.getKeyspace();
             final Context context = CommonUtils.createContext(username, Constants.CONNECTION_TIMEOUT);
             final InetSocketAddress inetSocketAddress = new InetSocketAddress(hostname, port);
             RpcClient client;
    -        if (jdbcUrl.isUseSSL()) {
    -            final String keyStorePath = jdbcUrl.getKeyStore() != null
    -                    ? jdbcUrl.getKeyStore() : System.getProperty(Constants.Property.KEYSTORE_FULL);
    -            final String keyStorePassword = jdbcUrl.getKeyStorePassword() != null
    -                    ? jdbcUrl.getKeyStorePassword() : System.getProperty(Constants.Property.KEYSTORE_PASSWORD_FULL);
    -            final String keyAlias = jdbcUrl.getKeyAlias() != null
    -                    ? jdbcUrl.getKeyAlias() : System.getProperty(Constants.Property.KEY_ALIAS_FULL);
    -            final String keyPassword = jdbcUrl.getKeyPassword() != null
    -                    ? jdbcUrl.getKeyPassword() : System.getProperty(Constants.Property.KEY_PASSWORD_FULL);
    -            final String trustStorePath = jdbcUrl.getTrustStore() != null
    -                    ? jdbcUrl.getTrustStore() : System.getProperty(Constants.Property.TRUSTSTORE_FULL);
    -            final String trustStorePassword = jdbcUrl.getTrustStorePassword() != null
    -                    ? jdbcUrl.getTrustStorePassword() : System.getProperty(Constants.Property.TRUSTSTORE_PASSWORD_FULL);
    -            final String trustAlias = jdbcUrl.getTrustAlias() != null
    -                    ? jdbcUrl.getTrustAlias() : System.getProperty(Constants.Property.TRUSTSTORE_ALIAS_FULL);
    +        if (connection.getUseSSL()) {
    +            final String keyStorePath = connection.getKeyStore() != null
    +                    ? connection.getKeyStore() : System.getProperty(Constants.Property.KEYSTORE_FULL);
    +            final String keyStorePassword = connection.getKeyStorePassword() != null
    +                    ? connection.getKeyStorePassword() : System.getProperty(Constants.Property.KEYSTORE_PASSWORD_FULL);
    +            final String keyAlias = connection.getKeyAlias() != null
    +                    ? connection.getKeyAlias() : System.getProperty(Constants.Property.KEY_ALIAS_FULL);
    +            final String keyPassword = connection.getKeyPassword() != null
    +                    ? connection.getKeyPassword() : System.getProperty(Constants.Property.KEY_PASSWORD_FULL);
    +            final String trustStorePath = connection.getTrustStore() != null
    +                    ? connection.getTrustStore() : System.getProperty(Constants.Property.TRUSTSTORE_FULL);
    +            final String trustStorePassword = connection.getTrustStorePassword() != null
    +                    ? connection.getTrustStorePassword() : System.getProperty(Constants.Property.TRUSTSTORE_PASSWORD_FULL);
    +            final String trustAlias = connection.getTrustAlias() != null
    +                    ? connection.getTrustAlias() : System.getProperty(Constants.Property.TRUST_ALIAS_FULL);
     
                 final TlsOptions tlsOptions = new TlsOptions()
                         .keyStorePath(keyStorePath)
    diff --git a/java/jdbc/src/main/java/com/flipkart/vitess/util/Constants.java b/java/jdbc/src/main/java/com/flipkart/vitess/util/Constants.java
    index 747af4e9fbd..8272402e7a3 100644
    --- a/java/jdbc/src/main/java/com/flipkart/vitess/util/Constants.java
    +++ b/java/jdbc/src/main/java/com/flipkart/vitess/util/Constants.java
    @@ -105,15 +105,14 @@ public static final class Property {
             public static final String EXECUTE_TYPE = "executeType";
             public static final String TWOPC_ENABLED = "twopcEnabled";
     
    -	public static final String USE_SSL = "useSSL";
    -
    -	public static final String KEYSTORE = "keyStore";
    +        public static final String USE_SSL = "useSSL";
    +        public static final String KEYSTORE = "keyStore";
             public static final String KEYSTORE_PASSWORD = "keyStorePassword";
             public static final String KEY_ALIAS = "keyAlias";
             public static final String KEY_PASSWORD = "keyPassword";
             public static final String TRUSTSTORE = "trustStore";
             public static final String TRUSTSTORE_PASSWORD = "trustStorePassword";
    -        public static final String TRUSTSTORE_ALIAS = "trustAlias";
    +        public static final String TRUST_ALIAS = "trustAlias";
     
             public static final String KEYSTORE_FULL = "javax.net.ssl.keyStore";
             public static final String KEYSTORE_PASSWORD_FULL = "javax.net.ssl.keyStorePassword";
    @@ -121,7 +120,7 @@ public static final class Property {
             public static final String KEY_PASSWORD_FULL = "javax.net.ssl.keyPassword";
             public static final String TRUSTSTORE_FULL = "javax.net.ssl.trustStore";
             public static final String TRUSTSTORE_PASSWORD_FULL = "javax.net.ssl.trustStorePassword";
    -        public static final String TRUSTSTORE_ALIAS_FULL = "javax.net.ssl.trustAlias";
    +        public static final String TRUST_ALIAS_FULL = "javax.net.ssl.trustAlias";
             public static final String INCLUDED_FIELDS = "includedFields";
         }
     
    diff --git a/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/ConnectionPropertiesTest.java b/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/ConnectionPropertiesTest.java
    index 57000bd32ad..b13be772967 100644
    --- a/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/ConnectionPropertiesTest.java
    +++ b/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/ConnectionPropertiesTest.java
    @@ -15,7 +15,7 @@
     
     public class ConnectionPropertiesTest {
     
    -    private static final int NUM_PROPS = 12;
    +    private static final int NUM_PROPS = 20;
     
         @Test
         public void testReflection() throws Exception {
    @@ -49,6 +49,7 @@ public void testDefaults() throws SQLException {
             Assert.assertEquals("includedFields", Constants.DEFAULT_INCLUDED_FIELDS, props.getIncludedFields());
             Assert.assertEquals("includedFieldsCache", true, props.isIncludeAllFields());
             Assert.assertEquals("tabletType", Constants.DEFAULT_TABLET_TYPE, props.getTabletType());
    +        Assert.assertEquals("useSSL", false, props.getUseSSL());
         }
     
         @Test
    diff --git a/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessDatabaseMetadataTest.java b/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessDatabaseMetadataTest.java
    index b7438025fc7..6f3bf170d85 100644
    --- a/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessDatabaseMetadataTest.java
    +++ b/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessDatabaseMetadataTest.java
    @@ -745,11 +745,14 @@
         }
     
         @Test public void getUrlTest() throws SQLException {
    -        VitessConnection mockConn = PowerMockito.mock(VitessConnection.class);
             String connectionUrl = "jdbc:vitess://://";
    -        PowerMockito.when(mockConn.getUrl()).thenReturn(connectionUrl);
    -        Assert.assertEquals(connectionUrl, mockConn.getUrl());
    +        VitessJDBCUrl mockUrl = PowerMockito.mock(VitessJDBCUrl.class);
    +        PowerMockito.when(mockUrl.getUrl()).thenReturn(connectionUrl);
    +
    +        VitessConnection mockConn = PowerMockito.mock(VitessConnection.class);
    +        PowerMockito.when(mockConn.getUrl()).thenReturn(mockUrl);
     
    +        Assert.assertEquals(connectionUrl, mockConn.getUrl().getUrl());
         }
     
         @Test public void isReadOnlyTest() throws SQLException {
    diff --git a/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessDriverTest.java b/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessDriverTest.java
    index 10d56baea0a..f5a5bf4f16a 100644
    --- a/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessDriverTest.java
    +++ b/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessDriverTest.java
    @@ -35,7 +35,7 @@ public class VitessDriverTest {
             try {
                 VitessConnection connection =
                     (VitessConnection) DriverManager.getConnection(dbURL, new Properties());
    -            Assert.assertEquals(connection.getUrl(), dbURL);
    +            Assert.assertEquals(connection.getUrl().getUrl(), dbURL);
             } catch (SQLException e) {
                 Assert.fail("SQLException Not Expected");
             }
    diff --git a/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessJDBCUrlTest.java b/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessJDBCUrlTest.java
    index 775a5a927ef..d2fd93192bc 100644
    --- a/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessJDBCUrlTest.java
    +++ b/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessJDBCUrlTest.java
    @@ -175,61 +175,5 @@ public class VitessJDBCUrlTest {
             Assert.assertEquals("val4", vitessJDBCUrl.getProperties().getProperty("prop2"));
             Assert.assertEquals("val3", vitessJDBCUrl.getProperties().getProperty("prop3"));
         }
    -    @Test public void testSSLParamSet() throws SQLException {
    -        Properties info = new Properties();
    -        VitessJDBCUrl vitessJDBCUrl = new VitessJDBCUrl(
    -                "jdbc:vitess://hostname:15991/keyspace?useSSL=true&keyStore=/tmp/keystore.jks&keyStorePassword=abc123&keyAlias=privateKey&keyPassword=def456&trustStore=/tmp/truststore.jks&trustAlias=issuingCA&trustStorePassword=ghi789",
    -                info
    -        );
    -        Assert.assertTrue(vitessJDBCUrl.isUseSSL());
    -        Assert.assertEquals("/tmp/keystore.jks", vitessJDBCUrl.getKeyStore());
    -        Assert.assertEquals("abc123", vitessJDBCUrl.getKeyStorePassword());
    -        Assert.assertEquals("privateKey", vitessJDBCUrl.getKeyAlias());
    -        Assert.assertEquals("def456", vitessJDBCUrl.getKeyPassword());
    -        Assert.assertEquals("/tmp/truststore.jks", vitessJDBCUrl.getTrustStore());
    -        Assert.assertEquals("ghi789", vitessJDBCUrl.getTrustStorePassword());
    -        Assert.assertEquals("issuingCA", vitessJDBCUrl.getTrustAlias());
    -    }
    -
    -    /**
     -     * Validate that the SSL-related optional parameters can have case-insensitive KEYS (e.g. "useSSL", or just
     -     * plain "usessl"). Also validate that for the "useSSL" parameter, any case-insensitive match for "true" will
     -     * be recognized as true.
     -     *
     -     * However, all of the other VALUES (e.g. filenames and passwords) obviously must have their case preserved.
    - * - * @throws SQLException - */ - @Test public void testSSLParamCaseInsensitiveSet() throws SQLException { - Properties info = new Properties(); - VitessJDBCUrl vitessJDBCUrl = new VitessJDBCUrl( - "jdbc:vitess://hostname:15991/keyspace?UsEssL=tRuE&keYstOre=/tmp/keystore.jks&kEysTorEpaSsWord=abc123&KeYAliaS=privateKey&kEypAssWord=def456&TRUSTSTORE=/tmp/truststore.jks&trUStaLiAs=issuingCA&truststorepassword=ghi789", - info - ); - Assert.assertTrue(vitessJDBCUrl.isUseSSL()); - Assert.assertEquals("/tmp/keystore.jks", vitessJDBCUrl.getKeyStore()); - Assert.assertEquals("abc123", vitessJDBCUrl.getKeyStorePassword()); - Assert.assertEquals("privateKey", vitessJDBCUrl.getKeyAlias()); - Assert.assertEquals("def456", vitessJDBCUrl.getKeyPassword()); - Assert.assertEquals("/tmp/truststore.jks", vitessJDBCUrl.getTrustStore()); - Assert.assertEquals("ghi789", vitessJDBCUrl.getTrustStorePassword()); - Assert.assertEquals("issuingCA", vitessJDBCUrl.getTrustAlias()); - } - - @Test public void testSSLParamUnset() throws SQLException { - Properties info = new Properties(); - VitessJDBCUrl vitessJDBCUrl = new VitessJDBCUrl( - "jdbc:vitess://hostname:15991/keyspace", - info - ); - Assert.assertFalse(vitessJDBCUrl.isUseSSL()); - Assert.assertNull(vitessJDBCUrl.getKeyStore()); - Assert.assertNull(vitessJDBCUrl.getKeyStorePassword()); - Assert.assertNull(vitessJDBCUrl.getKeyAlias()); - Assert.assertNull(vitessJDBCUrl.getKeyPassword()); - Assert.assertNull(vitessJDBCUrl.getTrustStore()); - Assert.assertNull(vitessJDBCUrl.getTrustStorePassword()); - Assert.assertNull(vitessJDBCUrl.getTrustAlias()); - } } diff --git a/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessVTGateManagerTest.java b/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessVTGateManagerTest.java index 9811d54c336..5ba508a7a1c 100644 --- a/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessVTGateManagerTest.java +++ b/java/jdbc/src/test/java/com/flipkart/vitess/jdbc/VitessVTGateManagerTest.java @@ 
-36,18 +36,18 @@ public VTGateConn getVtGateConn() { VitessVTGateManager.close(); Properties info = new Properties(); info.setProperty("username", "user"); - VitessJDBCUrl vitessJDBCUrl = new VitessJDBCUrl( - "jdbc:vitess://10.33.17.231:15991:xyz,10.33.17.232:15991:xyz,10.33.17" - + ".233:15991/shipment/shipment?tabletType=master", info); + VitessConnection connection = new VitessConnection( + "jdbc:vitess://10.33.17.231:15991:xyz,10.33.17.232:15991:xyz,10.33.17" + + ".233:15991/shipment/shipment?tabletType=master", info); VitessVTGateManager.VTGateConnections vtGateConnections = - new VitessVTGateManager.VTGateConnections(vitessJDBCUrl); + new VitessVTGateManager.VTGateConnections(connection); info.setProperty("username", "user"); - VitessJDBCUrl vitessJDBCUrl1 = new VitessJDBCUrl( + VitessConnection connection1 = new VitessConnection( "jdbc:vitess://10.33.17.231:15991:xyz,10.33.17.232:15991:xyz,11.33.17" + ".233:15991/shipment/shipment?tabletType=master", info); VitessVTGateManager.VTGateConnections vtGateConnections1 = - new VitessVTGateManager.VTGateConnections(vitessJDBCUrl1); + new VitessVTGateManager.VTGateConnections(connection1); Field privateMapField = VitessVTGateManager.class. 
getDeclaredField("vtGateConnHashMap"); @@ -63,11 +63,11 @@ public VTGateConn getVtGateConn() { VitessVTGateManager.close(); Properties info = new Properties(); info.setProperty("username", "user"); - VitessJDBCUrl vitessJDBCUrl = new VitessJDBCUrl( + VitessConnection connection = new VitessConnection( "jdbc:vitess://10.33.17.231:15991:xyz,10.33.17.232:15991:xyz,10.33.17" + ".233:15991/shipment/shipment?tabletType=master", info); VitessVTGateManager.VTGateConnections vtGateConnections = - new VitessVTGateManager.VTGateConnections(vitessJDBCUrl); + new VitessVTGateManager.VTGateConnections(connection); Assert.assertEquals(vtGateConnections.getVtGateConnInstance() instanceof VTGateConn, true); VTGateConn vtGateConn = vtGateConnections.getVtGateConnInstance(); Field privateMapField = VitessVTGateManager.class. diff --git a/java/pom.xml b/java/pom.xml index 8a74c12766d..81c241747fd 100644 --- a/java/pom.xml +++ b/java/pom.xml @@ -5,7 +5,7 @@ 4.0.0 com.youtube.vitess vitess-parent - 1.0-SNAPSHOT + 1.1.0-SNAPSHOT pom https://github.com/youtube/vitess/ Umbrella project for all Java activities for Vitess From e2f97f875ba3430f9662c4e7330fcab09393cb2c Mon Sep 17 00:00:00 2001 From: Bryan Beaudreault Date: Fri, 3 Mar 2017 21:38:52 -0500 Subject: [PATCH 075/108] WIP -- Working to support migration-scoped queries from VTGate (#2606) * Reload schemas on all DDLs. Support table qualifiers in DDL queries. * For now only support table_name in non-create because the lookahead conflicts with force_eof * create tables cant have a row count, so no point checking. This avoids an NPE on Table.Name, since creates are the only one to only have a NewName * Make table_name work for CREATE and VIEWs. 
Fix alter view test, the syntax in the test failed to parse and was incorrect according to docs * This test does not apply since the removal of TableWasDropped * If we're in a transaction, don't start a new one * Reload schema on commit, if necessary * recycle connection when using transaction * utilize existing autocommit function * simplify * unneeded function * If an transaction is open, we should commit it before running the DDL -- DDL's have an implicit commit. * implement and use BeginAgain when we implicit commit a running transaction due to DDL * Move BeginAgain to TxConnection * dont lowercase qualifier --- data/test/tabletserver/ddl_cases.txt | 27 +- data/test/vtgate/unsupported_cases.txt | 9 + go/vt/schemamanager/tablet_executor.go | 5 +- go/vt/sqlparser/ast.go | 14 +- go/vt/sqlparser/sql.go | 1054 +++++++++-------- go/vt/sqlparser/sql.y | 45 +- .../engines/schema/schema_engine.go | 13 - .../engines/schema/schema_engine_test.go | 39 - go/vt/tabletserver/planbuilder/ddl.go | 6 +- go/vt/tabletserver/planbuilder/plan_test.go | 4 +- go/vt/tabletserver/query_executor.go | 36 +- go/vt/tabletserver/tx_pool.go | 11 + 12 files changed, 664 insertions(+), 599 deletions(-) diff --git a/data/test/tabletserver/ddl_cases.txt b/data/test/tabletserver/ddl_cases.txt index e492b439cdb..62587692996 100644 --- a/data/test/tabletserver/ddl_cases.txt +++ b/data/test/tabletserver/ddl_cases.txt @@ -8,6 +8,11 @@ "Action": "drop", "TableName": "b" } +"drop table b.c" +{ + "Action": "drop", "TableName": "b.c" +} + "alter table c alter foo" { "Action": "alter", "TableName": "c", "NewTable": "c" @@ -18,16 +23,31 @@ "Action": "alter", "TableName": "c", "NewTable": "c" } +"alter table b.c comment 'aa'" +{ + "Action": "alter", "TableName": "b.c", "NewTable": "b.c" +} + "drop index a on b" { "Action": "alter", "TableName": "b", "NewName": "b" } +"drop index a on b.c" +{ + "Action": "alter", "TableName": "b.c", "NewName": "b.c" +} + "rename table a to b" { "Action": "rename", 
"TableName": "a", "NewTable": "b" } +"rename table c.a to c.b" +{ + "Action": "rename", "TableName": "c.a", "NewTable": "c.b" +} + "alter table a rename b" { "Action": "rename", "TableName": "a", "NewTable": "b" @@ -38,12 +58,17 @@ "Action": "rename", "TableName": "a", "NewTable": "b" } +"alter table c.a rename to c.b" +{ + "Action": "rename", "TableName": "c.a", "NewTable": "c.b" +} + "create view a asdasd" { "Action": "create", "NewName": "a" } -"alter view c alter foo" +"alter view c as foo" { "Action": "alter", "TableName": "c", "NewTable": "c" } diff --git a/data/test/vtgate/unsupported_cases.txt b/data/test/vtgate/unsupported_cases.txt index 34b5c87dc63..c9dfa90e36c 100644 --- a/data/test/vtgate/unsupported_cases.txt +++ b/data/test/vtgate/unsupported_cases.txt @@ -10,6 +10,15 @@ "create table a(id int)" "unsupported construct" +"create table a.b(id int)" +"unsupported construct" + +"alter table a ADD id int" +"unsupported construct" + +"alter table a.b ADD id int" +"unsupported construct" + # DBA statements "explain select * from user" "unsupported construct" diff --git a/go/vt/schemamanager/tablet_executor.go b/go/vt/schemamanager/tablet_executor.go index eb0f9e91092..1705feee24a 100644 --- a/go/vt/schemamanager/tablet_executor.go +++ b/go/vt/schemamanager/tablet_executor.go @@ -140,10 +140,11 @@ func (exec *TabletExecutor) detectBigSchemaChanges(ctx context.Context, parsedDD tableWithCount[tableSchema.Name] = tableSchema.RowCount } for _, ddl := range parsedDDLs { - if ddl.Action == sqlparser.DropStr { + switch ddl.Action { + case sqlparser.DropStr, sqlparser.CreateStr: continue } - tableName := ddl.Table.String() + tableName := ddl.Table.Name.String() if rowCount, ok := tableWithCount[tableName]; ok { if rowCount > 100000 && ddl.Action == sqlparser.AlterStr { return true, fmt.Errorf( diff --git a/go/vt/sqlparser/ast.go b/go/vt/sqlparser/ast.go index 75424c310fa..33da94ff4c9 100644 --- a/go/vt/sqlparser/ast.go +++ b/go/vt/sqlparser/ast.go @@ -392,8 +392,8 
@@ func (node *Set) WalkSubtree(visit Visit) error { // NewName is set for AlterStr, CreateStr, RenameStr. type DDL struct { Action string - Table TableIdent - NewName TableIdent + Table *TableName + NewName *TableName IfExists bool } @@ -725,6 +725,16 @@ func (node *TableName) Equal(t *TableName) bool { return node.Name == t.Name && node.Qualifier == t.Qualifier } +// ToViewName returns a TableName acceptable for use as a VIEW. VIEW names are +// always lowercase, so ToViewName lowercasese the name. Databases are case-sensitive +// so Qualifier is left untouched. +func (node *TableName) ToViewName() *TableName { + return &TableName{ + Qualifier: node.Qualifier, + Name: NewTableIdent(strings.ToLower(node.Name.v)), + } +} + // ParenTableExpr represents a parenthesized list of TableExpr. type ParenTableExpr struct { Exprs TableExprs diff --git a/go/vt/sqlparser/sql.go b/go/vt/sqlparser/sql.go index d20462ad18c..db61dff4665 100644 --- a/go/vt/sqlparser/sql.go +++ b/go/vt/sqlparser/sql.go @@ -363,333 +363,350 @@ var yyExca = [...]int{ -1, 85, 103, 307, -2, 303, - -1, 292, + -1, 295, 103, 309, -2, 305, } -const yyNprod = 410 +const yyNprod = 413 const yyPrivate = 57344 var yyTokenNames []string var yyStates []string -const yyLast = 2931 +const yyLast = 3133 var yyAct = [...]int{ - 421, 429, 561, 243, 594, 79, 292, 523, 475, 532, - 472, 593, 417, 328, 374, 267, 503, 487, 311, 501, - 441, 233, 290, 309, 242, 502, 517, 408, 49, 100, - 240, 414, 28, 61, 685, 385, 474, 678, 684, 671, - 81, 683, 677, 597, 638, 266, 296, 670, 373, 3, - 37, 96, 39, 87, 50, 51, 40, 43, 660, 342, - 341, 351, 352, 344, 345, 346, 347, 348, 349, 350, - 343, 80, 403, 353, 138, 124, 327, 465, 52, 466, - 62, 326, 83, 465, 325, 466, 14, 88, 92, 465, - 94, 466, 48, 106, 99, 302, 42, 72, 43, 142, - 144, 45, 46, 47, 44, 62, 67, 111, 576, 300, - 573, 81, 114, 140, 541, 74, 71, 661, 121, 81, - 148, 109, 123, 245, 62, 467, 289, 353, 332, 331, - 138, 467, 118, 63, 304, 236, 146, 467, 122, 139, - 81, 125, 321, 
669, 424, 333, 468, 293, 630, 633, - 412, 62, 468, 83, 398, 666, 111, 307, 468, 147, - 372, 83, 324, 147, 363, 364, 308, 143, 117, 113, - 305, 343, 673, 306, 353, 312, 333, 116, 600, 488, - 119, 542, 83, 488, 320, 549, 103, 235, 299, 301, - 298, 667, 448, 76, 543, 346, 347, 348, 349, 350, - 343, 72, 66, 353, 69, 265, 446, 447, 445, 648, - 67, 652, 138, 120, 332, 331, 97, 331, 423, 74, - 71, 65, 444, 70, 77, 73, 68, 75, 303, 332, - 331, 333, 145, 333, 657, 423, 602, 90, 332, 331, - 585, 586, 82, 559, 423, 89, 333, 115, 93, 435, - 437, 438, 477, 330, 436, 333, 30, 334, 344, 345, - 346, 347, 348, 349, 350, 343, 405, 423, 353, 583, - 342, 341, 351, 352, 344, 345, 346, 347, 348, 349, - 350, 343, 332, 331, 353, 329, 559, 375, 259, 258, - 260, 261, 262, 263, 383, 405, 264, 76, 512, 333, - 525, 526, 527, 423, 128, 138, 66, 132, 69, 578, - 423, 643, 582, 82, 387, 388, 389, 390, 391, 392, - 393, 82, 405, 291, 234, 65, 401, 70, 77, 73, - 68, 75, 545, 423, 130, 418, 477, 423, 425, 422, - 423, 415, 82, 323, 95, 406, 581, 413, 136, 399, - 432, 433, 442, 420, 415, 127, 115, 646, 464, 62, - 332, 331, 239, 614, 115, 361, 439, 612, 615, 138, - 645, 611, 613, 610, 476, 478, 682, 333, 563, 566, - 567, 568, 564, 681, 565, 569, 405, 490, 469, 470, - 676, 616, 98, 567, 568, 56, 57, 481, 482, 679, - 14, 505, 102, 317, 663, 496, 492, 495, 316, 674, - 41, 485, 649, 479, 480, 101, 664, 312, 147, 107, - 513, 511, 312, 312, 574, 319, 499, 516, 294, 500, - 126, 491, 571, 493, 494, 510, 60, 508, 375, 102, - 312, 312, 312, 312, 59, 497, 31, 430, 498, 521, - 524, 312, 62, 605, 519, 520, 53, 54, 147, 315, - 443, 442, 33, 34, 35, 36, 528, 314, 563, 566, - 567, 568, 564, 535, 565, 569, 431, 329, 644, 604, - 539, 540, 558, 234, 544, 78, 672, 268, 621, 14, - 551, 30, 552, 553, 554, 555, 538, 32, 27, 1, - 570, 108, 400, 407, 548, 295, 38, 550, 402, 297, - 536, 86, 313, 135, 322, 483, 662, 410, 577, 312, - 579, 580, 546, 584, 522, 575, 556, 572, 418, 603, - 506, 312, 557, 547, 382, 486, 244, 591, 416, 
587, - 434, 255, 252, 14, 15, 16, 17, 254, 592, 253, - 131, 318, 335, 62, 595, 596, 237, 141, 104, 562, - 599, 560, 504, 404, 637, 659, 18, 55, 601, 443, - 129, 29, 617, 619, 58, 13, 471, 607, 291, 609, - 496, 12, 495, 312, 11, 606, 484, 608, 10, 9, - 489, 626, 137, 628, 629, 8, 634, 635, 7, 632, - 624, 625, 524, 6, 291, 62, 62, 62, 62, 365, - 366, 367, 368, 369, 370, 410, 618, 640, 291, 5, - 642, 639, 4, 641, 375, 81, 2, 647, 514, 0, - 515, 506, 518, 518, 518, 0, 0, 0, 651, 0, - 0, 0, 653, 19, 20, 22, 21, 23, 0, 656, - 0, 0, 0, 0, 658, 0, 24, 25, 26, 0, - 668, 665, 654, 655, 0, 0, 0, 83, 675, 351, - 352, 344, 345, 346, 347, 348, 349, 350, 343, 680, - 0, 353, 0, 506, 506, 506, 506, 341, 351, 352, - 344, 345, 346, 347, 348, 349, 350, 343, 440, 0, - 353, 449, 450, 451, 452, 453, 454, 455, 456, 457, - 458, 459, 460, 461, 462, 463, 0, 342, 341, 351, - 352, 344, 345, 346, 347, 348, 349, 350, 343, 362, - 0, 353, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 85, 0, 0, 0, 0, 0, 0, - 0, 598, 0, 371, 533, 0, 0, 376, 377, 378, - 379, 380, 381, 0, 384, 386, 386, 386, 386, 386, - 386, 386, 386, 394, 395, 396, 397, 0, 64, 291, - 0, 0, 622, 91, 623, 0, 91, 0, 64, 0, - 0, 0, 64, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 64, 0, 64, 0, 419, 0, 529, - 530, 531, 0, 426, 427, 428, 0, 0, 0, 0, - 0, 0, 64, 0, 0, 0, 0, 82, 0, 0, - 64, 0, 0, 0, 0, 0, 64, 0, 0, 64, - 0, 0, 91, 0, 0, 91, 0, 0, 0, 64, - 0, 0, 0, 0, 64, 0, 0, 64, 0, 0, - 0, 0, 0, 0, 72, 0, 0, 0, 64, 0, - 0, 64, 0, 67, 0, 0, 0, 0, 0, 0, - 0, 91, 74, 71, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 507, 0, 589, 590, 0, 0, - 90, 0, 72, 133, 0, 473, 134, 241, 0, 0, - 0, 67, 0, 0, 0, 0, 276, 0, 0, 0, - 74, 71, 0, 0, 0, 0, 269, 270, 0, 0, - 0, 0, 0, 0, 0, 138, 0, 0, 84, 259, - 258, 260, 261, 262, 263, 0, 0, 264, 256, 257, - 0, 627, 238, 250, 0, 275, 0, 0, 0, 0, - 76, 0, 0, 537, 0, 0, 0, 0, 0, 66, - 0, 69, 0, 0, 0, 247, 248, 310, 0, 0, - 0, 287, 0, 249, 0, 0, 246, 251, 65, 0, - 70, 77, 73, 68, 75, 507, 0, 0, 76, 650, - 419, 285, 0, 0, 0, 0, 0, 66, 0, 69, - 0, 277, 
286, 283, 284, 281, 282, 280, 279, 278, - 288, 271, 272, 274, 0, 273, 65, 0, 70, 77, - 73, 68, 75, 72, 0, 0, 0, 110, 0, 0, - 91, 0, 67, 0, 0, 0, 0, 507, 507, 507, - 507, 74, 71, 0, 0, 91, 0, 64, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 63, - 0, 112, 0, 0, 0, 0, 0, 0, 337, 0, - 340, 0, 631, 0, 0, 636, 354, 355, 356, 357, - 358, 359, 360, 0, 338, 339, 336, 342, 341, 351, - 352, 344, 345, 346, 347, 348, 349, 350, 343, 0, - 0, 353, 0, 0, 91, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 91, 0, 0, 0, 91, 76, - 0, 0, 0, 0, 0, 0, 0, 0, 66, 0, - 69, 0, 91, 64, 0, 0, 64, 0, 0, 0, - 64, 0, 0, 91, 0, 0, 0, 65, 0, 70, - 77, 73, 68, 75, 72, 0, 91, 0, 91, 241, - 91, 91, 91, 67, 0, 0, 0, 0, 276, 0, - 0, 0, 74, 71, 0, 0, 0, 0, 269, 270, - 0, 0, 0, 0, 0, 0, 0, 138, 588, 0, - 84, 259, 258, 260, 261, 262, 263, 0, 0, 264, - 256, 257, 0, 0, 238, 250, 0, 275, 342, 341, - 351, 352, 344, 345, 346, 347, 348, 349, 350, 343, - 0, 0, 353, 0, 0, 0, 0, 247, 248, 310, - 0, 0, 0, 287, 0, 249, 0, 0, 246, 251, - 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, - 76, 0, 0, 285, 0, 0, 0, 0, 0, 66, - 0, 69, 0, 277, 286, 283, 284, 281, 282, 280, - 279, 278, 288, 271, 272, 274, 0, 273, 65, 91, - 70, 77, 73, 68, 75, 0, 0, 0, 0, 0, - 0, 0, 0, 64, 64, 64, 64, 0, 0, 0, - 0, 0, 0, 0, 64, 0, 0, 91, 0, 0, - 91, 0, 91, 222, 213, 187, 224, 165, 179, 232, - 180, 181, 207, 154, 195, 72, 177, 0, 168, 150, - 174, 151, 166, 189, 67, 192, 164, 215, 198, 230, - 0, 202, 0, 74, 71, 0, 0, 191, 217, 193, - 212, 186, 208, 159, 201, 225, 178, 205, 0, 0, - 0, 90, 0, 0, 0, 0, 0, 0, 0, 0, - 204, 221, 176, 206, 149, 203, 0, 152, 155, 231, - 219, 171, 172, 0, 0, 0, 0, 0, 0, 0, - 190, 194, 209, 184, 0, 0, 0, 0, 0, 0, - 620, 0, 169, 0, 200, 0, 0, 0, 156, 153, - 188, 0, 0, 0, 158, 0, 170, 210, 0, 218, - 185, 76, 220, 183, 182, 223, 226, 216, 167, 175, - 66, 173, 69, 0, 162, 163, 160, 161, 196, 197, - 227, 228, 229, 211, 157, 0, 0, 214, 199, 65, - 0, 70, 77, 73, 68, 75, 222, 213, 187, 224, - 165, 179, 232, 180, 181, 207, 154, 195, 72, 177, - 0, 168, 150, 174, 151, 166, 189, 
67, 192, 164, - 215, 198, 230, 0, 202, 0, 74, 71, 0, 0, - 191, 217, 193, 212, 186, 208, 159, 201, 225, 178, - 205, 0, 0, 0, 84, 0, 0, 0, 0, 0, - 0, 0, 0, 204, 221, 176, 206, 149, 203, 0, - 152, 155, 231, 219, 171, 172, 0, 0, 0, 0, - 0, 0, 0, 190, 194, 209, 184, 0, 0, 0, - 0, 0, 0, 509, 0, 169, 0, 200, 0, 0, - 0, 156, 153, 188, 0, 0, 0, 158, 0, 170, - 210, 0, 218, 185, 76, 220, 183, 182, 223, 226, - 216, 167, 175, 66, 173, 69, 0, 162, 163, 160, - 161, 196, 197, 227, 228, 229, 211, 157, 0, 0, - 214, 199, 65, 0, 70, 77, 73, 68, 75, 222, - 213, 187, 224, 165, 179, 232, 180, 181, 207, 154, - 195, 72, 177, 0, 168, 150, 174, 151, 166, 189, - 67, 192, 164, 215, 198, 230, 0, 202, 0, 74, - 71, 0, 0, 191, 217, 193, 212, 186, 208, 159, - 201, 225, 178, 205, 0, 0, 0, 90, 0, 0, - 0, 0, 0, 0, 0, 0, 204, 221, 176, 206, - 149, 203, 0, 152, 155, 231, 219, 171, 172, 0, - 0, 0, 0, 0, 0, 0, 190, 194, 209, 184, - 0, 0, 0, 0, 0, 0, 0, 0, 169, 0, - 200, 0, 0, 0, 156, 153, 188, 0, 0, 0, - 158, 0, 170, 210, 0, 218, 185, 76, 220, 183, - 182, 223, 226, 216, 167, 175, 66, 173, 69, 0, - 162, 163, 160, 161, 196, 197, 227, 228, 229, 211, - 157, 0, 0, 214, 199, 65, 0, 70, 77, 73, - 68, 75, 222, 213, 187, 224, 165, 179, 232, 180, - 181, 207, 154, 195, 72, 177, 0, 168, 150, 174, - 151, 166, 189, 67, 192, 164, 215, 198, 230, 0, - 202, 0, 74, 71, 0, 0, 191, 217, 193, 212, - 186, 208, 159, 201, 225, 178, 205, 0, 0, 0, - 84, 0, 0, 0, 0, 0, 0, 0, 0, 204, - 221, 176, 206, 149, 203, 0, 152, 155, 231, 219, - 171, 172, 0, 0, 0, 0, 0, 0, 0, 190, - 194, 209, 184, 0, 0, 0, 0, 0, 0, 0, - 0, 169, 0, 200, 0, 0, 0, 156, 153, 188, - 0, 0, 0, 158, 0, 170, 210, 0, 218, 185, - 76, 220, 183, 182, 223, 226, 216, 167, 175, 66, - 173, 69, 0, 162, 163, 160, 161, 196, 197, 227, - 228, 229, 211, 157, 0, 0, 214, 199, 65, 0, - 70, 77, 73, 68, 75, 222, 213, 187, 224, 165, - 179, 232, 180, 181, 207, 154, 195, 72, 177, 0, - 168, 150, 174, 151, 166, 189, 67, 192, 164, 215, - 198, 230, 0, 202, 0, 74, 71, 0, 0, 191, - 217, 193, 212, 
186, 208, 159, 201, 225, 178, 205, - 0, 0, 0, 63, 0, 0, 0, 0, 0, 0, - 0, 0, 204, 221, 176, 206, 149, 203, 0, 152, - 155, 231, 219, 171, 172, 0, 0, 0, 0, 0, - 0, 0, 190, 194, 209, 184, 0, 0, 0, 0, - 0, 0, 0, 0, 169, 0, 200, 0, 0, 0, - 156, 153, 188, 0, 0, 0, 158, 0, 170, 210, - 0, 218, 185, 76, 220, 183, 182, 223, 226, 216, - 167, 175, 66, 173, 69, 0, 162, 163, 160, 161, - 196, 197, 227, 228, 229, 211, 157, 0, 0, 214, - 199, 65, 0, 70, 77, 73, 68, 75, 72, 0, - 0, 0, 0, 241, 0, 0, 0, 67, 0, 0, - 0, 0, 276, 0, 0, 0, 74, 71, 0, 0, - 0, 0, 269, 270, 0, 0, 0, 0, 0, 0, - 0, 138, 534, 423, 84, 259, 258, 260, 261, 262, - 263, 0, 0, 264, 256, 257, 0, 0, 238, 250, - 0, 275, 342, 341, 351, 352, 344, 345, 346, 347, - 348, 349, 350, 343, 0, 0, 353, 0, 0, 0, - 0, 247, 248, 0, 0, 0, 0, 287, 0, 249, - 0, 0, 246, 251, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 76, 0, 0, 285, 0, 0, - 0, 0, 0, 66, 0, 69, 0, 277, 286, 283, - 284, 281, 282, 280, 279, 278, 288, 271, 272, 274, - 14, 273, 65, 0, 70, 77, 73, 68, 75, 0, - 0, 72, 0, 0, 0, 0, 241, 0, 0, 0, - 67, 0, 0, 0, 0, 276, 0, 0, 0, 74, - 71, 0, 0, 0, 0, 269, 270, 0, 0, 0, - 0, 0, 0, 0, 138, 0, 0, 84, 259, 258, - 260, 261, 262, 263, 0, 0, 264, 256, 257, 0, - 0, 238, 250, 0, 275, 342, 341, 351, 352, 344, - 345, 346, 347, 348, 349, 350, 343, 0, 0, 353, - 0, 0, 0, 0, 247, 248, 0, 0, 0, 0, - 287, 0, 249, 0, 0, 246, 251, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 76, 0, 0, - 285, 0, 0, 0, 0, 0, 66, 0, 69, 0, - 277, 286, 283, 284, 281, 282, 280, 279, 278, 288, - 271, 272, 274, 0, 273, 65, 0, 70, 77, 73, - 68, 75, 72, 0, 0, 0, 0, 241, 0, 0, - 0, 67, 0, 0, 0, 0, 276, 0, 0, 0, - 74, 71, 0, 0, 0, 0, 269, 270, 0, 0, - 0, 0, 0, 0, 0, 138, 0, 0, 84, 259, - 258, 260, 261, 262, 263, 0, 0, 264, 256, 257, - 0, 0, 238, 250, 0, 275, 0, 0, 0, 0, + 424, 247, 432, 331, 596, 270, 79, 377, 121, 505, + 475, 525, 534, 563, 595, 420, 271, 249, 503, 314, + 243, 411, 490, 417, 504, 444, 119, 294, 312, 61, + 519, 98, 246, 28, 687, 49, 124, 305, 81, 237, + 62, 680, 83, 244, 686, 62, 673, 
685, 62, 679, + 62, 303, 376, 3, 62, 672, 599, 640, 468, 477, + 469, 50, 51, 299, 37, 62, 39, 109, 94, 89, + 40, 87, 91, 42, 92, 43, 307, 468, 97, 469, + 43, 406, 52, 213, 62, 330, 72, 329, 328, 104, + 88, 90, 62, 80, 48, 67, 62, 44, 578, 62, + 366, 367, 62, 575, 74, 71, 470, 81, 112, 62, + 427, 83, 228, 415, 109, 81, 116, 235, 212, 83, + 211, 235, 125, 214, 293, 470, 217, 471, 356, 401, + 302, 304, 301, 227, 240, 375, 45, 46, 47, 234, + 230, 232, 107, 296, 115, 111, 471, 346, 236, 668, + 356, 671, 295, 543, 336, 345, 344, 354, 355, 347, + 348, 349, 350, 351, 352, 353, 346, 335, 334, 356, + 306, 675, 451, 335, 334, 122, 334, 491, 468, 114, + 469, 602, 76, 491, 336, 551, 449, 450, 448, 117, + 336, 66, 336, 69, 662, 345, 344, 354, 355, 347, + 348, 349, 350, 351, 352, 353, 346, 239, 231, 356, + 65, 122, 70, 77, 73, 68, 75, 101, 269, 62, + 544, 447, 62, 669, 315, 650, 470, 81, 527, 528, + 529, 83, 324, 354, 355, 347, 348, 349, 350, 351, + 352, 353, 346, 308, 310, 356, 309, 471, 118, 632, + 635, 233, 426, 663, 654, 82, 659, 426, 333, 438, + 440, 441, 337, 364, 439, 561, 426, 238, 327, 587, + 588, 368, 369, 370, 371, 372, 373, 323, 311, 345, + 344, 354, 355, 347, 348, 349, 350, 351, 352, 353, + 346, 95, 378, 356, 113, 584, 332, 545, 30, 386, + 480, 349, 350, 351, 352, 353, 346, 113, 123, 356, + 123, 335, 334, 335, 334, 418, 535, 113, 221, 585, + 604, 561, 62, 408, 82, 408, 426, 122, 336, 583, + 336, 428, 82, 408, 123, 123, 404, 580, 426, 421, + 514, 335, 334, 335, 334, 426, 416, 93, 423, 547, + 426, 480, 426, 409, 435, 436, 445, 446, 336, 443, + 336, 467, 452, 453, 454, 455, 456, 457, 458, 459, + 460, 461, 462, 463, 464, 465, 466, 479, 481, 645, + 315, 235, 442, 418, 478, 315, 315, 402, 388, 219, + 493, 425, 426, 216, 408, 96, 225, 648, 647, 472, + 473, 484, 485, 315, 315, 315, 315, 613, 62, 495, + 498, 62, 295, 612, 315, 62, 488, 122, 272, 684, + 616, 235, 683, 515, 478, 617, 678, 508, 614, 56, + 57, 518, 499, 615, 510, 500, 123, 502, 681, 482, + 483, 41, 378, 512, 82, 
326, 618, 501, 569, 570, + 507, 100, 295, 523, 526, 14, 665, 494, 320, 496, + 497, 521, 522, 319, 99, 445, 446, 513, 666, 676, + 531, 532, 533, 651, 530, 59, 537, 105, 576, 297, + 322, 215, 315, 541, 542, 60, 573, 546, 53, 54, + 100, 318, 433, 553, 315, 554, 555, 556, 557, 317, + 540, 263, 262, 264, 265, 266, 267, 607, 120, 268, + 120, 552, 550, 434, 332, 62, 606, 560, 403, 238, + 579, 226, 581, 582, 31, 78, 558, 508, 574, 674, + 577, 421, 623, 413, 14, 120, 538, 30, 32, 593, + 33, 34, 35, 36, 589, 315, 27, 1, 548, 572, + 594, 106, 601, 410, 298, 38, 591, 592, 405, 597, + 598, 300, 86, 316, 224, 325, 486, 62, 62, 62, + 62, 603, 609, 664, 611, 621, 586, 524, 620, 508, + 508, 508, 508, 605, 498, 619, 608, 559, 610, 549, + 385, 489, 474, 248, 123, 630, 631, 628, 636, 637, + 419, 634, 487, 626, 627, 526, 492, 437, 259, 642, + 256, 629, 258, 257, 220, 644, 321, 338, 241, 229, + 123, 102, 564, 562, 641, 81, 643, 378, 506, 83, + 649, 413, 407, 639, 123, 661, 120, 55, 218, 29, + 653, 58, 13, 12, 516, 655, 517, 11, 520, 520, + 520, 658, 10, 9, 8, 7, 660, 6, 5, 652, + 4, 2, 670, 667, 365, 656, 657, 0, 0, 0, + 677, 390, 391, 392, 393, 394, 395, 396, 0, 0, + 0, 682, 0, 0, 85, 0, 0, 0, 374, 0, + 0, 0, 379, 380, 381, 382, 383, 384, 0, 387, + 389, 389, 389, 389, 389, 389, 389, 389, 397, 398, + 399, 400, 14, 15, 16, 17, 0, 0, 123, 64, + 0, 0, 0, 0, 64, 0, 0, 64, 0, 64, + 590, 0, 0, 64, 0, 18, 565, 568, 569, 570, + 566, 422, 567, 571, 64, 0, 64, 429, 430, 431, + 345, 344, 354, 355, 347, 348, 349, 350, 351, 352, + 353, 346, 0, 64, 356, 0, 600, 0, 0, 0, + 0, 64, 0, 0, 126, 64, 126, 0, 64, 0, + 0, 64, 0, 0, 126, 0, 0, 0, 64, 0, + 0, 0, 0, 64, 123, 0, 64, 624, 0, 625, + 0, 126, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 19, 20, 22, 21, 23, 0, 0, 0, + 0, 0, 0, 0, 0, 24, 25, 26, 509, 0, + 0, 72, 0, 0, 476, 0, 245, 0, 0, 0, + 67, 0, 82, 0, 0, 280, 0, 0, 0, 74, + 71, 0, 0, 0, 0, 273, 274, 0, 0, 0, + 0, 0, 0, 0, 122, 536, 0, 84, 263, 262, + 264, 265, 266, 267, 0, 0, 268, 260, 261, 0, + 0, 242, 254, 0, 
279, 345, 344, 354, 355, 347, + 348, 349, 350, 351, 352, 353, 346, 539, 64, 356, + 0, 64, 126, 0, 251, 252, 313, 0, 0, 0, + 291, 126, 253, 0, 0, 250, 255, 0, 120, 347, + 348, 349, 350, 351, 352, 353, 346, 76, 509, 356, + 289, 0, 0, 422, 0, 0, 66, 0, 69, 0, + 281, 290, 287, 288, 285, 286, 284, 283, 282, 292, + 275, 276, 278, 0, 277, 65, 0, 70, 77, 73, + 68, 75, 345, 344, 354, 355, 347, 348, 349, 350, + 351, 352, 353, 346, 0, 0, 356, 0, 0, 0, + 509, 509, 509, 509, 126, 344, 354, 355, 347, 348, + 349, 350, 351, 352, 353, 346, 0, 0, 356, 126, + 0, 64, 72, 0, 0, 0, 412, 0, 0, 0, + 0, 67, 0, 0, 0, 633, 0, 14, 638, 0, + 74, 71, 0, 565, 568, 569, 570, 566, 72, 567, + 571, 0, 0, 646, 0, 0, 0, 67, 125, 0, + 414, 0, 0, 0, 72, 0, 74, 71, 108, 0, + 335, 334, 0, 67, 0, 0, 0, 0, 126, 0, + 0, 122, 74, 71, 63, 0, 0, 336, 126, 0, + 0, 0, 126, 0, 0, 0, 0, 0, 0, 0, + 63, 0, 110, 72, 0, 0, 126, 64, 0, 0, + 64, 0, 67, 0, 64, 0, 0, 126, 76, 0, + 0, 74, 71, 0, 0, 0, 0, 66, 0, 69, + 126, 0, 126, 0, 126, 126, 126, 0, 0, 125, + 0, 0, 222, 0, 76, 223, 65, 0, 70, 77, + 73, 68, 75, 66, 0, 69, 0, 0, 0, 0, + 76, 0, 0, 0, 0, 0, 0, 0, 0, 66, + 0, 69, 65, 0, 70, 77, 73, 68, 75, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 65, 0, + 70, 77, 73, 68, 75, 0, 0, 0, 0, 76, + 0, 0, 0, 0, 126, 0, 0, 0, 66, 0, + 69, 0, 0, 0, 64, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 65, 0, 70, + 77, 73, 68, 75, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 126, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 64, 64, 64, 64, + 0, 0, 0, 0, 0, 0, 0, 64, 0, 0, + 126, 0, 0, 126, 0, 126, 200, 191, 165, 202, + 143, 157, 210, 158, 159, 185, 132, 173, 72, 155, + 0, 146, 128, 152, 129, 144, 167, 67, 170, 142, + 193, 176, 208, 0, 180, 0, 74, 71, 0, 0, + 169, 195, 171, 190, 164, 186, 137, 179, 203, 156, + 183, 0, 0, 0, 125, 0, 0, 0, 0, 0, + 0, 0, 0, 182, 199, 154, 184, 127, 181, 0, + 130, 133, 209, 197, 149, 150, 0, 0, 0, 0, + 0, 0, 0, 168, 172, 187, 162, 0, 0, 0, + 0, 0, 0, 622, 0, 147, 0, 178, 0, 0, + 0, 134, 131, 166, 0, 0, 0, 136, 0, 148, + 
188, 0, 196, 163, 76, 198, 161, 160, 201, 204, + 194, 145, 153, 66, 151, 69, 0, 140, 141, 138, + 139, 174, 175, 205, 206, 207, 189, 135, 0, 0, + 192, 177, 65, 0, 70, 77, 73, 68, 75, 200, + 191, 165, 202, 143, 157, 210, 158, 159, 185, 132, + 173, 72, 155, 0, 146, 128, 152, 129, 144, 167, + 67, 170, 142, 193, 176, 208, 0, 180, 0, 74, + 71, 0, 0, 169, 195, 171, 190, 164, 186, 137, + 179, 203, 156, 183, 122, 0, 0, 125, 0, 0, + 0, 0, 0, 0, 0, 0, 182, 199, 154, 184, + 127, 181, 0, 130, 133, 209, 197, 149, 150, 0, + 0, 0, 0, 0, 0, 0, 168, 172, 187, 162, + 0, 0, 0, 0, 0, 0, 0, 0, 147, 0, + 178, 0, 0, 0, 134, 131, 166, 0, 0, 0, + 136, 0, 148, 188, 0, 196, 163, 76, 198, 161, + 160, 201, 204, 194, 145, 153, 66, 151, 69, 0, + 140, 141, 138, 139, 174, 175, 205, 206, 207, 189, + 135, 0, 0, 192, 177, 65, 0, 70, 77, 73, + 68, 75, 200, 191, 165, 202, 143, 157, 210, 158, + 159, 185, 132, 173, 72, 155, 0, 146, 128, 152, + 129, 144, 167, 67, 170, 142, 193, 176, 208, 0, + 180, 0, 74, 71, 0, 0, 169, 195, 171, 190, + 164, 186, 137, 179, 203, 156, 183, 0, 0, 0, + 84, 0, 0, 0, 0, 0, 0, 0, 0, 182, + 199, 154, 184, 127, 181, 0, 130, 133, 209, 197, + 149, 150, 0, 0, 0, 0, 0, 0, 0, 168, + 172, 187, 162, 0, 0, 0, 0, 0, 0, 511, + 0, 147, 0, 178, 0, 0, 0, 134, 131, 166, + 0, 0, 0, 136, 0, 148, 188, 0, 196, 163, + 76, 198, 161, 160, 201, 204, 194, 145, 153, 66, + 151, 69, 0, 140, 141, 138, 139, 174, 175, 205, + 206, 207, 189, 135, 0, 0, 192, 177, 65, 0, + 70, 77, 73, 68, 75, 200, 191, 165, 202, 143, + 157, 210, 158, 159, 185, 132, 173, 72, 155, 0, + 146, 128, 152, 129, 144, 167, 67, 170, 142, 193, + 176, 208, 0, 180, 0, 74, 71, 0, 0, 169, + 195, 171, 190, 164, 186, 137, 179, 203, 156, 183, + 0, 0, 0, 125, 0, 0, 0, 0, 0, 0, + 0, 0, 182, 199, 154, 184, 127, 181, 0, 130, + 133, 209, 197, 149, 150, 0, 0, 0, 0, 0, + 0, 0, 168, 172, 187, 162, 0, 0, 0, 0, + 0, 0, 0, 0, 147, 0, 178, 0, 0, 0, + 134, 131, 166, 0, 0, 0, 136, 0, 148, 188, + 0, 196, 163, 76, 198, 161, 160, 201, 204, 194, + 145, 153, 66, 151, 
69, 0, 140, 141, 138, 139, + 174, 175, 205, 206, 207, 189, 135, 0, 0, 192, + 177, 65, 0, 70, 77, 73, 68, 75, 200, 191, + 165, 202, 143, 157, 210, 158, 159, 185, 132, 173, + 72, 155, 0, 146, 128, 152, 129, 144, 167, 67, + 170, 142, 193, 176, 208, 0, 180, 0, 74, 71, + 0, 0, 169, 195, 171, 190, 164, 186, 137, 179, + 203, 156, 183, 0, 0, 0, 84, 0, 0, 0, + 0, 0, 0, 0, 0, 182, 199, 154, 184, 127, + 181, 0, 130, 133, 209, 197, 149, 150, 0, 0, + 0, 0, 0, 0, 0, 168, 172, 187, 162, 0, + 0, 0, 0, 0, 0, 0, 0, 147, 0, 178, + 0, 0, 0, 134, 131, 166, 0, 0, 0, 136, + 0, 148, 188, 0, 196, 163, 76, 198, 161, 160, + 201, 204, 194, 145, 153, 66, 151, 69, 0, 140, + 141, 138, 139, 174, 175, 205, 206, 207, 189, 135, + 0, 0, 192, 177, 65, 0, 70, 77, 73, 68, + 75, 200, 191, 165, 202, 143, 157, 210, 158, 159, + 185, 132, 173, 72, 155, 0, 146, 128, 152, 129, + 144, 167, 67, 170, 142, 193, 176, 208, 0, 180, + 0, 74, 71, 0, 0, 169, 195, 171, 190, 164, + 186, 137, 179, 203, 156, 183, 0, 0, 0, 63, + 0, 0, 0, 0, 0, 0, 0, 0, 182, 199, + 154, 184, 127, 181, 0, 130, 133, 209, 197, 149, + 150, 0, 0, 0, 0, 0, 0, 0, 168, 172, + 187, 162, 0, 0, 0, 0, 0, 0, 0, 0, + 147, 0, 178, 0, 0, 0, 134, 131, 166, 0, + 0, 0, 136, 0, 148, 188, 0, 196, 163, 76, + 198, 161, 160, 201, 204, 194, 145, 153, 66, 151, + 69, 0, 140, 141, 138, 139, 174, 175, 205, 206, + 207, 189, 135, 0, 0, 192, 177, 65, 0, 70, + 77, 73, 68, 75, 72, 0, 0, 0, 0, 245, + 0, 0, 0, 67, 0, 0, 0, 0, 280, 0, + 0, 0, 74, 71, 0, 0, 0, 0, 273, 274, + 0, 0, 0, 0, 0, 0, 0, 122, 0, 0, + 84, 263, 262, 264, 265, 266, 267, 0, 0, 268, + 260, 261, 0, 0, 242, 254, 0, 279, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 251, 252, 313, + 0, 0, 0, 291, 0, 253, 0, 0, 250, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 247, 248, 0, 0, 0, - 0, 287, 0, 249, 0, 0, 246, 251, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 76, 0, - 0, 285, 0, 0, 0, 0, 0, 66, 0, 69, - 0, 277, 286, 283, 284, 281, 282, 280, 279, 278, - 288, 271, 272, 274, 72, 273, 65, 0, 70, 77, - 73, 68, 
75, 67, 0, 0, 0, 0, 276, 0, - 0, 0, 74, 71, 0, 0, 0, 0, 269, 270, - 0, 0, 0, 0, 0, 0, 0, 138, 0, 0, - 84, 259, 258, 260, 261, 262, 263, 0, 0, 264, - 256, 257, 0, 0, 0, 250, 0, 275, 0, 0, + 76, 0, 0, 289, 0, 0, 0, 0, 0, 66, + 0, 69, 0, 281, 290, 287, 288, 285, 286, 284, + 283, 282, 292, 275, 276, 278, 0, 277, 65, 0, + 70, 77, 73, 68, 75, 72, 0, 0, 0, 0, + 245, 0, 0, 0, 67, 0, 0, 0, 0, 280, + 0, 0, 0, 74, 71, 0, 0, 0, 0, 273, + 274, 0, 0, 0, 0, 0, 0, 0, 122, 0, + 426, 84, 263, 262, 264, 265, 266, 267, 0, 0, + 268, 260, 261, 0, 0, 242, 254, 0, 279, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 247, 248, 0, - 0, 0, 0, 287, 0, 249, 0, 0, 246, 251, + 0, 0, 0, 0, 0, 0, 0, 0, 251, 252, + 0, 0, 0, 0, 291, 0, 253, 0, 0, 250, + 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 76, 0, 0, 289, 0, 0, 0, 0, 0, + 66, 0, 69, 0, 281, 290, 287, 288, 285, 286, + 284, 283, 282, 292, 275, 276, 278, 14, 277, 65, + 0, 70, 77, 73, 68, 75, 0, 0, 72, 0, + 0, 0, 0, 245, 0, 0, 0, 67, 0, 0, + 0, 0, 280, 0, 0, 0, 74, 71, 0, 0, + 0, 0, 273, 274, 0, 0, 0, 0, 0, 0, + 0, 122, 0, 0, 84, 263, 262, 264, 265, 266, + 267, 0, 0, 268, 260, 261, 0, 0, 242, 254, + 0, 279, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 76, 0, 0, 285, 0, 0, 0, 0, 0, 66, - 0, 69, 0, 277, 286, 283, 284, 281, 282, 280, - 279, 278, 288, 271, 272, 274, 72, 273, 65, 0, - 70, 77, 73, 68, 75, 67, 0, 0, 0, 0, - 276, 0, 0, 0, 74, 71, 0, 0, 0, 0, - 269, 270, 0, 0, 0, 0, 0, 0, 0, 138, - 0, 0, 84, 259, 258, 260, 261, 262, 263, 0, - 0, 264, 0, 72, 0, 0, 0, 250, 0, 275, - 0, 0, 67, 0, 0, 72, 0, 0, 0, 0, - 0, 74, 71, 0, 67, 0, 0, 0, 0, 247, - 248, 0, 0, 74, 71, 287, 138, 249, 0, 63, - 246, 251, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 90, 76, 411, 0, 285, 0, 0, 0, 0, - 0, 66, 0, 69, 0, 277, 286, 283, 284, 281, - 282, 280, 279, 278, 288, 271, 272, 274, 0, 273, - 65, 0, 70, 77, 73, 68, 75, 72, 0, 0, - 0, 409, 0, 0, 0, 0, 67, 0, 0, 76, - 0, 0, 0, 0, 0, 74, 71, 0, 66, 0, - 69, 76, 0, 0, 0, 0, 0, 0, 0, 0, - 66, 0, 69, 90, 0, 411, 0, 65, 72, 70, - 77, 
73, 68, 75, 0, 332, 331, 67, 0, 65, - 72, 70, 77, 73, 68, 75, 74, 71, 105, 67, - 0, 0, 333, 0, 72, 0, 0, 0, 74, 71, - 0, 0, 0, 67, 63, 0, 112, 0, 0, 72, - 0, 0, 74, 71, 0, 0, 63, 0, 67, 0, - 0, 0, 0, 76, 0, 0, 0, 74, 71, 0, - 84, 0, 66, 0, 69, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 90, 0, 0, 0, 0, - 72, 65, 0, 70, 77, 73, 68, 75, 0, 67, - 0, 0, 0, 0, 76, 0, 0, 0, 74, 71, - 0, 0, 0, 66, 0, 69, 76, 0, 0, 0, - 0, 0, 0, 0, 0, 66, 63, 69, 0, 0, - 76, 0, 65, 0, 70, 77, 73, 68, 75, 66, - 0, 69, 0, 0, 65, 76, 70, 77, 73, 68, - 75, 0, 0, 0, 66, 0, 69, 0, 65, 0, - 70, 77, 73, 68, 75, 0, 0, 0, 0, 0, - 0, 0, 0, 65, 0, 70, 77, 73, 68, 75, - 0, 0, 0, 0, 0, 0, 76, 0, 0, 0, - 0, 0, 0, 0, 0, 66, 0, 69, 0, 0, + 0, 251, 252, 0, 0, 0, 0, 291, 0, 253, + 0, 0, 250, 255, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 76, 0, 0, 289, 0, 0, + 0, 0, 0, 66, 0, 69, 0, 281, 290, 287, + 288, 285, 286, 284, 283, 282, 292, 275, 276, 278, + 0, 277, 65, 0, 70, 77, 73, 68, 75, 72, + 0, 0, 0, 0, 245, 0, 0, 0, 67, 0, + 0, 0, 0, 280, 0, 0, 0, 74, 71, 0, + 0, 0, 0, 273, 274, 0, 0, 0, 0, 0, + 0, 0, 122, 0, 0, 84, 263, 262, 264, 265, + 266, 267, 0, 0, 268, 260, 261, 0, 0, 242, + 254, 0, 279, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 65, 0, 70, 77, 73, 68, - 75, + 0, 0, 251, 252, 0, 0, 0, 0, 291, 0, + 253, 0, 0, 250, 255, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 76, 0, 0, 289, 0, + 0, 0, 0, 0, 66, 0, 69, 0, 281, 290, + 287, 288, 285, 286, 284, 283, 282, 292, 275, 276, + 278, 72, 277, 65, 0, 70, 77, 73, 68, 75, + 67, 0, 0, 0, 0, 280, 0, 0, 0, 74, + 71, 0, 0, 0, 0, 273, 274, 0, 0, 0, + 0, 0, 0, 0, 122, 0, 0, 84, 263, 262, + 264, 265, 266, 267, 0, 0, 268, 260, 261, 0, + 0, 0, 254, 0, 279, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 251, 252, 0, 0, 0, 0, + 291, 0, 253, 0, 0, 250, 255, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 76, 0, 0, + 289, 0, 0, 0, 0, 0, 66, 0, 69, 0, + 281, 290, 287, 288, 285, 286, 284, 283, 282, 292, + 275, 276, 278, 72, 277, 65, 0, 70, 77, 73, + 68, 75, 67, 0, 0, 0, 0, 280, 0, 0, + 0, 74, 
71, 0, 0, 0, 0, 273, 274, 0, + 0, 0, 0, 0, 0, 0, 122, 0, 0, 84, + 263, 262, 264, 265, 266, 267, 0, 0, 268, 0, + 72, 0, 0, 0, 254, 0, 279, 0, 0, 67, + 0, 0, 72, 0, 0, 0, 0, 0, 74, 71, + 0, 67, 0, 0, 0, 0, 251, 252, 0, 0, + 74, 71, 291, 122, 253, 0, 63, 250, 255, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 125, 76, + 414, 0, 289, 0, 0, 0, 0, 0, 66, 0, + 69, 0, 281, 290, 287, 288, 285, 286, 284, 283, + 282, 292, 275, 276, 278, 72, 277, 65, 0, 70, + 77, 73, 68, 75, 67, 0, 0, 0, 0, 0, + 0, 0, 0, 74, 71, 0, 76, 0, 0, 0, + 0, 0, 0, 0, 0, 66, 0, 69, 76, 0, + 0, 63, 0, 110, 72, 0, 0, 66, 0, 69, + 0, 0, 103, 67, 65, 0, 70, 77, 73, 68, + 75, 0, 74, 71, 0, 0, 65, 0, 70, 77, + 73, 68, 75, 72, 0, 0, 0, 0, 0, 0, + 63, 72, 67, 0, 0, 0, 0, 0, 72, 0, + 67, 74, 71, 0, 0, 0, 0, 67, 0, 74, + 71, 76, 0, 0, 0, 0, 74, 71, 0, 84, + 66, 0, 69, 0, 0, 0, 0, 125, 0, 0, + 0, 0, 0, 0, 63, 0, 0, 0, 0, 65, + 0, 70, 77, 73, 68, 75, 0, 0, 0, 0, + 76, 0, 0, 0, 0, 0, 0, 0, 0, 66, + 0, 69, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 65, 76, + 70, 77, 73, 68, 75, 0, 0, 76, 66, 0, + 69, 0, 0, 0, 76, 0, 66, 0, 69, 0, + 0, 0, 0, 66, 0, 69, 0, 65, 0, 70, + 77, 73, 68, 75, 0, 65, 0, 70, 77, 73, + 68, 75, 65, 0, 70, 77, 73, 68, 75, 340, + 0, 343, 0, 0, 0, 0, 0, 357, 358, 359, + 360, 361, 362, 363, 0, 341, 342, 339, 345, 344, + 354, 355, 347, 348, 349, 350, 351, 352, 353, 346, + 0, 0, 356, } var yyPact = [...]int{ - 537, -1000, -116, 486, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -61, - -17, -7, -10, -19, -1000, -1000, -1000, -1000, -1000, 483, - 437, 358, -1000, -58, 2783, 475, 2727, -63, -25, 2742, - -1000, -23, 2742, -1000, 2783, -65, 163, -65, 2783, -1000, - -1000, -1000, -1000, -1000, -1000, 382, -1000, -1000, 125, 2713, - 390, 1016, 66, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 2783, 196, - -1000, 102, -1000, 65, -1000, -1000, 2783, 113, 160, -1000, - -1000, -1000, 2783, -1000, -39, 
2783, 408, 306, 2742, -1000, - 294, 847, -1000, -1000, 319, 2783, -1000, 2727, 52, -1000, - 2701, -1000, -1000, 1890, 472, 2727, 2285, 1747, -1000, 406, - -72, -1000, 81, -1000, 2783, -1000, -1000, 2783, -1000, 1147, - -1000, 449, -1000, 377, 372, 394, 2727, 2742, -1000, -1000, - 313, -1000, -28, -31, -36, -1000, -1000, -1000, -1000, -1000, + 706, -1000, -115, 532, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -47, + -40, -14, 25, -17, -1000, -1000, -1000, -1000, -1000, 528, + 469, 392, -1000, -35, 2951, 515, 2936, -45, -22, 2951, + -1000, -20, 2951, -1000, 2951, -48, 238, -48, 2951, -1000, + -1000, -1000, -1000, -1000, -1000, 431, -1000, -1000, 156, 2907, + 448, 1027, 42, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 2951, 243, + -1000, 104, -1000, 41, -1000, -1000, 2951, 122, 195, 1404, + 2951, 1404, -31, 2951, 459, 344, 2951, -1000, 349, 1066, + -1000, -1000, 367, 2951, -1000, 2936, 93, -1000, 2868, -1000, + -1000, 1976, 508, 2936, 2502, 1833, 1404, 457, -55, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, @@ -698,73 +715,76 @@ var yyPact = [...]int{ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, 463, 2285, -1000, 149, -1000, 2285, 1011, - -1000, 255, -1000, 60, -1000, -1000, 2529, 2529, 2529, 2529, - 2529, 2529, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 255, 57, -1000, 2154, 255, - 255, 255, 255, 255, 255, 2285, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, -1000, - 51, -1000, -1000, -1000, -1000, 300, 2742, -1000, -42, 
-1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 335, -1000, - -1000, 2660, 47, 2783, -1000, -1000, -1000, -1000, 292, 255, - 486, 305, 288, 41, 463, 255, 255, 255, 431, 461, - 149, 2285, 2285, 187, 94, 2407, 162, 123, 2529, 2529, - 2529, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 2529, - 2529, 2529, 2529, 36, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 483, 234, 234, 27, 27, 27, 27, 27, - 184, 885, 1747, 251, 285, 149, 1147, 1147, 2285, 2285, - 2742, 419, 108, 149, 2742, -1000, 166, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 1147, 1147, 1147, 1147, 1604, 2783, - -1000, -1000, 2783, -1000, 472, 1147, 2576, -1000, -1000, 2588, - -1000, -1000, 1461, -1000, -1000, 395, 247, -1000, -1000, 2021, - -1000, -1000, 2742, -1000, 2742, 431, 2742, 2742, 2742, -1000, - 2285, 2285, 94, 151, -1000, -1000, 238, -1000, -1000, -1000, - 2139, -1000, -1000, -1000, -1000, 162, 2529, 2529, 2529, 631, - 2139, 2006, 581, 600, 27, 103, 103, 74, 74, 74, - 74, 74, 168, 168, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 166, 1147, 244, 255, -1000, 2285, -1000, 215, - 215, 63, 173, 281, -1000, 1147, 112, -1000, 2285, 166, - -1000, 215, 166, 215, 215, -1000, -1000, -1000, -1000, 470, - -1000, 235, 339, -1000, -1000, -1000, 411, 80, -1000, -1000, - 7, 397, 255, -1000, 5, -1000, -1000, 258, -1000, 258, - 258, 295, 218, -1000, 217, -1000, -1000, -1000, -1000, 631, - 2139, 1132, -1000, 2529, 2529, -1000, 215, 1147, 149, -1000, - -1000, 36, 36, 36, -98, 2742, 271, 104, -1000, 2285, - 164, -1000, -1000, -1000, -1000, -1000, -1000, 466, 438, 2576, - 2576, 2576, 2576, -1000, 334, 332, -1000, 328, 324, 352, - 2783, -1000, 192, 1318, 480, -1000, 2742, -1000, 2742, -1000, - -1000, 2285, 2285, 2285, -1000, -1000, -1000, -1000, 2529, 2139, - 2139, -1000, 166, 166, 24, 166, 166, 255, -1000, -95, - -1000, 149, 2285, 463, 2285, 2285, 339, 262, 429, -1000, - -1000, -1000, -1000, 331, -1000, 318, -1000, -1000, -1000, -1000, - -1000, 2727, -1000, -1000, 149, 
149, -1000, 2139, -1000, -1000, - -1000, 153, -1000, 383, -1000, -1000, 2529, 166, 156, 149, - 431, 149, 201, 2285, 2285, -1000, -1000, 196, 183, 36, - -27, -1000, -1000, 386, 149, 149, 30, 135, -1000, 166, - 1, -106, -1000, 478, 87, -1000, 380, 166, -1000, 354, - -101, -109, -1000, 364, 36, -1000, -1000, 347, -1000, 340, - -1000, -103, -1000, -107, -112, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 23, -1000, 2951, -1000, -1000, 2951, 1404, 2107, -1000, + 481, -1000, 432, 427, 449, 2936, 2944, -1000, 256, -1000, + -24, -25, -27, -1000, -1000, -1000, -1000, 500, 2502, -1000, + 108, -1000, 2502, 3032, -1000, 277, -1000, -4, -1000, -1000, + 2746, 2746, 2746, 2746, 2746, 2746, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 277, + 32, -1000, 2371, 277, 277, 277, 277, 277, 277, 2502, + 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, + 277, 277, 277, -1000, 26, -1000, -1000, -1000, 338, 2944, + -1000, -33, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 343, -1000, -1000, 985, 10, 2951, -1000, -1000, -1000, + -1000, 334, 277, 532, 266, 340, 7, 500, 277, 277, + 277, 476, 498, 108, 2502, 2502, 197, 72, 2624, 161, + 103, 2746, 2746, 2746, 2746, 2746, 2746, 2746, 2746, 2746, + 2746, 2746, 2746, 2746, 2746, 2746, 5, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 528, 447, 447, 28, 28, + 28, 28, 28, 69, 814, 1833, 293, 300, 108, 2107, + 2107, 2502, 2502, 2944, 470, 106, 108, 2944, -1000, 200, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 2107, 2107, 2107, + 2107, 1690, 2951, -1000, -1000, 2951, -1000, 508, 2107, 2793, + -1000, -1000, 2805, -1000, -1000, 1547, -1000, -1000, 441, 289, + -1000, -1000, 2238, -1000, -1000, 2944, -1000, 2944, 476, 2944, + 2944, 2944, -1000, 2502, 2502, 72, 110, -1000, -1000, 166, + -1000, -1000, -1000, 876, -1000, -1000, -1000, -1000, 161, 2746, + 2746, 2746, 193, 876, 799, 145, 898, 28, 209, 209, + 50, 50, 50, 50, 50, 829, 
829, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 200, 2107, 272, 277, -1000, + 2502, -1000, 274, 274, 102, 276, 298, -1000, 2107, 112, + -1000, 2502, 200, -1000, 274, 200, 274, 274, -1000, 1404, + -1000, 505, -1000, 270, 697, -1000, -1000, -1000, 465, 1011, + -1000, -1000, 0, 451, 277, -1000, -5, -1000, -1000, 286, + -1000, 286, 286, 278, 268, -1000, 246, -1000, -1000, -1000, + -1000, 193, 876, 664, -1000, 2746, 2746, -1000, 274, 2107, + 108, -1000, -1000, 5, 5, 5, -85, 2944, 282, 107, + -1000, 2502, 248, -1000, -1000, -1000, -1000, -1000, -1000, 503, + 492, 2793, 2793, 2793, 2793, -1000, 374, 368, -1000, 389, + 381, 407, 2951, -1000, 214, 1261, 524, -1000, 2944, -1000, + 2944, -1000, -1000, 2502, 2502, 2502, -1000, -1000, -1000, -1000, + 2746, 876, 876, -1000, 200, 200, 125, 200, 200, 277, + -1000, -82, -1000, 108, 2502, 500, 2502, 2502, 697, 330, + 984, -1000, -1000, -1000, -1000, 359, -1000, 358, -1000, -1000, + -1000, -1000, -1000, 2936, -1000, -1000, 108, 108, -1000, 876, + -1000, -1000, -1000, 169, -1000, 444, -1000, -1000, 2746, 200, + 199, 108, 476, 108, 249, 2502, 2502, -1000, -1000, 243, + 205, 5, 109, -1000, -1000, 438, 108, 108, 24, 167, + -1000, 200, 9, -99, -1000, 521, 86, -1000, 440, 200, + -1000, 390, -94, -105, -1000, 403, 5, -1000, -1000, 386, + -1000, 383, -1000, -97, -1000, -101, -112, -1000, } var yyPgo = [...]int{ - 0, 626, 48, 622, 619, 603, 598, 595, 589, 588, - 584, 581, 575, 446, 574, 571, 29, 570, 567, 565, - 564, 9, 36, 10, 23, 18, 563, 19, 25, 16, - 562, 561, 2, 559, 33, 558, 401, 557, 26, 21, - 556, 30, 552, 551, 24, 362, 550, 549, 547, 542, - 541, 540, 20, 14, 538, 15, 12, 536, 123, 3, - 535, 17, 534, 533, 532, 529, 13, 524, 7, 523, - 1, 516, 515, 514, 513, 31, 5, 71, 512, 410, - 344, 511, 509, 508, 506, 505, 6, 743, 205, 8, - 27, 503, 45, 22, 121, 501, 500, 28, 4, 11, - 499, 498, 497, 487, 0, 35, + 0, 661, 52, 660, 658, 657, 655, 654, 653, 652, + 647, 643, 642, 524, 641, 639, 31, 638, 637, 635, + 633, 12, 59, 10, 28, 
19, 632, 18, 24, 9, + 628, 623, 13, 622, 29, 621, 450, 619, 30, 39, + 618, 43, 617, 616, 32, 20, 614, 613, 612, 610, + 608, 607, 25, 7, 600, 16, 15, 593, 17, 1, + 591, 22, 590, 589, 587, 583, 3, 577, 11, 576, + 2, 573, 566, 565, 564, 23, 6, 93, 563, 441, + 347, 562, 561, 558, 555, 554, 36, 684, 218, 8, + 21, 553, 5, 27, 142, 551, 549, 35, 26, 4, + 14, 547, 546, 538, 418, 0, 388, } var yyR1 = [...]int{ - 0, 100, 101, 101, 1, 1, 1, 1, 1, 1, + 0, 101, 102, 102, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 4, 5, 6, 7, 7, 7, 8, 8, 8, 9, - 10, 10, 10, 11, 12, 12, 12, 102, 13, 14, + 10, 10, 10, 11, 12, 12, 12, 103, 13, 14, 14, 15, 15, 15, 18, 18, 18, 16, 16, 17, 17, 23, 23, 22, 22, 24, 24, 24, 24, 91, 91, 91, 90, 90, 26, 26, 27, 27, 28, 28, @@ -775,14 +795,14 @@ var yyR1 = [...]int{ 25, 25, 25, 41, 41, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 51, 51, 51, 51, 51, 51, 42, 42, 42, 42, 42, 42, 42, - 21, 21, 52, 52, 52, 58, 53, 53, 98, 98, - 98, 98, 45, 45, 45, 45, 45, 45, 45, 45, + 21, 21, 52, 52, 52, 58, 53, 53, 99, 99, + 99, 99, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 49, 49, 49, 47, 47, 47, 47, 47, 47, 47, 47, 47, 48, 48, 48, 48, 48, 48, 48, - 48, 105, 105, 50, 50, 50, 50, 19, 19, 19, - 19, 19, 99, 99, 99, 99, 99, 99, 99, 99, + 48, 106, 106, 50, 50, 50, 50, 19, 19, 19, + 19, 19, 100, 100, 100, 100, 100, 100, 100, 100, 62, 62, 20, 20, 60, 60, 61, 63, 63, 59, 59, 59, 44, 44, 44, 44, 44, 44, 44, 46, 46, 46, 64, 64, 65, 65, 66, 66, 67, 67, @@ -801,7 +821,8 @@ var yyR1 = [...]int{ 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 87, 87, 87, 87, 87, 87, - 87, 87, 87, 87, 87, 87, 87, 103, 104, 97, + 87, 87, 87, 87, 87, 87, 87, 104, 105, 97, + 98, 98, 98, } var yyR2 = [...]int{ @@ -846,78 +867,79 @@ var yyR2 = [...]int{ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, + 0, 1, 1, } var yyChk = [...]int{ - 
-1000, -100, -1, -2, -3, -4, -5, -6, -7, -8, + -1000, -101, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, 6, 7, 8, 9, 29, 106, - 107, 109, 108, 110, 119, 120, 121, -101, 148, -15, - 5, -13, -102, -13, -13, -13, -13, 111, -84, 113, + 107, 109, 108, 110, 119, 120, 121, -102, 148, -15, + 5, -13, -103, -13, -13, -13, -13, 111, -84, 113, 117, -79, 113, 115, 111, 111, 112, 113, 111, -97, -97, -97, -2, 19, 20, -18, 37, 38, -14, -79, -36, -34, -92, 53, -87, 141, 122, 26, 146, 124, 143, 36, 17, 145, 35, 147, 113, 144, 10, -76, - -77, -59, -88, -92, 53, -87, -81, 116, 112, -88, - 53, -87, 111, -88, -92, -80, 116, 53, -80, -92, - -16, 33, 20, 61, -35, 25, -34, 29, -95, -94, - 21, -92, 55, 103, -34, 51, 75, 103, -92, 67, - 53, -97, -92, -97, 114, -92, 22, 49, -88, -17, - 40, -46, -88, 56, 59, -74, 29, -103, 50, -34, - -76, -37, 47, 115, 48, -94, -93, -92, -86, 66, - 21, 23, 69, 101, 15, 70, 100, 136, 106, 45, - 128, 129, 126, 127, 28, 9, 24, 120, 20, 94, - 108, 73, 74, 123, 22, 121, 64, 18, 48, 10, - 12, 13, 116, 115, 85, 112, 43, 7, 102, 25, - 82, 39, 27, 41, 83, 16, 130, 131, 30, 140, - 96, 46, 33, 67, 62, 49, 65, 14, 44, 84, - 109, 135, 42, 6, 139, 29, 119, 40, 111, 72, - 114, 63, 5, 117, 8, 47, 118, 132, 133, 134, - 31, 71, 11, -39, 11, -77, -25, -40, 67, -45, - -41, 22, -44, -59, -57, -58, 101, 90, 91, 98, - 68, 102, -49, -47, -48, -50, 63, 64, 55, 54, - 56, 57, 58, 59, 62, -88, -92, -55, -103, 41, - 42, 136, 137, 140, 138, 70, 31, 126, 134, 133, - 132, 130, 131, 128, 129, 116, 127, 96, 135, -89, - -93, -88, -86, -97, 22, -85, 118, -82, 109, 107, - 28, 108, 14, 147, 53, -92, -92, -97, -22, -24, - 92, -25, -92, -78, 18, 10, 31, 31, -43, 31, - -2, -76, -73, -88, -39, 112, 112, 112, -66, 14, - -25, 66, 65, 82, -25, -42, 85, 67, 83, 84, - 69, 87, 86, 97, 90, 91, 92, 93, 94, 95, - 96, 88, 89, 100, 75, 76, 77, 78, 79, 80, - 81, -58, -103, 104, 105, -45, -45, -45, -45, -45, - -45, -103, 103, -2, -53, -25, -103, -103, -103, -103, - -103, -103, -62, -25, -103, -105, -103, 
-105, -105, -105, - -105, -105, -105, -105, -103, -103, -103, -103, 103, 49, - -88, -97, -83, 114, -26, 51, 10, -91, -90, 21, - -88, 55, 103, -34, -75, 49, -54, -56, -55, -103, - -75, -104, 51, 52, 103, -66, -103, -103, -103, -70, - 16, 15, -25, -25, -51, 62, 67, 63, 64, -41, - -45, -52, -55, -58, 60, 85, 83, 84, 69, -45, - -45, -45, -45, -45, -45, -45, -45, -45, -45, -45, - -45, -45, -45, -45, -98, 53, 55, 101, 122, -44, - -44, -88, -23, 20, -22, -89, -104, 51, -104, -22, - -22, -25, -25, -72, -88, -16, -60, -61, 71, -88, - -104, -22, -23, -22, -22, -89, -86, -92, -92, -39, - -24, -27, -28, -29, -30, -36, -58, -103, -90, 92, - -93, 26, 51, -104, -88, -88, -70, -38, -88, -38, - -38, -25, -67, -68, -25, 62, 63, 64, -52, -45, - -45, -45, -21, 123, 66, -104, -22, -103, -25, -104, - -104, 51, 118, 21, -104, 51, -22, -63, -61, 73, - -25, -104, -104, -104, -104, -104, -97, -64, 12, 51, - -31, -32, -33, 39, 43, 45, 40, 41, 42, 46, - -96, 21, -27, 103, 27, -56, 103, -104, 51, -104, - -104, 51, 17, 51, -69, 23, 24, -21, 66, -45, - -45, -104, -23, -99, -98, -99, -99, 141, -88, -66, - 74, -25, 72, -65, 13, 15, -28, -29, -28, -29, - 39, 39, 39, 44, 39, 44, 39, -32, -92, -104, - 92, 8, -88, -88, -25, -25, -68, -45, -104, -104, - 124, -103, -98, 125, -104, -104, -103, -20, 139, -25, - -66, -25, -53, 49, 49, 39, 39, -76, 56, 29, - -45, -104, 55, -70, -25, -25, -104, 51, -98, -19, - 85, 144, -71, 18, 30, -98, 125, 56, -104, 142, - 46, 145, 8, 85, 29, -104, 36, 143, 146, 35, - -98, 36, 36, 144, 145, 146, + -77, -59, -88, -92, 53, -87, -81, 116, 112, -34, + 111, -34, -34, -80, 116, 53, -80, -34, -16, 33, + 20, 61, -35, 25, -34, 29, -95, -94, 21, -92, + 55, 103, -34, 51, 75, 103, -34, 67, 53, -98, + -104, -89, 50, -88, -86, 53, -87, 66, 21, 23, + 69, 101, 15, 70, 100, 136, 106, 45, 128, 129, + 126, 127, 28, 9, 24, 120, 20, 94, 108, 73, + 74, 123, 22, 121, 64, 18, 48, 10, 12, 13, + 116, 115, 85, 112, 43, 7, 102, 25, 82, 39, + 27, 41, 83, 16, 130, 131, 30, 140, 96, 46, + 33, 67, 62, 
49, 65, 14, 44, 84, 109, 135, + 42, 6, 139, 29, 119, 40, 111, 72, 114, 63, + 5, 117, 8, 47, 118, 132, 133, 134, 31, 71, + 11, -34, -98, 114, -34, 22, 49, -34, -17, 40, + -46, -88, 56, 59, -74, 29, -104, -34, -76, -37, + 47, 115, 48, -94, -93, -92, -86, -39, 11, -77, + -25, -40, 67, -45, -41, 22, -44, -59, -57, -58, + 101, 90, 91, 98, 68, 102, -49, -47, -48, -50, + 63, 64, 55, 54, 56, 57, 58, 59, 62, -88, + -92, -55, -104, 41, 42, 136, 137, 140, 138, 70, + 31, 126, 134, 133, 132, 130, 131, 128, 129, 116, + 127, 96, 135, -89, -93, -86, -98, 22, -85, 118, + -82, 109, 107, 28, 108, 14, 147, 53, -34, -34, + -98, -22, -24, 92, -25, -92, -78, 18, 10, 31, + 31, -43, 31, -2, -76, -73, -88, -39, 112, 112, + 112, -66, 14, -25, 66, 65, 82, -25, -42, 85, + 67, 83, 84, 69, 87, 86, 97, 90, 91, 92, + 93, 94, 95, 96, 88, 89, 100, 75, 76, 77, + 78, 79, 80, 81, -58, -104, 104, 105, -45, -45, + -45, -45, -45, -45, -104, 103, -2, -53, -25, -104, + -104, -104, -104, -104, -104, -62, -25, -104, -106, -104, + -106, -106, -106, -106, -106, -106, -106, -104, -104, -104, + -104, 103, 49, -88, -97, -83, 114, -26, 51, 10, + -91, -90, 21, -88, 55, 103, -34, -75, 49, -54, + -56, -55, -104, -75, -105, 51, 52, 103, -66, -104, + -104, -104, -70, 16, 15, -25, -25, -51, 62, 67, + 63, 64, -41, -45, -52, -55, -58, 60, 85, 83, + 84, 69, -45, -45, -45, -45, -45, -45, -45, -45, + -45, -45, -45, -45, -45, -45, -45, -99, 53, 55, + 101, 122, -44, -44, -88, -23, 20, -22, -89, -105, + 51, -105, -22, -22, -25, -25, -72, -88, -16, -60, + -61, 71, -88, -105, -22, -23, -22, -22, -89, -34, + -34, -39, -24, -27, -28, -29, -30, -36, -58, -104, + -90, 92, -93, 26, 51, -105, -88, -88, -70, -38, + -88, -38, -38, -25, -67, -68, -25, 62, 63, 64, + -52, -45, -45, -45, -21, 123, 66, -105, -22, -104, + -25, -105, -105, 51, 118, 21, -105, 51, -22, -63, + -61, 73, -25, -105, -105, -105, -105, -105, -98, -64, + 12, 51, -31, -32, -33, 39, 43, 45, 40, 41, + 42, 46, -96, 21, -27, 103, 27, -56, 103, -105, + 51, -105, -105, 51, 17, 
51, -69, 23, 24, -21, + 66, -45, -45, -105, -23, -100, -99, -100, -100, 141, + -88, -66, 74, -25, 72, -65, 13, 15, -28, -29, + -28, -29, 39, 39, 39, 44, 39, 44, 39, -32, + -92, -105, 92, 8, -88, -88, -25, -25, -68, -45, + -105, -105, 124, -104, -99, 125, -105, -105, -104, -20, + 139, -25, -66, -25, -53, 49, 49, 39, 39, -76, + 56, 29, -45, -105, 55, -70, -25, -25, -105, 51, + -99, -19, 85, 144, -71, 18, 30, -99, 125, 56, + -105, 142, 46, 145, 8, 85, 29, -105, 36, 143, + 146, 35, -99, 36, 36, 144, 145, 146, } var yyDef = [...]int{ @@ -929,67 +951,67 @@ var yyDef = [...]int{ 35, 36, 17, 42, 43, 47, 45, 46, 38, 0, 0, 80, 97, 306, 307, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 0, 22, - 279, 0, 229, 0, -2, -2, 0, 0, 0, 409, - 302, 303, 0, 409, 0, 0, 0, 0, 0, 33, - 49, 0, 48, 40, 263, 0, 96, 0, 99, 81, - 0, 83, 84, 0, 105, 0, 0, 0, 409, 0, - 300, 25, 0, 28, 0, 30, 285, 0, 409, 0, - 50, 0, 239, 0, 0, 0, 0, 0, 407, 95, - 105, 73, 0, 0, 0, 82, 98, 308, 309, 310, - 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, - 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, - 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, - 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, - 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, - 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, - 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, - 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, - 391, 392, 393, 246, 0, 280, 281, 107, 0, 112, - 115, 0, 152, 153, 154, 155, 0, 0, 0, 0, - 0, 0, 177, 178, 179, 180, 113, 114, 232, 233, - 234, 235, 236, 237, 238, 229, 0, 278, 0, 0, - 0, 0, 0, 0, 0, 220, 0, 201, 201, 201, - 201, 201, 201, 201, 201, 0, 0, 0, 0, 230, - 0, 304, -2, 23, 287, 0, 0, 409, 296, 290, - 291, 292, 293, 294, 295, 29, 31, 32, 64, 53, - 55, 59, 0, 0, 282, 283, 240, 241, 269, 0, - 272, 269, 0, 265, 246, 0, 0, 0, 254, 0, - 106, 0, 0, 0, 110, 0, 0, 0, 0, 0, + 279, 0, 229, 0, -2, -2, 0, 0, 0, 410, + 0, 410, 0, 0, 0, 0, 0, 33, 49, 0, + 48, 40, 263, 0, 96, 0, 99, 81, 0, 
83, + 84, 0, 105, 0, 0, 0, 410, 0, 300, 25, + 411, 412, 407, 304, 305, 302, 303, 310, 311, 312, + 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, + 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, + 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, + 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, + 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, + 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, + 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, + 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, + 393, 0, 28, 0, 30, 285, 0, 410, 0, 50, + 0, 239, 0, 0, 0, 0, 0, 95, 105, 73, + 0, 0, 0, 82, 98, 308, 309, 246, 0, 280, + 281, 107, 0, 112, 115, 0, 152, 153, 154, 155, + 0, 0, 0, 0, 0, 0, 177, 178, 179, 180, + 113, 114, 232, 233, 234, 235, 236, 237, 238, 229, + 0, 278, 0, 0, 0, 0, 0, 0, 0, 220, + 0, 201, 201, 201, 201, 201, 201, 201, 201, 0, + 0, 0, 0, 230, 0, -2, 23, 287, 0, 0, + 409, 296, 290, 291, 292, 293, 294, 295, 29, 31, + 32, 64, 53, 55, 59, 0, 0, 282, 283, 240, + 241, 269, 0, 272, 269, 0, 265, 246, 0, 0, + 0, 254, 0, 106, 0, 0, 0, 110, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 133, 134, 135, 136, 137, 138, - 139, 126, 0, 0, 0, 171, 172, 173, 174, 175, - 0, 51, 0, 0, 0, 146, 0, 0, 0, 0, - 0, 47, 0, 221, 0, 193, 0, 194, 195, 196, - 197, 198, 199, 200, 0, 51, 0, 0, 0, 0, - 301, 26, 0, 297, 105, 0, 0, 56, 60, 0, - 62, 63, 0, 16, 18, 0, 271, 273, 275, 0, - 19, 264, 0, 408, 0, 254, 0, 0, 0, 21, - 0, 0, 108, 109, 111, 127, 0, 129, 131, 116, - 117, 118, 142, 143, 144, 0, 0, 0, 0, 140, - 122, 0, 156, 157, 158, 159, 160, 161, 162, 163, - 164, 165, 166, 167, 170, 148, 149, 150, 151, 168, - 169, 176, 0, 0, 52, 230, 145, 0, 277, 0, - 0, 0, 0, 0, 261, 0, 227, 224, 0, 0, - 202, 0, 0, 0, 0, 231, 305, 409, 27, 242, - 54, 65, 66, 68, 69, 70, 78, 0, 61, 57, - 0, 0, 0, 276, 267, 266, 20, 0, 103, 0, - 0, 255, 247, 248, 251, 128, 130, 132, 119, 140, - 123, 0, 120, 0, 0, 181, 0, 51, 147, 184, - 185, 0, 0, 0, 0, 0, 246, 0, 225, 0, - 0, 192, 203, 204, 205, 206, 24, 244, 
0, 0, - 0, 0, 0, 85, 0, 0, 88, 0, 0, 0, - 0, 79, 0, 0, 0, 274, 0, 100, 0, 101, - 102, 0, 0, 0, 250, 252, 253, 121, 0, 141, - 124, 182, 0, 0, 212, 0, 0, 0, 262, 222, - 191, 228, 0, 246, 0, 0, 67, 74, 0, 77, - 86, 87, 89, 0, 91, 0, 93, 94, 71, 72, - 58, 0, 268, 104, 256, 257, 249, 125, 183, 186, - 213, 0, 217, 0, 187, 188, 0, 0, 0, 226, - 254, 245, 243, 0, 0, 90, 92, 270, 0, 0, - 207, 190, 223, 258, 75, 76, 214, 0, 218, 0, - 0, 0, 15, 0, 0, 215, 0, 0, 189, 0, - 0, 0, 259, 0, 0, 219, 208, 0, 211, 0, - 216, 209, 260, 0, 0, 210, + 0, 0, 0, 0, 0, 0, 0, 133, 134, 135, + 136, 137, 138, 139, 126, 0, 0, 0, 171, 172, + 173, 174, 175, 0, 51, 0, 0, 0, 146, 0, + 0, 0, 0, 0, 47, 0, 221, 0, 193, 0, + 194, 195, 196, 197, 198, 199, 200, 0, 51, 0, + 0, 0, 0, 301, 26, 0, 297, 105, 0, 0, + 56, 60, 0, 62, 63, 0, 16, 18, 0, 271, + 273, 275, 0, 19, 264, 0, 408, 0, 254, 0, + 0, 0, 21, 0, 0, 108, 109, 111, 127, 0, + 129, 131, 116, 117, 118, 142, 143, 144, 0, 0, + 0, 0, 140, 122, 0, 156, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 170, 148, 149, + 150, 151, 168, 169, 176, 0, 0, 52, 230, 145, + 0, 277, 0, 0, 0, 0, 0, 261, 0, 227, + 224, 0, 0, 202, 0, 0, 0, 0, 231, 410, + 27, 242, 54, 65, 66, 68, 69, 70, 78, 0, + 61, 57, 0, 0, 0, 276, 267, 266, 20, 0, + 103, 0, 0, 255, 247, 248, 251, 128, 130, 132, + 119, 140, 123, 0, 120, 0, 0, 181, 0, 51, + 147, 184, 185, 0, 0, 0, 0, 0, 246, 0, + 225, 0, 0, 192, 203, 204, 205, 206, 24, 244, + 0, 0, 0, 0, 0, 85, 0, 0, 88, 0, + 0, 0, 0, 79, 0, 0, 0, 274, 0, 100, + 0, 101, 102, 0, 0, 0, 250, 252, 253, 121, + 0, 141, 124, 182, 0, 0, 212, 0, 0, 0, + 262, 222, 191, 228, 0, 246, 0, 0, 67, 74, + 0, 77, 86, 87, 89, 0, 91, 0, 93, 94, + 71, 72, 58, 0, 268, 104, 256, 257, 249, 125, + 183, 186, 213, 0, 217, 0, 187, 188, 0, 0, + 0, 226, 254, 245, 243, 0, 0, 90, 92, 270, + 0, 0, 207, 190, 223, 258, 75, 76, 214, 0, + 218, 0, 0, 0, 15, 0, 0, 215, 0, 0, + 189, 0, 0, 0, 259, 0, 0, 219, 208, 0, + 211, 0, 216, 209, 260, 0, 0, 210, } var yyTok1 = [...]int{ @@ 
-1444,45 +1466,45 @@ yydefault: yyDollar = yyS[yypt-5 : yypt+1] //line ./go/vt/sqlparser/sql.y:273 { - yyVAL.statement = &DDL{Action: CreateStr, NewName: yyDollar[4].tableIdent} + yyVAL.statement = &DDL{Action: CreateStr, NewName: yyDollar[4].tableName} } case 24: yyDollar = yyS[yypt-8 : yypt+1] //line ./go/vt/sqlparser/sql.y:277 { // Change this to an alter statement - yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[7].tableIdent, NewName: yyDollar[7].tableIdent} + yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[7].tableName, NewName: yyDollar[7].tableName} } case 25: yyDollar = yyS[yypt-4 : yypt+1] //line ./go/vt/sqlparser/sql.y:282 { - yyVAL.statement = &DDL{Action: CreateStr, NewName: NewTableIdent(yyDollar[3].colIdent.Lowered())} + yyVAL.statement = &DDL{Action: CreateStr, NewName: yyDollar[3].tableName.ToViewName()} } case 26: yyDollar = yyS[yypt-6 : yypt+1] //line ./go/vt/sqlparser/sql.y:288 { - yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableIdent, NewName: yyDollar[4].tableIdent} + yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableName, NewName: yyDollar[4].tableName} } case 27: yyDollar = yyS[yypt-7 : yypt+1] //line ./go/vt/sqlparser/sql.y:292 { // Change this to a rename statement - yyVAL.statement = &DDL{Action: RenameStr, Table: yyDollar[4].tableIdent, NewName: yyDollar[7].tableIdent} + yyVAL.statement = &DDL{Action: RenameStr, Table: yyDollar[4].tableName, NewName: yyDollar[7].tableName} } case 28: yyDollar = yyS[yypt-4 : yypt+1] //line ./go/vt/sqlparser/sql.y:297 { - yyVAL.statement = &DDL{Action: AlterStr, Table: NewTableIdent(yyDollar[3].colIdent.Lowered()), NewName: NewTableIdent(yyDollar[3].colIdent.Lowered())} + yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[3].tableName.ToViewName(), NewName: yyDollar[3].tableName.ToViewName()} } case 29: yyDollar = yyS[yypt-5 : yypt+1] //line ./go/vt/sqlparser/sql.y:303 { - yyVAL.statement = &DDL{Action: RenameStr, Table: yyDollar[3].tableIdent, 
NewName: yyDollar[5].tableIdent} + yyVAL.statement = &DDL{Action: RenameStr, Table: yyDollar[3].tableName, NewName: yyDollar[5].tableName} } case 30: yyDollar = yyS[yypt-4 : yypt+1] @@ -1492,14 +1514,14 @@ yydefault: if yyDollar[3].byt != 0 { exists = true } - yyVAL.statement = &DDL{Action: DropStr, Table: yyDollar[4].tableIdent, IfExists: exists} + yyVAL.statement = &DDL{Action: DropStr, Table: yyDollar[4].tableName, IfExists: exists} } case 31: yyDollar = yyS[yypt-5 : yypt+1] //line ./go/vt/sqlparser/sql.y:317 { // Change this to an alter statement - yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[5].tableIdent, NewName: yyDollar[5].tableIdent} + yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[5].tableName, NewName: yyDollar[5].tableName} } case 32: yyDollar = yyS[yypt-5 : yypt+1] @@ -1509,13 +1531,13 @@ yydefault: if yyDollar[3].byt != 0 { exists = true } - yyVAL.statement = &DDL{Action: DropStr, Table: NewTableIdent(yyDollar[4].colIdent.Lowered()), IfExists: exists} + yyVAL.statement = &DDL{Action: DropStr, Table: yyDollar[4].tableName.ToViewName(), IfExists: exists} } case 33: yyDollar = yyS[yypt-3 : yypt+1] //line ./go/vt/sqlparser/sql.y:332 { - yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[3].tableIdent, NewName: yyDollar[3].tableIdent} + yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[3].tableName, NewName: yyDollar[3].tableName} } case 34: yyDollar = yyS[yypt-2 : yypt+1] @@ -3142,6 +3164,24 @@ yydefault: { forceEOF(yylex) } + case 410: + yyDollar = yyS[yypt-0 : yypt+1] + //line ./go/vt/sqlparser/sql.y:1697 + { + forceEOF(yylex) + } + case 411: + yyDollar = yyS[yypt-1 : yypt+1] + //line ./go/vt/sqlparser/sql.y:1701 + { + forceEOF(yylex) + } + case 412: + yyDollar = yyS[yypt-1 : yypt+1] + //line ./go/vt/sqlparser/sql.y:1705 + { + forceEOF(yylex) + } } goto yystack /* stack new state and value */ } diff --git a/go/vt/sqlparser/sql.y b/go/vt/sqlparser/sql.y index 7e62f5e792d..1e60726055a 100644 --- a/go/vt/sqlparser/sql.y 
+++ b/go/vt/sqlparser/sql.y @@ -186,7 +186,7 @@ func forceEOF(yylex interface{}) { %type sql_id reserved_sql_id col_alias as_ci_opt %type table_id reserved_table_id table_alias as_opt_id %type as_opt -%type force_eof +%type force_eof ddl_force_eof %type charset %type convert_type @@ -269,43 +269,43 @@ set_statement: } create_statement: - CREATE TABLE not_exists_opt table_id force_eof + CREATE TABLE not_exists_opt table_name ddl_force_eof { $$ = &DDL{Action: CreateStr, NewName: $4} } -| CREATE constraint_opt INDEX ID using_opt ON table_id force_eof +| CREATE constraint_opt INDEX ID using_opt ON table_name ddl_force_eof { // Change this to an alter statement - $$ = &DDL{Action: AlterStr, Table: $7, NewName: $7} + $$ = &DDL{Action: AlterStr, Table: $7, NewName:$7} } -| CREATE VIEW sql_id force_eof +| CREATE VIEW table_name ddl_force_eof { - $$ = &DDL{Action: CreateStr, NewName: NewTableIdent($3.Lowered())} + $$ = &DDL{Action: CreateStr, NewName: $3.ToViewName()} } alter_statement: - ALTER ignore_opt TABLE table_id non_rename_operation force_eof + ALTER ignore_opt TABLE table_name non_rename_operation force_eof { $$ = &DDL{Action: AlterStr, Table: $4, NewName: $4} } -| ALTER ignore_opt TABLE table_id RENAME to_opt table_id +| ALTER ignore_opt TABLE table_name RENAME to_opt table_name { // Change this to a rename statement $$ = &DDL{Action: RenameStr, Table: $4, NewName: $7} } -| ALTER VIEW sql_id force_eof +| ALTER VIEW table_name ddl_force_eof { - $$ = &DDL{Action: AlterStr, Table: NewTableIdent($3.Lowered()), NewName: NewTableIdent($3.Lowered())} + $$ = &DDL{Action: AlterStr, Table: $3.ToViewName(), NewName: $3.ToViewName()} } rename_statement: - RENAME TABLE table_id TO table_id + RENAME TABLE table_name TO table_name { $$ = &DDL{Action: RenameStr, Table: $3, NewName: $5} } drop_statement: - DROP TABLE exists_opt table_id + DROP TABLE exists_opt table_name { var exists bool if $3 != 0 { @@ -313,22 +313,22 @@ drop_statement: } $$ = &DDL{Action: DropStr, Table: $4, 
IfExists: exists} } -| DROP INDEX ID ON table_id +| DROP INDEX ID ON table_name { // Change this to an alter statement $$ = &DDL{Action: AlterStr, Table: $5, NewName: $5} } -| DROP VIEW exists_opt sql_id force_eof +| DROP VIEW exists_opt table_name ddl_force_eof { var exists bool if $3 != 0 { exists = true } - $$ = &DDL{Action: DropStr, Table: NewTableIdent($4.Lowered()), IfExists: exists} + $$ = &DDL{Action: DropStr, Table: $4.ToViewName(), IfExists: exists} } analyze_statement: - ANALYZE TABLE table_id + ANALYZE TABLE table_name { $$ = &DDL{Action: AlterStr, Table: $3, NewName: $3} } @@ -1692,3 +1692,16 @@ force_eof: { forceEOF(yylex) } + +ddl_force_eof: + { + forceEOF(yylex) + } +| openb + { + forceEOF(yylex) + } +| reserved_sql_id + { + forceEOF(yylex) + } diff --git a/go/vt/tabletserver/engines/schema/schema_engine.go b/go/vt/tabletserver/engines/schema/schema_engine.go index bed1980f147..e0b44946f96 100644 --- a/go/vt/tabletserver/engines/schema/schema_engine.go +++ b/go/vt/tabletserver/engines/schema/schema_engine.go @@ -329,19 +329,6 @@ func (se *Engine) TableWasCreatedOrAltered(ctx context.Context, tableName string return nil } -// TableWasDropped must be called if a table was dropped. -func (se *Engine) TableWasDropped(tableName sqlparser.TableIdent) { - se.mu.Lock() - defer se.mu.Unlock() - if !se.isOpen { - return - } - - delete(se.tables, tableName.String()) - log.Infof("Table %s forgotten", tableName) - se.broadcast(nil, nil, []string{tableName.String()}) -} - // RegisterNotifier registers the function for schema change notification. // It also causes an immediate notification to the caller. The notified // function must not change the map or its contents. 
The only exception diff --git a/go/vt/tabletserver/engines/schema/schema_engine_test.go b/go/vt/tabletserver/engines/schema/schema_engine_test.go index 9ea39383d2b..96b3410cf59 100644 --- a/go/vt/tabletserver/engines/schema/schema_engine_test.go +++ b/go/vt/tabletserver/engines/schema/schema_engine_test.go @@ -296,45 +296,6 @@ func TestCreateOrUpdateTable(t *testing.T) { } } -func TestDropTable(t *testing.T) { - db := fakesqldb.New(t) - defer db.Close() - for query, result := range schematest.Queries() { - db.AddQuery(query, result) - } - existingTable := sqlparser.NewTableIdent("test_table_01") - se := newEngine(10, 1*time.Second, 1*time.Second, false) - se.Open(db.ConnParams()) - defer se.Close() - table := se.GetTable(existingTable) - if table == nil { - t.Fatalf("table: %s should exist", existingTable) - } - i := 0 - se.RegisterNotifier("test", func(schema map[string]*Table, created, altered, dropped []string) { - switch i { - case 0: - // Ignore. - case 1: - want := []string{"test_table_01"} - if !reflect.DeepEqual(dropped, want) { - t.Errorf("callback 1: %v, want %v\n", dropped, want) - } - default: - t.Fatal("unexpected") - } - i++ - }) - se.TableWasDropped(existingTable) - table = se.GetTable(existingTable) - if table != nil { - t.Fatalf("table: %s should not exist", existingTable) - } - if i < 2 { - t.Error("Notifier did not get called") - } -} - func TestExportVars(t *testing.T) { db := fakesqldb.New(t) defer db.Close() diff --git a/go/vt/tabletserver/planbuilder/ddl.go b/go/vt/tabletserver/planbuilder/ddl.go index 2bb0dd26664..7bbe65f7c4e 100644 --- a/go/vt/tabletserver/planbuilder/ddl.go +++ b/go/vt/tabletserver/planbuilder/ddl.go @@ -9,8 +9,8 @@ import "github.com/youtube/vitess/go/vt/sqlparser" // DDLPlan provides a plan for DDLs. type DDLPlan struct { Action string - TableName sqlparser.TableIdent - NewName sqlparser.TableIdent + TableName *sqlparser.TableName + NewName *sqlparser.TableName } // DDLParse parses a DDL and produces a DDLPlan. 
@@ -36,7 +36,7 @@ func analyzeDDL(ddl *sqlparser.DDL, getTable TableGetter) *ExecPlan { tableName := ddl.Table // Skip TableName if table is empty (create statements) or not found in schema if !tableName.IsEmpty() { - table, ok := getTable(tableName) + table, ok := getTable(tableName.Name) if ok { plan.TableName = table.Name } diff --git a/go/vt/tabletserver/planbuilder/plan_test.go b/go/vt/tabletserver/planbuilder/plan_test.go index 5f788a15cfe..48b9037b4cc 100644 --- a/go/vt/tabletserver/planbuilder/plan_test.go +++ b/go/vt/tabletserver/planbuilder/plan_test.go @@ -128,8 +128,8 @@ func TestDDLPlan(t *testing.T) { t.Fatalf("Error marshalling %v", plan) } matchString(t, tcase.lineno, expected["Action"], plan.Action) - matchString(t, tcase.lineno, expected["TableName"], plan.TableName.String()) - matchString(t, tcase.lineno, expected["NewName"], plan.NewName.String()) + matchString(t, tcase.lineno, expected["TableName"], sqlparser.String(plan.TableName)) + matchString(t, tcase.lineno, expected["NewName"], sqlparser.String(plan.NewName)) } } diff --git a/go/vt/tabletserver/query_executor.go b/go/vt/tabletserver/query_executor.go index ab8179672d3..6547496934a 100644 --- a/go/vt/tabletserver/query_executor.go +++ b/go/vt/tabletserver/query_executor.go @@ -295,25 +295,33 @@ func (qre *QueryExecutor) execDDL() (*sqltypes.Result, error) { return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "DDL is not understood") } - conn, err := qre.tsv.te.txPool.LocalBegin(qre.ctx) - if err != nil { - return nil, err + defer qre.tsv.se.Reload(qre.ctx) + + if qre.transactionID != 0 { + conn, err := qre.tsv.te.txPool.Get(qre.transactionID, "DDL begin again") + if err != nil { + return nil, err + } + defer conn.Recycle() + result, err := qre.execSQL(conn, qre.query, false) + if err != nil { + return nil, err + } + err = conn.BeginAgain(qre.ctx) + if err != nil { + return nil, err + } + return result, nil } - defer qre.tsv.te.txPool.LocalCommit(qre.ctx, conn, qre.tsv.messager) - 
result, err := qre.execSQL(conn, qre.query, false) + result, err := qre.execAsTransaction(func(conn *TxConnection) (*sqltypes.Result, error) { + return qre.execSQL(conn, qre.query, false) + }) + if err != nil { return nil, err } - if !ddlPlan.TableName.IsEmpty() && ddlPlan.TableName != ddlPlan.NewName { - // It's a drop or rename. - qre.tsv.se.TableWasDropped(ddlPlan.TableName) - } - if !ddlPlan.NewName.IsEmpty() { - if err := qre.tsv.se.TableWasCreatedOrAltered(qre.ctx, ddlPlan.NewName.String()); err != nil { - return nil, err - } - } + return result, nil } diff --git a/go/vt/tabletserver/tx_pool.go b/go/vt/tabletserver/tx_pool.go index ac4c40ffbe0..26113c1dba5 100644 --- a/go/vt/tabletserver/tx_pool.go +++ b/go/vt/tabletserver/tx_pool.go @@ -309,6 +309,17 @@ func (txc *TxConnection) Exec(ctx context.Context, query string, maxrows int, wa return r, nil } +// BeginAgain commits the existing transaction and begins a new one +func (txc *TxConnection) BeginAgain(ctx context.Context) error { + if _, err := txc.DBConn.Exec(ctx, "commit", 1, false); err != nil { + return err + } + if _, err := txc.DBConn.Exec(ctx, "begin", 1, false); err != nil { + return err + } + return nil +} + // Recycle returns the connection to the pool. The transaction remains // active. func (txc *TxConnection) Recycle() { From 980bd649731181358b20b14fd4feba6eea648302 Mon Sep 17 00:00:00 2001 From: Joshua Thompson Date: Mon, 6 Mar 2017 19:10:41 -0800 Subject: [PATCH 076/108] Kubernetes sandbox reliability updates. 
--- helm/vitess/templates/_vttablet.tpl | 7 +++-- test/cluster/k8s_environment.py | 3 ++- test/cluster/sandbox/initial_reparent.py | 26 ++++++++++++------- test/cluster/sandbox/sandbox_utils.py | 20 -------------- test/cluster/sandbox/subprocess_component.py | 9 ++++--- .../sandbox/vitess_kubernetes_sandbox.py | 4 +-- test/cluster/sandbox/wait_for_mysql.py | 11 +++++--- 7 files changed, 37 insertions(+), 43 deletions(-) diff --git a/helm/vitess/templates/_vttablet.tpl b/helm/vitess/templates/_vttablet.tpl index fb48e440f7c..39d2dd4ef02 100644 --- a/helm/vitess/templates/_vttablet.tpl +++ b/helm/vitess/templates/_vttablet.tpl @@ -176,9 +176,10 @@ volumes: {{- $tablet := index . 4 -}} {{- with $tablet.vttablet -}} {{- $0 := $.Values.vttablet -}} +{{- $cellClean := $cell.name | replace "_" "-" -}} {{- $keyspaceClean := $keyspace.name | replace "_" "-" -}} {{- $shardClean := include "format-shard-name" $shard.name -}} -{{- $setName := printf "%s-%s-%s" $keyspaceClean $shardClean $tablet.type | lower -}} +{{- $setName := printf "%s-%s-%s-%s" $cellClean $keyspaceClean $shardClean $tablet.type | lower -}} {{- $uid := "$(cat $VTDATAROOT/init/tablet-uid)" }} # vttablet StatefulSet apiVersion: apps/v1beta1 @@ -193,6 +194,7 @@ spec: labels: app: vitess component: vttablet + cell: {{$cellClean | quote}} keyspace: {{$keyspace.name | quote}} shard: {{$shardClean | quote}} type: {{$tablet.type | quote}} @@ -222,6 +224,7 @@ spec: {{- $cell := index . 1 -}} {{- $keyspace := index . 2 -}} {{- $shard := index . 3 -}} +{{- $shardClean := include "format-shard-name" $shard.name -}} {{- $tablet := index . 4 -}} {{- $uid := index . 
5 -}} {{- with $tablet.vttablet -}} @@ -235,7 +238,7 @@ metadata: app: vitess component: vttablet keyspace: {{$keyspace.name | quote}} - shard: {{$shard.name | quote}} + shard: {{$shardClean | quote}} type: {{$tablet.type | quote}} annotations: pod.beta.kubernetes.io/init-containers: '[ diff --git a/test/cluster/k8s_environment.py b/test/cluster/k8s_environment.py index 9e7441e334e..260fc70fbc4 100644 --- a/test/cluster/k8s_environment.py +++ b/test/cluster/k8s_environment.py @@ -184,7 +184,8 @@ def restart_mysql_task(self, tablet_name, task_name, is_alloc=False): time.sleep(60) # Create the pod again. - os.system('cat %s | kubectl create -f -' % tmpfile.name) + os.system('cat %s | kubectl create --namespace=%s -f -' % ( + tmpfile.name, self.cluster_name)) while time.time() - start_time < 120: logging.info('Waiting for pod %s to be running', vttablet_pod_name) pod = subprocess.check_output( diff --git a/test/cluster/sandbox/initial_reparent.py b/test/cluster/sandbox/initial_reparent.py index 532176672db..66415e31a48 100755 --- a/test/cluster/sandbox/initial_reparent.py +++ b/test/cluster/sandbox/initial_reparent.py @@ -4,10 +4,10 @@ import json import logging import optparse +import sys import time from vtproto import topodata_pb2 from vttest import sharding_utils -import sandbox_utils import vtctl_sandbox @@ -23,12 +23,17 @@ def initial_reparent(keyspace, master_cell, num_shards, namespace, timeout_s): """Performs the first reparent.""" successfully_reparented = [] master_tablets = {} + start_time = time.time() + logging.info('Finding tablets to reparent to.') while len(master_tablets) < num_shards: + if time.time() - start_time > timeout_s: + logging.fatal('Timed out waiting to find a replica tablet') + return 1 for shard_name in sharding_utils.get_shard_names(num_shards): - shard_name = sandbox_utils.fix_shard_name(shard_name) + if shard_name in master_tablets: + continue tablets = vtctl_sandbox.execute_vtctl_command( - ['ListShardTablets', '%s/%s' % ( - 
keyspace, sandbox_utils.fix_shard_name(shard_name))], + ['ListShardTablets', '%s/%s' % (keyspace, shard_name)], namespace=namespace)[0].split('\n') tablets = [x.split(' ') for x in tablets if x] potential_masters = [ @@ -36,11 +41,11 @@ def initial_reparent(keyspace, master_cell, num_shards, namespace, timeout_s): and x[0].split('-')[0] == master_cell] if potential_masters: master_tablets[shard_name] = potential_masters[0] + logging.info( + '%s selected for shard %s', potential_masters[0], shard_name) - start_time = time.time() while time.time() - start_time < timeout_s: for shard_name in sharding_utils.get_shard_names(num_shards): - shard_name = sandbox_utils.fix_shard_name(shard_name) master_tablet_id = master_tablets[shard_name] if is_master(master_tablet_id, namespace): logging.info('Tablet %s is the master of %s/%s.', @@ -55,8 +60,9 @@ def initial_reparent(keyspace, master_cell, num_shards, namespace, timeout_s): master_tablet_id], namespace=namespace, timeout_s=5) if len(successfully_reparented) == num_shards: logging.info('Done with initial reparent.') - return + return 0 logging.fatal('Timed out waiting for initial reparent.') + return 1 def main(): @@ -73,9 +79,9 @@ def main(): logging.getLogger().setLevel(logging.INFO) options, _ = parser.parse_args() - initial_reparent(options.keyspace, options.master_cell, - options.shard_count, options.namespace, - options.timeout) + sys.exit(initial_reparent(options.keyspace, options.master_cell, + options.shard_count, options.namespace, + options.timeout)) if __name__ == '__main__': diff --git a/test/cluster/sandbox/sandbox_utils.py b/test/cluster/sandbox/sandbox_utils.py index 040dc7cad4d..cfe28869fa5 100644 --- a/test/cluster/sandbox/sandbox_utils.py +++ b/test/cluster/sandbox/sandbox_utils.py @@ -5,26 +5,6 @@ import random -def fix_shard_name(shard_name): - """Kubernetes doesn't allow '-' in the beginning or end of attributes. - - Instead, replace them with an x. - - Example: -80 becomes x80, 80- becomes 80x. 
- - Args: - shard_name: string, A standard shard name (like -80). - - Returns: - A fixed shard name suitable for kubernetes (string). - """ - if shard_name.startswith('-'): - return 'x%s' % shard_name[1:] - if shard_name.endswith('-'): - return '%sx' % shard_name[:-1] - return shard_name - - def create_log_file(log_dir, filename): """Create a log file. diff --git a/test/cluster/sandbox/subprocess_component.py b/test/cluster/sandbox/subprocess_component.py index 2de950f454b..5f25325a675 100644 --- a/test/cluster/sandbox/subprocess_component.py +++ b/test/cluster/sandbox/subprocess_component.py @@ -28,13 +28,14 @@ def start(self): self.log_dir, '%s.INFO' % self.name) errorfile = sandbox_utils.create_log_file( self.log_dir, '%s.ERROR' % self.name) - subprocess.call(['./%s' % self.script] + script_args, stdout=infofile, - stderr=errorfile) + subprocess.check_call( + ['./%s' % self.script] + script_args, stdout=infofile, + stderr=errorfile) logging.info('Done.') except subprocess.CalledProcessError as error: raise sandbox.SandboxError( - 'Subprocess %s returned errorcode %d, result %s.' % ( - self.script, error.returncode, error.output)) + 'Subprocess %s returned errorcode %d, find log at %s.' 
% ( + self.script, error.returncode, errorfile.name)) finally: if infofile: infofile.close() diff --git a/test/cluster/sandbox/vitess_kubernetes_sandbox.py b/test/cluster/sandbox/vitess_kubernetes_sandbox.py index 3cd61f1da5c..502d81c13dd 100755 --- a/test/cluster/sandbox/vitess_kubernetes_sandbox.py +++ b/test/cluster/sandbox/vitess_kubernetes_sandbox.py @@ -11,7 +11,6 @@ from vttest import sharding_utils import sandbox -import sandbox_utils import sandlet import subprocess_component @@ -92,7 +91,6 @@ def _generate_helm_keyspaces(self): for shard_index, shard_name in enumerate( sharding_utils.get_shard_names(ks['shard_count'])): - shard_name = sandbox_utils.fix_shard_name(shard_name) shard = dict( name=shard_name, tablets=[dict( @@ -221,7 +219,7 @@ def generate_helm_sandlet(self): 'wait_for_mysql_%s' % name, self.name, 'wait_for_mysql.py', self.log_dir, namespace=self.name, cells=','.join(self.app_options.cells), - tablet_count=(shard_count * ( + tablet_count=(shard_count * len(self.app_options.cells) * ( keyspace['replica_count'] + keyspace['rdonly_count']))) wait_for_mysql_subprocess.dependencies = ['helm'] initial_reparent_subprocess = subprocess_component.Subprocess( diff --git a/test/cluster/sandbox/wait_for_mysql.py b/test/cluster/sandbox/wait_for_mysql.py index 041640c83d5..0099cfb0ddb 100755 --- a/test/cluster/sandbox/wait_for_mysql.py +++ b/test/cluster/sandbox/wait_for_mysql.py @@ -4,6 +4,7 @@ import logging import optparse import re +import sys import time import vtctl_sandbox @@ -30,6 +31,8 @@ def main(): parser.add_option('-c', '--cells', help='Comma separated list of cells') parser.add_option('-t', '--tablet_count', help='Total number of expected tablets', type=int) + parser.add_option('-w', '--wait', help='Max wait time (s)', type=int, + default=300) logging.getLogger().setLevel(logging.INFO) options, _ = parser.parse_args() @@ -42,17 +45,18 @@ def main(): # Do this in a loop as the output of ListAllTablets may not be parseable # until all tablets 
have been started. - while time.time() - start_time < 300 and len(tablets) < options.tablet_count: + while (time.time() - start_time < options.wait and + len(tablets) < options.tablet_count): tablets = get_all_tablets(options.cells, options.namespace) logging.info('Expecting %d tablets, found %d tablets', options.tablet_count, len(tablets)) start_time = time.time() - while time.time() - start_time < 300: + while time.time() - start_time < options.wait: for tablet in [t for t in tablets if t not in good_tablets]: _, success = vtctl_sandbox.execute_vtctl_command( ['ExecuteFetchAsDba', tablet, 'show databases'], - namespace=options.namespace) + namespace=options.namespace, timeout_s=1) if success: good_tablets.append(tablet) logging.info('%d of %d tablets healthy.', len(good_tablets), len(tablets)) @@ -62,6 +66,7 @@ def main(): break else: logging.warn('Timed out waiting for tablets to be ready.') + sys.exit(1) if __name__ == '__main__': From 1b10471743d4c86c9e8987ec7de31cc83144626d Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Mon, 6 Mar 2017 23:26:53 -0800 Subject: [PATCH 077/108] Exporting internal changes back to open-source. (#2627) NOTE: This is an automated export. Changes were already LGTM'd internally. BUG=35968999 --- go/vt/tabletserver/tabletenv/tabletenv.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/vt/tabletserver/tabletenv/tabletenv.go b/go/vt/tabletserver/tabletenv/tabletenv.go index 251dd87cf87..c399a78da9a 100644 --- a/go/vt/tabletserver/tabletenv/tabletenv.go +++ b/go/vt/tabletserver/tabletenv/tabletenv.go @@ -21,7 +21,7 @@ import ( var ( // MySQLStats shows the time histogram for operations spent on mysql side. - MySQLStats = stats.NewTimings("MySQL") + MySQLStats = stats.NewTimings("Mysql") // QueryStats shows the time histogram for each type of queries. QueryStats = stats.NewTimings("Queries") // QPSRates shows the qps of QueryStats. Sample every 5 seconds and keep samples for up to 15 mins. 
From 7c8fa8a1e83f3981eb4dbc13ea1eba1e3124ef2d Mon Sep 17 00:00:00 2001 From: Joshua Thompson Date: Tue, 7 Mar 2017 10:31:15 -0800 Subject: [PATCH 078/108] Replace logging.fatal with logging.error --- test/cluster/sandbox/initial_reparent.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/cluster/sandbox/initial_reparent.py b/test/cluster/sandbox/initial_reparent.py index 66415e31a48..4899f468201 100755 --- a/test/cluster/sandbox/initial_reparent.py +++ b/test/cluster/sandbox/initial_reparent.py @@ -27,7 +27,7 @@ def initial_reparent(keyspace, master_cell, num_shards, namespace, timeout_s): logging.info('Finding tablets to reparent to.') while len(master_tablets) < num_shards: if time.time() - start_time > timeout_s: - logging.fatal('Timed out waiting to find a replica tablet') + logging.error('Timed out waiting to find a replica tablet') return 1 for shard_name in sharding_utils.get_shard_names(num_shards): if shard_name in master_tablets: @@ -61,7 +61,7 @@ def initial_reparent(keyspace, master_cell, num_shards, namespace, timeout_s): if len(successfully_reparented) == num_shards: logging.info('Done with initial reparent.') return 0 - logging.fatal('Timed out waiting for initial reparent.') + logging.error('Timed out waiting for initial reparent.') return 1 From 1bd0d3c55f74db8bbfcedab24e0ea7e3028e6112 Mon Sep 17 00:00:00 2001 From: Joshua Thompson Date: Tue, 7 Mar 2017 14:27:23 -0800 Subject: [PATCH 079/108] Address comments. --- helm/vitess/templates/_helpers.tpl | 13 ++++++++----- helm/vitess/templates/_vttablet.tpl | 8 ++++---- test/cluster/sandbox/initial_reparent.py | 2 ++ 3 files changed, 14 insertions(+), 9 deletions(-) diff --git a/helm/vitess/templates/_helpers.tpl b/helm/vitess/templates/_helpers.tpl index b4d28ebf8a1..49d7185a642 100644 --- a/helm/vitess/templates/_helpers.tpl +++ b/helm/vitess/templates/_helpers.tpl @@ -14,14 +14,17 @@ {{- range . 
}}{{template "format-flags" .}}{{end -}} {{- end -}} -# Format a shard name, making sure it starts and ends with [A-Za-z0-9]. -{{- define "format-shard-name" -}} +# Clean labels, making sure it starts and ends with [A-Za-z0-9]. +# This is especially important for shard names, which can start or end with +# '-' (like -80 or 80-), which would be an invalid kubernetes label. +{{- define "clean-label" -}} +{{- $replaced_label := . | replace "_" "-"}} {{- if hasPrefix "-" . -}} -x{{.}} +x{{$replaced_label}} {{- else if hasSuffix "-" . -}} -{{.}}x +{{$replaced_label}}x {{- else -}} -{{.}} +{{$replaced_label}} {{- end -}} {{- end -}} diff --git a/helm/vitess/templates/_vttablet.tpl b/helm/vitess/templates/_vttablet.tpl index 39d2dd4ef02..620ca956e8e 100644 --- a/helm/vitess/templates/_vttablet.tpl +++ b/helm/vitess/templates/_vttablet.tpl @@ -176,9 +176,9 @@ volumes: {{- $tablet := index . 4 -}} {{- with $tablet.vttablet -}} {{- $0 := $.Values.vttablet -}} -{{- $cellClean := $cell.name | replace "_" "-" -}} -{{- $keyspaceClean := $keyspace.name | replace "_" "-" -}} -{{- $shardClean := include "format-shard-name" $shard.name -}} +{{- $cellClean := include "clean-label" $cell.name -}} +{{- $keyspaceClean := include "clean-label" $keyspace.name -}} +{{- $shardClean := include "clean-label" $shard.name -}} {{- $setName := printf "%s-%s-%s-%s" $cellClean $keyspaceClean $shardClean $tablet.type | lower -}} {{- $uid := "$(cat $VTDATAROOT/init/tablet-uid)" }} # vttablet StatefulSet @@ -224,7 +224,7 @@ spec: {{- $cell := index . 1 -}} {{- $keyspace := index . 2 -}} {{- $shard := index . 3 -}} -{{- $shardClean := include "format-shard-name" $shard.name -}} +{{- $shardClean := include "clean-label" $shard.name -}} {{- $tablet := index . 4 -}} {{- $uid := index . 
5 -}} {{- with $tablet.vttablet -}} diff --git a/test/cluster/sandbox/initial_reparent.py b/test/cluster/sandbox/initial_reparent.py index 4899f468201..bd049a9a328 100755 --- a/test/cluster/sandbox/initial_reparent.py +++ b/test/cluster/sandbox/initial_reparent.py @@ -29,6 +29,7 @@ def initial_reparent(keyspace, master_cell, num_shards, namespace, timeout_s): if time.time() - start_time > timeout_s: logging.error('Timed out waiting to find a replica tablet') return 1 + for shard_name in sharding_utils.get_shard_names(num_shards): if shard_name in master_tablets: continue @@ -61,6 +62,7 @@ def initial_reparent(keyspace, master_cell, num_shards, namespace, timeout_s): if len(successfully_reparented) == num_shards: logging.info('Done with initial reparent.') return 0 + logging.error('Timed out waiting for initial reparent.') return 1 From ba31fb506f673305d0e34940fe37347325a012fb Mon Sep 17 00:00:00 2001 From: Joshua Thompson Date: Tue, 7 Mar 2017 14:54:37 -0800 Subject: [PATCH 080/108] Fix vttablet UIDs generated for helm configs. --- test/cluster/sandbox/vitess_kubernetes_sandbox.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/test/cluster/sandbox/vitess_kubernetes_sandbox.py b/test/cluster/sandbox/vitess_kubernetes_sandbox.py index 502d81c13dd..3e2d3d571e6 100755 --- a/test/cluster/sandbox/vitess_kubernetes_sandbox.py +++ b/test/cluster/sandbox/vitess_kubernetes_sandbox.py @@ -185,12 +185,15 @@ def _generate_helm_values_config(self): keyspaces=copy.deepcopy(keyspaces), ) # Each tablet's UID must be unique, so increment the uidBase for tablets - # by the cell epsilon value to ensure uniqueness. This logic will go away - # once StatefulSet is available. + # by the cell epsilon value to ensure uniqueness. Also convert the UID to + # a string, or else the parser will attempt to parse UID as a float, which + # causes issues when UID's are large. This logic will go away once + # StatefulSet is available. 
for keyspace in cell_dict['keyspaces']: for shard in keyspace['shards']: for tablets in shard['tablets']: - tablets['uidBase'] += index * self.cell_epsilon + tablets['uidBase'] = str( + tablets['uidBase'] + index * self.cell_epsilon) yaml_values['topology']['cells'].append(cell_dict) if index == 0: From 635af5a3906e44d6165ab079c9761685b435880a Mon Sep 17 00:00:00 2001 From: Joshua Thompson Date: Tue, 7 Mar 2017 15:34:29 -0800 Subject: [PATCH 081/108] Only perform wait_for_mysql once, not per keyspace. --- .../sandbox/vitess_kubernetes_sandbox.py | 25 ++++++++++++------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/test/cluster/sandbox/vitess_kubernetes_sandbox.py b/test/cluster/sandbox/vitess_kubernetes_sandbox.py index 3e2d3d571e6..3d19e6c9986 100755 --- a/test/cluster/sandbox/vitess_kubernetes_sandbox.py +++ b/test/cluster/sandbox/vitess_kubernetes_sandbox.py @@ -215,24 +215,31 @@ def generate_helm_sandlet(self): helm_sandlet = sandlet.Sandlet('helm') helm_sandlet.components.add_component(kubernetes_components.HelmComponent( 'helm', self.name, self._generate_helm_values_config())) + + # Add a subprocess task to wait for all mysql instances to be healthy. + tablet_count = 0 + for keyspace in self.app_options.keyspaces: + tablet_count += (keyspace['shard_count'] * len(self.app_options.cells) * ( + keyspace['replica_count'] + keyspace['rdonly_count'])) + wait_for_mysql_subprocess = subprocess_component.Subprocess( + 'wait_for_mysql', self.name, 'wait_for_mysql.py', + self.log_dir, namespace=self.name, + cells=','.join(self.app_options.cells), + tablet_count=tablet_count) + wait_for_mysql_subprocess.dependencies = ['helm'] + helm_sandlet.components.add_component(wait_for_mysql_subprocess) + + # Add a subprocess task for each keyspace to perform the initial reparent. 
for keyspace in self.app_options.keyspaces: name = keyspace['name'] shard_count = keyspace['shard_count'] - wait_for_mysql_subprocess = subprocess_component.Subprocess( - 'wait_for_mysql_%s' % name, self.name, 'wait_for_mysql.py', - self.log_dir, namespace=self.name, - cells=','.join(self.app_options.cells), - tablet_count=(shard_count * len(self.app_options.cells) * ( - keyspace['replica_count'] + keyspace['rdonly_count']))) - wait_for_mysql_subprocess.dependencies = ['helm'] initial_reparent_subprocess = subprocess_component.Subprocess( - 'initial_reparent_%s' % name, self.name, + 'initial_reparent_%s_%d' % (name, shard_count), self.name, 'initial_reparent.py', self.log_dir, namespace=self.name, keyspace=name, shard_count=shard_count, master_cell=self.app_options.cells[0]) initial_reparent_subprocess.dependencies = [ wait_for_mysql_subprocess.name] - helm_sandlet.components.add_component(wait_for_mysql_subprocess) helm_sandlet.components.add_component(initial_reparent_subprocess) self.sandlets.add_component(helm_sandlet) From 5ade30bf909e6da143f17595379416afd86f6d9b Mon Sep 17 00:00:00 2001 From: Joshua Thompson Date: Tue, 7 Mar 2017 16:25:11 -0800 Subject: [PATCH 082/108] Remove unused sandbox test. 
--- test/cluster/sandbox/sandbox_utils_test.py | 17 ----------------- 1 file changed, 17 deletions(-) delete mode 100644 test/cluster/sandbox/sandbox_utils_test.py diff --git a/test/cluster/sandbox/sandbox_utils_test.py b/test/cluster/sandbox/sandbox_utils_test.py deleted file mode 100644 index 734e83bf8f7..00000000000 --- a/test/cluster/sandbox/sandbox_utils_test.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Tests for sandbox_utils.""" - -import unittest - -import sandbox_utils - - -class SandboxUtilsTest(unittest.TestCase): - - def test_fix_shard_name(self): - self.assertEquals(sandbox_utils.fix_shard_name('-80'), 'x80') - self.assertEquals(sandbox_utils.fix_shard_name('80-'), '80x') - self.assertEquals(sandbox_utils.fix_shard_name('40-80'), '40-80') - - -if __name__ == '__main__': - unittest.main() From a9e9713c222affb189c768029dc2a1920d7a83bf Mon Sep 17 00:00:00 2001 From: Michael Berlin Date: Tue, 7 Mar 2017 21:12:20 -0800 Subject: [PATCH 083/108] Suppress output when checking for python2 binaries. The check did print output like this: /usr/bin/python2 /usr/bin/pip2 If you did source dev.env in your .bashrc, you would see it in every new shell. 
--- dev.env | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dev.env b/dev.env index 1fecf227df4..e7f5379e19a 100644 --- a/dev.env +++ b/dev.env @@ -36,11 +36,11 @@ export PYTHONPATH=$(prepend_path $PYTHONPATH $VTTOP/test) export PYTHONPATH=$(prepend_path $PYTHONPATH $VTTOP/test/cluster/sandbox) # Ensure bootstrap and install_grpc use python2 on systems which default to python3 -command -v python2 && PYTHON=python2 || PYTHON=python +command -v python2 >/dev/null && PYTHON=python2 || PYTHON=python export PYTHON -command -v pip2 && PIP=pip2 || PIP=pip +command -v pip2 >/dev/null && PIP=pip2 || PIP=pip export PIP -command -v virtualenv2 && VIRTUALENV=virtualenv2 || VIRTUALENV=virtualenv +command -v virtualenv2 >/dev/null && VIRTUALENV=virtualenv2 || VIRTUALENV=virtualenv export VIRTUALENV selenium_dist=$VTROOT/dist/selenium From 1f82f9836641f8699d34e8440ee61779e2aadde8 Mon Sep 17 00:00:00 2001 From: thompsonja Date: Fri, 10 Mar 2017 10:45:37 -0800 Subject: [PATCH 084/108] Add charset/snapshot_file to vttest. (#2611) This helps for testing efforts that require restoring from a snapshot. Also, setting charset is already allowed via start_vt_processes, so this PR just exposes it through run_local_database. 
--- .../com/youtube/vitess/client/TestUtil.java | 6 +++--- py/vttest/local_database.py | 21 +++++++++++++------ py/vttest/mysql_db.py | 3 ++- py/vttest/mysql_db_mysqlctl.py | 5 +++-- py/vttest/run_local_database.py | 14 ++++++++----- 5 files changed, 32 insertions(+), 17 deletions(-) diff --git a/java/client/src/test/java/com/youtube/vitess/client/TestUtil.java b/java/client/src/test/java/com/youtube/vitess/client/TestUtil.java index 60e9db440b6..e292173f4a4 100644 --- a/java/client/src/test/java/com/youtube/vitess/client/TestUtil.java +++ b/java/client/src/test/java/com/youtube/vitess/client/TestUtil.java @@ -47,10 +47,10 @@ public static void setupTestEnv(TestEnv testEnv) throws Exception { continue; } try { - Type mapType = new TypeToken>() {}.getType(); - Map map = new Gson().fromJson(line, mapType); + Type mapType = new TypeToken>() {}.getType(); + Map map = new Gson().fromJson(line, mapType); testEnv.setPythonScriptProcess(p); - testEnv.setPort(map.get(System.getProperty(PROPERTY_KEY_CLIENT_TEST_PORT))); + testEnv.setPort(((Double)map.get(System.getProperty(PROPERTY_KEY_CLIENT_TEST_PORT))).intValue()); return; } catch (JsonSyntaxException e) { logger.error("JsonSyntaxException parsing setup command output: " + line, e); diff --git a/py/vttest/local_database.py b/py/vttest/local_database.py index 882c5e5ea2a..62c9236200d 100644 --- a/py/vttest/local_database.py +++ b/py/vttest/local_database.py @@ -21,7 +21,9 @@ def __init__(self, web_dir=None, default_schema_dir=None, extra_my_cnf=None, - web_dir2=None): + web_dir2=None, + snapshot_file=None, + charset='utf8'): """Initializes an object of this class. Args: @@ -41,6 +43,8 @@ def __init__(self, extra_my_cnf: additional cnf file to use for the EXTRA_MY_CNF var. web_dir2: see the documentation for the corresponding command line flag in run_local_database.py + snapshot_file: A MySQL DB snapshot file. + charset: MySQL charset. 
""" self.topology = topology @@ -51,17 +55,20 @@ def __init__(self, self.default_schema_dir = default_schema_dir self.extra_my_cnf = extra_my_cnf self.web_dir2 = web_dir2 + self.snapshot_file = snapshot_file + self.charset = charset def setup(self): """Create a MySQL instance and all Vitess processes.""" mysql_port = environment.get_port('mysql') self.directory = environment.get_test_directory() self.mysql_db = environment.mysql_db_class( - self.directory, mysql_port, self.extra_my_cnf) + self.directory, mysql_port, self.extra_my_cnf, self.snapshot_file) self.mysql_db.setup() - self.create_databases() - self.load_schema() + if not self.snapshot_file: + self.create_databases() + self.load_schema() if self.init_data_options is not None: self.rng = random.Random(self.init_data_options.rng_seed) self.populate_with_random_data() @@ -70,7 +77,7 @@ def setup(self): vt_processes.start_vt_processes(self.directory, self.topology, self.mysql_db, self.schema_dir, - web_dir=self.web_dir, + charset=self.charset, web_dir=self.web_dir, web_dir2=self.web_dir2) def teardown(self): @@ -105,7 +112,9 @@ def config(self): result = { 'port': vt_processes.vtcombo_process.port, - } + 'socket': self.mysql_db.unix_socket(), + } + if environment.get_protocol() == 'grpc': result['grpc_port'] = vt_processes.vtcombo_process.grpc_port return result diff --git a/py/vttest/mysql_db.py b/py/vttest/mysql_db.py index e830768ce55..57aa26bac72 100644 --- a/py/vttest/mysql_db.py +++ b/py/vttest/mysql_db.py @@ -9,10 +9,11 @@ class MySqlDB(object): """A MySqlDB contains basic info about a MySQL instance.""" - def __init__(self, directory, port, extra_my_cnf=None): + def __init__(self, directory, port, extra_my_cnf=None, snapshot_file=None): self._directory = directory self._port = port self._extra_my_cnf = extra_my_cnf + self._snapshot_file = snapshot_file def setup(self, port): """Starts the MySQL database.""" diff --git a/py/vttest/mysql_db_mysqlctl.py b/py/vttest/mysql_db_mysqlctl.py index 
379511dbc95..8d15386705d 100644 --- a/py/vttest/mysql_db_mysqlctl.py +++ b/py/vttest/mysql_db_mysqlctl.py @@ -18,8 +18,9 @@ class MySqlDBMysqlctl(mysql_db.MySqlDB): """Contains data and methods to manage a MySQL instance using mysqlctl.""" - def __init__(self, directory, port, extra_my_cnf): - super(MySqlDBMysqlctl, self).__init__(directory, port, extra_my_cnf) + def __init__(self, directory, port, extra_my_cnf, snapshot_file=None): + super(MySqlDBMysqlctl, self).__init__( + directory, port, extra_my_cnf, snapshot_file) def setup(self): cmd = [ diff --git a/py/vttest/run_local_database.py b/py/vttest/run_local_database.py index a3901bd76d0..aa4a91a1cf2 100755 --- a/py/vttest/run_local_database.py +++ b/py/vttest/run_local_database.py @@ -34,18 +34,17 @@ import os import sys -from vtdb import prefer_vtroot_imports # pylint: disable=unused-import from google.protobuf import text_format +from vtproto import vttest_pb2 +from vtdb import prefer_vtroot_imports # pylint: disable=unused-import from vttest import environment +from vttest import init_data_options from vttest import local_database from vttest import mysql_flavor -from vttest import init_data_options from vttest import sharding_utils -from vtproto import vttest_pb2 - def main(cmdline_options): topology = vttest_pb2.VTTestTopology() @@ -96,7 +95,9 @@ def main(cmdline_options): web_dir=cmdline_options.web_dir, web_dir2=cmdline_options.web_dir2, default_schema_dir=cmdline_options.default_schema_dir, - extra_my_cnf=extra_my_cnf) as local_db: + extra_my_cnf=extra_my_cnf, + charset=cmdline_options.charset, + snapshot_file=cmdline_options.snapshot_file) as local_db: print json.dumps(local_db.config()) sys.stdout.flush() try: @@ -186,6 +187,9 @@ def main(cmdline_options): help='Replica tablets per shard (includes master)') parser.add_option('--rdonly_count', type='int', default=1, help='Rdonly tablets per shard') + parser.add_option('--charset', default='utf8', help='MySQL charset') + parser.add_option( + 
'--snapshot_file', default=None, help='A MySQL DB snapshot file') (options, args) = parser.parse_args() if options.verbose: logging.getLogger().setLevel(logging.DEBUG) From 64eaa20543a51a12a3eba16f4b5499b8418b5d50 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Tue, 28 Feb 2017 16:13:21 -0800 Subject: [PATCH 085/108] Plumbing in the schema engine for binlog server. --- go/vt/binlog/binlog_streamer.go | 7 ++-- go/vt/binlog/binlog_streamer_rbr_test.go | 2 +- go/vt/binlog/binlog_streamer_test.go | 42 +++++++++++------------ go/vt/binlog/event_streamer.go | 5 +-- go/vt/binlog/updatestreamctl.go | 6 ++-- go/vt/tabletserver/replication_watcher.go | 2 +- go/vt/tabletserver/tabletserver.go | 9 +++-- 7 files changed, 41 insertions(+), 32 deletions(-) diff --git a/go/vt/binlog/binlog_streamer.go b/go/vt/binlog/binlog_streamer.go index 8c9d19758bb..98a2243dad3 100644 --- a/go/vt/binlog/binlog_streamer.go +++ b/go/vt/binlog/binlog_streamer.go @@ -16,6 +16,7 @@ import ( "github.com/youtube/vitess/go/sqldb" "github.com/youtube/vitess/go/stats" "github.com/youtube/vitess/go/vt/mysqlctl" + "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" binlogdatapb "github.com/youtube/vitess/go/vt/proto/binlogdata" querypb "github.com/youtube/vitess/go/vt/proto/query" @@ -65,9 +66,10 @@ func getStatementCategory(sql string) binlogdatapb.BinlogTransaction_Statement_C // A Streamer should only be used once. To start another stream, call // NewStreamer() again. type Streamer struct { - // dbname and mysqld are set at creation and immutable. + // The following fields are set at creation and immutable. dbname string mysqld mysqlctl.MysqlDaemon + se *schema.Engine clientCharset *binlogdatapb.Charset startPos replication.Position @@ -86,10 +88,11 @@ type Streamer struct { // startPos is the position to start streaming at. Incompatible with timestamp. // timestamp is the timestamp to start streaming at. Incompatible with startPos.
// sendTransaction is called each time a transaction is committed or rolled back. -func NewStreamer(dbname string, mysqld mysqlctl.MysqlDaemon, clientCharset *binlogdatapb.Charset, startPos replication.Position, timestamp int64, sendTransaction sendTransactionFunc) *Streamer { +func NewStreamer(dbname string, mysqld mysqlctl.MysqlDaemon, se *schema.Engine, clientCharset *binlogdatapb.Charset, startPos replication.Position, timestamp int64, sendTransaction sendTransactionFunc) *Streamer { return &Streamer{ dbname: dbname, mysqld: mysqld, + se: se, clientCharset: clientCharset, startPos: startPos, timestamp: timestamp, diff --git a/go/vt/binlog/binlog_streamer_rbr_test.go b/go/vt/binlog/binlog_streamer_rbr_test.go index f810fa610c3..b348ae55d98 100644 --- a/go/vt/binlog/binlog_streamer_rbr_test.go +++ b/go/vt/binlog/binlog_streamer_rbr_test.go @@ -100,7 +100,7 @@ func TestStreamerParseRBRUpdateEvent(t *testing.T) { got = append(got, *trans) return nil } - bls := NewStreamer("vt_test_keyspace", nil, nil, replication.Position{}, 0, sendTransaction) + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events) diff --git a/go/vt/binlog/binlog_streamer_test.go b/go/vt/binlog/binlog_streamer_test.go index 1b7dedcfe5a..4b1d5715b7e 100644 --- a/go/vt/binlog/binlog_streamer_test.go +++ b/go/vt/binlog/binlog_streamer_test.go @@ -70,7 +70,7 @@ func TestStreamerParseEventsXID(t *testing.T) { got = append(got, *trans) return nil } - bls := NewStreamer("vt_test_keyspace", nil, nil, replication.Position{}, 0, sendTransaction) + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events) @@ -128,7 +128,7 @@ func TestStreamerParseEventsCommit(t *testing.T) { got = append(got, *trans) return nil } - bls := 
NewStreamer("vt_test_keyspace", nil, nil, replication.Position{}, 0, sendTransaction) + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events) @@ -147,7 +147,7 @@ func TestStreamerStop(t *testing.T) { sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { return nil } - bls := NewStreamer("vt_test_keyspace", nil, nil, replication.Position{}, 0, sendTransaction) + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) // Start parseEvents(), but don't send it anything, so it just waits. ctx, cancel := context.WithCancel(context.Background()) @@ -192,7 +192,7 @@ func TestStreamerParseEventsClientEOF(t *testing.T) { sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { return io.EOF } - bls := NewStreamer("vt_test_keyspace", nil, nil, replication.Position{}, 0, sendTransaction) + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events) @@ -210,7 +210,7 @@ func TestStreamerParseEventsServerEOF(t *testing.T) { sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { return nil } - bls := NewStreamer("vt_test_keyspace", nil, nil, replication.Position{}, 0, sendTransaction) + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) _, err := bls.parseEvents(context.Background(), events) if err != want { @@ -240,7 +240,7 @@ func TestStreamerParseEventsSendErrorXID(t *testing.T) { sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { return fmt.Errorf("foobar") } - bls := NewStreamer("vt_test_keyspace", nil, nil, replication.Position{}, 0, sendTransaction) + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) go 
sendTestEvents(events, input) @@ -278,7 +278,7 @@ func TestStreamerParseEventsSendErrorCommit(t *testing.T) { sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { return fmt.Errorf("foobar") } - bls := NewStreamer("vt_test_keyspace", nil, nil, replication.Position{}, 0, sendTransaction) + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events) @@ -311,7 +311,7 @@ func TestStreamerParseEventsInvalid(t *testing.T) { sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { return nil } - bls := NewStreamer("vt_test_keyspace", nil, nil, replication.Position{}, 0, sendTransaction) + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events) @@ -346,7 +346,7 @@ func TestStreamerParseEventsInvalidFormat(t *testing.T) { sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { return nil } - bls := NewStreamer("vt_test_keyspace", nil, nil, replication.Position{}, 0, sendTransaction) + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events) @@ -381,7 +381,7 @@ func TestStreamerParseEventsNoFormat(t *testing.T) { sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { return nil } - bls := NewStreamer("vt_test_keyspace", nil, nil, replication.Position{}, 0, sendTransaction) + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events) @@ -414,7 +414,7 @@ func TestStreamerParseEventsInvalidQuery(t *testing.T) { sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { return nil } - bls := 
NewStreamer("vt_test_keyspace", nil, nil, replication.Position{}, 0, sendTransaction) + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events) @@ -495,7 +495,7 @@ func TestStreamerParseEventsRollback(t *testing.T) { got = append(got, *trans) return nil } - bls := NewStreamer("vt_test_keyspace", nil, nil, replication.Position{}, 0, sendTransaction) + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events); err != ErrServerEOF { @@ -560,7 +560,7 @@ func TestStreamerParseEventsDMLWithoutBegin(t *testing.T) { got = append(got, *trans) return nil } - bls := NewStreamer("vt_test_keyspace", nil, nil, replication.Position{}, 0, sendTransaction) + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events); err != ErrServerEOF { @@ -628,7 +628,7 @@ func TestStreamerParseEventsBeginWithoutCommit(t *testing.T) { got = append(got, *trans) return nil } - bls := NewStreamer("vt_test_keyspace", nil, nil, replication.Position{}, 0, sendTransaction) + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events); err != ErrServerEOF { @@ -685,7 +685,7 @@ func TestStreamerParseEventsSetInsertID(t *testing.T) { got = append(got, *trans) return nil } - bls := NewStreamer("vt_test_keyspace", nil, nil, replication.Position{}, 0, sendTransaction) + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events); err != ErrServerEOF { @@ -720,7 +720,7 
@@ func TestStreamerParseEventsInvalidIntVar(t *testing.T) { sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { return nil } - bls := NewStreamer("vt_test_keyspace", nil, nil, replication.Position{}, 0, sendTransaction) + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events) @@ -779,7 +779,7 @@ func TestStreamerParseEventsOtherDB(t *testing.T) { got = append(got, *trans) return nil } - bls := NewStreamer("vt_test_keyspace", nil, nil, replication.Position{}, 0, sendTransaction) + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events); err != ErrServerEOF { @@ -837,7 +837,7 @@ func TestStreamerParseEventsOtherDBBegin(t *testing.T) { got = append(got, *trans) return nil } - bls := NewStreamer("vt_test_keyspace", nil, nil, replication.Position{}, 0, sendTransaction) + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events); err != ErrServerEOF { @@ -872,7 +872,7 @@ func TestStreamerParseEventsBeginAgain(t *testing.T) { sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { return nil } - bls := NewStreamer("vt_test_keyspace", nil, nil, replication.Position{}, 0, sendTransaction) + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) before := binlogStreamerErrors.Counts()["ParseEvents"] go sendTestEvents(events, input) @@ -937,7 +937,7 @@ func TestStreamerParseEventsMariadbBeginGTID(t *testing.T) { got = append(got, *trans) return nil } - bls := NewStreamer("vt_test_keyspace", nil, nil, replication.Position{}, 0, sendTransaction) + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, 
replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events); err != ErrServerEOF { @@ -992,7 +992,7 @@ func TestStreamerParseEventsMariadbStandaloneGTID(t *testing.T) { got = append(got, *trans) return nil } - bls := NewStreamer("vt_test_keyspace", nil, nil, replication.Position{}, 0, sendTransaction) + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events); err != ErrServerEOF { diff --git a/go/vt/binlog/event_streamer.go b/go/vt/binlog/event_streamer.go index c16b643dc77..e8313855abf 100644 --- a/go/vt/binlog/event_streamer.go +++ b/go/vt/binlog/event_streamer.go @@ -17,6 +17,7 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/mysqlctl" "github.com/youtube/vitess/go/vt/sqlparser" + "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" binlogdatapb "github.com/youtube/vitess/go/vt/proto/binlogdata" querypb "github.com/youtube/vitess/go/vt/proto/query" @@ -39,11 +40,11 @@ type EventStreamer struct { } // NewEventStreamer returns a new EventStreamer on top of a Streamer -func NewEventStreamer(dbname string, mysqld mysqlctl.MysqlDaemon, startPos replication.Position, timestamp int64, sendEvent sendEventFunc) *EventStreamer { +func NewEventStreamer(dbname string, mysqld mysqlctl.MysqlDaemon, se *schema.Engine, startPos replication.Position, timestamp int64, sendEvent sendEventFunc) *EventStreamer { evs := &EventStreamer{ sendEvent: sendEvent, } - evs.bls = NewStreamer(dbname, mysqld, nil, startPos, timestamp, evs.transactionToEvent) + evs.bls = NewStreamer(dbname, mysqld, se, nil, startPos, timestamp, evs.transactionToEvent) return evs } diff --git a/go/vt/binlog/updatestreamctl.go b/go/vt/binlog/updatestreamctl.go index fcd44185b77..77dc76a1dfc 100644 --- a/go/vt/binlog/updatestreamctl.go +++ 
b/go/vt/binlog/updatestreamctl.go @@ -16,6 +16,7 @@ import ( "github.com/youtube/vitess/go/sync2" "github.com/youtube/vitess/go/tb" "github.com/youtube/vitess/go/vt/mysqlctl" + "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" binlogdatapb "github.com/youtube/vitess/go/vt/proto/binlogdata" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" @@ -95,6 +96,7 @@ type UpdateStreamImpl struct { mysqld mysqlctl.MysqlDaemon dbname string + se *schema.Engine // actionLock protects the following variables actionLock sync.Mutex @@ -242,7 +244,7 @@ func (updateStream *UpdateStreamImpl) StreamKeyRange(ctx context.Context, positi keyrangeTransactions.Add(1) return callback(reply) }) - bls := NewStreamer(updateStream.dbname, updateStream.mysqld, charset, pos, 0, f) + bls := NewStreamer(updateStream.dbname, updateStream.mysqld, updateStream.se, charset, pos, 0, f) streamCtx, cancel := context.WithCancel(ctx) i := updateStream.streams.Add(cancel) @@ -278,7 +280,7 @@ func (updateStream *UpdateStreamImpl) StreamTables(ctx context.Context, position tablesTransactions.Add(1) return callback(reply) }) - bls := NewStreamer(updateStream.dbname, updateStream.mysqld, charset, pos, 0, f) + bls := NewStreamer(updateStream.dbname, updateStream.mysqld, updateStream.se, charset, pos, 0, f) streamCtx, cancel := context.WithCancel(ctx) i := updateStream.streams.Add(cancel) diff --git a/go/vt/tabletserver/replication_watcher.go b/go/vt/tabletserver/replication_watcher.go index 0a4709b412e..f3bb7b1567a 100644 --- a/go/vt/tabletserver/replication_watcher.go +++ b/go/vt/tabletserver/replication_watcher.go @@ -92,7 +92,7 @@ func (rpw *ReplicationWatcher) Process(ctx context.Context, dbconfigs dbconfigs. 
defer rpw.wg.Done() for { log.Infof("Starting a binlog Streamer from current replication position to monitor binlogs") - streamer := binlog.NewStreamer(dbconfigs.App.DbName, mysqld, nil /*clientCharset*/, replication.Position{}, 0 /*timestamp*/, func(trans *binlogdatapb.BinlogTransaction) error { + streamer := binlog.NewStreamer(dbconfigs.App.DbName, mysqld, rpw.se, nil /*clientCharset*/, replication.Position{}, 0 /*timestamp*/, func(trans *binlogdatapb.BinlogTransaction) error { // Save the event token. rpw.mu.Lock() rpw.eventToken = trans.EventToken diff --git a/go/vt/tabletserver/tabletserver.go b/go/vt/tabletserver/tabletserver.go index 5d6c8eaf229..8ac9b338201 100644 --- a/go/vt/tabletserver/tabletserver.go +++ b/go/vt/tabletserver/tabletserver.go @@ -164,8 +164,8 @@ func NewTabletServerWithNilTopoServer(config tabletenv.TabletConfig) *TabletServ return NewTabletServer(config, topo.Server{}) } -// NewTabletServer creates an instance of TabletServer. Only one instance -// of TabletServer can be created per process. +// NewTabletServer creates an instance of TabletServer. Only the first +// instance of TabletServer will expose its state variables. func NewTabletServer(config tabletenv.TabletConfig, topoServer topo.Server) *TabletServer { tsv := &TabletServer{ QueryTimeout: sync2.NewAtomicDuration(time.Duration(config.QueryTimeout * 1e9)), @@ -183,6 +183,9 @@ func NewTabletServer(config tabletenv.TabletConfig, topoServer topo.Server) *Tab tsv.messager = NewMessagerEngine(tsv, config) tsv.watcher = NewReplicationWatcher(tsv.se, config) tsv.updateStreamList = &binlog.StreamList{} + // FIXME(alainjobart) could we move this to the Register method below? + // So that vtcombo doesn't even call it once, on the first tablet. + // And we can remove the tsOnce variable. 
tsOnce.Do(func() { stats.Publish("TabletState", stats.IntFunc(func() int64 { tsv.mu.Lock() @@ -1484,7 +1487,7 @@ func (tsv *TabletServer) UpdateStream(ctx context.Context, target *querypb.Targe } defer tsv.endRequest(false) - s := binlog.NewEventStreamer(tsv.dbconfigs.App.DbName, tsv.mysqld, p, timestamp, callback) + s := binlog.NewEventStreamer(tsv.dbconfigs.App.DbName, tsv.mysqld, tsv.se, p, timestamp, callback) // Create a cancelable wrapping context. streamCtx, streamCancel := context.WithCancel(ctx) From 49ff3a678dd98c89d87ffdb82a58632bc6148666 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Wed, 1 Mar 2017 07:20:46 -0800 Subject: [PATCH 086/108] Better support for more RBR types. * Making RBR TableMap simpler to parse. * Fixing RBR support for integer types. * Adding metadata parsing. Supporting Varchar. The metadata field is needs for varchars, as it contains the maximum length of the string, which drives using one or two bytes for actual length. * Adding support for date, time, datetime. * Adding Bit and xxx2 time types. --- go/mysqlconn/replication/binlog_event.go | 32 +- .../replication/binlog_event_common.go | 415 ++++++++++++++++-- .../replication/binlog_event_common_test.go | 298 +++++++++++++ go/mysqlconn/replication/binlog_event_make.go | 31 +- .../replication/binlog_event_make_test.go | 55 ++- go/mysqlconn/replication/constants.go | 12 + go/mysqlconn/replication_test.go | 14 +- go/vt/binlog/binlog_streamer.go | 379 +++++++++++++++- go/vt/binlog/binlog_streamer_rbr_test.go | 321 +++++++++++++- .../engines/schema/schema_engine.go | 20 + 10 files changed, 1455 insertions(+), 122 deletions(-) diff --git a/go/mysqlconn/replication/binlog_event.go b/go/mysqlconn/replication/binlog_event.go index 82d740bfc39..af89121a7cf 100644 --- a/go/mysqlconn/replication/binlog_event.go +++ b/go/mysqlconn/replication/binlog_event.go @@ -154,16 +154,28 @@ func (q Query) String() string { // TableMap contains data from a TABLE_MAP_EVENT. 
type TableMap struct { - Flags uint16 + // Flags is the table's flags. + Flags uint16 + + // Database is the database name. Database string - Name string - Columns []TableMapColumn -} -// TableMapColumn describes a table column inside a TABLE_MAP_EVENT. -type TableMapColumn struct { - Type byte - CanBeNull bool + // Name is the name of the table. + Name string + + // Types is an array of MySQL types for the fields. + Types []byte + + // CanBeNull's bits are set if the column can be NULL. + CanBeNull Bitmap + + // Metadata is an array of uint16, one per column. + // It contains a few extra information about each column, + // that is dependent on the type. + // - If the metadata is not present, this is zero. + // - If the metadata is one byte, only the lower 8 bits are used. + // - If the metadata is two bytes, all 16 bits are used. + Metadata []uint16 } // Rows contains data from a {WRITE,UPDATE,DELETE}_ROWS_EVENT. @@ -179,10 +191,10 @@ type Rows struct { // DataColumns describes which columns are included. It is // a bitmap indexed by the TableMap list of columns. - // Set for WRITE and UPDATE + // Set for WRITE and UPDATE. DataColumns Bitmap - // Rows is an array of UpdateRow in the event. + // Rows is an array of Row in the event. Rows []Row } diff --git a/go/mysqlconn/replication/binlog_event_common.go b/go/mysqlconn/replication/binlog_event_common.go index c420276cca0..9ed3b7a11bc 100644 --- a/go/mysqlconn/replication/binlog_event_common.go +++ b/go/mysqlconn/replication/binlog_event_common.go @@ -348,77 +348,392 @@ func (ev binlogEvent) TableMap(f BinlogFormat) (*TableMap, error) { // FIXME(alainjobart) this is varlength encoded. columnCount := int(data[pos]) + pos++ - result.Columns = make([]TableMapColumn, columnCount) - for i := 0; i < columnCount; i++ { - result.Columns[i].Type = data[pos+1+i] - } - pos += 1 + columnCount + result.Types = data[pos : pos+columnCount] + pos += columnCount // FIXME(alainjobart) this is a var-len-string. 
- // These are type-specific meta-data per field. Not sure what's in - // there. l = int(data[pos]) - pos += 1 + l + pos++ - // A bit array that says if each colum can be NULL. - nullBitmap, _ := newBitmap(data, pos, columnCount) - for i := 0; i < columnCount; i++ { - result.Columns[i].CanBeNull = nullBitmap.Bit(i) + // Allocate and parse / copy Metadata. + result.Metadata = make([]uint16, columnCount) + expectedEnd := pos + l + for c := 0; c < columnCount; c++ { + var err error + result.Metadata[c], pos, err = metadataRead(data, pos, result.Types[c]) + if err != nil { + return nil, err + } + } + if pos != expectedEnd { + return nil, fmt.Errorf("unexpected metadata end: got %v was expecting %v (data=%v)", pos, expectedEnd, data) } + // A bit array that says if each colum can be NULL. + result.CanBeNull, _ = newBitmap(data, pos, columnCount) + return result, nil } -// cellLength returns the new position after the field with the given type is read. -func cellLength(data []byte, pos int, tmc *TableMapColumn) (int, error) { - switch tmc.Type { - case TypeTiny: +// metadataLength returns how many bytes are used for metadata, based on a type. +func metadataLength(typ byte) int { + switch typ { + case TypeDecimal, TypeTiny, TypeShort, TypeLong, TypeNull, TypeTimestamp, TypeLongLong, TypeInt24, TypeDate, TypeTime, TypeDateTime, TypeYear, TypeNewDate, TypeVarString: + // No data here. + return 0 + + case TypeFloat, TypeDouble, TypeTimestamp2, TypeDateTime2, TypeTime2, TypeJSON, TypeTinyBlob, TypeMediumBlob, TypeLongBlob, TypeBlob, TypeGeometry: + // One byte. + return 1 + + case TypeNewDecimal, TypeEnum, TypeSet, TypeString: + // Two bytes, Big Endian because of crazy encoding. + return 2 + + case TypeVarchar, TypeBit: + // Two bytes, Little Endian + return 2 + + default: + // Unknown type. This is used in tests only, so panic. 
+ panic(fmt.Errorf("metadataLength: unhandled data type: %v", typ)) + } +} + +// metadataTotalLength returns the total size of the metadata for an +// array of types. +func metadataTotalLength(types []byte) int { + sum := 0 + for _, t := range types { + sum += metadataLength(t) + } + return sum +} + +// metadataRead reads a single value from the metadata string. +func metadataRead(data []byte, pos int, typ byte) (uint16, int, error) { + switch typ { + + case TypeDecimal, TypeTiny, TypeShort, TypeLong, TypeNull, TypeTimestamp, TypeLongLong, TypeInt24, TypeDate, TypeTime, TypeDateTime, TypeYear, TypeNewDate, TypeVarString: + // No data here. + return 0, pos, nil + + case TypeFloat, TypeDouble, TypeTimestamp2, TypeDateTime2, TypeTime2, TypeJSON, TypeTinyBlob, TypeMediumBlob, TypeLongBlob, TypeBlob, TypeGeometry: + // One byte. + return uint16(data[pos]), pos + 1, nil + + case TypeNewDecimal, TypeEnum, TypeSet, TypeString: + // Two bytes, Big Endian because of crazy encoding. + return uint16(data[pos])<<8 + uint16(data[pos+1]), pos + 2, nil + + case TypeVarchar, TypeBit: + // Two bytes, Little Endian + return uint16(data[pos]) + uint16(data[pos+1])<<8, pos + 2, nil + + default: + // Unknown types, we can't go on. + return 0, 0, fmt.Errorf("metadataRead: unhandled data type: %v", typ) + } +} + +// metadataWrite writes a single value into the metadata string. +func metadataWrite(data []byte, pos int, typ byte, value uint16) int { + switch typ { + + case TypeDecimal, TypeTiny, TypeShort, TypeLong, TypeNull, TypeTimestamp, TypeLongLong, TypeInt24, TypeDate, TypeTime, TypeDateTime, TypeYear, TypeNewDate, TypeVarString: + // No data here. + return pos + + case TypeFloat, TypeDouble, TypeTimestamp2, TypeDateTime2, TypeTime2, TypeJSON, TypeTinyBlob, TypeMediumBlob, TypeLongBlob, TypeBlob, TypeGeometry: + // One byte. + data[pos] = byte(value) + return pos + 1 + + case TypeNewDecimal, TypeEnum, TypeSet, TypeString: + // Two bytes, Big Endian because of crazy encoding. 
+ data[pos] = byte(value >> 8) + data[pos+1] = byte(value) + return pos + 2 + + case TypeVarchar, TypeBit: + // Two bytes, Little Endian + data[pos] = byte(value) + data[pos+1] = byte(value >> 8) + return pos + 2 + + default: + // Unknown type. This is used in tests only, so panic. + panic(fmt.Errorf("metadataRead: unhandled data type: %v", typ)) + } +} + +// cellLength returns the new position after the field with the given +// type is read. +func cellLength(data []byte, pos int, typ byte, metadata uint16) (int, error) { + switch typ { + case TypeNull: + return 0, nil + case TypeTiny, TypeYear: return 1, nil - case TypeShort, TypeYear: + case TypeShort: return 2, nil - case TypeLong, TypeInt24: + case TypeInt24: + return 3, nil + case TypeLong, TypeTimestamp: return 4, nil case TypeLongLong: return 8, nil - case TypeTimestamp, TypeDate, TypeTime, TypeDateTime: - // first byte has the length. - l := int(data[pos]) - return 1 + l, nil + case TypeDate, TypeNewDate: + return 3, nil + case TypeTime: + return 4, nil + case TypeDateTime: + return 8, nil case TypeVarchar: - // Length is encoded in 2 bytes. - l := int(uint64(data[pos]) | - uint64(data[pos+1])<<8) - return 2 + l, nil + // Length is encoded in 1 or 2 bytes. + if metadata > 255 { + l := int(uint64(data[pos]) | + uint64(data[pos+1])<<8) + return l + 2, nil + } + l := int(data[pos]) + return l + 1, nil + case TypeBit: + // bitmap length is in metadata, as: + // upper 8 bits: bytes length + // lower 8 bits: bit length + nbits := ((metadata >> 8) * 8) + (metadata & 0xFF) + return (int(nbits) + 7) / 8, nil + case TypeTimestamp2: + // metadata has number of decimals. One byte encodes + // two decimals. + return 4 + (int(metadata)+1)/2, nil + case TypeDateTime2: + // metadata has number of decimals. One byte encodes + // two decimals. + return 5 + (int(metadata)+1)/2, nil + case TypeTime2: + // metadata has number of decimals. One byte encodes + // two decimals. 
+ return 3 + (int(metadata)+1)/2, nil + default: - return 0, fmt.Errorf("Unsupported type %v (data: %v pos: %v)", tmc.Type, data, pos) + return 0, fmt.Errorf("Unsupported type %v (data: %v pos: %v)", typ, data, pos) } } -// FIXME(alainjobart) are the ints signed? It seems Tiny is unsigned, -// but the others are. -func cellData(data []byte, pos int, tmc *TableMapColumn) (string, int, error) { - switch tmc.Type { +// cellData returns the data for a cell as a string. This is meant to +// be used in tests only, as it is missing the type flags to interpret +// the data correctly. +func cellData(data []byte, pos int, typ byte, metadata uint16) (string, int, error) { + switch typ { case TypeTiny: return fmt.Sprintf("%v", data[pos]), 1, nil - case TypeShort, TypeYear: + case TypeYear: + return fmt.Sprintf("%v", 1900+int(data[pos])), 1, nil + case TypeShort: val := binary.LittleEndian.Uint16(data[pos : pos+2]) return fmt.Sprintf("%v", val), 2, nil - case TypeLong, TypeInt24: + case TypeInt24: + val := uint32(data[pos]) + + uint32(data[pos+1])<<8 + + uint32(data[pos+2])<<16 + return fmt.Sprintf("%v", val), 3, nil + case TypeLong, TypeTimestamp: val := binary.LittleEndian.Uint32(data[pos : pos+4]) return fmt.Sprintf("%v", val), 4, nil case TypeLongLong: val := binary.LittleEndian.Uint64(data[pos : pos+8]) return fmt.Sprintf("%v", val), 8, nil - case TypeTimestamp, TypeDate, TypeTime, TypeDateTime: - panic(fmt.Errorf("Not yet implemented type %v", tmc.Type)) + case TypeDate, TypeNewDate: + val := uint32(data[pos]) + + uint32(data[pos+1])<<8 + + uint32(data[pos+2])<<16 + day := val & 31 + month := val >> 5 & 15 + year := val >> 9 + return fmt.Sprintf("%04d-%02d-%02d", year, month, day), 3, nil + case TypeTime: + val := binary.LittleEndian.Uint32(data[pos : pos+4]) + hour := val / 10000 + minute := (val % 10000) / 100 + second := val % 100 + return fmt.Sprintf("%02d:%02d:%02d", hour, minute, second), 4, nil + case TypeDateTime: + val := binary.LittleEndian.Uint64(data[pos : 
pos+8]) + d := val / 1000000 + t := val % 1000000 + year := d / 10000 + month := (d % 10000) / 100 + day := d % 100 + hour := t / 10000 + minute := (t % 10000) / 100 + second := t % 100 + return fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, minute, second), 8, nil case TypeVarchar: - // Varchar length is two bytes here. - l := int(uint64(data[pos]) | - uint64(data[pos+1])<<8) - return string(data[pos+2 : pos+2+l]), 2 + l, nil + // Length is encoded in 1 or 2 bytes. + if metadata > 255 { + l := int(uint64(data[pos]) | + uint64(data[pos+1])<<8) + return string(data[pos+2 : pos+2+l]), l + 2, nil + } + l := int(data[pos]) + return string(data[pos+1 : pos+1+l]), l + 1, nil + case TypeBit: + nbits := ((metadata >> 8) * 8) + (metadata & 0xFF) + l := (int(nbits) + 7) / 8 + var buf bytes.Buffer + for i := 0; i < l; i++ { + buf.WriteString(fmt.Sprintf("%08b", data[pos+i])) + } + return buf.String(), l, nil + case TypeTimestamp2: + second := binary.LittleEndian.Uint32(data[pos : pos+4]) + switch metadata { + case 1: + decimals := int(data[pos+4]) + return fmt.Sprintf("%v.%01d", second, decimals), 5, nil + case 2: + decimals := int(data[pos+4]) + return fmt.Sprintf("%v.%02d", second, decimals), 5, nil + case 3: + decimals := int(data[pos+4]) + + int(data[pos+5])<<8 + return fmt.Sprintf("%v.%03d", second, decimals), 6, nil + case 4: + decimals := int(data[pos+4]) + + int(data[pos+5])<<8 + return fmt.Sprintf("%v.%04d", second, decimals), 6, nil + case 5: + decimals := int(data[pos+4]) + + int(data[pos+5])<<8 + + int(data[pos+6])<<16 + return fmt.Sprintf("%v.%05d", second, decimals), 7, nil + case 6: + decimals := int(data[pos+4]) + + int(data[pos+5])<<8 + + int(data[pos+6])<<16 + return fmt.Sprintf("%v.%.6d", second, decimals), 7, nil + } + return fmt.Sprintf("%v", second), 4, nil + case TypeDateTime2: + ymdhms := (uint64(data[pos]) | + uint64(data[pos+1])<<8 | + uint64(data[pos+2])<<16 | + uint64(data[pos+3])<<24 | + uint64(data[pos+4])<<32) - 
uint64(0x8000000000) + ymd := ymdhms >> 17 + ym := ymd >> 5 + hms := ymdhms % (1 << 17) + + day := ymd % (1 << 5) + month := ym % 13 + year := ym / 13 + + second := hms % (1 << 6) + minute := (hms >> 6) % (1 << 6) + hour := hms >> 12 + + datetime := fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, minute, second) + + switch metadata { + case 1: + decimals := int(data[pos+5]) + return fmt.Sprintf("%v.%01d", datetime, decimals), 6, nil + case 2: + decimals := int(data[pos+5]) + return fmt.Sprintf("%v.%02d", datetime, decimals), 6, nil + case 3: + decimals := int(data[pos+5]) + + int(data[pos+6])<<8 + return fmt.Sprintf("%v.%03d", datetime, decimals), 7, nil + case 4: + decimals := int(data[pos+5]) + + int(data[pos+6])<<8 + return fmt.Sprintf("%v.%04d", datetime, decimals), 7, nil + case 5: + decimals := int(data[pos+5]) + + int(data[pos+6])<<8 + + int(data[pos+7])<<16 + return fmt.Sprintf("%v.%05d", datetime, decimals), 8, nil + case 6: + decimals := int(data[pos+5]) + + int(data[pos+6])<<8 + + int(data[pos+7])<<16 + return fmt.Sprintf("%v.%.6d", datetime, decimals), 8, nil + } + return datetime, 5, nil + case TypeTime2: + hms := (int64(data[pos]) | + int64(data[pos+1])<<8 | + int64(data[pos+2])<<16) - 0x800000 + sign := "" + if hms < 0 { + hms = -hms + sign = "-" + } + + fracStr := "" + switch metadata { + case 1: + frac := int(data[pos+3]) + if sign == "-" && frac != 0 { + hms-- + frac = 0x100 - frac + } + fracStr = fmt.Sprintf(".%.1d", frac/10) + case 2: + frac := int(data[pos+3]) + if sign == "-" && frac != 0 { + hms-- + frac = 0x100 - frac + } + fracStr = fmt.Sprintf(".%.2d", frac) + case 3: + frac := int(data[pos+3]) | + int(data[pos+4])<<8 + if sign == "-" && frac != 0 { + hms-- + frac = 0x10000 - frac + } + fracStr = fmt.Sprintf(".%.3d", frac/10) + case 4: + frac := int(data[pos+3]) | + int(data[pos+4])<<8 + if sign == "-" && frac != 0 { + hms-- + frac = 0x10000 - frac + } + fracStr = fmt.Sprintf(".%.4d", frac) + case 5: + frac := 
int(data[pos+3]) | + int(data[pos+4])<<8 | + int(data[pos+5])<<16 + if sign == "-" && frac != 0 { + hms-- + frac = 0x1000000 - frac + } + fracStr = fmt.Sprintf(".%.5d", frac/10) + case 6: + frac := int(data[pos+3]) | + int(data[pos+4])<<8 | + int(data[pos+5])<<16 + if sign == "-" && frac != 0 { + hms-- + frac = 0x1000000 - frac + } + fracStr = fmt.Sprintf(".%.6d", frac) + } + + hour := (hms >> 12) % (1 << 10) + minute := (hms >> 6) % (1 << 6) + second := hms % (1 << 6) + return fmt.Sprintf("%v%02d:%02d:%02d%v", sign, hour, minute, second, fracStr), 3 + (int(metadata)+1)/2, nil + default: - return "", 0, fmt.Errorf("Unsupported type %v", tmc.Type) + return "", 0, fmt.Errorf("Unsupported type %v", typ) } } @@ -507,7 +822,7 @@ func (ev binlogEvent) Rows(f BinlogFormat, tm *TableMap) (Rows, error) { } // This column is represented now. We need to skip its length. - l, err := cellLength(data, pos, &tm.Columns[c]) + l, err := cellLength(data, pos, tm.Types[c], tm.Metadata[c]) if err != nil { return result, err } @@ -537,7 +852,7 @@ func (ev binlogEvent) Rows(f BinlogFormat, tm *TableMap) (Rows, error) { } // This column is represented now. We need to skip its length. - l, err := cellLength(data, pos, &tm.Columns[c]) + l, err := cellLength(data, pos, tm.Types[c], tm.Metadata[c]) if err != nil { return result, err } @@ -553,8 +868,10 @@ func (ev binlogEvent) Rows(f BinlogFormat, tm *TableMap) (Rows, error) { return result, nil } -// StringValues is a helper method to return the string value of all columns in a row in a Row. -func (rs *Rows) StringValues(tm *TableMap, rowIndex int) ([]string, error) { +// StringValuesForTests is a helper method to return the string value +// of all columns in a row in a Row. Only use it in tests, as the +// returned values cannot be interpreted correctly without the schema. 
+func (rs *Rows) StringValuesForTests(tm *TableMap, rowIndex int) ([]string, error) { var result []string valueIndex := 0 @@ -573,7 +890,7 @@ func (rs *Rows) StringValues(tm *TableMap, rowIndex int) ([]string, error) { } // We have real data - value, l, err := cellData(data, pos, &tm.Columns[c]) + value, l, err := cellData(data, pos, tm.Types[c], tm.Metadata[c]) if err != nil { return nil, err } @@ -585,8 +902,10 @@ func (rs *Rows) StringValues(tm *TableMap, rowIndex int) ([]string, error) { return result, nil } -// StringIdentifies is a helper method to return the string identify of all columns in a row in a Row. -func (rs *Rows) StringIdentifies(tm *TableMap, rowIndex int) ([]string, error) { +// StringIdentifiesForTests is a helper method to return the string +// identify of all columns in a row in a Row. Only use it in tests, as the +// returned values cannot be interpreted correctly without the schema. +func (rs *Rows) StringIdentifiesForTests(tm *TableMap, rowIndex int) ([]string, error) { var result []string valueIndex := 0 @@ -605,7 +924,7 @@ func (rs *Rows) StringIdentifies(tm *TableMap, rowIndex int) ([]string, error) { } // We have real data - value, l, err := cellData(data, pos, &tm.Columns[c]) + value, l, err := cellData(data, pos, tm.Types[c], tm.Metadata[c]) if err != nil { return nil, err } diff --git a/go/mysqlconn/replication/binlog_event_common_test.go b/go/mysqlconn/replication/binlog_event_common_test.go index 89a6b285294..cb92c56189e 100644 --- a/go/mysqlconn/replication/binlog_event_common_test.go +++ b/go/mysqlconn/replication/binlog_event_common_test.go @@ -1,6 +1,7 @@ package replication import ( + "fmt" "reflect" "testing" @@ -333,3 +334,300 @@ func TestBinlogEventIntVarBadID(t *testing.T) { t.Errorf("wrong error, got %#v, want %#v", got, want) } } + +func TestCellLengthAndData(t *testing.T) { + testcases := []struct { + typ byte + metadata uint16 + data []byte + out string + }{{ + typ: TypeTiny, + data: []byte{0x82}, + out: "130", + }, { 
+ typ: TypeYear, + data: []byte{0x82}, + out: "2030", + }, { + typ: TypeShort, + data: []byte{0x02, 0x01}, + out: fmt.Sprintf("%v", 0x0102), + }, { + typ: TypeInt24, + data: []byte{0x03, 0x02, 0x01}, + out: fmt.Sprintf("%v", 0x010203), + }, { + typ: TypeLong, + data: []byte{0x04, 0x03, 0x02, 0x01}, + out: fmt.Sprintf("%v", 0x01020304), + }, { + typ: TypeTimestamp, + data: []byte{0x84, 0x83, 0x82, 0x81}, + out: fmt.Sprintf("%v", 0x81828384), + }, { + typ: TypeLongLong, + data: []byte{0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01}, + out: fmt.Sprintf("%v", 0x0102030405060708), + }, { + typ: TypeDate, + // 2010 << 9 + 10 << 5 + 3 = 1029443 = 0x0fb543 + data: []byte{0x43, 0xb5, 0x0f}, + out: "2010-10-03", + }, { + typ: TypeNewDate, + // 2010 << 9 + 10 << 5 + 3 = 1029443 = 0x0fb543 + data: []byte{0x43, 0xb5, 0x0f}, + out: "2010-10-03", + }, { + typ: TypeTime, + // 154532 = 0x00025ba4 + data: []byte{0xa4, 0x5b, 0x02, 0x00}, + out: "15:45:32", + }, { + typ: TypeDateTime, + // 19840304154532 = 0x120b6e4807a4 + data: []byte{0xa4, 0x07, 0x48, 0x6e, 0x0b, 0x12, 0x00, 0x00}, + out: "1984-03-04 15:45:32", + }, { + typ: TypeVarchar, + metadata: 20, // one byte length encoding + data: []byte{3, 'a', 'b', 'c'}, + out: "abc", + }, { + typ: TypeVarchar, + metadata: 384, // two bytes length encoding + data: []byte{3, 0, 'a', 'b', 'c'}, + out: "abc", + }, { + typ: TypeBit, + metadata: 0x0107, + data: []byte{0x3, 0x1}, + out: "0000001100000001", + }, { + typ: TypeTimestamp2, + metadata: 0, + data: []byte{0x84, 0x83, 0x82, 0x81}, + out: fmt.Sprintf("%v", 0x81828384), + }, { + typ: TypeTimestamp2, + metadata: 1, + data: []byte{0x84, 0x83, 0x82, 0x81, 7}, + out: fmt.Sprintf("%v.7", 0x81828384), + }, { + typ: TypeTimestamp2, + metadata: 2, + data: []byte{0x84, 0x83, 0x82, 0x81, 76}, + out: fmt.Sprintf("%v.76", 0x81828384), + }, { + typ: TypeTimestamp2, + metadata: 3, + // 765 = 0x02fd + data: []byte{0x84, 0x83, 0x82, 0x81, 0xfd, 0x02}, + out: fmt.Sprintf("%v.765", 0x81828384), + }, { + 
typ: TypeTimestamp2, + metadata: 4, + // 7654 = 0x1de6 + data: []byte{0x84, 0x83, 0x82, 0x81, 0xe6, 0x1d}, + out: fmt.Sprintf("%v.7654", 0x81828384), + }, { + typ: TypeTimestamp2, + metadata: 5, + // 76543 = 0x012aff + data: []byte{0x84, 0x83, 0x82, 0x81, 0xff, 0x2a, 0x01}, + out: fmt.Sprintf("%v.76543", 0x81828384), + }, { + typ: TypeTimestamp2, + metadata: 6, + // 765432 = 0x0badf8 + data: []byte{0x84, 0x83, 0x82, 0x81, 0xf8, 0xad, 0x0b}, + out: fmt.Sprintf("%v.765432", 0x81828384), + }, { + typ: TypeDateTime2, + metadata: 0, + // (2012 * 13 + 6) << 22 + 21 << 17 + 15 << 12 + 45 << 6 + 17) + // = 109734198097 = 0x198caafb51 + // Then have to add 0x8000000000 = 0x998caafb51 + data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99}, + out: "2012-06-21 15:45:17", + }, { + typ: TypeDateTime2, + metadata: 1, + data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 7}, + out: "2012-06-21 15:45:17.7", + }, { + typ: TypeDateTime2, + metadata: 2, + data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 76}, + out: "2012-06-21 15:45:17.76", + }, { + typ: TypeDateTime2, + metadata: 3, + // 765 = 0x02fd + data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 0xfd, 0x02}, + out: "2012-06-21 15:45:17.765", + }, { + typ: TypeDateTime2, + metadata: 4, + // 7654 = 0x1de6 + data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 0xe6, 0x1d}, + out: "2012-06-21 15:45:17.7654", + }, { + typ: TypeDateTime2, + metadata: 5, + // 76543 = 0x012aff + data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 0xff, 0x2a, 0x01}, + out: "2012-06-21 15:45:17.76543", + }, { + typ: TypeDateTime2, + metadata: 6, + // 765432 = 0x0badf8 + data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 0xf8, 0xad, 0x0b}, + out: "2012-06-21 15:45:17.765432", + }, { + // This first set of tests is from a comment in + // sql-common/my_time.c: + // + // Disk value intpart frac Time value Memory value + // 800000.00 0 0 00:00:00.00 0000000000.000000 + // 7FFFFF.FF -1 255 -00:00:00.01 FFFFFFFFFF.FFD8F0 + // 7FFFFF.9D -1 99 -00:00:00.99 FFFFFFFFFF.F0E4D0 + // 7FFFFF.00 -1 0 -00:00:01.00 
FFFFFFFFFF.000000 + // 7FFFFE.FF -1 255 -00:00:01.01 FFFFFFFFFE.FFD8F0 + // 7FFFFE.F6 -2 246 -00:00:01.10 FFFFFFFFFE.FE7960 + typ: TypeTime2, + metadata: 2, + data: []byte{0x00, 0x00, 0x80, 0x00}, + out: "00:00:00.00", + }, { + typ: TypeTime2, + metadata: 2, + data: []byte{0xff, 0xff, 0x7f, 0xff}, + out: "-00:00:00.01", + }, { + typ: TypeTime2, + metadata: 2, + data: []byte{0xff, 0xff, 0x7f, 0x9d}, + out: "-00:00:00.99", + }, { + typ: TypeTime2, + metadata: 2, + data: []byte{0xff, 0xff, 0x7f, 0x00}, + out: "-00:00:01.00", + }, { + typ: TypeTime2, + metadata: 2, + data: []byte{0xfe, 0xff, 0x7f, 0xff}, + out: "-00:00:01.01", + }, { + typ: TypeTime2, + metadata: 2, + data: []byte{0xfe, 0xff, 0x7f, 0xf6}, + out: "-00:00:01.10", + }, { + // Similar tests for 4 decimals. + typ: TypeTime2, + metadata: 4, + data: []byte{0x00, 0x00, 0x80, 0x00, 0x00}, + out: "00:00:00.0000", + }, { + typ: TypeTime2, + metadata: 4, + data: []byte{0xff, 0xff, 0x7f, 0xff, 0xff}, + out: "-00:00:00.0001", + }, { + typ: TypeTime2, + metadata: 4, + data: []byte{0xff, 0xff, 0x7f, 0x9d, 0xff}, + out: "-00:00:00.0099", + }, { + typ: TypeTime2, + metadata: 4, + data: []byte{0xff, 0xff, 0x7f, 0x00, 0x00}, + out: "-00:00:01.0000", + }, { + typ: TypeTime2, + metadata: 4, + data: []byte{0xfe, 0xff, 0x7f, 0xff, 0xff}, + out: "-00:00:01.0001", + }, { + typ: TypeTime2, + metadata: 4, + data: []byte{0xfe, 0xff, 0x7f, 0xf6, 0xff}, + out: "-00:00:01.0010", + }, { + // Similar tests for 6 decimals. 
+ typ: TypeTime2, + metadata: 6, + data: []byte{0x00, 0x00, 0x80, 0x00, 0x00, 0x00}, + out: "00:00:00.000000", + }, { + typ: TypeTime2, + metadata: 6, + data: []byte{0xff, 0xff, 0x7f, 0xff, 0xff, 0xff}, + out: "-00:00:00.000001", + }, { + typ: TypeTime2, + metadata: 6, + data: []byte{0xff, 0xff, 0x7f, 0x9d, 0xff, 0xff}, + out: "-00:00:00.000099", + }, { + typ: TypeTime2, + metadata: 6, + data: []byte{0xff, 0xff, 0x7f, 0x00, 0x00, 0x00}, + out: "-00:00:01.000000", + }, { + typ: TypeTime2, + metadata: 6, + data: []byte{0xfe, 0xff, 0x7f, 0xff, 0xff, 0xff}, + out: "-00:00:01.000001", + }, { + typ: TypeTime2, + metadata: 6, + data: []byte{0xfe, 0xff, 0x7f, 0xf6, 0xff, 0xff}, + out: "-00:00:01.000010", + }, { + // Few more tests. + typ: TypeTime2, + metadata: 0, + data: []byte{0x00, 0x00, 0x80}, + out: "00:00:00", + }, { + typ: TypeTime2, + metadata: 1, + data: []byte{0x01, 0x00, 0x80, 0x0a}, + out: "00:00:01.1", + }, { + typ: TypeTime2, + metadata: 2, + data: []byte{0x01, 0x00, 0x80, 0x0a}, + out: "00:00:01.10", + }, { + typ: TypeTime2, + metadata: 0, + // 15 << 12 + 34 << 6 + 54 = 63670 = 0x00f8b6 + // and need to add 0x800000 + data: []byte{0xb6, 0xf8, 0x80}, + out: "15:34:54", + }} + + for _, tcase := range testcases { + // Copy the data into a larger buffer (one extra byte + // on both sides), so we make sure the 'pos' field works. + padded := make([]byte, len(tcase.data)+2) + copy(padded[1:], tcase.data) + + // Test cellLength. + l, err := cellLength(padded, 1, tcase.typ, tcase.metadata) + if err != nil || l != len(tcase.data) { + t.Errorf("testcase cellLength(%v,%v) returned unexpected result: %v %v", tcase.typ, tcase.data, l, err) + } + + // Test cellData (only used for tests, but might as well). 
+ out, l, err := cellData(padded, 1, tcase.typ, tcase.metadata) + if err != nil || l != len(tcase.data) || out != tcase.out { + t.Errorf("testcase cellData(%v,%v) returned unexpected result: %v %v %v, was expecting %v %v ", tcase.typ, tcase.data, out, l, err, tcase.out, len(tcase.data)) + } + } +} diff --git a/go/mysqlconn/replication/binlog_event_make.go b/go/mysqlconn/replication/binlog_event_make.go index 22bf4b8dd72..adc8c2cf23b 100644 --- a/go/mysqlconn/replication/binlog_event_make.go +++ b/go/mysqlconn/replication/binlog_event_make.go @@ -268,11 +268,7 @@ func NewTableMapEvent(f BinlogFormat, s *FakeBinlogStream, tableID uint64, tm *T panic("Not implemented, post_header_length!=8") } - // Build the NullBitmap first. - nullBitmap := NewServerBitmap(len(tm.Columns)) - for i, tmc := range tm.Columns { - nullBitmap.Set(i, tmc.CanBeNull) - } + metadataLength := metadataTotalLength(tm.Types) length := 6 + // table_id 2 + // flags @@ -283,9 +279,10 @@ func NewTableMapEvent(f BinlogFormat, s *FakeBinlogStream, tableID uint64, tm *T len(tm.Name) + 1 + // [00] 1 + // column-count FIXME(alainjobart) len enc - len(tm.Columns) + - 1 + // lenenc-str column-meta-def, see below. - len(nullBitmap.data) + len(tm.Types) + + 1 + // lenenc-str column-meta-def FIXME(alainjobart) len enc + metadataLength + + len(tm.CanBeNull.data) data := make([]byte, length) data[0] = byte(tableID) @@ -305,20 +302,20 @@ func NewTableMapEvent(f BinlogFormat, s *FakeBinlogStream, tableID uint64, tm *T data[pos] = 0 pos++ - data[pos] = byte(len(tm.Columns)) // FIXME(alainjobart) lenenc + data[pos] = byte(len(tm.Types)) // FIXME(alainjobart) lenenc pos++ - for i, tmc := range tm.Columns { - data[pos+i] = tmc.Type - } - pos += len(tm.Columns) + pos += copy(data[pos:], tm.Types) - // FIXME(alainjobart) per-column meta data. Starting with - // len-enc length, so 0 for now. - data[pos] = 0 + // Per-column meta data. Starting with len-enc length. 
+ // FIXME(alainjobart) lenenc + data[pos] = byte(metadataLength) pos++ + for c, typ := range tm.Types { + pos = metadataWrite(data, pos, typ, tm.Metadata[c]) + } - pos += copy(data[pos:], nullBitmap.data) + pos += copy(data[pos:], tm.CanBeNull.data) if pos != len(data) { panic("bad encoding") } diff --git a/go/mysqlconn/replication/binlog_event_make_test.go b/go/mysqlconn/replication/binlog_event_make_test.go index 7ea027edcf3..e9023ab217f 100644 --- a/go/mysqlconn/replication/binlog_event_make_test.go +++ b/go/mysqlconn/replication/binlog_event_make_test.go @@ -230,19 +230,36 @@ func TestTableMapEvent(t *testing.T) { Flags: 0x8090, Database: "my_database", Name: "my_table", - Columns: []TableMapColumn{ - {Type: TypeLongLong, CanBeNull: false}, - {Type: TypeLongLong, CanBeNull: true}, - {Type: TypeLongLong, CanBeNull: true}, - {Type: TypeLongLong, CanBeNull: false}, - {Type: TypeLongLong, CanBeNull: false}, - {Type: TypeTime, CanBeNull: true}, - {Type: TypeLongLong, CanBeNull: false}, - {Type: TypeLongLong, CanBeNull: false}, - {Type: TypeLongLong, CanBeNull: false}, - {Type: TypeVarchar, CanBeNull: true}, + Types: []byte{ + TypeLongLong, + TypeLongLong, + TypeLongLong, + TypeLongLong, + TypeLongLong, + TypeTime, + TypeLongLong, + TypeLongLong, + TypeLongLong, + TypeVarchar, + }, + CanBeNull: NewServerBitmap(10), + Metadata: []uint16{ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 384, // Length of the varchar field. 
}, } + tm.CanBeNull.Set(1, true) + tm.CanBeNull.Set(2, true) + tm.CanBeNull.Set(5, true) + tm.CanBeNull.Set(9, true) event := NewTableMapEvent(f, s, 0x102030405060, tm) if !event.IsValid() { @@ -280,11 +297,17 @@ func TestRowsEvent(t *testing.T) { Flags: 0x8090, Database: "my_database", Name: "my_table", - Columns: []TableMapColumn{ - {Type: TypeLong, CanBeNull: false}, - {Type: TypeVarchar, CanBeNull: true}, + Types: []byte{ + TypeLong, + TypeVarchar, + }, + CanBeNull: NewServerBitmap(2), + Metadata: []uint16{ + 0, + 384, }, } + tm.CanBeNull.Set(1, true) // Do an update packet with all fields set. rows := Rows{ @@ -317,11 +340,11 @@ func TestRowsEvent(t *testing.T) { // Test the Rows we just created, to be sure. // 1076895760 is 0x40302010. - identifies, err := rows.StringIdentifies(tm, 0) + identifies, err := rows.StringIdentifiesForTests(tm, 0) if expected := []string{"1076895760", "abc"}; !reflect.DeepEqual(identifies, expected) { t.Fatalf("bad Rows idenfity, got %v expected %v", identifies, expected) } - values, err := rows.StringValues(tm, 0) + values, err := rows.StringValuesForTests(tm, 0) if expected := []string{"1076895760", "abcd"}; !reflect.DeepEqual(values, expected) { t.Fatalf("bad Rows data, got %v expected %v", values, expected) } diff --git a/go/mysqlconn/replication/constants.go b/go/mysqlconn/replication/constants.go index 009b1f518f1..e941fa26d85 100644 --- a/go/mysqlconn/replication/constants.go +++ b/go/mysqlconn/replication/constants.go @@ -59,6 +59,18 @@ const ( // TypeBit is MYSQL_TYPE_BIT TypeBit = 16 + // TypeTimestamp2 is MYSQL_TYPE_TIMESTAMP2 + TypeTimestamp2 = 17 + + // TypeDateTime2 is MYSQL_TYPE_DATETIME2 + TypeDateTime2 = 18 + + // TypeTime2 is MYSQL_TYPE_TIME2 + TypeTime2 = 19 + + // TypeJSON is MYSQL_TYPE_JSON + TypeJSON = 245 + // TypeNewDecimal is MYSQL_TYPE_NEWDECIMAL TypeNewDecimal = 246 diff --git a/go/mysqlconn/replication_test.go b/go/mysqlconn/replication_test.go index 7e6cef5cfd5..ae9f0a93a5b 100644 --- 
a/go/mysqlconn/replication_test.go +++ b/go/mysqlconn/replication_test.go @@ -527,9 +527,9 @@ func testRowReplicationWithRealDatabase(t *testing.T, params *sqldb.ConnParams) t.Logf("Got Table Map event: %v %v", tableID, tableMap) if tableMap.Database != "vttest" || tableMap.Name != "replication" || - len(tableMap.Columns) != 2 || - tableMap.Columns[0].CanBeNull || - !tableMap.Columns[1].CanBeNull { + len(tableMap.Types) != 2 || + tableMap.CanBeNull.Bit(0) || + !tableMap.CanBeNull.Bit(1) { t.Errorf("got wrong TableMap: %v", tableMap) } gotTableMapEvent = true @@ -543,7 +543,7 @@ func testRowReplicationWithRealDatabase(t *testing.T, params *sqldb.ConnParams) } // Check it has 2 rows, and first value is '10', second value is 'nice name'. - values, _ := wr.StringValues(tableMap, 0) + values, _ := wr.StringValuesForTests(tableMap, 0) t.Logf("Got WriteRows event data: %v %v", wr, values) if expected := []string{"10", "nice name"}; !reflect.DeepEqual(values, expected) { t.Fatalf("StringValues returned %v, expected %v", values, expected) @@ -560,14 +560,14 @@ func testRowReplicationWithRealDatabase(t *testing.T, params *sqldb.ConnParams) } // Check it has 2 identify rows, and first value is '10', second value is 'nice name'. - values, _ := ur.StringIdentifies(tableMap, 0) + values, _ := ur.StringIdentifiesForTests(tableMap, 0) t.Logf("Got UpdateRows event identify: %v %v", ur, values) if expected := []string{"10", "nice name"}; !reflect.DeepEqual(values, expected) { t.Fatalf("StringIdentifies returned %v, expected %v", values, expected) } // Check it has 2 values rows, and first value is '10', second value is 'nicer name'. 
- values, _ = ur.StringValues(tableMap, 0) + values, _ = ur.StringValuesForTests(tableMap, 0) t.Logf("Got UpdateRows event data: %v %v", ur, values) if expected := []string{"10", "nicer name"}; !reflect.DeepEqual(values, expected) { t.Fatalf("StringValues returned %v, expected %v", values, expected) @@ -584,7 +584,7 @@ func testRowReplicationWithRealDatabase(t *testing.T, params *sqldb.ConnParams) } // Check it has 2 rows, and first value is '10', second value is 'nicer name'. - values, _ := dr.StringIdentifies(tableMap, 0) + values, _ := dr.StringIdentifiesForTests(tableMap, 0) t.Logf("Got DeleteRows event identify: %v %v", dr, values) if expected := []string{"10", "nicer name"}; !reflect.DeepEqual(values, expected) { t.Fatalf("StringIdentifies returned %v, expected %v", values, expected) diff --git a/go/vt/binlog/binlog_streamer.go b/go/vt/binlog/binlog_streamer.go index 98a2243dad3..e012ad8bf70 100644 --- a/go/vt/binlog/binlog_streamer.go +++ b/go/vt/binlog/binlog_streamer.go @@ -5,6 +5,8 @@ package binlog import ( + "bytes" + "encoding/binary" "fmt" "io" "strings" @@ -14,8 +16,10 @@ import ( "github.com/youtube/vitess/go/mysqlconn/replication" "github.com/youtube/vitess/go/sqldb" + "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/stats" "github.com/youtube/vitess/go/vt/mysqlctl" + "github.com/youtube/vitess/go/vt/sqlparser" "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" binlogdatapb "github.com/youtube/vitess/go/vt/proto/binlogdata" @@ -381,33 +385,23 @@ func (bls *Streamer) parseEvents(ctx context.Context, events <-chan replication. // Skip cross-db statements. 
continue } - rows, err := ev.Rows(format, tm) - if err != nil { - return pos, err + ti := bls.se.GetTable(sqlparser.NewTableIdent(tm.Name)) + if ti == nil { + return pos, fmt.Errorf("unknown table %v in schema", tm.Name) } setTimestamp := &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, Sql: []byte(fmt.Sprintf("SET TIMESTAMP=%d", ev.Timestamp())), } statements = append(statements, setTimestamp) - for i := range rows.Rows { - identifies, err := rows.StringIdentifies(tm, i) - if err != nil { - log.Warningf("Failed to parse UPDATE due to error %v", err) - continue - } - values, err := rows.StringValues(tm, i) - if err != nil { - log.Warningf("Failed to parse UPDATE due to error %v", err) - continue - } - update := &binlogdatapb.BinlogTransaction_Statement{ - Category: binlogdatapb.BinlogTransaction_Statement_BL_UPDATE, - Sql: []byte(fmt.Sprintf("WIP: update table %v set values = %v where identifies = %v", tm.Name, values, identifies)), - } - statements = append(statements, update) + + rows, err := ev.Rows(format, tm) + if err != nil { + return pos, err } + statements = appendUpdates(statements, &rows, tm, ti) + if autocommit { if err = commit(ev.Timestamp()); err != nil { return pos, err @@ -416,3 +410,350 @@ func (bls *Streamer) parseEvents(ctx context.Context, events <-chan replication. 
} } } + +func appendUpdates(statements []*binlogdatapb.BinlogTransaction_Statement, rows *replication.Rows, tm *replication.TableMap, ti *schema.Table) []*binlogdatapb.BinlogTransaction_Statement { + for i := range rows.Rows { + var sql bytes.Buffer + + sql.WriteString("UPDATE ") + sql.WriteString(tm.Name) + sql.WriteString(" SET ") + + if err := writeValuesAsSQL(&sql, rows, tm, ti, i); err != nil { + log.Warningf("writeValuesAsSQL(%v) failed: %v", i, err) + continue + } + + sql.WriteString(" WHERE ") + + if err := writeIdentifiesAsSQL(&sql, rows, tm, ti, i); err != nil { + log.Warningf("writeIdentifiesAsSQL(%v) failed: %v", i, err) + continue + } + + update := &binlogdatapb.BinlogTransaction_Statement{ + Category: binlogdatapb.BinlogTransaction_Statement_BL_UPDATE, + Sql: sql.Bytes(), + } + statements = append(statements, update) + } + return statements +} + +// writeValuesAsSQL is a helper method to print the values as SQL in the +// provided bytes.Buffer. +func writeValuesAsSQL(sql *bytes.Buffer, rs *replication.Rows, tm *replication.TableMap, ti *schema.Table, rowIndex int) error { + valueIndex := 0 + data := rs.Rows[rowIndex].Data + pos := 0 + for c := 0; c < rs.DataColumns.Count(); c++ { + if !rs.DataColumns.Bit(c) { + continue + } + + // Print a separator if needed, then print the name. + if valueIndex > 0 { + sql.WriteString(", ") + } + sql.WriteString(ti.Columns[c].Name.String()) + sql.WriteByte('=') + + if rs.Rows[rowIndex].NullColumns.Bit(valueIndex) { + // This column is represented, but its value is NULL. + sql.WriteString("NULL") + valueIndex++ + continue + } + + // We have real data + value, l, err := cellAsSQL(data, pos, tm.Types[c], tm.Metadata[c], ti.Columns[c].Type) + if err != nil { + return err + } + sql.WriteString(value) + pos += l + valueIndex++ + } + + return nil +} + +// writeIdentifiesAsSQL is a helper method to print the identifies as SQL in the +// provided bytes.Buffer. 
+func writeIdentifiesAsSQL(sql *bytes.Buffer, rs *replication.Rows, tm *replication.TableMap, ti *schema.Table, rowIndex int) error { + valueIndex := 0 + data := rs.Rows[rowIndex].Identify + pos := 0 + for c := 0; c < rs.IdentifyColumns.Count(); c++ { + if !rs.IdentifyColumns.Bit(c) { + continue + } + + // Print a separator if needed, then print the name. + if valueIndex > 0 { + sql.WriteString(" AND ") + } + sql.WriteString(ti.Columns[c].Name.String()) + sql.WriteByte('=') + + if rs.Rows[rowIndex].NullIdentifyColumns.Bit(valueIndex) { + // This column is represented, but its value is NULL. + sql.WriteString("NULL") + valueIndex++ + continue + } + + // We have real data + value, l, err := cellAsSQL(data, pos, tm.Types[c], tm.Metadata[c], ti.Columns[c].Type) + if err != nil { + return err + } + sql.WriteString(value) + pos += l + valueIndex++ + } + + return nil +} + +// cellAsSQL parses the data for a cell inside a RBR event, and returns +// the SQL representation of that cell, given its RBR type, and schema type. +func cellAsSQL(data []byte, pos int, typ byte, metadata uint16, styp querypb.Type) (string, int, error) { + switch typ { + case replication.TypeTiny: + if sqltypes.IsSigned(styp) { + return fmt.Sprintf("%v", int8(data[pos])), 1, nil + } + return fmt.Sprintf("%v", uint8(data[pos])), 1, nil + case replication.TypeYear: + return fmt.Sprintf("%v", 1900+int(data[pos])), 1, nil + case replication.TypeShort: + val := binary.LittleEndian.Uint16(data[pos : pos+2]) + if sqltypes.IsSigned(styp) { + return fmt.Sprintf("%v", int16(val)), 2, nil + } + return fmt.Sprintf("%v", val), 2, nil + case replication.TypeInt24: + if sqltypes.IsSigned(styp) && data[pos+2]&128 > 0 { + // Negative number, have to extend the sign. + val := int32(uint32(data[pos]) + + uint32(data[pos+1])<<8 + + uint32(data[pos+2])<<16 + + uint32(255)<<24) + return fmt.Sprintf("%v", val), 3, nil + } + // Positive number. 
+ val := uint32(data[pos]) + + uint32(data[pos+1])<<8 + + uint32(data[pos+2])<<16 + return fmt.Sprintf("%v", val), 3, nil + case replication.TypeLong: + val := binary.LittleEndian.Uint32(data[pos : pos+4]) + if sqltypes.IsSigned(styp) { + return fmt.Sprintf("%v", int32(val)), 4, nil + } + return fmt.Sprintf("%v", val), 4, nil + case replication.TypeTimestamp: + val := binary.LittleEndian.Uint32(data[pos : pos+4]) + return fmt.Sprintf("%v", val), 4, nil + case replication.TypeLongLong: + val := binary.LittleEndian.Uint64(data[pos : pos+8]) + if sqltypes.IsSigned(styp) { + return fmt.Sprintf("%v", int64(val)), 8, nil + } + return fmt.Sprintf("%v", val), 8, nil + case replication.TypeDate, replication.TypeNewDate: + val := uint32(data[pos]) + + uint32(data[pos+1])<<8 + + uint32(data[pos+2])<<16 + day := val & 31 + month := val >> 5 & 15 + year := val >> 9 + return fmt.Sprintf("'%04d-%02d-%02d'", year, month, day), 3, nil + case replication.TypeTime: + val := binary.LittleEndian.Uint32(data[pos : pos+4]) + hour := val / 10000 + minute := (val % 10000) / 100 + second := val % 100 + return fmt.Sprintf("'%02d:%02d:%02d'", hour, minute, second), 4, nil + case replication.TypeDateTime: + val := binary.LittleEndian.Uint64(data[pos : pos+8]) + d := val / 1000000 + t := val % 1000000 + year := d / 10000 + month := (d % 10000) / 100 + day := d % 100 + hour := t / 10000 + minute := (t % 10000) / 100 + second := t % 100 + return fmt.Sprintf("'%04d-%02d-%02d %02d:%02d:%02d'", year, month, day, hour, minute, second), 8, nil + case replication.TypeVarchar: + // Length is encoded in 1 or 2 bytes. 
+ l := int(data[pos]) + headerSize := 1 + if metadata > 255 { + l += int(data[pos+1]) << 8 + headerSize = 2 + } + v := sqltypes.MakeTrusted(querypb.Type_VARCHAR, data[pos+headerSize:pos+headerSize+l]) + var buf bytes.Buffer + v.EncodeSQL(&buf) + return buf.String(), l + headerSize, nil + case replication.TypeBit: + nbits := ((metadata >> 8) * 8) + (metadata & 0xFF) + l := (int(nbits) + 7) / 8 + var buf bytes.Buffer + buf.WriteString("b'") + for i := 0; i < l; i++ { + buf.WriteString(fmt.Sprintf("%08b", data[pos+i])) + } + buf.WriteByte('\'') + return buf.String(), l, nil + case replication.TypeTimestamp2: + second := binary.LittleEndian.Uint32(data[pos : pos+4]) + switch metadata { + case 1: + decimals := int(data[pos+4]) + return fmt.Sprintf("%v.%01d", second, decimals), 5, nil + case 2: + decimals := int(data[pos+4]) + return fmt.Sprintf("%v.%02d", second, decimals), 5, nil + case 3: + decimals := int(data[pos+4]) + + int(data[pos+5])<<8 + return fmt.Sprintf("%v.%03d", second, decimals), 6, nil + case 4: + decimals := int(data[pos+4]) + + int(data[pos+5])<<8 + return fmt.Sprintf("%v.%04d", second, decimals), 6, nil + case 5: + decimals := int(data[pos+4]) + + int(data[pos+5])<<8 + + int(data[pos+6])<<16 + return fmt.Sprintf("%v.%05d", second, decimals), 7, nil + case 6: + decimals := int(data[pos+4]) + + int(data[pos+5])<<8 + + int(data[pos+6])<<16 + return fmt.Sprintf("%v.%.6d", second, decimals), 7, nil + } + return fmt.Sprintf("%v", second), 4, nil + case replication.TypeDateTime2: + ymdhms := (uint64(data[pos]) | + uint64(data[pos+1])<<8 | + uint64(data[pos+2])<<16 | + uint64(data[pos+3])<<24 | + uint64(data[pos+4])<<32) - uint64(0x8000000000) + ymd := ymdhms >> 17 + ym := ymd >> 5 + hms := ymdhms % (1 << 17) + + day := ymd % (1 << 5) + month := ym % 13 + year := ym / 13 + + second := hms % (1 << 6) + minute := (hms >> 6) % (1 << 6) + hour := hms >> 12 + + datetime := fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, minute, second) + + 
switch metadata { + case 1: + decimals := int(data[pos+5]) + return fmt.Sprintf("'%v.%01d'", datetime, decimals), 6, nil + case 2: + decimals := int(data[pos+5]) + return fmt.Sprintf("'%v.%02d'", datetime, decimals), 6, nil + case 3: + decimals := int(data[pos+5]) + + int(data[pos+6])<<8 + return fmt.Sprintf("'%v.%03d'", datetime, decimals), 7, nil + case 4: + decimals := int(data[pos+5]) + + int(data[pos+6])<<8 + return fmt.Sprintf("'%v.%04d'", datetime, decimals), 7, nil + case 5: + decimals := int(data[pos+5]) + + int(data[pos+6])<<8 + + int(data[pos+7])<<16 + return fmt.Sprintf("'%v.%05d'", datetime, decimals), 8, nil + case 6: + decimals := int(data[pos+5]) + + int(data[pos+6])<<8 + + int(data[pos+7])<<16 + return fmt.Sprintf("'%v.%.6d'", datetime, decimals), 8, nil + } + return fmt.Sprintf("'%v'", datetime), 5, nil + + case replication.TypeTime2: + hms := (int64(data[pos]) | + int64(data[pos+1])<<8 | + int64(data[pos+2])<<16) - 0x800000 + sign := "" + if hms < 0 { + hms = -hms + sign = "-" + } + + fracStr := "" + switch metadata { + case 1: + frac := int(data[pos+3]) + if sign == "-" && frac != 0 { + hms-- + frac = 0x100 - frac + } + fracStr = fmt.Sprintf(".%.1d", frac/10) + case 2: + frac := int(data[pos+3]) + if sign == "-" && frac != 0 { + hms-- + frac = 0x100 - frac + } + fracStr = fmt.Sprintf(".%.2d", frac) + case 3: + frac := int(data[pos+3]) | + int(data[pos+4])<<8 + if sign == "-" && frac != 0 { + hms-- + frac = 0x10000 - frac + } + fracStr = fmt.Sprintf(".%.3d", frac/10) + case 4: + frac := int(data[pos+3]) | + int(data[pos+4])<<8 + if sign == "-" && frac != 0 { + hms-- + frac = 0x10000 - frac + } + fracStr = fmt.Sprintf(".%.4d", frac) + case 5: + frac := int(data[pos+3]) | + int(data[pos+4])<<8 | + int(data[pos+5])<<16 + if sign == "-" && frac != 0 { + hms-- + frac = 0x1000000 - frac + } + fracStr = fmt.Sprintf(".%.5d", frac/10) + case 6: + frac := int(data[pos+3]) | + int(data[pos+4])<<8 | + int(data[pos+5])<<16 + if sign == "-" && frac != 0 { + 
hms-- + frac = 0x1000000 - frac + } + fracStr = fmt.Sprintf(".%.6d", frac) + } + + hour := (hms >> 12) % (1 << 10) + minute := (hms >> 6) % (1 << 6) + second := hms % (1 << 6) + return fmt.Sprintf("'%v%02d:%02d:%02d%v'", sign, hour, minute, second, fracStr), 3 + (int(metadata)+1)/2, nil + + default: + return "", 0, fmt.Errorf("Unsupported type %v", typ) + } +} diff --git a/go/vt/binlog/binlog_streamer_rbr_test.go b/go/vt/binlog/binlog_streamer_rbr_test.go index b348ae55d98..66f69702640 100644 --- a/go/vt/binlog/binlog_streamer_rbr_test.go +++ b/go/vt/binlog/binlog_streamer_rbr_test.go @@ -1,12 +1,15 @@ package binlog import ( + "fmt" "reflect" "testing" "golang.org/x/net/context" "github.com/youtube/vitess/go/mysqlconn/replication" + "github.com/youtube/vitess/go/vt/sqlparser" + "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" binlogdatapb "github.com/youtube/vitess/go/vt/proto/binlogdata" querypb "github.com/youtube/vitess/go/vt/proto/query" @@ -19,16 +22,40 @@ func TestStreamerParseRBRUpdateEvent(t *testing.T) { s := replication.NewFakeBinlogStream() s.ServerID = 62344 + // Create a schema.Engine for this test, with just one table. + // We only use the Columns. + se := schema.NewEngineForTests() + se.SetTableForTests(&schema.Table{ + Name: sqlparser.NewTableIdent("vt_a"), + Columns: []schema.TableColumn{ + { + Name: sqlparser.NewColIdent("id"), + Type: querypb.Type_INT64, + }, + { + Name: sqlparser.NewColIdent("message"), + Type: querypb.Type_VARCHAR, + }, + }, + }) + + // Create a tableMap event on the table. 
tableID := uint64(0x102030405060) tm := &replication.TableMap{ Flags: 0x8090, Database: "vt_test_keyspace", Name: "vt_a", - Columns: []replication.TableMapColumn{ - {Type: replication.TypeLong, CanBeNull: false}, - {Type: replication.TypeVarchar, CanBeNull: true}, + Types: []byte{ + replication.TypeLong, + replication.TypeVarchar, + }, + CanBeNull: replication.NewServerBitmap(2), + Metadata: []uint16{ + 0, + 384, // A VARCHAR(128) in utf8 would result in 384. }, } + tm.CanBeNull.Set(1, true) // Do an update packet with all fields set. rows := replication.Rows{ @@ -80,7 +107,7 @@ func TestStreamerParseRBRUpdateEvent(t *testing.T) { }, { Category: binlogdatapb.BinlogTransaction_Statement_BL_UPDATE, - Sql: []byte("WIP: update table vt_a set values = [1076895760 abcd] where identifies = [1076895760 abc]"), + Sql: []byte("UPDATE vt_a SET id=1076895760, message='abcd' WHERE id=1076895760 AND message='abc'"), }, }, EventToken: &querypb.EventToken{ @@ -100,7 +127,7 @@ func TestStreamerParseRBRUpdateEvent(t *testing.T) { got = append(got, *trans) return nil } - bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) + bls := NewStreamer("vt_test_keyspace", nil, se, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events) @@ -112,3 +139,287 @@ func TestStreamerParseRBRUpdateEvent(t *testing.T) { t.Errorf("binlogConnStreamer.parseEvents(): got:\n%v\nwant:\n%v", got, want) } } + +func TestCellAsSQL(t *testing.T) { + testcases := []struct { + typ byte + metadata uint16 + styp querypb.Type + data []byte + out string + }{{ + // TypeTiny tests, unsigned and signed. 
+ typ: replication.TypeTiny, + styp: querypb.Type_UINT8, + data: []byte{0x0a}, + out: "10", + }, { + typ: replication.TypeTiny, + styp: querypb.Type_UINT8, + data: []byte{0x82}, + out: "130", + }, { + typ: replication.TypeTiny, + styp: querypb.Type_INT8, + data: []byte{0x82}, + out: "-126", + }, { + // TypeYear is always unsigned. + typ: replication.TypeYear, + styp: querypb.Type_YEAR, + data: []byte{0x82}, + out: "2030", + }, { + // TypeShort tests, unsigned and signed. + typ: replication.TypeShort, + styp: querypb.Type_UINT16, + data: []byte{0x02, 0x01}, + out: fmt.Sprintf("%v", 0x0102), + }, { + typ: replication.TypeShort, + styp: querypb.Type_UINT16, + data: []byte{0x81, 0x82}, + out: fmt.Sprintf("%v", 0x8281), + }, { + typ: replication.TypeShort, + styp: querypb.Type_INT16, + data: []byte{0xfe, 0xff}, + out: "-2", + }, { + // TypeInt24 tests, unsigned and signed. + typ: replication.TypeInt24, + styp: querypb.Type_UINT24, + data: []byte{0x03, 0x02, 0x01}, + out: fmt.Sprintf("%v", 0x010203), + }, { + typ: replication.TypeInt24, + styp: querypb.Type_UINT24, + data: []byte{0x81, 0x82, 0x83}, + out: fmt.Sprintf("%v", 0x838281), + }, { + typ: replication.TypeInt24, + styp: querypb.Type_INT24, + data: []byte{0xfe, 0xff, 0xff}, + out: "-2", + }, { + // TypeLong tests, unsigned and signed. + typ: replication.TypeLong, + styp: querypb.Type_UINT32, + data: []byte{0x04, 0x03, 0x02, 0x01}, + out: fmt.Sprintf("%v", 0x01020304), + }, { + typ: replication.TypeLong, + styp: querypb.Type_UINT32, + data: []byte{0x81, 0x82, 0x83, 0x84}, + out: fmt.Sprintf("%v", 0x84838281), + }, { + typ: replication.TypeLong, + styp: querypb.Type_INT32, + data: []byte{0xfe, 0xff, 0xff, 0xff}, + out: "-2", + }, { + // TypeTimestamp tests. + typ: replication.TypeTimestamp, + styp: querypb.Type_TIMESTAMP, + data: []byte{0x84, 0x83, 0x82, 0x81}, + out: fmt.Sprintf("%v", 0x81828384), + }, { + // TypeLongLong tests, unsigned and signed. 
+ typ: replication.TypeLongLong, + styp: querypb.Type_UINT64, + data: []byte{0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01}, + out: fmt.Sprintf("%v", 0x0102030405060708), + }, { + typ: replication.TypeLongLong, + styp: querypb.Type_UINT64, + data: []byte{0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88}, + out: fmt.Sprintf("%v", uint64(0x8887868584838281)), + }, { + typ: replication.TypeLongLong, + styp: querypb.Type_INT64, + data: []byte{0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + out: "-2", + }, { + // TypeDate and TypeNewDate tests, unsigned and signed. + // 2010 << 9 + 10 << 5 + 3 = 1029443 = 0x0fb543 + typ: replication.TypeDate, + styp: querypb.Type_DATE, + data: []byte{0x43, 0xb5, 0x0f}, + out: "'2010-10-03'", + }, { + typ: replication.TypeNewDate, + styp: querypb.Type_DATE, + data: []byte{0x43, 0xb5, 0x0f}, + out: "'2010-10-03'", + }, { + // TypeTime tests. + // 154532 = 0x00025ba4 + typ: replication.TypeTime, + styp: querypb.Type_TIME, + data: []byte{0xa4, 0x5b, 0x02, 0x00}, + out: "'15:45:32'", + }, { + // TypeDateTime tests. 
+ // 19840304154532 = 0x120b6e4807a4 + typ: replication.TypeDateTime, + styp: querypb.Type_DATETIME, + data: []byte{0xa4, 0x07, 0x48, 0x6e, 0x0b, 0x12, 0x00, 0x00}, + out: "'1984-03-04 15:45:32'", + }, { + // Varchar + typ: replication.TypeVarchar, + metadata: 30, + styp: querypb.Type_VARCHAR, + data: []byte{3, 'a', 'b', 'c'}, + out: "'abc'", + }, { + typ: replication.TypeVarchar, + metadata: 300, + styp: querypb.Type_VARCHAR, + data: []byte{3, 0, 'a', '\'', 'c'}, + out: "'a\\'c'", + }, { + // Bit + typ: replication.TypeBit, + metadata: 0x0107, + styp: querypb.Type_BIT, + data: []byte{0x03, 0x01}, + out: "b'0000001100000001'", + }, { + // Timestamp2 + typ: replication.TypeTimestamp2, + metadata: 0, + styp: querypb.Type_TIMESTAMP, + data: []byte{0x84, 0x83, 0x82, 0x81}, + out: fmt.Sprintf("%v", 0x81828384), + }, { + typ: replication.TypeTimestamp2, + metadata: 1, + styp: querypb.Type_TIMESTAMP, + data: []byte{0x84, 0x83, 0x82, 0x81, 7}, + out: fmt.Sprintf("%v.7", 0x81828384), + }, { + typ: replication.TypeTimestamp2, + metadata: 2, + styp: querypb.Type_TIMESTAMP, + data: []byte{0x84, 0x83, 0x82, 0x81, 76}, + out: fmt.Sprintf("%v.76", 0x81828384), + }, { + typ: replication.TypeTimestamp2, + metadata: 3, + styp: querypb.Type_TIMESTAMP, + // 765 = 0x02fd + data: []byte{0x84, 0x83, 0x82, 0x81, 0xfd, 0x02}, + out: fmt.Sprintf("%v.765", 0x81828384), + }, { + typ: replication.TypeTimestamp2, + metadata: 4, + styp: querypb.Type_TIMESTAMP, + // 7654 = 0x1de6 + data: []byte{0x84, 0x83, 0x82, 0x81, 0xe6, 0x1d}, + out: fmt.Sprintf("%v.7654", 0x81828384), + }, { + typ: replication.TypeTimestamp2, + metadata: 5, + styp: querypb.Type_TIMESTAMP, + // 76543 = 0x012aff + data: []byte{0x84, 0x83, 0x82, 0x81, 0xff, 0x2a, 0x01}, + out: fmt.Sprintf("%v.76543", 0x81828384), + }, { + typ: replication.TypeTimestamp2, + metadata: 6, + styp: querypb.Type_TIMESTAMP, + // 765432 = 0x0badf8 + data: []byte{0x84, 0x83, 0x82, 0x81, 0xf8, 0xad, 0x0b}, + out: fmt.Sprintf("%v.765432", 0x81828384), + 
}, { + // DateTime2 + typ: replication.TypeDateTime2, + metadata: 0, + styp: querypb.Type_DATETIME, + // (2012 * 13 + 6) << 22 + 21 << 17 + 15 << 12 + 45 << 6 + 17) + // = 109734198097 = 0x198caafb51 + // Then have to add 0x8000000000 = 0x998caafb51 + data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99}, + out: "'2012-06-21 15:45:17'", + }, { + typ: replication.TypeDateTime2, + metadata: 1, + styp: querypb.Type_DATETIME, + data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 7}, + out: "'2012-06-21 15:45:17.7'", + }, { + typ: replication.TypeDateTime2, + metadata: 2, + styp: querypb.Type_DATETIME, + data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 76}, + out: "'2012-06-21 15:45:17.76'", + }, { + typ: replication.TypeDateTime2, + metadata: 3, + styp: querypb.Type_DATETIME, + // 765 = 0x02fd + data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 0xfd, 0x02}, + out: "'2012-06-21 15:45:17.765'", + }, { + typ: replication.TypeDateTime2, + metadata: 4, + styp: querypb.Type_DATETIME, + // 7654 = 0x1de6 + data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 0xe6, 0x1d}, + out: "'2012-06-21 15:45:17.7654'", + }, { + typ: replication.TypeDateTime2, + metadata: 5, + styp: querypb.Type_DATETIME, + // 76543 = 0x012aff + data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 0xff, 0x2a, 0x01}, + out: "'2012-06-21 15:45:17.76543'", + }, { + typ: replication.TypeDateTime2, + metadata: 6, + styp: querypb.Type_DATETIME, + // 765432 = 0x0badf8 + data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 0xf8, 0xad, 0x0b}, + out: "'2012-06-21 15:45:17.765432'", + }, { + // Time2 + typ: replication.TypeTime2, + metadata: 2, + styp: querypb.Type_TIME, + data: []byte{0xff, 0xff, 0x7f, 0x9d}, + out: "'-00:00:00.99'", + }, { + typ: replication.TypeTime2, + metadata: 2, + styp: querypb.Type_TIME, + data: []byte{0xff, 0xff, 0x7f, 0x9d}, + out: "'-00:00:00.99'", + }, { + typ: replication.TypeTime2, + metadata: 4, + styp: querypb.Type_TIME, + data: []byte{0xfe, 0xff, 0x7f, 0xff, 0xff}, + out: "'-00:00:01.0001'", + }, { + typ: replication.TypeTime2, + metadata: 0, + 
// 15 << 12 + 34 << 6 + 54 = 63670 = 0x00f8b6 + // and need to add 0x800000 + styp: querypb.Type_TIME, + data: []byte{0xb6, 0xf8, 0x80}, + out: "'15:34:54'", + }} + + for _, tcase := range testcases { + // Copy the data into a larger buffer (one extra byte + // on both sides), so we make sure the 'pos' field works. + padded := make([]byte, len(tcase.data)+2) + copy(padded[1:], tcase.data) + + out, l, err := cellAsSQL(padded, 1, tcase.typ, tcase.metadata, tcase.styp) + if err != nil || l != len(tcase.data) || out != tcase.out { + t.Errorf("testcase cellAsSQL(%v,%v,%v) returned unexpected result: %v %v %v, was expecting %v %v ", tcase.typ, tcase.styp, tcase.data, out, l, err, tcase.out, len(tcase.data)) + } + } +} diff --git a/go/vt/tabletserver/engines/schema/schema_engine.go b/go/vt/tabletserver/engines/schema/schema_engine.go index e0b44946f96..a4bab71a8e5 100644 --- a/go/vt/tabletserver/engines/schema/schema_engine.go +++ b/go/vt/tabletserver/engines/schema/schema_engine.go @@ -475,3 +475,23 @@ func (se *Engine) handleHTTPSchema(response http.ResponseWriter, request *http.R json.HTMLEscape(buf, b) response.Write(buf.Bytes()) } + +// Test methods. Do not use in non-test code. + +// NewEngineForTests creates a new engine, that can't query the +// database, and will not send notifications. It starts opened, and +// doesn't reload. Use SetTableForTests to set table schema. +func NewEngineForTests() *Engine { + se := &Engine{ + isOpen: true, + tables: make(map[string]*Table), + } + return se +} + +// SetTableForTests puts a Table in the map directly. +func (se *Engine) SetTableForTests(table *Table) { + se.mu.Lock() + defer se.mu.Unlock() + se.tables[table.Name.String()] = table +} From 606d68ca29726810f19875f5a92d964975a42078 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Wed, 8 Mar 2017 12:28:56 -0800 Subject: [PATCH 087/108] Moving eventtoken.Minimum to go/sqltypes. That way it removes a dependency, and I can now merge go/mysqlconn/replication into go/mysqlconn. 
--- go/sqltypes/event_token.go | 24 ++++++++++ go/sqltypes/event_token_test.go | 63 +++++++++++++++++++++++++ go/sqltypes/result.go | 7 +-- go/vt/binlog/eventtoken/compare.go | 23 --------- go/vt/binlog/eventtoken/compare_test.go | 56 ---------------------- 5 files changed, 89 insertions(+), 84 deletions(-) create mode 100644 go/sqltypes/event_token.go create mode 100644 go/sqltypes/event_token_test.go diff --git a/go/sqltypes/event_token.go b/go/sqltypes/event_token.go new file mode 100644 index 00000000000..1eef7c08450 --- /dev/null +++ b/go/sqltypes/event_token.go @@ -0,0 +1,24 @@ +package sqltypes + +import querypb "github.com/youtube/vitess/go/vt/proto/query" + +// EventTokenMinimum returns an event token that is guaranteed to +// happen before both provided EventToken objects. Note it doesn't +// parse the position, but rather only uses the timestamp. This is +// meant to be used for EventToken objects coming from different +// source shard. +func EventTokenMinimum(ev1, ev2 *querypb.EventToken) *querypb.EventToken { + if ev1 == nil || ev2 == nil { + // One or the other is not set, we can't do anything. 
+ return nil + } + + if ev1.Timestamp < ev2.Timestamp { + return &querypb.EventToken{ + Timestamp: ev1.Timestamp, + } + } + return &querypb.EventToken{ + Timestamp: ev2.Timestamp, + } +} diff --git a/go/sqltypes/event_token_test.go b/go/sqltypes/event_token_test.go new file mode 100644 index 00000000000..eda0a93aca9 --- /dev/null +++ b/go/sqltypes/event_token_test.go @@ -0,0 +1,63 @@ +package sqltypes + +import ( + "testing" + + "github.com/golang/protobuf/proto" + querypb "github.com/youtube/vitess/go/vt/proto/query" +) + +func TestEventTokenMinimum(t *testing.T) { + testcases := []struct { + ev1 *querypb.EventToken + ev2 *querypb.EventToken + expected *querypb.EventToken + }{{ + ev1: nil, + ev2: nil, + expected: nil, + }, { + ev1: &querypb.EventToken{ + Timestamp: 123, + }, + ev2: nil, + expected: nil, + }, { + ev1: nil, + ev2: &querypb.EventToken{ + Timestamp: 123, + }, + expected: nil, + }, { + ev1: &querypb.EventToken{ + Timestamp: 123, + }, + ev2: &querypb.EventToken{ + Timestamp: 456, + }, + expected: &querypb.EventToken{ + Timestamp: 123, + }, + }, { + ev1: &querypb.EventToken{ + Timestamp: 456, + }, + ev2: &querypb.EventToken{ + Timestamp: 123, + }, + expected: &querypb.EventToken{ + Timestamp: 123, + }, + }} + + for _, tcase := range testcases { + got := EventTokenMinimum(tcase.ev1, tcase.ev2) + if tcase.expected == nil && got != nil { + t.Errorf("expected nil result for Minimum(%v, %v) but got: %v", tcase.ev1, tcase.ev2, got) + continue + } + if !proto.Equal(got, tcase.expected) { + t.Errorf("got %v but expected %v for Minimum(%v, %v)", got, tcase.expected, tcase.ev1, tcase.ev2) + } + } +} diff --git a/go/sqltypes/result.go b/go/sqltypes/result.go index e54896ee196..b509aa87376 100644 --- a/go/sqltypes/result.go +++ b/go/sqltypes/result.go @@ -4,10 +4,7 @@ package sqltypes -import ( - "github.com/youtube/vitess/go/vt/binlog/eventtoken" - querypb "github.com/youtube/vitess/go/vt/proto/query" -) +import querypb "github.com/youtube/vitess/go/vt/proto/query" 
// Result represents a query result. type Result struct { @@ -167,7 +164,7 @@ func (result *Result) AppendResult(src *Result) { // discard the new one. if result.Extras != nil { // Note if any of the two is nil, we get nil. - result.Extras.EventToken = eventtoken.Minimum(result.Extras.EventToken, src.Extras.EventToken) + result.Extras.EventToken = EventTokenMinimum(result.Extras.EventToken, src.Extras.EventToken) result.Extras.Fresher = result.Extras.Fresher && src.Extras.Fresher } diff --git a/go/vt/binlog/eventtoken/compare.go b/go/vt/binlog/eventtoken/compare.go index c4f90d27b43..974080e25c4 100644 --- a/go/vt/binlog/eventtoken/compare.go +++ b/go/vt/binlog/eventtoken/compare.go @@ -8,29 +8,6 @@ import ( querypb "github.com/youtube/vitess/go/vt/proto/query" ) -// Minimum returns an event token that is guaranteed to happen before -// both provided EventToken objects. -// -// FIXME(alainjobart) for now, we always strip the shard and position, -// and only look at timestamp. It is only used across shards so it's -// not a big deal. When we compare values within a shard, we'll have -// to fix this. -func Minimum(ev1, ev2 *querypb.EventToken) *querypb.EventToken { - if ev1 == nil || ev2 == nil { - // One or the other is not set, we can't do anything. - return nil - } - - if ev1.Timestamp < ev2.Timestamp { - return &querypb.EventToken{ - Timestamp: ev1.Timestamp, - } - } - return &querypb.EventToken{ - Timestamp: ev2.Timestamp, - } -} - // Fresher compares two event tokens. It returns a negative number if // ev1ev2. 
In case of doubt (we don't have enough information to know diff --git a/go/vt/binlog/eventtoken/compare_test.go b/go/vt/binlog/eventtoken/compare_test.go index 46f5367c6f9..9f6868e2000 100644 --- a/go/vt/binlog/eventtoken/compare_test.go +++ b/go/vt/binlog/eventtoken/compare_test.go @@ -3,65 +3,9 @@ package eventtoken import ( "testing" - "github.com/golang/protobuf/proto" querypb "github.com/youtube/vitess/go/vt/proto/query" ) -func TestMinimum(t *testing.T) { - testcases := []struct { - ev1 *querypb.EventToken - ev2 *querypb.EventToken - expected *querypb.EventToken - }{{ - ev1: nil, - ev2: nil, - expected: nil, - }, { - ev1: &querypb.EventToken{ - Timestamp: 123, - }, - ev2: nil, - expected: nil, - }, { - ev1: nil, - ev2: &querypb.EventToken{ - Timestamp: 123, - }, - expected: nil, - }, { - ev1: &querypb.EventToken{ - Timestamp: 123, - }, - ev2: &querypb.EventToken{ - Timestamp: 456, - }, - expected: &querypb.EventToken{ - Timestamp: 123, - }, - }, { - ev1: &querypb.EventToken{ - Timestamp: 456, - }, - ev2: &querypb.EventToken{ - Timestamp: 123, - }, - expected: &querypb.EventToken{ - Timestamp: 123, - }, - }} - - for _, tcase := range testcases { - got := Minimum(tcase.ev1, tcase.ev2) - if tcase.expected == nil && got != nil { - t.Errorf("expected nil result for Minimum(%v, %v) but got: %v", tcase.ev1, tcase.ev2, got) - continue - } - if !proto.Equal(got, tcase.expected) { - t.Errorf("got %v but expected %v for Minimum(%v, %v)", got, tcase.expected, tcase.ev1, tcase.ev2) - } - } -} - func TestFresher(t *testing.T) { testcases := []struct { ev1 *querypb.EventToken From 147336e4192eef66b0be1482cf339dff7dbdeabd Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Wed, 8 Mar 2017 12:57:13 -0800 Subject: [PATCH 088/108] Consolidating two cell printing methods. * Adding type to original cellData method. Now that dependency is fixed, we can use sqltypes.IsSigned from mysqlconn/replication library. * De-dupping printing code, using sqltypes.Value. 
* Using strconv instead of Sprintf when possible. For numeric types. --- .../replication/binlog_event_common.go | 148 ++++++--- .../replication/binlog_event_common_test.go | 207 +++++++++---- go/vt/binlog/binlog_streamer.go | 252 +--------------- go/vt/binlog/binlog_streamer_rbr_test.go | 285 ------------------ 4 files changed, 256 insertions(+), 636 deletions(-) diff --git a/go/mysqlconn/replication/binlog_event_common.go b/go/mysqlconn/replication/binlog_event_common.go index 9ed3b7a11bc..7800f7a3b51 100644 --- a/go/mysqlconn/replication/binlog_event_common.go +++ b/go/mysqlconn/replication/binlog_event_common.go @@ -4,8 +4,12 @@ import ( "bytes" "encoding/binary" "fmt" + "strconv" + + "github.com/youtube/vitess/go/sqltypes" binlogdatapb "github.com/youtube/vitess/go/vt/proto/binlogdata" + querypb "github.com/youtube/vitess/go/vt/proto/query" ) // binlogEvent wraps a raw packet buffer and provides methods to examine it @@ -524,29 +528,65 @@ func cellLength(data []byte, pos int, typ byte, metadata uint16) (int, error) { } } -// cellData returns the data for a cell as a string. This is meant to -// be used in tests only, as it is missing the type flags to interpret -// the data correctly. -func cellData(data []byte, pos int, typ byte, metadata uint16) (string, int, error) { +// CellValue returns the data for a cell as a sqltypes.Value, and how +// many bytes it takes. It only uses the querypb.Type value for the +// signed flag. 
+func CellValue(data []byte, pos int, typ byte, metadata uint16, styp querypb.Type) (sqltypes.Value, int, error) { switch typ { case TypeTiny: - return fmt.Sprintf("%v", data[pos]), 1, nil + if sqltypes.IsSigned(styp) { + return sqltypes.MakeTrusted(querypb.Type_INT8, + strconv.AppendInt(nil, int64(int8(data[pos])), 10)), 1, nil + } + return sqltypes.MakeTrusted(querypb.Type_UINT8, + strconv.AppendUint(nil, uint64(data[pos]), 10)), 1, nil case TypeYear: - return fmt.Sprintf("%v", 1900+int(data[pos])), 1, nil + return sqltypes.MakeTrusted(querypb.Type_YEAR, + strconv.AppendUint(nil, uint64(data[pos])+1900, 10)), 1, nil case TypeShort: val := binary.LittleEndian.Uint16(data[pos : pos+2]) - return fmt.Sprintf("%v", val), 2, nil + if sqltypes.IsSigned(styp) { + return sqltypes.MakeTrusted(querypb.Type_INT16, + strconv.AppendInt(nil, int64(int16(val)), 10)), 2, nil + } + return sqltypes.MakeTrusted(querypb.Type_UINT16, + strconv.AppendUint(nil, uint64(val), 10)), 2, nil case TypeInt24: - val := uint32(data[pos]) + - uint32(data[pos+1])<<8 + - uint32(data[pos+2])<<16 - return fmt.Sprintf("%v", val), 3, nil - case TypeLong, TypeTimestamp: + if sqltypes.IsSigned(styp) && data[pos+2]&128 > 0 { + // Negative number, have to extend the sign. + val := int32(uint32(data[pos]) + + uint32(data[pos+1])<<8 + + uint32(data[pos+2])<<16 + + uint32(255)<<24) + return sqltypes.MakeTrusted(querypb.Type_INT24, + strconv.AppendInt(nil, int64(val), 10)), 3, nil + } + // Positive number. 
+ val := uint64(data[pos]) + + uint64(data[pos+1])<<8 + + uint64(data[pos+2])<<16 + return sqltypes.MakeTrusted(querypb.Type_UINT24, + strconv.AppendUint(nil, val, 10)), 3, nil + case TypeLong: + val := binary.LittleEndian.Uint32(data[pos : pos+4]) + if sqltypes.IsSigned(styp) { + return sqltypes.MakeTrusted(querypb.Type_INT32, + strconv.AppendInt(nil, int64(int32(val)), 10)), 4, nil + } + return sqltypes.MakeTrusted(querypb.Type_UINT32, + strconv.AppendUint(nil, uint64(val), 10)), 4, nil + case TypeTimestamp: val := binary.LittleEndian.Uint32(data[pos : pos+4]) - return fmt.Sprintf("%v", val), 4, nil + return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + strconv.AppendUint(nil, uint64(val), 10)), 4, nil case TypeLongLong: val := binary.LittleEndian.Uint64(data[pos : pos+8]) - return fmt.Sprintf("%v", val), 8, nil + if sqltypes.IsSigned(styp) { + return sqltypes.MakeTrusted(querypb.Type_INT64, + strconv.AppendInt(nil, int64(val), 10)), 8, nil + } + return sqltypes.MakeTrusted(querypb.Type_UINT64, + strconv.AppendUint(nil, val, 10)), 8, nil case TypeDate, TypeNewDate: val := uint32(data[pos]) + uint32(data[pos+1])<<8 + @@ -554,13 +594,15 @@ func cellData(data []byte, pos int, typ byte, metadata uint16) (string, int, err day := val & 31 month := val >> 5 & 15 year := val >> 9 - return fmt.Sprintf("%04d-%02d-%02d", year, month, day), 3, nil + return sqltypes.MakeTrusted(querypb.Type_DATE, + []byte(fmt.Sprintf("%04d-%02d-%02d", year, month, day))), 3, nil case TypeTime: val := binary.LittleEndian.Uint32(data[pos : pos+4]) hour := val / 10000 minute := (val % 10000) / 100 second := val % 100 - return fmt.Sprintf("%02d:%02d:%02d", hour, minute, second), 4, nil + return sqltypes.MakeTrusted(querypb.Type_TIME, + []byte(fmt.Sprintf("%02d:%02d:%02d", hour, minute, second))), 4, nil case TypeDateTime: val := binary.LittleEndian.Uint64(data[pos : pos+8]) d := val / 1000000 @@ -571,53 +613,61 @@ func cellData(data []byte, pos int, typ byte, metadata uint16) (string, int, err 
hour := t / 10000 minute := (t % 10000) / 100 second := t % 100 - return fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, minute, second), 8, nil + return sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte(fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, minute, second))), 8, nil case TypeVarchar: // Length is encoded in 1 or 2 bytes. if metadata > 255 { l := int(uint64(data[pos]) | uint64(data[pos+1])<<8) - return string(data[pos+2 : pos+2+l]), l + 2, nil + return sqltypes.MakeTrusted(querypb.Type_VARCHAR, + data[pos+2:pos+2+l]), l + 2, nil } l := int(data[pos]) - return string(data[pos+1 : pos+1+l]), l + 1, nil + return sqltypes.MakeTrusted(querypb.Type_VARCHAR, + data[pos+1:pos+1+l]), l + 1, nil case TypeBit: + // The contents is just the bytes, quoted. nbits := ((metadata >> 8) * 8) + (metadata & 0xFF) l := (int(nbits) + 7) / 8 - var buf bytes.Buffer - for i := 0; i < l; i++ { - buf.WriteString(fmt.Sprintf("%08b", data[pos+i])) - } - return buf.String(), l, nil + return sqltypes.MakeTrusted(querypb.Type_BIT, + data[pos:pos+l]), l, nil case TypeTimestamp2: second := binary.LittleEndian.Uint32(data[pos : pos+4]) switch metadata { case 1: decimals := int(data[pos+4]) - return fmt.Sprintf("%v.%01d", second, decimals), 5, nil + return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v.%01d", second, decimals))), 5, nil case 2: decimals := int(data[pos+4]) - return fmt.Sprintf("%v.%02d", second, decimals), 5, nil + return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v.%02d", second, decimals))), 5, nil case 3: decimals := int(data[pos+4]) + int(data[pos+5])<<8 - return fmt.Sprintf("%v.%03d", second, decimals), 6, nil + return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v.%03d", second, decimals))), 6, nil case 4: decimals := int(data[pos+4]) + int(data[pos+5])<<8 - return fmt.Sprintf("%v.%04d", second, decimals), 6, nil + return 
sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v.%04d", second, decimals))), 6, nil case 5: decimals := int(data[pos+4]) + int(data[pos+5])<<8 + int(data[pos+6])<<16 - return fmt.Sprintf("%v.%05d", second, decimals), 7, nil + return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v.%05d", second, decimals))), 7, nil case 6: decimals := int(data[pos+4]) + int(data[pos+5])<<8 + int(data[pos+6])<<16 - return fmt.Sprintf("%v.%.6d", second, decimals), 7, nil + return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v.%.6d", second, decimals))), 7, nil } - return fmt.Sprintf("%v", second), 4, nil + return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + strconv.AppendUint(nil, uint64(second), 10)), 4, nil case TypeDateTime2: ymdhms := (uint64(data[pos]) | uint64(data[pos+1])<<8 | @@ -641,30 +691,37 @@ func cellData(data []byte, pos int, typ byte, metadata uint16) (string, int, err switch metadata { case 1: decimals := int(data[pos+5]) - return fmt.Sprintf("%v.%01d", datetime, decimals), 6, nil + return sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte(fmt.Sprintf("%v.%01d", datetime, decimals))), 6, nil case 2: decimals := int(data[pos+5]) - return fmt.Sprintf("%v.%02d", datetime, decimals), 6, nil + return sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte(fmt.Sprintf("%v.%02d", datetime, decimals))), 6, nil case 3: decimals := int(data[pos+5]) + int(data[pos+6])<<8 - return fmt.Sprintf("%v.%03d", datetime, decimals), 7, nil + return sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte(fmt.Sprintf("%v.%03d", datetime, decimals))), 7, nil case 4: decimals := int(data[pos+5]) + int(data[pos+6])<<8 - return fmt.Sprintf("%v.%04d", datetime, decimals), 7, nil + return sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte(fmt.Sprintf("%v.%04d", datetime, decimals))), 7, nil case 5: decimals := int(data[pos+5]) + int(data[pos+6])<<8 + int(data[pos+7])<<16 - return fmt.Sprintf("%v.%05d", datetime, decimals), 8, nil + 
return sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte(fmt.Sprintf("%v.%05d", datetime, decimals))), 8, nil case 6: decimals := int(data[pos+5]) + int(data[pos+6])<<8 + int(data[pos+7])<<16 - return fmt.Sprintf("%v.%.6d", datetime, decimals), 8, nil + return sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte(fmt.Sprintf("%v.%.6d", datetime, decimals))), 8, nil } - return datetime, 5, nil + return sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte(datetime)), 5, nil case TypeTime2: hms := (int64(data[pos]) | int64(data[pos+1])<<8 | @@ -730,10 +787,11 @@ func cellData(data []byte, pos int, typ byte, metadata uint16) (string, int, err hour := (hms >> 12) % (1 << 10) minute := (hms >> 6) % (1 << 6) second := hms % (1 << 6) - return fmt.Sprintf("%v%02d:%02d:%02d%v", sign, hour, minute, second, fracStr), 3 + (int(metadata)+1)/2, nil + return sqltypes.MakeTrusted(querypb.Type_TIME, + []byte(fmt.Sprintf("%v%02d:%02d:%02d%v", sign, hour, minute, second, fracStr))), 3 + (int(metadata)+1)/2, nil default: - return "", 0, fmt.Errorf("Unsupported type %v", typ) + return sqltypes.NULL, 0, fmt.Errorf("Unsupported type %v", typ) } } @@ -871,6 +929,7 @@ func (ev binlogEvent) Rows(f BinlogFormat, tm *TableMap) (Rows, error) { // StringValuesForTests is a helper method to return the string value // of all columns in a row in a Row. Only use it in tests, as the // returned values cannot be interpreted correctly without the schema. +// We assume everything is unsigned in this method. 
func (rs *Rows) StringValuesForTests(tm *TableMap, rowIndex int) ([]string, error) { var result []string @@ -890,11 +949,11 @@ func (rs *Rows) StringValuesForTests(tm *TableMap, rowIndex int) ([]string, erro } // We have real data - value, l, err := cellData(data, pos, tm.Types[c], tm.Metadata[c]) + value, l, err := CellValue(data, pos, tm.Types[c], tm.Metadata[c], querypb.Type_UINT64) if err != nil { return nil, err } - result = append(result, value) + result = append(result, value.String()) pos += l valueIndex++ } @@ -905,6 +964,7 @@ func (rs *Rows) StringValuesForTests(tm *TableMap, rowIndex int) ([]string, erro // StringIdentifiesForTests is a helper method to return the string // identify of all columns in a row in a Row. Only use it in tests, as the // returned values cannot be interpreted correctly without the schema. +// We assume everything is unsigned in this method. func (rs *Rows) StringIdentifiesForTests(tm *TableMap, rowIndex int) ([]string, error) { var result []string @@ -924,11 +984,11 @@ func (rs *Rows) StringIdentifiesForTests(tm *TableMap, rowIndex int) ([]string, } // We have real data - value, l, err := cellData(data, pos, tm.Types[c], tm.Metadata[c]) + value, l, err := CellValue(data, pos, tm.Types[c], tm.Metadata[c], querypb.Type_UINT64) if err != nil { return nil, err } - result = append(result, value) + result = append(result, value.String()) pos += l valueIndex++ } diff --git a/go/mysqlconn/replication/binlog_event_common_test.go b/go/mysqlconn/replication/binlog_event_common_test.go index cb92c56189e..2add4db0ca1 100644 --- a/go/mysqlconn/replication/binlog_event_common_test.go +++ b/go/mysqlconn/replication/binlog_event_common_test.go @@ -1,11 +1,14 @@ package replication import ( + "bytes" "fmt" "reflect" "testing" + "github.com/youtube/vitess/go/sqltypes" binlogdatapb "github.com/youtube/vitess/go/vt/proto/binlogdata" + querypb "github.com/youtube/vitess/go/vt/proto/query" ) // sample event data @@ -339,110 +342,167 @@ func 
TestCellLengthAndData(t *testing.T) { testcases := []struct { typ byte metadata uint16 + styp querypb.Type data []byte - out string + out sqltypes.Value }{{ typ: TypeTiny, + styp: querypb.Type_UINT8, data: []byte{0x82}, - out: "130", + out: sqltypes.MakeTrusted(querypb.Type_UINT8, + []byte("130")), + }, { + typ: TypeTiny, + styp: querypb.Type_INT8, + data: []byte{0xfe}, + out: sqltypes.MakeTrusted(querypb.Type_INT8, + []byte("-2")), }, { typ: TypeYear, data: []byte{0x82}, - out: "2030", + out: sqltypes.MakeTrusted(querypb.Type_YEAR, + []byte("2030")), + }, { + typ: TypeShort, + styp: querypb.Type_UINT16, + data: []byte{0x82, 0x81}, + out: sqltypes.MakeTrusted(querypb.Type_UINT16, + []byte(fmt.Sprintf("%v", 0x8182))), }, { typ: TypeShort, - data: []byte{0x02, 0x01}, - out: fmt.Sprintf("%v", 0x0102), + styp: querypb.Type_INT16, + data: []byte{0xfe, 0xff}, + out: sqltypes.MakeTrusted(querypb.Type_INT16, + []byte(fmt.Sprintf("%v", -1-int32(0x0001)))), + }, { + typ: TypeInt24, + styp: querypb.Type_UINT24, + data: []byte{0x83, 0x82, 0x81}, + out: sqltypes.MakeTrusted(querypb.Type_UINT24, + []byte(fmt.Sprintf("%v", 0x818283))), }, { typ: TypeInt24, - data: []byte{0x03, 0x02, 0x01}, - out: fmt.Sprintf("%v", 0x010203), + styp: querypb.Type_INT24, + data: []byte{0xfd, 0xfe, 0xff}, + out: sqltypes.MakeTrusted(querypb.Type_INT24, + []byte(fmt.Sprintf("%v", -1-int32(0x000102)))), }, { typ: TypeLong, - data: []byte{0x04, 0x03, 0x02, 0x01}, - out: fmt.Sprintf("%v", 0x01020304), + styp: querypb.Type_UINT32, + data: []byte{0x84, 0x83, 0x82, 0x81}, + out: sqltypes.MakeTrusted(querypb.Type_UINT32, + []byte(fmt.Sprintf("%v", 0x81828384))), + }, { + typ: TypeLong, + styp: querypb.Type_INT32, + data: []byte{0xfc, 0xfd, 0xfe, 0xff}, + out: sqltypes.MakeTrusted(querypb.Type_INT32, + []byte(fmt.Sprintf("%v", -1-int32(0x00010203)))), }, { typ: TypeTimestamp, data: []byte{0x84, 0x83, 0x82, 0x81}, - out: fmt.Sprintf("%v", 0x81828384), + out: sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + 
[]byte(fmt.Sprintf("%v", 0x81828384))), + }, { + typ: TypeLongLong, + styp: querypb.Type_UINT64, + data: []byte{0x88, 0x87, 0x86, 0x85, 0x84, 0x83, 0x82, 0x81}, + out: sqltypes.MakeTrusted(querypb.Type_UINT64, + []byte(fmt.Sprintf("%v", uint64(0x8182838485868788)))), }, { typ: TypeLongLong, - data: []byte{0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01}, - out: fmt.Sprintf("%v", 0x0102030405060708), + styp: querypb.Type_INT64, + data: []byte{0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff}, + out: sqltypes.MakeTrusted(querypb.Type_INT64, + []byte(fmt.Sprintf("%v", -1-int64(0x0001020304050607)))), }, { typ: TypeDate, // 2010 << 9 + 10 << 5 + 3 = 1029443 = 0x0fb543 data: []byte{0x43, 0xb5, 0x0f}, - out: "2010-10-03", + out: sqltypes.MakeTrusted(querypb.Type_DATE, + []byte("2010-10-03")), }, { typ: TypeNewDate, // 2010 << 9 + 10 << 5 + 3 = 1029443 = 0x0fb543 data: []byte{0x43, 0xb5, 0x0f}, - out: "2010-10-03", + out: sqltypes.MakeTrusted(querypb.Type_DATE, + []byte("2010-10-03")), }, { typ: TypeTime, // 154532 = 0x00025ba4 data: []byte{0xa4, 0x5b, 0x02, 0x00}, - out: "15:45:32", + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("15:45:32")), }, { typ: TypeDateTime, // 19840304154532 = 0x120b6e4807a4 data: []byte{0xa4, 0x07, 0x48, 0x6e, 0x0b, 0x12, 0x00, 0x00}, - out: "1984-03-04 15:45:32", + out: sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte("1984-03-04 15:45:32")), }, { typ: TypeVarchar, metadata: 20, // one byte length encoding data: []byte{3, 'a', 'b', 'c'}, - out: "abc", + out: sqltypes.MakeTrusted(querypb.Type_VARCHAR, + []byte("abc")), }, { typ: TypeVarchar, metadata: 384, // two bytes length encoding data: []byte{3, 0, 'a', 'b', 'c'}, - out: "abc", + out: sqltypes.MakeTrusted(querypb.Type_VARCHAR, + []byte("abc")), }, { typ: TypeBit, metadata: 0x0107, data: []byte{0x3, 0x1}, - out: "0000001100000001", + out: sqltypes.MakeTrusted(querypb.Type_BIT, + []byte{3, 1}), }, { typ: TypeTimestamp2, metadata: 0, data: []byte{0x84, 0x83, 0x82, 0x81}, - out: 
fmt.Sprintf("%v", 0x81828384), + out: sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v", 0x81828384))), }, { typ: TypeTimestamp2, metadata: 1, data: []byte{0x84, 0x83, 0x82, 0x81, 7}, - out: fmt.Sprintf("%v.7", 0x81828384), + out: sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v.7", 0x81828384))), }, { typ: TypeTimestamp2, metadata: 2, data: []byte{0x84, 0x83, 0x82, 0x81, 76}, - out: fmt.Sprintf("%v.76", 0x81828384), + out: sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v.76", 0x81828384))), }, { typ: TypeTimestamp2, metadata: 3, // 765 = 0x02fd data: []byte{0x84, 0x83, 0x82, 0x81, 0xfd, 0x02}, - out: fmt.Sprintf("%v.765", 0x81828384), + out: sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v.765", 0x81828384))), }, { typ: TypeTimestamp2, metadata: 4, // 7654 = 0x1de6 data: []byte{0x84, 0x83, 0x82, 0x81, 0xe6, 0x1d}, - out: fmt.Sprintf("%v.7654", 0x81828384), + out: sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v.7654", 0x81828384))), }, { typ: TypeTimestamp2, metadata: 5, // 76543 = 0x012aff data: []byte{0x84, 0x83, 0x82, 0x81, 0xff, 0x2a, 0x01}, - out: fmt.Sprintf("%v.76543", 0x81828384), + out: sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v.76543", 0x81828384))), }, { typ: TypeTimestamp2, metadata: 6, // 765432 = 0x0badf8 data: []byte{0x84, 0x83, 0x82, 0x81, 0xf8, 0xad, 0x0b}, - out: fmt.Sprintf("%v.765432", 0x81828384), + out: sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v.765432", 0x81828384))), }, { typ: TypeDateTime2, metadata: 0, @@ -450,41 +510,48 @@ func TestCellLengthAndData(t *testing.T) { // = 109734198097 = 0x198caafb51 // Then have to add 0x8000000000 = 0x998caafb51 data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99}, - out: "2012-06-21 15:45:17", + out: sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte("2012-06-21 15:45:17")), }, { typ: TypeDateTime2, metadata: 1, data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 
7}, - out: "2012-06-21 15:45:17.7", + out: sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte("2012-06-21 15:45:17.7")), }, { typ: TypeDateTime2, metadata: 2, data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 76}, - out: "2012-06-21 15:45:17.76", + out: sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte("2012-06-21 15:45:17.76")), }, { typ: TypeDateTime2, metadata: 3, // 765 = 0x02fd data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 0xfd, 0x02}, - out: "2012-06-21 15:45:17.765", + out: sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte("2012-06-21 15:45:17.765")), }, { typ: TypeDateTime2, metadata: 4, // 7654 = 0x1de6 data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 0xe6, 0x1d}, - out: "2012-06-21 15:45:17.7654", + out: sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte("2012-06-21 15:45:17.7654")), }, { typ: TypeDateTime2, metadata: 5, // 76543 = 0x012aff data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 0xff, 0x2a, 0x01}, - out: "2012-06-21 15:45:17.76543", + out: sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte("2012-06-21 15:45:17.76543")), }, { typ: TypeDateTime2, metadata: 6, // 765432 = 0x0badf8 data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 0xf8, 0xad, 0x0b}, - out: "2012-06-21 15:45:17.765432", + out: sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte("2012-06-21 15:45:17.765432")), }, { // This first set of tests is from a comment in // sql-common/my_time.c: @@ -499,117 +566,139 @@ func TestCellLengthAndData(t *testing.T) { typ: TypeTime2, metadata: 2, data: []byte{0x00, 0x00, 0x80, 0x00}, - out: "00:00:00.00", + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("00:00:00.00")), }, { typ: TypeTime2, metadata: 2, data: []byte{0xff, 0xff, 0x7f, 0xff}, - out: "-00:00:00.01", + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:00.01")), }, { typ: TypeTime2, metadata: 2, data: []byte{0xff, 0xff, 0x7f, 0x9d}, - out: "-00:00:00.99", + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:00.99")), }, { typ: TypeTime2, metadata: 2, data: []byte{0xff, 0xff, 
0x7f, 0x00}, - out: "-00:00:01.00", + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:01.00")), }, { typ: TypeTime2, metadata: 2, data: []byte{0xfe, 0xff, 0x7f, 0xff}, - out: "-00:00:01.01", + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:01.01")), }, { typ: TypeTime2, metadata: 2, data: []byte{0xfe, 0xff, 0x7f, 0xf6}, - out: "-00:00:01.10", + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:01.10")), }, { // Similar tests for 4 decimals. typ: TypeTime2, metadata: 4, data: []byte{0x00, 0x00, 0x80, 0x00, 0x00}, - out: "00:00:00.0000", + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("00:00:00.0000")), }, { typ: TypeTime2, metadata: 4, data: []byte{0xff, 0xff, 0x7f, 0xff, 0xff}, - out: "-00:00:00.0001", + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:00.0001")), }, { typ: TypeTime2, metadata: 4, data: []byte{0xff, 0xff, 0x7f, 0x9d, 0xff}, - out: "-00:00:00.0099", + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:00.0099")), }, { typ: TypeTime2, metadata: 4, data: []byte{0xff, 0xff, 0x7f, 0x00, 0x00}, - out: "-00:00:01.0000", + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:01.0000")), }, { typ: TypeTime2, metadata: 4, data: []byte{0xfe, 0xff, 0x7f, 0xff, 0xff}, - out: "-00:00:01.0001", + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:01.0001")), }, { typ: TypeTime2, metadata: 4, data: []byte{0xfe, 0xff, 0x7f, 0xf6, 0xff}, - out: "-00:00:01.0010", + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:01.0010")), }, { // Similar tests for 6 decimals. 
typ: TypeTime2, metadata: 6, data: []byte{0x00, 0x00, 0x80, 0x00, 0x00, 0x00}, - out: "00:00:00.000000", + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("00:00:00.000000")), }, { typ: TypeTime2, metadata: 6, data: []byte{0xff, 0xff, 0x7f, 0xff, 0xff, 0xff}, - out: "-00:00:00.000001", + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:00.000001")), }, { typ: TypeTime2, metadata: 6, data: []byte{0xff, 0xff, 0x7f, 0x9d, 0xff, 0xff}, - out: "-00:00:00.000099", + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:00.000099")), }, { typ: TypeTime2, metadata: 6, data: []byte{0xff, 0xff, 0x7f, 0x00, 0x00, 0x00}, - out: "-00:00:01.000000", + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:01.000000")), }, { typ: TypeTime2, metadata: 6, data: []byte{0xfe, 0xff, 0x7f, 0xff, 0xff, 0xff}, - out: "-00:00:01.000001", + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:01.000001")), }, { typ: TypeTime2, metadata: 6, data: []byte{0xfe, 0xff, 0x7f, 0xf6, 0xff, 0xff}, - out: "-00:00:01.000010", + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:01.000010")), }, { // Few more tests. 
typ: TypeTime2, metadata: 0, data: []byte{0x00, 0x00, 0x80}, - out: "00:00:00", + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("00:00:00")), }, { typ: TypeTime2, metadata: 1, data: []byte{0x01, 0x00, 0x80, 0x0a}, - out: "00:00:01.1", + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("00:00:01.1")), }, { typ: TypeTime2, metadata: 2, data: []byte{0x01, 0x00, 0x80, 0x0a}, - out: "00:00:01.10", + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("00:00:01.10")), }, { typ: TypeTime2, metadata: 0, // 15 << 12 + 34 << 6 + 54 = 63670 = 0x00f8b6 // and need to add 0x800000 data: []byte{0xb6, 0xf8, 0x80}, - out: "15:34:54", + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("15:34:54")), }} for _, tcase := range testcases { @@ -624,10 +713,10 @@ func TestCellLengthAndData(t *testing.T) { t.Errorf("testcase cellLength(%v,%v) returned unexpected result: %v %v", tcase.typ, tcase.data, l, err) } - // Test cellData (only used for tests, but might as well). - out, l, err := cellData(padded, 1, tcase.typ, tcase.metadata) - if err != nil || l != len(tcase.data) || out != tcase.out { - t.Errorf("testcase cellData(%v,%v) returned unexpected result: %v %v %v, was expecting %v %v ", tcase.typ, tcase.data, out, l, err, tcase.out, len(tcase.data)) + // Test CellValue. 
+ out, l, err := CellValue(padded, 1, tcase.typ, tcase.metadata, tcase.styp) + if err != nil || l != len(tcase.data) || out.Type() != tcase.out.Type() || bytes.Compare(out.Raw(), tcase.out.Raw()) != 0 { + t.Errorf("testcase cellData(%v,%v) returned unexpected result: %v(%v) %v %v, was expecting %v(%v) %v ", tcase.typ, tcase.data, out, out.Type(), l, err, tcase.out, tcase.out.Type(), len(tcase.data)) } } } diff --git a/go/vt/binlog/binlog_streamer.go b/go/vt/binlog/binlog_streamer.go index e012ad8bf70..d499bb0ecf2 100644 --- a/go/vt/binlog/binlog_streamer.go +++ b/go/vt/binlog/binlog_streamer.go @@ -6,7 +6,6 @@ package binlog import ( "bytes" - "encoding/binary" "fmt" "io" "strings" @@ -16,7 +15,6 @@ import ( "github.com/youtube/vitess/go/mysqlconn/replication" "github.com/youtube/vitess/go/sqldb" - "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/stats" "github.com/youtube/vitess/go/vt/mysqlctl" "github.com/youtube/vitess/go/vt/sqlparser" @@ -466,11 +464,11 @@ func writeValuesAsSQL(sql *bytes.Buffer, rs *replication.Rows, tm *replication.T } // We have real data - value, l, err := cellAsSQL(data, pos, tm.Types[c], tm.Metadata[c], ti.Columns[c].Type) + value, l, err := replication.CellValue(data, pos, tm.Types[c], tm.Metadata[c], ti.Columns[c].Type) if err != nil { return err } - sql.WriteString(value) + value.EncodeSQL(sql) pos += l valueIndex++ } @@ -504,256 +502,14 @@ func writeIdentifiesAsSQL(sql *bytes.Buffer, rs *replication.Rows, tm *replicati } // We have real data - value, l, err := cellAsSQL(data, pos, tm.Types[c], tm.Metadata[c], ti.Columns[c].Type) + value, l, err := replication.CellValue(data, pos, tm.Types[c], tm.Metadata[c], ti.Columns[c].Type) if err != nil { return err } - sql.WriteString(value) + value.EncodeSQL(sql) pos += l valueIndex++ } return nil } - -// cellAsSQL parses the data for a cell inside a RBR event, and returns -// the SQL representation of that cell, given its RBR type, and schema type. 
-func cellAsSQL(data []byte, pos int, typ byte, metadata uint16, styp querypb.Type) (string, int, error) { - switch typ { - case replication.TypeTiny: - if sqltypes.IsSigned(styp) { - return fmt.Sprintf("%v", int8(data[pos])), 1, nil - } - return fmt.Sprintf("%v", uint8(data[pos])), 1, nil - case replication.TypeYear: - return fmt.Sprintf("%v", 1900+int(data[pos])), 1, nil - case replication.TypeShort: - val := binary.LittleEndian.Uint16(data[pos : pos+2]) - if sqltypes.IsSigned(styp) { - return fmt.Sprintf("%v", int16(val)), 2, nil - } - return fmt.Sprintf("%v", val), 2, nil - case replication.TypeInt24: - if sqltypes.IsSigned(styp) && data[pos+2]&128 > 0 { - // Negative number, have to extend the sign. - val := int32(uint32(data[pos]) + - uint32(data[pos+1])<<8 + - uint32(data[pos+2])<<16 + - uint32(255)<<24) - return fmt.Sprintf("%v", val), 3, nil - } - // Positive number. - val := uint32(data[pos]) + - uint32(data[pos+1])<<8 + - uint32(data[pos+2])<<16 - return fmt.Sprintf("%v", val), 3, nil - case replication.TypeLong: - val := binary.LittleEndian.Uint32(data[pos : pos+4]) - if sqltypes.IsSigned(styp) { - return fmt.Sprintf("%v", int32(val)), 4, nil - } - return fmt.Sprintf("%v", val), 4, nil - case replication.TypeTimestamp: - val := binary.LittleEndian.Uint32(data[pos : pos+4]) - return fmt.Sprintf("%v", val), 4, nil - case replication.TypeLongLong: - val := binary.LittleEndian.Uint64(data[pos : pos+8]) - if sqltypes.IsSigned(styp) { - return fmt.Sprintf("%v", int64(val)), 8, nil - } - return fmt.Sprintf("%v", val), 8, nil - case replication.TypeDate, replication.TypeNewDate: - val := uint32(data[pos]) + - uint32(data[pos+1])<<8 + - uint32(data[pos+2])<<16 - day := val & 31 - month := val >> 5 & 15 - year := val >> 9 - return fmt.Sprintf("'%04d-%02d-%02d'", year, month, day), 3, nil - case replication.TypeTime: - val := binary.LittleEndian.Uint32(data[pos : pos+4]) - hour := val / 10000 - minute := (val % 10000) / 100 - second := val % 100 - return 
fmt.Sprintf("'%02d:%02d:%02d'", hour, minute, second), 4, nil - case replication.TypeDateTime: - val := binary.LittleEndian.Uint64(data[pos : pos+8]) - d := val / 1000000 - t := val % 1000000 - year := d / 10000 - month := (d % 10000) / 100 - day := d % 100 - hour := t / 10000 - minute := (t % 10000) / 100 - second := t % 100 - return fmt.Sprintf("'%04d-%02d-%02d %02d:%02d:%02d'", year, month, day, hour, minute, second), 8, nil - case replication.TypeVarchar: - // Length is encoded in 1 or 2 bytes. - l := int(data[pos]) - headerSize := 1 - if metadata > 255 { - l += int(data[pos+1]) << 8 - headerSize = 2 - } - v := sqltypes.MakeTrusted(querypb.Type_VARCHAR, data[pos+headerSize:pos+headerSize+l]) - var buf bytes.Buffer - v.EncodeSQL(&buf) - return buf.String(), l + headerSize, nil - case replication.TypeBit: - nbits := ((metadata >> 8) * 8) + (metadata & 0xFF) - l := (int(nbits) + 7) / 8 - var buf bytes.Buffer - buf.WriteString("b'") - for i := 0; i < l; i++ { - buf.WriteString(fmt.Sprintf("%08b", data[pos+i])) - } - buf.WriteByte('\'') - return buf.String(), l, nil - case replication.TypeTimestamp2: - second := binary.LittleEndian.Uint32(data[pos : pos+4]) - switch metadata { - case 1: - decimals := int(data[pos+4]) - return fmt.Sprintf("%v.%01d", second, decimals), 5, nil - case 2: - decimals := int(data[pos+4]) - return fmt.Sprintf("%v.%02d", second, decimals), 5, nil - case 3: - decimals := int(data[pos+4]) + - int(data[pos+5])<<8 - return fmt.Sprintf("%v.%03d", second, decimals), 6, nil - case 4: - decimals := int(data[pos+4]) + - int(data[pos+5])<<8 - return fmt.Sprintf("%v.%04d", second, decimals), 6, nil - case 5: - decimals := int(data[pos+4]) + - int(data[pos+5])<<8 + - int(data[pos+6])<<16 - return fmt.Sprintf("%v.%05d", second, decimals), 7, nil - case 6: - decimals := int(data[pos+4]) + - int(data[pos+5])<<8 + - int(data[pos+6])<<16 - return fmt.Sprintf("%v.%.6d", second, decimals), 7, nil - } - return fmt.Sprintf("%v", second), 4, nil - case 
replication.TypeDateTime2: - ymdhms := (uint64(data[pos]) | - uint64(data[pos+1])<<8 | - uint64(data[pos+2])<<16 | - uint64(data[pos+3])<<24 | - uint64(data[pos+4])<<32) - uint64(0x8000000000) - ymd := ymdhms >> 17 - ym := ymd >> 5 - hms := ymdhms % (1 << 17) - - day := ymd % (1 << 5) - month := ym % 13 - year := ym / 13 - - second := hms % (1 << 6) - minute := (hms >> 6) % (1 << 6) - hour := hms >> 12 - - datetime := fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, minute, second) - - switch metadata { - case 1: - decimals := int(data[pos+5]) - return fmt.Sprintf("'%v.%01d'", datetime, decimals), 6, nil - case 2: - decimals := int(data[pos+5]) - return fmt.Sprintf("'%v.%02d'", datetime, decimals), 6, nil - case 3: - decimals := int(data[pos+5]) + - int(data[pos+6])<<8 - return fmt.Sprintf("'%v.%03d'", datetime, decimals), 7, nil - case 4: - decimals := int(data[pos+5]) + - int(data[pos+6])<<8 - return fmt.Sprintf("'%v.%04d'", datetime, decimals), 7, nil - case 5: - decimals := int(data[pos+5]) + - int(data[pos+6])<<8 + - int(data[pos+7])<<16 - return fmt.Sprintf("'%v.%05d'", datetime, decimals), 8, nil - case 6: - decimals := int(data[pos+5]) + - int(data[pos+6])<<8 + - int(data[pos+7])<<16 - return fmt.Sprintf("'%v.%.6d'", datetime, decimals), 8, nil - } - return fmt.Sprintf("'%v'", datetime), 5, nil - - case replication.TypeTime2: - hms := (int64(data[pos]) | - int64(data[pos+1])<<8 | - int64(data[pos+2])<<16) - 0x800000 - sign := "" - if hms < 0 { - hms = -hms - sign = "-" - } - - fracStr := "" - switch metadata { - case 1: - frac := int(data[pos+3]) - if sign == "-" && frac != 0 { - hms-- - frac = 0x100 - frac - } - fracStr = fmt.Sprintf(".%.1d", frac/10) - case 2: - frac := int(data[pos+3]) - if sign == "-" && frac != 0 { - hms-- - frac = 0x100 - frac - } - fracStr = fmt.Sprintf(".%.2d", frac) - case 3: - frac := int(data[pos+3]) | - int(data[pos+4])<<8 - if sign == "-" && frac != 0 { - hms-- - frac = 0x10000 - frac - } - fracStr = 
fmt.Sprintf(".%.3d", frac/10) - case 4: - frac := int(data[pos+3]) | - int(data[pos+4])<<8 - if sign == "-" && frac != 0 { - hms-- - frac = 0x10000 - frac - } - fracStr = fmt.Sprintf(".%.4d", frac) - case 5: - frac := int(data[pos+3]) | - int(data[pos+4])<<8 | - int(data[pos+5])<<16 - if sign == "-" && frac != 0 { - hms-- - frac = 0x1000000 - frac - } - fracStr = fmt.Sprintf(".%.5d", frac/10) - case 6: - frac := int(data[pos+3]) | - int(data[pos+4])<<8 | - int(data[pos+5])<<16 - if sign == "-" && frac != 0 { - hms-- - frac = 0x1000000 - frac - } - fracStr = fmt.Sprintf(".%.6d", frac) - } - - hour := (hms >> 12) % (1 << 10) - minute := (hms >> 6) % (1 << 6) - second := hms % (1 << 6) - return fmt.Sprintf("'%v%02d:%02d:%02d%v'", sign, hour, minute, second, fracStr), 3 + (int(metadata)+1)/2, nil - - default: - return "", 0, fmt.Errorf("Unsupported type %v", typ) - } -} diff --git a/go/vt/binlog/binlog_streamer_rbr_test.go b/go/vt/binlog/binlog_streamer_rbr_test.go index 66f69702640..0ce6cce4db0 100644 --- a/go/vt/binlog/binlog_streamer_rbr_test.go +++ b/go/vt/binlog/binlog_streamer_rbr_test.go @@ -1,7 +1,6 @@ package binlog import ( - "fmt" "reflect" "testing" @@ -139,287 +138,3 @@ func TestStreamerParseRBRUpdateEvent(t *testing.T) { t.Errorf("binlogConnStreamer.parseEvents(): got:\n%v\nwant:\n%v", got, want) } } - -func TestCellAsSQL(t *testing.T) { - testcases := []struct { - typ byte - metadata uint16 - styp querypb.Type - data []byte - out string - }{{ - // TypeTiny tests, unsigned and signed. - typ: replication.TypeTiny, - styp: querypb.Type_UINT8, - data: []byte{0x0a}, - out: "10", - }, { - typ: replication.TypeTiny, - styp: querypb.Type_UINT8, - data: []byte{0x82}, - out: "130", - }, { - typ: replication.TypeTiny, - styp: querypb.Type_INT8, - data: []byte{0x82}, - out: "-126", - }, { - // TypeYear is always unsigned. - typ: replication.TypeYear, - styp: querypb.Type_YEAR, - data: []byte{0x82}, - out: "2030", - }, { - // TypeShort tests, unsigned and signed. 
- typ: replication.TypeShort, - styp: querypb.Type_UINT16, - data: []byte{0x02, 0x01}, - out: fmt.Sprintf("%v", 0x0102), - }, { - typ: replication.TypeShort, - styp: querypb.Type_UINT16, - data: []byte{0x81, 0x82}, - out: fmt.Sprintf("%v", 0x8281), - }, { - typ: replication.TypeShort, - styp: querypb.Type_INT16, - data: []byte{0xfe, 0xff}, - out: "-2", - }, { - // TypeInt24 tests, unsigned and signed. - typ: replication.TypeInt24, - styp: querypb.Type_UINT24, - data: []byte{0x03, 0x02, 0x01}, - out: fmt.Sprintf("%v", 0x010203), - }, { - typ: replication.TypeInt24, - styp: querypb.Type_UINT24, - data: []byte{0x81, 0x82, 0x83}, - out: fmt.Sprintf("%v", 0x838281), - }, { - typ: replication.TypeInt24, - styp: querypb.Type_INT24, - data: []byte{0xfe, 0xff, 0xff}, - out: "-2", - }, { - // TypeLong tests, unsigned and signed. - typ: replication.TypeLong, - styp: querypb.Type_UINT32, - data: []byte{0x04, 0x03, 0x02, 0x01}, - out: fmt.Sprintf("%v", 0x01020304), - }, { - typ: replication.TypeLong, - styp: querypb.Type_UINT32, - data: []byte{0x81, 0x82, 0x83, 0x84}, - out: fmt.Sprintf("%v", 0x84838281), - }, { - typ: replication.TypeLong, - styp: querypb.Type_INT32, - data: []byte{0xfe, 0xff, 0xff, 0xff}, - out: "-2", - }, { - // TypeTimestamp tests. - typ: replication.TypeTimestamp, - styp: querypb.Type_TIMESTAMP, - data: []byte{0x84, 0x83, 0x82, 0x81}, - out: fmt.Sprintf("%v", 0x81828384), - }, { - // TypeLongLong tests, unsigned and signed. 
- typ: replication.TypeLongLong, - styp: querypb.Type_UINT64, - data: []byte{0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01}, - out: fmt.Sprintf("%v", 0x0102030405060708), - }, { - typ: replication.TypeLongLong, - styp: querypb.Type_UINT64, - data: []byte{0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88}, - out: fmt.Sprintf("%v", uint64(0x8887868584838281)), - }, { - typ: replication.TypeLongLong, - styp: querypb.Type_INT64, - data: []byte{0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, - out: "-2", - }, { - // TypeDate and TypeNewDate tests, unsigned and signed. - // 2010 << 9 + 10 << 5 + 3 = 1029443 = 0x0fb543 - typ: replication.TypeDate, - styp: querypb.Type_DATE, - data: []byte{0x43, 0xb5, 0x0f}, - out: "'2010-10-03'", - }, { - typ: replication.TypeNewDate, - styp: querypb.Type_DATE, - data: []byte{0x43, 0xb5, 0x0f}, - out: "'2010-10-03'", - }, { - // TypeTime tests. - // 154532 = 0x00025ba4 - typ: replication.TypeTime, - styp: querypb.Type_TIME, - data: []byte{0xa4, 0x5b, 0x02, 0x00}, - out: "'15:45:32'", - }, { - // TypeDateTime tests. 
- // 19840304154532 = 0x120b6e4807a4 - typ: replication.TypeDateTime, - styp: querypb.Type_DATETIME, - data: []byte{0xa4, 0x07, 0x48, 0x6e, 0x0b, 0x12, 0x00, 0x00}, - out: "'1984-03-04 15:45:32'", - }, { - // Varchar - typ: replication.TypeVarchar, - metadata: 30, - styp: querypb.Type_VARCHAR, - data: []byte{3, 'a', 'b', 'c'}, - out: "'abc'", - }, { - typ: replication.TypeVarchar, - metadata: 300, - styp: querypb.Type_VARCHAR, - data: []byte{3, 0, 'a', '\'', 'c'}, - out: "'a\\'c'", - }, { - // Bit - typ: replication.TypeBit, - metadata: 0x0107, - styp: querypb.Type_BIT, - data: []byte{0x03, 0x01}, - out: "b'0000001100000001'", - }, { - // Timestamp2 - typ: replication.TypeTimestamp2, - metadata: 0, - styp: querypb.Type_TIMESTAMP, - data: []byte{0x84, 0x83, 0x82, 0x81}, - out: fmt.Sprintf("%v", 0x81828384), - }, { - typ: replication.TypeTimestamp2, - metadata: 1, - styp: querypb.Type_TIMESTAMP, - data: []byte{0x84, 0x83, 0x82, 0x81, 7}, - out: fmt.Sprintf("%v.7", 0x81828384), - }, { - typ: replication.TypeTimestamp2, - metadata: 2, - styp: querypb.Type_TIMESTAMP, - data: []byte{0x84, 0x83, 0x82, 0x81, 76}, - out: fmt.Sprintf("%v.76", 0x81828384), - }, { - typ: replication.TypeTimestamp2, - metadata: 3, - styp: querypb.Type_TIMESTAMP, - // 765 = 0x02fd - data: []byte{0x84, 0x83, 0x82, 0x81, 0xfd, 0x02}, - out: fmt.Sprintf("%v.765", 0x81828384), - }, { - typ: replication.TypeTimestamp2, - metadata: 4, - styp: querypb.Type_TIMESTAMP, - // 7654 = 0x1de6 - data: []byte{0x84, 0x83, 0x82, 0x81, 0xe6, 0x1d}, - out: fmt.Sprintf("%v.7654", 0x81828384), - }, { - typ: replication.TypeTimestamp2, - metadata: 5, - styp: querypb.Type_TIMESTAMP, - // 76543 = 0x012aff - data: []byte{0x84, 0x83, 0x82, 0x81, 0xff, 0x2a, 0x01}, - out: fmt.Sprintf("%v.76543", 0x81828384), - }, { - typ: replication.TypeTimestamp2, - metadata: 6, - styp: querypb.Type_TIMESTAMP, - // 765432 = 0x0badf8 - data: []byte{0x84, 0x83, 0x82, 0x81, 0xf8, 0xad, 0x0b}, - out: fmt.Sprintf("%v.765432", 0x81828384), - 
}, { - // DateTime2 - typ: replication.TypeDateTime2, - metadata: 0, - styp: querypb.Type_DATETIME, - // (2012 * 13 + 6) << 22 + 21 << 17 + 15 << 12 + 45 << 6 + 17) - // = 109734198097 = 0x198caafb51 - // Then have to add 0x8000000000 = 0x998caafb51 - data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99}, - out: "'2012-06-21 15:45:17'", - }, { - typ: replication.TypeDateTime2, - metadata: 1, - styp: querypb.Type_DATETIME, - data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 7}, - out: "'2012-06-21 15:45:17.7'", - }, { - typ: replication.TypeDateTime2, - metadata: 2, - styp: querypb.Type_DATETIME, - data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 76}, - out: "'2012-06-21 15:45:17.76'", - }, { - typ: replication.TypeDateTime2, - metadata: 3, - styp: querypb.Type_DATETIME, - // 765 = 0x02fd - data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 0xfd, 0x02}, - out: "'2012-06-21 15:45:17.765'", - }, { - typ: replication.TypeDateTime2, - metadata: 4, - styp: querypb.Type_DATETIME, - // 7654 = 0x1de6 - data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 0xe6, 0x1d}, - out: "'2012-06-21 15:45:17.7654'", - }, { - typ: replication.TypeDateTime2, - metadata: 5, - styp: querypb.Type_DATETIME, - // 76543 = 0x012aff - data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 0xff, 0x2a, 0x01}, - out: "'2012-06-21 15:45:17.76543'", - }, { - typ: replication.TypeDateTime2, - metadata: 6, - styp: querypb.Type_DATETIME, - // 765432 = 0x0badf8 - data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 0xf8, 0xad, 0x0b}, - out: "'2012-06-21 15:45:17.765432'", - }, { - // Time2 - typ: replication.TypeTime2, - metadata: 2, - styp: querypb.Type_TIME, - data: []byte{0xff, 0xff, 0x7f, 0x9d}, - out: "'-00:00:00.99'", - }, { - typ: replication.TypeTime2, - metadata: 2, - styp: querypb.Type_TIME, - data: []byte{0xff, 0xff, 0x7f, 0x9d}, - out: "'-00:00:00.99'", - }, { - typ: replication.TypeTime2, - metadata: 4, - styp: querypb.Type_TIME, - data: []byte{0xfe, 0xff, 0x7f, 0xff, 0xff}, - out: "'-00:00:01.0001'", - }, { - typ: replication.TypeTime2, - metadata: 0, - 
// 15 << 12 + 34 << 6 + 54 = 63670 = 0x00f8b6 - // and need to add 0x800000 - styp: querypb.Type_TIME, - data: []byte{0xb6, 0xf8, 0x80}, - out: "'15:34:54'", - }} - - for _, tcase := range testcases { - // Copy the data into a larger buffer (one extra byte - // on both sides), so we make sure the 'pos' field works. - padded := make([]byte, len(tcase.data)+2) - copy(padded[1:], tcase.data) - - out, l, err := cellAsSQL(padded, 1, tcase.typ, tcase.metadata, tcase.styp) - if err != nil || l != len(tcase.data) || out != tcase.out { - t.Errorf("testcase cellAsSQL(%v,%v,%v) returned unexpected result: %v %v %v, was expecting %v %v ", tcase.typ, tcase.styp, tcase.data, out, l, err, tcase.out, len(tcase.data)) - } - } -} From 58f803d345ca8f5cd7722fed723601ae8f674313 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Sat, 11 Mar 2017 13:15:15 -0800 Subject: [PATCH 089/108] tabletserver: simplify query_engine->planbuilder The relationship was complicated due to the callback function. Instead, we just pass the schema into planbuilder. Plan building now uses a shared lock. This resolves the long-standing issue of holding a lock while running a query against MySQL. Plan types have been renamed to be more meaningful. 
--- go/vt/tabletserver/endtoend/config_test.go | 20 ++-- go/vt/tabletserver/planbuilder/ddl.go | 18 ++-- go/vt/tabletserver/planbuilder/dml.go | 32 +++--- go/vt/tabletserver/planbuilder/plan.go | 83 +++++++------- go/vt/tabletserver/planbuilder/plan_test.go | 50 ++++++--- go/vt/tabletserver/query_engine.go | 114 ++++++++------------ go/vt/tabletserver/query_executor.go | 27 ++--- go/vt/tabletserver/queryz.go | 2 +- go/vt/tabletserver/queryz_test.go | 35 +++--- 9 files changed, 195 insertions(+), 186 deletions(-) diff --git a/go/vt/tabletserver/endtoend/config_test.go b/go/vt/tabletserver/endtoend/config_test.go index 594d477b2cd..a2981cf888e 100644 --- a/go/vt/tabletserver/endtoend/config_test.go +++ b/go/vt/tabletserver/endtoend/config_test.go @@ -105,10 +105,14 @@ func TestConfigVars(t *testing.T) { } func TestPoolSize(t *testing.T) { - vstart := framework.DebugVars() defer framework.Server.SetPoolSize(framework.Server.PoolSize()) framework.Server.SetPoolSize(1) + vstart := framework.DebugVars() + if err := verifyIntValue(vstart, "ConnPoolCapacity", 1); err != nil { + t.Error(err) + } + var wg sync.WaitGroup wg.Add(2) go func() { @@ -122,12 +126,14 @@ func TestPoolSize(t *testing.T) { }() wg.Wait() - vend := framework.DebugVars() - if err := verifyIntValue(vend, "ConnPoolCapacity", 1); err != nil { - t.Error(err) - } - if err := compareIntDiff(vend, "ConnPoolWaitCount", vstart, 1); err != nil { - t.Error(err) + // Parallel plan building can cause multiple conn pool waits. + // Check that the wait count was at least incremented once so + // we know it's working. 
+ tag := "ConnPoolWaitCount" + got := framework.FetchInt(framework.DebugVars(), tag) + want := framework.FetchInt(vstart, tag) + if got <= want { + t.Errorf("%s: %d, must be greater than %d", tag, got, want) } } diff --git a/go/vt/tabletserver/planbuilder/ddl.go b/go/vt/tabletserver/planbuilder/ddl.go index 7bbe65f7c4e..264118c2c10 100644 --- a/go/vt/tabletserver/planbuilder/ddl.go +++ b/go/vt/tabletserver/planbuilder/ddl.go @@ -4,7 +4,10 @@ package planbuilder -import "github.com/youtube/vitess/go/vt/sqlparser" +import ( + "github.com/youtube/vitess/go/vt/sqlparser" + "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" +) // DDLPlan provides a plan for DDLs. type DDLPlan struct { @@ -30,16 +33,11 @@ func DDLParse(sql string) (plan *DDLPlan) { } } -func analyzeDDL(ddl *sqlparser.DDL, getTable TableGetter) *ExecPlan { +func analyzeDDL(ddl *sqlparser.DDL, tables map[string]*schema.Table) *Plan { // TODO(sougou): Add support for sequences. - plan := &ExecPlan{PlanID: PlanDDL} - tableName := ddl.Table - // Skip TableName if table is empty (create statements) or not found in schema - if !tableName.IsEmpty() { - table, ok := getTable(tableName.Name) - if ok { - plan.TableName = table.Name - } + plan := &Plan{PlanID: PlanDDL} + if ddl.Table != nil { + plan.Table = tables[ddl.Table.Name.String()] } return plan } diff --git a/go/vt/tabletserver/planbuilder/dml.go b/go/vt/tabletserver/planbuilder/dml.go index c51f898feef..406d7b0429d 100644 --- a/go/vt/tabletserver/planbuilder/dml.go +++ b/go/vt/tabletserver/planbuilder/dml.go @@ -13,8 +13,8 @@ import ( "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" ) -func analyzeUpdate(upd *sqlparser.Update, getTable TableGetter) (plan *ExecPlan, err error) { - plan = &ExecPlan{ +func analyzeUpdate(upd *sqlparser.Update, tables map[string]*schema.Table) (plan *Plan, err error) { + plan = &Plan{ PlanID: PlanPassDML, FullQuery: GenerateFullQuery(upd), } @@ -24,7 +24,7 @@ func analyzeUpdate(upd *sqlparser.Update, 
getTable TableGetter) (plan *ExecPlan, plan.Reason = ReasonTable return plan, nil } - table, err := plan.setTable(tableName, getTable) + table, err := plan.setTable(tableName, tables) if err != nil { return nil, err } @@ -57,8 +57,8 @@ func analyzeUpdate(upd *sqlparser.Update, getTable TableGetter) (plan *ExecPlan, return plan, nil } -func analyzeDelete(del *sqlparser.Delete, getTable TableGetter) (plan *ExecPlan, err error) { - plan = &ExecPlan{ +func analyzeDelete(del *sqlparser.Delete, tables map[string]*schema.Table) (plan *Plan, err error) { + plan = &Plan{ PlanID: PlanPassDML, FullQuery: GenerateFullQuery(del), } @@ -68,7 +68,7 @@ func analyzeDelete(del *sqlparser.Delete, getTable TableGetter) (plan *ExecPlan, plan.Reason = ReasonTable return plan, nil } - table, err := plan.setTable(tableName, getTable) + table, err := plan.setTable(tableName, tables) if err != nil { return nil, err } @@ -92,8 +92,8 @@ func analyzeDelete(del *sqlparser.Delete, getTable TableGetter) (plan *ExecPlan, return plan, nil } -func analyzeSet(set *sqlparser.Set) (plan *ExecPlan) { - return &ExecPlan{ +func analyzeSet(set *sqlparser.Set) (plan *Plan) { + return &Plan{ PlanID: PlanSet, FullQuery: GenerateFullQuery(set), } @@ -120,8 +120,8 @@ func analyzeUpdateExpressions(exprs sqlparser.UpdateExprs, pkIndex *schema.Index return pkValues, nil } -func analyzeSelect(sel *sqlparser.Select, getTable TableGetter) (plan *ExecPlan, err error) { - plan = &ExecPlan{ +func analyzeSelect(sel *sqlparser.Select, tables map[string]*schema.Table) (plan *Plan, err error) { + plan = &Plan{ PlanID: PlanPassSelect, FieldQuery: GenerateFieldQuery(sel), FullQuery: GenerateSelectLimitQuery(sel), @@ -134,7 +134,7 @@ func analyzeSelect(sel *sqlparser.Select, getTable TableGetter) (plan *ExecPlan, if tableName.IsEmpty() { return plan, nil } - table, err := plan.setTable(tableName, getTable) + table, err := plan.setTable(tableName, tables) if err != nil { return nil, err } @@ -238,8 +238,8 @@ func 
getPKValues(conditions []*sqlparser.ComparisonExpr, pkIndex *schema.Index) return pkValues } -func analyzeInsert(ins *sqlparser.Insert, getTable TableGetter) (plan *ExecPlan, err error) { - plan = &ExecPlan{ +func analyzeInsert(ins *sqlparser.Insert, tables map[string]*schema.Table) (plan *Plan, err error) { + plan = &Plan{ PlanID: PlanPassDML, FullQuery: GenerateFullQuery(ins), } @@ -248,7 +248,7 @@ func analyzeInsert(ins *sqlparser.Insert, getTable TableGetter) (plan *ExecPlan, plan.Reason = ReasonTable return plan, nil } - table, err := plan.setTable(tableName, getTable) + table, err := plan.setTable(tableName, tables) if err != nil { return nil, err } @@ -268,7 +268,7 @@ func analyzeInsert(ins *sqlparser.Insert, getTable TableGetter) (plan *ExecPlan, panic("unreachable") } -func analyzeInsertNoType(ins *sqlparser.Insert, plan *ExecPlan, table *schema.Table) (*ExecPlan, error) { +func analyzeInsertNoType(ins *sqlparser.Insert, plan *Plan, table *schema.Table) (*Plan, error) { pkColumnNumbers := getInsertPKColumns(ins.Columns, table) if sel, ok := ins.Rows.(sqlparser.SelectStatement); ok { @@ -361,7 +361,7 @@ func resolveUpsertUpdateValues(rowList sqlparser.ValTuple, columns sqlparser.Col return sqlparser.UpdateExprs(dup), err } -func analyzeInsertMessage(ins *sqlparser.Insert, plan *ExecPlan, table *schema.Table) (*ExecPlan, error) { +func analyzeInsertMessage(ins *sqlparser.Insert, plan *Plan, table *schema.Table) (*Plan, error) { if _, ok := ins.Rows.(sqlparser.SelectStatement); ok { return nil, fmt.Errorf("subquery not allowed for message table: %s", table.Name.String()) } diff --git a/go/vt/tabletserver/planbuilder/plan.go b/go/vt/tabletserver/planbuilder/plan.go index ec74dec8ef3..462dc37df49 100644 --- a/go/vt/tabletserver/planbuilder/plan.go +++ b/go/vt/tabletserver/planbuilder/plan.go @@ -179,97 +179,100 @@ type MessageRowValues struct { //_______________________________________________ -// ExecPlan is built for selects and DMLs. 
-// PK Values values within ExecPlan can be: -// sqltypes.Value: sourced form the query, or -// string: bind variable name starting with ':', or -// nil if no value was specified -type ExecPlan struct { - PlanID PlanType - Reason ReasonType `json:",omitempty"` - TableName sqlparser.TableIdent `json:",omitempty"` +// Plan is built for selects and DMLs. +type Plan struct { + PlanID PlanType + Reason ReasonType + Table *schema.Table // FieldQuery is used to fetch field info - FieldQuery *sqlparser.ParsedQuery `json:",omitempty"` + FieldQuery *sqlparser.ParsedQuery // FullQuery will be set for all plans. - FullQuery *sqlparser.ParsedQuery `json:",omitempty"` + FullQuery *sqlparser.ParsedQuery // For PK plans, only OuterQuery is set. // For SUBQUERY plans, Subquery is also set. - OuterQuery *sqlparser.ParsedQuery `json:",omitempty"` - Subquery *sqlparser.ParsedQuery `json:",omitempty"` - UpsertQuery *sqlparser.ParsedQuery `json:",omitempty"` + OuterQuery *sqlparser.ParsedQuery + Subquery *sqlparser.ParsedQuery + UpsertQuery *sqlparser.ParsedQuery // PlanInsertSubquery: columns to be inserted. - ColumnNumbers []int `json:",omitempty"` + ColumnNumbers []int + // PKValues is an sqltypes.Value if it's sourced + // from the query. If it's a bind var then it's + // a string including the ':' prefix(es). // PlanDMLPK: where clause values. // PlanInsertPK: values clause. // PlanNextVal: increment. - PKValues []interface{} `json:",omitempty"` + PKValues []interface{} // For update: set clause if pk is changing. - SecondaryPKValues []interface{} `json:",omitempty"` + SecondaryPKValues []interface{} // For PlanInsertSubquery: pk columns in the subquery result. - SubqueryPKColumns []int `json:",omitempty"` + SubqueryPKColumns []int // For PlanInsertMessage. Query used to reload inserted messages. 
- MessageReloaderQuery *sqlparser.ParsedQuery `json:",omitempty"` + MessageReloaderQuery *sqlparser.ParsedQuery } -func (plan *ExecPlan) setTable(tableName sqlparser.TableIdent, getTable TableGetter) (*schema.Table, error) { - table, ok := getTable(tableName) - if !ok { - return nil, fmt.Errorf("table %s not found in schema", tableName) +// TableName returns the table name for the plan. +func (plan *Plan) TableName() sqlparser.TableIdent { + var tableName sqlparser.TableIdent + if plan.Table != nil { + tableName = plan.Table.Name } - plan.TableName = table.Name - return table, nil + return tableName } -// TableGetter returns a schema.Table given the table name. -type TableGetter func(tableName sqlparser.TableIdent) (*schema.Table, bool) +func (plan *Plan) setTable(tableName sqlparser.TableIdent, tables map[string]*schema.Table) (*schema.Table, error) { + if plan.Table = tables[tableName.String()]; plan.Table == nil { + return nil, fmt.Errorf("table %s not found in schema", tableName) + } + return plan.Table, nil +} -// GetExecPlan generates a ExecPlan given a sql query and a TableGetter. -func GetExecPlan(sql string, getTable TableGetter) (plan *ExecPlan, err error) { +// Build builds a plan based on the schema. 
+func Build(sql string, tables map[string]*schema.Table) (plan *Plan, err error) { statement, err := sqlparser.Parse(sql) if err != nil { return nil, err } switch stmt := statement.(type) { case *sqlparser.Union: - return &ExecPlan{ + return &Plan{ PlanID: PlanPassSelect, FieldQuery: GenerateFieldQuery(stmt), FullQuery: GenerateFullQuery(stmt), }, nil case *sqlparser.Select: - return analyzeSelect(stmt, getTable) + return analyzeSelect(stmt, tables) case *sqlparser.Insert: - return analyzeInsert(stmt, getTable) + return analyzeInsert(stmt, tables) case *sqlparser.Update: - return analyzeUpdate(stmt, getTable) + return analyzeUpdate(stmt, tables) case *sqlparser.Delete: - return analyzeDelete(stmt, getTable) + return analyzeDelete(stmt, tables) case *sqlparser.Set: return analyzeSet(stmt), nil case *sqlparser.DDL: - return analyzeDDL(stmt, getTable), nil + return analyzeDDL(stmt, tables), nil case *sqlparser.Other: - return &ExecPlan{PlanID: PlanOther}, nil + return &Plan{PlanID: PlanOther}, nil } return nil, errors.New("invalid SQL") } -// GetStreamExecPlan generates a ExecPlan given a sql query and a TableGetter. -func GetStreamExecPlan(sql string, getTable TableGetter) (plan *ExecPlan, err error) { +// BuildStreaming builds a streaming plan based on the schema. 
+func BuildStreaming(sql string, tables map[string]*schema.Table) (plan *Plan, err error) { statement, err := sqlparser.Parse(sql) if err != nil { return nil, err } - plan = &ExecPlan{ + plan = &Plan{ PlanID: PlanSelectStream, FullQuery: GenerateFullQuery(statement), } @@ -280,7 +283,7 @@ func GetStreamExecPlan(sql string, getTable TableGetter) (plan *ExecPlan, err er return nil, errors.New("select with lock not allowed for streaming") } if tableName := analyzeFrom(stmt.From); !tableName.IsEmpty() { - plan.setTable(tableName, getTable) + plan.setTable(tableName, tables) } case *sqlparser.Union: // pass diff --git a/go/vt/tabletserver/planbuilder/plan_test.go b/go/vt/tabletserver/planbuilder/plan_test.go index 48b9037b4cc..46de73ce4b7 100644 --- a/go/vt/tabletserver/planbuilder/plan_test.go +++ b/go/vt/tabletserver/planbuilder/plan_test.go @@ -23,13 +23,44 @@ import ( "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" ) +// MarshalJSON is only used for testing. +func (ep *Plan) MarshalJSON() ([]byte, error) { + mplan := struct { + PlanID PlanType + Reason ReasonType `json:",omitempty"` + TableName sqlparser.TableIdent `json:",omitempty"` + FieldQuery *sqlparser.ParsedQuery `json:",omitempty"` + FullQuery *sqlparser.ParsedQuery `json:",omitempty"` + OuterQuery *sqlparser.ParsedQuery `json:",omitempty"` + Subquery *sqlparser.ParsedQuery `json:",omitempty"` + UpsertQuery *sqlparser.ParsedQuery `json:",omitempty"` + ColumnNumbers []int `json:",omitempty"` + PKValues []interface{} `json:",omitempty"` + SecondaryPKValues []interface{} `json:",omitempty"` + SubqueryPKColumns []int `json:",omitempty"` + MessageReloaderQuery *sqlparser.ParsedQuery `json:",omitempty"` + }{ + PlanID: ep.PlanID, + Reason: ep.Reason, + TableName: ep.TableName(), + FieldQuery: ep.FieldQuery, + FullQuery: ep.FullQuery, + OuterQuery: ep.OuterQuery, + Subquery: ep.Subquery, + UpsertQuery: ep.UpsertQuery, + ColumnNumbers: ep.ColumnNumbers, + PKValues: ep.PKValues, + SecondaryPKValues: 
ep.SecondaryPKValues, + SubqueryPKColumns: ep.SubqueryPKColumns, + MessageReloaderQuery: ep.MessageReloaderQuery, + } + return json.Marshal(&mplan) +} + func TestPlan(t *testing.T) { testSchema := loadSchema("schema_test.json") for tcase := range iterateExecFile("exec_cases.txt") { - plan, err := GetExecPlan(tcase.input, func(name sqlparser.TableIdent) (*schema.Table, bool) { - r, ok := testSchema[name.String()] - return r, ok - }) + plan, err := Build(tcase.input, testSchema) var out string if err != nil { out = err.Error() @@ -69,14 +100,10 @@ func TestCustom(t *testing.T) { if len(files) == 0 { t.Fatalf("No test files for %s", schemFile) } - getter := func(name sqlparser.TableIdent) (*schema.Table, bool) { - r, ok := schem[name.String()] - return r, ok - } for _, file := range files { t.Logf("Testing file %s", file) for tcase := range iterateExecFile(file) { - plan, err := GetExecPlan(tcase.input, getter) + plan, err := Build(tcase.input, schem) var out string if err != nil { out = err.Error() @@ -98,10 +125,7 @@ func TestCustom(t *testing.T) { func TestStreamPlan(t *testing.T) { testSchema := loadSchema("schema_test.json") for tcase := range iterateExecFile("stream_cases.txt") { - plan, err := GetStreamExecPlan(tcase.input, func(name sqlparser.TableIdent) (*schema.Table, bool) { - r, ok := testSchema[name.String()] - return r, ok - }) + plan, err := BuildStreaming(tcase.input, testSchema) var out string if err != nil { out = err.Error() diff --git a/go/vt/tabletserver/query_engine.go b/go/vt/tabletserver/query_engine.go index 1d37c657ec3..2042e036e67 100644 --- a/go/vt/tabletserver/query_engine.go +++ b/go/vt/tabletserver/query_engine.go @@ -39,11 +39,10 @@ import ( //_______________________________________________ -// ExecPlan wraps the planbuilder's exec plan to enforce additional rules +// TabletPlan wraps the planbuilder's exec plan to enforce additional rules // and track stats. 
-type ExecPlan struct { - *planbuilder.ExecPlan - Table *schema.Table +type TabletPlan struct { + *planbuilder.Plan Fields []*querypb.Field Rules *QueryRules Authorized *tableacl.ACLResult @@ -56,13 +55,13 @@ type ExecPlan struct { ErrorCount int64 } -// Size allows ExecPlan to be in cache.LRUCache. -func (*ExecPlan) Size() int { +// Size allows TabletPlan to be in cache.LRUCache. +func (*TabletPlan) Size() int { return 1 } -// AddStats updates the stats for the current ExecPlan. -func (ep *ExecPlan) AddStats(queryCount int64, duration, mysqlTime time.Duration, rowCount, errorCount int64) { +// AddStats updates the stats for the current TabletPlan. +func (ep *TabletPlan) AddStats(queryCount int64, duration, mysqlTime time.Duration, rowCount, errorCount int64) { ep.mu.Lock() ep.QueryCount += queryCount ep.Time += duration @@ -72,8 +71,8 @@ func (ep *ExecPlan) AddStats(queryCount int64, duration, mysqlTime time.Duration ep.mu.Unlock() } -// Stats returns the current stats of ExecPlan. -func (ep *ExecPlan) Stats() (queryCount int64, duration, mysqlTime time.Duration, rowCount, errorCount int64) { +// Stats returns the current stats of TabletPlan. +func (ep *TabletPlan) Stats() (queryCount int64, duration, mysqlTime time.Duration, rowCount, errorCount int64) { ep.mu.Lock() queryCount = ep.QueryCount duration = ep.Time @@ -98,7 +97,7 @@ type QueryEngine struct { dbconfigs dbconfigs.DBConfigs // mu protects the following fields. - mu sync.Mutex + mu sync.RWMutex tables map[string]*schema.Table queries *cache.LRUCache queryRuleSources *QueryRuleInfo @@ -237,47 +236,32 @@ func (qe *QueryEngine) Close() { qe.conns.Close() } -// GetPlan returns the ExecPlan that for the query. Plans are cached in a cache.LRUCache. -func (qe *QueryEngine) GetPlan(ctx context.Context, logStats *tabletenv.LogStats, sql string) (*ExecPlan, error) { +// GetPlan returns the TabletPlan that for the query. Plans are cached in a cache.LRUCache. 
+func (qe *QueryEngine) GetPlan(ctx context.Context, logStats *tabletenv.LogStats, sql string) (*TabletPlan, error) { span := trace.NewSpanFromContext(ctx) span.StartLocal("QueryEngine.GetPlan") defer span.Finish() - // Fastpath if plan already exists. if plan := qe.getQuery(sql); plan != nil { return plan, nil } - // TODO(sougou): It's not correct to hold this lock here because the code - // below runs queries against MySQL. But if we don't hold the lock, there - // are other race conditions where identical queries will end up building - // plans and compete with populating the query cache. In other words, we - // need a more elaborate scheme that blocks less, but still prevents these - // race conditions. - qe.mu.Lock() - defer qe.mu.Unlock() - // Recheck. A plan might have been built by someone elqe. - if plan := qe.getQuery(sql); plan != nil { - return plan, nil - } - - var table *schema.Table - GetTable := func(tableName sqlparser.TableIdent) (*schema.Table, bool) { - var ok bool - table, ok = qe.tables[tableName.String()] - if !ok { - return nil, false - } - return table, true - } - splan, err := planbuilder.GetExecPlan(sql, GetTable) + // Obtain read lock to prevent schema from changing while + // we build a plan. The read lock allows multiple identical + // queries to build the same plan. One of them will win by + // updating the query cache and prevent future races. Due to + // this, query stats reporting may not be accurate, but it's + // acceptable because those numbers are best effort. + qe.mu.RLock() + defer qe.mu.RUnlock() + splan, err := planbuilder.Build(sql, qe.tables) if err != nil { - // TODO(sougou): Inspect to see if GetExecPlan can return coded error. + // TODO(sougou): Inspect to see if Build can return coded error. 
return nil, vterrors.New(vtrpcpb.Code_UNKNOWN, err.Error()) } - plan := &ExecPlan{ExecPlan: splan, Table: table} - plan.Rules = qe.queryRuleSources.filterByPlan(sql, plan.PlanID, plan.TableName.String()) - plan.Authorized = tableacl.Authorized(plan.TableName.String(), plan.PlanID.MinRole()) + plan := &TabletPlan{Plan: splan} + plan.Rules = qe.queryRuleSources.filterByPlan(sql, plan.PlanID, plan.TableName().String()) + plan.Authorized = tableacl.Authorized(plan.TableName().String(), plan.PlanID.MinRole()) if plan.PlanID.IsSelect() { if plan.FieldQuery == nil { log.Warningf("Cannot cache field info: %s", sql) @@ -306,25 +290,17 @@ func (qe *QueryEngine) GetPlan(ctx context.Context, logStats *tabletenv.LogStats // GetStreamPlan is similar to GetPlan, but doesn't use the cache // and doesn't enforce a limit. It just returns the parsed query. -func (qe *QueryEngine) GetStreamPlan(sql string) (*ExecPlan, error) { - var table *schema.Table - GetTable := func(tableName sqlparser.TableIdent) (table *schema.Table, ok bool) { - qe.mu.Lock() - defer qe.mu.Unlock() - table, ok = qe.tables[tableName.String()] - if !ok { - return nil, false - } - return table, true - } - splan, err := planbuilder.GetStreamExecPlan(sql, GetTable) +func (qe *QueryEngine) GetStreamPlan(sql string) (*TabletPlan, error) { + qe.mu.RLock() + defer qe.mu.RUnlock() + splan, err := planbuilder.BuildStreaming(sql, qe.tables) if err != nil { - // TODO(sougou): Inspect to see if GetStreamExecPlan can return coded error. + // TODO(sougou): Inspect to see if BuildStreaming can return coded error. 
return nil, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, err.Error()) } - plan := &ExecPlan{ExecPlan: splan, Table: table} - plan.Rules = qe.queryRuleSources.filterByPlan(sql, plan.PlanID, plan.TableName.String()) - plan.Authorized = tableacl.Authorized(plan.TableName.String(), plan.PlanID.MinRole()) + plan := &TabletPlan{Plan: splan} + plan.Rules = qe.queryRuleSources.filterByPlan(sql, plan.PlanID, plan.TableName().String()) + plan.Authorized = tableacl.Authorized(plan.TableName().String(), plan.PlanID.MinRole()) return plan, nil } @@ -357,17 +333,17 @@ func (qe *QueryEngine) schemaChanged(tables map[string]*schema.Table, created, a } // getQuery fetches the plan and makes it the most recent. -func (qe *QueryEngine) getQuery(sql string) *ExecPlan { +func (qe *QueryEngine) getQuery(sql string) *TabletPlan { if cacheResult, ok := qe.queries.Get(sql); ok { - return cacheResult.(*ExecPlan) + return cacheResult.(*TabletPlan) } return nil } // peekQuery fetches the plan without changing the LRU order. 
-func (qe *QueryEngine) peekQuery(sql string) *ExecPlan { +func (qe *QueryEngine) peekQuery(sql string) *TabletPlan { if cacheResult, ok := qe.queries.Peek(sql); ok { - return cacheResult.(*ExecPlan) + return cacheResult.(*TabletPlan) } return nil } @@ -386,7 +362,7 @@ func (qe *QueryEngine) QueryCacheCap() int { } func (qe *QueryEngine) getQueryCount() map[string]int64 { - f := func(plan *ExecPlan) int64 { + f := func(plan *TabletPlan) int64 { queryCount, _, _, _, _ := plan.Stats() return queryCount } @@ -394,7 +370,7 @@ func (qe *QueryEngine) getQueryCount() map[string]int64 { } func (qe *QueryEngine) getQueryTime() map[string]int64 { - f := func(plan *ExecPlan) int64 { + f := func(plan *TabletPlan) int64 { _, time, _, _, _ := plan.Stats() return int64(time) } @@ -402,7 +378,7 @@ func (qe *QueryEngine) getQueryTime() map[string]int64 { } func (qe *QueryEngine) getQueryRowCount() map[string]int64 { - f := func(plan *ExecPlan) int64 { + f := func(plan *TabletPlan) int64 { _, _, _, rowCount, _ := plan.Stats() return rowCount } @@ -410,21 +386,21 @@ func (qe *QueryEngine) getQueryRowCount() map[string]int64 { } func (qe *QueryEngine) getQueryErrorCount() map[string]int64 { - f := func(plan *ExecPlan) int64 { + f := func(plan *TabletPlan) int64 { _, _, _, _, errorCount := plan.Stats() return errorCount } return qe.getQueryStats(f) } -type queryStatsFunc func(*ExecPlan) int64 +type queryStatsFunc func(*TabletPlan) int64 func (qe *QueryEngine) getQueryStats(f queryStatsFunc) map[string]int64 { keys := qe.queries.Keys() qstats := make(map[string]int64) for _, v := range keys { if plan := qe.peekQuery(v); plan != nil { - table := plan.TableName + table := plan.TableName() if table.IsEmpty() { table = sqlparser.NewTableIdent("Join") } @@ -471,7 +447,7 @@ func (qe *QueryEngine) handleHTTPQueryPlans(response http.ResponseWriter, reques for _, v := range keys { response.Write([]byte(fmt.Sprintf("%#v\n", v))) if plan := qe.peekQuery(v); plan != nil { - if b, err := 
json.MarshalIndent(plan.ExecPlan, "", " "); err != nil { + if b, err := json.MarshalIndent(plan.Plan, "", " "); err != nil { response.Write([]byte(err.Error())) } else { response.Write(b) @@ -489,7 +465,7 @@ func (qe *QueryEngine) handleHTTPQueryStats(response http.ResponseWriter, reques if plan := qe.peekQuery(v); plan != nil { var pqstats perQueryStats pqstats.Query = unicoded(v) - pqstats.Table = plan.TableName.String() + pqstats.Table = plan.TableName().String() pqstats.Plan = plan.PlanID pqstats.QueryCount, pqstats.Time, pqstats.MysqlTime, pqstats.RowCount, pqstats.ErrorCount = plan.Stats() qstats = append(qstats, pqstats) diff --git a/go/vt/tabletserver/query_executor.go b/go/vt/tabletserver/query_executor.go index 6547496934a..0b42931d999 100644 --- a/go/vt/tabletserver/query_executor.go +++ b/go/vt/tabletserver/query_executor.go @@ -35,7 +35,7 @@ type QueryExecutor struct { query string bindVars map[string]interface{} transactionID int64 - plan *ExecPlan + plan *TabletPlan ctx context.Context logStats *tabletenv.LogStats tsv *TabletServer @@ -56,7 +56,7 @@ func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) { defer func(start time.Time) { duration := time.Now().Sub(start) tabletenv.QueryStats.Add(planName, duration) - tabletenv.RecordUserQuery(qre.ctx, qre.plan.TableName, "Execute", int64(duration)) + tabletenv.RecordUserQuery(qre.ctx, qre.plan.TableName(), "Execute", int64(duration)) if reply == nil { qre.plan.AddStats(1, duration, qre.logStats.MysqlResponseTime, 0, 1) @@ -142,7 +142,7 @@ func (qre *QueryExecutor) Stream(includedFields querypb.ExecuteOptions_IncludedF defer func(start time.Time) { tabletenv.QueryStats.Record(qre.plan.PlanID.String(), start) - tabletenv.RecordUserQuery(qre.ctx, qre.plan.TableName, "Stream", int64(time.Now().Sub(start))) + tabletenv.RecordUserQuery(qre.ctx, qre.plan.TableName(), "Stream", int64(time.Now().Sub(start))) }(time.Now()) if err := qre.checkPermissions(); err != nil { @@ -257,7 +257,7 @@ func 
(qre *QueryExecutor) checkPermissions() error { } // empty table name, do not need a table ACL check. - if qre.plan.TableName.IsEmpty() { + if qre.plan.TableName().IsEmpty() { return nil } @@ -265,7 +265,7 @@ func (qre *QueryExecutor) checkPermissions() error { return vterrors.Errorf(vtrpcpb.Code_PERMISSION_DENIED, "table acl error: nil acl") } tableACLStatsKey := []string{ - qre.plan.TableName.String(), + qre.plan.TableName().String(), qre.plan.Authorized.GroupName, qre.plan.PlanID.String(), callerID.Username, @@ -278,7 +278,7 @@ func (qre *QueryExecutor) checkPermissions() error { } // raise error if in strictTableAcl mode, else just log an error. if qre.tsv.qe.strictTableACL { - errStr := fmt.Sprintf("table acl error: %q cannot run %v on table %q", callerID.Username, qre.plan.PlanID, qre.plan.TableName) + errStr := fmt.Sprintf("table acl error: %q cannot run %v on table %q", callerID.Username, qre.plan.PlanID, qre.plan.TableName()) tabletenv.TableaclDenied.Add(tableACLStatsKey, 1) qre.tsv.qe.accessCheckerLogger.Infof("%s", errStr) return vterrors.Errorf(vtrpcpb.Code_PERMISSION_DENIED, "%s", errStr) @@ -330,8 +330,9 @@ func (qre *QueryExecutor) execNextval() (*sqltypes.Result, error) { if err != nil { return nil, err } + tableName := qre.plan.TableName() if inc < 1 { - return nil, fmt.Errorf("invalid increment for sequence %s: %d", qre.plan.TableName, inc) + return nil, fmt.Errorf("invalid increment for sequence %s: %d", tableName, inc) } t := qre.plan.Table @@ -339,17 +340,17 @@ func (qre *QueryExecutor) execNextval() (*sqltypes.Result, error) { defer t.SequenceInfo.Unlock() if t.SequenceInfo.NextVal == 0 || t.SequenceInfo.NextVal+inc > t.SequenceInfo.LastVal { _, err := qre.execAsTransaction(func(conn *TxConnection) (*sqltypes.Result, error) { - query := fmt.Sprintf("select next_id, cache from %s where id = 0 for update", sqlparser.String(qre.plan.TableName)) + query := fmt.Sprintf("select next_id, cache from %s where id = 0 for update", 
sqlparser.String(tableName)) qr, err := qre.execSQL(conn, query, false) if err != nil { return nil, err } if len(qr.Rows) != 1 { - return nil, fmt.Errorf("unexpected rows from reading sequence %s (possible mis-route): %d", qre.plan.TableName, len(qr.Rows)) + return nil, fmt.Errorf("unexpected rows from reading sequence %s (possible mis-route): %d", tableName, len(qr.Rows)) } nextID, err := qr.Rows[0][0].ParseInt64() if err != nil { - return nil, fmt.Errorf("error loading sequence %s: %v", qre.plan.TableName, err) + return nil, fmt.Errorf("error loading sequence %s: %v", tableName, err) } // Initialize SequenceInfo.NextVal if it wasn't already. if t.SequenceInfo.NextVal == 0 { @@ -357,16 +358,16 @@ func (qre *QueryExecutor) execNextval() (*sqltypes.Result, error) { } cache, err := qr.Rows[0][1].ParseInt64() if err != nil { - return nil, fmt.Errorf("error loading sequence %s: %v", qre.plan.TableName, err) + return nil, fmt.Errorf("error loading sequence %s: %v", tableName, err) } if cache < 1 { - return nil, fmt.Errorf("invalid cache value for sequence %s: %d", qre.plan.TableName, cache) + return nil, fmt.Errorf("invalid cache value for sequence %s: %d", tableName, cache) } newLast := nextID + cache for newLast <= t.SequenceInfo.NextVal+inc { newLast += cache } - query = fmt.Sprintf("update %s set next_id = %d where id = 0", sqlparser.String(qre.plan.TableName), newLast) + query = fmt.Sprintf("update %s set next_id = %d where id = 0", sqlparser.String(tableName), newLast) conn.RecordQuery(query) _, err = qre.execSQL(conn, query, false) if err != nil { diff --git a/go/vt/tabletserver/queryz.go b/go/vt/tabletserver/queryz.go index 59bad70d344..632b91e1023 100644 --- a/go/vt/tabletserver/queryz.go +++ b/go/vt/tabletserver/queryz.go @@ -138,7 +138,7 @@ func queryzHandler(qe *QueryEngine, w http.ResponseWriter, r *http.Request) { } Value := &queryzRow{ Query: logz.Wrappable(v), - Table: plan.TableName.String(), + Table: plan.TableName().String(), Plan: plan.PlanID, 
Reason: plan.Reason, } diff --git a/go/vt/tabletserver/queryz_test.go b/go/vt/tabletserver/queryz_test.go index f33bff4b118..b8efe01b823 100644 --- a/go/vt/tabletserver/queryz_test.go +++ b/go/vt/tabletserver/queryz_test.go @@ -14,6 +14,7 @@ import ( "time" "github.com/youtube/vitess/go/vt/sqlparser" + "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" "github.com/youtube/vitess/go/vt/tabletserver/planbuilder" ) @@ -22,36 +23,36 @@ func TestQueryzHandler(t *testing.T) { req, _ := http.NewRequest("GET", "/schemaz", nil) qe := newTestQueryEngine(100, 10*time.Second, true) - plan1 := &ExecPlan{ - ExecPlan: &planbuilder.ExecPlan{ - TableName: sqlparser.NewTableIdent("test_table"), - PlanID: planbuilder.PlanPassSelect, - Reason: planbuilder.ReasonTable, + plan1 := &TabletPlan{ + Plan: &planbuilder.Plan{ + Table: &schema.Table{Name: sqlparser.NewTableIdent("test_table")}, + PlanID: planbuilder.PlanPassSelect, + Reason: planbuilder.ReasonTable, }, } plan1.AddStats(10, 2*time.Second, 1*time.Second, 2, 0) qe.queries.Set("select name from test_table", plan1) - plan2 := &ExecPlan{ - ExecPlan: &planbuilder.ExecPlan{ - TableName: sqlparser.NewTableIdent("test_table"), - PlanID: planbuilder.PlanDDL, - Reason: planbuilder.ReasonDefault, + plan2 := &TabletPlan{ + Plan: &planbuilder.Plan{ + Table: &schema.Table{Name: sqlparser.NewTableIdent("test_table")}, + PlanID: planbuilder.PlanDDL, + Reason: planbuilder.ReasonDefault, }, } plan2.AddStats(1, 2*time.Millisecond, 1*time.Millisecond, 1, 0) qe.queries.Set("insert into test_table values 1", plan2) - plan3 := &ExecPlan{ - ExecPlan: &planbuilder.ExecPlan{ - TableName: sqlparser.NewTableIdent(""), - PlanID: planbuilder.PlanOther, - Reason: planbuilder.ReasonDefault, + plan3 := &TabletPlan{ + Plan: &planbuilder.Plan{ + Table: &schema.Table{Name: sqlparser.NewTableIdent("")}, + PlanID: planbuilder.PlanOther, + Reason: planbuilder.ReasonDefault, }, } plan3.AddStats(1, 75*time.Millisecond, 50*time.Millisecond, 1, 0) 
qe.queries.Set("show tables", plan3) - qe.queries.Set("", (*ExecPlan)(nil)) + qe.queries.Set("", (*TabletPlan)(nil)) queryzHandler(qe, resp, req) body, _ := ioutil.ReadAll(resp.Body) @@ -108,7 +109,7 @@ func TestQueryzHandler(t *testing.T) { checkQueryzHasPlan(t, planPattern3, plan3, body) } -func checkQueryzHasPlan(t *testing.T, planPattern []string, plan *ExecPlan, page []byte) { +func checkQueryzHasPlan(t *testing.T, planPattern []string, plan *TabletPlan, page []byte) { matcher := regexp.MustCompile(strings.Join(planPattern, `\s*`)) if !matcher.Match(page) { t.Fatalf("queryz page does not contain plan: %v, page: %s", plan, string(page)) From 76c8fd81af3f5352e9b5ac088226db0acf7eabfb Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Sat, 11 Mar 2017 15:56:13 -0800 Subject: [PATCH 090/108] tabletserver: strictMode check moved to QueryEngine It makes more sense for strict mode check to be moved to QueryEngine because it's the one that depends on that property. --- .../engines/schema/schema_engine.go | 13 ++---------- .../engines/schema/schema_engine_test.go | 16 --------------- go/vt/tabletserver/query_engine.go | 16 +++++++++++++++ go/vt/tabletserver/query_engine_test.go | 20 +++++++++++++++++++ 4 files changed, 38 insertions(+), 27 deletions(-) diff --git a/go/vt/tabletserver/engines/schema/schema_engine.go b/go/vt/tabletserver/engines/schema/schema_engine.go index a4bab71a8e5..fcca53fab11 100644 --- a/go/vt/tabletserver/engines/schema/schema_engine.go +++ b/go/vt/tabletserver/engines/schema/schema_engine.go @@ -21,7 +21,6 @@ import ( "github.com/youtube/vitess/go/sqldb" "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/stats" - "github.com/youtube/vitess/go/sync2" "github.com/youtube/vitess/go/timer" "github.com/youtube/vitess/go/vt/concurrency" "github.com/youtube/vitess/go/vt/sqlparser" @@ -49,9 +48,8 @@ type Engine struct { // The following fields have their own synchronization // and do not require locking mu. 
- strictMode sync2.AtomicBool - conns *connpool.Pool - ticks *timer.Timer + conns *connpool.Pool + ticks *timer.Timer } var schemaOnce sync.Once @@ -64,7 +62,6 @@ func NewEngine(checker tabletenv.MySQLChecker, config tabletenv.TabletConfig) *E conns: connpool.New("", 3, idleTimeout, checker), ticks: timer.NewTimer(reloadTime), reloadTime: reloadTime, - strictMode: sync2.NewAtomicBool(config.StrictMode), } schemaOnce.Do(func() { stats.Publish("SchemaReloadTime", stats.DurationFunc(se.ticks.Interval)) @@ -106,12 +103,6 @@ func (se *Engine) Open(dbaParams *sqldb.ConnParams) error { return err } - if se.strictMode.Get() { - if err := conn.VerifyMode(); err != nil { - return vterrors.Errorf(vtrpcpb.Code_UNKNOWN, err.Error()) - } - } - tableData, err := conn.Exec(ctx, mysqlconn.BaseShowTables, maxTableCount, false) if err != nil { return vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "could not get table list: %v", err) diff --git a/go/vt/tabletserver/engines/schema/schema_engine_test.go b/go/vt/tabletserver/engines/schema/schema_engine_test.go index 96b3410cf59..e5404fc175e 100644 --- a/go/vt/tabletserver/engines/schema/schema_engine_test.go +++ b/go/vt/tabletserver/engines/schema/schema_engine_test.go @@ -26,22 +26,6 @@ import ( querypb "github.com/youtube/vitess/go/vt/proto/query" ) -func TestStrictMode(t *testing.T) { - db := fakesqldb.New(t) - defer db.Close() - for query, result := range schematest.Queries() { - db.AddQuery(query, result) - } - db.AddRejectedQuery("select @@global.sql_mode", errRejected) - se := newEngine(10, 1*time.Second, 1*time.Second, true) - t.Log(se) - err := se.Open(db.ConnParams()) - want := "could not verify mode" - if err == nil || !strings.Contains(err.Error(), want) { - t.Errorf("se.Open: %v, must contain %s", err, want) - } -} - func TestOpenFailedDueToMissMySQLTime(t *testing.T) { db := fakesqldb.New(t) defer db.Close() diff --git a/go/vt/tabletserver/query_engine.go b/go/vt/tabletserver/query_engine.go index 2042e036e67..d03378f588d 100644 
--- a/go/vt/tabletserver/query_engine.go +++ b/go/vt/tabletserver/query_engine.go @@ -219,6 +219,22 @@ func NewQueryEngine(checker MySQLChecker, se *schema.Engine, config tabletenv.Ta func (qe *QueryEngine) Open(dbconfigs dbconfigs.DBConfigs) error { qe.dbconfigs = dbconfigs qe.conns.Open(&qe.dbconfigs.App, &qe.dbconfigs.Dba) + + if qe.strictMode.Get() { + conn, err := qe.conns.Get(tabletenv.LocalContext()) + if err != nil { + qe.conns.Close() + return err + } + err = conn.VerifyMode() + conn.Recycle() + + if err != nil { + qe.conns.Close() + return err + } + } + qe.streamConns.Open(&qe.dbconfigs.App, &qe.dbconfigs.Dba) qe.se.RegisterNotifier("qe", qe.schemaChanged) return nil diff --git a/go/vt/tabletserver/query_engine_test.go b/go/vt/tabletserver/query_engine_test.go index 1a61dfbaa25..a6e3d1303a4 100644 --- a/go/vt/tabletserver/query_engine_test.go +++ b/go/vt/tabletserver/query_engine_test.go @@ -20,6 +20,26 @@ import ( "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" ) +func TestStrictMode(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + for query, result := range schematest.Queries() { + db.AddQuery(query, result) + } + db.AddRejectedQuery("select @@global.sql_mode", errRejected) + + qe := newTestQueryEngine(10, 10*time.Second, true) + testUtils := newTestUtils() + dbconfigs := testUtils.newDBConfigs(db) + qe.se.Open(db.ConnParams()) + + err := qe.Open(dbconfigs) + want := "could not verify mode" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("se.Open: %v, must contain %s", err, want) + } +} + func TestGetPlanPanicDuetoEmptyQuery(t *testing.T) { db := fakesqldb.New(t) defer db.Close() From b9a1b1f35d2f9aca40db413f92dfd68899ef2ea4 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Sun, 12 Mar 2017 15:40:02 -0700 Subject: [PATCH 091/108] tabletserver: move messager to its own package --- go/vt/sqlparser/tracked_buffer.go | 7 + .../{messager_cache.go => messager/cache.go} | 2 +- .../cache_test.go} | 2 +- 
.../engine.go} | 37 ++- go/vt/tabletserver/messager/engine_test.go | 233 ++++++++++++++ .../{ => messager}/message_manager.go | 25 +- .../{ => messager}/message_manager_test.go | 271 ++++++---------- go/vt/tabletserver/messager_engine_test.go | 290 ------------------ go/vt/tabletserver/query_executor.go | 3 +- go/vt/tabletserver/query_executor_test.go | 23 +- go/vt/tabletserver/tabletserver.go | 9 +- go/vt/tabletserver/tabletserver_test.go | 5 +- go/vt/tabletserver/twopc.go | 34 +- go/vt/tabletserver/tx_executor.go | 3 +- go/vt/tabletserver/tx_pool.go | 9 +- 15 files changed, 421 insertions(+), 532 deletions(-) rename go/vt/tabletserver/{messager_cache.go => messager/cache.go} (99%) rename go/vt/tabletserver/{messager_cache_test.go => messager/cache_test.go} (99%) rename go/vt/tabletserver/{messager_engine.go => messager/engine.go} (77%) create mode 100644 go/vt/tabletserver/messager/engine_test.go rename go/vt/tabletserver/{ => messager}/message_manager.go (96%) rename go/vt/tabletserver/{ => messager}/message_manager_test.go (67%) delete mode 100644 go/vt/tabletserver/messager_engine_test.go diff --git a/go/vt/sqlparser/tracked_buffer.go b/go/vt/sqlparser/tracked_buffer.go index e0ea35a25ce..0a47c810c5a 100644 --- a/go/vt/sqlparser/tracked_buffer.go +++ b/go/vt/sqlparser/tracked_buffer.go @@ -115,3 +115,10 @@ func (buf *TrackedBuffer) ParsedQuery() *ParsedQuery { func (buf *TrackedBuffer) HasBindVars() bool { return len(buf.bindLocations) != 0 } + +// BuildParsedQuery builds a ParsedQuery from the input. +func BuildParsedQuery(in string, vars ...interface{}) *ParsedQuery { + buf := NewTrackedBuffer(nil) + buf.Myprintf(in, vars...) 
+ return buf.ParsedQuery() +} diff --git a/go/vt/tabletserver/messager_cache.go b/go/vt/tabletserver/messager/cache.go similarity index 99% rename from go/vt/tabletserver/messager_cache.go rename to go/vt/tabletserver/messager/cache.go index b27610d5e93..ac5f1061451 100644 --- a/go/vt/tabletserver/messager_cache.go +++ b/go/vt/tabletserver/messager/cache.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package tabletserver +package messager import ( "container/heap" diff --git a/go/vt/tabletserver/messager_cache_test.go b/go/vt/tabletserver/messager/cache_test.go similarity index 99% rename from go/vt/tabletserver/messager_cache_test.go rename to go/vt/tabletserver/messager/cache_test.go index 5eb0297b465..4f06181fa2a 100644 --- a/go/vt/tabletserver/messager_cache_test.go +++ b/go/vt/tabletserver/messager/cache_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package tabletserver +package messager import ( "reflect" diff --git a/go/vt/tabletserver/messager_engine.go b/go/vt/tabletserver/messager/engine.go similarity index 77% rename from go/vt/tabletserver/messager_engine.go rename to go/vt/tabletserver/messager/engine.go index 9b11bad6999..e31d7b85d08 100644 --- a/go/vt/tabletserver/messager_engine.go +++ b/go/vt/tabletserver/messager/engine.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-package tabletserver +package messager import ( "fmt" @@ -10,31 +10,43 @@ import ( "time" log "github.com/golang/glog" + "golang.org/x/net/context" + "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/dbconfigs" "github.com/youtube/vitess/go/vt/tabletserver/connpool" "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/vterrors" + querypb "github.com/youtube/vitess/go/vt/proto/query" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) +// TabletService defines the functions of TabletServer +// that the messager needs for callback. +type TabletService interface { + CheckMySQL() + PostponeMessages(ctx context.Context, target *querypb.Target, name string, ids []string) (count int64, err error) + PurgeMessages(ctx context.Context, target *querypb.Target, name string, timeCutoff int64) (count int64, err error) +} + // MessagerEngine is the engine for handling messages. type MessagerEngine struct { mu sync.Mutex isOpen bool managers map[string]*MessageManager - // TODO(sougou): This depndency should be cleaned up. - tsv *TabletServer + tsv TabletService + se *schema.Engine conns *connpool.Pool } // NewMessagerEngine creates a new MessagerEngine. 
-func NewMessagerEngine(tsv *TabletServer, config tabletenv.TabletConfig) *MessagerEngine { +func NewMessagerEngine(tsv TabletService, se *schema.Engine, config tabletenv.TabletConfig) *MessagerEngine { return &MessagerEngine{ tsv: tsv, + se: se, conns: connpool.New( config.PoolNamePrefix+"MessagerPool", config.MessagePoolSize, @@ -51,7 +63,7 @@ func (me *MessagerEngine) Open(dbconfigs dbconfigs.DBConfigs) error { return nil } me.conns.Open(&dbconfigs.App, &dbconfigs.Dba) - me.tsv.se.RegisterNotifier("messages", me.schemaChanged) + me.se.RegisterNotifier("messages", me.schemaChanged) me.isOpen = true return nil } @@ -64,7 +76,7 @@ func (me *MessagerEngine) Close() { return } me.isOpen = false - me.tsv.se.UnregisterNotifier("messages") + me.se.UnregisterNotifier("messages") for _, mm := range me.managers { mm.Close() } @@ -73,15 +85,22 @@ func (me *MessagerEngine) Close() { } // Subscribe subscribes to messages from the requested table. -func (me *MessagerEngine) Subscribe(name string, rcv *messageReceiver) error { +// The function returns a done channel that will be closed when +// the subscription ends, which can be initiated by the send function +// returning io.EOF. The engine can also end a subscription which is +// usually triggered by Close. It's the responsibility of the send +// function to promptly return if the done channel is closed. Otherwise, +// the engine's Close function will hang indefinitely. 
+func (me *MessagerEngine) Subscribe(name string, send func(*sqltypes.Result) error) (done chan struct{}, err error) { me.mu.Lock() defer me.mu.Unlock() mm := me.managers[name] if mm == nil { - return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "message table %s not found", name) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "message table %s not found", name) } + rcv, done := newMessageReceiver(send) mm.Subscribe(rcv) - return nil + return done, nil } // LockDB obtains db locks for all messages that need to diff --git a/go/vt/tabletserver/messager/engine_test.go b/go/vt/tabletserver/messager/engine_test.go new file mode 100644 index 00000000000..3d43c33a26c --- /dev/null +++ b/go/vt/tabletserver/messager/engine_test.go @@ -0,0 +1,233 @@ +// Copyright 2017, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package messager + +import ( + "fmt" + "math/rand" + "reflect" + "runtime" + "testing" + "time" + + "github.com/youtube/vitess/go/mysqlconn/fakesqldb" + "github.com/youtube/vitess/go/sqltypes" + "github.com/youtube/vitess/go/sync2" + "github.com/youtube/vitess/go/vt/dbconfigs" + "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" + "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" +) + +var meTable = &schema.Table{ + Type: schema.Message, + MessageInfo: mmTable.MessageInfo, +} + +func TestEngineSchemaChanged(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + engine := newTestEngine(db) + defer engine.Close() + tables := map[string]*schema.Table{ + "t1": meTable, + "t2": { + Type: schema.NoType, + }, + } + engine.schemaChanged(tables, []string{"t1", "t2"}, nil, nil) + got := extractManagerNames(engine.managers) + want := map[string]bool{"t1": true} + if !reflect.DeepEqual(got, want) { + t.Errorf("got: %+v, want %+v", got, want) + } + tables = map[string]*schema.Table{ + "t1": meTable, + "t2": { + Type: schema.NoType, + }, + 
"t3": meTable, + } + engine.schemaChanged(tables, []string{"t3"}, nil, nil) + got = extractManagerNames(engine.managers) + want = map[string]bool{"t1": true, "t3": true} + if !reflect.DeepEqual(got, want) { + t.Errorf("got: %+v, want %+v", got, want) + } + tables = map[string]*schema.Table{ + "t1": meTable, + "t2": { + Type: schema.NoType, + }, + "t4": meTable, + } + engine.schemaChanged(tables, []string{"t4"}, nil, []string{"t3", "t5"}) + got = extractManagerNames(engine.managers) + // schemaChanged is only additive. + want = map[string]bool{"t1": true, "t4": true} + if !reflect.DeepEqual(got, want) { + t.Errorf("got: %+v, want %+v", got, want) + } +} + +func extractManagerNames(in map[string]*MessageManager) map[string]bool { + out := make(map[string]bool) + for k := range in { + out[k] = true + } + return out +} + +func TestSubscribe(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + engine := newTestEngine(db) + defer engine.Close() + tables := map[string]*schema.Table{ + "t1": meTable, + "t2": meTable, + } + engine.schemaChanged(tables, []string{"t1", "t2"}, nil, nil) + f1, ch1 := newEngineReceiver() + f2, ch2 := newEngineReceiver() + // Each receiver is subscribed to different managers. + engine.Subscribe("t1", f1) + <-ch1 + engine.Subscribe("t2", f2) + <-ch2 + engine.managers["t1"].Add(&MessageRow{ID: sqltypes.MakeString([]byte("1"))}) + engine.managers["t2"].Add(&MessageRow{ID: sqltypes.MakeString([]byte("2"))}) + <-ch1 + <-ch2 + + // Error case. 
+ want := "message table t3 not found" + _, err := engine.Subscribe("t3", f1) + if err == nil || err.Error() != want { + t.Errorf("Subscribe: %v, want %s", err, want) + } +} + +func TestLockDB(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + engine := newTestEngine(db) + defer engine.Close() + tables := map[string]*schema.Table{ + "t1": meTable, + "t2": meTable, + } + engine.schemaChanged(tables, []string{"t1", "t2"}, nil, nil) + f1, ch1 := newEngineReceiver() + engine.Subscribe("t1", f1) + <-ch1 + + row1 := &MessageRow{ + ID: sqltypes.MakeString([]byte("1")), + } + row2 := &MessageRow{ + TimeNext: time.Now().UnixNano() + int64(10*time.Minute), + ID: sqltypes.MakeString([]byte("2")), + } + newMessages := map[string][]*MessageRow{"t1": {row1, row2}, "t3": {row1}} + unlock := engine.LockDB(newMessages, nil) + engine.UpdateCaches(newMessages, nil) + unlock() + <-ch1 + runtime.Gosched() + // row2 should not be sent. + select { + case mr := <-ch1: + t.Errorf("Unexpected message: %v", mr) + default: + } + + ch2 := make(chan *sqltypes.Result) + var count sync2.AtomicInt64 + engine.Subscribe("t2", func(qr *sqltypes.Result) error { + count.Add(1) + ch2 <- qr + return nil + }) + <-ch2 + mm := engine.managers["t2"] + mm.Add(&MessageRow{ID: sqltypes.MakeString([]byte("1"))}) + // Make sure the message is enqueued. + for { + runtime.Gosched() + time.Sleep(10 * time.Millisecond) + if count.Get() == int64(2) { + break + } + } + // "2" will be in the cache. + mm.Add(&MessageRow{ID: sqltypes.MakeString([]byte("2"))}) + changedMessages := map[string][]string{"t2": {"2"}, "t3": {"2"}} + unlock = engine.LockDB(nil, changedMessages) + // This should delete "2". + engine.UpdateCaches(nil, changedMessages) + unlock() + <-ch2 + runtime.Gosched() + // There should be no more messages. 
+ select { + case mr := <-ch2: + t.Errorf("Unexpected message: %v", mr) + default: + } +} + +func TestEngineGenerate(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + engine := newTestEngine(db) + defer engine.Close() + engine.schemaChanged(map[string]*schema.Table{ + "t1": meTable, + }, []string{"t1"}, nil, nil) + if _, _, err := engine.GenerateAckQuery("t1", []string{"1"}); err != nil { + t.Error(err) + } + want := "message table t2 not found in schema" + if _, _, err := engine.GenerateAckQuery("t2", []string{"1"}); err == nil || err.Error() != want { + t.Errorf("engine.GenerateAckQuery(invalid): %v, want %s", err, want) + } + + if _, _, err := engine.GeneratePostponeQuery("t1", []string{"1"}); err != nil { + t.Error(err) + } + if _, _, err := engine.GeneratePostponeQuery("t2", []string{"1"}); err == nil || err.Error() != want { + t.Errorf("engine.GeneratePostponeQuery(invalid): %v, want %s", err, want) + } + + if _, _, err := engine.GeneratePurgeQuery("t1", 0); err != nil { + t.Error(err) + } + if _, _, err := engine.GeneratePurgeQuery("t2", 0); err == nil || err.Error() != want { + t.Errorf("engine.GeneratePurgeQuery(invalid): %v, want %s", err, want) + } +} + +func newTestEngine(db *fakesqldb.DB) *MessagerEngine { + randID := rand.Int63() + config := tabletenv.DefaultQsConfig + config.PoolNamePrefix = fmt.Sprintf("Pool-%d-", randID) + tsv := newFakeTabletServer() + se := schema.NewEngine(tsv, config) + te := NewMessagerEngine(tsv, se, config) + dbconfigs := dbconfigs.DBConfigs{ + App: *db.ConnParams(), + SidecarDBName: "_vt", + } + te.Open(dbconfigs) + return te +} + +func newEngineReceiver() (f func(qr *sqltypes.Result) error, ch chan *sqltypes.Result) { + ch = make(chan *sqltypes.Result) + return func(qr *sqltypes.Result) error { + ch <- qr + return nil + }, ch +} diff --git a/go/vt/tabletserver/message_manager.go b/go/vt/tabletserver/messager/message_manager.go similarity index 96% rename from go/vt/tabletserver/message_manager.go rename to 
go/vt/tabletserver/messager/message_manager.go index 0074e627103..72d6935fc4c 100644 --- a/go/vt/tabletserver/message_manager.go +++ b/go/vt/tabletserver/messager/message_manager.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package tabletserver +package messager import ( "io" @@ -71,7 +71,7 @@ type receiverWithStatus struct { // MessageManager manages messages for a message table. type MessageManager struct { DBLock sync.Mutex - tsv *TabletServer + tsv TabletService isOpen bool @@ -109,7 +109,7 @@ type MessageManager struct { // NewMessageManager creates a new message manager. // Calls into tsv have to be made asynchronously. Otherwise, // it can lead to deadlocks. -func NewMessageManager(tsv *TabletServer, table *schema.Table, conns *connpool.Pool) *MessageManager { +func NewMessageManager(tsv TabletService, table *schema.Table, conns *connpool.Pool) *MessageManager { mm := &MessageManager{ tsv: tsv, name: table.Name, @@ -126,16 +126,16 @@ func NewMessageManager(tsv *TabletServer, table *schema.Table, conns *connpool.P } mm.cond.L = &mm.mu - mm.readByTimeNext = buildParsedQuery( + mm.readByTimeNext = sqlparser.BuildParsedQuery( "select time_next, epoch, id, message from %v where time_next < %a order by time_next desc limit %a", mm.name, ":time_next", ":max") - mm.ackQuery = buildParsedQuery( + mm.ackQuery = sqlparser.BuildParsedQuery( "update %v set time_acked = %a, time_next = null where id in %a and time_acked is null", mm.name, ":time_acked", "::ids") - mm.postponeQuery = buildParsedQuery( + mm.postponeQuery = sqlparser.BuildParsedQuery( "update %v set time_next = %a+(%a< 1 { + return io.EOF + } + count++ + ch1 <- qr + return nil + }) + <-ch1 got, err := qre.Execute() if err != nil { t.Fatalf("qre.Execute() = %v, want nil", err) @@ -188,7 +191,7 @@ func TestQueryExecutorPlanInsertMessage(t *testing.T) { if !reflect.DeepEqual(got, want) { t.Fatalf("got: %v, want: %v", got, want) } - 
mr := <-r1.ch + mr := <-ch1 wantqr := &sqltypes.Result{ Rows: [][]sqltypes.Value{{ sqltypes.MakeTrusted(sqltypes.Int64, []byte("1")), diff --git a/go/vt/tabletserver/tabletserver.go b/go/vt/tabletserver/tabletserver.go index 8ac9b338201..3090d3fdbfe 100644 --- a/go/vt/tabletserver/tabletserver.go +++ b/go/vt/tabletserver/tabletserver.go @@ -33,6 +33,7 @@ import ( "github.com/youtube/vitess/go/vt/sqlparser" "github.com/youtube/vitess/go/vt/tabletserver/connpool" "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" + "github.com/youtube/vitess/go/vt/tabletserver/messager" "github.com/youtube/vitess/go/vt/tabletserver/queryservice" "github.com/youtube/vitess/go/vt/tabletserver/querytypes" "github.com/youtube/vitess/go/vt/tabletserver/splitquery" @@ -113,7 +114,7 @@ type TabletServer struct { se *schema.Engine qe *QueryEngine te *TxEngine - messager *MessagerEngine + messager *messager.MessagerEngine watcher *ReplicationWatcher updateStreamList *binlog.StreamList @@ -180,7 +181,7 @@ func NewTabletServer(config tabletenv.TabletConfig, topoServer topo.Server) *Tab tsv.qe = NewQueryEngine(tsv, tsv.se, config) tsv.te = NewTxEngine(tsv, config) tsv.txThrottler = txthrottler.CreateTxThrottlerFromTabletConfig(topoServer) - tsv.messager = NewMessagerEngine(tsv, config) + tsv.messager = messager.NewMessagerEngine(tsv, tsv.se, config) tsv.watcher = NewReplicationWatcher(tsv.se, config) tsv.updateStreamList = &binlog.StreamList{} // FIXME(alainjobart) could we move this to the Register method below? @@ -946,7 +947,7 @@ func (tsv *TabletServer) MessageStream(ctx context.Context, target *querypb.Targ target, false, false, func(ctx context.Context, logStats *tabletenv.LogStats) error { // TODO(sougou): perform ACL checks. 
- rcv, done := newMessageReceiver(func(r *sqltypes.Result) error { + done, err := tsv.messager.Subscribe(name, func(r *sqltypes.Result) error { select { case <-ctx.Done(): return io.EOF @@ -954,7 +955,7 @@ func (tsv *TabletServer) MessageStream(ctx context.Context, target *querypb.Targ } return callback(r) }) - if err := tsv.messager.Subscribe(name, rcv); err != nil { + if err != nil { return err } <-done diff --git a/go/vt/tabletserver/tabletserver_test.go b/go/vt/tabletserver/tabletserver_test.go index bdb2490950f..bd2ce83419f 100644 --- a/go/vt/tabletserver/tabletserver_test.go +++ b/go/vt/tabletserver/tabletserver_test.go @@ -23,6 +23,7 @@ import ( "github.com/youtube/vitess/go/mysqlconn/fakesqldb" "github.com/youtube/vitess/go/sqldb" "github.com/youtube/vitess/go/sqltypes" + "github.com/youtube/vitess/go/vt/tabletserver/messager" "github.com/youtube/vitess/go/vt/tabletserver/querytypes" "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/vterrors" @@ -1483,9 +1484,9 @@ func TestMessageStream(t *testing.T) { close(done) }() // Skip first result (field info). - newMessages := map[string][]*MessageRow{ + newMessages := map[string][]*messager.MessageRow{ "msg": { - &MessageRow{ID: sqltypes.MakeString([]byte("1"))}, + &messager.MessageRow{ID: sqltypes.MakeString([]byte("1"))}, }, } // We may have to iterate a few times before the stream kicks in. 
diff --git a/go/vt/tabletserver/twopc.go b/go/vt/tabletserver/twopc.go index 38fdc58ad49..befdbed860f 100644 --- a/go/vt/tabletserver/twopc.go +++ b/go/vt/tabletserver/twopc.go @@ -140,48 +140,48 @@ func (tpc *TwoPC) Init(sidecarDBName string, dbaparams *sqldb.ConnParams) error return err } } - tpc.insertRedoTx = buildParsedQuery( + tpc.insertRedoTx = sqlparser.BuildParsedQuery( "insert into %s.redo_state(dtid, state, time_created) values (%a, %a, %a)", dbname, ":dtid", ":state", ":time_created") - tpc.insertRedoStmt = buildParsedQuery( + tpc.insertRedoStmt = sqlparser.BuildParsedQuery( "insert into %s.redo_statement(dtid, id, statement) values %a", dbname, ":vals") - tpc.updateRedoTx = buildParsedQuery( + tpc.updateRedoTx = sqlparser.BuildParsedQuery( "update %s.redo_state set state = %a where dtid = %a", dbname, ":state", ":dtid") - tpc.deleteRedoTx = buildParsedQuery( + tpc.deleteRedoTx = sqlparser.BuildParsedQuery( "delete from %s.redo_state where dtid = %a", dbname, ":dtid") - tpc.deleteRedoStmt = buildParsedQuery( + tpc.deleteRedoStmt = sqlparser.BuildParsedQuery( "delete from %s.redo_statement where dtid = %a", dbname, ":dtid") tpc.readAllRedo = fmt.Sprintf(sqlReadAllRedo, dbname, dbname) - tpc.countUnresolvedRedo = buildParsedQuery( + tpc.countUnresolvedRedo = sqlparser.BuildParsedQuery( "select count(*) from %s.redo_state where time_created < %a", dbname, ":time_created") - tpc.insertTransaction = buildParsedQuery( + tpc.insertTransaction = sqlparser.BuildParsedQuery( "insert into %s.dt_state(dtid, state, time_created) values (%a, %a, %a)", dbname, ":dtid", ":state", ":cur_time") - tpc.insertParticipants = buildParsedQuery( + tpc.insertParticipants = sqlparser.BuildParsedQuery( "insert into %s.dt_participant(dtid, id, keyspace, shard) values %a", dbname, ":vals") - tpc.transition = buildParsedQuery( + tpc.transition = sqlparser.BuildParsedQuery( "update %s.dt_state set state = %a where dtid = %a and state = %a", dbname, ":state", ":dtid", ":prepare") - 
tpc.deleteTransaction = buildParsedQuery( + tpc.deleteTransaction = sqlparser.BuildParsedQuery( "delete from %s.dt_state where dtid = %a", dbname, ":dtid") - tpc.deleteParticipants = buildParsedQuery( + tpc.deleteParticipants = sqlparser.BuildParsedQuery( "delete from %s.dt_participant where dtid = %a", dbname, ":dtid") - tpc.readTransaction = buildParsedQuery( + tpc.readTransaction = sqlparser.BuildParsedQuery( "select dtid, state, time_created from %s.dt_state where dtid = %a", dbname, ":dtid") - tpc.readParticipants = buildParsedQuery( + tpc.readParticipants = sqlparser.BuildParsedQuery( "select keyspace, shard from %s.dt_participant where dtid = %a", dbname, ":dtid") - tpc.readAbandoned = buildParsedQuery( + tpc.readAbandoned = sqlparser.BuildParsedQuery( "select dtid, time_created from %s.dt_state where time_created < %a", dbname, ":time_created") tpc.readAllTransactions = fmt.Sprintf(sqlReadAllTransactions, dbname, dbname) @@ -198,12 +198,6 @@ func (tpc *TwoPC) Close() { tpc.readPool.Close() } -func buildParsedQuery(in string, vars ...interface{}) *sqlparser.ParsedQuery { - buf := sqlparser.NewTrackedBuffer(nil) - buf.Myprintf(in, vars...) - return buf.ParsedQuery() -} - // SaveRedo saves the statements in the redo log using the supplied connection. 
func (tpc *TwoPC) SaveRedo(ctx context.Context, conn *TxConnection, dtid string, queries []string) error { bindVars := map[string]interface{}{ diff --git a/go/vt/tabletserver/tx_executor.go b/go/vt/tabletserver/tx_executor.go index 4ca1acc7ab4..fc868e8587c 100644 --- a/go/vt/tabletserver/tx_executor.go +++ b/go/vt/tabletserver/tx_executor.go @@ -14,6 +14,7 @@ import ( querypb "github.com/youtube/vitess/go/vt/proto/query" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" + "github.com/youtube/vitess/go/vt/tabletserver/messager" "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/vterrors" ) @@ -24,7 +25,7 @@ type TxExecutor struct { ctx context.Context logStats *tabletenv.LogStats te *TxEngine - messager *MessagerEngine + messager *messager.MessagerEngine } // Prepare performs a prepare on a connection including the redo log work. diff --git a/go/vt/tabletserver/tx_pool.go b/go/vt/tabletserver/tx_pool.go index 26113c1dba5..d87aafc1736 100644 --- a/go/vt/tabletserver/tx_pool.go +++ b/go/vt/tabletserver/tx_pool.go @@ -23,6 +23,7 @@ import ( "github.com/youtube/vitess/go/timer" "github.com/youtube/vitess/go/vt/callerid" "github.com/youtube/vitess/go/vt/tabletserver/connpool" + "github.com/youtube/vitess/go/vt/tabletserver/messager" "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/vterrors" @@ -171,7 +172,7 @@ func (axp *TxPool) Begin(ctx context.Context) (int64, error) { } // Commit commits the specified transaction. -func (axp *TxPool) Commit(ctx context.Context, transactionID int64, messager *MessagerEngine) error { +func (axp *TxPool) Commit(ctx context.Context, transactionID int64, messager *messager.MessagerEngine) error { conn, err := axp.Get(transactionID, "for commit") if err != nil { return err @@ -210,7 +211,7 @@ func (axp *TxPool) LocalBegin(ctx context.Context) (*TxConnection, error) { } // LocalCommit is the commit function for LocalBegin. 
-func (axp *TxPool) LocalCommit(ctx context.Context, conn *TxConnection, messager *MessagerEngine) error { +func (axp *TxPool) LocalCommit(ctx context.Context, conn *TxConnection, messager *messager.MessagerEngine) error { defer conn.conclude(TxCommit) defer messager.LockDB(conn.NewMessages, conn.ChangedMessages)() txStats.Add("Completed", time.Now().Sub(conn.StartTime)) @@ -276,7 +277,7 @@ type TxConnection struct { StartTime time.Time EndTime time.Time Queries []string - NewMessages map[string][]*MessageRow + NewMessages map[string][]*messager.MessageRow ChangedMessages map[string][]string Conclusion string LogToFile sync2.AtomicInt32 @@ -290,7 +291,7 @@ func newTxConnection(conn *connpool.DBConn, transactionID int64, pool *TxPool, i TransactionID: transactionID, pool: pool, StartTime: time.Now(), - NewMessages: make(map[string][]*MessageRow), + NewMessages: make(map[string][]*messager.MessageRow), ChangedMessages: make(map[string][]string), ImmediateCallerID: immediate, EffectiveCallerID: effective, From 3a7454bdaca8b042902397278b5640d077d78199 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Sun, 12 Mar 2017 16:32:34 -0700 Subject: [PATCH 092/108] tabletserver: rename messager types Also, export only Engine from messager. The rest are all internal. 
--- .../schema/{schema_engine.go => engine.go} | 0 .../{schema_engine_test.go => engine_test.go} | 0 go/vt/tabletserver/messager/cache.go | 22 ++++---- go/vt/tabletserver/messager/cache_test.go | 10 ++-- go/vt/tabletserver/messager/engine.go | 42 +++++++-------- go/vt/tabletserver/messager/engine_test.go | 6 +-- .../tabletserver/messager/message_manager.go | 54 +++++++++---------- .../messager/message_manager_test.go | 20 +++---- go/vt/tabletserver/tabletserver.go | 4 +- go/vt/tabletserver/tx_executor.go | 2 +- go/vt/tabletserver/tx_pool.go | 4 +- 11 files changed, 82 insertions(+), 82 deletions(-) rename go/vt/tabletserver/engines/schema/{schema_engine.go => engine.go} (100%) rename go/vt/tabletserver/engines/schema/{schema_engine_test.go => engine_test.go} (100%) diff --git a/go/vt/tabletserver/engines/schema/schema_engine.go b/go/vt/tabletserver/engines/schema/engine.go similarity index 100% rename from go/vt/tabletserver/engines/schema/schema_engine.go rename to go/vt/tabletserver/engines/schema/engine.go diff --git a/go/vt/tabletserver/engines/schema/schema_engine_test.go b/go/vt/tabletserver/engines/schema/engine_test.go similarity index 100% rename from go/vt/tabletserver/engines/schema/schema_engine_test.go rename to go/vt/tabletserver/engines/schema/engine_test.go diff --git a/go/vt/tabletserver/messager/cache.go b/go/vt/tabletserver/messager/cache.go index ac5f1061451..ac5bd7c137f 100644 --- a/go/vt/tabletserver/messager/cache.go +++ b/go/vt/tabletserver/messager/cache.go @@ -52,17 +52,17 @@ func (mh *messageHeap) Pop() interface{} { //_______________________________________________ -// MessagerCache is the cache for the messager. -type MessagerCache struct { +// cache is the cache for the messager. +type cache struct { mu sync.Mutex size int sendQueue messageHeap messages map[string]*MessageRow } -// NewMessagerCache creates a new MessagerCache. 
-func NewMessagerCache(size int) *MessagerCache { - mc := &MessagerCache{ +// NewMessagerCache creates a new cache. +func newCache(size int) *cache { + mc := &cache{ size: size, messages: make(map[string]*MessageRow), } @@ -70,7 +70,7 @@ func NewMessagerCache(size int) *MessagerCache { } // Clear clears the cache. -func (mc *MessagerCache) Clear() { +func (mc *cache) Clear() { mc.mu.Lock() defer mc.mu.Unlock() mc.sendQueue = nil @@ -79,7 +79,7 @@ func (mc *MessagerCache) Clear() { // Add adds a MessageRow to the cache. It returns // false if the cache is full. -func (mc *MessagerCache) Add(mr *MessageRow) bool { +func (mc *cache) Add(mr *MessageRow) bool { mc.mu.Lock() defer mc.mu.Unlock() if len(mc.sendQueue) >= mc.size { @@ -102,7 +102,7 @@ func (mc *MessagerCache) Add(mr *MessageRow) bool { // to prevent the poller thread from repopulating the // message while it's being sent. // If the Cache is empty Pop returns nil. -func (mc *MessagerCache) Pop() *MessageRow { +func (mc *cache) Pop() *MessageRow { mc.mu.Lock() defer mc.mu.Unlock() for { @@ -125,7 +125,7 @@ func (mc *MessagerCache) Pop() *MessageRow { } // Discard forgets the specified id. -func (mc *MessagerCache) Discard(ids []string) { +func (mc *cache) Discard(ids []string) { mc.mu.Lock() defer mc.mu.Unlock() for _, id := range ids { @@ -140,8 +140,8 @@ func (mc *MessagerCache) Discard(ids []string) { } } -// Size returns the max size of MessagerCache. -func (mc *MessagerCache) Size() int { +// Size returns the max size of cache. 
+func (mc *cache) Size() int { mc.mu.Lock() defer mc.mu.Unlock() return mc.size diff --git a/go/vt/tabletserver/messager/cache_test.go b/go/vt/tabletserver/messager/cache_test.go index 4f06181fa2a..d7769c4b226 100644 --- a/go/vt/tabletserver/messager/cache_test.go +++ b/go/vt/tabletserver/messager/cache_test.go @@ -12,7 +12,7 @@ import ( ) func TestMessagerCacheOrder(t *testing.T) { - mc := NewMessagerCache(10) + mc := newCache(10) if !mc.Add(&MessageRow{ TimeNext: 1, Epoch: 0, @@ -65,7 +65,7 @@ func TestMessagerCacheOrder(t *testing.T) { } func TestMessagerCacheDupKey(t *testing.T) { - mc := NewMessagerCache(10) + mc := newCache(10) if !mc.Add(&MessageRow{ TimeNext: 1, Epoch: 0, @@ -99,7 +99,7 @@ func TestMessagerCacheDupKey(t *testing.T) { } func TestMessagerCacheDiscard(t *testing.T) { - mc := NewMessagerCache(10) + mc := newCache(10) if !mc.Add(&MessageRow{ TimeNext: 1, Epoch: 0, @@ -149,7 +149,7 @@ func TestMessagerCacheDiscard(t *testing.T) { } func TestMessagerCacheFull(t *testing.T) { - mc := NewMessagerCache(2) + mc := newCache(2) if !mc.Add(&MessageRow{ TimeNext: 1, Epoch: 0, @@ -174,7 +174,7 @@ func TestMessagerCacheFull(t *testing.T) { } func TestMessagerCacheEmpty(t *testing.T) { - mc := NewMessagerCache(2) + mc := newCache(2) if !mc.Add(&MessageRow{ TimeNext: 1, Epoch: 0, diff --git a/go/vt/tabletserver/messager/engine.go b/go/vt/tabletserver/messager/engine.go index e31d7b85d08..fd5d20942ee 100644 --- a/go/vt/tabletserver/messager/engine.go +++ b/go/vt/tabletserver/messager/engine.go @@ -31,20 +31,20 @@ type TabletService interface { PurgeMessages(ctx context.Context, target *querypb.Target, name string, timeCutoff int64) (count int64, err error) } -// MessagerEngine is the engine for handling messages. -type MessagerEngine struct { +// Engine is the engine for handling messages. 
+type Engine struct { mu sync.Mutex isOpen bool - managers map[string]*MessageManager + managers map[string]*messageManager tsv TabletService se *schema.Engine conns *connpool.Pool } -// NewMessagerEngine creates a new MessagerEngine. -func NewMessagerEngine(tsv TabletService, se *schema.Engine, config tabletenv.TabletConfig) *MessagerEngine { - return &MessagerEngine{ +// NewEngine creates a new Engine. +func NewEngine(tsv TabletService, se *schema.Engine, config tabletenv.TabletConfig) *Engine { + return &Engine{ tsv: tsv, se: se, conns: connpool.New( @@ -53,12 +53,12 @@ func NewMessagerEngine(tsv TabletService, se *schema.Engine, config tabletenv.Ta time.Duration(config.IdleTimeout*1e9), tsv, ), - managers: make(map[string]*MessageManager), + managers: make(map[string]*messageManager), } } -// Open starts the MessagerEngine service. -func (me *MessagerEngine) Open(dbconfigs dbconfigs.DBConfigs) error { +// Open starts the Engine service. +func (me *Engine) Open(dbconfigs dbconfigs.DBConfigs) error { if me.isOpen { return nil } @@ -68,8 +68,8 @@ func (me *MessagerEngine) Open(dbconfigs dbconfigs.DBConfigs) error { return nil } -// Close closes the MessagerEngine service. -func (me *MessagerEngine) Close() { +// Close closes the Engine service. +func (me *Engine) Close() { me.mu.Lock() defer me.mu.Unlock() if !me.isOpen { @@ -80,7 +80,7 @@ func (me *MessagerEngine) Close() { for _, mm := range me.managers { mm.Close() } - me.managers = make(map[string]*MessageManager) + me.managers = make(map[string]*messageManager) me.conns.Close() } @@ -91,7 +91,7 @@ func (me *MessagerEngine) Close() { // usually triggered by Close. It's the responsibility of the send // function to promptly return if the done channel is closed. Otherwise, // the engine's Close function will hang indefinitely. 
-func (me *MessagerEngine) Subscribe(name string, send func(*sqltypes.Result) error) (done chan struct{}, err error) { +func (me *Engine) Subscribe(name string, send func(*sqltypes.Result) error) (done chan struct{}, err error) { me.mu.Lock() defer me.mu.Unlock() mm := me.managers[name] @@ -105,7 +105,7 @@ func (me *MessagerEngine) Subscribe(name string, send func(*sqltypes.Result) err // LockDB obtains db locks for all messages that need to // be updated and returns the counterpart unlock function. -func (me *MessagerEngine) LockDB(newMessages map[string][]*MessageRow, changedMessages map[string][]string) func() { +func (me *Engine) LockDB(newMessages map[string][]*MessageRow, changedMessages map[string][]string) func() { combined := make(map[string]struct{}) for name := range newMessages { combined[name] = struct{}{} @@ -113,7 +113,7 @@ func (me *MessagerEngine) LockDB(newMessages map[string][]*MessageRow, changedMe for name := range changedMessages { combined[name] = struct{}{} } - var mms []*MessageManager + var mms []*messageManager // Don't do DBLock while holding lock on mu. // It causes deadlocks. func() { @@ -136,7 +136,7 @@ func (me *MessagerEngine) LockDB(newMessages map[string][]*MessageRow, changedMe } // UpdateCaches updates the caches for the committed changes. -func (me *MessagerEngine) UpdateCaches(newMessages map[string][]*MessageRow, changedMessages map[string][]string) { +func (me *Engine) UpdateCaches(newMessages map[string][]*MessageRow, changedMessages map[string][]string) { me.mu.Lock() defer me.mu.Unlock() now := time.Now().UnixNano() @@ -163,7 +163,7 @@ func (me *MessagerEngine) UpdateCaches(newMessages map[string][]*MessageRow, cha } // GenerateAckQuery returns the query and bind vars for acking a message. 
-func (me *MessagerEngine) GenerateAckQuery(name string, ids []string) (string, map[string]interface{}, error) { +func (me *Engine) GenerateAckQuery(name string, ids []string) (string, map[string]interface{}, error) { me.mu.Lock() defer me.mu.Unlock() mm := me.managers[name] @@ -175,7 +175,7 @@ func (me *MessagerEngine) GenerateAckQuery(name string, ids []string) (string, m } // GeneratePostponeQuery returns the query and bind vars for postponing a message. -func (me *MessagerEngine) GeneratePostponeQuery(name string, ids []string) (string, map[string]interface{}, error) { +func (me *Engine) GeneratePostponeQuery(name string, ids []string) (string, map[string]interface{}, error) { me.mu.Lock() defer me.mu.Unlock() mm := me.managers[name] @@ -187,7 +187,7 @@ func (me *MessagerEngine) GeneratePostponeQuery(name string, ids []string) (stri } // GeneratePurgeQuery returns the query and bind vars for purging messages. -func (me *MessagerEngine) GeneratePurgeQuery(name string, timeCutoff int64) (string, map[string]interface{}, error) { +func (me *Engine) GeneratePurgeQuery(name string, timeCutoff int64) (string, map[string]interface{}, error) { me.mu.Lock() defer me.mu.Unlock() mm := me.managers[name] @@ -198,7 +198,7 @@ func (me *MessagerEngine) GeneratePurgeQuery(name string, timeCutoff int64) (str return query, bv, nil } -func (me *MessagerEngine) schemaChanged(tables map[string]*schema.Table, created, altered, dropped []string) { +func (me *Engine) schemaChanged(tables map[string]*schema.Table, created, altered, dropped []string) { me.mu.Lock() defer me.mu.Unlock() for _, name := range created { @@ -211,7 +211,7 @@ func (me *MessagerEngine) schemaChanged(tables map[string]*schema.Table, created log.Errorf("Newly created table alread exists in messages: %s", name) continue } - mm := NewMessageManager(me.tsv, t, me.conns) + mm := newMessageManager(me.tsv, t, me.conns) me.managers[name] = mm mm.Open() } diff --git a/go/vt/tabletserver/messager/engine_test.go 
b/go/vt/tabletserver/messager/engine_test.go index 3d43c33a26c..f64bfef7408 100644 --- a/go/vt/tabletserver/messager/engine_test.go +++ b/go/vt/tabletserver/messager/engine_test.go @@ -71,7 +71,7 @@ func TestEngineSchemaChanged(t *testing.T) { } } -func extractManagerNames(in map[string]*MessageManager) map[string]bool { +func extractManagerNames(in map[string]*messageManager) map[string]bool { out := make(map[string]bool) for k := range in { out[k] = true @@ -209,13 +209,13 @@ func TestEngineGenerate(t *testing.T) { } } -func newTestEngine(db *fakesqldb.DB) *MessagerEngine { +func newTestEngine(db *fakesqldb.DB) *Engine { randID := rand.Int63() config := tabletenv.DefaultQsConfig config.PoolNamePrefix = fmt.Sprintf("Pool-%d-", randID) tsv := newFakeTabletServer() se := schema.NewEngine(tsv, config) - te := NewMessagerEngine(tsv, se, config) + te := NewEngine(tsv, se, config) dbconfigs := dbconfigs.DBConfigs{ App: *db.ConnParams(), SidecarDBName: "_vt", diff --git a/go/vt/tabletserver/messager/message_manager.go b/go/vt/tabletserver/messager/message_manager.go index 72d6935fc4c..d0f328d5186 100644 --- a/go/vt/tabletserver/messager/message_manager.go +++ b/go/vt/tabletserver/messager/message_manager.go @@ -61,15 +61,15 @@ func (rcv *messageReceiver) Cancel() { } // receiverWithStatus is a separate struct to signify -// that the busy flag is controlled by the MessageManager +// that the busy flag is controlled by the messageManager // mutex. type receiverWithStatus struct { receiver *messageReceiver busy bool } -// MessageManager manages messages for a message table. -type MessageManager struct { +// messageManager manages messages for a message table. +type messageManager struct { DBLock sync.Mutex tsv TabletService @@ -89,7 +89,7 @@ type MessageManager struct { // an item gets added to the cache, or if the manager is closed. // The trigger wakes up the runSend thread. 
cond sync.Cond - cache *MessagerCache + cache *cache receivers []*receiverWithStatus curReceiver int messagesPending bool @@ -106,11 +106,11 @@ type MessageManager struct { purgeQuery *sqlparser.ParsedQuery } -// NewMessageManager creates a new message manager. +// newMessageManager creates a new message manager. // Calls into tsv have to be made asynchronously. Otherwise, // it can lead to deadlocks. -func NewMessageManager(tsv TabletService, table *schema.Table, conns *connpool.Pool) *MessageManager { - mm := &MessageManager{ +func newMessageManager(tsv TabletService, table *schema.Table, conns *connpool.Pool) *messageManager { + mm := &messageManager{ tsv: tsv, name: table.Name, fieldResult: &sqltypes.Result{ @@ -119,7 +119,7 @@ func NewMessageManager(tsv TabletService, table *schema.Table, conns *connpool.P ackWaitTime: table.MessageInfo.AckWaitDuration, purgeAfter: table.MessageInfo.PurgeAfterDuration, batchSize: table.MessageInfo.BatchSize, - cache: NewMessagerCache(table.MessageInfo.CacheSize), + cache: newCache(table.MessageInfo.CacheSize), pollerTicks: timer.NewTimer(table.MessageInfo.PollInterval), purgeTicks: timer.NewTimer(table.MessageInfo.PollInterval), conns: conns, @@ -141,8 +141,8 @@ func NewMessageManager(tsv TabletService, table *schema.Table, conns *connpool.P return mm } -// Open starts the MessageManager service. -func (mm *MessageManager) Open() { +// Open starts the messageManager service. +func (mm *messageManager) Open() { mm.mu.Lock() defer mm.mu.Unlock() if mm.isOpen { @@ -157,8 +157,8 @@ func (mm *MessageManager) Open() { mm.purgeTicks.Start(mm.runPurge) } -// Close stops the MessageManager service. -func (mm *MessageManager) Close() { +// Close stops the messageManager service. +func (mm *messageManager) Close() { mm.pollerTicks.Stop() mm.purgeTicks.Stop() @@ -180,7 +180,7 @@ func (mm *MessageManager) Close() { } // Subscribe adds the receiver to the list of subsribers. 
-func (mm *MessageManager) Subscribe(receiver *messageReceiver) { +func (mm *messageManager) Subscribe(receiver *messageReceiver) { mm.mu.Lock() defer mm.mu.Unlock() withStatus := &receiverWithStatus{ @@ -194,7 +194,7 @@ func (mm *MessageManager) Subscribe(receiver *messageReceiver) { go mm.send(withStatus, mm.fieldResult) } -func (mm *MessageManager) unsubscribe(receiver *messageReceiver) { +func (mm *messageManager) unsubscribe(receiver *messageReceiver) { mm.mu.Lock() defer mm.mu.Unlock() for i, rcv := range mm.receivers { @@ -221,7 +221,7 @@ func (mm *MessageManager) unsubscribe(receiver *messageReceiver) { // was previously -1, it broadcasts. If none was found, // curReceiver is set to -1. If there's no starting point, // it must be specified as -1. -func (mm *MessageManager) rescanReceivers(start int) { +func (mm *messageManager) rescanReceivers(start int) { cur := start for range mm.receivers { cur = (cur + 1) % len(mm.receivers) @@ -240,7 +240,7 @@ func (mm *MessageManager) rescanReceivers(start int) { // Add adds the message to the cache. It returns true // if successful. If the message is already present, // it still returns true. 
-func (mm *MessageManager) Add(mr *MessageRow) bool { +func (mm *messageManager) Add(mr *MessageRow) bool { mm.mu.Lock() defer mm.mu.Unlock() if len(mm.receivers) == 0 { @@ -253,7 +253,7 @@ func (mm *MessageManager) Add(mr *MessageRow) bool { return true } -func (mm *MessageManager) runSend() { +func (mm *messageManager) runSend() { defer mm.wg.Done() for { var rows [][]sqltypes.Value @@ -296,7 +296,7 @@ func (mm *MessageManager) runSend() { } } -func (mm *MessageManager) send(receiver *receiverWithStatus, qr *sqltypes.Result) { +func (mm *messageManager) send(receiver *receiverWithStatus, qr *sqltypes.Result) { defer mm.wg.Done() if err := receiver.receiver.Send(qr); err != nil { if err == io.EOF { @@ -330,7 +330,7 @@ func (mm *MessageManager) send(receiver *receiverWithStatus, qr *sqltypes.Result } // postpone is a non-member because it should be called asynchronously and should -// not rely on members of MessageManager. +// not rely on members of messageManager. func postpone(tsv TabletService, name string, ackWaitTime time.Duration, ids []string) { ctx, cancel := context.WithTimeout(tabletenv.LocalContext(), ackWaitTime) defer cancel() @@ -341,7 +341,7 @@ func postpone(tsv TabletService, name string, ackWaitTime time.Duration, ids []s } } -func (mm *MessageManager) runPoller() { +func (mm *messageManager) runPoller() { ctx, cancel := context.WithTimeout(tabletenv.LocalContext(), mm.pollerTicks.Interval()) defer cancel() conn, err := mm.conns.Get(ctx) @@ -402,12 +402,12 @@ func (mm *MessageManager) runPoller() { }() } -func (mm *MessageManager) runPurge() { +func (mm *messageManager) runPurge() { go purge(mm.tsv, mm.name.String(), mm.purgeAfter, mm.purgeTicks.Interval()) } // purge is a non-member because it should be called asynchronously and should -// not rely on members of MessageManager. +// not rely on members of messageManager. 
func purge(tsv TabletService, name string, purgeAfter, purgeInterval time.Duration) { ctx, cancel := context.WithTimeout(tabletenv.LocalContext(), purgeInterval) defer cancel() @@ -425,7 +425,7 @@ func purge(tsv TabletService, name string, purgeAfter, purgeInterval time.Durati } // GenerateAckQuery returns the query and bind vars for acking a message. -func (mm *MessageManager) GenerateAckQuery(ids []string) (string, map[string]interface{}) { +func (mm *messageManager) GenerateAckQuery(ids []string) (string, map[string]interface{}) { idbvs := make([]interface{}, len(ids)) for i, id := range ids { idbvs[i] = id @@ -437,7 +437,7 @@ func (mm *MessageManager) GenerateAckQuery(ids []string) (string, map[string]int } // GeneratePostponeQuery returns the query and bind vars for postponing a message. -func (mm *MessageManager) GeneratePostponeQuery(ids []string) (string, map[string]interface{}) { +func (mm *messageManager) GeneratePostponeQuery(ids []string) (string, map[string]interface{}) { idbvs := make([]interface{}, len(ids)) for i, id := range ids { idbvs[i] = id @@ -450,7 +450,7 @@ func (mm *MessageManager) GeneratePostponeQuery(ids []string) (string, map[strin } // GeneratePurgeQuery returns the query and bind vars for purging messages. 
-func (mm *MessageManager) GeneratePurgeQuery(timeCutoff int64) (string, map[string]interface{}) { +func (mm *messageManager) GeneratePurgeQuery(timeCutoff int64) (string, map[string]interface{}) { return mm.purgeQuery.Query, map[string]interface{}{ "time_scheduled": timeCutoff, } @@ -474,13 +474,13 @@ func BuildMessageRow(row []sqltypes.Value) (*MessageRow, error) { }, nil } -func (mm *MessageManager) receiverCount() int { +func (mm *messageManager) receiverCount() int { mm.mu.Lock() defer mm.mu.Unlock() return len(mm.receivers) } -func (mm *MessageManager) read(ctx context.Context, conn *connpool.DBConn, pq *sqlparser.ParsedQuery, bindVars map[string]interface{}) (*sqltypes.Result, error) { +func (mm *messageManager) read(ctx context.Context, conn *connpool.DBConn, pq *sqlparser.ParsedQuery, bindVars map[string]interface{}) (*sqltypes.Result, error) { b, err := pq.GenerateQuery(bindVars) if err != nil { // TODO(sougou): increment internal error. diff --git a/go/vt/tabletserver/messager/message_manager_test.go b/go/vt/tabletserver/messager/message_manager_test.go index 67322a7edd3..859ed95abc1 100644 --- a/go/vt/tabletserver/messager/message_manager_test.go +++ b/go/vt/tabletserver/messager/message_manager_test.go @@ -108,7 +108,7 @@ func (tr *testReceiver) WaitForDone() { func TestReceiverEOF(t *testing.T) { db := fakesqldb.New(t) defer db.Close() - mm := NewMessageManager(newFakeTabletServer(), mmTable, newMMConnPool(db)) + mm := newMessageManager(newFakeTabletServer(), mmTable, newMMConnPool(db)) mm.Open() defer mm.Close() r1 := newTestReceiver(0) @@ -133,7 +133,7 @@ func TestReceiverEOF(t *testing.T) { func TestMessageManagerState(t *testing.T) { db := fakesqldb.New(t) defer db.Close() - mm := NewMessageManager(newFakeTabletServer(), mmTable, newMMConnPool(db)) + mm := newMessageManager(newFakeTabletServer(), mmTable, newMMConnPool(db)) // Do it twice for i := 0; i < 2; i++ { mm.Open() @@ -162,7 +162,7 @@ func TestMessageManagerAdd(t *testing.T) { defer 
db.Close() ti := newMMTable() ti.MessageInfo.CacheSize = 1 - mm := NewMessageManager(newFakeTabletServer(), ti, newMMConnPool(db)) + mm := newMessageManager(newFakeTabletServer(), ti, newMMConnPool(db)) mm.Open() defer mm.Close() @@ -199,7 +199,7 @@ func TestMessageManagerSend(t *testing.T) { db := fakesqldb.New(t) defer db.Close() tsv := newFakeTabletServer() - mm := NewMessageManager(tsv, mmTable, newMMConnPool(db)) + mm := newMessageManager(tsv, mmTable, newMMConnPool(db)) mm.Open() defer mm.Close() r1 := newTestReceiver(1) @@ -261,7 +261,7 @@ func TestMessageManagerBatchSend(t *testing.T) { defer db.Close() ti := newMMTable() ti.MessageInfo.BatchSize = 2 - mm := NewMessageManager(newFakeTabletServer(), ti, newMMConnPool(db)) + mm := newMessageManager(newFakeTabletServer(), ti, newMMConnPool(db)) mm.Open() defer mm.Close() r1 := newTestReceiver(1) @@ -332,7 +332,7 @@ func TestMessageManagerPoller(t *testing.T) { ti := newMMTable() ti.MessageInfo.BatchSize = 2 ti.MessageInfo.PollInterval = 20 * time.Second - mm := NewMessageManager(newFakeTabletServer(), ti, newMMConnPool(db)) + mm := newMessageManager(newFakeTabletServer(), ti, newMMConnPool(db)) mm.Open() defer mm.Close() r1 := newTestReceiver(1) @@ -397,7 +397,7 @@ func TestMessagesPending1(t *testing.T) { ti := newMMTable() ti.MessageInfo.CacheSize = 2 ti.MessageInfo.PollInterval = 30 * time.Second - mm := NewMessageManager(newFakeTabletServer(), ti, newMMConnPool(db)) + mm := newMessageManager(newFakeTabletServer(), ti, newMMConnPool(db)) mm.Open() defer mm.Close() r1 := newTestReceiver(0) @@ -462,7 +462,7 @@ func TestMessagesPending2(t *testing.T) { ti := newMMTable() ti.MessageInfo.CacheSize = 1 ti.MessageInfo.PollInterval = 30 * time.Second - mm := NewMessageManager(newFakeTabletServer(), ti, newMMConnPool(db)) + mm := newMessageManager(newFakeTabletServer(), ti, newMMConnPool(db)) mm.Open() defer mm.Close() r1 := newTestReceiver(0) @@ -495,7 +495,7 @@ func TestMessageManagerPurge(t *testing.T) { tsv := 
newFakeTabletServer() ti := newMMTable() ti.MessageInfo.PollInterval = 1 * time.Millisecond - mm := NewMessageManager(tsv, ti, newMMConnPool(db)) + mm := newMessageManager(tsv, ti, newMMConnPool(db)) mm.Open() defer mm.Close() ch := make(chan string) @@ -510,7 +510,7 @@ func TestMessageManagerPurge(t *testing.T) { func TestMMGenerate(t *testing.T) { db := fakesqldb.New(t) defer db.Close() - mm := NewMessageManager(newFakeTabletServer(), mmTable, newMMConnPool(db)) + mm := newMessageManager(newFakeTabletServer(), mmTable, newMMConnPool(db)) mm.Open() defer mm.Close() query, bv := mm.GenerateAckQuery([]string{"1", "2"}) diff --git a/go/vt/tabletserver/tabletserver.go b/go/vt/tabletserver/tabletserver.go index 3090d3fdbfe..c8a52ed5a3c 100644 --- a/go/vt/tabletserver/tabletserver.go +++ b/go/vt/tabletserver/tabletserver.go @@ -114,7 +114,7 @@ type TabletServer struct { se *schema.Engine qe *QueryEngine te *TxEngine - messager *messager.MessagerEngine + messager *messager.Engine watcher *ReplicationWatcher updateStreamList *binlog.StreamList @@ -181,7 +181,7 @@ func NewTabletServer(config tabletenv.TabletConfig, topoServer topo.Server) *Tab tsv.qe = NewQueryEngine(tsv, tsv.se, config) tsv.te = NewTxEngine(tsv, config) tsv.txThrottler = txthrottler.CreateTxThrottlerFromTabletConfig(topoServer) - tsv.messager = messager.NewMessagerEngine(tsv, tsv.se, config) + tsv.messager = messager.NewEngine(tsv, tsv.se, config) tsv.watcher = NewReplicationWatcher(tsv.se, config) tsv.updateStreamList = &binlog.StreamList{} // FIXME(alainjobart) could we move this to the Register method below? 
diff --git a/go/vt/tabletserver/tx_executor.go b/go/vt/tabletserver/tx_executor.go index fc868e8587c..ff680d336cf 100644 --- a/go/vt/tabletserver/tx_executor.go +++ b/go/vt/tabletserver/tx_executor.go @@ -25,7 +25,7 @@ type TxExecutor struct { ctx context.Context logStats *tabletenv.LogStats te *TxEngine - messager *messager.MessagerEngine + messager *messager.Engine } // Prepare performs a prepare on a connection including the redo log work. diff --git a/go/vt/tabletserver/tx_pool.go b/go/vt/tabletserver/tx_pool.go index d87aafc1736..9f33753949a 100644 --- a/go/vt/tabletserver/tx_pool.go +++ b/go/vt/tabletserver/tx_pool.go @@ -172,7 +172,7 @@ func (axp *TxPool) Begin(ctx context.Context) (int64, error) { } // Commit commits the specified transaction. -func (axp *TxPool) Commit(ctx context.Context, transactionID int64, messager *messager.MessagerEngine) error { +func (axp *TxPool) Commit(ctx context.Context, transactionID int64, messager *messager.Engine) error { conn, err := axp.Get(transactionID, "for commit") if err != nil { return err @@ -211,7 +211,7 @@ func (axp *TxPool) LocalBegin(ctx context.Context) (*TxConnection, error) { } // LocalCommit is the commit function for LocalBegin. 
-func (axp *TxPool) LocalCommit(ctx context.Context, conn *TxConnection, messager *messager.MessagerEngine) error { +func (axp *TxPool) LocalCommit(ctx context.Context, conn *TxConnection, messager *messager.Engine) error { defer conn.conclude(TxCommit) defer messager.LockDB(conn.NewMessages, conn.ChangedMessages)() txStats.Add("Completed", time.Now().Sub(conn.StartTime)) From 794dd7c053f27e76f7979df3c20b33479c2d8229 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Sun, 12 Mar 2017 17:16:15 -0700 Subject: [PATCH 093/108] tabletserver: move queryrules to separate package --- go/vt/tabletmanager/state_change.go | 6 +- go/vt/tabletserver/controller.go | 3 +- .../filecustomrule/filecustomrule.go | 13 +- .../filecustomrule/filecustomrule_test.go | 4 +- .../customrule/zkcustomrule/zkcustomrule.go | 15 +-- .../zkcustomrule/zkcustomrule_test.go | 6 +- go/vt/tabletserver/endtoend/acl_test.go | 4 +- go/vt/tabletserver/query_engine.go | 11 +- go/vt/tabletserver/query_executor.go | 7 +- go/vt/tabletserver/query_executor_test.go | 21 ++-- go/vt/tabletserver/query_rule_info.go | 92 -------------- go/vt/tabletserver/rules/map.go | 92 ++++++++++++++ .../map_test.go} | 118 +++++++++--------- .../{query_rules.go => rules/rules.go} | 118 +++++++++--------- .../rules_test.go} | 46 +++---- go/vt/tabletserver/tabletserver.go | 7 +- .../tabletservermock/controller.go | 4 +- 17 files changed, 287 insertions(+), 280 deletions(-) delete mode 100644 go/vt/tabletserver/query_rule_info.go create mode 100644 go/vt/tabletserver/rules/map.go rename go/vt/tabletserver/{query_rule_info_test.go => rules/map_test.go} (72%) rename go/vt/tabletserver/{query_rules.go => rules/rules.go} (87%) rename go/vt/tabletserver/{query_rules_test.go => rules/rules_test.go} (97%) diff --git a/go/vt/tabletmanager/state_change.go b/go/vt/tabletmanager/state_change.go index 11cd50d0d34..550121b3cbd 100644 --- a/go/vt/tabletmanager/state_change.go +++ b/go/vt/tabletmanager/state_change.go @@ -19,7 +19,7 @@ import 
( "github.com/youtube/vitess/go/trace" "github.com/youtube/vitess/go/vt/mysqlctl" "github.com/youtube/vitess/go/vt/tabletmanager/events" - "github.com/youtube/vitess/go/vt/tabletserver" + "github.com/youtube/vitess/go/vt/tabletserver/rules" "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/topoproto" @@ -45,7 +45,7 @@ const blacklistQueryRules string = "BlacklistQueryRules" // loadBlacklistRules loads and builds the blacklist query rules func (agent *ActionAgent) loadBlacklistRules(tablet *topodatapb.Tablet, blacklistedTables []string) (err error) { - blacklistRules := tabletserver.NewQueryRules() + blacklistRules := rules.New() if len(blacklistedTables) > 0 { // tables, first resolve wildcards tables, err := mysqlctl.ResolveTables(agent.MysqlDaemon, topoproto.TabletDbName(tablet), blacklistedTables) @@ -57,7 +57,7 @@ func (agent *ActionAgent) loadBlacklistRules(tablet *topodatapb.Tablet, blacklis // that we don't add a rule to blacklist all tables if len(tables) > 0 { log.Infof("Blacklisting tables %v", strings.Join(tables, ", ")) - qr := tabletserver.NewQueryRule("enforce blacklisted tables", "blacklisted_table", tabletserver.QRFailRetry) + qr := rules.NewQueryRule("enforce blacklisted tables", "blacklisted_table", rules.QRFailRetry) for _, t := range tables { qr.AddTableCond(t) } diff --git a/go/vt/tabletserver/controller.go b/go/vt/tabletserver/controller.go index 501a052ddfb..d74b619af5e 100644 --- a/go/vt/tabletserver/controller.go +++ b/go/vt/tabletserver/controller.go @@ -10,6 +10,7 @@ import ( "github.com/youtube/vitess/go/vt/dbconfigs" "github.com/youtube/vitess/go/vt/mysqlctl" "github.com/youtube/vitess/go/vt/tabletserver/queryservice" + "github.com/youtube/vitess/go/vt/tabletserver/rules" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" @@ -51,7 +52,7 @@ type Controller interface { 
UnRegisterQueryRuleSource(ruleSource string) // SetQueryRules sets the query rules for this QueryService - SetQueryRules(ruleSource string, qrs *QueryRules) error + SetQueryRules(ruleSource string, qrs *rules.Rules) error // QueryService returns the QueryService object used by this Controller QueryService() queryservice.QueryService diff --git a/go/vt/tabletserver/customrule/filecustomrule/filecustomrule.go b/go/vt/tabletserver/customrule/filecustomrule/filecustomrule.go index 1d2381713af..4606aa369cb 100644 --- a/go/vt/tabletserver/customrule/filecustomrule/filecustomrule.go +++ b/go/vt/tabletserver/customrule/filecustomrule/filecustomrule.go @@ -12,6 +12,7 @@ import ( log "github.com/golang/glog" "github.com/youtube/vitess/go/vt/tabletserver" + "github.com/youtube/vitess/go/vt/tabletserver/rules" ) var ( @@ -24,9 +25,9 @@ var ( // FileCustomRule is an implementation of CustomRuleManager, it reads custom query // rules from local file for once and push it to vttablet type FileCustomRule struct { - path string // Path to the file containing custom query rules - currentRuleSet *tabletserver.QueryRules // Query rules built from local file - currentRuleSetTimestamp int64 // Unix timestamp when currentRuleSet is built from local file + path string // Path to the file containing custom query rules + currentRuleSet *rules.Rules // Query rules built from local file + currentRuleSetTimestamp int64 // Unix timestamp when currentRuleSet is built from local file } // FileCustomRuleSource is the name of the file based custom rule source @@ -36,7 +37,7 @@ const FileCustomRuleSource string = "FILE_CUSTOM_RULE" func NewFileCustomRule() (fcr *FileCustomRule) { fcr = new(FileCustomRule) fcr.path = "" - fcr.currentRuleSet = tabletserver.NewQueryRules() + fcr.currentRuleSet = rules.New() return fcr } @@ -53,7 +54,7 @@ func (fcr *FileCustomRule) Open(qsc tabletserver.Controller, rulePath string) er // Don't update any internal cache, just return error return err } - qrs := 
tabletserver.NewQueryRules() + qrs := rules.New() err = qrs.UnmarshalJSON(data) if err != nil { log.Warningf("Error unmarshaling query rules %v", err) @@ -68,7 +69,7 @@ func (fcr *FileCustomRule) Open(qsc tabletserver.Controller, rulePath string) er } // GetRules returns query rules built from local file -func (fcr *FileCustomRule) GetRules() (qrs *tabletserver.QueryRules, version int64, err error) { +func (fcr *FileCustomRule) GetRules() (qrs *rules.Rules, version int64, err error) { return fcr.currentRuleSet.Copy(), fcr.currentRuleSetTimestamp, nil } diff --git a/go/vt/tabletserver/customrule/filecustomrule/filecustomrule_test.go b/go/vt/tabletserver/customrule/filecustomrule/filecustomrule_test.go index a9d795e3043..952ed42bc4b 100644 --- a/go/vt/tabletserver/customrule/filecustomrule/filecustomrule_test.go +++ b/go/vt/tabletserver/customrule/filecustomrule/filecustomrule_test.go @@ -10,7 +10,7 @@ import ( "path" "testing" - "github.com/youtube/vitess/go/vt/tabletserver" + "github.com/youtube/vitess/go/vt/tabletserver/rules" "github.com/youtube/vitess/go/vt/tabletserver/tabletservermock" ) @@ -29,7 +29,7 @@ var customRule1 = `[ func TestFileCustomRule(t *testing.T) { tqsc := tabletservermock.NewController() - var qrs *tabletserver.QueryRules + var qrs *rules.Rules rulepath := path.Join(os.TempDir(), ".customrule.json") // Set r1 and try to get it back err := ioutil.WriteFile(rulepath, []byte(customRule1), os.FileMode(0644)) diff --git a/go/vt/tabletserver/customrule/zkcustomrule/zkcustomrule.go b/go/vt/tabletserver/customrule/zkcustomrule/zkcustomrule.go index c11f02d60f2..15974f1ecbe 100644 --- a/go/vt/tabletserver/customrule/zkcustomrule/zkcustomrule.go +++ b/go/vt/tabletserver/customrule/zkcustomrule/zkcustomrule.go @@ -16,6 +16,7 @@ import ( "github.com/youtube/vitess/go/vt/servenv" "github.com/youtube/vitess/go/vt/tabletserver" + "github.com/youtube/vitess/go/vt/tabletserver/rules" "github.com/youtube/vitess/go/vt/topo/zk2topo" ) @@ -42,7 +43,7 @@ type 
ZkCustomRule struct { // mu protects all the following fields. mu sync.Mutex watch <-chan zk.Event // Zookeeper watch for listenning data change notifications - currentRuleSet *tabletserver.QueryRules + currentRuleSet *rules.Rules currentRuleSetVersion int64 // implemented with Zookeeper modification version done chan struct{} } @@ -52,13 +53,13 @@ func NewZkCustomRule(server, path string) *ZkCustomRule { return &ZkCustomRule{ zconn: zk2topo.Connect(server), path: path, - currentRuleSet: tabletserver.NewQueryRules(), + currentRuleSet: rules.New(), currentRuleSetVersion: invalidQueryRulesVersion, done: make(chan struct{}), } } -// Start registers Zookeeper watch, gets inital QueryRules and starts +// Start registers Zookeeper watch, gets inital Rules and starts // polling routine. func (zkcr *ZkCustomRule) Start(qsc tabletserver.Controller) (err error) { err = zkcr.refreshWatch() @@ -86,8 +87,8 @@ func (zkcr *ZkCustomRule) refreshWatch() error { return nil } -// refreshData gets query rules from Zookeeper and refresh internal QueryRules cache -// this function will also call TabletServer.SetQueryRules to propagate rule changes to query service +// refreshData gets query rules from Zookeeper and refresh internal Rules cache +// this function will also call rules.SetQueryRules to propagate rule changes to query service func (zkcr *ZkCustomRule) refreshData(qsc tabletserver.Controller, nodeRemoval bool) error { ctx := context.Background() data, stat, err := zkcr.zconn.Get(ctx, zkcr.path) @@ -96,7 +97,7 @@ func (zkcr *ZkCustomRule) refreshData(qsc tabletserver.Controller, nodeRemoval b return err } - qrs := tabletserver.NewQueryRules() + qrs := rules.New() if !nodeRemoval { if err = qrs.UnmarshalJSON([]byte(data)); err != nil { log.Warningf("Error unmarshaling query rules %v, original data '%s'", err, data) @@ -154,7 +155,7 @@ func (zkcr *ZkCustomRule) Stop() { } // GetRules retrives cached rules. 
-func (zkcr *ZkCustomRule) GetRules() (qrs *tabletserver.QueryRules, version int64, err error) { +func (zkcr *ZkCustomRule) GetRules() (qrs *rules.Rules, version int64, err error) { zkcr.mu.Lock() defer zkcr.mu.Unlock() return zkcr.currentRuleSet.Copy(), zkcr.currentRuleSetVersion, nil diff --git a/go/vt/tabletserver/customrule/zkcustomrule/zkcustomrule_test.go b/go/vt/tabletserver/customrule/zkcustomrule/zkcustomrule_test.go index 5bc404d9df7..9570a997630 100644 --- a/go/vt/tabletserver/customrule/zkcustomrule/zkcustomrule_test.go +++ b/go/vt/tabletserver/customrule/zkcustomrule/zkcustomrule_test.go @@ -13,7 +13,7 @@ import ( "github.com/samuel/go-zookeeper/zk" "github.com/youtube/vitess/go/testfiles" - "github.com/youtube/vitess/go/vt/tabletserver" + "github.com/youtube/vitess/go/vt/tabletserver/rules" "github.com/youtube/vitess/go/vt/tabletserver/tabletservermock" "github.com/youtube/vitess/go/vt/topo/zk2topo" "github.com/youtube/vitess/go/zk/zkctl" @@ -67,7 +67,7 @@ func TestZkCustomRule(t *testing.T) { } defer zkcr.Stop() - var qrs *tabletserver.QueryRules + var qrs *rules.Rules // Test if we can successfully fetch the original rule (test GetRules) qrs, _, err = zkcr.GetRules() if err != nil { @@ -101,7 +101,7 @@ func TestZkCustomRule(t *testing.T) { if err != nil { t.Fatalf("GetRules of ZkCustomRule should always return nil error, but we receive %v", err) } - if reflect.DeepEqual(qrs, tabletserver.NewQueryRules()) { + if reflect.DeepEqual(qrs, rules.New()) { t.Fatalf("Expect empty rule at this point") } diff --git a/go/vt/tabletserver/endtoend/acl_test.go b/go/vt/tabletserver/endtoend/acl_test.go index 3a9c204c440..00def40d176 100644 --- a/go/vt/tabletserver/endtoend/acl_test.go +++ b/go/vt/tabletserver/endtoend/acl_test.go @@ -10,8 +10,8 @@ import ( "strings" "testing" - "github.com/youtube/vitess/go/vt/tabletserver" "github.com/youtube/vitess/go/vt/tabletserver/endtoend/framework" + "github.com/youtube/vitess/go/vt/tabletserver/rules" ) func 
TestTableACLNoAccess(t *testing.T) { @@ -122,7 +122,7 @@ var rulesJSON = []byte(`[{ }]`) func TestQueryRules(t *testing.T) { - rules := tabletserver.NewQueryRules() + rules := rules.New() err := rules.UnmarshalJSON(rulesJSON) if err != nil { t.Error(err) diff --git a/go/vt/tabletserver/query_engine.go b/go/vt/tabletserver/query_engine.go index d03378f588d..e9ebc346731 100644 --- a/go/vt/tabletserver/query_engine.go +++ b/go/vt/tabletserver/query_engine.go @@ -30,6 +30,7 @@ import ( "github.com/youtube/vitess/go/vt/tabletserver/connpool" "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" "github.com/youtube/vitess/go/vt/tabletserver/planbuilder" + "github.com/youtube/vitess/go/vt/tabletserver/rules" "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/vterrors" @@ -44,7 +45,7 @@ import ( type TabletPlan struct { *planbuilder.Plan Fields []*querypb.Field - Rules *QueryRules + Rules *rules.Rules Authorized *tableacl.ACLResult mu sync.Mutex @@ -100,7 +101,7 @@ type QueryEngine struct { mu sync.RWMutex tables map[string]*schema.Table queries *cache.LRUCache - queryRuleSources *QueryRuleInfo + queryRuleSources *rules.Map // Pools conns *connpool.Pool @@ -140,7 +141,7 @@ func NewQueryEngine(checker MySQLChecker, se *schema.Engine, config tabletenv.Ta se: se, tables: make(map[string]*schema.Table), queries: cache.NewLRUCache(int64(config.QueryCacheSize)), - queryRuleSources: NewQueryRuleInfo(), + queryRuleSources: rules.NewMap(), } qe.conns = connpool.New( @@ -276,7 +277,7 @@ func (qe *QueryEngine) GetPlan(ctx context.Context, logStats *tabletenv.LogStats return nil, vterrors.New(vtrpcpb.Code_UNKNOWN, err.Error()) } plan := &TabletPlan{Plan: splan} - plan.Rules = qe.queryRuleSources.filterByPlan(sql, plan.PlanID, plan.TableName().String()) + plan.Rules = qe.queryRuleSources.FilterByPlan(sql, plan.PlanID, plan.TableName().String()) plan.Authorized = tableacl.Authorized(plan.TableName().String(), plan.PlanID.MinRole()) if 
plan.PlanID.IsSelect() { if plan.FieldQuery == nil { @@ -315,7 +316,7 @@ func (qe *QueryEngine) GetStreamPlan(sql string) (*TabletPlan, error) { return nil, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, err.Error()) } plan := &TabletPlan{Plan: splan} - plan.Rules = qe.queryRuleSources.filterByPlan(sql, plan.PlanID, plan.TableName().String()) + plan.Rules = qe.queryRuleSources.FilterByPlan(sql, plan.PlanID, plan.TableName().String()) plan.Authorized = tableacl.Authorized(plan.TableName().String(), plan.PlanID.MinRole()) return plan, nil } diff --git a/go/vt/tabletserver/query_executor.go b/go/vt/tabletserver/query_executor.go index e998e1bf14b..a1503f26eae 100644 --- a/go/vt/tabletserver/query_executor.go +++ b/go/vt/tabletserver/query_executor.go @@ -24,6 +24,7 @@ import ( "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" "github.com/youtube/vitess/go/vt/tabletserver/messager" "github.com/youtube/vitess/go/vt/tabletserver/planbuilder" + "github.com/youtube/vitess/go/vt/tabletserver/rules" "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/vterrors" @@ -229,11 +230,11 @@ func (qre *QueryExecutor) checkPermissions() error { remoteAddr = ci.RemoteAddr() username = ci.Username() } - action, desc := qre.plan.Rules.getAction(remoteAddr, username, qre.bindVars) + action, desc := qre.plan.Rules.GetAction(remoteAddr, username, qre.bindVars) switch action { - case QRFail: + case rules.QRFail: return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "disallowed due to rule: %s", desc) - case QRFailRetry: + case rules.QRFailRetry: return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "disallowed due to rule: %s", desc) } diff --git a/go/vt/tabletserver/query_executor_test.go b/go/vt/tabletserver/query_executor_test.go index df2a8f73011..f24e9396918 100644 --- a/go/vt/tabletserver/query_executor_test.go +++ b/go/vt/tabletserver/query_executor_test.go @@ -24,6 +24,7 @@ import ( "github.com/youtube/vitess/go/vt/tableacl" 
"github.com/youtube/vitess/go/vt/tableacl/simpleacl" "github.com/youtube/vitess/go/vt/tabletserver/planbuilder" + "github.com/youtube/vitess/go/vt/tabletserver/rules" "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/vterrors" @@ -1164,7 +1165,7 @@ func TestQueryExecutorBlacklistQRFail(t *testing.T) { bannedAddr := "127.0.0.1" bannedUser := "u2" - alterRule := NewQueryRule("disable update", "disable update", QRFail) + alterRule := rules.NewQueryRule("disable update", "disable update", rules.QRFail) alterRule.SetIPCond(bannedAddr) alterRule.SetUserCond(bannedUser) alterRule.SetQueryCond("select.*") @@ -1172,7 +1173,7 @@ func TestQueryExecutorBlacklistQRFail(t *testing.T) { alterRule.AddTableCond("test_table") rulesName := "blacklistedRulesQRFail" - rules := NewQueryRules() + rules := rules.New() rules.Add(alterRule) callInfo := &fakecallinfo.FakeCallInfo{ @@ -1181,9 +1182,9 @@ func TestQueryExecutorBlacklistQRFail(t *testing.T) { } ctx := callinfo.NewContext(context.Background(), callInfo) tsv := newTestTabletServer(ctx, enableStrict, db) - tsv.qe.queryRuleSources.UnRegisterQueryRuleSource(rulesName) - tsv.qe.queryRuleSources.RegisterQueryRuleSource(rulesName) - defer tsv.qe.queryRuleSources.UnRegisterQueryRuleSource(rulesName) + tsv.qe.queryRuleSources.UnRegisterSource(rulesName) + tsv.qe.queryRuleSources.RegisterSource(rulesName) + defer tsv.qe.queryRuleSources.UnRegisterSource(rulesName) if err := tsv.qe.queryRuleSources.SetRules(rulesName, rules); err != nil { t.Fatalf("failed to set rule, error: %v", err) @@ -1218,7 +1219,7 @@ func TestQueryExecutorBlacklistQRRetry(t *testing.T) { bannedAddr := "127.0.0.1" bannedUser := "x" - alterRule := NewQueryRule("disable update", "disable update", QRFailRetry) + alterRule := rules.NewQueryRule("disable update", "disable update", rules.QRFailRetry) alterRule.SetIPCond(bannedAddr) alterRule.SetUserCond(bannedUser) alterRule.SetQueryCond("select.*") @@ -1226,7 +1227,7 @@ func 
TestQueryExecutorBlacklistQRRetry(t *testing.T) { alterRule.AddTableCond("test_table") rulesName := "blacklistedRulesQRRetry" - rules := NewQueryRules() + rules := rules.New() rules.Add(alterRule) callInfo := &fakecallinfo.FakeCallInfo{ @@ -1235,9 +1236,9 @@ func TestQueryExecutorBlacklistQRRetry(t *testing.T) { } ctx := callinfo.NewContext(context.Background(), callInfo) tsv := newTestTabletServer(ctx, enableStrict, db) - tsv.qe.queryRuleSources.UnRegisterQueryRuleSource(rulesName) - tsv.qe.queryRuleSources.RegisterQueryRuleSource(rulesName) - defer tsv.qe.queryRuleSources.UnRegisterQueryRuleSource(rulesName) + tsv.qe.queryRuleSources.UnRegisterSource(rulesName) + tsv.qe.queryRuleSources.RegisterSource(rulesName) + defer tsv.qe.queryRuleSources.UnRegisterSource(rulesName) if err := tsv.qe.queryRuleSources.SetRules(rulesName, rules); err != nil { t.Fatalf("failed to set rule, error: %v", err) diff --git a/go/vt/tabletserver/query_rule_info.go b/go/vt/tabletserver/query_rule_info.go deleted file mode 100644 index 6bb8154242e..00000000000 --- a/go/vt/tabletserver/query_rule_info.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2014, Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package tabletserver - -import ( - "encoding/json" - "errors" - "sync" - - log "github.com/golang/glog" - "github.com/youtube/vitess/go/vt/tabletserver/planbuilder" -) - -// QueryRuleInfo is the maintainer of QueryRules from multiple sources -type QueryRuleInfo struct { - // mutex to protect following queryRulesMap - mu sync.Mutex - // queryRulesMap maps the names of different query rule sources to the actual QueryRules structure - queryRulesMap map[string]*QueryRules -} - -// NewQueryRuleInfo returns an empty QueryRuleInfo object for use -func NewQueryRuleInfo() *QueryRuleInfo { - qri := &QueryRuleInfo{ - queryRulesMap: map[string]*QueryRules{}, - } - return qri -} - -// RegisterQueryRuleSource registers a query rule source name with QueryRuleInfo -func (qri *QueryRuleInfo) RegisterQueryRuleSource(ruleSource string) { - qri.mu.Lock() - defer qri.mu.Unlock() - if _, existed := qri.queryRulesMap[ruleSource]; existed { - log.Errorf("Query rule source " + ruleSource + " has been registered") - panic("Query rule source " + ruleSource + " has been registered") - } - qri.queryRulesMap[ruleSource] = NewQueryRules() -} - -// UnRegisterQueryRuleSource removes a registered query rule source name -func (qri *QueryRuleInfo) UnRegisterQueryRuleSource(ruleSource string) { - qri.mu.Lock() - defer qri.mu.Unlock() - delete(qri.queryRulesMap, ruleSource) -} - -// SetRules takes an external QueryRules structure and overwrite one of the -// internal QueryRules as designated by ruleSource parameter -func (qri *QueryRuleInfo) SetRules(ruleSource string, newRules *QueryRules) error { - if newRules == nil { - newRules = NewQueryRules() - } - qri.mu.Lock() - defer qri.mu.Unlock() - if _, ok := qri.queryRulesMap[ruleSource]; ok { - qri.queryRulesMap[ruleSource] = newRules.Copy() - return nil - } - return errors.New("Rule source identifier " + ruleSource + " is not valid") -} - -// GetRules returns the corresponding QueryRules as designated by ruleSource parameter -func (qri 
*QueryRuleInfo) GetRules(ruleSource string) (*QueryRules, error) { - qri.mu.Lock() - defer qri.mu.Unlock() - if ruleset, ok := qri.queryRulesMap[ruleSource]; ok { - return ruleset.Copy(), nil - } - return NewQueryRules(), errors.New("Rule source identifier " + ruleSource + " is not valid") -} - -// filterByPlan creates a new QueryRules by prefiltering on all query rules that are contained in internal -// QueryRules structures, in other words, query rules from all predefined sources will be applied -func (qri *QueryRuleInfo) filterByPlan(query string, planid planbuilder.PlanType, tableName string) (newqrs *QueryRules) { - qri.mu.Lock() - defer qri.mu.Unlock() - newqrs = NewQueryRules() - for _, rules := range qri.queryRulesMap { - newqrs.Append(rules.filterByPlan(query, planid, tableName)) - } - return newqrs -} - -// MarshalJSON marshals to JSON. -func (qri *QueryRuleInfo) MarshalJSON() ([]byte, error) { - qri.mu.Lock() - defer qri.mu.Unlock() - return json.Marshal(qri.queryRulesMap) -} diff --git a/go/vt/tabletserver/rules/map.go b/go/vt/tabletserver/rules/map.go new file mode 100644 index 00000000000..8704e2a9113 --- /dev/null +++ b/go/vt/tabletserver/rules/map.go @@ -0,0 +1,92 @@ +// Copyright 2014, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rules + +import ( + "encoding/json" + "errors" + "sync" + + log "github.com/golang/glog" + "github.com/youtube/vitess/go/vt/tabletserver/planbuilder" +) + +// Map is the maintainer of Rules from multiple sources +type Map struct { + // mutex to protect following queryRulesMap + mu sync.Mutex + // queryRulesMap maps the names of different query rule sources to the actual Rules structure + queryRulesMap map[string]*Rules +} + +// NewMap returns an empty Map object. 
+func NewMap() *Map { + qri := &Map{ + queryRulesMap: map[string]*Rules{}, + } + return qri +} + +// RegisterSource registers a query rule source name with Map. +func (qri *Map) RegisterSource(ruleSource string) { + qri.mu.Lock() + defer qri.mu.Unlock() + if _, existed := qri.queryRulesMap[ruleSource]; existed { + log.Errorf("Query rule source " + ruleSource + " has been registered") + panic("Query rule source " + ruleSource + " has been registered") + } + qri.queryRulesMap[ruleSource] = New() +} + +// UnRegisterSource removes a registered query rule source name. +func (qri *Map) UnRegisterSource(ruleSource string) { + qri.mu.Lock() + defer qri.mu.Unlock() + delete(qri.queryRulesMap, ruleSource) +} + +// SetRules takes an external Rules structure and overwrite one of the +// internal Rules as designated by ruleSource parameter. +func (qri *Map) SetRules(ruleSource string, newRules *Rules) error { + if newRules == nil { + newRules = New() + } + qri.mu.Lock() + defer qri.mu.Unlock() + if _, ok := qri.queryRulesMap[ruleSource]; ok { + qri.queryRulesMap[ruleSource] = newRules.Copy() + return nil + } + return errors.New("Rule source identifier " + ruleSource + " is not valid") +} + +// Get returns the corresponding Rules as designated by ruleSource parameter. +func (qri *Map) Get(ruleSource string) (*Rules, error) { + qri.mu.Lock() + defer qri.mu.Unlock() + if ruleset, ok := qri.queryRulesMap[ruleSource]; ok { + return ruleset.Copy(), nil + } + return New(), errors.New("Rule source identifier " + ruleSource + " is not valid") +} + +// FilterByPlan creates a new Rules by prefiltering on all query rules that are contained in internal +// Rules structures, in other words, query rules from all predefined sources will be applied. 
+func (qri *Map) FilterByPlan(query string, planid planbuilder.PlanType, tableName string) (newqrs *Rules) { + qri.mu.Lock() + defer qri.mu.Unlock() + newqrs = New() + for _, rules := range qri.queryRulesMap { + newqrs.Append(rules.FilterByPlan(query, planid, tableName)) + } + return newqrs +} + +// MarshalJSON marshals to JSON. +func (qri *Map) MarshalJSON() ([]byte, error) { + qri.mu.Lock() + defer qri.mu.Unlock() + return json.Marshal(qri.queryRulesMap) +} diff --git a/go/vt/tabletserver/query_rule_info_test.go b/go/vt/tabletserver/rules/map_test.go similarity index 72% rename from go/vt/tabletserver/query_rule_info_test.go rename to go/vt/tabletserver/rules/map_test.go index 8ff09ce354e..945833e4d40 100644 --- a/go/vt/tabletserver/query_rule_info_test.go +++ b/go/vt/tabletserver/rules/map_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package tabletserver +package rules import ( "fmt" @@ -16,9 +16,9 @@ import ( ) var ( - keyrangeRules *QueryRules - blacklistRules *QueryRules - otherRules *QueryRules + keyrangeRules *Rules + blacklistRules *Rules + otherRules *Rules ) // mimic query rules from keyrange @@ -30,10 +30,10 @@ const blacklistQueryRules string = "BLACKLIST_QUERY_RULES" // mimic query rules from custom source const customQueryRules string = "CUSTOM_QUERY_RULES" -func setupQueryRules() { - var qr *QueryRule +func setupRules() { + var qr *Rule // mock keyrange rules - keyrangeRules = NewQueryRules() + keyrangeRules = New() dmlPlans := []struct { planID planbuilder.PlanType onAbsent bool @@ -56,7 +56,7 @@ func setupQueryRules() { } // mock blacklisted tables - blacklistRules = NewQueryRules() + blacklistRules = New() blacklistedTables := []string{"bannedtable1", "bannedtable2", "bannedtable3"} qr = NewQueryRule("enforce blacklisted tables", "blacklisted_table", QRFailRetry) for _, t := range blacklistedTables { @@ -65,36 +65,36 @@ func setupQueryRules() { 
blacklistRules.Add(qr) // mock custom rules - otherRules = NewQueryRules() + otherRules = New() qr = NewQueryRule("sample custom rule", "customrule_ban_bindvar", QRFail) qr.AddTableCond("t_customer") qr.AddBindVarCond("bindvar1", true, false, QRNoOp, nil) otherRules.Add(qr) } -func TestQueryRuleInfoRegisterARegisteredSource(t *testing.T) { - setupQueryRules() - qri := NewQueryRuleInfo() - qri.RegisterQueryRuleSource(keyrangeQueryRules) +func TestMapRegisterARegisteredSource(t *testing.T) { + setupRules() + qri := NewMap() + qri.RegisterSource(keyrangeQueryRules) defer func() { err := recover() if err == nil { t.Fatalf("should get an error for registering a registered query rule source ") } }() - qri.RegisterQueryRuleSource(keyrangeQueryRules) + qri.RegisterSource(keyrangeQueryRules) } -func TestQueryRuleInfoSetRulesWithNil(t *testing.T) { - setupQueryRules() - qri := NewQueryRuleInfo() +func TestMapSetRulesWithNil(t *testing.T) { + setupRules() + qri := NewMap() - qri.RegisterQueryRuleSource(keyrangeQueryRules) + qri.RegisterSource(keyrangeQueryRules) err := qri.SetRules(keyrangeQueryRules, keyrangeRules) if err != nil { - t.Errorf("Failed to set keyrange QueryRules : %s", err) + t.Errorf("Failed to set keyrange Rules : %s", err) } - qrs, err := qri.GetRules(keyrangeQueryRules) + qrs, err := qri.Get(keyrangeQueryRules) if err != nil { t.Errorf("GetRules failed to retrieve keyrangeQueryRules that has been set: %s", err) } @@ -104,57 +104,57 @@ func TestQueryRuleInfoSetRulesWithNil(t *testing.T) { qri.SetRules(keyrangeQueryRules, nil) - qrs, err = qri.GetRules(keyrangeQueryRules) + qrs, err = qri.Get(keyrangeQueryRules) if err != nil { t.Errorf("GetRules failed to retrieve keyrangeQueryRules that has been set: %s", err) } - if !reflect.DeepEqual(qrs, NewQueryRules()) { + if !reflect.DeepEqual(qrs, New()) { t.Errorf("keyrangeQueryRules retrived is %v, but the expected value should be %v", qrs, keyrangeRules) } } -func TestQueryRuleInfoGetSetQueryRules(t *testing.T) { 
- setupQueryRules() - qri := NewQueryRuleInfo() +func TestMapGetSetQueryRules(t *testing.T) { + setupRules() + qri := NewMap() - qri.RegisterQueryRuleSource(keyrangeQueryRules) - qri.RegisterQueryRuleSource(blacklistQueryRules) - qri.RegisterQueryRuleSource(customQueryRules) + qri.RegisterSource(keyrangeQueryRules) + qri.RegisterSource(blacklistQueryRules) + qri.RegisterSource(customQueryRules) - // Test if we can get a QueryRules without a predefined rule set name - qrs, err := qri.GetRules("Foo") + // Test if we can get a Rules without a predefined rule set name + qrs, err := qri.Get("Foo") if err == nil { t.Errorf("GetRules shouldn't succeed with 'Foo' as the rule set name") } if qrs == nil { - t.Errorf("GetRules should always return empty QueryRules and never nil") + t.Errorf("GetRules should always return empty Rules and never nil") } - if !reflect.DeepEqual(qrs, NewQueryRules()) { - t.Errorf("QueryRuleInfo contains only empty QueryRules at the beginning") + if !reflect.DeepEqual(qrs, New()) { + t.Errorf("Map contains only empty Rules at the beginning") } - // Test if we can set a QueryRules without a predefined rule set name - err = qri.SetRules("Foo", NewQueryRules()) + // Test if we can set a Rules without a predefined rule set name + err = qri.SetRules("Foo", New()) if err == nil { t.Errorf("SetRules shouldn't succeed with 'Foo' as the rule set name") } - // Test if we can successfully set QueryRules previously mocked into QueryRuleInfo + // Test if we can successfully set Rules previously mocked into Map err = qri.SetRules(keyrangeQueryRules, keyrangeRules) if err != nil { - t.Errorf("Failed to set keyrange QueryRules : %s", err) + t.Errorf("Failed to set keyrange Rules : %s", err) } err = qri.SetRules(blacklistQueryRules, blacklistRules) if err != nil { - t.Errorf("Failed to set blacklist QueryRules: %s", err) + t.Errorf("Failed to set blacklist Rules: %s", err) } err = qri.SetRules(customQueryRules, otherRules) if err != nil { - t.Errorf("Failed to set 
custom QueryRules: %s", err) + t.Errorf("Failed to set custom Rules: %s", err) } // Test if we can successfully retrive rules that've been set - qrs, err = qri.GetRules(keyrangeQueryRules) + qrs, err = qri.Get(keyrangeQueryRules) if err != nil { t.Errorf("GetRules failed to retrieve keyrangeQueryRules that has been set: %s", err) } @@ -162,7 +162,7 @@ func TestQueryRuleInfoGetSetQueryRules(t *testing.T) { t.Errorf("keyrangeQueryRules retrived is %v, but the expected value should be %v", qrs, keyrangeRules) } - qrs, err = qri.GetRules(blacklistQueryRules) + qrs, err = qri.Get(blacklistQueryRules) if err != nil { t.Errorf("GetRules failed to retrieve blacklistQueryRules that has been set: %s", err) } @@ -170,7 +170,7 @@ func TestQueryRuleInfoGetSetQueryRules(t *testing.T) { t.Errorf("blacklistQueryRules retrived is %v, but the expected value should be %v", qrs, blacklistRules) } - qrs, err = qri.GetRules(customQueryRules) + qrs, err = qri.Get(customQueryRules) if err != nil { t.Errorf("GetRules failed to retrieve customQueryRules that has been set: %s", err) } @@ -179,21 +179,21 @@ func TestQueryRuleInfoGetSetQueryRules(t *testing.T) { } } -func TestQueryRuleInfoFilterByPlan(t *testing.T) { - var qrs *QueryRules - setupQueryRules() - qri := NewQueryRuleInfo() +func TestMapFilterByPlan(t *testing.T) { + var qrs *Rules + setupRules() + qri := NewMap() - qri.RegisterQueryRuleSource(keyrangeQueryRules) - qri.RegisterQueryRuleSource(blacklistQueryRules) - qri.RegisterQueryRuleSource(customQueryRules) + qri.RegisterSource(keyrangeQueryRules) + qri.RegisterSource(blacklistQueryRules) + qri.RegisterSource(customQueryRules) qri.SetRules(keyrangeQueryRules, keyrangeRules) qri.SetRules(blacklistQueryRules, blacklistRules) qri.SetRules(customQueryRules, otherRules) // Test filter by keyrange rule - qrs = qri.filterByPlan("insert into t_test values(123, 456, 'abc')", planbuilder.PlanInsertPK, "t_test") + qrs = qri.FilterByPlan("insert into t_test values(123, 456, 'abc')", 
planbuilder.PlanInsertPK, "t_test") if l := len(qrs.rules); l != 1 { t.Errorf("Insert PK query matches %d rules, but we expect %d", l, 1) } @@ -202,7 +202,7 @@ func TestQueryRuleInfoFilterByPlan(t *testing.T) { } // Test filter by blacklist rule - qrs = qri.filterByPlan("select * from bannedtable2", planbuilder.PlanPassSelect, "bannedtable2") + qrs = qri.FilterByPlan("select * from bannedtable2", planbuilder.PlanPassSelect, "bannedtable2") if l := len(qrs.rules); l != 1 { t.Errorf("Select from bannedtable matches %d rules, but we expect %d", l, 1) } @@ -211,7 +211,7 @@ func TestQueryRuleInfoFilterByPlan(t *testing.T) { } // Test filter by custom rule - qrs = qri.filterByPlan("select cid from t_customer limit 10", planbuilder.PlanPassSelect, "t_customer") + qrs = qri.FilterByPlan("select cid from t_customer limit 10", planbuilder.PlanPassSelect, "t_customer") if l := len(qrs.rules); l != 1 { t.Errorf("Select from t_customer matches %d rules, but we expect %d", l, 1) } @@ -220,12 +220,12 @@ func TestQueryRuleInfoFilterByPlan(t *testing.T) { } // Test match two rules: both keyrange rule and custom rule will be matched - otherRules = NewQueryRules() + otherRules = New() qr := NewQueryRule("sample custom rule", "customrule_ban_bindvar", QRFail) qr.AddBindVarCond("bindvar1", true, false, QRNoOp, nil) otherRules.Add(qr) qri.SetRules(customQueryRules, otherRules) - qrs = qri.filterByPlan("insert into t_test values (:bindvar1, 123, 'test')", planbuilder.PlanInsertPK, "t_test") + qrs = qri.FilterByPlan("insert into t_test values (:bindvar1, 123, 'test')", planbuilder.PlanInsertPK, "t_test") if l := len(qrs.rules); l != 2 { t.Errorf("Insert into t_test matches %d rules: %v, but we expect %d rules to be matched", l, qrs.rules, 2) } @@ -242,12 +242,12 @@ func TestQueryRuleInfoFilterByPlan(t *testing.T) { qrs.rules[0].Name, qrs.rules[1].Name, "keyspace_id_not_in_range", "customrule_ban_bindvar") } -func TestQueryRuleInfoJSON(t *testing.T) { - setupQueryRules() - qri := 
NewQueryRuleInfo() - qri.RegisterQueryRuleSource(blacklistQueryRules) +func TestMapJSON(t *testing.T) { + setupRules() + qri := NewMap() + qri.RegisterSource(blacklistQueryRules) _ = qri.SetRules(blacklistQueryRules, blacklistRules) - qri.RegisterQueryRuleSource(customQueryRules) + qri.RegisterSource(customQueryRules) _ = qri.SetRules(customQueryRules, otherRules) got := marshalled(qri) want := compacted(`{ diff --git a/go/vt/tabletserver/query_rules.go b/go/vt/tabletserver/rules/rules.go similarity index 87% rename from go/vt/tabletserver/query_rules.go rename to go/vt/tabletserver/rules/rules.go index 03508c79906..c478ce466d2 100644 --- a/go/vt/tabletserver/query_rules.go +++ b/go/vt/tabletserver/rules/rules.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package tabletserver +package rules import ( "bytes" @@ -23,22 +23,22 @@ import ( //----------------------------------------------- -// QueryRules is used to store and execute rules for the tabletserver. -type QueryRules struct { - rules []*QueryRule +// Rules is used to store and execute rules for the tabletserver. +type Rules struct { + rules []*Rule } -// NewQueryRules creates a new QueryRules. -func NewQueryRules() *QueryRules { - return &QueryRules{} +// New creates a new Rules. +func New() *Rules { + return &Rules{} } -// Copy performs a deep copy of QueryRules. +// Copy performs a deep copy of Rules. // A nil input produces a nil output. 
-func (qrs *QueryRules) Copy() (newqrs *QueryRules) { - newqrs = NewQueryRules() +func (qrs *Rules) Copy() (newqrs *Rules) { + newqrs = New() if qrs.rules != nil { - newqrs.rules = make([]*QueryRule, 0, len(qrs.rules)) + newqrs.rules = make([]*Rule, 0, len(qrs.rules)) for _, qr := range qrs.rules { newqrs.rules = append(newqrs.rules, qr.Copy()) } @@ -46,22 +46,22 @@ func (qrs *QueryRules) Copy() (newqrs *QueryRules) { return newqrs } -// Append merges the rules from another QueryRules into the receiver -func (qrs *QueryRules) Append(otherqrs *QueryRules) { +// Append merges the rules from another Rules into the receiver +func (qrs *Rules) Append(otherqrs *Rules) { for _, qr := range otherqrs.rules { qrs.rules = append(qrs.rules, qr) } } -// Add adds a QueryRule to QueryRules. It does not check +// Add adds a Rule to Rules. It does not check // for duplicates. -func (qrs *QueryRules) Add(qr *QueryRule) { +func (qrs *Rules) Add(qr *Rule) { qrs.rules = append(qrs.rules, qr) } -// Find finds the first occurrence of a QueryRule by matching +// Find finds the first occurrence of a Rule by matching // the Name field. It returns nil if the rule was not found. -func (qrs *QueryRules) Find(name string) (qr *QueryRule) { +func (qrs *Rules) Find(name string) (qr *Rule) { for _, qr = range qrs.rules { if qr.Name == name { return qr @@ -70,9 +70,9 @@ func (qrs *QueryRules) Find(name string) (qr *QueryRule) { return nil } -// Delete deletes a QueryRule by name and returns the rule +// Delete deletes a Rule by name and returns the rule // that was deleted. It returns nil if the rule was not found. -func (qrs *QueryRules) Delete(name string) (qr *QueryRule) { +func (qrs *Rules) Delete(name string) (qr *Rule) { for i, qr := range qrs.rules { if qr.Name == name { for j := i; j < len(qrs.rules)-i-1; j++ { @@ -85,8 +85,8 @@ func (qrs *QueryRules) Delete(name string) (qr *QueryRule) { return nil } -// UnmarshalJSON unmarshals QueryRules. 
-func (qrs *QueryRules) UnmarshalJSON(data []byte) (err error) { +// UnmarshalJSON unmarshals Rules. +func (qrs *Rules) UnmarshalJSON(data []byte) (err error) { var rulesInfo []map[string]interface{} dec := json.NewDecoder(bytes.NewReader(data)) dec.UseNumber() @@ -105,7 +105,7 @@ func (qrs *QueryRules) UnmarshalJSON(data []byte) (err error) { } // MarshalJSON marshals to JSON. -func (qrs *QueryRules) MarshalJSON() ([]byte, error) { +func (qrs *Rules) MarshalJSON() ([]byte, error) { b := bytes.NewBuffer(nil) _, _ = b.WriteString("[") for i, rule := range qrs.rules { @@ -118,22 +118,22 @@ func (qrs *QueryRules) MarshalJSON() ([]byte, error) { return b.Bytes(), nil } -// filterByPlan creates a new QueryRules by prefiltering on the query and planId. This allows -// us to create query plan specific QueryRules out of the original QueryRules. In the new rules, +// FilterByPlan creates a new Rules by prefiltering on the query and planId. This allows +// us to create query plan specific Rules out of the original Rules. In the new rules, // query, plans and tableNames predicates are empty. 
-func (qrs *QueryRules) filterByPlan(query string, planid planbuilder.PlanType, tableName string) (newqrs *QueryRules) { - var newrules []*QueryRule +func (qrs *Rules) FilterByPlan(query string, planid planbuilder.PlanType, tableName string) (newqrs *Rules) { + var newrules []*Rule for _, qr := range qrs.rules { - if newrule := qr.filterByPlan(query, planid, tableName); newrule != nil { + if newrule := qr.FilterByPlan(query, planid, tableName); newrule != nil { newrules = append(newrules, newrule) } } - return &QueryRules{newrules} + return &Rules{newrules} } -func (qrs *QueryRules) getAction(ip, user string, bindVars map[string]interface{}) (action Action, desc string) { +func (qrs *Rules) GetAction(ip, user string, bindVars map[string]interface{}) (action Action, desc string) { for _, qr := range qrs.rules { - if act := qr.getAction(ip, user, bindVars); act != QRContinue { + if act := qr.GetAction(ip, user, bindVars); act != QRContinue { return act, qr.Description } } @@ -142,15 +142,15 @@ func (qrs *QueryRules) getAction(ip, user string, bindVars map[string]interface{ //----------------------------------------------- -// QueryRule represents one rule (conditions-action). +// Rule represents one rule (conditions-action). // Name is meant to uniquely identify a rule. // Description is a human readable comment that describes the rule. -// For a QueryRule to fire, all conditions of the QueryRule -// have to match. For example, an empty QueryRule will match +// For a Rule to fire, all conditions of the Rule +// have to match. For example, an empty Rule will match // all requests. -// Every QueryRule has an associated Action. If all the conditions -// of the QueryRule are met, then the Action is triggerred. -type QueryRule struct { +// Every Rule has an associated Action. If all the conditions +// of the Rule are met, then the Action is triggerred. 
+type Rule struct { Description string Name string @@ -182,15 +182,15 @@ func (nr namedRegexp) MarshalJSON() ([]byte, error) { return json.Marshal(nr.name) } -// NewQueryRule creates a new QueryRule. -func NewQueryRule(description, name string, act Action) (qr *QueryRule) { +// NewQueryRule creates a new Rule. +func NewQueryRule(description, name string, act Action) (qr *Rule) { // We ignore act because there's only one action right now - return &QueryRule{Description: description, Name: name, act: act} + return &Rule{Description: description, Name: name, act: act} } -// Copy performs a deep copy of a QueryRule. -func (qr *QueryRule) Copy() (newqr *QueryRule) { - newqr = &QueryRule{ +// Copy performs a deep copy of a Rule. +func (qr *Rule) Copy() (newqr *Rule) { + newqr = &Rule{ Description: qr.Description, Name: qr.Name, requestIP: qr.requestIP, @@ -214,7 +214,7 @@ func (qr *QueryRule) Copy() (newqr *QueryRule) { } // MarshalJSON marshals to JSON. -func (qr *QueryRule) MarshalJSON() ([]byte, error) { +func (qr *Rule) MarshalJSON() ([]byte, error) { b := bytes.NewBuffer(nil) safeEncode(b, `{"Description":`, qr.Description) safeEncode(b, `,"Name":`, qr.Name) @@ -245,7 +245,7 @@ func (qr *QueryRule) MarshalJSON() ([]byte, error) { // SetIPCond adds a regular expression condition for the client IP. // It has to be a full match (not substring). -func (qr *QueryRule) SetIPCond(pattern string) (err error) { +func (qr *Rule) SetIPCond(pattern string) (err error) { qr.requestIP.name = pattern qr.requestIP.Regexp, err = regexp.Compile(makeExact(pattern)) return err @@ -253,7 +253,7 @@ func (qr *QueryRule) SetIPCond(pattern string) (err error) { // SetUserCond adds a regular expression condition for the user name // used by the client. 
-func (qr *QueryRule) SetUserCond(pattern string) (err error) { +func (qr *Rule) SetUserCond(pattern string) (err error) { qr.user.name = pattern qr.user.Regexp, err = regexp.Compile(makeExact(pattern)) return @@ -262,19 +262,19 @@ func (qr *QueryRule) SetUserCond(pattern string) (err error) { // AddPlanCond adds to the list of plans that can be matched for // the rule to fire. // This function acts as an OR: Any plan id match is considered a match. -func (qr *QueryRule) AddPlanCond(planType planbuilder.PlanType) { +func (qr *Rule) AddPlanCond(planType planbuilder.PlanType) { qr.plans = append(qr.plans, planType) } // AddTableCond adds to the list of tableNames that can be matched for // the rule to fire. // This function acts as an OR: Any tableName match is considered a match. -func (qr *QueryRule) AddTableCond(tableName string) { +func (qr *Rule) AddTableCond(tableName string) { qr.tableNames = append(qr.tableNames, tableName) } // SetQueryCond adds a regular expression condition for the query. -func (qr *QueryRule) SetQueryCond(pattern string) (err error) { +func (qr *Rule) SetQueryCond(pattern string) (err error) { qr.query.name = pattern qr.query.Regexp, err = regexp.Compile(makeExact(pattern)) return @@ -285,8 +285,8 @@ func makeExact(pattern string) string { return fmt.Sprintf("^%s$", pattern) } -// AddBindVarCond adds a bind variable restriction to the QueryRule. -// All bind var conditions have to be satisfied for the QueryRule +// AddBindVarCond adds a bind variable restriction to the Rule. +// All bind var conditions have to be satisfied for the Rule // to be a match. // name represents the name (not regexp) of the bind variable. 
// onAbsent specifies the value of the condition if the @@ -303,7 +303,7 @@ func makeExact(pattern string) string { // string ==, !=, <, >=, >, <=, MATCH, NOMATCH []byte, string // KeyRange IN, NOTIN whole numbers // whole numbers can be: int, int8, int16, int32, int64, uint64 -func (qr *QueryRule) AddBindVarCond(name string, onAbsent, onMismatch bool, op Operator, value interface{}) error { +func (qr *Rule) AddBindVarCond(name string, onAbsent, onMismatch bool, op Operator, value interface{}) error { var converted bvcValue if op == QRNoOp { qr.bindVarConds = append(qr.bindVarConds, BindVarCond{name, onAbsent, onMismatch, op, nil}) @@ -350,11 +350,11 @@ Error: return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid operator %v for type %T (%v)", op, value, value) } -// filterByPlan returns a new QueryRule if the query and planid match. -// The new QueryRule will contain all the original constraints other -// than the plan and query. If the plan and query don't match the QueryRule, +// FilterByPlan returns a new Rule if the query and planid match. +// The new Rule will contain all the original constraints other +// than the plan and query. If the plan and query don't match the Rule, // then it returns nil. 
-func (qr *QueryRule) filterByPlan(query string, planid planbuilder.PlanType, tableName string) (newqr *QueryRule) { +func (qr *Rule) FilterByPlan(query string, planid planbuilder.PlanType, tableName string) (newqr *Rule) { if !reMatch(qr.query.Regexp, query) { return nil } @@ -371,7 +371,7 @@ func (qr *QueryRule) filterByPlan(query string, planid planbuilder.PlanType, tab return newqr } -func (qr *QueryRule) getAction(ip, user string, bindVars map[string]interface{}) Action { +func (qr *Rule) GetAction(ip, user string, bindVars map[string]interface{}) Action { if !reMatch(qr.requestIP.Regexp, ip) { return QRContinue } @@ -426,10 +426,10 @@ func bvMatch(bvcond BindVarCond, bindVars map[string]interface{}) bool { } //----------------------------------------------- -// Support types for QueryRule +// Support types for Rule // Action speficies the list of actions to perform -// when a QueryRule is triggered. +// when a Rule is triggered. type Action int // These are actions. @@ -884,7 +884,7 @@ func MapStrOperator(strop string) (op Operator, err error) { } // BuildQueryRule builds a query rule from a ruleInfo. -func BuildQueryRule(ruleInfo map[string]interface{}) (qr *QueryRule, err error) { +func BuildQueryRule(ruleInfo map[string]interface{}) (qr *Rule, err error) { qr = NewQueryRule("", "", QRFail) for k, v := range ruleInfo { var sv string diff --git a/go/vt/tabletserver/query_rules_test.go b/go/vt/tabletserver/rules/rules_test.go similarity index 97% rename from go/vt/tabletserver/query_rules_test.go rename to go/vt/tabletserver/rules/rules_test.go index c276a81a006..3b0f84a2287 100644 --- a/go/vt/tabletserver/query_rules_test.go +++ b/go/vt/tabletserver/rules/rules_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-package tabletserver +package rules import ( "bytes" @@ -22,7 +22,7 @@ import ( ) func TestQueryRules(t *testing.T) { - qrs := NewQueryRules() + qrs := New() qr1 := NewQueryRule("rule 1", "r1", QRFail) qr2 := NewQueryRule("rule 2", "r2", QRFail) qrs.Add(qr1) @@ -68,7 +68,7 @@ func TestQueryRules(t *testing.T) { // TestCopy tests for deep copy func TestCopy(t *testing.T) { - qrs1 := NewQueryRules() + qrs1 := New() qr1 := NewQueryRule("rule 1", "r1", QRFail) qr1.AddPlanCond(planbuilder.PlanPassSelect) qr1.AddTableCond("aa") @@ -83,7 +83,7 @@ func TestCopy(t *testing.T) { t.Errorf("qrs1: %+v, not equal to %+v", qrs2, qrs1) } - qrs1 = NewQueryRules() + qrs1 = New() qrs2 = qrs1.Copy() if !reflect.DeepEqual(qrs2, qrs1) { t.Errorf("qrs1: %+v, not equal to %+v", qrs2, qrs1) @@ -91,7 +91,7 @@ func TestCopy(t *testing.T) { } func TestFilterByPlan(t *testing.T) { - qrs := NewQueryRules() + qrs := New() qr1 := NewQueryRule("rule 1", "r1", QRFail) qr1.SetIPCond("123") @@ -117,7 +117,7 @@ func TestFilterByPlan(t *testing.T) { qrs.Add(qr3) qrs.Add(qr4) - qrs1 := qrs.filterByPlan("select", planbuilder.PlanPassSelect, "a") + qrs1 := qrs.FilterByPlan("select", planbuilder.PlanPassSelect, "a") want := compacted(`[{ "Description":"rule 1", "Name":"r1", @@ -152,7 +152,7 @@ func TestFilterByPlan(t *testing.T) { t.Errorf("qrs1:\n%s, want\n%s", got, want) } - qrs1 = qrs.filterByPlan("insert", planbuilder.PlanPassSelect, "a") + qrs1 = qrs.FilterByPlan("insert", planbuilder.PlanPassSelect, "a") want = compacted(`[{ "Description":"rule 2", "Name":"r2", @@ -168,13 +168,13 @@ func TestFilterByPlan(t *testing.T) { t.Errorf("qrs1:\n%s, want\n%s", got, want) } - qrs1 = qrs.filterByPlan("insert", planbuilder.PlanSelectLock, "a") + qrs1 = qrs.FilterByPlan("insert", planbuilder.PlanSelectLock, "a") got = marshalled(qrs1) if got != want { t.Errorf("qrs1:\n%s, want\n%s", got, want) } - qrs1 = qrs.filterByPlan("select", planbuilder.PlanInsertPK, "a") + qrs1 = qrs.FilterByPlan("select", 
planbuilder.PlanInsertPK, "a") want = compacted(`[{ "Description":"rule 3", "Name":"r3", @@ -190,12 +190,12 @@ func TestFilterByPlan(t *testing.T) { t.Errorf("qrs1:\n%s, want\n%s", got, want) } - qrs1 = qrs.filterByPlan("sel", planbuilder.PlanInsertPK, "a") + qrs1 = qrs.FilterByPlan("sel", planbuilder.PlanInsertPK, "a") if qrs1.rules != nil { t.Errorf("want nil, got non-nil") } - qrs1 = qrs.filterByPlan("table", planbuilder.PlanPassDML, "b") + qrs1 = qrs.FilterByPlan("table", planbuilder.PlanPassDML, "b") want = compacted(`[{ "Description":"rule 4", "Name":"r4", @@ -209,7 +209,7 @@ func TestFilterByPlan(t *testing.T) { qr5 := NewQueryRule("rule 5", "r5", QRFail) qrs.Add(qr5) - qrs1 = qrs.filterByPlan("sel", planbuilder.PlanInsertPK, "a") + qrs1 = qrs.FilterByPlan("sel", planbuilder.PlanInsertPK, "a") want = compacted(`[{ "Description":"rule 5", "Name":"r5", @@ -220,8 +220,8 @@ func TestFilterByPlan(t *testing.T) { t.Errorf("qrs1:\n%s, want\n%s", got, want) } - qrsnil1 := NewQueryRules() - if qrsnil2 := qrsnil1.filterByPlan("", planbuilder.PlanPassSelect, "a"); qrsnil2.rules != nil { + qrsnil1 := New() + if qrsnil2 := qrsnil1.FilterByPlan("", planbuilder.PlanPassSelect, "a"); qrsnil2.rules != nil { t.Errorf("want nil, got non-nil") } } @@ -574,7 +574,7 @@ func TestBVConditions(t *testing.T) { } func TestAction(t *testing.T) { - qrs := NewQueryRules() + qrs := New() qr1 := NewQueryRule("rule 1", "r1", QRFail) qr1.SetIPCond("123") @@ -591,26 +591,26 @@ func TestAction(t *testing.T) { bv := make(map[string]interface{}) bv["a"] = uint64(0) - action, desc := qrs.getAction("123", "user1", bv) + action, desc := qrs.GetAction("123", "user1", bv) if action != QRFail { t.Errorf("want fail") } if desc != "rule 1" { t.Errorf("want rule 1, got %s", desc) } - action, desc = qrs.getAction("1234", "user", bv) + action, desc = qrs.GetAction("1234", "user", bv) if action != QRFailRetry { t.Errorf("want fail_retry") } if desc != "rule 2" { t.Errorf("want rule 2, got %s", desc) } - 
action, desc = qrs.getAction("1234", "user1", bv) + action, desc = qrs.GetAction("1234", "user1", bv) if action != QRContinue { t.Errorf("want continue") } bv["a"] = uint64(1) - action, desc = qrs.getAction("1234", "user1", bv) + action, desc = qrs.GetAction("1234", "user1", bv) if action != QRFail { t.Errorf("want fail") } @@ -620,7 +620,7 @@ func TestAction(t *testing.T) { } func TestImport(t *testing.T) { - var qrs = NewQueryRules() + var qrs = New() jsondata := `[{ "Description": "desc1", "Name": "name1", @@ -703,7 +703,7 @@ var validjsons = []ValidJSONCase{ func TestValidJSON(t *testing.T) { for i, tcase := range validjsons { - qrs := NewQueryRules() + qrs := New() err := qrs.UnmarshalJSON([]byte(tcase.input)) if err != nil { t.Fatalf("Unexpected error for case %d: %v", i, err) @@ -779,7 +779,7 @@ var invalidjsons = []InvalidJSONCase{ func TestInvalidJSON(t *testing.T) { for _, tcase := range invalidjsons { - qrs := NewQueryRules() + qrs := New() err := qrs.UnmarshalJSON([]byte(tcase.input)) if err == nil { t.Errorf("want error for case %q", tcase.input) @@ -790,7 +790,7 @@ func TestInvalidJSON(t *testing.T) { t.Errorf("invalid json: %s, want '%v', got '%v'", tcase.input, tcase.err, recvd) } } - qrs := NewQueryRules() + qrs := New() err := qrs.UnmarshalJSON([]byte(`{`)) if code := vterrors.Code(err); code != vtrpcpb.Code_INVALID_ARGUMENT { t.Errorf("qrs.UnmarshalJSON: %v, want %v", code, vtrpcpb.Code_INVALID_ARGUMENT) diff --git a/go/vt/tabletserver/tabletserver.go b/go/vt/tabletserver/tabletserver.go index c8a52ed5a3c..1a96ff5055c 100644 --- a/go/vt/tabletserver/tabletserver.go +++ b/go/vt/tabletserver/tabletserver.go @@ -36,6 +36,7 @@ import ( "github.com/youtube/vitess/go/vt/tabletserver/messager" "github.com/youtube/vitess/go/vt/tabletserver/queryservice" "github.com/youtube/vitess/go/vt/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/tabletserver/rules" "github.com/youtube/vitess/go/vt/tabletserver/splitquery" 
"github.com/youtube/vitess/go/vt/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/tabletserver/txthrottler" @@ -215,16 +216,16 @@ func (tsv *TabletServer) Register() { // RegisterQueryRuleSource registers ruleSource for setting query rules. func (tsv *TabletServer) RegisterQueryRuleSource(ruleSource string) { - tsv.qe.queryRuleSources.RegisterQueryRuleSource(ruleSource) + tsv.qe.queryRuleSources.RegisterSource(ruleSource) } // UnRegisterQueryRuleSource unregisters ruleSource from query rules. func (tsv *TabletServer) UnRegisterQueryRuleSource(ruleSource string) { - tsv.qe.queryRuleSources.UnRegisterQueryRuleSource(ruleSource) + tsv.qe.queryRuleSources.UnRegisterSource(ruleSource) } // SetQueryRules sets the query rules for a registered ruleSource. -func (tsv *TabletServer) SetQueryRules(ruleSource string, qrs *QueryRules) error { +func (tsv *TabletServer) SetQueryRules(ruleSource string, qrs *rules.Rules) error { err := tsv.qe.queryRuleSources.SetRules(ruleSource, qrs) if err != nil { return err diff --git a/go/vt/tabletserver/tabletservermock/controller.go b/go/vt/tabletserver/tabletservermock/controller.go index 5daf993603a..bd15012e7ea 100644 --- a/go/vt/tabletserver/tabletservermock/controller.go +++ b/go/vt/tabletserver/tabletservermock/controller.go @@ -14,8 +14,8 @@ import ( "github.com/youtube/vitess/go/vt/mysqlctl" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" - "github.com/youtube/vitess/go/vt/tabletserver" "github.com/youtube/vitess/go/vt/tabletserver/queryservice" + "github.com/youtube/vitess/go/vt/tabletserver/rules" ) // BroadcastData is used by the mock Controller to send data @@ -146,7 +146,7 @@ func (tqsc *Controller) UnRegisterQueryRuleSource(ruleSource string) { } // SetQueryRules is part of the tabletserver.Controller interface -func (tqsc *Controller) SetQueryRules(ruleSource string, qrs *tabletserver.QueryRules) error { +func (tqsc *Controller) SetQueryRules(ruleSource 
string, qrs *rules.Rules) error { return nil } From cee83c2828dfc5fa0f8fec9562179056b593e82f Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Sun, 12 Mar 2017 21:15:31 -0700 Subject: [PATCH 094/108] tabletserver: protect goroutines with recover. --- go/vt/tabletserver/engines/schema/engine.go | 5 +++- .../tabletserver/messager/message_manager.go | 25 +++++++++++++++---- go/vt/tabletserver/replication_watcher.go | 5 +++- go/vt/tabletserver/tx_engine.go | 5 +++- 4 files changed, 32 insertions(+), 8 deletions(-) diff --git a/go/vt/tabletserver/engines/schema/engine.go b/go/vt/tabletserver/engines/schema/engine.go index fcca53fab11..3b9a057451a 100644 --- a/go/vt/tabletserver/engines/schema/engine.go +++ b/go/vt/tabletserver/engines/schema/engine.go @@ -115,7 +115,10 @@ func (se *Engine) Open(dbaParams *sqldb.ConnParams) error { for _, row := range tableData.Rows { wg.Add(1) go func(row []sqltypes.Value) { - defer wg.Done() + defer func() { + tabletenv.LogError() + wg.Done() + }() tableName := row[0].String() conn, err := se.conns.Get(ctx) diff --git a/go/vt/tabletserver/messager/message_manager.go b/go/vt/tabletserver/messager/message_manager.go index d0f328d5186..0d6ad59d79d 100644 --- a/go/vt/tabletserver/messager/message_manager.go +++ b/go/vt/tabletserver/messager/message_manager.go @@ -254,7 +254,10 @@ func (mm *messageManager) Add(mr *MessageRow) bool { } func (mm *messageManager) runSend() { - defer mm.wg.Done() + defer func() { + tabletenv.LogError() + mm.wg.Done() + }() for { var rows [][]sqltypes.Value mm.mu.Lock() @@ -297,7 +300,10 @@ func (mm *messageManager) runSend() { } func (mm *messageManager) send(receiver *receiverWithStatus, qr *sqltypes.Result) { - defer mm.wg.Done() + defer func() { + tabletenv.LogError() + mm.wg.Done() + }() if err := receiver.receiver.Send(qr); err != nil { if err == io.EOF { // No need to call Cancel. 
messageReceiver already @@ -333,7 +339,10 @@ func (mm *messageManager) send(receiver *receiverWithStatus, qr *sqltypes.Result // not rely on members of messageManager. func postpone(tsv TabletService, name string, ackWaitTime time.Duration, ids []string) { ctx, cancel := context.WithTimeout(tabletenv.LocalContext(), ackWaitTime) - defer cancel() + defer func() { + tabletenv.LogError() + cancel() + }() _, err := tsv.PostponeMessages(ctx, nil, name, ids) if err != nil { // TODO(sougou): increment internal error. @@ -343,7 +352,10 @@ func postpone(tsv TabletService, name string, ackWaitTime time.Duration, ids []s func (mm *messageManager) runPoller() { ctx, cancel := context.WithTimeout(tabletenv.LocalContext(), mm.pollerTicks.Interval()) - defer cancel() + defer func() { + tabletenv.LogError() + cancel() + }() conn, err := mm.conns.Get(ctx) if err != nil { // TODO(sougou): increment internal error. @@ -410,7 +422,10 @@ func (mm *messageManager) runPurge() { // not rely on members of messageManager. func purge(tsv TabletService, name string, purgeAfter, purgeInterval time.Duration) { ctx, cancel := context.WithTimeout(tabletenv.LocalContext(), purgeInterval) - defer cancel() + defer func() { + tabletenv.LogError() + cancel() + }() for { count, err := tsv.PurgeMessages(ctx, nil, name, time.Now().Add(-purgeAfter).UnixNano()) if err != nil { diff --git a/go/vt/tabletserver/replication_watcher.go b/go/vt/tabletserver/replication_watcher.go index f3bb7b1567a..c2957b7dbfc 100644 --- a/go/vt/tabletserver/replication_watcher.go +++ b/go/vt/tabletserver/replication_watcher.go @@ -89,7 +89,10 @@ func (rpw *ReplicationWatcher) Close() { // Process processes the replication stream. 
func (rpw *ReplicationWatcher) Process(ctx context.Context, dbconfigs dbconfigs.DBConfigs, mysqld mysqlctl.MysqlDaemon) { - defer rpw.wg.Done() + defer func() { + tabletenv.LogError() + rpw.wg.Done() + }() for { log.Infof("Starting a binlog Streamer from current replication position to monitor binlogs") streamer := binlog.NewStreamer(dbconfigs.App.DbName, mysqld, rpw.se, nil /*clientCharset*/, replication.Position{}, 0 /*timestamp*/, func(trans *binlogdatapb.BinlogTransaction) error { diff --git a/go/vt/tabletserver/tx_engine.go b/go/vt/tabletserver/tx_engine.go index e39c398cc68..63322dbce7f 100644 --- a/go/vt/tabletserver/tx_engine.go +++ b/go/vt/tabletserver/tx_engine.go @@ -130,7 +130,10 @@ func (te *TxEngine) Close(immediate bool) { // the function closes rollbackDone, which can be // verified to make sure it won't kick in later. go func() { - defer close(rollbackDone) + defer func() { + tabletenv.LogError() + close(rollbackDone) + }() if immediate { // Immediately rollback everything and return. log.Info("Immediate shutdown: rolling back now.") From 07b84711cac7b3dfaed0637c03b44f26ca9223d1 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Sun, 12 Mar 2017 21:27:26 -0700 Subject: [PATCH 095/108] tabletserver: the big move * Created vttablet directory to hold tabletserver and tabletmanager. * scaffolding packages like grpc and interfaces are under vttablet. * tabletserver and tabletmanager have subdirs, but only on packages they depend on. 
--- go/cmd/l2vtgate/plugin_grpcqueryservice.go | 4 ++-- go/cmd/l2vtgate/plugin_grpctabletconn.go | 2 +- go/cmd/vtcombo/main.go | 2 +- go/cmd/vtcombo/tablet_map.go | 12 ++++++------ go/cmd/vtctl/plugin_grpctabletconn.go | 2 +- go/cmd/vtctl/plugin_grpctmclient.go | 2 +- go/cmd/vtctl/vtctl.go | 2 +- go/cmd/vtctld/plugin_grpctabletconn.go | 2 +- go/cmd/vtctld/plugin_grpctmclient.go | 2 +- go/cmd/vtctld/schema.go | 2 +- go/cmd/vtgate/plugin_grpctabletconn.go | 2 +- go/cmd/vtgateclienttest/goclienttest/echo.go | 2 +- go/cmd/vtgateclienttest/services/echo.go | 2 +- go/cmd/vttablet/plugin_filecustomrule.go | 2 +- go/cmd/vttablet/plugin_grpcqueryservice.go | 4 ++-- go/cmd/vttablet/plugin_grpctabletconn.go | 2 +- go/cmd/vttablet/plugin_grpctmclient.go | 2 +- go/cmd/vttablet/plugin_grpctmserver.go | 2 +- go/cmd/vttablet/plugin_sysloglogger.go | 2 +- go/cmd/vttablet/plugin_zkcustomrule.go | 2 +- go/cmd/vttablet/status.go | 4 ++-- go/cmd/vttablet/vttablet.go | 6 +++--- go/cmd/vtworker/plugin_grpctabletconn.go | 2 +- go/cmd/vtworker/plugin_grpctmclient.go | 2 +- go/vt/binlog/binlog_streamer.go | 2 +- go/vt/binlog/binlog_streamer_rbr_test.go | 2 +- go/vt/binlog/event_streamer.go | 2 +- go/vt/binlog/updatestreamctl.go | 2 +- go/vt/discovery/fake_healthcheck.go | 4 ++-- go/vt/discovery/healthcheck.go | 4 ++-- go/vt/discovery/healthcheck_test.go | 6 +++--- go/vt/schemamanager/schemamanager_test.go | 6 +++--- go/vt/schemamanager/schemaswap/schema_swap.go | 2 +- go/vt/throttler/demo/throttler_demo.go | 6 +++--- go/vt/vtctl/grpcvtctlserver/server.go | 2 +- go/vt/vtctl/query.go | 2 +- go/vt/vtctl/vtctlclienttest/client.go | 4 ++-- go/vt/vtctld/action_repository.go | 2 +- go/vt/vtctld/api.go | 2 +- go/vt/vtctld/realtime_status_test.go | 6 +++--- go/vt/vtctld/tablet_data.go | 2 +- go/vt/vtctld/tablet_data_test.go | 8 ++++---- go/vt/vtgate/engine/primitive.go | 2 +- go/vt/vtgate/engine/route.go | 2 +- go/vt/vtgate/gateway/discoverygateway.go | 2 +- 
go/vt/vtgate/gateway/discoverygateway_test.go | 2 +- go/vt/vtgate/gateway/gateway.go | 2 +- go/vt/vtgate/gateway/l2vtgategateway.go | 4 ++-- .../vtgate/gatewaytest/grpc_discovery_test.go | 6 +++--- go/vt/vtgate/gatewaytest/suite.go | 6 +++--- go/vt/vtgate/grpcvtgateconn/conn.go | 2 +- go/vt/vtgate/grpcvtgateservice/server.go | 2 +- go/vt/vtgate/l2vtgate/l2vtgate.go | 2 +- go/vt/vtgate/query_executor.go | 2 +- go/vt/vtgate/router_dml_test.go | 4 ++-- go/vt/vtgate/router_framework_test.go | 2 +- go/vt/vtgate/router_select_test.go | 4 ++-- go/vt/vtgate/sandbox_test.go | 6 +++--- go/vt/vtgate/scatter_conn.go | 2 +- go/vt/vtgate/scatter_conn_test.go | 2 +- go/vt/vtgate/topo_utils.go | 2 +- go/vt/vtgate/topo_utils_test.go | 2 +- go/vt/vtgate/tx_conn_test.go | 2 +- go/vt/vtgate/vindexes/lookup_hash_test.go | 2 +- .../vtgate/vindexes/lookup_hash_unique_test.go | 2 +- go/vt/vtgate/vindexes/lookup_test.go | 2 +- go/vt/vtgate/vtgate.go | 2 +- go/vt/vtgate/vtgate_test.go | 4 ++-- .../agentrpctest/test_agent_rpc.go | 4 ++-- .../filecustomrule/filecustomrule.go | 4 ++-- .../filecustomrule/filecustomrule_test.go | 4 ++-- .../customrule/zkcustomrule/zkcustomrule.go | 4 ++-- .../zkcustomrule/zkcustomrule_test.go | 4 ++-- .../endtoend/acl_test.go | 4 ++-- .../endtoend/batch_test.go | 4 ++-- .../endtoend/compatibility_test.go | 2 +- .../endtoend/config_test.go | 4 ++-- .../endtoend/endtoend.go | 0 .../endtoend/framework/client.go | 4 ++-- .../endtoend/framework/debugschema.go | 0 .../endtoend/framework/debugvars.go | 0 .../endtoend/framework/eventcatcher.go | 4 ++-- .../endtoend/framework/querystats.go | 0 .../endtoend/framework/server.go | 4 ++-- .../endtoend/framework/streamqueryz.go | 0 .../endtoend/framework/testcase.go | 0 .../endtoend/main_test.go | 4 ++-- .../endtoend/message_test.go | 2 +- .../endtoend/metadata_test.go | 2 +- .../endtoend/misc_test.go | 2 +- .../endtoend/queries_test.go | 2 +- .../endtoend/sequence_test.go | 2 +- .../endtoend/stream_test.go | 2 +- 
.../endtoend/transaction_test.go | 6 +++--- .../faketmclient/fake_client.go | 2 +- .../grpcqueryservice/server.go | 4 ++-- .../grpctabletconn/conn.go | 6 +++--- .../grpctabletconn/conn_test.go | 4 ++-- .../grpctmclient/client.go | 2 +- .../grpctmserver/server.go | 2 +- .../grpctmserver/server_test.go | 4 ++-- .../queryservice/fakes/error_query_service.go | 2 +- .../fakes/stream_health_query_service.go | 2 +- .../queryservice/queryservice.go | 2 +- .../queryservice/wrapped.go | 2 +- .../sandboxconn/sandboxconn.go | 4 ++-- .../sysloglogger/sysloglogger.go | 2 +- .../sysloglogger/sysloglogger_test.go | 2 +- .../tabletconn/grpc_error.go | 0 .../tabletconn/grpc_error_test.go | 0 .../tabletconn/tablet_conn.go | 2 +- .../tabletconntest/fakequeryservice.go | 2 +- .../tabletconntest/tabletconntest.go | 4 ++-- .../tabletmanager/action_agent.go | 4 ++-- .../tabletmanager/binlog_players.go | 0 .../tabletmanager/binlog_players_test.go | 6 +++--- .../tabletmanager/events/state_change.go | 0 .../tabletmanager/healthcheck.go | 0 .../tabletmanager/healthcheck_test.go | 4 ++-- .../tabletmanager/init_tablet.go | 0 .../tabletmanager/init_tablet_test.go | 0 .../tabletmanager/initial_rebuild.go | 0 .../tabletmanager/orchestrator.go | 0 .../tabletmanager/replication_reporter.go | 0 .../tabletmanager/replication_reporter_test.go | 0 go/vt/{ => vttablet}/tabletmanager/restore.go | 0 .../tabletmanager/rpc_actions.go | 0 .../{ => vttablet}/tabletmanager/rpc_agent.go | 0 .../{ => vttablet}/tabletmanager/rpc_backup.go | 0 .../tabletmanager/rpc_binlog_players.go | 0 .../tabletmanager/rpc_external_reparent.go | 2 +- .../{ => vttablet}/tabletmanager/rpc_query.go | 0 .../tabletmanager/rpc_replication.go | 0 .../{ => vttablet}/tabletmanager/rpc_schema.go | 0 .../{ => vttablet}/tabletmanager/rpc_server.go | 0 .../tabletmanager/state_change.go | 6 +++--- go/vt/{ => vttablet}/tabletserver/codex.go | 2 +- .../{ => vttablet}/tabletserver/codex_test.go | 2 +- go/vt/{ => vttablet}/tabletserver/comments.go 
| 0 .../tabletserver/comments_test.go | 0 .../tabletserver/connpool/dbconn.go | 2 +- .../tabletserver/connpool/dbconn_test.go | 0 .../tabletserver/connpool/pool.go | 2 +- .../tabletserver/connpool/pool_test.go | 0 .../{ => vttablet}/tabletserver/controller.go | 4 ++-- .../tabletserver/messager/cache.go | 0 .../tabletserver/messager/cache_test.go | 0 .../tabletserver/messager/engine.go | 6 +++--- .../tabletserver/messager/engine_test.go | 4 ++-- .../tabletserver/messager/message_manager.go | 6 +++--- .../messager/message_manager_test.go | 4 ++-- .../tabletserver/planbuilder/ddl.go | 2 +- .../tabletserver/planbuilder/dml.go | 2 +- .../tabletserver/planbuilder/plan.go | 2 +- .../tabletserver/planbuilder/plan_test.go | 2 +- .../tabletserver/planbuilder/query_gen.go | 2 +- .../tabletserver/query_engine.go | 10 +++++----- .../tabletserver/query_engine_test.go | 6 +++--- .../tabletserver/query_executor.go | 12 ++++++------ .../tabletserver/query_executor_test.go | 6 +++--- .../{ => vttablet}/tabletserver/query_list.go | 0 .../tabletserver/query_list_test.go | 0 go/vt/{ => vttablet}/tabletserver/querylogz.go | 2 +- .../tabletserver/querylogz_test.go | 4 ++-- .../tabletserver/querytypes/bound_query.go | 0 .../tabletserver/querytypes/proto3.go | 0 .../tabletserver/querytypes/proto3_test.go | 0 .../tabletserver/querytypes/query_split.go | 0 go/vt/{ => vttablet}/tabletserver/queryz.go | 2 +- .../{ => vttablet}/tabletserver/queryz_test.go | 4 ++-- .../tabletserver/replication_watcher.go | 4 ++-- go/vt/{ => vttablet}/tabletserver/rules/map.go | 2 +- .../tabletserver/rules/map_test.go | 2 +- .../{ => vttablet}/tabletserver/rules/rules.go | 2 +- .../tabletserver/rules/rules_test.go | 2 +- .../tabletserver}/schema/engine.go | 4 ++-- .../tabletserver}/schema/engine_test.go | 4 ++-- .../tabletserver}/schema/load_table.go | 4 ++-- .../tabletserver}/schema/load_table_test.go | 2 +- .../tabletserver}/schema/schema.go | 0 .../tabletserver}/schema/schema_test.go | 0 
.../schema/schematest/schematest.go | 0 .../tabletserver}/schema/schemaz.go | 0 .../tabletserver}/schema/schemaz_test.go | 0 .../tabletserver/splitquery/doc.go | 0 .../splitquery/equal_splits_algorithm.go | 2 +- .../splitquery/equal_splits_algorithm_test.go | 4 ++-- .../tabletserver/splitquery/example_test.go | 4 ++-- .../splitquery/full_scan_algorithm.go | 4 ++-- .../splitquery/full_scan_algorithm_test.go | 4 ++-- .../splitquery/split_algorithm_interface.go | 2 +- .../tabletserver/splitquery/split_params.go | 4 ++-- .../splitquery/split_params_test.go | 4 ++-- .../splitquery_testing/mock_sqlexecuter.go | 0 .../tabletserver/splitquery/splitter.go | 4 ++-- .../tabletserver/splitquery/splitter_test.go | 6 +++--- .../splitquery/sql_executer_interface.go | 0 .../tabletserver/splitquery/testutils_test.go | 2 +- .../tabletserver/splitquery/utils.go | 0 go/vt/{ => vttablet}/tabletserver/status.go | 2 +- .../tabletserver/stream_queryz.go | 0 .../tabletserver/stream_queryz_test.go | 0 .../tabletserver/tabletenv/config.go | 0 .../tabletserver/tabletenv/local_context.go | 0 .../tabletserver/tabletenv/logstats.go | 0 .../tabletserver/tabletenv/logstats_test.go | 0 .../tabletserver/tabletenv/tabletenv.go | 0 .../tabletserver/tabletserver.go | 18 +++++++++--------- .../tabletserver/tabletserver_test.go | 6 +++--- .../tabletserver/testutils_test.go | 2 +- go/vt/{ => vttablet}/tabletserver/twopc.go | 2 +- .../{ => vttablet}/tabletserver/twopc_test.go | 0 go/vt/{ => vttablet}/tabletserver/twopcz.go | 0 go/vt/{ => vttablet}/tabletserver/tx_engine.go | 4 ++-- .../tabletserver/tx_engine_test.go | 2 +- .../{ => vttablet}/tabletserver/tx_executor.go | 4 ++-- .../tabletserver/tx_executor_test.go | 2 +- go/vt/{ => vttablet}/tabletserver/tx_pool.go | 6 +++--- .../tabletserver/tx_pool_test.go | 2 +- .../tabletserver/tx_prep_pool.go | 0 .../tabletserver/tx_prep_pool_test.go | 0 go/vt/{ => vttablet}/tabletserver/txlogz.go | 2 +- .../{ => vttablet}/tabletserver/txlogz_test.go | 2 +- 
.../txthrottler/mock_healthcheck_test.go | 2 +- .../txthrottler/mock_server_test.go | 0 .../txthrottler/mock_throttler_test.go | 0 .../txthrottler/mock_topology_watcher_test.go | 0 .../txthrottler/mock_toposerver_impl_test.go | 0 .../tabletserver/txthrottler/tx_throttler.go | 2 +- .../txthrottler/tx_throttler_test.go | 2 +- .../tabletservermock/controller.go | 4 ++-- .../tmclient/rpc_client_api.go | 0 go/vt/worker/diff_utils.go | 4 ++-- go/vt/worker/instance.go | 2 +- go/vt/worker/legacy_split_clone_test.go | 6 +++--- go/vt/worker/restartable_result_reader.go | 4 ++-- go/vt/worker/restartable_result_reader_test.go | 6 +++--- go/vt/worker/split_clone_test.go | 6 +++--- go/vt/worker/split_diff_test.go | 4 ++-- go/vt/worker/utils_test.go | 4 ++-- go/vt/worker/vertical_split_clone_test.go | 4 ++-- go/vt/worker/vertical_split_diff_test.go | 4 ++-- .../vtworkerclienttest/client_testsuite.go | 4 ++-- .../horizontal_resharding_workflow.go | 2 +- .../horizontal_resharding_workflow_test.go | 2 +- go/vt/wrangler/split.go | 2 +- go/vt/wrangler/testlib/apply_schema_test.go | 2 +- go/vt/wrangler/testlib/backup_test.go | 2 +- .../wrangler/testlib/copy_schema_shard_test.go | 2 +- .../testlib/emergency_reparent_shard_test.go | 2 +- go/vt/wrangler/testlib/fake_tablet.go | 12 ++++++------ .../wrangler/testlib/init_shard_master_test.go | 2 +- .../testlib/migrate_served_from_test.go | 2 +- .../testlib/migrate_served_types_test.go | 2 +- go/vt/wrangler/testlib/permissions_test.go | 2 +- .../testlib/planned_reparent_shard_test.go | 4 ++-- .../wrangler/testlib/reparent_external_test.go | 4 ++-- go/vt/wrangler/testlib/reparent_utils_test.go | 2 +- go/vt/wrangler/testlib/shard_test.go | 2 +- go/vt/wrangler/testlib/version_test.go | 2 +- go/vt/wrangler/testlib/wait_for_drain_test.go | 6 +++--- .../wait_for_filtered_replication_test.go | 10 +++++----- go/vt/wrangler/wrangler.go | 2 +- 263 files changed, 337 insertions(+), 337 deletions(-) rename go/vt/{tabletmanager => 
vttablet}/agentrpctest/test_agent_rpc.go (99%) rename go/vt/{tabletserver => vttablet}/customrule/filecustomrule/filecustomrule.go (95%) rename go/vt/{tabletserver => vttablet}/customrule/filecustomrule/filecustomrule_test.go (91%) rename go/vt/{tabletserver => vttablet}/customrule/zkcustomrule/zkcustomrule.go (97%) rename go/vt/{tabletserver => vttablet}/customrule/zkcustomrule/zkcustomrule_test.go (96%) rename go/vt/{tabletserver => vttablet}/endtoend/acl_test.go (97%) rename go/vt/{tabletserver => vttablet}/endtoend/batch_test.go (97%) rename go/vt/{tabletserver => vttablet}/endtoend/compatibility_test.go (99%) rename go/vt/{tabletserver => vttablet}/endtoend/config_test.go (98%) rename go/vt/{tabletserver => vttablet}/endtoend/endtoend.go (100%) rename go/vt/{tabletserver => vttablet}/endtoend/framework/client.go (98%) rename go/vt/{tabletserver => vttablet}/endtoend/framework/debugschema.go (100%) rename go/vt/{tabletserver => vttablet}/endtoend/framework/debugvars.go (100%) rename go/vt/{tabletserver => vttablet}/endtoend/framework/eventcatcher.go (95%) rename go/vt/{tabletserver => vttablet}/endtoend/framework/querystats.go (100%) rename go/vt/{tabletserver => vttablet}/endtoend/framework/server.go (95%) rename go/vt/{tabletserver => vttablet}/endtoend/framework/streamqueryz.go (100%) rename go/vt/{tabletserver => vttablet}/endtoend/framework/testcase.go (100%) rename go/vt/{tabletserver => vttablet}/endtoend/main_test.go (98%) rename go/vt/{tabletserver => vttablet}/endtoend/message_test.go (98%) rename go/vt/{tabletserver => vttablet}/endtoend/metadata_test.go (98%) rename go/vt/{tabletserver => vttablet}/endtoend/misc_test.go (99%) rename go/vt/{tabletserver => vttablet}/endtoend/queries_test.go (99%) rename go/vt/{tabletserver => vttablet}/endtoend/sequence_test.go (95%) rename go/vt/{tabletserver => vttablet}/endtoend/stream_test.go (98%) rename go/vt/{tabletserver => vttablet}/endtoend/transaction_test.go (99%) rename go/vt/{tabletmanager => 
vttablet}/faketmclient/fake_client.go (99%) rename go/vt/{tabletserver => vttablet}/grpcqueryservice/server.go (99%) rename go/vt/{tabletserver => vttablet}/grpctabletconn/conn.go (99%) rename go/vt/{tabletserver => vttablet}/grpctabletconn/conn_test.go (89%) rename go/vt/{tabletmanager => vttablet}/grpctmclient/client.go (99%) rename go/vt/{tabletmanager => vttablet}/grpctmserver/server.go (99%) rename go/vt/{tabletmanager => vttablet}/grpctmserver/server_test.go (91%) rename go/vt/{tabletserver => vttablet}/queryservice/fakes/error_query_service.go (89%) rename go/vt/{tabletserver => vttablet}/queryservice/fakes/stream_health_query_service.go (98%) rename go/vt/{tabletserver => vttablet}/queryservice/queryservice.go (98%) rename go/vt/{tabletserver => vttablet}/queryservice/wrapped.go (99%) rename go/vt/{tabletserver => vttablet}/sandboxconn/sandboxconn.go (99%) rename go/vt/{tabletserver => vttablet}/sysloglogger/sysloglogger.go (96%) rename go/vt/{tabletserver => vttablet}/sysloglogger/sysloglogger_test.go (98%) rename go/vt/{tabletserver => vttablet}/tabletconn/grpc_error.go (100%) rename go/vt/{tabletserver => vttablet}/tabletconn/grpc_error_test.go (100%) rename go/vt/{tabletserver => vttablet}/tabletconn/tablet_conn.go (96%) rename go/vt/{tabletserver => vttablet}/tabletconntest/fakequeryservice.go (99%) rename go/vt/{tabletserver => vttablet}/tabletconntest/tabletconntest.go (99%) rename go/vt/{ => vttablet}/tabletmanager/action_agent.go (99%) rename go/vt/{ => vttablet}/tabletmanager/binlog_players.go (100%) rename go/vt/{ => vttablet}/tabletmanager/binlog_players_test.go (99%) rename go/vt/{ => vttablet}/tabletmanager/events/state_change.go (100%) rename go/vt/{ => vttablet}/tabletmanager/healthcheck.go (100%) rename go/vt/{ => vttablet}/tabletmanager/healthcheck_test.go (99%) rename go/vt/{ => vttablet}/tabletmanager/init_tablet.go (100%) rename go/vt/{ => vttablet}/tabletmanager/init_tablet_test.go (100%) rename go/vt/{ => 
vttablet}/tabletmanager/initial_rebuild.go (100%) rename go/vt/{ => vttablet}/tabletmanager/orchestrator.go (100%) rename go/vt/{ => vttablet}/tabletmanager/replication_reporter.go (100%) rename go/vt/{ => vttablet}/tabletmanager/replication_reporter_test.go (100%) rename go/vt/{ => vttablet}/tabletmanager/restore.go (100%) rename go/vt/{ => vttablet}/tabletmanager/rpc_actions.go (100%) rename go/vt/{ => vttablet}/tabletmanager/rpc_agent.go (100%) rename go/vt/{ => vttablet}/tabletmanager/rpc_backup.go (100%) rename go/vt/{ => vttablet}/tabletmanager/rpc_binlog_players.go (100%) rename go/vt/{ => vttablet}/tabletmanager/rpc_external_reparent.go (99%) rename go/vt/{ => vttablet}/tabletmanager/rpc_query.go (100%) rename go/vt/{ => vttablet}/tabletmanager/rpc_replication.go (100%) rename go/vt/{ => vttablet}/tabletmanager/rpc_schema.go (100%) rename go/vt/{ => vttablet}/tabletmanager/rpc_server.go (100%) rename go/vt/{ => vttablet}/tabletmanager/state_change.go (98%) rename go/vt/{ => vttablet}/tabletserver/codex.go (99%) rename go/vt/{ => vttablet}/tabletserver/codex_test.go (99%) rename go/vt/{ => vttablet}/tabletserver/comments.go (100%) rename go/vt/{ => vttablet}/tabletserver/comments_test.go (100%) rename go/vt/{ => vttablet}/tabletserver/connpool/dbconn.go (99%) rename go/vt/{ => vttablet}/tabletserver/connpool/dbconn_test.go (100%) rename go/vt/{ => vttablet}/tabletserver/connpool/pool.go (98%) rename go/vt/{ => vttablet}/tabletserver/connpool/pool_test.go (100%) rename go/vt/{ => vttablet}/tabletserver/controller.go (94%) rename go/vt/{ => vttablet}/tabletserver/messager/cache.go (100%) rename go/vt/{ => vttablet}/tabletserver/messager/cache_test.go (100%) rename go/vt/{ => vttablet}/tabletserver/messager/engine.go (96%) rename go/vt/{ => vttablet}/tabletserver/messager/engine_test.go (97%) rename go/vt/{ => vttablet}/tabletserver/messager/message_manager.go (98%) rename go/vt/{ => vttablet}/tabletserver/messager/message_manager_test.go (99%) rename go/vt/{ 
=> vttablet}/tabletserver/planbuilder/ddl.go (93%) rename go/vt/{ => vttablet}/tabletserver/planbuilder/dml.go (99%) rename go/vt/{ => vttablet}/tabletserver/planbuilder/plan.go (99%) rename go/vt/{ => vttablet}/tabletserver/planbuilder/plan_test.go (99%) rename go/vt/{ => vttablet}/tabletserver/planbuilder/query_gen.go (98%) rename go/vt/{ => vttablet}/tabletserver/query_engine.go (97%) rename go/vt/{ => vttablet}/tabletserver/query_engine_test.go (95%) rename go/vt/{ => vttablet}/tabletserver/query_executor.go (98%) rename go/vt/{ => vttablet}/tabletserver/query_executor_test.go (99%) rename go/vt/{ => vttablet}/tabletserver/query_list.go (100%) rename go/vt/{ => vttablet}/tabletserver/query_list_test.go (100%) rename go/vt/{ => vttablet}/tabletserver/querylogz.go (98%) rename go/vt/{ => vttablet}/tabletserver/querylogz_test.go (96%) rename go/vt/{ => vttablet}/tabletserver/querytypes/bound_query.go (100%) rename go/vt/{ => vttablet}/tabletserver/querytypes/proto3.go (100%) rename go/vt/{ => vttablet}/tabletserver/querytypes/proto3_test.go (100%) rename go/vt/{ => vttablet}/tabletserver/querytypes/query_split.go (100%) rename go/vt/{ => vttablet}/tabletserver/queryz.go (98%) rename go/vt/{ => vttablet}/tabletserver/queryz_test.go (95%) rename go/vt/{ => vttablet}/tabletserver/replication_watcher.go (97%) rename go/vt/{ => vttablet}/tabletserver/rules/map.go (97%) rename go/vt/{ => vttablet}/tabletserver/rules/map_test.go (99%) rename go/vt/{ => vttablet}/tabletserver/rules/rules.go (99%) rename go/vt/{ => vttablet}/tabletserver/rules/rules_test.go (99%) rename go/vt/{tabletserver/engines => vttablet/tabletserver}/schema/engine.go (99%) rename go/vt/{tabletserver/engines => vttablet/tabletserver}/schema/engine_test.go (98%) rename go/vt/{tabletserver/engines => vttablet/tabletserver}/schema/load_table.go (97%) rename go/vt/{tabletserver/engines => vttablet/tabletserver}/schema/load_table_test.go (99%) rename go/vt/{tabletserver/engines => 
vttablet/tabletserver}/schema/schema.go (100%) rename go/vt/{tabletserver/engines => vttablet/tabletserver}/schema/schema_test.go (100%) rename go/vt/{tabletserver/engines => vttablet/tabletserver}/schema/schematest/schematest.go (100%) rename go/vt/{tabletserver/engines => vttablet/tabletserver}/schema/schemaz.go (100%) rename go/vt/{tabletserver/engines => vttablet/tabletserver}/schema/schemaz_test.go (100%) rename go/vt/{ => vttablet}/tabletserver/splitquery/doc.go (100%) rename go/vt/{ => vttablet}/tabletserver/splitquery/equal_splits_algorithm.go (99%) rename go/vt/{ => vttablet}/tabletserver/splitquery/equal_splits_algorithm_test.go (97%) rename go/vt/{ => vttablet}/tabletserver/splitquery/example_test.go (94%) rename go/vt/{ => vttablet}/tabletserver/splitquery/full_scan_algorithm.go (98%) rename go/vt/{ => vttablet}/tabletserver/splitquery/full_scan_algorithm_test.go (97%) rename go/vt/{ => vttablet}/tabletserver/splitquery/split_algorithm_interface.go (94%) rename go/vt/{ => vttablet}/tabletserver/splitquery/split_params.go (98%) rename go/vt/{ => vttablet}/tabletserver/splitquery/split_params_test.go (98%) rename go/vt/{ => vttablet}/tabletserver/splitquery/splitquery_testing/mock_sqlexecuter.go (100%) rename go/vt/{ => vttablet}/tabletserver/splitquery/splitter.go (98%) rename go/vt/{ => vttablet}/tabletserver/splitquery/splitter_test.go (98%) rename go/vt/{ => vttablet}/tabletserver/splitquery/sql_executer_interface.go (100%) rename go/vt/{ => vttablet}/tabletserver/splitquery/testutils_test.go (98%) rename go/vt/{ => vttablet}/tabletserver/splitquery/utils.go (100%) rename go/vt/{ => vttablet}/tabletserver/status.go (98%) rename go/vt/{ => vttablet}/tabletserver/stream_queryz.go (100%) rename go/vt/{ => vttablet}/tabletserver/stream_queryz_test.go (100%) rename go/vt/{ => vttablet}/tabletserver/tabletenv/config.go (100%) rename go/vt/{ => vttablet}/tabletserver/tabletenv/local_context.go (100%) rename go/vt/{ => 
vttablet}/tabletserver/tabletenv/logstats.go (100%) rename go/vt/{ => vttablet}/tabletserver/tabletenv/logstats_test.go (100%) rename go/vt/{ => vttablet}/tabletserver/tabletenv/tabletenv.go (100%) rename go/vt/{ => vttablet}/tabletserver/tabletserver.go (98%) rename go/vt/{ => vttablet}/tabletserver/tabletserver_test.go (99%) rename go/vt/{ => vttablet}/tabletserver/testutils_test.go (95%) rename go/vt/{ => vttablet}/tabletserver/twopc.go (99%) rename go/vt/{ => vttablet}/tabletserver/twopc_test.go (100%) rename go/vt/{ => vttablet}/tabletserver/twopcz.go (100%) rename go/vt/{ => vttablet}/tabletserver/tx_engine.go (98%) rename go/vt/{ => vttablet}/tabletserver/tx_engine_test.go (97%) rename go/vt/{ => vttablet}/tabletserver/tx_executor.go (98%) rename go/vt/{ => vttablet}/tabletserver/tx_executor_test.go (99%) rename go/vt/{ => vttablet}/tabletserver/tx_pool.go (98%) rename go/vt/{ => vttablet}/tabletserver/tx_pool_test.go (99%) rename go/vt/{ => vttablet}/tabletserver/tx_prep_pool.go (100%) rename go/vt/{ => vttablet}/tabletserver/tx_prep_pool_test.go (100%) rename go/vt/{ => vttablet}/tabletserver/txlogz.go (97%) rename go/vt/{ => vttablet}/tabletserver/txlogz_test.go (96%) rename go/vt/{ => vttablet}/tabletserver/txthrottler/mock_healthcheck_test.go (97%) rename go/vt/{ => vttablet}/tabletserver/txthrottler/mock_server_test.go (100%) rename go/vt/{ => vttablet}/tabletserver/txthrottler/mock_throttler_test.go (100%) rename go/vt/{ => vttablet}/tabletserver/txthrottler/mock_topology_watcher_test.go (100%) rename go/vt/{ => vttablet}/tabletserver/txthrottler/mock_toposerver_impl_test.go (100%) rename go/vt/{ => vttablet}/tabletserver/txthrottler/tx_throttler.go (99%) rename go/vt/{ => vttablet}/tabletserver/txthrottler/tx_throttler_test.go (98%) rename go/vt/{tabletserver => vttablet}/tabletservermock/controller.go (97%) rename go/vt/{tabletmanager => vttablet}/tmclient/rpc_client_api.go (100%) diff --git a/go/cmd/l2vtgate/plugin_grpcqueryservice.go 
b/go/cmd/l2vtgate/plugin_grpcqueryservice.go index 609c977adc2..d8bf82553b3 100644 --- a/go/cmd/l2vtgate/plugin_grpcqueryservice.go +++ b/go/cmd/l2vtgate/plugin_grpcqueryservice.go @@ -8,9 +8,9 @@ package main import ( "github.com/youtube/vitess/go/vt/servenv" - "github.com/youtube/vitess/go/vt/tabletserver/grpcqueryservice" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice" "github.com/youtube/vitess/go/vt/vtgate/l2vtgate" + "github.com/youtube/vitess/go/vt/vttablet/grpcqueryservice" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" ) func init() { diff --git a/go/cmd/l2vtgate/plugin_grpctabletconn.go b/go/cmd/l2vtgate/plugin_grpctabletconn.go index 282f56094ad..e773721e758 100644 --- a/go/cmd/l2vtgate/plugin_grpctabletconn.go +++ b/go/cmd/l2vtgate/plugin_grpctabletconn.go @@ -7,5 +7,5 @@ package main // Imports and register the gRPC tabletconn client import ( - _ "github.com/youtube/vitess/go/vt/tabletserver/grpctabletconn" + _ "github.com/youtube/vitess/go/vt/vttablet/grpctabletconn" ) diff --git a/go/cmd/vtcombo/main.go b/go/cmd/vtcombo/main.go index edad1c63cc9..572e8bb36b8 100644 --- a/go/cmd/vtcombo/main.go +++ b/go/cmd/vtcombo/main.go @@ -23,11 +23,11 @@ import ( "github.com/youtube/vitess/go/vt/discovery" "github.com/youtube/vitess/go/vt/mysqlctl" "github.com/youtube/vitess/go/vt/servenv" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/memorytopo" "github.com/youtube/vitess/go/vt/vtctld" "github.com/youtube/vitess/go/vt/vtgate" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" vttestpb "github.com/youtube/vitess/go/vt/proto/vttest" diff --git a/go/cmd/vtcombo/tablet_map.go b/go/cmd/vtcombo/tablet_map.go index 940403638e1..76e8c1271a7 100644 --- a/go/cmd/vtcombo/tablet_map.go +++ b/go/cmd/vtcombo/tablet_map.go @@ -18,17 +18,17 @@ import ( 
"github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/mysqlctl" "github.com/youtube/vitess/go/vt/mysqlctl/tmutils" - "github.com/youtube/vitess/go/vt/tabletmanager" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" - "github.com/youtube/vitess/go/vt/tabletserver" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" - "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/topoproto" "github.com/youtube/vitess/go/vt/topotools" "github.com/youtube/vitess/go/vt/vterrors" "github.com/youtube/vitess/go/vt/vtgate/vindexes" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" + "github.com/youtube/vitess/go/vt/vttablet/tabletconn" + "github.com/youtube/vitess/go/vt/vttablet/tabletmanager" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/cmd/vtctl/plugin_grpctabletconn.go b/go/cmd/vtctl/plugin_grpctabletconn.go index 282f56094ad..e773721e758 100644 --- a/go/cmd/vtctl/plugin_grpctabletconn.go +++ b/go/cmd/vtctl/plugin_grpctabletconn.go @@ -7,5 +7,5 @@ package main // Imports and register the gRPC tabletconn client import ( - _ "github.com/youtube/vitess/go/vt/tabletserver/grpctabletconn" + _ "github.com/youtube/vitess/go/vt/vttablet/grpctabletconn" ) diff --git a/go/cmd/vtctl/plugin_grpctmclient.go b/go/cmd/vtctl/plugin_grpctmclient.go index a41f5ccad3e..16db003c134 100644 --- a/go/cmd/vtctl/plugin_grpctmclient.go +++ b/go/cmd/vtctl/plugin_grpctmclient.go @@ -7,5 +7,5 @@ package main // Imports and register the gRPC tabletmanager client import ( - _ "github.com/youtube/vitess/go/vt/tabletmanager/grpctmclient" + _ 
"github.com/youtube/vitess/go/vt/vttablet/grpctmclient" ) diff --git a/go/cmd/vtctl/vtctl.go b/go/cmd/vtctl/vtctl.go index b186ba3ffa2..21087254e68 100644 --- a/go/cmd/vtctl/vtctl.go +++ b/go/cmd/vtctl/vtctl.go @@ -18,9 +18,9 @@ import ( "github.com/youtube/vitess/go/exit" "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/servenv" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/vtctl" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" "golang.org/x/net/context" ) diff --git a/go/cmd/vtctld/plugin_grpctabletconn.go b/go/cmd/vtctld/plugin_grpctabletconn.go index 282f56094ad..e773721e758 100644 --- a/go/cmd/vtctld/plugin_grpctabletconn.go +++ b/go/cmd/vtctld/plugin_grpctabletconn.go @@ -7,5 +7,5 @@ package main // Imports and register the gRPC tabletconn client import ( - _ "github.com/youtube/vitess/go/vt/tabletserver/grpctabletconn" + _ "github.com/youtube/vitess/go/vt/vttablet/grpctabletconn" ) diff --git a/go/cmd/vtctld/plugin_grpctmclient.go b/go/cmd/vtctld/plugin_grpctmclient.go index a41f5ccad3e..16db003c134 100644 --- a/go/cmd/vtctld/plugin_grpctmclient.go +++ b/go/cmd/vtctld/plugin_grpctmclient.go @@ -7,5 +7,5 @@ package main // Imports and register the gRPC tabletmanager client import ( - _ "github.com/youtube/vitess/go/vt/tabletmanager/grpctmclient" + _ "github.com/youtube/vitess/go/vt/vttablet/grpctmclient" ) diff --git a/go/cmd/vtctld/schema.go b/go/cmd/vtctld/schema.go index 2d6ba85e37a..bae3baa211b 100644 --- a/go/cmd/vtctld/schema.go +++ b/go/cmd/vtctld/schema.go @@ -11,7 +11,7 @@ import ( "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/schemamanager" "github.com/youtube/vitess/go/vt/servenv" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" ) diff --git 
a/go/cmd/vtgate/plugin_grpctabletconn.go b/go/cmd/vtgate/plugin_grpctabletconn.go index 282f56094ad..e773721e758 100644 --- a/go/cmd/vtgate/plugin_grpctabletconn.go +++ b/go/cmd/vtgate/plugin_grpctabletconn.go @@ -7,5 +7,5 @@ package main // Imports and register the gRPC tabletconn client import ( - _ "github.com/youtube/vitess/go/vt/tabletserver/grpctabletconn" + _ "github.com/youtube/vitess/go/vt/vttablet/grpctabletconn" ) diff --git a/go/cmd/vtgateclienttest/goclienttest/echo.go b/go/cmd/vtgateclienttest/goclienttest/echo.go index 6251aa69bda..25c708b207d 100644 --- a/go/cmd/vtgateclienttest/goclienttest/echo.go +++ b/go/cmd/vtgateclienttest/goclienttest/echo.go @@ -11,8 +11,8 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/callerid" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" "github.com/youtube/vitess/go/vt/vtgate/vtgateconn" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/cmd/vtgateclienttest/services/echo.go b/go/cmd/vtgateclienttest/services/echo.go index dc431eba745..f799e3a7bf0 100644 --- a/go/cmd/vtgateclienttest/services/echo.go +++ b/go/cmd/vtgateclienttest/services/echo.go @@ -15,8 +15,8 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/callerid" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" "github.com/youtube/vitess/go/vt/vtgate/vtgateservice" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/cmd/vttablet/plugin_filecustomrule.go b/go/cmd/vttablet/plugin_filecustomrule.go index 84c68d85560..893198092f4 100644 --- a/go/cmd/vttablet/plugin_filecustomrule.go +++ b/go/cmd/vttablet/plugin_filecustomrule.go @@ -7,5 +7,5 @@ package main // Imports and register 
the file custom rule source import ( - _ "github.com/youtube/vitess/go/vt/tabletserver/customrule/filecustomrule" + _ "github.com/youtube/vitess/go/vt/vttablet/customrule/filecustomrule" ) diff --git a/go/cmd/vttablet/plugin_grpcqueryservice.go b/go/cmd/vttablet/plugin_grpcqueryservice.go index 4a6aa17088b..4077bdedff6 100644 --- a/go/cmd/vttablet/plugin_grpcqueryservice.go +++ b/go/cmd/vttablet/plugin_grpcqueryservice.go @@ -8,8 +8,8 @@ package main import ( "github.com/youtube/vitess/go/vt/servenv" - "github.com/youtube/vitess/go/vt/tabletserver" - "github.com/youtube/vitess/go/vt/tabletserver/grpcqueryservice" + "github.com/youtube/vitess/go/vt/vttablet/grpcqueryservice" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver" ) func init() { diff --git a/go/cmd/vttablet/plugin_grpctabletconn.go b/go/cmd/vttablet/plugin_grpctabletconn.go index 282f56094ad..e773721e758 100644 --- a/go/cmd/vttablet/plugin_grpctabletconn.go +++ b/go/cmd/vttablet/plugin_grpctabletconn.go @@ -7,5 +7,5 @@ package main // Imports and register the gRPC tabletconn client import ( - _ "github.com/youtube/vitess/go/vt/tabletserver/grpctabletconn" + _ "github.com/youtube/vitess/go/vt/vttablet/grpctabletconn" ) diff --git a/go/cmd/vttablet/plugin_grpctmclient.go b/go/cmd/vttablet/plugin_grpctmclient.go index a41f5ccad3e..16db003c134 100644 --- a/go/cmd/vttablet/plugin_grpctmclient.go +++ b/go/cmd/vttablet/plugin_grpctmclient.go @@ -7,5 +7,5 @@ package main // Imports and register the gRPC tabletmanager client import ( - _ "github.com/youtube/vitess/go/vt/tabletmanager/grpctmclient" + _ "github.com/youtube/vitess/go/vt/vttablet/grpctmclient" ) diff --git a/go/cmd/vttablet/plugin_grpctmserver.go b/go/cmd/vttablet/plugin_grpctmserver.go index e965ac26b8a..07ad21dd3da 100644 --- a/go/cmd/vttablet/plugin_grpctmserver.go +++ b/go/cmd/vttablet/plugin_grpctmserver.go @@ -7,5 +7,5 @@ package main // Imports and register the gRPC tabletmanager server import ( - _ 
"github.com/youtube/vitess/go/vt/tabletmanager/grpctmserver" + _ "github.com/youtube/vitess/go/vt/vttablet/grpctmserver" ) diff --git a/go/cmd/vttablet/plugin_sysloglogger.go b/go/cmd/vttablet/plugin_sysloglogger.go index caeea7706c1..0507bfbe9bb 100644 --- a/go/cmd/vttablet/plugin_sysloglogger.go +++ b/go/cmd/vttablet/plugin_sysloglogger.go @@ -3,5 +3,5 @@ package main // Imports and register the syslog-based query logger import ( - _ "github.com/youtube/vitess/go/vt/tabletserver/sysloglogger" + _ "github.com/youtube/vitess/go/vt/vttablet/sysloglogger" ) diff --git a/go/cmd/vttablet/plugin_zkcustomrule.go b/go/cmd/vttablet/plugin_zkcustomrule.go index f36a2092342..3573fa2bc4a 100644 --- a/go/cmd/vttablet/plugin_zkcustomrule.go +++ b/go/cmd/vttablet/plugin_zkcustomrule.go @@ -7,5 +7,5 @@ package main // Imports and register the zookeeper custom rule source import ( - _ "github.com/youtube/vitess/go/vt/tabletserver/customrule/zkcustomrule" + _ "github.com/youtube/vitess/go/vt/vttablet/customrule/zkcustomrule" ) diff --git a/go/cmd/vttablet/status.go b/go/cmd/vttablet/status.go index e35e900bb55..bb8a37f2ec8 100644 --- a/go/cmd/vttablet/status.go +++ b/go/cmd/vttablet/status.go @@ -6,9 +6,9 @@ import ( "github.com/youtube/vitess/go/vt/health" "github.com/youtube/vitess/go/vt/servenv" _ "github.com/youtube/vitess/go/vt/status" - "github.com/youtube/vitess/go/vt/tabletmanager" - "github.com/youtube/vitess/go/vt/tabletserver" "github.com/youtube/vitess/go/vt/topo" + "github.com/youtube/vitess/go/vt/vttablet/tabletmanager" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver" ) var ( diff --git a/go/cmd/vttablet/vttablet.go b/go/cmd/vttablet/vttablet.go index 9a2e79f1d1c..864f29f379f 100644 --- a/go/cmd/vttablet/vttablet.go +++ b/go/cmd/vttablet/vttablet.go @@ -15,11 +15,11 @@ import ( "github.com/youtube/vitess/go/vt/servenv" "github.com/youtube/vitess/go/vt/tableacl" "github.com/youtube/vitess/go/vt/tableacl/simpleacl" - 
"github.com/youtube/vitess/go/vt/tabletmanager" - "github.com/youtube/vitess/go/vt/tabletserver" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/topoproto" + "github.com/youtube/vitess/go/vt/vttablet/tabletmanager" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" "golang.org/x/net/context" ) diff --git a/go/cmd/vtworker/plugin_grpctabletconn.go b/go/cmd/vtworker/plugin_grpctabletconn.go index 282f56094ad..e773721e758 100644 --- a/go/cmd/vtworker/plugin_grpctabletconn.go +++ b/go/cmd/vtworker/plugin_grpctabletconn.go @@ -7,5 +7,5 @@ package main // Imports and register the gRPC tabletconn client import ( - _ "github.com/youtube/vitess/go/vt/tabletserver/grpctabletconn" + _ "github.com/youtube/vitess/go/vt/vttablet/grpctabletconn" ) diff --git a/go/cmd/vtworker/plugin_grpctmclient.go b/go/cmd/vtworker/plugin_grpctmclient.go index a41f5ccad3e..16db003c134 100644 --- a/go/cmd/vtworker/plugin_grpctmclient.go +++ b/go/cmd/vtworker/plugin_grpctmclient.go @@ -7,5 +7,5 @@ package main // Imports and register the gRPC tabletmanager client import ( - _ "github.com/youtube/vitess/go/vt/tabletmanager/grpctmclient" + _ "github.com/youtube/vitess/go/vt/vttablet/grpctmclient" ) diff --git a/go/vt/binlog/binlog_streamer.go b/go/vt/binlog/binlog_streamer.go index d499bb0ecf2..aee736268ad 100644 --- a/go/vt/binlog/binlog_streamer.go +++ b/go/vt/binlog/binlog_streamer.go @@ -18,7 +18,7 @@ import ( "github.com/youtube/vitess/go/stats" "github.com/youtube/vitess/go/vt/mysqlctl" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" binlogdatapb "github.com/youtube/vitess/go/vt/proto/binlogdata" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/binlog/binlog_streamer_rbr_test.go 
b/go/vt/binlog/binlog_streamer_rbr_test.go index 0ce6cce4db0..f960299a8f1 100644 --- a/go/vt/binlog/binlog_streamer_rbr_test.go +++ b/go/vt/binlog/binlog_streamer_rbr_test.go @@ -8,7 +8,7 @@ import ( "github.com/youtube/vitess/go/mysqlconn/replication" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" binlogdatapb "github.com/youtube/vitess/go/vt/proto/binlogdata" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/binlog/event_streamer.go b/go/vt/binlog/event_streamer.go index e8313855abf..aa864131541 100644 --- a/go/vt/binlog/event_streamer.go +++ b/go/vt/binlog/event_streamer.go @@ -17,7 +17,7 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/mysqlctl" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" binlogdatapb "github.com/youtube/vitess/go/vt/proto/binlogdata" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/binlog/updatestreamctl.go b/go/vt/binlog/updatestreamctl.go index 77dc76a1dfc..492bcd55d20 100644 --- a/go/vt/binlog/updatestreamctl.go +++ b/go/vt/binlog/updatestreamctl.go @@ -16,7 +16,7 @@ import ( "github.com/youtube/vitess/go/sync2" "github.com/youtube/vitess/go/tb" "github.com/youtube/vitess/go/vt/mysqlctl" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" binlogdatapb "github.com/youtube/vitess/go/vt/proto/binlogdata" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/vt/discovery/fake_healthcheck.go b/go/vt/discovery/fake_healthcheck.go index d1d82550d05..4c68c1dc6d7 100644 --- a/go/vt/discovery/fake_healthcheck.go +++ b/go/vt/discovery/fake_healthcheck.go @@ -3,9 +3,9 @@ package discovery import ( "sync" - 
"github.com/youtube/vitess/go/vt/tabletserver/queryservice" - "github.com/youtube/vitess/go/vt/tabletserver/sandboxconn" "github.com/youtube/vitess/go/vt/topo" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" + "github.com/youtube/vitess/go/vt/vttablet/sandboxconn" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/vt/discovery/healthcheck.go b/go/vt/discovery/healthcheck.go index 0f51658a911..0255594cd74 100644 --- a/go/vt/discovery/healthcheck.go +++ b/go/vt/discovery/healthcheck.go @@ -33,9 +33,9 @@ import ( "github.com/youtube/vitess/go/stats" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice" - "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" "github.com/youtube/vitess/go/vt/topo/topoproto" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" + "github.com/youtube/vitess/go/vt/vttablet/tabletconn" "golang.org/x/net/context" ) diff --git a/go/vt/discovery/healthcheck_test.go b/go/vt/discovery/healthcheck_test.go index 2411e871138..93c45ea9724 100644 --- a/go/vt/discovery/healthcheck_test.go +++ b/go/vt/discovery/healthcheck_test.go @@ -11,10 +11,10 @@ import ( "time" "github.com/youtube/vitess/go/vt/status" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice/fakes" - "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" "github.com/youtube/vitess/go/vt/topo" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" + "github.com/youtube/vitess/go/vt/vttablet/queryservice/fakes" + "github.com/youtube/vitess/go/vt/vttablet/tabletconn" "golang.org/x/net/context" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/schemamanager/schemamanager_test.go b/go/vt/schemamanager/schemamanager_test.go index 1131473c7bb..c6fe1bc31e7 100644 --- 
a/go/vt/schemamanager/schemamanager_test.go +++ b/go/vt/schemamanager/schemamanager_test.go @@ -11,11 +11,11 @@ import ( "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/mysqlctl/tmutils" - "github.com/youtube/vitess/go/vt/tabletmanager/faketmclient" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/test/faketopo" "github.com/youtube/vitess/go/vt/topo/topoproto" + "github.com/youtube/vitess/go/vt/vttablet/faketmclient" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" "golang.org/x/net/context" @@ -24,7 +24,7 @@ import ( topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" // import the gRPC client implementation for tablet manager - _ "github.com/youtube/vitess/go/vt/tabletmanager/grpctmclient" + _ "github.com/youtube/vitess/go/vt/vttablet/grpctmclient" ) var ( diff --git a/go/vt/schemamanager/schemaswap/schema_swap.go b/go/vt/schemamanager/schemaswap/schema_swap.go index db8c0847dff..5afc20d046f 100644 --- a/go/vt/schemamanager/schemaswap/schema_swap.go +++ b/go/vt/schemamanager/schemaswap/schema_swap.go @@ -21,9 +21,9 @@ import ( "github.com/youtube/vitess/go/vt/logutil" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" workflowpb "github.com/youtube/vitess/go/vt/proto/workflow" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/vtctl" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/workflow" "github.com/youtube/vitess/go/vt/wrangler" ) diff --git a/go/vt/throttler/demo/throttler_demo.go b/go/vt/throttler/demo/throttler_demo.go index 2f9adbb862c..efdb75f127c 100644 --- a/go/vt/throttler/demo/throttler_demo.go +++ b/go/vt/throttler/demo/throttler_demo.go @@ -15,11 +15,11 @@ import ( "github.com/youtube/vitess/go/vt/discovery" "github.com/youtube/vitess/go/vt/logutil" 
"github.com/youtube/vitess/go/vt/servenv" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" - "github.com/youtube/vitess/go/vt/tabletserver/grpcqueryservice" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice/fakes" "github.com/youtube/vitess/go/vt/throttler" "github.com/youtube/vitess/go/vt/topo/memorytopo" + "github.com/youtube/vitess/go/vt/vttablet/grpcqueryservice" + "github.com/youtube/vitess/go/vt/vttablet/queryservice/fakes" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" "github.com/youtube/vitess/go/vt/wrangler/testlib" diff --git a/go/vt/vtctl/grpcvtctlserver/server.go b/go/vt/vtctl/grpcvtctlserver/server.go index 5f5f89ce524..640499d157a 100644 --- a/go/vt/vtctl/grpcvtctlserver/server.go +++ b/go/vt/vtctl/grpcvtctlserver/server.go @@ -13,9 +13,9 @@ import ( "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/servenv" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/vtctl" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" logutilpb "github.com/youtube/vitess/go/vt/proto/logutil" diff --git a/go/vt/vtctl/query.go b/go/vt/vtctl/query.go index 528afcb5744..3858028c6f6 100644 --- a/go/vt/vtctl/query.go +++ b/go/vt/vtctl/query.go @@ -21,9 +21,9 @@ import ( "github.com/youtube/vitess/go/vt/callerid" "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/servenv" - "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" "github.com/youtube/vitess/go/vt/topo/topoproto" "github.com/youtube/vitess/go/vt/vtgate/vtgateconn" + "github.com/youtube/vitess/go/vt/vttablet/tabletconn" "github.com/youtube/vitess/go/vt/wrangler" "golang.org/x/net/context" diff --git a/go/vt/vtctl/vtctlclienttest/client.go b/go/vt/vtctl/vtctlclienttest/client.go index abf60960bc0..c08a913801e 100644 --- a/go/vt/vtctl/vtctlclienttest/client.go +++ 
b/go/vt/vtctl/vtctlclienttest/client.go @@ -21,13 +21,13 @@ import ( "golang.org/x/net/context" "github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/memorytopo" "github.com/youtube/vitess/go/vt/vtctl/vtctlclient" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" // import the gRPC client implementation for tablet manager - _ "github.com/youtube/vitess/go/vt/tabletmanager/grpctmclient" + _ "github.com/youtube/vitess/go/vt/vttablet/grpctmclient" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" ) diff --git a/go/vt/vtctld/action_repository.go b/go/vt/vtctld/action_repository.go index 4908f9fa765..bc059fcd128 100644 --- a/go/vt/vtctld/action_repository.go +++ b/go/vt/vtctld/action_repository.go @@ -9,9 +9,9 @@ import ( "github.com/youtube/vitess/go/acl" "github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/topoproto" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/vt/vtctld/api.go b/go/vt/vtctld/api.go index dd10e16499e..5edc9e471da 100644 --- a/go/vt/vtctld/api.go +++ b/go/vt/vtctld/api.go @@ -20,10 +20,10 @@ import ( "github.com/youtube/vitess/go/acl" "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/schemamanager" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/topoproto" "github.com/youtube/vitess/go/vt/vtctl" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/workflow" "github.com/youtube/vitess/go/vt/wrangler" diff --git a/go/vt/vtctld/realtime_status_test.go b/go/vt/vtctld/realtime_status_test.go index 03b2338dca3..152f747fb6a 100644 
--- a/go/vt/vtctld/realtime_status_test.go +++ b/go/vt/vtctld/realtime_status_test.go @@ -11,10 +11,10 @@ import ( "github.com/golang/protobuf/proto" "github.com/youtube/vitess/go/vt/discovery" "github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" - "github.com/youtube/vitess/go/vt/tabletserver/grpcqueryservice" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice/fakes" "github.com/youtube/vitess/go/vt/topo/memorytopo" + "github.com/youtube/vitess/go/vt/vttablet/grpcqueryservice" + "github.com/youtube/vitess/go/vt/vttablet/queryservice/fakes" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" "github.com/youtube/vitess/go/vt/wrangler/testlib" diff --git a/go/vt/vtctld/tablet_data.go b/go/vt/vtctld/tablet_data.go index c79f5934c6a..c0f756821ac 100644 --- a/go/vt/vtctld/tablet_data.go +++ b/go/vt/vtctld/tablet_data.go @@ -8,8 +8,8 @@ import ( log "github.com/golang/glog" - "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" "github.com/youtube/vitess/go/vt/topo" + "github.com/youtube/vitess/go/vt/vttablet/tabletconn" "golang.org/x/net/context" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/vtctld/tablet_data_test.go b/go/vt/vtctld/tablet_data_test.go index c8edc9abcea..701f6c074f8 100644 --- a/go/vt/vtctld/tablet_data_test.go +++ b/go/vt/vtctld/tablet_data_test.go @@ -10,11 +10,11 @@ import ( "github.com/golang/protobuf/proto" "github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" - "github.com/youtube/vitess/go/vt/tabletserver/grpcqueryservice" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice/fakes" "github.com/youtube/vitess/go/vt/topo/memorytopo" + "github.com/youtube/vitess/go/vt/vttablet/grpcqueryservice" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" + 
"github.com/youtube/vitess/go/vt/vttablet/queryservice/fakes" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" "github.com/youtube/vitess/go/vt/wrangler/testlib" diff --git a/go/vt/vtgate/engine/primitive.go b/go/vt/vtgate/engine/primitive.go index e0532205582..45ff8b4171e 100644 --- a/go/vt/vtgate/engine/primitive.go +++ b/go/vt/vtgate/engine/primitive.go @@ -7,8 +7,8 @@ package engine import ( "github.com/youtube/vitess/go/sqltypes" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" "github.com/youtube/vitess/go/vt/vtgate/queryinfo" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" ) // SeqVarName is a reserved bind var name for sequence values. diff --git a/go/vt/vtgate/engine/route.go b/go/vt/vtgate/engine/route.go index 18ac0e89ae9..d67cc464289 100644 --- a/go/vt/vtgate/engine/route.go +++ b/go/vt/vtgate/engine/route.go @@ -14,9 +14,9 @@ import ( "github.com/youtube/vitess/go/sqltypes" querypb "github.com/youtube/vitess/go/vt/proto/query" "github.com/youtube/vitess/go/vt/sqlannotation" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" "github.com/youtube/vitess/go/vt/vtgate/queryinfo" "github.com/youtube/vitess/go/vt/vtgate/vindexes" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" ) // Route represents the instructions to route a query to diff --git a/go/vt/vtgate/gateway/discoverygateway.go b/go/vt/vtgate/gateway/discoverygateway.go index 40000db5a05..a427be7c173 100644 --- a/go/vt/vtgate/gateway/discoverygateway.go +++ b/go/vt/vtgate/gateway/discoverygateway.go @@ -18,11 +18,11 @@ import ( "github.com/youtube/vitess/go/flagutil" "github.com/youtube/vitess/go/vt/discovery" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/vterrors" "github.com/youtube/vitess/go/vt/vtgate/buffer" 
"github.com/youtube/vitess/go/vt/vtgate/masterbuffer" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/vt/vtgate/gateway/discoverygateway_test.go b/go/vt/vtgate/gateway/discoverygateway_test.go index 9df2ae102f1..5e516ad72f0 100644 --- a/go/vt/vtgate/gateway/discoverygateway_test.go +++ b/go/vt/vtgate/gateway/discoverygateway_test.go @@ -8,9 +8,9 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/discovery" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/vterrors" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/vt/vtgate/gateway/gateway.go b/go/vt/vtgate/gateway/gateway.go index 41bbdd6fc2c..56170ecd766 100644 --- a/go/vt/vtgate/gateway/gateway.go +++ b/go/vt/vtgate/gateway/gateway.go @@ -14,8 +14,8 @@ import ( "golang.org/x/net/context" "github.com/youtube/vitess/go/vt/discovery" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice" "github.com/youtube/vitess/go/vt/topo" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" ) diff --git a/go/vt/vtgate/gateway/l2vtgategateway.go b/go/vt/vtgate/gateway/l2vtgategateway.go index 1a69200a106..2e381b22a94 100644 --- a/go/vt/vtgate/gateway/l2vtgategateway.go +++ b/go/vt/vtgate/gateway/l2vtgategateway.go @@ -18,10 +18,10 @@ import ( "github.com/youtube/vitess/go/flagutil" "github.com/youtube/vitess/go/vt/discovery" "github.com/youtube/vitess/go/vt/key" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice" - "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" "github.com/youtube/vitess/go/vt/topo" 
"github.com/youtube/vitess/go/vt/topo/topoproto" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" + "github.com/youtube/vitess/go/vt/vttablet/tabletconn" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/vt/vtgate/gatewaytest/grpc_discovery_test.go b/go/vt/vtgate/gatewaytest/grpc_discovery_test.go index 2aa4084b43c..be7e64994e8 100644 --- a/go/vt/vtgate/gatewaytest/grpc_discovery_test.go +++ b/go/vt/vtgate/gatewaytest/grpc_discovery_test.go @@ -12,13 +12,13 @@ import ( "google.golang.org/grpc" "github.com/youtube/vitess/go/vt/discovery" - "github.com/youtube/vitess/go/vt/tabletserver/grpcqueryservice" - "github.com/youtube/vitess/go/vt/tabletserver/tabletconntest" "github.com/youtube/vitess/go/vt/vtgate/gateway" "github.com/youtube/vitess/go/vt/vtgate/l2vtgate" + "github.com/youtube/vitess/go/vt/vttablet/grpcqueryservice" + "github.com/youtube/vitess/go/vt/vttablet/tabletconntest" // We will use gRPC to connect, register the dialer - _ "github.com/youtube/vitess/go/vt/tabletserver/grpctabletconn" + _ "github.com/youtube/vitess/go/vt/vttablet/grpctabletconn" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" ) diff --git a/go/vt/vtgate/gatewaytest/suite.go b/go/vt/vtgate/gatewaytest/suite.go index 4f496d3725a..2bbf8707a7f 100644 --- a/go/vt/vtgate/gatewaytest/suite.go +++ b/go/vt/vtgate/gatewaytest/suite.go @@ -14,12 +14,12 @@ import ( "golang.org/x/net/context" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice" - "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" - "github.com/youtube/vitess/go/vt/tabletserver/tabletconntest" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/memorytopo" "github.com/youtube/vitess/go/vt/vtgate/gateway" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" + "github.com/youtube/vitess/go/vt/vttablet/tabletconn" + "github.com/youtube/vitess/go/vt/vttablet/tabletconntest" querypb 
"github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/vt/vtgate/grpcvtgateconn/conn.go b/go/vt/vtgate/grpcvtgateconn/conn.go index 80c303b0846..92f4f83572f 100644 --- a/go/vt/vtgate/grpcvtgateconn/conn.go +++ b/go/vt/vtgate/grpcvtgateconn/conn.go @@ -15,9 +15,9 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/callerid" "github.com/youtube/vitess/go/vt/servenv/grpcutils" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" "github.com/youtube/vitess/go/vt/vterrors" "github.com/youtube/vitess/go/vt/vtgate/vtgateconn" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" "golang.org/x/net/context" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/vtgate/grpcvtgateservice/server.go b/go/vt/vtgate/grpcvtgateservice/server.go index 91728004067..aefd83127de 100644 --- a/go/vt/vtgate/grpcvtgateservice/server.go +++ b/go/vt/vtgate/grpcvtgateservice/server.go @@ -16,10 +16,10 @@ import ( "github.com/youtube/vitess/go/vt/callerid" "github.com/youtube/vitess/go/vt/callinfo" "github.com/youtube/vitess/go/vt/servenv" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" "github.com/youtube/vitess/go/vt/vterrors" "github.com/youtube/vitess/go/vt/vtgate" "github.com/youtube/vitess/go/vt/vtgate/vtgateservice" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" "golang.org/x/net/context" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/vtgate/l2vtgate/l2vtgate.go b/go/vt/vtgate/l2vtgate/l2vtgate.go index 4e8859c3c4e..cbfc6b6fdf3 100644 --- a/go/vt/vtgate/l2vtgate/l2vtgate.go +++ b/go/vt/vtgate/l2vtgate/l2vtgate.go @@ -15,11 +15,11 @@ import ( "github.com/youtube/vitess/go/stats" "github.com/youtube/vitess/go/vt/discovery" "github.com/youtube/vitess/go/vt/servenv" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice" "github.com/youtube/vitess/go/vt/topo" 
"github.com/youtube/vitess/go/vt/topo/topoproto" "github.com/youtube/vitess/go/vt/vterrors" "github.com/youtube/vitess/go/vt/vtgate/gateway" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/vt/vtgate/query_executor.go b/go/vt/vtgate/query_executor.go index 2c68454d1f6..2751334b047 100644 --- a/go/vt/vtgate/query_executor.go +++ b/go/vt/vtgate/query_executor.go @@ -12,7 +12,7 @@ import ( querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" vtgatepb "github.com/youtube/vitess/go/vt/proto/vtgate" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" ) type queryExecutor struct { diff --git a/go/vt/vtgate/router_dml_test.go b/go/vt/vtgate/router_dml_test.go index 8d3fe078d9e..52e1bbc101b 100644 --- a/go/vt/vtgate/router_dml_test.go +++ b/go/vt/vtgate/router_dml_test.go @@ -10,9 +10,9 @@ import ( "testing" "github.com/youtube/vitess/go/sqltypes" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" - "github.com/youtube/vitess/go/vt/tabletserver/sandboxconn" _ "github.com/youtube/vitess/go/vt/vtgate/vindexes" + "github.com/youtube/vitess/go/vt/vttablet/sandboxconn" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" querypb "github.com/youtube/vitess/go/vt/proto/query" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" diff --git a/go/vt/vtgate/router_framework_test.go b/go/vt/vtgate/router_framework_test.go index dc6850cfa28..85eeb035de1 100644 --- a/go/vt/vtgate/router_framework_test.go +++ b/go/vt/vtgate/router_framework_test.go @@ -7,7 +7,7 @@ package vtgate import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/discovery" - "github.com/youtube/vitess/go/vt/tabletserver/sandboxconn" + "github.com/youtube/vitess/go/vt/vttablet/sandboxconn" 
"golang.org/x/net/context" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/vt/vtgate/router_select_test.go b/go/vt/vtgate/router_select_test.go index bf4e1a89ef1..29e94141382 100644 --- a/go/vt/vtgate/router_select_test.go +++ b/go/vt/vtgate/router_select_test.go @@ -14,9 +14,9 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/discovery" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" - "github.com/youtube/vitess/go/vt/tabletserver/sandboxconn" _ "github.com/youtube/vitess/go/vt/vtgate/vindexes" + "github.com/youtube/vitess/go/vt/vttablet/sandboxconn" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/vt/vtgate/sandbox_test.go b/go/vt/vtgate/sandbox_test.go index 1c3324873bc..42cbae89c61 100644 --- a/go/vt/vtgate/sandbox_test.go +++ b/go/vt/vtgate/sandbox_test.go @@ -12,11 +12,11 @@ import ( "time" "github.com/youtube/vitess/go/vt/key" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice" - "github.com/youtube/vitess/go/vt/tabletserver/sandboxconn" - "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/vterrors" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" + "github.com/youtube/vitess/go/vt/vttablet/sandboxconn" + "github.com/youtube/vitess/go/vt/vttablet/tabletconn" "golang.org/x/net/context" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/vt/vtgate/scatter_conn.go b/go/vt/vtgate/scatter_conn.go index ed1c8659156..44e19318c63 100644 --- a/go/vt/vtgate/scatter_conn.go +++ b/go/vt/vtgate/scatter_conn.go @@ -14,10 +14,10 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/stats" "github.com/youtube/vitess/go/vt/concurrency" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" 
"github.com/youtube/vitess/go/vt/topo/topoproto" "github.com/youtube/vitess/go/vt/vterrors" "github.com/youtube/vitess/go/vt/vtgate/gateway" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/vt/vtgate/scatter_conn_test.go b/go/vt/vtgate/scatter_conn_test.go index f83480190b7..4b68251cb69 100644 --- a/go/vt/vtgate/scatter_conn_test.go +++ b/go/vt/vtgate/scatter_conn_test.go @@ -21,7 +21,7 @@ import ( topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" vtgatepb "github.com/youtube/vitess/go/vt/proto/vtgate" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" ) // This file uses the sandbox_test framework. diff --git a/go/vt/vtgate/topo_utils.go b/go/vt/vtgate/topo_utils.go index 29ef91b0668..31f86a37e57 100644 --- a/go/vt/vtgate/topo_utils.go +++ b/go/vt/vtgate/topo_utils.go @@ -9,10 +9,10 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/key" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/topoproto" "github.com/youtube/vitess/go/vt/vterrors" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" "golang.org/x/net/context" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/vt/vtgate/topo_utils_test.go b/go/vt/vtgate/topo_utils_test.go index 773a0e393a9..32f8dc9cc79 100644 --- a/go/vt/vtgate/topo_utils_test.go +++ b/go/vt/vtgate/topo_utils_test.go @@ -12,7 +12,7 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/key" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" "golang.org/x/net/context" querypb 
"github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/vtgate/tx_conn_test.go b/go/vt/vtgate/tx_conn_test.go index 4566cd95c5c..d5abb230862 100644 --- a/go/vt/vtgate/tx_conn_test.go +++ b/go/vt/vtgate/tx_conn_test.go @@ -11,8 +11,8 @@ import ( "testing" "github.com/youtube/vitess/go/vt/discovery" - "github.com/youtube/vitess/go/vt/tabletserver/sandboxconn" "github.com/youtube/vitess/go/vt/vterrors" + "github.com/youtube/vitess/go/vt/vttablet/sandboxconn" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/vt/vtgate/vindexes/lookup_hash_test.go b/go/vt/vtgate/vindexes/lookup_hash_test.go index 8832607268c..dd4d02c479b 100644 --- a/go/vt/vtgate/vindexes/lookup_hash_test.go +++ b/go/vt/vtgate/vindexes/lookup_hash_test.go @@ -13,7 +13,7 @@ import ( "github.com/youtube/vitess/go/sqltypes" querypb "github.com/youtube/vitess/go/vt/proto/query" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" ) type vcursor struct { diff --git a/go/vt/vtgate/vindexes/lookup_hash_unique_test.go b/go/vt/vtgate/vindexes/lookup_hash_unique_test.go index 2386c1e81d2..1c2c8522d21 100644 --- a/go/vt/vtgate/vindexes/lookup_hash_unique_test.go +++ b/go/vt/vtgate/vindexes/lookup_hash_unique_test.go @@ -8,7 +8,7 @@ import ( "reflect" "testing" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" ) var lhu Vindex diff --git a/go/vt/vtgate/vindexes/lookup_test.go b/go/vt/vtgate/vindexes/lookup_test.go index 5054a48a53b..6deb025a833 100644 --- a/go/vt/vtgate/vindexes/lookup_test.go +++ b/go/vt/vtgate/vindexes/lookup_test.go @@ -6,7 +6,7 @@ import ( "strings" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" ) var lookupUnique Vindex diff --git a/go/vt/vtgate/vtgate.go 
b/go/vt/vtgate/vtgate.go index e128781f212..c682ba645c9 100644 --- a/go/vt/vtgate/vtgate.go +++ b/go/vt/vtgate/vtgate.go @@ -25,10 +25,10 @@ import ( "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/servenv" "github.com/youtube/vitess/go/vt/sqlannotation" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/topoproto" "github.com/youtube/vitess/go/vt/vterrors" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" "github.com/youtube/vitess/go/vt/vtgate/gateway" "github.com/youtube/vitess/go/vt/vtgate/vtgateservice" diff --git a/go/vt/vtgate/vtgate_test.go b/go/vt/vtgate/vtgate_test.go index f477fa91be6..0d92e0fbb10 100644 --- a/go/vt/vtgate/vtgate_test.go +++ b/go/vt/vtgate/vtgate_test.go @@ -16,10 +16,10 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/discovery" "github.com/youtube/vitess/go/vt/key" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" - "github.com/youtube/vitess/go/vt/tabletserver/sandboxconn" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/vterrors" + "github.com/youtube/vitess/go/vt/vttablet/sandboxconn" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" "golang.org/x/net/context" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/tabletmanager/agentrpctest/test_agent_rpc.go b/go/vt/vttablet/agentrpctest/test_agent_rpc.go similarity index 99% rename from go/vt/tabletmanager/agentrpctest/test_agent_rpc.go rename to go/vt/vttablet/agentrpctest/test_agent_rpc.go index 8bd9696d880..af8b8a0add2 100644 --- a/go/vt/tabletmanager/agentrpctest/test_agent_rpc.go +++ b/go/vt/vttablet/agentrpctest/test_agent_rpc.go @@ -19,8 +19,8 @@ import ( "github.com/youtube/vitess/go/vt/hook" "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/mysqlctl/tmutils" - "github.com/youtube/vitess/go/vt/tabletmanager" - 
"github.com/youtube/vitess/go/vt/tabletmanager/tmclient" + "github.com/youtube/vitess/go/vt/vttablet/tabletmanager" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" querypb "github.com/youtube/vitess/go/vt/proto/query" replicationdatapb "github.com/youtube/vitess/go/vt/proto/replicationdata" diff --git a/go/vt/tabletserver/customrule/filecustomrule/filecustomrule.go b/go/vt/vttablet/customrule/filecustomrule/filecustomrule.go similarity index 95% rename from go/vt/tabletserver/customrule/filecustomrule/filecustomrule.go rename to go/vt/vttablet/customrule/filecustomrule/filecustomrule.go index 4606aa369cb..33c8f746a62 100644 --- a/go/vt/tabletserver/customrule/filecustomrule/filecustomrule.go +++ b/go/vt/vttablet/customrule/filecustomrule/filecustomrule.go @@ -11,8 +11,8 @@ import ( "time" log "github.com/golang/glog" - "github.com/youtube/vitess/go/vt/tabletserver" - "github.com/youtube/vitess/go/vt/tabletserver/rules" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/rules" ) var ( diff --git a/go/vt/tabletserver/customrule/filecustomrule/filecustomrule_test.go b/go/vt/vttablet/customrule/filecustomrule/filecustomrule_test.go similarity index 91% rename from go/vt/tabletserver/customrule/filecustomrule/filecustomrule_test.go rename to go/vt/vttablet/customrule/filecustomrule/filecustomrule_test.go index 952ed42bc4b..ab173c226c6 100644 --- a/go/vt/tabletserver/customrule/filecustomrule/filecustomrule_test.go +++ b/go/vt/vttablet/customrule/filecustomrule/filecustomrule_test.go @@ -10,8 +10,8 @@ import ( "path" "testing" - "github.com/youtube/vitess/go/vt/tabletserver/rules" - "github.com/youtube/vitess/go/vt/tabletserver/tabletservermock" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/rules" + "github.com/youtube/vitess/go/vt/vttablet/tabletservermock" ) var customRule1 = `[ diff --git a/go/vt/tabletserver/customrule/zkcustomrule/zkcustomrule.go 
b/go/vt/vttablet/customrule/zkcustomrule/zkcustomrule.go similarity index 97% rename from go/vt/tabletserver/customrule/zkcustomrule/zkcustomrule.go rename to go/vt/vttablet/customrule/zkcustomrule/zkcustomrule.go index 15974f1ecbe..db3188bd641 100644 --- a/go/vt/tabletserver/customrule/zkcustomrule/zkcustomrule.go +++ b/go/vt/vttablet/customrule/zkcustomrule/zkcustomrule.go @@ -15,8 +15,8 @@ import ( "golang.org/x/net/context" "github.com/youtube/vitess/go/vt/servenv" - "github.com/youtube/vitess/go/vt/tabletserver" - "github.com/youtube/vitess/go/vt/tabletserver/rules" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/rules" "github.com/youtube/vitess/go/vt/topo/zk2topo" ) diff --git a/go/vt/tabletserver/customrule/zkcustomrule/zkcustomrule_test.go b/go/vt/vttablet/customrule/zkcustomrule/zkcustomrule_test.go similarity index 96% rename from go/vt/tabletserver/customrule/zkcustomrule/zkcustomrule_test.go rename to go/vt/vttablet/customrule/zkcustomrule/zkcustomrule_test.go index 9570a997630..ea4aeec5846 100644 --- a/go/vt/tabletserver/customrule/zkcustomrule/zkcustomrule_test.go +++ b/go/vt/vttablet/customrule/zkcustomrule/zkcustomrule_test.go @@ -13,8 +13,8 @@ import ( "github.com/samuel/go-zookeeper/zk" "github.com/youtube/vitess/go/testfiles" - "github.com/youtube/vitess/go/vt/tabletserver/rules" - "github.com/youtube/vitess/go/vt/tabletserver/tabletservermock" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/rules" + "github.com/youtube/vitess/go/vt/vttablet/tabletservermock" "github.com/youtube/vitess/go/vt/topo/zk2topo" "github.com/youtube/vitess/go/zk/zkctl" ) diff --git a/go/vt/tabletserver/endtoend/acl_test.go b/go/vt/vttablet/endtoend/acl_test.go similarity index 97% rename from go/vt/tabletserver/endtoend/acl_test.go rename to go/vt/vttablet/endtoend/acl_test.go index 00def40d176..97f8a3eb02a 100644 --- a/go/vt/tabletserver/endtoend/acl_test.go +++ 
b/go/vt/vttablet/endtoend/acl_test.go @@ -10,8 +10,8 @@ import ( "strings" "testing" - "github.com/youtube/vitess/go/vt/tabletserver/endtoend/framework" - "github.com/youtube/vitess/go/vt/tabletserver/rules" + "github.com/youtube/vitess/go/vt/vttablet/endtoend/framework" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/rules" ) func TestTableACLNoAccess(t *testing.T) { diff --git a/go/vt/tabletserver/endtoend/batch_test.go b/go/vt/vttablet/endtoend/batch_test.go similarity index 97% rename from go/vt/tabletserver/endtoend/batch_test.go rename to go/vt/vttablet/endtoend/batch_test.go index c39def952e5..2eeea8308a2 100644 --- a/go/vt/tabletserver/endtoend/batch_test.go +++ b/go/vt/vttablet/endtoend/batch_test.go @@ -10,8 +10,8 @@ import ( "github.com/youtube/vitess/go/sqltypes" querypb "github.com/youtube/vitess/go/vt/proto/query" - "github.com/youtube/vitess/go/vt/tabletserver/endtoend/framework" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/vttablet/endtoend/framework" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" ) func TestBatchRead(t *testing.T) { diff --git a/go/vt/tabletserver/endtoend/compatibility_test.go b/go/vt/vttablet/endtoend/compatibility_test.go similarity index 99% rename from go/vt/tabletserver/endtoend/compatibility_test.go rename to go/vt/vttablet/endtoend/compatibility_test.go index 581416a37d4..5fc6605a415 100644 --- a/go/vt/tabletserver/endtoend/compatibility_test.go +++ b/go/vt/vttablet/endtoend/compatibility_test.go @@ -12,7 +12,7 @@ import ( "github.com/youtube/vitess/go/sqltypes" querypb "github.com/youtube/vitess/go/vt/proto/query" - "github.com/youtube/vitess/go/vt/tabletserver/endtoend/framework" + "github.com/youtube/vitess/go/vt/vttablet/endtoend/framework" ) var point12 = "\x00\x00\x00\x00\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\x00@" diff --git a/go/vt/tabletserver/endtoend/config_test.go 
b/go/vt/vttablet/endtoend/config_test.go similarity index 98% rename from go/vt/tabletserver/endtoend/config_test.go rename to go/vt/vttablet/endtoend/config_test.go index a2981cf888e..3b312a75087 100644 --- a/go/vt/tabletserver/endtoend/config_test.go +++ b/go/vt/vttablet/endtoend/config_test.go @@ -12,8 +12,8 @@ import ( "time" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" - "github.com/youtube/vitess/go/vt/tabletserver/endtoend/framework" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/endtoend/framework" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/vterrors" ) diff --git a/go/vt/tabletserver/endtoend/endtoend.go b/go/vt/vttablet/endtoend/endtoend.go similarity index 100% rename from go/vt/tabletserver/endtoend/endtoend.go rename to go/vt/vttablet/endtoend/endtoend.go diff --git a/go/vt/tabletserver/endtoend/framework/client.go b/go/vt/vttablet/endtoend/framework/client.go similarity index 98% rename from go/vt/tabletserver/endtoend/framework/client.go rename to go/vt/vttablet/endtoend/framework/client.go index c69e841db18..b08b8dc34a4 100644 --- a/go/vt/tabletserver/endtoend/framework/client.go +++ b/go/vt/vttablet/endtoend/framework/client.go @@ -9,8 +9,8 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/callerid" - "github.com/youtube/vitess/go/vt/tabletserver" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" "golang.org/x/net/context" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/tabletserver/endtoend/framework/debugschema.go b/go/vt/vttablet/endtoend/framework/debugschema.go similarity index 100% rename from go/vt/tabletserver/endtoend/framework/debugschema.go rename to go/vt/vttablet/endtoend/framework/debugschema.go diff --git 
a/go/vt/tabletserver/endtoend/framework/debugvars.go b/go/vt/vttablet/endtoend/framework/debugvars.go similarity index 100% rename from go/vt/tabletserver/endtoend/framework/debugvars.go rename to go/vt/vttablet/endtoend/framework/debugvars.go diff --git a/go/vt/tabletserver/endtoend/framework/eventcatcher.go b/go/vt/vttablet/endtoend/framework/eventcatcher.go similarity index 95% rename from go/vt/tabletserver/endtoend/framework/eventcatcher.go rename to go/vt/vttablet/endtoend/framework/eventcatcher.go index 51506351601..8377d8b7d0a 100644 --- a/go/vt/tabletserver/endtoend/framework/eventcatcher.go +++ b/go/vt/vttablet/endtoend/framework/eventcatcher.go @@ -9,8 +9,8 @@ import ( "time" "github.com/youtube/vitess/go/streamlog" - "github.com/youtube/vitess/go/vt/tabletserver" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" ) // TxCatcher allows you to capture and fetch transactions that are being diff --git a/go/vt/tabletserver/endtoend/framework/querystats.go b/go/vt/vttablet/endtoend/framework/querystats.go similarity index 100% rename from go/vt/tabletserver/endtoend/framework/querystats.go rename to go/vt/vttablet/endtoend/framework/querystats.go diff --git a/go/vt/tabletserver/endtoend/framework/server.go b/go/vt/vttablet/endtoend/framework/server.go similarity index 95% rename from go/vt/tabletserver/endtoend/framework/server.go rename to go/vt/vttablet/endtoend/framework/server.go index a672597debf..0a172796dcf 100644 --- a/go/vt/tabletserver/endtoend/framework/server.go +++ b/go/vt/vttablet/endtoend/framework/server.go @@ -17,8 +17,8 @@ import ( "github.com/youtube/vitess/go/vt/mysqlctl" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" - "github.com/youtube/vitess/go/vt/tabletserver" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + 
"github.com/youtube/vitess/go/vt/vttablet/tabletserver" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/vtgate/fakerpcvtgateconn" "github.com/youtube/vitess/go/vt/vtgate/vtgateconn" ) diff --git a/go/vt/tabletserver/endtoend/framework/streamqueryz.go b/go/vt/vttablet/endtoend/framework/streamqueryz.go similarity index 100% rename from go/vt/tabletserver/endtoend/framework/streamqueryz.go rename to go/vt/vttablet/endtoend/framework/streamqueryz.go diff --git a/go/vt/tabletserver/endtoend/framework/testcase.go b/go/vt/vttablet/endtoend/framework/testcase.go similarity index 100% rename from go/vt/tabletserver/endtoend/framework/testcase.go rename to go/vt/vttablet/endtoend/framework/testcase.go diff --git a/go/vt/tabletserver/endtoend/main_test.go b/go/vt/vttablet/endtoend/main_test.go similarity index 98% rename from go/vt/tabletserver/endtoend/main_test.go rename to go/vt/vttablet/endtoend/main_test.go index 58ac73a4421..67c1b06a026 100644 --- a/go/vt/tabletserver/endtoend/main_test.go +++ b/go/vt/vttablet/endtoend/main_test.go @@ -15,8 +15,8 @@ import ( "github.com/youtube/vitess/go/sqldb" "github.com/youtube/vitess/go/vt/tableacl" "github.com/youtube/vitess/go/vt/tableacl/simpleacl" - "github.com/youtube/vitess/go/vt/tabletserver/endtoend/framework" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/endtoend/framework" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/vttest" ) diff --git a/go/vt/tabletserver/endtoend/message_test.go b/go/vt/vttablet/endtoend/message_test.go similarity index 98% rename from go/vt/tabletserver/endtoend/message_test.go rename to go/vt/vttablet/endtoend/message_test.go index dc47f24807f..1b18caa0671 100644 --- a/go/vt/tabletserver/endtoend/message_test.go +++ b/go/vt/vttablet/endtoend/message_test.go @@ -13,7 +13,7 @@ import ( "time" "github.com/youtube/vitess/go/sqltypes" - 
"github.com/youtube/vitess/go/vt/tabletserver/endtoend/framework" + "github.com/youtube/vitess/go/vt/vttablet/endtoend/framework" querypb "github.com/youtube/vitess/go/vt/proto/query" ) diff --git a/go/vt/tabletserver/endtoend/metadata_test.go b/go/vt/vttablet/endtoend/metadata_test.go similarity index 98% rename from go/vt/tabletserver/endtoend/metadata_test.go rename to go/vt/vttablet/endtoend/metadata_test.go index 3fef81e5189..e74b3fcf2e1 100644 --- a/go/vt/tabletserver/endtoend/metadata_test.go +++ b/go/vt/vttablet/endtoend/metadata_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/youtube/vitess/go/sqltypes" - "github.com/youtube/vitess/go/vt/tabletserver/endtoend/framework" + "github.com/youtube/vitess/go/vt/vttablet/endtoend/framework" querypb "github.com/youtube/vitess/go/vt/proto/query" ) diff --git a/go/vt/tabletserver/endtoend/misc_test.go b/go/vt/vttablet/endtoend/misc_test.go similarity index 99% rename from go/vt/tabletserver/endtoend/misc_test.go rename to go/vt/vttablet/endtoend/misc_test.go index e9a8d515996..a0b9ea550c7 100644 --- a/go/vt/tabletserver/endtoend/misc_test.go +++ b/go/vt/vttablet/endtoend/misc_test.go @@ -19,7 +19,7 @@ import ( "github.com/youtube/vitess/go/sqldb" "github.com/youtube/vitess/go/sqltypes" - "github.com/youtube/vitess/go/vt/tabletserver/endtoend/framework" + "github.com/youtube/vitess/go/vt/vttablet/endtoend/framework" querypb "github.com/youtube/vitess/go/vt/proto/query" ) diff --git a/go/vt/tabletserver/endtoend/queries_test.go b/go/vt/vttablet/endtoend/queries_test.go similarity index 99% rename from go/vt/tabletserver/endtoend/queries_test.go rename to go/vt/vttablet/endtoend/queries_test.go index 9ea819cd9d4..3f5746f4d23 100644 --- a/go/vt/tabletserver/endtoend/queries_test.go +++ b/go/vt/vttablet/endtoend/queries_test.go @@ -7,7 +7,7 @@ package endtoend import ( "testing" - "github.com/youtube/vitess/go/vt/tabletserver/endtoend/framework" + "github.com/youtube/vitess/go/vt/vttablet/endtoend/framework" ) var 
frameworkErrors = `fail failed: diff --git a/go/vt/tabletserver/endtoend/sequence_test.go b/go/vt/vttablet/endtoend/sequence_test.go similarity index 95% rename from go/vt/tabletserver/endtoend/sequence_test.go rename to go/vt/vttablet/endtoend/sequence_test.go index 8b2a37b5c3e..4ee6e004c66 100644 --- a/go/vt/tabletserver/endtoend/sequence_test.go +++ b/go/vt/vttablet/endtoend/sequence_test.go @@ -11,7 +11,7 @@ import ( "github.com/youtube/vitess/go/sqltypes" querypb "github.com/youtube/vitess/go/vt/proto/query" - "github.com/youtube/vitess/go/vt/tabletserver/endtoend/framework" + "github.com/youtube/vitess/go/vt/vttablet/endtoend/framework" ) func TestSequence(t *testing.T) { diff --git a/go/vt/tabletserver/endtoend/stream_test.go b/go/vt/vttablet/endtoend/stream_test.go similarity index 98% rename from go/vt/tabletserver/endtoend/stream_test.go rename to go/vt/vttablet/endtoend/stream_test.go index 18d6a171e3f..ee1f8469a14 100644 --- a/go/vt/tabletserver/endtoend/stream_test.go +++ b/go/vt/vttablet/endtoend/stream_test.go @@ -14,7 +14,7 @@ import ( "github.com/youtube/vitess/go/sqltypes" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" - "github.com/youtube/vitess/go/vt/tabletserver/endtoend/framework" + "github.com/youtube/vitess/go/vt/vttablet/endtoend/framework" "github.com/youtube/vitess/go/vt/vterrors" ) diff --git a/go/vt/tabletserver/endtoend/transaction_test.go b/go/vt/vttablet/endtoend/transaction_test.go similarity index 99% rename from go/vt/tabletserver/endtoend/transaction_test.go rename to go/vt/vttablet/endtoend/transaction_test.go index 7ba9919a89c..84791a7bbfb 100644 --- a/go/vt/tabletserver/endtoend/transaction_test.go +++ b/go/vt/vttablet/endtoend/transaction_test.go @@ -12,9 +12,9 @@ import ( "time" "github.com/youtube/vitess/go/sqldb" - "github.com/youtube/vitess/go/vt/tabletserver" - "github.com/youtube/vitess/go/vt/tabletserver/endtoend/framework" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + 
"github.com/youtube/vitess/go/vt/vttablet/tabletserver" + "github.com/youtube/vitess/go/vt/vttablet/endtoend/framework" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/tabletmanager/faketmclient/fake_client.go b/go/vt/vttablet/faketmclient/fake_client.go similarity index 99% rename from go/vt/tabletmanager/faketmclient/fake_client.go rename to go/vt/vttablet/faketmclient/fake_client.go index 71aabc19213..3932472d3c7 100644 --- a/go/vt/tabletmanager/faketmclient/fake_client.go +++ b/go/vt/vttablet/faketmclient/fake_client.go @@ -18,7 +18,7 @@ import ( "github.com/youtube/vitess/go/vt/hook" "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/mysqlctl/tmutils" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" logutilpb "github.com/youtube/vitess/go/vt/proto/logutil" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/tabletserver/grpcqueryservice/server.go b/go/vt/vttablet/grpcqueryservice/server.go similarity index 99% rename from go/vt/tabletserver/grpcqueryservice/server.go rename to go/vt/vttablet/grpcqueryservice/server.go index 92c6a4ab113..131b015363d 100644 --- a/go/vt/tabletserver/grpcqueryservice/server.go +++ b/go/vt/vttablet/grpcqueryservice/server.go @@ -10,8 +10,8 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/callerid" "github.com/youtube/vitess/go/vt/callinfo" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" "github.com/youtube/vitess/go/vt/vterrors" "golang.org/x/net/context" diff --git a/go/vt/tabletserver/grpctabletconn/conn.go b/go/vt/vttablet/grpctabletconn/conn.go similarity 
index 99% rename from go/vt/tabletserver/grpctabletconn/conn.go rename to go/vt/vttablet/grpctabletconn/conn.go index 44ca989b4de..1e3534cd469 100644 --- a/go/vt/tabletserver/grpctabletconn/conn.go +++ b/go/vt/vttablet/grpctabletconn/conn.go @@ -14,9 +14,9 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/callerid" "github.com/youtube/vitess/go/vt/servenv/grpcutils" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" - "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/vttablet/tabletconn" "golang.org/x/net/context" "google.golang.org/grpc" diff --git a/go/vt/tabletserver/grpctabletconn/conn_test.go b/go/vt/vttablet/grpctabletconn/conn_test.go similarity index 89% rename from go/vt/tabletserver/grpctabletconn/conn_test.go rename to go/vt/vttablet/grpctabletconn/conn_test.go index cb65cda801d..79e029704c9 100644 --- a/go/vt/tabletserver/grpctabletconn/conn_test.go +++ b/go/vt/vttablet/grpctabletconn/conn_test.go @@ -10,8 +10,8 @@ import ( "google.golang.org/grpc" - "github.com/youtube/vitess/go/vt/tabletserver/grpcqueryservice" - "github.com/youtube/vitess/go/vt/tabletserver/tabletconntest" + "github.com/youtube/vitess/go/vt/vttablet/grpcqueryservice" + "github.com/youtube/vitess/go/vt/vttablet/tabletconntest" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" ) diff --git a/go/vt/tabletmanager/grpctmclient/client.go b/go/vt/vttablet/grpctmclient/client.go similarity index 99% rename from go/vt/tabletmanager/grpctmclient/client.go rename to go/vt/vttablet/grpctmclient/client.go index e3cc802d53a..7c94965f4d0 100644 --- a/go/vt/tabletmanager/grpctmclient/client.go +++ b/go/vt/vttablet/grpctmclient/client.go @@ -17,7 +17,7 @@ import ( "github.com/youtube/vitess/go/vt/logutil" 
"github.com/youtube/vitess/go/vt/mysqlctl/tmutils" "github.com/youtube/vitess/go/vt/servenv/grpcutils" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/topo/topoproto" "golang.org/x/net/context" diff --git a/go/vt/tabletmanager/grpctmserver/server.go b/go/vt/vttablet/grpctmserver/server.go similarity index 99% rename from go/vt/tabletmanager/grpctmserver/server.go rename to go/vt/vttablet/grpctmserver/server.go index b530c8c888c..6ffcc480dc3 100644 --- a/go/vt/tabletmanager/grpctmserver/server.go +++ b/go/vt/vttablet/grpctmserver/server.go @@ -15,7 +15,7 @@ import ( "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/mysqlctl/tmutils" "github.com/youtube/vitess/go/vt/servenv" - "github.com/youtube/vitess/go/vt/tabletmanager" + "github.com/youtube/vitess/go/vt/vttablet/tabletmanager" "github.com/youtube/vitess/go/vt/vterrors" logutilpb "github.com/youtube/vitess/go/vt/proto/logutil" diff --git a/go/vt/tabletmanager/grpctmserver/server_test.go b/go/vt/vttablet/grpctmserver/server_test.go similarity index 91% rename from go/vt/tabletmanager/grpctmserver/server_test.go rename to go/vt/vttablet/grpctmserver/server_test.go index 6d2379d9804..bf3a2a6c444 100644 --- a/go/vt/tabletmanager/grpctmserver/server_test.go +++ b/go/vt/vttablet/grpctmserver/server_test.go @@ -8,8 +8,8 @@ import ( "net" "testing" - "github.com/youtube/vitess/go/vt/tabletmanager/agentrpctest" - "github.com/youtube/vitess/go/vt/tabletmanager/grpctmclient" + "github.com/youtube/vitess/go/vt/vttablet/agentrpctest" + "github.com/youtube/vitess/go/vt/vttablet/grpctmclient" "google.golang.org/grpc" tabletmanagerservicepb "github.com/youtube/vitess/go/vt/proto/tabletmanagerservice" diff --git a/go/vt/tabletserver/queryservice/fakes/error_query_service.go b/go/vt/vttablet/queryservice/fakes/error_query_service.go similarity index 89% rename from 
go/vt/tabletserver/queryservice/fakes/error_query_service.go rename to go/vt/vttablet/queryservice/fakes/error_query_service.go index f2ad434d6eb..a5c01701720 100644 --- a/go/vt/tabletserver/queryservice/fakes/error_query_service.go +++ b/go/vt/vttablet/queryservice/fakes/error_query_service.go @@ -3,7 +3,7 @@ package fakes import ( "fmt" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" "golang.org/x/net/context" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/tabletserver/queryservice/fakes/stream_health_query_service.go b/go/vt/vttablet/queryservice/fakes/stream_health_query_service.go similarity index 98% rename from go/vt/tabletserver/queryservice/fakes/stream_health_query_service.go rename to go/vt/vttablet/queryservice/fakes/stream_health_query_service.go index 476a9dfc896..e1161567a41 100644 --- a/go/vt/tabletserver/queryservice/fakes/stream_health_query_service.go +++ b/go/vt/vttablet/queryservice/fakes/stream_health_query_service.go @@ -7,7 +7,7 @@ import ( querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" ) const ( diff --git a/go/vt/tabletserver/queryservice/queryservice.go b/go/vt/vttablet/queryservice/queryservice.go similarity index 98% rename from go/vt/tabletserver/queryservice/queryservice.go rename to go/vt/vttablet/queryservice/queryservice.go index fa5b1505fb9..9f72127ea66 100644 --- a/go/vt/tabletserver/queryservice/queryservice.go +++ b/go/vt/vttablet/queryservice/queryservice.go @@ -12,7 +12,7 @@ import ( "golang.org/x/net/context" "github.com/youtube/vitess/go/sqltypes" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" querypb "github.com/youtube/vitess/go/vt/proto/query" ) diff --git 
a/go/vt/tabletserver/queryservice/wrapped.go b/go/vt/vttablet/queryservice/wrapped.go similarity index 99% rename from go/vt/tabletserver/queryservice/wrapped.go rename to go/vt/vttablet/queryservice/wrapped.go index 5d886746b4a..635df7a660a 100644 --- a/go/vt/tabletserver/queryservice/wrapped.go +++ b/go/vt/vttablet/queryservice/wrapped.go @@ -8,7 +8,7 @@ import ( "golang.org/x/net/context" "github.com/youtube/vitess/go/sqltypes" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/tabletserver/sandboxconn/sandboxconn.go b/go/vt/vttablet/sandboxconn/sandboxconn.go similarity index 99% rename from go/vt/tabletserver/sandboxconn/sandboxconn.go rename to go/vt/vttablet/sandboxconn/sandboxconn.go index 14093bafa9e..21474c2ec7c 100644 --- a/go/vt/tabletserver/sandboxconn/sandboxconn.go +++ b/go/vt/vttablet/sandboxconn/sandboxconn.go @@ -11,8 +11,8 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/sync2" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" "github.com/youtube/vitess/go/vt/vterrors" "golang.org/x/net/context" diff --git a/go/vt/tabletserver/sysloglogger/sysloglogger.go b/go/vt/vttablet/sysloglogger/sysloglogger.go similarity index 96% rename from go/vt/tabletserver/sysloglogger/sysloglogger.go rename to go/vt/vttablet/sysloglogger/sysloglogger.go index 52cd67f6718..2bd34b396da 100644 --- a/go/vt/tabletserver/sysloglogger/sysloglogger.go +++ b/go/vt/vttablet/sysloglogger/sysloglogger.go @@ -7,7 +7,7 @@ import ( log "github.com/golang/glog" "github.com/youtube/vitess/go/vt/servenv" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + 
"github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" ) // syslogWriter is an interface that wraps syslog.Writer, so it can be mocked in unit tests. diff --git a/go/vt/tabletserver/sysloglogger/sysloglogger_test.go b/go/vt/vttablet/sysloglogger/sysloglogger_test.go similarity index 98% rename from go/vt/tabletserver/sysloglogger/sysloglogger_test.go rename to go/vt/vttablet/sysloglogger/sysloglogger_test.go index d3081e20d25..77999b83ce6 100644 --- a/go/vt/tabletserver/sysloglogger/sysloglogger_test.go +++ b/go/vt/vttablet/sysloglogger/sysloglogger_test.go @@ -11,7 +11,7 @@ import ( "golang.org/x/net/context" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" ) // fakeWriter is a mock of the real syslog writer, to enable capturing and playing back of log messages in unit testing. diff --git a/go/vt/tabletserver/tabletconn/grpc_error.go b/go/vt/vttablet/tabletconn/grpc_error.go similarity index 100% rename from go/vt/tabletserver/tabletconn/grpc_error.go rename to go/vt/vttablet/tabletconn/grpc_error.go diff --git a/go/vt/tabletserver/tabletconn/grpc_error_test.go b/go/vt/vttablet/tabletconn/grpc_error_test.go similarity index 100% rename from go/vt/tabletserver/tabletconn/grpc_error_test.go rename to go/vt/vttablet/tabletconn/grpc_error_test.go diff --git a/go/vt/tabletserver/tabletconn/tablet_conn.go b/go/vt/vttablet/tabletconn/tablet_conn.go similarity index 96% rename from go/vt/tabletserver/tabletconn/tablet_conn.go rename to go/vt/vttablet/tabletconn/tablet_conn.go index eed669e453d..7ad40e27b01 100644 --- a/go/vt/tabletserver/tabletconn/tablet_conn.go +++ b/go/vt/vttablet/tabletconn/tablet_conn.go @@ -10,7 +10,7 @@ import ( log "github.com/golang/glog" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" "github.com/youtube/vitess/go/vt/vterrors" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" 
diff --git a/go/vt/tabletserver/tabletconntest/fakequeryservice.go b/go/vt/vttablet/tabletconntest/fakequeryservice.go similarity index 99% rename from go/vt/tabletserver/tabletconntest/fakequeryservice.go rename to go/vt/vttablet/tabletconntest/fakequeryservice.go index 50a35748fc2..6a8fdffec83 100644 --- a/go/vt/tabletserver/tabletconntest/fakequeryservice.go +++ b/go/vt/vttablet/tabletconntest/fakequeryservice.go @@ -11,7 +11,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/callerid" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/vt/tabletserver/tabletconntest/tabletconntest.go b/go/vt/vttablet/tabletconntest/tabletconntest.go similarity index 99% rename from go/vt/tabletserver/tabletconntest/tabletconntest.go rename to go/vt/vttablet/tabletconntest/tabletconntest.go index bd9dbacd2de..b634095f38b 100644 --- a/go/vt/tabletserver/tabletconntest/tabletconntest.go +++ b/go/vt/vttablet/tabletconntest/tabletconntest.go @@ -15,8 +15,8 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/callerid" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice" - "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" + "github.com/youtube/vitess/go/vt/vttablet/tabletconn" "github.com/youtube/vitess/go/vt/vterrors" "golang.org/x/net/context" diff --git a/go/vt/tabletmanager/action_agent.go b/go/vt/vttablet/tabletmanager/action_agent.go similarity index 99% rename from go/vt/tabletmanager/action_agent.go rename to go/vt/vttablet/tabletmanager/action_agent.go index 3c28cf8bc1b..c8e8f6be195 100644 --- a/go/vt/tabletmanager/action_agent.go +++ b/go/vt/vttablet/tabletmanager/action_agent.go @@ -48,8 +48,8 
@@ import ( "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/mysqlctl" "github.com/youtube/vitess/go/vt/servenv" - "github.com/youtube/vitess/go/vt/tabletserver" - "github.com/youtube/vitess/go/vt/tabletserver/tabletservermock" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver" + "github.com/youtube/vitess/go/vt/vttablet/tabletservermock" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/topoproto" diff --git a/go/vt/tabletmanager/binlog_players.go b/go/vt/vttablet/tabletmanager/binlog_players.go similarity index 100% rename from go/vt/tabletmanager/binlog_players.go rename to go/vt/vttablet/tabletmanager/binlog_players.go diff --git a/go/vt/tabletmanager/binlog_players_test.go b/go/vt/vttablet/tabletmanager/binlog_players_test.go similarity index 99% rename from go/vt/tabletmanager/binlog_players_test.go rename to go/vt/vttablet/tabletmanager/binlog_players_test.go index e0219c4b266..ea9709fef5d 100644 --- a/go/vt/tabletmanager/binlog_players_test.go +++ b/go/vt/vttablet/tabletmanager/binlog_players_test.go @@ -19,9 +19,9 @@ import ( "github.com/youtube/vitess/go/vt/key" "github.com/youtube/vitess/go/vt/mysqlctl" "github.com/youtube/vitess/go/vt/mysqlctl/tmutils" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice/fakes" - "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" + "github.com/youtube/vitess/go/vt/vttablet/queryservice/fakes" + "github.com/youtube/vitess/go/vt/vttablet/tabletconn" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/memorytopo" diff --git a/go/vt/tabletmanager/events/state_change.go b/go/vt/vttablet/tabletmanager/events/state_change.go similarity index 100% rename from go/vt/tabletmanager/events/state_change.go rename to go/vt/vttablet/tabletmanager/events/state_change.go diff --git a/go/vt/tabletmanager/healthcheck.go 
b/go/vt/vttablet/tabletmanager/healthcheck.go similarity index 100% rename from go/vt/tabletmanager/healthcheck.go rename to go/vt/vttablet/tabletmanager/healthcheck.go diff --git a/go/vt/tabletmanager/healthcheck_test.go b/go/vt/vttablet/tabletmanager/healthcheck_test.go similarity index 99% rename from go/vt/tabletmanager/healthcheck_test.go rename to go/vt/vttablet/tabletmanager/healthcheck_test.go index 1d6a0e686a7..673877919c6 100644 --- a/go/vt/tabletmanager/healthcheck_test.go +++ b/go/vt/vttablet/tabletmanager/healthcheck_test.go @@ -20,8 +20,8 @@ import ( "github.com/youtube/vitess/go/vt/binlog/binlogplayer" "github.com/youtube/vitess/go/vt/health" "github.com/youtube/vitess/go/vt/mysqlctl" - "github.com/youtube/vitess/go/vt/tabletserver" - "github.com/youtube/vitess/go/vt/tabletserver/tabletservermock" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver" + "github.com/youtube/vitess/go/vt/vttablet/tabletservermock" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/memorytopo" diff --git a/go/vt/tabletmanager/init_tablet.go b/go/vt/vttablet/tabletmanager/init_tablet.go similarity index 100% rename from go/vt/tabletmanager/init_tablet.go rename to go/vt/vttablet/tabletmanager/init_tablet.go diff --git a/go/vt/tabletmanager/init_tablet_test.go b/go/vt/vttablet/tabletmanager/init_tablet_test.go similarity index 100% rename from go/vt/tabletmanager/init_tablet_test.go rename to go/vt/vttablet/tabletmanager/init_tablet_test.go diff --git a/go/vt/tabletmanager/initial_rebuild.go b/go/vt/vttablet/tabletmanager/initial_rebuild.go similarity index 100% rename from go/vt/tabletmanager/initial_rebuild.go rename to go/vt/vttablet/tabletmanager/initial_rebuild.go diff --git a/go/vt/tabletmanager/orchestrator.go b/go/vt/vttablet/tabletmanager/orchestrator.go similarity index 100% rename from go/vt/tabletmanager/orchestrator.go rename to go/vt/vttablet/tabletmanager/orchestrator.go diff --git a/go/vt/tabletmanager/replication_reporter.go 
b/go/vt/vttablet/tabletmanager/replication_reporter.go similarity index 100% rename from go/vt/tabletmanager/replication_reporter.go rename to go/vt/vttablet/tabletmanager/replication_reporter.go diff --git a/go/vt/tabletmanager/replication_reporter_test.go b/go/vt/vttablet/tabletmanager/replication_reporter_test.go similarity index 100% rename from go/vt/tabletmanager/replication_reporter_test.go rename to go/vt/vttablet/tabletmanager/replication_reporter_test.go diff --git a/go/vt/tabletmanager/restore.go b/go/vt/vttablet/tabletmanager/restore.go similarity index 100% rename from go/vt/tabletmanager/restore.go rename to go/vt/vttablet/tabletmanager/restore.go diff --git a/go/vt/tabletmanager/rpc_actions.go b/go/vt/vttablet/tabletmanager/rpc_actions.go similarity index 100% rename from go/vt/tabletmanager/rpc_actions.go rename to go/vt/vttablet/tabletmanager/rpc_actions.go diff --git a/go/vt/tabletmanager/rpc_agent.go b/go/vt/vttablet/tabletmanager/rpc_agent.go similarity index 100% rename from go/vt/tabletmanager/rpc_agent.go rename to go/vt/vttablet/tabletmanager/rpc_agent.go diff --git a/go/vt/tabletmanager/rpc_backup.go b/go/vt/vttablet/tabletmanager/rpc_backup.go similarity index 100% rename from go/vt/tabletmanager/rpc_backup.go rename to go/vt/vttablet/tabletmanager/rpc_backup.go diff --git a/go/vt/tabletmanager/rpc_binlog_players.go b/go/vt/vttablet/tabletmanager/rpc_binlog_players.go similarity index 100% rename from go/vt/tabletmanager/rpc_binlog_players.go rename to go/vt/vttablet/tabletmanager/rpc_binlog_players.go diff --git a/go/vt/tabletmanager/rpc_external_reparent.go b/go/vt/vttablet/tabletmanager/rpc_external_reparent.go similarity index 99% rename from go/vt/tabletmanager/rpc_external_reparent.go rename to go/vt/vttablet/tabletmanager/rpc_external_reparent.go index 25ece48537d..7164d22e4aa 100644 --- a/go/vt/tabletmanager/rpc_external_reparent.go +++ b/go/vt/vttablet/tabletmanager/rpc_external_reparent.go @@ -15,7 +15,7 @@ import ( 
"github.com/youtube/vitess/go/stats" "github.com/youtube/vitess/go/trace" "github.com/youtube/vitess/go/vt/concurrency" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/topoproto" "github.com/youtube/vitess/go/vt/topotools/events" diff --git a/go/vt/tabletmanager/rpc_query.go b/go/vt/vttablet/tabletmanager/rpc_query.go similarity index 100% rename from go/vt/tabletmanager/rpc_query.go rename to go/vt/vttablet/tabletmanager/rpc_query.go diff --git a/go/vt/tabletmanager/rpc_replication.go b/go/vt/vttablet/tabletmanager/rpc_replication.go similarity index 100% rename from go/vt/tabletmanager/rpc_replication.go rename to go/vt/vttablet/tabletmanager/rpc_replication.go diff --git a/go/vt/tabletmanager/rpc_schema.go b/go/vt/vttablet/tabletmanager/rpc_schema.go similarity index 100% rename from go/vt/tabletmanager/rpc_schema.go rename to go/vt/vttablet/tabletmanager/rpc_schema.go diff --git a/go/vt/tabletmanager/rpc_server.go b/go/vt/vttablet/tabletmanager/rpc_server.go similarity index 100% rename from go/vt/tabletmanager/rpc_server.go rename to go/vt/vttablet/tabletmanager/rpc_server.go diff --git a/go/vt/tabletmanager/state_change.go b/go/vt/vttablet/tabletmanager/state_change.go similarity index 98% rename from go/vt/tabletmanager/state_change.go rename to go/vt/vttablet/tabletmanager/state_change.go index 550121b3cbd..2996cef5fbf 100644 --- a/go/vt/tabletmanager/state_change.go +++ b/go/vt/vttablet/tabletmanager/state_change.go @@ -18,9 +18,9 @@ import ( "github.com/youtube/vitess/go/event" "github.com/youtube/vitess/go/trace" "github.com/youtube/vitess/go/vt/mysqlctl" - "github.com/youtube/vitess/go/vt/tabletmanager/events" - "github.com/youtube/vitess/go/vt/tabletserver/rules" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletmanager/events" + 
"github.com/youtube/vitess/go/vt/vttablet/tabletserver/rules" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/topoproto" diff --git a/go/vt/tabletserver/codex.go b/go/vt/vttablet/tabletserver/codex.go similarity index 99% rename from go/vt/tabletserver/codex.go rename to go/vt/vttablet/tabletserver/codex.go index ae3ea261d2f..eef0ac037c1 100644 --- a/go/vt/tabletserver/codex.go +++ b/go/vt/vttablet/tabletserver/codex.go @@ -7,7 +7,7 @@ package tabletserver import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/tabletserver/codex_test.go b/go/vt/vttablet/tabletserver/codex_test.go similarity index 99% rename from go/vt/tabletserver/codex_test.go rename to go/vt/vttablet/tabletserver/codex_test.go index 976544ca58c..9d3107ccc2f 100644 --- a/go/vt/tabletserver/codex_test.go +++ b/go/vt/vttablet/tabletserver/codex_test.go @@ -14,7 +14,7 @@ import ( querypb "github.com/youtube/vitess/go/vt/proto/query" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" "github.com/youtube/vitess/go/vt/vterrors" ) diff --git a/go/vt/tabletserver/comments.go b/go/vt/vttablet/tabletserver/comments.go similarity index 100% rename from go/vt/tabletserver/comments.go rename to go/vt/vttablet/tabletserver/comments.go diff --git a/go/vt/tabletserver/comments_test.go b/go/vt/vttablet/tabletserver/comments_test.go similarity index 100% rename from go/vt/tabletserver/comments_test.go rename to go/vt/vttablet/tabletserver/comments_test.go diff --git a/go/vt/tabletserver/connpool/dbconn.go 
b/go/vt/vttablet/tabletserver/connpool/dbconn.go similarity index 99% rename from go/vt/tabletserver/connpool/dbconn.go rename to go/vt/vttablet/tabletserver/connpool/dbconn.go index 0a17e7c2abe..ee3dac20a79 100644 --- a/go/vt/tabletserver/connpool/dbconn.go +++ b/go/vt/vttablet/tabletserver/connpool/dbconn.go @@ -17,7 +17,7 @@ import ( "github.com/youtube/vitess/go/trace" "github.com/youtube/vitess/go/vt/dbconnpool" querypb "github.com/youtube/vitess/go/vt/proto/query" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" "golang.org/x/net/context" ) diff --git a/go/vt/tabletserver/connpool/dbconn_test.go b/go/vt/vttablet/tabletserver/connpool/dbconn_test.go similarity index 100% rename from go/vt/tabletserver/connpool/dbconn_test.go rename to go/vt/vttablet/tabletserver/connpool/dbconn_test.go diff --git a/go/vt/tabletserver/connpool/pool.go b/go/vt/vttablet/tabletserver/connpool/pool.go similarity index 98% rename from go/vt/tabletserver/connpool/pool.go rename to go/vt/vttablet/tabletserver/connpool/pool.go index a9f7f0ee770..889ecef7510 100644 --- a/go/vt/tabletserver/connpool/pool.go +++ b/go/vt/vttablet/tabletserver/connpool/pool.go @@ -13,7 +13,7 @@ import ( "github.com/youtube/vitess/go/stats" "github.com/youtube/vitess/go/vt/dbconnpool" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/vterrors" "golang.org/x/net/context" ) diff --git a/go/vt/tabletserver/connpool/pool_test.go b/go/vt/vttablet/tabletserver/connpool/pool_test.go similarity index 100% rename from go/vt/tabletserver/connpool/pool_test.go rename to go/vt/vttablet/tabletserver/connpool/pool_test.go diff --git a/go/vt/tabletserver/controller.go b/go/vt/vttablet/tabletserver/controller.go similarity index 94% rename from go/vt/tabletserver/controller.go rename to 
go/vt/vttablet/tabletserver/controller.go index d74b619af5e..f4bda5f3dcd 100644 --- a/go/vt/tabletserver/controller.go +++ b/go/vt/vttablet/tabletserver/controller.go @@ -9,8 +9,8 @@ import ( "github.com/youtube/vitess/go/vt/dbconfigs" "github.com/youtube/vitess/go/vt/mysqlctl" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice" - "github.com/youtube/vitess/go/vt/tabletserver/rules" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/rules" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/vt/tabletserver/messager/cache.go b/go/vt/vttablet/tabletserver/messager/cache.go similarity index 100% rename from go/vt/tabletserver/messager/cache.go rename to go/vt/vttablet/tabletserver/messager/cache.go diff --git a/go/vt/tabletserver/messager/cache_test.go b/go/vt/vttablet/tabletserver/messager/cache_test.go similarity index 100% rename from go/vt/tabletserver/messager/cache_test.go rename to go/vt/vttablet/tabletserver/messager/cache_test.go diff --git a/go/vt/tabletserver/messager/engine.go b/go/vt/vttablet/tabletserver/messager/engine.go similarity index 96% rename from go/vt/tabletserver/messager/engine.go rename to go/vt/vttablet/tabletserver/messager/engine.go index fd5d20942ee..06551bcb561 100644 --- a/go/vt/tabletserver/messager/engine.go +++ b/go/vt/vttablet/tabletserver/messager/engine.go @@ -14,9 +14,9 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/dbconfigs" - "github.com/youtube/vitess/go/vt/tabletserver/connpool" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/connpool" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/vterrors" 
querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/tabletserver/messager/engine_test.go b/go/vt/vttablet/tabletserver/messager/engine_test.go similarity index 97% rename from go/vt/tabletserver/messager/engine_test.go rename to go/vt/vttablet/tabletserver/messager/engine_test.go index f64bfef7408..367d6c7f1cb 100644 --- a/go/vt/tabletserver/messager/engine_test.go +++ b/go/vt/vttablet/tabletserver/messager/engine_test.go @@ -16,8 +16,8 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/sync2" "github.com/youtube/vitess/go/vt/dbconfigs" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" ) var meTable = &schema.Table{ diff --git a/go/vt/tabletserver/messager/message_manager.go b/go/vt/vttablet/tabletserver/messager/message_manager.go similarity index 98% rename from go/vt/tabletserver/messager/message_manager.go rename to go/vt/vttablet/tabletserver/messager/message_manager.go index 0d6ad59d79d..db606b72fcb 100644 --- a/go/vt/tabletserver/messager/message_manager.go +++ b/go/vt/vttablet/tabletserver/messager/message_manager.go @@ -15,9 +15,9 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/timer" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/connpool" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/connpool" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" ) type messageReceiver struct { diff --git a/go/vt/tabletserver/messager/message_manager_test.go b/go/vt/vttablet/tabletserver/messager/message_manager_test.go similarity 
index 99% rename from go/vt/tabletserver/messager/message_manager_test.go rename to go/vt/vttablet/tabletserver/messager/message_manager_test.go index 859ed95abc1..2e59c1056dc 100644 --- a/go/vt/tabletserver/messager/message_manager_test.go +++ b/go/vt/vttablet/tabletserver/messager/message_manager_test.go @@ -18,8 +18,8 @@ import ( "github.com/youtube/vitess/go/sync2" "github.com/youtube/vitess/go/vt/dbconfigs" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/connpool" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/connpool" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" querypb "github.com/youtube/vitess/go/vt/proto/query" ) diff --git a/go/vt/tabletserver/planbuilder/ddl.go b/go/vt/vttablet/tabletserver/planbuilder/ddl.go similarity index 93% rename from go/vt/tabletserver/planbuilder/ddl.go rename to go/vt/vttablet/tabletserver/planbuilder/ddl.go index 264118c2c10..f0d3a271e14 100644 --- a/go/vt/tabletserver/planbuilder/ddl.go +++ b/go/vt/vttablet/tabletserver/planbuilder/ddl.go @@ -6,7 +6,7 @@ package planbuilder import ( "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" ) // DDLPlan provides a plan for DDLs. 
diff --git a/go/vt/tabletserver/planbuilder/dml.go b/go/vt/vttablet/tabletserver/planbuilder/dml.go similarity index 99% rename from go/vt/tabletserver/planbuilder/dml.go rename to go/vt/vttablet/tabletserver/planbuilder/dml.go index 406d7b0429d..343f60595f6 100644 --- a/go/vt/tabletserver/planbuilder/dml.go +++ b/go/vt/vttablet/tabletserver/planbuilder/dml.go @@ -10,7 +10,7 @@ import ( log "github.com/golang/glog" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" ) func analyzeUpdate(upd *sqlparser.Update, tables map[string]*schema.Table) (plan *Plan, err error) { diff --git a/go/vt/tabletserver/planbuilder/plan.go b/go/vt/vttablet/tabletserver/planbuilder/plan.go similarity index 99% rename from go/vt/tabletserver/planbuilder/plan.go rename to go/vt/vttablet/tabletserver/planbuilder/plan.go index 462dc37df49..18a8576bc0a 100644 --- a/go/vt/tabletserver/planbuilder/plan.go +++ b/go/vt/vttablet/tabletserver/planbuilder/plan.go @@ -11,7 +11,7 @@ import ( "github.com/youtube/vitess/go/vt/sqlparser" "github.com/youtube/vitess/go/vt/tableacl" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" ) var ( diff --git a/go/vt/tabletserver/planbuilder/plan_test.go b/go/vt/vttablet/tabletserver/planbuilder/plan_test.go similarity index 99% rename from go/vt/tabletserver/planbuilder/plan_test.go rename to go/vt/vttablet/tabletserver/planbuilder/plan_test.go index 46de73ce4b7..dced9aebca9 100644 --- a/go/vt/tabletserver/planbuilder/plan_test.go +++ b/go/vt/vttablet/tabletserver/planbuilder/plan_test.go @@ -20,7 +20,7 @@ import ( "github.com/youtube/vitess/go/testfiles" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" ) // MarshalJSON is only used for testing. 
diff --git a/go/vt/tabletserver/planbuilder/query_gen.go b/go/vt/vttablet/tabletserver/planbuilder/query_gen.go similarity index 98% rename from go/vt/tabletserver/planbuilder/query_gen.go rename to go/vt/vttablet/tabletserver/planbuilder/query_gen.go index ab1c46e3b2d..cf3d1d8a039 100644 --- a/go/vt/tabletserver/planbuilder/query_gen.go +++ b/go/vt/vttablet/tabletserver/planbuilder/query_gen.go @@ -6,7 +6,7 @@ package planbuilder import ( "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" ) // GenerateFullQuery generates the full query from the ast. diff --git a/go/vt/tabletserver/query_engine.go b/go/vt/vttablet/tabletserver/query_engine.go similarity index 97% rename from go/vt/tabletserver/query_engine.go rename to go/vt/vttablet/tabletserver/query_engine.go index e9ebc346731..fc13c584928 100644 --- a/go/vt/tabletserver/query_engine.go +++ b/go/vt/vttablet/tabletserver/query_engine.go @@ -27,11 +27,11 @@ import ( "github.com/youtube/vitess/go/vt/sqlparser" "github.com/youtube/vitess/go/vt/tableacl" tacl "github.com/youtube/vitess/go/vt/tableacl/acl" - "github.com/youtube/vitess/go/vt/tabletserver/connpool" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" - "github.com/youtube/vitess/go/vt/tabletserver/planbuilder" - "github.com/youtube/vitess/go/vt/tabletserver/rules" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/connpool" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/planbuilder" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/rules" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/tabletserver/query_engine_test.go 
b/go/vt/vttablet/tabletserver/query_engine_test.go similarity index 95% rename from go/vt/tabletserver/query_engine_test.go rename to go/vt/vttablet/tabletserver/query_engine_test.go index a6e3d1303a4..eb7f606ed34 100644 --- a/go/vt/tabletserver/query_engine_test.go +++ b/go/vt/vttablet/tabletserver/query_engine_test.go @@ -15,9 +15,9 @@ import ( "github.com/youtube/vitess/go/mysqlconn/fakesqldb" "github.com/youtube/vitess/go/sqltypes" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema/schematest" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema/schematest" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" ) func TestStrictMode(t *testing.T) { diff --git a/go/vt/tabletserver/query_executor.go b/go/vt/vttablet/tabletserver/query_executor.go similarity index 98% rename from go/vt/tabletserver/query_executor.go rename to go/vt/vttablet/tabletserver/query_executor.go index a1503f26eae..15258ba18ed 100644 --- a/go/vt/tabletserver/query_executor.go +++ b/go/vt/vttablet/tabletserver/query_executor.go @@ -20,12 +20,12 @@ import ( "github.com/youtube/vitess/go/vt/callerid" "github.com/youtube/vitess/go/vt/callinfo" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/connpool" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" - "github.com/youtube/vitess/go/vt/tabletserver/messager" - "github.com/youtube/vitess/go/vt/tabletserver/planbuilder" - "github.com/youtube/vitess/go/vt/tabletserver/rules" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/connpool" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/messager" + 
"github.com/youtube/vitess/go/vt/vttablet/tabletserver/planbuilder" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/rules" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/tabletserver/query_executor_test.go b/go/vt/vttablet/tabletserver/query_executor_test.go similarity index 99% rename from go/vt/tabletserver/query_executor_test.go rename to go/vt/vttablet/tabletserver/query_executor_test.go index f24e9396918..d4ff45d00f3 100644 --- a/go/vt/tabletserver/query_executor_test.go +++ b/go/vt/vttablet/tabletserver/query_executor_test.go @@ -23,9 +23,9 @@ import ( "github.com/youtube/vitess/go/vt/callinfo/fakecallinfo" "github.com/youtube/vitess/go/vt/tableacl" "github.com/youtube/vitess/go/vt/tableacl/simpleacl" - "github.com/youtube/vitess/go/vt/tabletserver/planbuilder" - "github.com/youtube/vitess/go/vt/tabletserver/rules" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/planbuilder" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/rules" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/tabletserver/query_list.go b/go/vt/vttablet/tabletserver/query_list.go similarity index 100% rename from go/vt/tabletserver/query_list.go rename to go/vt/vttablet/tabletserver/query_list.go diff --git a/go/vt/tabletserver/query_list_test.go b/go/vt/vttablet/tabletserver/query_list_test.go similarity index 100% rename from go/vt/tabletserver/query_list_test.go rename to go/vt/vttablet/tabletserver/query_list_test.go diff --git a/go/vt/tabletserver/querylogz.go b/go/vt/vttablet/tabletserver/querylogz.go similarity index 98% rename from go/vt/tabletserver/querylogz.go rename to go/vt/vttablet/tabletserver/querylogz.go index 
158598c2d69..4197a226888 100644 --- a/go/vt/tabletserver/querylogz.go +++ b/go/vt/vttablet/tabletserver/querylogz.go @@ -16,7 +16,7 @@ import ( log "github.com/golang/glog" "github.com/youtube/vitess/go/acl" "github.com/youtube/vitess/go/vt/logz" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" ) var ( diff --git a/go/vt/tabletserver/querylogz_test.go b/go/vt/vttablet/tabletserver/querylogz_test.go similarity index 96% rename from go/vt/tabletserver/querylogz_test.go rename to go/vt/vttablet/tabletserver/querylogz_test.go index 62f95e36855..eb50bf2ff6d 100644 --- a/go/vt/tabletserver/querylogz_test.go +++ b/go/vt/vttablet/tabletserver/querylogz_test.go @@ -14,8 +14,8 @@ import ( "time" "github.com/youtube/vitess/go/vt/callerid" - "github.com/youtube/vitess/go/vt/tabletserver/planbuilder" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/planbuilder" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" "golang.org/x/net/context" ) diff --git a/go/vt/tabletserver/querytypes/bound_query.go b/go/vt/vttablet/tabletserver/querytypes/bound_query.go similarity index 100% rename from go/vt/tabletserver/querytypes/bound_query.go rename to go/vt/vttablet/tabletserver/querytypes/bound_query.go diff --git a/go/vt/tabletserver/querytypes/proto3.go b/go/vt/vttablet/tabletserver/querytypes/proto3.go similarity index 100% rename from go/vt/tabletserver/querytypes/proto3.go rename to go/vt/vttablet/tabletserver/querytypes/proto3.go diff --git a/go/vt/tabletserver/querytypes/proto3_test.go b/go/vt/vttablet/tabletserver/querytypes/proto3_test.go similarity index 100% rename from go/vt/tabletserver/querytypes/proto3_test.go rename to go/vt/vttablet/tabletserver/querytypes/proto3_test.go diff --git a/go/vt/tabletserver/querytypes/query_split.go b/go/vt/vttablet/tabletserver/querytypes/query_split.go similarity index 100% rename 
from go/vt/tabletserver/querytypes/query_split.go rename to go/vt/vttablet/tabletserver/querytypes/query_split.go diff --git a/go/vt/tabletserver/queryz.go b/go/vt/vttablet/tabletserver/queryz.go similarity index 98% rename from go/vt/tabletserver/queryz.go rename to go/vt/vttablet/tabletserver/queryz.go index 632b91e1023..20cf7d7be02 100644 --- a/go/vt/tabletserver/queryz.go +++ b/go/vt/vttablet/tabletserver/queryz.go @@ -14,7 +14,7 @@ import ( log "github.com/golang/glog" "github.com/youtube/vitess/go/acl" "github.com/youtube/vitess/go/vt/logz" - "github.com/youtube/vitess/go/vt/tabletserver/planbuilder" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/planbuilder" ) var ( diff --git a/go/vt/tabletserver/queryz_test.go b/go/vt/vttablet/tabletserver/queryz_test.go similarity index 95% rename from go/vt/tabletserver/queryz_test.go rename to go/vt/vttablet/tabletserver/queryz_test.go index b8efe01b823..874bf1ff965 100644 --- a/go/vt/tabletserver/queryz_test.go +++ b/go/vt/vttablet/tabletserver/queryz_test.go @@ -14,8 +14,8 @@ import ( "time" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" - "github.com/youtube/vitess/go/vt/tabletserver/planbuilder" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/planbuilder" ) func TestQueryzHandler(t *testing.T) { diff --git a/go/vt/tabletserver/replication_watcher.go b/go/vt/vttablet/tabletserver/replication_watcher.go similarity index 97% rename from go/vt/tabletserver/replication_watcher.go rename to go/vt/vttablet/tabletserver/replication_watcher.go index c2957b7dbfc..e886c16e64f 100644 --- a/go/vt/tabletserver/replication_watcher.go +++ b/go/vt/vttablet/tabletserver/replication_watcher.go @@ -17,8 +17,8 @@ import ( "github.com/youtube/vitess/go/vt/binlog/eventtoken" "github.com/youtube/vitess/go/vt/dbconfigs" "github.com/youtube/vitess/go/vt/mysqlctl" - 
"github.com/youtube/vitess/go/vt/tabletserver/engines/schema" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" binlogdatapb "github.com/youtube/vitess/go/vt/proto/binlogdata" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/tabletserver/rules/map.go b/go/vt/vttablet/tabletserver/rules/map.go similarity index 97% rename from go/vt/tabletserver/rules/map.go rename to go/vt/vttablet/tabletserver/rules/map.go index 8704e2a9113..931a51c0ca8 100644 --- a/go/vt/tabletserver/rules/map.go +++ b/go/vt/vttablet/tabletserver/rules/map.go @@ -10,7 +10,7 @@ import ( "sync" log "github.com/golang/glog" - "github.com/youtube/vitess/go/vt/tabletserver/planbuilder" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/planbuilder" ) // Map is the maintainer of Rules from multiple sources diff --git a/go/vt/tabletserver/rules/map_test.go b/go/vt/vttablet/tabletserver/rules/map_test.go similarity index 99% rename from go/vt/tabletserver/rules/map_test.go rename to go/vt/vttablet/tabletserver/rules/map_test.go index 945833e4d40..c88d6f8c094 100644 --- a/go/vt/tabletserver/rules/map_test.go +++ b/go/vt/vttablet/tabletserver/rules/map_test.go @@ -10,7 +10,7 @@ import ( "strings" "testing" - "github.com/youtube/vitess/go/vt/tabletserver/planbuilder" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/planbuilder" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" ) diff --git a/go/vt/tabletserver/rules/rules.go b/go/vt/vttablet/tabletserver/rules/rules.go similarity index 99% rename from go/vt/tabletserver/rules/rules.go rename to go/vt/vttablet/tabletserver/rules/rules.go index c478ce466d2..a0cfb513f6d 100644 --- a/go/vt/tabletserver/rules/rules.go +++ b/go/vt/vttablet/tabletserver/rules/rules.go @@ -13,7 +13,7 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/key" - 
"github.com/youtube/vitess/go/vt/tabletserver/planbuilder" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/planbuilder" "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/tabletserver/rules/rules_test.go b/go/vt/vttablet/tabletserver/rules/rules_test.go similarity index 99% rename from go/vt/tabletserver/rules/rules_test.go rename to go/vt/vttablet/tabletserver/rules/rules_test.go index 3b0f84a2287..07eb0e23e4f 100644 --- a/go/vt/tabletserver/rules/rules_test.go +++ b/go/vt/vttablet/tabletserver/rules/rules_test.go @@ -13,7 +13,7 @@ import ( "testing" "github.com/youtube/vitess/go/vt/key" - "github.com/youtube/vitess/go/vt/tabletserver/planbuilder" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/planbuilder" "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/tabletserver/engines/schema/engine.go b/go/vt/vttablet/tabletserver/schema/engine.go similarity index 99% rename from go/vt/tabletserver/engines/schema/engine.go rename to go/vt/vttablet/tabletserver/schema/engine.go index 3b9a057451a..e987fa9075b 100644 --- a/go/vt/tabletserver/engines/schema/engine.go +++ b/go/vt/vttablet/tabletserver/schema/engine.go @@ -24,8 +24,8 @@ import ( "github.com/youtube/vitess/go/timer" "github.com/youtube/vitess/go/vt/concurrency" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/connpool" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/connpool" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/vterrors" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" diff --git a/go/vt/tabletserver/engines/schema/engine_test.go b/go/vt/vttablet/tabletserver/schema/engine_test.go similarity index 98% rename from go/vt/tabletserver/engines/schema/engine_test.go rename to 
go/vt/vttablet/tabletserver/schema/engine_test.go index e5404fc175e..862eb2a11b1 100644 --- a/go/vt/tabletserver/engines/schema/engine_test.go +++ b/go/vt/vttablet/tabletserver/schema/engine_test.go @@ -20,8 +20,8 @@ import ( "github.com/youtube/vitess/go/mysqlconn/fakesqldb" "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema/schematest" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema/schematest" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" querypb "github.com/youtube/vitess/go/vt/proto/query" ) diff --git a/go/vt/tabletserver/engines/schema/load_table.go b/go/vt/vttablet/tabletserver/schema/load_table.go similarity index 97% rename from go/vt/tabletserver/engines/schema/load_table.go rename to go/vt/vttablet/tabletserver/schema/load_table.go index 49ac181a706..a3570d76a34 100644 --- a/go/vt/tabletserver/engines/schema/load_table.go +++ b/go/vt/vttablet/tabletserver/schema/load_table.go @@ -13,8 +13,8 @@ import ( log "github.com/golang/glog" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/connpool" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/connpool" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" querypb "github.com/youtube/vitess/go/vt/proto/query" ) diff --git a/go/vt/tabletserver/engines/schema/load_table_test.go b/go/vt/vttablet/tabletserver/schema/load_table_test.go similarity index 99% rename from go/vt/tabletserver/engines/schema/load_table_test.go rename to go/vt/vttablet/tabletserver/schema/load_table_test.go index c7f06a1514c..5961902daaa 100644 --- a/go/vt/tabletserver/engines/schema/load_table_test.go +++ b/go/vt/vttablet/tabletserver/schema/load_table_test.go @@ -17,7 +17,7 @@ import ( 
"github.com/youtube/vitess/go/mysqlconn/fakesqldb" "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/connpool" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/connpool" querypb "github.com/youtube/vitess/go/vt/proto/query" ) diff --git a/go/vt/tabletserver/engines/schema/schema.go b/go/vt/vttablet/tabletserver/schema/schema.go similarity index 100% rename from go/vt/tabletserver/engines/schema/schema.go rename to go/vt/vttablet/tabletserver/schema/schema.go diff --git a/go/vt/tabletserver/engines/schema/schema_test.go b/go/vt/vttablet/tabletserver/schema/schema_test.go similarity index 100% rename from go/vt/tabletserver/engines/schema/schema_test.go rename to go/vt/vttablet/tabletserver/schema/schema_test.go diff --git a/go/vt/tabletserver/engines/schema/schematest/schematest.go b/go/vt/vttablet/tabletserver/schema/schematest/schematest.go similarity index 100% rename from go/vt/tabletserver/engines/schema/schematest/schematest.go rename to go/vt/vttablet/tabletserver/schema/schematest/schematest.go diff --git a/go/vt/tabletserver/engines/schema/schemaz.go b/go/vt/vttablet/tabletserver/schema/schemaz.go similarity index 100% rename from go/vt/tabletserver/engines/schema/schemaz.go rename to go/vt/vttablet/tabletserver/schema/schemaz.go diff --git a/go/vt/tabletserver/engines/schema/schemaz_test.go b/go/vt/vttablet/tabletserver/schema/schemaz_test.go similarity index 100% rename from go/vt/tabletserver/engines/schema/schemaz_test.go rename to go/vt/vttablet/tabletserver/schema/schemaz_test.go diff --git a/go/vt/tabletserver/splitquery/doc.go b/go/vt/vttablet/tabletserver/splitquery/doc.go similarity index 100% rename from go/vt/tabletserver/splitquery/doc.go rename to go/vt/vttablet/tabletserver/splitquery/doc.go diff --git a/go/vt/tabletserver/splitquery/equal_splits_algorithm.go b/go/vt/vttablet/tabletserver/splitquery/equal_splits_algorithm.go similarity index 99% rename 
from go/vt/tabletserver/splitquery/equal_splits_algorithm.go rename to go/vt/vttablet/tabletserver/splitquery/equal_splits_algorithm.go index 6bd1c9a7f6c..27542451cfb 100644 --- a/go/vt/tabletserver/splitquery/equal_splits_algorithm.go +++ b/go/vt/vttablet/tabletserver/splitquery/equal_splits_algorithm.go @@ -9,7 +9,7 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" querypb "github.com/youtube/vitess/go/vt/proto/query" ) diff --git a/go/vt/tabletserver/splitquery/equal_splits_algorithm_test.go b/go/vt/vttablet/tabletserver/splitquery/equal_splits_algorithm_test.go similarity index 97% rename from go/vt/tabletserver/splitquery/equal_splits_algorithm_test.go rename to go/vt/vttablet/tabletserver/splitquery/equal_splits_algorithm_test.go index 5be07f70d96..f4e764e25c1 100644 --- a/go/vt/tabletserver/splitquery/equal_splits_algorithm_test.go +++ b/go/vt/vttablet/tabletserver/splitquery/equal_splits_algorithm_test.go @@ -9,8 +9,8 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" - "github.com/youtube/vitess/go/vt/tabletserver/splitquery/splitquery_testing" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/splitquery/splitquery_testing" ) // Table-driven test for equal-splits algorithm. 
diff --git a/go/vt/tabletserver/splitquery/example_test.go b/go/vt/vttablet/tabletserver/splitquery/example_test.go similarity index 94% rename from go/vt/tabletserver/splitquery/example_test.go rename to go/vt/vttablet/tabletserver/splitquery/example_test.go index 890568722df..19e9718ec3b 100644 --- a/go/vt/tabletserver/splitquery/example_test.go +++ b/go/vt/vttablet/tabletserver/splitquery/example_test.go @@ -4,8 +4,8 @@ import ( "fmt" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" ) func Example() { diff --git a/go/vt/tabletserver/splitquery/full_scan_algorithm.go b/go/vt/vttablet/tabletserver/splitquery/full_scan_algorithm.go similarity index 98% rename from go/vt/tabletserver/splitquery/full_scan_algorithm.go rename to go/vt/vttablet/tabletserver/splitquery/full_scan_algorithm.go index fce6294a523..9638b3015a3 100644 --- a/go/vt/tabletserver/splitquery/full_scan_algorithm.go +++ b/go/vt/vttablet/tabletserver/splitquery/full_scan_algorithm.go @@ -4,8 +4,8 @@ import ( "fmt" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" ) // FullScanAlgorithm implements the SplitAlgorithmInterface and represents the full-scan algorithm diff --git a/go/vt/tabletserver/splitquery/full_scan_algorithm_test.go b/go/vt/vttablet/tabletserver/splitquery/full_scan_algorithm_test.go similarity index 97% rename from go/vt/tabletserver/splitquery/full_scan_algorithm_test.go rename to go/vt/vttablet/tabletserver/splitquery/full_scan_algorithm_test.go index 7c2f7655783..c02f9ad6acf 100644 --- 
a/go/vt/tabletserver/splitquery/full_scan_algorithm_test.go +++ b/go/vt/vttablet/tabletserver/splitquery/full_scan_algorithm_test.go @@ -8,8 +8,8 @@ import ( "github.com/golang/mock/gomock" "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" - "github.com/youtube/vitess/go/vt/tabletserver/splitquery/splitquery_testing" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/splitquery/splitquery_testing" ) func TestMultipleBoundaries(t *testing.T) { diff --git a/go/vt/tabletserver/splitquery/split_algorithm_interface.go b/go/vt/vttablet/tabletserver/splitquery/split_algorithm_interface.go similarity index 94% rename from go/vt/tabletserver/splitquery/split_algorithm_interface.go rename to go/vt/vttablet/tabletserver/splitquery/split_algorithm_interface.go index 75dfa5c97e0..6433b5645cd 100644 --- a/go/vt/tabletserver/splitquery/split_algorithm_interface.go +++ b/go/vt/vttablet/tabletserver/splitquery/split_algorithm_interface.go @@ -2,7 +2,7 @@ package splitquery import ( "github.com/youtube/vitess/go/sqltypes" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" ) type tuple []sqltypes.Value diff --git a/go/vt/tabletserver/splitquery/split_params.go b/go/vt/vttablet/tabletserver/splitquery/split_params.go similarity index 98% rename from go/vt/tabletserver/splitquery/split_params.go rename to go/vt/vttablet/tabletserver/splitquery/split_params.go index a1435e47aaa..ea196feb402 100644 --- a/go/vt/tabletserver/splitquery/split_params.go +++ b/go/vt/vttablet/tabletserver/splitquery/split_params.go @@ -4,8 +4,8 @@ import ( "fmt" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" + 
"github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" ) // SplitParams stores the context for a splitquery computation. It is used by diff --git a/go/vt/tabletserver/splitquery/split_params_test.go b/go/vt/vttablet/tabletserver/splitquery/split_params_test.go similarity index 98% rename from go/vt/tabletserver/splitquery/split_params_test.go rename to go/vt/vttablet/tabletserver/splitquery/split_params_test.go index 9f541c39659..7090edcea53 100644 --- a/go/vt/tabletserver/splitquery/split_params_test.go +++ b/go/vt/vttablet/tabletserver/splitquery/split_params_test.go @@ -6,8 +6,8 @@ import ( "testing" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" ) var splitParamsTestCases = []struct { diff --git a/go/vt/tabletserver/splitquery/splitquery_testing/mock_sqlexecuter.go b/go/vt/vttablet/tabletserver/splitquery/splitquery_testing/mock_sqlexecuter.go similarity index 100% rename from go/vt/tabletserver/splitquery/splitquery_testing/mock_sqlexecuter.go rename to go/vt/vttablet/tabletserver/splitquery/splitquery_testing/mock_sqlexecuter.go diff --git a/go/vt/tabletserver/splitquery/splitter.go b/go/vt/vttablet/tabletserver/splitquery/splitter.go similarity index 98% rename from go/vt/tabletserver/splitquery/splitter.go rename to go/vt/vttablet/tabletserver/splitquery/splitter.go index 4da0191e1a4..72db0954426 100644 --- a/go/vt/tabletserver/splitquery/splitter.go +++ b/go/vt/vttablet/tabletserver/splitquery/splitter.go @@ -4,8 +4,8 @@ import ( "fmt" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" + 
"github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" ) // Splitter is used to drive the splitting procedure. diff --git a/go/vt/tabletserver/splitquery/splitter_test.go b/go/vt/vttablet/tabletserver/splitquery/splitter_test.go similarity index 98% rename from go/vt/tabletserver/splitquery/splitter_test.go rename to go/vt/vttablet/tabletserver/splitquery/splitter_test.go index e00ea55b236..40d7afdbd69 100644 --- a/go/vt/tabletserver/splitquery/splitter_test.go +++ b/go/vt/vttablet/tabletserver/splitquery/splitter_test.go @@ -8,9 +8,9 @@ import ( "github.com/golang/mock/gomock" "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" - "github.com/youtube/vitess/go/vt/tabletserver/splitquery/splitquery_testing" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/splitquery/splitquery_testing" ) type FakeSplitAlgorithm struct { diff --git a/go/vt/tabletserver/splitquery/sql_executer_interface.go b/go/vt/vttablet/tabletserver/splitquery/sql_executer_interface.go similarity index 100% rename from go/vt/tabletserver/splitquery/sql_executer_interface.go rename to go/vt/vttablet/tabletserver/splitquery/sql_executer_interface.go diff --git a/go/vt/tabletserver/splitquery/testutils_test.go b/go/vt/vttablet/tabletserver/splitquery/testutils_test.go similarity index 98% rename from go/vt/tabletserver/splitquery/testutils_test.go rename to go/vt/vttablet/tabletserver/splitquery/testutils_test.go index e74189c75b9..521b6bba364 100644 --- a/go/vt/tabletserver/splitquery/testutils_test.go +++ b/go/vt/vttablet/tabletserver/splitquery/testutils_test.go @@ -8,7 +8,7 @@ import ( "github.com/youtube/vitess/go/sqltypes" 
"github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" ) // getSchema returns a fake schema object that can be given to SplitParams diff --git a/go/vt/tabletserver/splitquery/utils.go b/go/vt/vttablet/tabletserver/splitquery/utils.go similarity index 100% rename from go/vt/tabletserver/splitquery/utils.go rename to go/vt/vttablet/tabletserver/splitquery/utils.go diff --git a/go/vt/tabletserver/status.go b/go/vt/vttablet/tabletserver/status.go similarity index 98% rename from go/vt/tabletserver/status.go rename to go/vt/vttablet/tabletserver/status.go index b493c04ee65..64edc18698e 100644 --- a/go/vt/tabletserver/status.go +++ b/go/vt/vttablet/tabletserver/status.go @@ -4,7 +4,7 @@ import ( "time" "github.com/youtube/vitess/go/vt/servenv" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" ) // This file contains the status web page export for tabletserver diff --git a/go/vt/tabletserver/stream_queryz.go b/go/vt/vttablet/tabletserver/stream_queryz.go similarity index 100% rename from go/vt/tabletserver/stream_queryz.go rename to go/vt/vttablet/tabletserver/stream_queryz.go diff --git a/go/vt/tabletserver/stream_queryz_test.go b/go/vt/vttablet/tabletserver/stream_queryz_test.go similarity index 100% rename from go/vt/tabletserver/stream_queryz_test.go rename to go/vt/vttablet/tabletserver/stream_queryz_test.go diff --git a/go/vt/tabletserver/tabletenv/config.go b/go/vt/vttablet/tabletserver/tabletenv/config.go similarity index 100% rename from go/vt/tabletserver/tabletenv/config.go rename to go/vt/vttablet/tabletserver/tabletenv/config.go diff --git a/go/vt/tabletserver/tabletenv/local_context.go b/go/vt/vttablet/tabletserver/tabletenv/local_context.go similarity index 100% rename from go/vt/tabletserver/tabletenv/local_context.go rename to 
go/vt/vttablet/tabletserver/tabletenv/local_context.go diff --git a/go/vt/tabletserver/tabletenv/logstats.go b/go/vt/vttablet/tabletserver/tabletenv/logstats.go similarity index 100% rename from go/vt/tabletserver/tabletenv/logstats.go rename to go/vt/vttablet/tabletserver/tabletenv/logstats.go diff --git a/go/vt/tabletserver/tabletenv/logstats_test.go b/go/vt/vttablet/tabletserver/tabletenv/logstats_test.go similarity index 100% rename from go/vt/tabletserver/tabletenv/logstats_test.go rename to go/vt/vttablet/tabletserver/tabletenv/logstats_test.go diff --git a/go/vt/tabletserver/tabletenv/tabletenv.go b/go/vt/vttablet/tabletserver/tabletenv/tabletenv.go similarity index 100% rename from go/vt/tabletserver/tabletenv/tabletenv.go rename to go/vt/vttablet/tabletserver/tabletenv/tabletenv.go diff --git a/go/vt/tabletserver/tabletserver.go b/go/vt/vttablet/tabletserver/tabletserver.go similarity index 98% rename from go/vt/tabletserver/tabletserver.go rename to go/vt/vttablet/tabletserver/tabletserver.go index 1a96ff5055c..5ed4aab1ba6 100644 --- a/go/vt/tabletserver/tabletserver.go +++ b/go/vt/vttablet/tabletserver/tabletserver.go @@ -31,15 +31,15 @@ import ( "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/mysqlctl" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/connpool" - "github.com/youtube/vitess/go/vt/tabletserver/engines/schema" - "github.com/youtube/vitess/go/vt/tabletserver/messager" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" - "github.com/youtube/vitess/go/vt/tabletserver/rules" - "github.com/youtube/vitess/go/vt/tabletserver/splitquery" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" - "github.com/youtube/vitess/go/vt/tabletserver/txthrottler" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/connpool" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" + 
"github.com/youtube/vitess/go/vt/vttablet/tabletserver/messager" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/rules" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/splitquery" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/txthrottler" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/utils" "github.com/youtube/vitess/go/vt/vterrors" diff --git a/go/vt/tabletserver/tabletserver_test.go b/go/vt/vttablet/tabletserver/tabletserver_test.go similarity index 99% rename from go/vt/tabletserver/tabletserver_test.go rename to go/vt/vttablet/tabletserver/tabletserver_test.go index bd2ce83419f..b46caafeede 100644 --- a/go/vt/tabletserver/tabletserver_test.go +++ b/go/vt/vttablet/tabletserver/tabletserver_test.go @@ -23,9 +23,9 @@ import ( "github.com/youtube/vitess/go/mysqlconn/fakesqldb" "github.com/youtube/vitess/go/sqldb" "github.com/youtube/vitess/go/sqltypes" - "github.com/youtube/vitess/go/vt/tabletserver/messager" - "github.com/youtube/vitess/go/vt/tabletserver/querytypes" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/messager" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/tabletserver/testutils_test.go b/go/vt/vttablet/tabletserver/testutils_test.go similarity index 95% rename from go/vt/tabletserver/testutils_test.go rename to go/vt/vttablet/tabletserver/testutils_test.go index 9a870f3ef8e..b2ad54cc0e4 100644 --- a/go/vt/tabletserver/testutils_test.go +++ b/go/vt/vttablet/tabletserver/testutils_test.go @@ -14,7 +14,7 @@ import ( 
"github.com/youtube/vitess/go/mysqlconn/fakesqldb" "github.com/youtube/vitess/go/vt/dbconfigs" "github.com/youtube/vitess/go/vt/mysqlctl" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" ) var errRejected = errors.New("rejected") diff --git a/go/vt/tabletserver/twopc.go b/go/vt/vttablet/tabletserver/twopc.go similarity index 99% rename from go/vt/tabletserver/twopc.go rename to go/vt/vttablet/tabletserver/twopc.go index befdbed860f..5b39257f42d 100644 --- a/go/vt/tabletserver/twopc.go +++ b/go/vt/vttablet/tabletserver/twopc.go @@ -19,7 +19,7 @@ import ( "github.com/youtube/vitess/go/vt/dbconfigs" "github.com/youtube/vitess/go/vt/dbconnpool" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/tabletserver/connpool" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/connpool" "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/tabletserver/twopc_test.go b/go/vt/vttablet/tabletserver/twopc_test.go similarity index 100% rename from go/vt/tabletserver/twopc_test.go rename to go/vt/vttablet/tabletserver/twopc_test.go diff --git a/go/vt/tabletserver/twopcz.go b/go/vt/vttablet/tabletserver/twopcz.go similarity index 100% rename from go/vt/tabletserver/twopcz.go rename to go/vt/vttablet/tabletserver/twopcz.go diff --git a/go/vt/tabletserver/tx_engine.go b/go/vt/vttablet/tabletserver/tx_engine.go similarity index 98% rename from go/vt/tabletserver/tx_engine.go rename to go/vt/vttablet/tabletserver/tx_engine.go index 63322dbce7f..6e5a93f6f0c 100644 --- a/go/vt/tabletserver/tx_engine.go +++ b/go/vt/vttablet/tabletserver/tx_engine.go @@ -15,8 +15,8 @@ import ( "github.com/youtube/vitess/go/vt/concurrency" "github.com/youtube/vitess/go/vt/dbconfigs" "github.com/youtube/vitess/go/vt/dtids" - "github.com/youtube/vitess/go/vt/tabletserver/connpool" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + 
"github.com/youtube/vitess/go/vt/vttablet/tabletserver/connpool" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/vtgate/vtgateconn" ) diff --git a/go/vt/tabletserver/tx_engine_test.go b/go/vt/vttablet/tabletserver/tx_engine_test.go similarity index 97% rename from go/vt/tabletserver/tx_engine_test.go rename to go/vt/vttablet/tabletserver/tx_engine_test.go index a7b957949ea..ebdc6e21244 100644 --- a/go/vt/tabletserver/tx_engine_test.go +++ b/go/vt/vttablet/tabletserver/tx_engine_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" "golang.org/x/net/context" ) diff --git a/go/vt/tabletserver/tx_executor.go b/go/vt/vttablet/tabletserver/tx_executor.go similarity index 98% rename from go/vt/tabletserver/tx_executor.go rename to go/vt/vttablet/tabletserver/tx_executor.go index ff680d336cf..cc0f5605f46 100644 --- a/go/vt/tabletserver/tx_executor.go +++ b/go/vt/vttablet/tabletserver/tx_executor.go @@ -14,8 +14,8 @@ import ( querypb "github.com/youtube/vitess/go/vt/proto/query" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" - "github.com/youtube/vitess/go/vt/tabletserver/messager" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/messager" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/vterrors" ) diff --git a/go/vt/tabletserver/tx_executor_test.go b/go/vt/vttablet/tabletserver/tx_executor_test.go similarity index 99% rename from go/vt/tabletserver/tx_executor_test.go rename to go/vt/vttablet/tabletserver/tx_executor_test.go index 51b032db756..4d1e1ffae25 100644 --- a/go/vt/tabletserver/tx_executor_test.go +++ b/go/vt/vttablet/tabletserver/tx_executor_test.go @@ -19,7 +19,7 @@ import ( "github.com/youtube/vitess/go/sqltypes" querypb 
"github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/vtgate/fakerpcvtgateconn" "github.com/youtube/vitess/go/vt/vtgate/vtgateconn" ) diff --git a/go/vt/tabletserver/tx_pool.go b/go/vt/vttablet/tabletserver/tx_pool.go similarity index 98% rename from go/vt/tabletserver/tx_pool.go rename to go/vt/vttablet/tabletserver/tx_pool.go index 9f33753949a..acb24f9772a 100644 --- a/go/vt/tabletserver/tx_pool.go +++ b/go/vt/vttablet/tabletserver/tx_pool.go @@ -22,9 +22,9 @@ import ( "github.com/youtube/vitess/go/sync2" "github.com/youtube/vitess/go/timer" "github.com/youtube/vitess/go/vt/callerid" - "github.com/youtube/vitess/go/vt/tabletserver/connpool" - "github.com/youtube/vitess/go/vt/tabletserver/messager" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/connpool" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/messager" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/tabletserver/tx_pool_test.go b/go/vt/vttablet/tabletserver/tx_pool_test.go similarity index 99% rename from go/vt/tabletserver/tx_pool_test.go rename to go/vt/vttablet/tabletserver/tx_pool_test.go index 513b27b793a..4186174433e 100644 --- a/go/vt/tabletserver/tx_pool_test.go +++ b/go/vt/vttablet/tabletserver/tx_pool_test.go @@ -17,7 +17,7 @@ import ( "github.com/youtube/vitess/go/mysqlconn/fakesqldb" "github.com/youtube/vitess/go/sqldb" "github.com/youtube/vitess/go/sqltypes" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/vterrors" vtrpcpb 
"github.com/youtube/vitess/go/vt/proto/vtrpc" diff --git a/go/vt/tabletserver/tx_prep_pool.go b/go/vt/vttablet/tabletserver/tx_prep_pool.go similarity index 100% rename from go/vt/tabletserver/tx_prep_pool.go rename to go/vt/vttablet/tabletserver/tx_prep_pool.go diff --git a/go/vt/tabletserver/tx_prep_pool_test.go b/go/vt/vttablet/tabletserver/tx_prep_pool_test.go similarity index 100% rename from go/vt/tabletserver/tx_prep_pool_test.go rename to go/vt/vttablet/tabletserver/tx_prep_pool_test.go diff --git a/go/vt/tabletserver/txlogz.go b/go/vt/vttablet/tabletserver/txlogz.go similarity index 97% rename from go/vt/tabletserver/txlogz.go rename to go/vt/vttablet/tabletserver/txlogz.go index 63854fab190..9ed9b6800ce 100644 --- a/go/vt/tabletserver/txlogz.go +++ b/go/vt/vttablet/tabletserver/txlogz.go @@ -15,7 +15,7 @@ import ( "github.com/youtube/vitess/go/acl" "github.com/youtube/vitess/go/vt/callerid" "github.com/youtube/vitess/go/vt/logz" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" querypb "github.com/youtube/vitess/go/vt/proto/query" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" diff --git a/go/vt/tabletserver/txlogz_test.go b/go/vt/vttablet/tabletserver/txlogz_test.go similarity index 96% rename from go/vt/tabletserver/txlogz_test.go rename to go/vt/vttablet/tabletserver/txlogz_test.go index 9208539fc55..41019403c9e 100644 --- a/go/vt/tabletserver/txlogz_test.go +++ b/go/vt/vttablet/tabletserver/txlogz_test.go @@ -13,7 +13,7 @@ import ( "github.com/youtube/vitess/go/sync2" "github.com/youtube/vitess/go/vt/callerid" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" ) func testHandler(req *http.Request, t *testing.T) { diff --git a/go/vt/tabletserver/txthrottler/mock_healthcheck_test.go b/go/vt/vttablet/tabletserver/txthrottler/mock_healthcheck_test.go similarity index 97% rename from 
go/vt/tabletserver/txthrottler/mock_healthcheck_test.go rename to go/vt/vttablet/tabletserver/txthrottler/mock_healthcheck_test.go index 41e48d951fd..d6dbc21ce3c 100644 --- a/go/vt/tabletserver/txthrottler/mock_healthcheck_test.go +++ b/go/vt/vttablet/tabletserver/txthrottler/mock_healthcheck_test.go @@ -7,7 +7,7 @@ import ( gomock "github.com/golang/mock/gomock" discovery "github.com/youtube/vitess/go/vt/discovery" topodata "github.com/youtube/vitess/go/vt/proto/topodata" - queryservice "github.com/youtube/vitess/go/vt/tabletserver/queryservice" + queryservice "github.com/youtube/vitess/go/vt/vttablet/queryservice" ) // Mock of HealthCheck interface diff --git a/go/vt/tabletserver/txthrottler/mock_server_test.go b/go/vt/vttablet/tabletserver/txthrottler/mock_server_test.go similarity index 100% rename from go/vt/tabletserver/txthrottler/mock_server_test.go rename to go/vt/vttablet/tabletserver/txthrottler/mock_server_test.go diff --git a/go/vt/tabletserver/txthrottler/mock_throttler_test.go b/go/vt/vttablet/tabletserver/txthrottler/mock_throttler_test.go similarity index 100% rename from go/vt/tabletserver/txthrottler/mock_throttler_test.go rename to go/vt/vttablet/tabletserver/txthrottler/mock_throttler_test.go diff --git a/go/vt/tabletserver/txthrottler/mock_topology_watcher_test.go b/go/vt/vttablet/tabletserver/txthrottler/mock_topology_watcher_test.go similarity index 100% rename from go/vt/tabletserver/txthrottler/mock_topology_watcher_test.go rename to go/vt/vttablet/tabletserver/txthrottler/mock_topology_watcher_test.go diff --git a/go/vt/tabletserver/txthrottler/mock_toposerver_impl_test.go b/go/vt/vttablet/tabletserver/txthrottler/mock_toposerver_impl_test.go similarity index 100% rename from go/vt/tabletserver/txthrottler/mock_toposerver_impl_test.go rename to go/vt/vttablet/tabletserver/txthrottler/mock_toposerver_impl_test.go diff --git a/go/vt/tabletserver/txthrottler/tx_throttler.go b/go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go similarity 
index 99% rename from go/vt/tabletserver/txthrottler/tx_throttler.go rename to go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go index 79f452d8c1e..3e927d42f9b 100644 --- a/go/vt/tabletserver/txthrottler/tx_throttler.go +++ b/go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go @@ -10,7 +10,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/youtube/vitess/go/vt/discovery" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/throttler" "github.com/youtube/vitess/go/vt/topo" diff --git a/go/vt/tabletserver/txthrottler/tx_throttler_test.go b/go/vt/vttablet/tabletserver/txthrottler/tx_throttler_test.go similarity index 98% rename from go/vt/tabletserver/txthrottler/tx_throttler_test.go rename to go/vt/vttablet/tabletserver/txthrottler/tx_throttler_test.go index 169b65b9ff9..d13c61cb202 100644 --- a/go/vt/tabletserver/txthrottler/tx_throttler_test.go +++ b/go/vt/vttablet/tabletserver/txthrottler/tx_throttler_test.go @@ -6,7 +6,7 @@ import ( "github.com/golang/mock/gomock" "github.com/youtube/vitess/go/vt/discovery" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/topo" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/tabletserver/tabletservermock/controller.go b/go/vt/vttablet/tabletservermock/controller.go similarity index 97% rename from go/vt/tabletserver/tabletservermock/controller.go rename to go/vt/vttablet/tabletservermock/controller.go index bd15012e7ea..a1dcae2767e 100644 --- a/go/vt/tabletserver/tabletservermock/controller.go +++ b/go/vt/vttablet/tabletservermock/controller.go @@ -14,8 +14,8 @@ import ( "github.com/youtube/vitess/go/vt/mysqlctl" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" - 
"github.com/youtube/vitess/go/vt/tabletserver/queryservice" - "github.com/youtube/vitess/go/vt/tabletserver/rules" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/rules" ) // BroadcastData is used by the mock Controller to send data diff --git a/go/vt/tabletmanager/tmclient/rpc_client_api.go b/go/vt/vttablet/tmclient/rpc_client_api.go similarity index 100% rename from go/vt/tabletmanager/tmclient/rpc_client_api.go rename to go/vt/vttablet/tmclient/rpc_client_api.go diff --git a/go/vt/worker/diff_utils.go b/go/vt/worker/diff_utils.go index 3b5f5aa5a0b..bfff38a0baa 100644 --- a/go/vt/worker/diff_utils.go +++ b/go/vt/worker/diff_utils.go @@ -20,11 +20,11 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/key" "github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice" - "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/topoproto" "github.com/youtube/vitess/go/vt/vtgate/vindexes" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" + "github.com/youtube/vitess/go/vt/vttablet/tabletconn" querypb "github.com/youtube/vitess/go/vt/proto/query" tabletmanagerdatapb "github.com/youtube/vitess/go/vt/proto/tabletmanagerdata" diff --git a/go/vt/worker/instance.go b/go/vt/worker/instance.go index 1c5af73af23..4ff5e38d3db 100644 --- a/go/vt/worker/instance.go +++ b/go/vt/worker/instance.go @@ -20,9 +20,9 @@ import ( "github.com/youtube/vitess/go/tb" "github.com/youtube/vitess/go/vt/logutil" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/vterrors" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" ) diff --git a/go/vt/worker/legacy_split_clone_test.go 
b/go/vt/worker/legacy_split_clone_test.go index 0ff613c644a..01198614edc 100644 --- a/go/vt/worker/legacy_split_clone_test.go +++ b/go/vt/worker/legacy_split_clone_test.go @@ -19,11 +19,11 @@ import ( "github.com/youtube/vitess/go/mysqlconn/replication" "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/mysqlctl/tmutils" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" - "github.com/youtube/vitess/go/vt/tabletserver/grpcqueryservice" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice/fakes" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/memorytopo" + "github.com/youtube/vitess/go/vt/vttablet/grpcqueryservice" + "github.com/youtube/vitess/go/vt/vttablet/queryservice/fakes" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler/testlib" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/worker/restartable_result_reader.go b/go/vt/worker/restartable_result_reader.go index 1a5b52383b9..6d725669494 100644 --- a/go/vt/worker/restartable_result_reader.go +++ b/go/vt/worker/restartable_result_reader.go @@ -17,9 +17,9 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice" - "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" "github.com/youtube/vitess/go/vt/topo/topoproto" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" + "github.com/youtube/vitess/go/vt/vttablet/tabletconn" querypb "github.com/youtube/vitess/go/vt/proto/query" tabletmanagerdatapb "github.com/youtube/vitess/go/vt/proto/tabletmanagerdata" diff --git a/go/vt/worker/restartable_result_reader_test.go b/go/vt/worker/restartable_result_reader_test.go index 0906b1e4a66..7b08e135ef9 100644 --- a/go/vt/worker/restartable_result_reader_test.go +++ b/go/vt/worker/restartable_result_reader_test.go @@ -16,10 +16,10 @@ import ( "github.com/youtube/vitess/go/sqltypes" 
"github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice" - "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" "github.com/youtube/vitess/go/vt/topo/memorytopo" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" + "github.com/youtube/vitess/go/vt/vttablet/tabletconn" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" tabletmanagerdatapb "github.com/youtube/vitess/go/vt/proto/tabletmanagerdata" diff --git a/go/vt/worker/split_clone_test.go b/go/vt/worker/split_clone_test.go index 87b4c95113e..569630b868d 100644 --- a/go/vt/worker/split_clone_test.go +++ b/go/vt/worker/split_clone_test.go @@ -20,12 +20,12 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/concurrency" "github.com/youtube/vitess/go/vt/mysqlctl/tmutils" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" - "github.com/youtube/vitess/go/vt/tabletserver/grpcqueryservice" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice/fakes" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/memorytopo" "github.com/youtube/vitess/go/vt/topo/topoproto" + "github.com/youtube/vitess/go/vt/vttablet/grpcqueryservice" + "github.com/youtube/vitess/go/vt/vttablet/queryservice/fakes" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler/testlib" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/worker/split_diff_test.go b/go/vt/worker/split_diff_test.go index f274f6c6b9d..47ec8ae1210 100644 --- a/go/vt/worker/split_diff_test.go +++ b/go/vt/worker/split_diff_test.go @@ -15,9 +15,9 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/mysqlctl/tmutils" - "github.com/youtube/vitess/go/vt/tabletserver/grpcqueryservice" - 
"github.com/youtube/vitess/go/vt/tabletserver/queryservice/fakes" "github.com/youtube/vitess/go/vt/topo/memorytopo" + "github.com/youtube/vitess/go/vt/vttablet/grpcqueryservice" + "github.com/youtube/vitess/go/vt/vttablet/queryservice/fakes" "github.com/youtube/vitess/go/vt/wrangler" "github.com/youtube/vitess/go/vt/wrangler/testlib" diff --git a/go/vt/worker/utils_test.go b/go/vt/worker/utils_test.go index f15cf908cea..875994dfd37 100644 --- a/go/vt/worker/utils_test.go +++ b/go/vt/worker/utils_test.go @@ -13,9 +13,9 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/dbconnpool" - "github.com/youtube/vitess/go/vt/tabletmanager/faketmclient" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" + "github.com/youtube/vitess/go/vt/vttablet/faketmclient" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/worker/vertical_split_clone_test.go b/go/vt/worker/vertical_split_clone_test.go index 2cb1b64c761..20384b6917c 100644 --- a/go/vt/worker/vertical_split_clone_test.go +++ b/go/vt/worker/vertical_split_clone_test.go @@ -12,10 +12,10 @@ import ( "github.com/youtube/vitess/go/mysqlconn/replication" "github.com/youtube/vitess/go/vt/mysqlctl/tmutils" - "github.com/youtube/vitess/go/vt/tabletserver/grpcqueryservice" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice/fakes" "github.com/youtube/vitess/go/vt/topo/memorytopo" "github.com/youtube/vitess/go/vt/topo/topoproto" + "github.com/youtube/vitess/go/vt/vttablet/grpcqueryservice" + "github.com/youtube/vitess/go/vt/vttablet/queryservice/fakes" "github.com/youtube/vitess/go/vt/wrangler/testlib" tabletmanagerdatapb "github.com/youtube/vitess/go/vt/proto/tabletmanagerdata" diff --git a/go/vt/worker/vertical_split_diff_test.go b/go/vt/worker/vertical_split_diff_test.go index ded37d84b59..b8a66da5354 100644 --- 
a/go/vt/worker/vertical_split_diff_test.go +++ b/go/vt/worker/vertical_split_diff_test.go @@ -15,9 +15,9 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/mysqlctl/tmutils" - "github.com/youtube/vitess/go/vt/tabletserver/grpcqueryservice" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice/fakes" "github.com/youtube/vitess/go/vt/topo/memorytopo" + "github.com/youtube/vitess/go/vt/vttablet/grpcqueryservice" + "github.com/youtube/vitess/go/vt/vttablet/queryservice/fakes" "github.com/youtube/vitess/go/vt/wrangler" "github.com/youtube/vitess/go/vt/wrangler/testlib" diff --git a/go/vt/worker/vtworkerclienttest/client_testsuite.go b/go/vt/worker/vtworkerclienttest/client_testsuite.go index ffbf9d662f7..d180b454744 100644 --- a/go/vt/worker/vtworkerclienttest/client_testsuite.go +++ b/go/vt/worker/vtworkerclienttest/client_testsuite.go @@ -24,15 +24,15 @@ import ( "golang.org/x/net/context" "github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo/memorytopo" "github.com/youtube/vitess/go/vt/vterrors" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/worker" "github.com/youtube/vitess/go/vt/worker/vtworkerclient" // Import the gRPC client implementation for tablet manager because the real // vtworker implementation requires it. 
- _ "github.com/youtube/vitess/go/vt/tabletmanager/grpctmclient" + _ "github.com/youtube/vitess/go/vt/vttablet/grpctmclient" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) diff --git a/go/vt/workflow/resharding/horizontal_resharding_workflow.go b/go/vt/workflow/resharding/horizontal_resharding_workflow.go index c488b76556d..acbb51aa4d0 100644 --- a/go/vt/workflow/resharding/horizontal_resharding_workflow.go +++ b/go/vt/workflow/resharding/horizontal_resharding_workflow.go @@ -14,8 +14,8 @@ import ( "golang.org/x/net/context" "github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/topotools" "github.com/youtube/vitess/go/vt/workflow" diff --git a/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go b/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go index b0d92258fba..848e8f83a25 100644 --- a/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go +++ b/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go @@ -14,7 +14,7 @@ import ( "github.com/youtube/vitess/go/vt/wrangler" // import the gRPC client implementation for tablet manager - _ "github.com/youtube/vitess/go/vt/tabletmanager/grpctmclient" + _ "github.com/youtube/vitess/go/vt/vttablet/grpctmclient" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" ) diff --git a/go/vt/wrangler/split.go b/go/vt/wrangler/split.go index e5786a95b9b..ca33e2bab86 100644 --- a/go/vt/wrangler/split.go +++ b/go/vt/wrangler/split.go @@ -11,8 +11,8 @@ import ( "golang.org/x/net/context" - "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" "github.com/youtube/vitess/go/vt/topo" + "github.com/youtube/vitess/go/vt/vttablet/tabletconn" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git 
a/go/vt/wrangler/testlib/apply_schema_test.go b/go/vt/wrangler/testlib/apply_schema_test.go index 3b2eaebc2e8..917f6b6b4e6 100644 --- a/go/vt/wrangler/testlib/apply_schema_test.go +++ b/go/vt/wrangler/testlib/apply_schema_test.go @@ -14,8 +14,8 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/mysqlctl/tmutils" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo/memorytopo" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" tabletmanagerdatapb "github.com/youtube/vitess/go/vt/proto/tabletmanagerdata" diff --git a/go/vt/wrangler/testlib/backup_test.go b/go/vt/wrangler/testlib/backup_test.go index 73ccffe87eb..d5e5c3fc9b3 100644 --- a/go/vt/wrangler/testlib/backup_test.go +++ b/go/vt/wrangler/testlib/backup_test.go @@ -20,9 +20,9 @@ import ( "github.com/youtube/vitess/go/vt/mysqlctl" "github.com/youtube/vitess/go/vt/mysqlctl/backupstorage" "github.com/youtube/vitess/go/vt/mysqlctl/filebackupstorage" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo/memorytopo" "github.com/youtube/vitess/go/vt/topo/topoproto" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/vt/wrangler/testlib/copy_schema_shard_test.go b/go/vt/wrangler/testlib/copy_schema_shard_test.go index b80b144cc93..3388c962dba 100644 --- a/go/vt/wrangler/testlib/copy_schema_shard_test.go +++ b/go/vt/wrangler/testlib/copy_schema_shard_test.go @@ -13,9 +13,9 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/mysqlctl/tmutils" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo/memorytopo" "github.com/youtube/vitess/go/vt/topo/topoproto" + 
"github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go index ccaf27f06c2..51a78af4b5d 100644 --- a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go @@ -14,9 +14,9 @@ import ( "github.com/youtube/vitess/go/mysqlconn/replication" "github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo/memorytopo" "github.com/youtube/vitess/go/vt/topo/topoproto" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/vt/wrangler/testlib/fake_tablet.go b/go/vt/wrangler/testlib/fake_tablet.go index a76a04c601e..4eb3f79e1a3 100644 --- a/go/vt/wrangler/testlib/fake_tablet.go +++ b/go/vt/wrangler/testlib/fake_tablet.go @@ -20,21 +20,21 @@ import ( "github.com/youtube/vitess/go/mysqlconn/fakesqldb" "github.com/youtube/vitess/go/vt/mysqlctl" - "github.com/youtube/vitess/go/vt/tabletmanager" - "github.com/youtube/vitess/go/vt/tabletmanager/grpctmserver" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" - "github.com/youtube/vitess/go/vt/tabletserver/tabletconn" "github.com/youtube/vitess/go/vt/topo" + "github.com/youtube/vitess/go/vt/vttablet/grpctmserver" + "github.com/youtube/vitess/go/vt/vttablet/tabletconn" + "github.com/youtube/vitess/go/vt/vttablet/tabletmanager" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" // import the gRPC client implementation for tablet manager - _ "github.com/youtube/vitess/go/vt/tabletmanager/grpctmclient" + _ 
"github.com/youtube/vitess/go/vt/vttablet/grpctmclient" // import the gRPC client implementation for query service - _ "github.com/youtube/vitess/go/vt/tabletserver/grpctabletconn" + _ "github.com/youtube/vitess/go/vt/vttablet/grpctabletconn" ) // This file contains utility methods for unit tests. diff --git a/go/vt/wrangler/testlib/init_shard_master_test.go b/go/vt/wrangler/testlib/init_shard_master_test.go index 29c8bead46c..3ae75749f86 100644 --- a/go/vt/wrangler/testlib/init_shard_master_test.go +++ b/go/vt/wrangler/testlib/init_shard_master_test.go @@ -16,10 +16,10 @@ import ( "github.com/youtube/vitess/go/mysqlconn/replication" "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/memorytopo" "github.com/youtube/vitess/go/vt/topo/topoproto" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/vt/wrangler/testlib/migrate_served_from_test.go b/go/vt/wrangler/testlib/migrate_served_from_test.go index 08a0b14432b..ee8df6e7ed0 100644 --- a/go/vt/wrangler/testlib/migrate_served_from_test.go +++ b/go/vt/wrangler/testlib/migrate_served_from_test.go @@ -13,8 +13,8 @@ import ( "github.com/youtube/vitess/go/mysqlconn/replication" "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo/memorytopo" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/vt/wrangler/testlib/migrate_served_types_test.go b/go/vt/wrangler/testlib/migrate_served_types_test.go index b8d96c17095..4a4fae4ccfc 100644 --- a/go/vt/wrangler/testlib/migrate_served_types_test.go +++ 
b/go/vt/wrangler/testlib/migrate_served_types_test.go @@ -14,9 +14,9 @@ import ( "github.com/youtube/vitess/go/mysqlconn/replication" "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/memorytopo" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/vt/wrangler/testlib/permissions_test.go b/go/vt/wrangler/testlib/permissions_test.go index 6cf545d06c1..3d31af71f10 100644 --- a/go/vt/wrangler/testlib/permissions_test.go +++ b/go/vt/wrangler/testlib/permissions_test.go @@ -12,9 +12,9 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/memorytopo" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/wrangler/testlib/planned_reparent_shard_test.go b/go/vt/wrangler/testlib/planned_reparent_shard_test.go index b9361b4a657..4225392aae8 100644 --- a/go/vt/wrangler/testlib/planned_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/planned_reparent_shard_test.go @@ -11,10 +11,10 @@ import ( "github.com/youtube/vitess/go/mysqlconn/replication" "github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" - "github.com/youtube/vitess/go/vt/tabletserver/tabletservermock" "github.com/youtube/vitess/go/vt/topo/memorytopo" "github.com/youtube/vitess/go/vt/topo/topoproto" + "github.com/youtube/vitess/go/vt/vttablet/tabletservermock" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" topodatapb 
"github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/vt/wrangler/testlib/reparent_external_test.go b/go/vt/wrangler/testlib/reparent_external_test.go index a2495767c7d..80b893f43a6 100644 --- a/go/vt/wrangler/testlib/reparent_external_test.go +++ b/go/vt/wrangler/testlib/reparent_external_test.go @@ -14,13 +14,13 @@ import ( "github.com/youtube/vitess/go/event" "github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/tabletmanager" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/memorytopo" "github.com/youtube/vitess/go/vt/topo/topoproto" "github.com/youtube/vitess/go/vt/topotools" "github.com/youtube/vitess/go/vt/topotools/events" + "github.com/youtube/vitess/go/vt/vttablet/tabletmanager" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/vt/wrangler/testlib/reparent_utils_test.go b/go/vt/wrangler/testlib/reparent_utils_test.go index 6e06b360e0c..0ed311e3e03 100644 --- a/go/vt/wrangler/testlib/reparent_utils_test.go +++ b/go/vt/wrangler/testlib/reparent_utils_test.go @@ -12,10 +12,10 @@ import ( "github.com/youtube/vitess/go/mysqlconn/replication" "github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/memorytopo" "github.com/youtube/vitess/go/vt/topo/topoproto" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/vt/wrangler/testlib/shard_test.go b/go/vt/wrangler/testlib/shard_test.go index 6dc1bd61e8a..9eee7327520 100644 --- a/go/vt/wrangler/testlib/shard_test.go +++ b/go/vt/wrangler/testlib/shard_test.go @@ -7,9 +7,9 @@ import ( "golang.org/x/net/context" 
"github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/memorytopo" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/vt/wrangler/testlib/version_test.go b/go/vt/wrangler/testlib/version_test.go index 4fbe4099347..48a678c7c54 100644 --- a/go/vt/wrangler/testlib/version_test.go +++ b/go/vt/wrangler/testlib/version_test.go @@ -12,8 +12,8 @@ import ( "testing" "github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo/memorytopo" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" diff --git a/go/vt/wrangler/testlib/wait_for_drain_test.go b/go/vt/wrangler/testlib/wait_for_drain_test.go index 2cac39de502..08fa710a1b3 100644 --- a/go/vt/wrangler/testlib/wait_for_drain_test.go +++ b/go/vt/wrangler/testlib/wait_for_drain_test.go @@ -13,10 +13,10 @@ import ( "golang.org/x/net/context" "github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" - "github.com/youtube/vitess/go/vt/tabletserver/grpcqueryservice" - "github.com/youtube/vitess/go/vt/tabletserver/queryservice/fakes" "github.com/youtube/vitess/go/vt/topo/memorytopo" + "github.com/youtube/vitess/go/vt/vttablet/grpcqueryservice" + "github.com/youtube/vitess/go/vt/vttablet/queryservice/fakes" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" logutilpb "github.com/youtube/vitess/go/vt/proto/logutil" diff --git a/go/vt/wrangler/testlib/wait_for_filtered_replication_test.go b/go/vt/wrangler/testlib/wait_for_filtered_replication_test.go index 371de0ceb0a..223eaee06ab 100644 --- 
a/go/vt/wrangler/testlib/wait_for_filtered_replication_test.go +++ b/go/vt/wrangler/testlib/wait_for_filtered_replication_test.go @@ -12,12 +12,12 @@ import ( "golang.org/x/net/context" "github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/tabletmanager" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" - "github.com/youtube/vitess/go/vt/tabletserver" - "github.com/youtube/vitess/go/vt/tabletserver/grpcqueryservice" - "github.com/youtube/vitess/go/vt/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/topo/memorytopo" + "github.com/youtube/vitess/go/vt/vttablet/grpcqueryservice" + "github.com/youtube/vitess/go/vt/vttablet/tabletmanager" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" "github.com/youtube/vitess/go/vt/wrangler" querypb "github.com/youtube/vitess/go/vt/proto/query" diff --git a/go/vt/wrangler/wrangler.go b/go/vt/wrangler/wrangler.go index 0fe75cfe15c..e53952c4783 100644 --- a/go/vt/wrangler/wrangler.go +++ b/go/vt/wrangler/wrangler.go @@ -8,8 +8,8 @@ package wrangler import ( "github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/tabletmanager/tmclient" "github.com/youtube/vitess/go/vt/topo" + "github.com/youtube/vitess/go/vt/vttablet/tmclient" ) var ( From 0e4edf39c85b24fda16f167f1e27a9c80d672d12 Mon Sep 17 00:00:00 2001 From: Sugu Sougoumarane Date: Mon, 13 Mar 2017 15:30:03 -0700 Subject: [PATCH 096/108] tabletserver: fix data race in test --- go/vt/vttablet/tabletserver/messager/message_manager_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/go/vt/vttablet/tabletserver/messager/message_manager_test.go b/go/vt/vttablet/tabletserver/messager/message_manager_test.go index 2e59c1056dc..fdee30299cc 100644 --- a/go/vt/vttablet/tabletserver/messager/message_manager_test.go +++ 
b/go/vt/vttablet/tabletserver/messager/message_manager_test.go @@ -493,13 +493,14 @@ func TestMessageManagerPurge(t *testing.T) { db := fakesqldb.New(t) defer db.Close() tsv := newFakeTabletServer() + ch := make(chan string) + tsv.SetChannel(ch) + ti := newMMTable() ti.MessageInfo.PollInterval = 1 * time.Millisecond mm := newMessageManager(tsv, ti, newMMConnPool(db)) mm.Open() defer mm.Close() - ch := make(chan string) - tsv.SetChannel(ch) // Ensure Purge got called. if got := <-ch; got != mmTable.Name.String() { t.Errorf("Postpone: %s, want %v", got, mmTable.Name) From 0327cf42e63de9182a8cfb15e9cd6eec9e50920b Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 10 Mar 2017 17:05:10 -0800 Subject: [PATCH 097/108] Splitting RBR code into own files. --- .../replication/binlog_event_common.go | 685 ----------------- .../replication/binlog_event_common_test.go | 387 ---------- go/mysqlconn/replication/binlog_event_rbr.go | 692 ++++++++++++++++++ .../replication/binlog_event_rbr_test.go | 393 ++++++++++ 4 files changed, 1085 insertions(+), 1072 deletions(-) create mode 100644 go/mysqlconn/replication/binlog_event_rbr.go create mode 100644 go/mysqlconn/replication/binlog_event_rbr_test.go diff --git a/go/mysqlconn/replication/binlog_event_common.go b/go/mysqlconn/replication/binlog_event_common.go index 7800f7a3b51..945dee0cd0e 100644 --- a/go/mysqlconn/replication/binlog_event_common.go +++ b/go/mysqlconn/replication/binlog_event_common.go @@ -4,12 +4,8 @@ import ( "bytes" "encoding/binary" "fmt" - "strconv" - - "github.com/youtube/vitess/go/sqltypes" binlogdatapb "github.com/youtube/vitess/go/vt/proto/binlogdata" - querypb "github.com/youtube/vitess/go/vt/proto/query" ) // binlogEvent wraps a raw packet buffer and provides methods to examine it @@ -314,684 +310,3 @@ func (ev binlogEvent) TableID(f BinlogFormat) uint64 { uint64(ev[pos+4])<<32 | uint64(ev[pos+5])<<40 } - -// TableMap implements BinlogEvent.TableMap(). 
-// -// Expected format (L = total length of event data): -// # bytes field -// 4/6 table id -// 2 flags -// 1 schema name length sl -// sl schema name -// 1 [00] -// 1 table name length tl -// tl table name -// 1 [00] -// column count cc (var-len encoded) -// cc column-def, one byte per column -// column-meta-def (var-len encoded string) -// n NULL-bitmask, length: (cc + 7) / 8 -func (ev binlogEvent) TableMap(f BinlogFormat) (*TableMap, error) { - data := ev.Bytes()[f.HeaderLength:] - - result := &TableMap{} - pos := 6 - if f.HeaderSize(eTableMapEvent) == 6 { - pos = 4 - } - result.Flags = binary.LittleEndian.Uint16(data[pos : pos+2]) - pos += 2 - - l := int(data[pos]) - result.Database = string(data[pos+1 : pos+1+l]) - pos += 1 + l + 1 - - l = int(data[pos]) - result.Name = string(data[pos+1 : pos+1+l]) - pos += 1 + l + 1 - - // FIXME(alainjobart) this is varlength encoded. - columnCount := int(data[pos]) - pos++ - - result.Types = data[pos : pos+columnCount] - pos += columnCount - - // FIXME(alainjobart) this is a var-len-string. - l = int(data[pos]) - pos++ - - // Allocate and parse / copy Metadata. - result.Metadata = make([]uint16, columnCount) - expectedEnd := pos + l - for c := 0; c < columnCount; c++ { - var err error - result.Metadata[c], pos, err = metadataRead(data, pos, result.Types[c]) - if err != nil { - return nil, err - } - } - if pos != expectedEnd { - return nil, fmt.Errorf("unexpected metadata end: got %v was expecting %v (data=%v)", pos, expectedEnd, data) - } - - // A bit array that says if each colum can be NULL. - result.CanBeNull, _ = newBitmap(data, pos, columnCount) - - return result, nil -} - -// metadataLength returns how many bytes are used for metadata, based on a type. -func metadataLength(typ byte) int { - switch typ { - case TypeDecimal, TypeTiny, TypeShort, TypeLong, TypeNull, TypeTimestamp, TypeLongLong, TypeInt24, TypeDate, TypeTime, TypeDateTime, TypeYear, TypeNewDate, TypeVarString: - // No data here. 
- return 0 - - case TypeFloat, TypeDouble, TypeTimestamp2, TypeDateTime2, TypeTime2, TypeJSON, TypeTinyBlob, TypeMediumBlob, TypeLongBlob, TypeBlob, TypeGeometry: - // One byte. - return 1 - - case TypeNewDecimal, TypeEnum, TypeSet, TypeString: - // Two bytes, Big Endian because of crazy encoding. - return 2 - - case TypeVarchar, TypeBit: - // Two bytes, Little Endian - return 2 - - default: - // Unknown type. This is used in tests only, so panic. - panic(fmt.Errorf("metadataLength: unhandled data type: %v", typ)) - } -} - -// metadataTotalLength returns the total size of the metadata for an -// array of types. -func metadataTotalLength(types []byte) int { - sum := 0 - for _, t := range types { - sum += metadataLength(t) - } - return sum -} - -// metadataRead reads a single value from the metadata string. -func metadataRead(data []byte, pos int, typ byte) (uint16, int, error) { - switch typ { - - case TypeDecimal, TypeTiny, TypeShort, TypeLong, TypeNull, TypeTimestamp, TypeLongLong, TypeInt24, TypeDate, TypeTime, TypeDateTime, TypeYear, TypeNewDate, TypeVarString: - // No data here. - return 0, pos, nil - - case TypeFloat, TypeDouble, TypeTimestamp2, TypeDateTime2, TypeTime2, TypeJSON, TypeTinyBlob, TypeMediumBlob, TypeLongBlob, TypeBlob, TypeGeometry: - // One byte. - return uint16(data[pos]), pos + 1, nil - - case TypeNewDecimal, TypeEnum, TypeSet, TypeString: - // Two bytes, Big Endian because of crazy encoding. - return uint16(data[pos])<<8 + uint16(data[pos+1]), pos + 2, nil - - case TypeVarchar, TypeBit: - // Two bytes, Little Endian - return uint16(data[pos]) + uint16(data[pos+1])<<8, pos + 2, nil - - default: - // Unknown types, we can't go on. - return 0, 0, fmt.Errorf("metadataRead: unhandled data type: %v", typ) - } -} - -// metadataWrite writes a single value into the metadata string. 
-func metadataWrite(data []byte, pos int, typ byte, value uint16) int { - switch typ { - - case TypeDecimal, TypeTiny, TypeShort, TypeLong, TypeNull, TypeTimestamp, TypeLongLong, TypeInt24, TypeDate, TypeTime, TypeDateTime, TypeYear, TypeNewDate, TypeVarString: - // No data here. - return pos - - case TypeFloat, TypeDouble, TypeTimestamp2, TypeDateTime2, TypeTime2, TypeJSON, TypeTinyBlob, TypeMediumBlob, TypeLongBlob, TypeBlob, TypeGeometry: - // One byte. - data[pos] = byte(value) - return pos + 1 - - case TypeNewDecimal, TypeEnum, TypeSet, TypeString: - // Two bytes, Big Endian because of crazy encoding. - data[pos] = byte(value >> 8) - data[pos+1] = byte(value) - return pos + 2 - - case TypeVarchar, TypeBit: - // Two bytes, Little Endian - data[pos] = byte(value) - data[pos+1] = byte(value >> 8) - return pos + 2 - - default: - // Unknown type. This is used in tests only, so panic. - panic(fmt.Errorf("metadataRead: unhandled data type: %v", typ)) - } -} - -// cellLength returns the new position after the field with the given -// type is read. -func cellLength(data []byte, pos int, typ byte, metadata uint16) (int, error) { - switch typ { - case TypeNull: - return 0, nil - case TypeTiny, TypeYear: - return 1, nil - case TypeShort: - return 2, nil - case TypeInt24: - return 3, nil - case TypeLong, TypeTimestamp: - return 4, nil - case TypeLongLong: - return 8, nil - case TypeDate, TypeNewDate: - return 3, nil - case TypeTime: - return 4, nil - case TypeDateTime: - return 8, nil - case TypeVarchar: - // Length is encoded in 1 or 2 bytes. - if metadata > 255 { - l := int(uint64(data[pos]) | - uint64(data[pos+1])<<8) - return l + 2, nil - } - l := int(data[pos]) - return l + 1, nil - case TypeBit: - // bitmap length is in metadata, as: - // upper 8 bits: bytes length - // lower 8 bits: bit length - nbits := ((metadata >> 8) * 8) + (metadata & 0xFF) - return (int(nbits) + 7) / 8, nil - case TypeTimestamp2: - // metadata has number of decimals. 
One byte encodes - // two decimals. - return 4 + (int(metadata)+1)/2, nil - case TypeDateTime2: - // metadata has number of decimals. One byte encodes - // two decimals. - return 5 + (int(metadata)+1)/2, nil - case TypeTime2: - // metadata has number of decimals. One byte encodes - // two decimals. - return 3 + (int(metadata)+1)/2, nil - - default: - return 0, fmt.Errorf("Unsupported type %v (data: %v pos: %v)", typ, data, pos) - } -} - -// CellValue returns the data for a cell as a sqltypes.Value, and how -// many bytes it takes. It only uses the querypb.Type value for the -// signed flag. -func CellValue(data []byte, pos int, typ byte, metadata uint16, styp querypb.Type) (sqltypes.Value, int, error) { - switch typ { - case TypeTiny: - if sqltypes.IsSigned(styp) { - return sqltypes.MakeTrusted(querypb.Type_INT8, - strconv.AppendInt(nil, int64(int8(data[pos])), 10)), 1, nil - } - return sqltypes.MakeTrusted(querypb.Type_UINT8, - strconv.AppendUint(nil, uint64(data[pos]), 10)), 1, nil - case TypeYear: - return sqltypes.MakeTrusted(querypb.Type_YEAR, - strconv.AppendUint(nil, uint64(data[pos])+1900, 10)), 1, nil - case TypeShort: - val := binary.LittleEndian.Uint16(data[pos : pos+2]) - if sqltypes.IsSigned(styp) { - return sqltypes.MakeTrusted(querypb.Type_INT16, - strconv.AppendInt(nil, int64(int16(val)), 10)), 2, nil - } - return sqltypes.MakeTrusted(querypb.Type_UINT16, - strconv.AppendUint(nil, uint64(val), 10)), 2, nil - case TypeInt24: - if sqltypes.IsSigned(styp) && data[pos+2]&128 > 0 { - // Negative number, have to extend the sign. - val := int32(uint32(data[pos]) + - uint32(data[pos+1])<<8 + - uint32(data[pos+2])<<16 + - uint32(255)<<24) - return sqltypes.MakeTrusted(querypb.Type_INT24, - strconv.AppendInt(nil, int64(val), 10)), 3, nil - } - // Positive number. 
- val := uint64(data[pos]) + - uint64(data[pos+1])<<8 + - uint64(data[pos+2])<<16 - return sqltypes.MakeTrusted(querypb.Type_UINT24, - strconv.AppendUint(nil, val, 10)), 3, nil - case TypeLong: - val := binary.LittleEndian.Uint32(data[pos : pos+4]) - if sqltypes.IsSigned(styp) { - return sqltypes.MakeTrusted(querypb.Type_INT32, - strconv.AppendInt(nil, int64(int32(val)), 10)), 4, nil - } - return sqltypes.MakeTrusted(querypb.Type_UINT32, - strconv.AppendUint(nil, uint64(val), 10)), 4, nil - case TypeTimestamp: - val := binary.LittleEndian.Uint32(data[pos : pos+4]) - return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, - strconv.AppendUint(nil, uint64(val), 10)), 4, nil - case TypeLongLong: - val := binary.LittleEndian.Uint64(data[pos : pos+8]) - if sqltypes.IsSigned(styp) { - return sqltypes.MakeTrusted(querypb.Type_INT64, - strconv.AppendInt(nil, int64(val), 10)), 8, nil - } - return sqltypes.MakeTrusted(querypb.Type_UINT64, - strconv.AppendUint(nil, val, 10)), 8, nil - case TypeDate, TypeNewDate: - val := uint32(data[pos]) + - uint32(data[pos+1])<<8 + - uint32(data[pos+2])<<16 - day := val & 31 - month := val >> 5 & 15 - year := val >> 9 - return sqltypes.MakeTrusted(querypb.Type_DATE, - []byte(fmt.Sprintf("%04d-%02d-%02d", year, month, day))), 3, nil - case TypeTime: - val := binary.LittleEndian.Uint32(data[pos : pos+4]) - hour := val / 10000 - minute := (val % 10000) / 100 - second := val % 100 - return sqltypes.MakeTrusted(querypb.Type_TIME, - []byte(fmt.Sprintf("%02d:%02d:%02d", hour, minute, second))), 4, nil - case TypeDateTime: - val := binary.LittleEndian.Uint64(data[pos : pos+8]) - d := val / 1000000 - t := val % 1000000 - year := d / 10000 - month := (d % 10000) / 100 - day := d % 100 - hour := t / 10000 - minute := (t % 10000) / 100 - second := t % 100 - return sqltypes.MakeTrusted(querypb.Type_DATETIME, - []byte(fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, minute, second))), 8, nil - case TypeVarchar: - // Length is encoded in 1 
or 2 bytes. - if metadata > 255 { - l := int(uint64(data[pos]) | - uint64(data[pos+1])<<8) - return sqltypes.MakeTrusted(querypb.Type_VARCHAR, - data[pos+2:pos+2+l]), l + 2, nil - } - l := int(data[pos]) - return sqltypes.MakeTrusted(querypb.Type_VARCHAR, - data[pos+1:pos+1+l]), l + 1, nil - case TypeBit: - // The contents is just the bytes, quoted. - nbits := ((metadata >> 8) * 8) + (metadata & 0xFF) - l := (int(nbits) + 7) / 8 - return sqltypes.MakeTrusted(querypb.Type_BIT, - data[pos:pos+l]), l, nil - case TypeTimestamp2: - second := binary.LittleEndian.Uint32(data[pos : pos+4]) - switch metadata { - case 1: - decimals := int(data[pos+4]) - return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, - []byte(fmt.Sprintf("%v.%01d", second, decimals))), 5, nil - case 2: - decimals := int(data[pos+4]) - return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, - []byte(fmt.Sprintf("%v.%02d", second, decimals))), 5, nil - case 3: - decimals := int(data[pos+4]) + - int(data[pos+5])<<8 - return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, - []byte(fmt.Sprintf("%v.%03d", second, decimals))), 6, nil - case 4: - decimals := int(data[pos+4]) + - int(data[pos+5])<<8 - return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, - []byte(fmt.Sprintf("%v.%04d", second, decimals))), 6, nil - case 5: - decimals := int(data[pos+4]) + - int(data[pos+5])<<8 + - int(data[pos+6])<<16 - return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, - []byte(fmt.Sprintf("%v.%05d", second, decimals))), 7, nil - case 6: - decimals := int(data[pos+4]) + - int(data[pos+5])<<8 + - int(data[pos+6])<<16 - return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, - []byte(fmt.Sprintf("%v.%.6d", second, decimals))), 7, nil - } - return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, - strconv.AppendUint(nil, uint64(second), 10)), 4, nil - case TypeDateTime2: - ymdhms := (uint64(data[pos]) | - uint64(data[pos+1])<<8 | - uint64(data[pos+2])<<16 | - uint64(data[pos+3])<<24 | - uint64(data[pos+4])<<32) - uint64(0x8000000000) - ymd := 
ymdhms >> 17 - ym := ymd >> 5 - hms := ymdhms % (1 << 17) - - day := ymd % (1 << 5) - month := ym % 13 - year := ym / 13 - - second := hms % (1 << 6) - minute := (hms >> 6) % (1 << 6) - hour := hms >> 12 - - datetime := fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, minute, second) - - switch metadata { - case 1: - decimals := int(data[pos+5]) - return sqltypes.MakeTrusted(querypb.Type_DATETIME, - []byte(fmt.Sprintf("%v.%01d", datetime, decimals))), 6, nil - case 2: - decimals := int(data[pos+5]) - return sqltypes.MakeTrusted(querypb.Type_DATETIME, - []byte(fmt.Sprintf("%v.%02d", datetime, decimals))), 6, nil - case 3: - decimals := int(data[pos+5]) + - int(data[pos+6])<<8 - return sqltypes.MakeTrusted(querypb.Type_DATETIME, - []byte(fmt.Sprintf("%v.%03d", datetime, decimals))), 7, nil - case 4: - decimals := int(data[pos+5]) + - int(data[pos+6])<<8 - return sqltypes.MakeTrusted(querypb.Type_DATETIME, - []byte(fmt.Sprintf("%v.%04d", datetime, decimals))), 7, nil - case 5: - decimals := int(data[pos+5]) + - int(data[pos+6])<<8 + - int(data[pos+7])<<16 - return sqltypes.MakeTrusted(querypb.Type_DATETIME, - []byte(fmt.Sprintf("%v.%05d", datetime, decimals))), 8, nil - case 6: - decimals := int(data[pos+5]) + - int(data[pos+6])<<8 + - int(data[pos+7])<<16 - return sqltypes.MakeTrusted(querypb.Type_DATETIME, - []byte(fmt.Sprintf("%v.%.6d", datetime, decimals))), 8, nil - } - return sqltypes.MakeTrusted(querypb.Type_DATETIME, - []byte(datetime)), 5, nil - case TypeTime2: - hms := (int64(data[pos]) | - int64(data[pos+1])<<8 | - int64(data[pos+2])<<16) - 0x800000 - sign := "" - if hms < 0 { - hms = -hms - sign = "-" - } - - fracStr := "" - switch metadata { - case 1: - frac := int(data[pos+3]) - if sign == "-" && frac != 0 { - hms-- - frac = 0x100 - frac - } - fracStr = fmt.Sprintf(".%.1d", frac/10) - case 2: - frac := int(data[pos+3]) - if sign == "-" && frac != 0 { - hms-- - frac = 0x100 - frac - } - fracStr = fmt.Sprintf(".%.2d", frac) - case 3: - 
frac := int(data[pos+3]) | - int(data[pos+4])<<8 - if sign == "-" && frac != 0 { - hms-- - frac = 0x10000 - frac - } - fracStr = fmt.Sprintf(".%.3d", frac/10) - case 4: - frac := int(data[pos+3]) | - int(data[pos+4])<<8 - if sign == "-" && frac != 0 { - hms-- - frac = 0x10000 - frac - } - fracStr = fmt.Sprintf(".%.4d", frac) - case 5: - frac := int(data[pos+3]) | - int(data[pos+4])<<8 | - int(data[pos+5])<<16 - if sign == "-" && frac != 0 { - hms-- - frac = 0x1000000 - frac - } - fracStr = fmt.Sprintf(".%.5d", frac/10) - case 6: - frac := int(data[pos+3]) | - int(data[pos+4])<<8 | - int(data[pos+5])<<16 - if sign == "-" && frac != 0 { - hms-- - frac = 0x1000000 - frac - } - fracStr = fmt.Sprintf(".%.6d", frac) - } - - hour := (hms >> 12) % (1 << 10) - minute := (hms >> 6) % (1 << 6) - second := hms % (1 << 6) - return sqltypes.MakeTrusted(querypb.Type_TIME, - []byte(fmt.Sprintf("%v%02d:%02d:%02d%v", sign, hour, minute, second, fracStr))), 3 + (int(metadata)+1)/2, nil - - default: - return sqltypes.NULL, 0, fmt.Errorf("Unsupported type %v", typ) - } -} - -// Rows implements BinlogEvent.TableMap(). 
-// -// Expected format (L = total length of event data): -// # bytes field -// 4/6 table id -// 2 flags -// -- if version == 2 -// 2 extra data length edl -// edl extra data -// -- endif -// number of columns (var-len encoded) -// identify bitmap -// data bitmap -// -- for each row -// null bitmap for identify for present rows -// values for each identify field -// null bitmap for data for present rows -// values for each data field -// -- -func (ev binlogEvent) Rows(f BinlogFormat, tm *TableMap) (Rows, error) { - typ := ev.Type() - data := ev.Bytes()[f.HeaderLength:] - hasIdentify := typ == eUpdateRowsEventV1 || typ == eUpdateRowsEventV2 || - typ == eDeleteRowsEventV1 || typ == eDeleteRowsEventV2 - hasData := typ == eWriteRowsEventV1 || typ == eWriteRowsEventV2 || - typ == eUpdateRowsEventV1 || typ == eUpdateRowsEventV2 - - result := Rows{} - pos := 6 - if f.HeaderSize(typ) == 6 { - pos = 4 - } - result.Flags = binary.LittleEndian.Uint16(data[pos : pos+2]) - pos += 2 - - // version=2 have extra data here. - if typ == eWriteRowsEventV2 || typ == eUpdateRowsEventV2 || typ == eDeleteRowsEventV2 { - // This extraDataLength contains the 2 bytes length. - extraDataLength := binary.LittleEndian.Uint16(data[pos : pos+2]) - pos += int(extraDataLength) - } - - // FIXME(alainjobart) this is var len encoded. - columnCount := int(data[pos]) - pos++ - - numIdentifyColumns := 0 - numDataColumns := 0 - - if hasIdentify { - // Bitmap of the columns used for identify. - result.IdentifyColumns, pos = newBitmap(data, pos, columnCount) - numIdentifyColumns = result.IdentifyColumns.BitCount() - } - - if hasData { - // Bitmap of columns that are present. - result.DataColumns, pos = newBitmap(data, pos, columnCount) - numDataColumns = result.DataColumns.BitCount() - } - - // One row at a time. - for pos < len(data) { - row := Row{} - - if hasIdentify { - // Bitmap of identify columns that are null (amongst the ones that are present). 
- row.NullIdentifyColumns, pos = newBitmap(data, pos, numIdentifyColumns) - - // Get the identify values. - startPos := pos - valueIndex := 0 - for c := 0; c < columnCount; c++ { - if !result.IdentifyColumns.Bit(c) { - // This column is not represented. - continue - } - - if row.NullIdentifyColumns.Bit(valueIndex) { - // This column is represented, but its value is NULL. - valueIndex++ - continue - } - - // This column is represented now. We need to skip its length. - l, err := cellLength(data, pos, tm.Types[c], tm.Metadata[c]) - if err != nil { - return result, err - } - pos += l - valueIndex++ - } - row.Identify = data[startPos:pos] - } - - if hasData { - // Bitmap of columns that are null (amongst the ones that are present). - row.NullColumns, pos = newBitmap(data, pos, numDataColumns) - - // Get the values. - startPos := pos - valueIndex := 0 - for c := 0; c < columnCount; c++ { - if !result.DataColumns.Bit(c) { - // This column is not represented. - continue - } - - if row.NullColumns.Bit(valueIndex) { - // This column is represented, but its value is NULL. - valueIndex++ - continue - } - - // This column is represented now. We need to skip its length. - l, err := cellLength(data, pos, tm.Types[c], tm.Metadata[c]) - if err != nil { - return result, err - } - pos += l - valueIndex++ - } - row.Data = data[startPos:pos] - } - - result.Rows = append(result.Rows, row) - } - - return result, nil -} - -// StringValuesForTests is a helper method to return the string value -// of all columns in a row in a Row. Only use it in tests, as the -// returned values cannot be interpreted correctly without the schema. -// We assume everything is unsigned in this method. 
-func (rs *Rows) StringValuesForTests(tm *TableMap, rowIndex int) ([]string, error) { - var result []string - - valueIndex := 0 - data := rs.Rows[rowIndex].Data - pos := 0 - for c := 0; c < rs.DataColumns.Count(); c++ { - if !rs.DataColumns.Bit(c) { - continue - } - - if rs.Rows[rowIndex].NullColumns.Bit(valueIndex) { - // This column is represented, but its value is NULL. - result = append(result, "NULL") - valueIndex++ - continue - } - - // We have real data - value, l, err := CellValue(data, pos, tm.Types[c], tm.Metadata[c], querypb.Type_UINT64) - if err != nil { - return nil, err - } - result = append(result, value.String()) - pos += l - valueIndex++ - } - - return result, nil -} - -// StringIdentifiesForTests is a helper method to return the string -// identify of all columns in a row in a Row. Only use it in tests, as the -// returned values cannot be interpreted correctly without the schema. -// We assume everything is unsigned in this method. -func (rs *Rows) StringIdentifiesForTests(tm *TableMap, rowIndex int) ([]string, error) { - var result []string - - valueIndex := 0 - data := rs.Rows[rowIndex].Identify - pos := 0 - for c := 0; c < rs.IdentifyColumns.Count(); c++ { - if !rs.IdentifyColumns.Bit(c) { - continue - } - - if rs.Rows[rowIndex].NullIdentifyColumns.Bit(valueIndex) { - // This column is represented, but its value is NULL. 
- result = append(result, "NULL") - valueIndex++ - continue - } - - // We have real data - value, l, err := CellValue(data, pos, tm.Types[c], tm.Metadata[c], querypb.Type_UINT64) - if err != nil { - return nil, err - } - result = append(result, value.String()) - pos += l - valueIndex++ - } - - return result, nil -} diff --git a/go/mysqlconn/replication/binlog_event_common_test.go b/go/mysqlconn/replication/binlog_event_common_test.go index 2add4db0ca1..89a6b285294 100644 --- a/go/mysqlconn/replication/binlog_event_common_test.go +++ b/go/mysqlconn/replication/binlog_event_common_test.go @@ -1,14 +1,10 @@ package replication import ( - "bytes" - "fmt" "reflect" "testing" - "github.com/youtube/vitess/go/sqltypes" binlogdatapb "github.com/youtube/vitess/go/vt/proto/binlogdata" - querypb "github.com/youtube/vitess/go/vt/proto/query" ) // sample event data @@ -337,386 +333,3 @@ func TestBinlogEventIntVarBadID(t *testing.T) { t.Errorf("wrong error, got %#v, want %#v", got, want) } } - -func TestCellLengthAndData(t *testing.T) { - testcases := []struct { - typ byte - metadata uint16 - styp querypb.Type - data []byte - out sqltypes.Value - }{{ - typ: TypeTiny, - styp: querypb.Type_UINT8, - data: []byte{0x82}, - out: sqltypes.MakeTrusted(querypb.Type_UINT8, - []byte("130")), - }, { - typ: TypeTiny, - styp: querypb.Type_INT8, - data: []byte{0xfe}, - out: sqltypes.MakeTrusted(querypb.Type_INT8, - []byte("-2")), - }, { - typ: TypeYear, - data: []byte{0x82}, - out: sqltypes.MakeTrusted(querypb.Type_YEAR, - []byte("2030")), - }, { - typ: TypeShort, - styp: querypb.Type_UINT16, - data: []byte{0x82, 0x81}, - out: sqltypes.MakeTrusted(querypb.Type_UINT16, - []byte(fmt.Sprintf("%v", 0x8182))), - }, { - typ: TypeShort, - styp: querypb.Type_INT16, - data: []byte{0xfe, 0xff}, - out: sqltypes.MakeTrusted(querypb.Type_INT16, - []byte(fmt.Sprintf("%v", -1-int32(0x0001)))), - }, { - typ: TypeInt24, - styp: querypb.Type_UINT24, - data: []byte{0x83, 0x82, 0x81}, - out: 
sqltypes.MakeTrusted(querypb.Type_UINT24, - []byte(fmt.Sprintf("%v", 0x818283))), - }, { - typ: TypeInt24, - styp: querypb.Type_INT24, - data: []byte{0xfd, 0xfe, 0xff}, - out: sqltypes.MakeTrusted(querypb.Type_INT24, - []byte(fmt.Sprintf("%v", -1-int32(0x000102)))), - }, { - typ: TypeLong, - styp: querypb.Type_UINT32, - data: []byte{0x84, 0x83, 0x82, 0x81}, - out: sqltypes.MakeTrusted(querypb.Type_UINT32, - []byte(fmt.Sprintf("%v", 0x81828384))), - }, { - typ: TypeLong, - styp: querypb.Type_INT32, - data: []byte{0xfc, 0xfd, 0xfe, 0xff}, - out: sqltypes.MakeTrusted(querypb.Type_INT32, - []byte(fmt.Sprintf("%v", -1-int32(0x00010203)))), - }, { - typ: TypeTimestamp, - data: []byte{0x84, 0x83, 0x82, 0x81}, - out: sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, - []byte(fmt.Sprintf("%v", 0x81828384))), - }, { - typ: TypeLongLong, - styp: querypb.Type_UINT64, - data: []byte{0x88, 0x87, 0x86, 0x85, 0x84, 0x83, 0x82, 0x81}, - out: sqltypes.MakeTrusted(querypb.Type_UINT64, - []byte(fmt.Sprintf("%v", uint64(0x8182838485868788)))), - }, { - typ: TypeLongLong, - styp: querypb.Type_INT64, - data: []byte{0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff}, - out: sqltypes.MakeTrusted(querypb.Type_INT64, - []byte(fmt.Sprintf("%v", -1-int64(0x0001020304050607)))), - }, { - typ: TypeDate, - // 2010 << 9 + 10 << 5 + 3 = 1029443 = 0x0fb543 - data: []byte{0x43, 0xb5, 0x0f}, - out: sqltypes.MakeTrusted(querypb.Type_DATE, - []byte("2010-10-03")), - }, { - typ: TypeNewDate, - // 2010 << 9 + 10 << 5 + 3 = 1029443 = 0x0fb543 - data: []byte{0x43, 0xb5, 0x0f}, - out: sqltypes.MakeTrusted(querypb.Type_DATE, - []byte("2010-10-03")), - }, { - typ: TypeTime, - // 154532 = 0x00025ba4 - data: []byte{0xa4, 0x5b, 0x02, 0x00}, - out: sqltypes.MakeTrusted(querypb.Type_TIME, - []byte("15:45:32")), - }, { - typ: TypeDateTime, - // 19840304154532 = 0x120b6e4807a4 - data: []byte{0xa4, 0x07, 0x48, 0x6e, 0x0b, 0x12, 0x00, 0x00}, - out: sqltypes.MakeTrusted(querypb.Type_DATETIME, - []byte("1984-03-04 15:45:32")), - 
}, { - typ: TypeVarchar, - metadata: 20, // one byte length encoding - data: []byte{3, 'a', 'b', 'c'}, - out: sqltypes.MakeTrusted(querypb.Type_VARCHAR, - []byte("abc")), - }, { - typ: TypeVarchar, - metadata: 384, // two bytes length encoding - data: []byte{3, 0, 'a', 'b', 'c'}, - out: sqltypes.MakeTrusted(querypb.Type_VARCHAR, - []byte("abc")), - }, { - typ: TypeBit, - metadata: 0x0107, - data: []byte{0x3, 0x1}, - out: sqltypes.MakeTrusted(querypb.Type_BIT, - []byte{3, 1}), - }, { - typ: TypeTimestamp2, - metadata: 0, - data: []byte{0x84, 0x83, 0x82, 0x81}, - out: sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, - []byte(fmt.Sprintf("%v", 0x81828384))), - }, { - typ: TypeTimestamp2, - metadata: 1, - data: []byte{0x84, 0x83, 0x82, 0x81, 7}, - out: sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, - []byte(fmt.Sprintf("%v.7", 0x81828384))), - }, { - typ: TypeTimestamp2, - metadata: 2, - data: []byte{0x84, 0x83, 0x82, 0x81, 76}, - out: sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, - []byte(fmt.Sprintf("%v.76", 0x81828384))), - }, { - typ: TypeTimestamp2, - metadata: 3, - // 765 = 0x02fd - data: []byte{0x84, 0x83, 0x82, 0x81, 0xfd, 0x02}, - out: sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, - []byte(fmt.Sprintf("%v.765", 0x81828384))), - }, { - typ: TypeTimestamp2, - metadata: 4, - // 7654 = 0x1de6 - data: []byte{0x84, 0x83, 0x82, 0x81, 0xe6, 0x1d}, - out: sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, - []byte(fmt.Sprintf("%v.7654", 0x81828384))), - }, { - typ: TypeTimestamp2, - metadata: 5, - // 76543 = 0x012aff - data: []byte{0x84, 0x83, 0x82, 0x81, 0xff, 0x2a, 0x01}, - out: sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, - []byte(fmt.Sprintf("%v.76543", 0x81828384))), - }, { - typ: TypeTimestamp2, - metadata: 6, - // 765432 = 0x0badf8 - data: []byte{0x84, 0x83, 0x82, 0x81, 0xf8, 0xad, 0x0b}, - out: sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, - []byte(fmt.Sprintf("%v.765432", 0x81828384))), - }, { - typ: TypeDateTime2, - metadata: 0, - // (2012 * 13 + 6) << 22 + 21 << 17 + 15 << 
12 + 45 << 6 + 17) - // = 109734198097 = 0x198caafb51 - // Then have to add 0x8000000000 = 0x998caafb51 - data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99}, - out: sqltypes.MakeTrusted(querypb.Type_DATETIME, - []byte("2012-06-21 15:45:17")), - }, { - typ: TypeDateTime2, - metadata: 1, - data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 7}, - out: sqltypes.MakeTrusted(querypb.Type_DATETIME, - []byte("2012-06-21 15:45:17.7")), - }, { - typ: TypeDateTime2, - metadata: 2, - data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 76}, - out: sqltypes.MakeTrusted(querypb.Type_DATETIME, - []byte("2012-06-21 15:45:17.76")), - }, { - typ: TypeDateTime2, - metadata: 3, - // 765 = 0x02fd - data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 0xfd, 0x02}, - out: sqltypes.MakeTrusted(querypb.Type_DATETIME, - []byte("2012-06-21 15:45:17.765")), - }, { - typ: TypeDateTime2, - metadata: 4, - // 7654 = 0x1de6 - data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 0xe6, 0x1d}, - out: sqltypes.MakeTrusted(querypb.Type_DATETIME, - []byte("2012-06-21 15:45:17.7654")), - }, { - typ: TypeDateTime2, - metadata: 5, - // 76543 = 0x012aff - data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 0xff, 0x2a, 0x01}, - out: sqltypes.MakeTrusted(querypb.Type_DATETIME, - []byte("2012-06-21 15:45:17.76543")), - }, { - typ: TypeDateTime2, - metadata: 6, - // 765432 = 0x0badf8 - data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 0xf8, 0xad, 0x0b}, - out: sqltypes.MakeTrusted(querypb.Type_DATETIME, - []byte("2012-06-21 15:45:17.765432")), - }, { - // This first set of tests is from a comment in - // sql-common/my_time.c: - // - // Disk value intpart frac Time value Memory value - // 800000.00 0 0 00:00:00.00 0000000000.000000 - // 7FFFFF.FF -1 255 -00:00:00.01 FFFFFFFFFF.FFD8F0 - // 7FFFFF.9D -1 99 -00:00:00.99 FFFFFFFFFF.F0E4D0 - // 7FFFFF.00 -1 0 -00:00:01.00 FFFFFFFFFF.000000 - // 7FFFFE.FF -1 255 -00:00:01.01 FFFFFFFFFE.FFD8F0 - // 7FFFFE.F6 -2 246 -00:00:01.10 FFFFFFFFFE.FE7960 - typ: TypeTime2, - metadata: 2, - data: []byte{0x00, 0x00, 0x80, 0x00}, - out: 
sqltypes.MakeTrusted(querypb.Type_TIME, - []byte("00:00:00.00")), - }, { - typ: TypeTime2, - metadata: 2, - data: []byte{0xff, 0xff, 0x7f, 0xff}, - out: sqltypes.MakeTrusted(querypb.Type_TIME, - []byte("-00:00:00.01")), - }, { - typ: TypeTime2, - metadata: 2, - data: []byte{0xff, 0xff, 0x7f, 0x9d}, - out: sqltypes.MakeTrusted(querypb.Type_TIME, - []byte("-00:00:00.99")), - }, { - typ: TypeTime2, - metadata: 2, - data: []byte{0xff, 0xff, 0x7f, 0x00}, - out: sqltypes.MakeTrusted(querypb.Type_TIME, - []byte("-00:00:01.00")), - }, { - typ: TypeTime2, - metadata: 2, - data: []byte{0xfe, 0xff, 0x7f, 0xff}, - out: sqltypes.MakeTrusted(querypb.Type_TIME, - []byte("-00:00:01.01")), - }, { - typ: TypeTime2, - metadata: 2, - data: []byte{0xfe, 0xff, 0x7f, 0xf6}, - out: sqltypes.MakeTrusted(querypb.Type_TIME, - []byte("-00:00:01.10")), - }, { - // Similar tests for 4 decimals. - typ: TypeTime2, - metadata: 4, - data: []byte{0x00, 0x00, 0x80, 0x00, 0x00}, - out: sqltypes.MakeTrusted(querypb.Type_TIME, - []byte("00:00:00.0000")), - }, { - typ: TypeTime2, - metadata: 4, - data: []byte{0xff, 0xff, 0x7f, 0xff, 0xff}, - out: sqltypes.MakeTrusted(querypb.Type_TIME, - []byte("-00:00:00.0001")), - }, { - typ: TypeTime2, - metadata: 4, - data: []byte{0xff, 0xff, 0x7f, 0x9d, 0xff}, - out: sqltypes.MakeTrusted(querypb.Type_TIME, - []byte("-00:00:00.0099")), - }, { - typ: TypeTime2, - metadata: 4, - data: []byte{0xff, 0xff, 0x7f, 0x00, 0x00}, - out: sqltypes.MakeTrusted(querypb.Type_TIME, - []byte("-00:00:01.0000")), - }, { - typ: TypeTime2, - metadata: 4, - data: []byte{0xfe, 0xff, 0x7f, 0xff, 0xff}, - out: sqltypes.MakeTrusted(querypb.Type_TIME, - []byte("-00:00:01.0001")), - }, { - typ: TypeTime2, - metadata: 4, - data: []byte{0xfe, 0xff, 0x7f, 0xf6, 0xff}, - out: sqltypes.MakeTrusted(querypb.Type_TIME, - []byte("-00:00:01.0010")), - }, { - // Similar tests for 6 decimals. 
- typ: TypeTime2, - metadata: 6, - data: []byte{0x00, 0x00, 0x80, 0x00, 0x00, 0x00}, - out: sqltypes.MakeTrusted(querypb.Type_TIME, - []byte("00:00:00.000000")), - }, { - typ: TypeTime2, - metadata: 6, - data: []byte{0xff, 0xff, 0x7f, 0xff, 0xff, 0xff}, - out: sqltypes.MakeTrusted(querypb.Type_TIME, - []byte("-00:00:00.000001")), - }, { - typ: TypeTime2, - metadata: 6, - data: []byte{0xff, 0xff, 0x7f, 0x9d, 0xff, 0xff}, - out: sqltypes.MakeTrusted(querypb.Type_TIME, - []byte("-00:00:00.000099")), - }, { - typ: TypeTime2, - metadata: 6, - data: []byte{0xff, 0xff, 0x7f, 0x00, 0x00, 0x00}, - out: sqltypes.MakeTrusted(querypb.Type_TIME, - []byte("-00:00:01.000000")), - }, { - typ: TypeTime2, - metadata: 6, - data: []byte{0xfe, 0xff, 0x7f, 0xff, 0xff, 0xff}, - out: sqltypes.MakeTrusted(querypb.Type_TIME, - []byte("-00:00:01.000001")), - }, { - typ: TypeTime2, - metadata: 6, - data: []byte{0xfe, 0xff, 0x7f, 0xf6, 0xff, 0xff}, - out: sqltypes.MakeTrusted(querypb.Type_TIME, - []byte("-00:00:01.000010")), - }, { - // Few more tests. - typ: TypeTime2, - metadata: 0, - data: []byte{0x00, 0x00, 0x80}, - out: sqltypes.MakeTrusted(querypb.Type_TIME, - []byte("00:00:00")), - }, { - typ: TypeTime2, - metadata: 1, - data: []byte{0x01, 0x00, 0x80, 0x0a}, - out: sqltypes.MakeTrusted(querypb.Type_TIME, - []byte("00:00:01.1")), - }, { - typ: TypeTime2, - metadata: 2, - data: []byte{0x01, 0x00, 0x80, 0x0a}, - out: sqltypes.MakeTrusted(querypb.Type_TIME, - []byte("00:00:01.10")), - }, { - typ: TypeTime2, - metadata: 0, - // 15 << 12 + 34 << 6 + 54 = 63670 = 0x00f8b6 - // and need to add 0x800000 - data: []byte{0xb6, 0xf8, 0x80}, - out: sqltypes.MakeTrusted(querypb.Type_TIME, - []byte("15:34:54")), - }} - - for _, tcase := range testcases { - // Copy the data into a larger buffer (one extra byte - // on both sides), so we make sure the 'pos' field works. - padded := make([]byte, len(tcase.data)+2) - copy(padded[1:], tcase.data) - - // Test cellLength. 
- l, err := cellLength(padded, 1, tcase.typ, tcase.metadata) - if err != nil || l != len(tcase.data) { - t.Errorf("testcase cellLength(%v,%v) returned unexpected result: %v %v", tcase.typ, tcase.data, l, err) - } - - // Test CellValue. - out, l, err := CellValue(padded, 1, tcase.typ, tcase.metadata, tcase.styp) - if err != nil || l != len(tcase.data) || out.Type() != tcase.out.Type() || bytes.Compare(out.Raw(), tcase.out.Raw()) != 0 { - t.Errorf("testcase cellData(%v,%v) returned unexpected result: %v(%v) %v %v, was expecting %v(%v) %v ", tcase.typ, tcase.data, out, out.Type(), l, err, tcase.out, tcase.out.Type(), len(tcase.data)) - } - } -} diff --git a/go/mysqlconn/replication/binlog_event_rbr.go b/go/mysqlconn/replication/binlog_event_rbr.go new file mode 100644 index 00000000000..021341dc49f --- /dev/null +++ b/go/mysqlconn/replication/binlog_event_rbr.go @@ -0,0 +1,692 @@ +package replication + +import ( + "encoding/binary" + "fmt" + "strconv" + + "github.com/youtube/vitess/go/sqltypes" + + querypb "github.com/youtube/vitess/go/vt/proto/query" +) + +// TableMap implements BinlogEvent.TableMap(). 
+// +// Expected format (L = total length of event data): +// # bytes field +// 4/6 table id +// 2 flags +// 1 schema name length sl +// sl schema name +// 1 [00] +// 1 table name length tl +// tl table name +// 1 [00] +// column count cc (var-len encoded) +// cc column-def, one byte per column +// column-meta-def (var-len encoded string) +// n NULL-bitmask, length: (cc + 7) / 8 +func (ev binlogEvent) TableMap(f BinlogFormat) (*TableMap, error) { + data := ev.Bytes()[f.HeaderLength:] + + result := &TableMap{} + pos := 6 + if f.HeaderSize(eTableMapEvent) == 6 { + pos = 4 + } + result.Flags = binary.LittleEndian.Uint16(data[pos : pos+2]) + pos += 2 + + l := int(data[pos]) + result.Database = string(data[pos+1 : pos+1+l]) + pos += 1 + l + 1 + + l = int(data[pos]) + result.Name = string(data[pos+1 : pos+1+l]) + pos += 1 + l + 1 + + // FIXME(alainjobart) this is varlength encoded. + columnCount := int(data[pos]) + pos++ + + result.Types = data[pos : pos+columnCount] + pos += columnCount + + // FIXME(alainjobart) this is a var-len-string. + l = int(data[pos]) + pos++ + + // Allocate and parse / copy Metadata. + result.Metadata = make([]uint16, columnCount) + expectedEnd := pos + l + for c := 0; c < columnCount; c++ { + var err error + result.Metadata[c], pos, err = metadataRead(data, pos, result.Types[c]) + if err != nil { + return nil, err + } + } + if pos != expectedEnd { + return nil, fmt.Errorf("unexpected metadata end: got %v was expecting %v (data=%v)", pos, expectedEnd, data) + } + + // A bit array that says if each colum can be NULL. + result.CanBeNull, _ = newBitmap(data, pos, columnCount) + + return result, nil +} + +// metadataLength returns how many bytes are used for metadata, based on a type. +func metadataLength(typ byte) int { + switch typ { + case TypeDecimal, TypeTiny, TypeShort, TypeLong, TypeNull, TypeTimestamp, TypeLongLong, TypeInt24, TypeDate, TypeTime, TypeDateTime, TypeYear, TypeNewDate, TypeVarString: + // No data here. 
+ return 0 + + case TypeFloat, TypeDouble, TypeTimestamp2, TypeDateTime2, TypeTime2, TypeJSON, TypeTinyBlob, TypeMediumBlob, TypeLongBlob, TypeBlob, TypeGeometry: + // One byte. + return 1 + + case TypeNewDecimal, TypeEnum, TypeSet, TypeString: + // Two bytes, Big Endian because of crazy encoding. + return 2 + + case TypeVarchar, TypeBit: + // Two bytes, Little Endian + return 2 + + default: + // Unknown type. This is used in tests only, so panic. + panic(fmt.Errorf("metadataLength: unhandled data type: %v", typ)) + } +} + +// metadataTotalLength returns the total size of the metadata for an +// array of types. +func metadataTotalLength(types []byte) int { + sum := 0 + for _, t := range types { + sum += metadataLength(t) + } + return sum +} + +// metadataRead reads a single value from the metadata string. +func metadataRead(data []byte, pos int, typ byte) (uint16, int, error) { + switch typ { + + case TypeDecimal, TypeTiny, TypeShort, TypeLong, TypeNull, TypeTimestamp, TypeLongLong, TypeInt24, TypeDate, TypeTime, TypeDateTime, TypeYear, TypeNewDate, TypeVarString: + // No data here. + return 0, pos, nil + + case TypeFloat, TypeDouble, TypeTimestamp2, TypeDateTime2, TypeTime2, TypeJSON, TypeTinyBlob, TypeMediumBlob, TypeLongBlob, TypeBlob, TypeGeometry: + // One byte. + return uint16(data[pos]), pos + 1, nil + + case TypeNewDecimal, TypeEnum, TypeSet, TypeString: + // Two bytes, Big Endian because of crazy encoding. + return uint16(data[pos])<<8 + uint16(data[pos+1]), pos + 2, nil + + case TypeVarchar, TypeBit: + // Two bytes, Little Endian + return uint16(data[pos]) + uint16(data[pos+1])<<8, pos + 2, nil + + default: + // Unknown types, we can't go on. + return 0, 0, fmt.Errorf("metadataRead: unhandled data type: %v", typ) + } +} + +// metadataWrite writes a single value into the metadata string. 
+func metadataWrite(data []byte, pos int, typ byte, value uint16) int { + switch typ { + + case TypeDecimal, TypeTiny, TypeShort, TypeLong, TypeNull, TypeTimestamp, TypeLongLong, TypeInt24, TypeDate, TypeTime, TypeDateTime, TypeYear, TypeNewDate, TypeVarString: + // No data here. + return pos + + case TypeFloat, TypeDouble, TypeTimestamp2, TypeDateTime2, TypeTime2, TypeJSON, TypeTinyBlob, TypeMediumBlob, TypeLongBlob, TypeBlob, TypeGeometry: + // One byte. + data[pos] = byte(value) + return pos + 1 + + case TypeNewDecimal, TypeEnum, TypeSet, TypeString: + // Two bytes, Big Endian because of crazy encoding. + data[pos] = byte(value >> 8) + data[pos+1] = byte(value) + return pos + 2 + + case TypeVarchar, TypeBit: + // Two bytes, Little Endian + data[pos] = byte(value) + data[pos+1] = byte(value >> 8) + return pos + 2 + + default: + // Unknown type. This is used in tests only, so panic. + panic(fmt.Errorf("metadataRead: unhandled data type: %v", typ)) + } +} + +// cellLength returns the new position after the field with the given +// type is read. +func cellLength(data []byte, pos int, typ byte, metadata uint16) (int, error) { + switch typ { + case TypeNull: + return 0, nil + case TypeTiny, TypeYear: + return 1, nil + case TypeShort: + return 2, nil + case TypeInt24: + return 3, nil + case TypeLong, TypeTimestamp: + return 4, nil + case TypeLongLong: + return 8, nil + case TypeDate, TypeNewDate: + return 3, nil + case TypeTime: + return 4, nil + case TypeDateTime: + return 8, nil + case TypeVarchar: + // Length is encoded in 1 or 2 bytes. + if metadata > 255 { + l := int(uint64(data[pos]) | + uint64(data[pos+1])<<8) + return l + 2, nil + } + l := int(data[pos]) + return l + 1, nil + case TypeBit: + // bitmap length is in metadata, as: + // upper 8 bits: bytes length + // lower 8 bits: bit length + nbits := ((metadata >> 8) * 8) + (metadata & 0xFF) + return (int(nbits) + 7) / 8, nil + case TypeTimestamp2: + // metadata has number of decimals. 
One byte encodes + // two decimals. + return 4 + (int(metadata)+1)/2, nil + case TypeDateTime2: + // metadata has number of decimals. One byte encodes + // two decimals. + return 5 + (int(metadata)+1)/2, nil + case TypeTime2: + // metadata has number of decimals. One byte encodes + // two decimals. + return 3 + (int(metadata)+1)/2, nil + + default: + return 0, fmt.Errorf("Unsupported type %v (data: %v pos: %v)", typ, data, pos) + } +} + +// CellValue returns the data for a cell as a sqltypes.Value, and how +// many bytes it takes. It only uses the querypb.Type value for the +// signed flag. +func CellValue(data []byte, pos int, typ byte, metadata uint16, styp querypb.Type) (sqltypes.Value, int, error) { + switch typ { + case TypeTiny: + if sqltypes.IsSigned(styp) { + return sqltypes.MakeTrusted(querypb.Type_INT8, + strconv.AppendInt(nil, int64(int8(data[pos])), 10)), 1, nil + } + return sqltypes.MakeTrusted(querypb.Type_UINT8, + strconv.AppendUint(nil, uint64(data[pos]), 10)), 1, nil + case TypeYear: + return sqltypes.MakeTrusted(querypb.Type_YEAR, + strconv.AppendUint(nil, uint64(data[pos])+1900, 10)), 1, nil + case TypeShort: + val := binary.LittleEndian.Uint16(data[pos : pos+2]) + if sqltypes.IsSigned(styp) { + return sqltypes.MakeTrusted(querypb.Type_INT16, + strconv.AppendInt(nil, int64(int16(val)), 10)), 2, nil + } + return sqltypes.MakeTrusted(querypb.Type_UINT16, + strconv.AppendUint(nil, uint64(val), 10)), 2, nil + case TypeInt24: + if sqltypes.IsSigned(styp) && data[pos+2]&128 > 0 { + // Negative number, have to extend the sign. + val := int32(uint32(data[pos]) + + uint32(data[pos+1])<<8 + + uint32(data[pos+2])<<16 + + uint32(255)<<24) + return sqltypes.MakeTrusted(querypb.Type_INT24, + strconv.AppendInt(nil, int64(val), 10)), 3, nil + } + // Positive number. 
+ val := uint64(data[pos]) + + uint64(data[pos+1])<<8 + + uint64(data[pos+2])<<16 + return sqltypes.MakeTrusted(querypb.Type_UINT24, + strconv.AppendUint(nil, val, 10)), 3, nil + case TypeLong: + val := binary.LittleEndian.Uint32(data[pos : pos+4]) + if sqltypes.IsSigned(styp) { + return sqltypes.MakeTrusted(querypb.Type_INT32, + strconv.AppendInt(nil, int64(int32(val)), 10)), 4, nil + } + return sqltypes.MakeTrusted(querypb.Type_UINT32, + strconv.AppendUint(nil, uint64(val), 10)), 4, nil + case TypeTimestamp: + val := binary.LittleEndian.Uint32(data[pos : pos+4]) + return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + strconv.AppendUint(nil, uint64(val), 10)), 4, nil + case TypeLongLong: + val := binary.LittleEndian.Uint64(data[pos : pos+8]) + if sqltypes.IsSigned(styp) { + return sqltypes.MakeTrusted(querypb.Type_INT64, + strconv.AppendInt(nil, int64(val), 10)), 8, nil + } + return sqltypes.MakeTrusted(querypb.Type_UINT64, + strconv.AppendUint(nil, val, 10)), 8, nil + case TypeDate, TypeNewDate: + val := uint32(data[pos]) + + uint32(data[pos+1])<<8 + + uint32(data[pos+2])<<16 + day := val & 31 + month := val >> 5 & 15 + year := val >> 9 + return sqltypes.MakeTrusted(querypb.Type_DATE, + []byte(fmt.Sprintf("%04d-%02d-%02d", year, month, day))), 3, nil + case TypeTime: + val := binary.LittleEndian.Uint32(data[pos : pos+4]) + hour := val / 10000 + minute := (val % 10000) / 100 + second := val % 100 + return sqltypes.MakeTrusted(querypb.Type_TIME, + []byte(fmt.Sprintf("%02d:%02d:%02d", hour, minute, second))), 4, nil + case TypeDateTime: + val := binary.LittleEndian.Uint64(data[pos : pos+8]) + d := val / 1000000 + t := val % 1000000 + year := d / 10000 + month := (d % 10000) / 100 + day := d % 100 + hour := t / 10000 + minute := (t % 10000) / 100 + second := t % 100 + return sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte(fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, minute, second))), 8, nil + case TypeVarchar: + // Length is encoded in 1 
or 2 bytes. + if metadata > 255 { + l := int(uint64(data[pos]) | + uint64(data[pos+1])<<8) + return sqltypes.MakeTrusted(querypb.Type_VARCHAR, + data[pos+2:pos+2+l]), l + 2, nil + } + l := int(data[pos]) + return sqltypes.MakeTrusted(querypb.Type_VARCHAR, + data[pos+1:pos+1+l]), l + 1, nil + case TypeBit: + // The contents is just the bytes, quoted. + nbits := ((metadata >> 8) * 8) + (metadata & 0xFF) + l := (int(nbits) + 7) / 8 + return sqltypes.MakeTrusted(querypb.Type_BIT, + data[pos:pos+l]), l, nil + case TypeTimestamp2: + second := binary.LittleEndian.Uint32(data[pos : pos+4]) + switch metadata { + case 1: + decimals := int(data[pos+4]) + return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v.%01d", second, decimals))), 5, nil + case 2: + decimals := int(data[pos+4]) + return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v.%02d", second, decimals))), 5, nil + case 3: + decimals := int(data[pos+4]) + + int(data[pos+5])<<8 + return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v.%03d", second, decimals))), 6, nil + case 4: + decimals := int(data[pos+4]) + + int(data[pos+5])<<8 + return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v.%04d", second, decimals))), 6, nil + case 5: + decimals := int(data[pos+4]) + + int(data[pos+5])<<8 + + int(data[pos+6])<<16 + return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v.%05d", second, decimals))), 7, nil + case 6: + decimals := int(data[pos+4]) + + int(data[pos+5])<<8 + + int(data[pos+6])<<16 + return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v.%.6d", second, decimals))), 7, nil + } + return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + strconv.AppendUint(nil, uint64(second), 10)), 4, nil + case TypeDateTime2: + ymdhms := (uint64(data[pos]) | + uint64(data[pos+1])<<8 | + uint64(data[pos+2])<<16 | + uint64(data[pos+3])<<24 | + uint64(data[pos+4])<<32) - uint64(0x8000000000) + ymd := 
ymdhms >> 17 + ym := ymd >> 5 + hms := ymdhms % (1 << 17) + + day := ymd % (1 << 5) + month := ym % 13 + year := ym / 13 + + second := hms % (1 << 6) + minute := (hms >> 6) % (1 << 6) + hour := hms >> 12 + + datetime := fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, minute, second) + + switch metadata { + case 1: + decimals := int(data[pos+5]) + return sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte(fmt.Sprintf("%v.%01d", datetime, decimals))), 6, nil + case 2: + decimals := int(data[pos+5]) + return sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte(fmt.Sprintf("%v.%02d", datetime, decimals))), 6, nil + case 3: + decimals := int(data[pos+5]) + + int(data[pos+6])<<8 + return sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte(fmt.Sprintf("%v.%03d", datetime, decimals))), 7, nil + case 4: + decimals := int(data[pos+5]) + + int(data[pos+6])<<8 + return sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte(fmt.Sprintf("%v.%04d", datetime, decimals))), 7, nil + case 5: + decimals := int(data[pos+5]) + + int(data[pos+6])<<8 + + int(data[pos+7])<<16 + return sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte(fmt.Sprintf("%v.%05d", datetime, decimals))), 8, nil + case 6: + decimals := int(data[pos+5]) + + int(data[pos+6])<<8 + + int(data[pos+7])<<16 + return sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte(fmt.Sprintf("%v.%.6d", datetime, decimals))), 8, nil + } + return sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte(datetime)), 5, nil + case TypeTime2: + hms := (int64(data[pos]) | + int64(data[pos+1])<<8 | + int64(data[pos+2])<<16) - 0x800000 + sign := "" + if hms < 0 { + hms = -hms + sign = "-" + } + + fracStr := "" + switch metadata { + case 1: + frac := int(data[pos+3]) + if sign == "-" && frac != 0 { + hms-- + frac = 0x100 - frac + } + fracStr = fmt.Sprintf(".%.1d", frac/10) + case 2: + frac := int(data[pos+3]) + if sign == "-" && frac != 0 { + hms-- + frac = 0x100 - frac + } + fracStr = fmt.Sprintf(".%.2d", frac) + case 3: + 
frac := int(data[pos+3]) | + int(data[pos+4])<<8 + if sign == "-" && frac != 0 { + hms-- + frac = 0x10000 - frac + } + fracStr = fmt.Sprintf(".%.3d", frac/10) + case 4: + frac := int(data[pos+3]) | + int(data[pos+4])<<8 + if sign == "-" && frac != 0 { + hms-- + frac = 0x10000 - frac + } + fracStr = fmt.Sprintf(".%.4d", frac) + case 5: + frac := int(data[pos+3]) | + int(data[pos+4])<<8 | + int(data[pos+5])<<16 + if sign == "-" && frac != 0 { + hms-- + frac = 0x1000000 - frac + } + fracStr = fmt.Sprintf(".%.5d", frac/10) + case 6: + frac := int(data[pos+3]) | + int(data[pos+4])<<8 | + int(data[pos+5])<<16 + if sign == "-" && frac != 0 { + hms-- + frac = 0x1000000 - frac + } + fracStr = fmt.Sprintf(".%.6d", frac) + } + + hour := (hms >> 12) % (1 << 10) + minute := (hms >> 6) % (1 << 6) + second := hms % (1 << 6) + return sqltypes.MakeTrusted(querypb.Type_TIME, + []byte(fmt.Sprintf("%v%02d:%02d:%02d%v", sign, hour, minute, second, fracStr))), 3 + (int(metadata)+1)/2, nil + + default: + return sqltypes.NULL, 0, fmt.Errorf("Unsupported type %v", typ) + } +} + +// Rows implements BinlogEvent.TableMap(). 
+// +// Expected format (L = total length of event data): +// # bytes field +// 4/6 table id +// 2 flags +// -- if version == 2 +// 2 extra data length edl +// edl extra data +// -- endif +// number of columns (var-len encoded) +// identify bitmap +// data bitmap +// -- for each row +// null bitmap for identify for present rows +// values for each identify field +// null bitmap for data for present rows +// values for each data field +// -- +func (ev binlogEvent) Rows(f BinlogFormat, tm *TableMap) (Rows, error) { + typ := ev.Type() + data := ev.Bytes()[f.HeaderLength:] + hasIdentify := typ == eUpdateRowsEventV1 || typ == eUpdateRowsEventV2 || + typ == eDeleteRowsEventV1 || typ == eDeleteRowsEventV2 + hasData := typ == eWriteRowsEventV1 || typ == eWriteRowsEventV2 || + typ == eUpdateRowsEventV1 || typ == eUpdateRowsEventV2 + + result := Rows{} + pos := 6 + if f.HeaderSize(typ) == 6 { + pos = 4 + } + result.Flags = binary.LittleEndian.Uint16(data[pos : pos+2]) + pos += 2 + + // version=2 have extra data here. + if typ == eWriteRowsEventV2 || typ == eUpdateRowsEventV2 || typ == eDeleteRowsEventV2 { + // This extraDataLength contains the 2 bytes length. + extraDataLength := binary.LittleEndian.Uint16(data[pos : pos+2]) + pos += int(extraDataLength) + } + + // FIXME(alainjobart) this is var len encoded. + columnCount := int(data[pos]) + pos++ + + numIdentifyColumns := 0 + numDataColumns := 0 + + if hasIdentify { + // Bitmap of the columns used for identify. + result.IdentifyColumns, pos = newBitmap(data, pos, columnCount) + numIdentifyColumns = result.IdentifyColumns.BitCount() + } + + if hasData { + // Bitmap of columns that are present. + result.DataColumns, pos = newBitmap(data, pos, columnCount) + numDataColumns = result.DataColumns.BitCount() + } + + // One row at a time. + for pos < len(data) { + row := Row{} + + if hasIdentify { + // Bitmap of identify columns that are null (amongst the ones that are present). 
+ row.NullIdentifyColumns, pos = newBitmap(data, pos, numIdentifyColumns) + + // Get the identify values. + startPos := pos + valueIndex := 0 + for c := 0; c < columnCount; c++ { + if !result.IdentifyColumns.Bit(c) { + // This column is not represented. + continue + } + + if row.NullIdentifyColumns.Bit(valueIndex) { + // This column is represented, but its value is NULL. + valueIndex++ + continue + } + + // This column is represented now. We need to skip its length. + l, err := cellLength(data, pos, tm.Types[c], tm.Metadata[c]) + if err != nil { + return result, err + } + pos += l + valueIndex++ + } + row.Identify = data[startPos:pos] + } + + if hasData { + // Bitmap of columns that are null (amongst the ones that are present). + row.NullColumns, pos = newBitmap(data, pos, numDataColumns) + + // Get the values. + startPos := pos + valueIndex := 0 + for c := 0; c < columnCount; c++ { + if !result.DataColumns.Bit(c) { + // This column is not represented. + continue + } + + if row.NullColumns.Bit(valueIndex) { + // This column is represented, but its value is NULL. + valueIndex++ + continue + } + + // This column is represented now. We need to skip its length. + l, err := cellLength(data, pos, tm.Types[c], tm.Metadata[c]) + if err != nil { + return result, err + } + pos += l + valueIndex++ + } + row.Data = data[startPos:pos] + } + + result.Rows = append(result.Rows, row) + } + + return result, nil +} + +// StringValuesForTests is a helper method to return the string value +// of all columns in a row in a Row. Only use it in tests, as the +// returned values cannot be interpreted correctly without the schema. +// We assume everything is unsigned in this method. 
+func (rs *Rows) StringValuesForTests(tm *TableMap, rowIndex int) ([]string, error) { + var result []string + + valueIndex := 0 + data := rs.Rows[rowIndex].Data + pos := 0 + for c := 0; c < rs.DataColumns.Count(); c++ { + if !rs.DataColumns.Bit(c) { + continue + } + + if rs.Rows[rowIndex].NullColumns.Bit(valueIndex) { + // This column is represented, but its value is NULL. + result = append(result, "NULL") + valueIndex++ + continue + } + + // We have real data + value, l, err := CellValue(data, pos, tm.Types[c], tm.Metadata[c], querypb.Type_UINT64) + if err != nil { + return nil, err + } + result = append(result, value.String()) + pos += l + valueIndex++ + } + + return result, nil +} + +// StringIdentifiesForTests is a helper method to return the string +// identify of all columns in a row in a Row. Only use it in tests, as the +// returned values cannot be interpreted correctly without the schema. +// We assume everything is unsigned in this method. +func (rs *Rows) StringIdentifiesForTests(tm *TableMap, rowIndex int) ([]string, error) { + var result []string + + valueIndex := 0 + data := rs.Rows[rowIndex].Identify + pos := 0 + for c := 0; c < rs.IdentifyColumns.Count(); c++ { + if !rs.IdentifyColumns.Bit(c) { + continue + } + + if rs.Rows[rowIndex].NullIdentifyColumns.Bit(valueIndex) { + // This column is represented, but its value is NULL. 
+ result = append(result, "NULL") + valueIndex++ + continue + } + + // We have real data + value, l, err := CellValue(data, pos, tm.Types[c], tm.Metadata[c], querypb.Type_UINT64) + if err != nil { + return nil, err + } + result = append(result, value.String()) + pos += l + valueIndex++ + } + + return result, nil +} diff --git a/go/mysqlconn/replication/binlog_event_rbr_test.go b/go/mysqlconn/replication/binlog_event_rbr_test.go new file mode 100644 index 00000000000..78d0dc708ab --- /dev/null +++ b/go/mysqlconn/replication/binlog_event_rbr_test.go @@ -0,0 +1,393 @@ +package replication + +import ( + "bytes" + "fmt" + "testing" + + "github.com/youtube/vitess/go/sqltypes" + querypb "github.com/youtube/vitess/go/vt/proto/query" +) + +func TestCellLengthAndData(t *testing.T) { + testcases := []struct { + typ byte + metadata uint16 + styp querypb.Type + data []byte + out sqltypes.Value + }{{ + typ: TypeTiny, + styp: querypb.Type_UINT8, + data: []byte{0x82}, + out: sqltypes.MakeTrusted(querypb.Type_UINT8, + []byte("130")), + }, { + typ: TypeTiny, + styp: querypb.Type_INT8, + data: []byte{0xfe}, + out: sqltypes.MakeTrusted(querypb.Type_INT8, + []byte("-2")), + }, { + typ: TypeYear, + data: []byte{0x82}, + out: sqltypes.MakeTrusted(querypb.Type_YEAR, + []byte("2030")), + }, { + typ: TypeShort, + styp: querypb.Type_UINT16, + data: []byte{0x82, 0x81}, + out: sqltypes.MakeTrusted(querypb.Type_UINT16, + []byte(fmt.Sprintf("%v", 0x8182))), + }, { + typ: TypeShort, + styp: querypb.Type_INT16, + data: []byte{0xfe, 0xff}, + out: sqltypes.MakeTrusted(querypb.Type_INT16, + []byte(fmt.Sprintf("%v", -1-int32(0x0001)))), + }, { + typ: TypeInt24, + styp: querypb.Type_UINT24, + data: []byte{0x83, 0x82, 0x81}, + out: sqltypes.MakeTrusted(querypb.Type_UINT24, + []byte(fmt.Sprintf("%v", 0x818283))), + }, { + typ: TypeInt24, + styp: querypb.Type_INT24, + data: []byte{0xfd, 0xfe, 0xff}, + out: sqltypes.MakeTrusted(querypb.Type_INT24, + []byte(fmt.Sprintf("%v", -1-int32(0x000102)))), + }, { + 
typ: TypeLong, + styp: querypb.Type_UINT32, + data: []byte{0x84, 0x83, 0x82, 0x81}, + out: sqltypes.MakeTrusted(querypb.Type_UINT32, + []byte(fmt.Sprintf("%v", 0x81828384))), + }, { + typ: TypeLong, + styp: querypb.Type_INT32, + data: []byte{0xfc, 0xfd, 0xfe, 0xff}, + out: sqltypes.MakeTrusted(querypb.Type_INT32, + []byte(fmt.Sprintf("%v", -1-int32(0x00010203)))), + }, { + typ: TypeTimestamp, + data: []byte{0x84, 0x83, 0x82, 0x81}, + out: sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v", 0x81828384))), + }, { + typ: TypeLongLong, + styp: querypb.Type_UINT64, + data: []byte{0x88, 0x87, 0x86, 0x85, 0x84, 0x83, 0x82, 0x81}, + out: sqltypes.MakeTrusted(querypb.Type_UINT64, + []byte(fmt.Sprintf("%v", uint64(0x8182838485868788)))), + }, { + typ: TypeLongLong, + styp: querypb.Type_INT64, + data: []byte{0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff}, + out: sqltypes.MakeTrusted(querypb.Type_INT64, + []byte(fmt.Sprintf("%v", -1-int64(0x0001020304050607)))), + }, { + typ: TypeDate, + // 2010 << 9 + 10 << 5 + 3 = 1029443 = 0x0fb543 + data: []byte{0x43, 0xb5, 0x0f}, + out: sqltypes.MakeTrusted(querypb.Type_DATE, + []byte("2010-10-03")), + }, { + typ: TypeNewDate, + // 2010 << 9 + 10 << 5 + 3 = 1029443 = 0x0fb543 + data: []byte{0x43, 0xb5, 0x0f}, + out: sqltypes.MakeTrusted(querypb.Type_DATE, + []byte("2010-10-03")), + }, { + typ: TypeTime, + // 154532 = 0x00025ba4 + data: []byte{0xa4, 0x5b, 0x02, 0x00}, + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("15:45:32")), + }, { + typ: TypeDateTime, + // 19840304154532 = 0x120b6e4807a4 + data: []byte{0xa4, 0x07, 0x48, 0x6e, 0x0b, 0x12, 0x00, 0x00}, + out: sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte("1984-03-04 15:45:32")), + }, { + typ: TypeVarchar, + metadata: 20, // one byte length encoding + data: []byte{3, 'a', 'b', 'c'}, + out: sqltypes.MakeTrusted(querypb.Type_VARCHAR, + []byte("abc")), + }, { + typ: TypeVarchar, + metadata: 384, // two bytes length encoding + data: []byte{3, 0, 'a', 'b', 
'c'}, + out: sqltypes.MakeTrusted(querypb.Type_VARCHAR, + []byte("abc")), + }, { + typ: TypeBit, + metadata: 0x0107, + data: []byte{0x3, 0x1}, + out: sqltypes.MakeTrusted(querypb.Type_BIT, + []byte{3, 1}), + }, { + typ: TypeTimestamp2, + metadata: 0, + data: []byte{0x84, 0x83, 0x82, 0x81}, + out: sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v", 0x81828384))), + }, { + typ: TypeTimestamp2, + metadata: 1, + data: []byte{0x84, 0x83, 0x82, 0x81, 7}, + out: sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v.7", 0x81828384))), + }, { + typ: TypeTimestamp2, + metadata: 2, + data: []byte{0x84, 0x83, 0x82, 0x81, 76}, + out: sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v.76", 0x81828384))), + }, { + typ: TypeTimestamp2, + metadata: 3, + // 765 = 0x02fd + data: []byte{0x84, 0x83, 0x82, 0x81, 0xfd, 0x02}, + out: sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v.765", 0x81828384))), + }, { + typ: TypeTimestamp2, + metadata: 4, + // 7654 = 0x1de6 + data: []byte{0x84, 0x83, 0x82, 0x81, 0xe6, 0x1d}, + out: sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v.7654", 0x81828384))), + }, { + typ: TypeTimestamp2, + metadata: 5, + // 76543 = 0x012aff + data: []byte{0x84, 0x83, 0x82, 0x81, 0xff, 0x2a, 0x01}, + out: sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v.76543", 0x81828384))), + }, { + typ: TypeTimestamp2, + metadata: 6, + // 765432 = 0x0badf8 + data: []byte{0x84, 0x83, 0x82, 0x81, 0xf8, 0xad, 0x0b}, + out: sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + []byte(fmt.Sprintf("%v.765432", 0x81828384))), + }, { + typ: TypeDateTime2, + metadata: 0, + // (2012 * 13 + 6) << 22 + 21 << 17 + 15 << 12 + 45 << 6 + 17) + // = 109734198097 = 0x198caafb51 + // Then have to add 0x8000000000 = 0x998caafb51 + data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99}, + out: sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte("2012-06-21 15:45:17")), + }, { + typ: TypeDateTime2, + 
metadata: 1, + data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 7}, + out: sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte("2012-06-21 15:45:17.7")), + }, { + typ: TypeDateTime2, + metadata: 2, + data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 76}, + out: sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte("2012-06-21 15:45:17.76")), + }, { + typ: TypeDateTime2, + metadata: 3, + // 765 = 0x02fd + data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 0xfd, 0x02}, + out: sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte("2012-06-21 15:45:17.765")), + }, { + typ: TypeDateTime2, + metadata: 4, + // 7654 = 0x1de6 + data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 0xe6, 0x1d}, + out: sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte("2012-06-21 15:45:17.7654")), + }, { + typ: TypeDateTime2, + metadata: 5, + // 76543 = 0x012aff + data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 0xff, 0x2a, 0x01}, + out: sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte("2012-06-21 15:45:17.76543")), + }, { + typ: TypeDateTime2, + metadata: 6, + // 765432 = 0x0badf8 + data: []byte{0x51, 0xfb, 0xaa, 0x8c, 0x99, 0xf8, 0xad, 0x0b}, + out: sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte("2012-06-21 15:45:17.765432")), + }, { + // This first set of tests is from a comment in + // sql-common/my_time.c: + // + // Disk value intpart frac Time value Memory value + // 800000.00 0 0 00:00:00.00 0000000000.000000 + // 7FFFFF.FF -1 255 -00:00:00.01 FFFFFFFFFF.FFD8F0 + // 7FFFFF.9D -1 99 -00:00:00.99 FFFFFFFFFF.F0E4D0 + // 7FFFFF.00 -1 0 -00:00:01.00 FFFFFFFFFF.000000 + // 7FFFFE.FF -1 255 -00:00:01.01 FFFFFFFFFE.FFD8F0 + // 7FFFFE.F6 -2 246 -00:00:01.10 FFFFFFFFFE.FE7960 + typ: TypeTime2, + metadata: 2, + data: []byte{0x00, 0x00, 0x80, 0x00}, + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("00:00:00.00")), + }, { + typ: TypeTime2, + metadata: 2, + data: []byte{0xff, 0xff, 0x7f, 0xff}, + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:00.01")), + }, { + typ: TypeTime2, + metadata: 2, + data: 
[]byte{0xff, 0xff, 0x7f, 0x9d}, + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:00.99")), + }, { + typ: TypeTime2, + metadata: 2, + data: []byte{0xff, 0xff, 0x7f, 0x00}, + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:01.00")), + }, { + typ: TypeTime2, + metadata: 2, + data: []byte{0xfe, 0xff, 0x7f, 0xff}, + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:01.01")), + }, { + typ: TypeTime2, + metadata: 2, + data: []byte{0xfe, 0xff, 0x7f, 0xf6}, + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:01.10")), + }, { + // Similar tests for 4 decimals. + typ: TypeTime2, + metadata: 4, + data: []byte{0x00, 0x00, 0x80, 0x00, 0x00}, + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("00:00:00.0000")), + }, { + typ: TypeTime2, + metadata: 4, + data: []byte{0xff, 0xff, 0x7f, 0xff, 0xff}, + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:00.0001")), + }, { + typ: TypeTime2, + metadata: 4, + data: []byte{0xff, 0xff, 0x7f, 0x9d, 0xff}, + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:00.0099")), + }, { + typ: TypeTime2, + metadata: 4, + data: []byte{0xff, 0xff, 0x7f, 0x00, 0x00}, + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:01.0000")), + }, { + typ: TypeTime2, + metadata: 4, + data: []byte{0xfe, 0xff, 0x7f, 0xff, 0xff}, + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:01.0001")), + }, { + typ: TypeTime2, + metadata: 4, + data: []byte{0xfe, 0xff, 0x7f, 0xf6, 0xff}, + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:01.0010")), + }, { + // Similar tests for 6 decimals. 
+ typ: TypeTime2, + metadata: 6, + data: []byte{0x00, 0x00, 0x80, 0x00, 0x00, 0x00}, + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("00:00:00.000000")), + }, { + typ: TypeTime2, + metadata: 6, + data: []byte{0xff, 0xff, 0x7f, 0xff, 0xff, 0xff}, + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:00.000001")), + }, { + typ: TypeTime2, + metadata: 6, + data: []byte{0xff, 0xff, 0x7f, 0x9d, 0xff, 0xff}, + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:00.000099")), + }, { + typ: TypeTime2, + metadata: 6, + data: []byte{0xff, 0xff, 0x7f, 0x00, 0x00, 0x00}, + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:01.000000")), + }, { + typ: TypeTime2, + metadata: 6, + data: []byte{0xfe, 0xff, 0x7f, 0xff, 0xff, 0xff}, + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:01.000001")), + }, { + typ: TypeTime2, + metadata: 6, + data: []byte{0xfe, 0xff, 0x7f, 0xf6, 0xff, 0xff}, + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("-00:00:01.000010")), + }, { + // Few more tests. + typ: TypeTime2, + metadata: 0, + data: []byte{0x00, 0x00, 0x80}, + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("00:00:00")), + }, { + typ: TypeTime2, + metadata: 1, + data: []byte{0x01, 0x00, 0x80, 0x0a}, + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("00:00:01.1")), + }, { + typ: TypeTime2, + metadata: 2, + data: []byte{0x01, 0x00, 0x80, 0x0a}, + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("00:00:01.10")), + }, { + typ: TypeTime2, + metadata: 0, + // 15 << 12 + 34 << 6 + 54 = 63670 = 0x00f8b6 + // and need to add 0x800000 + data: []byte{0xb6, 0xf8, 0x80}, + out: sqltypes.MakeTrusted(querypb.Type_TIME, + []byte("15:34:54")), + }} + + for _, tcase := range testcases { + // Copy the data into a larger buffer (one extra byte + // on both sides), so we make sure the 'pos' field works. + padded := make([]byte, len(tcase.data)+2) + copy(padded[1:], tcase.data) + + // Test cellLength. 
+ l, err := cellLength(padded, 1, tcase.typ, tcase.metadata) + if err != nil || l != len(tcase.data) { + t.Errorf("testcase cellLength(%v,%v) returned unexpected result: %v %v", tcase.typ, tcase.data, l, err) + } + + // Test CellValue. + out, l, err := CellValue(padded, 1, tcase.typ, tcase.metadata, tcase.styp) + if err != nil || l != len(tcase.data) || out.Type() != tcase.out.Type() || bytes.Compare(out.Raw(), tcase.out.Raw()) != 0 { + t.Errorf("testcase cellData(%v,%v) returned unexpected result: %v(%v) %v %v, was expecting %v(%v) %v ", tcase.typ, tcase.data, out, out.Type(), l, err, tcase.out, tcase.out.Type(), len(tcase.data)) + } + } +} From 2f46b73c2c21c0c674cd552cc5c3661360bfcc33 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 10 Mar 2017 17:25:57 -0800 Subject: [PATCH 098/108] Adding float/double support. --- go/mysqlconn/replication/binlog_event_rbr.go | 15 +++++++++++++-- go/mysqlconn/replication/binlog_event_rbr_test.go | 12 ++++++++++++ 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/go/mysqlconn/replication/binlog_event_rbr.go b/go/mysqlconn/replication/binlog_event_rbr.go index 021341dc49f..76f95bd639f 100644 --- a/go/mysqlconn/replication/binlog_event_rbr.go +++ b/go/mysqlconn/replication/binlog_event_rbr.go @@ -3,6 +3,7 @@ package replication import ( "encoding/binary" "fmt" + "math" "strconv" "github.com/youtube/vitess/go/sqltypes" @@ -180,9 +181,9 @@ func cellLength(data []byte, pos int, typ byte, metadata uint16) (int, error) { return 2, nil case TypeInt24: return 3, nil - case TypeLong, TypeTimestamp: + case TypeLong, TypeFloat, TypeTimestamp: return 4, nil - case TypeLongLong: + case TypeLongLong, TypeDouble: return 8, nil case TypeDate, TypeNewDate: return 3, nil @@ -270,6 +271,16 @@ func CellValue(data []byte, pos int, typ byte, metadata uint16, styp querypb.Typ } return sqltypes.MakeTrusted(querypb.Type_UINT32, strconv.AppendUint(nil, uint64(val), 10)), 4, nil + case TypeFloat: + val := 
binary.LittleEndian.Uint32(data[pos : pos+4]) + fval := math.Float32frombits(val) + return sqltypes.MakeTrusted(querypb.Type_FLOAT32, + strconv.AppendFloat(nil, float64(fval), 'E', -1, 32)), 4, nil + case TypeDouble: + val := binary.LittleEndian.Uint64(data[pos : pos+8]) + fval := math.Float64frombits(val) + return sqltypes.MakeTrusted(querypb.Type_FLOAT64, + strconv.AppendFloat(nil, fval, 'E', -1, 64)), 8, nil case TypeTimestamp: val := binary.LittleEndian.Uint32(data[pos : pos+4]) return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, diff --git a/go/mysqlconn/replication/binlog_event_rbr_test.go b/go/mysqlconn/replication/binlog_event_rbr_test.go index 78d0dc708ab..d02dba54d80 100644 --- a/go/mysqlconn/replication/binlog_event_rbr_test.go +++ b/go/mysqlconn/replication/binlog_event_rbr_test.go @@ -69,6 +69,18 @@ func TestCellLengthAndData(t *testing.T) { data: []byte{0xfc, 0xfd, 0xfe, 0xff}, out: sqltypes.MakeTrusted(querypb.Type_INT32, []byte(fmt.Sprintf("%v", -1-int32(0x00010203)))), + }, { + // 3.1415927E+00 = 0x40490fdb + typ: TypeFloat, + data: []byte{0xdb, 0x0f, 0x49, 0x40}, + out: sqltypes.MakeTrusted(querypb.Type_FLOAT32, + []byte("3.1415927E+00")), + }, { + // 3.1415926535E+00 = 0x400921fb54411744 + typ: TypeDouble, + data: []byte{0x44, 0x17, 0x41, 0x54, 0xfb, 0x21, 0x09, 0x40}, + out: sqltypes.MakeTrusted(querypb.Type_FLOAT64, + []byte("3.1415926535E+00")), }, { typ: TypeTimestamp, data: []byte{0x84, 0x83, 0x82, 0x81}, From e959ccc08b78ef1e9c696a9e4991d8dd8e2c0e48 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 10 Mar 2017 18:32:48 -0800 Subject: [PATCH 099/108] Adding JSON, Enum, Set, String. 
--- go/mysqlconn/replication/binlog_event_rbr.go | 79 ++++++++++++++++++- .../replication/binlog_event_rbr_test.go | 30 +++++++ 2 files changed, 108 insertions(+), 1 deletion(-) diff --git a/go/mysqlconn/replication/binlog_event_rbr.go b/go/mysqlconn/replication/binlog_event_rbr.go index 76f95bd639f..a4bb21f3420 100644 --- a/go/mysqlconn/replication/binlog_event_rbr.go +++ b/go/mysqlconn/replication/binlog_event_rbr.go @@ -218,6 +218,24 @@ func cellLength(data []byte, pos int, typ byte, metadata uint16) (int, error) { // metadata has number of decimals. One byte encodes // two decimals. return 3 + (int(metadata)+1)/2, nil + case TypeJSON: + // length in encoded in 'meta' bytes, but at least 2, + // and the value cannot be > 64k, so just read 2 bytes. + // (meta also should have '2' as value). + // (this weird logic is what event printing does). + l := int(uint64(data[pos]) | + uint64(data[pos+1])<<8) + return l + int(metadata), nil + case TypeEnum, TypeSet: + return int(metadata & 0xff), nil + case TypeString: + // This may do String, Enum, and Set. The type is in + // metadata. If it's a string, then there will be more bits. + t := metadata >> 8 + if t == TypeEnum || t == TypeSet { + return int(metadata & 0xff), nil + } + return int((((metadata >> 4) & 0x300) ^ 0x300) + (metadata & 0xff)), nil default: return 0, fmt.Errorf("Unsupported type %v (data: %v pos: %v)", typ, data, pos) @@ -496,8 +514,67 @@ func CellValue(data []byte, pos int, typ byte, metadata uint16, styp querypb.Typ return sqltypes.MakeTrusted(querypb.Type_TIME, []byte(fmt.Sprintf("%v%02d:%02d:%02d%v", sign, hour, minute, second, fracStr))), 3 + (int(metadata)+1)/2, nil + case TypeJSON: + // length in encoded in 'meta' bytes, but at least 2, + // and the value cannot be > 64k, so just read 2 bytes. + // (meta also should have '2' as value). + // (this weird logic is what event printing does). 
+ l := int(uint64(data[pos]) | + uint64(data[pos+1])<<8) + return sqltypes.MakeTrusted(querypb.Type_JSON, + data[pos+int(metadata):pos+int(metadata)+l]), l + int(metadata), nil + + case TypeEnum: + switch metadata & 0xff { + case 1: + // One byte storage. + return sqltypes.MakeTrusted(querypb.Type_ENUM, + strconv.AppendUint(nil, uint64(data[pos]), 10)), 1, nil + case 2: + // Two bytes storage. + val := binary.LittleEndian.Uint16(data[pos : pos+2]) + return sqltypes.MakeTrusted(querypb.Type_ENUM, + strconv.AppendUint(nil, uint64(val), 10)), 2, nil + default: + return sqltypes.NULL, 0, fmt.Errorf("unexpected enum size: %v", metadata&0xff) + } + + case TypeSet: + l := int(metadata & 0xff) + return sqltypes.MakeTrusted(querypb.Type_SET, + data[pos:pos+l]), l, nil + + case TypeString: + // This may do String, Enum, and Set. The type is in + // metadata. If it's a string, then there will be more bits. + t := metadata >> 8 + if t == TypeEnum { + switch metadata & 0xff { + case 1: + // One byte storage. + return sqltypes.MakeTrusted(querypb.Type_ENUM, + strconv.AppendUint(nil, uint64(data[pos]), 10)), 1, nil + case 2: + // Two bytes storage. + val := binary.LittleEndian.Uint16(data[pos : pos+2]) + return sqltypes.MakeTrusted(querypb.Type_ENUM, + strconv.AppendUint(nil, uint64(val), 10)), 2, nil + default: + return sqltypes.NULL, 0, fmt.Errorf("unexpected enum size: %v", metadata&0xff) + } + } + if t == TypeSet { + l := int(metadata & 0xff) + return sqltypes.MakeTrusted(querypb.Type_BIT, + data[pos:pos+l]), l, nil + } + // This is a real string. The length is weird. 
+ l := int((((metadata >> 4) & 0x300) ^ 0x300) + (metadata & 0xff)) + return sqltypes.MakeTrusted(querypb.Type_VARCHAR, + data[pos:pos+l]), l, nil + default: - return sqltypes.NULL, 0, fmt.Errorf("Unsupported type %v", typ) + return sqltypes.NULL, 0, fmt.Errorf("unsupported type %v", typ) } } diff --git a/go/mysqlconn/replication/binlog_event_rbr_test.go b/go/mysqlconn/replication/binlog_event_rbr_test.go index d02dba54d80..325dd072d29 100644 --- a/go/mysqlconn/replication/binlog_event_rbr_test.go +++ b/go/mysqlconn/replication/binlog_event_rbr_test.go @@ -382,6 +382,36 @@ func TestCellLengthAndData(t *testing.T) { data: []byte{0xb6, 0xf8, 0x80}, out: sqltypes.MakeTrusted(querypb.Type_TIME, []byte("15:34:54")), + }, { + typ: TypeJSON, + metadata: 2, + data: []byte{0x03, 0x00, 'a', 'b', 'c'}, + out: sqltypes.MakeTrusted(querypb.Type_JSON, + []byte("abc")), + }, { + typ: TypeEnum, + metadata: 1, + data: []byte{0x03}, + out: sqltypes.MakeTrusted(querypb.Type_ENUM, + []byte("3")), + }, { + typ: TypeEnum, + metadata: 2, + data: []byte{0x01, 0x02}, + out: sqltypes.MakeTrusted(querypb.Type_ENUM, + []byte(fmt.Sprintf("%v", 0x0201))), + }, { + typ: TypeSet, + metadata: 2, + data: []byte{0x01, 0x02}, + out: sqltypes.MakeTrusted(querypb.Type_SET, + []byte{0x01, 0x02}), + }, { + typ: TypeString, + metadata: TypeString<<8 | 5, + data: []byte{0x01, 0x02, 0x03, 0x04, 0x05}, + out: sqltypes.MakeTrusted(querypb.Type_VARCHAR, + []byte{0x01, 0x02, 0x03, 0x04, 0x05}), }} for _, tcase := range testcases { From f67cfc6fd31fe94592976d960643c7fb42e311b1 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 10 Mar 2017 19:44:36 -0800 Subject: [PATCH 100/108] NewDecimal support. 
--- go/mysqlconn/replication/binlog_event_rbr.go | 158 ++++++++++++++++++ .../replication/binlog_event_rbr_test.go | 14 ++ 2 files changed, 172 insertions(+) diff --git a/go/mysqlconn/replication/binlog_event_rbr.go b/go/mysqlconn/replication/binlog_event_rbr.go index a4bb21f3420..8c606a15ae5 100644 --- a/go/mysqlconn/replication/binlog_event_rbr.go +++ b/go/mysqlconn/replication/binlog_event_rbr.go @@ -169,6 +169,8 @@ func metadataWrite(data []byte, pos int, typ byte, value uint16) int { } } +var dig2bytes = []int{0, 1, 1, 2, 2, 3, 3, 4, 4, 4} + // cellLength returns the new position after the field with the given // type is read. func cellLength(data []byte, pos int, typ byte, metadata uint16) (int, error) { @@ -226,6 +228,32 @@ func cellLength(data []byte, pos int, typ byte, metadata uint16) (int, error) { l := int(uint64(data[pos]) | uint64(data[pos+1])<<8) return l + int(metadata), nil + case TypeNewDecimal: + precision := int(metadata >> 8) + scale := int(metadata & 0xff) + // Example: + // NNNNNNNNNNNN.MMMMMM + // 12 bytes 6 bytes + // precision is 18 + // scale is 6 + // storage is done by groups of 9 digits: + // - 32 bits are used to store groups of 9 digits. + // - any leftover digit is stored in: + // - 1 byte for 1 and 2 digits + // - 2 bytes for 3 and 4 digits + // - 3 bytes for 5 and 6 digits + // - 4 bytes for 7 and 8 digits (would also work for 9) + // both sides of the dot are stored separately. + // In this example, we'd have: + // - 2 bytes to store the first 3 full digits. + // - 4 bytes to store the next 9 full digits. + // - 3 bytes to store the 6 fractional digits. 
+ intg := precision - scale + intg0 := intg / 9 + frac0 := scale / 9 + intg0x := intg - intg0*9 + frac0x := scale - frac0*9 + return intg0*4 + dig2bytes[intg0x] + frac0*4 + dig2bytes[frac0x], nil case TypeEnum, TypeSet: return int(metadata & 0xff), nil case TypeString: @@ -524,6 +552,136 @@ func CellValue(data []byte, pos int, typ byte, metadata uint16, styp querypb.Typ return sqltypes.MakeTrusted(querypb.Type_JSON, data[pos+int(metadata):pos+int(metadata)+l]), l + int(metadata), nil + case TypeNewDecimal: + precision := int(metadata >> 8) // total digits number + scale := int(metadata & 0xff) // number of fractional digits + intg := precision - scale // number of full digits + intg0 := intg / 9 // number of 32-bits digits + intg0x := intg - intg0*9 // leftover full digits + frac0 := scale / 9 // number of 32 bits fractionals + frac0x := scale - frac0*9 // leftover fractionals + + l := intg0*4 + dig2bytes[intg0x] + frac0*4 + dig2bytes[frac0x] + + // Copy the data so we can change it. Otherwise + // decoding is just too hard. + d := make([]byte, l) + copy(d, data[pos:pos+l]) + + result := []byte{} + isNegative := (d[0] & 0x80) == 0 + d[0] ^= 0x80 // First bit is inverted. + if isNegative { + // Negative numbers are just inverted bytes. 
+ result = append(result, '-') + for i := range d { + d[i] ^= 0xff + } + } + + // first we have the leftover full digits + var val uint32 + switch dig2bytes[intg0x] { + case 0: + // nothing to do + case 1: + // one byte, up to two digits + val = uint32(d[0]) + case 2: + // two bytes, up to 4 digits + val = uint32(d[0])<<8 + + uint32(d[1]) + case 3: + // 3 bytes, up to 6 digits + val = uint32(d[0])<<16 + + uint32(d[1])<<8 + + uint32(d[2]) + case 4: + // 4 bytes, up to 8 digits (9 digits would be a full) + val = uint32(d[0])<<24 + + uint32(d[1])<<16 + + uint32(d[2])<<8 + + uint32(d[3]) + } + pos = dig2bytes[intg0x] + if val > 0 { + result = strconv.AppendUint(result, uint64(val), 10) + } + + // now the full digits, 32 bits each, 9 digits + for i := 0; i < intg0; i++ { + val = binary.BigEndian.Uint32(d[pos : pos+4]) + t := fmt.Sprintf("%9d", val) + result = append(result, []byte(t)...) + pos += 4 + } + + // now see if we have a fraction + if scale == 0 { + return sqltypes.MakeTrusted(querypb.Type_DECIMAL, + result), l, nil + } + result = append(result, '.') + + // now the full fractional digits + for i := 0; i < frac0; i++ { + val = binary.BigEndian.Uint32(d[pos : pos+4]) + t := fmt.Sprintf("%9d", val) + result = append(result, []byte(t)...) 
+ pos += 4 + } + + // then the partial fractional digits + t := "" + switch dig2bytes[frac0x] { + case 0: + // Nothing to do + return sqltypes.MakeTrusted(querypb.Type_DECIMAL, + result), l, nil + case 1: + // one byte, 1 or 2 digits + val = uint32(d[pos]) + if frac0x == 1 { + t = fmt.Sprintf("%1d", val) + } else { + t = fmt.Sprintf("%2d", val) + } + case 2: + // two bytes, 3 or 4 digits + val = uint32(d[pos])<<8 + + uint32(d[pos+1]) + if frac0x == 3 { + t = fmt.Sprintf("%3d", val) + } else { + t = fmt.Sprintf("%4d", val) + } + case 3: + // 3 bytes, 5 or 6 digits + val = uint32(d[pos])<<16 + + uint32(d[pos+1])<<8 + + uint32(d[pos+2]) + if frac0x == 5 { + t = fmt.Sprintf("%5d", val) + } else { + t = fmt.Sprintf("%6d", val) + } + case 4: + // 4 bytes, 7 or 8 digits (9 digits would be a full) + val = uint32(d[pos])<<24 + + uint32(d[pos+1])<<16 + + uint32(d[pos+2])<<8 + + uint32(d[pos+3]) + if frac0x == 7 { + t = fmt.Sprintf("%7d", val) + } else { + t = fmt.Sprintf("%8d", val) + } + } + result = append(result, []byte(t)...) + + return sqltypes.MakeTrusted(querypb.Type_DECIMAL, + result), l, nil + case TypeEnum: switch metadata & 0xff { case 1: diff --git a/go/mysqlconn/replication/binlog_event_rbr_test.go b/go/mysqlconn/replication/binlog_event_rbr_test.go index 325dd072d29..9d74acd6273 100644 --- a/go/mysqlconn/replication/binlog_event_rbr_test.go +++ b/go/mysqlconn/replication/binlog_event_rbr_test.go @@ -412,6 +412,20 @@ func TestCellLengthAndData(t *testing.T) { data: []byte{0x01, 0x02, 0x03, 0x04, 0x05}, out: sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte{0x01, 0x02, 0x03, 0x04, 0x05}), + }, { + // See strings/decimal.c function decimal2bin for why these + // values are here. 
+ typ: TypeNewDecimal, + metadata: 14<<8 | 4, + data: []byte{0x81, 0x0D, 0xFB, 0x38, 0xD2, 0x04, 0xD2}, + out: sqltypes.MakeTrusted(querypb.Type_DECIMAL, + []byte("1234567890.1234")), + }, { + typ: TypeNewDecimal, + metadata: 14<<8 | 4, + data: []byte{0x7E, 0xF2, 0x04, 0xC7, 0x2D, 0xFB, 0x2D}, + out: sqltypes.MakeTrusted(querypb.Type_DECIMAL, + []byte("-1234567890.1234")), }} for _, tcase := range testcases { From 140e9e1ed367d91edfa7fb8068e6429bd5cd86f5 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 10 Mar 2017 20:24:44 -0800 Subject: [PATCH 101/108] Blob support, fixing string. --- go/mysqlconn/replication/binlog_event_rbr.go | 94 ++++++++++++++++--- .../replication/binlog_event_rbr_test.go | 54 ++++++++++- 2 files changed, 130 insertions(+), 18 deletions(-) diff --git a/go/mysqlconn/replication/binlog_event_rbr.go b/go/mysqlconn/replication/binlog_event_rbr.go index 8c606a15ae5..7c252dcae68 100644 --- a/go/mysqlconn/replication/binlog_event_rbr.go +++ b/go/mysqlconn/replication/binlog_event_rbr.go @@ -80,7 +80,7 @@ func (ev binlogEvent) TableMap(f BinlogFormat) (*TableMap, error) { // metadataLength returns how many bytes are used for metadata, based on a type. func metadataLength(typ byte) int { switch typ { - case TypeDecimal, TypeTiny, TypeShort, TypeLong, TypeNull, TypeTimestamp, TypeLongLong, TypeInt24, TypeDate, TypeTime, TypeDateTime, TypeYear, TypeNewDate, TypeVarString: + case TypeDecimal, TypeTiny, TypeShort, TypeLong, TypeNull, TypeTimestamp, TypeLongLong, TypeInt24, TypeDate, TypeTime, TypeDateTime, TypeYear, TypeNewDate: // No data here. return 0 @@ -92,7 +92,7 @@ func metadataLength(typ byte) int { // Two bytes, Big Endian because of crazy encoding. 
return 2 - case TypeVarchar, TypeBit: + case TypeVarchar, TypeBit, TypeVarString: // Two bytes, Little Endian return 2 @@ -116,7 +116,7 @@ func metadataTotalLength(types []byte) int { func metadataRead(data []byte, pos int, typ byte) (uint16, int, error) { switch typ { - case TypeDecimal, TypeTiny, TypeShort, TypeLong, TypeNull, TypeTimestamp, TypeLongLong, TypeInt24, TypeDate, TypeTime, TypeDateTime, TypeYear, TypeNewDate, TypeVarString: + case TypeDecimal, TypeTiny, TypeShort, TypeLong, TypeNull, TypeTimestamp, TypeLongLong, TypeInt24, TypeDate, TypeTime, TypeDateTime, TypeYear, TypeNewDate: // No data here. return 0, pos, nil @@ -128,7 +128,7 @@ func metadataRead(data []byte, pos int, typ byte) (uint16, int, error) { // Two bytes, Big Endian because of crazy encoding. return uint16(data[pos])<<8 + uint16(data[pos+1]), pos + 2, nil - case TypeVarchar, TypeBit: + case TypeVarchar, TypeBit, TypeVarString: // Two bytes, Little Endian return uint16(data[pos]) + uint16(data[pos+1])<<8, pos + 2, nil @@ -142,7 +142,7 @@ func metadataRead(data []byte, pos int, typ byte) (uint16, int, error) { func metadataWrite(data []byte, pos int, typ byte, value uint16) int { switch typ { - case TypeDecimal, TypeTiny, TypeShort, TypeLong, TypeNull, TypeTimestamp, TypeLongLong, TypeInt24, TypeDate, TypeTime, TypeDateTime, TypeYear, TypeNewDate, TypeVarString: + case TypeDecimal, TypeTiny, TypeShort, TypeLong, TypeNull, TypeTimestamp, TypeLongLong, TypeInt24, TypeDate, TypeTime, TypeDateTime, TypeYear, TypeNewDate: // No data here. 
return pos @@ -157,7 +157,7 @@ func metadataWrite(data []byte, pos int, typ byte, value uint16) int { data[pos+1] = byte(value) return pos + 2 - case TypeVarchar, TypeBit: + case TypeVarchar, TypeBit, TypeVarString: // Two bytes, Little Endian data[pos] = byte(value) data[pos+1] = byte(value >> 8) @@ -193,7 +193,7 @@ func cellLength(data []byte, pos int, typ byte, metadata uint16) (int, error) { return 4, nil case TypeDateTime: return 8, nil - case TypeVarchar: + case TypeVarchar, TypeVarString: // Length is encoded in 1 or 2 bytes. if metadata > 255 { l := int(uint64(data[pos]) | @@ -256,17 +256,50 @@ func cellLength(data []byte, pos int, typ byte, metadata uint16) (int, error) { return intg0*4 + dig2bytes[intg0x] + frac0*4 + dig2bytes[frac0x], nil case TypeEnum, TypeSet: return int(metadata & 0xff), nil + case TypeTinyBlob, TypeMediumBlob, TypeLongBlob, TypeBlob: + // Only TypeBlob is used in binary logs, + // but supports others just in case. + switch metadata { + case 1: + return 1 + int(uint32(data[pos])), nil + case 2: + return 2 + int(uint32(data[pos])| + uint32(data[pos+1])<<8), nil + case 3: + return 3 + int(uint32(data[pos])| + uint32(data[pos+1])<<8| + uint32(data[pos+2])<<16), nil + case 4: + return 4 + int(uint32(data[pos])| + uint32(data[pos+1])<<8| + uint32(data[pos+2])<<16| + uint32(data[pos+3])<<24), nil + default: + return 0, fmt.Errorf("unsupported blob metadata value %v (data: %v pos: %v)", metadata, data, pos) + } case TypeString: // This may do String, Enum, and Set. The type is in // metadata. If it's a string, then there will be more bits. + // This will give us the maximum length of the field. + max := 0 t := metadata >> 8 if t == TypeEnum || t == TypeSet { - return int(metadata & 0xff), nil + max = int(metadata & 0xff) + } else { + max = int((((metadata >> 4) & 0x300) ^ 0x300) + (metadata & 0xff)) } - return int((((metadata >> 4) & 0x300) ^ 0x300) + (metadata & 0xff)), nil + + // Length is encoded in 1 or 2 bytes. 
+ if max > 255 { + l := int(uint64(data[pos]) | + uint64(data[pos+1])<<8) + return l + 2, nil + } + l := int(data[pos]) + return l + 1, nil default: - return 0, fmt.Errorf("Unsupported type %v (data: %v pos: %v)", typ, data, pos) + return 0, fmt.Errorf("unsupported type %v (data: %v pos: %v)", typ, data, pos) } } @@ -367,7 +400,7 @@ func CellValue(data []byte, pos int, typ byte, metadata uint16, styp querypb.Typ second := t % 100 return sqltypes.MakeTrusted(querypb.Type_DATETIME, []byte(fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, minute, second))), 8, nil - case TypeVarchar: + case TypeVarchar, TypeVarString: // Length is encoded in 1 or 2 bytes. if metadata > 255 { l := int(uint64(data[pos]) | @@ -702,6 +735,32 @@ func CellValue(data []byte, pos int, typ byte, metadata uint16, styp querypb.Typ return sqltypes.MakeTrusted(querypb.Type_SET, data[pos:pos+l]), l, nil + case TypeTinyBlob, TypeMediumBlob, TypeLongBlob, TypeBlob: + // Only TypeBlob is used in binary logs, + // but supports others just in case. + l := 0 + switch metadata { + case 1: + l = int(uint32(data[pos])) + case 2: + l = int(uint32(data[pos]) | + uint32(data[pos+1])<<8) + case 3: + l = int(uint32(data[pos]) | + uint32(data[pos+1])<<8 | + uint32(data[pos+2])<<16) + case 4: + l = int(uint32(data[pos]) | + uint32(data[pos+1])<<8 | + uint32(data[pos+2])<<16 | + uint32(data[pos+3])<<24) + default: + return sqltypes.NULL, 0, fmt.Errorf("unsupported blob metadata value %v (data: %v pos: %v)", metadata, data, pos) + } + pos += int(metadata) + return sqltypes.MakeTrusted(querypb.Type_VARBINARY, + data[pos:pos+l]), l + int(metadata), nil + case TypeString: // This may do String, Enum, and Set. The type is in // metadata. If it's a string, then there will be more bits. @@ -727,10 +786,17 @@ func CellValue(data []byte, pos int, typ byte, metadata uint16, styp querypb.Typ data[pos:pos+l]), l, nil } // This is a real string. The length is weird. 
- l := int((((metadata >> 4) & 0x300) ^ 0x300) + (metadata & 0xff)) + max := int((((metadata >> 4) & 0x300) ^ 0x300) + (metadata & 0xff)) + // Length is encoded in 1 or 2 bytes. + if max > 255 { + l := int(uint64(data[pos]) | + uint64(data[pos+1])<<8) + return sqltypes.MakeTrusted(querypb.Type_VARCHAR, + data[pos+2:pos+2+l]), l + 2, nil + } + l := int(data[pos]) return sqltypes.MakeTrusted(querypb.Type_VARCHAR, - data[pos:pos+l]), l, nil - + data[pos+1:pos+1+l]), l + 1, nil default: return sqltypes.NULL, 0, fmt.Errorf("unsupported type %v", typ) } diff --git a/go/mysqlconn/replication/binlog_event_rbr_test.go b/go/mysqlconn/replication/binlog_event_rbr_test.go index 9d74acd6273..9151c5e825e 100644 --- a/go/mysqlconn/replication/binlog_event_rbr_test.go +++ b/go/mysqlconn/replication/binlog_event_rbr_test.go @@ -408,10 +408,20 @@ func TestCellLengthAndData(t *testing.T) { []byte{0x01, 0x02}), }, { typ: TypeString, - metadata: TypeString<<8 | 5, - data: []byte{0x01, 0x02, 0x03, 0x04, 0x05}, + metadata: TypeString<<8 | 5, // maximum length = 5 + data: []byte{0x04, 0x01, 0x02, 0x03, 0x04}, out: sqltypes.MakeTrusted(querypb.Type_VARCHAR, - []byte{0x01, 0x02, 0x03, 0x04, 0x05}), + []byte{0x01, 0x02, 0x03, 0x04}), + }, { + // Length is encoded in 10 bits, 2 of them are in a weird place. + // In this test, we set the two high bits. + // 773 = 512 + 256 + 5 + // This requires 2 bytes to store the length. + typ: TypeString, + metadata: (TypeString<<8 ^ 0x3000) | 5, // maximum length = 773 + data: []byte{0x04, 0x00, 0x01, 0x02, 0x03, 0x04}, + out: sqltypes.MakeTrusted(querypb.Type_VARCHAR, + []byte{0x01, 0x02, 0x03, 0x04}), }, { // See strings/decimal.c function decimal2bin for why these // values are here. 
@@ -426,6 +436,42 @@ func TestCellLengthAndData(t *testing.T) { data: []byte{0x7E, 0xF2, 0x04, 0xC7, 0x2D, 0xFB, 0x2D}, out: sqltypes.MakeTrusted(querypb.Type_DECIMAL, []byte("-1234567890.1234")), + }, { + typ: TypeBlob, + metadata: 1, + data: []byte{0x3, 'a', 'b', 'c'}, + out: sqltypes.MakeTrusted(querypb.Type_VARBINARY, + []byte("abc")), + }, { + typ: TypeBlob, + metadata: 2, + data: []byte{0x3, 0x00, 'a', 'b', 'c'}, + out: sqltypes.MakeTrusted(querypb.Type_VARBINARY, + []byte("abc")), + }, { + typ: TypeBlob, + metadata: 3, + data: []byte{0x3, 0x00, 0x00, 'a', 'b', 'c'}, + out: sqltypes.MakeTrusted(querypb.Type_VARBINARY, + []byte("abc")), + }, { + typ: TypeBlob, + metadata: 4, + data: []byte{0x3, 0x00, 0x00, 0x00, 'a', 'b', 'c'}, + out: sqltypes.MakeTrusted(querypb.Type_VARBINARY, + []byte("abc")), + }, { + typ: TypeVarString, + metadata: 20, // one byte length encoding + data: []byte{3, 'a', 'b', 'c'}, + out: sqltypes.MakeTrusted(querypb.Type_VARCHAR, + []byte("abc")), + }, { + typ: TypeVarString, + metadata: 384, // two bytes length encoding + data: []byte{3, 0, 'a', 'b', 'c'}, + out: sqltypes.MakeTrusted(querypb.Type_VARCHAR, + []byte("abc")), }} for _, tcase := range testcases { @@ -437,7 +483,7 @@ func TestCellLengthAndData(t *testing.T) { // Test cellLength. l, err := cellLength(padded, 1, tcase.typ, tcase.metadata) if err != nil || l != len(tcase.data) { - t.Errorf("testcase cellLength(%v,%v) returned unexpected result: %v %v", tcase.typ, tcase.data, l, err) + t.Errorf("testcase cellLength(%v,%v) returned unexpected result: %v %v was expected %v ", tcase.typ, tcase.data, l, err, len(tcase.data)) } // Test CellValue. From e2e4625a5357a46c0c98df9b16dac1c8064034af Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Fri, 10 Mar 2017 20:31:45 -0800 Subject: [PATCH 102/108] geometry support. 
--- go/mysqlconn/replication/binlog_event_rbr.go | 31 +++++++++++++++++-- .../replication/binlog_event_rbr_test.go | 24 ++++++++++++++ 2 files changed, 52 insertions(+), 3 deletions(-) diff --git a/go/mysqlconn/replication/binlog_event_rbr.go b/go/mysqlconn/replication/binlog_event_rbr.go index 7c252dcae68..11df0655176 100644 --- a/go/mysqlconn/replication/binlog_event_rbr.go +++ b/go/mysqlconn/replication/binlog_event_rbr.go @@ -256,8 +256,8 @@ func cellLength(data []byte, pos int, typ byte, metadata uint16) (int, error) { return intg0*4 + dig2bytes[intg0x] + frac0*4 + dig2bytes[frac0x], nil case TypeEnum, TypeSet: return int(metadata & 0xff), nil - case TypeTinyBlob, TypeMediumBlob, TypeLongBlob, TypeBlob: - // Only TypeBlob is used in binary logs, + case TypeTinyBlob, TypeMediumBlob, TypeLongBlob, TypeBlob, TypeGeometry: + // of the Blobs, only TypeBlob is used in binary logs, // but supports others just in case. switch metadata { case 1: @@ -275,7 +275,7 @@ func cellLength(data []byte, pos int, typ byte, metadata uint16) (int, error) { uint32(data[pos+2])<<16| uint32(data[pos+3])<<24), nil default: - return 0, fmt.Errorf("unsupported blob metadata value %v (data: %v pos: %v)", metadata, data, pos) + return 0, fmt.Errorf("unsupported blob/geometry metadata value %v (data: %v pos: %v)", metadata, data, pos) } case TypeString: // This may do String, Enum, and Set. 
The type is in @@ -797,6 +797,31 @@ func CellValue(data []byte, pos int, typ byte, metadata uint16, styp querypb.Typ l := int(data[pos]) return sqltypes.MakeTrusted(querypb.Type_VARCHAR, data[pos+1:pos+1+l]), l + 1, nil + + case TypeGeometry: + l := 0 + switch metadata { + case 1: + l = int(uint32(data[pos])) + case 2: + l = int(uint32(data[pos]) | + uint32(data[pos+1])<<8) + case 3: + l = int(uint32(data[pos]) | + uint32(data[pos+1])<<8 | + uint32(data[pos+2])<<16) + case 4: + l = int(uint32(data[pos]) | + uint32(data[pos+1])<<8 | + uint32(data[pos+2])<<16 | + uint32(data[pos+3])<<24) + default: + return sqltypes.NULL, 0, fmt.Errorf("unsupported geometry metadata value %v (data: %v pos: %v)", metadata, data, pos) + } + pos += int(metadata) + return sqltypes.MakeTrusted(querypb.Type_GEOMETRY, + data[pos:pos+l]), l + int(metadata), nil + default: return sqltypes.NULL, 0, fmt.Errorf("unsupported type %v", typ) } diff --git a/go/mysqlconn/replication/binlog_event_rbr_test.go b/go/mysqlconn/replication/binlog_event_rbr_test.go index 9151c5e825e..7c92b69b803 100644 --- a/go/mysqlconn/replication/binlog_event_rbr_test.go +++ b/go/mysqlconn/replication/binlog_event_rbr_test.go @@ -472,6 +472,30 @@ func TestCellLengthAndData(t *testing.T) { data: []byte{3, 0, 'a', 'b', 'c'}, out: sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("abc")), + }, { + typ: TypeGeometry, + metadata: 1, + data: []byte{0x3, 'a', 'b', 'c'}, + out: sqltypes.MakeTrusted(querypb.Type_GEOMETRY, + []byte("abc")), + }, { + typ: TypeGeometry, + metadata: 2, + data: []byte{0x3, 0x00, 'a', 'b', 'c'}, + out: sqltypes.MakeTrusted(querypb.Type_GEOMETRY, + []byte("abc")), + }, { + typ: TypeGeometry, + metadata: 3, + data: []byte{0x3, 0x00, 0x00, 'a', 'b', 'c'}, + out: sqltypes.MakeTrusted(querypb.Type_GEOMETRY, + []byte("abc")), + }, { + typ: TypeGeometry, + metadata: 4, + data: []byte{0x3, 0x00, 0x00, 0x00, 'a', 'b', 'c'}, + out: sqltypes.MakeTrusted(querypb.Type_GEOMETRY, + []byte("abc")), }} for _, tcase := 
range testcases { From 145f4d48b470264ff3af79ede9f25a1be444976f Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Mon, 13 Mar 2017 11:29:22 -0700 Subject: [PATCH 103/108] Changing internal API for binlogs. To use an internal data structure (vs the proto structure), so we can get more information from RBR events if available. Filling in the table name when available in RBR event, and using it in table filter. --- go/vt/binlog/binlog_streamer.go | 63 +++++--- go/vt/binlog/binlog_streamer_rbr_test.go | 28 ++-- go/vt/binlog/binlog_streamer_test.go | 149 ++++++++---------- go/vt/binlog/event_streamer.go | 20 +-- go/vt/binlog/event_streamer_test.go | 84 ++++++---- go/vt/binlog/keyrange_filter.go | 47 +++--- go/vt/binlog/keyrange_filter_test.go | 111 ++++++++----- go/vt/binlog/tables_filter.go | 63 ++++---- go/vt/binlog/tables_filter_test.go | 86 ++++++---- go/vt/binlog/updatestream.go | 4 +- go/vt/binlog/updatestreamctl.go | 16 +- .../tabletserver/replication_watcher.go | 8 +- 12 files changed, 385 insertions(+), 294 deletions(-) diff --git a/go/vt/binlog/binlog_streamer.go b/go/vt/binlog/binlog_streamer.go index aee736268ad..56a2df29599 100644 --- a/go/vt/binlog/binlog_streamer.go +++ b/go/vt/binlog/binlog_streamer.go @@ -52,9 +52,20 @@ var ( } ) +// FullBinlogStatement has all the information we can gather for an event. +// Some fields are only set if asked for, and if RBR is used. +// Otherwise we'll revert back to using the SQL comments, for SBR. +type FullBinlogStatement struct { + Statement *binlogdatapb.BinlogTransaction_Statement + Table string + KeyspaceID []byte + PKNames []*querypb.Field + PKRow *querypb.Row +} + // sendTransactionFunc is used to send binlog events. // reply is of type binlogdatapb.BinlogTransaction. 
-type sendTransactionFunc func(trans *binlogdatapb.BinlogTransaction) error +type sendTransactionFunc func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error // getStatementCategory returns the binlogdatapb.BL_* category for a SQL statement. func getStatementCategory(sql string) binlogdatapb.BinlogTransaction_Statement_Category { @@ -173,7 +184,7 @@ func (bls *Streamer) Stream(ctx context.Context) (err error) { // If the events channel is closed, parseEvents returns ErrServerEOF. // If the context is done, returns ctx.Err(). func (bls *Streamer) parseEvents(ctx context.Context, events <-chan replication.BinlogEvent) (replication.Position, error) { - var statements []*binlogdatapb.BinlogTransaction_Statement + var statements []FullBinlogStatement var format replication.BinlogFormat var gtid replication.GTID var pos = bls.startPos @@ -191,21 +202,18 @@ func (bls *Streamer) parseEvents(ctx context.Context, events <-chan replication. log.Errorf("BEGIN in binlog stream while still in another transaction; dropping %d statements: %v", len(statements), statements) binlogStreamerErrors.Add("ParseEvents", 1) } - statements = make([]*binlogdatapb.BinlogTransaction_Statement, 0, 10) + statements = make([]FullBinlogStatement, 0, 10) autocommit = false } // A commit can be triggered either by a COMMIT query, or by an XID_EVENT. // Statements that aren't wrapped in BEGIN/COMMIT are committed immediately. 
commit := func(timestamp uint32) error { if int64(timestamp) >= bls.timestamp { - trans := &binlogdatapb.BinlogTransaction{ - Statements: statements, - EventToken: &querypb.EventToken{ - Timestamp: int64(timestamp), - Position: replication.EncodePosition(pos), - }, + eventToken := &querypb.EventToken{ + Timestamp: int64(timestamp), + Position: replication.EncodePosition(pos), } - if err = bls.sendTransaction(trans); err != nil { + if err = bls.sendTransaction(eventToken, statements); err != nil { if err == io.EOF { return ErrClientEOF } @@ -288,18 +296,22 @@ func (bls *Streamer) parseEvents(ctx context.Context, events <-chan replication. if err != nil { return pos, fmt.Errorf("can't parse INTVAR_EVENT: %v, event data: %#v", err, ev) } - statements = append(statements, &binlogdatapb.BinlogTransaction_Statement{ - Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, - Sql: []byte(fmt.Sprintf("SET %s=%d", replication.IntVarNames[typ], value)), + statements = append(statements, FullBinlogStatement{ + Statement: &binlogdatapb.BinlogTransaction_Statement{ + Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, + Sql: []byte(fmt.Sprintf("SET %s=%d", replication.IntVarNames[typ], value)), + }, }) case ev.IsRand(): // RAND_EVENT seed1, seed2, err := ev.Rand(format) if err != nil { return pos, fmt.Errorf("can't parse RAND_EVENT: %v, event data: %#v", err, ev) } - statements = append(statements, &binlogdatapb.BinlogTransaction_Statement{ - Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, - Sql: []byte(fmt.Sprintf("SET @@RAND_SEED1=%d, @@RAND_SEED2=%d", seed1, seed2)), + statements = append(statements, FullBinlogStatement{ + Statement: &binlogdatapb.BinlogTransaction_Statement{ + Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, + Sql: []byte(fmt.Sprintf("SET @@RAND_SEED1=%d, @@RAND_SEED2=%d", seed1, seed2)), + }, }) case ev.IsQuery(): // QUERY_EVENT // Extract the query string and group into transactions. 
@@ -341,7 +353,11 @@ func (bls *Streamer) parseEvents(ctx context.Context, events <-chan replication. setTimestamp.Charset = q.Charset statement.Charset = q.Charset } - statements = append(statements, setTimestamp, statement) + statements = append(statements, FullBinlogStatement{ + Statement: setTimestamp, + }, FullBinlogStatement{ + Statement: statement, + }) if autocommit { if err = commit(ev.Timestamp()); err != nil { return pos, err @@ -391,7 +407,9 @@ func (bls *Streamer) parseEvents(ctx context.Context, events <-chan replication. Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, Sql: []byte(fmt.Sprintf("SET TIMESTAMP=%d", ev.Timestamp())), } - statements = append(statements, setTimestamp) + statements = append(statements, FullBinlogStatement{ + Statement: setTimestamp, + }) rows, err := ev.Rows(format, tm) if err != nil { @@ -409,7 +427,7 @@ func (bls *Streamer) parseEvents(ctx context.Context, events <-chan replication. } } -func appendUpdates(statements []*binlogdatapb.BinlogTransaction_Statement, rows *replication.Rows, tm *replication.TableMap, ti *schema.Table) []*binlogdatapb.BinlogTransaction_Statement { +func appendUpdates(statements []FullBinlogStatement, rows *replication.Rows, tm *replication.TableMap, ti *schema.Table) []FullBinlogStatement { for i := range rows.Rows { var sql bytes.Buffer @@ -433,7 +451,12 @@ func appendUpdates(statements []*binlogdatapb.BinlogTransaction_Statement, rows Category: binlogdatapb.BinlogTransaction_Statement_BL_UPDATE, Sql: sql.Bytes(), } - statements = append(statements, update) + statements = append(statements, FullBinlogStatement{ + Statement: update, + Table: tm.Name, + }) + // TODO(alainjobart): fill in keyspaceID, pkNames, pkRows + // if necessary. 
} return statements } diff --git a/go/vt/binlog/binlog_streamer_rbr_test.go b/go/vt/binlog/binlog_streamer_rbr_test.go index f960299a8f1..7466e915785 100644 --- a/go/vt/binlog/binlog_streamer_rbr_test.go +++ b/go/vt/binlog/binlog_streamer_rbr_test.go @@ -97,19 +97,24 @@ func TestStreamerParseRBRUpdateEvent(t *testing.T) { events := make(chan replication.BinlogEvent) - want := []binlogdatapb.BinlogTransaction{ + want := []fullBinlogTransaction{ { - Statements: []*binlogdatapb.BinlogTransaction_Statement{ + statements: []FullBinlogStatement{ { - Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, - Sql: []byte("SET TIMESTAMP=1407805592"), + Statement: &binlogdatapb.BinlogTransaction_Statement{ + Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, + Sql: []byte("SET TIMESTAMP=1407805592"), + }, }, { - Category: binlogdatapb.BinlogTransaction_Statement_BL_UPDATE, - Sql: []byte("UPDATE vt_a SET id=1076895760, message='abcd' WHERE id=1076895760 AND message='abc'"), + Statement: &binlogdatapb.BinlogTransaction_Statement{ + Category: binlogdatapb.BinlogTransaction_Statement_BL_UPDATE, + Sql: []byte("UPDATE vt_a SET id=1076895760, message='abcd' WHERE id=1076895760 AND message='abc'"), + }, + Table: "vt_a", }, }, - EventToken: &querypb.EventToken{ + eventToken: &querypb.EventToken{ Timestamp: 1407805592, Position: replication.EncodePosition(replication.Position{ GTIDSet: replication.MariadbGTID{ @@ -121,9 +126,12 @@ func TestStreamerParseRBRUpdateEvent(t *testing.T) { }, }, } - var got []binlogdatapb.BinlogTransaction - sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { - got = append(got, *trans) + var got []fullBinlogTransaction + sendTransaction := func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error { + got = append(got, fullBinlogTransaction{ + eventToken: eventToken, + statements: statements, + }) return nil } bls := NewStreamer("vt_test_keyspace", nil, se, nil, replication.Position{}, 0, sendTransaction) diff 
--git a/go/vt/binlog/binlog_streamer_test.go b/go/vt/binlog/binlog_streamer_test.go index 4b1d5715b7e..d3935ad50f3 100644 --- a/go/vt/binlog/binlog_streamer_test.go +++ b/go/vt/binlog/binlog_streamer_test.go @@ -20,6 +20,29 @@ import ( querypb "github.com/youtube/vitess/go/vt/proto/query" ) +// fullBinlogTransaction is a helper type for tests. +type fullBinlogTransaction struct { + eventToken *querypb.EventToken + statements []FullBinlogStatement +} + +type binlogStatements []binlogdatapb.BinlogTransaction + +func (bs *binlogStatements) sendTransaction(eventToken *querypb.EventToken, statements []FullBinlogStatement) error { + var s []*binlogdatapb.BinlogTransaction_Statement + if len(statements) > 0 { + s = make([]*binlogdatapb.BinlogTransaction_Statement, len(statements)) + for i, statement := range statements { + s[i] = statement.Statement + } + } + *bs = append(*bs, binlogdatapb.BinlogTransaction{ + Statements: s, + EventToken: eventToken, + }) + return nil +} + func sendTestEvents(channel chan<- replication.BinlogEvent, events []replication.BinlogEvent) { for _, ev := range events { channel <- ev @@ -65,12 +88,8 @@ func TestStreamerParseEventsXID(t *testing.T) { }, }, } - var got []binlogdatapb.BinlogTransaction - sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { - got = append(got, *trans) - return nil - } - bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) + var got binlogStatements + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events) @@ -78,7 +97,7 @@ func TestStreamerParseEventsXID(t *testing.T) { t.Errorf("unexpected error: %v", err) } - if !reflect.DeepEqual(got, want) { + if !reflect.DeepEqual(got, binlogStatements(want)) { t.Errorf("binlogConnStreamer.parseEvents(): got:\n%v\nwant:\n%v", got, want) } } @@ -123,12 +142,8 @@ func 
TestStreamerParseEventsCommit(t *testing.T) { }, }, } - var got []binlogdatapb.BinlogTransaction - sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { - got = append(got, *trans) - return nil - } - bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) + var got binlogStatements + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events) @@ -136,7 +151,7 @@ func TestStreamerParseEventsCommit(t *testing.T) { t.Errorf("unexpected error: %v", err) } - if !reflect.DeepEqual(got, want) { + if !reflect.DeepEqual(got, binlogStatements(want)) { t.Errorf("binlogConnStreamer.parseEvents(): got %v, want %v", got, want) } } @@ -144,7 +159,7 @@ func TestStreamerParseEventsCommit(t *testing.T) { func TestStreamerStop(t *testing.T) { events := make(chan replication.BinlogEvent) - sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { + sendTransaction := func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error { return nil } bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) @@ -189,7 +204,7 @@ func TestStreamerParseEventsClientEOF(t *testing.T) { events := make(chan replication.BinlogEvent) - sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { + sendTransaction := func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error { return io.EOF } bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) @@ -207,7 +222,7 @@ func TestStreamerParseEventsServerEOF(t *testing.T) { events := make(chan replication.BinlogEvent) close(events) - sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { + sendTransaction := func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error { return nil } bls := 
NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) @@ -237,7 +252,7 @@ func TestStreamerParseEventsSendErrorXID(t *testing.T) { events := make(chan replication.BinlogEvent) - sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { + sendTransaction := func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error { return fmt.Errorf("foobar") } bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) @@ -275,7 +290,7 @@ func TestStreamerParseEventsSendErrorCommit(t *testing.T) { events := make(chan replication.BinlogEvent) - sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { + sendTransaction := func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error { return fmt.Errorf("foobar") } bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) @@ -308,7 +323,7 @@ func TestStreamerParseEventsInvalid(t *testing.T) { events := make(chan replication.BinlogEvent) - sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { + sendTransaction := func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error { return nil } bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) @@ -343,7 +358,7 @@ func TestStreamerParseEventsInvalidFormat(t *testing.T) { events := make(chan replication.BinlogEvent) - sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { + sendTransaction := func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error { return nil } bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) @@ -378,7 +393,7 @@ func TestStreamerParseEventsNoFormat(t *testing.T) { events := make(chan replication.BinlogEvent) - sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { + sendTransaction := func(eventToken *querypb.EventToken, 
statements []FullBinlogStatement) error { return nil } bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) @@ -411,7 +426,7 @@ func TestStreamerParseEventsInvalidQuery(t *testing.T) { events := make(chan replication.BinlogEvent) - sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { + sendTransaction := func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error { return nil } bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) @@ -490,20 +505,16 @@ func TestStreamerParseEventsRollback(t *testing.T) { }, }, } - var got []binlogdatapb.BinlogTransaction - sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { - got = append(got, *trans) - return nil - } - bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) + var got binlogStatements + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events); err != ErrServerEOF { t.Errorf("unexpected error: %v", err) } - if !reflect.DeepEqual(got, want) { - t.Errorf("binlogConnStreamer.parseEvents(): got %v, want %v", got, want) + if !reflect.DeepEqual(got, binlogStatements(want)) { + t.Errorf("binlogConnStreamer.parseEvents(): got:\n%v\nwant:\n%v", got, want) } } @@ -555,19 +566,15 @@ func TestStreamerParseEventsDMLWithoutBegin(t *testing.T) { }, }, } - var got []binlogdatapb.BinlogTransaction - sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { - got = append(got, *trans) - return nil - } - bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) + var got binlogStatements + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) if _, err := 
bls.parseEvents(context.Background(), events); err != ErrServerEOF { t.Errorf("unexpected error: %v", err) } - if !reflect.DeepEqual(got, want) { + if !reflect.DeepEqual(got, binlogStatements(want)) { t.Errorf("binlogConnStreamer.parseEvents(): got:\n%v\nwant:\n%v", got, want) } } @@ -610,7 +617,7 @@ func TestStreamerParseEventsBeginWithoutCommit(t *testing.T) { }, }, { - Statements: []*binlogdatapb.BinlogTransaction_Statement{}, + Statements: nil, EventToken: &querypb.EventToken{ Timestamp: 1407805592, Position: replication.EncodePosition(replication.Position{ @@ -623,19 +630,15 @@ func TestStreamerParseEventsBeginWithoutCommit(t *testing.T) { }, }, } - var got []binlogdatapb.BinlogTransaction - sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { - got = append(got, *trans) - return nil - } - bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) + var got binlogStatements + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events); err != ErrServerEOF { t.Errorf("unexpected error: %v", err) } - if !reflect.DeepEqual(got, want) { + if !reflect.DeepEqual(got, binlogStatements(want)) { t.Errorf("binlogConnStreamer.parseEvents(): got:\n%v\nwant:\n%v", got, want) } } @@ -680,19 +683,15 @@ func TestStreamerParseEventsSetInsertID(t *testing.T) { }, }, } - var got []binlogdatapb.BinlogTransaction - sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { - got = append(got, *trans) - return nil - } - bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) + var got binlogStatements + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events); err != ErrServerEOF { 
t.Errorf("unexpected error: %v", err) } - if !reflect.DeepEqual(got, want) { + if !reflect.DeepEqual(got, binlogStatements(want)) { t.Errorf("binlogConnStreamer.parseEvents(): got %v, want %v", got, want) } } @@ -717,7 +716,7 @@ func TestStreamerParseEventsInvalidIntVar(t *testing.T) { events := make(chan replication.BinlogEvent) - sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { + sendTransaction := func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error { return nil } bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) @@ -774,19 +773,15 @@ func TestStreamerParseEventsOtherDB(t *testing.T) { }, }, } - var got []binlogdatapb.BinlogTransaction - sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { - got = append(got, *trans) - return nil - } - bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) + var got binlogStatements + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events); err != ErrServerEOF { t.Errorf("unexpected error: %v", err) } - if !reflect.DeepEqual(got, want) { + if !reflect.DeepEqual(got, binlogStatements(want)) { t.Errorf("binlogConnStreamer.parseEvents(): got %v, want %v", got, want) } } @@ -832,19 +827,15 @@ func TestStreamerParseEventsOtherDBBegin(t *testing.T) { }, }, } - var got []binlogdatapb.BinlogTransaction - sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { - got = append(got, *trans) - return nil - } - bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) + var got binlogStatements + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events); err != 
ErrServerEOF { t.Errorf("unexpected error: %v", err) } - if !reflect.DeepEqual(got, want) { + if !reflect.DeepEqual(got, binlogStatements(want)) { t.Errorf("binlogConnStreamer.parseEvents(): got %v, want %v", got, want) } } @@ -869,7 +860,7 @@ func TestStreamerParseEventsBeginAgain(t *testing.T) { events := make(chan replication.BinlogEvent) - sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { + sendTransaction := func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error { return nil } bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) @@ -932,19 +923,15 @@ func TestStreamerParseEventsMariadbBeginGTID(t *testing.T) { }, }, } - var got []binlogdatapb.BinlogTransaction - sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { - got = append(got, *trans) - return nil - } - bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) + var got binlogStatements + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events); err != ErrServerEOF { t.Errorf("unexpected error: %v", err) } - if !reflect.DeepEqual(got, want) { + if !reflect.DeepEqual(got, binlogStatements(want)) { t.Errorf("binlogConnStreamer.parseEvents(): got:\n%v\nwant:\n%v", got, want) } } @@ -987,19 +974,15 @@ func TestStreamerParseEventsMariadbStandaloneGTID(t *testing.T) { }, }, } - var got []binlogdatapb.BinlogTransaction - sendTransaction := func(trans *binlogdatapb.BinlogTransaction) error { - got = append(got, *trans) - return nil - } - bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, sendTransaction) + var got binlogStatements + bls := NewStreamer("vt_test_keyspace", nil, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) if _, err := 
bls.parseEvents(context.Background(), events); err != ErrServerEOF { t.Errorf("unexpected error: %v", err) } - if !reflect.DeepEqual(got, want) { + if !reflect.DeepEqual(got, binlogStatements(want)) { t.Errorf("binlogConnStreamer.parseEvents(): got:\n%v\nwant:\n%v", got, want) } } diff --git a/go/vt/binlog/event_streamer.go b/go/vt/binlog/event_streamer.go index aa864131541..f1da61b447e 100644 --- a/go/vt/binlog/event_streamer.go +++ b/go/vt/binlog/event_streamer.go @@ -53,16 +53,16 @@ func (evs *EventStreamer) Stream(ctx context.Context) error { return evs.bls.Stream(ctx) } -func (evs *EventStreamer) transactionToEvent(trans *binlogdatapb.BinlogTransaction) error { +func (evs *EventStreamer) transactionToEvent(eventToken *querypb.EventToken, statements []FullBinlogStatement) error { event := &querypb.StreamEvent{ - EventToken: trans.EventToken, + EventToken: eventToken, } var err error var insertid int64 - for _, stmt := range trans.Statements { - switch stmt.Category { + for _, stmt := range statements { + switch stmt.Statement.Category { case binlogdatapb.BinlogTransaction_Statement_BL_SET: - sql := string(stmt.Sql) + sql := string(stmt.Statement.Sql) if strings.HasPrefix(sql, binlogSetInsertID) { insertid, err = strconv.ParseInt(sql[binlogSetInsertIDLen:], 10, 64) if err != nil { @@ -74,29 +74,29 @@ func (evs *EventStreamer) transactionToEvent(trans *binlogdatapb.BinlogTransacti binlogdatapb.BinlogTransaction_Statement_BL_UPDATE, binlogdatapb.BinlogTransaction_Statement_BL_DELETE: var dmlStatement *querypb.StreamEvent_Statement - dmlStatement, insertid, err = evs.buildDMLStatement(string(stmt.Sql), insertid) + dmlStatement, insertid, err = evs.buildDMLStatement(string(stmt.Statement.Sql), insertid) if err != nil { dmlStatement = &querypb.StreamEvent_Statement{ Category: querypb.StreamEvent_Statement_Error, - Sql: stmt.Sql, + Sql: stmt.Statement.Sql, } } event.Statements = append(event.Statements, dmlStatement) case 
binlogdatapb.BinlogTransaction_Statement_BL_DDL: ddlStatement := &querypb.StreamEvent_Statement{ Category: querypb.StreamEvent_Statement_DDL, - Sql: stmt.Sql, + Sql: stmt.Statement.Sql, } event.Statements = append(event.Statements, ddlStatement) case binlogdatapb.BinlogTransaction_Statement_BL_UNRECOGNIZED: unrecognized := &querypb.StreamEvent_Statement{ Category: querypb.StreamEvent_Statement_Error, - Sql: stmt.Sql, + Sql: stmt.Statement.Sql, } event.Statements = append(event.Statements, unrecognized) default: binlogStreamerErrors.Add("EventStreamer", 1) - log.Errorf("Unrecognized event: %v: %s", stmt.Category, stmt.Sql) + log.Errorf("Unrecognized event: %v: %s", stmt.Statement.Category, stmt.Statement.Sql) } } return evs.sendEvent(event) diff --git a/go/vt/binlog/event_streamer_test.go b/go/vt/binlog/event_streamer_test.go index b1a2749d348..265b468b3cf 100644 --- a/go/vt/binlog/event_streamer_test.go +++ b/go/vt/binlog/event_streamer_test.go @@ -34,15 +34,15 @@ func TestEventErrors(t *testing.T) { }, } for _, sql := range dmlErrorCases { - trans := &binlogdatapb.BinlogTransaction{ - Statements: []*binlogdatapb.BinlogTransaction_Statement{ - { + statements := []FullBinlogStatement{ + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_INSERT, Sql: []byte(sql), }, }, } - err := evs.transactionToEvent(trans) + err := evs.transactionToEvent(nil, statements) if err != nil { t.Errorf("%s: %v", sql, err) continue @@ -67,16 +67,16 @@ func TestSetErrors(t *testing.T) { return nil }, } - trans := &binlogdatapb.BinlogTransaction{ - Statements: []*binlogdatapb.BinlogTransaction_Statement{ - { + statements := []FullBinlogStatement{ + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, Sql: []byte("SET INSERT_ID=abcd"), }, }, } before := binlogStreamerErrors.Counts()["EventStreamer"] - err := evs.transactionToEvent(trans) + err := 
evs.transactionToEvent(nil, statements) if err != nil { t.Error(err) } @@ -87,25 +87,36 @@ func TestSetErrors(t *testing.T) { } func TestDMLEvent(t *testing.T) { - trans := &binlogdatapb.BinlogTransaction{ - Statements: []*binlogdatapb.BinlogTransaction_Statement{{ - Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, - Sql: []byte("SET TIMESTAMP=2"), - }, { - Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, - Sql: []byte("SET INSERT_ID=10"), - }, { - Category: binlogdatapb.BinlogTransaction_Statement_BL_INSERT, - Sql: []byte("query /* _stream _table_ (eid id name) (null 1 'bmFtZQ==' ) (null 18446744073709551615 'bmFtZQ==' ); */"), - }, { - Category: binlogdatapb.BinlogTransaction_Statement_BL_INSERT, - Sql: []byte("query"), - }}, - EventToken: &querypb.EventToken{ - Timestamp: 1, - Position: "MariaDB/0-41983-20", + statements := []FullBinlogStatement{ + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ + Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, + Sql: []byte("SET TIMESTAMP=2"), + }, + }, + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ + Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, + Sql: []byte("SET INSERT_ID=10"), + }, + }, + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ + Category: binlogdatapb.BinlogTransaction_Statement_BL_INSERT, + Sql: []byte("query /* _stream _table_ (eid id name) (null 1 'bmFtZQ==' ) (null 18446744073709551615 'bmFtZQ==' ); */"), + }, + }, + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ + Category: binlogdatapb.BinlogTransaction_Statement_BL_INSERT, + Sql: []byte("query"), + }, }, } + eventToken := &querypb.EventToken{ + Timestamp: 1, + Position: "MariaDB/0-41983-20", + } evs := &EventStreamer{ sendEvent: func(event *querypb.StreamEvent) error { for _, statement := range event.Statements { @@ -135,27 +146,30 @@ func TestDMLEvent(t *testing.T) { return nil }, } - err := evs.transactionToEvent(trans) + err := evs.transactionToEvent(eventToken, 
statements) if err != nil { t.Error(err) } } func TestDDLEvent(t *testing.T) { - trans := &binlogdatapb.BinlogTransaction{ - Statements: []*binlogdatapb.BinlogTransaction_Statement{ - { + statements := []FullBinlogStatement{ + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, Sql: []byte("SET TIMESTAMP=2"), - }, { + }, + }, + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_DDL, Sql: []byte("DDL"), }, }, - EventToken: &querypb.EventToken{ - Timestamp: 1, - Position: "MariaDB/0-41983-20", - }, + } + eventToken := &querypb.EventToken{ + Timestamp: 1, + Position: "MariaDB/0-41983-20", } evs := &EventStreamer{ sendEvent: func(event *querypb.StreamEvent) error { @@ -180,7 +194,7 @@ func TestDDLEvent(t *testing.T) { return nil }, } - err := evs.transactionToEvent(trans) + err := evs.transactionToEvent(eventToken, statements) if err != nil { t.Error(err) } diff --git a/go/vt/binlog/keyrange_filter.go b/go/vt/binlog/keyrange_filter.go index 5d8e242ca46..614700d9e2a 100644 --- a/go/vt/binlog/keyrange_filter.go +++ b/go/vt/binlog/keyrange_filter.go @@ -12,41 +12,43 @@ import ( "errors" "fmt" + "github.com/youtube/vitess/go/vt/sqlparser" + binlogdatapb "github.com/youtube/vitess/go/vt/proto/binlogdata" + querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" - "github.com/youtube/vitess/go/vt/sqlparser" ) // KeyRangeFilterFunc returns a function that calls callback only if statements // in the transaction match the specified keyrange. 
The resulting function can be // passed into the Streamer: bls.Stream(file, pos, sendTransaction) -> // bls.Stream(file, pos, KeyRangeFilterFunc(keyrange, sendTransaction)) -func KeyRangeFilterFunc(keyrange *topodatapb.KeyRange, callback sendTransactionFunc) sendTransactionFunc { - return func(reply *binlogdatapb.BinlogTransaction) error { +func KeyRangeFilterFunc(keyrange *topodatapb.KeyRange, callback func(*binlogdatapb.BinlogTransaction) error) sendTransactionFunc { + return func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error { matched := false - filtered := make([]*binlogdatapb.BinlogTransaction_Statement, 0, len(reply.Statements)) - for _, statement := range reply.Statements { - switch statement.Category { + filtered := make([]*binlogdatapb.BinlogTransaction_Statement, 0, len(statements)) + for _, statement := range statements { + switch statement.Statement.Category { case binlogdatapb.BinlogTransaction_Statement_BL_SET: - filtered = append(filtered, statement) + filtered = append(filtered, statement.Statement) case binlogdatapb.BinlogTransaction_Statement_BL_DDL: - log.Warningf("Not forwarding DDL: %s", statement.Sql) + log.Warningf("Not forwarding DDL: %s", statement.Statement.Sql) continue case binlogdatapb.BinlogTransaction_Statement_BL_INSERT, binlogdatapb.BinlogTransaction_Statement_BL_UPDATE, binlogdatapb.BinlogTransaction_Statement_BL_DELETE: - keyspaceIDS, err := sqlannotation.ExtractKeyspaceIDS(string(statement.Sql)) + keyspaceIDS, err := sqlannotation.ExtractKeyspaceIDS(string(statement.Statement.Sql)) if err != nil { - if statement.Category == binlogdatapb.BinlogTransaction_Statement_BL_INSERT { + if statement.Statement.Category == binlogdatapb.BinlogTransaction_Statement_BL_INSERT { // TODO(erez): Stop filtered-replication here, and alert. logExtractKeySpaceIDError(err) continue } - // If no keyspace IDs are found, we replicate to all tarrgets. + // If no keyspace IDs are found, we replicate to all targets. 
// This is safe for UPDATE and DELETE because vttablet rewrites queries to // include the primary key and the query will only affect the shards that // have the rows. - filtered = append(filtered, statement) + filtered = append(filtered, statement.Statement) matched = true continue } @@ -55,37 +57,38 @@ func KeyRangeFilterFunc(keyrange *topodatapb.KeyRange, callback sendTransactionF // Skip keyspace ids that don't belong to the destination shard. continue } - filtered = append(filtered, statement) + filtered = append(filtered, statement.Statement) matched = true continue } - query, err := getValidRangeQuery(string(statement.Sql), keyspaceIDS, keyrange) + query, err := getValidRangeQuery(string(statement.Statement.Sql), keyspaceIDS, keyrange) if err != nil { - log.Errorf("Error parsing statement (%s). Got %v", string(statement.Sql), err) + log.Errorf("Error parsing statement (%s). Got %v", string(statement.Statement.Sql), err) continue } if query == "" { continue } splitStatement := &binlogdatapb.BinlogTransaction_Statement{ - Category: statement.Category, - Charset: statement.Charset, + Category: statement.Statement.Category, + Charset: statement.Statement.Charset, Sql: []byte(query), } filtered = append(filtered, splitStatement) matched = true case binlogdatapb.BinlogTransaction_Statement_BL_UNRECOGNIZED: updateStreamErrors.Add("KeyRangeStream", 1) - log.Errorf("Error parsing keyspace id: %s", statement.Sql) + log.Errorf("Error parsing keyspace id: %s", statement.Statement.Sql) continue } } + trans := &binlogdatapb.BinlogTransaction{ + EventToken: eventToken, + } if matched { - reply.Statements = filtered - } else { - reply.Statements = nil + trans.Statements = filtered } - return callback(reply) + return callback(trans) } } diff --git a/go/vt/binlog/keyrange_filter_test.go b/go/vt/binlog/keyrange_filter_test.go index 580c15892ab..c4c0550d0e3 100644 --- a/go/vt/binlog/keyrange_filter_test.go +++ b/go/vt/binlog/keyrange_filter_test.go @@ -19,41 +19,59 @@ var 
testKeyRange = &topodatapb.KeyRange{ } func TestKeyRangeFilterPass(t *testing.T) { - input := binlogdatapb.BinlogTransaction{ - Statements: []*binlogdatapb.BinlogTransaction_Statement{ - { + statements := []FullBinlogStatement{ + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, Sql: []byte("set1"), - }, { + }, + }, + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_INSERT, Sql: []byte("insert into tbl(col1, col2) values(1, a) /* vtgate:: keyspace_id:02 */"), - }, { + }, + }, + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_INSERT, Sql: []byte("insert into tbl(col1, col2, col3) values(1, 2, 3),(4, 5, 6) /* vtgate:: keyspace_id:01,02 *//*trailing_comments */"), - }, { + }, + }, + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_INSERT, Sql: []byte("insert into tbl(col1, col2, col3) values(1, 2, 3),(4, 5, 6) /* vtgate:: keyspace_id:01,20 *//*trailing_comments */"), - }, { + }, + }, + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_INSERT, Sql: []byte("insert into tbl(col1, col2, col3) values(1, 2, 3),(4, 5, 6) /* vtgate:: keyspace_id:10,20 *//*trailing_comments */"), - }, { + }, + }, + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_UPDATE, Sql: []byte("update tbl set col1=1"), - }, { + }, + }, + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_DELETE, Sql: []byte("delete from tbl where col1=1"), }, }, - EventToken: &querypb.EventToken{ - Position: "MariaDB/0-41983-1", - }, + } + eventToken := &querypb.EventToken{ + Position: "MariaDB/0-41983-1", } var got string f := KeyRangeFilterFunc(testKeyRange, func(reply 
*binlogdatapb.BinlogTransaction) error { got = bltToString(reply) return nil }) - f(&input) + f(eventToken, statements) want := `statement: <6, "set1"> statement: <7, "insert into tbl(col1, col2) values(1, a) /* vtgate:: keyspace_id:02 */"> statement: <7, "insert into tbl(col1, col2, col3) values (1, 2, 3), (4, 5, 6) /* vtgate:: keyspace_id:01,02 *//*trailing_comments */"> statement: <7, "insert into tbl(col1, col2, col3) values (1, 2, 3) /* vtgate:: keyspace_id:01,20 *//*trailing_comments */"> statement: <8, "update tbl set col1=1"> statement: <9, "delete from tbl where col1=1"> position: "MariaDB/0-41983-1" ` if want != got { t.Errorf("want\n%s, got\n%s", want, got) @@ -61,26 +79,29 @@ func TestKeyRangeFilterPass(t *testing.T) { } func TestKeyRangeFilterSkip(t *testing.T) { - input := binlogdatapb.BinlogTransaction{ - Statements: []*binlogdatapb.BinlogTransaction_Statement{ - { + statements := []FullBinlogStatement{ + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, Sql: []byte("set1"), - }, { + }, + }, + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_INSERT, Sql: []byte("dml1 /* vtgate:: keyspace_id:20 */"), }, }, - EventToken: &querypb.EventToken{ - Position: "MariaDB/0-41983-1", - }, + } + eventToken := &querypb.EventToken{ + Position: "MariaDB/0-41983-1", } var got string f := KeyRangeFilterFunc(testKeyRange, func(reply *binlogdatapb.BinlogTransaction) error { got = bltToString(reply) return nil }) - f(&input) + f(eventToken, statements) want := `position: "MariaDB/0-41983-1" ` if want != got { t.Errorf("want %s, got %s", want, got) @@ -88,26 +109,29 @@ func TestKeyRangeFilterSkip(t *testing.T) { } func TestKeyRangeFilterDDL(t *testing.T) { - input := binlogdatapb.BinlogTransaction{ - Statements: []*binlogdatapb.BinlogTransaction_Statement{ - { + statements := []FullBinlogStatement{ + { + Statement: 
&binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, Sql: []byte("set1"), - }, { + }, + }, + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_DDL, Sql: []byte("ddl"), }, }, - EventToken: &querypb.EventToken{ - Position: "MariaDB/0-41983-1", - }, + } + eventToken := &querypb.EventToken{ + Position: "MariaDB/0-41983-1", } var got string f := KeyRangeFilterFunc(testKeyRange, func(reply *binlogdatapb.BinlogTransaction) error { got = bltToString(reply) return nil }) - f(&input) + f(eventToken, statements) want := `position: "MariaDB/0-41983-1" ` if want != got { t.Errorf("want %s, got %s", want, got) @@ -115,32 +139,41 @@ func TestKeyRangeFilterDDL(t *testing.T) { } func TestKeyRangeFilterMalformed(t *testing.T) { - input := binlogdatapb.BinlogTransaction{ - Statements: []*binlogdatapb.BinlogTransaction_Statement{ - { + statements := []FullBinlogStatement{ + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, Sql: []byte("set1"), - }, { + }, + }, + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_INSERT, Sql: []byte("ddl"), - }, { + }, + }, + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_INSERT, Sql: []byte("dml1 /* vtgate:: keyspace_id:20*/"), - }, { + }, + }, + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_INSERT, Sql: []byte("dml1 /* vtgate:: keyspace_id:2 */"), // Odd-length hex string. 
}, }, - EventToken: &querypb.EventToken{ - Position: "MariaDB/0-41983-1", - }, + } + eventToken := &querypb.EventToken{ + Position: "MariaDB/0-41983-1", } var got string f := KeyRangeFilterFunc(testKeyRange, func(reply *binlogdatapb.BinlogTransaction) error { got = bltToString(reply) return nil }) - f(&input) + f(eventToken, statements) want := `position: "MariaDB/0-41983-1" ` if want != got { t.Errorf("want %s, got %s", want, got) diff --git a/go/vt/binlog/tables_filter.go b/go/vt/binlog/tables_filter.go index 8e03c004cd3..3017a67a3dd 100644 --- a/go/vt/binlog/tables_filter.go +++ b/go/vt/binlog/tables_filter.go @@ -10,6 +10,7 @@ import ( log "github.com/golang/glog" binlogdatapb "github.com/youtube/vitess/go/vt/proto/binlogdata" + querypb "github.com/youtube/vitess/go/vt/proto/query" ) const ( @@ -21,53 +22,61 @@ const ( // in the transaction match the specified tables. The resulting function can be // passed into the Streamer: bls.Stream(file, pos, sendTransaction) -> // bls.Stream(file, pos, TablesFilterFunc(sendTransaction)) -func TablesFilterFunc(tables []string, callback sendTransactionFunc) sendTransactionFunc { - return func(reply *binlogdatapb.BinlogTransaction) error { +func TablesFilterFunc(tables []string, callback func(*binlogdatapb.BinlogTransaction) error) sendTransactionFunc { + return func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error { matched := false - filtered := make([]*binlogdatapb.BinlogTransaction_Statement, 0, len(reply.Statements)) - for _, statement := range reply.Statements { - switch statement.Category { + filtered := make([]*binlogdatapb.BinlogTransaction_Statement, 0, len(statements)) + for _, statement := range statements { + switch statement.Statement.Category { case binlogdatapb.BinlogTransaction_Statement_BL_SET: - filtered = append(filtered, statement) + filtered = append(filtered, statement.Statement) case binlogdatapb.BinlogTransaction_Statement_BL_DDL: - log.Warningf("Not forwarding DDL: %s", 
statement.Sql) + log.Warningf("Not forwarding DDL: %s", statement.Statement.Sql) continue case binlogdatapb.BinlogTransaction_Statement_BL_INSERT, binlogdatapb.BinlogTransaction_Statement_BL_UPDATE, binlogdatapb.BinlogTransaction_Statement_BL_DELETE: - sql := string(statement.Sql) - tableIndex := strings.LastIndex(sql, streamComment) - if tableIndex == -1 { - updateStreamErrors.Add("TablesStream", 1) - log.Errorf("Error parsing table name: %s", sql) - continue - } - tableStart := tableIndex + len(streamComment) - tableEnd := strings.Index(sql[tableStart:], space) - if tableEnd == -1 { - updateStreamErrors.Add("TablesStream", 1) - log.Errorf("Error parsing table name: %s", sql) - continue + tableName := statement.Table + if tableName == "" { + // The statement doesn't + // contain the table name (SBR + // event), figure it out. + sql := string(statement.Statement.Sql) + tableIndex := strings.LastIndex(sql, streamComment) + if tableIndex == -1 { + updateStreamErrors.Add("TablesStream", 1) + log.Errorf("Error parsing table name: %s", sql) + continue + } + tableStart := tableIndex + len(streamComment) + tableEnd := strings.Index(sql[tableStart:], space) + if tableEnd == -1 { + updateStreamErrors.Add("TablesStream", 1) + log.Errorf("Error parsing table name: %s", sql) + continue + } + tableName = sql[tableStart : tableStart+tableEnd] } - tableName := sql[tableStart : tableStart+tableEnd] for _, t := range tables { if t == tableName { - filtered = append(filtered, statement) + filtered = append(filtered, statement.Statement) matched = true break } } case binlogdatapb.BinlogTransaction_Statement_BL_UNRECOGNIZED: updateStreamErrors.Add("TablesStream", 1) - log.Errorf("Error parsing table name: %s", string(statement.Sql)) + log.Errorf("Error parsing table name: %s", string(statement.Statement.Sql)) continue } } + + trans := &binlogdatapb.BinlogTransaction{ + EventToken: eventToken, + } if matched { - reply.Statements = filtered - } else { - reply.Statements = nil + 
trans.Statements = filtered } - return callback(reply) + return callback(trans) } } diff --git a/go/vt/binlog/tables_filter_test.go b/go/vt/binlog/tables_filter_test.go index 5f546336adc..ed1be7033e0 100644 --- a/go/vt/binlog/tables_filter_test.go +++ b/go/vt/binlog/tables_filter_test.go @@ -17,29 +17,35 @@ var testTables = []string{ } func TestTablesFilterPass(t *testing.T) { - input := binlogdatapb.BinlogTransaction{ - Statements: []*binlogdatapb.BinlogTransaction_Statement{ - { + statements := []FullBinlogStatement{ + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, Sql: []byte("set1"), - }, { + }, + }, + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_INSERT, Sql: []byte("dml1 /* _stream included1 (id ) (500 ); */"), - }, { + }, + }, + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_INSERT, Sql: []byte("dml2 /* _stream included2 (id ) (500 ); */"), }, }, - EventToken: &querypb.EventToken{ - Position: "MariaDB/0-41983-1", - }, + } + eventToken := &querypb.EventToken{ + Position: "MariaDB/0-41983-1", } var got string f := TablesFilterFunc(testTables, func(reply *binlogdatapb.BinlogTransaction) error { got = bltToString(reply) return nil }) - f(&input) + f(eventToken, statements) want := `statement: <6, "set1"> statement: <7, "dml1 /* _stream included1 (id ) (500 ); */"> statement: <7, "dml2 /* _stream included2 (id ) (500 ); */"> position: "MariaDB/0-41983-1" ` if want != got { t.Errorf("want\n%s, got\n%s", want, got) @@ -47,26 +53,29 @@ func TestTablesFilterPass(t *testing.T) { } func TestTablesFilterSkip(t *testing.T) { - input := binlogdatapb.BinlogTransaction{ - Statements: []*binlogdatapb.BinlogTransaction_Statement{ - { + statements := []FullBinlogStatement{ + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: 
binlogdatapb.BinlogTransaction_Statement_BL_SET, Sql: []byte("set1"), - }, { + }, + }, + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_INSERT, Sql: []byte("dml1 /* _stream excluded1 (id ) (500 ); */"), }, }, - EventToken: &querypb.EventToken{ - Position: "MariaDB/0-41983-1", - }, + } + eventToken := &querypb.EventToken{ + Position: "MariaDB/0-41983-1", } var got string f := TablesFilterFunc(testTables, func(reply *binlogdatapb.BinlogTransaction) error { got = bltToString(reply) return nil }) - f(&input) + f(eventToken, statements) want := `position: "MariaDB/0-41983-1" ` if want != got { t.Errorf("want %s, got %s", want, got) @@ -74,26 +83,29 @@ func TestTablesFilterSkip(t *testing.T) { } func TestTablesFilterDDL(t *testing.T) { - input := binlogdatapb.BinlogTransaction{ - Statements: []*binlogdatapb.BinlogTransaction_Statement{ - { + statements := []FullBinlogStatement{ + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, Sql: []byte("set1"), - }, { + }, + }, + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_DDL, Sql: []byte("ddl"), }, }, - EventToken: &querypb.EventToken{ - Position: "MariaDB/0-41983-1", - }, + } + eventToken := &querypb.EventToken{ + Position: "MariaDB/0-41983-1", } var got string f := TablesFilterFunc(testTables, func(reply *binlogdatapb.BinlogTransaction) error { got = bltToString(reply) return nil }) - f(&input) + f(eventToken, statements) want := `position: "MariaDB/0-41983-1" ` if want != got { t.Errorf("want %s, got %s", want, got) @@ -101,29 +113,35 @@ func TestTablesFilterDDL(t *testing.T) { } func TestTablesFilterMalformed(t *testing.T) { - input := binlogdatapb.BinlogTransaction{ - Statements: []*binlogdatapb.BinlogTransaction_Statement{ - { + statements := []FullBinlogStatement{ + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ 
Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, Sql: []byte("set1"), - }, { + }, + }, + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_INSERT, Sql: []byte("ddl"), - }, { + }, + }, + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_INSERT, Sql: []byte("dml1 /* _stream excluded1*/"), }, }, - EventToken: &querypb.EventToken{ - Position: "MariaDB/0-41983-1", - }, + } + eventToken := &querypb.EventToken{ + Position: "MariaDB/0-41983-1", } var got string f := TablesFilterFunc(testTables, func(reply *binlogdatapb.BinlogTransaction) error { got = bltToString(reply) return nil }) - f(&input) + f(eventToken, statements) want := `position: "MariaDB/0-41983-1" ` if want != got { t.Errorf("want %s, got %s", want, got) diff --git a/go/vt/binlog/updatestream.go b/go/vt/binlog/updatestream.go index 9f881630b9f..8a46abfa497 100644 --- a/go/vt/binlog/updatestream.go +++ b/go/vt/binlog/updatestream.go @@ -14,10 +14,10 @@ import ( // UpdateStream is the interface for the binlog server type UpdateStream interface { // StreamKeyRange streams events related to a KeyRange only - StreamKeyRange(ctx context.Context, position string, keyRange *topodatapb.KeyRange, charset *binlogdatapb.Charset, callback func(*binlogdatapb.BinlogTransaction) error) error + StreamKeyRange(ctx context.Context, position string, keyRange *topodatapb.KeyRange, charset *binlogdatapb.Charset, callback func(trans *binlogdatapb.BinlogTransaction) error) error // StreamTables streams events related to a set of Tables only - StreamTables(ctx context.Context, position string, tables []string, charset *binlogdatapb.Charset, callback func(*binlogdatapb.BinlogTransaction) error) error + StreamTables(ctx context.Context, position string, tables []string, charset *binlogdatapb.Charset, callback func(trans *binlogdatapb.BinlogTransaction) error) error // HandlePanic should be called in a 
defer, // first thing in the RPC implementation. diff --git a/go/vt/binlog/updatestreamctl.go b/go/vt/binlog/updatestreamctl.go index 492bcd55d20..ae19be36c36 100644 --- a/go/vt/binlog/updatestreamctl.go +++ b/go/vt/binlog/updatestreamctl.go @@ -218,7 +218,7 @@ func (updateStream *UpdateStreamImpl) IsEnabled() bool { } // StreamKeyRange is part of the UpdateStream interface -func (updateStream *UpdateStreamImpl) StreamKeyRange(ctx context.Context, position string, keyRange *topodatapb.KeyRange, charset *binlogdatapb.Charset, callback func(reply *binlogdatapb.BinlogTransaction) error) (err error) { +func (updateStream *UpdateStreamImpl) StreamKeyRange(ctx context.Context, position string, keyRange *topodatapb.KeyRange, charset *binlogdatapb.Charset, callback func(trans *binlogdatapb.BinlogTransaction) error) (err error) { pos, err := replication.DecodePosition(position) if err != nil { return err @@ -239,10 +239,10 @@ func (updateStream *UpdateStreamImpl) StreamKeyRange(ctx context.Context, positi log.Infof("ServeUpdateStream starting @ %#v", pos) // Calls cascade like this: binlog.Streamer->KeyRangeFilterFunc->func(*binlogdatapb.BinlogTransaction)->callback - f := KeyRangeFilterFunc(keyRange, func(reply *binlogdatapb.BinlogTransaction) error { - keyrangeStatements.Add(int64(len(reply.Statements))) + f := KeyRangeFilterFunc(keyRange, func(trans *binlogdatapb.BinlogTransaction) error { + keyrangeStatements.Add(int64(len(trans.Statements))) keyrangeTransactions.Add(1) - return callback(reply) + return callback(trans) }) bls := NewStreamer(updateStream.dbname, updateStream.mysqld, updateStream.se, charset, pos, 0, f) @@ -254,7 +254,7 @@ func (updateStream *UpdateStreamImpl) StreamKeyRange(ctx context.Context, positi } // StreamTables is part of the UpdateStream interface -func (updateStream *UpdateStreamImpl) StreamTables(ctx context.Context, position string, tables []string, charset *binlogdatapb.Charset, callback func(reply *binlogdatapb.BinlogTransaction) error) 
(err error) { +func (updateStream *UpdateStreamImpl) StreamTables(ctx context.Context, position string, tables []string, charset *binlogdatapb.Charset, callback func(trans *binlogdatapb.BinlogTransaction) error) (err error) { pos, err := replication.DecodePosition(position) if err != nil { return err @@ -275,10 +275,10 @@ func (updateStream *UpdateStreamImpl) StreamTables(ctx context.Context, position log.Infof("ServeUpdateStream starting @ %#v", pos) // Calls cascade like this: binlog.Streamer->TablesFilterFunc->func(*binlogdatapb.BinlogTransaction)->callback - f := TablesFilterFunc(tables, func(reply *binlogdatapb.BinlogTransaction) error { - tablesStatements.Add(int64(len(reply.Statements))) + f := TablesFilterFunc(tables, func(trans *binlogdatapb.BinlogTransaction) error { + tablesStatements.Add(int64(len(trans.Statements))) tablesTransactions.Add(1) - return callback(reply) + return callback(trans) }) bls := NewStreamer(updateStream.dbname, updateStream.mysqld, updateStream.se, charset, pos, 0, f) diff --git a/go/vt/vttablet/tabletserver/replication_watcher.go b/go/vt/vttablet/tabletserver/replication_watcher.go index e886c16e64f..9650aa7019c 100644 --- a/go/vt/vttablet/tabletserver/replication_watcher.go +++ b/go/vt/vttablet/tabletserver/replication_watcher.go @@ -95,15 +95,15 @@ func (rpw *ReplicationWatcher) Process(ctx context.Context, dbconfigs dbconfigs. }() for { log.Infof("Starting a binlog Streamer from current replication position to monitor binlogs") - streamer := binlog.NewStreamer(dbconfigs.App.DbName, mysqld, rpw.se, nil /*clientCharset*/, replication.Position{}, 0 /*timestamp*/, func(trans *binlogdatapb.BinlogTransaction) error { + streamer := binlog.NewStreamer(dbconfigs.App.DbName, mysqld, rpw.se, nil /*clientCharset*/, replication.Position{}, 0 /*timestamp*/, func(eventToken *querypb.EventToken, statements []binlog.FullBinlogStatement) error { // Save the event token. 
rpw.mu.Lock() - rpw.eventToken = trans.EventToken + rpw.eventToken = eventToken rpw.mu.Unlock() // If it's a DDL, trigger a schema reload. - for _, statement := range trans.Statements { - if statement.Category != binlogdatapb.BinlogTransaction_Statement_BL_DDL { + for _, statement := range statements { + if statement.Statement.Category != binlogdatapb.BinlogTransaction_Statement_BL_DDL { continue } err := rpw.se.Reload(ctx) From d88e2e5d7a9a7b0c893527518b7e29bdc675a883 Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Mon, 13 Mar 2017 13:09:56 -0700 Subject: [PATCH 104/108] Adding support for inserts / deletes. --- go/mysqlconn/replication/binlog_event_make.go | 6 +- go/vt/binlog/binlog_streamer.go | 122 ++++++++++++++++++ go/vt/binlog/binlog_streamer_rbr_test.go | 84 +++++++++++- 3 files changed, 204 insertions(+), 8 deletions(-) diff --git a/go/mysqlconn/replication/binlog_event_make.go b/go/mysqlconn/replication/binlog_event_make.go index adc8c2cf23b..bc254441047 100644 --- a/go/mysqlconn/replication/binlog_event_make.go +++ b/go/mysqlconn/replication/binlog_event_make.go @@ -378,7 +378,11 @@ func newRowsEvent(f BinlogFormat, s *FakeBinlogStream, typ byte, tableID uint64, data[8] = 0x02 data[9] = 0x00 - data[10] = byte(rows.IdentifyColumns.Count()) // FIXME(alainjobart) len + if hasIdentify { + data[10] = byte(rows.IdentifyColumns.Count()) // FIXME(alainjobart) len + } else { + data[10] = byte(rows.DataColumns.Count()) // FIXME(alainjobart) len + } pos := 11 if hasIdentify { diff --git a/go/vt/binlog/binlog_streamer.go b/go/vt/binlog/binlog_streamer.go index 56a2df29599..8bc74014553 100644 --- a/go/vt/binlog/binlog_streamer.go +++ b/go/vt/binlog/binlog_streamer.go @@ -389,6 +389,40 @@ func (bls *Streamer) parseEvents(ctx context.Context, events <-chan replication. 
return pos, err } tableMaps[tableID] = tm + case ev.IsWriteRows(): + tableID := ev.TableID(format) + tm, ok := tableMaps[tableID] + if !ok { + return pos, fmt.Errorf("unknown tableID %v in WriteRows event", tableID) + } + if tm.Database != "" && tm.Database != bls.dbname { + // Skip cross-db statements. + continue + } + ti := bls.se.GetTable(sqlparser.NewTableIdent(tm.Name)) + if ti == nil { + return pos, fmt.Errorf("unknown table %v in schema", tm.Name) + } + setTimestamp := &binlogdatapb.BinlogTransaction_Statement{ + Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, + Sql: []byte(fmt.Sprintf("SET TIMESTAMP=%d", ev.Timestamp())), + } + statements = append(statements, FullBinlogStatement{ + Statement: setTimestamp, + }) + + rows, err := ev.Rows(format, tm) + if err != nil { + return pos, err + } + + statements = appendInserts(statements, &rows, tm, ti) + + if autocommit { + if err = commit(ev.Timestamp()); err != nil { + return pos, err + } + } case ev.IsUpdateRows(): tableID := ev.TableID(format) tm, ok := tableMaps[tableID] @@ -418,6 +452,40 @@ func (bls *Streamer) parseEvents(ctx context.Context, events <-chan replication. statements = appendUpdates(statements, &rows, tm, ti) + if autocommit { + if err = commit(ev.Timestamp()); err != nil { + return pos, err + } + } + case ev.IsDeleteRows(): + tableID := ev.TableID(format) + tm, ok := tableMaps[tableID] + if !ok { + return pos, fmt.Errorf("unknown tableID %v in DeleteRows event", tableID) + } + if tm.Database != "" && tm.Database != bls.dbname { + // Skip cross-db statements. 
+ continue + } + ti := bls.se.GetTable(sqlparser.NewTableIdent(tm.Name)) + if ti == nil { + return pos, fmt.Errorf("unknown table %v in schema", tm.Name) + } + setTimestamp := &binlogdatapb.BinlogTransaction_Statement{ + Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, + Sql: []byte(fmt.Sprintf("SET TIMESTAMP=%d", ev.Timestamp())), + } + statements = append(statements, FullBinlogStatement{ + Statement: setTimestamp, + }) + + rows, err := ev.Rows(format, tm) + if err != nil { + return pos, err + } + + statements = appendDeletes(statements, &rows, tm, ti) + if autocommit { if err = commit(ev.Timestamp()); err != nil { return pos, err @@ -427,6 +495,33 @@ func (bls *Streamer) parseEvents(ctx context.Context, events <-chan replication. } } +func appendInserts(statements []FullBinlogStatement, rows *replication.Rows, tm *replication.TableMap, ti *schema.Table) []FullBinlogStatement { + for i := range rows.Rows { + var sql bytes.Buffer + + sql.WriteString("INSERT INTO ") + sql.WriteString(tm.Name) + sql.WriteString(" SET ") + + if err := writeValuesAsSQL(&sql, rows, tm, ti, i); err != nil { + log.Warningf("writeValuesAsSQL(%v) failed: %v", i, err) + continue + } + + statement := &binlogdatapb.BinlogTransaction_Statement{ + Category: binlogdatapb.BinlogTransaction_Statement_BL_INSERT, + Sql: sql.Bytes(), + } + statements = append(statements, FullBinlogStatement{ + Statement: statement, + Table: tm.Name, + }) + // TODO(alainjobart): fill in keyspaceID, pkNames, pkRows + // if necessary. 
+ } + return statements +} + func appendUpdates(statements []FullBinlogStatement, rows *replication.Rows, tm *replication.TableMap, ti *schema.Table) []FullBinlogStatement { for i := range rows.Rows { var sql bytes.Buffer @@ -461,6 +556,33 @@ func appendUpdates(statements []FullBinlogStatement, rows *replication.Rows, tm return statements } +func appendDeletes(statements []FullBinlogStatement, rows *replication.Rows, tm *replication.TableMap, ti *schema.Table) []FullBinlogStatement { + for i := range rows.Rows { + var sql bytes.Buffer + + sql.WriteString("DELETE FROM ") + sql.WriteString(tm.Name) + sql.WriteString(" WHERE ") + + if err := writeIdentifiesAsSQL(&sql, rows, tm, ti, i); err != nil { + log.Warningf("writeIdentifiesAsSQL(%v) failed: %v", i, err) + continue + } + + statement := &binlogdatapb.BinlogTransaction_Statement{ + Category: binlogdatapb.BinlogTransaction_Statement_BL_DELETE, + Sql: sql.Bytes(), + } + statements = append(statements, FullBinlogStatement{ + Statement: statement, + Table: tm.Name, + }) + // TODO(alainjobart): fill in keyspaceID, pkNames, pkRows + // if necessary. + } + return statements +} + // writeValuesAsSQL is a helper method to print the values as SQL in the // provided bytes.Buffer. func writeValuesAsSQL(sql *bytes.Buffer, rs *replication.Rows, tm *replication.TableMap, ti *schema.Table, rowIndex int) error { diff --git a/go/vt/binlog/binlog_streamer_rbr_test.go b/go/vt/binlog/binlog_streamer_rbr_test.go index 7466e915785..5efc5391167 100644 --- a/go/vt/binlog/binlog_streamer_rbr_test.go +++ b/go/vt/binlog/binlog_streamer_rbr_test.go @@ -56,8 +56,26 @@ func TestStreamerParseRBRUpdateEvent(t *testing.T) { } tm.CanBeNull.Set(1, true) + // Do an insert packet with all fields set. 
+ insertRows := replication.Rows{ + Flags: 0x1234, + DataColumns: replication.NewServerBitmap(2), + Rows: []replication.Row{ + { + NullColumns: replication.NewServerBitmap(2), + Data: []byte{ + 0x10, 0x20, 0x30, 0x40, // long + 0x04, 0x00, // len('abcd') + 'a', 'b', 'c', 'd', // 'abcd' + }, + }, + }, + } + insertRows.DataColumns.Set(0, true) + insertRows.DataColumns.Set(1, true) + // Do an update packet with all fields set. - rows := replication.Rows{ + updateRows := replication.Rows{ Flags: 0x1234, IdentifyColumns: replication.NewServerBitmap(2), DataColumns: replication.NewServerBitmap(2), @@ -78,10 +96,28 @@ func TestStreamerParseRBRUpdateEvent(t *testing.T) { }, }, } - rows.IdentifyColumns.Set(0, true) - rows.IdentifyColumns.Set(1, true) - rows.DataColumns.Set(0, true) - rows.DataColumns.Set(1, true) + updateRows.IdentifyColumns.Set(0, true) + updateRows.IdentifyColumns.Set(1, true) + updateRows.DataColumns.Set(0, true) + updateRows.DataColumns.Set(1, true) + + // Do a delete packet with all fields set. 
+ deleteRows := replication.Rows{ + Flags: 0x1234, + IdentifyColumns: replication.NewServerBitmap(2), + Rows: []replication.Row{ + { + NullIdentifyColumns: replication.NewServerBitmap(2), + Identify: []byte{ + 0x10, 0x20, 0x30, 0x40, // long + 0x03, 0x00, // len('abc') + 'a', 'b', 'c', // 'abc' + }, + }, + }, + } + deleteRows.IdentifyColumns.Set(0, true) + deleteRows.IdentifyColumns.Set(1, true) input := []replication.BinlogEvent{ replication.NewRotateEvent(f, s, 0, ""), @@ -91,7 +127,9 @@ func TestStreamerParseRBRUpdateEvent(t *testing.T) { replication.NewQueryEvent(f, s, replication.Query{ Database: "vt_test_keyspace", SQL: "BEGIN"}), - replication.NewUpdateRowsEvent(f, s, tableID, rows), + replication.NewWriteRowsEvent(f, s, tableID, insertRows), + replication.NewUpdateRowsEvent(f, s, tableID, updateRows), + replication.NewDeleteRowsEvent(f, s, tableID, deleteRows), replication.NewXIDEvent(f, s), } @@ -100,6 +138,19 @@ func TestStreamerParseRBRUpdateEvent(t *testing.T) { want := []fullBinlogTransaction{ { statements: []FullBinlogStatement{ + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ + Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, + Sql: []byte("SET TIMESTAMP=1407805592"), + }, + }, + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ + Category: binlogdatapb.BinlogTransaction_Statement_BL_INSERT, + Sql: []byte("INSERT INTO vt_a SET id=1076895760, message='abcd'"), + }, + Table: "vt_a", + }, { Statement: &binlogdatapb.BinlogTransaction_Statement{ Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, @@ -113,6 +164,19 @@ func TestStreamerParseRBRUpdateEvent(t *testing.T) { }, Table: "vt_a", }, + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ + Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, + Sql: []byte("SET TIMESTAMP=1407805592"), + }, + }, + { + Statement: &binlogdatapb.BinlogTransaction_Statement{ + Category: binlogdatapb.BinlogTransaction_Statement_BL_DELETE, + Sql: []byte("DELETE FROM vt_a WHERE 
id=1076895760 AND message='abc'"), + }, + Table: "vt_a", + }, }, eventToken: &querypb.EventToken{ Timestamp: 1407805592, @@ -143,6 +207,12 @@ func TestStreamerParseRBRUpdateEvent(t *testing.T) { } if !reflect.DeepEqual(got, want) { - t.Errorf("binlogConnStreamer.parseEvents(): got:\n%v\nwant:\n%v", got, want) + t.Errorf("binlogConnStreamer.parseEvents(): got:\n%+v\nwant:\n%+v", got, want) + for i, fbt := range got { + t.Errorf("Got (%v)=%v", i, fbt.statements) + } + for i, fbt := range want { + t.Errorf("Want(%v)=%v", i, fbt.statements) + } } } From 925addf07e98afca251b6d2b0ba973a0f256c6ed Mon Sep 17 00:00:00 2001 From: wangyipei01 Date: Tue, 14 Mar 2017 20:48:55 -0700 Subject: [PATCH 105/108] workflow: Implement canary feature. (#2613) User can control through UI for running a canary task, then running the remaining tasks. Create unit tests and manually test the UI in e2e test environment. --- go/vt/workflow/manager.go | 11 + .../horizontal_resharding_workflow.go | 38 +- .../horizontal_resharding_workflow_test.go | 2 +- go/vt/workflow/resharding/parallel_runner.go | 330 ++++++-- .../resharding/parallel_runner_test.go | 745 +++++++++++++++--- ...test_workflow.go => test_workflow_test.go} | 170 ++-- test/horizontal_resharding_workflow.py | 3 +- .../src/app/shared/dialog/dialog-content.ts | 2 +- web/vtctld2/src/app/shared/flags/flag.ts | 4 +- .../src/app/shared/flags/workflow.flags.ts | 11 + 10 files changed, 1042 insertions(+), 274 deletions(-) rename go/vt/workflow/resharding/{test_workflow.go => test_workflow_test.go} (72%) diff --git a/go/vt/workflow/manager.go b/go/vt/workflow/manager.go index eefe13a6be6..f3170678576 100644 --- a/go/vt/workflow/manager.go +++ b/go/vt/workflow/manager.go @@ -469,6 +469,17 @@ func (m *Manager) WorkflowForTesting(uuid string) (Workflow, error) { return rw.workflow, nil } +// WorkflowInfoForTesting returns the WorkflowInfo object of the running +// workflow identified by uuid. 
The method is used in unit tests to manipulate +// checkpoint. +func (m *Manager) WorkflowInfoForTesting(uuid string) (*topo.WorkflowInfo, error) { + rw, err := m.runningWorkflow(uuid) + if err != nil { + return nil, err + } + return rw.wi, nil +} + // runningWorkflow returns a runningWorkflow by uuid. func (m *Manager) runningWorkflow(uuid string) (*runningWorkflow, error) { m.mu.Lock() diff --git a/go/vt/workflow/resharding/horizontal_resharding_workflow.go b/go/vt/workflow/resharding/horizontal_resharding_workflow.go index acbb51aa4d0..5975f2fd4a7 100644 --- a/go/vt/workflow/resharding/horizontal_resharding_workflow.go +++ b/go/vt/workflow/resharding/horizontal_resharding_workflow.go @@ -7,6 +7,7 @@ package resharding import ( "flag" "fmt" + "strconv" "strings" log "github.com/golang/glog" @@ -59,6 +60,7 @@ func (*HorizontalReshardingWorkflowFactory) Init(m *workflow.Manager, w *workflo subFlags := flag.NewFlagSet(horizontalReshardingFactoryName, flag.ContinueOnError) keyspace := subFlags.String("keyspace", "", "Name of keyspace to perform horizontal resharding") vtworkersStr := subFlags.String("vtworkers", "", "A comma-separated list of vtworker addresses") + enableApprovals := subFlags.Bool("enable_approvals", true, "If true, executions of tasks require user's approvals on the UI.") if err := subFlags.Parse(args); err != nil { return err @@ -75,6 +77,8 @@ func (*HorizontalReshardingWorkflowFactory) Init(m *workflow.Manager, w *workflo return err } + checkpoint.Settings["enable_approvals"] = fmt.Sprintf("%v", *enableApprovals) + w.Data, err = proto.Marshal(checkpoint) if err != nil { return err @@ -91,13 +95,19 @@ func (*HorizontalReshardingWorkflowFactory) Instantiate(m *workflow.Manager, w * return nil, err } + enableApprovals, err := strconv.ParseBool(checkpoint.Settings["enable_approvals"]) + if err != nil { + return nil, err + } + hw := &HorizontalReshardingWorkflow{ - checkpoint: checkpoint, - rootUINode: rootNode, - logger: logutil.NewMemoryLogger(), - 
wr: wrangler.New(logutil.NewConsoleLogger(), m.TopoServer(), tmclient.NewTabletManagerClient()), - topoServer: m.TopoServer(), - manager: m, + checkpoint: checkpoint, + rootUINode: rootNode, + logger: logutil.NewMemoryLogger(), + wr: wrangler.New(logutil.NewConsoleLogger(), m.TopoServer(), tmclient.NewTabletManagerClient()), + topoServer: m.TopoServer(), + manager: m, + enableApprovals: enableApprovals, } copySchemaUINode := &workflow.Node{ Name: "CopySchemaShard", @@ -310,6 +320,8 @@ type HorizontalReshardingWorkflow struct { checkpoint *workflowpb.WorkflowCheckpoint checkpointWriter *CheckpointWriter + + enableApprovals bool } // Run executes the horizontal resharding process. @@ -330,43 +342,43 @@ func (hw *HorizontalReshardingWorkflow) Run(ctx context.Context, manager *workfl func (hw *HorizontalReshardingWorkflow) runWorkflow() error { copySchemaTasks := hw.GetTasks(phaseCopySchema) - copySchemaRunner := NewParallelRunner(hw.ctx, hw.rootUINode, hw.checkpointWriter, copySchemaTasks, hw.runCopySchema, Parallel) + copySchemaRunner := NewParallelRunner(hw.ctx, hw.rootUINode, hw.checkpointWriter, copySchemaTasks, hw.runCopySchema, Parallel, hw.enableApprovals) if err := copySchemaRunner.Run(); err != nil { return err } cloneTasks := hw.GetTasks(phaseClone) - cloneRunner := NewParallelRunner(hw.ctx, hw.rootUINode, hw.checkpointWriter, cloneTasks, hw.runSplitClone, Parallel) + cloneRunner := NewParallelRunner(hw.ctx, hw.rootUINode, hw.checkpointWriter, cloneTasks, hw.runSplitClone, Parallel, hw.enableApprovals) if err := cloneRunner.Run(); err != nil { return err } waitForFilteredReplicationTasks := hw.GetTasks(phaseWaitForFilteredReplication) - waitForFilteredReplicationRunner := NewParallelRunner(hw.ctx, hw.rootUINode, hw.checkpointWriter, waitForFilteredReplicationTasks, hw.runWaitForFilteredReplication, Parallel) + waitForFilteredReplicationRunner := NewParallelRunner(hw.ctx, hw.rootUINode, hw.checkpointWriter, waitForFilteredReplicationTasks, 
hw.runWaitForFilteredReplication, Parallel, hw.enableApprovals) if err := waitForFilteredReplicationRunner.Run(); err != nil { return err } diffTasks := hw.GetTasks(phaseDiff) - diffRunner := NewParallelRunner(hw.ctx, hw.rootUINode, hw.checkpointWriter, diffTasks, hw.runSplitDiff, Sequential) + diffRunner := NewParallelRunner(hw.ctx, hw.rootUINode, hw.checkpointWriter, diffTasks, hw.runSplitDiff, Sequential, hw.enableApprovals) if err := diffRunner.Run(); err != nil { return err } migrateRdonlyTasks := hw.GetTasks(phaseMigrateRdonly) - migrateRdonlyRunner := NewParallelRunner(hw.ctx, hw.rootUINode, hw.checkpointWriter, migrateRdonlyTasks, hw.runMigrate, Sequential) + migrateRdonlyRunner := NewParallelRunner(hw.ctx, hw.rootUINode, hw.checkpointWriter, migrateRdonlyTasks, hw.runMigrate, Sequential, hw.enableApprovals) if err := migrateRdonlyRunner.Run(); err != nil { return err } migrateReplicaTasks := hw.GetTasks(phaseMigrateReplica) - migrateReplicaRunner := NewParallelRunner(hw.ctx, hw.rootUINode, hw.checkpointWriter, migrateReplicaTasks, hw.runMigrate, Sequential) + migrateReplicaRunner := NewParallelRunner(hw.ctx, hw.rootUINode, hw.checkpointWriter, migrateReplicaTasks, hw.runMigrate, Sequential, hw.enableApprovals) if err := migrateReplicaRunner.Run(); err != nil { return err } migrateMasterTasks := hw.GetTasks(phaseMigrateMaster) - migrateMasterRunner := NewParallelRunner(hw.ctx, hw.rootUINode, hw.checkpointWriter, migrateMasterTasks, hw.runMigrate, Sequential) + migrateMasterRunner := NewParallelRunner(hw.ctx, hw.rootUINode, hw.checkpointWriter, migrateMasterTasks, hw.runMigrate, Sequential, hw.enableApprovals) if err := migrateMasterRunner.Run(); err != nil { return err } diff --git a/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go b/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go index 848e8f83a25..1c29c9a52df 100644 --- a/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go +++ 
b/go/vt/workflow/resharding/horizontal_resharding_workflow_test.go @@ -49,7 +49,7 @@ func TestHorizontalResharding(t *testing.T) { // Run the manager in the background. wg, _, cancel := startManager(m) // Create the workflow. - uuid, err := m.Create(ctx, horizontalReshardingFactoryName, []string{"-keyspace=" + testKeyspace, "-vtworkers=" + testVtworkers}) + uuid, err := m.Create(ctx, horizontalReshardingFactoryName, []string{"-keyspace=" + testKeyspace, "-vtworkers=" + testVtworkers, "-enable_approvals=false"}) if err != nil { t.Fatalf("cannot create resharding workflow: %v", err) } diff --git a/go/vt/workflow/resharding/parallel_runner.go b/go/vt/workflow/resharding/parallel_runner.go index 0e76e32c38c..2adc0786355 100644 --- a/go/vt/workflow/resharding/parallel_runner.go +++ b/go/vt/workflow/resharding/parallel_runner.go @@ -1,13 +1,16 @@ package resharding import ( + "errors" "fmt" + "path" "strings" "sync" log "github.com/golang/glog" "golang.org/x/net/context" + "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/workflow" workflowpb "github.com/youtube/vitess/go/vt/proto/workflow" @@ -22,50 +25,77 @@ const ( Parallel ) +const ( + actionNameRetry = "Retry" + actionNameApproveFirstTask = "Approve first shard" + actionNameApproveFirstTaskDone = "First shard approved" + actionNameApproveRemainingTasks = "Approve remaining shards" + actionNameApproveRemainingTasksDone = "Remaining shards approved" +) + +const taskFinishedMessage = "task finished" + // ParallelRunner is used to control executing tasks concurrently. // Each phase has its own ParallelRunner object. type ParallelRunner struct { ctx context.Context + uiLogger *logutil.MemoryLogger rootUINode *workflow.Node + phaseUINode *workflow.Node checkpointWriter *CheckpointWriter // tasks stores selected tasks for the phase with expected execution order. 
tasks []*workflowpb.Task concurrencyLevel level executeFunc func(context.Context, *workflowpb.Task) error + enableApprovals bool - // mu is used to protect the access to retryActionRegistry and - // serialize UI node changes. + // mu is used to protect the access to retryActionRegistry, channels for task + // approvals and serialize UI node changes. mu sync.Mutex // retryActionRegistry stores the data for retry actions. // Each task can retrieve the channel for synchronizing retrying // through its UI node path. - retryActionRegistry map[string]chan struct{} - - // reportTaskStatus gives the worklflow debug option to output the task - // status through UI. - // TODO(yipeiw): We will remove this option and make it always report task - // status, once we can unit test resharding workflow through manager - // (we ignore creating UI nodes when manually creating the workflow now). - reportTaskStatus bool + retryActionRegistry map[string]chan struct{} + firstTaskApproved chan struct{} + remainingTasksApproved chan struct{} } // NewParallelRunner returns a new ParallelRunner. 
-func NewParallelRunner(ctx context.Context, rootUINode *workflow.Node, cp *CheckpointWriter, tasks []*workflowpb.Task, executeFunc func(context.Context, *workflowpb.Task) error, concurrencyLevel level) *ParallelRunner { - return &ParallelRunner{ +func NewParallelRunner(ctx context.Context, rootUINode *workflow.Node, cp *CheckpointWriter, tasks []*workflowpb.Task, executeFunc func(context.Context, *workflowpb.Task) error, concurrencyLevel level, enableApprovals bool) *ParallelRunner { + if len(tasks) < 1 { + log.Fatal(errors.New("BUG: No tasks passed into ParallelRunner")) + } + + phaseID := path.Dir(tasks[0].Id) + phaseUINode, err := rootUINode.GetChildByPath(phaseID) + if err != nil { + panic(fmt.Errorf("BUG: nodepath %v not found", phaseID)) + } + + p := &ParallelRunner{ ctx: ctx, + uiLogger: logutil.NewMemoryLogger(), rootUINode: rootUINode, + phaseUINode: phaseUINode, checkpointWriter: cp, tasks: tasks, executeFunc: executeFunc, concurrencyLevel: concurrencyLevel, retryActionRegistry: make(map[string]chan struct{}), - reportTaskStatus: false, + enableApprovals: enableApprovals, + } + + if p.enableApprovals { + p.initApprovalActions() } + + return p } // Run is the entry point for controling task executions. func (p *ParallelRunner) Run() error { - var parallelNum int // default value is 0. The task will not run in this case. + // default value is 0. The task will not run in this case. + var parallelNum int switch p.concurrencyLevel { case Sequential: parallelNum = 1 @@ -74,76 +104,108 @@ func (p *ParallelRunner) Run() error { default: panic(fmt.Sprintf("BUG: Invalid concurrency level: %v", p.concurrencyLevel)) } - // sem is a channel used to control the level of concurrency. 
sem := make(chan bool, parallelNum) - for _, task := range p.tasks { - if task.State == workflowpb.TaskState_TaskDone && task.Error == "" { + wg := sync.WaitGroup{} + for i, task := range p.tasks { + if isTaskSucceeded(task) { continue } sem <- true + wg.Add(1) + if p.enableApprovals && !isTaskRunning(task) { + p.waitForApproval(i) + } + go func(t *workflowpb.Task) { + p.setUIMessage(fmt.Sprintf("Launch task: %v.", t.Id)) defer func() { <-sem }() - defer p.setFinishUIMessage(t.Id) - - taskID := t.Id - for { - // Update the task status to running in the checkpoint. - if updateErr := p.checkpointWriter.UpdateTask(taskID, workflowpb.TaskState_TaskRunning, nil); updateErr != nil { - // Only logging the error rather then passing it to ErrorRecorder. - // Errors in ErrorRecorder will lead to the stop of a workflow. We - // don't want to stop the workflow if only checkpointing fails. - log.Errorf("%v", updateErr) - } - err := p.executeFunc(p.ctx, t) - // Update the task status to done in the checkpoint. - if updateErr := p.checkpointWriter.UpdateTask(taskID, workflowpb.TaskState_TaskDone, err); updateErr != nil { - log.Errorf("%v", updateErr) - } - - // The function returns if the task is executed successfully. - if err == nil { - log.Infof("task %v has finished.", taskID) - return - } - // When task fails, first check whether the context is canceled. - // If so, return right away. If not, enable the retry action. - select { - case <-p.ctx.Done(): - return - default: - } - retryChannel := p.addRetryAction(taskID) - - // Block the task execution until the retry action is triggered - // or the context is canceled. - select { - case <-retryChannel: - continue - case <-p.ctx.Done(): - return - } - } + defer wg.Done() + p.executeTask(t) }(task) } + wg.Wait() - // Wait until all running jobs are done. - for i := 0; i < parallelNum; i++ { - sem <- true + if p.enableApprovals { + p.clearPhaseActions() } - // TODO: collect error message from tasks.Error instead, s.t. 
if the task is retried, we can update the error + // TODO(yipeiw): collect error message from tasks.Error instead, + // s.t. if the task is retried, we can update the error return nil } -// Action handles the retry action. It implements the interface ActionListener. +func (p *ParallelRunner) executeTask(t *workflowpb.Task) { + taskID := t.Id + for { + // Update the task status to running in the checkpoint. + if updateErr := p.checkpointWriter.UpdateTask(taskID, workflowpb.TaskState_TaskRunning, nil); updateErr != nil { + // Only logging the error rather then passing it to ErrorRecorder. + // Errors in ErrorRecorder will lead to the stop of a workflow. We + // don't want to stop the workflow if only checkpointing fails. + log.Errorf("%v", updateErr) + } + err := p.executeFunc(p.ctx, t) + // Update the task status to done in the checkpoint. + if updateErr := p.checkpointWriter.UpdateTask(taskID, workflowpb.TaskState_TaskDone, err); updateErr != nil { + log.Errorf("%v", updateErr) + } + + // The function returns if the task is executed successfully. + if err == nil { + p.setFinishUIMessage(t.Id) + p.setUIMessage(fmt.Sprintf("Task %v has finished.", t.Id)) + return + } + // When task fails, first check whether the context is canceled. + // If so, return right away. If not, enable the retry action. + select { + case <-p.ctx.Done(): + return + default: + } + retryChannel := p.addRetryAction(taskID) + + // Block the task execution until the retry action is triggered + // or the context is canceled. + select { + case <-retryChannel: + continue + case <-p.ctx.Done(): + return + } + } +} + +// Action handles retrying, approval of the first task and approval of the +// remaining tasks actions. It implements the interface ActionListener. func (p *ParallelRunner) Action(ctx context.Context, path, name string) error { switch name { - case "Retry": + case actionNameRetry: // Extract the path relative to the root node. 
parts := strings.Split(path, "/") taskID := strings.Join(parts[2:], "/") return p.triggerRetry(taskID) + case actionNameApproveFirstTask: + p.mu.Lock() + defer p.mu.Unlock() + + if p.firstTaskApproved != nil { + close(p.firstTaskApproved) + p.firstTaskApproved = nil + return nil + } + return fmt.Errorf("ignored the approval action %v because no pending approval found: it might be already approved before", actionNameApproveFirstTask) + case actionNameApproveRemainingTasks: + p.mu.Lock() + defer p.mu.Unlock() + + if p.remainingTasksApproved != nil { + close(p.remainingTasksApproved) + p.remainingTasksApproved = nil + return nil + } + return fmt.Errorf("ignored the approval action %v because no pending approval found: it might be already approved before", actionNameApproveRemainingTasks) default: return fmt.Errorf("Unknown action: %v", name) } @@ -192,7 +254,7 @@ func (p *ParallelRunner) addRetryAction(taskID string) chan struct{} { // Enable retry action on the node. retryAction := &workflow.Action{ - Name: "Retry", + Name: actionNameRetry, State: workflow.ActionStateEnabled, Style: workflow.ActionStyleWaiting, } @@ -202,16 +264,140 @@ func (p *ParallelRunner) addRetryAction(taskID string) chan struct{} { return retryChannel } -func (p *ParallelRunner) setFinishUIMessage(taskID string) { - if p.reportTaskStatus { - taskNode, err := p.rootUINode.GetChildByPath(taskID) - if err != nil { - panic(fmt.Errorf("nodepath %v not found", taskID)) +func (p *ParallelRunner) initApprovalActions() { + // If all tasks have succeeded, no action is added. 
+ allDone := true + for _, task := range p.tasks { + if !isTaskSucceeded(task) { + allDone = false + break } + } + if allDone { + return + } + actionFirstApproval := &workflow.Action{ + Name: actionNameApproveFirstTask, + State: workflow.ActionStateDisabled, + Style: workflow.ActionStyleTriggered, + } + if isTaskSucceeded(p.tasks[0]) || isTaskRunning(p.tasks[0]) { + // Reset the action name if the first task is running or has succeeded. + actionFirstApproval.Name = actionNameApproveFirstTaskDone + } + + p.mu.Lock() + defer p.mu.Unlock() + + p.phaseUINode.Actions = []*workflow.Action{actionFirstApproval} + // Add the approval action for the remaining tasks, + // if there are more than one tasks. + if len(p.tasks) > 1 { + actionRemainingTasksApproval := &workflow.Action{ + Name: actionNameApproveRemainingTasks, + State: workflow.ActionStateDisabled, + Style: workflow.ActionStyleTriggered, + } + if isTaskSucceeded(p.tasks[1]) || isTaskRunning(p.tasks[1]) { + // Reset the action name if the second task is running or has succeeded. 
+ actionRemainingTasksApproval.Name = actionNameApproveRemainingTasksDone + } + p.phaseUINode.Actions = append(p.phaseUINode.Actions, actionRemainingTasksApproval) + } + p.phaseUINode.Listener = p + p.phaseUINode.BroadcastChanges(false /* updateChildren */) +} + +func isTaskSucceeded(task *workflowpb.Task) bool { + if task.State == workflowpb.TaskState_TaskDone && task.Error == "" { + return true + } + return false +} + +func isTaskRunning(task *workflowpb.Task) bool { + if task.State == workflowpb.TaskState_TaskRunning { + return true + } + return false +} + +func (p *ParallelRunner) waitForApproval(taskIndex int) { + if taskIndex == 0 { p.mu.Lock() - defer p.mu.Unlock() - taskNode.Message = fmt.Sprintf("task %v finished", taskID) - taskNode.BroadcastChanges(false /* updateChildren */) + p.firstTaskApproved = make(chan struct{}) + firstTaskApproved := p.firstTaskApproved + p.updateApprovalActionLocked(0, actionNameApproveFirstTask, workflow.ActionStateEnabled, workflow.ActionStyleWaiting) + p.mu.Unlock() + + p.setUIMessage(fmt.Sprintf("approve first task enabled: %v", taskIndex)) + + select { + case <-firstTaskApproved: + p.mu.Lock() + defer p.mu.Unlock() + p.updateApprovalActionLocked(0, actionNameApproveFirstTaskDone, workflow.ActionStateDisabled, workflow.ActionStyleTriggered) + case <-p.ctx.Done(): + return + } + } else if taskIndex == 1 { + p.mu.Lock() + p.remainingTasksApproved = make(chan struct{}) + + remainingTasksApproved := p.remainingTasksApproved + p.updateApprovalActionLocked(1, actionNameApproveRemainingTasks, workflow.ActionStateEnabled, workflow.ActionStyleWaiting) + p.mu.Unlock() + + p.setUIMessage(fmt.Sprintf("approve remaining task enabled: %v", taskIndex)) + + select { + case <-remainingTasksApproved: + p.mu.Lock() + defer p.mu.Unlock() + p.updateApprovalActionLocked(1, actionNameApproveRemainingTasksDone, workflow.ActionStateDisabled, workflow.ActionStyleTriggered) + case <-p.ctx.Done(): + return + } } } + +func (p *ParallelRunner) 
updateApprovalActionLocked(index int, name string, state workflow.ActionState, style workflow.ActionStyle) { + action := p.phaseUINode.Actions[index] + action.Name = name + action.State = state + action.Style = style + p.phaseUINode.BroadcastChanges(false /* updateChildren */) +} + +func (p *ParallelRunner) clearPhaseActions() { + p.mu.Lock() + defer p.mu.Unlock() + + if len(p.phaseUINode.Actions) != 0 { + p.phaseUINode.Actions = []*workflow.Action{} + p.phaseUINode.BroadcastChanges(false /* updateChildren */) + } +} + +func (p *ParallelRunner) setFinishUIMessage(taskID string) { + taskNode, err := p.rootUINode.GetChildByPath(taskID) + if err != nil { + panic(fmt.Errorf("BUG: nodepath %v not found", taskID)) + } + + p.mu.Lock() + defer p.mu.Unlock() + taskNode.Message = taskFinishedMessage + taskNode.BroadcastChanges(false /* updateChildren */) +} + +func (p *ParallelRunner) setUIMessage(message string) { + p.uiLogger.Infof(message) + + p.mu.Lock() + defer p.mu.Unlock() + p.phaseUINode.Log = p.uiLogger.String() + p.phaseUINode.Message = message + p.phaseUINode.BroadcastChanges(false /* updateChildren */) +} diff --git a/go/vt/workflow/resharding/parallel_runner_test.go b/go/vt/workflow/resharding/parallel_runner_test.go index 3885f449efc..88b506b7e1d 100644 --- a/go/vt/workflow/resharding/parallel_runner_test.go +++ b/go/vt/workflow/resharding/parallel_runner_test.go @@ -2,9 +2,10 @@ package resharding import ( "context" + "encoding/json" "fmt" "path" - "strings" + "reflect" "sync" "testing" @@ -17,24 +18,108 @@ import ( ) func TestParallelRunner(t *testing.T) { + ctx := context.Background() ts := memorytopo.NewServer("cell") - m := workflow.NewManager(ts) + + m, uuid, wg, cancel, err := setupTestWorkflow(ctx, ts, false /* enableApprovals*/, false /* retry */) + if err != nil { + t.Fatal(err) + } + + // Start the job + if err := m.Start(ctx, uuid); err != nil { + t.Fatalf("cannot start testworkflow: %v", err) + } + + // Wait for the workflow to end. 
+ m.Wait(ctx, uuid) + + if err := verifyAllTasksDone(ctx, ts, uuid); err != nil { + t.Fatal(err) + } + // Stop the manager. + if err := m.Stop(ctx, uuid); err != nil { + t.Fatalf("cannot stop testworkflow: %v", err) + } + cancel() + wg.Wait() +} + +func TestParallelRunnerApproval(t *testing.T) { ctx := context.Background() + ts := memorytopo.NewServer("cell") - // Run the manager in the background. - wg, _, cancel := startManager(m) + m, uuid, wg, cancel, err := setupTestWorkflow(ctx, ts, true /* enableApprovals*/, false /* retry */) + if err != nil { + t.Fatal(err) + } - // Create a testworkflow. - uuid, err := m.Create(ctx, testWorkflowFactoryName, []string{"-retry=false", "-count=2"}) + notifications, index, err := setupNotifications(m) if err != nil { - t.Fatalf("cannot create testworkflow: %v", err) + t.Fatal(err) } + defer m.NodeManager().CloseWatcher(index) + defer close(notifications) // Start the job if err := m.Start(ctx, uuid); err != nil { t.Fatalf("cannot start testworkflow: %v", err) } + if err := checkUIChangeFromNoneStarted(m, uuid, notifications); err != nil { + t.Fatal(err) + } + + // Wait for the workflow to end. + m.Wait(context.Background(), uuid) + + if err := verifyAllTasksDone(ctx, ts, uuid); err != nil { + t.Fatal(err) + } + // Stop the manager. + if err := m.Stop(ctx, uuid); err != nil { + t.Fatalf("cannot stop testworkflow: %v", err) + } + cancel() + wg.Wait() +} + +func TestParallelRunnerApprovalFromFirstDone(t *testing.T) { + ctx := context.Background() + ts := memorytopo.NewServer("cell") + m, uuid, wg, cancel, err := setupTestWorkflow(ctx, ts, true /* enableApprovals*/, false /* retry */) + if err != nil { + t.Fatal(err) + } + tw, err := testworkflow(m, uuid) + if err != nil { + t.Fatal(err) + } + + // Change the checkpoint of the workflow to let task1 succeed + // before starting the workflow. 
+ wi, err := m.WorkflowInfoForTesting(uuid) + if err != nil { + t.Fatalf("fail to get workflow info from manager: %v", err) + } + checkpointWriter := NewCheckpointWriter(ts, tw.checkpoint, wi) + task1ID := createTestTaskID(phaseSimple, 0) + checkpointWriter.UpdateTask(task1ID, workflowpb.TaskState_TaskDone, nil) + + notifications, index, err := setupNotifications(m) + if err != nil { + t.Fatal(err) + } + defer m.NodeManager().CloseWatcher(index) + defer close(notifications) + + // Start the job + if err := m.Start(ctx, uuid); err != nil { + t.Fatalf("cannot start testworkflow: %v", err) + } + if err := checkUIChangeFirstApproved(m, uuid, notifications); err != nil { + t.Fatal(err) + } // Wait for the workflow to end. m.Wait(ctx, uuid) @@ -49,79 +134,98 @@ func TestParallelRunner(t *testing.T) { wg.Wait() } -func TestParallelRunnerRetryAction(t *testing.T) { - // Tasks in the workflow are forced to fail at the first attempt. Then we - // retry task1, after it is finished successfully, we retry task2. +func TestParallelRunnerApprovalFromFirstRunning(t *testing.T) { + ctx := context.Background() ts := memorytopo.NewServer("cell") - m := workflow.NewManager(ts) + m, uuid, wg, cancel, err := setupTestWorkflow(ctx, ts, true /* enableApprovals*/, false /* retry */) + if err != nil { + t.Fatal(err) + } + tw, err := testworkflow(m, uuid) + if err != nil { + t.Fatal(err) + } + + // Change the checkpoint of the workflow to let task1 succeeded + // before starting the workflow. 
+ wi, err := m.WorkflowInfoForTesting(uuid) + if err != nil { + t.Fatalf("fail to get workflow info from manager: %v", err) + } + checkpointWriter := NewCheckpointWriter(ts, tw.checkpoint, wi) + task1ID := createTestTaskID(phaseSimple, 0) + checkpointWriter.UpdateTask(task1ID, workflowpb.TaskState_TaskRunning, nil) + + notifications, index, err := setupNotifications(m) + if err != nil { + t.Fatal(err) + } + defer m.NodeManager().CloseWatcher(index) + defer close(notifications) + + // Start the job + if err := m.Start(ctx, uuid); err != nil { + t.Fatalf("cannot start testworkflow: %v", err) + } + + if err := checkUIChangeFirstApproved(m, uuid, notifications); err != nil { + t.Fatal(err) + } + + // Wait for the workflow to end. + m.Wait(ctx, uuid) + + if err := verifyAllTasksDone(ctx, ts, uuid); err != nil { + t.Fatal(err) + } + // Stop the manager. + if err := m.Stop(ctx, uuid); err != nil { + t.Fatalf("cannot stop testworkflow: %v", err) + } + cancel() + wg.Wait() +} + +func TestParallelRunnerApprovalFromFirstDoneSecondRunning(t *testing.T) { ctx := context.Background() - // Run the manager in the background. - wg, _, cancel := startManager(m) + ts := memorytopo.NewServer("cell") + m, uuid, wg, cancel, err := setupTestWorkflow(ctx, ts, true /* enableApprovals*/, false /* retry */) + if err != nil { + t.Fatal(err) + } + tw, err := testworkflow(m, uuid) + if err != nil { + t.Fatal(err) + } - // Create a testworkflow. - uuid, err := m.Create(ctx, testWorkflowFactoryName, []string{"-retry=true", "-count=2"}) + // Change the checkpoint of the workflow to let task1 succeeded + // before starting the workflow. 
+ wi, err := m.WorkflowInfoForTesting(uuid) if err != nil { - t.Fatalf("cannot create testworkflow: %v", err) + t.Fatalf("fail to get workflow info from manager: %v", err) } + checkpointWriter := NewCheckpointWriter(ts, tw.checkpoint, wi) + task1ID := createTestTaskID(phaseSimple, 0) + checkpointWriter.UpdateTask(task1ID, workflowpb.TaskState_TaskDone, nil) + task2ID := createTestTaskID(phaseSimple, 1) + checkpointWriter.UpdateTask(task2ID, workflowpb.TaskState_TaskRunning, nil) - // We use notifications channel to monitor the update of UI. - notifications := make(chan []byte, 10) - _, index, err := m.NodeManager().GetAndWatchFullTree(notifications) + notifications, index, err := setupNotifications(m) if err != nil { - t.Errorf("GetAndWatchTree Failed: %v", err) + t.Fatal(err) } defer m.NodeManager().CloseWatcher(index) - go func() { - // This goroutine is used to detect and trigger the retry actions. - task1ID := createTestTaskID(phaseSimple, 0) - task2ID := createTestTaskID(phaseSimple, 1) - - retry1 := false - retry2 := false - for { - select { - case monitor, ok := <-notifications: - monitorStr := string(monitor) - if !ok { - t.Errorf("notifications channel is closed unexpectedly: %v, %v", ok, monitorStr) - } - if strings.Contains(monitorStr, "Retry") { - if strings.Contains(monitorStr, task1ID) { - verifyTaskSuccessOrFailure(context.Background(), ts, uuid, task1ID, false /* isSuccess*/) - retry1 = true - } - if strings.Contains(monitorStr, task2ID) { - verifyTaskSuccessOrFailure(context.Background(), ts, uuid, task2ID, false /* isSuccess*/) - retry2 = true - } - } - // After detecting both tasks have enabled retry actions after failure, - // retry task1, check its success, then retry task2, check its success. 
- if retry1 && retry2 { - clickRetry(ctx, t, m, path.Join("/"+uuid, task1ID)) - waitForFinished(ctx, t, notifications, task1ID) - if err := verifyTaskSuccessOrFailure(context.Background(), ts, uuid, task1ID, true /* isSuccess*/); err != nil { - t.Errorf("verify task %v success failed: %v", task1ID, err) - } - - clickRetry(ctx, t, m, path.Join("/"+uuid, task2ID)) - waitForFinished(ctx, t, notifications, task2ID) - if err := verifyTaskSuccessOrFailure(context.Background(), ts, uuid, task2ID, true /* isSuccess*/); err != nil { - t.Errorf("verify task %v success failed: %v", task2ID, err) - } - return - } - case <-ctx.Done(): - t.Errorf("context is canceled") - return - } - } - }() + defer close(notifications) // Start the job if err := m.Start(ctx, uuid); err != nil { t.Fatalf("cannot start testworkflow: %v", err) } + + if err := checkUIChangeAllApproved(notifications); err != nil { + t.Fatal(err) + } + // Wait for the workflow to end. m.Wait(ctx, uuid) @@ -136,6 +240,249 @@ func TestParallelRunnerRetryAction(t *testing.T) { wg.Wait() } +func TestParallelRunnerApprovalFirstRunningSecondRunning(t *testing.T) { + ctx := context.Background() + ts := memorytopo.NewServer("cell") + m, uuid, wg, cancel, err := setupTestWorkflow(ctx, ts, true /* enableApprovals*/, false /* retry */) + if err != nil { + t.Fatal(err) + } + tw, err := testworkflow(m, uuid) + if err != nil { + t.Fatal(err) + } + + // Change the checkpoint in TestWorkflow to let task1 succeeded + // before starting the workflow. 
+ wi, err := m.WorkflowInfoForTesting(uuid) + if err != nil { + t.Fatalf("fail to get workflow info from manager: %v", err) + } + checkpointWriter := NewCheckpointWriter(ts, tw.checkpoint, wi) + task1ID := createTestTaskID(phaseSimple, 0) + checkpointWriter.UpdateTask(task1ID, workflowpb.TaskState_TaskRunning, nil) + task2ID := createTestTaskID(phaseSimple, 1) + checkpointWriter.UpdateTask(task2ID, workflowpb.TaskState_TaskRunning, nil) + + notifications, index, err := setupNotifications(m) + if err != nil { + t.Fatal(err) + } + defer m.NodeManager().CloseWatcher(index) + defer close(notifications) + + // Start the job + if err := m.Start(ctx, uuid); err != nil { + t.Fatalf("cannot start testworkflow: %v", err) + } + + if err := checkUIChangeAllApproved(notifications); err != nil { + t.Fatal(err) + } + + // Wait for the workflow to end. + m.Wait(ctx, uuid) + + if err := verifyAllTasksDone(ctx, ts, uuid); err != nil { + t.Fatal(err) + } + + // Stop the manager. + if err := m.Stop(ctx, uuid); err != nil { + t.Fatalf("cannot stop testworkflow: %v", err) + } + cancel() + wg.Wait() +} + +func TestParallelRunnerApprovalFromAllDone(t *testing.T) { + ctx := context.Background() + ts := memorytopo.NewServer("cell") + m, uuid, wg, cancel, err := setupTestWorkflow(ctx, ts, true /* enableApprovals*/, false /* retry */) + if err != nil { + t.Fatal(err) + } + tw, err := testworkflow(m, uuid) + if err != nil { + t.Fatal(err) + } + + // Change the checkpoint in TestWorkflow to let all tasks succeeded + // before starting the workflow. 
+ wi, err := m.WorkflowInfoForTesting(uuid) + if err != nil { + t.Fatalf("fail to get workflow info from manager: %v", err) + } + checkpointWriter := NewCheckpointWriter(ts, tw.checkpoint, wi) + task1ID := createTestTaskID(phaseSimple, 0) + checkpointWriter.UpdateTask(task1ID, workflowpb.TaskState_TaskDone, nil) + task2ID := createTestTaskID(phaseSimple, 1) + checkpointWriter.UpdateTask(task2ID, workflowpb.TaskState_TaskDone, nil) + + if err := verifyAllTasksDone(ctx, ts, uuid); err != nil { + t.Fatal(err) + } + + notifications, index, err := setupNotifications(m) + if err != nil { + t.Fatal(err) + } + defer m.NodeManager().CloseWatcher(index) + + // Start the job + if err := m.Start(ctx, uuid); err != nil { + t.Fatalf("cannot start testworkflow: %v", err) + } + + // Wait for the workflow to end. + m.Wait(ctx, uuid) + close(notifications) + // Check notification about phase node if exists, make sure no actions added. + + if message, err := checkNoActions(notifications, string(phaseSimple)); err != nil { + t.Fatalf("there should be no actions for node %v: %v, %v", phaseSimple, message, err) + } + + if err := verifyAllTasksDone(ctx, ts, uuid); err != nil { + t.Fatal(err) + } + // Stop the manager. + if err := m.Stop(ctx, uuid); err != nil { + t.Fatalf("cannot stop testworkflow: %v", err) + } + cancel() + wg.Wait() +} + +func TestParallelRunnerRetry(t *testing.T) { + // Tasks in the workflow are forced to fail at the first attempt. Then we + // retry task1, after it is finished successfully, we retry task2. 
+ ctx := context.Background() + ts := memorytopo.NewServer("cell") + m, uuid, wg, cancel, err := setupTestWorkflow(ctx, ts, false /* enableApprovals*/, true /* retry */) + if err != nil { + t.Fatal(err) + } + + notifications, index, err := setupNotifications(m) + if err != nil { + t.Fatal(err) + } + defer m.NodeManager().CloseWatcher(index) + defer close(notifications) + + // Start the job + if err := m.Start(ctx, uuid); err != nil { + t.Fatalf("cannot start testworkflow: %v", err) + } + + task1ID := createTestTaskID(phaseSimple, 0) + task2ID := createTestTaskID(phaseSimple, 1) + task1Node := &workflow.Node{ + PathName: "0", + Actions: []*workflow.Action{ + { + Name: actionNameRetry, + State: workflow.ActionStateEnabled, + Style: workflow.ActionStyleWaiting, + }, + }, + } + task2Node := &workflow.Node{ + PathName: "1", + Actions: []*workflow.Action{ + { + Name: actionNameRetry, + State: workflow.ActionStateEnabled, + Style: workflow.ActionStyleWaiting, + }, + }, + } + + // Wait for retry actions enabled after tasks failed at the first attempt. + if err := consumeNotificationsUntil(notifications, task1Node, task2Node); err != nil { + t.Fatalf("Should get expected update of nodes: %v", task1Node) + } + if err := verifyTask(context.Background(), ts, uuid, task1ID, workflowpb.TaskState_TaskDone, errMessage); err != nil { + t.Errorf("verify task %v failed: %v", task1ID, err) + } + if err := verifyTask(context.Background(), ts, uuid, task2ID, workflowpb.TaskState_TaskDone, errMessage); err != nil { + t.Errorf("verify task %v failed: %v", task2ID, err) + } + + // Retry task1. Task1 is launched after removing actions. + if err := triggerAction(ctx, m, path.Join("/", uuid, task1ID), actionNameRetry); err != nil { + t.Fatal(err) + } + // Check the retry action is removed. 
+	task1Node.Actions = []*workflow.Action{}
+	if err := consumeNotificationsUntil(notifications, task1Node); err != nil {
+		t.Fatalf("Should get expected update of nodes: %v", task1Node)
+	}
+	// Verify task1 has succeeded.
+	if err := waitForFinished(notifications, "0", taskFinishedMessage); err != nil {
+		t.Fatal(err)
+	}
+	if err := verifyTask(context.Background(), ts, uuid, task1ID, workflowpb.TaskState_TaskDone, ""); err != nil {
+		t.Errorf("verify task %v failed: %v", task1ID, err)
+	}
+
+	// Retry task2
+	if err := triggerAction(ctx, m, path.Join("/", uuid, task2ID), actionNameRetry); err != nil {
+		t.Fatal(err)
+	}
+	// Check the retry action is removed. Task2 is launched after removing actions.
+	task2Node.Actions = []*workflow.Action{}
+	if err := consumeNotificationsUntil(notifications, task2Node); err != nil {
+		t.Fatalf("Should get expected update of nodes: %v", task2Node)
+	}
+	// Verify task2 has succeeded.
+	if err := waitForFinished(notifications, "1", taskFinishedMessage); err != nil {
+		t.Fatal(err)
+	}
+	if err := verifyTask(context.Background(), ts, uuid, task2ID, workflowpb.TaskState_TaskDone, ""); err != nil {
+		t.Errorf("verify task %v failed: %v", task2ID, err)
+	}
+
+	// Wait for the workflow to end.
+	m.Wait(ctx, uuid)
+
+	if err := verifyAllTasksDone(ctx, ts, uuid); err != nil {
+		t.Fatal(err)
+	}
+	// Stop the manager.
+	if err := m.Stop(ctx, uuid); err != nil {
+		t.Fatalf("cannot stop testworkflow: %v", err)
+	}
+	cancel()
+	wg.Wait()
+}
+
+func setupTestWorkflow(ctx context.Context, ts topo.Server, enableApprovals, retry bool) (*workflow.Manager, string, *sync.WaitGroup, context.CancelFunc, error) {
+	m := workflow.NewManager(ts)
+	// Run the manager in the background.
+	wg, _, cancel := startManager(m)
+
+	// Create a testworkflow.
+ enableApprovalsFlag := fmt.Sprintf("-enable_approvals=%v", enableApprovals) + retryFlag := fmt.Sprintf("-retry=%v", retry) + uuid, err := m.Create(ctx, testWorkflowFactoryName, []string{retryFlag, "-count=2", enableApprovalsFlag}) + if err != nil { + return nil, "", nil, nil, fmt.Errorf("cannot create testworkflow: %v", err) + } + + return m, uuid, wg, cancel, nil +} + +func testworkflow(m *workflow.Manager, uuid string) (*TestWorkflow, error) { + w, err := m.WorkflowForTesting(uuid) + if err != nil { + return nil, fmt.Errorf("fail to get workflow from manager: %v", err) + } + tw := w.(*TestWorkflow) + return tw, nil +} + func startManager(m *workflow.Manager) (*sync.WaitGroup, context.Context, context.CancelFunc) { // Run the manager in the background. ctx, cancel := context.WithCancel(context.Background()) @@ -150,46 +497,229 @@ func startManager(m *workflow.Manager) (*sync.WaitGroup, context.Context, contex return wg, ctx, cancel } -func clickRetry(ctx context.Context, t *testing.T, m *workflow.Manager, nodePath string) { - t.Logf("Click retry action on node: %v.", nodePath) - if err := m.NodeManager().Action(ctx, &workflow.ActionParameters{ +func triggerAction(ctx context.Context, m *workflow.Manager, nodePath, actionName string) error { + return m.NodeManager().Action(ctx, &workflow.ActionParameters{ Path: nodePath, - Name: "Retry", - }); err != nil { - t.Errorf("unexpected action error: %v", err) + Name: actionName, + }) +} + +func setupNotifications(m *workflow.Manager) (chan []byte, int, error) { + // Set up notifications channel to monitor UI updates. 
+ notifications := make(chan []byte, 10) + _, index, err := m.NodeManager().GetAndWatchFullTree(notifications) + if err != nil { + return nil, -1, fmt.Errorf("GetAndWatchTree Failed: %v", err) } + return notifications, index, nil } -func waitForFinished(ctx context.Context, t *testing.T, notifications chan []byte, taskID string) { - for { - select { - case monitor, ok := <-notifications: - monitorStr := string(monitor) - if !ok { - t.Errorf("unexpected notification: %v, %v", ok, monitorStr) - } +func checkUIChangeFromNoneStarted(m *workflow.Manager, uuid string, notifications chan []byte) error { + wantNode := &workflow.Node{ + PathName: string(phaseSimple), + Actions: []*workflow.Action{ + { + Name: actionNameApproveFirstTask, + State: workflow.ActionStateDisabled, + Style: workflow.ActionStyleTriggered, + }, + { + Name: actionNameApproveRemainingTasks, + State: workflow.ActionStateDisabled, + Style: workflow.ActionStyleTriggered, + }, + }, + } + + // Approval buttons are initially disabled. + if err := consumeNotificationsUntil(notifications, wantNode); err != nil { + return fmt.Errorf("should get expected update of node: %v", wantNode) + } + + // First task is ready and approval button is enabled. + wantNode.Actions[0].State = workflow.ActionStateEnabled + wantNode.Actions[0].Style = workflow.ActionStyleWaiting + if err := consumeNotificationsUntil(notifications, wantNode); err != nil { + return fmt.Errorf("should get expected update of node: %v", wantNode) + } + // Trigger the approval button. + // It becomes disabled and shows approved message. + if err := triggerAction(context.Background(), m, path.Join("/", uuid, string(phaseSimple)), actionNameApproveFirstTask); err != nil { + return err + } + // Trigger the approval button again to test the code is robust against + // duplicate requests. 
+	if err := triggerAction(context.Background(), m, path.Join("/", uuid, string(phaseSimple)), actionNameApproveFirstTask); err == nil {
+		return fmt.Errorf("triggering the approval action %v again should fail", actionNameApproveFirstTask)
+	}
+	return checkUIChangeFirstApproved(m, uuid, notifications)
+}
+
+// checkUIChangeAllApproved observes the UI change for 2 scenarios:
+// 1. the first and second tasks are all running.
+// 2. the first and second tasks have succeeded, but remaining tasks haven't
+// succeeded yet.
+func checkUIChangeAllApproved(notifications chan []byte) error {
+	wantNode := &workflow.Node{
+		PathName: string(phaseSimple),
+		Actions: []*workflow.Action{
+			{
+				Name:  actionNameApproveFirstTaskDone,
+				State: workflow.ActionStateDisabled,
+				Style: workflow.ActionStyleTriggered,
+			},
+			{
+				Name:  actionNameApproveRemainingTasksDone,
+				State: workflow.ActionStateDisabled,
+				Style: workflow.ActionStyleTriggered,
+			},
+		},
+	}
+
+	// Approval buttons are disabled and show approved messages.
+	if err := consumeNotificationsUntil(notifications, wantNode); err != nil {
+		return fmt.Errorf("should get expected update of node: %v", wantNode)
+	}
+
+	// Approval buttons are cleared after the phase is finished.
+	wantNode.Actions = []*workflow.Action{}
+	if err := consumeNotificationsUntil(notifications, wantNode); err != nil {
+		return fmt.Errorf("should get expected update of node: %v", wantNode)
+	}
+	return nil
+}
+
+// checkUIChangeFirstApproved observes the UI change for 2 scenarios:
+// 1. the first task is running and the second hasn't started.
+// 2. the first task is done and the second hasn't started.
+func checkUIChangeFirstApproved(m *workflow.Manager, uuid string, notifications chan []byte) error { + wantNode := &workflow.Node{ + PathName: string(phaseSimple), + Actions: []*workflow.Action{ + { + Name: actionNameApproveFirstTaskDone, + State: workflow.ActionStateDisabled, + Style: workflow.ActionStyleTriggered, + }, + { + Name: actionNameApproveRemainingTasks, + State: workflow.ActionStateDisabled, + Style: workflow.ActionStyleTriggered, + }, + }, + } + + // Approval buttons are initially disabled. + // The approval button for the first task shows the approved message. + if err := consumeNotificationsUntil(notifications, wantNode); err != nil { + return fmt.Errorf("should get expected update of node: %v", wantNode) + } + + // The second task is ready and approval button for remaining tasks is + // enabled. + wantNode.Actions[1].State = workflow.ActionStateEnabled + wantNode.Actions[1].Style = workflow.ActionStyleWaiting + if err := consumeNotificationsUntil(notifications, wantNode); err != nil { + return fmt.Errorf("should get expected update of node: %v", wantNode) + } + // Trigger this approval button. It becomes disabled and shows the + // approved message. + if err := triggerAction(context.Background(), m, path.Join("/", uuid, string(phaseSimple)), actionNameApproveRemainingTasks); err != nil { + return err + } + return checkUIChangeAllApproved(notifications) +} - finishMessage := fmt.Sprintf(`"message":"task %v finished"`, taskID) - if strings.Contains(monitorStr, finishMessage) { - if strings.Contains(monitorStr, `"actions":[{"name:`) { - t.Fatalf("the node actions should be empty after triggering retry: %v", monitorStr) +// consumeNotificationsUntil waits for all wantNodes to be seen from the +// notifications of UI change. 
+func consumeNotificationsUntil(notifications chan []byte, wantNodes ...*workflow.Node) error { + wantSet := make(map[*workflow.Node]bool) + for _, n := range wantNodes { + wantSet[n] = true + } + + for monitor := range notifications { + update := &workflow.Update{} + if err := json.Unmarshal(monitor, update); err != nil { + return err + } + + if update.Nodes == nil || len(update.Nodes) != 1 { + // Ignore unrelated UI updates. For example, the UI update often includes + // multiple nodes when the workflow initialize its UI. + continue + } + for _, n := range wantNodes { + if checkNode(update.Nodes[0], n) { + if wantSet[n] { + delete(wantSet, n) } - return } - case <-ctx.Done(): - return + if len(wantSet) == 0 { + return nil + } + } + } + return fmt.Errorf("notifications channel is closed unexpectedly when waiting for expected nodes") +} + +func checkNode(gotNode *workflow.Node, wantNode *workflow.Node) bool { + if gotNode.PathName != wantNode.PathName || len(gotNode.Actions) != len(wantNode.Actions) { + return false + } + + for i := 0; i < len(wantNode.Actions); i++ { + if !reflect.DeepEqual(gotNode.Actions[i], wantNode.Actions[i]) { + return false } } + return true +} + +func checkNoActions(notifications chan []byte, nodePath string) (string, error) { + for monitor := range notifications { + update := &workflow.Update{} + if err := json.Unmarshal(monitor, update); err != nil { + return "", err + } + + if update.Nodes == nil { + continue + } + + for _, n := range update.Nodes { + if n.PathName == nodePath && len(n.Actions) > 0 { + return string(monitor), fmt.Errorf("actions detected unexpectedly") + } + } + } + return "", nil +} + +func waitForFinished(notifications chan []byte, path, message string) error { + for monitor := range notifications { + update := &workflow.Update{} + if err := json.Unmarshal(monitor, update); err != nil { + return err + } + + if update.Nodes == nil || len(update.Nodes) != 1 { + // Ignore unrelated UI updates. 
For example, the UI update often includes + // multiple nodes when the workflow initialize its UI. + continue + } + + if update.Nodes[0].PathName == path && update.Nodes[0].Message == taskFinishedMessage { + return nil + } + } + return fmt.Errorf("notifications channel is closed unexpectedly when waiting for expected nodes") } func verifyAllTasksDone(ctx context.Context, ts topo.Server, uuid string) error { - wi, err := ts.GetWorkflow(ctx, uuid) + checkpoint, err := checkpoint(ctx, ts, uuid) if err != nil { - return fmt.Errorf("fail to get workflow for: %v", uuid) - } - checkpoint := &workflowpb.WorkflowCheckpoint{} - if err := proto.Unmarshal(wi.Workflow.Data, checkpoint); err != nil { - return fmt.Errorf("fails to get checkpoint for the workflow: %v", err) + return err } for _, task := range checkpoint.Tasks { @@ -200,24 +730,27 @@ func verifyAllTasksDone(ctx context.Context, ts topo.Server, uuid string) error return nil } -func verifyTaskSuccessOrFailure(ctx context.Context, ts topo.Server, uuid, taskID string, isSuccess bool) error { - wi, err := ts.GetWorkflow(ctx, uuid) +func verifyTask(ctx context.Context, ts topo.Server, uuid, taskID string, taskState workflowpb.TaskState, taskError string) error { + checkpoint, err := checkpoint(ctx, ts, uuid) if err != nil { - return fmt.Errorf("fail to get workflow for: %v", uuid) + return err } + task := checkpoint.Tasks[taskID] - checkpoint := &workflowpb.WorkflowCheckpoint{} - if err := proto.Unmarshal(wi.Workflow.Data, checkpoint); err != nil { - return fmt.Errorf("fails to get checkpoint for the workflow: %v", err) + if task.State != taskState || task.Error != taskError { + return fmt.Errorf("task status: %v, %v fails to match expected status: %v, %v", task.State, task.Error, taskState, taskError) } - task := checkpoint.Tasks[taskID] + return nil +} - taskError := "" - if !isSuccess { - taskError = errMessage +func checkpoint(ctx context.Context, ts topo.Server, uuid string) (*workflowpb.WorkflowCheckpoint, error) { 
+ wi, err := ts.GetWorkflow(ctx, uuid) + if err != nil { + return nil, fmt.Errorf("fail to get workflow for: %v", uuid) } - if task.State != workflowpb.TaskState_TaskDone || task.Error != taskError { - return fmt.Errorf("task: %v should succeed. Task status: %v, %v", task.Id, task.State, task.Error) + checkpoint := &workflowpb.WorkflowCheckpoint{} + if err := proto.Unmarshal(wi.Workflow.Data, checkpoint); err != nil { + return nil, fmt.Errorf("fails to get checkpoint for the workflow: %v", err) } - return nil + return checkpoint, nil } diff --git a/go/vt/workflow/resharding/test_workflow.go b/go/vt/workflow/resharding/test_workflow_test.go similarity index 72% rename from go/vt/workflow/resharding/test_workflow.go rename to go/vt/workflow/resharding/test_workflow_test.go index edb689d1295..9c47cd2d935 100644 --- a/go/vt/workflow/resharding/test_workflow.go +++ b/go/vt/workflow/resharding/test_workflow_test.go @@ -19,9 +19,11 @@ import ( ) const ( - testWorkflowFactoryName = "test_workflow" - phaseSimple PhaseType = "simple" - errMessage = "fake error for testing retry" + testWorkflowFactoryName = "test_workflow" + + phaseSimple PhaseType = "simple" + + errMessage = "fake error for testing retry" ) func createTestTaskID(phase PhaseType, count int) string { @@ -32,80 +34,15 @@ func init() { workflow.Register(testWorkflowFactoryName, &TestWorkflowFactory{}) } -// TestWorkflow is created to simplfy the unit test of ParallelRunner. -type TestWorkflow struct { - ctx context.Context - manager *workflow.Manager - topoServer topo.Server - wi *topo.WorkflowInfo - logger *logutil.MemoryLogger - - retryMu sync.Mutex - // retryFlags stores the retry flag for all the tasks. - retryFlags map[string]bool - - rootUINode *workflow.Node - - checkpoint *workflowpb.WorkflowCheckpoint - checkpointWriter *CheckpointWriter -} - -// Run implements the worklfow.Workflow interface. 
-func (tw *TestWorkflow) Run(ctx context.Context, manager *workflow.Manager, wi *topo.WorkflowInfo) error { - tw.ctx = ctx - tw.topoServer = manager.TopoServer() - tw.manager = manager - tw.wi = wi - tw.checkpointWriter = NewCheckpointWriter(tw.topoServer, tw.checkpoint, tw.wi) - tw.rootUINode.Display = workflow.NodeDisplayDeterminate - tw.rootUINode.BroadcastChanges(true /* updateChildren */) - - simpleTasks := tw.getTasks(phaseSimple) - simpleRunner := NewParallelRunner(tw.ctx, tw.rootUINode, tw.checkpointWriter, simpleTasks, tw.runSimple, Parallel) - simpleRunner.reportTaskStatus = true - if err := simpleRunner.Run(); err != nil { - return err - } - - log.Infof("Horizontal resharding is finished successfully.") - return nil -} - -func (tw *TestWorkflow) getTasks(phaseName PhaseType) []*workflowpb.Task { - count, err := strconv.Atoi(tw.checkpoint.Settings["count"]) - if err != nil { - log.Info("converting count in checkpoint.Settings to int fails: %v \n", tw.checkpoint.Settings["count"]) - return nil - } - var tasks []*workflowpb.Task - for i := 0; i < count; i++ { - taskID := createTestTaskID(phaseName, i) - tasks = append(tasks, tw.checkpoint.Tasks[taskID]) - } - return tasks -} - -func (tw *TestWorkflow) runSimple(ctx context.Context, t *workflowpb.Task) error { - log.Info("The number passed to me is %v \n", t.Attributes["number"]) - - tw.retryMu.Lock() - defer tw.retryMu.Unlock() - if tw.retryFlags[t.Id] { - log.Info("I will fail at this time since retry flag is true.") - tw.retryFlags[t.Id] = false - return errors.New(errMessage) - } - return nil -} - // TestWorkflowFactory is the factory to create a test workflow. type TestWorkflowFactory struct{} // Init is part of the workflow.Factory interface. 
func (*TestWorkflowFactory) Init(_ *workflow.Manager, w *workflowpb.Workflow, args []string) error { subFlags := flag.NewFlagSet(testWorkflowFactoryName, flag.ContinueOnError) - retryFlag := subFlags.Bool("retry", false, "The retry flag should be true if the retry action need to be tested") + retryFlag := subFlags.Bool("retry", false, "The retry flag should be true if the retry action should be tested") count := subFlags.Int("count", 0, "The number of simple tasks") + enableApprovals := subFlags.Bool("enable_approvals", false, "If true, executions of tasks require user's approvals on the UI.") if err := subFlags.Parse(args); err != nil { return err } @@ -123,7 +60,7 @@ func (*TestWorkflowFactory) Init(_ *workflow.Manager, w *workflowpb.Workflow, ar checkpoint := &workflowpb.WorkflowCheckpoint{ CodeVersion: 0, Tasks: taskMap, - Settings: map[string]string{"count": fmt.Sprintf("%v", *count), "retry": fmt.Sprintf("%v", *retryFlag)}, + Settings: map[string]string{"count": fmt.Sprintf("%v", *count), "retry": fmt.Sprintf("%v", *retryFlag), "enable_approvals": fmt.Sprintf("%v", *enableApprovals)}, } var err error w.Data, err = proto.Marshal(checkpoint) @@ -134,7 +71,7 @@ func (*TestWorkflowFactory) Init(_ *workflow.Manager, w *workflowpb.Workflow, ar } // Instantiate is part the workflow.Factory interface. -func (*TestWorkflowFactory) Instantiate(_ *workflow.Manager, w *workflowpb.Workflow, rootNode *workflow.Node) (workflow.Workflow, error) { +func (*TestWorkflowFactory) Instantiate(m *workflow.Manager, w *workflowpb.Workflow, rootNode *workflow.Node) (workflow.Workflow, error) { checkpoint := &workflowpb.WorkflowCheckpoint{} if err := proto.Unmarshal(w.Data, checkpoint); err != nil { return nil, err @@ -142,7 +79,7 @@ func (*TestWorkflowFactory) Instantiate(_ *workflow.Manager, w *workflowpb.Workf // Get the retry flags for all tasks from the checkpoint. 
retry, err := strconv.ParseBool(checkpoint.Settings["retry"]) if err != nil { - log.Errorf("converting retry in checkpoint.Settings to bool fails: %v \n", checkpoint.Settings["retry"]) + log.Errorf("converting retry in checkpoint.Settings to bool fails: %v", checkpoint.Settings["retry"]) return nil, err } retryFlags := make(map[string]bool) @@ -150,16 +87,26 @@ func (*TestWorkflowFactory) Instantiate(_ *workflow.Manager, w *workflowpb.Workf retryFlags[task.Id] = retry } + // Get the user control flags from the checkpoint. + enableApprovals, err := strconv.ParseBool(checkpoint.Settings["enable_approvals"]) + if err != nil { + log.Errorf("converting retry in checkpoint.Settings to bool fails: %v", checkpoint.Settings["user_control"]) + return nil, err + } + tw := &TestWorkflow{ - checkpoint: checkpoint, - rootUINode: rootNode, - logger: logutil.NewMemoryLogger(), - retryFlags: retryFlags, + topoServer: m.TopoServer(), + manager: m, + checkpoint: checkpoint, + rootUINode: rootNode, + logger: logutil.NewMemoryLogger(), + retryFlags: retryFlags, + enableApprovals: enableApprovals, } count, err := strconv.Atoi(checkpoint.Settings["count"]) if err != nil { - log.Errorf("converting count in checkpoint.Settings to int fails: %v \n", checkpoint.Settings["count"]) + log.Errorf("converting count in checkpoint.Settings to int fails: %v", checkpoint.Settings["count"]) return nil, err } @@ -179,3 +126,70 @@ func (*TestWorkflowFactory) Instantiate(_ *workflow.Manager, w *workflowpb.Workf } return tw, nil } + +// TestWorkflow is used to unit test the ParallelRunner object. It is a +// simplified workflow of one phase. To test the ParallelRunner's retry +// behavior, we can let the tasks explicitly fail initially and succeed +// after a retry. +type TestWorkflow struct { + ctx context.Context + manager *workflow.Manager + topoServer topo.Server + wi *topo.WorkflowInfo + logger *logutil.MemoryLogger + + retryMu sync.Mutex + // retryFlags stores the retry flag for all tasks. 
+ retryFlags map[string]bool + + rootUINode *workflow.Node + + checkpoint *workflowpb.WorkflowCheckpoint + checkpointWriter *CheckpointWriter + + enableApprovals bool +} + +// Run implements the workflow.Workflow interface. +func (tw *TestWorkflow) Run(ctx context.Context, manager *workflow.Manager, wi *topo.WorkflowInfo) error { + tw.ctx = ctx + tw.wi = wi + tw.checkpointWriter = NewCheckpointWriter(tw.topoServer, tw.checkpoint, tw.wi) + + tw.rootUINode.Display = workflow.NodeDisplayDeterminate + tw.rootUINode.BroadcastChanges(true /* updateChildren */) + + simpleTasks := tw.getTasks(phaseSimple) + simpleRunner := NewParallelRunner(tw.ctx, tw.rootUINode, tw.checkpointWriter, simpleTasks, tw.runSimple, Parallel, tw.enableApprovals) + if err := simpleRunner.Run(); err != nil { + return err + } + return nil +} + +func (tw *TestWorkflow) getTasks(phaseName PhaseType) []*workflowpb.Task { + count, err := strconv.Atoi(tw.checkpoint.Settings["count"]) + if err != nil { + log.Info("converting count in checkpoint.Settings to int failed: %v", tw.checkpoint.Settings["count"]) + return nil + } + var tasks []*workflowpb.Task + for i := 0; i < count; i++ { + taskID := createTestTaskID(phaseName, i) + tasks = append(tasks, tw.checkpoint.Tasks[taskID]) + } + return tasks +} + +func (tw *TestWorkflow) runSimple(ctx context.Context, t *workflowpb.Task) error { + log.Info("The number passed to me is %v", t.Attributes["number"]) + + tw.retryMu.Lock() + defer tw.retryMu.Unlock() + if tw.retryFlags[t.Id] { + log.Info("I will fail at this time since retry flag is true.") + tw.retryFlags[t.Id] = false + return errors.New(errMessage) + } + return nil +} diff --git a/test/horizontal_resharding_workflow.py b/test/horizontal_resharding_workflow.py index 932e2f0e8ac..b7274f4622e 100644 --- a/test/horizontal_resharding_workflow.py +++ b/test/horizontal_resharding_workflow.py @@ -43,7 +43,8 @@ def test_successful_resharding(self): stdout = utils.run_vtctl(['WorkflowCreate', 
'horizontal_resharding', '-keyspace=test_keyspace', - '-vtworkers=%s' % vtworker_endpoint], + '-vtworkers=%s' % vtworker_endpoint, + '-enable_approvals=false'], auto_log=True) workflow_uuid = re.match(r'^uuid: (.*)$', stdout[0]).group(1) diff --git a/web/vtctld2/src/app/shared/dialog/dialog-content.ts b/web/vtctld2/src/app/shared/dialog/dialog-content.ts index 5806f932083..8095b88cd50 100644 --- a/web/vtctld2/src/app/shared/dialog/dialog-content.ts +++ b/web/vtctld2/src/app/shared/dialog/dialog-content.ts @@ -64,7 +64,7 @@ export class DialogContent { } /* - Checks to see if a particular flagId should be set as vissible to the user. + Checks to see if a particular flagId should be set as visible to the user. First checks if the flag doesnt exist or if it has explicitly been set to not be editable. Then it iterates through the blockOnEmpty and BlockOnFilled lists to ensure that the values of other flags does not diff --git a/web/vtctld2/src/app/shared/flags/flag.ts b/web/vtctld2/src/app/shared/flags/flag.ts index edf175c2092..d8fc03d023f 100644 --- a/web/vtctld2/src/app/shared/flags/flag.ts +++ b/web/vtctld2/src/app/shared/flags/flag.ts @@ -88,7 +88,7 @@ export class Flag { } public getArgs() { - if (this.getValue() === false || this.getStrValue() === '' || !this.positional) { + if (this.getStrValue() === '' || !this.positional) { return []; } if (this.namedPositional !== undefined) { @@ -103,7 +103,7 @@ export class Flag { } public getFlags() { - if (this.getValue() === false || this.getStrValue() === '' || this.positional) { + if (this.getStrValue() === '' || this.positional) { return []; } // Non-positional arguments need a key value pair. 
diff --git a/web/vtctld2/src/app/shared/flags/workflow.flags.ts b/web/vtctld2/src/app/shared/flags/workflow.flags.ts index 4ef17497afc..1e757e6f94f 100644 --- a/web/vtctld2/src/app/shared/flags/workflow.flags.ts +++ b/web/vtctld2/src/app/shared/flags/workflow.flags.ts @@ -30,6 +30,10 @@ export class NewWorkflowFlags { this.flags['horizontal_resharding_vtworkers'] = new HorizontalReshardingVtworkerFlag(6, 'horizontal_resharding_vtworkers'); this.flags['horizontal_resharding_vtworkers'].positional = true; this.flags['horizontal_resharding_vtworkers'].namedPositional = 'vtworkers'; + this.flags['horizontal_resharding_enable_approvals'] = new HorizontalReshardingEnableApprovalsFlag(7, 'horizontal_resharding_enable_approvals'); + this.flags['horizontal_resharding_enable_approvals'].positional = true; + this.flags['horizontal_resharding_enable_approvals'].namedPositional = 'enable_approvals'; + } } @@ -107,6 +111,13 @@ export class HorizontalReshardingVtworkerFlag extends InputFlag { } } +export class HorizontalReshardingEnableApprovalsFlag extends CheckBoxFlag { + constructor(position: number, id: string, value= true) { + super(position, id, 'enable approvals', 'Set true if use user approvals of task execution.', value); + this.setDisplayOn('factory_name', 'horizontal_resharding'); + } +} + // WorkflowFlags is used by the Start / Stop / Delete dialogs. export class WorkflowFlags { flags= {}; From b7420b103d63978cf1e716ec4542533c2e60b19e Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Wed, 15 Mar 2017 08:02:30 -0700 Subject: [PATCH 106/108] Actually plumbing schema engine. 
--- go/vt/binlog/updatestreamctl.go | 4 ++-- go/vt/vttablet/tabletmanager/action_agent.go | 6 +++--- go/vt/vttablet/tabletserver/controller.go | 4 ++++ go/vt/vttablet/tabletserver/tabletserver.go | 15 ++++++++++----- go/vt/vttablet/tabletservermock/controller.go | 6 ++++++ 5 files changed, 25 insertions(+), 10 deletions(-) diff --git a/go/vt/binlog/updatestreamctl.go b/go/vt/binlog/updatestreamctl.go index ae19be36c36..11eddf9935c 100644 --- a/go/vt/binlog/updatestreamctl.go +++ b/go/vt/binlog/updatestreamctl.go @@ -93,7 +93,6 @@ func (m *UpdateStreamControlMock) IsEnabled() bool { // and UpdateStreamControl type UpdateStreamImpl struct { // the following variables are set at construction time - mysqld mysqlctl.MysqlDaemon dbname string se *schema.Engine @@ -156,9 +155,10 @@ type RegisterUpdateStreamServiceFunc func(UpdateStream) var RegisterUpdateStreamServices []RegisterUpdateStreamServiceFunc // NewUpdateStream returns a new UpdateStreamImpl object -func NewUpdateStream(mysqld mysqlctl.MysqlDaemon, dbname string) *UpdateStreamImpl { +func NewUpdateStream(mysqld mysqlctl.MysqlDaemon, se *schema.Engine, dbname string) *UpdateStreamImpl { return &UpdateStreamImpl{ mysqld: mysqld, + se: se, dbname: dbname, } } diff --git a/go/vt/vttablet/tabletmanager/action_agent.go b/go/vt/vttablet/tabletmanager/action_agent.go index c8e8f6be195..7472db16a2b 100644 --- a/go/vt/vttablet/tabletmanager/action_agent.go +++ b/go/vt/vttablet/tabletmanager/action_agent.go @@ -48,10 +48,10 @@ import ( "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/mysqlctl" "github.com/youtube/vitess/go/vt/servenv" - "github.com/youtube/vitess/go/vt/vttablet/tabletserver" - "github.com/youtube/vitess/go/vt/vttablet/tabletservermock" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/topoproto" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver" + "github.com/youtube/vitess/go/vt/vttablet/tabletservermock" querypb 
"github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" @@ -546,7 +546,7 @@ func (agent *ActionAgent) Start(ctx context.Context, mysqlPort, vtPort, gRPCPort // (it needs the dbname, so it has to be delayed up to here, // but it has to be before updateState below that may use it) if initUpdateStream { - us := binlog.NewUpdateStream(agent.MysqlDaemon, agent.DBConfigs.App.DbName) + us := binlog.NewUpdateStream(agent.MysqlDaemon, agent.QueryServiceControl.SchemaEngine(), agent.DBConfigs.App.DbName) agent.UpdateStream = us servenv.OnRun(func() { us.RegisterService() diff --git a/go/vt/vttablet/tabletserver/controller.go b/go/vt/vttablet/tabletserver/controller.go index f4bda5f3dcd..75d2f7e197f 100644 --- a/go/vt/vttablet/tabletserver/controller.go +++ b/go/vt/vttablet/tabletserver/controller.go @@ -11,6 +11,7 @@ import ( "github.com/youtube/vitess/go/vt/mysqlctl" "github.com/youtube/vitess/go/vt/vttablet/queryservice" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/rules" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" @@ -57,6 +58,9 @@ type Controller interface { // QueryService returns the QueryService object used by this Controller QueryService() queryservice.QueryService + // SchemaEngine returns the SchemaEngine object used by this Controller + SchemaEngine() *schema.Engine + // BroadcastHealth sends the current health to all listeners BroadcastHealth(terTimestamp int64, stats *querypb.RealtimeStats) } diff --git a/go/vt/vttablet/tabletserver/tabletserver.go b/go/vt/vttablet/tabletserver/tabletserver.go index 5ed4aab1ba6..2d03a69f9ab 100644 --- a/go/vt/vttablet/tabletserver/tabletserver.go +++ b/go/vt/vttablet/tabletserver/tabletserver.go @@ -31,18 +31,18 @@ import ( "github.com/youtube/vitess/go/vt/logutil" "github.com/youtube/vitess/go/vt/mysqlctl" 
"github.com/youtube/vitess/go/vt/sqlparser" + "github.com/youtube/vitess/go/vt/topo" + "github.com/youtube/vitess/go/vt/utils" + "github.com/youtube/vitess/go/vt/vterrors" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/connpool" - "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/messager" - "github.com/youtube/vitess/go/vt/vttablet/queryservice" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/rules" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/splitquery" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/txthrottler" - "github.com/youtube/vitess/go/vt/topo" - "github.com/youtube/vitess/go/vt/utils" - "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" @@ -603,6 +603,11 @@ func (tsv *TabletServer) QueryService() queryservice.QueryService { return tsv } +// SchemaEngine returns the SchemaEngine part of TabletServer. +func (tsv *TabletServer) SchemaEngine() *schema.Engine { + return tsv.se +} + // Begin starts a new transaction. This is allowed only if the state is StateServing. 
func (tsv *TabletServer) Begin(ctx context.Context, target *querypb.Target) (transactionID int64, err error) { err = tsv.execRequest( diff --git a/go/vt/vttablet/tabletservermock/controller.go b/go/vt/vttablet/tabletservermock/controller.go index a1dcae2767e..5bbf8f0ab28 100644 --- a/go/vt/vttablet/tabletservermock/controller.go +++ b/go/vt/vttablet/tabletservermock/controller.go @@ -16,6 +16,7 @@ import ( topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" "github.com/youtube/vitess/go/vt/vttablet/queryservice" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/rules" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" ) // BroadcastData is used by the mock Controller to send data @@ -155,6 +156,11 @@ func (tqsc *Controller) QueryService() queryservice.QueryService { return nil } +// SchemaEngine is part of the tabletserver.Controller interface +func (tqsc *Controller) SchemaEngine() *schema.Engine { + return nil +} + // BroadcastHealth is part of the tabletserver.Controller interface func (tqsc *Controller) BroadcastHealth(terTimestamp int64, stats *querypb.RealtimeStats) { tqsc.mu.Lock() From f4a97f698644714a91e88cb1881655a15eee3bbc Mon Sep 17 00:00:00 2001 From: Alain Jobart Date: Wed, 15 Mar 2017 08:04:14 -0700 Subject: [PATCH 107/108] Adding a rbr version of vertical_split.py. Extra bonus: it actually passes. 
--- config/mycnf/rbr.cnf | 1 + test/config.json | 11 +++++++++++ test/vertical_split.py | 8 +++++++- test/vertical_split_rbr.py | 14 ++++++++++++++ 4 files changed, 33 insertions(+), 1 deletion(-) create mode 100644 config/mycnf/rbr.cnf create mode 100755 test/vertical_split_rbr.py diff --git a/config/mycnf/rbr.cnf b/config/mycnf/rbr.cnf new file mode 100644 index 00000000000..5dde64cda57 --- /dev/null +++ b/config/mycnf/rbr.cnf @@ -0,0 +1 @@ +binlog-format=row diff --git a/test/config.json b/test/config.json index b7c778584b3..03cffaa5e59 100644 --- a/test/config.json +++ b/test/config.json @@ -396,6 +396,17 @@ "worker_test" ] }, + "vertical_split_rbr": { + "File": "vertical_split_rbr.py", + "Args": [], + "Command": [], + "Manual": false, + "Shard": 1, + "RetryMax": 0, + "Tags": [ + "worker_test" + ] + }, "vtctld": { "File": "vtctld_test.py", "Args": [], diff --git a/test/vertical_split.py b/test/vertical_split.py index 96f590ed637..72277debaa4 100755 --- a/test/vertical_split.py +++ b/test/vertical_split.py @@ -18,6 +18,9 @@ from vtdb import keyrange_constants from vtdb import vtgate_client +# Global variables, for tests flavors. +use_rbr = False + # source keyspace, with 4 tables source_master = tablet.Tablet() source_replica = tablet.Tablet() @@ -38,7 +41,10 @@ def setUpModule(): try: environment.topo_server().setup() - setup_procs = [t.init_mysql() for t in all_tablets] + extra_my_cnf = None + if use_rbr: + extra_my_cnf = environment.vttop + '/config/mycnf/rbr.cnf' + setup_procs = [t.init_mysql(extra_my_cnf=extra_my_cnf) for t in all_tablets] utils.Vtctld().start() utils.wait_procs(setup_procs) except: diff --git a/test/vertical_split_rbr.py b/test/vertical_split_rbr.py new file mode 100755 index 00000000000..9b36fe74e77 --- /dev/null +++ b/test/vertical_split_rbr.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python +# +# Copyright 2013, Google Inc. All rights reserved. 
+# Use of this source code is governed by a BSD-style license that can +# be found in the LICENSE file. + +"""Re-runs resharding.py with RBR on.""" + +import vertical_split +import utils + +if __name__ == '__main__': + vertical_split.use_rbr = True + utils.main(vertical_split) From ca81fda801574990094120aa7e8acd2a57138676 Mon Sep 17 00:00:00 2001 From: Erez Louidor Date: Wed, 15 Mar 2017 13:53:41 -0700 Subject: [PATCH 108/108] Changing the errors SplitQuery returns to be from the vterrors package. --- .../splitquery/equal_splits_algorithm.go | 12 +++-- .../splitquery/full_scan_algorithm.go | 10 +++-- .../tabletserver/splitquery/split_params.go | 44 ++++++++++++------- go/vt/vttablet/tabletserver/tabletserver.go | 20 ++------- .../tabletserver/tabletserver_test.go | 4 +- 5 files changed, 48 insertions(+), 42 deletions(-) diff --git a/go/vt/vttablet/tabletserver/splitquery/equal_splits_algorithm.go b/go/vt/vttablet/tabletserver/splitquery/equal_splits_algorithm.go index 27542451cfb..dc0e97f3b44 100644 --- a/go/vt/vttablet/tabletserver/splitquery/equal_splits_algorithm.go +++ b/go/vt/vttablet/tabletserver/splitquery/equal_splits_algorithm.go @@ -9,9 +9,11 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/sqlparser" + "github.com/youtube/vitess/go/vt/vterrors" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" querypb "github.com/youtube/vitess/go/vt/proto/query" + vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) // EqualSplitsAlgorithm implements the SplitAlgorithmInterface and represents the equal-splits @@ -51,12 +53,14 @@ func NewEqualSplitsAlgorithm(splitParams *SplitParams, sqlExecuter SQLExecuter) // primary key columns, and there can be more than one primary key column for a table. 
if !sqltypes.IsFloat(splitParams.splitColumns[0].Type) && !sqltypes.IsIntegral(splitParams.splitColumns[0].Type) { - return nil, fmt.Errorf("using the EQUAL_SPLITS algorithm in SplitQuery requires having"+ - " a numeric (integral or float) split-column. Got type: %v", splitParams.splitColumns[0]) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, + "using the EQUAL_SPLITS algorithm in SplitQuery requires having"+ + " a numeric (integral or float) split-column. Got type: %v", splitParams.splitColumns[0]) } if splitParams.splitCount <= 0 { - return nil, fmt.Errorf("using the EQUAL_SPLITS algorithm in SplitQuery requires a positive"+ - " splitParams.splitCount. Got: %v", splitParams.splitCount) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, + "using the EQUAL_SPLITS algorithm in SplitQuery requires a positive"+ + " splitParams.splitCount. Got: %v", splitParams.splitCount) } result := &EqualSplitsAlgorithm{ splitParams: splitParams, diff --git a/go/vt/vttablet/tabletserver/splitquery/full_scan_algorithm.go b/go/vt/vttablet/tabletserver/splitquery/full_scan_algorithm.go index 9638b3015a3..b2c406895ce 100644 --- a/go/vt/vttablet/tabletserver/splitquery/full_scan_algorithm.go +++ b/go/vt/vttablet/tabletserver/splitquery/full_scan_algorithm.go @@ -4,8 +4,11 @@ import ( "fmt" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" + "github.com/youtube/vitess/go/vt/vterrors" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" + + vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) // FullScanAlgorithm implements the SplitAlgorithmInterface and represents the full-scan algorithm @@ -48,8 +51,9 @@ func NewFullScanAlgorithm( splitParams *SplitParams, sqlExecuter SQLExecuter) (*FullScanAlgorithm, error) { if !splitParams.areSplitColumnsPrimaryKey() { - return nil, fmt.Errorf("Using the FULL_SCAN algorithm requires split 
columns to be"+ - " the primary key. Got: %+v", splitParams) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, + "Using the FULL_SCAN algorithm requires split columns to be"+ + " the primary key. Got: %+v", splitParams) } result := &FullScanAlgorithm{ splitParams: splitParams, diff --git a/go/vt/vttablet/tabletserver/splitquery/split_params.go b/go/vt/vttablet/tabletserver/splitquery/split_params.go index ea196feb402..cf11688df56 100644 --- a/go/vt/vttablet/tabletserver/splitquery/split_params.go +++ b/go/vt/vttablet/tabletserver/splitquery/split_params.go @@ -4,8 +4,11 @@ import ( "fmt" "github.com/youtube/vitess/go/vt/sqlparser" - "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" + "github.com/youtube/vitess/go/vt/vterrors" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" + + vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) // SplitParams stores the context for a splitquery computation. It is used by @@ -63,8 +66,8 @@ func NewSplitParamsGivenNumRowsPerQueryPart( schema map[string]*schema.Table, ) (*SplitParams, error) { if numRowsPerQueryPart <= 0 { - return nil, fmt.Errorf("numRowsPerQueryPart must be positive. Got: %v", - numRowsPerQueryPart) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, + "numRowsPerQueryPart must be positive. Got: %v", numRowsPerQueryPart) } result, err := newSplitParams(query, splitColumnNames, schema) if err != nil { @@ -104,8 +107,8 @@ func NewSplitParamsGivenSplitCount( schema map[string]*schema.Table, ) (*SplitParams, error) { if splitCount <= 0 { - return nil, fmt.Errorf("splitCount must be positive. Got: %v", - splitCount) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, + "splitCount must be positive. 
Got: %v", splitCount) } result, err := newSplitParams(query, splitColumnNames, schema) if err != nil { @@ -130,31 +133,33 @@ func newSplitParams( ) (*SplitParams, error) { statement, err := sqlparser.Parse(query.Sql) if err != nil { - return nil, fmt.Errorf("failed parsing query: '%v', err: '%v'", query.Sql, err) + return nil, vterrors.Errorf( + vtrpcpb.Code_INVALID_ARGUMENT, "failed parsing query: '%v', err: '%v'", query.Sql, err) } selectAST, ok := statement.(*sqlparser.Select) if !ok { - return nil, fmt.Errorf("not a select statement") + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "not a select statement") } if selectAST.Distinct != "" || selectAST.GroupBy != nil || selectAST.Having != nil || len(selectAST.From) != 1 || selectAST.OrderBy != nil || selectAST.Limit != nil || selectAST.Lock != "" { - return nil, fmt.Errorf("unsupported query: %v", query.Sql) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unsupported query: %v", query.Sql) } var aliasedTableExpr *sqlparser.AliasedTableExpr aliasedTableExpr, ok = selectAST.From[0].(*sqlparser.AliasedTableExpr) if !ok { - return nil, fmt.Errorf("unsupported FROM clause in query: %v", query.Sql) + return nil, vterrors.Errorf( + vtrpcpb.Code_INVALID_ARGUMENT, "unsupported FROM clause in query: %v", query.Sql) } tableName := sqlparser.GetTableName(aliasedTableExpr.Expr) if tableName.IsEmpty() { - return nil, fmt.Errorf("unsupported FROM clause in query"+ + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unsupported FROM clause in query"+ " (must be a simple table expression): %v", query.Sql) } tableSchema, ok := schemaMap[tableName.String()] if tableSchema == nil { - return nil, fmt.Errorf("can't find table in schema") + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "can't find table in schema") } // Get the schema.TableColumn representation of each splitColumnName. 
@@ -162,9 +167,10 @@ func newSplitParams( if len(splitColumnNames) == 0 { splitColumns = getPrimaryKeyColumns(tableSchema) if len(splitColumns) == 0 { - return nil, fmt.Errorf("no split columns where given and the queried table has"+ - " no primary key columns (is the table a view? Running SplitQuery on a view"+ - " is not supported). query: %v", query.Sql) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, + "no split columns where given and the queried table has"+ + " no primary key columns (is the table a view? Running SplitQuery on a view"+ + " is not supported). query: %v", query.Sql) } } else { splitColumns, err = findSplitColumnsInSchema(splitColumnNames, tableSchema) @@ -172,8 +178,10 @@ func newSplitParams( return nil, err } if !areColumnsAPrefixOfAnIndex(splitColumns, tableSchema) { - return nil, fmt.Errorf("split-columns must be a prefix of the columns composing"+ - " an index. Sql: %v, split-columns: %v", query.Sql, splitColumns) + return nil, vterrors.Errorf( + vtrpcpb.Code_INVALID_ARGUMENT, + "split-columns must be a prefix of the columns composing"+ + " an index. 
Sql: %v, split-columns: %v", query.Sql, splitColumns) } } @@ -199,7 +207,9 @@ func findSplitColumnsInSchema( for _, splitColumnName := range splitColumnNames { i := tableSchema.FindColumn(splitColumnName) if i == -1 { - return nil, fmt.Errorf("can't find split column: %v", splitColumnName) + return nil, vterrors.Errorf( + vtrpcpb.Code_INVALID_ARGUMENT, + "can't find split column: %v", splitColumnName) } result = append(result, &tableSchema.Columns[i]) } diff --git a/go/vt/vttablet/tabletserver/tabletserver.go b/go/vt/vttablet/tabletserver/tabletserver.go index 2d03a69f9ab..df68bf1b006 100644 --- a/go/vt/vttablet/tabletserver/tabletserver.go +++ b/go/vt/vttablet/tabletserver/tabletserver.go @@ -1089,11 +1089,11 @@ func (tsv *TabletServer) SplitQuery( defer sqlExecuter.done() algorithmObject, err := createSplitQueryAlgorithmObject(algorithm, splitParams, sqlExecuter) if err != nil { - return splitQueryToTabletError(err) + return err } splits, err = splitquery.NewSplitter(splitParams, algorithmObject).Split() if err != nil { - return splitQueryToTabletError(err) + return err } return nil }, @@ -1299,13 +1299,11 @@ func createSplitParams( ) (*splitquery.SplitParams, error) { switch { case numRowsPerQueryPart != 0 && splitCount == 0: - splitParams, err := splitquery.NewSplitParamsGivenNumRowsPerQueryPart( + return splitquery.NewSplitParamsGivenNumRowsPerQueryPart( query, splitColumns, numRowsPerQueryPart, schema) - return splitParams, splitQueryToTabletError(err) case numRowsPerQueryPart == 0 && splitCount != 0: - splitParams, err := splitquery.NewSplitParamsGivenSplitCount( + return splitquery.NewSplitParamsGivenSplitCount( query, splitColumns, splitCount, schema) - return splitParams, splitQueryToTabletError(err) default: panic(fmt.Errorf("Exactly one of {numRowsPerQueryPart, splitCount} must be"+ " non zero. 
This should have already been caught by 'validateSplitQueryParameters' and "+ @@ -1386,16 +1384,6 @@ func createSplitQueryAlgorithmObject( } } -// splitQueryToTabletError converts the given error assumed to be returned from the -// splitquery-package into a TabletError suitable to be returned to the caller. -// It returns nil if 'err' is nil. -func splitQueryToTabletError(err error) error { - if err == nil { - return nil - } - return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "splitquery: %v", err) -} - // StreamHealth streams the health status to callback. // At the beginning, if TabletServer has a valid health // state, that response is immediately sent. diff --git a/go/vt/vttablet/tabletserver/tabletserver_test.go b/go/vt/vttablet/tabletserver/tabletserver_test.go index b46caafeede..1d1f80590a3 100644 --- a/go/vt/vttablet/tabletserver/tabletserver_test.go +++ b/go/vt/vttablet/tabletserver/tabletserver_test.go @@ -23,10 +23,10 @@ import ( "github.com/youtube/vitess/go/mysqlconn/fakesqldb" "github.com/youtube/vitess/go/sqldb" "github.com/youtube/vitess/go/sqltypes" + "github.com/youtube/vitess/go/vt/vterrors" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/messager" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/querytypes" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" - "github.com/youtube/vitess/go/vt/vterrors" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" @@ -1780,7 +1780,7 @@ func TestTabletServerSplitQueryEqualSplitsOnStringColumn(t *testing.T) { 0, /* numRowsPerQueryPart */ querypb.SplitQueryRequest_EQUAL_SPLITS) want := - "splitquery: using the EQUAL_SPLITS algorithm in SplitQuery" + + "using the EQUAL_SPLITS algorithm in SplitQuery" + " requires having a numeric (integral or float) split-column." + " Got type: {Name: 'name_string', Type: VARCHAR}" if err.Error() != want {