diff --git a/data/test/vtgate/dml_cases.txt b/data/test/vtgate/dml_cases.txt index 40a9c69a542..fc06a5f3558 100644 --- a/data/test/vtgate/dml_cases.txt +++ b/data/test/vtgate/dml_cases.txt @@ -1512,3 +1512,24 @@ "Query": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)" } } + +# update vindex value to null +"update user set name = null where id = 1" +{ + "Original": "update user set name = null where id = 1", + "Instructions": { + "Opcode": "UpdateEqual", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "Query": "update user set name = null where id = 1", + "Vindex": "user_index", + "Values": [1], + "ChangedVindexValues": { + "name_user_map": [null] + }, + "Table": "user", + "OwnedVindexQuery": "select Name, Costly from user where id = 1 for update" + } +} diff --git a/doc/GettingStartedKubernetes.md b/doc/GettingStartedKubernetes.md index 004efecaa5f..d0d358e3f8e 100644 --- a/doc/GettingStartedKubernetes.md +++ b/doc/GettingStartedKubernetes.md @@ -278,7 +278,7 @@ $ export KUBECTL=/example/path/to/google-cloud-sdk/bin/kubectl 1. 
**Access vtctld web UI** To access vtctld from outside Kubernetes, use [kubectl proxy] - (http://kubernetes.io/v1.1/docs/user-guide/kubectl/kubectl_proxy.html) + (https://kubernetes.io/docs/tasks/access-kubernetes-api/http-proxy-access-api/) to create an authenticated tunnel on your workstation: **Note:** The proxy command runs in the foreground, @@ -292,13 +292,13 @@ $ export KUBECTL=/example/path/to/google-cloud-sdk/bin/kubectl You can then load the vtctld web UI on `localhost`: - http://localhost:8001/api/v1/proxy/namespaces/default/services/vtctld:web/ + http://localhost:8001/api/v1/namespaces/default/services/vtctld:web/proxy You can also use this proxy to access the [Kubernetes Dashboard] - (http://kubernetes.io/v1.1/docs/user-guide/ui.html), + (https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/), where you can monitor nodes, pods, and services: - http://localhost:8001/ui + http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/. 1. 
**Use vtctlclient to send commands to vtctld** diff --git a/examples/kubernetes/vtctld-up.sh b/examples/kubernetes/vtctld-up.sh index 5ed5ff5b5a9..ff381129c10 100755 --- a/examples/kubernetes/vtctld-up.sh +++ b/examples/kubernetes/vtctld-up.sh @@ -47,5 +47,4 @@ cat vtctld-controller-template.yaml | sed -e "$sed_script" | $KUBECTL $KUBECTL_O echo echo "To access vtctld web UI, start kubectl proxy in another terminal:" echo " kubectl proxy --port=8001" -echo "Then visit http://localhost:8001/api/v1/proxy/namespaces/$VITESS_NAME/services/vtctld:web/" - +echo "Then visit http://localhost:8001/api/v1/namespaces/$VITESS_NAME/services/vtctld:web/proxy" \ No newline at end of file diff --git a/go/bucketpool/bucketpool.go b/go/bucketpool/bucketpool.go new file mode 100644 index 00000000000..be0ec18aa7d --- /dev/null +++ b/go/bucketpool/bucketpool.go @@ -0,0 +1,91 @@ +package bucketpool + +import ( + "math" + "sync" +) + +type sizedPool struct { + size int + pool sync.Pool +} + +func newSizedPool(size int) *sizedPool { + return &sizedPool{ + size: size, + pool: sync.Pool{ + New: func() interface{} { return makeSlicePointer(size) }, + }, + } +} + +// Pool is actually multiple pools which store buffers of specific size. +// i.e. it can be three pools which return buffers 32K, 64K and 128K. +type Pool struct { + minSize int + maxSize int + pools []*sizedPool +} + +// New returns Pool which has buckets from minSize to maxSize. +// Buckets increase with the power of two, i.e with multiplier 2: [2b, 4b, 16b, ... , 1024b] +// Last pool will always be capped to maxSize. 
+func New(minSize, maxSize int) *Pool { + if maxSize < minSize { + panic("maxSize can't be less than minSize") + } + const multiplier = 2 + var pools []*sizedPool + curSize := minSize + for curSize < maxSize { + pools = append(pools, newSizedPool(curSize)) + curSize *= multiplier + } + pools = append(pools, newSizedPool(maxSize)) + return &Pool{ + minSize: minSize, + maxSize: maxSize, + pools: pools, + } +} + +func (p *Pool) findPool(size int) *sizedPool { + if size > p.maxSize { + return nil + } + idx := int(math.Ceil(math.Log2(float64(size) / float64(p.minSize)))) + if idx < 0 { + idx = 0 + } + if idx > len(p.pools)-1 { + return nil + } + return p.pools[idx] +} + +// Get returns pointer to []byte which has len size. +// If there is no bucket with buffers >= size, slice will be allocated. +func (p *Pool) Get(size int) *[]byte { + sp := p.findPool(size) + if sp == nil { + return makeSlicePointer(size) + } + buf := sp.pool.Get().(*[]byte) + *buf = (*buf)[:size] + return buf +} + +// Put returns pointer to slice to some bucket. 
Discards slice for which there is no bucket +func (p *Pool) Put(b *[]byte) { + sp := p.findPool(cap(*b)) + if sp == nil { + return + } + *b = (*b)[:cap(*b)] + sp.pool.Put(b) +} + +func makeSlicePointer(size int) *[]byte { + data := make([]byte, size) + return &data +} diff --git a/go/bucketpool/bucketpool_test.go b/go/bucketpool/bucketpool_test.go new file mode 100644 index 00000000000..a7e2cf0e75c --- /dev/null +++ b/go/bucketpool/bucketpool_test.go @@ -0,0 +1,202 @@ +package bucketpool + +import ( + "math/rand" + "testing" +) + +func TestPool(t *testing.T) { + maxSize := 16384 + pool := New(1024, maxSize) + if pool.maxSize != maxSize { + t.Fatalf("Invalid max pool size: %d, expected %d", pool.maxSize, maxSize) + } + if len(pool.pools) != 5 { + t.Fatalf("Invalid number of pools: %d, expected %d", len(pool.pools), 5) + } + + buf := pool.Get(64) + if len(*buf) != 64 { + t.Fatalf("unexpected buf length: %d", len(*buf)) + } + if cap(*buf) != 1024 { + t.Fatalf("unexepected buf cap: %d", cap(*buf)) + } + + // get from same pool, check that length is right + buf = pool.Get(128) + if len(*buf) != 128 { + t.Fatalf("unexpected buf length: %d", len(*buf)) + } + if cap(*buf) != 1024 { + t.Fatalf("unexepected buf cap: %d", cap(*buf)) + } + pool.Put(buf) + + // get boundary size + buf = pool.Get(1024) + if len(*buf) != 1024 { + t.Fatalf("unexpected buf length: %d", len(*buf)) + } + if cap(*buf) != 1024 { + t.Fatalf("unexepected buf cap: %d", cap(*buf)) + } + pool.Put(buf) + + // get from the middle + buf = pool.Get(5000) + if len(*buf) != 5000 { + t.Fatalf("unexpected buf length: %d", len(*buf)) + } + if cap(*buf) != 8192 { + t.Fatalf("unexepected buf cap: %d", cap(*buf)) + } + pool.Put(buf) + + // check last pool + buf = pool.Get(16383) + if len(*buf) != 16383 { + t.Fatalf("unexpected buf length: %d", len(*buf)) + } + if cap(*buf) != 16384 { + t.Fatalf("unexepected buf cap: %d", cap(*buf)) + } + pool.Put(buf) + + // get big buffer + buf = pool.Get(16385) + if len(*buf) != 
16385 { + t.Fatalf("unexpected buf length: %d", len(*buf)) + } + if cap(*buf) != 16385 { + t.Fatalf("unexepected buf cap: %d", cap(*buf)) + } + pool.Put(buf) +} + +func TestPoolOneSize(t *testing.T) { + maxSize := 1024 + pool := New(1024, maxSize) + if pool.maxSize != maxSize { + t.Fatalf("Invalid max pool size: %d, expected %d", pool.maxSize, maxSize) + } + buf := pool.Get(64) + if len(*buf) != 64 { + t.Fatalf("unexpected buf length: %d", len(*buf)) + } + if cap(*buf) != 1024 { + t.Fatalf("unexepected buf cap: %d", cap(*buf)) + } + pool.Put(buf) + + buf = pool.Get(1025) + if len(*buf) != 1025 { + t.Fatalf("unexpected buf length: %d", len(*buf)) + } + if cap(*buf) != 1025 { + t.Fatalf("unexepected buf cap: %d", cap(*buf)) + } + pool.Put(buf) +} + +func TestPoolTwoSizeNotMultiplier(t *testing.T) { + maxSize := 2000 + pool := New(1024, maxSize) + if pool.maxSize != maxSize { + t.Fatalf("Invalid max pool size: %d, expected %d", pool.maxSize, maxSize) + } + buf := pool.Get(64) + if len(*buf) != 64 { + t.Fatalf("unexpected buf length: %d", len(*buf)) + } + if cap(*buf) != 1024 { + t.Fatalf("unexepected buf cap: %d", cap(*buf)) + } + pool.Put(buf) + + buf = pool.Get(2001) + if len(*buf) != 2001 { + t.Fatalf("unexpected buf length: %d", len(*buf)) + } + if cap(*buf) != 2001 { + t.Fatalf("unexepected buf cap: %d", cap(*buf)) + } + pool.Put(buf) +} + +func TestPoolWeirdMaxSize(t *testing.T) { + maxSize := 15000 + pool := New(1024, maxSize) + if pool.maxSize != maxSize { + t.Fatalf("Invalid max pool size: %d, expected %d", pool.maxSize, maxSize) + } + + buf := pool.Get(14000) + if len(*buf) != 14000 { + t.Fatalf("unexpected buf length: %d", len(*buf)) + } + if cap(*buf) != 15000 { + t.Fatalf("unexepected buf cap: %d", cap(*buf)) + } + pool.Put(buf) + + buf = pool.Get(16383) + if len(*buf) != 16383 { + t.Fatalf("unexpected buf length: %d", len(*buf)) + } + if cap(*buf) != 16383 { + t.Fatalf("unexepected buf cap: %d", cap(*buf)) + } + pool.Put(buf) +} + +func TestFuzz(t 
*testing.T) { + maxTestSize := 16384 + for i := 0; i < 20000; i++ { + minSize := rand.Intn(maxTestSize) + maxSize := rand.Intn(maxTestSize-minSize) + minSize + p := New(minSize, maxSize) + bufSize := rand.Intn(maxTestSize) + buf := p.Get(bufSize) + if len(*buf) != bufSize { + t.Fatalf("Invalid length %d, expected %d", len(*buf), bufSize) + } + sPool := p.findPool(bufSize) + if sPool == nil { + if cap(*buf) != len(*buf) { + t.Fatalf("Invalid cap %d, expected %d", cap(*buf), len(*buf)) + } + } else { + if cap(*buf) != sPool.size { + t.Fatalf("Invalid cap %d, expected %d", cap(*buf), sPool.size) + } + } + p.Put(buf) + } +} + +func BenchmarkPool(b *testing.B) { + pool := New(2, 16384) + b.SetParallelism(16) + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + randomSize := rand.Intn(pool.maxSize) + data := pool.Get(randomSize) + pool.Put(data) + } + }) +} + +func BenchmarkPoolGet(b *testing.B) { + pool := New(2, 16384) + b.SetParallelism(16) + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + randomSize := rand.Intn(pool.maxSize) + data := pool.Get(randomSize) + _ = data + } + }) +} diff --git a/go/mysql/client.go b/go/mysql/client.go index 119640a8237..b45da1922d6 100644 --- a/go/mysql/client.go +++ b/go/mysql/client.go @@ -250,8 +250,7 @@ func (c *Conn) clientHandshake(characterSet uint8, params *ConnParams) error { // Switch to SSL. conn := tls.Client(c.conn, clientConfig) c.conn = conn - c.reader.Reset(conn) - c.writer.Reset(conn) + c.bufferedReader.Reset(conn) c.Capabilities |= CapabilityClientSSL } @@ -508,7 +507,7 @@ func (c *Conn) writeSSLRequest(capabilities uint32, characterSet uint8, params * pos = writeByte(data, pos, characterSet) // And send it as is. 
- if err := c.writeEphemeralPacket(true /* direct */); err != nil { + if err := c.writeEphemeralPacket(); err != nil { return NewSQLError(CRServerLost, SSUnknownSQLState, "cannot send SSLRequest: %v", err) } return nil @@ -600,7 +599,7 @@ func (c *Conn) writeHandshakeResponse41(capabilities uint32, scrambledPassword [ return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "writeHandshakeResponse41: only packed %v bytes, out of %v allocated", pos, len(data)) } - if err := c.writeEphemeralPacket(true /* direct */); err != nil { + if err := c.writeEphemeralPacket(); err != nil { return NewSQLError(CRServerLost, SSUnknownSQLState, "cannot send HandshakeResponse41: %v", err) } return nil @@ -627,5 +626,5 @@ func (c *Conn) writeClearTextPassword(params *ConnParams) error { if pos != len(data) { return fmt.Errorf("error building ClearTextPassword packet: got %v bytes expected %v", pos, len(data)) } - return c.writeEphemeralPacket(true) + return c.writeEphemeralPacket() } diff --git a/go/mysql/conn.go b/go/mysql/conn.go index 522b249eb38..2f374fbbc10 100644 --- a/go/mysql/conn.go +++ b/go/mysql/conn.go @@ -24,6 +24,7 @@ import ( "strings" "sync" + "vitess.io/vitess/go/bucketpool" querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -40,25 +41,17 @@ const ( // read or write a packet while one is already used. ephemeralUnused = iota - // ephemeralWriteGlobalBuffer means conn.buffer was used to write - // a packet. The first four bytes contain size and sequence. - ephemeralWriteGlobalBuffer - // ephemeralWriteSingleBuffer means a single buffer was // allocated to write a packet. It is in - // c.currentEphemeralPacket. The first four bytes contain size + // c.currentEphemeralWriteBuffer. The first four bytes contain size // and sequence. ephemeralWriteSingleBuffer // ephemeralWriteBigBuffer means a big buffer was allocated to // write a packet, and will need to be split when sending. - // The allocated buffer is in c.currentEphemeralPacket. 
+ // The allocated buffer is in c.currentEphemeralWriteBuffer. ephemeralWriteBigBuffer - // ephemeralReadGlobalBuffer means conn.buffer was used for reading - // an ephemeral packet. - ephemeralReadGlobalBuffer - // ephemeralReadSingleBuffer means we are using a pool of buffers // for reading. ephemeralReadSingleBuffer @@ -144,9 +137,9 @@ type Conn struct { ClientData interface{} // Packet encoding variables. - reader *bufio.Reader - writer *bufio.Writer - sequence uint8 + bufferedReader *bufio.Reader + bufferedWriter *bufio.Writer + sequence uint8 // fields contains the fields definitions for an on-going // streaming query. It is set by ExecuteStreamFetch, and @@ -155,27 +148,6 @@ type Conn struct { // fields, this is set to an empty array (but not nil). fields []*querypb.Field - // Internal buffer for zero-allocation reads and writes. This - // uses the fact that both sides of a connection either read - // packets, or write packets, but never do both, and both - // sides know who is expected to read or write a packet next. - // - // Reading side: if the next expected packet will most likely be - // small, and we don't need to hand on to the memory after reading - // the packet, use readEphemeralPacket instead of readPacket. - // If the packet is too big, it will revert to the usual read. - // But if the packet is smaller than connBufferSize, this buffer - // will be used instead. - // - // Writing side: if the next packet to write is smaller than - // connBufferSize-4, this buffer can be used to create a - // packet. It will contain both the size and sequence header, - // and the contents of the packet. - // Call startEphemeralPacket(length) to get a buffer. If length - // is smaller or equal than connBufferSize-4, this buffer will be used. - // Otherwise memory will be allocated for it. - buffer []byte - // Keep track of how and of the buffer we allocated for an // ephemeral packet on the read and write sides. 
// These fields are used by: @@ -183,91 +155,70 @@ type Conn struct { // - readEphemeralPacket / recycleReadPacket methods for reads. currentEphemeralPolicy int // TODO (danieltahara): Ultimately get rid of this delineation. - currentEphemeralPacket []byte - currentEphemeralBuffer *[]byte + // currentEphemeralWriteBuffer and currentEphemeralReadBuffer used for tracking + // allocated temporary buffers for writes and reads respectively. + currentEphemeralWriteBuffer *[]byte + currentEphemeralReadBuffer *[]byte } // bufPool is used to allocate and free buffers in an efficient way. -var bufPool = sync.Pool{} +var bufPool = bucketpool.New(connBufferSize, MaxPacketSize) + +// writersPool is used for pooling bufio.Writer objects. +var writersPool = sync.Pool{New: func() interface{} { return bufio.NewWriterSize(nil, connBufferSize) }} // newConn is an internal method to create a Conn. Used by client and server // side for common creation code. func newConn(conn net.Conn) *Conn { return &Conn{ - conn: conn, - - reader: bufio.NewReaderSize(conn, connBufferSize), - writer: bufio.NewWriterSize(conn, connBufferSize), - sequence: 0, - buffer: make([]byte, connBufferSize), + conn: conn, + bufferedReader: bufio.NewReaderSize(conn, connBufferSize), + sequence: 0, } } -// readPacketDirect attempts to read a packet from the socket directly. -// It needs to be used for the first handshake packet the server receives, -// so we do't buffer the SSL negotiation packet. As a shortcut, only -// packets smaller than MaxPacketSize can be read here. -func (c *Conn) readPacketDirect() ([]byte, error) { - var header [4]byte - if _, err := io.ReadFull(c.conn, header[:]); err != nil { - // Propagate as is so server can ignore this kind of error - // Same as readEphemeralPacket() - if err == io.EOF { - return nil, err - } - // Treat connection reset by peer as io.EOF, otherwise is too spammy. 
- if strings.HasSuffix(err.Error(), "read: connection reset by peer") { - return nil, io.EOF - } - return nil, fmt.Errorf("io.ReadFull(header size) failed: %v", err) - } +// startWriterBuffering starts using buffered writes. This should +// be terminated by a call to flush. +func (c *Conn) startWriterBuffering() { + c.bufferedWriter = writersPool.Get().(*bufio.Writer) + c.bufferedWriter.Reset(c.conn) +} - sequence := uint8(header[3]) - if sequence != c.sequence { - return nil, fmt.Errorf("invalid sequence, expected %v got %v", c.sequence, sequence) +// flush flushes the written data to the socket. +// This must be called to terminate startBuffering. +func (c *Conn) flush() error { + if c.bufferedWriter == nil { + return nil } - c.sequence++ + defer func() { + c.bufferedWriter.Reset(nil) + writersPool.Put(c.bufferedWriter) + c.bufferedWriter = nil + }() - length := int(uint32(header[0]) | uint32(header[1])<<8 | uint32(header[2])<<16) - if length <= cap(c.buffer) { - // Fast path: read into buffer, we're good. - c.buffer = c.buffer[:length] - if _, err := io.ReadFull(c.conn, c.buffer); err != nil { - return nil, fmt.Errorf("io.ReadFull(direct packet body of length %v) failed: %v", length, err) - } - return c.buffer, nil - } - - // Sanity check - if length == MaxPacketSize { - return nil, fmt.Errorf("readPacketDirect doesn't support more than one packet") - } + return c.bufferedWriter.Flush() +} - // Slow path, revert to allocating. - data := make([]byte, length) - if _, err := io.ReadFull(c.conn, data); err != nil { - return nil, fmt.Errorf("io.ReadFull(packet body of length %v) failed: %v", length, err) +// getWriter returns the current writer. It may be either +// the original connection or a wrapper. +func (c *Conn) getWriter() io.Writer { + if c.bufferedWriter != nil { + return c.bufferedWriter } - return data, nil + return c.conn } -// readEphemeralPacket attempts to read a packet into c.buffer. 
Do -// not use this method if the contents of the packet needs to be kept -// after the next readEphemeralPacket. If the packet is bigger than -// connBufferSize, we revert to using the same behavior as a regular -// readPacket. recycleReadPacket() has to be called after this method -// is used, and before we read or write any other packet on the connection. -// -// Note if the connection is closed already, an error will be -// returned, and it may not be io.EOF. If the connection closes while -// we are stuck waiting for data, an error will also be returned, and -// it most likely will be io.EOF. -func (c *Conn) readEphemeralPacket() ([]byte, error) { +func (c *Conn) readEphemeralPacketHelper(direct bool) ([]byte, error) { if c.currentEphemeralPolicy != ephemeralUnused { panic(fmt.Errorf("readEphemeralPacket: unexpected currentEphemeralPolicy: %v", c.currentEphemeralPolicy)) } + var r io.Reader = c.bufferedReader + if direct { + r = c.conn + } + // Note io.ReadFull will return two different types of errors: // 1. if the socket is already closed, and the go runtime knows it, // then ReadFull will return an error (different than EOF), @@ -275,7 +226,7 @@ func (c *Conn) readEphemeralPacket() ([]byte, error) { // 2. if the socket is not closed while we start the read, // but gets closed after the read is started, we'll get io.EOF. var header [4]byte - if _, err := io.ReadFull(c.reader, header[:]); err != nil { + if _, err := io.ReadFull(r, header[:]); err != nil { // The special casing of propagating io.EOF up // is used by the server side only, to suppress an error // message if a client just disconnects. @@ -301,43 +252,19 @@ func (c *Conn) readEphemeralPacket() ([]byte, error) { // exactly size MaxPacketSize. return nil, nil } - if length <= cap(c.buffer) { - // Fast path: read into buffer, we're good. 
- c.currentEphemeralPolicy = ephemeralReadGlobalBuffer - c.buffer = c.buffer[:length] - if _, err := io.ReadFull(c.reader, c.buffer); err != nil { - return nil, fmt.Errorf("io.ReadFull(packet body of length %v) failed: %v", length, err) - } - return c.buffer, nil - } - // Slightly slower path: single packet. Use the bufPool. + // Use the bufPool. if length < MaxPacketSize { c.currentEphemeralPolicy = ephemeralReadSingleBuffer - i := bufPool.Get() - if i == nil { - // We couldn't get an array from the pool, allocate one. - data := make([]byte, length) - c.currentEphemeralBuffer = &data - } else { - // We got an array from the pool, see if it's - // big enough. - data := i.(*[]byte) - if cap(*data) >= length { - // big enough, just use it. - *data = (*data)[:length] - c.currentEphemeralBuffer = data - } else { - // not big enough: allocate a new one, discard - // the smaller buffer. - data := make([]byte, length) - c.currentEphemeralBuffer = &data - } - } - if _, err := io.ReadFull(c.reader, *c.currentEphemeralBuffer); err != nil { + c.currentEphemeralReadBuffer = bufPool.Get(length) + if _, err := io.ReadFull(r, *c.currentEphemeralReadBuffer); err != nil { return nil, fmt.Errorf("io.ReadFull(packet body of length %v) failed: %v", length, err) } - return *c.currentEphemeralBuffer, nil + return *c.currentEphemeralReadBuffer, nil + } + + if direct { + return nil, fmt.Errorf("readEphemeralPacketDirect doesn't support more than one packet") } // Much slower path, revert to allocating everything from scratch. @@ -345,7 +272,7 @@ func (c *Conn) readEphemeralPacket() ([]byte, error) { // optimize this code path easily. 
c.currentEphemeralPolicy = ephemeralReadBigBuffer data := make([]byte, length) - if _, err := io.ReadFull(c.reader, data); err != nil { + if _, err := io.ReadFull(c.bufferedReader, data); err != nil { return nil, fmt.Errorf("io.ReadFull(packet body of length %v) failed: %v", length, err) } for { @@ -368,21 +295,39 @@ func (c *Conn) readEphemeralPacket() ([]byte, error) { return data, nil } +// readEphemeralPacketDirect attempts to read a packet from the socket directly. +// It needs to be used for the first handshake packet the server receives, +// so we do't buffer the SSL negotiation packet. As a shortcut, only +// packets smaller than MaxPacketSize can be read here. +func (c *Conn) readEphemeralPacketDirect() ([]byte, error) { + return c.readEphemeralPacketHelper(true) +} + +// readEphemeralPacket attempts to read a packet into buffer from sync.Pool. Do +// not use this method if the contents of the packet needs to be kept +// after the next readEphemeralPacket. +// +// Note if the connection is closed already, an error will be +// returned, and it may not be io.EOF. If the connection closes while +// we are stuck waiting for data, an error will also be returned, and +// it most likely will be io.EOF. +func (c *Conn) readEphemeralPacket() ([]byte, error) { + return c.readEphemeralPacketHelper(false) +} + // recycleReadPacket recycles the read packet. It needs to be called // after readEphemeralPacket was called. func (c *Conn) recycleReadPacket() { switch c.currentEphemeralPolicy { - case ephemeralReadGlobalBuffer: - // We used small built-in buffer, nothing to do. case ephemeralReadSingleBuffer: // We are using the pool, put the buffer back in. - bufPool.Put(c.currentEphemeralBuffer) - c.currentEphemeralBuffer = nil + bufPool.Put(c.currentEphemeralReadBuffer) + c.currentEphemeralReadBuffer = nil case ephemeralReadBigBuffer: // We allocated a one-time buffer we can't re-use. // Nothing to do. Nil out for safety. 
- c.currentEphemeralBuffer = nil - case ephemeralUnused, ephemeralWriteGlobalBuffer, ephemeralWriteSingleBuffer, ephemeralWriteBigBuffer: + c.currentEphemeralReadBuffer = nil + case ephemeralUnused, ephemeralWriteSingleBuffer, ephemeralWriteBigBuffer: // Programming error. panic(fmt.Errorf("trying to call recycleReadPacket while currentEphemeralPolicy is %d", c.currentEphemeralPolicy)) } @@ -393,7 +338,7 @@ func (c *Conn) recycleReadPacket() { func (c *Conn) readOnePacket() ([]byte, error) { var header [4]byte - if _, err := io.ReadFull(c.reader, header[:]); err != nil { + if _, err := io.ReadFull(c.bufferedReader, header[:]); err != nil { return nil, fmt.Errorf("io.ReadFull(header size) failed: %v", err) } @@ -412,7 +357,7 @@ func (c *Conn) readOnePacket() ([]byte, error) { } data := make([]byte, length) - if _, err := io.ReadFull(c.reader, data); err != nil { + if _, err := io.ReadFull(c.bufferedReader, data); err != nil { return nil, fmt.Errorf("io.ReadFull(packet body of length %v) failed: %v", length, err) } return data, nil @@ -476,6 +421,8 @@ func (c *Conn) writePacket(data []byte) error { index := 0 length := len(data) + w := c.getWriter() + for { // Packet length is capped to MaxPacketSize. packetLength := length @@ -489,14 +436,14 @@ func (c *Conn) writePacket(data []byte) error { header[1] = byte(packetLength >> 8) header[2] = byte(packetLength >> 16) header[3] = c.sequence - if n, err := c.writer.Write(header[:]); err != nil { + if n, err := w.Write(header[:]); err != nil { return fmt.Errorf("Write(header) failed: %v", err) } else if n != 4 { return fmt.Errorf("Write(header) returned a short write: %v < 4", n) } // Write the body. 
- if n, err := c.writer.Write(data[index : index+packetLength]); err != nil { + if n, err := w.Write(data[index : index+packetLength]); err != nil { return fmt.Errorf("Write(packet) failed: %v", err) } else if n != packetLength { return fmt.Errorf("Write(packet) returned a short write: %v < %v", n, packetLength) @@ -514,7 +461,7 @@ func (c *Conn) writePacket(data []byte) error { header[1] = 0 header[2] = 0 header[3] = c.sequence - if n, err := c.writer.Write(header[:]); err != nil { + if n, err := w.Write(header[:]); err != nil { return fmt.Errorf("Write(empty header) failed: %v", err) } else if n != 4 { return fmt.Errorf("Write(empty header) returned a short write: %v < 4", n) @@ -532,76 +479,46 @@ func (c *Conn) startEphemeralPacket(length int) []byte { panic("startEphemeralPacket cannot be used while a packet is already started.") } - // Fast path: we can reuse a single memory buffer for - // both the header and the data. - if length <= cap(c.buffer)-4 { - c.currentEphemeralPolicy = ephemeralWriteGlobalBuffer - c.buffer = c.buffer[:length+4] - c.buffer[0] = byte(length) - c.buffer[1] = byte(length >> 8) - c.buffer[2] = byte(length >> 16) - c.buffer[3] = c.sequence - c.sequence++ - return c.buffer[4:] - } - - // Slower path: we can use a single buffer for both the header and the data, but it has to be allocated. 
+ // get buffer from pool if length < MaxPacketSize { c.currentEphemeralPolicy = ephemeralWriteSingleBuffer - c.currentEphemeralPacket = make([]byte, length+4) - c.currentEphemeralPacket[0] = byte(length) - c.currentEphemeralPacket[1] = byte(length >> 8) - c.currentEphemeralPacket[2] = byte(length >> 16) - c.currentEphemeralPacket[3] = c.sequence + + c.currentEphemeralWriteBuffer = bufPool.Get(length + 4) + (*c.currentEphemeralWriteBuffer)[0] = byte(length) + (*c.currentEphemeralWriteBuffer)[1] = byte(length >> 8) + (*c.currentEphemeralWriteBuffer)[2] = byte(length >> 16) + (*c.currentEphemeralWriteBuffer)[3] = c.sequence c.sequence++ - return c.currentEphemeralPacket[4:] + return (*c.currentEphemeralWriteBuffer)[4:] } // Even slower path: create a full size buffer and return it. c.currentEphemeralPolicy = ephemeralWriteBigBuffer - c.currentEphemeralPacket = make([]byte, length) - return c.currentEphemeralPacket + data := make([]byte, length) + c.currentEphemeralWriteBuffer = &data + return *c.currentEphemeralWriteBuffer } // writeEphemeralPacket writes the packet that was allocated by -// startEphemeralPacket. If 'direct' is set, we write to the -// underlying connection directly, by-passing the write buffer. -func (c *Conn) writeEphemeralPacket(direct bool) error { +// startEphemeralPacket. +func (c *Conn) writeEphemeralPacket() error { defer c.recycleWritePacket() - var w io.Writer = c.writer - if direct { - w = c.conn - } - switch c.currentEphemeralPolicy { - case ephemeralWriteGlobalBuffer: - // Just write c.buffer as a single buffer. - // It has both header and data. - if n, err := w.Write(c.buffer); err != nil { - return fmt.Errorf("Conn %v: Write(c.buffer) failed: %v", c.ID(), err) - } else if n != len(c.buffer) { - return fmt.Errorf("Conn %v: Write(c.buffer) returned a short write: %v < %v", c.ID(), n, len(c.buffer)) - } case ephemeralWriteSingleBuffer: // Write the allocated buffer as a single buffer. // It has both header and data. 
- if n, err := w.Write(c.currentEphemeralPacket); err != nil { - return fmt.Errorf("Conn %v: Write(c.currentEphemeralPacket) failed: %v", c.ID(), err) - } else if n != len(c.currentEphemeralPacket) { - return fmt.Errorf("Conn %v: Write(c.currentEphemeralPacket) returned a short write: %v < %v", c.ID(), n, len(c.currentEphemeralPacket)) + if n, err := c.getWriter().Write(*c.currentEphemeralWriteBuffer); err != nil { + return fmt.Errorf("Conn %v: Write(*c.currentEphemeralWriteBuffer) failed: %v", c.ID(), err) + } else if n != len(*c.currentEphemeralWriteBuffer) { + return fmt.Errorf("Conn %v: Write(*c.currentEphemeralWriteBuffer) returned a short write: %v < %v", c.ID(), n, len(*c.currentEphemeralWriteBuffer)) } case ephemeralWriteBigBuffer: // This is the slower path for big data. - // With direct=true, the caller expects a flush, so we call it - // manually. - if err := c.writePacket(c.currentEphemeralPacket); err != nil { + if err := c.writePacket(*c.currentEphemeralWriteBuffer); err != nil { return fmt.Errorf("Conn %v: %v", c.ID(), err) } - if direct { - return c.flush() - } - case ephemeralUnused, ephemeralReadGlobalBuffer, ephemeralReadSingleBuffer, ephemeralReadBigBuffer: + case ephemeralUnused, ephemeralReadSingleBuffer, ephemeralReadBigBuffer: // Programming error. panic(fmt.Errorf("Conn %v: trying to call writeEphemeralPacket while currentEphemeralPolicy is %v", c.ID(), c.currentEphemeralPolicy)) } @@ -613,33 +530,22 @@ func (c *Conn) writeEphemeralPacket(direct bool) error { // after writeEphemeralPacket was called. func (c *Conn) recycleWritePacket() { switch c.currentEphemeralPolicy { - case ephemeralWriteGlobalBuffer: - // We used small built-in buffer, nothing to do. case ephemeralWriteSingleBuffer: // Release our reference so the buffer can be gced - c.currentEphemeralPacket = nil + bufPool.Put(c.currentEphemeralWriteBuffer) + c.currentEphemeralWriteBuffer = nil case ephemeralWriteBigBuffer: // We allocated a one-time buffer we can't re-use. - // N.B. 
Unlike the read packet, we actually assign the big buffer to currentEphemeralBuffer, + // N.B. Unlike the read packet, we actually assign the big buffer to currentEphemeralReadBuffer, // so we should remove our reference to it. - c.currentEphemeralPacket = nil - case ephemeralUnused, ephemeralReadGlobalBuffer, - ephemeralReadSingleBuffer, ephemeralReadBigBuffer: + c.currentEphemeralWriteBuffer = nil + case ephemeralUnused, ephemeralReadSingleBuffer, ephemeralReadBigBuffer: // Programming error. panic(fmt.Errorf("trying to call recycleWritePacket while currentEphemeralPolicy is %d", c.currentEphemeralPolicy)) } c.currentEphemeralPolicy = ephemeralUnused } -// flush flushes the written data to the socket. -// This method returns a generic error, not a SQLError. -func (c *Conn) flush() error { - if err := c.writer.Flush(); err != nil { - return fmt.Errorf("Conn %v: Flush() failed: %v", c.ID(), err) - } - return nil -} - // writeComQuit writes a Quit message for the server, to indicate we // want to close the connection. // Client -> Server. @@ -650,7 +556,7 @@ func (c *Conn) writeComQuit() error { data := c.startEphemeralPacket(1) data[0] = ComQuit - if err := c.writeEphemeralPacket(true); err != nil { + if err := c.writeEphemeralPacket(); err != nil { return NewSQLError(CRServerGone, SSUnknownSQLState, err.Error()) } return nil @@ -689,8 +595,7 @@ func (c *Conn) IsClosed() bool { // Packet writing methods, for generic packets. // -// writeOKPacket writes an OK packet, directly. Do not use this if -// there is already a packet in the buffer. +// writeOKPacket writes an OK packet. // Server -> Client. // This method returns a generic error, not a SQLError. 
func (c *Conn) writeOKPacket(affectedRows, lastInsertID uint64, flags uint16, warnings uint16) error { @@ -707,7 +612,7 @@ func (c *Conn) writeOKPacket(affectedRows, lastInsertID uint64, flags uint16, wa pos = writeUint16(data, pos, flags) pos = writeUint16(data, pos, warnings) - return c.writeEphemeralPacket(true) + return c.writeEphemeralPacket() } // writeOKPacketWithEOFHeader writes an OK packet with an EOF header. @@ -729,15 +634,10 @@ func (c *Conn) writeOKPacketWithEOFHeader(affectedRows, lastInsertID uint64, fla pos = writeUint16(data, pos, flags) pos = writeUint16(data, pos, warnings) - if err := c.writeEphemeralPacket(false); err != nil { - return err - } - return c.flush() + return c.writeEphemeralPacket() } // writeErrorPacket writes an error packet. -// It writes directly to the socket, so this cannot be called after other -// packets have already been written. // Server -> Client. // This method returns a generic error, not a SQLError. func (c *Conn) writeErrorPacket(errorCode uint16, sqlState string, format string, args ...interface{}) error { @@ -757,7 +657,7 @@ func (c *Conn) writeErrorPacket(errorCode uint16, sqlState string, format string pos = writeEOFString(data, pos, sqlState) pos = writeEOFString(data, pos, errorMessage) - return c.writeEphemeralPacket(true) + return c.writeEphemeralPacket() } // writeErrorPacketFromError writes an error packet, from a regular error. 
@@ -780,7 +680,7 @@ func (c *Conn) writeEOFPacket(flags uint16, warnings uint16) error { pos = writeUint16(data, pos, warnings) pos = writeUint16(data, pos, flags) - return c.writeEphemeralPacket(false) + return c.writeEphemeralPacket() } // diff --git a/go/mysql/conn_test.go b/go/mysql/conn_test.go index 50b99714d31..3ff4e0889a7 100644 --- a/go/mysql/conn_test.go +++ b/go/mysql/conn_test.go @@ -77,26 +77,22 @@ func useWritePacket(t *testing.T, cConn *Conn, data []byte) { if err := cConn.writePacket(data); err != nil { t.Fatalf("writePacket failed: %v", err) } - if err := cConn.flush(); err != nil { - t.Fatalf("flush failed: %v", err) - } } -func useWriteEphemeralPacket(t *testing.T, cConn *Conn, data []byte) { +func useWriteEphemeralPacketBuffered(t *testing.T, cConn *Conn, data []byte) { defer func() { if x := recover(); x != nil { t.Fatalf("%v", x) } }() + cConn.startWriterBuffering() + defer cConn.flush() buf := cConn.startEphemeralPacket(len(data)) copy(buf, data) - if err := cConn.writeEphemeralPacket(false); err != nil { + if err := cConn.writeEphemeralPacket(); err != nil { t.Fatalf("writeEphemeralPacket(false) failed: %v", err) } - if err := cConn.flush(); err != nil { - t.Fatalf("flush failed: %v", err) - } } func useWriteEphemeralPacketDirect(t *testing.T, cConn *Conn, data []byte) { @@ -108,7 +104,7 @@ func useWriteEphemeralPacketDirect(t *testing.T, cConn *Conn, data []byte) { buf := cConn.startEphemeralPacket(len(data)) copy(buf, data) - if err := cConn.writeEphemeralPacket(true); err != nil { + if err := cConn.writeEphemeralPacket(); err != nil { t.Fatalf("writeEphemeralPacket(true) failed: %v", err) } } @@ -139,22 +135,25 @@ func verifyPacketCommsSpecific(t *testing.T, cConn *Conn, data []byte, func verifyPacketComms(t *testing.T, cConn, sConn *Conn, data []byte) { // All three writes, with ReadPacket. 
verifyPacketCommsSpecific(t, cConn, data, useWritePacket, sConn.ReadPacket) - verifyPacketCommsSpecific(t, cConn, data, useWriteEphemeralPacket, sConn.ReadPacket) + verifyPacketCommsSpecific(t, cConn, data, useWriteEphemeralPacketBuffered, sConn.ReadPacket) verifyPacketCommsSpecific(t, cConn, data, useWriteEphemeralPacketDirect, sConn.ReadPacket) // All three writes, with readEphemeralPacket. verifyPacketCommsSpecific(t, cConn, data, useWritePacket, sConn.readEphemeralPacket) sConn.recycleReadPacket() - verifyPacketCommsSpecific(t, cConn, data, useWriteEphemeralPacket, sConn.readEphemeralPacket) + verifyPacketCommsSpecific(t, cConn, data, useWriteEphemeralPacketBuffered, sConn.readEphemeralPacket) sConn.recycleReadPacket() verifyPacketCommsSpecific(t, cConn, data, useWriteEphemeralPacketDirect, sConn.readEphemeralPacket) sConn.recycleReadPacket() - // All three writes, with readPacketDirect, if size allows it. + // All three writes, with readEphemeralPacketDirect, if size allows it. if len(data) < MaxPacketSize { - verifyPacketCommsSpecific(t, cConn, data, useWritePacket, sConn.readPacketDirect) - verifyPacketCommsSpecific(t, cConn, data, useWriteEphemeralPacket, sConn.readPacketDirect) - verifyPacketCommsSpecific(t, cConn, data, useWriteEphemeralPacketDirect, sConn.readPacketDirect) + verifyPacketCommsSpecific(t, cConn, data, useWritePacket, sConn.readEphemeralPacketDirect) + sConn.recycleReadPacket() + verifyPacketCommsSpecific(t, cConn, data, useWriteEphemeralPacketBuffered, sConn.readEphemeralPacketDirect) + sConn.recycleReadPacket() + verifyPacketCommsSpecific(t, cConn, data, useWriteEphemeralPacketDirect, sConn.readEphemeralPacketDirect) + sConn.recycleReadPacket() } } @@ -254,9 +253,6 @@ func TestBasicPackets(t *testing.T) { if err := sConn.writeEOFPacket(0x8912, 0xabba); err != nil { t.Fatalf("writeEOFPacket failed: %v", err) } - if err := sConn.flush(); err != nil { - t.Fatalf("flush failed: %v", err) - } data, err = cConn.ReadPacket() if err != nil || 
len(data) == 0 || !isEOFPacket(data) { t.Fatalf("cConn.ReadPacket - EOFPacket failed: %v %v", data, err) diff --git a/go/mysql/query.go b/go/mysql/query.go index b9dd2aaeddb..067f01b4d96 100644 --- a/go/mysql/query.go +++ b/go/mysql/query.go @@ -40,7 +40,7 @@ func (c *Conn) WriteComQuery(query string) error { data := c.startEphemeralPacket(len(query) + 1) data[0] = ComQuery copy(data[1:], query) - if err := c.writeEphemeralPacket(true); err != nil { + if err := c.writeEphemeralPacket(); err != nil { return NewSQLError(CRServerGone, SSUnknownSQLState, err.Error()) } return nil @@ -53,7 +53,7 @@ func (c *Conn) writeComInitDB(db string) error { data := c.startEphemeralPacket(len(db) + 1) data[0] = ComInitDB copy(data[1:], db) - if err := c.writeEphemeralPacket(true); err != nil { + if err := c.writeEphemeralPacket(); err != nil { return NewSQLError(CRServerGone, SSUnknownSQLState, err.Error()) } return nil @@ -65,7 +65,7 @@ func (c *Conn) writeComSetOption(operation uint16) error { data := c.startEphemeralPacket(16 + 1) data[0] = ComSetOption writeUint16(data, 1, operation) - if err := c.writeEphemeralPacket(true); err != nil { + if err := c.writeEphemeralPacket(); err != nil { return NewSQLError(CRServerGone, SSUnknownSQLState, err.Error()) } return nil @@ -481,7 +481,7 @@ func (c *Conn) sendColumnCount(count uint64) error { length := lenEncIntSize(count) data := c.startEphemeralPacket(length) writeLenEncInt(data, 0, count) - return c.writeEphemeralPacket(false) + return c.writeEphemeralPacket() } func (c *Conn) writeColumnDefinition(field *querypb.Field) error { @@ -528,7 +528,7 @@ func (c *Conn) writeColumnDefinition(field *querypb.Field) error { return fmt.Errorf("internal error: packing of column definition used %v bytes instead of %v", pos, len(data)) } - return c.writeEphemeralPacket(false) + return c.writeEphemeralPacket() } func (c *Conn) writeRow(row []sqltypes.Value) error { @@ -558,7 +558,7 @@ func (c *Conn) writeRow(row []sqltypes.Value) error { return 
fmt.Errorf("internal error packet row: got %v bytes but expected %v", pos, length) } - return c.writeEphemeralPacket(false) + return c.writeEphemeralPacket() } // writeFields writes the fields of a Result. It should be called only @@ -609,9 +609,6 @@ func (c *Conn) writeEndResult(more bool) error { if err := c.writeEOFPacket(flag, 0); err != nil { return err } - if err := c.flush(); err != nil { - return err - } } else { // This will flush too. if err := c.writeOKPacketWithEOFHeader(0, 0, flag, 0); err != nil { diff --git a/go/mysql/query_benchmark_test.go b/go/mysql/query_benchmark_test.go index 628338a3a47..3031c8eebd6 100644 --- a/go/mysql/query_benchmark_test.go +++ b/go/mysql/query_benchmark_test.go @@ -17,6 +17,7 @@ limitations under the License. package mysql import ( + "math/rand" "net" "strings" "testing" @@ -24,6 +25,8 @@ import ( "golang.org/x/net/context" ) +const benchmarkQueryPrefix = "benchmark " + func benchmarkQuery(b *testing.B, threads int, query string) { th := &testHandler{} @@ -40,7 +43,9 @@ func benchmarkQuery(b *testing.B, threads int, query string) { }() b.SetParallelism(threads) - b.SetBytes(int64(len(query))) + if query != "" { + b.SetBytes(int64(len(query))) + } host := l.Addr().(*net.TCPAddr).IP.String() port := l.Addr().(*net.TCPAddr).Port @@ -65,7 +70,14 @@ func benchmarkQuery(b *testing.B, threads int, query string) { }() for pb.Next() { - if _, err := conn.ExecuteFetch(query, 1000, true); err != nil { + execQuery := query + if execQuery == "" { + // generate random query + n := rand.Intn(MaxPacketSize-len(benchmarkQueryPrefix)) + 1 + execQuery = benchmarkQueryPrefix + strings.Repeat("x", n) + + } + if _, err := conn.ExecuteFetch(execQuery, 1000, true); err != nil { b.Fatalf("ExecuteFetch failed: %v", err) } } @@ -78,9 +90,13 @@ func benchmarkQuery(b *testing.B, threads int, query string) { // executes M queries on them, then closes them. // It is meant as a somewhat real load test. 
func BenchmarkParallelShortQueries(b *testing.B) { - benchmarkQuery(b, 10, "select rows") + benchmarkQuery(b, 10, benchmarkQueryPrefix+"select rows") } func BenchmarkParallelMediumQueries(b *testing.B) { - benchmarkQuery(b, 10, "select"+strings.Repeat("x", connBufferSize)) + benchmarkQuery(b, 10, benchmarkQueryPrefix+"select"+strings.Repeat("x", connBufferSize)) +} + +func BenchmarkParallelRandomQueries(b *testing.B) { + benchmarkQuery(b, 10, "") } diff --git a/go/mysql/replication.go b/go/mysql/replication.go index 3797728dbe7..e59db547d6a 100644 --- a/go/mysql/replication.go +++ b/go/mysql/replication.go @@ -34,7 +34,7 @@ func (c *Conn) WriteComBinlogDump(serverID uint32, binlogFilename string, binlog pos = writeUint16(data, pos, flags) pos = writeUint32(data, pos, serverID) pos = writeEOFString(data, pos, binlogFilename) - if err := c.writeEphemeralPacket(true); err != nil { + if err := c.writeEphemeralPacket(); err != nil { return NewSQLError(CRServerGone, SSUnknownSQLState, "%v", err) } return nil @@ -62,7 +62,7 @@ func (c *Conn) WriteComBinlogDumpGTID(serverID uint32, binlogFilename string, bi pos = writeUint64(data, pos, binlogPos) pos = writeUint32(data, pos, uint32(len(gtidSet))) pos += copy(data[pos:], gtidSet) - if err := c.writeEphemeralPacket(true); err != nil { + if err := c.writeEphemeralPacket(); err != nil { return NewSQLError(CRServerGone, SSUnknownSQLState, "%v", err) } return nil diff --git a/go/mysql/server.go b/go/mysql/server.go index 6b634b9c8cc..bbe6c9ac5d4 100644 --- a/go/mysql/server.go +++ b/go/mysql/server.go @@ -185,6 +185,10 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32, acceptTime time.Ti if x := recover(); x != nil { log.Errorf("mysql_server caught panic:\n%v\n%s", x, tb.Stack(4)) } + // We call flush here in case there's a premature return after + // startWriterBuffering is called + c.flush() + conn.Close() }() @@ -204,7 +208,7 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32, acceptTime time.Ti // 
Wait for the client response. This has to be a direct read,
 	// so we don't buffer the TLS negotiation packets.
-	response, err := c.readPacketDirect()
+	response, err := c.readEphemeralPacketDirect()
 	if err != nil {
 		// Don't log EOF errors. They cause too much spam, same as main read loop.
 		if err != io.EOF {
@@ -218,6 +222,8 @@
 		return
 	}
 
+	c.recycleReadPacket()
+
 	if c.Capabilities&CapabilityClientSSL > 0 {
 		// SSL was enabled. We need to re-read the auth packet.
 		response, err = c.readEphemeralPacket()
@@ -341,6 +347,11 @@
 				return
 			}
 		case ComQuery:
+			// flush is called at the end of this block.
+			// We cannot encapsulate it with a defer inside a func because
+			// we have to return from this func if it fails.
+			c.startWriterBuffering()
+
 			queryStart := time.Now()
 			query := c.parseComQuery(data)
 			c.recycleReadPacket()
@@ -380,26 +391,30 @@
 					log.Errorf("Error writing query error to %s: %v", c, werr)
 					return
 				}
-				continue
-			}
-
-			if err != nil {
-				// We can't send an error in the middle of a stream.
-				// All we can do is abort the send, which will cause a 2013.
-				log.Errorf("Error in the middle of a stream to %s: %v", c, err)
-				return
-			}
-
-			// Send the end packet only sendFinished is false (results were streamed).
-			if !sendFinished {
-				if err := c.writeEndResult(false); err != nil {
-					log.Errorf("Error writing result to %s: %v", c, err)
+			} else {
+				if err != nil {
+					// We can't send an error in the middle of a stream.
+					// All we can do is abort the send, which will cause a 2013.
+					log.Errorf("Error in the middle of a stream to %s: %v", c, err)
+					return
+				}
+
+				// Send the end packet only if sendFinished is false (results were streamed). 
+ if !sendFinished { + if err := c.writeEndResult(false); err != nil { + log.Errorf("Error writing result to %s: %v", c, err) + return + } + } } timings.Record(queryTimingKey, queryStart) + if err := c.flush(); err != nil { + log.Errorf("Conn %v: Flush() failed: %v", c.ID(), err) + return + } + case ComPing: // No payload to that one, just return OKPacket. c.recycleReadPacket() @@ -439,7 +454,6 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32, acceptTime time.Ti log.Errorf("Error writing error packet to %s: %s", c, err) return } - } } } @@ -537,7 +551,7 @@ func (c *Conn) writeHandshakeV10(serverVersion string, authServer AuthServer, en return nil, fmt.Errorf("error building Handshake packet: got %v bytes expected %v", pos, len(data)) } - if err := c.writeEphemeralPacket(true); err != nil { + if err := c.writeEphemeralPacket(); err != nil { return nil, err } @@ -593,8 +607,7 @@ func (l *Listener) parseClientHandshakePacket(c *Conn, firstTime bool, data []by // Need to switch to TLS, and then re-read the packet. 
conn := tls.Server(c.conn, l.TLSConfig) c.conn = conn - c.reader.Reset(conn) - c.writer.Reset(conn) + c.bufferedReader.Reset(conn) c.Capabilities |= CapabilityClientSSL return "", "", nil, nil } @@ -689,5 +702,5 @@ func (c *Conn) writeAuthSwitchRequest(pluginName string, pluginData []byte) erro if pos != len(data) { return fmt.Errorf("error building AuthSwitchRequestPacket packet: got %v bytes expected %v", pos, len(data)) } - return c.writeEphemeralPacket(true) + return c.writeEphemeralPacket() } diff --git a/go/mysql/server_test.go b/go/mysql/server_test.go index ee51eeeea2a..bdfd8bc390e 100644 --- a/go/mysql/server_test.go +++ b/go/mysql/server_test.go @@ -143,6 +143,22 @@ func (th *testHandler) ComQuery(c *Conn, query string, callback func(*sqltypes.R }, }) default: + if strings.HasPrefix(query, benchmarkQueryPrefix) { + callback(&sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "result", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(query)), + }, + }, + }) + } + callback(&sqltypes.Result{}) } return nil diff --git a/go/sqltypes/arithmetic.go b/go/sqltypes/arithmetic.go index b91cdf9a489..9b02ec56048 100644 --- a/go/sqltypes/arithmetic.go +++ b/go/sqltypes/arithmetic.go @@ -248,7 +248,7 @@ func ToNative(v Value) (interface{}, error) { return ToUint64(v) case v.IsFloat(): return ToFloat64(v) - case v.IsQuoted() || v.Type() == Decimal: + case v.IsQuoted() || v.Type() == Bit || v.Type() == Decimal: out = v.val case v.Type() == Expression: err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v cannot be converted to a go type", v) diff --git a/go/sqltypes/type.go b/go/sqltypes/type.go index f2032da33bb..cf1bed67a42 100644 --- a/go/sqltypes/type.go +++ b/go/sqltypes/type.go @@ -66,7 +66,7 @@ func IsFloat(t querypb.Type) bool { // IsQuoted returns true if querypb.Type is a quoted text or binary. // If you have a Value object, use its member function. 
func IsQuoted(t querypb.Type) bool { - return int(t)&flagIsQuoted == flagIsQuoted + return (int(t)&flagIsQuoted == flagIsQuoted) && t != Bit } // IsText returns true if querypb.Type is a text. @@ -95,7 +95,7 @@ func isNumber(t querypb.Type) bool { // instead. // The following conditions are non-overlapping // and cover all types: IsSigned(), IsUnsigned(), -// IsFloat(), IsQuoted(), Null, Decimal, Expression. +// IsFloat(), IsQuoted(), Null, Decimal, Expression, Bit // Also, IsIntegral() == (IsSigned()||IsUnsigned()). // TestCategory needs to be updated accordingly if // you add a new type. diff --git a/go/sqltypes/type_test.go b/go/sqltypes/type_test.go index 74f6cbc1a88..08aed75c81b 100644 --- a/go/sqltypes/type_test.go +++ b/go/sqltypes/type_test.go @@ -192,7 +192,7 @@ func TestCategory(t *testing.T) { } matched = true } - if typ == Null || typ == Decimal || typ == Expression { + if typ == Null || typ == Decimal || typ == Expression || typ == Bit { if matched { t.Errorf("%v matched more than one category", typ) } diff --git a/go/sqltypes/value.go b/go/sqltypes/value.go index 6e64263905b..b788700be81 100644 --- a/go/sqltypes/value.go +++ b/go/sqltypes/value.go @@ -74,7 +74,7 @@ func NewValue(typ querypb.Type, val []byte) (v Value, err error) { return NULL, err } return MakeTrusted(typ, val), nil - case IsQuoted(typ) || typ == Null: + case IsQuoted(typ) || typ == Bit || typ == Null: return MakeTrusted(typ, val), nil } // All other types are unsafe or invalid. 
@@ -205,7 +205,7 @@ func (v Value) String() string { if v.typ == Null { return "NULL" } - if v.IsQuoted() { + if v.IsQuoted() || v.typ == Bit { return fmt.Sprintf("%v(%q)", v.typ, v.val) } return fmt.Sprintf("%v(%s)", v.typ, v.val) @@ -218,6 +218,8 @@ func (v Value) EncodeSQL(b BinWriter) { b.Write(nullstr) case v.IsQuoted(): encodeBytesSQL(v.val, b) + case v.typ == Bit: + encodeBytesSQLBits(v.val, b) default: b.Write(v.val) } @@ -228,7 +230,7 @@ func (v Value) EncodeASCII(b BinWriter) { switch { case v.typ == Null: b.Write(nullstr) - case v.IsQuoted(): + case v.IsQuoted() || v.typ == Bit: encodeBytesASCII(v.val, b) default: b.Write(v.val) @@ -279,7 +281,7 @@ func (v Value) IsBinary() bool { // It's not a complete implementation. func (v Value) MarshalJSON() ([]byte, error) { switch { - case v.IsQuoted(): + case v.IsQuoted() || v.typ == Bit: return json.Marshal(v.ToString()) case v.typ == Null: return nullstr, nil @@ -333,6 +335,14 @@ func encodeBytesSQL(val []byte, b BinWriter) { b.Write(buf.Bytes()) } +func encodeBytesSQLBits(val []byte, b BinWriter) { + fmt.Fprint(b, "b'") + for _, ch := range val { + fmt.Fprintf(b, "%08b", ch) + } + fmt.Fprint(b, "'") +} + func encodeBytesASCII(val []byte, b BinWriter) { buf := &bytes2.Buffer{} buf.WriteByte('\'') diff --git a/go/sqltypes/value_test.go b/go/sqltypes/value_test.go index 73df210c50b..2a76058bb33 100644 --- a/go/sqltypes/value_test.go +++ b/go/sqltypes/value_test.go @@ -257,7 +257,7 @@ func TestIntegralValue(t *testing.T) { } } -func TestInerfaceValue(t *testing.T) { +func TestInterfaceValue(t *testing.T) { testcases := []struct { in interface{} out Value @@ -382,6 +382,10 @@ func TestEncode(t *testing.T) { in: TestValue(VarChar, "\x00'\"\b\n\r\t\x1A\\"), outSQL: "'\\0\\'\\\"\\b\\n\\r\\t\\Z\\\\'", outASCII: "'ACciCAoNCRpc'", + }, { + in: TestValue(Bit, "a"), + outSQL: "b'01100001'", + outASCII: "'YQ=='", }} for _, tcase := range testcases { buf := &bytes.Buffer{} diff --git a/go/vt/mysqlctl/reparent.go 
b/go/vt/mysqlctl/reparent.go index 9c228f856bf..803838a4d05 100644 --- a/go/vt/mysqlctl/reparent.go +++ b/go/vt/mysqlctl/reparent.go @@ -117,6 +117,12 @@ func (mysqld *Mysqld) PromoteSlave(hookExtraEnv map[string]string) (mysql.Positi cmds := []string{ conn.StopSlaveCommand(), "RESET SLAVE ALL", // "ALL" makes it forget master host:port. + // When using semi-sync and GTID, a replica first connects to the new master with a given GTID set, + // it can take a long time to scan the current binlog file to find the corresponding position. + // This can cause commits that occur soon after the master is promoted to take a long time waiting + // for a semi-sync ACK, since replication is not fully set up. + // More details in: https://github.com/vitessio/vitess/issues/4161 + "FLUSH BINARY LOGS", } if err := mysqld.executeSuperQueryListConn(ctx, conn, cmds); err != nil { diff --git a/go/vt/servenv/pid_file.go b/go/vt/servenv/pid_file.go index ec06a4a0a95..09224fd63c0 100644 --- a/go/vt/servenv/pid_file.go +++ b/go/vt/servenv/pid_file.go @@ -27,17 +27,20 @@ import ( var pidFile = flag.String("pid_file", "", "If set, the process will write its pid to the named file, and delete it on graceful shutdown.") func init() { + pidFileCreated := false + // Create pid file after flags are parsed. 
onInit(func() { if *pidFile == "" { return } - file, err := os.Create(*pidFile) + file, err := os.OpenFile(*pidFile, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) if err != nil { log.Errorf("Unable to create pid file '%s': %v", *pidFile, err) return } + pidFileCreated = true fmt.Fprintln(file, os.Getpid()) file.Close() }) @@ -47,6 +50,9 @@ func init() { if *pidFile == "" { return } + if !pidFileCreated { + return + } if err := os.Remove(*pidFile); err != nil { log.Errorf("Unable to remove pid file '%s': %v", *pidFile, err) diff --git a/go/vt/sqlparser/analyzer_test.go b/go/vt/sqlparser/analyzer_test.go index ee798c7ecce..68e24d75ca5 100644 --- a/go/vt/sqlparser/analyzer_test.go +++ b/go/vt/sqlparser/analyzer_test.go @@ -265,6 +265,12 @@ func TestNewPlanValue(t *testing.T) { Val: []byte("strval"), }, out: sqltypes.PlanValue{Value: sqltypes.NewVarBinary("strval")}, + }, { + in: &SQLVal{ + Type: BitVal, + Val: []byte("01100001"), + }, + err: "expression is too complex", }, { in: &SQLVal{ Type: HexVal, @@ -323,14 +329,14 @@ func TestNewPlanValue(t *testing.T) { }} for _, tc := range tcases { got, err := NewPlanValue(tc.in) - if err != nil { + if tc.err != "" { if !strings.Contains(err.Error(), tc.err) { t.Errorf("NewPlanValue(%s) error: %v, want '%s'", String(tc.in), err, tc.err) } continue } - if tc.err != "" { - t.Errorf("NewPlanValue(%s) error: nil, want '%s'", String(tc.in), tc.err) + if err != nil { + t.Error(err) continue } if !reflect.DeepEqual(got, tc.out) { diff --git a/go/vt/sqlparser/ast.go b/go/vt/sqlparser/ast.go index 7d987a52145..1cf53209db8 100644 --- a/go/vt/sqlparser/ast.go +++ b/go/vt/sqlparser/ast.go @@ -24,6 +24,7 @@ import ( "fmt" "io" "strings" + "sync" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/log" @@ -33,6 +34,41 @@ import ( vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) +// parserPool is a pool for parser objects. +var parserPool = sync.Pool{} + +// zeroParser is a zero-initialized parser to help reinitialize the parser for pooling. 
+var zeroParser = *(yyNewParser().(*yyParserImpl)) + +// yyParsePooled is a wrapper around yyParse that pools the parser objects. There isn't a +// particularly good reason to use yyParse directly, since it immediately discards its parser. What +// would be ideal down the line is to actually pool the stacks themselves rather than the parser +// objects, as per https://github.com/cznic/goyacc/blob/master/main.go. However, absent an upstream +// change to goyacc, this is the next best option. +// +// N.B: Parser pooling means that you CANNOT take references directly to parse stack variables (e.g. +// $$ = &$4) in sql.y rules. You must instead add an intermediate reference like so: +// showCollationFilterOpt := $4 +// $$ = &Show{Type: string($2), ShowCollationFilterOpt: &showCollationFilterOpt} +func yyParsePooled(yylex yyLexer) int { + // Being very particular about using the base type and not an interface type b/c we depend on + // the implementation to know how to reinitialize the parser. + var parser *yyParserImpl + + i := parserPool.Get() + if i != nil { + parser = i.(*yyParserImpl) + } else { + parser = yyNewParser().(*yyParserImpl) + } + + defer func() { + *parser = zeroParser + parserPool.Put(parser) + }() + return parser.Parse(yylex) +} + // Instructions for creating new types: If a type // needs to satisfy an interface, declare that function // along with that interface. This will help users @@ -51,7 +87,7 @@ import ( // error is ignored and the DDL is returned anyway. func Parse(sql string) (Statement, error) { tokenizer := NewStringTokenizer(sql) - if yyParse(tokenizer) != 0 { + if yyParsePooled(tokenizer) != 0 { if tokenizer.partialDDL != nil { log.Warningf("ignoring error parsing DDL '%s': %v", sql, tokenizer.LastError) tokenizer.ParseTree = tokenizer.partialDDL @@ -69,7 +105,7 @@ func Parse(sql string) (Statement, error) { // partially parsed DDL statements. 
func ParseStrictDDL(sql string) (Statement, error) { tokenizer := NewStringTokenizer(sql) - if yyParse(tokenizer) != 0 { + if yyParsePooled(tokenizer) != 0 { return nil, tokenizer.LastError } if tokenizer.ParseTree == nil { @@ -104,7 +140,7 @@ func parseNext(tokenizer *Tokenizer, strict bool) (Statement, error) { tokenizer.reset() tokenizer.multi = true - if yyParse(tokenizer) != 0 { + if yyParsePooled(tokenizer) != 0 { if tokenizer.partialDDL != nil && !strict { tokenizer.ParseTree = tokenizer.partialDDL return tokenizer.ParseTree, nil diff --git a/go/vt/sqlparser/normalizer_test.go b/go/vt/sqlparser/normalizer_test.go index edf012e561a..7aa24814d55 100644 --- a/go/vt/sqlparser/normalizer_test.go +++ b/go/vt/sqlparser/normalizer_test.go @@ -117,6 +117,16 @@ func TestNormalize(t *testing.T) { in: "update a set v1 = 0x1234", outstmt: "update a set v1 = 0x1234", outbv: map[string]*querypb.BindVariable{}, + }, { + // Bin value does not convert + in: "select * from t where v1 = b'11'", + outstmt: "select * from t where v1 = B'11'", + outbv: map[string]*querypb.BindVariable{}, + }, { + // Bin value does not convert for DMLs + in: "update a set v1 = b'11'", + outstmt: "update a set v1 = B'11'", + outbv: map[string]*querypb.BindVariable{}, }, { // Values up to len 256 will reuse. in: fmt.Sprintf("select * from t where v1 = '%256s' and v2 = '%256s'", "a", "a"), diff --git a/go/vt/sqlparser/parse_test.go b/go/vt/sqlparser/parse_test.go index ab77ade7d89..c86abe01d98 100644 --- a/go/vt/sqlparser/parse_test.go +++ b/go/vt/sqlparser/parse_test.go @@ -19,7 +19,9 @@ package sqlparser import ( "bytes" "fmt" + "math/rand" "strings" + "sync" "testing" ) @@ -1356,6 +1358,36 @@ func TestValid(t *testing.T) { } } +// Ensure there is no corruption from using a pooled yyParserImpl in Parse. 
+func TestValidParallel(t *testing.T) { + parallelism := 100 + numIters := 1000 + + wg := sync.WaitGroup{} + wg.Add(parallelism) + for i := 0; i < parallelism; i++ { + go func() { + defer wg.Done() + for j := 0; j < numIters; j++ { + tcase := validSQL[rand.Intn(len(validSQL))] + if tcase.output == "" { + tcase.output = tcase.input + } + tree, err := Parse(tcase.input) + if err != nil { + t.Errorf("Parse(%q) err: %v, want nil", tcase.input, err) + continue + } + out := String(tree) + if out != tcase.output { + t.Errorf("Parse(%q) = %q, want: %q", tcase.input, out, tcase.output) + } + } + }() + } + wg.Wait() +} + func TestCaseSensitivity(t *testing.T) { validSQL := []struct { input string @@ -2162,8 +2194,36 @@ func TestErrors(t *testing.T) { // BenchmarkParse1-4 100000 16334 ns/op // BenchmarkParse2-4 30000 44121 ns/op +// Benchmark run on 9/3/18, comparing pooled parser performance. +// +// benchmark old ns/op new ns/op delta +// BenchmarkNormalize-4 2540 2533 -0.28% +// BenchmarkParse1-4 18269 13330 -27.03% +// BenchmarkParse2-4 46703 41255 -11.67% +// BenchmarkParse2Parallel-4 22246 20707 -6.92% +// BenchmarkParse3-4 4064743 4083135 +0.45% +// +// benchmark old allocs new allocs delta +// BenchmarkNormalize-4 27 27 +0.00% +// BenchmarkParse1-4 75 74 -1.33% +// BenchmarkParse2-4 264 263 -0.38% +// BenchmarkParse2Parallel-4 176 175 -0.57% +// BenchmarkParse3-4 360 361 +0.28% +// +// benchmark old bytes new bytes delta +// BenchmarkNormalize-4 821 821 +0.00% +// BenchmarkParse1-4 22776 2307 -89.87% +// BenchmarkParse2-4 28352 7881 -72.20% +// BenchmarkParse2Parallel-4 25712 5235 -79.64% +// BenchmarkParse3-4 6352082 6336307 -0.25% + +const ( + sql1 = "select 'abcd', 20, 30.0, eid from a where 1=eid and name='3'" + sql2 = "select aaaa, bbb, ccc, ddd, eeee, ffff, gggg, hhhh, iiii from tttt, ttt1, ttt3 where aaaa = bbbb and bbbb = cccc and dddd+1 = eeee group by fff, gggg having hhhh = iiii and iiii = jjjj order by kkkk, llll limit 3, 4" +) + func BenchmarkParse1(b 
*testing.B) {
-	sql := "select 'abcd', 20, 30.0, eid from a where 1=eid and name='3'"
+	sql := sql1
 	for i := 0; i < b.N; i++ {
 		ast, err := Parse(sql)
 		if err != nil {
@@ -2174,7 +2234,7 @@ func BenchmarkParse1(b *testing.B) {
 }
 
 func BenchmarkParse2(b *testing.B) {
-	sql := "select aaaa, bbb, ccc, ddd, eeee, ffff, gggg, hhhh, iiii from tttt, ttt1, ttt3 where aaaa = bbbb and bbbb = cccc and dddd+1 = eeee group by fff, gggg having hhhh = iiii and iiii = jjjj order by kkkk, llll limit 3, 4"
+	sql := sql2
 	for i := 0; i < b.N; i++ {
 		ast, err := Parse(sql)
 		if err != nil {
@@ -2184,6 +2244,19 @@
 	}
 }
 
+func BenchmarkParse2Parallel(b *testing.B) {
+	sql := sql2
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			ast, err := Parse(sql)
+			if err != nil {
+				b.Fatal(err)
+			}
+			_ = ast
+		}
+	})
+}
+
 var benchQuery string
 
 func init() {
diff --git a/go/vt/sqlparser/sql.go b/go/vt/sqlparser/sql.go
index 5747b67fc79..a3f59678552 100644
--- a/go/vt/sqlparser/sql.go
+++ b/go/vt/sqlparser/sql.go
@@ -4322,594 +4322,596 @@ yydefault:
 		yyDollar = yyS[yypt-4 : yypt+1]
 		//line sql.y:1521
 		{
-			yyVAL.statement = &Show{Type: string(yyDollar[2].bytes), ShowCollationFilterOpt: &yyDollar[4].expr}
+			// Cannot dereference $4 directly, or else the parser stack cannot be pooled. 
See yyParsePooled + showCollationFilterOpt := yyDollar[4].expr + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes), ShowCollationFilterOpt: &showCollationFilterOpt} } case 271: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1525 + //line sql.y:1527 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes), OnTable: yyDollar[4].tableName} } case 272: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1529 + //line sql.y:1531 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } case 273: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1533 + //line sql.y:1535 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } case 274: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1537 + //line sql.y:1539 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } case 275: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1541 + //line sql.y:1543 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } case 276: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1545 + //line sql.y:1547 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } case 277: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1555 + //line sql.y:1557 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } case 278: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1561 + //line sql.y:1563 { yyVAL.str = string(yyDollar[1].bytes) } case 279: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1565 + //line sql.y:1567 { yyVAL.str = string(yyDollar[1].bytes) } case 280: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1571 + //line sql.y:1573 { yyVAL.str = "" } case 281: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1575 + //line sql.y:1577 { yyVAL.str = "full " } case 282: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1581 + //line sql.y:1583 { yyVAL.str = "" } case 283: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1585 + //line sql.y:1587 { yyVAL.str = yyDollar[2].tableIdent.v } case 284: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1589 + //line sql.y:1591 { yyVAL.str = 
yyDollar[2].tableIdent.v } case 285: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1595 + //line sql.y:1597 { yyVAL.showFilter = nil } case 286: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1599 + //line sql.y:1601 { yyVAL.showFilter = &ShowFilter{Like: string(yyDollar[2].bytes)} } case 287: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1603 + //line sql.y:1605 { yyVAL.showFilter = &ShowFilter{Filter: yyDollar[2].expr} } case 288: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1609 + //line sql.y:1611 { yyVAL.str = "" } case 289: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1613 + //line sql.y:1615 { yyVAL.str = SessionStr } case 290: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1617 + //line sql.y:1619 { yyVAL.str = GlobalStr } case 291: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1623 + //line sql.y:1625 { yyVAL.statement = &Use{DBName: yyDollar[2].tableIdent} } case 292: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1627 + //line sql.y:1629 { yyVAL.statement = &Use{DBName: TableIdent{v: ""}} } case 293: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1633 + //line sql.y:1635 { yyVAL.statement = &Begin{} } case 294: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1637 + //line sql.y:1639 { yyVAL.statement = &Begin{} } case 295: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1643 + //line sql.y:1645 { yyVAL.statement = &Commit{} } case 296: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1649 + //line sql.y:1651 { yyVAL.statement = &Rollback{} } case 297: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1655 + //line sql.y:1657 { yyVAL.statement = &OtherRead{} } case 298: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1659 + //line sql.y:1661 { yyVAL.statement = &OtherRead{} } case 299: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1663 + //line sql.y:1665 { yyVAL.statement = &OtherRead{} } case 300: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1667 + //line sql.y:1669 { yyVAL.statement = &OtherAdmin{} } case 301: yyDollar = yyS[yypt-2 : yypt+1] - //line 
sql.y:1671 + //line sql.y:1673 { yyVAL.statement = &OtherAdmin{} } case 302: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1675 + //line sql.y:1677 { yyVAL.statement = &OtherAdmin{} } case 303: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1679 + //line sql.y:1681 { yyVAL.statement = &OtherAdmin{} } case 304: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1684 + //line sql.y:1686 { setAllowComments(yylex, true) } case 305: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1688 + //line sql.y:1690 { yyVAL.bytes2 = yyDollar[2].bytes2 setAllowComments(yylex, false) } case 306: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1694 + //line sql.y:1696 { yyVAL.bytes2 = nil } case 307: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1698 + //line sql.y:1700 { yyVAL.bytes2 = append(yyDollar[1].bytes2, yyDollar[2].bytes) } case 308: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1704 + //line sql.y:1706 { yyVAL.str = UnionStr } case 309: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1708 + //line sql.y:1710 { yyVAL.str = UnionAllStr } case 310: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1712 + //line sql.y:1714 { yyVAL.str = UnionDistinctStr } case 311: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1717 + //line sql.y:1719 { yyVAL.str = "" } case 312: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1721 + //line sql.y:1723 { yyVAL.str = SQLNoCacheStr } case 313: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1725 + //line sql.y:1727 { yyVAL.str = SQLCacheStr } case 314: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1730 + //line sql.y:1732 { yyVAL.str = "" } case 315: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1734 + //line sql.y:1736 { yyVAL.str = DistinctStr } case 316: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1739 + //line sql.y:1741 { yyVAL.str = "" } case 317: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1743 + //line sql.y:1745 { yyVAL.str = StraightJoinHint } case 318: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1748 + //line sql.y:1750 { 
yyVAL.selectExprs = nil } case 319: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1752 + //line sql.y:1754 { yyVAL.selectExprs = yyDollar[1].selectExprs } case 320: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1758 + //line sql.y:1760 { yyVAL.selectExprs = SelectExprs{yyDollar[1].selectExpr} } case 321: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1762 + //line sql.y:1764 { yyVAL.selectExprs = append(yyVAL.selectExprs, yyDollar[3].selectExpr) } case 322: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1768 + //line sql.y:1770 { yyVAL.selectExpr = &StarExpr{} } case 323: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1772 + //line sql.y:1774 { yyVAL.selectExpr = &AliasedExpr{Expr: yyDollar[1].expr, As: yyDollar[2].colIdent} } case 324: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1776 + //line sql.y:1778 { yyVAL.selectExpr = &StarExpr{TableName: TableName{Name: yyDollar[1].tableIdent}} } case 325: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:1780 + //line sql.y:1782 { yyVAL.selectExpr = &StarExpr{TableName: TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent}} } case 326: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1785 + //line sql.y:1787 { yyVAL.colIdent = ColIdent{} } case 327: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1789 + //line sql.y:1791 { yyVAL.colIdent = yyDollar[1].colIdent } case 328: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1793 + //line sql.y:1795 { yyVAL.colIdent = yyDollar[2].colIdent } case 330: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1800 + //line sql.y:1802 { yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) } case 331: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1805 + //line sql.y:1807 { yyVAL.tableExprs = TableExprs{&AliasedTableExpr{Expr: TableName{Name: NewTableIdent("dual")}}} } case 332: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1809 + //line sql.y:1811 { yyVAL.tableExprs = yyDollar[2].tableExprs } case 333: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1815 + //line 
sql.y:1817 { yyVAL.tableExprs = TableExprs{yyDollar[1].tableExpr} } case 334: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1819 + //line sql.y:1821 { yyVAL.tableExprs = append(yyVAL.tableExprs, yyDollar[3].tableExpr) } case 337: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1829 + //line sql.y:1831 { yyVAL.tableExpr = yyDollar[1].aliasedTableName } case 338: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1833 + //line sql.y:1835 { yyVAL.tableExpr = &AliasedTableExpr{Expr: yyDollar[1].subquery, As: yyDollar[3].tableIdent} } case 339: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1837 + //line sql.y:1839 { yyVAL.tableExpr = &ParenTableExpr{Exprs: yyDollar[2].tableExprs} } case 340: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1843 + //line sql.y:1845 { yyVAL.aliasedTableName = &AliasedTableExpr{Expr: yyDollar[1].tableName, As: yyDollar[2].tableIdent, Hints: yyDollar[3].indexHints} } case 341: yyDollar = yyS[yypt-7 : yypt+1] - //line sql.y:1847 + //line sql.y:1849 { yyVAL.aliasedTableName = &AliasedTableExpr{Expr: yyDollar[1].tableName, Partitions: yyDollar[4].partitions, As: yyDollar[6].tableIdent, Hints: yyDollar[7].indexHints} } case 342: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1853 + //line sql.y:1855 { yyVAL.columns = Columns{yyDollar[1].colIdent} } case 343: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1857 + //line sql.y:1859 { yyVAL.columns = append(yyVAL.columns, yyDollar[3].colIdent) } case 344: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1863 + //line sql.y:1865 { yyVAL.partitions = Partitions{yyDollar[1].colIdent} } case 345: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1867 + //line sql.y:1869 { yyVAL.partitions = append(yyVAL.partitions, yyDollar[3].colIdent) } case 346: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1880 + //line sql.y:1882 { yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr, Condition: yyDollar[4].joinCondition} } case 347: yyDollar = yyS[yypt-4 
: yypt+1] - //line sql.y:1884 + //line sql.y:1886 { yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr, Condition: yyDollar[4].joinCondition} } case 348: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1888 + //line sql.y:1890 { yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr, Condition: yyDollar[4].joinCondition} } case 349: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1892 + //line sql.y:1894 { yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr} } case 350: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1898 + //line sql.y:1900 { yyVAL.joinCondition = JoinCondition{On: yyDollar[2].expr} } case 351: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1900 + //line sql.y:1902 { yyVAL.joinCondition = JoinCondition{Using: yyDollar[3].columns} } case 352: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1904 + //line sql.y:1906 { yyVAL.joinCondition = JoinCondition{} } case 353: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1906 + //line sql.y:1908 { yyVAL.joinCondition = yyDollar[1].joinCondition } case 354: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1910 + //line sql.y:1912 { yyVAL.joinCondition = JoinCondition{} } case 355: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1912 + //line sql.y:1914 { yyVAL.joinCondition = JoinCondition{On: yyDollar[2].expr} } case 356: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1915 + //line sql.y:1917 { yyVAL.empty = struct{}{} } case 357: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1917 + //line sql.y:1919 { yyVAL.empty = struct{}{} } case 358: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1920 + //line sql.y:1922 { yyVAL.tableIdent = NewTableIdent("") } case 359: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1924 + //line sql.y:1926 { yyVAL.tableIdent = yyDollar[1].tableIdent } case 360: yyDollar = yyS[yypt-2 : 
yypt+1] - //line sql.y:1928 + //line sql.y:1930 { yyVAL.tableIdent = yyDollar[2].tableIdent } case 362: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1935 + //line sql.y:1937 { yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) } case 363: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1941 + //line sql.y:1943 { yyVAL.str = JoinStr } case 364: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1945 + //line sql.y:1947 { yyVAL.str = JoinStr } case 365: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1949 + //line sql.y:1951 { yyVAL.str = JoinStr } case 366: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1955 + //line sql.y:1957 { yyVAL.str = StraightJoinStr } case 367: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1961 + //line sql.y:1963 { yyVAL.str = LeftJoinStr } case 368: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1965 + //line sql.y:1967 { yyVAL.str = LeftJoinStr } case 369: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1969 + //line sql.y:1971 { yyVAL.str = RightJoinStr } case 370: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1973 + //line sql.y:1975 { yyVAL.str = RightJoinStr } case 371: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1979 + //line sql.y:1981 { yyVAL.str = NaturalJoinStr } case 372: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1983 + //line sql.y:1985 { if yyDollar[2].str == LeftJoinStr { yyVAL.str = NaturalLeftJoinStr @@ -4919,457 +4921,457 @@ yydefault: } case 373: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1993 + //line sql.y:1995 { yyVAL.tableName = yyDollar[2].tableName } case 374: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1997 + //line sql.y:1999 { yyVAL.tableName = yyDollar[1].tableName } case 375: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2003 + //line sql.y:2005 { yyVAL.tableName = TableName{Name: yyDollar[1].tableIdent} } case 376: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2007 + //line sql.y:2009 { yyVAL.tableName = TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent} } case 
377: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2012 + //line sql.y:2014 { yyVAL.indexHints = nil } case 378: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:2016 + //line sql.y:2018 { yyVAL.indexHints = &IndexHints{Type: UseStr, Indexes: yyDollar[4].columns} } case 379: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:2020 + //line sql.y:2022 { yyVAL.indexHints = &IndexHints{Type: IgnoreStr, Indexes: yyDollar[4].columns} } case 380: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:2024 + //line sql.y:2026 { yyVAL.indexHints = &IndexHints{Type: ForceStr, Indexes: yyDollar[4].columns} } case 381: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2029 + //line sql.y:2031 { yyVAL.expr = nil } case 382: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2033 + //line sql.y:2035 { yyVAL.expr = yyDollar[2].expr } case 383: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2039 + //line sql.y:2041 { yyVAL.expr = yyDollar[1].expr } case 384: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2043 + //line sql.y:2045 { yyVAL.expr = &AndExpr{Left: yyDollar[1].expr, Right: yyDollar[3].expr} } case 385: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2047 + //line sql.y:2049 { yyVAL.expr = &OrExpr{Left: yyDollar[1].expr, Right: yyDollar[3].expr} } case 386: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2051 + //line sql.y:2053 { yyVAL.expr = &NotExpr{Expr: yyDollar[2].expr} } case 387: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2055 + //line sql.y:2057 { yyVAL.expr = &IsExpr{Operator: yyDollar[3].str, Expr: yyDollar[1].expr} } case 388: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2059 + //line sql.y:2061 { yyVAL.expr = yyDollar[1].expr } case 389: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2063 + //line sql.y:2065 { yyVAL.expr = &Default{ColName: yyDollar[2].str} } case 390: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2069 + //line sql.y:2071 { yyVAL.str = "" } case 391: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2073 + //line sql.y:2075 { yyVAL.str = 
string(yyDollar[2].bytes) } case 392: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2079 + //line sql.y:2081 { yyVAL.boolVal = BoolVal(true) } case 393: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2083 + //line sql.y:2085 { yyVAL.boolVal = BoolVal(false) } case 394: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2089 + //line sql.y:2091 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: yyDollar[2].str, Right: yyDollar[3].expr} } case 395: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2093 + //line sql.y:2095 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: InStr, Right: yyDollar[3].colTuple} } case 396: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:2097 + //line sql.y:2099 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: NotInStr, Right: yyDollar[4].colTuple} } case 397: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:2101 + //line sql.y:2103 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: LikeStr, Right: yyDollar[3].expr, Escape: yyDollar[4].expr} } case 398: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:2105 + //line sql.y:2107 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: NotLikeStr, Right: yyDollar[4].expr, Escape: yyDollar[5].expr} } case 399: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2109 + //line sql.y:2111 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: RegexpStr, Right: yyDollar[3].expr} } case 400: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:2113 + //line sql.y:2115 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: NotRegexpStr, Right: yyDollar[4].expr} } case 401: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:2117 + //line sql.y:2119 { yyVAL.expr = &RangeCond{Left: yyDollar[1].expr, Operator: BetweenStr, From: yyDollar[3].expr, To: yyDollar[5].expr} } case 402: yyDollar = yyS[yypt-6 : yypt+1] - //line sql.y:2121 + //line sql.y:2123 { yyVAL.expr = &RangeCond{Left: yyDollar[1].expr, Operator: NotBetweenStr, From: 
yyDollar[4].expr, To: yyDollar[6].expr} } case 403: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2125 + //line sql.y:2127 { yyVAL.expr = &ExistsExpr{Subquery: yyDollar[2].subquery} } case 404: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2131 + //line sql.y:2133 { yyVAL.str = IsNullStr } case 405: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2135 + //line sql.y:2137 { yyVAL.str = IsNotNullStr } case 406: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2139 + //line sql.y:2141 { yyVAL.str = IsTrueStr } case 407: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2143 + //line sql.y:2145 { yyVAL.str = IsNotTrueStr } case 408: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2147 + //line sql.y:2149 { yyVAL.str = IsFalseStr } case 409: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2151 + //line sql.y:2153 { yyVAL.str = IsNotFalseStr } case 410: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2157 + //line sql.y:2159 { yyVAL.str = EqualStr } case 411: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2161 + //line sql.y:2163 { yyVAL.str = LessThanStr } case 412: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2165 + //line sql.y:2167 { yyVAL.str = GreaterThanStr } case 413: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2169 + //line sql.y:2171 { yyVAL.str = LessEqualStr } case 414: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2173 + //line sql.y:2175 { yyVAL.str = GreaterEqualStr } case 415: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2177 + //line sql.y:2179 { yyVAL.str = NotEqualStr } case 416: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2181 + //line sql.y:2183 { yyVAL.str = NullSafeEqualStr } case 417: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2186 + //line sql.y:2188 { yyVAL.expr = nil } case 418: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2190 + //line sql.y:2192 { yyVAL.expr = yyDollar[2].expr } case 419: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2196 + //line sql.y:2198 { yyVAL.colTuple = yyDollar[1].valTuple } case 420: yyDollar = yyS[yypt-1 
: yypt+1] - //line sql.y:2200 + //line sql.y:2202 { yyVAL.colTuple = yyDollar[1].subquery } case 421: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2204 + //line sql.y:2206 { yyVAL.colTuple = ListArg(yyDollar[1].bytes) } case 422: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2210 + //line sql.y:2212 { yyVAL.subquery = &Subquery{yyDollar[2].selStmt} } case 423: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2216 + //line sql.y:2218 { yyVAL.exprs = Exprs{yyDollar[1].expr} } case 424: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2220 + //line sql.y:2222 { yyVAL.exprs = append(yyDollar[1].exprs, yyDollar[3].expr) } case 425: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2226 + //line sql.y:2228 { yyVAL.expr = yyDollar[1].expr } case 426: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2230 + //line sql.y:2232 { yyVAL.expr = yyDollar[1].boolVal } case 427: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2234 + //line sql.y:2236 { yyVAL.expr = yyDollar[1].colName } case 428: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2238 + //line sql.y:2240 { yyVAL.expr = yyDollar[1].expr } case 429: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2242 + //line sql.y:2244 { yyVAL.expr = yyDollar[1].subquery } case 430: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2246 + //line sql.y:2248 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitAndStr, Right: yyDollar[3].expr} } case 431: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2250 + //line sql.y:2252 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitOrStr, Right: yyDollar[3].expr} } case 432: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2254 + //line sql.y:2256 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitXorStr, Right: yyDollar[3].expr} } case 433: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2258 + //line sql.y:2260 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: PlusStr, Right: yyDollar[3].expr} } case 434: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2262 + 
//line sql.y:2264 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: MinusStr, Right: yyDollar[3].expr} } case 435: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2266 + //line sql.y:2268 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: MultStr, Right: yyDollar[3].expr} } case 436: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2270 + //line sql.y:2272 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: DivStr, Right: yyDollar[3].expr} } case 437: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2274 + //line sql.y:2276 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: IntDivStr, Right: yyDollar[3].expr} } case 438: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2278 + //line sql.y:2280 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ModStr, Right: yyDollar[3].expr} } case 439: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2282 + //line sql.y:2284 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ModStr, Right: yyDollar[3].expr} } case 440: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2286 + //line sql.y:2288 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ShiftLeftStr, Right: yyDollar[3].expr} } case 441: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2290 + //line sql.y:2292 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ShiftRightStr, Right: yyDollar[3].expr} } case 442: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2294 + //line sql.y:2296 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].colName, Operator: JSONExtractOp, Right: yyDollar[3].expr} } case 443: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2298 + //line sql.y:2300 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].colName, Operator: JSONUnquoteExtractOp, Right: yyDollar[3].expr} } case 444: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2302 + //line sql.y:2304 { yyVAL.expr = &CollateExpr{Expr: yyDollar[1].expr, Charset: yyDollar[3].str} } case 445: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2306 + //line 
sql.y:2308 { yyVAL.expr = &UnaryExpr{Operator: BinaryStr, Expr: yyDollar[2].expr} } case 446: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2310 + //line sql.y:2312 { yyVAL.expr = &UnaryExpr{Operator: UBinaryStr, Expr: yyDollar[2].expr} } case 447: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2314 + //line sql.y:2316 { yyVAL.expr = &UnaryExpr{Operator: Utf8mb4Str, Expr: yyDollar[2].expr} } case 448: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2318 + //line sql.y:2320 { if num, ok := yyDollar[2].expr.(*SQLVal); ok && num.Type == IntVal { yyVAL.expr = num @@ -5379,7 +5381,7 @@ yydefault: } case 449: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2326 + //line sql.y:2328 { if num, ok := yyDollar[2].expr.(*SQLVal); ok && num.Type == IntVal { // Handle double negative @@ -5395,19 +5397,19 @@ yydefault: } case 450: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2340 + //line sql.y:2342 { yyVAL.expr = &UnaryExpr{Operator: TildaStr, Expr: yyDollar[2].expr} } case 451: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2344 + //line sql.y:2346 { yyVAL.expr = &UnaryExpr{Operator: BangStr, Expr: yyDollar[2].expr} } case 452: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2348 + //line sql.y:2350 { // This rule prevents the usage of INTERVAL // as a function. 
If support is needed for that, @@ -5417,259 +5419,259 @@ yydefault: } case 457: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:2366 + //line sql.y:2368 { yyVAL.expr = &FuncExpr{Name: yyDollar[1].colIdent, Exprs: yyDollar[3].selectExprs} } case 458: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:2370 + //line sql.y:2372 { yyVAL.expr = &FuncExpr{Name: yyDollar[1].colIdent, Distinct: true, Exprs: yyDollar[4].selectExprs} } case 459: yyDollar = yyS[yypt-6 : yypt+1] - //line sql.y:2374 + //line sql.y:2376 { yyVAL.expr = &FuncExpr{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].colIdent, Exprs: yyDollar[5].selectExprs} } case 460: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:2384 + //line sql.y:2386 { yyVAL.expr = &FuncExpr{Name: NewColIdent("left"), Exprs: yyDollar[3].selectExprs} } case 461: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:2388 + //line sql.y:2390 { yyVAL.expr = &FuncExpr{Name: NewColIdent("right"), Exprs: yyDollar[3].selectExprs} } case 462: yyDollar = yyS[yypt-6 : yypt+1] - //line sql.y:2392 + //line sql.y:2394 { yyVAL.expr = &ConvertExpr{Expr: yyDollar[3].expr, Type: yyDollar[5].convertType} } case 463: yyDollar = yyS[yypt-6 : yypt+1] - //line sql.y:2396 + //line sql.y:2398 { yyVAL.expr = &ConvertExpr{Expr: yyDollar[3].expr, Type: yyDollar[5].convertType} } case 464: yyDollar = yyS[yypt-6 : yypt+1] - //line sql.y:2400 + //line sql.y:2402 { yyVAL.expr = &ConvertUsingExpr{Expr: yyDollar[3].expr, Type: yyDollar[5].str} } case 465: yyDollar = yyS[yypt-6 : yypt+1] - //line sql.y:2404 + //line sql.y:2406 { yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: nil} } case 466: yyDollar = yyS[yypt-8 : yypt+1] - //line sql.y:2408 + //line sql.y:2410 { yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: yyDollar[7].expr} } case 467: yyDollar = yyS[yypt-8 : yypt+1] - //line sql.y:2412 + //line sql.y:2414 { yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: yyDollar[7].expr} 
} case 468: yyDollar = yyS[yypt-6 : yypt+1] - //line sql.y:2416 + //line sql.y:2418 { yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: nil} } case 469: yyDollar = yyS[yypt-8 : yypt+1] - //line sql.y:2420 + //line sql.y:2422 { yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: yyDollar[7].expr} } case 470: yyDollar = yyS[yypt-8 : yypt+1] - //line sql.y:2424 + //line sql.y:2426 { yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: yyDollar[7].expr} } case 471: yyDollar = yyS[yypt-9 : yypt+1] - //line sql.y:2428 + //line sql.y:2430 { yyVAL.expr = &MatchExpr{Columns: yyDollar[3].selectExprs, Expr: yyDollar[7].expr, Option: yyDollar[8].str} } case 472: yyDollar = yyS[yypt-7 : yypt+1] - //line sql.y:2432 + //line sql.y:2434 { yyVAL.expr = &GroupConcatExpr{Distinct: yyDollar[3].str, Exprs: yyDollar[4].selectExprs, OrderBy: yyDollar[5].orderBy, Separator: yyDollar[6].str} } case 473: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:2436 + //line sql.y:2438 { yyVAL.expr = &CaseExpr{Expr: yyDollar[2].expr, Whens: yyDollar[3].whens, Else: yyDollar[4].expr} } case 474: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:2440 + //line sql.y:2442 { yyVAL.expr = &ValuesFuncExpr{Name: yyDollar[3].colName} } case 475: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2450 + //line sql.y:2452 { yyVAL.expr = &FuncExpr{Name: NewColIdent("current_timestamp")} } case 476: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2454 + //line sql.y:2456 { yyVAL.expr = &FuncExpr{Name: NewColIdent("utc_timestamp")} } case 477: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2458 + //line sql.y:2460 { yyVAL.expr = &FuncExpr{Name: NewColIdent("utc_time")} } case 478: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2462 + //line sql.y:2464 { yyVAL.expr = &FuncExpr{Name: NewColIdent("utc_date")} } case 479: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2467 + //line sql.y:2469 { yyVAL.expr = &FuncExpr{Name: 
NewColIdent("localtime")} } case 480: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2472 + //line sql.y:2474 { yyVAL.expr = &FuncExpr{Name: NewColIdent("localtimestamp")} } case 481: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2477 + //line sql.y:2479 { yyVAL.expr = &FuncExpr{Name: NewColIdent("current_date")} } case 482: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2482 + //line sql.y:2484 { yyVAL.expr = &FuncExpr{Name: NewColIdent("current_time")} } case 485: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:2496 + //line sql.y:2498 { yyVAL.expr = &FuncExpr{Name: NewColIdent("if"), Exprs: yyDollar[3].selectExprs} } case 486: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:2500 + //line sql.y:2502 { yyVAL.expr = &FuncExpr{Name: NewColIdent("database"), Exprs: yyDollar[3].selectExprs} } case 487: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:2504 + //line sql.y:2506 { yyVAL.expr = &FuncExpr{Name: NewColIdent("mod"), Exprs: yyDollar[3].selectExprs} } case 488: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:2508 + //line sql.y:2510 { yyVAL.expr = &FuncExpr{Name: NewColIdent("replace"), Exprs: yyDollar[3].selectExprs} } case 489: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2514 + //line sql.y:2516 { yyVAL.str = "" } case 490: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2518 + //line sql.y:2520 { yyVAL.str = BooleanModeStr } case 491: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:2522 + //line sql.y:2524 { yyVAL.str = NaturalLanguageModeStr } case 492: yyDollar = yyS[yypt-7 : yypt+1] - //line sql.y:2526 + //line sql.y:2528 { yyVAL.str = NaturalLanguageModeWithQueryExpansionStr } case 493: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2530 + //line sql.y:2532 { yyVAL.str = QueryExpansionStr } case 494: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2536 + //line sql.y:2538 { yyVAL.str = string(yyDollar[1].bytes) } case 495: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2540 + //line sql.y:2542 { yyVAL.str = string(yyDollar[1].bytes) } case 496: yyDollar = 
yyS[yypt-2 : yypt+1] - //line sql.y:2546 + //line sql.y:2548 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} } case 497: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2550 + //line sql.y:2552 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal, Charset: yyDollar[3].str, Operator: CharacterSetStr} } case 498: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2554 + //line sql.y:2556 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal, Charset: string(yyDollar[3].bytes)} } case 499: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2558 + //line sql.y:2560 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} } case 500: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2562 + //line sql.y:2564 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} } case 501: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2566 + //line sql.y:2568 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} yyVAL.convertType.Length = yyDollar[2].LengthScaleOption.Length @@ -5677,169 +5679,169 @@ yydefault: } case 502: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2572 + //line sql.y:2574 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} } case 503: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2576 + //line sql.y:2578 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} } case 504: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2580 + //line sql.y:2582 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} } case 505: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2584 + //line sql.y:2586 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} } case 506: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2588 + //line sql.y:2590 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: 
yyDollar[2].optVal} } case 507: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2592 + //line sql.y:2594 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} } case 508: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2596 + //line sql.y:2598 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} } case 509: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2601 + //line sql.y:2603 { yyVAL.expr = nil } case 510: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2605 + //line sql.y:2607 { yyVAL.expr = yyDollar[1].expr } case 511: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2610 + //line sql.y:2612 { yyVAL.str = string("") } case 512: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2614 + //line sql.y:2616 { yyVAL.str = " separator '" + string(yyDollar[2].bytes) + "'" } case 513: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2620 + //line sql.y:2622 { yyVAL.whens = []*When{yyDollar[1].when} } case 514: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2624 + //line sql.y:2626 { yyVAL.whens = append(yyDollar[1].whens, yyDollar[2].when) } case 515: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:2630 + //line sql.y:2632 { yyVAL.when = &When{Cond: yyDollar[2].expr, Val: yyDollar[4].expr} } case 516: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2635 + //line sql.y:2637 { yyVAL.expr = nil } case 517: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2639 + //line sql.y:2641 { yyVAL.expr = yyDollar[2].expr } case 518: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2645 + //line sql.y:2647 { yyVAL.colName = &ColName{Name: yyDollar[1].colIdent} } case 519: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2649 + //line sql.y:2651 { yyVAL.colName = &ColName{Qualifier: TableName{Name: yyDollar[1].tableIdent}, Name: yyDollar[3].colIdent} } case 520: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:2653 + //line sql.y:2655 { yyVAL.colName = &ColName{Qualifier: TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent}, Name: yyDollar[5].colIdent} } 
case 521: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2659 + //line sql.y:2661 { yyVAL.expr = NewStrVal(yyDollar[1].bytes) } case 522: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2663 + //line sql.y:2665 { yyVAL.expr = NewHexVal(yyDollar[1].bytes) } case 523: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2667 + //line sql.y:2669 { yyVAL.expr = NewBitVal(yyDollar[1].bytes) } case 524: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2671 + //line sql.y:2673 { yyVAL.expr = NewIntVal(yyDollar[1].bytes) } case 525: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2675 + //line sql.y:2677 { yyVAL.expr = NewFloatVal(yyDollar[1].bytes) } case 526: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2679 + //line sql.y:2681 { yyVAL.expr = NewHexNum(yyDollar[1].bytes) } case 527: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2683 + //line sql.y:2685 { yyVAL.expr = NewValArg(yyDollar[1].bytes) } case 528: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2687 + //line sql.y:2689 { yyVAL.expr = &NullVal{} } case 529: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2693 + //line sql.y:2695 { // TODO(sougou): Deprecate this construct. 
if yyDollar[1].colIdent.Lowered() != "value" { @@ -5850,237 +5852,237 @@ yydefault: } case 530: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2702 + //line sql.y:2704 { yyVAL.expr = NewIntVal(yyDollar[1].bytes) } case 531: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2706 + //line sql.y:2708 { yyVAL.expr = NewValArg(yyDollar[1].bytes) } case 532: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2711 + //line sql.y:2713 { yyVAL.exprs = nil } case 533: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2715 + //line sql.y:2717 { yyVAL.exprs = yyDollar[3].exprs } case 534: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2720 + //line sql.y:2722 { yyVAL.expr = nil } case 535: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2724 + //line sql.y:2726 { yyVAL.expr = yyDollar[2].expr } case 536: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2729 + //line sql.y:2731 { yyVAL.orderBy = nil } case 537: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2733 + //line sql.y:2735 { yyVAL.orderBy = yyDollar[3].orderBy } case 538: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2739 + //line sql.y:2741 { yyVAL.orderBy = OrderBy{yyDollar[1].order} } case 539: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2743 + //line sql.y:2745 { yyVAL.orderBy = append(yyDollar[1].orderBy, yyDollar[3].order) } case 540: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2749 + //line sql.y:2751 { yyVAL.order = &Order{Expr: yyDollar[1].expr, Direction: yyDollar[2].str} } case 541: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2754 + //line sql.y:2756 { yyVAL.str = AscScr } case 542: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2758 + //line sql.y:2760 { yyVAL.str = AscScr } case 543: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2762 + //line sql.y:2764 { yyVAL.str = DescScr } case 544: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2767 + //line sql.y:2769 { yyVAL.limit = nil } case 545: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2771 + //line sql.y:2773 { yyVAL.limit = &Limit{Rowcount: 
yyDollar[2].expr} } case 546: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:2775 + //line sql.y:2777 { yyVAL.limit = &Limit{Offset: yyDollar[2].expr, Rowcount: yyDollar[4].expr} } case 547: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:2779 + //line sql.y:2781 { yyVAL.limit = &Limit{Offset: yyDollar[4].expr, Rowcount: yyDollar[2].expr} } case 548: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2784 + //line sql.y:2786 { yyVAL.str = "" } case 549: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2788 + //line sql.y:2790 { yyVAL.str = ForUpdateStr } case 550: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:2792 + //line sql.y:2794 { yyVAL.str = ShareModeStr } case 551: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2805 + //line sql.y:2807 { yyVAL.ins = &Insert{Rows: yyDollar[2].values} } case 552: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2809 + //line sql.y:2811 { yyVAL.ins = &Insert{Rows: yyDollar[1].selStmt} } case 553: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2813 + //line sql.y:2815 { // Drop the redundant parenthesis. yyVAL.ins = &Insert{Rows: yyDollar[2].selStmt} } case 554: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:2818 + //line sql.y:2820 { yyVAL.ins = &Insert{Columns: yyDollar[2].columns, Rows: yyDollar[5].values} } case 555: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:2822 + //line sql.y:2824 { yyVAL.ins = &Insert{Columns: yyDollar[2].columns, Rows: yyDollar[4].selStmt} } case 556: yyDollar = yyS[yypt-6 : yypt+1] - //line sql.y:2826 + //line sql.y:2828 { // Drop the redundant parenthesis. 
yyVAL.ins = &Insert{Columns: yyDollar[2].columns, Rows: yyDollar[5].selStmt} } case 557: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2833 + //line sql.y:2835 { yyVAL.columns = Columns{yyDollar[1].colIdent} } case 558: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2837 + //line sql.y:2839 { yyVAL.columns = Columns{yyDollar[3].colIdent} } case 559: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2841 + //line sql.y:2843 { yyVAL.columns = append(yyVAL.columns, yyDollar[3].colIdent) } case 560: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:2845 + //line sql.y:2847 { yyVAL.columns = append(yyVAL.columns, yyDollar[5].colIdent) } case 561: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2850 + //line sql.y:2852 { yyVAL.updateExprs = nil } case 562: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:2854 + //line sql.y:2856 { yyVAL.updateExprs = yyDollar[5].updateExprs } case 563: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2860 + //line sql.y:2862 { yyVAL.values = Values{yyDollar[1].valTuple} } case 564: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2864 + //line sql.y:2866 { yyVAL.values = append(yyDollar[1].values, yyDollar[3].valTuple) } case 565: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2870 + //line sql.y:2872 { yyVAL.valTuple = yyDollar[1].valTuple } case 566: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2874 + //line sql.y:2876 { yyVAL.valTuple = ValTuple{} } case 567: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2880 + //line sql.y:2882 { yyVAL.valTuple = ValTuple(yyDollar[2].exprs) } case 568: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2886 + //line sql.y:2888 { if len(yyDollar[1].valTuple) == 1 { yyVAL.expr = &ParenExpr{yyDollar[1].valTuple[0]} @@ -6090,271 +6092,271 @@ yydefault: } case 569: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2896 + //line sql.y:2898 { yyVAL.updateExprs = UpdateExprs{yyDollar[1].updateExpr} } case 570: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2900 + //line sql.y:2902 { yyVAL.updateExprs = 
append(yyDollar[1].updateExprs, yyDollar[3].updateExpr) } case 571: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2906 + //line sql.y:2908 { yyVAL.updateExpr = &UpdateExpr{Name: yyDollar[1].colName, Expr: yyDollar[3].expr} } case 572: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2912 + //line sql.y:2914 { yyVAL.setExprs = SetExprs{yyDollar[1].setExpr} } case 573: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2916 + //line sql.y:2918 { yyVAL.setExprs = append(yyDollar[1].setExprs, yyDollar[3].setExpr) } case 574: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2922 + //line sql.y:2924 { yyVAL.setExpr = &SetExpr{Name: yyDollar[1].colIdent, Expr: NewStrVal([]byte("on"))} } case 575: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2926 + //line sql.y:2928 { yyVAL.setExpr = &SetExpr{Name: yyDollar[1].colIdent, Expr: yyDollar[3].expr} } case 576: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2930 + //line sql.y:2932 { yyVAL.setExpr = &SetExpr{Name: NewColIdent(string(yyDollar[1].bytes)), Expr: yyDollar[2].expr} } case 578: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2937 + //line sql.y:2939 { yyVAL.bytes = []byte("charset") } case 580: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2944 + //line sql.y:2946 { yyVAL.expr = NewStrVal([]byte(yyDollar[1].colIdent.String())) } case 581: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2948 + //line sql.y:2950 { yyVAL.expr = NewStrVal(yyDollar[1].bytes) } case 582: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2952 + //line sql.y:2954 { yyVAL.expr = &Default{} } case 585: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2961 + //line sql.y:2963 { yyVAL.byt = 0 } case 586: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2963 + //line sql.y:2965 { yyVAL.byt = 1 } case 587: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2966 + //line sql.y:2968 { yyVAL.empty = struct{}{} } case 588: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2968 + //line sql.y:2970 { yyVAL.empty = struct{}{} } case 589: yyDollar = yyS[yypt-0 : yypt+1] - 
//line sql.y:2971 + //line sql.y:2973 { yyVAL.str = "" } case 590: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2973 + //line sql.y:2975 { yyVAL.str = IgnoreStr } case 591: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2977 + //line sql.y:2979 { yyVAL.empty = struct{}{} } case 592: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2979 + //line sql.y:2981 { yyVAL.empty = struct{}{} } case 593: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2981 + //line sql.y:2983 { yyVAL.empty = struct{}{} } case 594: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2983 + //line sql.y:2985 { yyVAL.empty = struct{}{} } case 595: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2985 + //line sql.y:2987 { yyVAL.empty = struct{}{} } case 596: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2987 + //line sql.y:2989 { yyVAL.empty = struct{}{} } case 597: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2989 + //line sql.y:2991 { yyVAL.empty = struct{}{} } case 598: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2991 + //line sql.y:2993 { yyVAL.empty = struct{}{} } case 599: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2993 + //line sql.y:2995 { yyVAL.empty = struct{}{} } case 600: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2995 + //line sql.y:2997 { yyVAL.empty = struct{}{} } case 601: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2998 + //line sql.y:3000 { yyVAL.empty = struct{}{} } case 602: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:3000 + //line sql.y:3002 { yyVAL.empty = struct{}{} } case 603: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:3002 + //line sql.y:3004 { yyVAL.empty = struct{}{} } case 604: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:3006 + //line sql.y:3008 { yyVAL.empty = struct{}{} } case 605: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:3008 + //line sql.y:3010 { yyVAL.empty = struct{}{} } case 606: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:3011 + //line sql.y:3013 { yyVAL.empty = struct{}{} } case 607: yyDollar = yyS[yypt-1 : yypt+1] - //line 
sql.y:3013 + //line sql.y:3015 { yyVAL.empty = struct{}{} } case 608: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:3015 + //line sql.y:3017 { yyVAL.empty = struct{}{} } case 609: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:3018 + //line sql.y:3020 { yyVAL.colIdent = ColIdent{} } case 610: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:3020 + //line sql.y:3022 { yyVAL.colIdent = yyDollar[2].colIdent } case 611: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:3024 + //line sql.y:3026 { yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) } case 612: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:3028 + //line sql.y:3030 { yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) } case 614: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:3035 + //line sql.y:3037 { yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) } case 615: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:3041 + //line sql.y:3043 { yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) } case 616: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:3045 + //line sql.y:3047 { yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) } case 618: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:3052 + //line sql.y:3054 { yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) } case 818: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:3277 + //line sql.y:3279 { if incNesting(yylex) { yylex.Error("max nesting level reached") @@ -6363,31 +6365,31 @@ yydefault: } case 819: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:3286 + //line sql.y:3288 { decNesting(yylex) } case 820: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:3291 + //line sql.y:3293 { forceEOF(yylex) } case 821: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:3296 + //line sql.y:3298 { forceEOF(yylex) } case 822: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:3300 + //line sql.y:3302 { forceEOF(yylex) } case 823: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:3304 + //line sql.y:3306 { forceEOF(yylex) } diff --git 
a/go/vt/sqlparser/sql.y b/go/vt/sqlparser/sql.y index 4e44a213e65..9914eb5b808 100644 --- a/go/vt/sqlparser/sql.y +++ b/go/vt/sqlparser/sql.y @@ -1519,7 +1519,9 @@ show_statement: } | SHOW COLLATION WHERE expression { - $$ = &Show{Type: string($2), ShowCollationFilterOpt: &$4} + // Cannot dereference $4 directly, or else the parser stack cannot be pooled. See yyParsePooled + showCollationFilterOpt := $4 + $$ = &Show{Type: string($2), ShowCollationFilterOpt: &showCollationFilterOpt} } | SHOW VINDEXES ON table_name { diff --git a/go/vt/vtctld/api.go b/go/vt/vtctld/api.go index 645a14bec31..7e67c965fcc 100644 --- a/go/vt/vtctld/api.go +++ b/go/vt/vtctld/api.go @@ -454,8 +454,7 @@ func initAPI(ctx context.Context, ts *topo.Server, actions *ActionRepository, re logstream := logutil.NewMemoryLogger() wr := wrangler.New(logstream, ts, tmClient) - // TODO(enisoc): Context for run command should be request-scoped. - err := vtctl.RunCommand(ctx, wr, args) + err := vtctl.RunCommand(r.Context(), wr, args) if err != nil { resp.Error = err.Error() } diff --git a/go/vt/vtgate/engine/insert.go b/go/vt/vtgate/engine/insert.go index 6a6e2d4f014..80696fcc2d5 100644 --- a/go/vt/vtgate/engine/insert.go +++ b/go/vt/vtgate/engine/insert.go @@ -451,9 +451,6 @@ func (ins *Insert) processPrimary(vcursor VCursor, vindexKeys [][]sqltypes.Value func (ins *Insert) processOwned(vcursor VCursor, vindexColumnsKeys [][]sqltypes.Value, colVindex *vindexes.ColumnVindex, bv map[string]*querypb.BindVariable, ksids [][]byte) error { for rowNum, rowColumnKeys := range vindexColumnsKeys { for colIdx, vindexKey := range rowColumnKeys { - if vindexKey.IsNull() { - return fmt.Errorf("value must be supplied for column %v", colVindex.Columns[colIdx]) - } col := colVindex.Columns[colIdx] bv[insertVarName(col, rowNum)] = sqltypes.ValueBindVariable(vindexKey) } @@ -476,9 +473,6 @@ func (ins *Insert) processOwnedIgnore(vcursor VCursor, vindexColumnsKeys [][]sql createKsids = append(createKsids, ksids[rowNum]) for 
colIdx, vindexKey := range rowColumnKeys { - if vindexKey.IsNull() { - return fmt.Errorf("value must be supplied for column %v", colVindex.Columns) - } rowKeys = append(rowKeys, vindexKey) col := colVindex.Columns[colIdx] bv[insertVarName(col, rowNum)] = sqltypes.ValueBindVariable(vindexKey) diff --git a/go/vt/vtgate/engine/insert_test.go b/go/vt/vtgate/engine/insert_test.go index 2ebf3f63e3a..5353588c73d 100644 --- a/go/vt/vtgate/engine/insert_test.go +++ b/go/vt/vtgate/engine/insert_test.go @@ -560,7 +560,7 @@ func TestInsertShardedOwned(t *testing.T) { }) } -func TestInsertShardedOwnedFail(t *testing.T) { +func TestInsertShardedOwnedWithNull(t *testing.T) { invschema := &vschemapb.SrvVSchema{ Keyspaces: map[string]*vschemapb.Keyspace{ "sharded": { @@ -626,11 +626,20 @@ func TestInsertShardedOwnedFail(t *testing.T) { } vc := &loggingVCursor{ - shards: []string{"-20", "20-"}, + shards: []string{"-20", "20-"}, + shardForKsid: []string{"20-", "-20", "20-"}, } - // No reverse map available for lookup. So, it will fail. 
_, err = ins.Execute(vc, map[string]*querypb.BindVariable{}, false) - expectError(t, "Execute", err, "execInsertSharded: getInsertShardedRoute: value must be supplied for column c3") + if err != nil { + t.Fatal(err) + } + vc.ExpectLog(t, []string{ + `Execute insert into lkp1(from, toc) values(:from0, :toc0) from0: toc0: type:VARBINARY ` + + `value:"\026k@\264J\272K\326" true`, + `ResolveDestinations sharded [value:"0" ] Destinations:DestinationKeyspaceID(166b40b44aba4bd6)`, + `ExecuteMultiShard sharded.20-: prefix mid1 suffix /* vtgate:: keyspace_id:166b40b44aba4bd6 */ ` + + `{_c30: _id0: type:INT64 value:"1" } true true`, + }) } func TestInsertShardedIgnoreOwned(t *testing.T) { @@ -824,7 +833,7 @@ func TestInsertShardedIgnoreOwned(t *testing.T) { }) } -func TestInsertShardedIgnoreOwnedFail(t *testing.T) { +func TestInsertShardedIgnoreOwnedWithNull(t *testing.T) { invschema := &vschemapb.SrvVSchema{ Keyspaces: map[string]*vschemapb.Keyspace{ "sharded": { @@ -889,11 +898,35 @@ func TestInsertShardedIgnoreOwnedFail(t *testing.T) { Suffix: " suffix", } + ksid0 := sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "to", + "varbinary", + ), + "\x00", + ) + //noresult := &sqltypes.Result{} vc := &loggingVCursor{ - shards: []string{"-20", "20-"}, + shards: []string{"-20", "20-"}, + shardForKsid: []string{"-20", "20-"}, + results: []*sqltypes.Result{ + ksid0, + ksid0, + ksid0, + }, } _, err = ins.Execute(vc, map[string]*querypb.BindVariable{}, false) - expectError(t, "Execute", err, "execInsertSharded: getInsertShardedRoute: value must be supplied for column [c3]") + if err != nil { + t.Fatal(err) + } + vc.ExpectLog(t, []string{ + `Execute insert ignore into lkp1(from, toc) values(:from0, :toc0) from0: toc0: type:VARBINARY ` + + `value:"\026k@\264J\272K\326" true`, + `Execute select from from lkp1 where from = :from and toc = :toc from: toc: type:VARBINARY value:"\026k@\264J\272K\326" true`, + `ResolveDestinations sharded [value:"0" ] 
Destinations:DestinationKeyspaceID(166b40b44aba4bd6)`, + `ExecuteMultiShard sharded.-20: prefix mid1 suffix /* vtgate:: keyspace_id:166b40b44aba4bd6 */ ` + + `{_c30: _id0: type:INT64 value:"1" } true true`, + }) } func TestInsertShardedUnownedVerify(t *testing.T) { diff --git a/go/vt/vtgate/planbuilder/update.go b/go/vt/vtgate/planbuilder/update.go index bbc870a176b..33c4a366d4b 100644 --- a/go/vt/vtgate/planbuilder/update.go +++ b/go/vt/vtgate/planbuilder/update.go @@ -172,7 +172,7 @@ func generateUpdateSubquery(upd *sqlparser.Update, table *vindexes.Table) string // it's holding. At the moment it only supports: StrVal, HexVal, IntVal, ValArg. // If a complex expression is provided (e.g set name = name + 1), the update will be rejected. func extractValueFromUpdate(upd *sqlparser.UpdateExpr) (pv sqltypes.PlanValue, err error) { - if !sqlparser.IsValue(upd.Expr) { + if !sqlparser.IsValue(upd.Expr) && !sqlparser.IsNull(upd.Expr) { err := vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: Only values are supported. Invalid update on column: %v", upd.Name.Name) return sqltypes.PlanValue{}, err } diff --git a/go/vt/vtgate/vindexes/lookup_hash_test.go b/go/vt/vtgate/vindexes/lookup_hash_test.go index ee8606704af..7cd9692d3e2 100644 --- a/go/vt/vtgate/vindexes/lookup_hash_test.go +++ b/go/vt/vtgate/vindexes/lookup_hash_test.go @@ -154,6 +154,39 @@ func TestLookupHashMapAbsent(t *testing.T) { } } +func TestLookupHashMapNull(t *testing.T) { + lookuphash := createLookup(t, "lookup_hash", false) + vc := &vcursor{numRows: 1} + + got, err := lookuphash.Map(vc, []sqltypes.Value{sqltypes.NULL}) + if err != nil { + t.Error(err) + } + want := []key.Destination{ + key.DestinationKeyspaceIDs([][]byte{ + []byte("\x16k@\xb4J\xbaK\xd6"), + }), + } + if !reflect.DeepEqual(got, want) { + t.Errorf("Map(): %#v, want %+v", got, want) + } + + // writeOnly true should return full keyranges. 
+ lookuphash = createLookup(t, "lookup_hash", true) + got, err = lookuphash.Map(vc, []sqltypes.Value{sqltypes.NULL}) + if err != nil { + t.Error(err) + } + want = []key.Destination{ + key.DestinationKeyRange{ + KeyRange: &topodatapb.KeyRange{}, + }, + } + if !reflect.DeepEqual(got, want) { + t.Errorf("Map(): %#v, want %+v", got, want) + } +} + func TestLookupHashVerify(t *testing.T) { lookuphash := createLookup(t, "lookup_hash", false) vc := &vcursor{numRows: 1} @@ -216,6 +249,15 @@ func TestLookupHashCreate(t *testing.T) { t.Errorf("vc.queries length: %v, want %v", got, want) } + vc.queries = nil + err = lookuphash.(Lookup).Create(vc, [][]sqltypes.Value{{sqltypes.NULL}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}, false /* ignoreMode */) + if err != nil { + t.Error(err) + } + if got, want := len(vc.queries), 1; got != want { + t.Errorf("vc.queries length: %v, want %v", got, want) + } + err = lookuphash.(Lookup).Create(vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, [][]byte{[]byte("bogus")}, false /* ignoreMode */) want := "lookup.Create.vunhash: invalid keyspace id: 626f677573" if err == nil || err.Error() != want { @@ -235,6 +277,15 @@ func TestLookupHashDelete(t *testing.T) { t.Errorf("vc.queries length: %v, want %v", got, want) } + vc.queries = nil + err = lookuphash.(Lookup).Delete(vc, [][]sqltypes.Value{{sqltypes.NULL}}, []byte("\x16k@\xb4J\xbaK\xd6")) + if err != nil { + t.Error(err) + } + if got, want := len(vc.queries), 1; got != want { + t.Errorf("vc.queries length: %v, want %v", got, want) + } + err = lookuphash.(Lookup).Delete(vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, []byte("bogus")) want := "lookup.Delete.vunhash: invalid keyspace id: 626f677573" if err == nil || err.Error() != want { @@ -253,4 +304,13 @@ func TestLookupHashUpdate(t *testing.T) { if got, want := len(vc.queries), 2; got != want { t.Errorf("vc.queries length: %v, want %v", got, want) } + + vc.queries = nil + err = lookuphash.(Lookup).Update(vc, []sqltypes.Value{sqltypes.NULL}, 
[]byte("\x16k@\xb4J\xbaK\xd6"), []sqltypes.Value{sqltypes.NewInt64(2)}) + if err != nil { + t.Error(err) + } + if got, want := len(vc.queries), 2; got != want { + t.Errorf("vc.queries length: %v, want %v", got, want) + } } diff --git a/go/vt/vttablet/endtoend/queries_test.go b/go/vt/vttablet/endtoend/queries_test.go index f321de17dab..56021b831da 100644 --- a/go/vt/vttablet/endtoend/queries_test.go +++ b/go/vt/vttablet/endtoend/queries_test.go @@ -1679,7 +1679,7 @@ func TestQueries(t *testing.T) { Query: "insert into vitess_misc(id, b, d, dt, t) select 2, b, d, dt, t from vitess_misc", Rewritten: []string{ "select 2, b, d, dt, t from vitess_misc limit 10001", - "insert into vitess_misc(id, b, d, dt, t) values (2, '\x01', '2012-01-01', '2012-01-01 15:45:45', '15:45:45') /* _stream vitess_misc (id ) (2 )", + "insert into vitess_misc(id, b, d, dt, t) values (2, b'00000001', '2012-01-01', '2012-01-01 15:45:45', '15:45:45') /* _stream vitess_misc (id ) (2 )", }, }, framework.TestQuery("commit"), @@ -1798,8 +1798,11 @@ func TestQueries(t *testing.T) { } func TestBitDefault(t *testing.T) { + // Default values for bit fields that are PKs are not supported + // Does not make sense to use a bit field as PK client := framework.NewClient() + expectedError := "bit default value: Execute failed: could not create default row for insert without row values: cannot convert value BIT(\"\\x05\") to AST (CallerID: dev)" testCases := []framework.Testable{ &framework.MultiCase{ Name: "bit default value", @@ -1824,8 +1827,9 @@ func TestBitDefault(t *testing.T) { }, } for _, tcase := range testCases { - if err := tcase.Test("", client); err != nil { - t.Error(err) + err := tcase.Test("", client) + if err == nil || err.Error() != expectedError { + t.Errorf("TestBitDefault result: \n%q\nexpecting\n%q", err.Error(), expectedError) } } } diff --git a/test/resharding.py b/test/resharding.py index 00359621dd0..8d1379855f0 100755 --- a/test/resharding.py +++ b/test/resharding.py @@ -202,6 +202,14 
@@ def _create_schema(self): parent_id bigint not null, primary key (parent_id, id), index by_msg (msg) +) Engine=InnoDB''' + create_table_bindata_template = '''create table %s( +custom_ksid_col ''' + t + ''' not null, +id bigint not null, +parent_id bigint not null, +msg bit(8), +primary key (parent_id, id), +index by_msg (msg) ) Engine=InnoDB''' create_view_template = ( 'create view %s' @@ -236,6 +244,10 @@ def _create_schema(self): '-sql=' + create_table_template % ('resharding2'), 'test_keyspace'], auto_log=True) + utils.run_vtctl(['ApplySchema', + '-sql=' + create_table_bindata_template % ('resharding3'), + 'test_keyspace'], + auto_log=True) utils.run_vtctl(['ApplySchema', '-sql=' + create_view_template % ('view1', 'resharding1'), 'test_keyspace'], @@ -259,6 +271,12 @@ def _insert_startup_values(self): 0x9000000000000000) self._insert_value(shard_1_master, 'resharding1', 3, 'msg3', 0xD000000000000000) + self._insert_value(shard_0_master, 'resharding3', 1, 'a', + 0x1000000000000000) + self._insert_value(shard_1_master, 'resharding3', 2, 'b', + 0x9000000000000000) + self._insert_value(shard_1_master, 'resharding3', 3, 'c', + 0xD000000000000000) if base_sharding.use_rbr: self._insert_value(shard_1_master, 'no_pk', 1, 'msg1', 0xA000000000000000) @@ -270,16 +288,22 @@ def _check_startup_values(self): # check first value is in the right shard for t in shard_2_tablets: self._check_value(t, 'resharding1', 2, 'msg2', 0x9000000000000000) + self._check_value(t, 'resharding3', 2, 'b', 0x9000000000000000) for t in shard_3_tablets: self._check_value(t, 'resharding1', 2, 'msg2', 0x9000000000000000, should_be_here=False) + self._check_value(t, 'resharding3', 2, 'b', 0x9000000000000000, + should_be_here=False) # check second value is in the right shard too for t in shard_2_tablets: self._check_value(t, 'resharding1', 3, 'msg3', 0xD000000000000000, should_be_here=False) + self._check_value(t, 'resharding3', 3, 'c', 0xD000000000000000, + should_be_here=False) for t in 
shard_3_tablets: self._check_value(t, 'resharding1', 3, 'msg3', 0xD000000000000000) + self._check_value(t, 'resharding3', 3, 'c', 0xD000000000000000) if base_sharding.use_rbr: for t in shard_2_tablets: @@ -314,6 +338,7 @@ def _exec_multi_shard_dmls(self): keyspace_ids = [0x9000000000000000, 0xD000000000000000, 0xE000000000000000] self._insert_multi_value(shard_1_master, 'resharding1', mids, msg_ids, keyspace_ids) + # This update targets two shards. self._exec_non_annotated_update(shard_1_master, 'resharding1', [10000011, 10000012], 'update1') @@ -326,12 +351,51 @@ def _exec_multi_shard_dmls(self): keyspace_ids = [0x9000000000000000, 0xD000000000000000, 0xE000000000000000] self._insert_multi_value(shard_1_master, 'resharding1', mids, msg_ids, keyspace_ids) + # This delete targets two shards. self._exec_non_annotated_delete(shard_1_master, 'resharding1', [10000014, 10000015]) + # This delete targets one shard. self._exec_non_annotated_delete(shard_1_master, 'resharding1', [10000016]) + # repeat DMLs for table with msg as bit(8) + mids = [10000001, 10000002, 10000003] + keyspace_ids = [0x9000000000000000, 0xD000000000000000, + 0xE000000000000000] + self._insert_multi_value(shard_1_master, 'resharding3', mids, + ['a','b','c'], keyspace_ids) + + mids = [10000004, 10000005] + keyspace_ids = [0xD000000000000000, 0xE000000000000000] + self._insert_multi_value(shard_1_master, 'resharding3', mids, + ['d', 'e'], keyspace_ids) + mids = [10000011, 10000012, 10000013] + keyspace_ids = [0x9000000000000000, 0xD000000000000000, 0xE000000000000000] + + self._insert_multi_value(shard_1_master, 'resharding3', mids, + ['k', 'l', 'm'], keyspace_ids) + + # This update targets two shards. + self._exec_non_annotated_update(shard_1_master, 'resharding3', + [10000011, 10000012], 'g') + + # This update targets one shard. 
+ self._exec_non_annotated_update(shard_1_master, 'resharding3', + [10000013], 'h') + + mids = [10000014, 10000015, 10000016] + keyspace_ids = [0x9000000000000000, 0xD000000000000000, 0xE000000000000000] + self._insert_multi_value(shard_1_master, 'resharding3', mids, + ['n', 'o', 'p'], keyspace_ids) + + # This delete targets two shards. + self._exec_non_annotated_delete(shard_1_master, 'resharding3', + [10000014, 10000015]) + + # This delete targets one shard. + self._exec_non_annotated_delete(shard_1_master, 'resharding3', [10000016]) + def _check_multi_shard_values(self): self._check_multi_dbs( [shard_2_master, shard_2_replica1, shard_2_replica2], @@ -396,6 +460,70 @@ def _check_multi_shard_values(self): 'resharding1', 10000016, 'msg-id10000016', 0xF000000000000000, should_be_here=False) + # checks for bit(8) table + self._check_multi_dbs( + [shard_2_master, shard_2_replica1, shard_2_replica2], + 'resharding3', 10000001, 'a', 0x9000000000000000) + self._check_multi_dbs( + [shard_2_master, shard_2_replica1, shard_2_replica2], + 'resharding3', 10000002, 'b', 0xD000000000000000, + should_be_here=False) + self._check_multi_dbs( + [shard_2_master, shard_2_replica1, shard_2_replica2], + 'resharding3', 10000003, 'c', 0xE000000000000000, + should_be_here=False) + self._check_multi_dbs( + [shard_3_master, shard_3_replica], + 'resharding3', 10000001, 'a', 0x9000000000000000, + should_be_here=False) + self._check_multi_dbs( + [shard_3_master, shard_3_replica], + 'resharding3', 10000002, 'b', 0xD000000000000000) + self._check_multi_dbs( + [shard_3_master, shard_3_replica], + 'resharding3', 10000003, 'c', 0xE000000000000000) + + self._check_multi_dbs( + [shard_2_master, shard_2_replica1, shard_2_replica2], + 'resharding3', 10000004, 'd', 0xD000000000000000, + should_be_here=False) + self._check_multi_dbs( + [shard_2_master, shard_2_replica1, shard_2_replica2], + 'resharding3', 10000005, 'e', 0xE000000000000000, + should_be_here=False) + self._check_multi_dbs( + 
[shard_3_master, shard_3_replica], + 'resharding3', 10000004, 'd', 0xD000000000000000) + self._check_multi_dbs( + [shard_3_master, shard_3_replica], + 'resharding3', 10000005, 'e', 0xE000000000000000) + + self._check_multi_dbs( + [shard_2_master, shard_2_replica1, shard_2_replica2], + 'resharding3', 10000011, 'g', 0x9000000000000000) + self._check_multi_dbs( + [shard_3_master, shard_3_replica], + 'resharding3', 10000012, 'g', 0xD000000000000000) + self._check_multi_dbs( + [shard_3_master, shard_3_replica], + 'resharding3', 10000013, 'h', 0xE000000000000000) + + self._check_multi_dbs( + [shard_2_master, shard_2_replica1, shard_2_replica2, + shard_3_master, shard_3_replica], + 'resharding3', 10000014, 'n', 0x9000000000000000, + should_be_here=False) + self._check_multi_dbs( + [shard_2_master, shard_2_replica1, shard_2_replica2, + shard_3_master, shard_3_replica], + 'resharding3', 10000015, 'o', 0xD000000000000000, + should_be_here=False) + self._check_multi_dbs( + [shard_2_master, shard_2_replica1, shard_2_replica2, + shard_3_master, shard_3_replica], + 'resharding3', 10000016, 'p', 0xF000000000000000, + should_be_here=False) + # _check_multi_dbs checks the row in multiple dbs. def _check_multi_dbs(self, dblist, table, mid, msg, keyspace_id, should_be_here=True): @@ -774,11 +902,11 @@ def test_resharding(self): # are smaller. In the second shard, we submitted statements # that affect more than one keyspace id. These will result # in two queries with RBR. So the count there is higher. 
- self.check_running_binlog_player(shard_2_master, 4018, 2008) - self.check_running_binlog_player(shard_3_master, 4028, 2008) + self.check_running_binlog_player(shard_2_master, 4036, 2016) + self.check_running_binlog_player(shard_3_master, 4056, 2016) else: - self.check_running_binlog_player(shard_2_master, 4022, 2008) - self.check_running_binlog_player(shard_3_master, 4024, 2008) + self.check_running_binlog_player(shard_2_master, 4044, 2016) + self.check_running_binlog_player(shard_3_master, 4048, 2016) # start a thread to insert data into shard_1 in the background # with current time, and monitor the delay